Merge "drm/msm/sde: Fix gamma correction table indexing" into msm-4.9
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt b/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
index 2347477..8a3e704 100644
--- a/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
@@ -25,8 +25,9 @@
 	Value type: <stringlist>
 	Definition: Address names. Must be "osm_l3_base", "osm_pwrcl_base",
 		    "osm_perfcl_base", "l3_pll", "pwrcl_pll", "perfcl_pll",
-		    "l3_sequencer", "pwrcl_sequencer", "perfcl_sequencer".
-		    Optionally, "l3_efuse", "pwrcl_efuse", "perfcl_efuse".
+		    "l3_sequencer", "pwrcl_sequencer", or "perfcl_sequencer".
+		    Optionally, "l3_efuse", "pwrcl_efuse", "perfcl_efuse",
+		    "pwrcl_acd", "perfcl_acd", "l3_acd".
 		    Must be specified in the same order as the corresponding
 		    addresses are specified in the reg property.
 
@@ -328,6 +329,77 @@
 	Definition: Contains the addresses of the RAILx_CLKDOMy_PLL_MIN_FREQ
 		    registers for the three clock domains.
 
+- qcom,acdtd-val
+	Usage:      required if pwrcl_acd, perfcl_acd or l3_acd registers are
+		    specified
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the values to program to the ACD
+		    Tunable-Length Delay register for the L3, power and
+		    performance clusters.
+
+- qcom,acdcr-val
+	Usage:      required if pwrcl_acd, perfcl_acd or l3_acd registers are
+		    specified
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the values for the ACD control register
+		    for the L3, power and performance clusters.
+
+- qcom,acdsscr-val
+	Usage:      required if pwrcl_acd, perfcl_acd or l3_acd registers are
+		    specified
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the values for the ACD Soft Start Control
+		    register for the L3, power and performance clusters.
+
+- qcom,acdextint0-val
+	Usage:      required if pwrcl_acd, perfcl_acd or l3_acd registers are
+		    specified
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the initial values for the ACD
+		    external interface configuration register for the L3, power
+		    and performance clusters.
+
+- qcom,acdextint1-val
+	Usage:      required if pwrcl_acd, perfcl_acd or l3_acd registers are
+		    specified
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the final values for the ACD
+		    external interface configuration register for the L3, power
+		    and performance clusters.
+
+- qcom,acdautoxfer-val
+	Usage:      required if pwrcl_acd, perfcl_acd or l3_acd registers are
+		    specified
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the values for the ACD auto transfer
+		    control register for the L3, power and performance clusters.
+
+- qcom,acdavg-init
+	Usage:      optional if pwrcl_acd, perfcl_acd or l3_acd registers are
+		    specified
+	Value type: <prop-encoded-array>
+	Definition: Array which defines if the AVG feature for ACD should be
+		    initialized for the L3, power and performance clusters.
+		    Valid values are 0 or 1.
+
+- qcom,acdavgcfg0-val
+	Usage:      required if qcom,acdavg-init is true for an ACD clock domain
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the values for the ACD AVG CFG0
+		    registers for the L3, power and performance clusters.
+
+- qcom,acdavgcfg1-val
+	Usage:      required if qcom,acdavg-init is true for an ACD clock domain
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the values for the ACD AVG CFG1
+		    registers for the L3, power and performance clusters.
+
+- qcom,acdavgcfg2-val
+	Usage:      required if qcom,acdavg-init is true for an ACD clock domain
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the values for the ACD AVG CFG2
+		    registers for the L3, power and performance clusters.
+
 - clock-names
 	Usage:      required
 	Value type: <string>
@@ -349,11 +421,27 @@
 			<0x178b0000 0x1000>,
 			<0x17d42400 0x0c00>,
 			<0x17d44400 0x0c00>,
-			<0x17d46c00 0x0c00>;
+			<0x17d46c00 0x0c00>,
+			<0x17930000 0x10000>,
+			<0x17920000 0x10000>,
+			<0x17910000 0x10000>;
 		reg-names = "osm_l3_base", "osm_pwrcl_base", "osm_perfcl_base",
 			"l3_pll", "pwrcl_pll", "perfcl_pll",
 			"l3_sequencer", "pwrcl_sequencer",
-			"perfcl_sequencer";
+			"perfcl_sequencer", "l3_acd", "pwrcl_acd",
+			"perfcl_acd";
+
+		/* ACD configurations for L3, Silver, and Gold clusters */
+		qcom,acdtd-val = <0x0000b411 0x0000b411 0x0000b411>;
+		qcom,acdcr-val = <0x002c5ffd 0x002c5ffd 0x002c5ffd>;
+		qcom,acdsscr-val = <0x00000901 0x00000901 0x00000901>;
+		qcom,acdextint0-val = <0x2cf9ae8 0x2cf9ae8 0x2cf9ae8>;
+		qcom,acdextint1-val = <0x2cf9afe 0x2cf9afe 0x2cf9afe>;
+		qcom,acdautoxfer-val = <0x00000015 0x00000015 0x00000015>;
+		qcom,acdavgcfg2-val = <0x0 0x56a38822 0x56a38822>;
+		qcom,acdavgcfg1-val = <0x0 0x27104e20 0x27104e20>;
+		qcom,acdavgcfg0-val = <0x0 0xa08007a1 0xa08007a1>;
+		qcom,acdavg-init = <0 1 1>;
 
 		vdd-l3-supply = <&apc0_l3_vreg>;
 		vdd-pwrcl-supply = <&apc0_pwrcl_vreg>;
diff --git a/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt b/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
index 06b219a..3c8a79a 100644
--- a/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
+++ b/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
@@ -28,6 +28,7 @@
   - qcom,use-sw-aes-ccm-algo : optional, indicates if use SW aes-ccm algorithm.
   - qcom,clk-mgmt-sus-res : optional, indicate if the ce clocks need to be disabled/enabled in suspend/resume function.
   - qcom,support-core-clk-only : optional, indicates if the HW supports single crypto core clk.
+  - qcom,request-bw-before-clk : optional, indicates if the HW supports bandwidth requests prior to clock controls.
   - qcom,bsm-ee : optional, indicate the BAM EE value, changes from target to target.Default value is 1 if not specified.
 
   - qcom,ce-opp-freq: optional, indicates the CE operating frequency in Hz,
diff --git a/Documentation/devicetree/bindings/devfreq/bimc-bwmon.txt b/Documentation/devicetree/bindings/devfreq/bimc-bwmon.txt
index c77f84b..29b7334 100644
--- a/Documentation/devicetree/bindings/devfreq/bimc-bwmon.txt
+++ b/Documentation/devicetree/bindings/devfreq/bimc-bwmon.txt
@@ -6,17 +6,25 @@
 
 Required properties:
 - compatible:		Must be "qcom,bimc-bwmon", "qcom,bimc-bwmon2",
-			"qcom,bimc-bwmon3" or "qcom,bimc-bwmon4"
+			"qcom,bimc-bwmon3", "qcom,bimc-bwmon4" or
+			"qcom,bimc-bwmon5"
 - reg:			Pairs of physical base addresses and region sizes of
 			memory mapped registers.
 - reg-names:		Names of the bases for the above registers. Expected
 			bases are: "base", "global_base"
+			"global_base" should not be specified for
+			"qcom,bimc-bwmon5" compatibles.
 - interrupts:		Lists the threshold IRQ.
 - qcom,mport:		The hardware master port that this device can monitor
 - qcom,target-dev:	The DT device that corresponds to this master port
 - qcom,hw-timer-hz:	Hardware sampling rate in Hz. This field must be
 			specified for "qcom,bimc-bwmon4"
 
+Optional properties:
+- qcom,byte-mid-match:	Byte count MID match value
+- qcom,byte-mid-mask:	Byte count MID mask value
+- qcom,count-unit:	Number of bytes monitor counts in
+
 Example:
 	qcom,cpu-bwmon {
 		compatible = "qcom,bimc-bwmon";
@@ -26,4 +34,7 @@
 		qcom,mport = <0>;
 		qcom,target-dev = <&cpubw>;
 		qcom,hw-timer-hz = <19200000>;
+		qcom,byte-mid-match = <0x1e00>;
+		qcom,byte-mid-mask = <0x1e00>;
+		qcom,count-unit = <0x100000>;
 	};
diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt
index a3ef34c..c766df8 100644
--- a/Documentation/devicetree/bindings/display/msm/sde.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde.txt
@@ -274,6 +274,29 @@
 				applied in scenarios where panel interface can
 				be more tolerant to memory latency such as
 				command mode panels.
+- qcom,sde-core-ib-ff:		A string entry indicating the fudge factor for
+				core ib calculation.
+- qcom,sde-core-clk-ff:		A string entry indicating the fudge factor for
+				core clock calculation.
+- qcom,sde-comp-ratio-rt:	A string entry indicating the compression ratio
+				for each supported compressed format on realtime interface.
+				The string is composed of one or more of
+				<fourcc code>/<vendor code>/<modifier>/<compression ratio>
+				separated with spaces.
+- qcom,sde-comp-ratio-nrt:	A string entry indicating the compression ratio
+				for each supported compressed format on non-realtime interface.
+				The string is composed of one or more of
+				<fourcc code>/<vendor code>/<modifier>/<compression ratio>
+				separated with spaces.
+- qcom,sde-undersized-prefill-lines:	A u32 value indicates the size of undersized prefill in lines.
+- qcom,sde-xtra-prefill-lines:	A u32 value indicates the extra prefill in lines.
+- qcom,sde-dest-scale-prefill-lines:	A u32 value indicates the latency of destination scaler in lines.
+- qcom,sde-macrotile-prefill-lines:	A u32 value indicates the latency of macrotile in lines.
+- qcom,sde-yuv-nv12-prefill-lines:	A u32 value indicates the latency of yuv/nv12 in lines.
+- qcom,sde-linear-prefill-lines:	A u32 value indicates the latency of linear in lines.
+- qcom,sde-downscaling-prefill-lines:	A u32 value indicates the latency of downscaling in lines.
+- qcom,sde-max-per-pipe-bw-kbps:	Array of u32 value indicates the max per pipe bandwidth in Kbps.
+- qcom,sde-amortizable-threshold:	This value indicates the min for traffic shaping in lines.
 
 Bus Scaling Subnodes:
 - qcom,sde-reg-bus:		Property to provide Bus scaling for register access for
@@ -462,6 +485,21 @@
     qcom,sde-max-bw-high-kbps = <9000000>;
     qcom,sde-max-bw-low-kbps = <9000000>;
 
+    qcom,sde-core-ib-ff = "1.1";
+    qcom,sde-core-clk-ff = "1.0";
+    qcom,sde-comp-ratio-rt = "NV12/5/1/1.1 AB24/5/1/1.2 XB24/5/1/1.3";
+    qcom,sde-comp-ratio-nrt = "NV12/5/1/1.1 AB24/5/1/1.2 XB24/5/1/1.3";
+    qcom,sde-undersized-prefill-lines = <4>;
+    qcom,sde-xtra-prefill-lines = <5>;
+    qcom,sde-dest-scale-prefill-lines = <6>;
+    qcom,sde-macrotile-prefill-lines = <7>;
+    qcom,sde-yuv-nv12-prefill-lines = <8>;
+    qcom,sde-linear-prefill-lines = <9>;
+    qcom,sde-downscaling-prefill-lines = <10>;
+    qcom,sde-max-per-pipe-bw-kbps = <2400000 2400000 2400000 2400000
+        2400000 2400000 2400000 2400000>;
+    qcom,sde-amortizable-threshold = <11>;
+
     qcom,sde-sspp-vig-blocks {
         qcom,sde-vig-csc-off = <0x320>;
         qcom,sde-vig-qseed-off = <0x200>;
diff --git a/Documentation/devicetree/bindings/dma/qcom_gpi.txt b/Documentation/devicetree/bindings/dma/qcom_gpi.txt
new file mode 100644
index 0000000..3b3b713
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/qcom_gpi.txt
@@ -0,0 +1,84 @@
+Qualcomm Technologies Inc GPI DMA controller
+
+QCOM GPI DMA controller provides DMA capabilities for
+peripheral buses such as I2C, UART, and SPI.
+
+==============
+Node Structure
+==============
+
+Main node properties:
+
+- #dma-cells
+  Usage: required
+  Value type: <u32>
+  Definition: Number of parameters client will provide.  Must be set to 6.
+	1st parameter: gpii index
+	2nd parameter: channel index
+	3rd parameter: serial engine index
+	4th parameter: bus protocol, 1 for SPI, 2 for UART, 3 for I2C
+	5th parameter: channel ring length in transfer ring elements
+	6th parameter: event processing priority, set to 0 for lowest latency
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: "qcom,gpi-dma"
+
+- reg
+  Usage: required
+  Value type: Array of <u32>
+  Definition: register address space location and size
+
+- reg-names
+  Usage: required
+  Value type: <string>
+  Definition: register space name, must be "gpi-top"
+
+- interrupts
+  Usage: required
+  Value type: Array of <u32>
+  Definition: Array of tuples which describe interrupt line for each GPII
+	instance.
+
+- qcom,max-num-gpii
+  Usage: required
+  Value type: <u32>
+  Definition: Total number of GPII instances available for this controller.
+
+- qcom,gpii-mask
+  Usage: required
+  Value type: <u32>
+  Definition: Bitmap of supported GPII instances in hlos.
+
+- qcom,ev-factor
+  Usage: required
+  Value type: <u32>
+  Definition: Event ring transfer size compared to channel transfer ring. Event
+	ring length = ev-factor * transfer ring size
+
+- iommus
+  Usage: required
+  Value type: <phandle u32 u32>
+  Definition: phandle for apps smmu controller and SID, and mask
+	for the controller.  For more detail please check binding
+	documentation arm,smmu.txt
+
+========
+Example:
+========
+gpi_dma0: qcom,gpi-dma@0x800000 {
+	#dma-cells = <6>;
+	compatible = "qcom,gpi-dma";
+	reg = <0x800000 0x60000>;
+	reg-names = "gpi-top";
+	interrupts = <0 244 0>, <0 245 0>, <0 246 0>, <0 247 0>,
+                <0 248 0>, <0 249 0>, <0 250 0>, <0 251 0>,
+                <0 252 0>, <0 253 0>, <0 254 0>, <0 255 0>,
+                <0 256 0>;
+	qcom,max-num-gpii = <13>;
+	qcom,gpii-mask = <0xfa>;
+	qcom,ev-factor = <2>;
+	iommus = <&apps_smmu 0x0016 0x0>;
+	status = "ok";
+};
diff --git a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
index ffba081..669997c 100644
--- a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
@@ -193,6 +193,8 @@
 					"dsi_cmd_mode" = enable command mode.
 - qcom,5v-boost-gpio:			Specifies the panel gpio for display 5v boost.
 - qcom,mdss-dsi-te-check-enable:	Boolean to enable Tear Check configuration.
+- qcom,mdss-dsi-te-using-wd:		Boolean entry enables the watchdog timer support to generate the vsync signal
+					for command mode panel. By default, panel TE will be used to generate the vsync.
 - qcom,mdss-dsi-te-using-te-pin:	Boolean to specify whether using hardware vsync.
 - qcom,mdss-dsi-te-pin-select:		Specifies TE operating mode.
 					0 = TE through embedded dcs command
@@ -331,8 +333,13 @@
 					as below:
 					--> Reset GPIO value
 					--> Sleep value (in ms)
-- qcom,partial-update-enabled:		Boolean used to enable partial
+- qcom,partial-update-enabled:		String used to enable partial
 					panel update for command mode panels.
+					"none": partial update is disabled
+					"single_roi": default enable mode, only single roi is sent to panel
+					"dual_roi": two rois are merged into one big roi. Panel ddic should be able
+					to process two roi's along with the DCS command to send two rois.
+					disabled if property is not specified.
 - qcom,mdss-dsi-horizontal-line-idle:	List of width ranges (EC - SC) in pixels indicating
 					additional idle time in dsi clock cycles that is needed
 					to compensate for smaller line width.
@@ -568,6 +575,7 @@
 		qcom,mdss-dsi-interleave-mode = <0>;
 		qcom,mdss-dsi-panel-type = "dsi_video_mode";
 		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-wd;
 		qcom,mdss-dsi-te-using-te-pin;
 		qcom,mdss-dsi-te-dcs-command = <1>;
 		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
@@ -627,7 +635,7 @@
 		qcom,mdss-tear-check-rd-ptr-trigger-intr = <1281>;
 		qcom,mdss-tear-check-frame-rate = <6000>;
 		qcom,mdss-dsi-reset-sequence = <1 2>, <0 10>, <1 10>;
-		qcom,partial-update-enabled;
+		qcom,partial-update-enabled = "single_roi";
 		qcom,dcs-cmd-by-left;
 		qcom,mdss-dsi-lp11-init;
 		qcom,mdss-dsi-init-delay-us = <100>;
diff --git a/Documentation/devicetree/bindings/extcon/extcon-gpio.txt b/Documentation/devicetree/bindings/extcon/extcon-gpio.txt
new file mode 100644
index 0000000..1b3a1d2
--- /dev/null
+++ b/Documentation/devicetree/bindings/extcon/extcon-gpio.txt
@@ -0,0 +1,32 @@
+GPIO Extcon device
+
+This is a virtual device used to generate USB cable states from the USB ID pin
+connected to a GPIO pin.
+
+Required properties:
+- compatible: Should be "extcon-gpio"
+- extcon-id: The unique id of specific external connector.
+	     Valid range is 0 (EXTCON_NONE) to 63 (EXTCON_NUM).
+	     Refer include/linux/extcon.h for details.
+- gpio: Specify GPIO (see gpio binding)
+- debounce-ms: Debounce time for GPIO IRQ in ms
+- irq-flags: interrupt flags (edge/level). Refer to "include/dt-bindings/interrupt-controller/irq.h"
+- pinctrl-names, pinctrl-0, pinctrl-1,.. pinctrl-n: Refer to "Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt"
+  for these optional properties
+
+Example:
+	extcon_storage_cd {
+		compatible = "extcon-gpio";
+		extcon-id = <62>; /* EXTCON_MECHANICAL */
+		gpio = <&tlmm 126 GPIO_ACTIVE_LOW>;
+		debounce-ms = <200>;
+		irq-flags = <IRQ_TYPE_EDGE_BOTH>;
+	};
+
+	&ufshc_card {
+		extcon = <&extcon_storage_cd>;
+	};
+
+	&sd_card {
+		extcon = <&extcon_storage_cd>;
+	};
diff --git a/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt b/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt
index a244d6c..51abe56 100644
--- a/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt
+++ b/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt
@@ -12,6 +12,7 @@
    or when entering sleep state.
  - #address-cells: Should be <1> Address cells for i2c device address
  - #size-cells: Should be <0> as i2c addresses have no size component
+ - qcom,wrapper-core: Wrapper QUPv3 core containing this I2C controller.
 
 Child nodes should conform to i2c bus binding.
 
@@ -30,4 +31,5 @@
 	pinctrl-1 = <&qup_1_i2c_5_sleep>;
 	#address-cells = <1>;
 	#size-cells = <0>;
+	qcom,wrapper-core = <&qupv3_0>;
 };
diff --git a/Documentation/devicetree/bindings/input/qpnp-power-on.txt b/Documentation/devicetree/bindings/input/qpnp-power-on.txt
index a596aa1..c2550e6 100644
--- a/Documentation/devicetree/bindings/input/qpnp-power-on.txt
+++ b/Documentation/devicetree/bindings/input/qpnp-power-on.txt
@@ -82,6 +82,8 @@
 - qcom,shutdown-poweroff-type	Same description as qcom,warm-reset-poweroff-
 				type but this applies for the system shutdown
 				case.
+- qcom,kpdpwr-sw-debounce	Boolean property to enable the debounce logic
+				on the KPDPWR_N rising edge.
 
 
 All the below properties are in the sub-node section (properties of the child
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt b/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt
index 8e2bdee..c9aaa00 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt
@@ -165,6 +165,15 @@
   should contain phandle of respective ir-cut node
 - qcom,special-support-sensors: if only some special sensors are supported
   on this board, add sensor name in this property.
+- qcom,clock-rates: clock rate in Hz.
+- qcom,clock-cntl-support: Says whether clock control support is present or not
+- qcom,clock-control: The valid fields are "NO_SET_RATE", "INIT_RATE" and
+  "SET_RATE". "NO_SET_RATE" the corresponding clock is enabled without setting
+  the rate assuming some other driver has already set it to appropriate rate.
+  "INIT_RATE" clock rate is not queried assuming some other driver has set
+  the clock rate and ispif will set the clock to this rate.
+  "SET_RATE" clock is enabled and the rate is set to the value specified
+  in the property qcom,clock-rates.
 
 * Qualcomm Technologies, Inc. MSM ACTUATOR
 
@@ -205,6 +214,7 @@
   sensor
   - 0 -> MASTER 0
   - 1 -> MASTER 1
+- qcom,clock-rates: clock rate in Hz.
 
 Optional properties:
 - qcom,cam-vreg-name : should contain names of all regulators needed by this
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-cdm.txt b/Documentation/devicetree/bindings/media/video/msm-cam-cdm.txt
new file mode 100644
index 0000000..d62910a
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-cdm.txt
@@ -0,0 +1,147 @@
+* Qualcomm Technologies, Inc. MSM Camera CDM
+
+CDM (Camera Data Mover) is a module intended to provide a means for fast
+programming of camera registers and lookup tables.
+
+=======================
+Required Node Structure
+=======================
+CDM Interface node takes care of the handling of HW nodes and provides an
+interface for camera clients.
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,cam-cdm-intf".
+
+- label
+  Usage: required
+  Value type: <string>
+  Definition: Should be "cam-cdm-intf".
+
+- num-hw-cdm
+  Usage: required
+  Value type: <u32>
+  Definition: Number of supported HW blocks.
+
+- cdm-client-names
+  Usage: required
+  Value type: <string>
+  Definition: List of Clients supported by CDM interface.
+
+Example:
+	qcom,cam-cdm-intf {
+		compatible = "qcom,cam-cdm-intf";
+		label = "cam-cdm-intf";
+		num-hw-cdm = <1>;
+		cdm-client-names = "vfe",
+			"jpeg-dma",
+			"jpeg",
+			"fd";
+	};
+
+=======================
+Required Node Structure
+=======================
+CDM HW node provides an interface for camera clients through
+the CDM interface node.
+
+- cell-index
+  Usage: required
+  Value type: <u32>
+  Definition: Node instance number.
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,cam-cdm-intf".
+
+- label
+  Usage: required
+  Value type: <string>
+  Definition: Should be "cpas-cdm".
+
+- reg-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the register resources.
+
+- reg
+  Usage: required
+  Value type: <u32>
+  Definition: Register values.
+
+- reg-cam-base
+  Usage: required
+  Value type: <u32>
+  Definition: Offset of the register space compared to
+              the Camera base register space.
+
+- interrupt-names
+  Usage: optional
+  Value type: <string>
+  Definition: Name of the interrupt.
+
+- interrupts
+  Usage: optional
+  Value type: <u32>
+  Definition: Interrupt associated with CDM HW.
+
+- regulator-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the regulator resources for CDM HW.
+
+- camss-supply
+  Usage: required
+  Value type: <phandle>
+  Definition: Regulator reference corresponding to the names listed
+              in "regulator-names".
+
+- clock-names
+  Usage: required
+  Value type: <string>
+  Definition: List of clock names required for CDM HW.
+
+- clocks
+  Usage: required
+  Value type: <phandle>
+  Definition: List of clocks used for CDM HW.
+
+- clock-rates
+  Usage: required
+  Value type: <u32>
+  Definition: List of clocks rates.
+
+- cdm-client-names
+  Usage: required
+  Value type: <string>
+  Definition: List of Clients supported by CDM HW node.
+
+Example:
+	qcom,cpas-cdm0@ac48000 {
+		cell-index = <0>;
+		compatible = "qcom,cam170-cpas-cdm0";
+		label = "cpas-cdm0";
+		reg = <0xac48000 0x1000>;
+		reg-names = "cpas-cdm";
+		interrupts = <0 461 0>;
+		interrupt-names = "cpas-cdm";
+		regulator-names = "camss";
+		camss-supply = <&titan_top_gdsc>;
+		clock-names = "soc_ahb_clk",
+			"titan_top_ahb_clk",
+			"cam_axi_clk",
+			"camcc_slow_ahb_clk_src",
+			"cpas_top_ahb_clk",
+			"camnoc_axi_clk";
+		clocks = <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
+		qcom,clock-rates = <0 80000000 80000000 80000000 80000000 80000000>;
+		cdm-client-names = "ife";
+		status = "ok";
+	};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt b/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt
new file mode 100644
index 0000000..a61bab3
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt
@@ -0,0 +1,282 @@
+* Qualcomm Technologies, Inc. MSM Camera CPAS
+
+The MSM camera CPAS device provides dependency definitions for
+enabling Camera CPAS HW and provides the Client definitions
+for all HW blocks that use CPAS driver for BW voting. These
+definitions consist of various properties that define the list
+of clients supported, AHB, AXI master-slave IDs used for BW
+voting.
+
+=======================
+Required Node Structure
+=======================
+The camera CPAS device must be described in four levels of device nodes. The
+first level describes the overall CPAS device. Within it, second level nodes
+describe the list of AXI ports that map different clients for AXI BW voting.
+Third level nodes describe the details of each AXI port having name, mnoc,
+camnoc AXI Bus information. Fourth level nodes describe the details of Bus
+master-slave IDs, ab, ib values for mnoc, camnoc bus interface.
+
+==================================
+First Level Node - CAM CPAS device
+==================================
+- cell-index
+  Usage: required
+  Value type: <u32>
+  Definition: Node instance number.
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,cam-cpas".
+
+- label
+  Usage: required
+  Value type: <string>
+  Definition: Should be "cpas".
+
+- arch-compat
+  Usage: required
+  Value type: <string>
+  Definition: Should be "cpas_top" or "camss_top".
+
+- reg-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the register resources.
+
+- reg
+  Usage: required
+  Value type: <u32>
+  Definition: Register values.
+
+- reg-cam-base
+  Usage: required
+  Value type: <u32>
+  Definition: Offset of the register space compared to
+              the Camera base register space.
+
+- interrupt-names
+  Usage: optional
+  Value type: <string>
+  Definition: Name of the interrupt.
+
+- interrupts
+  Usage: optional
+  Value type: <u32>
+  Definition: Interrupt associated with CAMNOC HW.
+
+- regulator-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the regulator resources for CPAS HW.
+
+- camss-vdd-supply
+  Usage: required
+  Value type: <phandle>
+  Definition: Regulator reference corresponding to the names listed
+              in "regulator-names".
+
+- clock-names
+  Usage: required
+  Value type: <string>
+  Definition: List of clock names required for CPAS HW.
+
+- clocks
+  Usage: required
+  Value type: <phandle>
+  Definition: List of clocks used for CPAS HW.
+
+- src-clock-name
+  Usage: required
+  Value type: <string>
+  Definition: Source clock name.
+
+- clock-rates
+  Usage: required
+  Value type: <u32>
+  Definition: List of clocks rates.
+
+- qcom,msm-bus,name
+- qcom,msm-bus,num-cases
+- qcom,msm-bus,num-paths
+- qcom,msm-bus,vectors-KBps
+  Please refer Documentation/devicetree/bindings/arm/msm/msm_bus.txt
+  for the properties above.
+
+- client-id-based
+  Usage: required
+  Value type: <empty>
+  Definition: Bool property specifying whether CPAS clients are ID based.
+
+- client-names
+  Usage: required
+  Value type: <string>
+  Definition: List of Clients supported by CPAS.
+
+- client-axi-port-names
+  Usage: required
+  Value type: <string>
+  Definition: AXI Port name for each client.
+
+- client-bus-camnoc-based
+  Usage: required
+  Value type: <empty>
+  Definition: Bool property specifying whether Clients are connected
+              through CAMNOC for AXI access.
+
+===================================================================
+Third Level Node - CAM AXI Port properties
+===================================================================
+- qcom,axi-port-name
+  Usage: required
+  Value type: <string>
+  Definition: Name of the AXI Port.
+
+===================================================================
+Fourth Level Node - CAM AXI Bus properties
+===================================================================
+
+- qcom,msm-bus,name
+- qcom,msm-bus,num-cases
+- qcom,msm-bus,num-paths
+- qcom,msm-bus,vectors-KBps
+  Please refer Documentation/devicetree/bindings/arm/msm/msm_bus.txt
+  for the properties above.
+
+- qcom,msm-bus-vector-dyn-vote
+  Usage: optional
+  Value type: <empty>
+  Definition: Bool property specifying whether this bus client
+              is dynamic vote based.
+
+Example:
+
+	qcom,cam-cpas@ac40000 {
+		cell-index = <0>;
+		compatible = "qcom,cam-cpas";
+		label = "cpas";
+		arch-compat = "cpas_top";
+		status = "ok";
+		reg-names = "cam_cpas_top", "cam_camnoc";
+		reg = <0xac40000 0x1000>,
+			<0xac42000 0x5000>;
+		reg-cam-base = <0x40000 0x42000>;
+		interrupt-names = "cpas_camnoc";
+		interrupts = <0 459 0>;
+		regulator-names = "camss-vdd";
+		camss-vdd-supply = <&titan_top_gdsc>;
+		clock-names = "gcc_ahb_clk",
+			"gcc_axi_clk",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"slow_ahb_clk_src",
+			"camnoc_axi_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
+		src-clock-name = "slow_ahb_clk_src";
+		clock-rates = <0 0 0 0 80000000 0>;
+		qcom,msm-bus,name = "cam_ahb";
+		qcom,msm-bus,num-cases = <4>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 0>,
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 300000>,
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 640000>,
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 640000>;
+		client-id-based;
+		client-names =
+			"ife0", "ife1", "ife2", "ipe0",
+			"ipe1", "cam-cdm-intf0", "cpas-cdm0", "bps0",
+			"icp0", "jpeg-dma0", "jpeg0", "fd0";
+		client-axi-port-names =
+			"cam_hf_1", "cam_hf_2", "cam_hf_2", "cam_sf_1",
+			"cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1",
+			"cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1";
+		client-bus-camnoc-based;
+		qcom,axi-port-list {
+			qcom,axi-port1 {
+				qcom,axi-port-name = "cam_hf_1";
+				qcom,axi-port-mnoc {
+					qcom,msm-bus,name = "cam_hf_1_mnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+				qcom,axi-port-camnoc {
+					qcom,msm-bus,name = "cam_hf_1_camnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+			};
+			qcom,axi-port2 {
+				qcom,axi-port-name = "cam_hf_2";
+				qcom,axi-port-mnoc {
+					qcom,msm-bus,name = "cam_hf_2_mnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+				qcom,axi-port-camnoc {
+					qcom,msm-bus,name = "cam_hf_1_camnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+			};
+			qcom,axi-port3 {
+				qcom,axi-port-name = "cam_sf_1";
+				qcom,axi-port-mnoc {
+					qcom,msm-bus,name = "cam_sf_1_mnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_SF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_SF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+				qcom,axi-port-camnoc {
+					qcom,msm-bus,name = "cam_sf_1_camnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_SF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_SF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-icp.txt b/Documentation/devicetree/bindings/media/video/msm-cam-icp.txt
new file mode 100644
index 0000000..c560a05
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-icp.txt
@@ -0,0 +1,219 @@
+* Qualcomm Technologies, Inc. MSM Camera ICP
+
+The MSM camera ICP devices are implemented as multiple device nodes.
+The root icp device node has properties defined to hint the driver
+about the number of A5,IPE and BPS nodes available during the
+probe sequence. Each node has multiple properties defined
+for interrupts, clocks and regulators.
+
+=======================
+Required Node Structure
+=======================
+The ICP root interface node takes care of accounting for the number
+of A5, IPE and BPS devices present on the hardware.
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,cam-icp".
+
+- compat-hw-name
+
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,a5", "qcom,ipe" or "qcom,bps".
+
+- num-a5
+  Usage: required
+  Value type: <u32>
+  Definition: Number of supported A5 processors.
+
+- num-ipe
+  Usage: required
+  Value type: <u32>
+  Definition: Number of supported IPE HW blocks.
+
+- num-bps
+  Usage: required
+  Value type: <u32>
+  Definition: Number of supported BPS HW blocks.
+
+Example:
+	qcom,cam-icp {
+		compatible = "qcom,cam-icp";
+		compat-hw-name = "qcom,a5", "qcom,ipe0", "qcom,ipe1", "qcom,bps";
+		num-a5 = <1>;
+		num-ipe = <2>;
+		num-bps = <1>;
+		status = "ok";
+	};
+
+=======================
+Required Node Structure
+=======================
+A5/IPE/BPS nodes provide the interface for the Image Control Processor driver
+about the A5 register map, interrupt map, clocks, regulators
+and name of firmware image.
+
+- cell-index
+  Usage: required
+  Value type: <u32>
+  Definition: Node instance number.
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,cam_a5", "qcom,cam_ipe" or "qcom,cam_bps".
+
+- reg-names
+  Usage: optional
+  Value type: <string>
+  Definition: Name of the register resources.
+
+- reg
+  Usage: optional
+  Value type: <u32>
+  Definition: Register values.
+
+- reg-cam-base
+  Usage: optional
+  Value type: <u32>
+  Definition: Register values.
+
+- interrupt-names
+  Usage: optional
+  Value type: <string>
+  Definition: Name of the interrupt.
+
+- interrupts
+  Usage: optional
+  Value type: <u32>
+  Definition: Interrupt associated with A5/IPE/BPS HW.
+
+- regulator-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the regulator resources for A5/IPE/BPS HW.
+
+- camss-supply
+  Usage: required
+  Value type: <phandle>
+  Definition: Regulator reference corresponding to the names listed
+              in "regulator-names".
+
+- clock-names
+  Usage: required
+  Value type: <string>
+  Definition: List of clock names required for A5/IPE/BPS HW.
+
+- clocks
+  Usage: required
+  Value type: <phandle>
+  Definition: List of clocks used for A5/IPE/BPS HW.
+
+- clock-rates
+  Usage: required
+  Value type: <u32>
+  Definition: List of clocks rates.
+
+- fw_name
+  Usage: optional
+  Value type: <string>
+  Definition: Name of firmware image.
+
+Examples:
+a5: qcom,a5@a10000 {
+	cell-index = <0>;
+	compatible = "qcom,cam_a5";
+	reg = <0xac00000 0x6000>,
+		<0xac10000 0x8000>,
+		<0xac18000 0x3000>;
+	reg-names = "a5_qgic", "a5_sierra", "a5_csr";
+	interrupts = <0 463 0>;
+	interrupt-names = "a5";
+	regulator-names = "camss-vdd";
+	camss-vdd-supply = <&titan_top_gdsc>;
+	clock-names = "gcc_cam_ahb_clk",
+		"gcc_cam_axi_clk",
+		"soc_ahb_clk",
+		"cpas_ahb_clk",
+		"camnoc_axi_clk",
+		"icp_apb_clk",
+		"icp_atb_clk",
+		"icp_clk",
+		"icp_clk_src",
+		"icp_cti_clk",
+		"icp_ts_clk";
+	clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+			<&clock_camcc CAM_CC_ICP_APB_CLK>,
+			<&clock_camcc CAM_CC_ICP_ATB_CLK>,
+			<&clock_camcc CAM_CC_ICP_CLK>,
+			<&clock_camcc CAM_CC_ICP_CLK_SRC>,
+			<&clock_camcc CAM_CC_ICP_CTI_CLK>,
+			<&clock_camcc CAM_CC_ICP_TS_CLK>;
+
+	clock-rates = <0 0 0 80000000 0 0 0 0 600000000 0 0>;
+	fw_name = "CAMERA_ICP.elf";
+};
+
+qcom,ipe0 {
+	cell-index = <0>;
+	compatible = "qcom,cam_ipe";
+	regulator-names = "ipe0-vdd";
+	ipe0-vdd-supply = <&ipe_0_gdsc>;
+	clock-names = "ipe_0_ahb_clk",
+		"ipe_0_areg_clk",
+		"ipe_0_axi_clk",
+		"ipe_0_clk",
+		"ipe_0_clk_src";
+	clocks = <&clock_camcc CAM_CC_IPE_0_AHB_CLK>,
+			<&clock_camcc CAM_CC_IPE_0_AREG_CLK>,
+			<&clock_camcc CAM_CC_IPE_0_AXI_CLK>,
+			<&clock_camcc CAM_CC_IPE_0_CLK>,
+			<&clock_camcc CAM_CC_IPE_0_CLK_SRC>;
+
+	clock-rates = <80000000 400000000 0 0 600000000>;
+};
+
+qcom,ipe1 {
+	cell-index = <1>;
+	compatible = "qcom,cam_ipe";
+	regulator-names = "ipe1-vdd";
+	ipe1-vdd-supply = <&ipe_1_gdsc>;
+	clock-names = "ipe_1_ahb_clk",
+		"ipe_1_areg_clk",
+		"ipe_1_axi_clk",
+		"ipe_1_clk",
+		"ipe_1_clk_src";
+	clocks = <&clock_camcc CAM_CC_IPE_1_AHB_CLK>,
+			<&clock_camcc CAM_CC_IPE_1_AREG_CLK>,
+			<&clock_camcc CAM_CC_IPE_1_AXI_CLK>,
+			<&clock_camcc CAM_CC_IPE_1_CLK>,
+			<&clock_camcc CAM_CC_IPE_1_CLK_SRC>;
+
+		clock-rates = <80000000 400000000 0 0 600000000>;
+};
+
+bps: qcom,bps {
+	cell-index = <0>;
+	compatible = "qcom,cam_bps";
+	regulator-names = "bps-vdd";
+	bps-vdd-supply = <&bps_gdsc>;
+	clock-names = "bps_ahb_clk",
+		"bps_areg_clk",
+		"bps_axi_clk",
+		"bps_clk",
+		"bps_clk_src";
+	clocks = <&clock_camcc CAM_CC_BPS_AHB_CLK>,
+			<&clock_camcc CAM_CC_BPS_AREG_CLK>,
+			<&clock_camcc CAM_CC_BPS_AXI_CLK>,
+			<&clock_camcc CAM_CC_BPS_CLK>,
+			<&clock_camcc CAM_CC_BPS_CLK_SRC>;
+
+	clock-rates = <80000000 400000000 0 0 600000000>;
+};
+
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-ife-csid.txt b/Documentation/devicetree/bindings/media/video/msm-cam-ife-csid.txt
new file mode 100644
index 0000000..f9a5e0f
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-ife-csid.txt
@@ -0,0 +1,111 @@
+* Qualcomm Technologies, Inc. MSM Camera IFE CSID
+
+Camera IFE CSID device provides the definitions for enabling
+the IFE CSID hardware. It also provides the functions for the client
+to control the IFE CSID hardware.
+
+=======================
+Required Node Structure
+=======================
+The IFE CSID device is described in one level of the device node.
+
+======================================
+First Level Node - CAM IFE CSID device
+======================================
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,csid170" or "qcom,csid-lite170".
+
+- cell-index
+  Usage: required
+  Value type: <u32>
+  Definition: Should specify the hardware index id.
+
+- reg-names
+  Usage: required
+  Value type: <string>
+  Definition: Should be "csid".
+
+- reg
+  Usage: required
+  Value type: <u32>
+  Definition: Register values.
+
+- interrupt-names
+  Usage: Required
+  Value type: <string>
+  Definition: Name of the interrupt.
+
+- interrupts
+  Usage: Required
+  Value type: <u32>
+  Definition: Interrupt associated with IFE CSID HW.
+
+- regulator-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the regulator resources for IFE CSID HW.
+
+- xxxx-supply
+  Usage: required
+  Value type: <phandle>
+  Definition: Regulator reference corresponding to the names listed in
+                "regulator-names".
+
+- clock-names
+  Usage: required
+  Value type: <string>
+  Definition: List of clock names required for IFE CSID HW.
+
+- clocks
+  Usage: required
+  Value type: <phandle>
+  Definition: List of clocks used for IFE CSID HW.
+
+- clock-rates
+  Usage: required
+  Value type: <u32>
+  Definition: List of clocks rates.
+
+- src-clock-name
+  Usage: required
+  Value type: <string>
+  Definition: Source clock name.
+
+
+
+Example:
+
+	qcom,csid0@acb3000 {
+		cell-index = <0>;
+		compatible = "qcom,csid170";
+		reg = <0xacb3000 0x1000>;
+		reg-names = "csid";
+		interrupts = <0 464 0>;
+		interrupt-names = "csid";
+		vdd-names = "camss", "ife0";
+		camss-supply = <&titan_top_gdsc>;
+		ife0-supply = <&ife_0_gdsc>;
+		clock-names = "soc_ahb_clk",
+			"cpas_ahb_clk",
+			"slow_ahb_clk_src",
+			"ife_clk",
+			"ife_clk_src",
+			"ife_csid_clk",
+			"ife_csid_clk_src",
+			"ife_cphy_rx_clk",
+			"cphy_rx_clk_src";
+		clocks = <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_0_CLK>,
+			<&clock_camcc CAM_CC_IFE_0_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_0_CSID_CLK>,
+			<&clock_camcc CAM_CC_IFE_0_CSID_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_0_CPHY_RX_CLK>,
+			<&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>;
+		clock-rates = <0 0 80000000 0 320000000 0 384000000 0 384000000>;
+		src-clock-name = "ife_csid_clk_src";
+		status = "ok";
+	};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-isp.txt b/Documentation/devicetree/bindings/media/video/msm-cam-isp.txt
new file mode 100644
index 0000000..13aae64
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-isp.txt
@@ -0,0 +1,31 @@
+* Qualcomm Technologies, Inc. MSM Camera ISP
+
+The MSM camera ISP driver provides the definitions for enabling
+the Camera ISP hardware. It provides the functions for the client to
+control the ISP hardware.
+
+=======================
+Required Node Structure
+=======================
+The camera ISP device is described in one level of device node.
+
+==================================
+First Level Node - CAM ISP device
+==================================
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,cam-isp".
+
+- arch-compat
+  Usage: required
+  Value type: <string>
+  Definition: Should be "vfe" or "ife".
+
+Example:
+
+	qcom,cam-isp {
+		compatible = "qcom,cam-isp";
+		arch-compat = "ife";
+		status = "ok";
+	};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-vfe.txt b/Documentation/devicetree/bindings/media/video/msm-cam-vfe.txt
new file mode 100644
index 0000000..1c18228
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-vfe.txt
@@ -0,0 +1,109 @@
+* Qualcomm Technologies, Inc. MSM Camera VFE
+
+Camera VFE device provides the definitions for enabling
+the VFE hardware. It also provides the functions for the client
+to control the VFE hardware.
+
+=======================
+Required Node Structure
+=======================
+The VFE device is described in one level of the device node.
+
+======================================
+First Level Node - CAM VFE device
+======================================
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should specify the compatibility string for matching the
+    driver. e.g. "qcom,vfe170", "qcom,vfe-lite170".
+
+- cell-index
+  Usage: required
+  Value type: <u32>
+  Definition: Should specify the hardware index id.
+
+- reg-names
+  Usage: required
+  Value type: <string>
+  Definition: Should specify the name of the register block.
+
+- reg
+  Usage: required
+  Value type: <u32>
+  Definition: Register values.
+
+- interrupt-names
+  Usage: Required
+  Value type: <string>
+  Definition: Name of the interrupt.
+
+- interrupts
+  Usage: Required
+  Value type: <u32>
+  Definition: Interrupt associated with VFE HW.
+
+- regulator-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the regulator resources for VFE HW.
+
+- xxxx-supply
+  Usage: required
+  Value type: <phandle>
+  Definition: Regulator reference corresponding to the names listed in
+    "regulator-names".
+
+- clock-names
+  Usage: required
+  Value type: <string>
+  Definition: List of clock names required for VFE HW.
+
+- clocks
+  Usage: required
+  Value type: <phandle>
+  Definition: List of clocks used for VFE HW.
+
+- clock-rates
+  Usage: required
+  Value type: <u32>
+  Definition: List of clocks rates.
+
+- src-clock-name
+  Usage: required
+  Value type: <string>
+  Definition: Source clock name.
+
+Example:
+	qcom,vfe0@acaf000 {
+		cell-index = <0>;
+		compatible = "qcom,vfe170";
+		reg-names = "ife";
+		reg = <0xacaf000 0x4000>;
+		interrupts = <0 465 0>;
+		interrupt-names = "ife";
+		vdd-names = "camss-vdd", "ife0-vdd";
+		camss-vdd-supply = <&titan_top_gdsc>;
+		ife0-vdd-supply = <&ife_0_gdsc>;
+		clock-names = "soc_ahb_clk",
+			"cpas_ahb_clk",
+			"slow_ahb_clk_src",
+			"ife_clk",
+			"ife_clk_src",
+			"ife_csid_clk",
+			"ife_csid_clk_src",
+			"camnoc_axi_clk",
+			"ife_axi_clk";
+		clocks = <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_0_CLK>,
+			<&clock_camcc CAM_CC_IFE_0_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_0_CSID_CLK>,
+			<&clock_camcc CAM_CC_IFE_0_CSID_CLK_SRC>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+			<&clock_camcc CAM_CC_IFE_0_AXI_CLK>;
+		clock-rates = <0 0 80000000 0 320000000 0 384000000 0 0 0>;
+		src-clock-name = "ife_clk_src";
+		status = "ok";
+	};
diff --git a/Documentation/devicetree/bindings/media/video/msm-vidc-vmem.txt b/Documentation/devicetree/bindings/media/video/msm-vidc-vmem.txt
deleted file mode 100644
index 84a8765..0000000
--- a/Documentation/devicetree/bindings/media/video/msm-vidc-vmem.txt
+++ /dev/null
@@ -1,42 +0,0 @@
-* Qualcomm Technologies Inc MSM VIDC VMEM
-
-Required properties:
-- compatible : "qcom,msm-vmem".
-- interrupts : Contains the interrupt that maps to the VMEM module.
-- reg : A set of 2 start address and size pairs that describe the hardware
-register address space and mappable memory address space.
-- reg-names : Strings that describe the pairs in "reg".  The register address
-space should be called "reg-base" and the memory space should be called "mem-base".
-- clocks : A set of clocks that correspond to the AHB and MAXI clocks that the
-hardware uses.
-- clock-names : A string that describes the "clocks" property.  The AHB clock
-should be named "ahb" and the MAXI clock should be named "maxi".
-- qcom,bank-size : The size of each memory bank, in bytes.
-- vdd-supply: phandle to a regulator that is considered to be the footswitch for vmem.
-- qcom,msm-bus,(name|num-cases,num-paths,vectors-KBps) - Bus to be voted for prior to
-  issuing any IO transactions to vmem.  Refer to Documentation/devicetree/bindings/arm/\
-  msm/msm_bus_adhoc.txt for further details.
-
-Example:
-
-qcom,vmem@880000 {
-	compatible = "qcom,msm-vmem";
-	interrupts = <0 429 0>;
-	reg = <0x880000 0x800>,
-	    <0x6800000 0x100000>;
-	reg-names = "reg-base", "mem-base";
-
-	vdd-supply = <&gdsc_mmagic_video>;
-	clocks = <&clock_mmss clk_vmem_ahb_clk>,
-	       <&clock_mmss clk_vmem_maxi_clk>;
-	clock-names = "ahb", "maxi";
-
-	qcom,bank-size = <131072>;
-
-	qcom,msm-bus,name = "vmem";
-	qcom,msm-bus,num-cases = <2>;
-	qcom,msm-bus,num-paths = <1>;
-	qcom,msm-bus,vectors-KBps =
-	        <MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_VMEM_CFG   0   0>,
-	        <MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_VMEM_CFG 500 800>;
-};
diff --git a/Documentation/devicetree/bindings/media/video/msm-vidc.txt b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
index bdc0eba..53f419c 100644
--- a/Documentation/devicetree/bindings/media/video/msm-vidc.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
@@ -104,6 +104,9 @@
   memory, performance etc.
 - qcom,debug-timeout = A bool indicating that FW errors such as SYS_ERROR,
   SESSION_ERROR and timeouts will be treated as Fatal.
+- cache-slice-names = An array of supported cache slice names by llcc
+- cache-slices = An array of supported cache slice ids corresponding
+  to cache-slice-names by llcc
 
 [Second level nodes]
 Context Banks
@@ -149,7 +152,7 @@
 Optional properties:
 - qcom,bus-governor : governor to use when scaling bus, generally any commonly
   found devfreq governor might be used.  In addition to those governors, the
-  custom Venus governors, "msm-vidc-ddr" or "msm-vidc-vmem" are also
+  custom Venus governors, "msm-vidc-ddr" or "msm-vidc-llcc" are also
   acceptable values.
   In the absence of this property the "performance" governor is used.
 - qcom,bus-rage-kbps : an array of two items (<min max>) that indicate the
diff --git a/Documentation/devicetree/bindings/mmc/mmc.txt b/Documentation/devicetree/bindings/mmc/mmc.txt
index 8a37782..f978c58 100644
--- a/Documentation/devicetree/bindings/mmc/mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/mmc.txt
@@ -52,6 +52,7 @@
 - no-sdio: controller is limited to send sdio cmd during initialization
 - no-sd: controller is limited to send sd cmd during initialization
 - no-mmc: controller is limited to send mmc cmd during initialization
+- extcon: phandle to external connector (Refer Documentation/devicetree/bindings/extcon/extcon-gpio.txt for more details).
 
 *NOTE* on CD and WP polarity. To use common for all SD/MMC host controllers line
 polarity properties, we have to fix the meaning of the "normal" and "inverted"
diff --git a/Documentation/devicetree/bindings/platform/msm/ipa.txt b/Documentation/devicetree/bindings/platform/msm/ipa.txt
index 3a03add..e821feb 100644
--- a/Documentation/devicetree/bindings/platform/msm/ipa.txt
+++ b/Documentation/devicetree/bindings/platform/msm/ipa.txt
@@ -19,10 +19,13 @@
                    "bam-irq" - string to identify the IPA BAM interrupt.
                    "a2-bam-irq" - string to identify the A2 BAM interrupt.
 - qcom,ipa-hw-ver: Specifies the IPA hardware version.
+- qcom,ipa-ram-mmap: An array of unsigned integers representing addresses and
+                     sizes which are used by the driver to access IPA RAM.
 
 Optional:
 
-- qcom,wan-rx-ring-size: size of WAN rx ring, default is 32
+- qcom,wan-rx-ring-size: size of WAN rx ring, default is 192
+- qcom,lan-rx-ring-size: size of LAN rx ring, default is 192
 - qcom,arm-smmu: SMMU is present and ARM SMMU driver is used
 - qcom,msm-smmu: SMMU is present and QSMMU driver is used
 - qcom,smmu-s1-bypass: Boolean context flag to set SMMU to S1 bypass
@@ -59,12 +62,12 @@
 - qcom,bandwidth-vote-for-ipa:	Boolean context flag to indicate whether
 				ipa clock voting is done by bandwidth
 				voting via msm-bus-scale driver or not
+- qcom,use-64-bit-dma-mask:     Boolean context flag to indicate whether
+                                using 64bit dma mask or not
 - qcom,use-dma-zone:            Boolean context flag to indicate whether memory
                                 allocations controlled by IPA driver that do not
 				specify a struct device * should use GFP_DMA to
 				workaround IPA HW limitations
-- qcom,use-gsi:                 Boolean context flag to indicate if the
-                                transport protocol is GSI
 - qcom,use-rg10-limitation-mitigation:	Boolean context flag to activate
 					the mitigation to register group 10
 					AP access limitation
@@ -123,6 +126,9 @@
 
 - qcom,iova-mapping: specifies the start address and size of iova space.
 
+- qcom,additional-mapping: specifies any additional mapping needed for this
+				context bank. The format is <iova pa size>
+
 IPA SMP2P sub nodes
 
 -compatible: "qcom,smp2pgpio-map-ipa-1-out" - represents the out gpio from
@@ -198,18 +204,24 @@
 
 	ipa_smmu_ap: ipa_smmu_ap {
 		compatible = "qcom,ipa-smmu-ap-cb";
-		iommus = <&anoc2_smmu 0x30>;
-		qcom,iova-mapping = <0x10000000 0x40000000>;
+		iommus = <&apps_smmu 0x720>;
+		qcom,iova-mapping = <0x20000000 0x40000000>;
+		qcom,additional-mapping =
+		/* modem tables in IMEM */
+		<0x146bd000 0x146bd000 0x2000>;
 	};
 
 	ipa_smmu_wlan: ipa_smmu_wlan {
 		compatible = "qcom,ipa-smmu-wlan-cb";
-		iommus = <&anoc2_smmu 0x31>;
+		iommus = <&apps_smmu 0x721>;
+		qcom,additional-mapping =
+		/* ipa-uc ram */
+		<0x1e60000 0x1e60000 0x80000>;
 	};
 
 	ipa_smmu_uc: ipa_smmu_uc {
 		compatible = "qcom,ipa-smmu-uc-cb";
-		iommus = <&anoc2_smmu 0x32>;
+		iommus = <&apps_smmu 0x722>;
 		qcom,iova-mapping = <0x40000000 0x20000000>;
 	};
 };
diff --git a/Documentation/devicetree/bindings/platform/msm/qcom-geni-se.txt b/Documentation/devicetree/bindings/platform/msm/qcom-geni-se.txt
new file mode 100644
index 0000000..7da95f8
--- /dev/null
+++ b/Documentation/devicetree/bindings/platform/msm/qcom-geni-se.txt
@@ -0,0 +1,37 @@
+Qualcomm Technologies, Inc. GENI Serial Engine Driver
+
+GENI Serial Engine Driver is used to configure and read the configuration
+from the Serial Engines on Qualcomm Technologies, Inc. Universal Peripheral
+(QUPv3) core. It is also used to enable the stage1 IOMMU translation and
+manage resources associated with the QUPv3 core.
+
+Required properties:
+- compatible:		Must be "qcom,qupv3-geni-se".
+- reg:			Must contain QUPv3 register address and length.
+- qcom,bus-mas-id:	Master Endpoint ID for bus driver.
+- qcom,bus-slv-id:	Slave Endpoint ID for bus driver.
+
+Optional properties:
+- qcom,iommu-s1-bypass:	Boolean flag to bypass IOMMU stage 1 translation.
+
+Optional subnodes:
+qcom,iommu_qupv3_geni_se_cb:	Child node representing the QUPV3 context
+				bank.
+
+Subnode Required properties:
+- compatible :		Must be "qcom,qupv3-geni-se-cb";
+- iommus:		A list of phandle and IOMMU specifier pairs that
+			describe the IOMMU master interfaces of the device.
+
+Example:
+	qupv3_0: qcom,qupv3_0_geni_se@8c0000 {
+		compatible = "qcom,qupv3-geni-se";
+		reg = <0x8c0000 0x6000>;
+		qcom,bus-mas-id = <100>;
+		qcom,bus-slv-id = <300>;
+
+		iommu_qupv3_0_geni_se_cb: qcom,iommu_qupv3_0_geni_se_cb {
+			compatible = "qcom,qupv3-geni-se-cb";
+			iommus = <&apps_smmu 0x1 0x0>;
+		};
+	}
diff --git a/Documentation/devicetree/bindings/qdsp/msm-ssc-sensors.txt b/Documentation/devicetree/bindings/qdsp/msm-ssc-sensors.txt
new file mode 100644
index 0000000..db7ab75
--- /dev/null
+++ b/Documentation/devicetree/bindings/qdsp/msm-ssc-sensors.txt
@@ -0,0 +1,21 @@
+Qualcomm Technologies, Inc. SSC Driver
+
+msm-ssc-sensors driver implements the mechanism that allows to load SLPI firmware images.
+
+Required properties:
+
+ - compatible:  This must be "qcom,msm-ssc-sensors"
+
+Optional properties:
+
+ - qcom,firmware-name: SLPI firmware name, must be "slpi" or "slpi_v1" or "slpi_v2"
+	Firmware name is not required, if sensors driver is sharing processor for execution.
+
+
+Example:
+ The following for sdm845.
+
+	qcom,msm-ssc-sensors {
+		compatible = "qcom,msm-ssc-sensors";
+		qcom,firmware-name = "slpi";
+	};
diff --git a/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt b/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt
index 8efa85d..0c6a9f2 100644
--- a/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt
@@ -213,6 +213,18 @@
 		    target quotient adjustment due to an ACD up recommendation.
 		    Valid values are 0 through 3.
 
+- qcom,cpr-acd-notwait-for-cl-settled
+	Usage:      optional; meaningful only if qcom,cpr-acd-avg-enable is specified.
+	Value type: <empty>
+	Definition: Boolean flag which indicates ACD down recommendations do not
+		    need to wait for CPR closed-loop to settle.
+
+- qcom,cpr-acd-avg-fast-update
+	Usage:      optional; meaningful only if qcom,cpr-acd-avg-enable is specified.
+	Value type: <empty>
+	Definition: Boolean flag which indicates CPR should issue immediate
+		    voltage updates following ACD requests.
+
 - qcom,cpr-acd-avg-enable
 	Usage:      optional
 	Value type: <empty>
diff --git a/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt b/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt
index 5d0499c..803df6f 100644
--- a/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt
@@ -59,6 +59,8 @@
  - qcom,poll-cfg-gdscr:	Poll the CFG register of the GDSC to determine if the
 			GDSC is enabled/disabled. This flag should not be set
 			in conjunction with "hw-ctrl-addr".
+ - qcom,toggle-sw-collapse-in-disable: If set, SW_COLLAPSE bit is toggled
+			in disable call.
 
 Example:
 	gdsc_oxili_gx: qcom,gdsc@fd8c4024 {
diff --git a/Documentation/devicetree/bindings/serial/qcom,msm-geni-uart.txt b/Documentation/devicetree/bindings/serial/qcom,msm-geni-uart.txt
index 0173a3d..b616bf3 100644
--- a/Documentation/devicetree/bindings/serial/qcom,msm-geni-uart.txt
+++ b/Documentation/devicetree/bindings/serial/qcom,msm-geni-uart.txt
@@ -15,10 +15,9 @@
 - pinctrl-names/pinctrl-0/1: The GPIOs assigned to this core. The names
   Should be "active" and "sleep" for the pin confuguration when core is active
   or when entering sleep state.
+- qcom,wrapper-core: Wrapper QUPv3 core containing this UART controller.
 
 Optional properties:
-- qcom,bus-mas: contains the bus master id needed to put in bus bandwidth votes
-		for inter-connect buses.
 - qcom,wakeup-byte: Byte to be injected in the tty layer during wakeup isr.
 
 Example:
@@ -34,6 +33,6 @@
 	pinctrl-0 = <&qup_1_uart_3_active>;
 	pinctrl-1 = <&qup_1_uart_3_sleep>;
 	interrupts = <0 355 0>;
-	qcom,bus-mas = <MASTER_BLSP_2>;
+	qcom,wrapper-core = <&qupv3_0>;
 	qcom,wakeup-byte = <0xFF>;
 };
diff --git a/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt b/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt
index 868a5f0..cd2d2ea 100644
--- a/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt
+++ b/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt
@@ -23,6 +23,7 @@
 - spi-max-frequency: Specifies maximum SPI clock frequency,
 		     Units - Hz. Definition as per
 		     Documentation/devicetree/bindings/spi/spi-bus.txt
+- qcom,wrapper-core: Wrapper QUPv3 core containing this SPI controller.
 
 SPI slave nodes must be children of the SPI master node and can contain
 properties described in Documentation/devicetree/bindings/spi/spi-bus.txt
@@ -44,6 +45,7 @@
 		pinctrl-1 = <&qup_1_spi_2_sleep>;
 		interrupts = <GIC_SPI 354 0>;
 		spi-max-frequency = <19200000>;
+		qcom,wrapper-core = <&qupv3_0>;
 
 		dev@0 {
 			compatible = "dummy,slave";
diff --git a/Documentation/timers/timer_stats.txt b/Documentation/timers/timer_stats.txt
deleted file mode 100644
index de835ee..0000000
--- a/Documentation/timers/timer_stats.txt
+++ /dev/null
@@ -1,73 +0,0 @@
-timer_stats - timer usage statistics
-------------------------------------
-
-timer_stats is a debugging facility to make the timer (ab)usage in a Linux
-system visible to kernel and userspace developers. If enabled in the config
-but not used it has almost zero runtime overhead, and a relatively small
-data structure overhead. Even if collection is enabled runtime all the
-locking is per-CPU and lookup is hashed.
-
-timer_stats should be used by kernel and userspace developers to verify that
-their code does not make unduly use of timers. This helps to avoid unnecessary
-wakeups, which should be avoided to optimize power consumption.
-
-It can be enabled by CONFIG_TIMER_STATS in the "Kernel hacking" configuration
-section.
-
-timer_stats collects information about the timer events which are fired in a
-Linux system over a sample period:
-
-- the pid of the task(process) which initialized the timer
-- the name of the process which initialized the timer
-- the function where the timer was initialized
-- the callback function which is associated to the timer
-- the number of events (callbacks)
-
-timer_stats adds an entry to /proc: /proc/timer_stats
-
-This entry is used to control the statistics functionality and to read out the
-sampled information.
-
-The timer_stats functionality is inactive on bootup.
-
-To activate a sample period issue:
-# echo 1 >/proc/timer_stats
-
-To stop a sample period issue:
-# echo 0 >/proc/timer_stats
-
-The statistics can be retrieved by:
-# cat /proc/timer_stats
-
-While sampling is enabled, each readout from /proc/timer_stats will see
-newly updated statistics. Once sampling is disabled, the sampled information
-is kept until a new sample period is started. This allows multiple readouts.
-
-Sample output of /proc/timer_stats:
-
-Timerstats sample period: 3.888770 s
-  12,     0 swapper          hrtimer_stop_sched_tick (hrtimer_sched_tick)
-  15,     1 swapper          hcd_submit_urb (rh_timer_func)
-   4,   959 kedac            schedule_timeout (process_timeout)
-   1,     0 swapper          page_writeback_init (wb_timer_fn)
-  28,     0 swapper          hrtimer_stop_sched_tick (hrtimer_sched_tick)
-  22,  2948 IRQ 4            tty_flip_buffer_push (delayed_work_timer_fn)
-   3,  3100 bash             schedule_timeout (process_timeout)
-   1,     1 swapper          queue_delayed_work_on (delayed_work_timer_fn)
-   1,     1 swapper          queue_delayed_work_on (delayed_work_timer_fn)
-   1,     1 swapper          neigh_table_init_no_netlink (neigh_periodic_timer)
-   1,  2292 ip               __netdev_watchdog_up (dev_watchdog)
-   1,    23 events/1         do_cache_clean (delayed_work_timer_fn)
-90 total events, 30.0 events/sec
-
-The first column is the number of events, the second column the pid, the third
-column is the name of the process. The forth column shows the function which
-initialized the timer and in parenthesis the callback function which was
-executed on expiry.
-
-    Thomas, Ingo
-
-Added flag to indicate 'deferrable timer' in /proc/timer_stats. A deferrable
-timer will appear as follows
-  10D,     1 swapper          queue_delayed_work_on (delayed_work_timer_fn)
-
diff --git a/Makefile b/Makefile
index f834951..f47cd95 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 27
+SUBLEVEL = 28
 EXTRAVERSION =
 NAME = Roaring Lionus
 
diff --git a/arch/arm/boot/dts/bcm958522er.dts b/arch/arm/boot/dts/bcm958522er.dts
index a21b0fd..417f657 100644
--- a/arch/arm/boot/dts/bcm958522er.dts
+++ b/arch/arm/boot/dts/bcm958522er.dts
@@ -55,6 +55,7 @@
 	gpio-restart {
 		compatible = "gpio-restart";
 		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+		open-source;
 		priority = <200>;
 	};
 };
diff --git a/arch/arm/boot/dts/bcm958525er.dts b/arch/arm/boot/dts/bcm958525er.dts
index be7f2f8..5279b76 100644
--- a/arch/arm/boot/dts/bcm958525er.dts
+++ b/arch/arm/boot/dts/bcm958525er.dts
@@ -55,6 +55,7 @@
 	gpio-restart {
 		compatible = "gpio-restart";
 		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+		open-source;
 		priority = <200>;
 	};
 };
diff --git a/arch/arm/boot/dts/bcm958525xmc.dts b/arch/arm/boot/dts/bcm958525xmc.dts
index 959cde9..872882b 100644
--- a/arch/arm/boot/dts/bcm958525xmc.dts
+++ b/arch/arm/boot/dts/bcm958525xmc.dts
@@ -55,6 +55,7 @@
 	gpio-restart {
 		compatible = "gpio-restart";
 		gpios = <&gpioa 31 GPIO_ACTIVE_LOW>;
+		open-source;
 		priority = <200>;
 	};
 };
diff --git a/arch/arm/boot/dts/bcm958622hr.dts b/arch/arm/boot/dts/bcm958622hr.dts
index ad2aa87..a340e1d 100644
--- a/arch/arm/boot/dts/bcm958622hr.dts
+++ b/arch/arm/boot/dts/bcm958622hr.dts
@@ -55,6 +55,7 @@
 	gpio-restart {
 		compatible = "gpio-restart";
 		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+		open-source;
 		priority = <200>;
 	};
 };
diff --git a/arch/arm/boot/dts/bcm958623hr.dts b/arch/arm/boot/dts/bcm958623hr.dts
index 4ceb8fe..226b652 100644
--- a/arch/arm/boot/dts/bcm958623hr.dts
+++ b/arch/arm/boot/dts/bcm958623hr.dts
@@ -55,6 +55,7 @@
 	gpio-restart {
 		compatible = "gpio-restart";
 		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+		open-source;
 		priority = <200>;
 	};
 };
diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts
index 4420025..a1658d0 100644
--- a/arch/arm/boot/dts/bcm958625hr.dts
+++ b/arch/arm/boot/dts/bcm958625hr.dts
@@ -55,6 +55,7 @@
 	gpio-restart {
 		compatible = "gpio-restart";
 		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+		open-source;
 		priority = <200>;
 	};
 };
diff --git a/arch/arm/boot/dts/bcm988312hr.dts b/arch/arm/boot/dts/bcm988312hr.dts
index 104afe9..ed05e33 100644
--- a/arch/arm/boot/dts/bcm988312hr.dts
+++ b/arch/arm/boot/dts/bcm988312hr.dts
@@ -55,6 +55,7 @@
 	gpio-restart {
 		compatible = "gpio-restart";
 		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+		open-source;
 		priority = <200>;
 	};
 };
diff --git a/arch/arm/boot/dts/qcom-ipq8064.dtsi b/arch/arm/boot/dts/qcom-ipq8064.dtsi
index 2e37557..76f4e89 100644
--- a/arch/arm/boot/dts/qcom-ipq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq8064.dtsi
@@ -65,13 +65,13 @@
 		cxo_board {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
-			clock-frequency = <19200000>;
+			clock-frequency = <25000000>;
 		};
 
 		pxo_board {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
-			clock-frequency = <27000000>;
+			clock-frequency = <25000000>;
 		};
 
 		sleep_clk: sleep_clk {
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-regulator.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-regulator.dtsi
new file mode 100644
index 0000000..cc126f6
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-regulator.dtsi
@@ -0,0 +1,114 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+
+/* Stub regulators */
+/ {
+	pmxpoorwills_s1: regulator-pmxpoorwills-s1 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmxpoorwills_s1";
+		qcom,hpm-min-load = <100000>;
+		regulator-min-microvolt = <752000>;
+		regulator-max-microvolt = <752000>;
+	};
+
+	/* VDD CX supply */
+	pmxpoorwills_s5_level: regulator-pmxpoorwills-s5-level {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmxpoorwills_s5_level";
+		qcom,hpm-min-load = <100000>;
+		regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+		regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+	};
+
+	pmxpoorwills_s5_level_ao: regulator-pmxpoorwills-s5-level-ao {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmxpoorwills_s5_level_ao";
+		qcom,hpm-min-load = <100000>;
+		regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+		regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+	};
+
+	pmxpoorwills_l1: regulator-pmxpoorwills-l1 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmxpoorwills_l1";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <1200000>;
+		regulator-max-microvolt = <1200000>;
+	};
+
+	pmxpoorwills_l3: regulator-pmxpoorwills-l3 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmxpoorwills_l3";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <800000>;
+		regulator-max-microvolt = <800000>;
+	};
+
+	pmxpoorwills_l4: regulator-pmxpoorwills-l4 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmxpoorwills_l4";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <872000>;
+		regulator-max-microvolt = <872000>;
+	};
+
+	pmxpoorwills_l5: regulator-pmxpoorwills-l5 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmxpoorwills_l5";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+	};
+
+	pmxpoorwills_l6: regulator-pmxpoorwills-l6 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmxpoorwills_l6";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+	};
+
+	pmxpoorwills_l8: regulator-pmxpoorwills-l8 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmxpoorwills_l8";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <800000>;
+		regulator-max-microvolt = <800000>;
+	};
+
+	/* VDD MX supply */
+	pmxpoorwills_l9_level: regulator-pmxpoorwills-l9-level {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmxpoorwills_l9_level";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+		regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+	};
+
+	pmxpoorwills_l9_level_ao: regulator-pmxpoorwills-l9-level-ao {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmxpoorwills_l9_level_ao";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+		regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+	};
+
+	pmxpoorwills_l10: regulator-pmxpoorwills-l10 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmxpoorwills_l10";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <3088000>;
+		regulator-max-microvolt = <3088000>;
+	};
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
index ca6922d..ba1da74 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
@@ -180,3 +180,5 @@
 		status = "ok";
 	};
 };
+
+#include "sdxpoorwills-regulator.dtsi"
diff --git a/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts b/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
index 73c05da..e00539a 100644
--- a/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
+++ b/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
@@ -167,7 +167,7 @@
 					reg = <8>;
 					label = "cpu";
 					ethernet = <&gmac>;
-					phy-mode = "rgmii";
+					phy-mode = "rgmii-txid";
 					fixed-link {
 						speed = <1000>;
 						full-duplex;
diff --git a/arch/arm/configs/sdxpoorwills-perf_defconfig b/arch/arm/configs/sdxpoorwills-perf_defconfig
index bcef117..fc0d3b0 100644
--- a/arch/arm/configs/sdxpoorwills-perf_defconfig
+++ b/arch/arm/configs/sdxpoorwills-perf_defconfig
@@ -174,6 +174,8 @@
 # CONFIG_NET_VENDOR_INTEL is not set
 CONFIG_KS8851=y
 # CONFIG_NET_VENDOR_MICROCHIP is not set
+CONFIG_ECM_IPA=y
+CONFIG_RNDIS_IPA=y
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SMSC is not set
@@ -255,6 +257,8 @@
 CONFIG_UIO=y
 CONFIG_STAGING=y
 CONFIG_GSI=y
+CONFIG_IPA3=y
+CONFIG_IPA_UT=y
 CONFIG_SPS=y
 CONFIG_SPS_SUPPORT_NDP_BAM=y
 CONFIG_USB_BAM=y
@@ -284,7 +288,6 @@
 CONFIG_PANIC_TIMEOUT=5
 # CONFIG_SCHED_DEBUG is not set
 CONFIG_SCHEDSTATS=y
-CONFIG_TIMER_STATS=y
 # CONFIG_DEBUG_PREEMPT is not set
 CONFIG_IPC_LOGGING=y
 CONFIG_BLK_DEV_IO_TRACE=y
diff --git a/arch/arm/configs/sdxpoorwills_defconfig b/arch/arm/configs/sdxpoorwills_defconfig
index 5601276..9d12771 100644
--- a/arch/arm/configs/sdxpoorwills_defconfig
+++ b/arch/arm/configs/sdxpoorwills_defconfig
@@ -135,6 +135,10 @@
 CONFIG_BRIDGE=y
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_PRIO=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_DEBUGFS=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
+CONFIG_CFG80211_WEXT=y
 CONFIG_RFKILL=y
 CONFIG_IPC_ROUTER=y
 CONFIG_IPC_ROUTER_SECURITY=y
@@ -162,6 +166,8 @@
 # CONFIG_NET_VENDOR_INTEL is not set
 CONFIG_KS8851=y
 # CONFIG_NET_VENDOR_MICROCHIP is not set
+CONFIG_ECM_IPA=y
+CONFIG_RNDIS_IPA=y
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SMSC is not set
@@ -200,6 +206,7 @@
 CONFIG_MSM_CDC_SUPPLY=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_STUB=y
 CONFIG_FB=y
 CONFIG_SOUND=y
 CONFIG_SND=y
@@ -244,6 +251,9 @@
 CONFIG_DMADEVICES=y
 CONFIG_UIO=y
 CONFIG_STAGING=y
+CONFIG_GSI=y
+CONFIG_IPA3=y
+CONFIG_IPA_UT=y
 CONFIG_SPS=y
 CONFIG_SPS_SUPPORT_NDP_BAM=y
 CONFIG_HWSPINLOCK_QCOM=y
@@ -278,7 +288,6 @@
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_PANIC_TIMEOUT=5
 CONFIG_SCHEDSTATS=y
-CONFIG_TIMER_STATS=y
 CONFIG_DEBUG_SPINLOCK=y
 CONFIG_DEBUG_MUTEXES=y
 CONFIG_DEBUG_ATOMIC_SLEEP=y
diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S
index fe36ce2..4c6f14c 100644
--- a/arch/arm/mach-omap2/omap-headsmp.S
+++ b/arch/arm/mach-omap2/omap-headsmp.S
@@ -17,6 +17,7 @@
 
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <asm/assembler.h>
 
 #include "omap44xx.h"
 
@@ -66,7 +67,7 @@
 	cmp	r0, r4
 	bne	wait_2
 	ldr	r12, =API_HYP_ENTRY
-	adr	r0, hyp_boot
+	badr	r0, hyp_boot
 	smc	#0
 hyp_boot:
 	b	omap_secondary_startup
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index ff2cc3e..cd13516 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -14,6 +14,15 @@
 	sdm845-4k-panel-cdp.dtb \
 	sdm845-4k-panel-qrd.dtb
 
+ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y)
+	dtbo-$(CONFIG_ARCH_SDM845) += \
+		sdm845-cdp-overlay.dtbo \
+		sdm845-mtp-overlay.dtbo
+
+sdm845-cdp-overlay.dtbo-base := sdm845.dtb
+sdm845-mtp-overlay.dtbo-base := sdm845.dtb
+endif
+
 dtb-$(CONFIG_ARCH_SDM830) += sdm830-sim.dtb \
 	sdm830-rumi.dtb \
 	sdm830-mtp.dtb \
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
index e43da55..b314e99 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
@@ -40,7 +40,7 @@
 		qcom,mdss-dsi-lane-3-state;
 		qcom,mdss-dsi-dma-trigger = "trigger_sw";
 		qcom,mdss-dsi-mdp-trigger = "none";
-		qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 20>;
+		qcom,mdss-dsi-reset-sequence = <1 100>, <0 100>, <1 100>;
 		qcom,mdss-dsi-tx-eot-append;
 
 		qcom,adjust-timer-wakeup-ms = <1>;
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sim-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sim-cmd.dtsi
index 241aa71..1f08294 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sim-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sim-cmd.dtsi
@@ -58,6 +58,7 @@
 		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
 		qcom,mdss-dsi-te-dcs-command = <1>;
 		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-wd;
 		qcom,mdss-dsi-te-using-te-pin;
 		qcom,mdss-dsi-panel-hdr-enabled;
 		qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi
index 509547f..36f36fb 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi
@@ -55,6 +55,7 @@
 		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
 		qcom,mdss-dsi-te-dcs-command = <1>;
 		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-wd;
 		qcom,mdss-dsi-te-using-te-pin;
 		qcom,mdss-dsi-on-command = [29 01 00 00 00 00 02 b0 03
 			05 01 00 00 0a 00 01 00
diff --git a/arch/arm64/boot/dts/qcom/msm-gdsc-sdm845.dtsi b/arch/arm64/boot/dts/qcom/msm-gdsc-sdm845.dtsi
index 84a6a84..2194a42 100644
--- a/arch/arm64/boot/dts/qcom/msm-gdsc-sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-gdsc-sdm845.dtsi
@@ -217,6 +217,7 @@
 		sw-reset = <&gpu_gx_sw_reset>;
 		qcom,reset-aon-logic;
 		qcom,poll-cfg-gdscr;
+		qcom,toggle-sw-collapse-in-disable;
 		status = "disabled";
 	};
 
diff --git a/arch/arm64/boot/dts/qcom/pm8998.dtsi b/arch/arm64/boot/dts/qcom/pm8998.dtsi
index b9a6c79..450295e 100644
--- a/arch/arm64/boot/dts/qcom/pm8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8998.dtsi
@@ -36,6 +36,7 @@
 			interrupt-names = "kpdpwr", "resin",
 					"resin-bark", "kpdpwr-resin-bark";
 			qcom,pon-dbc-delay = <15625>;
+			qcom,kpdpwr-sw-debounce;
 			qcom,system-reset;
 			qcom,store-hard-reset-reason;
 
@@ -139,7 +140,6 @@
 			interrupt-names = "eoc-int-en-set";
 			qcom,adc-bit-resolution = <15>;
 			qcom,adc-vdd-reference = <1875>;
-			#thermal-sensor-cells = <1>;
 
 			chan@6 {
 				label = "die_temp";
@@ -190,6 +190,7 @@
 			qcom,adc_tm-vadc = <&pm8998_vadc>;
 			qcom,decimation = <0>;
 			qcom,fast-avg-setup = <0>;
+			#thermal-sensor-cells = <1>;
 		};
 	};
 
diff --git a/arch/arm64/boot/dts/qcom/pmi8998.dtsi b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
index b53f7ac..886e792 100644
--- a/arch/arm64/boot/dts/qcom/pmi8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
@@ -76,6 +76,7 @@
 			compatible = "qcom,qpnp-smb2";
 			#address-cells = <1>;
 			#size-cells = <1>;
+			#cooling-cells = <2>;
 
 			qcom,pmic-revid = <&pmi8998_revid>;
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
index 6569219..122299c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
@@ -27,7 +27,7 @@
 };
 
 &mdss_mdp {
-	connectors = <&sde_wb &dsi_sharp_4k_dsc_video_display>;
+	connectors = <&sde_rscc &sde_wb &dsi_sharp_4k_dsc_video_display>;
 };
 
 &dsi_sharp_4k_dsc_video {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
index 2e893de..55e615c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
@@ -27,7 +27,7 @@
 };
 
 &mdss_mdp {
-	connectors = <&sde_wb &dsi_sharp_4k_dsc_video_display>;
+	connectors = <&sde_rscc &sde_wb &dsi_sharp_4k_dsc_video_display>;
 };
 
 &dsi_sharp_4k_dsc_video {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
index 3b9c26f..4c642e3 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
@@ -232,12 +232,10 @@
 
 		msm_cam_smmu_ife {
 			compatible = "qcom,msm-cam-smmu-cb";
-			iommus = <&apps_smmu 0x808>,
-				<&apps_smmu 0x810>,
-				<&apps_smmu 0x818>,
-				<&apps_smmu 0xc08>,
-				<&apps_smmu 0xc10>,
-				<&apps_smmu 0xc18>;
+			iommus = <&apps_smmu 0x808 0x0>,
+				<&apps_smmu 0x810 0x8>,
+				<&apps_smmu 0xc08 0x0>,
+				<&apps_smmu 0xc10 0x8>;
 			label = "ife";
 			ife_iova_mem_map: iova-mem-map {
 				/* IO region is approximately 3.4 GB */
@@ -259,13 +257,11 @@
 
 		msm_cam_smmu_icp {
 			compatible = "qcom,msm-cam-smmu-cb";
-			iommus = <&apps_smmu 0x1078>,
-				<&apps_smmu 0x1020>,
-				<&apps_smmu 0x1028>,
-				<&apps_smmu 0x1040>,
-				<&apps_smmu 0x1048>,
-				<&apps_smmu 0x1030>,
-				<&apps_smmu 0x1050>;
+			iommus = <&apps_smmu 0x1078 0x2>,
+				<&apps_smmu 0x1020 0x8>,
+				<&apps_smmu 0x1040 0x8>,
+				<&apps_smmu 0x1030 0x0>,
+				<&apps_smmu 0x1050 0x0>;
 			label = "icp";
 			icp_iova_mem_map: iova-mem-map {
 				iova-mem-region-firmware {
@@ -299,7 +295,7 @@
 
 		msm_cam_smmu_cpas_cdm {
 			compatible = "qcom,msm-cam-smmu-cb";
-			iommus = <&apps_smmu 0x1000>;
+			iommus = <&apps_smmu 0x1000 0x0>;
 			label = "cpas-cdm0";
 			cpas_cdm_iova_mem_map: iova-mem-map {
 				iova-mem-region-io {
@@ -315,7 +311,7 @@
 
 		msm_cam_smmu_secure {
 			compatible = "qcom,msm-cam-smmu-cb";
-			iommus = <&apps_smmu 0x1001>;
+			iommus = <&apps_smmu 0x1001 0x0>;
 			label = "cam-secure";
 			cam_secure_iova_mem_map: iova-mem-map {
 				/* Secure IO region is approximately 3.4 GB */
@@ -329,4 +325,521 @@
 			};
 		};
 	};
+
+	qcom,cam-cpas@ac40000 {
+		cell-index = <0>;
+		compatible = "qcom,cam-cpas";
+		label = "cpas";
+		arch-compat = "cpas_top";
+		status = "ok";
+		reg-names = "cam_cpas_top", "cam_camnoc";
+		reg = <0xac40000 0x1000>,
+			<0xac42000 0x5000>;
+		reg-cam-base = <0x40000 0x42000>;
+		interrupt-names = "cpas_camnoc";
+		interrupts = <0 459 0>;
+		regulator-names = "camss-vdd";
+		camss-vdd-supply = <&titan_top_gdsc>;
+		clock-names = "gcc_ahb_clk",
+			"gcc_axi_clk",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"slow_ahb_clk_src",
+			"camnoc_axi_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
+		src-clock-name = "slow_ahb_clk_src";
+		clock-rates = <0 0 0 0 80000000 0>;
+		qcom,msm-bus,name = "cam_ahb";
+		qcom,msm-bus,num-cases = <4>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 0>,
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 300000>,
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 640000>,
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 640000>;
+		client-id-based;
+		client-names =
+			"csiphy0", "csiphy1", "csiphy2", "cci0",
+			"csid0", "csid1", "csid2",
+			"ife0", "ife1", "ife2", "ipe0",
+			"ipe1", "cam-cdm-intf0", "cpas-cdm0", "bps0",
+			"icp0", "jpeg-dma0", "jpeg0", "fd0";
+		client-axi-port-names =
+			"cam_hf_1", "cam_hf_2", "cam_hf_2", "cam_sf_1",
+			"cam_hf_1", "cam_hf_2", "cam_hf_2",
+			"cam_hf_1", "cam_hf_2", "cam_hf_2", "cam_sf_1",
+			"cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1",
+			"cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1";
+		client-bus-camnoc-based;
+		qcom,axi-port-list {
+			qcom,axi-port1 {
+				qcom,axi-port-name = "cam_hf_1";
+				qcom,axi-port-mnoc {
+					qcom,msm-bus,name = "cam_hf_1_mnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+				qcom,axi-port-camnoc {
+					qcom,msm-bus,name = "cam_hf_1_camnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+			};
+			qcom,axi-port2 {
+				qcom,axi-port-name = "cam_hf_2";
+				qcom,axi-port-mnoc {
+					qcom,msm-bus,name = "cam_hf_2_mnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+				qcom,axi-port-camnoc {
+					qcom,msm-bus,name = "cam_hf_2_camnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+			};
+			qcom,axi-port3 {
+				qcom,axi-port-name = "cam_sf_1";
+				qcom,axi-port-mnoc {
+					qcom,msm-bus,name = "cam_sf_1_mnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_SF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_SF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+				qcom,axi-port-camnoc {
+					qcom,msm-bus,name = "cam_sf_1_camnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_SF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_SF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+			};
+		};
+	};
+
+	qcom,cam-cdm-intf {
+		compatible = "qcom,cam-cdm-intf";
+		cell-index = <0>;
+		label = "cam-cdm-intf";
+		num-hw-cdm = <1>;
+		cdm-client-names = "ife",
+			"jpeg-dma",
+			"jpeg",
+			"fd";
+		status = "ok";
+	};
+
+	qcom,cpas-cdm0@ac48000 {
+		cell-index = <0>;
+		compatible = "qcom,cam170-cpas-cdm0";
+		label = "cpas-cdm";
+		reg = <0xac48000 0x1000>;
+		reg-names = "cpas-cdm";
+		reg-cam-base = <0x48000>;
+		interrupts = <0 461 0>;
+		interrupt-names = "cpas-cdm";
+		regulator-names = "camss";
+		camss-supply = <&titan_top_gdsc>;
+		clock-names = "gcc_camera_ahb",
+			"gcc_camera_axi",
+			"cam_cc_soc_ahb_clk",
+			"cam_cc_cpas_ahb_clk",
+			"cam_cc_camnoc_axi_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
+		clock-rates = <0 0 0 0 0>;
+		cdm-client-names = "vfe";
+		status = "ok";
+	};
+
+	qcom,cam-isp {
+		compatible = "qcom,cam-isp";
+		arch-compat = "ife";
+		status = "ok";
+	};
+
+	qcom,csid0@acb3000 {
+		cell-index = <0>;
+		compatible = "qcom,csid170";
+		reg-names = "csid";
+		reg = <0xacb3000 0x1000>;
+		reg-cam-base = <0xb3000>;
+		interrupt-names = "csid";
+		interrupts = <0 464 0>;
+		regulator-names = "camss", "ife0";
+		camss-supply = <&titan_top_gdsc>;
+		ife0-supply = <&ife_0_gdsc>;
+		clock-names = "camera_ahb",
+			"camera_axi",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"slow_ahb_clk_src",
+			"ife_csid_clk",
+			"ife_csid_clk_src",
+			"ife_cphy_rx_clk",
+			"cphy_rx_clk_src",
+			"ife_clk",
+			"ife_clk_src",
+			"camnoc_axi_clk",
+			"ife_axi_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_0_CSID_CLK>,
+			<&clock_camcc CAM_CC_IFE_0_CSID_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_0_CPHY_RX_CLK>,
+			<&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_0_CLK>,
+			<&clock_camcc CAM_CC_IFE_0_CLK_SRC>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+			<&clock_camcc CAM_CC_IFE_0_AXI_CLK>;
+		clock-rates = <0 0 0 0 0 0 500000000 0 0 0 60000000 0 0>;
+		src-clock-name = "ife_csid_clk_src";
+		status = "ok";
+	};
+
+	qcom,vfe0@acaf000 {
+		cell-index = <0>;
+		compatible = "qcom,vfe170";
+		reg-names = "ife";
+		reg = <0xacaf000 0x4000>;
+		reg-cam-base = <0xaf000>;
+		interrupt-names = "ife";
+		interrupts = <0 465 0>;
+		regulator-names = "camss", "ife0";
+		camss-supply = <&titan_top_gdsc>;
+		ife0-supply = <&ife_0_gdsc>;
+		clock-names = "camera_ahb",
+			"camera_axi",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"slow_ahb_clk_src",
+			"ife_clk",
+			"ife_clk_src",
+			"camnoc_axi_clk",
+			"ife_axi_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_0_CLK>,
+			<&clock_camcc CAM_CC_IFE_0_CLK_SRC>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+			<&clock_camcc CAM_CC_IFE_0_AXI_CLK>;
+		clock-rates = <0 0 0 0 0 0 600000000 0 0>;
+		src-clock-name = "ife_clk_src";
+		clock-names-option =  "ife_dsp_clk";
+		clocks-option = <&clock_camcc CAM_CC_IFE_0_DSP_CLK>;
+		clock-rates-option = <404000000>;
+		status = "ok";
+	};
+
+	qcom,csid1@acba000 {
+		cell-index = <1>;
+		compatible = "qcom,csid170";
+		reg-names = "csid";
+		reg = <0xacba000 0x1000>;
+		reg-cam-base = <0xba000>;
+		interrupt-names = "csid";
+		interrupts = <0 466 0>;
+		regulator-names = "camss", "ife1";
+		camss-supply = <&titan_top_gdsc>;
+		ife1-supply = <&ife_1_gdsc>;
+		clock-names = "camera_ahb",
+			"camera_axi",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"slow_ahb_clk_src",
+			"ife_csid_clk",
+			"ife_csid_clk_src",
+			"ife_cphy_rx_clk",
+			"cphy_rx_clk_src",
+			"ife_clk",
+			"ife_clk_src",
+			"camnoc_axi_clk",
+			"ife_axi_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_1_CSID_CLK>,
+			<&clock_camcc CAM_CC_IFE_1_CSID_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_1_CPHY_RX_CLK>,
+			<&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_1_CLK>,
+			<&clock_camcc CAM_CC_IFE_1_CLK_SRC>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+			<&clock_camcc CAM_CC_IFE_1_AXI_CLK>;
+		clock-rates = <0 0 0 0 0 0 500000000 0 0 0 60000000 0 0>;
+		src-clock-name = "ife_csid_clk_src";
+		status = "ok";
+	};
+
+	qcom,vfe1@acb6000 {
+		cell-index = <1>;
+		compatible = "qcom,vfe170";
+		reg-names = "ife";
+		reg = <0xacb6000 0x4000>;
+		reg-cam-base = <0xb6000>;
+		interrupt-names = "ife";
+		interrupts = <0 467 0>;
+		regulator-names = "camss", "ife1";
+		camss-supply = <&titan_top_gdsc>;
+		ife1-supply = <&ife_1_gdsc>;
+		clock-names = "camera_ahb",
+			"camera_axi",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"slow_ahb_clk_src",
+			"ife_clk",
+			"ife_clk_src",
+			"camnoc_axi_clk",
+			"ife_axi_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_1_CLK>,
+			<&clock_camcc CAM_CC_IFE_1_CLK_SRC>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+			<&clock_camcc CAM_CC_IFE_1_AXI_CLK>;
+		clock-rates = <0 0 0 0 0 0 600000000 0 0>;
+		src-clock-name = "ife_clk_src";
+		clock-names-option =  "ife_dsp_clk";
+		clocks-option = <&clock_camcc CAM_CC_IFE_1_DSP_CLK>;
+		clock-rates-option = <404000000>;
+		status = "ok";
+	};
+
+	qcom,csid-lite@acc8000 {
+		cell-index = <2>;
+		compatible = "qcom,csid-lite170";
+		reg-names = "csid-lite";
+		reg = <0xacc8000 0x1000>;
+		reg-cam-base = <0xc8000>;
+		interrupt-names = "csid-lite";
+		interrupts = <0 468 0>;
+		regulator-names = "camss";
+		camss-supply = <&titan_top_gdsc>;
+		clock-names = "camera_ahb",
+			"camera_axi",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"slow_ahb_clk_src",
+			"ife_csid_clk",
+			"ife_csid_clk_src",
+			"ife_cphy_rx_clk",
+			"cphy_rx_clk_src",
+			"ife_clk",
+			"ife_clk_src",
+			"camnoc_axi_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_LITE_CSID_CLK>,
+			<&clock_camcc CAM_CC_IFE_LITE_CSID_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_LITE_CPHY_RX_CLK>,
+			<&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_LITE_CLK>,
+			<&clock_camcc CAM_CC_IFE_LITE_CLK_SRC>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
+		clock-rates = <0 0 0 0 0 0 384000000 0 0 0 40400000 0>;
+		src-clock-name = "ife_csid_clk_src";
+		status = "ok";
+	};
+
+	qcom,vfe-lite@acc4000 {
+		cell-index = <2>;
+		compatible = "qcom,vfe-lite170";
+		reg-names = "ife-lite";
+		reg = <0xacc4000 0x4000>;
+		reg-cam-base = <0xc4000>;
+		interrupt-names = "ife-lite";
+		interrupts = <0 469 0>;
+		regulator-names = "camss";
+		camss-supply = <&titan_top_gdsc>;
+		clock-names = "camera_ahb",
+			"camera_axi",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"slow_ahb_clk_src",
+			"ife_clk",
+			"ife_clk_src",
+			"camnoc_axi_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_LITE_CLK>,
+			<&clock_camcc CAM_CC_IFE_LITE_CLK_SRC>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
+		clock-rates = <0 0 0 0 0 0 404000000 0>;
+		src-clock-name = "ife_clk_src";
+		status = "ok";
+	};
+
+	qcom,cam-icp {
+		compatible = "qcom,cam-icp";
+		compat-hw-name = "qcom,a5",
+			"qcom,ipe0",
+			"qcom,ipe1",
+			"qcom,bps";
+		num-a5 = <1>;
+		num-ipe = <2>;
+		num-bps = <1>;
+		status = "ok";
+	};
+
+	qcom,a5@ac00000 {
+		cell-index = <0>;
+		compatible = "qcom,cam_a5";
+		reg = <0xac00000 0x6000>,
+			<0xac10000 0x8000>,
+			<0xac18000 0x3000>;
+		reg-names = "a5_qgic", "a5_sierra", "a5_csr";
+		reg-cam-base = <0x00000 0x10000 0x18000>;
+		interrupts = <0 463 0>;
+		interrupt-names = "a5";
+		regulator-names = "camss-vdd";
+		camss-vdd-supply = <&titan_top_gdsc>;
+		clock-names = "gcc_cam_ahb_clk",
+			"gcc_cam_axi_clk",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"camnoc_axi_clk",
+			"icp_apb_clk",
+			"icp_atb_clk",
+			"icp_clk",
+			"icp_clk_src",
+			"icp_cti_clk",
+			"icp_ts_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+				<&clock_gcc GCC_CAMERA_AXI_CLK>,
+				<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+				<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+				<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+				<&clock_camcc CAM_CC_ICP_APB_CLK>,
+				<&clock_camcc CAM_CC_ICP_ATB_CLK>,
+				<&clock_camcc CAM_CC_ICP_CLK>,
+				<&clock_camcc CAM_CC_ICP_CLK_SRC>,
+				<&clock_camcc CAM_CC_ICP_CTI_CLK>,
+				<&clock_camcc CAM_CC_ICP_TS_CLK>;
+
+		clock-rates = <0 0 0 80000000 0 0 0 0 600000000 0 0>;
+		fw_name = "CAMERA_ICP.elf";
+		status = "ok";
+	};
+
+	qcom,ipe0 {
+		cell-index = <0>;
+		compatible = "qcom,cam_ipe";
+		regulator-names = "ipe0-vdd";
+		ipe0-vdd-supply = <&ipe_0_gdsc>;
+		clock-names = "ipe_0_ahb_clk",
+			"ipe_0_areg_clk",
+			"ipe_0_axi_clk",
+			"ipe_0_clk",
+			"ipe_0_clk_src";
+		clocks = <&clock_camcc CAM_CC_IPE_0_AHB_CLK>,
+				<&clock_camcc CAM_CC_IPE_0_AREG_CLK>,
+				<&clock_camcc CAM_CC_IPE_0_AXI_CLK>,
+				<&clock_camcc CAM_CC_IPE_0_CLK>,
+				<&clock_camcc CAM_CC_IPE_0_CLK_SRC>;
+
+		clock-rates = <80000000 400000000 0 0 600000000>;
+		status = "ok";
+	};
+
+	qcom,ipe1 {
+		cell-index = <1>;
+		compatible = "qcom,cam_ipe";
+		regulator-names = "ipe1-vdd";
+		ipe1-vdd-supply = <&ipe_1_gdsc>;
+		clock-names = "ipe_1_ahb_clk",
+			"ipe_1_areg_clk",
+			"ipe_1_axi_clk",
+			"ipe_1_clk",
+			"ipe_1_clk_src";
+		clocks = <&clock_camcc CAM_CC_IPE_1_AHB_CLK>,
+				<&clock_camcc CAM_CC_IPE_1_AREG_CLK>,
+				<&clock_camcc CAM_CC_IPE_1_AXI_CLK>,
+				<&clock_camcc CAM_CC_IPE_1_CLK>,
+				<&clock_camcc CAM_CC_IPE_1_CLK_SRC>;
+
+		clock-rates = <80000000 400000000 0 0 600000000>;
+		status = "ok";
+	};
+
+	qcom,bps {
+		cell-index = <0>;
+		compatible = "qcom,cam_bps";
+		regulator-names = "bps-vdd";
+		bps-vdd-supply = <&bps_gdsc>;
+		clock-names = "bps_ahb_clk",
+			"bps_areg_clk",
+			"bps_axi_clk",
+			"bps_clk",
+			"bps_clk_src";
+		clocks = <&clock_camcc CAM_CC_BPS_AHB_CLK>,
+				<&clock_camcc CAM_CC_BPS_AREG_CLK>,
+				<&clock_camcc CAM_CC_BPS_AXI_CLK>,
+				<&clock_camcc CAM_CC_BPS_CLK>,
+				<&clock_camcc CAM_CC_BPS_CLK_SRC>;
+
+		clock-rates = <80000000 400000000 0 0 600000000>;
+		status = "ok";
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-cdp-overlay.dts
new file mode 100644
index 0000000..fff9160
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp-overlay.dts
@@ -0,0 +1,24 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include "sdm845.dtsi"
+#include "sdm845-cdp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM845 v1 CDP";
+	compatible = "qcom,sdm845-cdp", "qcom,sdm845", "qcom,cdp";
+	qcom,msm-id = <321 0x0>;
+	qcom,board-id = <1 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
index af28003..1fdf740 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
@@ -122,6 +122,17 @@
 	status = "ok";
 };
 
+&extcon_storage_cd {
+	gpio = <&tlmm 126 GPIO_ACTIVE_LOW>;
+	debounce-ms = <200>;
+	irq-flags = <IRQ_TYPE_EDGE_BOTH>;
+
+	pinctrl-names = "default";
+	pinctrl-0 = <&storage_cd>;
+
+	status = "ok";
+};
+
 &ufsphy_card {
 	compatible = "qcom,ufs-phy-qmp-v3";
 
@@ -164,6 +175,8 @@
 				50000000 100000000 200000000>;
 	qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
 
+	extcon = <&extcon_storage_cd>;
+
 	status = "ok";
 };
 
@@ -229,7 +242,41 @@
 	qcom,platform-reset-gpio = <&tlmm 6 0>;
 };
 
-&dsi_dual_nt35597_truly_video_display {
+&dsi_dual_nt35597_truly_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,mdss-dsi-panel-mode-gpio-state = "dual_port";
+	qcom,panel-mode-gpio = <&tlmm 52 0>;
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_sim_vid {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_dual_sim_vid {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_sim_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_dual_sim_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_dual_nt35597_truly_cmd_display {
 	qcom,dsi-display-active;
 };
 
@@ -300,7 +347,6 @@
 		qcom,scale-function = <4>;
 		qcom,hw-settle-time = <2>;
 		qcom,fast-avg-setup = <0>;
-		qcom,vadc-thermal-node;
 	};
 
 	chan@4d {
@@ -312,7 +358,6 @@
 		qcom,scale-function = <2>;
 		qcom,hw-settle-time = <2>;
 		qcom,fast-avg-setup = <0>;
-		qcom,vadc-thermal-node;
 	};
 
 	chan@4f {
@@ -324,7 +369,6 @@
 		qcom,scale-function = <2>;
 		qcom,hw-settle-time = <2>;
 		qcom,fast-avg-setup = <0>;
-		qcom,vadc-thermal-node;
 	};
 
 	chan@51 {
@@ -336,7 +380,6 @@
 		qcom,scale-function = <2>;
 		qcom,hw-settle-time = <2>;
 		qcom,fast-avg-setup = <0>;
-		qcom,vadc-thermal-node;
 	};
 };
 
@@ -350,30 +393,114 @@
 		qcom,hw-settle-time = <0>;
 		qcom,btm-channel-number = <0x60>;
 	};
+
+	chan@4c {
+		label = "xo_therm";
+		reg = <0x4c>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <4>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x68>;
+		qcom,thermal-node;
+	};
+
+	chan@4d {
+		label = "msm_therm";
+		reg = <0x4d>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x70>;
+		qcom,thermal-node;
+	};
+
+	chan@4f {
+		label = "pa_therm1";
+		reg = <0x4f>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x78>;
+		qcom,thermal-node;
+	};
+
+	chan@51 {
+		label = "quiet_therm";
+		reg = <0x51>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x80>;
+		qcom,thermal-node;
+	};
 };
 
 &thermal_zones {
 	xo-therm-adc {
 		polling-delay-passive = <0>;
 		polling-delay = <0>;
-		thermal-sensors = <&pm8998_vadc 0x4c>;
+		thermal-sensors = <&pm8998_adc_tm 0x4c>;
+		thermal-governor = "user_space";
+
+		trips {
+			active-config0 {
+				temperature = <65000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
 	};
 
 	msm-therm-adc {
 		polling-delay-passive = <0>;
 		polling-delay = <0>;
-		thermal-sensors = <&pm8998_vadc 0x4d>;
+		thermal-sensors = <&pm8998_adc_tm 0x4d>;
+		thermal-governor = "user_space";
+
+		trips {
+			active-config0 {
+				temperature = <65000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
 	};
 
 	pa-therm1-adc {
 		polling-delay-passive = <0>;
 		polling-delay = <0>;
-		thermal-sensors = <&pm8998_vadc 0x4f>;
+		thermal-sensors = <&pm8998_adc_tm 0x4f>;
+		thermal-governor = "user_space";
+
+		trips {
+			active-config0 {
+				temperature = <65000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
 	};
 
 	quiet-therm-adc {
 		polling-delay-passive = <0>;
 		polling-delay = <0>;
-		thermal-sensors = <&pm8998_vadc 0x51>;
+		thermal-sensors = <&pm8998_adc_tm 0x51>;
+		thermal-governor = "user_space";
+
+		trips {
+			active-config0 {
+				temperature = <65000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
 	};
 };
+
+&wil6210 {
+	status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
index f6493ac..c4169c7 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
@@ -1731,7 +1731,7 @@
 		compatible = "qcom,coresight-remote-etm";
 
 		coresight-name = "coresight-turing-etm0";
-		qcom,inst-id = <1>;
+		qcom,inst-id = <13>;
 
 		port{
 			turing_etm0_out_funnel_turing: endpoint {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
index 77edb85..1b3f2a6 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
@@ -64,7 +64,6 @@
 		qcom,initial-pwrlevel = <2>;
 
 		qcom,gpu-quirk-hfi-use-reg;
-		qcom,gpu-quirk-two-pass-use-wfi;
 
 		qcom,idle-timeout = <100000000>; //msecs
 		qcom,no-nap;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp-overlay.dts
new file mode 100644
index 0000000..79fa580
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp-overlay.dts
@@ -0,0 +1,24 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include "sdm845.dtsi"
+#include "sdm845-mtp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM845 v1 MTP";
+	compatible = "qcom,sdm845-mtp", "qcom,sdm845", "qcom,mtp";
+	qcom,msm-id = <321 0x0>;
+	qcom,board-id = <8 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index d316d63..508b645 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -94,7 +94,41 @@
 	qcom,platform-reset-gpio = <&tlmm 6 0>;
 };
 
-&dsi_dual_nt35597_truly_video_display {
+&dsi_dual_nt35597_truly_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,mdss-dsi-panel-mode-gpio-state = "dual_port";
+	qcom,panel-mode-gpio = <&tlmm 52 0>;
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_sim_vid {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_dual_sim_vid {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_sim_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_dual_sim_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_dual_nt35597_truly_cmd_display {
 	qcom,dsi-display-active;
 };
 
@@ -283,7 +317,6 @@
 		qcom,scale-function = <4>;
 		qcom,hw-settle-time = <2>;
 		qcom,fast-avg-setup = <0>;
-		qcom,vadc-thermal-node;
 	};
 
 	chan@4d {
@@ -295,7 +328,6 @@
 		qcom,scale-function = <2>;
 		qcom,hw-settle-time = <2>;
 		qcom,fast-avg-setup = <0>;
-		qcom,vadc-thermal-node;
 	};
 
 	chan@4f {
@@ -307,7 +339,6 @@
 		qcom,scale-function = <2>;
 		qcom,hw-settle-time = <2>;
 		qcom,fast-avg-setup = <0>;
-		qcom,vadc-thermal-node;
 	};
 
 	chan@51 {
@@ -319,7 +350,6 @@
 		qcom,scale-function = <2>;
 		qcom,hw-settle-time = <2>;
 		qcom,fast-avg-setup = <0>;
-		qcom,vadc-thermal-node;
 	};
 };
 
@@ -333,30 +363,114 @@
 		qcom,hw-settle-time = <0>;
 		qcom,btm-channel-number = <0x60>;
 	};
+
+	chan@4c {
+		label = "xo_therm";
+		reg = <0x4c>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <4>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x68>;
+		qcom,thermal-node;
+	};
+
+	chan@4d {
+		label = "msm_therm";
+		reg = <0x4d>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x70>;
+		qcom,thermal-node;
+	};
+
+	chan@4f {
+		label = "pa_therm1";
+		reg = <0x4f>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x78>;
+		qcom,thermal-node;
+	};
+
+	chan@51 {
+		label = "quiet_therm";
+		reg = <0x51>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x80>;
+		qcom,thermal-node;
+	};
 };
 
 &thermal_zones {
 	xo-therm-adc {
 		polling-delay-passive = <0>;
 		polling-delay = <0>;
-		thermal-sensors = <&pm8998_vadc 0x4c>;
+		thermal-sensors = <&pm8998_adc_tm 0x4c>;
+		thermal-governor = "user_space";
+
+		trips {
+			active-config0 {
+				temperature = <65000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
 	};
 
 	msm-therm-adc {
 		polling-delay-passive = <0>;
 		polling-delay = <0>;
-		thermal-sensors = <&pm8998_vadc 0x4d>;
+		thermal-sensors = <&pm8998_adc_tm 0x4d>;
+		thermal-governor = "user_space";
+
+		trips {
+			active-config0 {
+				temperature = <65000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
 	};
 
 	pa-therm1-adc {
 		polling-delay-passive = <0>;
 		polling-delay = <0>;
-		thermal-sensors = <&pm8998_vadc 0x4f>;
+		thermal-sensors = <&pm8998_adc_tm 0x4f>;
+		thermal-governor = "user_space";
+
+		trips {
+			active-config0 {
+				temperature = <65000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
 	};
 
 	quiet-therm-adc {
 		polling-delay-passive = <0>;
 		polling-delay = <0>;
-		thermal-sensors = <&pm8998_vadc 0x51>;
+		thermal-sensors = <&pm8998_adc_tm 0x51>;
+		thermal-governor = "user_space";
+
+		trips {
+			active-config0 {
+				temperature = <65000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
 	};
 };
+
+&wil6210 {
+	status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
index 947262fb..59b3396 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
@@ -112,6 +112,19 @@
 			};
 		};
 
+		storage_cd: storage_cd {
+			mux {
+				pins = "gpio126";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio126";
+				bias-pull-up;           /* pull up */
+				drive-strength = <2>;   /* 2 MA */
+			};
+		};
+
 		sdc2_clk_on: sdc2_clk_on {
 			config {
 				pins = "sdc2_clk";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
index 4a8d06d..b51996d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
@@ -118,3 +118,37 @@
 
 	status = "ok";
 };
+
+&labibb {
+	status = "ok";
+	qcom,qpnp-labibb-mode = "lcd";
+};
+
+&pmi8998_wled {
+	status = "okay";
+	qcom,led-strings-list = [01 02];
+};
+
+&mdss_mdp {
+	connectors = <&sde_rscc &sde_wb &dsi_sharp_4k_dsc_video_display>;
+};
+
+&dsi_sharp_4k_dsc_video {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,mdss-dsi-panel-mode-gpio-state = "dual_port";
+	qcom,panel-mode-gpio = <&tlmm 52 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+	qcom,mdss-dsi-panel-orientation = "180";
+};
+
+&dsi_sharp_4k_dsc_video_display {
+	qcom,dsi-display-active;
+};
+
+&wil6210 {
+	status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
index e5d1a74..0fb455f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
@@ -14,6 +14,18 @@
 
 &soc {
 	/* QUPv3 South instances */
+	qupv3_0: qcom,qupv3_0_geni_se@8c0000 {
+		compatible = "qcom,qupv3-geni-se";
+		reg = <0x8c0000 0x6000>;
+		qcom,bus-mas-id = <MSM_BUS_MASTER_BLSP_1>;
+		qcom,bus-slv-id = <MSM_BUS_SLAVE_EBI_CH0>;
+		qcom,iommu-s1-bypass;
+
+		iommu_qupv3_0_geni_se_cb: qcom,iommu_qupv3_0_geni_se_cb {
+			compatible = "qcom,qupv3-geni-se-cb";
+			iommus = <&apps_smmu 0x003 0x0>;
+		};
+	};
 
 	/*
 	 * HS UART instances. HS UART usecases can be supported on these
@@ -33,8 +45,8 @@
 		interrupts-extended = <&intc GIC_SPI 607 0>,
 				<&tlmm 48 0>;
 		status = "disabled";
-		qcom,bus-mas = <MSM_BUS_MASTER_BLSP_1>;
 		qcom,wakeup-byte = <0xFD>;
+		qcom,wrapper-core = <&qupv3_0>;
 	};
 
 	qupv3_se7_4uart: qcom,qup_uart@0x89c000 {
@@ -51,8 +63,8 @@
 		interrupts-extended = <&intc GIC_SPI 608 0>,
 				<&tlmm 96 0>;
 		status = "disabled";
-		qcom,bus-mas = <MSM_BUS_MASTER_BLSP_1>;
 		qcom,wakeup-byte = <0xFD>;
+		qcom,wrapper-core = <&qupv3_0>;
 	};
 
 	/* I2C */
@@ -69,6 +81,7 @@
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se0_i2c_active>;
 		pinctrl-1 = <&qupv3_se0_i2c_sleep>;
+		qcom,wrapper-core = <&qupv3_0>;
 		status = "disabled";
 	};
 
@@ -85,6 +98,7 @@
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se1_i2c_active>;
 		pinctrl-1 = <&qupv3_se1_i2c_sleep>;
+		qcom,wrapper-core = <&qupv3_0>;
 		status = "disabled";
 	};
 
@@ -101,6 +115,7 @@
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se2_i2c_active>;
 		pinctrl-1 = <&qupv3_se2_i2c_sleep>;
+		qcom,wrapper-core = <&qupv3_0>;
 		status = "disabled";
 	};
 
@@ -117,6 +132,7 @@
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se3_i2c_active>;
 		pinctrl-1 = <&qupv3_se3_i2c_sleep>;
+		qcom,wrapper-core = <&qupv3_0>;
 		status = "disabled";
 	};
 
@@ -133,6 +149,7 @@
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se4_i2c_active>;
 		pinctrl-1 = <&qupv3_se4_i2c_sleep>;
+		qcom,wrapper-core = <&qupv3_0>;
 		status = "disabled";
 	};
 
@@ -149,6 +166,7 @@
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se5_i2c_active>;
 		pinctrl-1 = <&qupv3_se5_i2c_sleep>;
+		qcom,wrapper-core = <&qupv3_0>;
 		status = "disabled";
 	};
 
@@ -165,6 +183,7 @@
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se6_i2c_active>;
 		pinctrl-1 = <&qupv3_se6_i2c_sleep>;
+		qcom,wrapper-core = <&qupv3_0>;
 		status = "disabled";
 	};
 
@@ -181,6 +200,7 @@
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se7_i2c_active>;
 		pinctrl-1 = <&qupv3_se7_i2c_sleep>;
+		qcom,wrapper-core = <&qupv3_0>;
 		status = "disabled";
 	};
 
@@ -200,6 +220,7 @@
 		pinctrl-1 = <&qupv3_se0_spi_sleep>;
 		interrupts = <GIC_SPI 601 0>;
 		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_0>;
 		status = "disabled";
 	};
 
@@ -218,6 +239,7 @@
 		pinctrl-1 = <&qupv3_se1_spi_sleep>;
 		interrupts = <GIC_SPI 602 0>;
 		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_0>;
 		status = "disabled";
 	};
 
@@ -236,6 +258,7 @@
 		pinctrl-1 = <&qupv3_se2_spi_sleep>;
 		interrupts = <GIC_SPI 603 0>;
 		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_0>;
 		status = "disabled";
 	};
 
@@ -254,6 +277,7 @@
 		pinctrl-1 = <&qupv3_se3_spi_sleep>;
 		interrupts = <GIC_SPI 604 0>;
 		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_0>;
 		status = "disabled";
 	};
 
@@ -272,6 +296,7 @@
 		pinctrl-1 = <&qupv3_se4_spi_sleep>;
 		interrupts = <GIC_SPI 605 0>;
 		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_0>;
 		status = "disabled";
 	};
 
@@ -290,6 +315,7 @@
 		pinctrl-1 = <&qupv3_se5_spi_sleep>;
 		interrupts = <GIC_SPI 606 0>;
 		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_0>;
 		status = "disabled";
 	};
 
@@ -308,6 +334,7 @@
 		pinctrl-1 = <&qupv3_se6_spi_sleep>;
 		interrupts = <GIC_SPI 607 0>;
 		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_0>;
 		status = "disabled";
 	};
 
@@ -326,10 +353,24 @@
 		pinctrl-1 = <&qupv3_se7_spi_sleep>;
 		interrupts = <GIC_SPI 608 0>;
 		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_0>;
 		status = "disabled";
 	};
 
 	/* QUPv3 North Instances */
+	qupv3_1: qcom,qupv3_1_geni_se@ac0000 {
+		compatible = "qcom,qupv3-geni-se";
+		reg = <0xac0000 0x6000>;
+		qcom,bus-mas-id = <MSM_BUS_MASTER_BLSP_2>;
+		qcom,bus-slv-id = <MSM_BUS_SLAVE_EBI_CH0>;
+		qcom,iommu-s1-bypass;
+
+		iommu_qupv3_1_geni_se_cb: qcom,iommu_qupv3_1_geni_se_cb {
+			compatible = "qcom,qupv3-geni-se-cb";
+			iommus = <&apps_smmu 0x6c3 0x0>;
+		};
+	};
+
 	/* 2-wire UART */
 
 	/* Debug UART Instance for CDP/MTP platform */
@@ -344,8 +385,8 @@
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se9_2uart_active>;
 		pinctrl-1 = <&qupv3_se9_2uart_sleep>;
-		qcom,bus-mas = <MSM_BUS_MASTER_BLSP_2>;
 		interrupts = <GIC_SPI 354 0>;
+		qcom,wrapper-core = <&qupv3_1>;
 		status = "disabled";
 	};
 
@@ -362,7 +403,7 @@
 		pinctrl-0 = <&qupv3_se10_2uart_active>;
 		pinctrl-1 = <&qupv3_se10_2uart_sleep>;
 		interrupts = <GIC_SPI 355 0>;
-		qcom,bus-mas = <MSM_BUS_MASTER_BLSP_2>;
+		qcom,wrapper-core = <&qupv3_1>;
 		status = "disabled";
 	};
 
@@ -380,6 +421,7 @@
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se8_i2c_active>;
 		pinctrl-1 = <&qupv3_se8_i2c_sleep>;
+		qcom,wrapper-core = <&qupv3_1>;
 		status = "disabled";
 	};
 
@@ -396,6 +438,7 @@
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se9_i2c_active>;
 		pinctrl-1 = <&qupv3_se9_i2c_sleep>;
+		qcom,wrapper-core = <&qupv3_1>;
 		status = "disabled";
 	};
 
@@ -412,6 +455,7 @@
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se10_i2c_active>;
 		pinctrl-1 = <&qupv3_se10_i2c_sleep>;
+		qcom,wrapper-core = <&qupv3_1>;
 		status = "disabled";
 	};
 
@@ -428,6 +472,7 @@
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se11_i2c_active>;
 		pinctrl-1 = <&qupv3_se11_i2c_sleep>;
+		qcom,wrapper-core = <&qupv3_1>;
 		status = "disabled";
 	};
 
@@ -444,6 +489,7 @@
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se12_i2c_active>;
 		pinctrl-1 = <&qupv3_se12_i2c_sleep>;
+		qcom,wrapper-core = <&qupv3_1>;
 		status = "disabled";
 	};
 
@@ -460,6 +506,7 @@
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se13_i2c_active>;
 		pinctrl-1 = <&qupv3_se13_i2c_sleep>;
+		qcom,wrapper-core = <&qupv3_1>;
 		status = "disabled";
 	};
 
@@ -476,6 +523,7 @@
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se14_i2c_active>;
 		pinctrl-1 = <&qupv3_se14_i2c_sleep>;
+		qcom,wrapper-core = <&qupv3_1>;
 		status = "disabled";
 	};
 
@@ -492,6 +540,7 @@
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se15_i2c_active>;
 		pinctrl-1 = <&qupv3_se15_i2c_sleep>;
+		qcom,wrapper-core = <&qupv3_1>;
 		status = "disabled";
 	};
 
@@ -511,6 +560,7 @@
 		pinctrl-1 = <&qupv3_se8_spi_sleep>;
 		interrupts = <GIC_SPI 353 0>;
 		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_1>;
 		status = "disabled";
 	};
 
@@ -529,6 +579,7 @@
 		pinctrl-1 = <&qupv3_se9_spi_sleep>;
 		interrupts = <GIC_SPI 354 0>;
 		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_1>;
 		status = "disabled";
 	};
 
@@ -547,6 +598,7 @@
 		pinctrl-1 = <&qupv3_se10_spi_sleep>;
 		interrupts = <GIC_SPI 355 0>;
 		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_1>;
 		status = "disabled";
 	};
 
@@ -565,6 +617,7 @@
 		pinctrl-1 = <&qupv3_se11_spi_sleep>;
 		interrupts = <GIC_SPI 356 0>;
 		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_1>;
 		status = "disabled";
 	};
 
@@ -583,6 +636,7 @@
 		pinctrl-1 = <&qupv3_se12_spi_sleep>;
 		interrupts = <GIC_SPI 357 0>;
 		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_1>;
 		status = "disabled";
 	};
 
@@ -601,6 +655,7 @@
 		pinctrl-1 = <&qupv3_se13_spi_sleep>;
 		interrupts = <GIC_SPI 358 0>;
 		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_1>;
 		status = "disabled";
 	};
 
@@ -619,6 +674,7 @@
 		pinctrl-1 = <&qupv3_se14_spi_sleep>;
 		interrupts = <GIC_SPI 359 0>;
 		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_1>;
 		status = "disabled";
 	};
 
@@ -637,6 +693,7 @@
 		pinctrl-1 = <&qupv3_se15_spi_sleep>;
 		interrupts = <GIC_SPI 360 0>;
 		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_1>;
 		status = "disabled";
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
index ac3352e..255c0b3 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
@@ -291,6 +291,78 @@
 		ibb-supply = <&ibb_regulator>;
 	};
 
+	dsi_sim_vid_display: qcom,dsi-display@8 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_sim_vid_display";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0>;
+		qcom,dsi-phy = <&mdss_dsi_phy0>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+
+		qcom,dsi-panel = <&dsi_sim_vid>;
+	};
+
+	dsi_dual_sim_vid_display: qcom,dsi-display@9 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_dual_sim_vid_display";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
+		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+
+		qcom,dsi-panel = <&dsi_dual_sim_vid>;
+	};
+
+	dsi_sim_cmd_display: qcom,dsi-display@10 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_sim_cmd_display";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0>;
+		qcom,dsi-phy = <&mdss_dsi_phy0>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+
+		qcom,dsi-panel = <&dsi_sim_cmd>;
+	};
+
+	dsi_dual_sim_cmd_display: qcom,dsi-display@11 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_dual_sim_cmd_display";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
+		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+
+		qcom,dsi-panel = <&dsi_dual_sim_cmd>;
+	};
+
 	sde_wb: qcom,wb-display@0 {
 		compatible = "qcom,wb-display";
 		cell-index = <0>;
@@ -299,7 +371,7 @@
 };
 
 &mdss_mdp {
-	connectors = <&sde_wb &dsi_dual_nt35597_truly_video_display>;
+	connectors = <&sde_rscc &sde_wb &dsi_dual_nt35597_truly_cmd_display>;
 };
 
 &dsi_dual_nt35597_truly_video {
@@ -366,3 +438,27 @@
 				<1 0 2>;
 	qcom,default-topology-index = <0>;
 };
+
+&dsi_sim_vid {
+	qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00];
+	qcom,mdss-dsi-t-clk-post = <0x0d>;
+	qcom,mdss-dsi-t-clk-pre = <0x2d>;
+};
+
+&dsi_dual_sim_vid {
+	qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00];
+	qcom,mdss-dsi-t-clk-post = <0x0d>;
+	qcom,mdss-dsi-t-clk-pre = <0x2d>;
+};
+
+&dsi_sim_cmd {
+	qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00];
+	qcom,mdss-dsi-t-clk-post = <0x0d>;
+	qcom,mdss-dsi-t-clk-pre = <0x2d>;
+};
+
+&dsi_dual_sim_cmd {
+	qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00];
+	qcom,mdss-dsi-t-clk-post = <0x0d>;
+	qcom,mdss-dsi-t-clk-pre = <0x2d>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index 469c0be..86489a0 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -39,8 +39,8 @@
 		interrupts = <0 83 0>;
 		interrupt-controller;
 		#interrupt-cells = <1>;
-		iommus = <&apps_smmu 0x880 0x0>, <&apps_smmu 0x888 0x0>,
-			<&apps_smmu 0xc80 0x0>, <&apps_smmu 0xc88 0x0>;
+		iommus = <&apps_smmu 0x880 0x8>,
+			<&apps_smmu 0xc80 0x8>;
 
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -123,6 +123,7 @@
 		qcom,sde-has-cdp;
 		qcom,sde-has-src-split;
 		qcom,sde-has-dim-layer;
+		qcom,sde-has-idle-pc;
 		qcom,sde-max-bw-low-kbps = <9600000>;
 		qcom,sde-max-bw-high-kbps = <9600000>;
 		qcom,sde-dram-channels = <2>;
@@ -189,7 +190,6 @@
 	};
 
 	sde_rscc: qcom,sde_rscc@af20000 {
-		status = "disabled";
 		cell-index = <0>;
 		compatible = "qcom,sde-rsc";
 		reg = <0xaf20000 0x1c44>,
@@ -198,13 +198,16 @@
 		qcom,sde-rsc-version = <1>;
 
 		vdd-supply = <&mdss_core_gdsc>;
-		clocks = <&clock_dispcc DISP_CC_MDSS_RSCC_AHB_CLK>,
-			<&clock_dispcc DISP_CC_MDSS_RSCC_VSYNC_CLK>;
-		clock-names = "iface_clk", "vsync_clk";
+		clocks = <&clock_dispcc DISP_CC_MDSS_RSCC_VSYNC_CLK>,
+			<&clock_dispcc DISP_CC_MDSS_RSCC_AHB_CLK>;
+		clock-names = "vsync_clk", "iface_clk";
 		clock-rate = <0 0>;
 
 		qcom,sde-dram-channels = <2>;
 
+		mboxes = <&disp_rsc 0>;
+		mbox-names = "disp_rsc";
+
 		/* data and reg bus scale settings */
 		qcom,sde-data-bus {
 			qcom,msm-bus,name = "disp_rsc";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
index aac63ee..6fb6fb8 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
@@ -305,7 +305,7 @@
 
 	usb_audio_qmi_dev {
 		compatible = "qcom,usb-audio-qmi-dev";
-		iommus = <&apps_smmu 0x182c>;
+		iommus = <&apps_smmu 0x182c 0x0>;
 		qcom,usb-audio-stream-id = <0xc>;
 		qcom,usb-audio-intr-num = <2>;
 	};
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dts b/arch/arm64/boot/dts/qcom/sdm845.dts
new file mode 100644
index 0000000..a3fa3af
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845.dts
@@ -0,0 +1,21 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sdm845.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM845 v1 SoC";
+	compatible = "qcom,sdm845";
+	qcom,board-id = <0 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 987d38b..d9f79ae 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -24,6 +24,7 @@
 #include <dt-bindings/soc/qcom,tcs-mbox.h>
 #include <dt-bindings/spmi/spmi.h>
 #include <dt-bindings/thermal/thermal.h>
+#include <dt-bindings/msm/msm-bus-ids.h>
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM845";
@@ -68,11 +69,14 @@
 			};
 			L1_I_0: l1-icache {
 				compatible = "arm,arch-cache";
-				qcom,dump-size = <0x9000>;
+				qcom,dump-size = <0xa000>;
 			};
 			L1_D_0: l1-dcache {
 				compatible = "arm,arch-cache";
-				qcom,dump-size = <0x9000>;
+				qcom,dump-size = <0xa000>;
+			};
+			L1_TLB_0: l1-tlb {
+				qcom,dump-size = <0x3000>;
 			};
 		};
 
@@ -96,11 +100,14 @@
 			};
 			L1_I_100: l1-icache {
 				compatible = "arm,arch-cache";
-				qcom,dump-size = <0x9000>;
+				qcom,dump-size = <0xa000>;
 			};
 			L1_D_100: l1-dcache {
 				compatible = "arm,arch-cache";
-				qcom,dump-size = <0x9000>;
+				qcom,dump-size = <0xa000>;
+			};
+			L1_TLB_100: l1-tlb {
+				qcom,dump-size = <0x3000>;
 			};
 		};
 
@@ -124,11 +131,14 @@
 			};
 			L1_I_200: l1-icache {
 				compatible = "arm,arch-cache";
-				qcom,dump-size = <0x9000>;
+				qcom,dump-size = <0xa000>;
 			};
 			L1_D_200: l1-dcache {
 				compatible = "arm,arch-cache";
-				qcom,dump-size = <0x9000>;
+				qcom,dump-size = <0xa000>;
+			};
+			L1_TLB_200: l1-tlb {
+				qcom,dump-size = <0x3000>;
 			};
 		};
 
@@ -152,11 +162,14 @@
 			};
 			L1_I_300: l1-icache {
 				compatible = "arm,arch-cache";
-				qcom,dump-size = <0x9000>;
+				qcom,dump-size = <0xa000>;
 			};
 			L1_D_300: l1-dcache {
 				compatible = "arm,arch-cache";
-				qcom,dump-size = <0x9000>;
+				qcom,dump-size = <0xa000>;
+			};
+			L1_TLB_300: l1-tlb {
+				qcom,dump-size = <0x3000>;
 			};
 		};
 
@@ -180,11 +193,14 @@
 			};
 			L1_I_400: l1-icache {
 				compatible = "arm,arch-cache";
-				qcom,dump-size = <0x12000>;
+				qcom,dump-size = <0x14000>;
 			};
 			L1_D_400: l1-dcache {
 				compatible = "arm,arch-cache";
-				qcom,dump-size = <0x12000>;
+				qcom,dump-size = <0x14000>;
+			};
+			L1_TLB_400: l1-tlb {
+				qcom,dump-size = <0x3c000>;
 			};
 		};
 
@@ -208,11 +224,14 @@
 			};
 			L1_I_500: l1-icache {
 				compatible = "arm,arch-cache";
-				qcom,dump-size = <0x12000>;
+				qcom,dump-size = <0x14000>;
 			};
 			L1_D_500: l1-dcache {
 				compatible = "arm,arch-cache";
-				qcom,dump-size = <0x12000>;
+				qcom,dump-size = <0x14000>;
+			};
+			L1_TLB_500: l1-tlb {
+				qcom,dump-size = <0x3c000>;
 			};
 		};
 
@@ -236,11 +255,14 @@
 			};
 			L1_I_600: l1-icache {
 				compatible = "arm,arch-cache";
-				qcom,dump-size = <0x12000>;
+				qcom,dump-size = <0x14000>;
 			};
 			L1_D_600: l1-dcache {
 				compatible = "arm,arch-cache";
-				qcom,dump-size = <0x12000>;
+				qcom,dump-size = <0x14000>;
+			};
+			L1_TLB_600: l1-tlb {
+				qcom,dump-size = <0x3c000>;
 			};
 		};
 
@@ -264,11 +286,14 @@
 			};
 			L1_I_700: l1-icache {
 				compatible = "arm,arch-cache";
-				qcom,dump-size = <0x12000>;
+				qcom,dump-size = <0x14000>;
 			};
 			L1_D_700: l1-dcache {
 				compatible = "arm,arch-cache";
-				qcom,dump-size = <0x12000>;
+				qcom,dump-size = <0x14000>;
+			};
+			L1_TLB_700: l1-tlb {
+				qcom,dump-size = <0x3c000>;
 			};
 		};
 
@@ -427,6 +452,13 @@
 
 	soc: soc { };
 
+	vendor: vendor {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges = <0 0 0 0xffffffff>;
+		compatible = "simple-bus";
+	};
+
 	reserved-memory {
 		#address-cells = <2>;
 		#size-cells = <2>;
@@ -741,7 +773,9 @@
 			< 1363200 >,
 			< 1440000 >,
 			< 1516800 >,
-			< 1593600 >;
+			< 1593600 >,
+			< 1651200 >,
+			< 1708800 >;
 
 		qcom,cpufreq-table-4 =
 			<  300000 >,
@@ -765,7 +799,9 @@
 			< 1728000 >,
 			< 1804800 >,
 			< 1881600 >,
-			< 1958400 >;
+			< 1958400 >,
+			< 2035200 >,
+			< 2092800 >;
 	};
 
 	cpubw: qcom,cpubw {
@@ -796,6 +832,37 @@
 		qcom,target-dev = <&cpubw>;
 	};
 
+	llccbw: qcom,llccbw {
+		compatible = "qcom,devbw";
+		governor = "powersave";
+		qcom,src-dst-ports =
+			<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_EBI_CH0>;
+		qcom,active-only;
+		qcom,bw-tbl =
+			<  762 /*  200 MHz */ >,
+			< 1144 /*  300 MHz */ >,
+			< 1720 /*  451 MHz */ >,
+			< 2086 /*  547 MHz */ >,
+			< 2597 /*  681 MHz */ >,
+			< 2929 /*  768 MHz */ >,
+			< 3879 /* 1017 MHz */ >,
+			< 4943 /* 1296 MHz */ >,
+			< 5931 /* 1555 MHz */ >,
+			< 6881 /* 1804 MHz */ >;
+	};
+
+	llcc_bwmon: qcom,llcc-bwmon {
+		compatible = "qcom,bimc-bwmon5";
+		reg = <0x0114A000 0x1000>;
+		reg-names = "base";
+		interrupts = <GIC_SPI 580 IRQ_TYPE_LEVEL_HIGH>;
+		qcom,hw-timer-hz = <19200000>;
+		qcom,target-dev = <&llccbw>;
+		qcom,count-unit = <0x400000>;
+		qcom,byte-mid-mask = <0xe000>;
+		qcom,byte-mid-match = <0xe000>;
+	};
+
 	memlat_cpu0: qcom,memlat-cpu0 {
 		compatible = "qcom,devbw";
 		governor = "powersave";
@@ -876,6 +943,7 @@
 		clock-names = "devfreq_clk";
 		clocks = <&clock_cpucc L3_CLUSTER0_VOTE_CLK>;
 		governor = "performance";
+		qcom,prepare-clk;
 		freq-tbl-khz =
 			< 300000 >,
 			< 422400 >,
@@ -885,7 +953,9 @@
 			< 729600 >,
 			< 806400 >,
 			< 883200 >,
-			< 960000 >;
+			< 960000 >,
+			< 1036800 >,
+			< 1094400 >;
 	};
 
 	l3_cpu4: qcom,l3-cpu4 {
@@ -893,6 +963,7 @@
 		clock-names = "devfreq_clk";
 		clocks = <&clock_cpucc L3_CLUSTER1_VOTE_CLK>;
 		governor = "performance";
+		qcom,prepare-clk;
 		freq-tbl-khz =
 			< 300000 >,
 			< 422400 >,
@@ -902,7 +973,9 @@
 			< 729600 >,
 			< 806400 >,
 			< 883200 >,
-			< 960000 >;
+			< 960000 >,
+			< 1036800 >,
+			< 1094400 >;
 	};
 
 	devfreq_l3lat_0: qcom,cpu0-l3lat-mon {
@@ -1004,6 +1077,11 @@
 		#reset-cells = <1>;
 	};
 
+	cpucc_debug: syscon@17970018 {
+		compatible = "syscon";
+		reg = <0x17970018 0x4>;
+	};
+
 	clock_cpucc: qcom,cpucc@0x17d41000 {
 		compatible = "qcom,clk-cpu-osm";
 		reg = <0x17d41000 0x1400>,
@@ -1014,11 +1092,14 @@
 			<0x178b0000 0x1000>,
 			<0x17d42400 0x0c00>,
 			<0x17d44400 0x0c00>,
-			<0x17d46c00 0x0c00>;
+			<0x17d46c00 0x0c00>,
+			<0x00784130 0x4>,
+			<0x00784130 0x4>,
+			<0x00784130 0x4>;
 		reg-names = "osm_l3_base", "osm_pwrcl_base", "osm_perfcl_base",
-			"l3_pll", "pwrcl_pll", "perfcl_pll",
-			"l3_sequencer", "pwrcl_sequencer",
-			"perfcl_sequencer";
+			"l3_pll", "pwrcl_pll", "perfcl_pll", "l3_sequencer",
+			"pwrcl_sequencer", "perfcl_sequencer", "l3_efuse",
+			"pwrcl_efuse", "perfcl_efuse";
 
 		vdd-l3-supply = <&apc0_l3_vreg>;
 		vdd-pwrcl-supply = <&apc0_pwrcl_vreg>;
@@ -1031,7 +1112,22 @@
 			<   576000000 0x5014031e 0x00002020 0x1 4 >,
 			<   652800000 0x401c0422 0x00002020 0x1 5 >,
 			<   729600000 0x401c0526 0x00002020 0x1 6 >,
-			<   806400000 0x401c062a 0x00002222 0x1 7 >;
+			<   806400000 0x401c062a 0x00002222 0x1 7 >,
+			<   883200000 0x4024072e 0x00002525 0x2 8 >,
+			<   960000000 0x40240832 0x00002828 0x2 9 >;
+
+		qcom,l3-speedbin1-v0 =
+			<   300000000 0x000c000f 0x00002020 0x1 1 >,
+			<   422400000 0x50140116 0x00002020 0x1 2 >,
+			<   499200000 0x5014021a 0x00002020 0x1 3 >,
+			<   576000000 0x5014031e 0x00002020 0x1 4 >,
+			<   652800000 0x401c0422 0x00002020 0x1 5 >,
+			<   729600000 0x401c0526 0x00002020 0x1 6 >,
+			<   806400000 0x401c062a 0x00002222 0x1 7 >,
+			<   883200000 0x4024072e 0x00002525 0x2 8 >,
+			<   960000000 0x40240832 0x00002828 0x2 9 >,
+			<  1036800000 0x40240936 0x00002b2b 0x3 10 >,
+			<  1094400000 0x402c0a39 0x00002e2e 0x3 11 >;
 
 		qcom,pwrcl-speedbin0-v0 =
 			<   300000000 0x000c000f 0x00002020 0x1 1 >,
@@ -1045,7 +1141,33 @@
 			<   979200000 0x40240833 0x00002929 0x1 9 >,
 			<  1056000000 0x402c0937 0x00002c2c 0x1 10 >,
 			<  1132800000 0x402c0a3b 0x00002f2f 0x1 11 >,
-			<  1209600000 0x402c0b3f 0x00003333 0x1 12 >;
+			<  1209600000 0x402c0b3f 0x00003232 0x1 12 >,
+			<  1286400000 0x40340c43 0x00003636 0x2 13 >,
+			<  1363200000 0x40340d47 0x00003939 0x2 14 >,
+			<  1440000000 0x40340e4b 0x00003c3c 0x2 15 >,
+			<  1516800000 0x403c0f4f 0x00003f3f 0x2 16 >,
+			<  1593600000 0x403c1053 0x00004242 0x2 17 >;
+
+		qcom,pwrcl-speedbin1-v0 =
+			<   300000000 0x000c000f 0x00002020 0x1 1 >,
+			<   422400000 0x50140116 0x00002020 0x1 2 >,
+			<   499200000 0x5014021a 0x00002020 0x1 3 >,
+			<   576000000 0x5014031e 0x00002020 0x1 4 >,
+			<   652800000 0x401c0422 0x00002020 0x1 5 >,
+			<   748800000 0x401c0527 0x00002020 0x1 6 >,
+			<   825600000 0x401c062b 0x00002222 0x1 7 >,
+			<   902400000 0x4024072f 0x00002626 0x1 8 >,
+			<   979200000 0x40240833 0x00002929 0x1 9 >,
+			<  1056000000 0x402c0937 0x00002c2c 0x1 10 >,
+			<  1132800000 0x402c0a3b 0x00002f2f 0x1 11 >,
+			<  1209600000 0x402c0b3f 0x00003232 0x1 12 >,
+			<  1286400000 0x40340c43 0x00003636 0x2 13 >,
+			<  1363200000 0x40340d47 0x00003939 0x2 14 >,
+			<  1440000000 0x40340e4b 0x00003c3c 0x2 15 >,
+			<  1516800000 0x403c0f4f 0x00003f3f 0x2 16 >,
+			<  1593600000 0x403c1053 0x00004242 0x2 17 >,
+			<  1651200000 0x403c1156 0x00004545 0x3 18 >,
+			<  1708800000 0x40441259 0x00004747 0x3 19 >;
 
 		qcom,perfcl-speedbin0-v0 =
 			<   300000000 0x000c000f 0x00002020 0x1 1 >,
@@ -1059,7 +1181,43 @@
 			<   960000000 0x40240832 0x00002828 0x1 9 >,
 			<  1036800000 0x40240936 0x00002b2b 0x1 10 >,
 			<  1113600000 0x402c0a3a 0x00002e2e 0x1 11 >,
-			<  1190400000 0x402c0b3e 0x00003232 0x1 12 >;
+			<  1190400000 0x402c0b3e 0x00003232 0x1 12 >,
+			<  1267200000 0x40340c42 0x00003535 0x2 13 >,
+			<  1344000000 0x40340d46 0x00003838 0x2 14 >,
+			<  1420800000 0x40340e4a 0x00003b3b 0x2 15 >,
+			<  1497600000 0x403c0f4e 0x00003e3e 0x2 16 >,
+			<  1574400000 0x403c1052 0x00004242 0x2 17 >,
+			<  1651200000 0x403c1156 0x00004545 0x2 18 >,
+			<  1728000000 0x4044125a 0x00004848 0x3 19 >,
+			<  1804800000 0x4044135e 0x00004b4b 0x3 20 >,
+			<  1881600000 0x404c1462 0x00004e4e 0x3 21 >,
+			<  1958400000 0x404c1566 0x00005252 0x3 22 >;
+
+		qcom,perfcl-speedbin1-v0 =
+			<   300000000 0x000c000f 0x00002020 0x1 1 >,
+			<   422400000 0x50140116 0x00002020 0x1 2 >,
+			<   499200000 0x5014021a 0x00002020 0x1 3 >,
+			<   576000000 0x5014031e 0x00002020 0x1 4 >,
+			<   652800000 0x401c0422 0x00002020 0x1 5 >,
+			<   729600000 0x401c0526 0x00002020 0x1 6 >,
+			<   806400000 0x401c062a 0x00002222 0x1 7 >,
+			<   883200000 0x4024072e 0x00002525 0x1 8 >,
+			<   960000000 0x40240832 0x00002828 0x1 9 >,
+			<  1036800000 0x40240936 0x00002b2b 0x1 10 >,
+			<  1113600000 0x402c0a3a 0x00002e2e 0x1 11 >,
+			<  1190400000 0x402c0b3e 0x00003232 0x1 12 >,
+			<  1267200000 0x40340c42 0x00003535 0x2 13 >,
+			<  1344000000 0x40340d46 0x00003838 0x2 14 >,
+			<  1420800000 0x40340e4a 0x00003b3b 0x2 15 >,
+			<  1497600000 0x403c0f4e 0x00003e3e 0x2 16 >,
+			<  1574400000 0x403c1052 0x00004242 0x2 17 >,
+			<  1651200000 0x403c1156 0x00004545 0x2 18 >,
+			<  1728000000 0x4044125a 0x00004848 0x3 19 >,
+			<  1804800000 0x4044135e 0x00004b4b 0x3 20 >,
+			<  1881600000 0x404c1462 0x00004e4e 0x3 21 >,
+			<  1958400000 0x404c1566 0x00005252 0x3 22 >,
+			<  2035200000 0x404c166a 0x00005555 0x3 23 >,
+			<  2092800000 0x4054176d 0x00005757 0x3 24 >;
 
 		qcom,l3-min-cpr-vc-bin0 = <7>;
 		qcom,pwrcl-min-cpr-vc-bin0 = <6>;
@@ -1127,6 +1285,7 @@
 		qcom,camcc = <&clock_camcc>;
 		qcom,dispcc = <&clock_dispcc>;
 		qcom,gpucc = <&clock_gpucc>;
+		qcom,cpucc = <&cpucc_debug>;
 		clock-names = "xo_clk_src";
 		clocks = <&clock_rpmh RPMH_CXO_CLK>;
 		#clock-cells = <1>;
@@ -1174,7 +1333,7 @@
 			"ref_aux_clk";
 		clocks = <&clock_rpmh RPMH_CXO_CLK>,
 			<&clock_gcc GCC_UFS_MEM_CLKREF_CLK>,
-			<&clock_gcc GCC_UFS_PHY_PHY_AUX_CLK>;
+			<&clock_gcc GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK>;
 
 		status = "disabled";
 	};
@@ -1200,13 +1359,12 @@
 			"tx_lane0_sync_clk",
 			"rx_lane0_sync_clk",
 			"rx_lane1_sync_clk";
-		/* TODO: add HW CTL clocks when available */
 		clocks =
-			<&clock_gcc GCC_UFS_PHY_AXI_CLK>,
-			<&clock_gcc GCC_AGGRE_UFS_PHY_AXI_CLK>,
+			<&clock_gcc GCC_UFS_PHY_AXI_HW_CTL_CLK>,
+			<&clock_gcc GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK>,
 			<&clock_gcc GCC_UFS_PHY_AHB_CLK>,
-			<&clock_gcc GCC_UFS_PHY_UNIPRO_CORE_CLK>,
-			<&clock_gcc GCC_UFS_PHY_ICE_CORE_CLK>,
+			<&clock_gcc GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK>,
+			<&clock_gcc GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK>,
 			<&clock_rpmh RPMH_CXO_CLK>,
 			<&clock_gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>,
 			<&clock_gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>,
@@ -1283,6 +1441,12 @@
 		status = "disabled";
 	};
 
+	extcon_storage_cd: extcon_storage_cd {
+		compatible = "extcon-gpio";
+		extcon-id = <62>; /* EXTCON_MECHANICAL */
+		status = "disabled";
+	};
+
 	ufsphy_card: ufsphy_card@1da7000 {
 		reg = <0x1da7000 0xda8>; /* PHY regs */
 		reg-names = "phy_mem";
@@ -1295,7 +1459,7 @@
 			"ref_aux_clk";
 		clocks = <&clock_rpmh RPMH_CXO_CLK>,
 			<&clock_gcc GCC_UFS_CARD_CLKREF_CLK>,
-			<&clock_gcc GCC_UFS_CARD_PHY_AUX_CLK>;
+			<&clock_gcc GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK>;
 
 		status = "disabled";
 	};
@@ -1319,13 +1483,12 @@
 			"ref_clk",
 			"tx_lane0_sync_clk",
 			"rx_lane0_sync_clk";
-		/* TODO: add HW CTL clocks when available */
 		clocks =
-			<&clock_gcc GCC_UFS_CARD_AXI_CLK>,
-			<&clock_gcc GCC_AGGRE_UFS_CARD_AXI_CLK>,
+			<&clock_gcc GCC_UFS_CARD_AXI_HW_CTL_CLK>,
+			<&clock_gcc GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK>,
 			<&clock_gcc GCC_UFS_CARD_AHB_CLK>,
-			<&clock_gcc GCC_UFS_CARD_UNIPRO_CORE_CLK>,
-			<&clock_gcc GCC_UFS_CARD_ICE_CORE_CLK>,
+			<&clock_gcc GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK>,
+			<&clock_gcc GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK>,
 			<&clock_rpmh RPMH_CXO_CLK>,
 			<&clock_gcc GCC_UFS_CARD_TX_SYMBOL_0_CLK>,
 			<&clock_gcc GCC_UFS_CARD_RX_SYMBOL_0_CLK>;
@@ -1691,66 +1854,54 @@
 		qcom,msm_fastrpc_compute_cb1 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
-			iommus = <&apps_smmu 0x1401 0x0>,
-				 <&apps_smmu 0x1421 0x0>;
+			iommus = <&apps_smmu 0x1401 0x30>;
 		};
 		qcom,msm_fastrpc_compute_cb2 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
-			iommus = <&apps_smmu 0x1402 0x0>,
-				 <&apps_smmu 0x1422 0x0>;
+			iommus = <&apps_smmu 0x1402 0x30>;
 		};
 		qcom,msm_fastrpc_compute_cb3 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
-			iommus = <&apps_smmu 0x1403 0x0>,
-				 <&apps_smmu 0x1423 0x0>;
+			iommus = <&apps_smmu 0x1403 0x30>;
 		};
 		qcom,msm_fastrpc_compute_cb4 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
-			iommus = <&apps_smmu 0x1404 0x0>,
-				 <&apps_smmu 0x1424 0x0>;
+			iommus = <&apps_smmu 0x1404 0x30>;
 		};
 		qcom,msm_fastrpc_compute_cb5 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
-			iommus = <&apps_smmu 0x1405 0x0>,
-				 <&apps_smmu 0x1425 0x0>;
+			iommus = <&apps_smmu 0x1405 0x30>;
 		};
 		qcom,msm_fastrpc_compute_cb6 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
-			iommus = <&apps_smmu 0x1406 0x0>,
-				 <&apps_smmu 0x1426 0x0>;
+			iommus = <&apps_smmu 0x1406 0x30>;
 		};
 		qcom,msm_fastrpc_compute_cb7 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
-			iommus = <&apps_smmu 0x1407 0x0>,
-				 <&apps_smmu 0x1427 0x0>;
+			iommus = <&apps_smmu 0x1407 0x30>;
 		};
 		qcom,msm_fastrpc_compute_cb8 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
-			iommus = <&apps_smmu 0x1408 0x0>,
-				 <&apps_smmu 0x1428 0x0>;
+			iommus = <&apps_smmu 0x1408 0x30>;
 		};
 		qcom,msm_fastrpc_compute_cb9 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
 			qcom,secure-context-bank;
-			iommus = <&apps_smmu 0x1409 0x0>,
-				 <&apps_smmu 0x1419 0x0>,
-				 <&apps_smmu 0x1429 0x0>;
+			iommus = <&apps_smmu 0x1409 0x30>;
 		};
 		qcom,msm_fastrpc_compute_cb10 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
 			qcom,secure-context-bank;
-			iommus = <&apps_smmu 0x140A 0x0>,
-				 <&apps_smmu 0x141A 0x0>,
-				 <&apps_smmu 0x142A 0x0>;
+			iommus = <&apps_smmu 0x140A 0x30>;
 		};
 		qcom,msm_fastrpc_compute_cb11 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
@@ -1828,6 +1979,12 @@
 		status = "ok";
 	};
 
+	ssc_sensors: qcom,msm-ssc-sensors {
+		compatible = "qcom,msm-ssc-sensors";
+		status = "ok";
+		qcom,firmware-name = "slpi";
+	};
+
 	cpuss_dump {
 		compatible = "qcom,cpuss-dump";
 		qcom,l1_i_cache0 {
@@ -1910,6 +2067,38 @@
 			qcom,dump-node = <&LLCC_4>;
 			qcom,dump-id = <0x124>;
 		};
+		qcom,l1_tlb_dump0 {
+			qcom,dump-node = <&L1_TLB_0>;
+			qcom,dump-id = <0x20>;
+		};
+		qcom,l1_tlb_dump100 {
+			qcom,dump-node = <&L1_TLB_100>;
+			qcom,dump-id = <0x21>;
+		};
+		qcom,l1_tlb_dump200 {
+			qcom,dump-node = <&L1_TLB_200>;
+			qcom,dump-id = <0x22>;
+		};
+		qcom,l1_tlb_dump300 {
+			qcom,dump-node = <&L1_TLB_300>;
+			qcom,dump-id = <0x23>;
+		};
+		qcom,l1_tlb_dump400 {
+			qcom,dump-node = <&L1_TLB_400>;
+			qcom,dump-id = <0x24>;
+		};
+		qcom,l1_tlb_dump500 {
+			qcom,dump-node = <&L1_TLB_500>;
+			qcom,dump-id = <0x25>;
+		};
+		qcom,l1_tlb_dump600 {
+			qcom,dump-node = <&L1_TLB_600>;
+			qcom,dump-id = <0x26>;
+		};
+		qcom,l1_tlb_dump700 {
+			qcom,dump-node = <&L1_TLB_700>;
+			qcom,dump-id = <0x27>;
+		};
 	};
 
 	kryo3xx-erp {
@@ -2059,6 +2248,18 @@
 		qcom,irq-mask = <0x100>;
 		interrupts = <GIC_SPI 156 IRQ_TYPE_EDGE_RISING>;
 		label = "lpass";
+		qcom,qos-config = <&glink_qos_adsp>;
+		qcom,ramp-time = <0xaf>;
+	};
+
+	glink_qos_adsp: qcom,glink-qos-config-adsp {
+		compatible = "qcom,glink-qos-config";
+		qcom,flow-info = <0x3c 0x0>,
+				<0x3c 0x0>,
+				<0x3c 0x0>,
+				<0x3c 0x0>;
+		qcom,mtu-size = <0x800>;
+		qcom,tput-stats-cycle = <0xa>;
 	};
 
 	qcom,glink-smem-native-xprt-dsps@86000000 {
@@ -2071,6 +2272,35 @@
 		label = "dsps";
 	};
 
+	glink_spi_xprt_wdsp: qcom,glink-spi-xprt-wdsp {
+		compatible = "qcom,glink-spi-xprt";
+		label = "wdsp";
+		qcom,remote-fifo-config = <&glink_fifo_wdsp>;
+		qcom,qos-config = <&glink_qos_wdsp>;
+		qcom,ramp-time = <0x10>,
+				     <0x20>,
+				     <0x30>,
+				     <0x40>;
+	};
+
+	glink_fifo_wdsp: qcom,glink-fifo-config-wdsp {
+		compatible = "qcom,glink-fifo-config";
+		qcom,out-read-idx-reg = <0x12000>;
+		qcom,out-write-idx-reg = <0x12004>;
+		qcom,in-read-idx-reg = <0x1200C>;
+		qcom,in-write-idx-reg = <0x12010>;
+	};
+
+	glink_qos_wdsp: qcom,glink-qos-config-wdsp {
+		compatible = "qcom,glink-qos-config";
+		qcom,flow-info = <0x80 0x0>,
+				 <0x70 0x1>,
+				 <0x60 0x2>,
+				 <0x50 0x3>;
+		qcom,mtu-size = <0x800>;
+		qcom,tput-stats-cycle = <0xa>;
+	};
+
 	qcom,glink-smem-native-xprt-cdsp@86000000 {
 		compatible = "qcom,glink-smem-native-xprt";
 		reg = <0x86000000 0x200000>,
@@ -2331,6 +2561,7 @@
 			 <&clock_gcc GCC_CE1_AHB_CLK>,
 			 <&clock_gcc GCC_CE1_AXI_CLK>;
 		qcom,ce-opp-freq = <171430000>;
+		qcom,request-bw-before-clk;
 	};
 
 	qcom_crypto: qcrypto@1de0000 {
@@ -2358,6 +2589,7 @@
 			 <&clock_gcc GCC_CE1_AHB_CLK>,
 			 <&clock_gcc GCC_CE1_AXI_CLK>;
 		qcom,ce-opp-freq = <171430000>;
+		qcom,request-bw-before-clk;
 		qcom,use-sw-aes-cbc-ecb-ctr-algo;
 		qcom,use-sw-aes-xts-algo;
 		qcom,use-sw-aes-ccm-algo;
@@ -2597,8 +2829,7 @@
 		      <0xa0000000 0x10000000>,
 		      <0xb0000000 0x10000>;
 		reg-names = "membase", "smmu_iova_base", "smmu_iova_ipa";
-		iommus = <&apps_smmu 0x0040 0x0>,
-			 <&apps_smmu 0x0041 0x0>;
+		iommus = <&apps_smmu 0x0040 0x1>;
 		interrupts = <0 414 0 /* CE0 */ >,
 			     <0 415 0 /* CE1 */ >,
 			     <0 416 0 /* CE2 */ >,
@@ -2612,10 +2843,17 @@
 			     <0 424 0 /* CE10 */ >,
 			     <0 425 0 /* CE11 */ >;
 		qcom,wlan-msa-memory = <0x100000>;
+
+		vdd-0.8-cx-mx-supply = <&pm8998_l5>;
+		vdd-1.8-xo-supply = <&pm8998_l7>;
+		vdd-1.3-rfa-supply = <&pm8998_l17>;
+		vdd-3.3-ch0-supply = <&pm8998_l25>;
+		qcom,vdd-0.8-cx-mx-config = <800000 800000>;
+		qcom,vdd-3.3-ch0-config = <3104000 3312000>;
 	};
 
 	thermal_zones: thermal-zones {
-		aoss0-ts0-h {
+		aoss0-usr {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-governor = "user_space";
@@ -2629,7 +2867,7 @@
 			};
 		};
 
-		cpu0-silver-ts0-h {
+		cpu0-silver-usr {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-governor = "user_space";
@@ -2643,7 +2881,7 @@
 			};
 		};
 
-		cpu1-silver-ts0-h {
+		cpu1-silver-usr {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-governor = "user_space";
@@ -2657,7 +2895,7 @@
 			};
 		};
 
-		cpu2-silver-ts0-h {
+		cpu2-silver-usr {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-governor = "user_space";
@@ -2671,7 +2909,7 @@
 			};
 		};
 
-		cpu3-silver-ts0-h {
+		cpu3-silver-usr {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-sensors = <&tsens0 4>;
@@ -2685,7 +2923,7 @@
 			};
 		};
 
-		kryo-l3-0-ts0-h {
+		kryo-l3-0-usr {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-sensors = <&tsens0 5>;
@@ -2699,7 +2937,7 @@
 			};
 		};
 
-		kryo-l3-1-ts0-h {
+		kryo-l3-1-usr {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-sensors = <&tsens0 6>;
@@ -2713,7 +2951,7 @@
 			};
 		};
 
-		cpu0-gold-ts0-h {
+		cpu0-gold-usr {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-sensors = <&tsens0 7>;
@@ -2727,7 +2965,7 @@
 			};
 		};
 
-		cpu1-gold-ts0-h {
+		cpu1-gold-usr {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-sensors = <&tsens0 8>;
@@ -2741,7 +2979,7 @@
 			};
 		};
 
-		cpu2-gold-ts0-h {
+		cpu2-gold-usr {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-sensors = <&tsens0 9>;
@@ -2755,7 +2993,7 @@
 			};
 		};
 
-		cpu3-gold-ts0-h {
+		cpu3-gold-usr {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-sensors = <&tsens0 10>;
@@ -2769,7 +3007,7 @@
 			};
 		};
 
-		gpu0-ts0-h {
+		gpu0-usr {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-sensors = <&tsens0 11>;
@@ -2783,7 +3021,7 @@
 			};
 		};
 
-		gpu1-ts0-h {
+		gpu1-usr {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-governor = "user_space";
@@ -2797,7 +3035,7 @@
 			};
 		};
 
-		aoss1-ts1-h {
+		aoss1-usr {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-sensors = <&tsens1 0>;
@@ -2811,7 +3049,7 @@
 			};
 		};
 
-		mdm-dsp-ts1-h {
+		mdm-dsp-usr {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-sensors = <&tsens1 1>;
@@ -2827,7 +3065,7 @@
 
 
 
-		ddr-ts1-h {
+		ddr-usr {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-sensors = <&tsens1 2>;
@@ -2841,7 +3079,7 @@
 			};
 		};
 
-		wlan-ts1-h {
+		wlan-usr {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-sensors = <&tsens1 3>;
@@ -2855,7 +3093,7 @@
 			};
 		};
 
-		compute-hvx-ts1-h {
+		compute-hvx-usr {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-sensors = <&tsens1 4>;
@@ -2869,7 +3107,7 @@
 			};
 		};
 
-		camera-ts1-h {
+		camera-usr {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-sensors = <&tsens1 5>;
@@ -2883,7 +3121,7 @@
 			};
 		};
 
-		mmss-ts1-h {
+		mmss-usr {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-sensors = <&tsens1 6>;
@@ -2897,7 +3135,7 @@
 			};
 		};
 
-		mdm-core-ts1-h {
+		mdm-core-usr {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-sensors = <&tsens1 7>;
@@ -2911,7 +3149,7 @@
 			};
 		};
 
-		gpu0 {
+		gpu0-step {
 			polling-delay-passive = <10>;
 			polling-delay = <0>;
 			thermal-sensors = <&tsens0 11>;
@@ -2932,7 +3170,7 @@
 			};
 		};
 
-		gpu1 {
+		gpu1-step {
 			polling-delay-passive = <10>;
 			polling-delay = <0>;
 			thermal-sensors = <&tsens0 12>;
@@ -2953,7 +3191,7 @@
 			};
 		};
 
-		pop-mem {
+		pop-mem-step {
 			polling-delay-passive = <10>;
 			polling-delay = <0>;
 			thermal-sensors = <&tsens1 2>;
@@ -2974,7 +3212,7 @@
 			};
 		};
 
-		aoss0-ts0-l {
+		aoss0-lowf {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-governor = "low_limits_floor";
@@ -3003,7 +3241,7 @@
 			};
 		};
 
-		cpu0-silver-ts0-l {
+		cpu0-silver-lowf {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-governor = "low_limits_floor";
@@ -3032,7 +3270,7 @@
 			};
 		};
 
-		cpu1-silver-ts0-l {
+		cpu1-silver-lowf {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-governor = "low_limits_floor";
@@ -3061,7 +3299,7 @@
 			};
 		};
 
-		cpu2-silver-ts0-l {
+		cpu2-silver-lowf {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-governor = "low_limits_floor";
@@ -3090,7 +3328,7 @@
 			};
 		};
 
-		cpu3-silver-ts0-l {
+		cpu3-silver-lowf {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-governor = "low_limits_floor";
@@ -3119,7 +3357,7 @@
 			};
 		};
 
-		kryo-l3-0-ts0-l {
+		kryo-l3-0-lowf {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-governor = "low_limits_floor";
@@ -3148,7 +3386,7 @@
 			};
 		};
 
-		kryo-l3-1-ts0-l {
+		kryo-l3-1-lowf {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-governor = "low_limits_floor";
@@ -3177,7 +3415,7 @@
 			};
 		};
 
-		cpu0-gold-ts0-l {
+		cpu0-gold-lowf {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-governor = "low_limits_floor";
@@ -3206,7 +3444,7 @@
 			};
 		};
 
-		cpu1-gold-ts0-l {
+		cpu1-gold-lowf {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-governor = "low_limits_floor";
@@ -3235,7 +3473,7 @@
 			};
 		};
 
-		cpu2-gold-ts0-l {
+		cpu2-gold-lowf {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-governor = "low_limits_floor";
@@ -3264,11 +3502,11 @@
 			};
 		};
 
-		cpu3-gold-ts0-l {
+		cpu3-gold-lowf {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-governor = "low_limits_floor";
-			thermal-sensors = <&tsens0 9>;
+			thermal-sensors = <&tsens0 10>;
 			tracks-low;
 			trips {
 				cpug3_trip: cpug3-trip {
@@ -3293,11 +3531,11 @@
 			};
 		};
 
-		gpu0-ts0-l {
+		gpu0-lowf {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-governor = "low_limits_floor";
-			thermal-sensors = <&tsens0 10>;
+			thermal-sensors = <&tsens0 11>;
 			tracks-low;
 			trips {
 				gpu0_trip_l: gpu0-trip {
@@ -3322,11 +3560,11 @@
 			};
 		};
 
-		gpu1-ts0-l {
+		gpu1-lowf {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-governor = "low_limits_floor";
-			thermal-sensors = <&tsens0 11>;
+			thermal-sensors = <&tsens0 12>;
 			tracks-low;
 			trips {
 				gpu1_trip_l: gpu1-trip_l {
@@ -3351,7 +3589,7 @@
 			};
 		};
 
-		aoss1-ts1-l {
+		aoss1-lowf {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-governor = "low_limits_floor";
@@ -3380,7 +3618,7 @@
 			};
 		};
 
-		mdm-dsp-ts1-l {
+		mdm-dsp-lowf {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-governor = "low_limits_floor";
@@ -3409,7 +3647,7 @@
 			};
 		};
 
-		ddr-ts1-l {
+		ddr-lowf {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-governor = "low_limits_floor";
@@ -3438,7 +3676,7 @@
 			};
 		};
 
-		wlan-ts1-l {
+		wlan-lowf {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-governor = "low_limits_floor";
@@ -3467,7 +3705,7 @@
 			};
 		};
 
-		compute-hvx-ts1-l {
+		compute-hvx-lowf {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-governor = "low_limits_floor";
@@ -3496,7 +3734,7 @@
 			};
 		};
 
-		camera-ts1-l {
+		camera-lowf {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-governor = "low_limits_floor";
@@ -3525,7 +3763,7 @@
 			};
 		};
 
-		mmss-ts1-l {
+		mmss-lowf {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-governor = "low_limits_floor";
@@ -3554,7 +3792,7 @@
 			};
 		};
 
-		mdm-core-ts1-l {
+		mdm-core-lowf {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-governor = "low_limits_floor";
@@ -3636,22 +3874,75 @@
 		interrupt-names = "tsens-upper-lower", "tsens-critical";
 		#thermal-sensor-cells = <1>;
 	};
+
+	gpi_dma0: qcom,gpi-dma@0x800000 {
+		#dma-cells = <6>;
+		compatible = "qcom,gpi-dma";
+		reg = <0x800000 0x60000>;
+		reg-names = "gpi-top";
+		interrupts = <0 244 0>, <0 245 0>, <0 246 0>, <0 247 0>,
+			     <0 248 0>, <0 249 0>, <0 250 0>, <0 251 0>,
+			     <0 252 0>, <0 253 0>, <0 254 0>, <0 255 0>,
+			     <0 256 0>;
+		qcom,max-num-gpii = <13>;
+		qcom,gpii-mask = <0xfa>;
+		qcom,ev-factor = <2>;
+		iommus = <&apps_smmu 0x0016 0x0>;
+		status = "ok";
+	};
+
+	gpi_dma1: qcom,gpi-dma@0xa00000 {
+		#dma-cells = <6>;
+		compatible = "qcom,gpi-dma";
+		reg = <0xa00000 0x60000>;
+		reg-names = "gpi-top";
+		interrupts = <0 279 0>, <0 280 0>, <0 281 0>, <0 282 0>,
+			     <0 283 0>, <0 284 0>, <0 293 0>, <0 294 0>,
+			     <0 295 0>, <0 296 0>, <0 297 0>, <0 298 0>,
+			     <0 299 0>;
+		qcom,max-num-gpii = <13>;
+		qcom,gpii-mask = <0xfa>;
+		qcom,ev-factor = <2>;
+		iommus = <&apps_smmu 0x06d6 0x0>;
+		status = "ok";
+	};
 };
 
 &clock_cpucc {
 	lmh_dcvs0: qcom,limits-dcvs@0 {
 		compatible = "qcom,msm-hw-limits";
-		interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
 		qcom,affinity = <0>;
 		#thermal-sensor-cells = <0>;
 	};
 
 	lmh_dcvs1: qcom,limits-dcvs@1 {
 		compatible = "qcom,msm-hw-limits";
-		interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
 		qcom,affinity = <1>;
 		#thermal-sensor-cells = <0>;
 	};
+
+	wil6210: qcom,wil6210 {
+		compatible = "qcom,wil6210";
+		qcom,pcie-parent = <&pcie0>;
+		qcom,wigig-en = <&tlmm 39 0>;
+		qcom,msm-bus,name = "wil6210";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<45 512 0 0>,
+			<45 512 600000 800000>; /* ~4.6Gbps (MCS12) */
+		qcom,use-ext-supply;
+		vdd-supply= <&pm8998_s7>;
+		vddio-supply= <&pm8998_s5>;
+		qcom,use-ext-clocks;
+		clocks = <&clock_rpmh RPMH_RF_CLK3>,
+			 <&clock_rpmh RPMH_RF_CLK3_A>;
+		clock-names = "rf_clk3_clk", "rf_clk3_pin_clk";
+		qcom,smmu-support;
+		status = "disabled";
+	};
 };
 
 &pcie_0_gdsc {
diff --git a/arch/arm64/boot/dts/renesas/r8a7795.dtsi b/arch/arm64/boot/dts/renesas/r8a7795.dtsi
index 8c15040..9536f20 100644
--- a/arch/arm64/boot/dts/renesas/r8a7795.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a7795.dtsi
@@ -553,6 +553,7 @@
 			phy-mode = "rgmii-id";
 			#address-cells = <1>;
 			#size-cells = <0>;
+			status = "disabled";
 		};
 
 		can0: can@e6c30000 {
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index 9b5de00..10b44f8 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -78,7 +78,6 @@
 CONFIG_CPU_FREQ_GOV_POWERSAVE=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
-CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
 CONFIG_CPU_BOOST=y
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
@@ -264,6 +263,8 @@
 CONFIG_PPPOLAC=y
 CONFIG_PPPOPNS=y
 CONFIG_USB_USBNET=y
+CONFIG_WIL6210=m
+# CONFIG_WIL6210_TRACING is not set
 CONFIG_WCNSS_MEM_PRE_ALLOC=y
 CONFIG_CLD_LL_CORE=y
 CONFIG_INPUT_EVDEV=y
@@ -338,7 +339,6 @@
 CONFIG_V4L_PLATFORM_DRIVERS=y
 CONFIG_SPECTRA_CAMERA=y
 CONFIG_MSM_VIDC_V4L2=y
-CONFIG_MSM_VIDC_VMEM=y
 CONFIG_MSM_VIDC_GOVERNORS=y
 CONFIG_MSM_SDE_ROTATOR=y
 CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
@@ -415,6 +415,7 @@
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_QPNP=y
 CONFIG_DMADEVICES=y
+CONFIG_QCOM_GPI_DMA=y
 CONFIG_UIO=y
 CONFIG_UIO_MSM_SHAREDMEM=y
 CONFIG_STAGING=y
@@ -431,7 +432,9 @@
 CONFIG_QPNP_COINCELL=y
 CONFIG_QPNP_REVID=y
 CONFIG_USB_BAM=y
+CONFIG_MSM_11AD=m
 CONFIG_SEEMP_CORE=y
+CONFIG_QCOM_GENI_SE=y
 CONFIG_MSM_GCC_SDM845=y
 CONFIG_MSM_VIDEOCC_SDM845=y
 CONFIG_MSM_CAMCC_SDM845=y
@@ -483,11 +486,13 @@
 CONFIG_ICNSS=y
 CONFIG_QCOM_COMMAND_DB=y
 CONFIG_MSM_ADSP_LOADER=y
+CONFIG_MSM_PERFORMANCE=y
 CONFIG_MSM_CDSP_LOADER=y
 CONFIG_MSM_AVTIMER=y
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_PM=y
 CONFIG_APSS_CORE_EA=y
+CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_ARM_MEMLAT_MON=y
@@ -498,7 +503,6 @@
 CONFIG_DEVFREQ_GOV_MEMLAT=y
 CONFIG_DEVFREQ_SIMPLE_DEV=y
 CONFIG_QCOM_DEVFREQ_DEVBW=y
-CONFIG_EXTCON=y
 CONFIG_EXTCON_USB_GPIO=y
 CONFIG_IIO=y
 CONFIG_QCOM_RRADC=y
@@ -507,6 +511,7 @@
 CONFIG_ARM_GIC_V3_ACL=y
 CONFIG_ANDROID=y
 CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_SENSORS_SSC=y
 CONFIG_MSM_TZ_LOG=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
@@ -527,7 +532,6 @@
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_PANIC_TIMEOUT=5
 CONFIG_SCHEDSTATS=y
-CONFIG_TIMER_STATS=y
 # CONFIG_DEBUG_PREEMPT is not set
 CONFIG_IPC_LOGGING=y
 CONFIG_CPU_FREQ_SWITCH_PROFILER=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index 615150a..737f47f 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -84,7 +84,6 @@
 CONFIG_CPU_FREQ_GOV_POWERSAVE=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
-CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
 CONFIG_CPU_BOOST=y
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
@@ -255,6 +254,7 @@
 CONFIG_SCSI_UFSHCD_PLATFORM=y
 CONFIG_SCSI_UFS_QCOM=y
 CONFIG_SCSI_UFS_QCOM_ICE=y
+CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_DM=y
 CONFIG_DM_CRYPT=y
@@ -273,6 +273,7 @@
 CONFIG_PPPOLAC=y
 CONFIG_PPPOPNS=y
 CONFIG_USB_USBNET=y
+CONFIG_WIL6210=m
 CONFIG_WCNSS_MEM_PRE_ALLOC=y
 CONFIG_CLD_LL_CORE=y
 CONFIG_INPUT_EVDEV=y
@@ -347,7 +348,6 @@
 CONFIG_V4L_PLATFORM_DRIVERS=y
 CONFIG_SPECTRA_CAMERA=y
 CONFIG_MSM_VIDC_V4L2=y
-CONFIG_MSM_VIDC_VMEM=y
 CONFIG_MSM_VIDC_GOVERNORS=y
 CONFIG_MSM_SDE_ROTATOR=y
 CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
@@ -431,6 +431,8 @@
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_QPNP=y
 CONFIG_DMADEVICES=y
+CONFIG_QCOM_GPI_DMA=y
+CONFIG_QCOM_GPI_DMA_DEBUG=y
 CONFIG_UIO=y
 CONFIG_UIO_MSM_SHAREDMEM=y
 CONFIG_STAGING=y
@@ -447,7 +449,9 @@
 CONFIG_QPNP_COINCELL=y
 CONFIG_QPNP_REVID=y
 CONFIG_USB_BAM=y
+CONFIG_MSM_11AD=m
 CONFIG_SEEMP_CORE=y
+CONFIG_QCOM_GENI_SE=y
 CONFIG_MSM_GCC_SDM845=y
 CONFIG_MSM_VIDEOCC_SDM845=y
 CONFIG_MSM_CAMCC_SDM845=y
@@ -464,6 +468,7 @@
 CONFIG_ARM_SMMU=y
 CONFIG_QCOM_LAZY_MAPPING=y
 CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
 CONFIG_IOMMU_TESTS=y
 CONFIG_QCOM_CPUSS_DUMP=y
 CONFIG_QCOM_RUN_QUEUE_STATS=y
@@ -503,12 +508,14 @@
 CONFIG_ICNSS_DEBUG=y
 CONFIG_QCOM_COMMAND_DB=y
 CONFIG_MSM_ADSP_LOADER=y
+CONFIG_MSM_PERFORMANCE=y
 CONFIG_MSM_CDSP_LOADER=y
 CONFIG_MSM_AVTIMER=y
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_PM=y
 CONFIG_APSS_CORE_EA=y
 CONFIG_QCOM_DCC_V2=y
+CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_ARM_MEMLAT_MON=y
@@ -519,7 +526,6 @@
 CONFIG_DEVFREQ_GOV_MEMLAT=y
 CONFIG_DEVFREQ_SIMPLE_DEV=y
 CONFIG_QCOM_DEVFREQ_DEVBW=y
-CONFIG_EXTCON=y
 CONFIG_EXTCON_USB_GPIO=y
 CONFIG_IIO=y
 CONFIG_QCOM_RRADC=y
@@ -529,6 +535,7 @@
 CONFIG_PHY_XGENE=y
 CONFIG_ANDROID=y
 CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_SENSORS_SSC=y
 CONFIG_MSM_TZ_LOG=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
@@ -576,7 +583,6 @@
 CONFIG_PANIC_ON_RT_THROTTLING=y
 CONFIG_SCHEDSTATS=y
 CONFIG_SCHED_STACK_END_CHECK=y
-CONFIG_TIMER_STATS=y
 # CONFIG_DEBUG_PREEMPT is not set
 CONFIG_DEBUG_SPINLOCK=y
 CONFIG_DEBUG_MUTEXES=y
@@ -596,6 +602,7 @@
 CONFIG_CPU_FREQ_SWITCH_PROFILER=y
 CONFIG_LKDTM=y
 CONFIG_MEMTEST=y
+CONFIG_PANIC_ON_DATA_CORRUPTION=y
 CONFIG_ARM64_PTDUMP=y
 CONFIG_PID_IN_CONTEXTIDR=y
 CONFIG_CORESIGHT=y
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 0363fe8..dc06a33 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -30,14 +30,20 @@
 #include <asm/pgtable.h>
 #include <asm/sysreg.h>
 #include <asm/tlbflush.h>
+#include <linux/msm_rtb.h>
 
 static inline void contextidr_thread_switch(struct task_struct *next)
 {
+	pid_t pid = task_pid_nr(next);
+
 	if (!IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR))
 		return;
 
-	write_sysreg(task_pid_nr(next), contextidr_el1);
+	write_sysreg(pid, contextidr_el1);
 	isb();
+
+	uncached_logk(LOGK_CTXID, (void *)(u64)pid);
+
 }
 
 /*
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 875545d..3845f33 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -72,9 +72,8 @@
 #define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
 #define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
 #define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
-#define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))
+#define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
 #define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
-#define pte_ng(pte)		(!!(pte_val(pte) & PTE_NG))
 
 #ifdef CONFIG_ARM64_HW_AFDBM
 #define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
@@ -85,8 +84,12 @@
 #define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))
 
 #define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
-#define pte_valid_global(pte) \
-	((pte_val(pte) & (PTE_VALID | PTE_NG)) == PTE_VALID)
+/*
+ * Execute-only user mappings do not have the PTE_USER bit set. All valid
+ * kernel mappings have the PTE_UXN bit set.
+ */
+#define pte_valid_not_user(pte) \
+	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
 #define pte_valid_young(pte) \
 	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
 
@@ -207,7 +210,7 @@
 	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
 	 * or update_mmu_cache() have the necessary barriers.
 	 */
-	if (pte_valid_global(pte)) {
+	if (pte_valid_not_user(pte)) {
 		dsb(ishst);
 		isb();
 	}
@@ -241,7 +244,7 @@
 			pte_val(pte) &= ~PTE_RDONLY;
 		else
 			pte_val(pte) |= PTE_RDONLY;
-		if (pte_ng(pte) && pte_exec(pte) && !pte_special(pte))
+		if (pte_user_exec(pte) && !pte_special(pte))
 			__sync_icache_dcache(pte, addr);
 	}
 
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 837bbab..75088c00 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -943,6 +943,8 @@
 	.sync_single_for_device = __iommu_sync_single_for_device,
 	.sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
 	.sync_sg_for_device = __iommu_sync_sg_for_device,
+	.map_resource = iommu_dma_map_resource,
+	.unmap_resource = iommu_dma_unmap_resource,
 	.dma_supported = iommu_dma_supported,
 	.mapping_error = iommu_dma_mapping_error,
 };
@@ -1847,6 +1849,45 @@
 		__dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
+static dma_addr_t arm_iommu_dma_map_resource(
+			struct device *dev, phys_addr_t phys_addr,
+			size_t size, enum dma_data_direction dir,
+			unsigned long attrs)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	size_t offset = phys_addr & ~PAGE_MASK;
+	size_t len = PAGE_ALIGN(size + offset);
+	dma_addr_t dma_addr;
+	int prot;
+
+	dma_addr = __alloc_iova(mapping, len);
+	if (dma_addr == DMA_ERROR_CODE)
+		return dma_addr;
+
+	prot = __dma_direction_to_prot(dir);
+	prot |= IOMMU_MMIO;
+
+	if (iommu_map(mapping->domain, dma_addr, phys_addr - offset,
+			len, prot)) {
+		__free_iova(mapping, dma_addr, len);
+		return DMA_ERROR_CODE;
+	}
+	return dma_addr + offset;
+}
+
+static void arm_iommu_dma_unmap_resource(
+			struct device *dev, dma_addr_t addr,
+			size_t size, enum dma_data_direction dir,
+			unsigned long attrs)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	size_t offset = addr & ~PAGE_MASK;
+	size_t len = PAGE_ALIGN(size + offset);
+
+	iommu_unmap(mapping->domain, addr - offset, len);
+	__free_iova(mapping, addr - offset, len);
+}
+
 static int arm_iommu_mapping_error(struct device *dev,
 				   dma_addr_t dma_addr)
 {
@@ -1869,6 +1910,9 @@
 	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
 	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,
 
+	.map_resource		= arm_iommu_dma_map_resource,
+	.unmap_resource		= arm_iommu_dma_unmap_resource,
+
 	.set_dma_mask		= arm_dma_set_mask,
 	.mapping_error		= arm_iommu_mapping_error,
 };
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index b2fc97a..9c4b57a 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -779,14 +779,14 @@
 		int ret;
 
 		ret = build_insn(insn, ctx);
-
-		if (ctx->image == NULL)
-			ctx->offset[i] = ctx->idx;
-
 		if (ret > 0) {
 			i++;
+			if (ctx->image == NULL)
+				ctx->offset[i] = ctx->idx;
 			continue;
 		}
+		if (ctx->image == NULL)
+			ctx->offset[i] = ctx->idx;
 		if (ret)
 			return ret;
 	}
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index bd09853..d8227f2 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -433,8 +433,8 @@
 	rs = regs->regs[MIPSInst_RS(ir)];
 	res = (u64)rt * (u64)rs;
 	rt = res;
-	regs->lo = (s64)rt;
-	regs->hi = (s64)(res >> 32);
+	regs->lo = (s64)(s32)rt;
+	regs->hi = (s64)(s32)(res >> 32);
 
 	MIPS_R2_STATS(muls);
 
@@ -670,9 +670,9 @@
 	res += ((((s64)rt) << 32) | (u32)rs);
 
 	rt = res;
-	regs->lo = (s64)rt;
+	regs->lo = (s64)(s32)rt;
 	rs = res >> 32;
-	regs->hi = (s64)rs;
+	regs->hi = (s64)(s32)rs;
 
 	MIPS_R2_STATS(dsps);
 
@@ -728,9 +728,9 @@
 	res = ((((s64)rt) << 32) | (u32)rs) - res;
 
 	rt = res;
-	regs->lo = (s64)rt;
+	regs->lo = (s64)(s32)rt;
 	rs = res >> 32;
-	regs->hi = (s64)rs;
+	regs->hi = (s64)(s32)rs;
 
 	MIPS_R2_STATS(dsps);
 
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 65fba4c..8f01f21 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -388,8 +388,8 @@
 	  be disabled also.
 
 	  If you have a toolchain which supports mprofile-kernel, then you can
-	  enable this. Otherwise leave it disabled. If you're not sure, say
-	  "N".
+	  disable this. Otherwise leave it enabled. If you're not sure, say
+	  "Y".
 
 config MPROFILE_KERNEL
 	depends on PPC64 && CPU_LITTLE_ENDIAN
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 13f5fad..e7d9eca 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -337,7 +337,7 @@
 #define   LPCR_DPFD_SH		52
 #define   LPCR_DPFD		(ASM_CONST(7) << LPCR_DPFD_SH)
 #define   LPCR_VRMASD_SH	47
-#define   LPCR_VRMASD		(ASM_CONST(1) << LPCR_VRMASD_SH)
+#define   LPCR_VRMASD		(ASM_CONST(0x1f) << LPCR_VRMASD_SH)
 #define   LPCR_VRMA_L		ASM_CONST(0x0008000000000000)
 #define   LPCR_VRMA_LP0		ASM_CONST(0x0001000000000000)
 #define   LPCR_VRMA_LP1		ASM_CONST(0x0000800000000000)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 1925341..adb52d1 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -15,7 +15,7 @@
 endif
 
 CFLAGS_cputable.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
-CFLAGS_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+CFLAGS_prom_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index 44d2d84..483d8c0 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -146,7 +146,7 @@
 opal_tracepoint_return:
 	std	r3,STK_REG(R31)(r1)
 	mr	r4,r3
-	ld	r0,STK_REG(R23)(r1)
+	ld	r3,STK_REG(R23)(r1)
 	bl	__trace_opal_exit
 	ld	r3,STK_REG(R31)(r1)
 	addi	r1,r1,STACKFRAMESIZE
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 6aa3da1..9835152 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -935,3 +935,9 @@
 	retl
 	 mov	%o1, %o0
 ENDPROC(__retl_o1)
+
+ENTRY(__retl_o1_asi)
+	wr      %o5, 0x0, %asi
+	retl
+	 mov    %o1, %o0
+ENDPROC(__retl_o1_asi)
diff --git a/arch/sparc/lib/GENbzero.S b/arch/sparc/lib/GENbzero.S
index 8e7a843..2fbf629 100644
--- a/arch/sparc/lib/GENbzero.S
+++ b/arch/sparc/lib/GENbzero.S
@@ -8,7 +8,7 @@
 98:	x,y;			\
 	.section __ex_table,"a";\
 	.align 4;		\
-	.word 98b, __retl_o1;	\
+	.word 98b, __retl_o1_asi;\
 	.text;			\
 	.align 4;
 
diff --git a/arch/sparc/lib/NGbzero.S b/arch/sparc/lib/NGbzero.S
index beab29b..33053bd 100644
--- a/arch/sparc/lib/NGbzero.S
+++ b/arch/sparc/lib/NGbzero.S
@@ -8,7 +8,7 @@
 98:	x,y;			\
 	.section __ex_table,"a";\
 	.align 4;		\
-	.word 98b, __retl_o1;	\
+	.word 98b, __retl_o1_asi;\
 	.text;			\
 	.align 4;
 
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index c5047b8..df60b58 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -106,18 +106,24 @@
 };
 
 PMU_FORMAT_ATTR(cyc,		"config:1"	);
+PMU_FORMAT_ATTR(pwr_evt,	"config:4"	);
+PMU_FORMAT_ATTR(fup_on_ptw,	"config:5"	);
 PMU_FORMAT_ATTR(mtc,		"config:9"	);
 PMU_FORMAT_ATTR(tsc,		"config:10"	);
 PMU_FORMAT_ATTR(noretcomp,	"config:11"	);
+PMU_FORMAT_ATTR(ptw,		"config:12"	);
 PMU_FORMAT_ATTR(mtc_period,	"config:14-17"	);
 PMU_FORMAT_ATTR(cyc_thresh,	"config:19-22"	);
 PMU_FORMAT_ATTR(psb_period,	"config:24-27"	);
 
 static struct attribute *pt_formats_attr[] = {
 	&format_attr_cyc.attr,
+	&format_attr_pwr_evt.attr,
+	&format_attr_fup_on_ptw.attr,
 	&format_attr_mtc.attr,
 	&format_attr_tsc.attr,
 	&format_attr_noretcomp.attr,
+	&format_attr_ptw.attr,
 	&format_attr_mtc_period.attr,
 	&format_attr_cyc_thresh.attr,
 	&format_attr_psb_period.attr,
diff --git a/arch/x86/include/asm/xen/events.h b/arch/x86/include/asm/xen/events.h
index 608a79d..e6911ca 100644
--- a/arch/x86/include/asm/xen/events.h
+++ b/arch/x86/include/asm/xen/events.h
@@ -20,4 +20,15 @@
 /* No need for a barrier -- XCHG is a barrier on x86. */
 #define xchg_xen_ulong(ptr, val) xchg((ptr), (val))
 
+extern int xen_have_vector_callback;
+
+/*
+ * Events delivered via platform PCI interrupts are always
+ * routed to vcpu 0 and hence cannot be rebound.
+ */
+static inline bool xen_support_evtchn_rebind(void)
+{
+	return (!xen_hvm_domain() || xen_have_vector_callback);
+}
+
 #endif /* _ASM_X86_XEN_EVENTS_H */
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index d1e2556..7249f15 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1876,6 +1876,7 @@
 	.irq_ack		= irq_chip_ack_parent,
 	.irq_eoi		= ioapic_ack_level,
 	.irq_set_affinity	= ioapic_set_affinity,
+	.irq_retrigger		= irq_chip_retrigger_hierarchy,
 	.flags			= IRQCHIP_SKIP_SET_WAKE,
 };
 
@@ -1887,6 +1888,7 @@
 	.irq_ack		= irq_chip_ack_parent,
 	.irq_eoi		= ioapic_ir_ack_level,
 	.irq_set_affinity	= ioapic_set_affinity,
+	.irq_retrigger		= irq_chip_retrigger_hierarchy,
 	.flags			= IRQCHIP_SKIP_SET_WAKE,
 };
 
diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h
index c6ee63f..d688826 100644
--- a/arch/x86/kernel/kprobes/common.h
+++ b/arch/x86/kernel/kprobes/common.h
@@ -67,7 +67,7 @@
 #endif
 
 /* Ensure if the instruction can be boostable */
-extern int can_boost(kprobe_opcode_t *instruction);
+extern int can_boost(kprobe_opcode_t *instruction, void *addr);
 /* Recover instruction if given address is probed */
 extern unsigned long recover_probed_instruction(kprobe_opcode_t *buf,
 					 unsigned long addr);
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index d9d8d16..b55d07b 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -166,12 +166,12 @@
  * Returns non-zero if opcode is boostable.
  * RIP relative instructions are adjusted at copying time in 64 bits mode
  */
-int can_boost(kprobe_opcode_t *opcodes)
+int can_boost(kprobe_opcode_t *opcodes, void *addr)
 {
 	kprobe_opcode_t opcode;
 	kprobe_opcode_t *orig_opcodes = opcodes;
 
-	if (search_exception_tables((unsigned long)opcodes))
+	if (search_exception_tables((unsigned long)addr))
 		return 0;	/* Page fault may occur on this address. */
 
 retry:
@@ -416,7 +416,7 @@
 	 * __copy_instruction can modify the displacement of the instruction,
 	 * but it doesn't affect boostable check.
 	 */
-	if (can_boost(p->ainsn.insn))
+	if (can_boost(p->ainsn.insn, p->addr))
 		p->ainsn.boostable = 0;
 	else
 		p->ainsn.boostable = -1;
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 3bb4c5f..4d74f73 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -178,7 +178,7 @@
 
 	while (len < RELATIVEJUMP_SIZE) {
 		ret = __copy_instruction(dest + len, src + len);
-		if (!ret || !can_boost(dest + len))
+		if (!ret || !can_boost(dest + len, src + len))
 			return -EINVAL;
 		len += ret;
 	}
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 5d400ba..d475179 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -296,7 +296,7 @@
 
 	/* were we called with bad_dma_address? */
 	badend = DMA_ERROR_CODE + (EMERGENCY_PAGES * PAGE_SIZE);
-	if (unlikely((dma_addr >= DMA_ERROR_CODE) && (dma_addr < badend))) {
+	if (unlikely(dma_addr < badend)) {
 		WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA "
 		       "address 0x%Lx\n", dma_addr);
 		return;
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index afa7bbb..967e459 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -846,12 +846,6 @@
 	if (!best)
 		best = check_cpuid_limit(vcpu, function, index);
 
-	/*
-	 * Perfmon not yet supported for L2 guest.
-	 */
-	if (is_guest_mode(vcpu) && function == 0xa)
-		best = NULL;
-
 	if (best) {
 		*eax = best->eax;
 		*ebx = best->ebx;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 43b55ef..89b98e0 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8051,8 +8051,6 @@
 	case EXIT_REASON_TASK_SWITCH:
 		return true;
 	case EXIT_REASON_CPUID:
-		if (kvm_register_read(vcpu, VCPU_REGS_RAX) == 0xa)
-			return false;
 		return true;
 	case EXIT_REASON_HLT:
 		return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
@@ -8137,6 +8135,9 @@
 		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
 	case EXIT_REASON_PREEMPTION_TIMER:
 		return false;
+	case EXIT_REASON_PML_FULL:
+		/* We don't expose PML support to L1. */
+		return false;
 	default:
 		return true;
 	}
@@ -10073,6 +10074,18 @@
 
 	}
 
+	if (enable_pml) {
+		/*
+		 * Conceptually we want to copy the PML address and index from
+		 * vmcs01 here, and then back to vmcs01 on nested vmexit. But,
+		 * since we always flush the log on each vmexit, this happens
+		 * to be equivalent to simply resetting the fields in vmcs02.
+		 */
+		ASSERT(vmx->pml_pg);
+		vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
+		vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
+	}
+
 	if (nested_cpu_has_ept(vmcs12)) {
 		kvm_mmu_unload(vcpu);
 		nested_ept_init_mmu_context(vcpu);
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index a00a6c0..4ea9f29 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -447,7 +447,7 @@
 
 int __init pci_xen_hvm_init(void)
 {
-	if (!xen_feature(XENFEAT_hvm_pirqs))
+	if (!xen_have_vector_callback || !xen_feature(XENFEAT_hvm_pirqs))
 		return 0;
 
 #ifdef CONFIG_ACPI
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
index 3f1f1c7..10bad1e 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
@@ -19,7 +19,7 @@
 #include <asm/intel_scu_ipc.h>
 #include <asm/io_apic.h>
 
-#define TANGIER_EXT_TIMER0_MSI 15
+#define TANGIER_EXT_TIMER0_MSI 12
 
 static struct platform_device wdt_dev = {
 	.name = "intel_mid_wdt",
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index bdd8556..8f1f7ef 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -137,6 +137,8 @@
 void *xen_initial_gdt;
 
 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
+__read_mostly int xen_have_vector_callback;
+EXPORT_SYMBOL_GPL(xen_have_vector_callback);
 
 static int xen_cpu_up_prepare(unsigned int cpu);
 static int xen_cpu_up_online(unsigned int cpu);
@@ -1521,7 +1523,10 @@
 	if (!xen_feature(XENFEAT_auto_translated_physmap))
 		return;
 
-	BUG_ON(!xen_feature(XENFEAT_hvm_callback_vector));
+	if (!xen_feature(XENFEAT_hvm_callback_vector))
+		return;
+
+	xen_have_vector_callback = 1;
 
 	xen_pvh_early_cpu_init(0, false);
 	xen_pvh_set_cr_flags(0);
@@ -1860,7 +1865,9 @@
 		xen_vcpu_setup(cpu);
 	}
 
-	if (xen_pv_domain() || xen_feature(XENFEAT_hvm_safe_pvclock))
+	if (xen_pv_domain() ||
+	    (xen_have_vector_callback &&
+	     xen_feature(XENFEAT_hvm_safe_pvclock)))
 		xen_setup_timer(cpu);
 
 	rc = xen_smp_intr_init(cpu);
@@ -1876,7 +1883,9 @@
 {
 	xen_smp_intr_free(cpu);
 
-	if (xen_pv_domain() || xen_feature(XENFEAT_hvm_safe_pvclock))
+	if (xen_pv_domain() ||
+	    (xen_have_vector_callback &&
+	     xen_feature(XENFEAT_hvm_safe_pvclock)))
 		xen_teardown_timer(cpu);
 
 	return 0;
@@ -1915,8 +1924,8 @@
 
 	xen_panic_handler_init();
 
-	BUG_ON(!xen_feature(XENFEAT_hvm_callback_vector));
-
+	if (xen_feature(XENFEAT_hvm_callback_vector))
+		xen_have_vector_callback = 1;
 	xen_hvm_smp_init();
 	WARN_ON(xen_cpuhp_setup());
 	xen_unplug_emulated_devices();
@@ -1954,7 +1963,7 @@
 		return false;
 	if (!xen_hvm_domain())
 		return false;
-	if (xen_feature(XENFEAT_hvm_pirqs))
+	if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback)
 		return false;
 	return true;
 }
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 311acad..137afbb 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -765,6 +765,8 @@
 
 void __init xen_hvm_smp_init(void)
 {
+	if (!xen_have_vector_callback)
+		return;
 	smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
 	smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
 	smp_ops.cpu_die = xen_cpu_die;
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 33d8f6a..67356d2 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -432,6 +432,11 @@
 
 void __init xen_hvm_init_time_ops(void)
 {
+	/* vector callback is needed otherwise we cannot receive interrupts
+	 * on cpu > 0 and at this point we don't know how many cpus are
+	 * available */
+	if (!xen_have_vector_callback)
+		return;
 	if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
 		printk(KERN_INFO "Xen doesn't support pvclock on HVM,"
 				"disable pv timer\n");
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index d69c5c7..319f2e4 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -417,7 +417,7 @@
 	bi->tuple_size = template->tuple_size;
 	bi->tag_size = template->tag_size;
 
-	blk_integrity_revalidate(disk);
+	disk->queue->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
 }
 EXPORT_SYMBOL(blk_integrity_register);
 
@@ -430,26 +430,11 @@
  */
 void blk_integrity_unregister(struct gendisk *disk)
 {
-	blk_integrity_revalidate(disk);
+	disk->queue->backing_dev_info.capabilities &= ~BDI_CAP_STABLE_WRITES;
 	memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity));
 }
 EXPORT_SYMBOL(blk_integrity_unregister);
 
-void blk_integrity_revalidate(struct gendisk *disk)
-{
-	struct blk_integrity *bi = &disk->queue->integrity;
-
-	if (!(disk->flags & GENHD_FL_UP))
-		return;
-
-	if (bi->profile)
-		disk->queue->backing_dev_info.capabilities |=
-			BDI_CAP_STABLE_WRITES;
-	else
-		disk->queue->backing_dev_info.capabilities &=
-			~BDI_CAP_STABLE_WRITES;
-}
-
 void blk_integrity_add(struct gendisk *disk)
 {
 	if (kobject_init_and_add(&disk->integrity_kobj, &integrity_ktype,
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 71d9ed9..a2437c0 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -447,7 +447,6 @@
 
 	if (disk->fops->revalidate_disk)
 		disk->fops->revalidate_disk(disk);
-	blk_integrity_revalidate(disk);
 	check_disk_size_change(disk, bdev);
 	bdev->bd_invalidated = 0;
 	if (!get_capacity(disk) || !(state = check_partition(disk, bdev)))
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 6266a37..5f8abc3 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -208,4 +208,6 @@
 
 source "drivers/fpga/Kconfig"
 
+source "drivers/sensors/Kconfig"
+
 endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 1419893..413dff9 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -176,3 +176,4 @@
 obj-$(CONFIG_NVMEM)		+= nvmem/
 obj-$(CONFIG_ESOC)              += esoc/
 obj-$(CONFIG_FPGA)		+= fpga/
+obj-$(CONFIG_SENSORS_SSC)		+= sensors/
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index f18ae62..4256d9b 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -1178,11 +1178,11 @@
 	ret = fw_get_filesystem_firmware(device, fw->priv);
 	if (ret) {
 		if (!(opt_flags & FW_OPT_NO_WARN))
-			dev_warn(device,
-				 "Direct firmware load for %s failed with error %d\n",
+			dev_dbg(device,
+				 "Firmware %s was not found in kernel paths. rc:%d\n",
 				 name, ret);
 		if (opt_flags & FW_OPT_USERHELPER) {
-			dev_warn(device, "Falling back to user helper\n");
+			dev_dbg(device, "Falling back to user helper\n");
 			ret = fw_load_from_user_helper(fw, name, device,
 						       opt_flags, timeout);
 		}
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index e595013..a017ccd 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -140,7 +140,7 @@
  * Allocates a new struct tpm_chip instance and assigns a free
  * device number for it. Must be paired with put_device(&chip->dev).
  */
-struct tpm_chip *tpm_chip_alloc(struct device *dev,
+struct tpm_chip *tpm_chip_alloc(struct device *pdev,
 				const struct tpm_class_ops *ops)
 {
 	struct tpm_chip *chip;
@@ -159,7 +159,7 @@
 	rc = idr_alloc(&dev_nums_idr, NULL, 0, TPM_NUM_DEVICES, GFP_KERNEL);
 	mutex_unlock(&idr_lock);
 	if (rc < 0) {
-		dev_err(dev, "No available tpm device numbers\n");
+		dev_err(pdev, "No available tpm device numbers\n");
 		kfree(chip);
 		return ERR_PTR(rc);
 	}
@@ -169,7 +169,7 @@
 
 	chip->dev.class = tpm_class;
 	chip->dev.release = tpm_dev_release;
-	chip->dev.parent = dev;
+	chip->dev.parent = pdev;
 	chip->dev.groups = chip->groups;
 
 	if (chip->dev_num == 0)
@@ -181,7 +181,7 @@
 	if (rc)
 		goto out;
 
-	if (!dev)
+	if (!pdev)
 		chip->flags |= TPM_CHIP_FLAG_VIRTUAL;
 
 	cdev_init(&chip->cdev, &tpm_fops);
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 4d183c9..aa4299c 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -518,6 +518,11 @@
 }
 #endif
 
+static inline u32 tpm2_rc_value(u32 rc)
+{
+	return (rc & BIT(7)) ? rc & 0xff : rc;
+}
+
 int tpm2_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf);
 int tpm2_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash);
 int tpm2_get_random(struct tpm_chip *chip, u8 *out, size_t max);
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 7df55d58..17896d6 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -529,7 +529,7 @@
 	tpm_buf_destroy(&buf);
 
 	if (rc > 0) {
-		if ((rc & TPM2_RC_HASH) == TPM2_RC_HASH)
+		if (tpm2_rc_value(rc) == TPM2_RC_HASH)
 			rc = -EINVAL;
 		else
 			rc = -EPERM;
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 925081e..42042c0 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -87,6 +87,8 @@
 obj-$(CONFIG_CLK_UNIPHIER)		+= uniphier/
 obj-$(CONFIG_ARCH_U8500)		+= ux500/
 obj-$(CONFIG_COMMON_CLK_VERSATILE)	+= versatile/
+ifeq ($(CONFIG_COMMON_CLK), y)
 obj-$(CONFIG_X86)			+= x86/
+endif
 obj-$(CONFIG_ARCH_ZX)			+= zte/
 obj-$(CONFIG_ARCH_ZYNQ)			+= zynq/
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 1b545d6..1f0c111 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1801,6 +1801,15 @@
 		clk_enable_unlock(flags);
 	}
 
+	trace_clk_set_rate(core, core->new_rate);
+
+	/* Enforce vdd requirements for new frequency. */
+	if (core->prepare_count) {
+		rc = clk_vote_rate_vdd(core, core->new_rate);
+		if (rc)
+			goto out;
+	}
+
 	if (core->new_parent && core->new_parent != core->parent) {
 		old_parent = __clk_set_parent_before(core, core->new_parent);
 		trace_clk_set_parent(core, core->new_parent);
@@ -1821,15 +1830,6 @@
 	if (core->flags & CLK_OPS_PARENT_ENABLE)
 		clk_core_prepare_enable(parent);
 
-	trace_clk_set_rate(core, core->new_rate);
-
-	/* Enforce vdd requirements for new frequency. */
-	if (core->prepare_count) {
-		rc = clk_vote_rate_vdd(core, core->new_rate);
-		if (rc)
-			goto out;
-	}
-
 	if (!skip_set_rate && core->ops->set_rate) {
 		rc = core->ops->set_rate(core->hw, core->new_rate,
 						best_parent_rate);
@@ -2329,6 +2329,21 @@
 }
 EXPORT_SYMBOL_GPL(clk_set_flags);
 
+unsigned long clk_list_frequency(struct clk *clk, unsigned int index)
+{
+	int ret = 0;
+
+	if (!clk || !clk->core->ops->list_rate)
+		return -EINVAL;
+
+	clk_prepare_lock();
+	ret = clk->core->ops->list_rate(clk->core->hw, index, ULONG_MAX);
+	clk_prepare_unlock();
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(clk_list_frequency);
+
 /***        debugfs support        ***/
 
 #ifdef CONFIG_DEBUG_FS
diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c
index 6296c40..adbabea 100644
--- a/drivers/clk/qcom/camcc-sdm845.c
+++ b/drivers/clk/qcom/camcc-sdm845.c
@@ -764,7 +764,8 @@
 };
 
 static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
-	F(24000000, P_CAM_CC_PLL3_OUT_EVEN, 16, 0, 0),
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(24000000, P_CAM_CC_PLL2_OUT_EVEN, 10, 1, 2),
 	F(33333333, P_CAM_CC_PLL0_OUT_EVEN, 2, 1, 9),
 	F(34285714, P_CAM_CC_PLL2_OUT_EVEN, 14, 0, 0),
 	{ }
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
index 884656f..4efecef 100644
--- a/drivers/clk/qcom/clk-cpu-osm.c
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -40,6 +40,7 @@
 #include "clk-regmap.h"
 #include "clk-rcg.h"
 #include "clk-voter.h"
+#include "clk-debug.h"
 
 #define OSM_TABLE_SIZE			40
 #define SINGLE_CORE			1
@@ -53,10 +54,10 @@
 
 #define OSM_REG_SIZE			32
 
-#define L3_EFUSE_SHIFT			0
-#define L3_EFUSE_MASK			0
-#define PWRCL_EFUSE_SHIFT		0
-#define PWRCL_EFUSE_MASK		0
+#define L3_EFUSE_SHIFT			29
+#define L3_EFUSE_MASK			0x7
+#define PWRCL_EFUSE_SHIFT		29
+#define PWRCL_EFUSE_MASK		0x7
 #define PERFCL_EFUSE_SHIFT		29
 #define PERFCL_EFUSE_MASK		0x7
 
@@ -153,6 +154,42 @@
 #define OSM_CYCLE_COUNTER_STATUS_REG(n)	(OSM_CYCLE_COUNTER_STATUS_REG_0 + \
 					(4 * n))
 
+/* ACD registers */
+#define ACD_HW_VERSION		0x0
+#define ACDCR			0x4
+#define ACDTD			0x8
+#define ACDSSCR			0x28
+#define ACD_EXTINT_CFG		0x30
+#define ACD_DCVS_SW		0x34
+#define ACD_GFMUX_CFG		0x3c
+#define ACD_READOUT_CFG		0x48
+#define ACD_AVG_CFG_0		0x4c
+#define ACD_AVG_CFG_1		0x50
+#define ACD_AVG_CFG_2		0x54
+#define ACD_AUTOXFER_CFG	0x80
+#define ACD_AUTOXFER		0x84
+#define ACD_AUTOXFER_CTL	0x88
+#define ACD_AUTOXFER_STATUS	0x8c
+#define ACD_WRITE_CTL		0x90
+#define ACD_WRITE_STATUS	0x94
+#define ACD_READOUT		0x98
+
+#define ACD_MASTER_ONLY_REG_ADDR	0x80
+#define ACD_1P1_MAX_REG_OFFSET		0x100
+#define ACD_WRITE_CTL_UPDATE_EN		BIT(0)
+#define ACD_WRITE_CTL_SELECT_SHIFT	1
+#define ACD_GFMUX_CFG_SELECT		BIT(0)
+#define ACD_AUTOXFER_START_CLEAR	0
+#define ACD_AUTOXFER_START_SET		1
+#define AUTO_XFER_DONE_MASK		BIT(0)
+#define ACD_DCVS_SW_DCVS_IN_PRGR_SET	BIT(0)
+#define ACD_DCVS_SW_DCVS_IN_PRGR_CLEAR	0
+#define ACD_LOCAL_TRANSFER_TIMEOUT_NS   500
+
+#define ACD_REG_RELATIVE_ADDR(addr) (addr / 4)
+#define ACD_REG_RELATIVE_ADDR_BITMASK(addr) \
+			(1 << (ACD_REG_RELATIVE_ADDR(addr)))
+
 static const struct regmap_config osm_qcom_regmap_config = {
 	.reg_bits       = 32,
 	.reg_stride     = 4,
@@ -165,6 +202,7 @@
 	PLL_BASE,
 	EFUSE_BASE,
 	SEQ_BASE,
+	ACD_BASE,
 	NUM_BASES,
 };
 
@@ -186,6 +224,8 @@
 	long frequency;
 };
 
+static struct dentry *osm_debugfs_base;
+
 struct clk_osm {
 	struct clk_hw hw;
 	struct osm_entry osm_table[OSM_TABLE_SIZE];
@@ -235,12 +275,173 @@
 	u32 trace_periodic_timer;
 	bool trace_en;
 	bool wdog_trace_en;
+
+	bool acd_init;
+	u32 acd_td;
+	u32 acd_cr;
+	u32 acd_sscr;
+	u32 acd_extint0_cfg;
+	u32 acd_extint1_cfg;
+	u32 acd_autoxfer_ctl;
+	u32 acd_debugfs_addr;
+	bool acd_avg_init;
+	u32 acd_avg_cfg0;
+	u32 acd_avg_cfg1;
+	u32 acd_avg_cfg2;
 };
 
 static struct regulator *vdd_l3;
 static struct regulator *vdd_pwrcl;
 static struct regulator *vdd_perfcl;
 
+static inline int clk_osm_acd_mb(struct clk_osm *c)
+{
+	return readl_relaxed_no_log((char *)c->vbases[ACD_BASE] +
+					ACD_HW_VERSION);
+}
+
+static int clk_osm_acd_local_read_reg(struct clk_osm *c, u32 offset)
+{
+	u32 reg = 0;
+	int timeout;
+
+	if (offset >= ACD_MASTER_ONLY_REG_ADDR) {
+		pr_err("ACD register at offset=0x%x not locally readable\n",
+			offset);
+		return -EINVAL;
+	}
+
+	/* Set select field in read control register */
+	writel_relaxed(ACD_REG_RELATIVE_ADDR(offset),
+			(char *)c->vbases[ACD_BASE] + ACD_READOUT_CFG);
+
+	/* Clear write control register */
+	writel_relaxed(reg, (char *)c->vbases[ACD_BASE] + ACD_WRITE_CTL);
+
+	/* Set select and update_en fields in write control register */
+	reg = (ACD_REG_RELATIVE_ADDR(ACD_READOUT_CFG)
+	       << ACD_WRITE_CTL_SELECT_SHIFT)
+		| ACD_WRITE_CTL_UPDATE_EN;
+	writel_relaxed(reg, (char *)c->vbases[ACD_BASE] + ACD_WRITE_CTL);
+
+	/* Ensure writes complete before polling */
+	clk_osm_acd_mb(c);
+
+	/* Poll write status register */
+	for (timeout = ACD_LOCAL_TRANSFER_TIMEOUT_NS; timeout > 0;
+	     timeout -= 100) {
+		reg = readl_relaxed((char *)c->vbases[ACD_BASE]
+					+ ACD_WRITE_STATUS);
+		if ((reg & (ACD_REG_RELATIVE_ADDR_BITMASK(ACD_READOUT_CFG))))
+			break;
+		ndelay(100);
+	}
+
+	if (!timeout) {
+		pr_err("local read timed out, offset=0x%x status=0x%x\n",
+			offset, reg);
+		return -ETIMEDOUT;
+	}
+
+	reg = readl_relaxed((char *)c->vbases[ACD_BASE] + ACD_READOUT);
+	return reg;
+}
+
+static int clk_osm_acd_local_write_reg(struct clk_osm *c, u32 val, u32 offset)
+{
+	u32 reg = 0;
+	int timeout;
+
+	if (offset >= ACD_MASTER_ONLY_REG_ADDR) {
+		pr_err("ACD register at offset=0x%x not transferrable\n",
+			offset);
+		return -EINVAL;
+	}
+
+	/* Clear write control register */
+	writel_relaxed(reg, (char *)c->vbases[ACD_BASE] + ACD_WRITE_CTL);
+
+	/* Set select and update_en fields in write control register */
+	reg = (ACD_REG_RELATIVE_ADDR(offset) << ACD_WRITE_CTL_SELECT_SHIFT)
+		| ACD_WRITE_CTL_UPDATE_EN;
+	writel_relaxed(reg, (char *)c->vbases[ACD_BASE] + ACD_WRITE_CTL);
+
+	/* Ensure writes complete before polling */
+	clk_osm_acd_mb(c);
+
+	/* Poll write status register */
+	for (timeout = ACD_LOCAL_TRANSFER_TIMEOUT_NS; timeout > 0;
+	     timeout -= 100) {
+		reg = readl_relaxed((char *)c->vbases[ACD_BASE]
+					+ ACD_WRITE_STATUS);
+		if ((reg & (ACD_REG_RELATIVE_ADDR_BITMASK(offset))))
+			break;
+		ndelay(100);
+	}
+
+	if (!timeout) {
+		pr_err("local write timed out, offset=0x%x val=0x%x status=0x%x\n",
+			offset, val, reg);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static int clk_osm_acd_master_write_through_reg(struct clk_osm *c,
+						u32 val, u32 offset)
+{
+	writel_relaxed(val, (char *)c->vbases[ACD_BASE] + offset);
+
+	/* Ensure writes complete before transfer to local copy */
+	clk_osm_acd_mb(c);
+
+	return clk_osm_acd_local_write_reg(c, val, offset);
+}
+
+static int clk_osm_acd_auto_local_write_reg(struct clk_osm *c, u32 mask)
+{
+	u32 numregs, bitmask = mask;
+	u32 reg = 0;
+	int timeout;
+
+	/* count number of bits set in register mask */
+	for (numregs = 0; bitmask; numregs++)
+		bitmask &= bitmask - 1;
+
+	/* Program auto-transfer mask */
+	writel_relaxed(mask, (char *)c->vbases[ACD_BASE] + ACD_AUTOXFER_CFG);
+
+	/* Clear start field in auto-transfer register */
+	writel_relaxed(ACD_AUTOXFER_START_CLEAR,
+			(char *)c->vbases[ACD_BASE] + ACD_AUTOXFER);
+
+	/* Set start field in auto-transfer register */
+	writel_relaxed(ACD_AUTOXFER_START_SET,
+			(char *)c->vbases[ACD_BASE] + ACD_AUTOXFER);
+
+	/* Ensure writes complete before polling */
+	clk_osm_acd_mb(c);
+
+	/* Poll auto-transfer status register */
+	for (timeout = ACD_LOCAL_TRANSFER_TIMEOUT_NS * numregs;
+	     timeout > 0; timeout -= 100) {
+		reg = readl_relaxed((char *)c->vbases[ACD_BASE]
+					+ ACD_AUTOXFER_STATUS);
+		if (reg & AUTO_XFER_DONE_MASK)
+			break;
+		ndelay(100);
+	}
+
+	if (!timeout) {
+		pr_err("local register auto-transfer timed out, mask=0x%x registers=%d status=0x%x\n",
+			mask, numregs, reg);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
 static inline struct clk_osm *to_clk_osm(struct clk_hw *_hw)
 {
 	return container_of(_hw, struct clk_osm, hw);
@@ -264,9 +465,10 @@
 	writel_relaxed(val, (char *)c->vbases[SEQ_BASE] + offset);
 }
 
-static inline void clk_osm_write_reg(struct clk_osm *c, u32 val, u32 offset)
+static inline void clk_osm_write_reg(struct clk_osm *c, u32 val, u32 offset,
+					int base)
 {
-	writel_relaxed(val, (char *)c->vbases[OSM_BASE] + offset);
+	writel_relaxed(val, (char *)c->vbases[base] + offset);
 }
 
 static inline int clk_osm_read_reg(struct clk_osm *c, u32 offset)
@@ -355,7 +557,7 @@
 {
 	struct clk_osm *cpuclk = to_clk_osm(hw);
 
-	clk_osm_write_reg(cpuclk, 1, ENABLE_REG);
+	clk_osm_write_reg(cpuclk, 1, ENABLE_REG, OSM_BASE);
 
 	/* Make sure the write goes through before proceeding */
 	clk_osm_mb(cpuclk, OSM_BASE);
@@ -372,6 +574,7 @@
 	.enable = clk_osm_enable,
 	.round_rate = clk_osm_round_rate,
 	.list_rate = clk_osm_list_rate,
+	.debug_init = clk_debug_measure_add,
 };
 
 static struct clk_ops clk_ops_core;
@@ -408,7 +611,8 @@
 	 * TODO: Program INACTIVE_OS_REQUEST if needed.
 	 */
 	clk_osm_write_reg(parent, index,
-			DCVS_PERF_STATE_DESIRED_REG(cpuclk->core_num));
+			DCVS_PERF_STATE_DESIRED_REG(cpuclk->core_num),
+			OSM_BASE);
 
 	/* Make sure the write goes through before proceeding */
 	clk_osm_mb(parent, OSM_BASE);
@@ -442,7 +646,8 @@
 	}
 	pr_debug("rate: %lu --> index %d\n", rate, index);
 
-	clk_osm_write_reg(cpuclk, index, DCVS_PERF_STATE_DESIRED_REG_0);
+	clk_osm_write_reg(cpuclk, index, DCVS_PERF_STATE_DESIRED_REG_0,
+				OSM_BASE);
 
 	/* Make sure the write goes through before proceeding */
 	clk_osm_mb(cpuclk, OSM_BASE);
@@ -507,6 +712,7 @@
 	.list_rate = clk_osm_list_rate,
 	.recalc_rate = l3_clk_recalc_rate,
 	.set_rate = l3_clk_set_rate,
+	.debug_init = clk_debug_measure_add,
 };
 
 static struct clk_init_data osm_clks_init[] = {
@@ -913,50 +1119,51 @@
 	if (c->red_fsm_en) {
 		val = clk_osm_read_reg(c, VMIN_REDUCTION_ENABLE_REG) | BIT(0);
 		val |= BVAL(6, 1, c->min_cpr_vc);
-		clk_osm_write_reg(c, val, VMIN_REDUCTION_ENABLE_REG);
+		clk_osm_write_reg(c, val, VMIN_REDUCTION_ENABLE_REG,
+					OSM_BASE);
 
 		clk_osm_write_reg(c, clk_osm_count_ns(c, 10000),
-				  VMIN_REDUCTION_TIMER_REG);
+				  VMIN_REDUCTION_TIMER_REG, OSM_BASE);
 	}
 
 	/* Boost FSM */
 	if (c->boost_fsm_en) {
 		val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
 		val |= DELTA_DEX_VAL | CC_BOOST_FSM_EN | IGNORE_PLL_LOCK;
-		clk_osm_write_reg(c, val, PDN_FSM_CTRL_REG);
+		clk_osm_write_reg(c, val, PDN_FSM_CTRL_REG, OSM_BASE);
 
 		val = clk_osm_read_reg(c, CC_BOOST_FSM_TIMERS_REG0);
 		val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
 		val |= BVAL(31, 16, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS));
-		clk_osm_write_reg(c, val, CC_BOOST_FSM_TIMERS_REG0);
+		clk_osm_write_reg(c, val, CC_BOOST_FSM_TIMERS_REG0, OSM_BASE);
 
 		val = clk_osm_read_reg(c, CC_BOOST_FSM_TIMERS_REG1);
 		val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
 		val |= BVAL(31, 16, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
-		clk_osm_write_reg(c, val, CC_BOOST_FSM_TIMERS_REG1);
+		clk_osm_write_reg(c, val, CC_BOOST_FSM_TIMERS_REG1, OSM_BASE);
 
 		val = clk_osm_read_reg(c, CC_BOOST_FSM_TIMERS_REG2);
 		val |= BVAL(15, 0, clk_osm_count_ns(c, DEXT_DECREMENT_WAIT_NS));
-		clk_osm_write_reg(c, val, CC_BOOST_FSM_TIMERS_REG2);
+		clk_osm_write_reg(c, val, CC_BOOST_FSM_TIMERS_REG2, OSM_BASE);
 	}
 
 	/* Safe Freq FSM */
 	if (c->safe_fsm_en) {
 		val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
 		clk_osm_write_reg(c, val | DCVS_BOOST_FSM_EN_MASK,
-				  PDN_FSM_CTRL_REG);
+				  PDN_FSM_CTRL_REG, OSM_BASE);
 
 		val = clk_osm_read_reg(c, DCVS_BOOST_FSM_TIMERS_REG0);
 		val |= BVAL(31, 16, clk_osm_count_ns(c, 1000));
-		clk_osm_write_reg(c, val, DCVS_BOOST_FSM_TIMERS_REG0);
+		clk_osm_write_reg(c, val, DCVS_BOOST_FSM_TIMERS_REG0, OSM_BASE);
 
 		val = clk_osm_read_reg(c, DCVS_BOOST_FSM_TIMERS_REG1);
 		val |= BVAL(15, 0, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS));
-		clk_osm_write_reg(c, val, DCVS_BOOST_FSM_TIMERS_REG1);
+		clk_osm_write_reg(c, val, DCVS_BOOST_FSM_TIMERS_REG1, OSM_BASE);
 
 		val = clk_osm_read_reg(c, DCVS_BOOST_FSM_TIMERS_REG2);
 		val |= BVAL(15, 0, clk_osm_count_ns(c, DEXT_DECREMENT_WAIT_NS));
-		clk_osm_write_reg(c, val, DCVS_BOOST_FSM_TIMERS_REG2);
+		clk_osm_write_reg(c, val, DCVS_BOOST_FSM_TIMERS_REG2, OSM_BASE);
 
 	}
 
@@ -964,46 +1171,46 @@
 	if (c->ps_fsm_en) {
 		val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
 		clk_osm_write_reg(c, val | PS_BOOST_FSM_EN_MASK,
-							PDN_FSM_CTRL_REG);
+					PDN_FSM_CTRL_REG, OSM_BASE);
 
 		val = clk_osm_read_reg(c, PS_BOOST_FSM_TIMERS_REG0);
 		val |= BVAL(15, 0, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS));
 		val |= BVAL(31, 16, clk_osm_count_ns(c, 1000));
-		clk_osm_write_reg(c, val, PS_BOOST_FSM_TIMERS_REG0);
+		clk_osm_write_reg(c, val, PS_BOOST_FSM_TIMERS_REG0, OSM_BASE);
 
 		val = clk_osm_read_reg(c, PS_BOOST_FSM_TIMERS_REG1);
 		val |= BVAL(15, 0, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS));
 		val |= BVAL(31, 16, clk_osm_count_ns(c, 1000));
-		clk_osm_write_reg(c, val, PS_BOOST_FSM_TIMERS_REG1);
+		clk_osm_write_reg(c, val, PS_BOOST_FSM_TIMERS_REG1, OSM_BASE);
 
 		val = clk_osm_read_reg(c, PS_BOOST_FSM_TIMERS_REG2);
 		val |= BVAL(15, 0, clk_osm_count_ns(c, DEXT_DECREMENT_WAIT_NS));
-		clk_osm_write_reg(c, val, PS_BOOST_FSM_TIMERS_REG2);
+		clk_osm_write_reg(c, val, PS_BOOST_FSM_TIMERS_REG2, OSM_BASE);
 	}
 
 	/* PLL signal timing control */
 	if (c->boost_fsm_en || c->safe_fsm_en || c->ps_fsm_en)
-		clk_osm_write_reg(c, 0x2, BOOST_PROG_SYNC_DELAY_REG);
+		clk_osm_write_reg(c, 0x2, BOOST_PROG_SYNC_DELAY_REG, OSM_BASE);
 
 	/* DCVS droop FSM - only if RCGwRC is not used for di/dt control */
 	if (c->droop_fsm_en) {
 		val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
 		clk_osm_write_reg(c, val | DCVS_DROOP_FSM_EN_MASK,
-				  PDN_FSM_CTRL_REG);
+					PDN_FSM_CTRL_REG, OSM_BASE);
 	}
 
 	if (c->ps_fsm_en || c->droop_fsm_en) {
-		clk_osm_write_reg(c, 0x1, DROOP_PROG_SYNC_DELAY_REG);
+		clk_osm_write_reg(c, 0x1, DROOP_PROG_SYNC_DELAY_REG, OSM_BASE);
 		clk_osm_write_reg(c, clk_osm_count_ns(c, 100),
-				  DROOP_RELEASE_TIMER_CTRL);
+					DROOP_RELEASE_TIMER_CTRL, OSM_BASE);
 		clk_osm_write_reg(c, clk_osm_count_ns(c, 150),
-				  DCVS_DROOP_TIMER_CTRL);
+					DCVS_DROOP_TIMER_CTRL, OSM_BASE);
 		/*
 		 * TODO: Check if DCVS_DROOP_CODE used is correct. Also check
 		 * if RESYNC_CTRL should be set for L3.
 		 */
 		val = BIT(31) | BVAL(22, 16, 0x2) | BVAL(6, 0, 0x8);
-		clk_osm_write_reg(c, val, DROOP_CTRL_REG);
+		clk_osm_write_reg(c, val, DROOP_CTRL_REG, OSM_BASE);
 	}
 }
 
@@ -1031,17 +1238,20 @@
 	} else {
 		val = clk_osm_count_ns(&l3_clk, array[l3_clk.cluster_num]);
 		clk_osm_write_reg(&l3_clk, val,
-					LLM_VOLTAGE_VOTE_INC_HYSTERESIS);
+					LLM_VOLTAGE_VOTE_INC_HYSTERESIS,
+					OSM_BASE);
 
 		val = clk_osm_count_ns(&pwrcl_clk,
 						array[pwrcl_clk.cluster_num]);
 		clk_osm_write_reg(&pwrcl_clk, val,
-					LLM_VOLTAGE_VOTE_INC_HYSTERESIS);
+					LLM_VOLTAGE_VOTE_INC_HYSTERESIS,
+					OSM_BASE);
 
 		val = clk_osm_count_ns(&perfcl_clk,
 						array[perfcl_clk.cluster_num]);
 		clk_osm_write_reg(&perfcl_clk, val,
-					LLM_VOLTAGE_VOTE_INC_HYSTERESIS);
+					LLM_VOLTAGE_VOTE_INC_HYSTERESIS,
+					OSM_BASE);
 	}
 
 	/*
@@ -1057,17 +1267,20 @@
 	} else {
 		val = clk_osm_count_ns(&l3_clk, array[l3_clk.cluster_num]);
 		clk_osm_write_reg(&l3_clk, val,
-					LLM_VOLTAGE_VOTE_DEC_HYSTERESIS);
+					LLM_VOLTAGE_VOTE_DEC_HYSTERESIS,
+					OSM_BASE);
 
 		val = clk_osm_count_ns(&pwrcl_clk,
 					       array[pwrcl_clk.cluster_num]);
 		clk_osm_write_reg(&pwrcl_clk, val,
-					LLM_VOLTAGE_VOTE_DEC_HYSTERESIS);
+					LLM_VOLTAGE_VOTE_DEC_HYSTERESIS,
+					OSM_BASE);
 
 		val = clk_osm_count_ns(&perfcl_clk,
-					       array[perfcl_clk.cluster_num]);
+					array[perfcl_clk.cluster_num]);
 		clk_osm_write_reg(&perfcl_clk, val,
-					LLM_VOLTAGE_VOTE_DEC_HYSTERESIS);
+					LLM_VOLTAGE_VOTE_DEC_HYSTERESIS,
+					OSM_BASE);
 	}
 
 	/* Enable or disable honoring of LLM Voltage requests */
@@ -1081,11 +1294,11 @@
 
 	/* Enable or disable LLM VOLT DVCS */
 	regval = val | clk_osm_read_reg(&l3_clk, LLM_INTF_DCVS_DISABLE);
-	clk_osm_write_reg(&l3_clk, regval, LLM_INTF_DCVS_DISABLE);
+	clk_osm_write_reg(&l3_clk, regval, LLM_INTF_DCVS_DISABLE, OSM_BASE);
 	regval = val | clk_osm_read_reg(&pwrcl_clk, LLM_INTF_DCVS_DISABLE);
-	clk_osm_write_reg(&pwrcl_clk, regval, LLM_INTF_DCVS_DISABLE);
+	clk_osm_write_reg(&pwrcl_clk, regval, LLM_INTF_DCVS_DISABLE, OSM_BASE);
 	regval = val | clk_osm_read_reg(&perfcl_clk, LLM_INTF_DCVS_DISABLE);
-	clk_osm_write_reg(&perfcl_clk, regval, LLM_INTF_DCVS_DISABLE);
+	clk_osm_write_reg(&perfcl_clk, regval, LLM_INTF_DCVS_DISABLE, OSM_BASE);
 
 	/* Wait for the writes to complete */
 	clk_osm_mb(&perfcl_clk, OSM_BASE);
@@ -1117,17 +1330,20 @@
 			rc);
 	} else {
 		val = clk_osm_count_ns(&l3_clk, array[l3_clk.cluster_num]);
-		clk_osm_write_reg(&l3_clk, val, LLM_FREQ_VOTE_INC_HYSTERESIS);
+		clk_osm_write_reg(&l3_clk, val, LLM_FREQ_VOTE_INC_HYSTERESIS,
+			OSM_BASE);
 
 		val = clk_osm_count_ns(&pwrcl_clk,
 						array[pwrcl_clk.cluster_num]);
 		clk_osm_write_reg(&pwrcl_clk, val,
-						LLM_FREQ_VOTE_INC_HYSTERESIS);
+					LLM_FREQ_VOTE_INC_HYSTERESIS,
+					OSM_BASE);
 
 		val = clk_osm_count_ns(&perfcl_clk,
 						array[perfcl_clk.cluster_num]);
 		clk_osm_write_reg(&perfcl_clk, val,
-						LLM_FREQ_VOTE_INC_HYSTERESIS);
+					LLM_FREQ_VOTE_INC_HYSTERESIS,
+					OSM_BASE);
 	}
 
 	/*
@@ -1142,17 +1358,18 @@
 			rc);
 	} else {
 		val = clk_osm_count_ns(&l3_clk, array[l3_clk.cluster_num]);
-		clk_osm_write_reg(&l3_clk, val, LLM_FREQ_VOTE_DEC_HYSTERESIS);
+		clk_osm_write_reg(&l3_clk, val, LLM_FREQ_VOTE_DEC_HYSTERESIS,
+					OSM_BASE);
 
 		val = clk_osm_count_ns(&pwrcl_clk,
 					       array[pwrcl_clk.cluster_num]);
 		clk_osm_write_reg(&pwrcl_clk, val,
-						LLM_FREQ_VOTE_DEC_HYSTERESIS);
+					LLM_FREQ_VOTE_DEC_HYSTERESIS, OSM_BASE);
 
 		val = clk_osm_count_ns(&perfcl_clk,
 					       array[perfcl_clk.cluster_num]);
 		clk_osm_write_reg(&perfcl_clk, val,
-						LLM_FREQ_VOTE_DEC_HYSTERESIS);
+					LLM_FREQ_VOTE_DEC_HYSTERESIS, OSM_BASE);
 	}
 
 	/* Enable or disable honoring of LLM frequency requests */
@@ -1166,11 +1383,11 @@
 
 	/* Enable or disable LLM FREQ DVCS */
 	regval = val | clk_osm_read_reg(&l3_clk, LLM_INTF_DCVS_DISABLE);
-	clk_osm_write_reg(&l3_clk, regval, LLM_INTF_DCVS_DISABLE);
+	clk_osm_write_reg(&l3_clk, regval, LLM_INTF_DCVS_DISABLE, OSM_BASE);
 	regval = val | clk_osm_read_reg(&pwrcl_clk, LLM_INTF_DCVS_DISABLE);
-	clk_osm_write_reg(&pwrcl_clk, regval, LLM_INTF_DCVS_DISABLE);
+	clk_osm_write_reg(&pwrcl_clk, regval, LLM_INTF_DCVS_DISABLE, OSM_BASE);
 	regval = val | clk_osm_read_reg(&perfcl_clk, LLM_INTF_DCVS_DISABLE);
-	clk_osm_write_reg(&perfcl_clk, regval, LLM_INTF_DCVS_DISABLE);
+	clk_osm_write_reg(&perfcl_clk, regval, LLM_INTF_DCVS_DISABLE, OSM_BASE);
 
 	/* Wait for the write to complete */
 	clk_osm_mb(&perfcl_clk, OSM_BASE);
@@ -1198,15 +1415,18 @@
 	} else {
 		val = clk_osm_count_ns(&l3_clk,
 					array[l3_clk.cluster_num]);
-		clk_osm_write_reg(&l3_clk, val, SPM_CC_INC_HYSTERESIS);
+		clk_osm_write_reg(&l3_clk, val, SPM_CC_INC_HYSTERESIS,
+			OSM_BASE);
 
 		val = clk_osm_count_ns(&pwrcl_clk,
 					array[pwrcl_clk.cluster_num]);
-		clk_osm_write_reg(&pwrcl_clk, val, SPM_CC_INC_HYSTERESIS);
+		clk_osm_write_reg(&pwrcl_clk, val, SPM_CC_INC_HYSTERESIS,
+					OSM_BASE);
 
 		val = clk_osm_count_ns(&perfcl_clk,
 					array[perfcl_clk.cluster_num]);
-		clk_osm_write_reg(&perfcl_clk, val, SPM_CC_INC_HYSTERESIS);
+		clk_osm_write_reg(&perfcl_clk, val, SPM_CC_INC_HYSTERESIS,
+					OSM_BASE);
 	}
 
 	rc = of_property_read_u32_array(of, "qcom,down-timer",
@@ -1216,15 +1436,18 @@
 	} else {
 		val = clk_osm_count_ns(&l3_clk,
 				       array[l3_clk.cluster_num]);
-		clk_osm_write_reg(&l3_clk, val, SPM_CC_DEC_HYSTERESIS);
+		clk_osm_write_reg(&l3_clk, val, SPM_CC_DEC_HYSTERESIS,
+					OSM_BASE);
 
 		val = clk_osm_count_ns(&pwrcl_clk,
 				       array[pwrcl_clk.cluster_num]);
-		clk_osm_write_reg(&pwrcl_clk, val, SPM_CC_DEC_HYSTERESIS);
+		clk_osm_write_reg(&pwrcl_clk, val, SPM_CC_DEC_HYSTERESIS,
+					OSM_BASE);
 
-		clk_osm_count_ns(&perfcl_clk,
-				       array[perfcl_clk.cluster_num]);
+		val = clk_osm_count_ns(&perfcl_clk,
+				       array[perfcl_clk.cluster_num]);
-		clk_osm_write_reg(&perfcl_clk, val, SPM_CC_DEC_HYSTERESIS);
+		clk_osm_write_reg(&perfcl_clk, val, SPM_CC_DEC_HYSTERESIS,
+					OSM_BASE);
 	}
 
 	/* OSM index override for cluster PC */
@@ -1233,15 +1456,18 @@
 	if (rc) {
 		dev_dbg(&pdev->dev, "No PC override index value, rc=%d\n",
 			rc);
-		clk_osm_write_reg(&pwrcl_clk, 0, CC_ZERO_BEHAV_CTRL);
-		clk_osm_write_reg(&perfcl_clk, 0, CC_ZERO_BEHAV_CTRL);
+		clk_osm_write_reg(&pwrcl_clk, 0, CC_ZERO_BEHAV_CTRL, OSM_BASE);
+		clk_osm_write_reg(&perfcl_clk, 0, CC_ZERO_BEHAV_CTRL,
+					OSM_BASE);
 	} else {
 		val = BVAL(6, 1, array[pwrcl_clk.cluster_num])
 			| ENABLE_OVERRIDE;
-		clk_osm_write_reg(&pwrcl_clk, val, CC_ZERO_BEHAV_CTRL);
+		clk_osm_write_reg(&pwrcl_clk, val, CC_ZERO_BEHAV_CTRL,
+					OSM_BASE);
 		val = BVAL(6, 1, array[perfcl_clk.cluster_num])
 			| ENABLE_OVERRIDE;
-		clk_osm_write_reg(&perfcl_clk, val, CC_ZERO_BEHAV_CTRL);
+		clk_osm_write_reg(&perfcl_clk, val, CC_ZERO_BEHAV_CTRL,
+					OSM_BASE);
 	}
 
 	/* Wait for the writes to complete */
@@ -1253,15 +1479,18 @@
 
 		val = clk_osm_read_reg(&l3_clk, SPM_CORE_INACTIVE_MAPPING);
 		val &= ~BIT(2);
-		clk_osm_write_reg(&l3_clk, val, SPM_CORE_INACTIVE_MAPPING);
+		clk_osm_write_reg(&l3_clk, val, SPM_CORE_INACTIVE_MAPPING,
+					OSM_BASE);
 
 		val = clk_osm_read_reg(&pwrcl_clk, SPM_CORE_INACTIVE_MAPPING);
 		val &= ~BIT(2);
-		clk_osm_write_reg(&pwrcl_clk, val, SPM_CORE_INACTIVE_MAPPING);
+		clk_osm_write_reg(&pwrcl_clk, val, SPM_CORE_INACTIVE_MAPPING,
+					OSM_BASE);
 
 		val = clk_osm_read_reg(&perfcl_clk, SPM_CORE_INACTIVE_MAPPING);
 		val &= ~BIT(2);
-		clk_osm_write_reg(&perfcl_clk, val, SPM_CORE_INACTIVE_MAPPING);
+		clk_osm_write_reg(&perfcl_clk, val, SPM_CORE_INACTIVE_MAPPING,
+					OSM_BASE);
 	}
 
 	rc = of_property_read_bool(pdev->dev.of_node, "qcom,set-c2-active");
@@ -1270,15 +1499,18 @@
 
 		val = clk_osm_read_reg(&l3_clk, SPM_CORE_INACTIVE_MAPPING);
 		val &= ~BIT(1);
-		clk_osm_write_reg(&l3_clk, val, SPM_CORE_INACTIVE_MAPPING);
+		clk_osm_write_reg(&l3_clk, val, SPM_CORE_INACTIVE_MAPPING,
+					OSM_BASE);
 
 		val = clk_osm_read_reg(&pwrcl_clk, SPM_CORE_INACTIVE_MAPPING);
 		val &= ~BIT(1);
-		clk_osm_write_reg(&pwrcl_clk, val, SPM_CORE_INACTIVE_MAPPING);
+		clk_osm_write_reg(&pwrcl_clk, val, SPM_CORE_INACTIVE_MAPPING,
+					OSM_BASE);
 
 		val = clk_osm_read_reg(&perfcl_clk, SPM_CORE_INACTIVE_MAPPING);
 		val &= ~BIT(1);
-		clk_osm_write_reg(&perfcl_clk, val, SPM_CORE_INACTIVE_MAPPING);
+		clk_osm_write_reg(&perfcl_clk, val, SPM_CORE_INACTIVE_MAPPING,
+					OSM_BASE);
 	}
 
 	rc = of_property_read_bool(pdev->dev.of_node, "qcom,disable-cc-dvcs");
@@ -1288,9 +1520,9 @@
 	} else
 		val = 0;
 
-	clk_osm_write_reg(&l3_clk, val, SPM_CC_DCVS_DISABLE);
-	clk_osm_write_reg(&pwrcl_clk, val, SPM_CC_DCVS_DISABLE);
-	clk_osm_write_reg(&perfcl_clk, val, SPM_CC_DCVS_DISABLE);
+	clk_osm_write_reg(&l3_clk, val, SPM_CC_DCVS_DISABLE, OSM_BASE);
+	clk_osm_write_reg(&pwrcl_clk, val, SPM_CC_DCVS_DISABLE, OSM_BASE);
+	clk_osm_write_reg(&perfcl_clk, val, SPM_CC_DCVS_DISABLE, OSM_BASE);
 
 	/* Wait for the writes to complete */
 	clk_osm_mb(&perfcl_clk, OSM_BASE);
@@ -1332,8 +1564,9 @@
 	u32 lval = 0xFF, val;
 	int i;
 
-	clk_osm_write_reg(c, BVAL(23, 16, 0xF), SPM_CORE_COUNT_CTRL);
-	clk_osm_write_reg(c, PLL_MIN_LVAL, PLL_MIN_FREQ_REG);
+	clk_osm_write_reg(c, BVAL(23, 16, 0xF), SPM_CORE_COUNT_CTRL,
+				OSM_BASE);
+	clk_osm_write_reg(c, PLL_MIN_LVAL, PLL_MIN_FREQ_REG, OSM_BASE);
 
 	/* Pattern to set/clear PLL lock in PDN_FSM_CTRL_REG */
 	val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
@@ -1393,10 +1626,12 @@
 		}
 
 		table_entry_offset = i * OSM_REG_SIZE;
-		clk_osm_write_reg(c, freq_val, FREQ_REG + table_entry_offset);
-		clk_osm_write_reg(c, volt_val, VOLT_REG + table_entry_offset);
+		clk_osm_write_reg(c, freq_val, FREQ_REG + table_entry_offset,
+					OSM_BASE);
+		clk_osm_write_reg(c, volt_val, VOLT_REG + table_entry_offset,
+					OSM_BASE);
 		clk_osm_write_reg(c, override_val, OVERRIDE_REG +
-				  table_entry_offset);
+				  table_entry_offset, OSM_BASE);
 	}
 
 	/* Make sure all writes go through */
@@ -1572,7 +1807,7 @@
 	do_div(ratio, c->xo_clk_rate);
 	val |= BVAL(5, 1, ratio - 1) | OSM_CYCLE_COUNTER_USE_XO_EDGE_EN;
 
-	clk_osm_write_reg(c, val, OSM_CYCLE_COUNTER_CTRL_REG);
+	clk_osm_write_reg(c, val, OSM_CYCLE_COUNTER_CTRL_REG, OSM_BASE);
 	pr_debug("OSM to XO clock ratio: %d\n", ratio);
 }
 
@@ -1747,6 +1982,149 @@
 	return rc;
 }
 
+static int clk_osm_parse_acd_dt_configs(struct platform_device *pdev)
+{
+	struct device_node *of = pdev->dev.of_node;
+	u32 *array;
+	int rc = 0;
+
+	array = devm_kzalloc(&pdev->dev, MAX_CLUSTER_CNT * sizeof(u32),
+				GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	l3_clk.acd_init = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+				"l3_acd") != NULL ? true : false;
+	pwrcl_clk.acd_init = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+				"pwrcl_acd") != NULL ? true : false;
+	perfcl_clk.acd_init = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+				"perfcl_acd") != NULL ? true : false;
+
+	if (pwrcl_clk.acd_init || perfcl_clk.acd_init || l3_clk.acd_init) {
+		rc = of_property_read_u32_array(of, "qcom,acdtd-val",
+						array, MAX_CLUSTER_CNT);
+		if (rc) {
+			dev_err(&pdev->dev, "unable to find qcom,acdtd-val property, rc=%d\n",
+				rc);
+			return -EINVAL;
+		}
+
+		pwrcl_clk.acd_td = array[pwrcl_clk.cluster_num];
+		perfcl_clk.acd_td = array[perfcl_clk.cluster_num];
+		l3_clk.acd_td = array[l3_clk.cluster_num];
+
+		rc = of_property_read_u32_array(of, "qcom,acdcr-val",
+						array, MAX_CLUSTER_CNT);
+		if (rc) {
+			dev_err(&pdev->dev, "unable to find qcom,acdcr-val property, rc=%d\n",
+				rc);
+			return -EINVAL;
+		}
+
+		pwrcl_clk.acd_cr = array[pwrcl_clk.cluster_num];
+		perfcl_clk.acd_cr = array[perfcl_clk.cluster_num];
+		l3_clk.acd_cr = array[l3_clk.cluster_num];
+
+		rc = of_property_read_u32_array(of, "qcom,acdsscr-val",
+						array, MAX_CLUSTER_CNT);
+		if (rc) {
+			dev_err(&pdev->dev, "unable to find qcom,acdsscr-val property, rc=%d\n",
+				rc);
+			return -EINVAL;
+		}
+
+		pwrcl_clk.acd_sscr = array[pwrcl_clk.cluster_num];
+		perfcl_clk.acd_sscr = array[perfcl_clk.cluster_num];
+		l3_clk.acd_sscr =  array[l3_clk.cluster_num];
+
+		rc = of_property_read_u32_array(of, "qcom,acdextint0-val",
+						array, MAX_CLUSTER_CNT);
+		if (rc) {
+			dev_err(&pdev->dev, "unable to find qcom,acdextint0-val property, rc=%d\n",
+				rc);
+			return -EINVAL;
+		}
+
+		pwrcl_clk.acd_extint0_cfg = array[pwrcl_clk.cluster_num];
+		perfcl_clk.acd_extint0_cfg = array[perfcl_clk.cluster_num];
+		l3_clk.acd_extint0_cfg =  array[l3_clk.cluster_num];
+
+		rc = of_property_read_u32_array(of, "qcom,acdextint1-val",
+						array, MAX_CLUSTER_CNT);
+		if (rc) {
+			dev_err(&pdev->dev, "unable to find qcom,acdextint1-val property, rc=%d\n",
+				rc);
+			return -EINVAL;
+		}
+
+		pwrcl_clk.acd_extint1_cfg = array[pwrcl_clk.cluster_num];
+		perfcl_clk.acd_extint1_cfg = array[perfcl_clk.cluster_num];
+		l3_clk.acd_extint1_cfg =  array[l3_clk.cluster_num];
+
+		rc = of_property_read_u32_array(of, "qcom,acdautoxfer-val",
+						array, MAX_CLUSTER_CNT);
+		if (rc) {
+			dev_err(&pdev->dev, "unable to find qcom,acdautoxfer-val property, rc=%d\n",
+				rc);
+			return -EINVAL;
+		}
+
+		pwrcl_clk.acd_autoxfer_ctl = array[pwrcl_clk.cluster_num];
+		perfcl_clk.acd_autoxfer_ctl = array[perfcl_clk.cluster_num];
+		l3_clk.acd_autoxfer_ctl =  array[l3_clk.cluster_num];
+
+		rc = of_property_read_u32_array(of, "qcom,acdavg-init",
+						array, MAX_CLUSTER_CNT);
+		if (rc) {
+			dev_err(&pdev->dev, "unable to find qcom,acdavg-init property, rc=%d\n",
+				rc);
+			return -EINVAL;
+		}
+		pwrcl_clk.acd_avg_init = array[pwrcl_clk.cluster_num];
+		perfcl_clk.acd_avg_init = array[perfcl_clk.cluster_num];
+		l3_clk.acd_avg_init =  array[l3_clk.cluster_num];
+	}
+
+	if (pwrcl_clk.acd_avg_init || perfcl_clk.acd_avg_init ||
+	    l3_clk.acd_avg_init) {
+		rc = of_property_read_u32_array(of, "qcom,acdavgcfg0-val",
+						array, MAX_CLUSTER_CNT);
+		if (rc) {
+			dev_err(&pdev->dev, "unable to find qcom,acdavgcfg0-val property, rc=%d\n",
+				rc);
+			return -EINVAL;
+		}
+		pwrcl_clk.acd_avg_cfg0 = array[pwrcl_clk.cluster_num];
+		perfcl_clk.acd_avg_cfg0 = array[perfcl_clk.cluster_num];
+		l3_clk.acd_avg_cfg0 =  array[l3_clk.cluster_num];
+
+		rc = of_property_read_u32_array(of, "qcom,acdavgcfg1-val",
+						array, MAX_CLUSTER_CNT);
+		if (rc) {
+			dev_err(&pdev->dev, "unable to find qcom,acdavgcfg1-val property, rc=%d\n",
+				rc);
+			return -EINVAL;
+		}
+		pwrcl_clk.acd_avg_cfg1 = array[pwrcl_clk.cluster_num];
+		perfcl_clk.acd_avg_cfg1 = array[perfcl_clk.cluster_num];
+		l3_clk.acd_avg_cfg1 =  array[l3_clk.cluster_num];
+
+		rc = of_property_read_u32_array(of, "qcom,acdavgcfg2-val",
+						array, MAX_CLUSTER_CNT);
+		if (rc) {
+			dev_err(&pdev->dev, "unable to find qcom,acdavgcfg2-val property, rc=%d\n",
+				rc);
+			return -EINVAL;
+		}
+		pwrcl_clk.acd_avg_cfg2 = array[pwrcl_clk.cluster_num];
+		perfcl_clk.acd_avg_cfg2 = array[perfcl_clk.cluster_num];
+		l3_clk.acd_avg_cfg2 =  array[l3_clk.cluster_num];
+	}
+
+	devm_kfree(&pdev->dev, array);
+	return rc;
+}
+
 static int clk_osm_parse_dt_configs(struct platform_device *pdev)
 {
 	struct device_node *of = pdev->dev.of_node;
@@ -1935,7 +2313,7 @@
 						  resource_size(res));
 
 	if (!l3_clk.vbases[SEQ_BASE]) {
-		dev_err(&pdev->dev, "Unable to map in l3_sequencer base\n");
+		dev_err(&pdev->dev, "Unable to map l3_sequencer base\n");
 		return -ENOMEM;
 	}
 
@@ -1952,7 +2330,7 @@
 						  resource_size(res));
 
 	if (!pwrcl_clk.vbases[SEQ_BASE]) {
-		dev_err(&pdev->dev, "Unable to map in pwrcl_sequencer base\n");
+		dev_err(&pdev->dev, "Unable to map pwrcl_sequencer base\n");
 		return -ENOMEM;
 	}
 
@@ -1969,7 +2347,7 @@
 						  resource_size(res));
 
 	if (!perfcl_clk.vbases[SEQ_BASE]) {
-		dev_err(&pdev->dev, "Unable to map in perfcl_sequencer base\n");
+		dev_err(&pdev->dev, "Unable to map perfcl_sequencer base\n");
 		return -ENOMEM;
 	}
 
@@ -2035,6 +2413,57 @@
 	return rc;
 }
 
+static int clk_osm_acd_resources_init(struct platform_device *pdev)
+{
+	struct resource *res;
+	unsigned long pbase;
+	void *vbase;
+	int rc = 0;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"pwrcl_acd");
+	if (res) {
+		pbase = (unsigned long)res->start;
+		vbase = devm_ioremap(&pdev->dev, res->start,
+					resource_size(res));
+		if (!vbase) {
+			dev_err(&pdev->dev, "Unable to map pwrcl_acd base\n");
+			return -ENOMEM;
+		}
+		pwrcl_clk.pbases[ACD_BASE] = pbase;
+		pwrcl_clk.vbases[ACD_BASE] = vbase;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"perfcl_acd");
+	if (res) {
+		pbase = (unsigned long)res->start;
+		vbase = devm_ioremap(&pdev->dev, res->start,
+					resource_size(res));
+		if (!vbase) {
+			dev_err(&pdev->dev, "Unable to map perfcl_acd base\n");
+			return -ENOMEM;
+		}
+		perfcl_clk.pbases[ACD_BASE] = pbase;
+		perfcl_clk.vbases[ACD_BASE] = vbase;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"l3_acd");
+	if (res) {
+		pbase = (unsigned long)res->start;
+		vbase = devm_ioremap(&pdev->dev, res->start,
+					resource_size(res));
+		if (!vbase) {
+			dev_err(&pdev->dev, "Unable to map l3_acd base\n");
+			return -ENOMEM;
+		}
+		l3_clk.pbases[ACD_BASE] = pbase;
+		l3_clk.vbases[ACD_BASE] = vbase;
+	}
+	return rc;
+}
+
 static int clk_osm_resources_init(struct platform_device *pdev)
 {
 	struct device_node *node;
@@ -2056,7 +2485,7 @@
 						  resource_size(res));
 
 	if (!l3_clk.vbases[OSM_BASE]) {
-		dev_err(&pdev->dev, "Unable to map in osm_l3_base base\n");
+		dev_err(&pdev->dev, "Unable to map osm_l3_base base\n");
 		return -ENOMEM;
 	}
 
@@ -2072,7 +2501,7 @@
 	pwrcl_clk.vbases[OSM_BASE] = devm_ioremap(&pdev->dev, res->start,
 						  resource_size(res));
 	if (!pwrcl_clk.vbases[OSM_BASE]) {
-		dev_err(&pdev->dev, "Unable to map in osm_pwrcl_base base\n");
+		dev_err(&pdev->dev, "Unable to map osm_pwrcl_base base\n");
 		return -ENOMEM;
 	}
 
@@ -2089,7 +2518,7 @@
 						  resource_size(res));
 
 	if (!perfcl_clk.vbases[OSM_BASE]) {
-		dev_err(&pdev->dev, "Unable to map in osm_perfcl_base base\n");
+		dev_err(&pdev->dev, "Unable to map osm_perfcl_base base\n");
 		return -ENOMEM;
 	}
 
@@ -2146,13 +2575,27 @@
 
 	/* efuse speed bin fuses are optional */
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   "l3_efuse");
+	if (res) {
+		pbase = (unsigned long)res->start;
+		vbase = devm_ioremap(&pdev->dev, res->start,
+				     resource_size(res));
+		if (!vbase) {
+			dev_err(&pdev->dev, "Unable to map in l3_efuse base\n");
+			return -ENOMEM;
+		}
+		l3_clk.pbases[EFUSE_BASE] = pbase;
+		l3_clk.vbases[EFUSE_BASE] = vbase;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 					   "pwrcl_efuse");
 	if (res) {
 		pbase = (unsigned long)res->start;
 		vbase = devm_ioremap(&pdev->dev, res->start,
 				     resource_size(res));
 		if (!vbase) {
-			dev_err(&pdev->dev, "Unable to map in pwrcl_efuse base\n");
+			dev_err(&pdev->dev, "Unable to map pwrcl_efuse base\n");
 			return -ENOMEM;
 		}
 		pwrcl_clk.pbases[EFUSE_BASE] = pbase;
@@ -2166,7 +2609,7 @@
 		vbase = devm_ioremap(&pdev->dev, res->start,
 				     resource_size(res));
 		if (!vbase) {
-			dev_err(&pdev->dev, "Unable to map in perfcl_efuse base\n");
+			dev_err(&pdev->dev, "Unable to map perfcl_efuse base\n");
 			return -ENOMEM;
 		}
 		perfcl_clk.pbases[EFUSE_BASE] = pbase;
@@ -2242,12 +2685,213 @@
 	return 0;
 }
 
+static int debugfs_get_debug_reg(void *data, u64 *val)
+{
+	struct clk_osm *c = data;
+
+	if (c->acd_debugfs_addr >= ACD_MASTER_ONLY_REG_ADDR)
+		*val = readl_relaxed((char *)c->vbases[ACD_BASE] +
+					c->acd_debugfs_addr);
+	else
+		*val = clk_osm_acd_local_read_reg(c, c->acd_debugfs_addr);
+	return 0;
+}
+
+static int debugfs_set_debug_reg(void *data, u64 val)
+{
+	struct clk_osm *c = data;
+
+	if (c->acd_debugfs_addr >= ACD_MASTER_ONLY_REG_ADDR)
+		clk_osm_write_reg(c, val, c->acd_debugfs_addr, ACD_BASE);
+	else
+		clk_osm_acd_master_write_through_reg(c, val,
+							c->acd_debugfs_addr);
+
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(debugfs_acd_debug_reg_fops,
+			debugfs_get_debug_reg,
+			debugfs_set_debug_reg,
+			"0x%llx\n");
+
+static int debugfs_get_debug_reg_addr(void *data, u64 *val)
+{
+	struct clk_osm *c = data;
+
+	*val = c->acd_debugfs_addr;
+	return 0;
+}
+
+static int debugfs_set_debug_reg_addr(void *data, u64 val)
+{
+	struct clk_osm *c = data;
+
+	if (val > ACD_1P1_MAX_REG_OFFSET) {
+		pr_err("invalid ACD register address offset, must be between 0-0x%x\n",
+			ACD_1P1_MAX_REG_OFFSET);
+		return 0;
+	}
+
+	c->acd_debugfs_addr = val;
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(debugfs_acd_debug_reg_addr_fops,
+			debugfs_get_debug_reg_addr,
+			debugfs_set_debug_reg_addr,
+			"%llu\n");
+
+static void populate_debugfs_dir(struct clk_osm *c)
+{
+	struct dentry *temp;
+
+	if (osm_debugfs_base == NULL) {
+		osm_debugfs_base = debugfs_create_dir("osm", NULL);
+		if (IS_ERR_OR_NULL(osm_debugfs_base)) {
+			pr_err("osm debugfs base directory creation failed\n");
+			osm_debugfs_base = NULL;
+			return;
+		}
+	}
+
+	c->debugfs = debugfs_create_dir(clk_hw_get_name(&c->hw),
+					osm_debugfs_base);
+	if (IS_ERR_OR_NULL(c->debugfs)) {
+		pr_err("osm debugfs directory creation failed\n");
+		return;
+	}
+
+	temp = debugfs_create_file("acd_debug_reg",
+			0644,
+			c->debugfs, c,
+			&debugfs_acd_debug_reg_fops);
+	if (IS_ERR_OR_NULL(temp)) {
+		pr_err("debugfs_acd_debug_reg_fops debugfs file creation failed\n");
+		goto exit;
+	}
+
+	temp = debugfs_create_file("acd_debug_reg_addr",
+			0644,
+			c->debugfs, c,
+			&debugfs_acd_debug_reg_addr_fops);
+	if (IS_ERR_OR_NULL(temp)) {
+		pr_err("debugfs_acd_debug_reg_addr_fops debugfs file creation failed\n");
+		goto exit;
+	}
+
+exit:
+	if (IS_ERR_OR_NULL(temp))
+		debugfs_remove_recursive(c->debugfs);
+}
+
+static int clk_osm_acd_init(struct clk_osm *c)
+{
+
+	int rc = 0;
+	u32 auto_xfer_mask = 0;
+
+	if (c->secure_init) {
+		clk_osm_write_reg(c, c->pbases[ACD_BASE] + ACDCR,
+					DATA_MEM(115), OSM_BASE);
+		clk_osm_write_reg(c, c->pbases[ACD_BASE] + ACD_WRITE_CTL,
+					DATA_MEM(116), OSM_BASE);
+	}
+
+	if (!c->acd_init)
+		return 0;
+
+	c->acd_debugfs_addr = ACD_HW_VERSION;
+
+	/* Program ACD tunable-length delay register */
+	clk_osm_write_reg(c, c->acd_td, ACDTD, ACD_BASE);
+	auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACDTD);
+
+	/* Program ACD control register */
+	clk_osm_write_reg(c, c->acd_cr, ACDCR, ACD_BASE);
+	auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACDCR);
+
+	/* Program ACD soft start control register */
+	clk_osm_write_reg(c, c->acd_sscr, ACDSSCR, ACD_BASE);
+	auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACDSSCR);
+
+	/* Program initial ACD external interface configuration register */
+	clk_osm_write_reg(c, c->acd_extint0_cfg, ACD_EXTINT_CFG, ACD_BASE);
+	auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACD_EXTINT_CFG);
+
+	/* Program ACD auto-register transfer control register */
+	clk_osm_write_reg(c, c->acd_autoxfer_ctl, ACD_AUTOXFER_CTL, ACD_BASE);
+
+	/* Ensure writes complete before transfers to local copy */
+	clk_osm_acd_mb(c);
+
+	/* Transfer master copies */
+	rc = clk_osm_acd_auto_local_write_reg(c, auto_xfer_mask);
+	if (rc)
+		return rc;
+
+	/* Switch CPUSS clock source to ACD clock */
+	auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACD_GFMUX_CFG);
+	rc = clk_osm_acd_master_write_through_reg(c, ACD_GFMUX_CFG_SELECT,
+							ACD_GFMUX_CFG);
+	if (rc)
+		return rc;
+
+	/* Program ACD_DCVS_SW */
+	rc = clk_osm_acd_master_write_through_reg(c,
+					ACD_DCVS_SW_DCVS_IN_PRGR_SET,
+					ACD_DCVS_SW);
+	if (rc)
+		return rc;
+
+	rc = clk_osm_acd_master_write_through_reg(c,
+					ACD_DCVS_SW_DCVS_IN_PRGR_CLEAR,
+					ACD_DCVS_SW);
+	if (rc)
+		return rc;
+
+	udelay(1);
+
+	/* Program final ACD external interface configuration register */
+	rc = clk_osm_acd_master_write_through_reg(c, c->acd_extint1_cfg,
+							ACD_EXTINT_CFG);
+	if (rc)
+		return rc;
+
+	if (c->acd_avg_init) {
+		auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACD_AVG_CFG_2);
+		rc = clk_osm_acd_master_write_through_reg(c, c->acd_avg_cfg2,
+								ACD_AVG_CFG_2);
+		if (rc)
+			return rc;
+
+		auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACD_AVG_CFG_1);
+		rc = clk_osm_acd_master_write_through_reg(c, c->acd_avg_cfg1,
+								ACD_AVG_CFG_1);
+		if (rc)
+			return rc;
+
+		auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACD_AVG_CFG_0);
+		rc = clk_osm_acd_master_write_through_reg(c, c->acd_avg_cfg0,
+								ACD_AVG_CFG_0);
+		if (rc)
+			return rc;
+	}
+
+	/*
+	 * ACDCR, ACDTD, ACDSSCR, ACD_EXTINT_CFG, ACD_GFMUX_CFG
+	 * must be copied from master to local copy on PC exit.
+	 * Also, ACD_AVG_CFG_0, ACD_AVG_CFG_1, and ACD_AVG_CFG_2 when
+	 * AVG is enabled.
+	 */
+	clk_osm_write_reg(c, auto_xfer_mask, ACD_AUTOXFER_CFG, ACD_BASE);
+	return 0;
+}
+
 static unsigned long init_rate = 300000000;
 
 static int clk_cpu_osm_driver_probe(struct platform_device *pdev)
 {
 	int rc = 0, cpu, i;
-	int speedbin = 0, pvs_ver = 0;
+	int pvs_ver = 0;
 	u32 pte_efuse, val;
 	int num_clks = ARRAY_SIZE(osm_qcom_clk_hws);
 	struct clk *ext_xo_clk, *clk;
@@ -2286,15 +2930,28 @@
 
 	rc = clk_osm_parse_dt_configs(pdev);
 	if (rc) {
-		dev_err(&pdev->dev, "Unable to parse device tree configurations\n");
+		dev_err(&pdev->dev, "Unable to parse OSM device tree configurations\n");
+		return rc;
+	}
+
+	rc = clk_osm_parse_acd_dt_configs(pdev);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to parse ACD device tree configurations\n");
 		return rc;
 	}
 
 	rc = clk_osm_resources_init(pdev);
 	if (rc) {
 		if (rc != -EPROBE_DEFER)
-			dev_err(&pdev->dev, "resources init failed, rc=%d\n",
-									rc);
+			dev_err(&pdev->dev, "OSM resources init failed, rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	rc = clk_osm_acd_resources_init(pdev);
+	if (rc) {
+		dev_err(&pdev->dev, "ACD resources init failed, rc=%d\n",
+			rc);
 		return rc;
 	}
 
@@ -2304,11 +2961,11 @@
 		l3_clk.speedbin = ((pte_efuse >> L3_EFUSE_SHIFT) &
 						    L3_EFUSE_MASK);
 		snprintf(l3speedbinstr, ARRAY_SIZE(l3speedbinstr),
-			 "qcom,l3-speedbin%d-v%d", speedbin, pvs_ver);
+			 "qcom,l3-speedbin%d-v%d", l3_clk.speedbin, pvs_ver);
 	}
 
 	dev_info(&pdev->dev, "using L3 speed bin %u and pvs_ver %d\n",
-		 speedbin, pvs_ver);
+		 l3_clk.speedbin, pvs_ver);
 
 	rc = clk_osm_get_lut(pdev, &l3_clk, l3speedbinstr);
 	if (rc) {
@@ -2323,11 +2980,12 @@
 		pwrcl_clk.speedbin = ((pte_efuse >> PWRCL_EFUSE_SHIFT) &
 						    PWRCL_EFUSE_MASK);
 		snprintf(pwrclspeedbinstr, ARRAY_SIZE(pwrclspeedbinstr),
-			 "qcom,pwrcl-speedbin%d-v%d", speedbin, pvs_ver);
+			 "qcom,pwrcl-speedbin%d-v%d", pwrcl_clk.speedbin,
+							pvs_ver);
 	}
 
 	dev_info(&pdev->dev, "using pwrcl speed bin %u and pvs_ver %d\n",
-		 speedbin, pvs_ver);
+		 pwrcl_clk.speedbin, pvs_ver);
 
 	rc = clk_osm_get_lut(pdev, &pwrcl_clk, pwrclspeedbinstr);
 	if (rc) {
@@ -2342,11 +3000,12 @@
 		perfcl_clk.speedbin = ((pte_efuse >> PERFCL_EFUSE_SHIFT) &
 							PERFCL_EFUSE_MASK);
 		snprintf(perfclspeedbinstr, ARRAY_SIZE(perfclspeedbinstr),
-			 "qcom,perfcl-speedbin%d-v%d", speedbin, pvs_ver);
+			 "qcom,perfcl-speedbin%d-v%d", perfcl_clk.speedbin,
+							pvs_ver);
 	}
 
 	dev_info(&pdev->dev, "using perfcl speed bin %u and pvs_ver %d\n",
-		 speedbin, pvs_ver);
+		 perfcl_clk.speedbin, pvs_ver);
 
 	rc = clk_osm_get_lut(pdev, &perfcl_clk, perfclspeedbinstr);
 	if (rc) {
@@ -2447,7 +3106,7 @@
 
 	/* Program VC at which the array power supply needs to be switched */
 	clk_osm_write_reg(&perfcl_clk, perfcl_clk.apm_threshold_vc,
-					APM_CROSSOVER_VC);
+				APM_CROSSOVER_VC, OSM_BASE);
 	if (perfcl_clk.secure_init) {
 		clk_osm_write_seq_reg(&perfcl_clk, perfcl_clk.apm_crossover_vc,
 				DATA_MEM(77));
@@ -2491,11 +3150,11 @@
 	if (pwrcl_clk.per_core_dcvs) {
 		val = clk_osm_read_reg(&pwrcl_clk, CORE_DCVS_CTRL);
 		val |= BIT(0);
-		clk_osm_write_reg(&pwrcl_clk, val, CORE_DCVS_CTRL);
+		clk_osm_write_reg(&pwrcl_clk, val, CORE_DCVS_CTRL, OSM_BASE);
 
 		val = clk_osm_read_reg(&perfcl_clk, CORE_DCVS_CTRL);
 		val |= BIT(0);
-		clk_osm_write_reg(&perfcl_clk, val, CORE_DCVS_CTRL);
+		clk_osm_write_reg(&perfcl_clk, val, CORE_DCVS_CTRL, OSM_BASE);
 	}
 
 	clk_ops_core = clk_dummy_ops;
@@ -2503,6 +3162,22 @@
 	clk_ops_core.round_rate = cpu_clk_round_rate;
 	clk_ops_core.recalc_rate = cpu_clk_recalc_rate;
 
+	rc = clk_osm_acd_init(&l3_clk);
+	if (rc) {
+		pr_err("failed to initialize ACD for L3, rc=%d\n", rc);
+		goto exit;
+	}
+	rc = clk_osm_acd_init(&pwrcl_clk);
+	if (rc) {
+		pr_err("failed to initialize ACD for pwrcl, rc=%d\n", rc);
+		goto exit;
+	}
+	rc = clk_osm_acd_init(&perfcl_clk);
+	if (rc) {
+		pr_err("failed to initialize ACD for perfcl, rc=%d\n", rc);
+		goto exit;
+	}
+
 	spin_lock_init(&l3_clk.lock);
 	spin_lock_init(&pwrcl_clk.lock);
 	spin_lock_init(&perfcl_clk.lock);
@@ -2576,6 +3251,9 @@
 	clk_prepare_enable(perfcl_clk.hw.clk);
 
 	populate_opp_table(pdev);
+	populate_debugfs_dir(&l3_clk);
+	populate_debugfs_dir(&pwrcl_clk);
+	populate_debugfs_dir(&perfcl_clk);
 
 	of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
 	register_cpu_cycle_counter_cb(&cb);
diff --git a/drivers/clk/qcom/debugcc-sdm845.c b/drivers/clk/qcom/debugcc-sdm845.c
index d30675c..44c5b81 100644
--- a/drivers/clk/qcom/debugcc-sdm845.c
+++ b/drivers/clk/qcom/debugcc-sdm845.c
@@ -272,6 +272,9 @@
 	"video_cc_venus_ahb_clk",
 	"video_cc_venus_ctl_axi_clk",
 	"video_cc_venus_ctl_core_clk",
+	"l3_clk",
+	"pwrcl_clk",
+	"perfcl_clk",
 };
 
 static struct clk_debug_mux gcc_debug_mux = {
@@ -766,6 +769,12 @@
 			0x4, 0x3F, 0, 0x7, 0, 1, 0xA4C, 0xA50, 0xA58 },
 		{ "video_cc_venus_ctl_core_clk", 0x48, 4, VIDEO_CC,
 			0x1, 0x3F, 0, 0x7, 0, 1, 0xA4C, 0xA50, 0xA58 },
+		{ "l3_clk", 0xD6, 4, CPU,
+			0x46, 0x7F, 4, 0xf, 11, 1, 0x0, 0x0, U32_MAX, 16 },
+		{ "pwrcl_clk", 0xD6, 4, CPU,
+			0x44, 0x7F, 4, 0xf, 11, 1, 0x0, 0x0, U32_MAX, 16 },
+		{ "perfcl_clk", 0xD6, 4, CPU,
+			0x45, 0x7F, 4, 0xf, 11, 1, 0x0, 0x0, U32_MAX, 16 },
 	),
 	.hw.init = &(struct clk_init_data){
 		.name = "gcc_debug_mux",
@@ -862,6 +871,16 @@
 		}
 	}
 
+	if (of_get_property(pdev->dev.of_node, "qcom,cpucc", NULL)) {
+		gcc_debug_mux.regmap[CPU] =
+			syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+					"qcom,cpucc");
+		if (IS_ERR(gcc_debug_mux.regmap[CPU])) {
+			pr_err("Failed to map qcom,cpucc\n");
+			return PTR_ERR(gcc_debug_mux.regmap[CPU]);
+		}
+	}
+
 	clk = devm_clk_register(&pdev->dev, &gcc_debug_mux.hw);
 	if (IS_ERR(clk)) {
 		dev_err(&pdev->dev, "Unable to register GCC debug mux\n");
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index 29421a1..228f716 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -1698,6 +1698,19 @@
 	},
 };
 
+static struct clk_branch gcc_gpu_iref_clk = {
+	.halt_reg = 0x8c010,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x8c010,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gpu_iref_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
 static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
 	.halt_reg = 0x7100c,
 	.halt_check = BRANCH_VOTED,
@@ -3310,6 +3323,7 @@
 	[GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
 	[GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
 	[GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+	[GCC_GPU_IREF_CLK] = &gcc_gpu_iref_clk.clkr,
 	[GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
 	[GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
 	[GCC_MSS_AXIS2_CLK] = &gcc_mss_axis2_clk.clkr,
diff --git a/drivers/clk/qcom/gdsc-regulator.c b/drivers/clk/qcom/gdsc-regulator.c
index 3bb7c04..90c76e6 100644
--- a/drivers/clk/qcom/gdsc-regulator.c
+++ b/drivers/clk/qcom/gdsc-regulator.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -54,6 +54,9 @@
 /* Timeout Delay */
 #define TIMEOUT_US		100
 
+/* TOGGLE SW COLLAPSE */
+#define TOGGLE_SW_COLLAPSE_IN_DISABLE	BIT(0)
+
 struct gdsc {
 	struct regulator_dev	*rdev;
 	struct regulator_desc	rdesc;
@@ -79,6 +82,7 @@
 	int			reset_count;
 	int			root_clk_idx;
 	u32			gds_timeout;
+	u32			flags;
 };
 
 enum gdscr_status {
@@ -378,6 +382,13 @@
 		regval |= SW_COLLAPSE_MASK;
 		regmap_write(sc->regmap, REG_OFFSET, regval);
 
+		if (sc->flags & TOGGLE_SW_COLLAPSE_IN_DISABLE) {
+			regval &= ~SW_COLLAPSE_MASK;
+			regmap_write(sc->regmap, REG_OFFSET, regval);
+			regval |= SW_COLLAPSE_MASK;
+			regmap_write(sc->regmap, REG_OFFSET, regval);
+		}
+
 		/* Wait for 8 XO cycles before polling the status bit. */
 		mb();
 		udelay(1);
@@ -522,7 +533,7 @@
 	struct resource *res;
 	struct gdsc *sc;
 	uint32_t regval, clk_dis_wait_val = 0;
-	bool retain_mem, retain_periph, support_hw_trigger;
+	bool retain_mem, retain_periph, support_hw_trigger, prop_val;
 	int i, ret;
 	u32 timeout;
 
@@ -613,6 +624,11 @@
 	sc->force_root_en = of_property_read_bool(pdev->dev.of_node,
 						"qcom,force-enable-root-clk");
 
+	prop_val = of_property_read_bool(pdev->dev.of_node,
+						"qcom,toggle-sw-collapse-in-disable");
+	if (prop_val)
+		sc->flags |= TOGGLE_SW_COLLAPSE_IN_DISABLE;
+
 	for (i = 0; i < sc->clock_count; i++) {
 		const char *clock_name;
 
diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c
index 0115bb1..ae9d509 100644
--- a/drivers/clk/qcom/gpucc-sdm845.c
+++ b/drivers/clk/qcom/gpucc-sdm845.c
@@ -644,8 +644,6 @@
 		return ret;
 	}
 
-	clk_prepare_enable(gpu_cc_cxo_clk.clkr.hw.clk);
-
 	dev_info(&pdev->dev, "Registered GFX CC clocks.\n");
 
 	return ret;
diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c
index 924f560..dcde70f 100644
--- a/drivers/clk/rockchip/clk-rk3036.c
+++ b/drivers/clk/rockchip/clk-rk3036.c
@@ -127,7 +127,7 @@
 PNAME(mux_pll_src_3plls_p)	= { "apll", "dpll", "gpll" };
 PNAME(mux_timer_p)		= { "xin24m", "pclk_peri_src" };
 
-PNAME(mux_pll_src_apll_dpll_gpll_usb480m_p)	= { "apll", "dpll", "gpll" "usb480m" };
+PNAME(mux_pll_src_apll_dpll_gpll_usb480m_p)	= { "apll", "dpll", "gpll", "usb480m" };
 
 PNAME(mux_mmc_src_p)	= { "apll", "dpll", "gpll", "xin24m" };
 PNAME(mux_i2s_pre_p)	= { "i2s_src", "i2s_frac", "ext_i2s", "xin12m" };
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 660dc20..2474f14 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -154,6 +154,7 @@
 					ctx_len, DMA_FROM_DEVICE);
 	if (dma_mapping_error(jrdev, state->ctx_dma)) {
 		dev_err(jrdev, "unable to map ctx\n");
+		state->ctx_dma = 0;
 		return -ENOMEM;
 	}
 
@@ -214,6 +215,7 @@
 	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
 	if (dma_mapping_error(jrdev, state->ctx_dma)) {
 		dev_err(jrdev, "unable to map ctx\n");
+		state->ctx_dma = 0;
 		return -ENOMEM;
 	}
 
@@ -620,8 +622,10 @@
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
 
-	if (state->ctx_dma)
+	if (state->ctx_dma) {
 		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
+		state->ctx_dma = 0;
+	}
 	ahash_unmap(dev, edesc, req, dst_len);
 }
 
@@ -1605,6 +1609,7 @@
 	state->finup = ahash_finup_first;
 	state->final = ahash_final_no_ctx;
 
+	state->ctx_dma = 0;
 	state->current_buf = 0;
 	state->buf_dma = 0;
 	state->buflen_0 = 0;
diff --git a/drivers/crypto/msm/compat_qcedev.h b/drivers/crypto/msm/compat_qcedev.h
index 4cc3933..6c041cb 100644
--- a/drivers/crypto/msm/compat_qcedev.h
+++ b/drivers/crypto/msm/compat_qcedev.h
@@ -1,3 +1,16 @@
+/*
+ * Copyright (c) 2014, 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
 #ifndef _UAPI_COMPAT_QCEDEV__H
 #define _UAPI_COMPAT_QCEDEV__H
 
diff --git a/drivers/crypto/msm/qce.h b/drivers/crypto/msm/qce.h
index 7b4ca24..c3b96f0 100644
--- a/drivers/crypto/msm/qce.h
+++ b/drivers/crypto/msm/qce.h
@@ -56,6 +56,12 @@
 /* Maximum Nonce bytes  */
 #define MAX_NONCE  16
 
+/* Crypto clock control flags */
+#define QCE_CLK_ENABLE_FIRST		1
+#define QCE_BW_REQUEST_FIRST		2
+#define QCE_CLK_DISABLE_FIRST		3
+#define QCE_BW_REQUEST_RESET_FIRST	4
+
 typedef void (*qce_comp_func_ptr_t)(void *areq,
 		unsigned char *icv, unsigned char *iv, int ret);
 
@@ -124,6 +130,7 @@
 	bool use_sw_hmac_algo;
 	bool use_sw_aes_ccm_algo;
 	bool clk_mgmt_sus_res;
+	bool req_bw_before_clk;
 	unsigned int ce_device;
 	unsigned int ce_hw_instance;
 	unsigned int max_request;
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
index 0cf4386..8af73ac 100644
--- a/drivers/crypto/msm/qce50.c
+++ b/drivers/crypto/msm/qce50.c
@@ -121,6 +121,7 @@
 	bool support_hw_key;
 	bool support_clk_mgmt_sus_res;
 	bool support_only_core_src_clk;
+	bool request_bw_before_clk;
 
 	void __iomem *iobase;	    /* Virtual io base of CE HW  */
 	unsigned int phy_iobase;    /* Physical io base of CE HW    */
@@ -298,7 +299,7 @@
 
 	pce_dev->ce_bam_info.ce_burst_size = MAX_CE_BAM_BURST_SIZE;
 
-	dev_info(pce_dev->pdev, "CE device = 0x%x\n, IO base, CE = 0x%p\n, Consumer (IN) PIPE %d,    Producer (OUT) PIPE %d\n IO base BAM = 0x%p\n BAM IRQ %d\n Engines Availability = 0x%x\n",
+	dev_info(pce_dev->pdev, "CE device = %#x IO base, CE = %pK Consumer (IN) PIPE %d,\nProducer (OUT) PIPE %d IO base BAM = %pK\nBAM IRQ %d Engines Availability = %#x\n",
 			pce_dev->ce_bam_info.ce_device, pce_dev->iobase,
 			pce_dev->ce_bam_info.dest_pipe_index,
 			pce_dev->ce_bam_info.src_pipe_index,
@@ -5675,6 +5676,8 @@
 		(&pdev->dev)->of_node, "qcom,clk-mgmt-sus-res");
 	pce_dev->support_only_core_src_clk = of_property_read_bool(
 		(&pdev->dev)->of_node, "qcom,support-core-clk-only");
+	pce_dev->request_bw_before_clk = of_property_read_bool(
+		(&pdev->dev)->of_node, "qcom,request-bw-before-clk");
 
 	if (of_property_read_u32((&pdev->dev)->of_node,
 				"qcom,bam-pipe-pair",
@@ -5762,6 +5765,9 @@
 
 	pce_dev->ce_core_src_clk = clk_get(pce_dev->pdev, "core_clk_src");
 	if (!IS_ERR(pce_dev->ce_core_src_clk)) {
+		if (pce_dev->request_bw_before_clk)
+			goto skip_set_rate;
+
 		rc = clk_set_rate(pce_dev->ce_core_src_clk,
 						pce_dev->ce_opp_freq_hz);
 		if (rc) {
@@ -5780,6 +5786,7 @@
 		pce_dev->ce_core_src_clk = NULL;
 	}
 
+skip_set_rate:
 	if (pce_dev->support_only_core_src_clk) {
 		pce_dev->ce_core_clk = NULL;
 		pce_dev->ce_clk = NULL;
@@ -6096,6 +6103,7 @@
 	ce_support->hw_key = pce_dev->support_hw_key;
 	ce_support->aes_ccm = true;
 	ce_support->clk_mgmt_sus_res = pce_dev->support_clk_mgmt_sus_res;
+	ce_support->req_bw_before_clk = pce_dev->request_bw_before_clk;
 	if (pce_dev->ce_bam_info.minor_version)
 		ce_support->aligned_only = false;
 	else
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
index 0860e59..5d6e0c2 100644
--- a/drivers/crypto/msm/qcedev.c
+++ b/drivers/crypto/msm/qcedev.c
@@ -58,6 +58,97 @@
 static DEFINE_MUTEX(send_cmd_lock);
 static DEFINE_MUTEX(qcedev_sent_bw_req);
 
+static int qcedev_control_clocks(struct qcedev_control *podev, bool enable)
+{
+	unsigned int control_flag;
+	int ret = 0;
+
+	if (podev->ce_support.req_bw_before_clk) {
+		if (enable)
+			control_flag = QCE_BW_REQUEST_FIRST;
+		else
+			control_flag = QCE_CLK_DISABLE_FIRST;
+	} else {
+		if (enable)
+			control_flag = QCE_CLK_ENABLE_FIRST;
+		else
+			control_flag = QCE_BW_REQUEST_RESET_FIRST;
+	}
+
+	switch (control_flag) {
+	case QCE_CLK_ENABLE_FIRST:
+		ret = qce_enable_clk(podev->qce);
+		if (ret) {
+			pr_err("%s Unable enable clk\n", __func__);
+			return ret;
+		}
+		ret = msm_bus_scale_client_update_request(
+				podev->bus_scale_handle, 1);
+		if (ret) {
+			pr_err("%s Unable to set high bw\n", __func__);
+			ret = qce_disable_clk(podev->qce);
+			if (ret)
+				pr_err("%s Unable disable clk\n", __func__);
+			return ret;
+		}
+		break;
+	case QCE_BW_REQUEST_FIRST:
+		ret = msm_bus_scale_client_update_request(
+				podev->bus_scale_handle, 1);
+		if (ret) {
+			pr_err("%s Unable to set high bw\n", __func__);
+			return ret;
+		}
+		ret = qce_enable_clk(podev->qce);
+		if (ret) {
+			pr_err("%s Unable enable clk\n", __func__);
+			ret = msm_bus_scale_client_update_request(
+				podev->bus_scale_handle, 0);
+			if (ret)
+				pr_err("%s Unable to set low bw\n", __func__);
+			return ret;
+		}
+		break;
+	case QCE_CLK_DISABLE_FIRST:
+		ret = qce_disable_clk(podev->qce);
+		if (ret) {
+			pr_err("%s Unable to disable clk\n", __func__);
+			return ret;
+		}
+		ret = msm_bus_scale_client_update_request(
+				podev->bus_scale_handle, 0);
+		if (ret) {
+			pr_err("%s Unable to set low bw\n", __func__);
+			ret = qce_enable_clk(podev->qce);
+			if (ret)
+				pr_err("%s Unable enable clk\n", __func__);
+			return ret;
+		}
+		break;
+	case QCE_BW_REQUEST_RESET_FIRST:
+		ret = msm_bus_scale_client_update_request(
+				podev->bus_scale_handle, 0);
+		if (ret) {
+			pr_err("%s Unable to set low bw\n", __func__);
+			return ret;
+		}
+		ret = qce_disable_clk(podev->qce);
+		if (ret) {
+			pr_err("%s Unable to disable clk\n", __func__);
+			ret = msm_bus_scale_client_update_request(
+				podev->bus_scale_handle, 1);
+			if (ret)
+				pr_err("%s Unable to set high bw\n", __func__);
+			return ret;
+		}
+		break;
+	default:
+		return -ENOENT;
+	}
+
+	return 0;
+}
+
 static void qcedev_ce_high_bw_req(struct qcedev_control *podev,
 							bool high_bw_req)
 {
@@ -66,47 +157,21 @@
 	mutex_lock(&qcedev_sent_bw_req);
 	if (high_bw_req) {
 		if (podev->high_bw_req_count == 0) {
-			ret = qce_enable_clk(podev->qce);
-			if (ret) {
-				pr_err("%s Unable enable clk\n", __func__);
-				mutex_unlock(&qcedev_sent_bw_req);
-				return;
-			}
-			ret = msm_bus_scale_client_update_request(
-					podev->bus_scale_handle, 1);
-			if (ret) {
-				pr_err("%s Unable to set to high bandwidth\n",
-							__func__);
-				ret = qce_disable_clk(podev->qce);
-				mutex_unlock(&qcedev_sent_bw_req);
-				return;
-			}
+			ret = qcedev_control_clocks(podev, true);
+			if (ret)
+				goto exit_unlock_mutex;
 		}
 		podev->high_bw_req_count++;
 	} else {
 		if (podev->high_bw_req_count == 1) {
-			ret = msm_bus_scale_client_update_request(
-					podev->bus_scale_handle, 0);
-			if (ret) {
-				pr_err("%s Unable to set to low bandwidth\n",
-							__func__);
-				mutex_unlock(&qcedev_sent_bw_req);
-				return;
-			}
-			ret = qce_disable_clk(podev->qce);
-			if (ret) {
-				pr_err("%s Unable disable clk\n", __func__);
-				ret = msm_bus_scale_client_update_request(
-					podev->bus_scale_handle, 1);
-				if (ret)
-					pr_err("%s Unable to set to high bandwidth\n",
-							__func__);
-				mutex_unlock(&qcedev_sent_bw_req);
-				return;
-			}
+			ret = qcedev_control_clocks(podev, false);
+			if (ret)
+				goto exit_unlock_mutex;
 		}
 		podev->high_bw_req_count--;
 	}
+
+exit_unlock_mutex:
 	mutex_unlock(&qcedev_sent_bw_req);
 }
 
@@ -1767,32 +1832,47 @@
 
 	tasklet_init(&podev->done_tasklet, req_done, (unsigned long)podev);
 
-	/* open qce */
+	podev->platform_support.bus_scale_table = (struct msm_bus_scale_pdata *)
+					msm_bus_cl_get_pdata(pdev);
+	if (!podev->platform_support.bus_scale_table) {
+		pr_err("bus_scale_table is NULL\n");
+		return -ENODATA;
+	}
+	podev->bus_scale_handle = msm_bus_scale_register_client(
+				(struct msm_bus_scale_pdata *)
+				podev->platform_support.bus_scale_table);
+	if (!podev->bus_scale_handle) {
+		pr_err("%s not able to get bus scale\n", __func__);
+		return -ENOMEM;
+	}
+
+	rc = msm_bus_scale_client_update_request(podev->bus_scale_handle, 1);
+	if (rc) {
+		pr_err("%s Unable to set to high bandwidth\n", __func__);
+		goto exit_unregister_bus_scale;
+	}
 	handle = qce_open(pdev, &rc);
 	if (handle == NULL) {
-		platform_set_drvdata(pdev, NULL);
-		return rc;
+		rc = -ENODEV;
+		goto exit_scale_busbandwidth;
+	}
+	rc = msm_bus_scale_client_update_request(podev->bus_scale_handle, 0);
+	if (rc) {
+		pr_err("%s Unable to set to low bandwidth\n", __func__);
+		goto exit_qce_close;
 	}
 
 	podev->qce = handle;
 	podev->pdev = pdev;
 	platform_set_drvdata(pdev, podev);
 
-	rc = misc_register(&podev->miscdevice);
 	qce_hw_support(podev->qce, &podev->ce_support);
 	if (podev->ce_support.bam) {
 		podev->platform_support.ce_shared = 0;
 		podev->platform_support.shared_ce_resource = 0;
 		podev->platform_support.hw_key_support =
 						podev->ce_support.hw_key;
-		podev->platform_support.bus_scale_table = NULL;
 		podev->platform_support.sha_hmac = 1;
-
-		podev->platform_support.bus_scale_table =
-			(struct msm_bus_scale_pdata *)
-					msm_bus_cl_get_pdata(pdev);
-		if (!podev->platform_support.bus_scale_table)
-			pr_err("bus_scale_table is NULL\n");
 	} else {
 		platform_support =
 			(struct msm_ce_hw_support *)pdev->dev.platform_data;
@@ -1801,35 +1881,27 @@
 				platform_support->shared_ce_resource;
 		podev->platform_support.hw_key_support =
 				platform_support->hw_key_support;
-		podev->platform_support.bus_scale_table =
-				platform_support->bus_scale_table;
 		podev->platform_support.sha_hmac = platform_support->sha_hmac;
 	}
-	if (podev->platform_support.bus_scale_table != NULL) {
-		podev->bus_scale_handle =
-			msm_bus_scale_register_client(
-				(struct msm_bus_scale_pdata *)
-				podev->platform_support.bus_scale_table);
-		if (!podev->bus_scale_handle) {
-			pr_err("%s not able to get bus scale\n",
-				__func__);
-			rc =  -ENOMEM;
-			goto err;
-		}
-	}
 
+	rc = misc_register(&podev->miscdevice);
 	if (rc >= 0)
 		return 0;
 
-	if (podev->platform_support.bus_scale_table != NULL)
-		msm_bus_scale_unregister_client(podev->bus_scale_handle);
-err:
+	misc_deregister(&podev->miscdevice);
 
+exit_qce_close:
 	if (handle)
 		qce_close(handle);
+exit_scale_busbandwidth:
+	msm_bus_scale_client_update_request(podev->bus_scale_handle, 0);
+exit_unregister_bus_scale:
+	if (podev->platform_support.bus_scale_table != NULL)
+		msm_bus_scale_unregister_client(podev->bus_scale_handle);
 	platform_set_drvdata(pdev, NULL);
-	podev->qce = NULL;
 	podev->pdev = NULL;
+	podev->qce = NULL;
+
 	return rc;
 };
 
@@ -1864,23 +1936,9 @@
 
 	mutex_lock(&qcedev_sent_bw_req);
 	if (podev->high_bw_req_count) {
-		ret = msm_bus_scale_client_update_request(
-				podev->bus_scale_handle, 0);
-		if (ret) {
-			pr_err("%s Unable to set to low bandwidth\n",
-						__func__);
+		ret = qcedev_control_clocks(podev, false);
+		if (ret)
 			goto suspend_exit;
-		}
-		ret = qce_disable_clk(podev->qce);
-		if (ret) {
-			pr_err("%s Unable disable clk\n", __func__);
-			ret = msm_bus_scale_client_update_request(
-				podev->bus_scale_handle, 1);
-			if (ret)
-				pr_err("%s Unable to set to high bandwidth\n",
-					__func__);
-			goto suspend_exit;
-		}
 	}
 
 suspend_exit:
@@ -1900,22 +1958,9 @@
 
 	mutex_lock(&qcedev_sent_bw_req);
 	if (podev->high_bw_req_count) {
-		ret = qce_enable_clk(podev->qce);
-		if (ret) {
-			pr_err("%s Unable enable clk\n", __func__);
+		ret = qcedev_control_clocks(podev, true);
+		if (ret)
 			goto resume_exit;
-		}
-		ret = msm_bus_scale_client_update_request(
-				podev->bus_scale_handle, 1);
-		if (ret) {
-			pr_err("%s Unable to set to high bandwidth\n",
-						__func__);
-			ret = qce_disable_clk(podev->qce);
-			if (ret)
-				pr_err("%s Unable enable clk\n",
-					__func__);
-			goto resume_exit;
-		}
 	}
 
 resume_exit:
diff --git a/drivers/devfreq/arm-memlat-mon.c b/drivers/devfreq/arm-memlat-mon.c
index ed83185..5802c21 100644
--- a/drivers/devfreq/arm-memlat-mon.c
+++ b/drivers/devfreq/arm-memlat-mon.c
@@ -47,22 +47,32 @@
 	unsigned long prev_count;
 };
 
-struct memlat_hwmon_data {
+struct cpu_pmu_stats {
 	struct event_data events[NUM_EVENTS];
 	ktime_t prev_ts;
-	bool init_pending;
-	unsigned long cache_miss_event;
-	unsigned long inst_event;
 };
-static DEFINE_PER_CPU(struct memlat_hwmon_data, pm_data);
 
 struct cpu_grp_info {
 	cpumask_t cpus;
+	cpumask_t inited_cpus;
+	unsigned long cache_miss_event;
+	unsigned long inst_event;
+	struct cpu_pmu_stats *cpustats;
 	struct memlat_hwmon hw;
 	struct notifier_block arm_memlat_cpu_notif;
+	struct list_head mon_list;
 };
 
-static unsigned long compute_freq(struct memlat_hwmon_data *hw_data,
+#define to_cpustats(cpu_grp, cpu) \
+	(&cpu_grp->cpustats[cpu - cpumask_first(&cpu_grp->cpus)])
+#define to_devstats(cpu_grp, cpu) \
+	(&cpu_grp->hw.core_stats[cpu - cpumask_first(&cpu_grp->cpus)])
+#define to_cpu_grp(hwmon) container_of(hwmon, struct cpu_grp_info, hw)
+
+static LIST_HEAD(memlat_mon_list);
+static DEFINE_MUTEX(list_lock);
+
+static unsigned long compute_freq(struct cpu_pmu_stats *cpustats,
 						unsigned long cyc_cnt)
 {
 	ktime_t ts;
@@ -70,10 +80,10 @@
 	unsigned long freq = 0;
 
 	ts = ktime_get();
-	diff = ktime_to_us(ktime_sub(ts, hw_data->prev_ts));
+	diff = ktime_to_us(ktime_sub(ts, cpustats->prev_ts));
 	if (!diff)
 		diff = 1;
-	hw_data->prev_ts = ts;
+	cpustats->prev_ts = ts;
 	freq = cyc_cnt;
 	do_div(freq, diff);
 
@@ -99,69 +109,59 @@
 
 static void read_perf_counters(int cpu, struct cpu_grp_info *cpu_grp)
 {
-	int cpu_idx;
-	struct memlat_hwmon_data *hw_data = &per_cpu(pm_data, cpu);
-	struct memlat_hwmon *hw = &cpu_grp->hw;
+	struct cpu_pmu_stats *cpustats = to_cpustats(cpu_grp, cpu);
+	struct dev_stats *devstats = to_devstats(cpu_grp, cpu);
 	unsigned long cyc_cnt;
 
-	if (hw_data->init_pending)
-		return;
-
-	cpu_idx = cpu - cpumask_first(&cpu_grp->cpus);
-
-	hw->core_stats[cpu_idx].inst_count =
-			read_event(&hw_data->events[INST_IDX]);
-
-	hw->core_stats[cpu_idx].mem_count =
-			read_event(&hw_data->events[CM_IDX]);
-
-	cyc_cnt = read_event(&hw_data->events[CYC_IDX]);
-	hw->core_stats[cpu_idx].freq = compute_freq(hw_data, cyc_cnt);
+	devstats->inst_count = read_event(&cpustats->events[INST_IDX]);
+	devstats->mem_count = read_event(&cpustats->events[CM_IDX]);
+	cyc_cnt = read_event(&cpustats->events[CYC_IDX]);
+	devstats->freq = compute_freq(cpustats, cyc_cnt);
 }
 
 static unsigned long get_cnt(struct memlat_hwmon *hw)
 {
 	int cpu;
-	struct cpu_grp_info *cpu_grp = container_of(hw,
-					struct cpu_grp_info, hw);
+	struct cpu_grp_info *cpu_grp = to_cpu_grp(hw);
 
-	for_each_cpu(cpu, &cpu_grp->cpus)
+	for_each_cpu(cpu, &cpu_grp->inited_cpus)
 		read_perf_counters(cpu, cpu_grp);
 
 	return 0;
 }
 
-static void delete_events(struct memlat_hwmon_data *hw_data)
+static void delete_events(struct cpu_pmu_stats *cpustats)
 {
 	int i;
 
 	for (i = 0; i < NUM_EVENTS; i++) {
-		hw_data->events[i].prev_count = 0;
-		perf_event_release_kernel(hw_data->events[i].pevent);
+		cpustats->events[i].prev_count = 0;
+		perf_event_release_kernel(cpustats->events[i].pevent);
 	}
 }
 
 static void stop_hwmon(struct memlat_hwmon *hw)
 {
-	int cpu, idx;
-	struct memlat_hwmon_data *hw_data;
-	struct cpu_grp_info *cpu_grp = container_of(hw,
-					struct cpu_grp_info, hw);
+	int cpu;
+	struct cpu_grp_info *cpu_grp = to_cpu_grp(hw);
+	struct dev_stats *devstats;
 
 	get_online_cpus();
-	for_each_cpu(cpu, &cpu_grp->cpus) {
-		hw_data = &per_cpu(pm_data, cpu);
-		if (hw_data->init_pending)
-			hw_data->init_pending = false;
-		else
-			delete_events(hw_data);
+	for_each_cpu(cpu, &cpu_grp->inited_cpus) {
+		delete_events(to_cpustats(cpu_grp, cpu));
 
 		/* Clear governor data */
-		idx = cpu - cpumask_first(&cpu_grp->cpus);
-		hw->core_stats[idx].inst_count = 0;
-		hw->core_stats[idx].mem_count = 0;
-		hw->core_stats[idx].freq = 0;
+		devstats = to_devstats(cpu_grp, cpu);
+		devstats->inst_count = 0;
+		devstats->mem_count = 0;
+		devstats->freq = 0;
 	}
+	mutex_lock(&list_lock);
+	if (!cpumask_equal(&cpu_grp->cpus, &cpu_grp->inited_cpus))
+		list_del(&cpu_grp->mon_list);
+	mutex_unlock(&list_lock);
+	cpumask_clear(&cpu_grp->inited_cpus);
+
 	put_online_cpus();
 
 	unregister_cpu_notifier(&cpu_grp->arm_memlat_cpu_notif);
@@ -173,7 +173,7 @@
 
 	attr = kzalloc(sizeof(struct perf_event_attr), GFP_KERNEL);
 	if (!attr)
-		return ERR_PTR(-ENOMEM);
+		return attr;
 
 	attr->type = PERF_TYPE_RAW;
 	attr->size = sizeof(struct perf_event_attr);
@@ -183,37 +183,38 @@
 	return attr;
 }
 
-static int set_events(struct memlat_hwmon_data *hw_data, int cpu)
+static int set_events(struct cpu_grp_info *cpu_grp, int cpu)
 {
 	struct perf_event *pevent;
 	struct perf_event_attr *attr;
 	int err;
+	struct cpu_pmu_stats *cpustats = to_cpustats(cpu_grp, cpu);
 
 	/* Allocate an attribute for event initialization */
 	attr = alloc_attr();
-	if (IS_ERR(attr))
-		return PTR_ERR(attr);
+	if (!attr)
+		return -ENOMEM;
 
-	attr->config = hw_data->inst_event;
+	attr->config = cpu_grp->inst_event;
 	pevent = perf_event_create_kernel_counter(attr, cpu, NULL, NULL, NULL);
 	if (IS_ERR(pevent))
 		goto err_out;
-	hw_data->events[INST_IDX].pevent = pevent;
-	perf_event_enable(hw_data->events[INST_IDX].pevent);
+	cpustats->events[INST_IDX].pevent = pevent;
+	perf_event_enable(cpustats->events[INST_IDX].pevent);
 
-	attr->config = hw_data->cache_miss_event;
+	attr->config = cpu_grp->cache_miss_event;
 	pevent = perf_event_create_kernel_counter(attr, cpu, NULL, NULL, NULL);
 	if (IS_ERR(pevent))
 		goto err_out;
-	hw_data->events[CM_IDX].pevent = pevent;
-	perf_event_enable(hw_data->events[CM_IDX].pevent);
+	cpustats->events[CM_IDX].pevent = pevent;
+	perf_event_enable(cpustats->events[CM_IDX].pevent);
 
 	attr->config = CYC_EV;
 	pevent = perf_event_create_kernel_counter(attr, cpu, NULL, NULL, NULL);
 	if (IS_ERR(pevent))
 		goto err_out;
-	hw_data->events[CYC_IDX].pevent = pevent;
-	perf_event_enable(hw_data->events[CYC_IDX].pevent);
+	cpustats->events[CYC_IDX].pevent = pevent;
+	perf_event_enable(cpustats->events[CYC_IDX].pevent);
 
 	kfree(attr);
 	return 0;
@@ -228,15 +229,24 @@
 		unsigned long action, void *hcpu)
 {
 	unsigned long cpu = (unsigned long)hcpu;
-	struct memlat_hwmon_data *hw_data = &per_cpu(pm_data, cpu);
+	struct cpu_grp_info *cpu_grp, *tmp;
 
-	if ((action != CPU_ONLINE) || !hw_data->init_pending)
+	if (action != CPU_ONLINE)
 		return NOTIFY_OK;
 
-	if (set_events(hw_data, cpu))
-		pr_warn("Failed to create perf event for CPU%lu\n", cpu);
-
-	hw_data->init_pending = false;
+	mutex_lock(&list_lock);
+	list_for_each_entry_safe(cpu_grp, tmp, &memlat_mon_list, mon_list) {
+		if (!cpumask_test_cpu(cpu, &cpu_grp->cpus) ||
+		    cpumask_test_cpu(cpu, &cpu_grp->inited_cpus))
+			continue;
+		if (set_events(cpu_grp, cpu))
+			pr_warn("Failed to create perf ev for CPU%lu\n", cpu);
+		else
+			cpumask_set_cpu(cpu, &cpu_grp->inited_cpus);
+		if (cpumask_equal(&cpu_grp->cpus, &cpu_grp->inited_cpus))
+			list_del(&cpu_grp->mon_list);
+	}
+	mutex_unlock(&list_lock);
 
 	return NOTIFY_OK;
 }
@@ -244,29 +254,32 @@
 static int start_hwmon(struct memlat_hwmon *hw)
 {
 	int cpu, ret = 0;
-	struct memlat_hwmon_data *hw_data;
-	struct cpu_grp_info *cpu_grp = container_of(hw,
-					struct cpu_grp_info, hw);
+	struct cpu_grp_info *cpu_grp = to_cpu_grp(hw);
 
 	register_cpu_notifier(&cpu_grp->arm_memlat_cpu_notif);
 
 	get_online_cpus();
 	for_each_cpu(cpu, &cpu_grp->cpus) {
-		hw_data = &per_cpu(pm_data, cpu);
-		ret = set_events(hw_data, cpu);
+		ret = set_events(cpu_grp, cpu);
 		if (ret) {
 			if (!cpu_online(cpu)) {
-				hw_data->init_pending = true;
 				ret = 0;
 			} else {
 				pr_warn("Perf event init failed on CPU%d\n",
 					cpu);
 				break;
 			}
+		} else {
+			cpumask_set_cpu(cpu, &cpu_grp->inited_cpus);
 		}
 	}
+	mutex_lock(&list_lock);
+	if (!cpumask_equal(&cpu_grp->cpus, &cpu_grp->inited_cpus))
+		list_add_tail(&cpu_grp->mon_list, &memlat_mon_list);
+	mutex_unlock(&list_lock);
 
 	put_online_cpus();
+
 	return ret;
 }
 
@@ -328,6 +341,11 @@
 	if (!hw->core_stats)
 		return -ENOMEM;
 
+	cpu_grp->cpustats = devm_kzalloc(dev, hw->num_cores *
+			sizeof(*(cpu_grp->cpustats)), GFP_KERNEL);
+	if (!cpu_grp->cpustats)
+		return -ENOMEM;
+
 	ret = of_property_read_u32(dev->of_node, "qcom,cachemiss-ev",
 			&cachemiss_ev);
 	if (ret) {
@@ -335,6 +353,7 @@
 				L2DM_EV);
 		cachemiss_ev = L2DM_EV;
 	}
+	cpu_grp->cache_miss_event = cachemiss_ev;
 
 	ret = of_property_read_u32(dev->of_node, "qcom,inst-ev", &inst_ev);
 	if (ret) {
@@ -342,12 +361,10 @@
 				INST_EV);
 		inst_ev = INST_EV;
 	}
+	cpu_grp->inst_event = inst_ev;
 
-	for_each_cpu(cpu, &cpu_grp->cpus) {
-		hw->core_stats[cpu - cpumask_first(&cpu_grp->cpus)].id = cpu;
-		(&per_cpu(pm_data, cpu))->cache_miss_event = cachemiss_ev;
-		(&per_cpu(pm_data, cpu))->inst_event = inst_ev;
-	}
+	for_each_cpu(cpu, &cpu_grp->cpus)
+		to_devstats(cpu_grp, cpu)->id = cpu;
 
 	hw->start_hwmon = &start_hwmon;
 	hw->stop_hwmon = &stop_hwmon;
diff --git a/drivers/devfreq/bimc-bwmon.c b/drivers/devfreq/bimc-bwmon.c
index d70104d..ffe60de 100644
--- a/drivers/devfreq/bimc-bwmon.c
+++ b/drivers/devfreq/bimc-bwmon.c
@@ -26,14 +26,20 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/spinlock.h>
+#include <linux/log2.h>
+#include <linux/sizes.h>
 #include "governor_bw_hwmon.h"
 
 #define GLB_INT_STATUS(m)	((m)->global_base + 0x100)
 #define GLB_INT_CLR(m)		((m)->global_base + 0x108)
 #define	GLB_INT_EN(m)		((m)->global_base + 0x10C)
 #define MON_INT_STATUS(m)	((m)->base + 0x100)
+#define MON_INT_STATUS_MASK	0x03
+#define MON2_INT_STATUS_MASK	0xF0
+#define MON2_INT_STATUS_SHIFT	4
 #define MON_INT_CLR(m)		((m)->base + 0x108)
 #define	MON_INT_EN(m)		((m)->base + 0x10C)
+#define MON_INT_ENABLE		0x1
 #define	MON_EN(m)		((m)->base + 0x280)
 #define MON_CLEAR(m)		((m)->base + 0x284)
 #define MON_CNT(m)		((m)->base + 0x288)
@@ -54,9 +60,29 @@
 #define MON2_ZONE_CNT(m)	((m)->base + 0x2D8)
 #define MON2_ZONE_MAX(m, zone)	((m)->base + 0x2E0 + 0x4 * zone)
 
-enum bwmon_type {
-	BWMON_1,
-	BWMON_2,
+#define MON3_INT_STATUS(m)	((m)->base + 0x00)
+#define MON3_INT_CLR(m)		((m)->base + 0x08)
+#define MON3_INT_EN(m)		((m)->base + 0x0C)
+#define MON3_INT_STATUS_MASK	0x0F
+#define MON3_EN(m)		((m)->base + 0x10)
+#define MON3_CLEAR(m)		((m)->base + 0x14)
+#define MON3_MASK(m)		((m)->base + 0x18)
+#define MON3_MATCH(m)		((m)->base + 0x1C)
+#define MON3_SW(m)		((m)->base + 0x20)
+#define MON3_THRES_HI(m)	((m)->base + 0x24)
+#define MON3_THRES_MED(m)	((m)->base + 0x28)
+#define MON3_THRES_LO(m)	((m)->base + 0x2C)
+#define MON3_ZONE_ACTIONS(m)	((m)->base + 0x30)
+#define MON3_ZONE_CNT_THRES(m)	((m)->base + 0x34)
+#define MON3_BYTE_CNT(m)	((m)->base + 0x38)
+#define MON3_WIN_TIMER(m)	((m)->base + 0x3C)
+#define MON3_ZONE_CNT(m)	((m)->base + 0x40)
+#define MON3_ZONE_MAX(m, zone)	((m)->base + 0x44 + 0x4 * zone)
+
+enum mon_reg_type {
+	MON1,
+	MON2,
+	MON3,
 };
 
 struct bwmon_spec {
@@ -64,6 +90,8 @@
 	bool overflow;
 	bool throt_adj;
 	bool hw_sampling;
+	bool has_global_base;
+	enum mon_reg_type reg_type;
 };
 
 struct bwmon {
@@ -78,6 +106,10 @@
 	u32 throttle_adj;
 	u32 sample_size_ms;
 	u32 intr_status;
+	u8 count_shift;
+	u32 thres_lim;
+	u32 byte_mask;
+	u32 byte_match;
 };
 
 #define to_bwmon(ptr)		container_of(ptr, struct bwmon, hw)
@@ -85,33 +117,36 @@
 #define ENABLE_MASK BIT(0)
 #define THROTTLE_MASK 0x1F
 #define THROTTLE_SHIFT 16
-#define INT_ENABLE_V1	0x1
-#define INT_STATUS_MASK	0x03
-#define INT_STATUS_MASK_HWS	0xF0
 
 static DEFINE_SPINLOCK(glb_lock);
 
-static __always_inline void mon_enable(struct bwmon *m, enum bwmon_type type)
+static __always_inline void mon_enable(struct bwmon *m, enum mon_reg_type type)
 {
 	switch (type) {
-	case BWMON_1:
+	case MON1:
 		writel_relaxed(ENABLE_MASK | m->throttle_adj, MON_EN(m));
 		break;
-	case BWMON_2:
+	case MON2:
 		writel_relaxed(ENABLE_MASK | m->throttle_adj, MON2_EN(m));
 		break;
+	case MON3:
+		writel_relaxed(ENABLE_MASK | m->throttle_adj, MON3_EN(m));
+		break;
 	}
 }
 
-static __always_inline void mon_disable(struct bwmon *m, enum bwmon_type type)
+static __always_inline void mon_disable(struct bwmon *m, enum mon_reg_type type)
 {
 	switch (type) {
-	case BWMON_1:
+	case MON1:
 		writel_relaxed(m->throttle_adj, MON_EN(m));
 		break;
-	case BWMON_2:
+	case MON2:
 		writel_relaxed(m->throttle_adj, MON2_EN(m));
 		break;
+	case MON3:
+		writel_relaxed(m->throttle_adj, MON3_EN(m));
+		break;
 	}
 	/*
 	 * mon_disable() and mon_irq_clear(),
@@ -124,18 +159,24 @@
 #define MON_CLEAR_BIT	0x1
 #define MON_CLEAR_ALL_BIT	0x2
 static __always_inline
-void mon_clear(struct bwmon *m, bool clear_all, enum bwmon_type type)
+void mon_clear(struct bwmon *m, bool clear_all, enum mon_reg_type type)
 {
 	switch (type) {
-	case BWMON_1:
+	case MON1:
 		writel_relaxed(MON_CLEAR_BIT, MON_CLEAR(m));
 		break;
-	case BWMON_2:
+	case MON2:
 		if (clear_all)
 			writel_relaxed(MON_CLEAR_ALL_BIT, MON2_CLEAR(m));
 		else
 			writel_relaxed(MON_CLEAR_BIT, MON2_CLEAR(m));
 		break;
+	case MON3:
+		if (clear_all)
+			writel_relaxed(MON_CLEAR_ALL_BIT, MON3_CLEAR(m));
+		else
+			writel_relaxed(MON_CLEAR_BIT, MON3_CLEAR(m));
+		break;
 	}
 	/*
 	 * The counter clear and IRQ clear bits are not in the same 4KB
@@ -146,7 +187,9 @@
 }
 
 #define	SAMPLE_WIN_LIM	0xFFFFF
-static void mon_set_hw_sampling_window(struct bwmon *m, unsigned int sample_ms)
+static __always_inline
+void mon_set_hw_sampling_window(struct bwmon *m, unsigned int sample_ms,
+				enum mon_reg_type type)
 {
 	u32 rate;
 
@@ -158,7 +201,17 @@
 			pr_warn("Sample window %u larger than hw limit: %u\n",
 					rate, SAMPLE_WIN_LIM);
 		}
-		writel_relaxed(rate, MON2_SW(m));
+		switch (type) {
+		case MON1:
+			WARN(1, "Invalid\n");
+			return;
+		case MON2:
+			writel_relaxed(rate, MON2_SW(m));
+			break;
+		case MON3:
+			writel_relaxed(rate, MON3_SW(m));
+			break;
+		}
 	}
 }
 
@@ -172,24 +225,29 @@
 }
 
 static __always_inline
-void mon_irq_enable(struct bwmon *m, enum bwmon_type type)
+void mon_irq_enable(struct bwmon *m, enum mon_reg_type type)
 {
 	u32 val;
 
 	spin_lock(&glb_lock);
 	switch (type) {
-	case BWMON_1:
+	case MON1:
 		mon_glb_irq_enable(m);
 		val = readl_relaxed(MON_INT_EN(m));
-		val |= INT_ENABLE_V1;
+		val |= MON_INT_ENABLE;
 		writel_relaxed(val, MON_INT_EN(m));
 		break;
-	case BWMON_2:
+	case MON2:
 		mon_glb_irq_enable(m);
 		val = readl_relaxed(MON_INT_EN(m));
-		val |= INT_STATUS_MASK_HWS;
+		val |= MON2_INT_STATUS_MASK;
 		writel_relaxed(val, MON_INT_EN(m));
 		break;
+	case MON3:
+		val = readl_relaxed(MON3_INT_EN(m));
+		val |= MON3_INT_STATUS_MASK;
+		writel_relaxed(val, MON3_INT_EN(m));
+		break;
 	}
 	spin_unlock(&glb_lock);
 	/*
@@ -209,25 +267,30 @@
 }
 
 static __always_inline
-void mon_irq_disable(struct bwmon *m, enum bwmon_type type)
+void mon_irq_disable(struct bwmon *m, enum mon_reg_type type)
 {
 	u32 val;
 
 	spin_lock(&glb_lock);
 
 	switch (type) {
-	case BWMON_1:
+	case MON1:
 		mon_glb_irq_disable(m);
 		val = readl_relaxed(MON_INT_EN(m));
-		val &= ~INT_ENABLE_V1;
+		val &= ~MON_INT_ENABLE;
 		writel_relaxed(val, MON_INT_EN(m));
 		break;
-	case BWMON_2:
+	case MON2:
 		mon_glb_irq_disable(m);
 		val = readl_relaxed(MON_INT_EN(m));
-		val &= ~INT_STATUS_MASK_HWS;
+		val &= ~MON2_INT_STATUS_MASK;
 		writel_relaxed(val, MON_INT_EN(m));
 		break;
+	case MON3:
+		val = readl_relaxed(MON3_INT_EN(m));
+		val &= ~MON3_INT_STATUS_MASK;
+		writel_relaxed(val, MON3_INT_EN(m));
+		break;
 	}
 	spin_unlock(&glb_lock);
 	/*
@@ -238,22 +301,28 @@
 }
 
 static __always_inline
-unsigned int mon_irq_status(struct bwmon *m, enum bwmon_type type)
+unsigned int mon_irq_status(struct bwmon *m, enum mon_reg_type type)
 {
 	u32 mval;
 
 	switch (type) {
-	case BWMON_1:
+	case MON1:
 		mval = readl_relaxed(MON_INT_STATUS(m));
 		dev_dbg(m->dev, "IRQ status p:%x, g:%x\n", mval,
 				readl_relaxed(GLB_INT_STATUS(m)));
-		mval &= INT_STATUS_MASK;
+		mval &= MON_INT_STATUS_MASK;
 		break;
-	case BWMON_2:
+	case MON2:
 		mval = readl_relaxed(MON_INT_STATUS(m));
 		dev_dbg(m->dev, "IRQ status p:%x, g:%x\n", mval,
 				readl_relaxed(GLB_INT_STATUS(m)));
-		mval &= INT_STATUS_MASK_HWS;
+		mval &= MON2_INT_STATUS_MASK;
+		mval >>= MON2_INT_STATUS_SHIFT;
+		break;
+	case MON3:
+		mval = readl_relaxed(MON3_INT_STATUS(m));
+		dev_dbg(m->dev, "IRQ status p:%x\n", mval);
+		mval &= MON3_INT_STATUS_MASK;
 		break;
 	}
 
@@ -283,17 +352,20 @@
 }
 
 static __always_inline
-void mon_irq_clear(struct bwmon *m, enum bwmon_type type)
+void mon_irq_clear(struct bwmon *m, enum mon_reg_type type)
 {
 	switch (type) {
-	case BWMON_1:
-		writel_relaxed(INT_STATUS_MASK, MON_INT_CLR(m));
+	case MON1:
+		writel_relaxed(MON_INT_STATUS_MASK, MON_INT_CLR(m));
 		mon_glb_irq_clear(m);
 		break;
-	case BWMON_2:
-		writel_relaxed(INT_STATUS_MASK_HWS, MON_INT_CLR(m));
+	case MON2:
+		writel_relaxed(MON2_INT_STATUS_MASK, MON_INT_CLR(m));
 		mon_glb_irq_clear(m);
 		break;
+	case MON3:
+		writel_relaxed(MON3_INT_STATUS_MASK, MON3_INT_CLR(m));
+		break;
 	}
 }
 
@@ -350,11 +422,18 @@
 	return zone_counts;
 }
 
-static unsigned int mbps_to_mb(unsigned long mbps, unsigned int ms)
+#define MB_SHIFT	20
+
+static u32 mbps_to_count(unsigned long mbps, unsigned int ms, u8 shift)
 {
 	mbps *= ms;
-	mbps = DIV_ROUND_UP(mbps, MSEC_PER_SEC);
-	return mbps;
+
+	if (shift > MB_SHIFT)
+		mbps >>= shift - MB_SHIFT;
+	else
+		mbps <<= MB_SHIFT - shift;
+
+	return DIV_ROUND_UP(mbps, MSEC_PER_SEC);
 }
 
 /*
@@ -362,43 +441,60 @@
  * Zone 0: byte count < THRES_LO
  * Zone 1: THRES_LO < byte count < THRES_MED
  * Zone 2: THRES_MED < byte count < THRES_HI
- * Zone 3: byte count > THRES_HI
+ * Zone 3: THRES_LIM > byte count > THRES_HI
  */
-#define	THRES_LIM	0x7FFU
-static void set_zone_thres(struct bwmon *m, unsigned int sample_ms)
-{
-	struct bw_hwmon *hw = &(m->hw);
-	u32 hi, med, lo;
+#define	THRES_LIM(shift)	(0xFFFFFFFF >> shift)
 
-	hi = mbps_to_mb(hw->up_wake_mbps, sample_ms);
-	med = mbps_to_mb(hw->down_wake_mbps, sample_ms);
+static __always_inline
+void set_zone_thres(struct bwmon *m, unsigned int sample_ms,
+		    enum mon_reg_type type)
+{
+	struct bw_hwmon *hw = &m->hw;
+	u32 hi, med, lo;
+	u32 zone_cnt_thres = calc_zone_counts(hw);
+
+	hi = mbps_to_count(hw->up_wake_mbps, sample_ms, m->count_shift);
+	med = mbps_to_count(hw->down_wake_mbps, sample_ms, m->count_shift);
 	lo = 0;
 
-	if (unlikely((hi > THRES_LIM) || (med > hi) || (lo > med))) {
+	if (unlikely((hi > m->thres_lim) || (med > hi) || (lo > med))) {
 		pr_warn("Zone thres larger than hw limit: hi:%u med:%u lo:%u\n",
 				hi, med, lo);
-		hi = min(hi, THRES_LIM);
+		hi = min(hi, m->thres_lim);
 		med = min(med, hi - 1);
 		lo = min(lo, med-1);
 	}
 
-	writel_relaxed(hi, MON2_THRES_HI(m));
-	writel_relaxed(med, MON2_THRES_MED(m));
-	writel_relaxed(lo, MON2_THRES_LO(m));
+	switch (type) {
+	case MON1:
+		WARN(1, "Invalid\n");
+		return;
+	case MON2:
+		writel_relaxed(hi, MON2_THRES_HI(m));
+		writel_relaxed(med, MON2_THRES_MED(m));
+		writel_relaxed(lo, MON2_THRES_LO(m));
+		/* Set the zone count thresholds for interrupts */
+		writel_relaxed(zone_cnt_thres, MON2_ZONE_CNT_THRES(m));
+		break;
+	case MON3:
+		writel_relaxed(hi, MON3_THRES_HI(m));
+		writel_relaxed(med, MON3_THRES_MED(m));
+		writel_relaxed(lo, MON3_THRES_LO(m));
+		/* Set the zone count thresholds for interrupts */
+		writel_relaxed(zone_cnt_thres, MON3_ZONE_CNT_THRES(m));
+		break;
+	}
+
 	dev_dbg(m->dev, "Thres: hi:%u med:%u lo:%u\n", hi, med, lo);
+	dev_dbg(m->dev, "Zone Count Thres: %0x\n", zone_cnt_thres);
 }
 
-static void mon_set_zones(struct bwmon *m, unsigned int sample_ms)
+static __always_inline
+void mon_set_zones(struct bwmon *m, unsigned int sample_ms,
+		   enum mon_reg_type type)
 {
-	struct bw_hwmon *hw = &(m->hw);
-	u32 zone_cnt_thres = calc_zone_counts(hw);
-
-	mon_set_hw_sampling_window(m, sample_ms);
-	set_zone_thres(m, sample_ms);
-	/* Set the zone count thresholds for interrupts */
-	writel_relaxed(zone_cnt_thres, MON2_ZONE_CNT_THRES(m));
-
-	dev_dbg(m->dev, "Zone Count Thres: %0x\n", zone_cnt_thres);
+	mon_set_hw_sampling_window(m, sample_ms, type);
+	set_zone_thres(m, sample_ms, type);
 }
 
 static void mon_set_limit(struct bwmon *m, u32 count)
@@ -419,7 +515,7 @@
 	unsigned long count, status;
 
 	count = readl_relaxed(MON_CNT(m));
-	status = mon_irq_status(m, BWMON_1);
+	status = mon_irq_status(m, MON1);
 
 	dev_dbg(m->dev, "Counter: %08lx\n", count);
 
@@ -433,16 +529,28 @@
 	return count;
 }
 
-static unsigned int get_zone(struct bwmon *m)
+static __always_inline
+unsigned int get_zone(struct bwmon *m, enum mon_reg_type type)
 {
 	u32 zone_counts;
 	u32 zone;
 
-	zone = get_bitmask_order((m->intr_status & INT_STATUS_MASK_HWS) >> 4);
+	zone = get_bitmask_order(m->intr_status);
 	if (zone) {
 		zone--;
 	} else {
-		zone_counts = readl_relaxed(MON2_ZONE_CNT(m));
+		switch (type) {
+		case MON1:
+			WARN(1, "Invalid\n");
+			return 0;
+		case MON2:
+			zone_counts = readl_relaxed(MON2_ZONE_CNT(m));
+			break;
+		case MON3:
+			zone_counts = readl_relaxed(MON3_ZONE_CNT(m));
+			break;
+		}
+
 		if (zone_counts) {
 			zone = get_bitmask_order(zone_counts) - 1;
 			zone /= 8;
@@ -453,15 +561,36 @@
 	return zone;
 }
 
-static unsigned long mon_get_zone_stats(struct bwmon *m)
+static __always_inline
+unsigned long get_zone_count(struct bwmon *m, unsigned int zone,
+			     enum mon_reg_type type)
+{
+	unsigned long count;
+
+	switch (type) {
+	case MON1:
+		WARN(1, "Invalid\n");
+		return 0;
+	case MON2:
+		count = readl_relaxed(MON2_ZONE_MAX(m, zone)) + 1;
+		break;
+	case MON3:
+		count = readl_relaxed(MON3_ZONE_MAX(m, zone)) + 1;
+		break;
+	}
+
+	return count;
+}
+
+static __always_inline
+unsigned long mon_get_zone_stats(struct bwmon *m, enum mon_reg_type type)
 {
 	unsigned int zone;
 	unsigned long count = 0;
 
-	zone = get_zone(m);
-
-	count = readl_relaxed(MON2_ZONE_MAX(m, zone)) + 1;
-	count *= SZ_1M;
+	zone = get_zone(m, type);
+	count = get_zone_count(m, zone, type);
+	count <<= m->count_shift;
 
 	dev_dbg(m->dev, "Zone%d Max byte count: %08lx\n", zone, count);
 
@@ -469,16 +598,17 @@
 }
 
 static __always_inline
-unsigned long mon_get_count(struct bwmon *m, enum bwmon_type type)
+unsigned long mon_get_count(struct bwmon *m, enum mon_reg_type type)
 {
 	unsigned long count;
 
 	switch (type) {
-	case BWMON_1:
+	case MON1:
 		count = mon_get_count1(m);
 		break;
-	case BWMON_2:
-		count = mon_get_zone_stats(m);
+	case MON2:
+	case MON3:
+		count = mon_get_zone_stats(m, type);
 		break;
 	}
 
@@ -499,7 +629,7 @@
 }
 
 static __always_inline
-unsigned long __get_bytes_and_clear(struct bw_hwmon *hw, enum bwmon_type type)
+unsigned long __get_bytes_and_clear(struct bw_hwmon *hw, enum mon_reg_type type)
 {
 	struct bwmon *m = to_bwmon(hw);
 	unsigned long count;
@@ -515,12 +645,17 @@
 
 static unsigned long get_bytes_and_clear(struct bw_hwmon *hw)
 {
-	return __get_bytes_and_clear(hw, BWMON_1);
+	return __get_bytes_and_clear(hw, MON1);
 }
 
 static unsigned long get_bytes_and_clear2(struct bw_hwmon *hw)
 {
-	return __get_bytes_and_clear(hw, BWMON_2);
+	return __get_bytes_and_clear(hw, MON2);
+}
+
+static unsigned long get_bytes_and_clear3(struct bw_hwmon *hw)
+{
+	return __get_bytes_and_clear(hw, MON3);
 }
 
 static unsigned long set_thres(struct bw_hwmon *hw, unsigned long bytes)
@@ -529,10 +664,10 @@
 	u32 limit;
 	struct bwmon *m = to_bwmon(hw);
 
-	mon_disable(m, BWMON_1);
+	mon_disable(m, MON1);
 	count = mon_get_count1(m);
-	mon_clear(m, false, BWMON_1);
-	mon_irq_clear(m, BWMON_1);
+	mon_clear(m, false, MON1);
+	mon_irq_clear(m, MON1);
 
 	if (likely(!m->spec->wrap_on_thres))
 		limit = bytes;
@@ -540,27 +675,40 @@
 		limit = max(bytes, 500000UL);
 
 	mon_set_limit(m, limit);
-	mon_enable(m, BWMON_1);
+	mon_enable(m, MON1);
 
 	return count;
 }
 
-static unsigned long set_hw_events(struct bw_hwmon *hw, unsigned int sample_ms)
+static unsigned long
+__set_hw_events(struct bw_hwmon *hw, unsigned int sample_ms,
+		enum mon_reg_type type)
 {
 	struct bwmon *m = to_bwmon(hw);
 
-	mon_disable(m, BWMON_2);
-	mon_clear(m, false, BWMON_2);
-	mon_irq_clear(m, BWMON_2);
+	mon_disable(m, type);
+	mon_clear(m, false, type);
+	mon_irq_clear(m, type);
 
-	mon_set_zones(m, sample_ms);
-	mon_enable(m, BWMON_2);
+	mon_set_zones(m, sample_ms, type);
+	mon_enable(m, type);
 
 	return 0;
 }
 
+static unsigned long set_hw_events(struct bw_hwmon *hw, unsigned int sample_ms)
+{
+	return __set_hw_events(hw, sample_ms, MON2);
+}
+
+static unsigned long
+set_hw_events3(struct bw_hwmon *hw, unsigned int sample_ms)
+{
+	return __set_hw_events(hw, sample_ms, MON3);
+}
+
 static irqreturn_t
-__bwmon_intr_handler(int irq, void *dev, enum bwmon_type type)
+__bwmon_intr_handler(int irq, void *dev, enum mon_reg_type type)
 {
 	struct bwmon *m = dev;
 
@@ -576,12 +724,17 @@
 
 static irqreturn_t bwmon_intr_handler(int irq, void *dev)
 {
-	return __bwmon_intr_handler(irq, dev, BWMON_1);
+	return __bwmon_intr_handler(irq, dev, MON1);
 }
 
 static irqreturn_t bwmon_intr_handler2(int irq, void *dev)
 {
-	return __bwmon_intr_handler(irq, dev, BWMON_2);
+	return __bwmon_intr_handler(irq, dev, MON2);
+}
+
+static irqreturn_t bwmon_intr_handler3(int irq, void *dev)
+{
+	return __bwmon_intr_handler(irq, dev, MON3);
 }
 
 static irqreturn_t bwmon_intr_thread(int irq, void *dev)
@@ -592,8 +745,27 @@
 	return IRQ_HANDLED;
 }
 
-static __always_inline int
-__start_bw_hwmon(struct bw_hwmon *hw, unsigned long mbps, enum bwmon_type type)
+static __always_inline
+void mon_set_byte_count_filter(struct bwmon *m, enum mon_reg_type type)
+{
+	if (!m->byte_mask)
+		return;
+
+	switch (type) {
+	case MON1:
+	case MON2:
+		writel_relaxed(m->byte_mask, MON_MASK(m));
+		writel_relaxed(m->byte_match, MON_MATCH(m));
+		break;
+	case MON3:
+		writel_relaxed(m->byte_mask, MON3_MASK(m));
+		writel_relaxed(m->byte_match, MON3_MATCH(m));
+		break;
+	}
+}
+
+static __always_inline int __start_bw_hwmon(struct bw_hwmon *hw,
+		unsigned long mbps, enum mon_reg_type type)
 {
 	struct bwmon *m = to_bwmon(hw);
 	u32 limit, zone_actions;
@@ -601,14 +773,18 @@
 	irq_handler_t handler;
 
 	switch (type) {
-	case BWMON_1:
+	case MON1:
 		handler = bwmon_intr_handler;
 		limit = mbps_to_bytes(mbps, hw->df->profile->polling_ms, 0);
 		break;
-	case BWMON_2:
+	case MON2:
 		zone_actions = calc_zone_actions();
 		handler = bwmon_intr_handler2;
 		break;
+	case MON3:
+		zone_actions = calc_zone_actions();
+		handler = bwmon_intr_handler3;
+		break;
 	}
 
 	ret = request_threaded_irq(m->irq, handler, bwmon_intr_thread,
@@ -625,17 +801,22 @@
 	mon_clear(m, false, type);
 
 	switch (type) {
-	case BWMON_1:
+	case MON1:
 		handler = bwmon_intr_handler;
 		mon_set_limit(m, limit);
 		break;
-	case BWMON_2:
-		mon_set_zones(m, hw->df->profile->polling_ms);
+	case MON2:
+		mon_set_zones(m, hw->df->profile->polling_ms, type);
 		/* Set the zone actions to increment appropriate counters */
 		writel_relaxed(zone_actions, MON2_ZONE_ACTIONS(m));
 		break;
+	case MON3:
+		mon_set_zones(m, hw->df->profile->polling_ms, type);
+		/* Set the zone actions to increment appropriate counters */
+		writel_relaxed(zone_actions, MON3_ZONE_ACTIONS(m));
 	}
 
+	mon_set_byte_count_filter(m, type);
 	mon_irq_clear(m, type);
 	mon_irq_enable(m, type);
 	mon_enable(m, type);
@@ -645,16 +826,21 @@
 
 static int start_bw_hwmon(struct bw_hwmon *hw, unsigned long mbps)
 {
-	return __start_bw_hwmon(hw, mbps, BWMON_1);
+	return __start_bw_hwmon(hw, mbps, MON1);
 }
 
 static int start_bw_hwmon2(struct bw_hwmon *hw, unsigned long mbps)
 {
-	return __start_bw_hwmon(hw, mbps, BWMON_2);
+	return __start_bw_hwmon(hw, mbps, MON2);
+}
+
+static int start_bw_hwmon3(struct bw_hwmon *hw, unsigned long mbps)
+{
+	return __start_bw_hwmon(hw, mbps, MON3);
 }
 
 static __always_inline
-void __stop_bw_hwmon(struct bw_hwmon *hw, enum bwmon_type type)
+void __stop_bw_hwmon(struct bw_hwmon *hw, enum mon_reg_type type)
 {
 	struct bwmon *m = to_bwmon(hw);
 
@@ -667,16 +853,21 @@
 
 static void stop_bw_hwmon(struct bw_hwmon *hw)
 {
-	return __stop_bw_hwmon(hw, BWMON_1);
+	return __stop_bw_hwmon(hw, MON1);
 }
 
 static void stop_bw_hwmon2(struct bw_hwmon *hw)
 {
-	return __stop_bw_hwmon(hw, BWMON_2);
+	return __stop_bw_hwmon(hw, MON2);
+}
+
+static void stop_bw_hwmon3(struct bw_hwmon *hw)
+{
+	return __stop_bw_hwmon(hw, MON3);
 }
 
 static __always_inline
-int __suspend_bw_hwmon(struct bw_hwmon *hw, enum bwmon_type type)
+int __suspend_bw_hwmon(struct bw_hwmon *hw, enum mon_reg_type type)
 {
 	struct bwmon *m = to_bwmon(hw);
 
@@ -690,27 +881,36 @@
 
 static int suspend_bw_hwmon(struct bw_hwmon *hw)
 {
-	return __suspend_bw_hwmon(hw, BWMON_1);
+	return __suspend_bw_hwmon(hw, MON1);
 }
 
 static int suspend_bw_hwmon2(struct bw_hwmon *hw)
 {
-	return __suspend_bw_hwmon(hw, BWMON_2);
+	return __suspend_bw_hwmon(hw, MON2);
 }
 
-static int __resume_bw_hwmon(struct bw_hwmon *hw, enum bwmon_type type)
+static int suspend_bw_hwmon3(struct bw_hwmon *hw)
+{
+	return __suspend_bw_hwmon(hw, MON3);
+}
+
+static __always_inline
+int __resume_bw_hwmon(struct bw_hwmon *hw, enum mon_reg_type type)
 {
 	struct bwmon *m = to_bwmon(hw);
 	int ret;
 	irq_handler_t handler;
 
 	switch (type) {
-	case BWMON_1:
+	case MON1:
 		handler = bwmon_intr_handler;
 		break;
-	case BWMON_2:
+	case MON2:
 		handler = bwmon_intr_handler2;
 		break;
+	case MON3:
+		handler = bwmon_intr_handler3;
+		break;
 	}
 
 	mon_clear(m, false, type);
@@ -731,12 +931,17 @@
 
 static int resume_bw_hwmon(struct bw_hwmon *hw)
 {
-	return __resume_bw_hwmon(hw, BWMON_1);
+	return __resume_bw_hwmon(hw, MON1);
 }
 
 static int resume_bw_hwmon2(struct bw_hwmon *hw)
 {
-	return __resume_bw_hwmon(hw, BWMON_2);
+	return __resume_bw_hwmon(hw, MON2);
+}
+
+static int resume_bw_hwmon3(struct bw_hwmon *hw)
+{
+	return __resume_bw_hwmon(hw, MON3);
 }
 
 /*************************************************************************/
@@ -746,25 +951,40 @@
 		.wrap_on_thres = true,
 		.overflow = false,
 		.throt_adj = false,
-		.hw_sampling = false
+		.hw_sampling = false,
+		.has_global_base = true,
+		.reg_type = MON1,
 	},
 	[1] = {
 		.wrap_on_thres = false,
 		.overflow = true,
 		.throt_adj = false,
-		.hw_sampling = false
+		.hw_sampling = false,
+		.has_global_base = true,
+		.reg_type = MON1,
 	},
 	[2] = {
 		.wrap_on_thres = false,
 		.overflow = true,
 		.throt_adj = true,
-		.hw_sampling = false
+		.hw_sampling = false,
+		.has_global_base = true,
+		.reg_type = MON1,
 	},
 	[3] = {
 		.wrap_on_thres = false,
 		.overflow = true,
 		.throt_adj = true,
-		.hw_sampling = true
+		.hw_sampling = true,
+		.has_global_base = true,
+		.reg_type = MON2,
+	},
+	[4] = {
+		.wrap_on_thres = false,
+		.overflow = true,
+		.throt_adj = false,
+		.hw_sampling = true,
+		.reg_type = MON3,
 	},
 };
 
@@ -773,6 +993,7 @@
 	{ .compatible = "qcom,bimc-bwmon2", .data = &spec[1] },
 	{ .compatible = "qcom,bimc-bwmon3", .data = &spec[2] },
 	{ .compatible = "qcom,bimc-bwmon4", .data = &spec[3] },
+	{ .compatible = "qcom,bimc-bwmon5", .data = &spec[4] },
 	{}
 };
 
@@ -782,20 +1003,13 @@
 	struct resource *res;
 	struct bwmon *m;
 	int ret;
-	u32 data;
+	u32 data, count_unit;
 
 	m = devm_kzalloc(dev, sizeof(*m), GFP_KERNEL);
 	if (!m)
 		return -ENOMEM;
 	m->dev = dev;
 
-	ret = of_property_read_u32(dev->of_node, "qcom,mport", &data);
-	if (ret) {
-		dev_err(dev, "mport not found!\n");
-		return ret;
-	}
-	m->mport = data;
-
 	m->spec = of_device_get_match_data(dev);
 	if (!m->spec) {
 		dev_err(dev, "Unknown device type!\n");
@@ -813,15 +1027,26 @@
 		return -ENOMEM;
 	}
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "global_base");
-	if (!res) {
-		dev_err(dev, "global_base not found!\n");
-		return -EINVAL;
-	}
-	m->global_base = devm_ioremap(dev, res->start, resource_size(res));
-	if (!m->global_base) {
-		dev_err(dev, "Unable map global_base!\n");
-		return -ENOMEM;
+	if (m->spec->has_global_base) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						   "global_base");
+		if (!res) {
+			dev_err(dev, "global_base not found!\n");
+			return -EINVAL;
+		}
+		m->global_base = devm_ioremap(dev, res->start,
+					      resource_size(res));
+		if (!m->global_base) {
+			dev_err(dev, "Unable map global_base!\n");
+			return -ENOMEM;
+		}
+
+		ret = of_property_read_u32(dev->of_node, "qcom,mport", &data);
+		if (ret) {
+			dev_err(dev, "mport not found!\n");
+			return ret;
+		}
+		m->mport = data;
 	}
 
 	m->irq = platform_get_irq(pdev, 0);
@@ -841,22 +1066,45 @@
 			dev_err(dev, "HW sampling rate not specified!\n");
 			return ret;
 		}
+	}
 
+	if (of_property_read_u32(dev->of_node, "qcom,count-unit", &count_unit))
+		count_unit = SZ_1M;
+	m->count_shift = order_base_2(count_unit);
+	m->thres_lim = THRES_LIM(m->count_shift);
+
+	switch (m->spec->reg_type) {
+	case MON3:
+		m->hw.start_hwmon = start_bw_hwmon3;
+		m->hw.stop_hwmon = stop_bw_hwmon3;
+		m->hw.suspend_hwmon = suspend_bw_hwmon3;
+		m->hw.resume_hwmon = resume_bw_hwmon3;
+		m->hw.get_bytes_and_clear = get_bytes_and_clear3;
+		m->hw.set_hw_events = set_hw_events3;
+		break;
+	case MON2:
 		m->hw.start_hwmon = start_bw_hwmon2;
 		m->hw.stop_hwmon = stop_bw_hwmon2;
 		m->hw.suspend_hwmon = suspend_bw_hwmon2;
 		m->hw.resume_hwmon = resume_bw_hwmon2;
 		m->hw.get_bytes_and_clear = get_bytes_and_clear2;
 		m->hw.set_hw_events = set_hw_events;
-	} else {
+		break;
+	case MON1:
 		m->hw.start_hwmon = start_bw_hwmon;
 		m->hw.stop_hwmon = stop_bw_hwmon;
 		m->hw.suspend_hwmon = suspend_bw_hwmon;
 		m->hw.resume_hwmon = resume_bw_hwmon;
 		m->hw.get_bytes_and_clear = get_bytes_and_clear;
 		m->hw.set_thres = set_thres;
+		break;
 	}
 
+	of_property_read_u32(dev->of_node, "qcom,byte-mid-match",
+			     &m->byte_match);
+	of_property_read_u32(dev->of_node, "qcom,byte-mid-mask",
+			     &m->byte_mask);
+
 	if (m->spec->throt_adj) {
 		m->hw.set_throttle_adj = mon_set_throttle_adj;
 		m->hw.get_throttle_adj = mon_get_throttle_adj;
diff --git a/drivers/dma/qcom/Kconfig b/drivers/dma/qcom/Kconfig
index a7761c4..2afceb1 100644
--- a/drivers/dma/qcom/Kconfig
+++ b/drivers/dma/qcom/Kconfig
@@ -27,3 +27,22 @@
 	  (user to kernel, kernel to kernel, etc.).  It only supports
 	  memcpy interface. The core is not intended for general
 	  purpose slave DMA.
+
+config QCOM_GPI_DMA
+	tristate "Qualcomm Technologies Inc GPI DMA support"
+	depends on ARCH_QCOM
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the QCOM GPI DMA controller. This controller
+	  provides DMA capabilities for a variety of peripheral buses such
+	  as I2C, UART, and SPI. By using the GPI dmaengine driver, bus drivers
+	  can use a standardized interface that is protocol independent to
+	  transfer data between DDR and peripheral.
+
+config QCOM_GPI_DMA_DEBUG
+	bool "Qualcomm Technologies Inc GPI debug support"
+	depends on QCOM_GPI_DMA
+	help
+	  Enable detailed logging for QCOM GPI driver. Extra logging will be
+	  helpful when debugging critical issues.
diff --git a/drivers/dma/qcom/Makefile b/drivers/dma/qcom/Makefile
index 4bfc38b..6476ac5 100644
--- a/drivers/dma/qcom/Makefile
+++ b/drivers/dma/qcom/Makefile
@@ -3,3 +3,4 @@
 hdma_mgmt-objs	 := hidma_mgmt.o hidma_mgmt_sys.o
 obj-$(CONFIG_QCOM_HIDMA) +=  hdma.o
 hdma-objs        := hidma_ll.o hidma.o hidma_dbg.o
+obj-$(CONFIG_QCOM_GPI_DMA) += gpi.o
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
new file mode 100644
index 0000000..6e6f28f
--- /dev/null
+++ b/drivers/dma/qcom/gpi.c
@@ -0,0 +1,2816 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/dma-iommu.h>
+#include <linux/atomic.h>
+#include <linux/completion.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ipc_logging.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_dma.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <asm/cacheflush.h>
+#include <linux/msm_gpi.h>
+#include "../dmaengine.h"
+#include "../virt-dma.h"
+#include "msm_gpi_mmio.h"
+
+/* global logging macros */
+#define GPI_LOG(gpi_dev, fmt, ...) do { \
+	if (gpi_dev->klog_lvl != LOG_LVL_MASK_ALL) \
+		dev_dbg(gpi_dev->dev, "%s: " fmt, __func__, ##__VA_ARGS__); \
+	if (gpi_dev->ilctxt && gpi_dev->ipc_log_lvl != LOG_LVL_MASK_ALL) \
+		ipc_log_string(gpi_dev->ilctxt, \
+			"%s: " fmt, __func__, ##__VA_ARGS__); \
+	} while (0)
+#define GPI_ERR(gpi_dev, fmt, ...) do { \
+	if (gpi_dev->klog_lvl >= LOG_LVL_ERROR) \
+		dev_err(gpi_dev->dev, "%s: " fmt, __func__, ##__VA_ARGS__); \
+	if (gpi_dev->ilctxt && gpi_dev->ipc_log_lvl >= LOG_LVL_ERROR) \
+		ipc_log_string(gpi_dev->ilctxt, \
+			"%s: " fmt, __func__, ##__VA_ARGS__); \
+	} while (0)
+
+/* gpii specific logging macros */
+#define GPII_REG(gpii, ch, fmt, ...) do { \
+	if (gpii->klog_lvl >= LOG_LVL_REG_ACCESS) \
+		pr_info("%s:%u:%s: " fmt, gpii->label, \
+			ch, __func__, ##__VA_ARGS__); \
+	if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_REG_ACCESS) \
+		ipc_log_string(gpii->ilctxt, \
+			       "ch:%u %s: " fmt, ch, \
+			       __func__, ##__VA_ARGS__); \
+	} while (0)
+#define GPII_VERB(gpii, ch, fmt, ...) do { \
+	if (gpii->klog_lvl >= LOG_LVL_VERBOSE) \
+		pr_info("%s:%u:%s: " fmt, gpii->label, \
+			ch, __func__, ##__VA_ARGS__); \
+	if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_VERBOSE) \
+		ipc_log_string(gpii->ilctxt, \
+			       "ch:%u %s: " fmt, ch, \
+			       __func__, ##__VA_ARGS__); \
+	} while (0)
+#define GPII_INFO(gpii, ch, fmt, ...) do { \
+	if (gpii->klog_lvl >= LOG_LVL_INFO) \
+		pr_info("%s:%u:%s: " fmt, gpii->label, ch, \
+			__func__, ##__VA_ARGS__); \
+	if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_INFO) \
+		ipc_log_string(gpii->ilctxt, \
+			       "ch:%u %s: " fmt, ch, \
+			       __func__, ##__VA_ARGS__); \
+	} while (0)
+#define GPII_ERR(gpii, ch, fmt, ...) do { \
+	if (gpii->klog_lvl >= LOG_LVL_ERROR) \
+		pr_err("%s:%u:%s: " fmt, gpii->label, ch, \
+		       __func__, ##__VA_ARGS__); \
+	if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_ERROR) \
+		ipc_log_string(gpii->ilctxt, \
+			       "ch:%u %s: " fmt, ch, \
+			       __func__, ##__VA_ARGS__); \
+	} while (0)
+#define GPII_CRITIC(gpii, ch, fmt, ...) do { \
+	if (gpii->klog_lvl >= LOG_LVL_CRITICAL) \
+		pr_err("%s:%u:%s: " fmt, gpii->label, ch, \
+		       __func__, ##__VA_ARGS__); \
+	if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_CRITICAL) \
+		ipc_log_string(gpii->ilctxt, \
+			       "ch:%u %s: " fmt, ch, \
+			       __func__, ##__VA_ARGS__); \
+	} while (0)
+
+enum DEBUG_LOG_LVL {
+	LOG_LVL_MASK_ALL,
+	LOG_LVL_CRITICAL,
+	LOG_LVL_ERROR,
+	LOG_LVL_INFO,
+	LOG_LVL_VERBOSE,
+	LOG_LVL_REG_ACCESS,
+};
+
+enum EV_PRIORITY {
+	EV_PRIORITY_ISR,
+	EV_PRIORITY_TASKLET,
+};
+
+#define GPI_DMA_DRV_NAME "gpi_dma"
+#define DEFAULT_KLOG_LVL (LOG_LVL_CRITICAL)
+#ifdef CONFIG_QCOM_GPI_DMA_DEBUG
+#define DEFAULT_IPC_LOG_LVL (LOG_LVL_VERBOSE)
+#define IPC_LOG_PAGES (40)
+#define GPI_DBG_LOG_SIZE (SZ_1K) /* size must be power of 2 */
+#else
+#define IPC_LOG_PAGES (2)
+#define GPI_DBG_LOG_SIZE (0) /* size must be power of 2 */
+#define DEFAULT_IPC_LOG_LVL (LOG_LVL_ERROR)
+#endif
+
+#define GPI_LABEL_SIZE (256)
+#define GPI_DBG_COMMON (99)
+#define MAX_CHANNELS_PER_GPII (2)
+#define CMD_TIMEOUT_MS (50)
+#define STATE_IGNORE (U32_MAX)
+#define REQ_OF_DMA_ARGS (6) /* # of arguments required from client */
+
+struct __packed gpi_error_log_entry {
+	u32 routine : 4;
+	u32 type : 4;
+	u32 reserved0 : 4;
+	u32 code : 4;
+	u32 reserved1 : 3;
+	u32 chid : 5;
+	u32 reserved2 : 1;
+	u32 chtype : 1;
+	u32 ee : 1;
+};
+
+struct __packed xfer_compl_event {
+	u64 ptr;
+	u32 length : 24;
+	u8 code;
+	u16 status;
+	u8 type;
+	u8 chid;
+};
+
+struct __packed immediate_data_event {
+	u8 data_bytes[8];
+	u8 length : 4;
+	u8 resvd : 4;
+	u16 tre_index;
+	u8 code;
+	u16 status;
+	u8 type;
+	u8 chid;
+};
+
+struct __packed qup_notif_event {
+	u32 status;
+	u32 time;
+	u32 count :24;
+	u8 resvd;
+	u16 resvd1;
+	u8 type;
+	u8 chid;
+};
+
+struct __packed gpi_ere {
+	u32 dword[4];
+};
+
+enum GPI_EV_TYPE {
+	XFER_COMPLETE_EV_TYPE = 0x22,
+	IMMEDIATE_DATA_EV_TYPE = 0x30,
+	QUP_NOTIF_EV_TYPE = 0x31,
+	STALE_EV_TYPE = 0xFF,
+};
+
+union __packed gpi_event {
+	struct __packed xfer_compl_event xfer_compl_event;
+	struct __packed immediate_data_event immediate_data_event;
+	struct __packed qup_notif_event qup_notif_event;
+	struct __packed gpi_ere gpi_ere;
+};
+
+enum gpii_irq_settings {
+	DEFAULT_IRQ_SETTINGS,
+	MASK_IEOB_SETTINGS,
+};
+
+enum gpi_ev_state {
+	DEFAULT_EV_CH_STATE = 0,
+	EV_STATE_NOT_ALLOCATED = DEFAULT_EV_CH_STATE,
+	EV_STATE_ALLOCATED,
+	MAX_EV_STATES
+};
+
+static const char *const gpi_ev_state_str[MAX_EV_STATES] = {
+	[EV_STATE_NOT_ALLOCATED] = "NOT ALLOCATED",
+	[EV_STATE_ALLOCATED] = "ALLOCATED",
+};
+
+#define TO_GPI_EV_STATE_STR(state) ((state >= MAX_EV_STATES) ? \
+				    "INVALID" : gpi_ev_state_str[state])
+
+/* channel states; values mirror the hw CHSTATE field encoding */
+enum gpi_ch_state {
+	DEFAULT_CH_STATE = 0x0,
+	CH_STATE_NOT_ALLOCATED = DEFAULT_CH_STATE,
+	CH_STATE_ALLOCATED = 0x1,
+	CH_STATE_STARTED = 0x2,
+	CH_STATE_STOPPED = 0x3,
+	CH_STATE_STOP_IN_PROC = 0x4,
+	CH_STATE_ERROR = 0xf,
+	MAX_CH_STATES
+};
+
+/* printable names for enum gpi_ch_state (gaps 0x5-0xe intentionally NULL) */
+static const char *const gpi_ch_state_str[MAX_CH_STATES] = {
+	[CH_STATE_NOT_ALLOCATED] = "NOT ALLOCATED",
+	[CH_STATE_ALLOCATED] = "ALLOCATED",
+	[CH_STATE_STARTED] = "STARTED",
+	[CH_STATE_STOPPED] = "STOPPED",
+	[CH_STATE_STOP_IN_PROC] = "STOP IN PROCESS",
+	[CH_STATE_ERROR] = "ERROR",
+};
+
+/*
+ * Map a channel state to its printable name ("INVALID" if out of range).
+ * Argument parenthesized so expression arguments bind correctly.
+ */
+#define TO_GPI_CH_STATE_STR(state) (((state) >= MAX_CH_STATES) ? \
+				    "INVALID" : gpi_ch_state_str[(state)])
+
+/*
+ * GPI commands. Channel commands and event-ring commands occupy two
+ * contiguous sub-ranges; IS_CHAN_CMD() relies on this ordering, so do
+ * not reorder entries.
+ */
+enum gpi_cmd {
+	GPI_CH_CMD_BEGIN,
+	GPI_CH_CMD_ALLOCATE = GPI_CH_CMD_BEGIN,
+	GPI_CH_CMD_START,
+	GPI_CH_CMD_STOP,
+	GPI_CH_CMD_RESET,
+	GPI_CH_CMD_DE_ALLOC,
+	GPI_CH_CMD_UART_SW_STALE,
+	GPI_CH_CMD_UART_RFR_READY,
+	GPI_CH_CMD_UART_RFR_NOT_READY,
+	GPI_CH_CMD_END = GPI_CH_CMD_UART_RFR_NOT_READY,
+	GPI_EV_CMD_BEGIN,
+	GPI_EV_CMD_ALLOCATE = GPI_EV_CMD_BEGIN,
+	GPI_EV_CMD_RESET,
+	GPI_EV_CMD_DEALLOC,
+	GPI_EV_CMD_END = GPI_EV_CMD_DEALLOC,
+	GPI_MAX_CMD,
+};
+
+#define IS_CHAN_CMD(cmd) (cmd <= GPI_CH_CMD_END)
+
+/* printable names for enum gpi_cmd */
+static const char *const gpi_cmd_str[GPI_MAX_CMD] = {
+	[GPI_CH_CMD_ALLOCATE] = "CH ALLOCATE",
+	[GPI_CH_CMD_START] = "CH START",
+	[GPI_CH_CMD_STOP] = "CH STOP",
+	[GPI_CH_CMD_RESET] = "CH_RESET",
+	[GPI_CH_CMD_DE_ALLOC] = "DE ALLOC",
+	[GPI_CH_CMD_UART_SW_STALE] = "UART SW STALE",
+	[GPI_CH_CMD_UART_RFR_READY] = "UART RFR READY",
+	[GPI_CH_CMD_UART_RFR_NOT_READY] = "UART RFR NOT READY",
+	[GPI_EV_CMD_ALLOCATE] = "EV ALLOCATE",
+	[GPI_EV_CMD_RESET] = "EV RESET",
+	[GPI_EV_CMD_DEALLOC] = "EV DEALLOC",
+};
+
+/* map a gpi_cmd to its printable name; argument parenthesized */
+#define TO_GPI_CMD_STR(cmd) (((cmd) >= GPI_MAX_CMD) ? "INVALID" : \
+			     gpi_cmd_str[(cmd)])
+
+/* printable names for client callback events (enum msm_gpi_cb_event) */
+static const char *const gpi_cb_event_str[MSM_GPI_QUP_MAX_EVENT] = {
+	[MSM_GPI_QUP_NOTIFY] = "NOTIFY",
+	[MSM_GPI_QUP_ERROR] = "GLOBAL ERROR",
+	[MSM_GPI_QUP_CH_ERROR] = "CHAN ERROR",
+	[MSM_GPI_QUP_PENDING_EVENT] = "PENDING EVENT",
+	[MSM_GPI_QUP_EOT_DESC_MISMATCH] = "EOT/DESC MISMATCH",
+	[MSM_GPI_QUP_SW_ERROR] = "SW ERROR",
+};
+
+/* map a cb event to its printable name; argument parenthesized */
+#define TO_GPI_CB_EVENT_STR(event) (((event) >= MSM_GPI_QUP_MAX_EVENT) ? \
+				    "INVALID" : gpi_cb_event_str[(event)])
+
+/*
+ * Serial-engine protocols served by GPI.
+ * NOTE(review): numeric values presumably match the DT "protocol"
+ * binding — confirm against the client device-tree documentation.
+ */
+enum se_protocol {
+	SE_PROTOCOL_SPI = 1,
+	SE_PROTOCOL_UART = 2,
+	SE_PROTOCOL_I2C = 3,
+	SE_MAX_PROTOCOL
+};
+
+/*
+ * @DISABLE_STATE: no register access allowed
+ * @CONFIG_STATE:  client has configured the channel
+ * @PREP_HARDWARE: register access is allowed
+ *		   however, no processing EVENTS
+ * @ACTIVE_STATE: channels are fully operational
+ * @PREPARE_TERMINATE: graceful termination of channels
+ *		       register access is allowed
+ * @PAUSE_STATE: channels are active, but not processing any events
+ */
+enum gpi_pm_state {
+	DISABLE_STATE,
+	CONFIG_STATE,
+	PREPARE_HARDWARE,
+	ACTIVE_STATE,
+	PREPARE_TERMINATE,
+	PAUSE_STATE,
+	MAX_PM_STATE
+};
+
+#define REG_ACCESS_VALID(pm_state) (pm_state >= PREPARE_HARDWARE)
+
+/* printable names for enum gpi_pm_state */
+static const char *const gpi_pm_state_str[MAX_PM_STATE] = {
+	[DISABLE_STATE] = "DISABLE",
+	[CONFIG_STATE] = "CONFIG",
+	[PREPARE_HARDWARE] = "PREPARE HARDWARE",
+	[ACTIVE_STATE] = "ACTIVE",
+	[PREPARE_TERMINATE] = "PREPARE TERMINATE",
+	[PAUSE_STATE] = "PAUSE",
+};
+
+/* map a pm state to its printable name; argument parenthesized */
+#define TO_GPI_PM_STR(state) (((state) >= MAX_PM_STATE) ? \
+			      "INVALID" : gpi_pm_state_str[(state)])
+
+/*
+ * Per-command descriptor: hw opcode to write, the channel/event state
+ * expected after the command completes (STATE_IGNORE when the command
+ * does not change state), and the completion timeout. Indexed by
+ * enum gpi_cmd, so the entry order must follow the enum order.
+ */
+static const struct {
+	enum gpi_cmd gpi_cmd;
+	u32 opcode;
+	u32 state;
+	u32 timeout_ms;
+} gpi_cmd_info[GPI_MAX_CMD] = {
+	{
+		GPI_CH_CMD_ALLOCATE,
+		GPI_GPII_n_CH_CMD_ALLOCATE,
+		CH_STATE_ALLOCATED,
+		CMD_TIMEOUT_MS,
+	},
+	{
+		GPI_CH_CMD_START,
+		GPI_GPII_n_CH_CMD_START,
+		CH_STATE_STARTED,
+		CMD_TIMEOUT_MS,
+	},
+	{
+		GPI_CH_CMD_STOP,
+		GPI_GPII_n_CH_CMD_STOP,
+		CH_STATE_STOPPED,
+		CMD_TIMEOUT_MS,
+	},
+	{
+		GPI_CH_CMD_RESET,
+		GPI_GPII_n_CH_CMD_RESET,
+		CH_STATE_ALLOCATED,
+		CMD_TIMEOUT_MS,
+	},
+	{
+		GPI_CH_CMD_DE_ALLOC,
+		GPI_GPII_n_CH_CMD_DE_ALLOC,
+		CH_STATE_NOT_ALLOCATED,
+		CMD_TIMEOUT_MS,
+	},
+	{
+		GPI_CH_CMD_UART_SW_STALE,
+		GPI_GPII_n_CH_CMD_UART_SW_STALE,
+		STATE_IGNORE,
+		CMD_TIMEOUT_MS,
+	},
+	{
+		GPI_CH_CMD_UART_RFR_READY,
+		GPI_GPII_n_CH_CMD_UART_RFR_READY,
+		STATE_IGNORE,
+		CMD_TIMEOUT_MS,
+	},
+	{
+		GPI_CH_CMD_UART_RFR_NOT_READY,
+		GPI_GPII_n_CH_CMD_UART_RFR_NOT_READY,
+		STATE_IGNORE,
+		CMD_TIMEOUT_MS,
+	},
+	{
+		GPI_EV_CMD_ALLOCATE,
+		GPI_GPII_n_EV_CH_CMD_ALLOCATE,
+		EV_STATE_ALLOCATED,
+		CMD_TIMEOUT_MS,
+	},
+	{
+		GPI_EV_CMD_RESET,
+		GPI_GPII_n_EV_CH_CMD_RESET,
+		EV_STATE_ALLOCATED,
+		CMD_TIMEOUT_MS,
+	},
+	{
+		GPI_EV_CMD_DEALLOC,
+		GPI_GPII_n_EV_CH_CMD_DE_ALLOC,
+		EV_STATE_NOT_ALLOCATED,
+		CMD_TIMEOUT_MS,
+	},
+};
+
+/*
+ * A circular ring (transfer, scatterlist shadow, or event ring).
+ * rp/wp are virtual pointers inside [base, base + len).
+ */
+struct gpi_ring {
+	void *pre_aligned;	/* original allocation before alignment */
+	size_t alloc_size;
+	phys_addr_t phys_addr;
+	dma_addr_t dma_handle;
+	void *base;
+	void *wp;	/* write pointer */
+	void *rp;	/* read pointer */
+	u32 len;	/* ring length in bytes */
+	u32 el_size;	/* element size in bytes */
+	u32 elements;
+	bool configured;
+};
+
+/* shadow element tracking a client TRE alongside the hw ring */
+struct sg_tre {
+	void *ptr;
+	void *wp; /* store chan wp for debugging */
+};
+
+/* one entry of the register-access debug trace (see gpi_read/write_reg) */
+struct gpi_dbg_log {
+	void *addr;
+	u64 time;
+	u32 val;
+	bool read;	/* true for reads, false for writes */
+};
+
+/* per-device (GPI block) state */
+struct gpi_dev {
+	struct dma_device dma_device;
+	struct device *dev;
+	struct resource *res;
+	void __iomem *regs;
+	u32 max_gpii; /* maximum # of gpii instances available per gpi block */
+	u32 gpii_mask; /* gpii instances available for apps */
+	u32 ev_factor; /* ev ring length factor */
+	struct gpii *gpiis;
+	void *ilctxt;	/* ipc logging context */
+	u32 ipc_log_lvl;
+	u32 klog_lvl;
+	struct dentry *dentry;	/* debugfs directory */
+};
+
+/* per-channel state (one TX + one RX channel per gpii) */
+struct gpii_chan {
+	struct virt_dma_chan vc;
+	u32 chid;
+	u32 seid;	/* serial engine id */
+	enum se_protocol protocol;
+	enum EV_PRIORITY priority; /* comes from clients DT node */
+	struct gpii *gpii;
+	enum gpi_ch_state ch_state;
+	enum gpi_pm_state pm_state;
+	void __iomem *ch_cntxt_base_reg;
+	void __iomem *ch_cntxt_db_reg;
+	void __iomem *ch_ring_base_lsb_reg,
+		*ch_ring_rp_lsb_reg,
+		*ch_ring_wp_lsb_reg;
+	void __iomem *ch_cmd_reg;
+	u32 req_tres; /* # of tre's client requested */
+	u32 dir;	/* GPI_CHTYPE_DIR_OUT or GPI_CHTYPE_DIR_IN */
+	struct gpi_ring ch_ring;
+	struct gpi_ring sg_ring; /* points to client scatterlist */
+	struct gpi_client_info client_info;
+};
+
+/* per-gpii-instance state; one event ring shared by both channels */
+struct gpii {
+	u32 gpii_id;
+	struct gpii_chan gpii_chan[MAX_CHANNELS_PER_GPII];
+	struct gpi_dev *gpi_dev;
+	enum EV_PRIORITY ev_priority;
+	enum se_protocol protocol;
+	int irq;
+	void __iomem *regs; /* points to gpi top */
+	void __iomem *ev_cntxt_base_reg;
+	void __iomem *ev_cntxt_db_reg;
+	void __iomem *ev_ring_base_lsb_reg,
+		*ev_ring_rp_lsb_reg,
+		*ev_ring_wp_lsb_reg;
+	void __iomem *ev_cmd_reg;
+	void __iomem *ieob_src_reg;
+	void __iomem *ieob_clr_reg;
+	struct mutex ctrl_lock;	/* serializes control operations */
+	enum gpi_ev_state ev_state;
+	bool configured_irq;
+	enum gpi_pm_state pm_state;
+	rwlock_t pm_lock;	/* guards pm_state vs. irq/tasklet paths */
+	struct gpi_ring ev_ring;
+	struct tasklet_struct ev_task; /* event processing tasklet */
+	struct completion cmd_completion;
+	enum gpi_cmd gpi_cmd;	/* command currently in flight */
+	u32 cntxt_type_irq_msk;	/* cached TYPE_IRQ_MSK register value */
+	void *ilctxt;
+	u32 ipc_log_lvl;
+	u32 klog_lvl;
+	struct gpi_dbg_log dbg_log[GPI_DBG_LOG_SIZE];
+	atomic_t dbg_index;
+	char label[GPI_LABEL_SIZE];
+	struct dentry *dentry;
+};
+
+/* one submitted transfer descriptor */
+struct gpi_desc {
+	struct virt_dma_desc vd;
+	void *wp; /* points to TRE last queued during issue_pending */
+	struct sg_tre *sg_tre; /* points to last scatterlist */
+	void *db; /* DB register to program */
+	struct gpii_chan *gpii_chan;
+};
+
+/* channel index -> hw direction; chid 0 is OUT (TX), chid 1 is IN (RX) */
+/* NOTE(review): could be static const if unused outside this file — verify */
+const u32 GPII_CHAN_DIR[MAX_CHANNELS_PER_GPII] = {
+	GPI_CHTYPE_DIR_OUT, GPI_CHTYPE_DIR_IN
+};
+
+/* debugfs root for the driver */
+/* NOTE(review): could be static if unused outside this file — verify */
+struct dentry *pdentry;
+static irqreturn_t gpi_handle_irq(int irq, void *data);
+static void gpi_ring_recycle_ev_element(struct gpi_ring *ring);
+static int gpi_ring_add_element(struct gpi_ring *ring, void **wp);
+static void gpi_process_events(struct gpii *gpii);
+
+/* dma_chan -> owning gpii_chan */
+static inline struct gpii_chan *to_gpii_chan(struct dma_chan *dma_chan)
+{
+	return container_of(dma_chan, struct gpii_chan, vc.chan);
+}
+
+/* virt_dma_desc -> owning gpi_desc */
+static inline struct gpi_desc *to_gpi_desc(struct virt_dma_desc *vd)
+{
+	return container_of(vd, struct gpi_desc, vd);
+}
+
+/*
+ * Translate a virtual ring pointer to its physical address.
+ * No bounds check: @addr must lie within the ring.
+ */
+static inline phys_addr_t to_physical(const struct gpi_ring *const ring,
+				      void *addr)
+{
+	return ring->phys_addr + (addr - ring->base);
+}
+
+/*
+ * Translate a physical ring address back to a virtual pointer.
+ * No bounds check: @addr must lie within the ring.
+ */
+static inline void *to_virtual(const struct gpi_ring *const ring,
+				      phys_addr_t addr)
+{
+	return ring->base + (addr - ring->phys_addr);
+}
+
+#ifdef CONFIG_QCOM_GPI_DMA_DEBUG
+/*
+ * Debug register accessors: record every access in the per-gpii
+ * dbg_log ring before touching hardware. The index wraps with a mask,
+ * which assumes GPI_DBG_LOG_SIZE is a power of two — TODO confirm.
+ */
+static inline u32 gpi_read_reg(struct gpii *gpii, void __iomem *addr)
+{
+	u64 time = sched_clock();
+	unsigned int index = atomic_inc_return(&gpii->dbg_index) - 1;
+	u32 val;
+
+	val = readl_relaxed(addr);
+	index &= (GPI_DBG_LOG_SIZE - 1);
+	(gpii->dbg_log + index)->addr = addr;
+	(gpii->dbg_log + index)->time = time;
+	(gpii->dbg_log + index)->val = val;
+	(gpii->dbg_log + index)->read = true;
+	GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
+		 addr - gpii->regs, val);
+	return val;
+}
+static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
+{
+	u64 time = sched_clock();
+	unsigned int index = atomic_inc_return(&gpii->dbg_index) - 1;
+
+	index &= (GPI_DBG_LOG_SIZE - 1);
+	(gpii->dbg_log + index)->addr = addr;
+	(gpii->dbg_log + index)->time = time;
+	(gpii->dbg_log + index)->val = val;
+	(gpii->dbg_log + index)->read = false;
+
+	GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx  val:0x%x\n",
+		 addr - gpii->regs, val);
+	writel_relaxed(val, addr);
+}
+#else
+/* non-debug accessors: relaxed MMIO access plus optional ipc logging */
+static inline u32 gpi_read_reg(struct gpii *gpii, void __iomem *addr)
+{
+	u32 val = readl_relaxed(addr);
+
+	GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
+		 addr - gpii->regs, val);
+	return val;
+}
+static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
+{
+	GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx  val:0x%x\n",
+		 addr - gpii->regs, val);
+	writel_relaxed(val, addr);
+}
+#endif
+
+/*
+ * gpi_write_reg_field - read/modify/write a bit field of a register
+ * @mask: bits belonging to the field, @shift: field position,
+ * @val: new (unshifted) field value; bits outside @mask are preserved.
+ */
+static inline void gpi_write_reg_field(struct gpii *gpii,
+				       void __iomem *addr,
+				       u32 mask,
+				       u32 shift,
+				       u32 val)
+{
+	u32 reg_val;
+
+	reg_val = gpi_read_reg(gpii, addr);
+	reg_val = (reg_val & ~mask) | ((val << shift) & mask);
+	gpi_write_reg(gpii, addr, reg_val);
+}
+
+/*
+ * Mask every gpii interrupt source, disable INTSET, and release the
+ * irq line. The register table is terminated by a sentinel entry with
+ * offset 0, which ends the programming loop.
+ */
+static void gpi_disable_interrupts(struct gpii *gpii)
+{
+	struct {
+		u32 offset;
+		u32 mask;
+		u32 shift;
+		u32 val;
+	} default_reg[] = {
+		{
+			GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS
+			(gpii->gpii_id),
+			GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK,
+			GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_SHFT,
+			0,
+		},
+		{
+			GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS
+			(gpii->gpii_id),
+			GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
+			GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_SHFT,
+			0,
+		},
+		{
+			GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS
+			(gpii->gpii_id),
+			GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
+			GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_SHFT,
+			0,
+		},
+		{
+			GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS
+			(gpii->gpii_id),
+			GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
+			GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_SHFT,
+			0,
+		},
+		{
+			GPI_GPII_n_CNTXT_GLOB_IRQ_EN_OFFS
+			(gpii->gpii_id),
+			GPI_GPII_n_CNTXT_GLOB_IRQ_EN_BMSK,
+			GPI_GPII_n_CNTXT_GLOB_IRQ_EN_SHFT,
+			0,
+		},
+		{
+			GPI_GPII_n_CNTXT_GPII_IRQ_EN_OFFS
+			(gpii->gpii_id),
+			GPI_GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
+			GPI_GPII_n_CNTXT_GPII_IRQ_EN_SHFT,
+			0,
+		},
+		{
+			GPI_GPII_n_CNTXT_INTSET_OFFS
+			(gpii->gpii_id),
+			GPI_GPII_n_CNTXT_INTSET_BMSK,
+			GPI_GPII_n_CNTXT_INTSET_SHFT,
+			0,
+		},
+		{ 0 },	/* sentinel: offset 0 terminates the loop below */
+	};
+	int i;
+
+	for (i = 0; default_reg[i].offset; i++)
+		gpi_write_reg_field(gpii, gpii->regs +
+				    default_reg[i].offset,
+				    default_reg[i].mask,
+				    default_reg[i].shift,
+				    default_reg[i].val);
+	/* keep the cached mask in sync with the now-cleared hw register */
+	gpii->cntxt_type_irq_msk = 0;
+	devm_free_irq(gpii->gpi_dev->dev, gpii->irq, gpii);
+	gpii->configured_irq = false;
+}
+
+/*
+ * Configure and enable gpii interrupts.
+ *
+ * @settings: DEFAULT_IRQ_SETTINGS programs the full register table;
+ *	      MASK_IEOB_SETTINGS only toggles the IEOB bit in the cached
+ *	      TYPE_IRQ mask (@mask selects enable/disable).
+ * Requests the irq line on first use. Returns 0 on success or the
+ * devm_request_irq() error.
+ */
+static int gpi_config_interrupts(struct gpii *gpii,
+				 enum gpii_irq_settings settings,
+				 bool mask)
+{
+	int ret;
+	int i;
+	const u32 def_type = (GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GENERAL |
+			      GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB |
+			      GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB |
+			      GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL |
+			      GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL);
+	struct {
+		u32 offset;
+		u32 mask;
+		u32 shift;
+		u32 val;
+	} default_reg[] = {
+		{
+			GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS
+			(gpii->gpii_id),
+			GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK,
+			GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_SHFT,
+			def_type,
+		},
+		{
+			GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS
+			(gpii->gpii_id),
+			GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
+			GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_SHFT,
+			GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
+		},
+		{
+			GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS
+			(gpii->gpii_id),
+			GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
+			GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_SHFT,
+			GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
+		},
+		{
+			GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS
+			(gpii->gpii_id),
+			GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
+			GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_SHFT,
+			GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
+		},
+		{
+			GPI_GPII_n_CNTXT_GLOB_IRQ_EN_OFFS
+			(gpii->gpii_id),
+			GPI_GPII_n_CNTXT_GLOB_IRQ_EN_BMSK,
+			GPI_GPII_n_CNTXT_GLOB_IRQ_EN_SHFT,
+			GPI_GPII_n_CNTXT_GLOB_IRQ_EN_ERROR_INT,
+		},
+		{
+			GPI_GPII_n_CNTXT_GPII_IRQ_EN_OFFS
+			(gpii->gpii_id),
+			GPI_GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
+			GPI_GPII_n_CNTXT_GPII_IRQ_EN_SHFT,
+			GPI_GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
+		},
+		{
+			GPI_GPII_n_CNTXT_MSI_BASE_LSB_OFFS
+			(gpii->gpii_id),
+			U32_MAX,
+			0,
+			0x0,
+		},
+		{
+			GPI_GPII_n_CNTXT_MSI_BASE_MSB_OFFS
+			(gpii->gpii_id),
+			U32_MAX,
+			0,
+			0x0,
+		},
+		{
+			GPI_GPII_n_CNTXT_SCRATCH_0_OFFS
+			(gpii->gpii_id),
+			U32_MAX,
+			0,
+			0x0,
+		},
+		{
+			GPI_GPII_n_CNTXT_SCRATCH_1_OFFS
+			(gpii->gpii_id),
+			U32_MAX,
+			0,
+			0x0,
+		},
+		{
+			GPI_GPII_n_CNTXT_INTSET_OFFS
+			(gpii->gpii_id),
+			GPI_GPII_n_CNTXT_INTSET_BMSK,
+			GPI_GPII_n_CNTXT_INTSET_SHFT,
+			0x01,
+		},
+		{
+			GPI_GPII_n_ERROR_LOG_OFFS
+			(gpii->gpii_id),
+			U32_MAX,
+			0,
+			0x00,
+		},
+		{ 0 },	/* sentinel: offset 0 terminates the loop below */
+	};
+
+	GPII_VERB(gpii, GPI_DBG_COMMON, "configured:%c setting:%s mask:%c\n",
+		  (gpii->configured_irq) ? 'F' : 'T',
+		  (settings == DEFAULT_IRQ_SETTINGS) ? "default" : "user_spec",
+		  (mask) ? 'T' : 'F');
+
+	/* request the irq line only once, on first configuration */
+	if (!gpii->configured_irq) {
+		ret = devm_request_irq(gpii->gpi_dev->dev, gpii->irq,
+				       gpi_handle_irq, IRQF_TRIGGER_HIGH,
+				       gpii->label, gpii);
+		if (ret < 0) {
+			GPII_CRITIC(gpii, GPI_DBG_COMMON,
+				    "error request irq:%d ret:%d\n",
+				    gpii->irq, ret);
+			return ret;
+		}
+	}
+
+	if (settings == MASK_IEOB_SETTINGS) {
+		/*
+		 * GPII only uses one EV ring per gpii so we can globally
+		 * enable/disable IEOB interrupt
+		 */
+		if (mask)
+			gpii->cntxt_type_irq_msk |=
+				GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
+		else
+			gpii->cntxt_type_irq_msk &=
+				~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB);
+		gpi_write_reg_field(gpii, gpii->regs +
+			GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id),
+			GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK,
+			GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_SHFT,
+			gpii->cntxt_type_irq_msk);
+	} else {
+		for (i = 0; default_reg[i].offset; i++)
+			gpi_write_reg_field(gpii, gpii->regs +
+					    default_reg[i].offset,
+					    default_reg[i].mask,
+					    default_reg[i].shift,
+					    default_reg[i].val);
+		gpii->cntxt_type_irq_msk = def_type;
+	}
+
+	gpii->configured_irq = true;
+
+	return 0;
+}
+
+/* Sends gpii event or channel command */
+static int gpi_send_cmd(struct gpii *gpii,
+			struct gpii_chan *gpii_chan,
+			enum gpi_cmd gpi_cmd)
+{
+	u32 chid = MAX_CHANNELS_PER_GPII;
+	u32 cmd;
+	unsigned long timeout;
+	void __iomem *cmd_reg;
+
+	if (gpi_cmd >= GPI_MAX_CMD)
+		return -EINVAL;
+	if (IS_CHAN_CMD(gpi_cmd))
+		chid = gpii_chan->chid;
+
+	GPII_INFO(gpii, chid,
+		  "sending cmd: %s\n", TO_GPI_CMD_STR(gpi_cmd));
+
+	/* send opcode and wait for completion */
+	reinit_completion(&gpii->cmd_completion);
+	gpii->gpi_cmd = gpi_cmd;
+
+	cmd_reg = IS_CHAN_CMD(gpi_cmd) ? gpii_chan->ch_cmd_reg :
+		gpii->ev_cmd_reg;
+	cmd = IS_CHAN_CMD(gpi_cmd) ?
+		GPI_GPII_n_CH_CMD(gpi_cmd_info[gpi_cmd].opcode, chid) :
+		GPI_GPII_n_EV_CH_CMD(gpi_cmd_info[gpi_cmd].opcode, 0);
+	gpi_write_reg(gpii, cmd_reg, cmd);
+	timeout = wait_for_completion_timeout(&gpii->cmd_completion,
+			msecs_to_jiffies(gpi_cmd_info[gpi_cmd].timeout_ms));
+
+	if (!timeout) {
+		GPII_ERR(gpii, chid, "cmd: %s completion timeout\n",
+			 TO_GPI_CMD_STR(gpi_cmd));
+		return -EIO;
+	}
+
+	/* confirm new ch state is correct , if the cmd is a state change cmd */
+	if (gpi_cmd_info[gpi_cmd].state == STATE_IGNORE)
+		return 0;
+	if (IS_CHAN_CMD(gpi_cmd) &&
+	    gpii_chan->ch_state == gpi_cmd_info[gpi_cmd].state)
+		return 0;
+	if (!IS_CHAN_CMD(gpi_cmd) &&
+	    gpii->ev_state == gpi_cmd_info[gpi_cmd].state)
+		return 0;
+
+	return -EIO;
+}
+
+/* ring the transfer-ring doorbell with the physical address of @wp */
+static inline void gpi_write_ch_db(struct gpii_chan *gpii_chan,
+				   struct gpi_ring *ring,
+				   void *wp)
+{
+	struct gpii *gpii = gpii_chan->gpii;
+
+	gpi_write_reg(gpii, gpii_chan->ch_cntxt_db_reg,
+		      (u32)to_physical(ring, wp));
+}
+
+/*
+ * Ring the event-ring doorbell with the physical address of @wp.
+ * Uses to_physical() instead of duplicating the address arithmetic,
+ * matching gpi_write_ch_db().
+ */
+static inline void gpi_write_ev_db(struct gpii *gpii,
+				   struct gpi_ring *ring,
+				   void *wp)
+{
+	phys_addr_t p_wp;
+
+	p_wp = to_physical(ring, wp);
+	gpi_write_reg(gpii, gpii->ev_cntxt_db_reg, (u32)p_wp);
+}
+
+/* notify client with generic event */
+static void gpi_generate_cb_event(struct gpii_chan *gpii_chan,
+				  enum msm_gpi_cb_event event,
+				  u64 status)
+{
+	struct gpii *gpii = gpii_chan->gpii;
+	struct gpi_client_info *client_info = &gpii_chan->client_info;
+	struct msm_gpi_cb msm_gpi_cb = {0};
+
+	GPII_ERR(gpii, gpii_chan->chid,
+		 "notifying event:%s with status:%llu\n",
+		 TO_GPI_CB_EVENT_STR(event), status);
+
+	msm_gpi_cb.cb_event = event;
+	msm_gpi_cb.status = status;
+	msm_gpi_cb.timestamp = sched_clock();
+	client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
+			      client_info->cb_param);
+}
+
+/* process transfer completion interrupt */
+static void gpi_process_ieob(struct gpii *gpii)
+{
+	u32 ieob_irq;
+
+	ieob_irq = gpi_read_reg(gpii, gpii->ieob_src_reg);
+	gpi_write_reg(gpii, gpii->ieob_clr_reg, ieob_irq);
+	GPII_VERB(gpii, GPI_DBG_COMMON, "IEOB_IRQ:0x%x\n", ieob_irq);
+
+	/* process events based on priority */
+	if (likely(gpii->ev_priority >= EV_PRIORITY_TASKLET)) {
+		GPII_VERB(gpii, GPI_DBG_COMMON, "scheduling tasklet\n");
+		gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 0);
+		tasklet_schedule(&gpii->ev_task);
+	} else {
+		GPII_VERB(gpii, GPI_DBG_COMMON, "processing events from isr\n");
+		gpi_process_events(gpii);
+	}
+}
+
+/* process channel control interrupt */
+static void gpi_process_ch_ctrl_irq(struct gpii *gpii)
+{
+	u32 gpii_id = gpii->gpii_id;
+	u32 offset = GPI_GPII_n_CNTXT_SRC_GPII_CH_IRQ_OFFS(gpii_id);
+	u32 ch_irq = gpi_read_reg(gpii, gpii->regs + offset);
+	u32 chid;
+	struct gpii_chan *gpii_chan;
+	u32 state;
+
+	/* clear the status */
+	offset = GPI_GPII_n_CNTXT_SRC_CH_IRQ_CLR_OFFS(gpii_id);
+	gpi_write_reg(gpii, gpii->regs + offset, (u32)ch_irq);
+
+	for (chid = 0; chid < MAX_CHANNELS_PER_GPII; chid++) {
+		if (!(BIT(chid) & ch_irq))
+			continue;
+
+		gpii_chan = &gpii->gpii_chan[chid];
+		GPII_VERB(gpii, chid, "processing channel ctrl irq\n");
+		state = gpi_read_reg(gpii, gpii_chan->ch_cntxt_base_reg +
+				     CNTXT_0_CONFIG);
+		state = (state & GPI_GPII_n_CH_k_CNTXT_0_CHSTATE_BMSK) >>
+			GPI_GPII_n_CH_k_CNTXT_0_CHSTATE_SHFT;
+
+		/*
+		 * CH_CMD_DEALLOC cmd always successful. However cmd does
+		 * not change hardware status. So overwriting software state
+		 * to default state.
+		 */
+		if (gpii->gpi_cmd == GPI_CH_CMD_DE_ALLOC)
+			state = DEFAULT_CH_STATE;
+		gpii_chan->ch_state = state;
+		GPII_VERB(gpii, chid, "setting channel to state:%s\n",
+			  TO_GPI_CH_STATE_STR(gpii_chan->ch_state));
+
+		/*
+		 * Triggering complete all if ch_state is not a stop in process.
+		 * Stop in process is a transition state and we will wait for
+		 * stop interrupt before notifying.
+		 */
+		if (gpii_chan->ch_state != CH_STATE_STOP_IN_PROC)
+			complete_all(&gpii->cmd_completion);
+
+		/* notifying clients if in error state */
+		if (gpii_chan->ch_state == CH_STATE_ERROR)
+			gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_CH_ERROR,
+					      __LINE__);
+	}
+}
+
+/* processing gpi level error interrupts */
+static void gpi_process_glob_err_irq(struct gpii *gpii)
+{
+	u32 gpii_id = gpii->gpii_id;
+	u32 offset = GPI_GPII_n_CNTXT_GLOB_IRQ_STTS_OFFS(gpii_id);
+	u32 irq_stts = gpi_read_reg(gpii, gpii->regs + offset);
+	u32 error_log;
+	u32 chid;
+	struct gpii_chan *gpii_chan;
+	struct gpi_client_info *client_info;
+	struct msm_gpi_cb msm_gpi_cb;
+	struct gpi_error_log_entry *log_entry =
+		(struct gpi_error_log_entry *)&error_log;
+
+	offset = GPI_GPII_n_CNTXT_GLOB_IRQ_CLR_OFFS(gpii_id);
+	gpi_write_reg(gpii, gpii->regs + offset, irq_stts);
+
+	/* only error interrupt should be set */
+	if (irq_stts & ~GPI_GLOB_IRQ_ERROR_INT_MSK) {
+		GPII_ERR(gpii, GPI_DBG_COMMON, "invalid error status:0x%x\n",
+			 irq_stts);
+		goto error_irq;
+	}
+
+	offset = GPI_GPII_n_ERROR_LOG_OFFS(gpii_id);
+	error_log = gpi_read_reg(gpii, gpii->regs + offset);
+	gpi_write_reg(gpii, gpii->regs + offset, 0);
+
+	/* get channel info */
+	chid = ((struct gpi_error_log_entry *)&error_log)->chid;
+	if (unlikely(chid >= MAX_CHANNELS_PER_GPII)) {
+		GPII_ERR(gpii, GPI_DBG_COMMON, "invalid chid reported:%u\n",
+			 chid);
+		goto error_irq;
+	}
+
+	gpii_chan = &gpii->gpii_chan[chid];
+	client_info = &gpii_chan->client_info;
+
+	/* notify client with error log */
+	msm_gpi_cb.cb_event = MSM_GPI_QUP_ERROR;
+	msm_gpi_cb.error_log.routine = log_entry->routine;
+	msm_gpi_cb.error_log.type = log_entry->type;
+	msm_gpi_cb.error_log.error_code = log_entry->code;
+	GPII_INFO(gpii, gpii_chan->chid, "sending CB event:%s\n",
+		  TO_GPI_CB_EVENT_STR(msm_gpi_cb.cb_event));
+	GPII_ERR(gpii, gpii_chan->chid,
+		 "ee:%u chtype:%u routine:%u type:%u error_code:%u\n",
+		 log_entry->ee, log_entry->chtype,
+		 msm_gpi_cb.error_log.routine,
+		 msm_gpi_cb.error_log.type,
+		 msm_gpi_cb.error_log.error_code);
+	client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
+			      client_info->cb_param);
+
+	return;
+
+error_irq:
+	for (chid = 0, gpii_chan = gpii->gpii_chan;
+	     chid < MAX_CHANNELS_PER_GPII; chid++, gpii_chan++)
+		gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_FW_ERROR,
+				      irq_stts);
+}
+
+/* gpii interrupt handler */
+static irqreturn_t gpi_handle_irq(int irq, void *data)
+{
+	struct gpii *gpii = data;
+	u32 type;
+	unsigned long flags;
+	u32 offset;
+	u32 gpii_id = gpii->gpii_id;
+
+	GPII_VERB(gpii, GPI_DBG_COMMON, "enter\n");
+
+	read_lock_irqsave(&gpii->pm_lock, flags);
+
+	/*
+	 * States are out of sync to receive interrupt
+	 * while software state is in DISABLE state, bailing out.
+	 */
+	if (!REG_ACCESS_VALID(gpii->pm_state)) {
+		GPII_CRITIC(gpii, GPI_DBG_COMMON,
+			    "receive interrupt while in %s state\n",
+			    TO_GPI_PM_STR(gpii->pm_state));
+		goto exit_irq;
+	}
+
+	offset = GPI_GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
+	type = gpi_read_reg(gpii, gpii->regs + offset);
+
+	do {
+		GPII_VERB(gpii, GPI_DBG_COMMON, "CNTXT_TYPE_IRQ:0x%08x\n",
+			  type);
+		/* global gpii error */
+		if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB) {
+			GPII_ERR(gpii, GPI_DBG_COMMON,
+				 "processing global error irq\n");
+			gpi_process_glob_err_irq(gpii);
+			type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB);
+		}
+
+		/* event control irq */
+		if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL) {
+			u32 ev_state;
+			u32 ev_ch_irq;
+
+			GPII_INFO(gpii, GPI_DBG_COMMON,
+				  "processing EV CTRL interrupt\n");
+			offset = GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_OFFS(gpii_id);
+			ev_ch_irq = gpi_read_reg(gpii, gpii->regs + offset);
+
+			offset = GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS
+				(gpii_id);
+			gpi_write_reg(gpii, gpii->regs + offset, ev_ch_irq);
+			ev_state = gpi_read_reg(gpii, gpii->ev_cntxt_base_reg +
+						CNTXT_0_CONFIG);
+			ev_state &= GPI_GPII_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK;
+			ev_state >>= GPI_GPII_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT;
+
+			/*
+			 * CMD EV_CMD_DEALLOC is always successful. However
+			 * cmd does not change hardware status. So overwriting
+			 * software state to default state.
+			 */
+			if (gpii->gpi_cmd == GPI_EV_CMD_DEALLOC)
+				ev_state = DEFAULT_EV_CH_STATE;
+
+			gpii->ev_state = ev_state;
+			GPII_INFO(gpii, GPI_DBG_COMMON,
+				  "setting EV state to %s\n",
+				  TO_GPI_EV_STATE_STR(gpii->ev_state));
+			complete_all(&gpii->cmd_completion);
+			type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL);
+		}
+
+		/* channel control irq */
+		if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL) {
+			GPII_INFO(gpii, GPI_DBG_COMMON,
+				  "process CH CTRL interrupts\n");
+			gpi_process_ch_ctrl_irq(gpii);
+			type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL);
+		}
+
+		/* transfer complete interrupt */
+		if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB) {
+			GPII_VERB(gpii, GPI_DBG_COMMON,
+				  "process IEOB interrupts\n");
+			gpi_process_ieob(gpii);
+			type &= ~GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
+		}
+
+		if (type) {
+			GPII_CRITIC(gpii, GPI_DBG_COMMON,
+				 "Unhandled interrupt status:0x%x\n", type);
+			goto exit_irq;
+		}
+		offset = GPI_GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
+		type = gpi_read_reg(gpii, gpii->regs + offset);
+	} while (type);
+
+exit_irq:
+	read_unlock_irqrestore(&gpii->pm_lock, flags);
+	GPII_VERB(gpii, GPI_DBG_COMMON, "exit\n");
+
+	return IRQ_HANDLED;
+}
+
+/* process qup notification events */
+static void gpi_process_qup_notif_event(struct gpii_chan *gpii_chan,
+					struct qup_notif_event *notif_event)
+{
+	struct gpii *gpii = gpii_chan->gpii;
+	struct gpi_client_info *client_info = &gpii_chan->client_info;
+	struct msm_gpi_cb msm_gpi_cb;
+
+	GPII_VERB(gpii, gpii_chan->chid,
+		  "status:0x%x time:0x%x count:0x%x\n",
+		  notif_event->status, notif_event->time, notif_event->count);
+
+	msm_gpi_cb.cb_event = MSM_GPI_QUP_NOTIFY;
+	msm_gpi_cb.status = notif_event->status;
+	msm_gpi_cb.timestamp = notif_event->time;
+	msm_gpi_cb.count = notif_event->count;
+	GPII_VERB(gpii, gpii_chan->chid, "sending CB event:%s\n",
+		  TO_GPI_CB_EVENT_STR(msm_gpi_cb.cb_event));
+	client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
+			      client_info->cb_param);
+}
+
+/* process DMA Immediate completion data events */
+static void gpi_process_imed_data_event(struct gpii_chan *gpii_chan,
+					struct immediate_data_event *imed_event)
+{
+	struct gpii *gpii = gpii_chan->gpii;
+	struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
+	struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
+	struct virt_dma_desc *vd;
+	struct gpi_desc *gpi_desc;
+	struct msm_gpi_tre *client_tre;
+	void *sg_tre;
+	void *tre = ch_ring->base +
+		(ch_ring->el_size * imed_event->tre_index);
+	struct msm_gpi_dma_async_tx_cb_param *tx_cb_param;
+
+	/*
+	 * If channel not active don't process event but let
+	 * client know pending event is available
+	 */
+	if (gpii_chan->pm_state != ACTIVE_STATE) {
+		GPII_ERR(gpii, gpii_chan->chid,
+			 "skipping processing event because ch @ %s state\n",
+			 TO_GPI_PM_STR(gpii_chan->pm_state));
+		gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_PENDING_EVENT,
+				      __LINE__);
+		return;
+	}
+
+	spin_lock_irq(&gpii_chan->vc.lock);
+	vd = vchan_next_desc(&gpii_chan->vc);
+	if (!vd) {
+		struct gpi_ere *gpi_ere;
+		struct msm_gpi_tre *gpi_tre;
+
+		spin_unlock_irq(&gpii_chan->vc.lock);
+		GPII_ERR(gpii, gpii_chan->chid,
+			 "event without a pending descriptor!\n");
+		gpi_ere = (struct gpi_ere *)imed_event;
+		GPII_ERR(gpii, gpii_chan->chid, "Event: %08x %08x %08x %08x\n",
+			 gpi_ere->dword[0], gpi_ere->dword[1],
+			 gpi_ere->dword[2], gpi_ere->dword[3]);
+		gpi_tre = tre;
+		GPII_ERR(gpii, gpii_chan->chid,
+			 "Pending TRE: %08x %08x %08x %08x\n",
+			 gpi_tre->dword[0], gpi_tre->dword[1],
+			 gpi_tre->dword[2], gpi_tre->dword[3]);
+		gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
+				      __LINE__);
+		return;
+	}
+	gpi_desc = to_gpi_desc(vd);
+
+	/* Event TR RP gen. don't match descriptor TR */
+	if (gpi_desc->wp != tre) {
+		spin_unlock_irq(&gpii_chan->vc.lock);
+		GPII_ERR(gpii, gpii_chan->chid,
+			 "EOT/EOB received for wrong TRE 0x%0llx != 0x%0llx\n",
+			 to_physical(ch_ring, gpi_desc->wp),
+			 to_physical(ch_ring, tre));
+		gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
+				      __LINE__);
+		return;
+	}
+
+	list_del(&vd->node);
+	spin_unlock_irq(&gpii_chan->vc.lock);
+
+	sg_tre = gpi_desc->sg_tre;
+	client_tre = ((struct sg_tre *)sg_tre)->ptr;
+
+	/*
+	 * RP pointed by Event is to last TRE processed,
+	 * we need to update ring rp to tre + 1
+	 */
+	tre += ch_ring->el_size;
+	if (tre >= (ch_ring->base + ch_ring->len))
+		tre = ch_ring->base;
+	ch_ring->rp = tre;
+	sg_tre += sg_ring->el_size;
+	if (sg_tre >= (sg_ring->base + sg_ring->len))
+		sg_tre = sg_ring->base;
+	sg_ring->rp = sg_tre;
+
+	/* make sure rp updates are immediately visible to all cores */
+	smp_wmb();
+
+	/* update Immediate data from Event back in to TRE if it's RX channel */
+	if (gpii_chan->dir == GPI_CHTYPE_DIR_IN) {
+		client_tre->dword[0] =
+			((struct msm_gpi_tre *)imed_event)->dword[0];
+		client_tre->dword[1] =
+			((struct msm_gpi_tre *)imed_event)->dword[1];
+		client_tre->dword[2] = MSM_GPI_DMA_IMMEDIATE_TRE_DWORD2(
+						      imed_event->length);
+	}
+
+	tx_cb_param = vd->tx.callback_param;
+	if (tx_cb_param) {
+		GPII_VERB(gpii, gpii_chan->chid,
+			  "cb_length:%u compl_code:0x%x status:0x%x\n",
+			  imed_event->length, imed_event->code,
+			  imed_event->status);
+		tx_cb_param->length = imed_event->length;
+		tx_cb_param->completion_code = imed_event->code;
+		tx_cb_param->status = imed_event->status;
+	}
+
+	spin_lock_irq(&gpii_chan->vc.lock);
+	vchan_cookie_complete(vd);
+	spin_unlock_irq(&gpii_chan->vc.lock);
+}
+
+/* processing transfer completion events */
+static void gpi_process_xfer_compl_event(struct gpii_chan *gpii_chan,
+					 struct xfer_compl_event *compl_event)
+{
+	struct gpii *gpii = gpii_chan->gpii;
+	struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
+	struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
+	void *ev_rp = to_virtual(ch_ring, compl_event->ptr);
+	struct msm_gpi_tre *client_tre;
+	struct virt_dma_desc *vd;
+	struct msm_gpi_dma_async_tx_cb_param *tx_cb_param;
+	struct gpi_desc *gpi_desc;
+	void *sg_tre = NULL;
+
+	/* only process events on active channel */
+	if (unlikely(gpii_chan->pm_state != ACTIVE_STATE)) {
+		GPII_ERR(gpii, gpii_chan->chid,
+			 "skipping processing event because ch @ %s state\n",
+			 TO_GPI_PM_STR(gpii_chan->pm_state));
+		gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_PENDING_EVENT,
+				      __LINE__);
+		return;
+	}
+
+	spin_lock_irq(&gpii_chan->vc.lock);
+	vd = vchan_next_desc(&gpii_chan->vc);
+	if (!vd) {
+		struct gpi_ere *gpi_ere;
+
+		spin_unlock_irq(&gpii_chan->vc.lock);
+		GPII_ERR(gpii, gpii_chan->chid,
+			 "Event without a pending descriptor!\n");
+		gpi_ere = (struct gpi_ere *)compl_event;
+		GPII_ERR(gpii, gpii_chan->chid, "Event: %08x %08x %08x %08x\n",
+			 gpi_ere->dword[0], gpi_ere->dword[1],
+			 gpi_ere->dword[2], gpi_ere->dword[3]);
+		gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
+				      __LINE__);
+		return;
+	}
+
+	gpi_desc = to_gpi_desc(vd);
+
+	/* TRE Event generated didn't match descriptor's TRE */
+	if (gpi_desc->wp != ev_rp) {
+		spin_unlock_irq(&gpii_chan->vc.lock);
+		GPII_ERR(gpii, gpii_chan->chid,
+			 "EOT\EOB received for wrong TRE 0x%0llx != 0x%0llx\n",
+			 to_physical(ch_ring, gpi_desc->wp),
+			 to_physical(ch_ring, ev_rp));
+		gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
+				      __LINE__);
+		return;
+	}
+
+	list_del(&vd->node);
+	spin_unlock_irq(&gpii_chan->vc.lock);
+
+	sg_tre = gpi_desc->sg_tre;
+	client_tre = ((struct sg_tre *)sg_tre)->ptr;
+
+	/*
+	 * RP pointed by Event is to last TRE processed,
+	 * we need to update ring rp to ev_rp + 1
+	 */
+	ev_rp += ch_ring->el_size;
+	if (ev_rp >= (ch_ring->base + ch_ring->len))
+		ev_rp = ch_ring->base;
+	ch_ring->rp = ev_rp;
+	sg_tre += sg_ring->el_size;
+	if (sg_tre >= (sg_ring->base + sg_ring->len))
+		sg_tre = sg_ring->base;
+	sg_ring->rp = sg_tre;
+
+	/* update must be visible to other cores */
+	smp_wmb();
+
+	tx_cb_param = vd->tx.callback_param;
+	if (tx_cb_param) {
+		GPII_VERB(gpii, gpii_chan->chid,
+			  "cb_length:%u compl_code:0x%x status:0x%x\n",
+			  compl_event->length, compl_event->code,
+			  compl_event->status);
+		tx_cb_param->length = compl_event->length;
+		tx_cb_param->completion_code = compl_event->code;
+		tx_cb_param->status = compl_event->status;
+	}
+
+	spin_lock_irq(&gpii_chan->vc.lock);
+	vchan_cookie_complete(vd);
+	spin_unlock_irq(&gpii_chan->vc.lock);
+}
+
/*
 * gpi_process_events - drain all pending events from the GPII event ring.
 *
 * Compares the hardware read pointer (lower 32 bits, read from the event
 * ring context RP register) against the driver's local read pointer and
 * processes every event in between, dispatching on the event type field.
 * After each drain pass the event-ring doorbell is rung and pending IEOB
 * interrupt sources are cleared; the outer loop then re-reads the hardware
 * RP to pick up events that arrived while processing.
 */
static void gpi_process_events(struct gpii *gpii)
{
	struct gpi_ring *ev_ring = &gpii->ev_ring;
	u32 cntxt_rp, local_rp;
	union gpi_event *gpi_event;
	struct gpii_chan *gpii_chan;
	u32 chid, type;
	u32 ieob_irq;

	/*
	 * NOTE(review): only the low 32 bits of the physical RP are compared;
	 * presumably the ring never straddles a 4 GB boundary — confirm.
	 */
	cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
	local_rp = (u32)to_physical(ev_ring, (void *)ev_ring->rp);

	/* NOTE(review): "0x08%x" looks like a typo for "0x%08x" */
	GPII_VERB(gpii, GPI_DBG_COMMON, "cntxt_rp: 0x08%x local_rp:0x08%x\n",
		  cntxt_rp, local_rp);

	do {
		while (local_rp != cntxt_rp) {
			gpi_event = ev_ring->rp;
			/* chid/type live at the same offsets in every event */
			chid = gpi_event->xfer_compl_event.chid;
			type = gpi_event->xfer_compl_event.type;
			GPII_VERB(gpii, GPI_DBG_COMMON,
				  "rp:0x%08x chid:%u type:0x%x %08x %08x %08x %08x\n",
				  local_rp, chid, type,
				  gpi_event->gpi_ere.dword[0],
				  gpi_event->gpi_ere.dword[1],
				  gpi_event->gpi_ere.dword[2],
				  gpi_event->gpi_ere.dword[3]);

			switch (type) {
			case XFER_COMPLETE_EV_TYPE:
				gpii_chan = &gpii->gpii_chan[chid];
				gpi_process_xfer_compl_event(gpii_chan,
						&gpi_event->xfer_compl_event);
				break;
			case STALE_EV_TYPE:
				/* marked stale by gpi_mark_stale_events() */
				GPII_VERB(gpii, GPI_DBG_COMMON,
					  "stale event, not processing\n");
				break;
			case IMMEDIATE_DATA_EV_TYPE:
				gpii_chan = &gpii->gpii_chan[chid];
				gpi_process_imed_data_event(gpii_chan,
					&gpi_event->immediate_data_event);
				break;
			case QUP_NOTIF_EV_TYPE:
				gpii_chan = &gpii->gpii_chan[chid];
				gpi_process_qup_notif_event(gpii_chan,
						&gpi_event->qup_notif_event);
				break;
			default:
				GPII_VERB(gpii, GPI_DBG_COMMON,
					  "not supported event type:0x%x\n",
					  type);
			}
			/* free the slot back to hardware and advance rp/wp */
			gpi_ring_recycle_ev_element(ev_ring);
			local_rp = (u32)to_physical(ev_ring,
						    (void *)ev_ring->rp);
		}
		/* tell hardware which event elements have been consumed */
		gpi_write_ev_db(gpii, ev_ring, ev_ring->wp);

		/* clear pending IEOB events */
		ieob_irq = gpi_read_reg(gpii, gpii->ieob_src_reg);
		gpi_write_reg(gpii, gpii->ieob_clr_reg, ieob_irq);

		cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
		local_rp = (u32)to_physical(ev_ring, (void *)ev_ring->rp);

	} while (cntxt_rp != local_rp);

	GPII_VERB(gpii, GPI_DBG_COMMON, "exit: c_rp:0x%x l_rp:0x%x\n", cntxt_rp,
		  local_rp);
}
+
/*
 * gpi_ev_tasklet - bottom half that drains the event ring.
 *
 * Runs with gpii->pm_lock held for reading so teardown paths (which take
 * the lock for writing) cannot change pm_state or free rings mid-drain.
 * On exit it re-enables the IEOB interrupt (presumably masked before the
 * tasklet was scheduled — confirm against the IRQ handler).
 */
static void gpi_ev_tasklet(unsigned long data)
{
	struct gpii *gpii = (struct gpii *)data;

	GPII_VERB(gpii, GPI_DBG_COMMON, "enter\n");

	read_lock_bh(&gpii->pm_lock);
	/* bail out if the gpii is not in a register-accessible state */
	if (!REG_ACCESS_VALID(gpii->pm_state)) {
		read_unlock_bh(&gpii->pm_lock);
		GPII_ERR(gpii, GPI_DBG_COMMON,
			 "not processing any events, pm_state:%s\n",
			 TO_GPI_PM_STR(gpii->pm_state));
		return;
	}

	/* process the events */
	gpi_process_events(gpii);

	/* enable IEOB, switching back to interrupts */
	gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 1);
	read_unlock_bh(&gpii->pm_lock);

	GPII_VERB(gpii, GPI_DBG_COMMON, "exit\n");
}
+
/*
 * gpi_mark_stale_events - mark all pending events for @gpii_chan as stale.
 *
 * Walks the event ring from the driver's local read pointer up to the
 * hardware read pointer, rewriting the type field of any event belonging
 * to this channel to STALE_EV_TYPE so gpi_process_events() skips it.
 * Called from gpi_reset_chan() under the gpii->pm_lock write lock, which
 * keeps the event tasklet out of the ring while events are rewritten.
 */
void gpi_mark_stale_events(struct gpii_chan *gpii_chan)
{
	struct gpii *gpii = gpii_chan->gpii;
	struct gpi_ring *ev_ring = &gpii->ev_ring;
	void *ev_rp;
	u32 cntxt_rp, local_rp;

	GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
	cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);

	/* walk with a private cursor; ev_ring->rp itself is left untouched */
	ev_rp = ev_ring->rp;
	local_rp = (u32)to_physical(ev_ring, ev_rp);
	while (local_rp != cntxt_rp) {
		union gpi_event *gpi_event = ev_rp;
		u32 chid = gpi_event->xfer_compl_event.chid;

		if (chid == gpii_chan->chid)
			gpi_event->xfer_compl_event.type = STALE_EV_TYPE;
		ev_rp += ev_ring->el_size;
		if (ev_rp >= (ev_ring->base + ev_ring->len))
			ev_rp = ev_ring->base;
		/* re-read HW rp so events arriving mid-walk are also marked */
		cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
		local_rp = (u32)to_physical(ev_ring, ev_rp);
	}
}
+
/*
 * gpi_reset_chan - reset SW state and issue a channel RESET or DE_ALLOC.
 * @gpii_chan: channel to reset
 * @gpi_cmd:   command to issue (callers pass GPI_CH_CMD_RESET or
 *             GPI_CH_CMD_DE_ALLOC)
 *
 * On success the transfer and sg rings are rewound to their base, any
 * events still queued for this channel are marked stale, and all
 * outstanding virt-dma descriptors are removed and freed.
 *
 * Return: 0 on success, or the error from gpi_send_cmd().
 */
static int gpi_reset_chan(struct gpii_chan *gpii_chan, enum gpi_cmd gpi_cmd)
{
	struct gpii *gpii = gpii_chan->gpii;
	struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
	struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
	unsigned long flags;
	LIST_HEAD(list);
	int ret;

	GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
	ret = gpi_send_cmd(gpii, gpii_chan, gpi_cmd);
	if (ret) {
		GPII_ERR(gpii, gpii_chan->chid,
			 "Error with cmd:%s ret:%d\n",
			 TO_GPI_CMD_STR(gpi_cmd), ret);
		return ret;
	}

	/* initialize the local ring ptrs */
	ch_ring->rp = ch_ring->base;
	ch_ring->wp = ch_ring->base;
	sg_ring->rp = sg_ring->base;
	sg_ring->wp = sg_ring->base;

	/* visible to other cores */
	smp_wmb();

	/* check event ring for any stale events; pm_lock write side keeps
	 * the event tasklet out while the ring contents are rewritten
	 */
	write_lock_irq(&gpii->pm_lock);
	gpi_mark_stale_events(gpii_chan);

	/* remove all async descriptors */
	spin_lock_irqsave(&gpii_chan->vc.lock, flags);
	vchan_get_all_descriptors(&gpii_chan->vc, &list);
	spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
	write_unlock_irq(&gpii->pm_lock);
	/* free outside the locks */
	vchan_dma_desc_free_list(&gpii_chan->vc, &list);

	return 0;
}
+
+static int gpi_start_chan(struct gpii_chan *gpii_chan)
+{
+	struct gpii *gpii = gpii_chan->gpii;
+	int ret;
+
+	GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
+
+	ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_START);
+	if (ret) {
+		GPII_ERR(gpii, gpii_chan->chid,
+			 "Error with cmd:%s ret:%d\n",
+			 TO_GPI_CMD_STR(GPI_CH_CMD_START), ret);
+		return ret;
+	}
+
+	/* gpii CH is active now */
+	write_lock_irq(&gpii->pm_lock);
+	gpii_chan->pm_state = ACTIVE_STATE;
+	write_unlock_irq(&gpii->pm_lock);
+
+	return 0;
+}
+
/*
 * gpi_alloc_chan - allocate and configure a transfer channel.
 * @gpii_chan:      channel to program
 * @send_alloc_cmd: when true, issue GPI_CH_CMD_ALLOCATE first; callers
 *                  pass false to only reprogram the context registers
 *                  (e.g. after a channel reset in gpi_terminate_all())
 *
 * Programs the channel context, scratch, and QOS registers from a
 * NULL-terminated table of {base, offset, value} triples.
 *
 * Return: 0 on success, or the error from gpi_send_cmd().
 */
static int gpi_alloc_chan(struct gpii_chan *gpii_chan, bool send_alloc_cmd)
{
	struct gpii *gpii = gpii_chan->gpii;
	struct gpi_ring *ring = &gpii_chan->ch_ring;
	int i;
	int ret;
	/* register programming table; terminated by a NULL base */
	struct {
		void *base;
		int offset;
		u32 val;
	} ch_reg[] = {
		{
			gpii_chan->ch_cntxt_base_reg,
			CNTXT_0_CONFIG,
			GPI_GPII_n_CH_k_CNTXT_0(ring->el_size, 0,
						gpii_chan->dir,
						GPI_CHTYPE_PROTO_GPI),
		},
		{
			gpii_chan->ch_cntxt_base_reg,
			CNTXT_1_R_LENGTH,
			ring->len,
		},
		{
			gpii_chan->ch_cntxt_base_reg,
			CNTXT_2_RING_BASE_LSB,
			(u32)ring->phys_addr,
		},
		{
			gpii_chan->ch_cntxt_base_reg,
			CNTXT_3_RING_BASE_MSB,
			(u32)(ring->phys_addr >> 32),
		},
		{ /* program MSB of DB register with ring base */
			gpii_chan->ch_cntxt_db_reg,
			CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
			(u32)(ring->phys_addr >> 32),
		},
		{
			gpii->regs,
			GPI_GPII_n_CH_k_SCRATCH_0_OFFS(gpii->gpii_id,
						       gpii_chan->chid),
			GPI_GPII_n_CH_K_SCRATCH_0(!gpii_chan->chid,
						  gpii_chan->protocol,
						  gpii_chan->seid),
		},
		{
			gpii->regs,
			GPI_GPII_n_CH_k_SCRATCH_1_OFFS(gpii->gpii_id,
						       gpii_chan->chid),
			0,
		},
		{
			gpii->regs,
			GPI_GPII_n_CH_k_SCRATCH_2_OFFS(gpii->gpii_id,
						       gpii_chan->chid),
			0,
		},
		{
			gpii->regs,
			GPI_GPII_n_CH_k_SCRATCH_3_OFFS(gpii->gpii_id,
						       gpii_chan->chid),
			0,
		},
		{
			gpii->regs,
			GPI_GPII_n_CH_k_QOS_OFFS(gpii->gpii_id,
						 gpii_chan->chid),
			1,
		},
		{ NULL },
	};

	GPII_INFO(gpii, gpii_chan->chid, "Enter\n");

	if (send_alloc_cmd) {
		ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_ALLOCATE);
		if (ret) {
			GPII_ERR(gpii, gpii_chan->chid,
				 "Error with cmd:%s ret:%d\n",
				 TO_GPI_CMD_STR(GPI_CH_CMD_ALLOCATE), ret);
			return ret;
		}
	}

	/* program channel cntxt registers */
	for (i = 0; ch_reg[i].base; i++)
		gpi_write_reg(gpii, ch_reg[i].base + ch_reg[i].offset,
			      ch_reg[i].val);
	/* flush all the writes */
	wmb();
	return 0;
}
+
+/* allocate and configure event ring */
+static int gpi_alloc_ev_chan(struct gpii *gpii)
+{
+	struct gpi_ring *ring = &gpii->ev_ring;
+	int i;
+	int ret;
+	struct {
+		void *base;
+		int offset;
+		u32 val;
+	} ev_reg[] = {
+		{
+			gpii->ev_cntxt_base_reg,
+			CNTXT_0_CONFIG,
+			GPI_GPII_n_EV_CH_k_CNTXT_0(ring->el_size,
+						   GPI_INTTYPE_IRQ,
+						   GPI_CHTYPE_GPI_EV),
+		},
+		{
+			gpii->ev_cntxt_base_reg,
+			CNTXT_1_R_LENGTH,
+			ring->len,
+		},
+		{
+			gpii->ev_cntxt_base_reg,
+			CNTXT_2_RING_BASE_LSB,
+			(u32)ring->phys_addr,
+		},
+		{
+			gpii->ev_cntxt_base_reg,
+			CNTXT_3_RING_BASE_MSB,
+			(u32)(ring->phys_addr >> 32),
+		},
+		{
+			/* program db msg with ring base msb */
+			gpii->ev_cntxt_db_reg,
+			CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
+			(u32)(ring->phys_addr >> 32),
+		},
+		{
+			gpii->ev_cntxt_base_reg,
+			CNTXT_8_RING_INT_MOD,
+			0,
+		},
+		{
+			gpii->ev_cntxt_base_reg,
+			CNTXT_10_RING_MSI_LSB,
+			0,
+		},
+		{
+			gpii->ev_cntxt_base_reg,
+			CNTXT_11_RING_MSI_MSB,
+			0,
+		},
+		{
+			gpii->ev_cntxt_base_reg,
+			CNTXT_8_RING_INT_MOD,
+			0,
+		},
+		{
+			gpii->ev_cntxt_base_reg,
+			CNTXT_12_RING_RP_UPDATE_LSB,
+			0,
+		},
+		{
+			gpii->ev_cntxt_base_reg,
+			CNTXT_13_RING_RP_UPDATE_MSB,
+			0,
+		},
+		{ NULL },
+	};
+
+	GPII_INFO(gpii, GPI_DBG_COMMON, "enter\n");
+
+	ret = gpi_send_cmd(gpii, NULL, GPI_EV_CMD_ALLOCATE);
+	if (ret) {
+		GPII_ERR(gpii, GPI_DBG_COMMON, "error with cmd:%s ret:%d\n",
+			 TO_GPI_CMD_STR(GPI_EV_CMD_ALLOCATE), ret);
+		return ret;
+	}
+
+	/* program event context */
+	for (i = 0; ev_reg[i].base; i++)
+		gpi_write_reg(gpii, ev_reg[i].base + ev_reg[i].offset,
+			      ev_reg[i].val);
+
+	/* add events to ring */
+	ring->wp = (ring->base + ring->len - ring->el_size);
+
+	/* flush all the writes */
+	wmb();
+
+	/* gpii is active now */
+	write_lock_irq(&gpii->pm_lock);
+	gpii->pm_state = ACTIVE_STATE;
+	write_unlock_irq(&gpii->pm_lock);
+	gpi_write_ev_db(gpii, ring, ring->wp);
+
+	return 0;
+}
+
+/* calculate # of ERE/TRE available to queue */
+static int gpi_ring_num_elements_avail(const struct gpi_ring * const ring)
+{
+	int elements = 0;
+
+	if (ring->wp < ring->rp)
+		elements = ((ring->rp - ring->wp) / ring->el_size) - 1;
+	else {
+		elements = (ring->rp - ring->base) / ring->el_size;
+		elements += ((ring->base + ring->len - ring->wp) /
+			     ring->el_size) - 1;
+	}
+
+	return elements;
+}
+
+static int gpi_ring_add_element(struct gpi_ring *ring, void **wp)
+{
+
+	if (gpi_ring_num_elements_avail(ring) <= 0)
+		return -ENOMEM;
+
+	*wp = ring->wp;
+	ring->wp += ring->el_size;
+	if (ring->wp  >= (ring->base + ring->len))
+		ring->wp = ring->base;
+
+	/* visible to other cores */
+	smp_wmb();
+
+	return 0;
+}
+
+static void gpi_ring_recycle_ev_element(struct gpi_ring *ring)
+{
+	/* Update the WP */
+	ring->wp += ring->el_size;
+	if (ring->wp  >= (ring->base + ring->len))
+		ring->wp = ring->base;
+
+	/* Update the RP */
+	ring->rp += ring->el_size;
+	if (ring->rp  >= (ring->base + ring->len))
+		ring->rp = ring->base;
+
+	/* visible to other cores */
+	smp_wmb();
+}
+
+static void gpi_free_ring(struct gpi_ring *ring,
+			  struct gpii *gpii)
+{
+	if (ring->dma_handle)
+		dma_free_coherent(gpii->gpi_dev->dev, ring->alloc_size,
+				  ring->pre_aligned, ring->dma_handle);
+	else
+		vfree(ring->pre_aligned);
+	memset(ring, 0, sizeof(*ring));
+}
+
/*
 * gpi_alloc_ring - allocate memory for a transfer or event ring.
 * @ring:           ring descriptor to fill in
 * @elements:       requested number of elements
 * @el_size:        size of one element in bytes
 * @gpii:           owning gpii (used for logging and the DMA device)
 * @alloc_coherent: true for DMA-coherent, power-of-2-aligned memory
 *                  (hardware-visible rings); false for plain vmalloc()
 *                  (SW-only rings, e.g. the sg ring)
 *
 * In the coherent case the length is rounded up to a power of two and
 * nearly twice that is allocated (len + len - 1) so a len-aligned region
 * can always be carved out of the returned buffer.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
static int gpi_alloc_ring(struct gpi_ring *ring,
			  u32 elements,
			  u32 el_size,
			  struct gpii *gpii,
			  bool alloc_coherent)
{
	u64 len = elements * el_size;
	int bit;

	if (alloc_coherent) {
		/* ring len must be power of 2 */
		/*
		 * NOTE(review): scans only the low 32 bits of a u64 through an
		 * unsigned long pointer — assumes len < 4GB and little-endian
		 * layout; confirm.
		 */
		bit = find_last_bit((unsigned long *)&len, 32);
		if (((1 << bit) - 1) & len)
			bit++;
		len = 1 << bit;
		/* over-allocate so a len-aligned block always fits */
		ring->alloc_size = (len + (len - 1));
		GPII_INFO(gpii, GPI_DBG_COMMON,
			  "#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%lu\n",
			  elements, el_size, (elements * el_size), len,
			  ring->alloc_size);
		ring->pre_aligned = dma_alloc_coherent(gpii->gpi_dev->dev,
						       ring->alloc_size,
						       &ring->dma_handle,
						       GFP_KERNEL);
		if (!ring->pre_aligned) {
			GPII_CRITIC(gpii, GPI_DBG_COMMON,
				    "could not alloc size:%lu mem for ring\n",
				    ring->alloc_size);
			return -ENOMEM;
		}

		/* align the physical mem */
		ring->phys_addr = (ring->dma_handle + (len - 1)) & ~(len - 1);
		/* base points at the virtual address matching phys_addr */
		ring->base = ring->pre_aligned +
			(ring->phys_addr - ring->dma_handle);
	} else {
		ring->pre_aligned = vmalloc(len);
		if (!ring->pre_aligned) {
			GPII_CRITIC(gpii, GPI_DBG_COMMON,
				    "could not allocsize:%llu mem for ring\n",
				    len);
			return -ENOMEM;
		}
		/* zero dma_handle marks this ring as vmalloc-backed */
		ring->phys_addr = 0;
		ring->dma_handle = 0;
		ring->base = ring->pre_aligned;
	}

	ring->rp = ring->base;
	ring->wp = ring->base;
	ring->len = len;
	ring->el_size = el_size;
	ring->elements = ring->len / ring->el_size;
	memset(ring->base, 0, ring->len);
	ring->configured = true;

	/* update to other cores */
	smp_wmb();

	GPII_INFO(gpii, GPI_DBG_COMMON,
		  "phy_pre:0x%0llx phy_alig:0x%0llx len:%u el_size:%u elements:%u\n",
		  ring->dma_handle, ring->phys_addr, ring->len, ring->el_size,
		  ring->elements);

	return 0;
}
+
/*
 * gpi_queue_xfer - copy one client TRE into the transfer ring.
 * @gpi_tre: client-supplied TRE to copy
 * @wp:      out; receives the ring slot the TRE was copied into
 * @sg_tre:  out; receives the sg-ring bookkeeping entry
 *
 * Reserves one slot in the channel ring and one in the sg ring, copies
 * the TRE, and records the client TRE / ring slot pair in the sg entry.
 * Ring-full here is logged but not propagated; gpi_prep_slave_sg()
 * pre-checks capacity before queuing, so it is not expected in practice.
 */
static void gpi_queue_xfer(struct gpii *gpii,
			   struct gpii_chan *gpii_chan,
			   struct msm_gpi_tre *gpi_tre,
			   void **wp,
			   struct sg_tre **sg_tre)
{
	struct msm_gpi_tre *ch_tre;
	int ret;

	/* get next tre location we can copy */
	ret = gpi_ring_add_element(&gpii_chan->ch_ring, (void **)&ch_tre);
	if (unlikely(ret)) {
		GPII_CRITIC(gpii, gpii_chan->chid,
			    "Error adding ring element to xfer ring\n");
		return;
	}
	/* get next sg tre location we can use */
	ret = gpi_ring_add_element(&gpii_chan->sg_ring, (void **)sg_tre);
	if (unlikely(ret)) {
		GPII_CRITIC(gpii, gpii_chan->chid,
			    "Error adding ring element to sg ring\n");
		return;
	}

	/* copy the tre info */
	memcpy(ch_tre, gpi_tre, sizeof(*ch_tre));
	/* remember which client TRE maps to which ring slot */
	(*sg_tre)->ptr = gpi_tre;
	(*sg_tre)->wp = ch_tre;
	*wp = ch_tre;
}
+
/*
 * gpi_terminate_all - dmaengine terminate_all: stop, reset, and restart.
 *
 * For non-UART protocols both channels of the gpii are stopped, reset
 * (dropping all pending TREs and descriptors), reprogrammed, and
 * restarted in lockstep; for UART only the caller's channel is handled.
 * A failed STOP is logged but the reset still proceeds.
 *
 * Return: 0 on success, or the first error from reset/alloc/start.
 */
int gpi_terminate_all(struct dma_chan *chan)
{
	struct gpii_chan *gpii_chan = to_gpii_chan(chan);
	struct gpii *gpii = gpii_chan->gpii;
	int schid, echid, i;
	int ret = 0;

	GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
	mutex_lock(&gpii->ctrl_lock);

	/*
	 * treat both channels as a group if its protocol is not UART
	 * STOP, RESET, or START needs to be in lockstep
	 */
	schid = (gpii->protocol == SE_PROTOCOL_UART) ? gpii_chan->chid : 0;
	echid = (gpii->protocol == SE_PROTOCOL_UART) ? schid + 1 :
		MAX_CHANNELS_PER_GPII;

	/* stop the channel */
	for (i = schid; i < echid; i++) {
		gpii_chan = &gpii->gpii_chan[i];

		/* disable ch state so no more TRE processing */
		write_lock_irq(&gpii->pm_lock);
		gpii_chan->pm_state = PREPARE_TERMINATE;
		write_unlock_irq(&gpii->pm_lock);

		/* send command to Stop the channel */
		ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_STOP);
		if (ret)
			GPII_ERR(gpii, gpii_chan->chid,
				 "Error Stopping Channel:%d resetting anyway\n",
				 ret);
	}

	/* reset the channels (clears any pending tre) */
	for (i = schid; i < echid; i++) {
		gpii_chan = &gpii->gpii_chan[i];

		ret = gpi_reset_chan(gpii_chan, GPI_CH_CMD_RESET);
		if (ret) {
			GPII_ERR(gpii, gpii_chan->chid,
				 "Error resetting channel ret:%d\n", ret);
			goto terminate_exit;
		}

		/* reprogram channel CNTXT */
		ret = gpi_alloc_chan(gpii_chan, false);
		if (ret) {
			GPII_ERR(gpii, gpii_chan->chid,
				 "Error alloc_channel ret:%d\n", ret);
			goto terminate_exit;
		}
	}

	/* restart the channels */
	for (i = schid; i < echid; i++) {
		gpii_chan = &gpii->gpii_chan[i];

		ret = gpi_start_chan(gpii_chan);
		if (ret) {
			GPII_ERR(gpii, gpii_chan->chid,
				 "Error Starting Channel ret:%d\n", ret);
			goto terminate_exit;
		}
	}

terminate_exit:
	mutex_unlock(&gpii->ctrl_lock);
	return ret;
}
+
/*
 * gpi_pause - dmaengine pause: stop DMA transfer for all channels.
 *
 * Pause/resume is per-gpii, not per-channel: both channels are stopped,
 * the gpii IRQ is disabled, the event tasklet is drained, and pm_state
 * moves to PAUSE_STATE. A second call while paused is a no-op.
 *
 * Return: 0 on success, or the error from a failed STOP command.
 */
static int gpi_pause(struct dma_chan *chan)
{
	struct gpii_chan *gpii_chan = to_gpii_chan(chan);
	struct gpii *gpii = gpii_chan->gpii;
	int i, ret;

	GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
	mutex_lock(&gpii->ctrl_lock);

	/*
	 * pause/resume are per gpii not per channel, so
	 * client needs to call pause only once
	 */
	if (gpii->pm_state == PAUSE_STATE) {
		GPII_INFO(gpii, gpii_chan->chid,
			  "channel is already paused\n");
		mutex_unlock(&gpii->ctrl_lock);
		return 0;
	}

	/* send stop command to stop the channels */
	for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
		ret = gpi_send_cmd(gpii, &gpii->gpii_chan[i], GPI_CH_CMD_STOP);
		if (ret) {
			GPII_ERR(gpii, gpii->gpii_chan[i].chid,
				 "Error stopping chan, ret:%d\n", ret);
			mutex_unlock(&gpii->ctrl_lock);
			return ret;
		}
	}

	disable_irq(gpii->irq);

	/* Wait for threads to complete out */
	tasklet_kill(&gpii->ev_task);

	write_lock_irq(&gpii->pm_lock);
	gpii->pm_state = PAUSE_STATE;
	write_unlock_irq(&gpii->pm_lock);
	mutex_unlock(&gpii->ctrl_lock);

	return 0;
}
+
+/* resume dma transfer */
+static int gpi_resume(struct dma_chan *chan)
+{
+	struct gpii_chan *gpii_chan = to_gpii_chan(chan);
+	struct gpii *gpii = gpii_chan->gpii;
+	int i;
+	int ret;
+
+	GPII_INFO(gpii, gpii_chan->chid, "enter\n");
+
+	mutex_lock(&gpii->ctrl_lock);
+	if (gpii->pm_state == ACTIVE_STATE) {
+		GPII_INFO(gpii, gpii_chan->chid,
+			  "channel is already active\n");
+		mutex_unlock(&gpii->ctrl_lock);
+		return 0;
+	}
+
+	enable_irq(gpii->irq);
+
+	/* send start command to start the channels */
+	for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
+		ret = gpi_send_cmd(gpii, &gpii->gpii_chan[i], GPI_CH_CMD_START);
+		if (ret) {
+			GPII_ERR(gpii, gpii->gpii_chan[i].chid,
+				 "Erro starting chan, ret:%d\n", ret);
+			mutex_unlock(&gpii->ctrl_lock);
+			return ret;
+		}
+	}
+
+	write_lock_irq(&gpii->pm_lock);
+	gpii->pm_state = ACTIVE_STATE;
+	write_unlock_irq(&gpii->pm_lock);
+	mutex_unlock(&gpii->ctrl_lock);
+
+	return 0;
+}
+
/* release a descriptor allocated by gpi_prep_slave_sg() */
void gpi_desc_free(struct virt_dma_desc *vd)
{
	kfree(to_gpi_desc(vd));
}
+
/*
 * gpi_prep_slave_sg - dmaengine prep_slave_sg: queue client TREs.
 *
 * Each scatterlist entry is treated as a packed array of TREs (sg->length
 * is assumed to be a whole multiple of the ring element size — the client
 * hands over pre-built TREs rather than raw data buffers). Capacity in
 * both the transfer ring and the sg bookkeeping ring is verified up front,
 * then every TRE is copied in via gpi_queue_xfer().
 *
 * Return: a virt-dma descriptor on success, NULL on bad direction,
 *         insufficient ring space, or allocation failure.
 */
struct dma_async_tx_descriptor *gpi_prep_slave_sg(struct dma_chan *chan,
					struct scatterlist *sgl,
					unsigned int sg_len,
					enum dma_transfer_direction direction,
					unsigned long flags,
					void *context)
{
	struct gpii_chan *gpii_chan = to_gpii_chan(chan);
	struct gpii *gpii = gpii_chan->gpii;
	u32 nr, sg_nr;
	u32 nr_req = 0;
	int i, j;
	struct scatterlist *sg;
	struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
	struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
	void *tre, *wp = NULL;
	struct sg_tre *sg_tre = NULL;
	const gfp_t gfp = GFP_ATOMIC;
	struct gpi_desc *gpi_desc;

	GPII_VERB(gpii, gpii_chan->chid, "enter\n");

	if (!is_slave_direction(direction)) {
		GPII_ERR(gpii, gpii_chan->chid,
			 "invalid dma direction: %d\n", direction);
		return NULL;
	}

	/* calculate # of elements required & available */
	nr = gpi_ring_num_elements_avail(ch_ring);
	sg_nr = gpi_ring_num_elements_avail(sg_ring);
	for_each_sg(sgl, sg, sg_len, i) {
		GPII_VERB(gpii, gpii_chan->chid,
			  "%d of %u len:%u\n", i, sg_len, sg->length);
		nr_req += (sg->length / ch_ring->el_size);
	}
	GPII_VERB(gpii, gpii_chan->chid,
		  "nr_elements_avail:%u sg_avail:%u required:%u\n",
		  nr, sg_nr, nr_req);

	if (nr < nr_req || sg_nr < nr_req) {
		GPII_ERR(gpii, gpii_chan->chid,
			 "not enough space in ring, avail:%u,%u required:%u\n",
			 nr, sg_nr, nr_req);
		return NULL;
	}

	gpi_desc = kzalloc(sizeof(*gpi_desc), gfp);
	if (!gpi_desc) {
		GPII_ERR(gpii, gpii_chan->chid,
			 "out of memory for descriptor\n");
		return NULL;
	}

	/* copy each tre into transfer ring */
	for_each_sg(sgl, sg, sg_len, i)
		for (j = 0, tre = sg_virt(sg); j < sg->length;
		     j += ch_ring->el_size, tre += ch_ring->el_size)
			gpi_queue_xfer(gpii, gpii_chan, tre, &wp, &sg_tre);

	/* set up the descriptor: remember where the doorbell must point
	 * (one past the last queued TRE) and the last TRE itself
	 */
	gpi_desc->db = ch_ring->wp;
	gpi_desc->wp = wp;
	gpi_desc->sg_tre = sg_tre;
	gpi_desc->gpii_chan = gpii_chan;
	GPII_VERB(gpii, gpii_chan->chid, "exit wp:0x%0llx rp:0x%0llx\n",
		  to_physical(ch_ring, ch_ring->wp),
		  to_physical(ch_ring, ch_ring->rp));

	return vchan_tx_prep(&gpii_chan->vc, &gpi_desc->vd, flags);
}
+
/*
 * gpi_issue_pending - dmaengine issue_pending: ring the transfer doorbell.
 *
 * Moves all submitted descriptors to the issued list and writes the
 * doorbell of the most recently issued descriptor, which covers every
 * earlier one as well since the ring write pointer only moves forward.
 * Holding pm_lock for reading keeps teardown from racing the register
 * write.
 */
static void gpi_issue_pending(struct dma_chan *chan)
{
	struct gpii_chan *gpii_chan = to_gpii_chan(chan);
	struct gpii *gpii = gpii_chan->gpii;
	unsigned long flags, pm_lock_flags;
	struct virt_dma_desc *vd = NULL;
	struct gpi_desc *gpi_desc;

	GPII_VERB(gpii, gpii_chan->chid, "Enter\n");

	read_lock_irqsave(&gpii->pm_lock, pm_lock_flags);

	/* move all submitted descriptors to issued list */
	spin_lock_irqsave(&gpii_chan->vc.lock, flags);
	if (vchan_issue_pending(&gpii_chan->vc))
		vd = list_last_entry(&gpii_chan->vc.desc_issued,
				     struct virt_dma_desc, node);
	spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);

	/* nothing to do, list is empty */
	if (!vd) {
		read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags);
		GPII_VERB(gpii, gpii_chan->chid, "no descriptors submitted\n");
		return;
	}

	gpi_desc = to_gpi_desc(vd);
	gpi_write_ch_db(gpii_chan, &gpii_chan->ch_ring, gpi_desc->db);
	read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags);
}
+
+/* configure or issue async command */
+static int gpi_config(struct dma_chan *chan,
+		      struct dma_slave_config *config)
+{
+	struct gpii_chan *gpii_chan = to_gpii_chan(chan);
+	struct gpii *gpii = gpii_chan->gpii;
+	struct msm_gpi_ctrl *gpi_ctrl = chan->private;
+	const int ev_factor = gpii->gpi_dev->ev_factor;
+	u32 elements;
+	int i = 0;
+	int ret = 0;
+
+	GPII_INFO(gpii, gpii_chan->chid, "enter\n");
+	if (!gpi_ctrl) {
+		GPII_ERR(gpii, gpii_chan->chid,
+			 "no config ctrl data provided");
+		return -EINVAL;
+	}
+
+	mutex_lock(&gpii->ctrl_lock);
+
+	switch (gpi_ctrl->cmd) {
+	case MSM_GPI_INIT:
+		GPII_INFO(gpii, gpii_chan->chid, "cmd: msm_gpi_init\n");
+
+		gpii_chan->client_info.callback = gpi_ctrl->init.callback;
+		gpii_chan->client_info.cb_param = gpi_ctrl->init.cb_param;
+		gpii_chan->pm_state = CONFIG_STATE;
+
+		/* check if both channels are configured before continue */
+		for (i = 0; i < MAX_CHANNELS_PER_GPII; i++)
+			if (gpii->gpii_chan[i].pm_state != CONFIG_STATE)
+				goto exit_gpi_init;
+
+		/* configure to highest priority from  two channels */
+		gpii->ev_priority = min(gpii->gpii_chan[0].priority,
+					gpii->gpii_chan[1].priority);
+
+		/* protocol must be same for both channels */
+		if (gpii->gpii_chan[0].protocol !=
+		    gpii->gpii_chan[1].protocol) {
+			GPII_ERR(gpii, gpii_chan->chid,
+				 "protocol did not match protocol %u != %u\n",
+				 gpii->gpii_chan[0].protocol,
+				 gpii->gpii_chan[1].protocol);
+			ret = -EINVAL;
+			goto exit_gpi_init;
+		}
+		gpii->protocol = gpii_chan->protocol;
+
+		/* allocate memory for event ring */
+		elements = max(gpii->gpii_chan[0].req_tres,
+			       gpii->gpii_chan[1].req_tres);
+		ret = gpi_alloc_ring(&gpii->ev_ring, elements << ev_factor,
+				     sizeof(union gpi_event), gpii, true);
+		if (ret) {
+			GPII_ERR(gpii, gpii_chan->chid,
+				 "error allocating mem for ev ring\n");
+			goto exit_gpi_init;
+		}
+
+		/* configure interrupts */
+		write_lock_irq(&gpii->pm_lock);
+		gpii->pm_state = PREPARE_HARDWARE;
+		write_unlock_irq(&gpii->pm_lock);
+		ret = gpi_config_interrupts(gpii, DEFAULT_IRQ_SETTINGS, 0);
+		if (ret) {
+			GPII_ERR(gpii, gpii_chan->chid,
+				 "error config. interrupts, ret:%d\n", ret);
+			goto error_config_int;
+		}
+
+		/* allocate event rings */
+		ret = gpi_alloc_ev_chan(gpii);
+		if (ret) {
+			GPII_ERR(gpii, gpii_chan->chid,
+				 "error alloc_ev_chan:%d\n", ret);
+				goto error_alloc_ev_ring;
+		}
+
+		/* Allocate all channels */
+		for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
+			ret = gpi_alloc_chan(&gpii->gpii_chan[i], true);
+			if (ret) {
+				GPII_ERR(gpii, gpii->gpii_chan[i].chid,
+					 "Error allocating chan:%d\n", ret);
+				goto error_alloc_chan;
+			}
+		}
+
+		/* start channels  */
+		for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
+			ret = gpi_start_chan(&gpii->gpii_chan[i]);
+			if (ret) {
+				GPII_ERR(gpii, gpii->gpii_chan[i].chid,
+					 "Error start chan:%d\n", ret);
+				goto error_start_chan;
+			}
+		}
+
+		break;
+	case MSM_GPI_CMD_UART_SW_STALE:
+		GPII_INFO(gpii, gpii_chan->chid, "sending UART SW STALE cmd\n");
+		ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_UART_SW_STALE);
+		break;
+	case MSM_GPI_CMD_UART_RFR_READY:
+		GPII_INFO(gpii, gpii_chan->chid,
+			  "sending UART RFR READY cmd\n");
+		ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_UART_RFR_READY);
+		break;
+	case MSM_GPI_CMD_UART_RFR_NOT_READY:
+		GPII_INFO(gpii, gpii_chan->chid,
+			  "sending UART RFR READY NOT READY cmd\n");
+		ret = gpi_send_cmd(gpii, gpii_chan,
+				   GPI_CH_CMD_UART_RFR_NOT_READY);
+		break;
+	default:
+		GPII_ERR(gpii, gpii_chan->chid,
+			 "unsupported ctrl cmd:%d\n", gpi_ctrl->cmd);
+		ret = -EINVAL;
+	}
+
+	mutex_unlock(&gpii->ctrl_lock);
+	return ret;
+
+error_start_chan:
+	for (i = i - 1; i >= 0; i++) {
+		gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_STOP);
+		gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_RESET);
+	}
+	i = 2;
+error_alloc_chan:
+	for (i = i - 1; i >= 0; i--)
+		gpi_reset_chan(gpii_chan, GPI_CH_CMD_DE_ALLOC);
+error_alloc_ev_ring:
+	gpi_disable_interrupts(gpii);
+error_config_int:
+	gpi_free_ring(&gpii->ev_ring, gpii);
+exit_gpi_init:
+	mutex_unlock(&gpii->ctrl_lock);
+	return ret;
+}
+
/*
 * gpi_free_chan_resources - dmaengine free_chan_resources.
 *
 * Gracefully stops/resets/de-allocates the channel if it was active,
 * frees its rings and virt-dma resources, and — once neither channel has
 * a configured ring left — tears down the whole gpii: kills the event
 * tasklet, de-allocates the event ring, and disables interrupts.
 */
static void gpi_free_chan_resources(struct dma_chan *chan)
{
	struct gpii_chan *gpii_chan = to_gpii_chan(chan);
	struct gpii *gpii = gpii_chan->gpii;
	enum gpi_pm_state cur_state;
	int ret, i;

	GPII_INFO(gpii, gpii_chan->chid, "enter\n");

	mutex_lock(&gpii->ctrl_lock);

	cur_state = gpii_chan->pm_state;

	/* disable ch state so no more TRE processing for this channel */
	write_lock_irq(&gpii->pm_lock);
	gpii_chan->pm_state = PREPARE_TERMINATE;
	write_unlock_irq(&gpii->pm_lock);

	/* attempt to do graceful hardware shutdown */
	if (cur_state == ACTIVE_STATE) {
		ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_STOP);
		if (ret)
			GPII_ERR(gpii, gpii_chan->chid,
				 "error stopping channel:%d\n", ret);

		ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_RESET);
		if (ret)
			GPII_ERR(gpii, gpii_chan->chid,
				 "error resetting channel:%d\n", ret);

		gpi_reset_chan(gpii_chan, GPI_CH_CMD_DE_ALLOC);
	}

	/* free all allocated memory */
	gpi_free_ring(&gpii_chan->ch_ring, gpii);
	gpi_free_ring(&gpii_chan->sg_ring, gpii);
	vchan_free_chan_resources(&gpii_chan->vc);

	write_lock_irq(&gpii->pm_lock);
	gpii_chan->pm_state = DISABLE_STATE;
	write_unlock_irq(&gpii->pm_lock);

	/* if other rings are still active exit without gpii teardown */
	for (i = 0; i < MAX_CHANNELS_PER_GPII; i++)
		if (gpii->gpii_chan[i].ch_ring.configured)
			goto exit_free;

	GPII_INFO(gpii, gpii_chan->chid, "disabling gpii\n");

	/* deallocate EV Ring */
	cur_state = gpii->pm_state;
	write_lock_irq(&gpii->pm_lock);
	gpii->pm_state = PREPARE_TERMINATE;
	write_unlock_irq(&gpii->pm_lock);

	/* wait for threads to complete out */
	tasklet_kill(&gpii->ev_task);

	/* send command to de allocate event ring */
	if (cur_state == ACTIVE_STATE)
		gpi_send_cmd(gpii, NULL, GPI_EV_CMD_DEALLOC);

	gpi_free_ring(&gpii->ev_ring, gpii);

	/* disable interrupts */
	if (cur_state == ACTIVE_STATE)
		gpi_disable_interrupts(gpii);

	/* set final state to disable */
	write_lock_irq(&gpii->pm_lock);
	gpii->pm_state = DISABLE_STATE;
	write_unlock_irq(&gpii->pm_lock);

exit_free:
	mutex_unlock(&gpii->ctrl_lock);
}
+
/*
 * gpi_alloc_chan_resources - dmaengine alloc_chan_resources.
 *
 * Allocates the channel's transfer ring (DMA-coherent, hardware-visible)
 * and its software-only sg bookkeeping ring (vmalloc-backed), sized to
 * the TRE count the client requested via gpi_of_dma_xlate().
 *
 * Return: 0 on success, or -ENOMEM from gpi_alloc_ring().
 */
static int gpi_alloc_chan_resources(struct dma_chan *chan)
{
	struct gpii_chan *gpii_chan = to_gpii_chan(chan);
	struct gpii *gpii = gpii_chan->gpii;
	int ret;

	GPII_INFO(gpii, gpii_chan->chid, "enter\n");

	mutex_lock(&gpii->ctrl_lock);

	/* allocate memory for transfer ring */
	ret = gpi_alloc_ring(&gpii_chan->ch_ring, gpii_chan->req_tres,
			     sizeof(struct msm_gpi_tre), gpii, true);
	if (ret) {
		GPII_ERR(gpii, gpii_chan->chid,
			 "error allocating xfer ring, ret:%d\n", ret);
		goto xfer_alloc_err;
	}

	/* sg ring mirrors the transfer ring one-to-one, SW-only */
	ret = gpi_alloc_ring(&gpii_chan->sg_ring, gpii_chan->ch_ring.elements,
			     sizeof(struct sg_tre), gpii, false);
	if (ret) {
		GPII_ERR(gpii, gpii_chan->chid,
			 "error allocating sg ring, ret:%d\n", ret);
		goto sg_alloc_error;
	}
	mutex_unlock(&gpii->ctrl_lock);

	return 0;

sg_alloc_error:
	gpi_free_ring(&gpii_chan->ch_ring, gpii);
xfer_alloc_err:
	mutex_unlock(&gpii->ctrl_lock);

	return ret;
}
+
+/* gpi_of_dma_xlate: open client requested channel */
+static struct dma_chan *gpi_of_dma_xlate(struct of_phandle_args *args,
+					 struct of_dma *of_dma)
+{
+	struct gpi_dev *gpi_dev = (struct gpi_dev *)of_dma->of_dma_data;
+	u32 gpii, chid;
+	struct gpii_chan *gpii_chan;
+
+	if (args->args_count < REQ_OF_DMA_ARGS) {
+		GPI_ERR(gpi_dev,
+			"gpii require minimum 6 args, client passed:%d args\n",
+			args->args_count);
+		return NULL;
+	}
+
+	/* Check if valid gpii instance */
+	gpii = args->args[0];
+	if (!((1 << gpii) & gpi_dev->gpii_mask)) {
+		GPI_ERR(gpi_dev, "gpii instance:%d is not supported\n", gpii);
+		return NULL;
+	}
+
+	chid = args->args[1];
+	if (chid >= MAX_CHANNELS_PER_GPII) {
+		GPI_ERR(gpi_dev, "gpii channel:%d not valid\n", chid);
+		return NULL;
+	}
+
+	/* get ring size, protocol, se_id, and priority */
+	gpii_chan = &gpi_dev->gpiis[gpii].gpii_chan[chid];
+	gpii_chan->seid = args->args[2];
+	gpii_chan->protocol = args->args[3];
+	gpii_chan->req_tres = args->args[4];
+	gpii_chan->priority = args->args[5];
+
+	GPI_LOG(gpi_dev,
+		"client req. gpii:%u chid:%u #_tre:%u priority:%u protocol:%u\n",
+		gpii, chid, gpii_chan->req_tres, gpii_chan->priority,
+		gpii_chan->protocol);
+
+	return dma_get_slave_channel(&gpii_chan->vc.chan);
+}
+
/*
 * gpi_setup_debug - set up IPC logging and debugfs knobs.
 *
 * Creates one IPC log context for the device and one per enabled gpii,
 * plus debugfs u32 controls ("ipc_log_lvl", "klog_lvl") to adjust log
 * verbosity at runtime. All failures here are tolerated — debug
 * facilities are best-effort and missing dentries are simply skipped.
 */
static void gpi_setup_debug(struct gpi_dev *gpi_dev)
{
	char node_name[GPI_LABEL_SIZE];
	const umode_t mode = 0600;
	int i;

	/* device-level log context named after the register base address */
	snprintf(node_name, sizeof(node_name), "%s%llx", GPI_DMA_DRV_NAME,
		 (u64)gpi_dev->res->start);

	gpi_dev->ilctxt = ipc_log_context_create(IPC_LOG_PAGES,
						 node_name, 0);
	gpi_dev->ipc_log_lvl = DEFAULT_IPC_LOG_LVL;
	/* pdentry is the driver's debugfs root, created elsewhere */
	if (!IS_ERR_OR_NULL(pdentry)) {
		snprintf(node_name, sizeof(node_name), "%llx",
			 (u64)gpi_dev->res->start);
		gpi_dev->dentry = debugfs_create_dir(node_name, pdentry);
		if (!IS_ERR_OR_NULL(gpi_dev->dentry)) {
			debugfs_create_u32("ipc_log_lvl", mode, gpi_dev->dentry,
					   &gpi_dev->ipc_log_lvl);
			debugfs_create_u32("klog_lvl", mode,
					   gpi_dev->dentry, &gpi_dev->klog_lvl);
		}
	}

	/* per-gpii log contexts and debugfs dirs, enabled gpiis only */
	for (i = 0; i < gpi_dev->max_gpii; i++) {
		struct gpii *gpii;

		if (!((1 << i) & gpi_dev->gpii_mask))
			continue;

		gpii = &gpi_dev->gpiis[i];
		snprintf(gpii->label, sizeof(gpii->label),
			 "%s%llx_gpii%d",
			 GPI_DMA_DRV_NAME, (u64)gpi_dev->res->start, i);
		gpii->ilctxt = ipc_log_context_create(IPC_LOG_PAGES,
						      gpii->label, 0);
		gpii->ipc_log_lvl = DEFAULT_IPC_LOG_LVL;
		gpii->klog_lvl = DEFAULT_KLOG_LVL;

		if (IS_ERR_OR_NULL(gpi_dev->dentry))
			continue;

		snprintf(node_name, sizeof(node_name), "gpii%d", i);
		gpii->dentry = debugfs_create_dir(node_name, gpi_dev->dentry);
		if (IS_ERR_OR_NULL(gpii->dentry))
			continue;

		debugfs_create_u32("ipc_log_lvl", mode, gpii->dentry,
				   &gpii->ipc_log_lvl);
		debugfs_create_u32("klog_lvl", mode, gpii->dentry,
				   &gpii->klog_lvl);
	}
}
+
+static int gpi_smmu_init(struct gpi_dev *gpi_dev)
+{
+	u64 size = U64_MAX;
+	dma_addr_t base = 0x0;
+	struct dma_iommu_mapping *map;
+	int attr, ret;
+
+	map = arm_iommu_create_mapping(&platform_bus_type, base, size);
+	if (IS_ERR_OR_NULL(map)) {
+		ret = PTR_ERR(map) ? : -EIO;
+		GPI_ERR(gpi_dev, "error create_mapping, ret:%d\n", ret);
+		return ret;
+	}
+
+	attr = 1;
+	ret = iommu_domain_set_attr(map->domain, DOMAIN_ATTR_ATOMIC, &attr);
+	if (ret) {
+		GPI_ERR(gpi_dev, "error setting ATTR_ATOMIC, ret:%d\n", ret);
+		goto error_smmu;
+	}
+
+	attr = 1;
+	ret = iommu_domain_set_attr(map->domain, DOMAIN_ATTR_S1_BYPASS, &attr);
+	if (ret) {
+		GPI_ERR(gpi_dev, "error setting S1_BYPASS, ret:%d\n", ret);
+		goto error_smmu;
+	}
+
+	ret = arm_iommu_attach_device(gpi_dev->dev, map);
+	if (ret) {
+		GPI_ERR(gpi_dev, "error iommu_attach, ret:%d\n", ret);
+		goto error_smmu;
+	}
+
+	ret = dma_set_mask(gpi_dev->dev, DMA_BIT_MASK(64));
+	if (ret) {
+		GPI_ERR(gpi_dev, "error setting dma_mask, ret:%d\n", ret);
+		goto error_set_mask;
+	}
+
+	return ret;
+
+error_set_mask:
+	arm_iommu_detach_device(gpi_dev->dev);
+error_smmu:
+	arm_iommu_release_mapping(map);
+	return ret;
+}
+
+static int gpi_probe(struct platform_device *pdev)
+{
+	struct gpi_dev *gpi_dev;
+	int ret, i;
+
+	gpi_dev = devm_kzalloc(&pdev->dev, sizeof(*gpi_dev), GFP_KERNEL);
+	if (!gpi_dev)
+		return -ENOMEM;
+
+	gpi_dev->dev = &pdev->dev;
+	gpi_dev->klog_lvl = DEFAULT_KLOG_LVL;
+	gpi_dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						    "gpi-top");
+	if (!gpi_dev->res) {
+		GPI_ERR(gpi_dev, "missing 'reg' DT node\n");
+		return -EINVAL;
+	}
+	gpi_dev->regs = devm_ioremap_nocache(gpi_dev->dev, gpi_dev->res->start,
+					     resource_size(gpi_dev->res));
+	if (!gpi_dev->regs) {
+		GPI_ERR(gpi_dev, "IO remap failed\n");
+		return -EFAULT;
+	}
+
+	ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,max-num-gpii",
+				   &gpi_dev->max_gpii);
+	if (ret) {
+		GPI_ERR(gpi_dev, "missing 'qcom,max-num-gpii' DT node\n");
+		return ret;
+	}
+
+	ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,gpii-mask",
+				   &gpi_dev->gpii_mask);
+	if (ret) {
+		GPI_ERR(gpi_dev, "missing 'gpii-mask' DT node\n");
+		return ret;
+	}
+
+	ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,ev-factor",
+				   &gpi_dev->ev_factor);
+	if (ret) {
+		GPI_ERR(gpi_dev, "missing 'qcom,ev-factor' DT node\n");
+		return ret;
+	}
+
+	ret = gpi_smmu_init(gpi_dev);
+	if (ret) {
+		GPI_ERR(gpi_dev, "error configuring smmu, ret:%d\n", ret);
+		return ret;
+	}
+
+	gpi_dev->gpiis = devm_kzalloc(gpi_dev->dev,
+				sizeof(*gpi_dev->gpiis) * gpi_dev->max_gpii,
+				GFP_KERNEL);
+	if (!gpi_dev->gpiis)
+		return -ENOMEM;
+
+
+	/* setup all the supported gpii */
+	INIT_LIST_HEAD(&gpi_dev->dma_device.channels);
+	for (i = 0; i < gpi_dev->max_gpii; i++) {
+		struct gpii *gpii = &gpi_dev->gpiis[i];
+		int chan;
+
+		if (!((1 << i) & gpi_dev->gpii_mask))
+			continue;
+
+		/* set up ev cntxt register map */
+		gpii->ev_cntxt_base_reg = gpi_dev->regs +
+			GPI_GPII_n_EV_CH_k_CNTXT_0_OFFS(i, 0);
+		gpii->ev_cntxt_db_reg = gpi_dev->regs +
+			GPI_GPII_n_EV_CH_k_DOORBELL_0_OFFS(i, 0);
+		gpii->ev_ring_base_lsb_reg = gpii->ev_cntxt_base_reg +
+			CNTXT_2_RING_BASE_LSB;
+		gpii->ev_ring_rp_lsb_reg = gpii->ev_cntxt_base_reg +
+			CNTXT_4_RING_RP_LSB;
+		gpii->ev_ring_wp_lsb_reg = gpii->ev_cntxt_base_reg +
+			CNTXT_6_RING_WP_LSB;
+		gpii->ev_cmd_reg = gpi_dev->regs +
+			GPI_GPII_n_EV_CH_CMD_OFFS(i);
+		gpii->ieob_src_reg = gpi_dev->regs +
+			GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_OFFS(i);
+		gpii->ieob_clr_reg = gpi_dev->regs +
+			GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(i);
+
+		/* set up irq */
+		ret = platform_get_irq(pdev, i);
+		if (ret < 0) {
+			GPI_ERR(gpi_dev, "could not req. irq for gpii%d ret:%d",
+				i, ret);
+			return ret;
+		}
+		gpii->irq = ret;
+
+		/* set up channel specific register info */
+		for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++) {
+			struct gpii_chan *gpii_chan = &gpii->gpii_chan[chan];
+
+			/* set up ch cntxt register map */
+			gpii_chan->ch_cntxt_base_reg = gpi_dev->regs +
+				GPI_GPII_n_CH_k_CNTXT_0_OFFS(i, chan);
+			gpii_chan->ch_cntxt_db_reg = gpi_dev->regs +
+				GPI_GPII_n_CH_k_DOORBELL_0_OFFS(i, chan);
+			gpii_chan->ch_ring_base_lsb_reg =
+				gpii_chan->ch_cntxt_base_reg +
+				CNTXT_2_RING_BASE_LSB;
+			gpii_chan->ch_ring_rp_lsb_reg =
+				gpii_chan->ch_cntxt_base_reg +
+				CNTXT_4_RING_RP_LSB;
+			gpii_chan->ch_ring_wp_lsb_reg =
+				gpii_chan->ch_cntxt_base_reg +
+				CNTXT_6_RING_WP_LSB;
+			gpii_chan->ch_cmd_reg = gpi_dev->regs +
+				GPI_GPII_n_CH_CMD_OFFS(i);
+
+			/* vchan setup */
+			vchan_init(&gpii_chan->vc, &gpi_dev->dma_device);
+			gpii_chan->vc.desc_free = gpi_desc_free;
+			gpii_chan->chid = chan;
+			gpii_chan->gpii = gpii;
+			gpii_chan->dir = GPII_CHAN_DIR[chan];
+		}
+		mutex_init(&gpii->ctrl_lock);
+		rwlock_init(&gpii->pm_lock);
+		tasklet_init(&gpii->ev_task, gpi_ev_tasklet,
+			     (unsigned long)gpii);
+		init_completion(&gpii->cmd_completion);
+		gpii->gpii_id = i;
+		gpii->regs = gpi_dev->regs;
+		gpii->gpi_dev = gpi_dev;
+		atomic_set(&gpii->dbg_index, 0);
+	}
+
+	platform_set_drvdata(pdev, gpi_dev);
+
+	/* Clear and set capabilities */
+	dma_cap_zero(gpi_dev->dma_device.cap_mask);
+	dma_cap_set(DMA_SLAVE, gpi_dev->dma_device.cap_mask);
+
+	/* configure dmaengine apis */
+	gpi_dev->dma_device.directions =
+		BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	gpi_dev->dma_device.residue_granularity =
+		DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+	gpi_dev->dma_device.src_addr_widths = DMA_SLAVE_BUSWIDTH_8_BYTES;
+	gpi_dev->dma_device.dst_addr_widths = DMA_SLAVE_BUSWIDTH_8_BYTES;
+	gpi_dev->dma_device.device_alloc_chan_resources =
+		gpi_alloc_chan_resources;
+	gpi_dev->dma_device.device_free_chan_resources =
+		gpi_free_chan_resources;
+	gpi_dev->dma_device.device_tx_status = dma_cookie_status;
+	gpi_dev->dma_device.device_issue_pending = gpi_issue_pending;
+	gpi_dev->dma_device.device_prep_slave_sg = gpi_prep_slave_sg;
+	gpi_dev->dma_device.device_config = gpi_config;
+	gpi_dev->dma_device.device_terminate_all = gpi_terminate_all;
+	gpi_dev->dma_device.dev = gpi_dev->dev;
+	gpi_dev->dma_device.device_pause = gpi_pause;
+	gpi_dev->dma_device.device_resume = gpi_resume;
+
+	/* register with dmaengine framework */
+	ret = dma_async_device_register(&gpi_dev->dma_device);
+	if (ret) {
+		GPI_ERR(gpi_dev, "async_device_register failed ret:%d", ret);
+		return ret;
+	}
+
+	ret = of_dma_controller_register(gpi_dev->dev->of_node,
+					 gpi_of_dma_xlate, gpi_dev);
+	if (ret) {
+		GPI_ERR(gpi_dev, "of_dma_controller_reg failed ret:%d", ret);
+		return ret;
+	}
+
+	/* setup debug capabilities */
+	gpi_setup_debug(gpi_dev);
+	GPI_LOG(gpi_dev, "probe success\n");
+
+	return ret;
+}
+
+static const struct of_device_id gpi_of_match[] = {
+	{ .compatible = "qcom,gpi-dma" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, gpi_of_match);
+
+static struct platform_driver gpi_driver = {
+	.probe = gpi_probe,
+	.driver = {
+		.name = GPI_DMA_DRV_NAME,
+		.of_match_table = gpi_of_match,
+	},
+};
+
+static int __init gpi_init(void)
+{
+	pdentry = debugfs_create_dir(GPI_DMA_DRV_NAME, NULL);
+	return platform_driver_register(&gpi_driver);
+}
+module_init(gpi_init)
+
+MODULE_DESCRIPTION("QCOM GPI DMA engine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/qcom/msm_gpi_mmio.h b/drivers/dma/qcom/msm_gpi_mmio.h
new file mode 100644
index 0000000..3fcff9e
--- /dev/null
+++ b/drivers/dma/qcom/msm_gpi_mmio.h
@@ -0,0 +1,224 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* Register offsets from gpi-top */
+#define GPI_GPII_n_CH_k_CNTXT_0_OFFS(n, k) \
+	(0x20000 + (0x4000 * (n)) + (0x80 * (k)))
+#define GPI_GPII_n_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK (0xFF000000)
+#define GPI_GPII_n_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT (24)
+#define GPI_GPII_n_CH_k_CNTXT_0_CHSTATE_BMSK (0xF00000)
+#define GPI_GPII_n_CH_k_CNTXT_0_CHSTATE_SHFT (20)
+#define GPI_GPII_n_CH_k_CNTXT_0_ERINDEX_BMSK (0x7C000)
+#define GPI_GPII_n_CH_k_CNTXT_0_ERINDEX_SHFT (14)
+#define GPI_GPII_n_CH_k_CNTXT_0_CHID_BMSK (0x1F00)
+#define GPI_GPII_n_CH_k_CNTXT_0_CHID_SHFT (8)
+#define GPI_GPII_n_CH_k_CNTXT_0_EE_BMSK (0xF0)
+#define GPI_GPII_n_CH_k_CNTXT_0_EE_SHFT (4)
+#define GPI_GPII_n_CH_k_CNTXT_0_CHTYPE_DIR_BMSK (0x8)
+#define GPI_GPII_n_CH_k_CNTXT_0_CHTYPE_DIR_SHFT (3)
+#define GPI_GPII_n_CH_k_CNTXT_0_CHTYPE_PROTO_BMSK (0x7)
+#define GPI_GPII_n_CH_k_CNTXT_0_CHTYPE_PROTO_SHFT (0)
+#define GPI_GPII_n_CH_k_CNTXT_0(el_size, erindex, chtype_dir, chtype_proto) \
+	((el_size << 24) | (erindex << 14) | (chtype_dir << 3) | (chtype_proto))
+#define GPI_CHTYPE_DIR_IN (0)
+#define GPI_CHTYPE_DIR_OUT (1)
+#define GPI_CHTYPE_PROTO_GPI (0x2)
+#define GPI_GPII_n_CH_k_CNTXT_1_R_LENGTH_BMSK (0xFFFF)
+#define GPI_GPII_n_CH_k_CNTXT_1_R_LENGTH_SHFT (0)
+#define GPI_GPII_n_CH_k_DOORBELL_0_OFFS(n, k) (0x22000 + (0x4000 * (n)) \
+					       + (0x8 * (k)))
+#define GPI_GPII_n_CH_CMD_OFFS(n) (0x23008 + (0x4000 * (n)))
+#define GPI_GPII_n_CH_CMD_OPCODE_BMSK (0xFF000000)
+#define GPI_GPII_n_CH_CMD_OPCODE_SHFT (24)
+#define GPI_GPII_n_CH_CMD_CHID_BMSK (0xFF)
+#define GPI_GPII_n_CH_CMD_CHID_SHFT (0)
+#define GPI_GPII_n_CH_CMD(opcode, chid) ((opcode << 24) | chid)
+#define GPI_GPII_n_CH_CMD_ALLOCATE (0)
+#define GPI_GPII_n_CH_CMD_START (1)
+#define GPI_GPII_n_CH_CMD_STOP (2)
+#define GPI_GPII_n_CH_CMD_RESET (9)
+#define GPI_GPII_n_CH_CMD_DE_ALLOC (10)
+#define GPI_GPII_n_CH_CMD_UART_SW_STALE (32)
+#define GPI_GPII_n_CH_CMD_UART_RFR_READY (33)
+#define GPI_GPII_n_CH_CMD_UART_RFR_NOT_READY (34)
+
+/* EV Context Array */
+#define GPI_GPII_n_EV_CH_k_CNTXT_0_OFFS(n, k) \
+	(0x21000 + (0x4000 * (n)) + (0x80 * (k)))
+#define GPI_GPII_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK (0xFF000000)
+#define GPI_GPII_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT (24)
+#define GPI_GPII_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK (0xF00000)
+#define GPI_GPII_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT (20)
+#define GPI_GPII_n_EV_CH_k_CNTXT_0_INTYPE_BMSK (0x10000)
+#define GPI_GPII_n_EV_CH_k_CNTXT_0_INTYPE_SHFT (16)
+#define GPI_GPII_n_EV_CH_k_CNTXT_0_EVCHID_BMSK (0xFF00)
+#define GPI_GPII_n_EV_CH_k_CNTXT_0_EVCHID_SHFT (8)
+#define GPI_GPII_n_EV_CH_k_CNTXT_0_EE_BMSK (0xF0)
+#define GPI_GPII_n_EV_CH_k_CNTXT_0_EE_SHFT (4)
+#define GPI_GPII_n_EV_CH_k_CNTXT_0_CHTYPE_BMSK (0xF)
+#define GPI_GPII_n_EV_CH_k_CNTXT_0_CHTYPE_SHFT (0)
+#define GPI_GPII_n_EV_CH_k_CNTXT_0(el_size, intype, chtype) \
+	((el_size << 24) | (intype << 16) | (chtype))
+#define GPI_INTTYPE_IRQ (1)
+#define GPI_CHTYPE_GPI_EV (0x2)
+#define GPI_GPII_n_EV_CH_k_CNTXT_1_R_LENGTH_BMSK (0xFFFF)
+#define GPI_GPII_n_EV_CH_k_CNTXT_1_R_LENGTH_SHFT (0)
+
+enum CNTXT_OFFS {
+	CNTXT_0_CONFIG = 0x0,
+	CNTXT_1_R_LENGTH = 0x4,
+	CNTXT_2_RING_BASE_LSB = 0x8,
+	CNTXT_3_RING_BASE_MSB = 0xC,
+	CNTXT_4_RING_RP_LSB = 0x10,
+	CNTXT_5_RING_RP_MSB = 0x14,
+	CNTXT_6_RING_WP_LSB = 0x18,
+	CNTXT_7_RING_WP_MSB = 0x1C,
+	CNTXT_8_RING_INT_MOD = 0x20,
+	CNTXT_9_RING_INTVEC = 0x24,
+	CNTXT_10_RING_MSI_LSB = 0x28,
+	CNTXT_11_RING_MSI_MSB = 0x2C,
+	CNTXT_12_RING_RP_UPDATE_LSB = 0x30,
+	CNTXT_13_RING_RP_UPDATE_MSB = 0x34,
+};
+
+#define GPI_GPII_n_EV_CH_k_DOORBELL_0_OFFS(n, k) \
+	(0x22100 + (0x4000 * (n)) + (0x8 * (k)))
+#define GPI_GPII_n_EV_CH_CMD_OFFS(n) \
+	(0x23010 + (0x4000 * (n)))
+#define GPI_GPII_n_EV_CH_CMD_OPCODE_BMSK (0xFF000000)
+#define GPI_GPII_n_EV_CH_CMD_OPCODE_SHFT (24)
+#define GPI_GPII_n_EV_CH_CMD_CHID_BMSK (0xFF)
+#define GPI_GPII_n_EV_CH_CMD_CHID_SHFT (0)
+#define GPI_GPII_n_EV_CH_CMD(opcode, chid) \
+	((opcode << 24) | chid)
+#define GPI_GPII_n_EV_CH_CMD_ALLOCATE (0x00)
+#define GPI_GPII_n_EV_CH_CMD_RESET (0x09)
+#define GPI_GPII_n_EV_CH_CMD_DE_ALLOC (0x0A)
+
+#define GPI_GPII_n_CNTXT_TYPE_IRQ_OFFS(n) \
+	(0x23080 + (0x4000 * (n)))
+
+/* mask type register */
+#define GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(n) \
+	(0x23088 + (0x4000 * (n)))
+#define GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK (0x7F)
+#define GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_SHFT (0)
+#define GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GENERAL (0x40)
+#define GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_INTER_GPII_EV_CTRL (0x20)
+#define GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_INTER_GPII_CH_CTRL (0x10)
+#define GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB (0x08)
+#define GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB (0x04)
+#define GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL (0x02)
+#define GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL (0x01)
+
+#define GPI_GPII_n_CNTXT_SRC_GPII_CH_IRQ_OFFS(n) \
+	(0x23090 + (0x4000 * (n)))
+#define GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_OFFS(n) \
+	(0x23094 + (0x4000 * (n)))
+
+/* Mask channel control interrupt register */
+#define GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS(n) \
+	(0x23098 + (0x4000 * (n)))
+#define GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK (0x3)
+#define GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_SHFT (0)
+
+/* Mask event control interrupt register */
+#define GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(n) \
+	(0x2309C + (0x4000 * (n)))
+#define GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK (0x1)
+#define GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_SHFT (0)
+
+#define GPI_GPII_n_CNTXT_SRC_CH_IRQ_CLR_OFFS(n) \
+	(0x230A0 + (0x4000 * (n)))
+#define GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS(n) \
+	(0x230A4 + (0x4000 * (n)))
+#define GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_OFFS(n) \
+	(0x230B0 + (0x4000 * (n)))
+
+/* Mask event interrupt register */
+#define GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(n) \
+	(0x230B8 + (0x4000 * (n)))
+#define GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK (0x1)
+#define GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_SHFT (0)
+
+#define GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(n) \
+	(0x230C0 + (0x4000 * (n)))
+#define GPI_GPII_n_CNTXT_GLOB_IRQ_STTS_OFFS(n) \
+	(0x23100 + (0x4000 * (n)))
+#define GPI_GLOB_IRQ_ERROR_INT_MSK (0x1)
+#define GPI_GLOB_IRQ_GP_INT1_MSK (0x2)
+#define GPI_GLOB_IRQ_GP_INT2_MSK (0x4)
+#define GPI_GLOB_IRQ_GP_INT3_MSK (0x8)
+
+/* GPII specific Global - Enable bit register */
+#define GPI_GPII_n_CNTXT_GLOB_IRQ_EN_OFFS(n) \
+	(0x23108 + (0x4000 * (n)))
+#define GPI_GPII_n_CNTXT_GLOB_IRQ_EN_BMSK (0xF)
+#define GPI_GPII_n_CNTXT_GLOB_IRQ_EN_SHFT (0)
+#define GPI_GPII_n_CNTXT_GLOB_IRQ_EN_GP_INT3 (0x8)
+#define GPI_GPII_n_CNTXT_GLOB_IRQ_EN_GP_INT2 (0x4)
+#define GPI_GPII_n_CNTXT_GLOB_IRQ_EN_GP_INT1 (0x2)
+#define GPI_GPII_n_CNTXT_GLOB_IRQ_EN_ERROR_INT (0x1)
+
+#define GPI_GPII_n_CNTXT_GLOB_IRQ_CLR_OFFS(n) \
+	(0x23110 + (0x4000 * (n)))
+#define GPI_GPII_n_CNTXT_GPII_IRQ_STTS_OFFS(n) \
+	(0x23118 + (0x4000 * (n)))
+
+/* GPII general interrupt - Enable bit register */
+#define GPI_GPII_n_CNTXT_GPII_IRQ_EN_OFFS(n) \
+	(0x23120 + (0x4000 * (n)))
+#define GPI_GPII_n_CNTXT_GPII_IRQ_EN_BMSK (0xF)
+#define GPI_GPII_n_CNTXT_GPII_IRQ_EN_SHFT (0)
+#define GPI_GPII_n_CNTXT_GPII_IRQ_EN_STACK_OVRFLOW (0x8)
+#define GPI_GPII_n_CNTXT_GLOB_IRQ_EN_CMD_FIFO_OVRFLOW (0x4)
+#define GPI_GPII_n_CNTXT_GLOB_IRQ_EN_BUS_ERROR (0x2)
+#define GPI_GPII_n_CNTXT_GLOB_IRQ_EN_BREAK_POINT (0x1)
+
+#define GPI_GPII_n_CNTXT_GPII_IRQ_CLR_OFFS(n) \
+	(0x23128 + (0x4000 * (n)))
+
+/* GPII Interrupt Type register */
+#define GPI_GPII_n_CNTXT_INTSET_OFFS(n) \
+	(0x23180 + (0x4000 * (n)))
+#define GPI_GPII_n_CNTXT_INTSET_BMSK (0x1)
+#define GPI_GPII_n_CNTXT_INTSET_SHFT (0)
+
+#define GPI_GPII_n_CNTXT_MSI_BASE_LSB_OFFS(n) \
+	(0x23188 + (0x4000 * (n)))
+#define GPI_GPII_n_CNTXT_MSI_BASE_MSB_OFFS(n) \
+	(0x2318C + (0x4000 * (n)))
+#define GPI_GPII_n_CNTXT_SCRATCH_0_OFFS(n) \
+	(0x23400 + (0x4000 * (n)))
+#define GPI_GPII_n_CNTXT_SCRATCH_1_OFFS(n) \
+	(0x23404 + (0x4000 * (n)))
+
+#define GPI_GPII_n_ERROR_LOG_OFFS(n) \
+	(0x23200 + (0x4000 * (n)))
+#define GPI_GPII_n_ERROR_LOG_CLR_OFFS(n) \
+	(0x23210 + (0x4000 * (n)))
+
+/* QOS Registers */
+#define GPI_GPII_n_CH_k_QOS_OFFS(n, k) \
+	(0x2005C + (0x4000 * (n)) + (0x80 * (k)))
+
+/* Scratch registers */
+#define GPI_GPII_n_CH_k_SCRATCH_0_OFFS(n, k) \
+	(0x20060 + (0x4000 * (n)) + (0x80 * (k)))
+#define GPI_GPII_n_CH_K_SCRATCH_0(pair, proto, seid) \
+	((pair << 16) | (proto << 4) | seid)
+#define GPI_GPII_n_CH_k_SCRATCH_1_OFFS(n, k) \
+	(0x20064 + (0x4000 * (n)) + (0x80 * (k)))
+#define GPI_GPII_n_CH_k_SCRATCH_2_OFFS(n, k) \
+	(0x20068 + (0x4000 * (n)) + (0x80 * (k)))
+#define GPI_GPII_n_CH_k_SCRATCH_3_OFFS(n, k) \
+	(0x2006C + (0x4000 * (n)) + (0x80 * (k)))
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c
index ebed22f..296ec12 100644
--- a/drivers/extcon/extcon-gpio.c
+++ b/drivers/extcon/extcon-gpio.c
@@ -28,6 +28,7 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/workqueue.h>
+#include <linux/of_gpio.h>
 
 struct gpio_extcon_data {
 	struct extcon_dev *edev;
@@ -37,6 +38,7 @@
 
 	struct gpio_desc *id_gpiod;
 	struct gpio_extcon_pdata *pdata;
+	unsigned int *supported_cable;
 };
 
 static void gpio_extcon_work(struct work_struct *work)
@@ -91,15 +93,93 @@
 	return 0;
 }
 
+static int extcon_parse_pinctrl_data(struct device *dev,
+				     struct gpio_extcon_pdata *pdata)
+{
+	struct pinctrl *pctrl;
+	int ret = 0;
+
+	/* Try to obtain pinctrl handle */
+	pctrl = devm_pinctrl_get(dev);
+	if (IS_ERR(pctrl)) {
+		ret = PTR_ERR(pctrl);
+		goto out;
+	}
+	pdata->pctrl = pctrl;
+
+	/* Look-up and keep the state handy to be used later */
+	pdata->pins_default = pinctrl_lookup_state(pdata->pctrl,
+						   "default");
+	if (IS_ERR(pdata->pins_default)) {
+		ret = PTR_ERR(pdata->pins_default);
+		dev_err(dev, "Can't get default pinctrl state, ret %d\n", ret);
+	}
+out:
+	return ret;
+}
+
+/* Parse platform data */
+static
+struct gpio_extcon_pdata *extcon_populate_pdata(struct device *dev)
+{
+	struct gpio_extcon_pdata *pdata = NULL;
+	struct device_node *np = dev->of_node;
+	enum of_gpio_flags flags;
+	u32 val;
+
+	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		goto out;
+
+	if (of_property_read_u32(np, "extcon-id", &pdata->extcon_id)) {
+		dev_err(dev, "extcon-id property not found\n");
+		goto out;
+	}
+
+	pdata->gpio = of_get_named_gpio_flags(np, "gpio", 0, &flags);
+	if (gpio_is_valid(pdata->gpio)) {
+		if (flags & OF_GPIO_ACTIVE_LOW)
+			pdata->gpio_active_low = true;
+	} else {
+		dev_err(dev, "gpio property not found or invalid\n");
+		goto out;
+	}
+
+	if (of_property_read_u32(np, "irq-flags", &val)) {
+		dev_err(dev, "irq-flags property not found\n");
+		goto out;
+	}
+	pdata->irq_flags = val;
+
+	if (of_property_read_u32(np, "debounce-ms", &val)) {
+		dev_err(dev, "debounce-ms property not found\n");
+		goto out;
+	}
+	pdata->debounce = val;
+
+	if (extcon_parse_pinctrl_data(dev, pdata)) {
+		dev_err(dev, "failed to parse pinctrl data\n");
+		goto out;
+	}
+
+	return pdata;
+out:
+	return NULL;
+}
+
 static int gpio_extcon_probe(struct platform_device *pdev)
 {
 	struct gpio_extcon_pdata *pdata = dev_get_platdata(&pdev->dev);
 	struct gpio_extcon_data *data;
 	int ret;
 
-	if (!pdata)
-		return -EBUSY;
-	if (!pdata->irq_flags || pdata->extcon_id > EXTCON_NONE)
+	if (!pdata) {
+		/* try populating pdata from device tree */
+		pdata = extcon_populate_pdata(&pdev->dev);
+		if (!pdata)
+			return -EBUSY;
+	}
+	if (!pdata->irq_flags || pdata->extcon_id >= EXTCON_NUM)
 		return -EINVAL;
 
 	data = devm_kzalloc(&pdev->dev, sizeof(struct gpio_extcon_data),
@@ -108,13 +188,27 @@
 		return -ENOMEM;
 	data->pdata = pdata;
 
+	ret = pinctrl_select_state(pdata->pctrl, pdata->pins_default);
+	if (ret < 0)
+		dev_err(&pdev->dev, "pinctrl state select failed, ret %d\n",
+			ret);
+
 	/* Initialize the gpio */
 	ret = gpio_extcon_init(&pdev->dev, data);
 	if (ret < 0)
 		return ret;
 
+	data->supported_cable = devm_kzalloc(&pdev->dev,
+					     sizeof(*data->supported_cable) * 2,
+					     GFP_KERNEL);
+	if (!data->supported_cable)
+		return -ENOMEM;
+
+	data->supported_cable[0] = pdata->extcon_id;
+	data->supported_cable[1] = EXTCON_NONE;
 	/* Allocate the memory of extcon devie and register extcon device */
-	data->edev = devm_extcon_dev_allocate(&pdev->dev, &pdata->extcon_id);
+	data->edev = devm_extcon_dev_allocate(&pdev->dev,
+					      data->supported_cable);
 	if (IS_ERR(data->edev)) {
 		dev_err(&pdev->dev, "failed to allocate extcon device\n");
 		return -ENOMEM;
@@ -168,12 +262,18 @@
 
 static SIMPLE_DEV_PM_OPS(gpio_extcon_pm_ops, NULL, gpio_extcon_resume);
 
+static const struct of_device_id extcon_gpio_of_match[] = {
+	{ .compatible = "extcon-gpio"},
+	{},
+};
+
 static struct platform_driver gpio_extcon_driver = {
 	.probe		= gpio_extcon_probe,
 	.remove		= gpio_extcon_remove,
 	.driver		= {
 		.name	= "extcon-gpio",
 		.pm	= &gpio_extcon_pm_ops,
+		.of_match_table = of_match_ptr(extcon_gpio_of_match),
 	},
 };
 
diff --git a/drivers/gpu/drm/msm/dp/dp_usbpd.c b/drivers/gpu/drm/msm/dp/dp_usbpd.c
new file mode 100644
index 0000000..089177f
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_usbpd.c
@@ -0,0 +1,438 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"[drm-dp] %s: " fmt, __func__
+
+#include <linux/slab.h>
+#include <linux/device.h>
+
+#include "dp_usbpd.h"
+
+/* DP specific VDM commands */
+#define DP_USBPD_VDM_STATUS	0x10
+#define DP_USBPD_VDM_CONFIGURE	0x11
+
+/* USBPD-TypeC specific Macros */
+#define VDM_VERSION		0x0
+#define USB_C_DP_SID		0xFF01
+
+enum dp_usbpd_pin_assignment {
+	DP_USBPD_PIN_A,
+	DP_USBPD_PIN_B,
+	DP_USBPD_PIN_C,
+	DP_USBPD_PIN_D,
+	DP_USBPD_PIN_E,
+	DP_USBPD_PIN_F,
+	DP_USBPD_PIN_MAX,
+};
+
+enum dp_usbpd_events {
+	DP_USBPD_EVT_DISCOVER,
+	DP_USBPD_EVT_ENTER,
+	DP_USBPD_EVT_STATUS,
+	DP_USBPD_EVT_CONFIGURE,
+	DP_USBPD_EVT_CC_PIN_POLARITY,
+	DP_USBPD_EVT_EXIT,
+	DP_USBPD_EVT_ATTENTION,
+};
+
+enum dp_usbpd_alt_mode {
+	DP_USBPD_ALT_MODE_NONE	    = 0,
+	DP_USBPD_ALT_MODE_INIT	    = BIT(0),
+	DP_USBPD_ALT_MODE_DISCOVER  = BIT(1),
+	DP_USBPD_ALT_MODE_ENTER	    = BIT(2),
+	DP_USBPD_ALT_MODE_STATUS    = BIT(3),
+	DP_USBPD_ALT_MODE_CONFIGURE = BIT(4),
+};
+
+struct dp_usbpd_capabilities {
+	enum dp_usbpd_port port;
+	bool receptacle_state;
+	u8 ulink_pin_config;
+	u8 dlink_pin_config;
+};
+
+struct dp_usbpd_private {
+	u32 vdo;
+	struct device *dev;
+	struct usbpd *pd;
+	struct usbpd_svid_handler svid_handler;
+	struct dp_usbpd_cb *dp_cb;
+	struct dp_usbpd_capabilities cap;
+	struct dp_usbpd dp_usbpd;
+	enum dp_usbpd_alt_mode alt_mode;
+	u32 dp_usbpd_config;
+};
+
+static const char *dp_usbpd_pin_name(u8 pin)
+{
+	switch (pin) {
+	case DP_USBPD_PIN_A: return "DP_USBPD_PIN_ASSIGNMENT_A";
+	case DP_USBPD_PIN_B: return "DP_USBPD_PIN_ASSIGNMENT_B";
+	case DP_USBPD_PIN_C: return "DP_USBPD_PIN_ASSIGNMENT_C";
+	case DP_USBPD_PIN_D: return "DP_USBPD_PIN_ASSIGNMENT_D";
+	case DP_USBPD_PIN_E: return "DP_USBPD_PIN_ASSIGNMENT_E";
+	case DP_USBPD_PIN_F: return "DP_USBPD_PIN_ASSIGNMENT_F";
+	default: return "UNKNOWN";
+	}
+}
+
+static const char *dp_usbpd_port_name(enum dp_usbpd_port port)
+{
+	switch (port) {
+	case DP_USBPD_PORT_NONE: return "DP_USBPD_PORT_NONE";
+	case DP_USBPD_PORT_UFP_D: return "DP_USBPD_PORT_UFP_D";
+	case DP_USBPD_PORT_DFP_D: return "DP_USBPD_PORT_DFP_D";
+	case DP_USBPD_PORT_D_UFP_D: return "DP_USBPD_PORT_D_UFP_D";
+	default: return "DP_USBPD_PORT_NONE";
+	}
+}
+
+static const char *dp_usbpd_cmd_name(u8 cmd)
+{
+	switch (cmd) {
+	case USBPD_SVDM_DISCOVER_MODES: return "USBPD_SVDM_DISCOVER_MODES";
+	case USBPD_SVDM_ENTER_MODE: return "USBPD_SVDM_ENTER_MODE";
+	case USBPD_SVDM_ATTENTION: return "USBPD_SVDM_ATTENTION";
+	case DP_USBPD_VDM_STATUS: return "DP_USBPD_VDM_STATUS";
+	case DP_USBPD_VDM_CONFIGURE: return "DP_USBPD_VDM_CONFIGURE";
+	default: return "DP_USBPD_VDM_ERROR";
+	}
+}
+
+static void dp_usbpd_init_port(enum dp_usbpd_port *port, u32 in_port)
+{
+	switch (in_port) {
+	case 0:
+		*port = DP_USBPD_PORT_NONE;
+		break;
+	case 1:
+		*port = DP_USBPD_PORT_UFP_D;
+		break;
+	case 2:
+		*port = DP_USBPD_PORT_DFP_D;
+		break;
+	case 3:
+		*port = DP_USBPD_PORT_D_UFP_D;
+		break;
+	default:
+		*port = DP_USBPD_PORT_NONE;
+	}
+	pr_debug("port:%s\n", dp_usbpd_port_name(*port));
+}
+
+static void dp_usbpd_get_capabilities(struct dp_usbpd_private *pd)
+{
+	struct dp_usbpd_capabilities *cap = &pd->cap;
+	u32 buf = pd->vdo;
+	int port = buf & 0x3;
+
+	cap->receptacle_state = (buf & BIT(6)) ? true : false;
+	cap->dlink_pin_config = (buf >> 8) & 0xff;
+	cap->ulink_pin_config = (buf >> 16) & 0xff;
+
+	dp_usbpd_init_port(&cap->port, port);
+}
+
+static void dp_usbpd_get_status(struct dp_usbpd_private *pd)
+{
+	struct dp_usbpd *status = &pd->dp_usbpd;
+	u32 buf = pd->vdo;
+	int port = buf & 0x3;
+
+	status->low_pow_st     = (buf & BIT(2)) ? true : false;
+	status->adaptor_dp_en  = (buf & BIT(3)) ? true : false;
+	status->multi_func     = (buf & BIT(4)) ? true : false;
+	status->usb_config_req = (buf & BIT(5)) ? true : false;
+	status->exit_dp_mode   = (buf & BIT(6)) ? true : false;
+	status->hpd_high       = (buf & BIT(7)) ? true : false;
+	status->hpd_irq        = (buf & BIT(8)) ? true : false;
+
+	pr_debug("low_pow_st = %d, adaptor_dp_en = %d, multi_func = %d\n",
+			status->low_pow_st, status->adaptor_dp_en,
+			status->multi_func);
+	pr_debug("usb_config_req = %d, exit_dp_mode = %d, hpd_high =%d\n",
+			status->usb_config_req,
+			status->exit_dp_mode, status->hpd_high);
+	pr_debug("hpd_irq = %d\n", status->hpd_irq);
+
+	dp_usbpd_init_port(&status->port, port);
+}
+
+static u32 dp_usbpd_gen_config_pkt(struct dp_usbpd_private *pd)
+{
+	u8 pin_cfg, pin;
+	u32 config = 0;
+	const u32 ufp_d_config = 0x2, dp_ver = 0x1;
+
+	pin_cfg = pd->cap.dlink_pin_config;
+
+	for (pin = DP_USBPD_PIN_A; pin < DP_USBPD_PIN_MAX; pin++) {
+		if (pin_cfg & BIT(pin)) {
+			if (pd->dp_usbpd.multi_func) {
+				if (pin == DP_USBPD_PIN_D)
+					break;
+			} else {
+				break;
+			}
+		}
+	}
+
+	if (pin == DP_USBPD_PIN_MAX)
+		pin = DP_USBPD_PIN_C;
+
+	pr_debug("pin assignment: %s\n", dp_usbpd_pin_name(pin));
+
+	config |= BIT(pin) << 8;
+
+	config |= (dp_ver << 2);
+	config |= ufp_d_config;
+
+	pr_debug("config = 0x%x\n", config);
+	return config;
+}
+
+static void dp_usbpd_send_event(struct dp_usbpd_private *pd,
+		enum dp_usbpd_events event)
+{
+	u32 config;
+
+	switch (event) {
+	case DP_USBPD_EVT_DISCOVER:
+		usbpd_send_svdm(pd->pd, USB_C_DP_SID,
+			USBPD_SVDM_DISCOVER_MODES,
+			SVDM_CMD_TYPE_INITIATOR, 0x0, 0x0, 0x0);
+		break;
+	case DP_USBPD_EVT_ENTER:
+		usbpd_send_svdm(pd->pd, USB_C_DP_SID,
+			USBPD_SVDM_ENTER_MODE,
+			SVDM_CMD_TYPE_INITIATOR, 0x1, 0x0, 0x0);
+		break;
+	case DP_USBPD_EVT_EXIT:
+		usbpd_send_svdm(pd->pd, USB_C_DP_SID,
+			USBPD_SVDM_EXIT_MODE,
+			SVDM_CMD_TYPE_INITIATOR, 0x1, 0x0, 0x0);
+		break;
+	case DP_USBPD_EVT_STATUS:
+		config = 0x1; /* DFP_D connected */
+		usbpd_send_svdm(pd->pd, USB_C_DP_SID, DP_USBPD_VDM_STATUS,
+			SVDM_CMD_TYPE_INITIATOR, 0x1, &config, 0x1);
+		break;
+	case DP_USBPD_EVT_CONFIGURE:
+		config = dp_usbpd_gen_config_pkt(pd);
+		usbpd_send_svdm(pd->pd, USB_C_DP_SID, DP_USBPD_VDM_CONFIGURE,
+			SVDM_CMD_TYPE_INITIATOR, 0x1, &config, 0x1);
+		break;
+	default:
+		pr_err("unknown event:%d\n", event);
+	}
+}
+
+static void dp_usbpd_connect_cb(struct usbpd_svid_handler *hdlr)
+{
+	struct dp_usbpd_private *pd;
+
+	pd = container_of(hdlr, struct dp_usbpd_private, svid_handler);
+	if (!pd) {
+		pr_err("get_usbpd phandle failed\n");
+		return;
+	}
+
+	pr_debug("\n");
+	dp_usbpd_send_event(pd, DP_USBPD_EVT_DISCOVER);
+}
+
+static void dp_usbpd_disconnect_cb(struct usbpd_svid_handler *hdlr)
+{
+	struct dp_usbpd_private *pd;
+
+	pd = container_of(hdlr, struct dp_usbpd_private, svid_handler);
+	if (!pd) {
+		pr_err("get_usbpd phandle failed\n");
+		return;
+	}
+
+	pd->alt_mode = DP_USBPD_ALT_MODE_NONE;
+	pd->dp_usbpd.alt_mode_cfg_done = false;
+	pr_debug("\n");
+
+	if (pd->dp_cb && pd->dp_cb->disconnect)
+		pd->dp_cb->disconnect(pd->dev);
+}
+
+static int dp_usbpd_validate_callback(u8 cmd,
+	enum usbpd_svdm_cmd_type cmd_type, int num_vdos)
+{
+	int ret = 0;
+
+	if (cmd_type == SVDM_CMD_TYPE_RESP_NAK) {
+		pr_err("error: NACK\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	if (cmd_type == SVDM_CMD_TYPE_RESP_BUSY) {
+		pr_err("error: BUSY\n");
+		ret = -EBUSY;
+		goto end;
+	}
+
+	if (cmd == USBPD_SVDM_ATTENTION) {
+		if (cmd_type != SVDM_CMD_TYPE_INITIATOR) {
+			pr_err("error: invalid cmd type for attention\n");
+			ret = -EINVAL;
+			goto end;
+		}
+
+		if (!num_vdos) {
+			pr_err("error: no vdo provided\n");
+			ret = -EINVAL;
+			goto end;
+		}
+	} else {
+		if (cmd_type != SVDM_CMD_TYPE_RESP_ACK) {
+			pr_err("error: invalid cmd type\n");
+			ret = -EINVAL;
+		}
+	}
+end:
+	return ret;
+}
+
+static void dp_usbpd_response_cb(struct usbpd_svid_handler *hdlr, u8 cmd,
+				enum usbpd_svdm_cmd_type cmd_type,
+				const u32 *vdos, int num_vdos)
+{
+	struct dp_usbpd_private *pd;
+
+	pd = container_of(hdlr, struct dp_usbpd_private, svid_handler);
+
+	pr_debug("callback -> cmd: %s, *vdos = 0x%x, num_vdos = %d\n",
+				dp_usbpd_cmd_name(cmd), *vdos, num_vdos);
+
+	if (dp_usbpd_validate_callback(cmd, cmd_type, num_vdos)) {
+		pr_debug("invalid callback received\n");
+		return;
+	}
+
+	switch (cmd) {
+	case USBPD_SVDM_DISCOVER_MODES:
+		pd->vdo = *vdos;
+		dp_usbpd_get_capabilities(pd);
+
+		pd->alt_mode |= DP_USBPD_ALT_MODE_DISCOVER;
+
+		if (pd->cap.port & BIT(0))
+			dp_usbpd_send_event(pd, DP_USBPD_EVT_ENTER);
+		break;
+	case USBPD_SVDM_ENTER_MODE:
+		pd->alt_mode |= DP_USBPD_ALT_MODE_ENTER;
+
+		dp_usbpd_send_event(pd, DP_USBPD_EVT_STATUS);
+		break;
+	case USBPD_SVDM_ATTENTION:
+		pd->vdo = *vdos;
+		dp_usbpd_get_status(pd);
+
+		if (pd->dp_cb && pd->dp_cb->attention)
+			pd->dp_cb->attention(pd->dev);
+		break;
+	case DP_USBPD_VDM_STATUS:
+		pd->vdo = *vdos;
+		dp_usbpd_get_status(pd);
+
+		if (!(pd->alt_mode & DP_USBPD_ALT_MODE_CONFIGURE)) {
+			pd->alt_mode |= DP_USBPD_ALT_MODE_STATUS;
+
+			if (pd->dp_usbpd.port & BIT(1))
+				dp_usbpd_send_event(pd, DP_USBPD_EVT_CONFIGURE);
+		}
+		break;
+	case DP_USBPD_VDM_CONFIGURE:
+		pd->alt_mode |= DP_USBPD_ALT_MODE_CONFIGURE;
+		pd->dp_usbpd.alt_mode_cfg_done = true;
+		dp_usbpd_get_status(pd);
+
+		pd->dp_usbpd.orientation = usbpd_get_plug_orientation(pd->pd);
+
+		if (pd->dp_cb && pd->dp_cb->configure)
+			pd->dp_cb->configure(pd->dev);
+		break;
+	default:
+		pr_err("unknown cmd: %d\n", cmd);
+		break;
+	}
+}
+
+struct dp_usbpd *dp_usbpd_get(struct device *dev, struct dp_usbpd_cb *cb)
+{
+	int rc = 0;
+	const char *pd_phandle = "qcom,dp-usbpd-detection";
+	struct usbpd *pd = NULL;
+	struct dp_usbpd_private *usbpd;
+	struct usbpd_svid_handler svid_handler = {
+		.svid		= USB_C_DP_SID,
+		.vdm_received	= NULL,
+		.connect	= &dp_usbpd_connect_cb,
+		.svdm_received	= &dp_usbpd_response_cb,
+		.disconnect	= &dp_usbpd_disconnect_cb,
+	};
+
+	if (!cb) {
+		pr_err("invalid cb data\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	pd = devm_usbpd_get_by_phandle(dev, pd_phandle);
+	if (IS_ERR(pd)) {
+		pr_err("usbpd phandle failed (%ld)\n", PTR_ERR(pd));
+		rc = PTR_ERR(pd);
+		goto error;
+	}
+
+	usbpd = devm_kzalloc(dev, sizeof(*usbpd), GFP_KERNEL);
+	if (!usbpd) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	usbpd->dev = dev;
+	usbpd->pd = pd;
+	usbpd->svid_handler = svid_handler;
+	usbpd->dp_cb = cb;
+
+	rc = usbpd_register_svid(pd, &usbpd->svid_handler);
+	if (rc) {
+		pr_err("pd registration failed\n");
+		rc = -ENODEV;
+		kfree(usbpd);
+		goto error;
+	}
+	return &usbpd->dp_usbpd;
+error:
+	return ERR_PTR(rc);
+}
+
+void dp_usbpd_put(struct dp_usbpd *dp_usbpd)
+{
+	struct dp_usbpd_private *usbpd;
+
+	if (!dp_usbpd)
+		return;
+
+	usbpd = container_of(dp_usbpd, struct dp_usbpd_private, dp_usbpd);
+
+	kfree(usbpd);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_usbpd.h b/drivers/gpu/drm/msm/dp/dp_usbpd.h
new file mode 100644
index 0000000..67f380a
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_usbpd.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DP_USBPD_H_
+#define _DP_USBPD_H_
+
+#include <linux/usb/usbpd.h>
+
+#include <linux/types.h>
+#include <linux/device.h>
+
+/**
+ * enum dp_usbpd_port - usb/dp port type
+ * @DP_USBPD_PORT_NONE: port not configured
+ * @DP_USBPD_PORT_UFP_D: Upstream Facing Port - DisplayPort
+ * @DP_USBPD_PORT_DFP_D: Downstream Facing Port - DisplayPort
+ * @DP_USBPD_PORT_D_UFP_D: Both UFP & DFP - DisplayPort
+ */
+
+enum dp_usbpd_port {
+	DP_USBPD_PORT_NONE,
+	DP_USBPD_PORT_UFP_D,
+	DP_USBPD_PORT_DFP_D,
+	DP_USBPD_PORT_D_UFP_D,
+};
+
+/**
+ * struct dp_usbpd - DisplayPort status
+ *
+ * @port: port configured
+ * @orientation: plug orientation configuration
+ * @low_pow_st: low power state
+ * @adaptor_dp_en: adaptor functionality enabled
+ * @multi_func: multi-function preferred
+ * @usb_config_req: request to switch to usb
+ * @exit_dp_mode: request exit from displayport mode
+ * @hpd_high: Hot Plug Detect signal is high.
+ * @hpd_irq: Change in the status since last message
+ * @alt_mode_cfg_done: bool to specify alt mode status
+ */
+struct dp_usbpd {
+	enum dp_usbpd_port port;
+	enum plug_orientation orientation;
+	bool low_pow_st;
+	bool adaptor_dp_en;
+	bool multi_func;
+	bool usb_config_req;
+	bool exit_dp_mode;
+	bool hpd_high;
+	bool hpd_irq;
+	bool alt_mode_cfg_done;
+};
+
+/**
+ * struct dp_usbpd_cb - callback functions provided by the client
+ *
+ * @configure: called by usbpd module when PD communication has
+ * been completed and the usb peripheral has been configured in
+ * dp mode.
+ * @disconnect: notify the cable disconnect issued by usb.
+ * @attention: notify any attention message issued by usb.
+ */
+struct dp_usbpd_cb {
+	int (*configure)(struct device *dev);
+	int (*disconnect)(struct device *dev);
+	int (*attention)(struct device *dev);
+};
+
+/**
+ * dp_usbpd_get() - setup usbpd module
+ *
+ * @dev: device instance of the caller
+ * @cb: struct containing callback function pointers.
+ *
+ * This function allows the client to initialize the usbpd
+ * module. The module will communicate with usb driver and
+ * handles the power delivery (PD) communication with the
+ * sink/usb device. This module will notify the client using
+ * the callback functions about the connection and status.
+ */
+struct dp_usbpd *dp_usbpd_get(struct device *dev, struct dp_usbpd_cb *cb);
+
+void dp_usbpd_put(struct dp_usbpd *pd);
+#endif /* _DP_USBPD_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index 5604bf1..f187ad1 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -697,7 +697,7 @@
 	bus->bus_scale_table = msm_bus_cl_get_pdata(pdev);
 	if (IS_ERR_OR_NULL(bus->bus_scale_table)) {
 		rc = PTR_ERR(bus->bus_scale_table);
-		pr_err("msm_bus_cl_get_pdata() failed, rc = %d\n", rc);
+		pr_debug("msm_bus_cl_get_pdata() failed, rc = %d\n", rc);
 		bus->bus_scale_table = NULL;
 		return rc;
 	}
@@ -1256,7 +1256,7 @@
 
 	rc = dsi_ctrl_axi_bus_client_init(pdev, dsi_ctrl);
 	if (rc)
-		pr_err("failed to init axi bus client, rc = %d\n", rc);
+		pr_debug("failed to init axi bus client, rc = %d\n", rc);
 
 	item->ctrl = dsi_ctrl;
 
@@ -1556,19 +1556,9 @@
 	return rc;
 }
 
-/**
- * dsi_ctrl_setup() - Setup DSI host hardware while coming out of idle screen.
- * @dsi_ctrl:        DSI controller handle.
- *
- * Initializes DSI controller hardware with host configuration provided by
- * dsi_ctrl_update_host_config(). Initialization can be performed only during
- * DSI_CTRL_POWER_CORE_CLK_ON state and after the PHY SW reset has been
- * performed.
- *
- * Return: error code.
- */
 int dsi_ctrl_setup(struct dsi_ctrl *dsi_ctrl)
 {
+	struct dsi_mode_info video_timing;
 	int rc = 0;
 
 	if (!dsi_ctrl) {
@@ -1578,6 +1568,12 @@
 
 	mutex_lock(&dsi_ctrl->ctrl_lock);
 
+	/* replace video mode width with actual roi width */
+	memcpy(&video_timing, &dsi_ctrl->host_config.video_timing,
+			sizeof(video_timing));
+	video_timing.h_active = dsi_ctrl->roi.w;
+	video_timing.v_active = dsi_ctrl->roi.h;
+
 	dsi_ctrl->hw.ops.setup_lane_map(&dsi_ctrl->hw,
 					&dsi_ctrl->host_config.lane_map);
 
@@ -1590,8 +1586,8 @@
 					&dsi_ctrl->host_config.u.cmd_engine);
 
 		dsi_ctrl->hw.ops.setup_cmd_stream(&dsi_ctrl->hw,
-				&dsi_ctrl->host_config.video_timing,
-				dsi_ctrl->host_config.video_timing.h_active * 3,
+				&video_timing,
+				video_timing.h_active * 3,
 				0x0);
 		dsi_ctrl->hw.ops.cmd_engine_en(&dsi_ctrl->hw, true);
 	} else {
@@ -1611,6 +1607,26 @@
 	return rc;
 }
 
+int dsi_ctrl_set_roi(struct dsi_ctrl *dsi_ctrl, struct dsi_rect *roi,
+		bool *changed)
+{
+	int rc = 0;
+
+	if (!dsi_ctrl || !roi || !changed) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+	if (!dsi_rect_is_equal(&dsi_ctrl->roi, roi)) {
+		*changed = true;
+		memcpy(&dsi_ctrl->roi, roi, sizeof(dsi_ctrl->roi));
+	} else
+		*changed = false;
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
+
 /**
  * dsi_ctrl_phy_reset_config() - Mask/unmask propagation of ahb reset signal
  *	to DSI PHY hardware.
@@ -1789,6 +1805,13 @@
 
 	pr_debug("[DSI_%d]Host config updated\n", ctrl->cell_index);
 	memcpy(&ctrl->host_config, config, sizeof(ctrl->host_config));
+	ctrl->mode_bounds.x = ctrl->host_config.video_timing.h_active *
+			ctrl->horiz_index;
+	ctrl->mode_bounds.y = 0;
+	ctrl->mode_bounds.w = ctrl->host_config.video_timing.h_active;
+	ctrl->mode_bounds.h = ctrl->host_config.video_timing.v_active;
+	memcpy(&ctrl->roi, &ctrl->mode_bounds, sizeof(ctrl->mode_bounds));
+	ctrl->roi.x = 0;
 error:
 	mutex_unlock(&ctrl->ctrl_lock);
 	return rc;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
index f8adbea..f89cb68 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
@@ -168,6 +168,7 @@
  * struct dsi_ctrl - DSI controller object
  * @pdev:                Pointer to platform device.
  * @cell_index:          Instance cell id.
+ * @horiz_index:         Index in physical horizontal CTRL layout, 0 = leftmost
  * @name:                Name of the controller instance.
  * @refcount:            ref counter.
  * @ctrl_lock:           Mutex for hardware and object access.
@@ -182,6 +183,10 @@
  * @pwr_info:            Power information.
  * @axi_bus_info:        AXI bus information.
  * @host_config:         Current host configuration.
+ * @mode_bounds:         Boundaries of the default mode ROI.
+ *                       Origin is at top left of all CTRLs.
+ * @roi:                 Partial update region of interest.
+ *                       Origin is top left of this CTRL.
  * @tx_cmd_buf:          Tx command buffer.
  * @cmd_buffer_size:     Size of command buffer.
  * @debugfs_root:        Root for debugfs entries.
@@ -189,6 +194,7 @@
 struct dsi_ctrl {
 	struct platform_device *pdev;
 	u32 cell_index;
+	u32 horiz_index;
 	const char *name;
 	u32 refcount;
 	struct mutex ctrl_lock;
@@ -209,6 +215,9 @@
 	struct dsi_ctrl_bus_scale_info axi_bus_info;
 
 	struct dsi_host_config host_config;
+	struct dsi_rect mode_bounds;
+	struct dsi_rect roi;
+
 	/* Command tx and rx */
 	struct drm_gem_object *tx_cmd_buf;
 	u32 cmd_buffer_size;
@@ -387,11 +396,25 @@
  * DSI_CTRL_POWER_CORE_CLK_ON state and after the PHY SW reset has been
  * performed.
  *
+ * Also used to program the video mode timing values.
+ *
  * Return: error code.
  */
 int dsi_ctrl_setup(struct dsi_ctrl *dsi_ctrl);
 
 /**
+ * dsi_ctrl_set_roi() - Set DSI controller's region of interest
+ * @dsi_ctrl:        DSI controller handle.
+ * @roi:             Region of interest rectangle, must be less than mode bounds
+ * @changed:         Output parameter, set to true if the controller's ROI was
+ *                   dirtied by setting the new ROI, and DCS cmd update needed
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_roi(struct dsi_ctrl *dsi_ctrl, struct dsi_rect *roi,
+		bool *changed);
+
+/**
  * dsi_ctrl_set_tpg_state() - enable/disable test pattern on the controller
  * @dsi_ctrl:          DSI controller handle.
  * @on:                enable/disable test pattern.
@@ -401,7 +424,6 @@
  *
  * Return: error code.
  */
-
 int dsi_ctrl_set_tpg_state(struct dsi_ctrl *dsi_ctrl, bool on);
 
 /**
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
index 563285d..cf36315 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
@@ -411,4 +411,40 @@
 	struct msm_mode_info *mode_info;
 };
 
+/**
+ * struct dsi_rect - dsi rectangle representation
+ * Note: sde_rect is also using u16, this must be maintained for memcpy
+ */
+struct dsi_rect {
+	u16 x;
+	u16 y;
+	u16 w;
+	u16 h;
+};
+
+/**
+ * dsi_rect_intersect - intersect two rectangles
+ * @r1: first rectangle
+ * @r2: scissor rectangle
+ * @result: result rectangle, all 0's on no intersection found
+ */
+void dsi_rect_intersect(const struct dsi_rect *r1,
+		const struct dsi_rect *r2,
+		struct dsi_rect *result);
+
+/**
+ * dsi_rect_is_equal - compares two rects
+ * @r1: rect value to compare
+ * @r2: rect value to compare
+ *
+ * Returns true if the rects are same
+ */
+static inline bool dsi_rect_is_equal(struct dsi_rect *r1,
+		struct dsi_rect *r2)
+{
+	return r1->x == r2->x && r1->y == r2->y && r1->w == r2->w &&
+			r1->h == r2->h;
+}
+
+
 #endif /* _DSI_DEFS_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index 3402d48..c2cf2cb 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -38,6 +38,30 @@
 
 static struct dsi_display *main_display;
 
+void dsi_rect_intersect(const struct dsi_rect *r1,
+		const struct dsi_rect *r2,
+		struct dsi_rect *result)
+{
+	int l, t, r, b;
+
+	if (!r1 || !r2 || !result)
+		return;
+
+	l = max(r1->x, r2->x);
+	t = max(r1->y, r2->y);
+	r = min((r1->x + r1->w), (r2->x + r2->w));
+	b = min((r1->y + r1->h), (r2->y + r2->h));
+
+	if (r <= l || b <= t) {
+		memset(result, 0, sizeof(*result));
+	} else {
+		result->x = l;
+		result->y = t;
+		result->w = r - l;
+		result->h = b - t;
+	}
+}
+
 int dsi_display_set_backlight(void *display, u32 bl_lvl)
 {
 	struct dsi_display *dsi_display = display;
@@ -1243,7 +1267,7 @@
 	mux->byte_clk = devm_clk_get(&display->pdev->dev, "mux_byte_clk");
 	if (IS_ERR_OR_NULL(mux->byte_clk)) {
 		rc = PTR_ERR(mux->byte_clk);
-		pr_err("failed to get mux_byte_clk, rc=%d\n", rc);
+		pr_debug("failed to get mux_byte_clk, rc=%d\n", rc);
 		mux->byte_clk = NULL;
 		/*
 		 * Skip getting rest of clocks since one failed. This is a
@@ -1258,7 +1282,7 @@
 	if (IS_ERR_OR_NULL(mux->pixel_clk)) {
 		rc = PTR_ERR(mux->pixel_clk);
 		mux->pixel_clk = NULL;
-		pr_err("failed to get mux_pixel_clk, rc=%d\n", rc);
+		pr_debug("failed to get mux_pixel_clk, rc=%d\n", rc);
 		/*
 		 * Skip getting rest of clocks since one failed. This is a
 		 * non-critical failure since these clocks are requied only for
@@ -1570,7 +1594,7 @@
 			display->lane_map.lane_map_v2[i] = BIT(temp[i]);
 		return 0;
 	} else if (rc != EINVAL) {
-		pr_warn("Incorrect mapping, configure default\n");
+		pr_debug("Incorrect mapping, configure default\n");
 		goto set_default;
 	}
 
@@ -2285,6 +2309,7 @@
 			       display->name, i, rc);
 			goto error_ctrl_deinit;
 		}
+		display_ctrl->ctrl->horiz_index = i;
 
 		rc = dsi_phy_drv_init(display_ctrl->phy);
 		if (rc) {
@@ -2742,12 +2767,18 @@
 		break;
 	case DSI_OP_CMD_MODE:
 		info->capabilities |= MSM_DISPLAY_CAP_CMD_MODE;
+		info->is_te_using_watchdog_timer =
+			display->panel->te_using_watchdog_timer;
 		break;
 	default:
 		pr_err("unknwown dsi panel mode %d\n",
 				display->panel->mode.panel_mode);
 		break;
 	}
+
+	memcpy(&info->roi_caps, &display->panel->roi_caps,
+			sizeof(info->roi_caps));
+
 error:
 	mutex_unlock(&display->display_lock);
 	return rc;
@@ -3034,10 +3065,110 @@
 	return rc;
 }
 
+static int dsi_display_calc_ctrl_roi(const struct dsi_display *display,
+		const struct dsi_display_ctrl *ctrl,
+		const struct msm_roi_list *req_rois,
+		struct dsi_rect *out_roi)
+{
+	const struct dsi_rect *bounds = &ctrl->ctrl->mode_bounds;
+	struct dsi_rect req_roi = { 0 };
+	int rc = 0;
+
+	if (req_rois->num_rects > display->panel->roi_caps.num_roi) {
+		pr_err("request for %d rois greater than max %d\n",
+				req_rois->num_rects,
+				display->panel->roi_caps.num_roi);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	/**
+	 * if no rois, user wants to reset back to full resolution
+	 * note: h_active is already divided by ctrl_count
+	 */
+	if (!req_rois->num_rects) {
+		*out_roi = *bounds;
+		goto exit;
+	}
+
+	/* intersect with the bounds */
+	req_roi.x = req_rois->roi[0].x1;
+	req_roi.y = req_rois->roi[0].y1;
+	req_roi.w = req_rois->roi[0].x2 - req_rois->roi[0].x1;
+	req_roi.h = req_rois->roi[0].y2 - req_rois->roi[0].y1;
+	dsi_rect_intersect(&req_roi, bounds, out_roi);
+
+exit:
+	/* adjust the ctrl origin to be top left within the ctrl */
+	out_roi->x = out_roi->x - bounds->x;
+
+	pr_debug("ctrl%d:%d: req (%d,%d,%d,%d) bnd (%d,%d,%d,%d) out (%d,%d,%d,%d)\n",
+			ctrl->dsi_ctrl_idx, ctrl->ctrl->cell_index,
+			req_roi.x, req_roi.y, req_roi.w, req_roi.h,
+			bounds->x, bounds->y, bounds->w, bounds->h,
+			out_roi->x, out_roi->y, out_roi->w, out_roi->h);
+
+	return rc;
+}
+
+static int dsi_display_set_roi(struct dsi_display *display,
+		struct msm_roi_list *rois)
+{
+	int rc = 0;
+	int i;
+
+	if (!display || !rois || !display->panel)
+		return -EINVAL;
+
+	if (!display->panel->roi_caps.enabled)
+		return 0;
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		struct dsi_display_ctrl *ctrl = &display->ctrl[i];
+		struct dsi_rect ctrl_roi;
+		bool changed = false;
+
+		rc = dsi_display_calc_ctrl_roi(display, ctrl, rois, &ctrl_roi);
+		if (rc) {
+			pr_err("dsi_display_calc_ctrl_roi failed rc %d\n", rc);
+			return rc;
+		}
+
+		rc = dsi_ctrl_set_roi(ctrl->ctrl, &ctrl_roi, &changed);
+		if (rc) {
+			pr_err("dsi_ctrl_set_roi failed rc %d\n", rc);
+			return rc;
+		}
+
+		if (!changed)
+			continue;
+
+		/* send the new roi to the panel via dcs commands */
+		rc = dsi_panel_send_roi_dcs(display->panel, i, &ctrl_roi);
+		if (rc) {
+			pr_err("dsi_panel_set_roi failed rc %d\n", rc);
+			return rc;
+		}
+
+		/* re-program the ctrl with the timing based on the new roi */
+		rc = dsi_ctrl_setup(ctrl->ctrl);
+		if (rc) {
+			pr_err("dsi_ctrl_setup failed rc %d\n", rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
 int dsi_display_pre_kickoff(struct dsi_display *display,
 		struct msm_display_kickoff_params *params)
 {
-	return 0;
+	int rc = 0;
+
+	rc = dsi_display_set_roi(display, params->rois);
+
+	return rc;
 }
 
 int dsi_display_enable(struct dsi_display *display)
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
index 3f4bb5a5..37ed411 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
@@ -357,6 +357,10 @@
 	case DSI_PANEL_ROTATE_V_FLIP:
 		sde_kms_info_add_keystr(info, "panel orientation", "vert flip");
 		break;
+	case DSI_PANEL_ROTATE_HV_FLIP:
+		sde_kms_info_add_keystr(info, "panel orientation",
+							"horz & vert flip");
+		break;
 	default:
 		pr_debug("invalid panel rotation:%d\n",
 						panel->phy_props.rotation);
@@ -379,6 +383,25 @@
 		break;
 	}
 
+	if (panel->roi_caps.enabled) {
+		sde_kms_info_add_keyint(info, "partial_update_num_roi",
+				panel->roi_caps.num_roi);
+		sde_kms_info_add_keyint(info, "partial_update_xstart",
+				panel->roi_caps.align.xstart_pix_align);
+		sde_kms_info_add_keyint(info, "partial_update_walign",
+				panel->roi_caps.align.width_pix_align);
+		sde_kms_info_add_keyint(info, "partial_update_wmin",
+				panel->roi_caps.align.min_width);
+		sde_kms_info_add_keyint(info, "partial_update_ystart",
+				panel->roi_caps.align.ystart_pix_align);
+		sde_kms_info_add_keyint(info, "partial_update_halign",
+				panel->roi_caps.align.height_pix_align);
+		sde_kms_info_add_keyint(info, "partial_update_hmin",
+				panel->roi_caps.align.min_height);
+		sde_kms_info_add_keyint(info, "partial_update_roimerge",
+				panel->roi_caps.merge_rois);
+	}
+
 end:
 	return 0;
 }
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index 34aaea2..dcb787b 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -12,10 +12,12 @@
  *
  */
 
+#define pr_fmt(fmt)	"msm-dsi-panel:[%s:%d] " fmt, __func__, __LINE__
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/gpio.h>
 #include <linux/of_gpio.h>
+#include <video/mipi_display.h>
 
 #include "dsi_panel.h"
 #include "dsi_ctrl_hw.h"
@@ -1289,6 +1291,8 @@
 	"qcom,video-to-cmd-mode-switch-commands",
 	"qcom,video-to-cmd-mode-post-switch-commands",
 	"qcom,mdss-dsi-panel-status-command",
+	"PPS not parsed from DTSI, generated dynamically",
+	"ROI not parsed from DTSI, generated dynamically",
 };
 
 const char *cmd_set_state_map[DSI_CMD_SET_MAX] = {
@@ -1306,6 +1310,8 @@
 	"qcom,video-to-cmd-mode-switch-commands-state",
 	"qcom,video-to-cmd-mode-post-switch-commands-state",
 	"qcom,mdss-dsi-panel-status-command-state",
+	"PPS not parsed from DTSI, generated dynamically",
+	"ROI not parsed from DTSI, generated dynamically",
 };
 
 static int dsi_panel_get_cmd_pkt_count(const char *data, u32 length, u32 *cnt)
@@ -1348,6 +1354,7 @@
 		cmd[i].last_command = (data[1] == 1 ? true : false);
 		cmd[i].msg.channel = data[2];
 		cmd[i].msg.flags |= (data[3] == 1 ? MIPI_DSI_MSG_REQ_ACK : 0);
+		cmd[i].msg.ctrl = 0;
 		cmd[i].post_wait_ms = data[4];
 		cmd[i].msg.tx_len = ((data[5] << 8) | (data[6]));
 
@@ -1376,7 +1383,7 @@
 	return rc;
 }
 
-static void dsi_panel_destroy_cmd_packets(struct dsi_panel_cmd_set *set)
+void dsi_panel_destroy_cmd_packets(struct dsi_panel_cmd_set *set)
 {
 	u32 i = 0;
 	struct dsi_cmd_desc *cmd;
@@ -1415,7 +1422,7 @@
 
 	data = of_get_property(of_node, cmd_set_prop_map[type], &length);
 	if (!data) {
-		pr_err("%s commands not defined\n", cmd_set_prop_map[type]);
+		pr_debug("%s commands not defined\n", cmd_set_prop_map[type]);
 		rc = -ENOTSUPP;
 		goto error;
 	}
@@ -1471,6 +1478,8 @@
 	for (i = DSI_CMD_SET_PRE_ON; i < DSI_CMD_SET_MAX; i++) {
 		set = &panel->cmd_sets[i];
 		set->type = i;
+		set->count = 0;
+
 		if (i == DSI_CMD_SET_PPS) {
 			rc = dsi_panel_alloc_cmd_packets(set, 1);
 			if (rc)
@@ -1480,7 +1489,7 @@
 		} else {
 			rc = dsi_panel_parse_cmd_sets_sub(set, i, of_node);
 			if (rc)
-				pr_err("[%s] failed to parse set %d\n",
+				pr_debug("[%s] failed to parse set %d\n",
 					panel->name, i);
 		}
 	}
@@ -1556,14 +1565,17 @@
 	return rc;
 }
 
-static int dsi_panel_parse_features(struct dsi_panel *panel,
+static int dsi_panel_parse_misc_features(struct dsi_panel *panel,
 				     struct device_node *of_node)
 {
 	panel->ulps_enabled =
 		of_property_read_bool(of_node, "qcom,ulps-enabled");
 
-	pr_debug("ulps_enabled:%d\n", panel->ulps_enabled);
+	if (panel->ulps_enabled)
+		pr_debug("ulps_enabled:%d\n", panel->ulps_enabled);
 
+	panel->te_using_watchdog_timer = of_property_read_bool(of_node,
+					"qcom,mdss-dsi-te-using-wd");
 	return 0;
 }
 
@@ -1786,8 +1798,8 @@
 					      "qcom,platform-bklight-en-gpio",
 					      0);
 	if (!gpio_is_valid(panel->bl_config.en_gpio)) {
-		pr_err("[%s] failed get bklt gpio, rc=%d\n", panel->name, rc);
-		rc = -EINVAL;
+		pr_debug("[%s] failed get bklt gpio, rc=%d\n", panel->name, rc);
+		rc = 0;
 		goto error;
 	}
 
@@ -2172,6 +2184,87 @@
 	return rc;
 }
 
+static int dsi_panel_parse_roi_alignment(struct device_node *of_node,
+					 struct msm_roi_alignment *align)
+{
+	int len = 0, rc = 0;
+	u32 value[6];
+	struct property *data;
+
+	if (!align || !of_node)
+		return -EINVAL;
+
+	memset(align, 0, sizeof(*align));
+
+	data = of_find_property(of_node, "qcom,panel-roi-alignment", &len);
+	len /= sizeof(u32);
+	if (!data) {
+		pr_err("panel roi alignment not found\n");
+		rc = -EINVAL;
+	} else if (len != 6) {
+		pr_err("incorrect roi alignment len %d\n", len);
+		rc = -EINVAL;
+	} else {
+		rc = of_property_read_u32_array(of_node,
+				"qcom,panel-roi-alignment", value, len);
+		if (rc)
+			pr_debug("error reading panel roi alignment values\n");
+		else {
+			align->xstart_pix_align = value[0];
+			align->ystart_pix_align = value[1];
+			align->width_pix_align = value[2];
+			align->height_pix_align = value[3];
+			align->min_width = value[4];
+			align->min_height = value[5];
+		}
+
+		pr_info("roi alignment: [%d, %d, %d, %d, %d, %d]\n",
+			align->xstart_pix_align,
+			align->width_pix_align,
+			align->ystart_pix_align,
+			align->height_pix_align,
+			align->min_width,
+			align->min_height);
+	}
+
+	return rc;
+}
+
+static int dsi_panel_parse_partial_update_caps(struct dsi_panel *panel,
+					       struct device_node *of_node)
+{
+	struct msm_roi_caps *roi_caps = &panel->roi_caps;
+	const char *data;
+	int rc = 0;
+
+	memset(roi_caps, 0, sizeof(*roi_caps));
+
+	data = of_get_property(of_node, "qcom,partial-update-enabled", NULL);
+	if (data) {
+		if (!strcmp(data, "dual_roi"))
+			roi_caps->num_roi = 2;
+		else
+			roi_caps->num_roi = 1;
+	}
+
+	roi_caps->merge_rois = of_property_read_bool(of_node,
+			"qcom,partial-update-roi-merge");
+
+	roi_caps->enabled = roi_caps->num_roi > 0;
+
+	pr_info("partial update num_rois=%d enabled=%d\n", roi_caps->num_roi,
+			roi_caps->enabled);
+
+	if (roi_caps->enabled)
+		rc = dsi_panel_parse_roi_alignment(of_node,
+				&panel->roi_caps.align);
+
+	if (rc)
+		memset(roi_caps, 0, sizeof(*roi_caps));
+
+	return rc;
+}
+
 struct dsi_panel *dsi_panel_get(struct device *parent,
 				struct device_node *of_node)
 {
@@ -2280,7 +2373,7 @@
 	if (rc)
 		pr_err("failed to parse panel jitter config, rc=%d\n", rc);
 
-	rc = dsi_panel_parse_features(panel, of_node);
+	rc = dsi_panel_parse_misc_features(panel, of_node);
 	if (rc)
 		pr_err("failed to parse panel features, rc=%d\n", rc);
 
@@ -2288,6 +2381,10 @@
 	if (rc)
 		pr_err("failed to parse hdr config, rc=%d\n", rc);
 
+	rc = dsi_panel_parse_partial_update_caps(panel, of_node);
+	if (rc)
+		pr_debug("failed to partial update caps, rc=%d\n", rc);
+
 	panel->panel_of_node = of_node;
 	drm_panel_init(&panel->drm_panel);
 	mutex_init(&panel->panel_lock);
@@ -2621,6 +2718,114 @@
 	return rc;
 }
 
+static int dsi_panel_roi_prepare_dcs_cmds(struct dsi_panel *panel,
+		struct dsi_rect *roi, int ctrl_idx, int unicast)
+{
+	static const int ROI_CMD_LEN = 5;
+	struct dsi_panel_cmd_set *set = &panel->cmd_sets[DSI_CMD_SET_ROI];
+	int rc = 0;
+
+	/* DTYPE_DCS_LWRITE */
+	static char *caset, *paset;
+
+	set->cmds = NULL;
+
+	caset = kzalloc(ROI_CMD_LEN, GFP_KERNEL);
+	if (!caset) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+	caset[0] = 0x2a;
+	caset[1] = (roi->x & 0xFF00) >> 8;
+	caset[2] = roi->x & 0xFF;
+	caset[3] = ((roi->x - 1 + roi->w) & 0xFF00) >> 8;
+	caset[4] = (roi->x - 1 + roi->w) & 0xFF;
+
+	paset = kzalloc(ROI_CMD_LEN, GFP_KERNEL);
+	if (!paset) {
+		rc = -ENOMEM;
+		goto error_free_mem;
+	}
+	paset[0] = 0x2b;
+	paset[1] = (roi->y & 0xFF00) >> 8;
+	paset[2] = roi->y & 0xFF;
+	paset[3] = ((roi->y - 1 + roi->h) & 0xFF00) >> 8;
+	paset[4] = (roi->y - 1 + roi->h) & 0xFF;
+
+	set->type = DSI_CMD_SET_ROI;
+	set->state = DSI_CMD_SET_STATE_LP;
+	set->count = 2; /* send caset + paset together */
+	set->cmds = kcalloc(set->count, sizeof(*set->cmds), GFP_KERNEL);
+	if (!set->cmds) {
+		rc = -ENOMEM;
+		goto error_free_mem;
+	}
+	set->cmds[0].msg.channel = 0;
+	set->cmds[0].msg.type = MIPI_DSI_DCS_LONG_WRITE;
+	set->cmds[0].msg.flags = unicast ? MIPI_DSI_MSG_UNICAST : 0;
+	set->cmds[0].msg.ctrl = unicast ? ctrl_idx : 0;
+	set->cmds[0].msg.tx_len = ROI_CMD_LEN;
+	set->cmds[0].msg.tx_buf = caset;
+	set->cmds[0].msg.rx_len = 0;
+	set->cmds[0].msg.rx_buf = 0;
+	set->cmds[0].last_command = 0;
+	set->cmds[0].post_wait_ms = 1;
+
+	set->cmds[1].msg.channel = 0;
+	set->cmds[1].msg.type = MIPI_DSI_DCS_LONG_WRITE;
+	set->cmds[1].msg.flags = unicast ? MIPI_DSI_MSG_UNICAST : 0;
+	set->cmds[1].msg.ctrl = unicast ? ctrl_idx : 0;
+	set->cmds[1].msg.tx_len = ROI_CMD_LEN;
+	set->cmds[1].msg.tx_buf = paset;
+	set->cmds[1].msg.rx_len = 0;
+	set->cmds[1].msg.rx_buf = 0;
+	set->cmds[1].last_command = 1;
+	set->cmds[1].post_wait_ms = 1;
+
+	goto exit;
+
+error_free_mem:
+	kfree(caset);
+	kfree(paset);
+	kfree(set->cmds);
+
+exit:
+	return rc;
+}
+
+int dsi_panel_send_roi_dcs(struct dsi_panel *panel, int ctrl_idx,
+		struct dsi_rect *roi)
+{
+	int rc = 0;
+
+	if (!panel) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	rc = dsi_panel_roi_prepare_dcs_cmds(panel, roi, ctrl_idx, true);
+	if (rc) {
+		pr_err("[%s] failed to prepare DSI_CMD_SET_ROI cmds, rc=%d\n",
+				panel->name, rc);
+		return rc;
+	}
+	pr_debug("[%s] send roi x %d y %d w %d h %d\n", panel->name,
+			roi->x, roi->y, roi->w, roi->h);
+
+	mutex_lock(&panel->panel_lock);
+
+	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_ROI);
+	if (rc)
+		pr_err("[%s] failed to send DSI_CMD_SET_ROI cmds, rc=%d\n",
+				panel->name, rc);
+
+	mutex_unlock(&panel->panel_lock);
+
+	dsi_panel_destroy_cmd_packets(&panel->cmd_sets[DSI_CMD_SET_ROI]);
+
+	return rc;
+}
+
 int dsi_panel_enable(struct dsi_panel *panel)
 {
 	int rc = 0;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
index 9f63089..4c9fbbe 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
@@ -28,6 +28,7 @@
 #include "dsi_ctrl_hw.h"
 #include "dsi_clk.h"
 #include "dsi_pwr.h"
+#include "msm_drv.h"
 
 #define MAX_BL_LEVEL 4096
 #define DSI_CMD_PPS_SIZE 135
@@ -55,6 +56,7 @@
 	DSI_CMD_SET_POST_VID_TO_CMD_SWITCH,
 	DSI_CMD_SET_PANEL_STATUS,
 	DSI_CMD_SET_PPS,
+	DSI_CMD_SET_ROI,
 	DSI_CMD_SET_MAX
 };
 
@@ -162,6 +164,7 @@
 	struct dsi_cmd_engine_cfg cmd_config;
 
 	struct dsi_dfps_capabilities dfps_caps;
+	struct msm_roi_caps roi_caps;
 
 	struct dsi_panel_cmd_set cmd_sets[DSI_CMD_SET_MAX];
 	struct dsi_panel_phy_props phy_props;
@@ -184,6 +187,7 @@
 	u32 panel_jitter;
 	u32 panel_prefill_lines;
 	bool panel_initialized;
+	bool te_using_watchdog_timer;
 
 	bool dsc_enabled;
 	char dsc_pps_cmd[DSI_CMD_PPS_SIZE];
@@ -241,4 +245,8 @@
 int dsi_panel_set_backlight(struct dsi_panel *panel, u32 bl_lvl);
 
 int dsi_panel_update_pps(struct dsi_panel *panel);
+
+int dsi_panel_send_roi_dcs(struct dsi_panel *panel, int ctrl_idx,
+		struct dsi_rect *roi);
+
 #endif /* _DSI_PANEL_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_pwr.c b/drivers/gpu/drm/msm/dsi-staging/dsi_pwr.c
index 609c5ff..e2219aa 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_pwr.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_pwr.c
@@ -238,7 +238,8 @@
 	if (!supply_root_node) {
 		supply_root_node = of_parse_phandle(of_node, supply_name, 0);
 		if (!supply_root_node) {
-			pr_err("No supply entry present for %s\n", supply_name);
+			pr_debug("No supply entry present for %s\n",
+					supply_name);
 			return -EINVAL;
 		}
 	}
@@ -296,7 +297,8 @@
 	if (!supply_root_node) {
 		supply_root_node = of_parse_phandle(of_node, supply_name, 0);
 		if (!supply_root_node) {
-			pr_err("No supply entry present for %s\n", supply_name);
+			pr_debug("No supply entry present for %s\n",
+					supply_name);
 			return -EINVAL;
 		}
 	}
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 322b7f2..d50a185 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -389,6 +389,8 @@
  * @max_height:         Max height of display. In case of hot pluggable display
  *                      this is max height supported by controller
  * @is_primary:         Set to true if display is primary display
+ * @is_te_using_watchdog_timer:  Boolean to indicate watchdog TE is
+ *				 used instead of panel TE in cmd mode panels
  * @frame_rate:		Display frame rate
  * @prefill_lines:	prefill lines based on porches.
  * @vtotal:		display vertical total
@@ -412,6 +414,7 @@
 	uint32_t max_height;
 
 	bool is_primary;
+	bool is_te_using_watchdog_timer;
 	uint32_t frame_rate;
 	uint32_t prefill_lines;
 	uint32_t vtotal;
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 58222f3..6593b47 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -31,18 +31,20 @@
 		(c) ? (c)->base.base.id : -1, ##__VA_ARGS__)
 
 static const struct drm_prop_enum_list e_topology_name[] = {
-	{SDE_RM_TOPOLOGY_UNKNOWN,	"sde_unknown"},
+	{SDE_RM_TOPOLOGY_NONE,	"sde_none"},
 	{SDE_RM_TOPOLOGY_SINGLEPIPE,	"sde_singlepipe"},
+	{SDE_RM_TOPOLOGY_SINGLEPIPE_DSC,	"sde_singlepipe_dsc"},
 	{SDE_RM_TOPOLOGY_DUALPIPE,	"sde_dualpipe"},
+	{SDE_RM_TOPOLOGY_DUALPIPE_DSC,	"sde_dualpipe_dsc"},
+	{SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE,	"sde_dualpipemerge"},
+	{SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_DSC,	"sde_dualpipemerge_dsc"},
+	{SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE,	"sde_dualpipe_dscmerge"},
 	{SDE_RM_TOPOLOGY_PPSPLIT,	"sde_ppsplit"},
-	{SDE_RM_TOPOLOGY_DUALPIPEMERGE,	"sde_dualpipemerge"}
 };
 static const struct drm_prop_enum_list e_topology_control[] = {
 	{SDE_RM_TOPCTL_RESERVE_LOCK,	"reserve_lock"},
 	{SDE_RM_TOPCTL_RESERVE_CLEAR,	"reserve_clear"},
 	{SDE_RM_TOPCTL_DSPP,		"dspp"},
-	{SDE_RM_TOPCTL_FORCE_TILING,	"force_tiling"},
-	{SDE_RM_TOPCTL_PPSPLIT,		"ppsplit"}
 };
 static const struct drm_prop_enum_list e_power_mode[] = {
 	{SDE_MODE_DPMS_ON,	"ON"},
@@ -656,11 +658,6 @@
 				SDE_ERROR("prep fb failed, %d\n", rc);
 		}
 		break;
-	case CONNECTOR_PROP_TOPOLOGY_CONTROL:
-		rc = sde_rm_check_property_topctl(val);
-		if (rc)
-			SDE_ERROR("invalid topology_control: 0x%llX\n", val);
-		break;
 	case CONNECTOR_PROP_LP:
 		mutex_lock(&c_conn->lock);
 		c_conn->lp_mode = val;
diff --git a/drivers/gpu/drm/msm/sde/sde_core_perf.c b/drivers/gpu/drm/msm/sde/sde_core_perf.c
index db2c515..7671649 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_perf.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_perf.c
@@ -27,6 +27,21 @@
 #include "sde_crtc.h"
 #include "sde_core_perf.h"
 
+#define SDE_PERF_MODE_STRING_SIZE	128
+
+/**
+ * enum sde_perf_mode - performance tuning mode
+ * @SDE_PERF_MODE_NORMAL: performance controlled by user mode client
+ * @SDE_PERF_MODE_MINIMUM: performance bounded by minimum setting
+ * @SDE_PERF_MODE_FIXED: performance bounded by fixed setting
+ */
+enum sde_perf_mode {
+	SDE_PERF_MODE_NORMAL,
+	SDE_PERF_MODE_MINIMUM,
+	SDE_PERF_MODE_FIXED,
+	SDE_PERF_MODE_MAX
+};
+
 static struct sde_kms *_sde_crtc_get_kms(struct drm_crtc *crtc)
 {
 	struct msm_drm_private *priv;
@@ -72,6 +87,31 @@
 	return intf_connected;
 }
 
+static void _sde_core_perf_calc_crtc(struct drm_crtc *crtc,
+		struct drm_crtc_state *state,
+		struct sde_core_perf_params *perf)
+{
+	struct sde_crtc_state *sde_cstate;
+
+	if (!crtc || !state || !perf) {
+		SDE_ERROR("invalid parameters\n");
+		return;
+	}
+
+	sde_cstate = to_sde_crtc_state(state);
+	memset(perf, 0, sizeof(struct sde_core_perf_params));
+
+	perf->bw_ctl = sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_AB);
+	perf->max_per_pipe_ib =
+			sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_IB);
+	perf->core_clk_rate =
+			sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_CLK);
+
+	SDE_DEBUG("crtc=%d clk_rate=%llu ib=%llu ab=%llu\n",
+			crtc->base.id, perf->core_clk_rate,
+			perf->max_per_pipe_ib, perf->bw_ctl);
+}
+
 int sde_core_perf_crtc_check(struct drm_crtc *crtc,
 		struct drm_crtc_state *state)
 {
@@ -100,7 +140,11 @@
 
 	sde_cstate = to_sde_crtc_state(state);
 
-	bw_sum_of_intfs = sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_AB);
+	/* swap state and obtain new values */
+	sde_cstate->cur_perf = sde_cstate->new_perf;
+	_sde_core_perf_calc_crtc(crtc, state, &sde_cstate->new_perf);
+
+	bw_sum_of_intfs = sde_cstate->new_perf.bw_ctl;
 	curr_client_type = sde_crtc_get_client_type(crtc);
 
 	drm_for_each_crtc(tmp_crtc, crtc->dev) {
@@ -110,7 +154,7 @@
 			struct sde_crtc_state *tmp_cstate =
 					to_sde_crtc_state(tmp_crtc->state);
 
-			bw_sum_of_intfs += tmp_cstate->cur_perf.bw_ctl;
+			bw_sum_of_intfs += tmp_cstate->new_perf.bw_ctl;
 		}
 	}
 
@@ -126,11 +170,11 @@
 	SDE_DEBUG("final threshold bw limit = %d\n", threshold);
 
 	if (!threshold) {
-		sde_cstate->cur_perf.bw_ctl = 0;
+		sde_cstate->new_perf = sde_cstate->cur_perf;
 		SDE_ERROR("no bandwidth limits specified\n");
 		return -E2BIG;
 	} else if (bw > threshold) {
-		sde_cstate->cur_perf.bw_ctl = 0;
+		sde_cstate->new_perf = sde_cstate->cur_perf;
 		SDE_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw, threshold);
 		return -E2BIG;
 	}
@@ -138,26 +182,6 @@
 	return 0;
 }
 
-static void _sde_core_perf_calc_crtc(struct sde_kms *kms,
-		struct drm_crtc *crtc,
-		struct sde_core_perf_params *perf)
-{
-	struct sde_crtc_state *sde_cstate;
-
-	sde_cstate = to_sde_crtc_state(crtc->state);
-	memset(perf, 0, sizeof(struct sde_core_perf_params));
-
-	perf->bw_ctl = sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_AB);
-	perf->max_per_pipe_ib =
-			sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_IB);
-	perf->core_clk_rate =
-			sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_CLK);
-
-	SDE_DEBUG("crtc=%d clk_rate=%u ib=%llu ab=%llu\n",
-			crtc->base.id, perf->core_clk_rate,
-			perf->max_per_pipe_ib, perf->bw_ctl);
-}
-
 static void _sde_core_perf_crtc_update_bus(struct sde_kms *kms,
 		struct drm_crtc *crtc)
 {
@@ -175,19 +199,24 @@
 			sde_cstate = to_sde_crtc_state(tmp_crtc->state);
 
 			perf.max_per_pipe_ib = max(perf.max_per_pipe_ib,
-				sde_cstate->cur_perf.max_per_pipe_ib);
+				sde_cstate->new_perf.max_per_pipe_ib);
 
-			bw_sum_of_intfs += sde_cstate->cur_perf.bw_ctl;
+			bw_sum_of_intfs += sde_cstate->new_perf.bw_ctl;
 
 			SDE_DEBUG("crtc=%d bw=%llu\n",
 				tmp_crtc->base.id,
-				sde_cstate->cur_perf.bw_ctl);
+				sde_cstate->new_perf.bw_ctl);
 		}
 	}
 
 	bus_ab_quota = max(bw_sum_of_intfs, kms->perf.perf_tune.min_bus_vote);
 	bus_ib_quota = perf.max_per_pipe_ib;
 
+	if (kms->perf.perf_tune.mode == SDE_PERF_MODE_FIXED) {
+		bus_ab_quota = kms->perf.fix_core_ab_vote;
+		bus_ib_quota = kms->perf.fix_core_ib_vote;
+	}
+
 	switch (curr_client_type) {
 	case NRT_CLIENT:
 		sde_power_data_bus_set_quota(&priv->phandle, kms->core_client,
@@ -273,22 +302,25 @@
 	}
 }
 
-static u32 _sde_core_perf_get_core_clk_rate(struct sde_kms *kms)
+static u64 _sde_core_perf_get_core_clk_rate(struct sde_kms *kms)
 {
-	u32 clk_rate = 0;
+	u64 clk_rate = kms->perf.perf_tune.min_core_clk;
 	struct drm_crtc *crtc;
 	struct sde_crtc_state *sde_cstate;
 
 	drm_for_each_crtc(crtc, kms->dev) {
 		if (_sde_core_perf_crtc_is_power_on(crtc)) {
 			sde_cstate = to_sde_crtc_state(crtc->state);
-			clk_rate = max(sde_cstate->cur_perf.core_clk_rate,
+			clk_rate = max(sde_cstate->new_perf.core_clk_rate,
 							clk_rate);
 			clk_rate = clk_round_rate(kms->perf.core_clk, clk_rate);
 		}
 	}
 
-	SDE_DEBUG("clk:%u\n", clk_rate);
+	if (kms->perf.perf_tune.mode == SDE_PERF_MODE_FIXED)
+		clk_rate = kms->perf.fix_core_clk_rate;
+
+	SDE_DEBUG("clk:%llu\n", clk_rate);
 
 	return clk_rate;
 }
@@ -298,7 +330,7 @@
 {
 	struct sde_core_perf_params *new, *old;
 	int update_bus = 0, update_clk = 0;
-	u32 clk_rate = 0;
+	u64 clk_rate = 0;
 	struct sde_crtc *sde_crtc;
 	struct sde_crtc_state *sde_cstate;
 	int ret;
@@ -320,16 +352,13 @@
 	sde_crtc = to_sde_crtc(crtc);
 	sde_cstate = to_sde_crtc_state(crtc->state);
 
-	SDE_DEBUG("crtc:%d stop_req:%d core_clk:%u\n",
+	SDE_DEBUG("crtc:%d stop_req:%d core_clk:%llu\n",
 			crtc->base.id, stop_req, kms->perf.core_clk_rate);
 
 	old = &sde_cstate->cur_perf;
 	new = &sde_cstate->new_perf;
 
 	if (_sde_core_perf_crtc_is_power_on(crtc) && !stop_req) {
-		if (params_changed)
-			_sde_core_perf_calc_crtc(kms, crtc, new);
-
 		/*
 		 * cases for bus bandwidth update.
 		 * 1. new bandwidth vote or writeback output vote
@@ -376,13 +405,13 @@
 		ret = sde_power_clk_set_rate(&priv->phandle,
 				kms->perf.clk_name, clk_rate);
 		if (ret) {
-			SDE_ERROR("failed to set %s clock rate %u\n",
+			SDE_ERROR("failed to set %s clock rate %llu\n",
 					kms->perf.clk_name, clk_rate);
 			return;
 		}
 
 		kms->perf.core_clk_rate = clk_rate;
-		SDE_DEBUG("update clk rate = %d HZ\n", clk_rate);
+		SDE_DEBUG("update clk rate = %lld HZ\n", clk_rate);
 	}
 }
 
@@ -393,7 +422,7 @@
 {
 	struct sde_core_perf *perf = file->private_data;
 	struct sde_perf_cfg *cfg = &perf->catalog->perf;
-	int perf_mode = 0;
+	u32 perf_mode = 0;
 	char buf[10];
 
 	if (!perf)
@@ -407,19 +436,28 @@
 
 	buf[count] = 0;	/* end of string */
 
-	if (kstrtoint(buf, 0, &perf_mode))
+	if (kstrtouint(buf, 0, &perf_mode))
 		return -EFAULT;
 
-	if (perf_mode) {
+	if (perf_mode >= SDE_PERF_MODE_MAX)
+		return -EFAULT;
+
+	if (perf_mode == SDE_PERF_MODE_FIXED) {
+		DRM_INFO("fix performance mode\n");
+	} else if (perf_mode == SDE_PERF_MODE_MINIMUM) {
 		/* run the driver with max clk and BW vote */
 		perf->perf_tune.min_core_clk = perf->max_core_clk_rate;
 		perf->perf_tune.min_bus_vote =
 				(u64) cfg->max_bw_high * 1000;
-	} else {
+		DRM_INFO("minimum performance mode\n");
+	} else if (perf_mode == SDE_PERF_MODE_NORMAL) {
 		/* reset the perf tune params to 0 */
 		perf->perf_tune.min_core_clk = 0;
 		perf->perf_tune.min_bus_vote = 0;
+		DRM_INFO("normal performance mode\n");
 	}
+	perf->perf_tune.mode = perf_mode;
+
 	return count;
 }
 
@@ -428,7 +466,7 @@
 {
 	struct sde_core_perf *perf = file->private_data;
 	int len = 0;
-	char buf[40] = {'\0'};
+	char buf[SDE_PERF_MODE_STRING_SIZE] = {'\0'};
 
 	if (!perf)
 		return -ENODEV;
@@ -436,7 +474,9 @@
 	if (*ppos)
 		return 0;	/* the end */
 
-	len = snprintf(buf, sizeof(buf), "min_mdp_clk %lu min_bus_vote %llu\n",
+	len = snprintf(buf, sizeof(buf),
+			"mode %d min_mdp_clk %llu min_bus_vote %llu\n",
+			perf->perf_tune.mode,
 			perf->perf_tune.min_core_clk,
 			perf->perf_tune.min_bus_vote);
 	if (len < 0 || len >= sizeof(buf))
@@ -485,7 +525,7 @@
 
 	debugfs_create_u64("max_core_clk_rate", 0644, perf->debugfs_root,
 			&perf->max_core_clk_rate);
-	debugfs_create_u32("core_clk_rate", 0644, perf->debugfs_root,
+	debugfs_create_u64("core_clk_rate", 0644, perf->debugfs_root,
 			&perf->core_clk_rate);
 	debugfs_create_u32("enable_bw_release", 0644, perf->debugfs_root,
 			(u32 *)&perf->enable_bw_release);
@@ -495,6 +535,12 @@
 			(u32 *)&catalog->perf.max_bw_high);
 	debugfs_create_file("perf_mode", 0644, perf->debugfs_root,
 			(u32 *)perf, &sde_core_perf_mode_fops);
+	debugfs_create_u64("fix_core_clk_rate", 0644, perf->debugfs_root,
+			&perf->fix_core_clk_rate);
+	debugfs_create_u64("fix_core_ib_vote", 0644, perf->debugfs_root,
+			&perf->fix_core_ib_vote);
+	debugfs_create_u64("fix_core_ab_vote", 0644, perf->debugfs_root,
+			&perf->fix_core_ab_vote);
 
 	return 0;
 }
@@ -554,8 +600,8 @@
 
 	perf->max_core_clk_rate = sde_power_clk_get_max_rate(phandle, clk_name);
 	if (!perf->max_core_clk_rate) {
-		SDE_ERROR("invalid max core clk rate\n");
-		goto err;
+		SDE_DEBUG("optional max core clk rate, use default\n");
+		perf->max_core_clk_rate = SDE_PERF_DEFAULT_MAX_CORE_CLK_RATE;
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/msm/sde/sde_core_perf.h b/drivers/gpu/drm/msm/sde/sde_core_perf.h
index 20e4eb5..31851be 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_perf.h
+++ b/drivers/gpu/drm/msm/sde/sde_core_perf.h
@@ -21,6 +21,10 @@
 #include "sde_hw_catalog.h"
 #include "sde_power_handle.h"
 
+#define	SDE_PERF_DEFAULT_MAX_CORE_CLK_RATE	320000000
+#define	SDE_PERF_DEFAULT_MAX_BUS_AB_QUOTA	2000000000
+#define	SDE_PERF_DEFAULT_MAX_BUS_IB_QUOTA	2000000000
+
 /**
  * struct sde_core_perf_params - definition of performance parameters
  * @max_per_pipe_ib: maximum instantaneous bandwidth request
@@ -30,16 +34,18 @@
 struct sde_core_perf_params {
 	u64 max_per_pipe_ib;
 	u64 bw_ctl;
-	u32 core_clk_rate;
+	u64 core_clk_rate;
 };
 
 /**
  * struct sde_core_perf_tune - definition of performance tuning control
+ * @mode: performance mode
  * @min_core_clk: minimum core clock
  * @min_bus_vote: minimum bus vote
  */
 struct sde_core_perf_tune {
-	unsigned long min_core_clk;
+	u32 mode;
+	u64 min_core_clk;
 	u64 min_bus_vote;
 };
 
@@ -57,6 +63,9 @@
  * @max_core_clk_rate: maximum allowable core clock rate
  * @perf_tune: debug control for performance tuning
  * @enable_bw_release: debug control for bandwidth release
+ * @fix_core_clk_rate: fixed core clock request in Hz used in mode 2
+ * @fix_core_ib_vote: fixed core ib vote in bps used in mode 2
+ * @fix_core_ab_vote: fixed core ab vote in bps used in mode 2
  */
 struct sde_core_perf {
 	struct drm_device *dev;
@@ -67,10 +76,13 @@
 	struct sde_power_client *pclient;
 	char *clk_name;
 	struct clk *core_clk;
-	u32 core_clk_rate;
+	u64 core_clk_rate;
 	u64 max_core_clk_rate;
 	struct sde_core_perf_tune perf_tune;
 	u32 enable_bw_release;
+	u64 fix_core_clk_rate;
+	u64 fix_core_ib_vote;
+	u64 fix_core_ab_vote;
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 1bd7654..d80a305 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -731,7 +731,6 @@
 	struct sde_crtc *sde_crtc;
 	struct sde_crtc_state *crtc_state;
 	struct sde_rect *crtc_roi;
-	struct drm_clip_rect crtc_clip, *user_rect;
 	int i, num_attached_conns = 0;
 
 	if (!crtc || !state)
@@ -741,12 +740,6 @@
 	crtc_state = to_sde_crtc_state(state);
 	crtc_roi = &crtc_state->crtc_roi;
 
-	/* init to invalid range maxes */
-	crtc_clip.x1 = ~0;
-	crtc_clip.y1 = ~0;
-	crtc_clip.x2 = 0;
-	crtc_clip.y2 = 0;
-
 	for_each_connector_in_state(state->state, conn, conn_state, i) {
 		struct sde_connector_state *sde_conn_state;
 
@@ -771,36 +764,7 @@
 		}
 	}
 
-	/* aggregate all clipping rectangles together for overall crtc roi */
-	for (i = 0; i < crtc_state->user_roi_list.num_rects; i++) {
-		user_rect = &crtc_state->user_roi_list.roi[i];
-
-		crtc_clip.x1 = min(crtc_clip.x1, user_rect->x1);
-		crtc_clip.y1 = min(crtc_clip.y1, user_rect->y1);
-		crtc_clip.x2 = max(crtc_clip.x2, user_rect->x2);
-		crtc_clip.y2 = max(crtc_clip.y2, user_rect->y2);
-
-		SDE_DEBUG(
-			"%s: conn%d roi%d (%d,%d),(%d,%d) -> crtc (%d,%d),(%d,%d)\n",
-				sde_crtc->name, DRMID(crtc), i,
-				user_rect->x1, user_rect->y1,
-				user_rect->x2, user_rect->y2,
-				crtc_clip.x1, crtc_clip.y1,
-				crtc_clip.x2, crtc_clip.y2);
-
-	}
-
-	if (crtc_clip.x2  && crtc_clip.y2) {
-		crtc_roi->x = crtc_clip.x1;
-		crtc_roi->y = crtc_clip.y1;
-		crtc_roi->w = crtc_clip.x2 - crtc_clip.x1;
-		crtc_roi->h = crtc_clip.y2 - crtc_clip.y1;
-	} else {
-		crtc_roi->x = 0;
-		crtc_roi->y = 0;
-		crtc_roi->w = 0;
-		crtc_roi->h = 0;
-	}
+	sde_kms_rect_merge_rectangles(&crtc_state->user_roi_list, crtc_roi);
 
 	SDE_DEBUG("%s: crtc roi (%d,%d,%d,%d)\n", sde_crtc->name,
 			crtc_roi->x, crtc_roi->y, crtc_roi->w, crtc_roi->h);
@@ -1399,6 +1363,7 @@
 	struct sde_crtc_frame_event *fevent;
 	struct drm_crtc *crtc;
 	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
 	struct sde_kms *sde_kms;
 	unsigned long flags;
 
@@ -1408,13 +1373,14 @@
 	}
 
 	fevent = container_of(work, struct sde_crtc_frame_event, work);
-	if (!fevent->crtc) {
+	if (!fevent->crtc || !fevent->crtc->state) {
 		SDE_ERROR("invalid crtc\n");
 		return;
 	}
 
 	crtc = fevent->crtc;
 	sde_crtc = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(crtc->state);
 
 	sde_kms = _sde_crtc_get_kms(crtc);
 	if (!sde_kms) {
@@ -1453,6 +1419,9 @@
 			SDE_EVT32_VERBOSE(DRMID(crtc), fevent->event,
 							SDE_EVTLOG_FUNC_CASE3);
 		}
+
+		if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE)
+			sde_core_perf_crtc_update(crtc, 0, false);
 	} else {
 		SDE_ERROR("crtc%d ts:%lld unknown event %u\n", crtc->base.id,
 				ktime_to_ns(fevent->ts),
@@ -2382,6 +2351,82 @@
 	return rc;
 }
 
+static int _sde_crtc_excl_rect_overlap_check(struct plane_state pstates[],
+	int cnt, int curr_cnt, struct sde_rect *excl_rect, int z_pos)
+{
+	struct sde_rect dst_rect, intersect;
+	int i, rc = -EINVAL;
+	const struct drm_plane_state *pstate;
+
+	/* start checking from next plane */
+	for (i = curr_cnt; i < cnt; i++) {
+		pstate = pstates[i].drm_pstate;
+		POPULATE_RECT(&dst_rect, pstate->crtc_x, pstate->crtc_y,
+				pstate->crtc_w, pstate->crtc_h, true);
+		sde_kms_rect_intersect(&dst_rect, excl_rect, &intersect);
+
+		if (intersect.w == excl_rect->w && intersect.h == excl_rect->h
+				/* next plane may be on same z-order */
+				&& z_pos != pstates[i].stage) {
+			rc = 0;
+			goto end;
+		}
+	}
+
+	SDE_ERROR("excl rect does not find top overlapping rect\n");
+end:
+	return rc;
+}
+
+/* no input validation - caller API has all the checks */
+static int _sde_crtc_excl_dim_layer_check(struct drm_crtc_state *state,
+		struct plane_state pstates[], int cnt)
+{
+	struct sde_crtc_state *cstate = to_sde_crtc_state(state);
+	struct drm_display_mode *mode = &state->adjusted_mode;
+	const struct drm_plane_state *pstate;
+	struct sde_plane_state *sde_pstate;
+	int rc = 0, i;
+
+	/* Check dim layer rect bounds and stage */
+	for (i = 0; i < cstate->num_dim_layers; i++) {
+		if ((CHECK_LAYER_BOUNDS(cstate->dim_layer[i].rect.y,
+			cstate->dim_layer[i].rect.h, mode->vdisplay)) ||
+		    (CHECK_LAYER_BOUNDS(cstate->dim_layer[i].rect.x,
+			cstate->dim_layer[i].rect.w, mode->hdisplay)) ||
+		    (cstate->dim_layer[i].stage >= SDE_STAGE_MAX) ||
+		    (!cstate->dim_layer[i].rect.w) ||
+		    (!cstate->dim_layer[i].rect.h)) {
+			SDE_ERROR("invalid dim_layer:{%d,%d,%d,%d}, stage:%d\n",
+					cstate->dim_layer[i].rect.x,
+					cstate->dim_layer[i].rect.y,
+					cstate->dim_layer[i].rect.w,
+					cstate->dim_layer[i].rect.h,
+					cstate->dim_layer[i].stage);
+			SDE_ERROR("display: %dx%d\n", mode->hdisplay,
+					mode->vdisplay);
+			rc = -E2BIG;
+			goto end;
+		}
+	}
+
+	/* this is traversing on sorted z-order pstates */
+	for (i = 0; i < cnt; i++) {
+		pstate = pstates[i].drm_pstate;
+		sde_pstate = to_sde_plane_state(pstate);
+		if (sde_pstate->excl_rect.w && sde_pstate->excl_rect.h) {
+			/* check overlap on all top z-order */
+			rc = _sde_crtc_excl_rect_overlap_check(pstates, cnt,
+			     i + 1, &sde_pstate->excl_rect, pstates[i].stage);
+			if (rc)
+				goto end;
+		}
+	}
+
+end:
+	return rc;
+}
+
 static int sde_crtc_atomic_check(struct drm_crtc *crtc,
 		struct drm_crtc_state *state)
 {
@@ -2490,31 +2535,13 @@
 		}
 	}
 
-	/* Check dim layer rect bounds and stage */
-	for (i = 0; i < cstate->num_dim_layers; i++) {
-		if ((CHECK_LAYER_BOUNDS(cstate->dim_layer[i].rect.y,
-			cstate->dim_layer[i].rect.h, mode->vdisplay)) ||
-		    (CHECK_LAYER_BOUNDS(cstate->dim_layer[i].rect.x,
-			cstate->dim_layer[i].rect.w, mode->hdisplay)) ||
-		    (cstate->dim_layer[i].stage >= SDE_STAGE_MAX) ||
-		    (!cstate->dim_layer[i].rect.w) ||
-		    (!cstate->dim_layer[i].rect.h)) {
-			SDE_ERROR("invalid dim_layer:{%d,%d,%d,%d}, stage:%d\n",
-					cstate->dim_layer[i].rect.x,
-					cstate->dim_layer[i].rect.y,
-					cstate->dim_layer[i].rect.w,
-					cstate->dim_layer[i].rect.h,
-					cstate->dim_layer[i].stage);
-			SDE_ERROR("display: %dx%d\n", mode->hdisplay,
-					mode->vdisplay);
-			rc = -E2BIG;
-			goto end;
-		}
-	}
-
 	/* assign mixer stages based on sorted zpos property */
 	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
 
+	rc = _sde_crtc_excl_dim_layer_check(state, pstates, cnt);
+	if (rc)
+		goto end;
+
 	if (!sde_is_custom_client()) {
 		int stage_old = pstates[0].stage;
 
@@ -2709,19 +2736,19 @@
 			CRTC_PROP_CORE_CLK);
 	msm_property_install_range(&sde_crtc->property_info,
 			"core_ab", 0x0, 0, U64_MAX,
-			SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA,
+			SDE_PERF_DEFAULT_MAX_BUS_AB_QUOTA,
 			CRTC_PROP_CORE_AB);
 	msm_property_install_range(&sde_crtc->property_info,
 			"core_ib", 0x0, 0, U64_MAX,
-			SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA,
+			SDE_PERF_DEFAULT_MAX_BUS_IB_QUOTA,
 			CRTC_PROP_CORE_IB);
 	msm_property_install_range(&sde_crtc->property_info,
 			"mem_ab", 0x0, 0, U64_MAX,
-			SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA,
+			SDE_PERF_DEFAULT_MAX_BUS_AB_QUOTA,
 			CRTC_PROP_MEM_AB);
 	msm_property_install_range(&sde_crtc->property_info,
 			"mem_ib", 0x0, 0, U64_MAX,
-			SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA,
+			SDE_PERF_DEFAULT_MAX_BUS_IB_QUOTA,
 			CRTC_PROP_MEM_IB);
 	msm_property_install_range(&sde_crtc->property_info,
 			"rot_prefill_bw", 0, 0, U64_MAX,
@@ -2767,13 +2794,40 @@
 	sde_kms_info_add_keyint(info, "has_src_split", catalog->has_src_split);
 	if (catalog->perf.max_bw_low)
 		sde_kms_info_add_keyint(info, "max_bandwidth_low",
-				catalog->perf.max_bw_low);
+				catalog->perf.max_bw_low * 1000LL);
 	if (catalog->perf.max_bw_high)
 		sde_kms_info_add_keyint(info, "max_bandwidth_high",
-				catalog->perf.max_bw_high);
+				catalog->perf.max_bw_high * 1000LL);
 	if (sde_kms->perf.max_core_clk_rate)
 		sde_kms_info_add_keyint(info, "max_mdp_clk",
 				sde_kms->perf.max_core_clk_rate);
+	sde_kms_info_add_keystr(info, "core_ib_ff",
+			catalog->perf.core_ib_ff);
+	sde_kms_info_add_keystr(info, "core_clk_ff",
+			catalog->perf.core_clk_ff);
+	sde_kms_info_add_keystr(info, "comp_ratio_rt",
+			catalog->perf.comp_ratio_rt);
+	sde_kms_info_add_keystr(info, "comp_ratio_nrt",
+			catalog->perf.comp_ratio_nrt);
+	sde_kms_info_add_keyint(info, "dest_scale_prefill_lines",
+			catalog->perf.dest_scale_prefill_lines);
+	sde_kms_info_add_keyint(info, "undersized_prefill_lines",
+			catalog->perf.undersized_prefill_lines);
+	sde_kms_info_add_keyint(info, "macrotile_prefill_lines",
+			catalog->perf.macrotile_prefill_lines);
+	sde_kms_info_add_keyint(info, "yuv_nv12_prefill_lines",
+			catalog->perf.yuv_nv12_prefill_lines);
+	sde_kms_info_add_keyint(info, "linear_prefill_lines",
+			catalog->perf.linear_prefill_lines);
+	sde_kms_info_add_keyint(info, "downscaling_prefill_lines",
+			catalog->perf.downscaling_prefill_lines);
+	sde_kms_info_add_keyint(info, "xtra_prefill_lines",
+			catalog->perf.xtra_prefill_lines);
+	sde_kms_info_add_keyint(info, "amortizable_threshold",
+			catalog->perf.amortizable_threshold);
+	sde_kms_info_add_keyint(info, "min_prefill_lines",
+			catalog->perf.min_prefill_lines);
+
 	msm_property_set_blob(&sde_crtc->property_info, &sde_crtc->blob_info,
 			info->data, info->len, CRTC_PROP_INFO);
 
@@ -3148,7 +3202,7 @@
 	seq_printf(s, "client type: %d\n", sde_crtc_get_client_type(crtc));
 	seq_printf(s, "intf_mode: %d\n", sde_crtc_get_intf_mode(crtc));
 	seq_printf(s, "bw_ctl: %llu\n", cstate->cur_perf.bw_ctl);
-	seq_printf(s, "core_clk_rate: %u\n", cstate->cur_perf.core_clk_rate);
+	seq_printf(s, "core_clk_rate: %llu\n", cstate->cur_perf.core_clk_rate);
 	seq_printf(s, "max_per_pipe_ib: %llu\n",
 			cstate->cur_perf.max_per_pipe_ib);
 
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index ec5ec1d..6a22115 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -420,6 +420,19 @@
 }
 
 /**
+ * sde_crtc_get_inline_prefill - get current inline rotation prefill
+ * @crtc: Pointer to crtc
+ * return: number of prefill lines
+ */
+static inline u32 sde_crtc_get_inline_prefill(struct drm_crtc *crtc)
+{
+	if (!crtc || !crtc->state)
+		return 0;
+
+	return to_sde_crtc_state(crtc->state)->sbuf_prefill_line;
+}
+
+/**
  * sde_crtc_event_queue - request event callback
  * @crtc: Pointer to drm crtc structure
  * @func: Pointer to callback function
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index a136645..f11ba51 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -152,6 +152,9 @@
  * @rc_state:			resource controller state
  * @delayed_off_work:		delayed worker to schedule disabling of
  *				clks and resources after IDLE_TIMEOUT time.
+ * @topology:                   topology of the display
+ * @mode_set_complete:          flag to indicate modeset completion
+ * @rsc_cfg:			rsc configuration
  */
 struct sde_encoder_virt {
 	struct drm_encoder base;
@@ -188,6 +191,10 @@
 	struct mutex rc_lock;
 	enum sde_enc_rc_states rc_state;
 	struct delayed_work delayed_off_work;
+	struct msm_display_topology topology;
+	bool mode_set_complete;
+
+	struct sde_encoder_rsc_config rsc_cfg;
 };
 
 #define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base)
@@ -249,15 +256,14 @@
 	memset(hw_res, 0, sizeof(*hw_res));
 	hw_res->display_num_of_h_tiles = sde_enc->display_num_of_h_tiles;
 
-	if (_sde_is_dsc_enabled(sde_enc))
-		hw_res->needs_dsc = true;
-
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
 
 		if (phys && phys->ops.get_hw_resources)
 			phys->ops.get_hw_resources(phys, hw_res, conn_state);
 	}
+
+	hw_res->topology = sde_enc->topology;
 }
 
 void sde_encoder_destroy(struct drm_encoder *drm_enc)
@@ -423,9 +429,18 @@
 	}
 
 	/* Reserve dynamic resources now. Indicating AtomicTest phase */
-	if (!ret)
-		ret = sde_rm_reserve(&sde_kms->rm, drm_enc, crtc_state,
+	if (!ret) {
+		/*
+		 * Avoid reserving resources when mode set is pending. Topology
+		 * info may not be available to complete reservation.
+		 */
+		if (drm_atomic_crtc_needs_modeset(crtc_state)
+				&& sde_enc->mode_set_complete) {
+			ret = sde_rm_reserve(&sde_kms->rm, drm_enc, crtc_state,
 				conn_state, true);
+			sde_enc->mode_set_complete = false;
+		}
+	}
 
 	if (!ret)
 		drm_mode_set_crtcinfo(adj_mode, 0);
@@ -720,7 +735,7 @@
 	int ret = 0;
 
 	topology = sde_connector_get_topology_name(drm_conn);
-	if (topology == SDE_RM_TOPOLOGY_UNKNOWN) {
+	if (topology == SDE_RM_TOPOLOGY_NONE) {
 		SDE_ERROR_ENC(sde_enc, "topology not set yet\n");
 		return -EINVAL;
 	}
@@ -729,16 +744,15 @@
 	SDE_EVT32(DRMID(&sde_enc->base));
 
 	switch (topology) {
-	case SDE_RM_TOPOLOGY_SINGLEPIPE:
+	case SDE_RM_TOPOLOGY_SINGLEPIPE_DSC:
 		ret = _sde_encoder_dsc_1_lm_1_enc_1_intf(sde_enc);
 		break;
-	case SDE_RM_TOPOLOGY_DUALPIPEMERGE:
+	case SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE:
 		ret = _sde_encoder_dsc_2_lm_2_enc_1_intf(sde_enc);
 		break;
-	case SDE_RM_TOPOLOGY_DUALPIPE:
+	case SDE_RM_TOPOLOGY_DUALPIPE_DSC:
 		ret = _sde_encoder_dsc_2_lm_2_enc_2_intf(sde_enc);
 		break;
-	case SDE_RM_TOPOLOGY_PPSPLIT:
 	default:
 		SDE_ERROR_ENC(sde_enc, "No DSC support for topology %d",
 				topology);
@@ -749,7 +763,8 @@
 }
 
 static int sde_encoder_update_rsc_client(
-		struct drm_encoder *drm_enc, bool enable)
+		struct drm_encoder *drm_enc,
+		struct sde_encoder_rsc_config *config, bool enable)
 {
 	struct sde_encoder_virt *sde_enc;
 	enum sde_rsc_state rsc_state;
@@ -780,14 +795,22 @@
 		  disp_info->is_primary) ? SDE_RSC_CMD_STATE :
 		SDE_RSC_VID_STATE) : SDE_RSC_IDLE_STATE;
 
+	if (config && memcmp(&sde_enc->rsc_cfg, config,
+			sizeof(sde_enc->rsc_cfg)))
+		sde_enc->rsc_state_init = false;
+
 	if (rsc_state != SDE_RSC_IDLE_STATE && !sde_enc->rsc_state_init
 					&& disp_info->is_primary) {
 		rsc_config.fps = disp_info->frame_rate;
 		rsc_config.vtotal = disp_info->vtotal;
 		rsc_config.prefill_lines = disp_info->prefill_lines;
 		rsc_config.jitter = disp_info->jitter;
+		rsc_config.prefill_lines += config ?
+				config->inline_rotate_prefill : 0;
 		/* update it only once */
 		sde_enc->rsc_state_init = true;
+		if (config)
+			sde_enc->rsc_cfg = *config;
 
 		ret = sde_rsc_client_state_update(sde_enc->rsc_client,
 			rsc_state, &rsc_config,
@@ -824,6 +847,7 @@
 	struct msm_drm_private *priv;
 	struct sde_kms *sde_kms;
 	struct sde_encoder_virt *sde_enc;
+	struct sde_encoder_rsc_config rsc_cfg = { 0 };
 	int i;
 
 	sde_enc = to_sde_encoder_virt(drm_enc);
@@ -854,13 +878,16 @@
 				phys->ops.irq_control(phys, true);
 		}
 
+		rsc_cfg.inline_rotate_prefill =
+				sde_crtc_get_inline_prefill(drm_enc->crtc);
+
 		/* enable RSC */
-		sde_encoder_update_rsc_client(drm_enc, true);
+		sde_encoder_update_rsc_client(drm_enc, &rsc_cfg, true);
 
 	} else {
 
 		/* disable RSC */
-		sde_encoder_update_rsc_client(drm_enc, false);
+		sde_encoder_update_rsc_client(drm_enc, NULL, false);
 
 		/* disable all the irq */
 		for (i = 0; i < sde_enc->num_phys_encs; i++) {
@@ -1098,6 +1125,7 @@
 	struct sde_kms *sde_kms;
 	struct list_head *connector_list;
 	struct drm_connector *conn = NULL, *conn_iter;
+	struct sde_connector *sde_conn = NULL;
 	struct sde_rm_hw_iter dsc_iter, pp_iter;
 	int i = 0, ret;
 
@@ -1127,6 +1155,17 @@
 		return;
 	}
 
+	sde_conn = to_sde_connector(conn);
+	if (sde_conn) {
+		ret = sde_conn->ops.get_topology(adj_mode, &sde_enc->topology,
+				sde_kms->catalog->max_mixer_width);
+		if (ret) {
+			SDE_ERROR_ENC(sde_enc,
+				"invalid topology for the mode\n");
+			return;
+		}
+	}
+
 	/* Reserve dynamic resources now. Indicating non-AtomicTest phase */
 	ret = sde_rm_reserve(&sde_kms->rm, drm_enc, drm_enc->crtc->state,
 			conn->state, false);
@@ -1167,6 +1206,8 @@
 				phys->ops.mode_set(phys, mode, adj_mode);
 		}
 	}
+
+	sde_enc->mode_set_complete = true;
 }
 
 static void _sde_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
@@ -1174,7 +1215,10 @@
 	struct sde_encoder_virt *sde_enc = NULL;
 	struct msm_drm_private *priv;
 	struct sde_kms *sde_kms;
+	struct sde_hw_mdp *hw_mdptop;
+	int i = 0;
 	int ret = 0;
+	struct sde_watchdog_te_status te_cfg = { 0 };
 
 	if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
 		SDE_ERROR("invalid parameters\n");
@@ -1189,6 +1233,14 @@
 	}
 
 	sde_kms = to_sde_kms(priv->kms);
+	hw_mdptop = sde_kms->hw_mdp;
+
+	if (!hw_mdptop) {
+		SDE_ERROR("invalid mdptop\n");
+		return;
+	}
+
+	sde_kms = to_sde_kms(priv->kms);
 	if (!sde_kms) {
 		SDE_ERROR("invalid sde_kms\n");
 		return;
@@ -1205,6 +1257,16 @@
 		if (ret)
 			SDE_ERROR_ENC(sde_enc, "failed to setup DSC:%d\n", ret);
 	}
+
+	if (hw_mdptop->ops.setup_vsync_sel) {
+		for (i = 0; i < sde_enc->num_phys_encs; i++)
+			te_cfg.ppnumber[i] = sde_enc->hw_pp[i]->idx;
+
+		te_cfg.pp_count = sde_enc->num_phys_encs;
+		te_cfg.frame_rate = sde_enc->disp_info.frame_rate;
+		hw_mdptop->ops.setup_vsync_sel(hw_mdptop, &te_cfg,
+				sde_enc->disp_info.is_te_using_watchdog_timer);
+	}
 }
 
 void sde_encoder_virt_restore(struct drm_encoder *drm_enc)
@@ -1659,7 +1721,7 @@
 	/* don't perform flush/start operations for slave encoders */
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
-		enum sde_rm_topology_name topology = SDE_RM_TOPOLOGY_UNKNOWN;
+		enum sde_rm_topology_name topology = SDE_RM_TOPOLOGY_NONE;
 
 		if (!phys || phys->enable_state == SDE_ENC_DISABLED)
 			continue;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
index c5ddee6..7292a12 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -33,15 +33,16 @@
  * @intfs:	Interfaces this encoder is using, INTF_MODE_NONE if unused
  * @wbs:	Writebacks this encoder is using, INTF_MODE_NONE if unused
  * @needs_cdm:	Encoder requests a CDM based on pixel format conversion needs
- * @needs_dsc:	Request to allocate DSC block
- * @display_num_of_h_tiles:
+ * @display_num_of_h_tiles: Number of horizontal tiles in case of split
+ *                          interface
+ * @topology:   Topology of the display
  */
 struct sde_encoder_hw_resources {
 	enum sde_intf_mode intfs[INTF_MAX];
 	enum sde_intf_mode wbs[WB_MAX];
 	bool needs_cdm;
-	bool needs_dsc;
 	u32 display_num_of_h_tiles;
+	struct msm_display_topology topology;
 };
 
 /**
@@ -56,6 +57,14 @@
 };
 
 /**
+ * sde_encoder_rsc_config - rsc configuration for encoder
+ * @inline_rotate_prefill: number of lines to prefill for inline rotation
+ */
+struct sde_encoder_rsc_config {
+	u32 inline_rotate_prefill;
+};
+
+/**
  * sde_encoder_get_hw_resources - Populate table of required hardware resources
  * @encoder:	encoder pointer
  * @hw_res:	resource table to populate with encoder required resources
@@ -140,24 +149,6 @@
 void sde_encoder_virt_restore(struct drm_encoder *encoder);
 
 /**
- * enum sde_encoder_property - property tags for sde enoder
- * @SDE_ENCODER_PROPERTY_INLINE_ROTATE_REFILL: # of prefill line, 0 to disable
- */
-enum sde_encoder_property {
-	SDE_ENCODER_PROPERTY_INLINE_ROTATE_PREFILL,
-	SDE_ENCODER_PROPERTY_MAX,
-};
-
-/*
- * sde_encoder_set_property - set the property tag to the given value
- * @encoder: Pointer to drm encoder object
- * @tag: property tag
- * @val: property value
- * return: 0 if success; errror code otherwise
- */
-int sde_encoder_set_property(struct drm_encoder *encoder, u32 tag, u64 val);
-
-/**
  * sde_encoder_init - initialize virtual encoder object
  * @dev:        Pointer to drm device structure
  * @disp_info:  Pointer to display information structure
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index a3b112d..3d6dc32 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -432,8 +432,7 @@
 
 	topology = sde_connector_get_topology_name(phys_enc->connector);
 	if (phys_enc->split_role == ENC_ROLE_SOLO &&
-			topology == SDE_RM_TOPOLOGY_DUALPIPEMERGE &&
-			phys_enc->comp_type == MSM_DISPLAY_COMPRESSION_NONE)
+			topology == SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE)
 		return BLEND_3D_H_ROW_INT;
 
 	return BLEND_3D_NONE;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index b8ab066..1faa46e2 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -93,6 +93,27 @@
 
 #define DEFAULT_SBUF_HEADROOM		(20)
 
+/*
+ * Default parameter values
+ */
+#define DEFAULT_MAX_BW_HIGH			7000000
+#define DEFAULT_MAX_BW_LOW			7000000
+#define DEFAULT_UNDERSIZED_PREFILL_LINES	2
+#define DEFAULT_XTRA_PREFILL_LINES		2
+#define DEFAULT_DEST_SCALE_PREFILL_LINES	3
+#define DEFAULT_MACROTILE_PREFILL_LINES		4
+#define DEFAULT_YUV_NV12_PREFILL_LINES		8
+#define DEFAULT_LINEAR_PREFILL_LINES		1
+#define DEFAULT_DOWNSCALING_PREFILL_LINES	1
+#define DEFAULT_CORE_IB_FF			"6.0"
+#define DEFAULT_CORE_CLK_FF			"1.0"
+#define DEFAULT_COMP_RATIO_RT \
+		"NV12/5/1/1.23 AB24/5/1/1.23 XB24/5/1/1.23"
+#define DEFAULT_COMP_RATIO_NRT \
+		"NV12/5/1/1.25 AB24/5/1/1.25 XB24/5/1/1.25"
+#define DEFAULT_MAX_PER_PIPE_BW			2400000
+#define DEFAULT_AMORTIZABLE_THRESHOLD		25
+
 /*************************************************************
  *  DTSI PROPERTY INDEX
  *************************************************************/
@@ -127,6 +148,18 @@
 enum {
 	PERF_MAX_BW_LOW,
 	PERF_MAX_BW_HIGH,
+	PERF_CORE_IB_FF,
+	PERF_CORE_CLK_FF,
+	PERF_COMP_RATIO_RT,
+	PERF_COMP_RATIO_NRT,
+	PERF_UNDERSIZED_PREFILL_LINES,
+	PERF_DEST_SCALE_PREFILL_LINES,
+	PERF_MACROTILE_PREFILL_LINES,
+	PERF_YUV_NV12_PREFILL_LINES,
+	PERF_LINEAR_PREFILL_LINES,
+	PERF_DOWNSCALING_PREFILL_LINES,
+	PERF_XTRA_PREFILL_LINES,
+	PERF_AMORTIZABLE_THRESHOLD,
 	PERF_PROP_MAX,
 };
 
@@ -144,6 +177,7 @@
 	SSPP_RGB_BLOCKS,
 	SSPP_EXCL_RECT,
 	SSPP_SMART_DMA,
+	SSPP_MAX_PER_PIPE_BW,
 	SSPP_PROP_MAX,
 };
 
@@ -320,6 +354,28 @@
 static struct sde_prop_type sde_perf_prop[] = {
 	{PERF_MAX_BW_LOW, "qcom,sde-max-bw-low-kbps", false, PROP_TYPE_U32},
 	{PERF_MAX_BW_HIGH, "qcom,sde-max-bw-high-kbps", false, PROP_TYPE_U32},
+	{PERF_CORE_IB_FF, "qcom,sde-core-ib-ff", false, PROP_TYPE_STRING},
+	{PERF_CORE_CLK_FF, "qcom,sde-core-clk-ff", false, PROP_TYPE_STRING},
+	{PERF_COMP_RATIO_RT, "qcom,sde-comp-ratio-rt", false,
+			PROP_TYPE_STRING},
+	{PERF_COMP_RATIO_NRT, "qcom,sde-comp-ratio-nrt", false,
+			PROP_TYPE_STRING},
+	{PERF_UNDERSIZED_PREFILL_LINES, "qcom,sde-undersizedprefill-lines",
+			false, PROP_TYPE_U32},
+	{PERF_DEST_SCALE_PREFILL_LINES, "qcom,sde-dest-scaleprefill-lines",
+			false, PROP_TYPE_U32},
+	{PERF_MACROTILE_PREFILL_LINES, "qcom,sde-macrotileprefill-lines",
+			false, PROP_TYPE_U32},
+	{PERF_YUV_NV12_PREFILL_LINES, "qcom,sde-yuv-nv12prefill-lines",
+			false, PROP_TYPE_U32},
+	{PERF_LINEAR_PREFILL_LINES, "qcom,sde-linearprefill-lines",
+			false, PROP_TYPE_U32},
+	{PERF_DOWNSCALING_PREFILL_LINES, "qcom,sde-downscalingprefill-lines",
+			false, PROP_TYPE_U32},
+	{PERF_XTRA_PREFILL_LINES, "qcom,sde-xtra-prefill-lines",
+			false, PROP_TYPE_U32},
+	{PERF_AMORTIZABLE_THRESHOLD, "qcom,sde-amortizable-threshold",
+			false, PROP_TYPE_U32},
 };
 
 static struct sde_prop_type sspp_prop[] = {
@@ -339,6 +395,8 @@
 	{SSPP_EXCL_RECT, "qcom,sde-sspp-excl-rect", false, PROP_TYPE_U32_ARRAY},
 	{SSPP_SMART_DMA, "qcom,sde-sspp-smart-dma-priority", false,
 		PROP_TYPE_U32_ARRAY},
+	{SSPP_MAX_PER_PIPE_BW, "qcom,sde-max-per-pipe-bw-kbps", false,
+		PROP_TYPE_U32_ARRAY},
 };
 
 static struct sde_prop_type vig_prop[] = {
@@ -470,37 +528,6 @@
  * static API list
  *************************************************************/
 
-/**
- * _sde_copy_formats   - copy formats from src_list to dst_list
- * @dst_list:          pointer to destination list where to copy formats
- * @dst_list_size:     size of destination list
- * @dst_list_pos:      starting position on the list where to copy formats
- * @src_list:          pointer to source list where to copy formats from
- * @src_list_size:     size of source list
- * Return: number of elements populated
- */
-static uint32_t _sde_copy_formats(
-		struct sde_format_extended *dst_list,
-		uint32_t dst_list_size,
-		uint32_t dst_list_pos,
-		const struct sde_format_extended *src_list,
-		uint32_t src_list_size)
-{
-	uint32_t cur_pos, i;
-
-	if (!dst_list || !src_list || (dst_list_pos >= (dst_list_size - 1)))
-		return 0;
-
-	for (i = 0, cur_pos = dst_list_pos;
-		(cur_pos < (dst_list_size - 1)) && (i < src_list_size)
-		&& src_list[i].fourcc_format; ++i, ++cur_pos)
-		dst_list[cur_pos] = src_list[i];
-
-	dst_list[cur_pos].fourcc_format = 0;
-
-	return i;
-}
-
 static int _parse_dt_u32_handler(struct device_node *np,
 	char *prop_name, u32 *offsets, int len, bool mandatory)
 {
@@ -1078,6 +1105,12 @@
 		if (PROP_VALUE_ACCESS(prop_value, SSPP_EXCL_RECT, i) == 1)
 			set_bit(SDE_SSPP_EXCL_RECT, &sspp->features);
 
+		if (prop_exists[SSPP_MAX_PER_PIPE_BW])
+			sblk->max_per_pipe_bw = PROP_VALUE_ACCESS(prop_value,
+					SSPP_MAX_PER_PIPE_BW, i);
+		else
+			sblk->max_per_pipe_bw = DEFAULT_MAX_PER_PIPE_BW;
+
 		for (j = 0; j < sde_cfg->mdp_count; j++) {
 			sde_cfg->mdp[j].clk_ctrls[sspp->clk_ctrl].reg_off =
 				PROP_BITVALUE_ACCESS(prop_value,
@@ -1449,7 +1482,13 @@
 		wb->clk_ctrl = SDE_CLK_CTRL_WB0 +
 			PROP_VALUE_ACCESS(prop_value, WB_ID, i);
 		wb->xin_id = PROP_VALUE_ACCESS(prop_value, WB_XIN_ID, i);
-		wb->vbif_idx = VBIF_NRT;
+
+		if (IS_SDE_MAJOR_MINOR_SAME((sde_cfg->hwversion),
+				SDE_HW_VER_170))
+			wb->vbif_idx = VBIF_NRT;
+		else
+			wb->vbif_idx = VBIF_RT;
+
 		wb->len = PROP_VALUE_ACCESS(prop_value, WB_LEN, 0);
 		if (!prop_exists[WB_LEN])
 			wb->len = DEFAULT_SDE_HW_BLOCK_LEN;
@@ -2260,6 +2299,7 @@
 	int rc, len, prop_count[PERF_PROP_MAX];
 	struct sde_prop_value *prop_value = NULL;
 	bool prop_exists[PERF_PROP_MAX];
+	const char *str = NULL;
 
 	if (!cfg) {
 		SDE_ERROR("invalid argument\n");
@@ -2285,9 +2325,72 @@
 		goto freeprop;
 
 	cfg->perf.max_bw_low =
-			PROP_VALUE_ACCESS(prop_value, PERF_MAX_BW_LOW, 0);
+			prop_exists[PERF_MAX_BW_LOW] ?
+			PROP_VALUE_ACCESS(prop_value, PERF_MAX_BW_LOW, 0) :
+			DEFAULT_MAX_BW_LOW;
 	cfg->perf.max_bw_high =
-			PROP_VALUE_ACCESS(prop_value, PERF_MAX_BW_HIGH, 0);
+			prop_exists[PERF_MAX_BW_HIGH] ?
+			PROP_VALUE_ACCESS(prop_value, PERF_MAX_BW_HIGH, 0) :
+			DEFAULT_MAX_BW_HIGH;
+
+	/*
+	 * The following performance parameters (e.g. core_ib_ff) are
+	 * mapped directly as device tree string constants.
+	 */
+	rc = of_property_read_string(np,
+			sde_perf_prop[PERF_CORE_IB_FF].prop_name, &str);
+	cfg->perf.core_ib_ff = rc ? DEFAULT_CORE_IB_FF : str;
+	rc = of_property_read_string(np,
+			sde_perf_prop[PERF_CORE_CLK_FF].prop_name, &str);
+	cfg->perf.core_clk_ff = rc ? DEFAULT_CORE_CLK_FF : str;
+	rc = of_property_read_string(np,
+			sde_perf_prop[PERF_COMP_RATIO_RT].prop_name, &str);
+	cfg->perf.comp_ratio_rt = rc ? DEFAULT_COMP_RATIO_RT : str;
+	rc = of_property_read_string(np,
+			sde_perf_prop[PERF_COMP_RATIO_NRT].prop_name, &str);
+	cfg->perf.comp_ratio_nrt = rc ? DEFAULT_COMP_RATIO_NRT : str;
+	rc = 0;
+
+	cfg->perf.undersized_prefill_lines =
+			prop_exists[PERF_UNDERSIZED_PREFILL_LINES] ?
+			PROP_VALUE_ACCESS(prop_value,
+					PERF_UNDERSIZED_PREFILL_LINES, 0) :
+			DEFAULT_UNDERSIZED_PREFILL_LINES;
+	cfg->perf.xtra_prefill_lines =
+			prop_exists[PERF_XTRA_PREFILL_LINES] ?
+			PROP_VALUE_ACCESS(prop_value,
+					PERF_XTRA_PREFILL_LINES, 0) :
+			DEFAULT_XTRA_PREFILL_LINES;
+	cfg->perf.dest_scale_prefill_lines =
+			prop_exists[PERF_DEST_SCALE_PREFILL_LINES] ?
+			PROP_VALUE_ACCESS(prop_value,
+					PERF_DEST_SCALE_PREFILL_LINES, 0) :
+			DEFAULT_DEST_SCALE_PREFILL_LINES;
+	cfg->perf.macrotile_prefill_lines =
+			prop_exists[PERF_MACROTILE_PREFILL_LINES] ?
+			PROP_VALUE_ACCESS(prop_value,
+					PERF_MACROTILE_PREFILL_LINES, 0) :
+			DEFAULT_MACROTILE_PREFILL_LINES;
+	cfg->perf.yuv_nv12_prefill_lines =
+			prop_exists[PERF_YUV_NV12_PREFILL_LINES] ?
+			PROP_VALUE_ACCESS(prop_value,
+					PERF_YUV_NV12_PREFILL_LINES, 0) :
+			DEFAULT_YUV_NV12_PREFILL_LINES;
+	cfg->perf.linear_prefill_lines =
+			prop_exists[PERF_LINEAR_PREFILL_LINES] ?
+			PROP_VALUE_ACCESS(prop_value,
+					PERF_LINEAR_PREFILL_LINES, 0) :
+			DEFAULT_LINEAR_PREFILL_LINES;
+	cfg->perf.downscaling_prefill_lines =
+			prop_exists[PERF_DOWNSCALING_PREFILL_LINES] ?
+			PROP_VALUE_ACCESS(prop_value,
+					PERF_DOWNSCALING_PREFILL_LINES, 0) :
+			DEFAULT_DOWNSCALING_PREFILL_LINES;
+	cfg->perf.amortizable_threshold =
+			prop_exists[PERF_AMORTIZABLE_THRESHOLD] ?
+			PROP_VALUE_ACCESS(prop_value,
+					PERF_AMORTIZABLE_THRESHOLD, 0) :
+			DEFAULT_AMORTIZABLE_THRESHOLD;
 
 freeprop:
 	kfree(prop_value);
@@ -2311,7 +2414,7 @@
 			rc = -ENOMEM;
 			goto end;
 		}
-		index = _sde_copy_formats(sde_cfg->cursor_formats,
+		index = sde_copy_formats(sde_cfg->cursor_formats,
 			cursor_list_size, 0, cursor_formats,
 			ARRAY_SIZE(cursor_formats));
 	}
@@ -2352,34 +2455,34 @@
 		goto end;
 	}
 
-	index = _sde_copy_formats(sde_cfg->dma_formats, dma_list_size,
+	index = sde_copy_formats(sde_cfg->dma_formats, dma_list_size,
 		0, plane_formats, ARRAY_SIZE(plane_formats));
-	index += _sde_copy_formats(sde_cfg->dma_formats, dma_list_size,
+	index += sde_copy_formats(sde_cfg->dma_formats, dma_list_size,
 		index, rgb_10bit_formats,
 		ARRAY_SIZE(rgb_10bit_formats));
 
-	index = _sde_copy_formats(sde_cfg->vig_formats, vig_list_size,
+	index = sde_copy_formats(sde_cfg->vig_formats, vig_list_size,
 		0, plane_formats_yuv, ARRAY_SIZE(plane_formats_yuv));
-	index += _sde_copy_formats(sde_cfg->vig_formats, vig_list_size,
+	index += sde_copy_formats(sde_cfg->vig_formats, vig_list_size,
 		index, rgb_10bit_formats,
 		ARRAY_SIZE(rgb_10bit_formats));
-	index += _sde_copy_formats(sde_cfg->vig_formats, vig_list_size,
+	index += sde_copy_formats(sde_cfg->vig_formats, vig_list_size,
 		index, p010_formats, ARRAY_SIZE(p010_formats));
 	if (IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_400))
-		index += _sde_copy_formats(sde_cfg->vig_formats,
+		index += sde_copy_formats(sde_cfg->vig_formats,
 			vig_list_size, index, p010_ubwc_formats,
 			ARRAY_SIZE(p010_ubwc_formats));
 
-	index += _sde_copy_formats(sde_cfg->vig_formats, vig_list_size,
+	index += sde_copy_formats(sde_cfg->vig_formats, vig_list_size,
 		index, tp10_ubwc_formats,
 		ARRAY_SIZE(tp10_ubwc_formats));
 
-	index = _sde_copy_formats(sde_cfg->wb_formats, wb2_list_size,
+	index = sde_copy_formats(sde_cfg->wb_formats, wb2_list_size,
 		0, wb2_formats, ARRAY_SIZE(wb2_formats));
-	index += _sde_copy_formats(sde_cfg->wb_formats, wb2_list_size,
+	index += sde_copy_formats(sde_cfg->wb_formats, wb2_list_size,
 		index, rgb_10bit_formats,
 		ARRAY_SIZE(rgb_10bit_formats));
-	index += _sde_copy_formats(sde_cfg->wb_formats, wb2_list_size,
+	index += sde_copy_formats(sde_cfg->wb_formats, wb2_list_size,
 		index, tp10_ubwc_formats,
 		ARRAY_SIZE(tp10_ubwc_formats));
 end:
@@ -2400,14 +2503,21 @@
 	case SDE_HW_VER_171:
 	case SDE_HW_VER_172:
 		/* update msm8996 target here */
+		sde_cfg->perf.min_prefill_lines = 21;
 		break;
 	case SDE_HW_VER_300:
 	case SDE_HW_VER_301:
+		/* update msm8998 target here */
+		sde_cfg->has_wb_ubwc = true;
+		sde_cfg->perf.min_prefill_lines = 25;
+		break;
 	case SDE_HW_VER_400:
 		/* update msm8998 and sdm845 target here */
 		sde_cfg->has_wb_ubwc = true;
+		sde_cfg->perf.min_prefill_lines = 24;
 		break;
 	default:
+		sde_cfg->perf.min_prefill_lines = 0xffff;
 		break;
 	}
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index b5f83ad..cfb1b67 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -361,6 +361,7 @@
  * @pcc_blk:
  * @igc_blk:
  * @format_list: Pointer to list of supported formats
+ * @max_per_pipe_bw: maximum allowable bandwidth of this pipe in kBps
  */
 struct sde_sspp_sub_blks {
 	u32 maxlinewidth;
@@ -379,6 +380,7 @@
 	u32 maxhdeciexp; /* max decimation is 2^value */
 	u32 maxvdeciexp; /* max decimation is 2^value */
 	u32 smart_dma_priority;
+	u32 max_per_pipe_bw;
 	struct sde_src_blk src_blk;
 	struct sde_scaler_blk scaler_blk;
 	struct sde_pp_blk csc_blk;
@@ -687,10 +689,36 @@
  * struct sde_perf_cfg - performance control settings
  * @max_bw_low         low threshold of maximum bandwidth (kbps)
  * @max_bw_high        high threshold of maximum bandwidth (kbps)
+ * @core_ib_ff         core instantaneous bandwidth fudge factor
+ * @core_clk_ff        core clock fudge factor
+ * @comp_ratio_rt      string of 0 or more of <fourcc>/<ven>/<mod>/<comp ratio>
+ * @comp_ratio_nrt     string of 0 or more of <fourcc>/<ven>/<mod>/<comp ratio>
+ * @undersized_prefill_lines   undersized prefill in lines
+ * @xtra_prefill_lines         extra prefill latency in lines
+ * @dest_scale_prefill_lines   destination scaler latency in lines
+ * @macrotile_prefill_lines    macrotile latency in lines
+ * @yuv_nv12_prefill_lines     yuv_nv12 latency in lines
+ * @linear_prefill_lines       linear latency in lines
+ * @downscaling_prefill_lines  downscaling latency in lines
+ * @amortizable_threshold      minimum y position for traffic shaping prefill
+ * @min_prefill_lines  minimum pipeline latency in lines
  */
 struct sde_perf_cfg {
 	u32 max_bw_low;
 	u32 max_bw_high;
+	const char *core_ib_ff;
+	const char *core_clk_ff;
+	const char *comp_ratio_rt;
+	const char *comp_ratio_nrt;
+	u32 undersized_prefill_lines;
+	u32 xtra_prefill_lines;
+	u32 dest_scale_prefill_lines;
+	u32 macrotile_prefill_lines;
+	u32 yuv_nv12_prefill_lines;
+	u32 linear_prefill_lines;
+	u32 downscaling_prefill_lines;
+	u32 amortizable_threshold;
+	u32 min_prefill_lines;
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.c b/drivers/gpu/drm/msm/sde/sde_hw_top.c
index cf54611..bd212e2 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_top.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.c
@@ -34,6 +34,17 @@
 #define TRAFFIC_SHAPER_WR_CLIENT(num)     (0x060 + (num * 4))
 #define TRAFFIC_SHAPER_FIXPOINT_FACTOR    4
 
+#define MDP_WD_TIMER_0_CTL                0x380
+#define MDP_WD_TIMER_0_CTL2               0x384
+#define MDP_WD_TIMER_0_LOAD_VALUE         0x388
+
+#define MDP_TICK_COUNT                    16
+#define XO_CLK_RATE                       19200
+#define MS_TICKS_IN_SEC                   1000
+
+#define CALCULATE_WD_LOAD_VALUE(fps) \
+	((uint32_t)((MS_TICKS_IN_SEC * XO_CLK_RATE)/(MDP_TICK_COUNT * fps)))
+
 #define DCE_SEL                           0x450
 
 static void sde_hw_setup_split_pipe(struct sde_hw_mdp *mdp,
@@ -192,6 +203,39 @@
 	status->wb[WB_3] = 0;
 }
 
+/*
+ * sde_hw_setup_vsync_sel - select the TE/vsync source for the given ping
+ * pong blocks and, when watchdog_te is set, program watchdog timer 0 to
+ * generate a periodic TE at cfg->frame_rate.
+ * @mdp: mdp top context driver
+ * @cfg: active ping pong count/numbers and display frame rate
+ * @watchdog_te: true to route TE from the watchdog timer, false to restore
+ *		 the default TE source for the listed ping pongs
+ */
+static void sde_hw_setup_vsync_sel(struct sde_hw_mdp *mdp,
+		struct sde_watchdog_te_status *cfg, bool watchdog_te)
+{
+	struct sde_hw_blk_reg_map *c;
+	u32 reg = 0;
+	int i = 0;
+	/*
+	 * bit-field offsets within MDP_VSYNC_SEL, indexed by ppnumber - 1;
+	 * assumes ppnumber entries are in 1..4 -- TODO confirm against catalog
+	 */
+	u32 pp_offset[] = {0xC, 0x8, 0x4, 0x13};
+
+	/* validate both pointers before any use (mdp was previously
+	 * dereferenced for &mdp->hw ahead of its NULL check)
+	 */
+	if (!mdp || !cfg)
+		return;
+
+	c = &mdp->hw;
+	reg = SDE_REG_READ(c, MDP_VSYNC_SEL);
+	for (i = 0; i < cfg->pp_count; i++) {
+		if (watchdog_te)
+			reg |= 0xF << pp_offset[cfg->ppnumber[i] - 1];
+		else
+			reg &= ~(0xF << pp_offset[cfg->ppnumber[i] - 1]);
+	}
+
+	SDE_REG_WRITE(c, MDP_VSYNC_SEL, reg);
+
+	/* frame_rate guard avoids a divide-by-zero in the load calculation */
+	if (watchdog_te && cfg->frame_rate) {
+		SDE_REG_WRITE(c, MDP_WD_TIMER_0_LOAD_VALUE,
+				CALCULATE_WD_LOAD_VALUE(cfg->frame_rate));
+
+		SDE_REG_WRITE(c, MDP_WD_TIMER_0_CTL, BIT(0)); /* clear timer */
+		reg = SDE_REG_READ(c, MDP_WD_TIMER_0_CTL2);
+		reg |= BIT(8);		/* enable heartbeat timer */
+		reg |= BIT(0);		/* enable WD timer */
+		SDE_REG_WRITE(c, MDP_WD_TIMER_0_CTL2, reg);
+	}
+}
+
 static void sde_hw_get_safe_status(struct sde_hw_mdp *mdp,
 		struct sde_danger_safe_status *status)
 {
@@ -261,6 +305,7 @@
 	ops->setup_cdm_output = sde_hw_setup_cdm_output;
 	ops->setup_clk_force_ctrl = sde_hw_setup_clk_force_ctrl;
 	ops->get_danger_status = sde_hw_get_danger_status;
+	ops->setup_vsync_sel = sde_hw_setup_vsync_sel;
 	ops->get_safe_status = sde_hw_get_safe_status;
 	ops->setup_dce = sde_hw_setup_dce;
 	ops->reset_ubwc = sde_hw_reset_ubwc;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.h b/drivers/gpu/drm/msm/sde/sde_hw_top.h
index 7511358..9cb4494 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_top.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.h
@@ -77,6 +77,18 @@
 };
 
 /**
+ * struct sde_watchdog_te_status - configure watchdog timer to generate TE
+ * @pp_count: number of active ping pong blocks described in @ppnumber
+ * @frame_rate: display frame rate in fps, used to derive the timer load value
+ * @ppnumber: flexible array of 1-based ping pong block numbers
+ */
+struct sde_watchdog_te_status {
+	u32 pp_count;
+	u32 frame_rate;
+	u32 ppnumber[];
+};
+
+/**
  * struct sde_hw_mdp_ops - interface to the MDP TOP Hw driver functions
  * Assumption is these functions will be called after clocks are enabled.
  * @setup_split_pipe : Programs the pipe control registers
@@ -142,6 +154,15 @@
 			struct sde_danger_safe_status *status);
 
 	/**
+	 * setup_vsync_sel - select vsync source and configure watchdog timer TE
+	 * @mdp: mdp top context driver
+	 * @cfg: watchdog timer configuration
+	 * @watchdog_te: watchdog timer enable
+	 */
+	void (*setup_vsync_sel)(struct sde_hw_mdp *mdp,
+			struct sde_watchdog_te_status *cfg, bool watchdog_te);
+
+	/**
 	 * get_safe_status - get safe status
 	 * @mdp: mdp top context driver
 	 * @status: Pointer to danger safe status
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_util.c b/drivers/gpu/drm/msm/sde/sde_hw_util.c
index b899f0c..7df5736 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_util.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_util.c
@@ -91,3 +91,33 @@
 	SDE_REG_WRITE(c, csc_reg_off + 0x40, data->csc_post_bv[2]);
 }
 
+/**
+ * sde_copy_formats - copy formats from src_list to dst_list
+ * @dst_list:          pointer to destination list where to copy formats
+ * @dst_list_size:     size of destination list
+ * @dst_list_pos:      starting position on the list where to copy formats
+ * @src_list:          pointer to source list where to copy formats from
+ * @src_list_size:     size of source list
+ *
+ * Copying stops at src_list_size, at a zero fourcc_format sentinel in the
+ * source, or when only the terminator slot remains in the destination; the
+ * destination is always terminated with a zero fourcc_format entry.
+ *
+ * Return: number of elements populated
+ */
+uint32_t sde_copy_formats(
+		struct sde_format_extended *dst_list,
+		uint32_t dst_list_size,
+		uint32_t dst_list_pos,
+		const struct sde_format_extended *src_list,
+		uint32_t src_list_size)
+{
+	uint32_t cur_pos, i;
+
+	/* reject an empty destination explicitly: dst_list_size - 1 would
+	 * otherwise wrap to UINT_MAX and defeat both bounds checks below
+	 */
+	if (!dst_list || !src_list || !dst_list_size ||
+			(dst_list_pos >= (dst_list_size - 1)))
+		return 0;
+
+	for (i = 0, cur_pos = dst_list_pos;
+		(cur_pos < (dst_list_size - 1)) && (i < src_list_size)
+		&& src_list[i].fourcc_format; ++i, ++cur_pos)
+		dst_list[cur_pos] = src_list[i];
+
+	dst_list[cur_pos].fourcc_format = 0;
+
+	return i;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_util.h b/drivers/gpu/drm/msm/sde/sde_hw_util.h
index c1bfb79..8f469b2 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_util.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_util.h
@@ -17,6 +17,8 @@
 #include <linux/slab.h>
 #include "sde_hw_mdss.h"
 
+struct sde_format_extended;
+
 /*
  * This is the common struct maintained by each sub block
  * for mapping the register offsets in this block to the
@@ -59,5 +61,11 @@
 		u32 csc_reg_off,
 		struct sde_csc_cfg *data, bool csc10);
 
-#endif /* _SDE_HW_UTIL_H */
+uint32_t sde_copy_formats(
+		struct sde_format_extended *dst_list,
+		uint32_t dst_list_size,
+		uint32_t dst_list_pos,
+		const struct sde_format_extended *src_list,
+		uint32_t src_list_size);
 
+#endif /* _SDE_HW_UTIL_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index c6cccbe..8cc196a 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -567,7 +567,8 @@
 		.set_backlight = dsi_display_set_backlight,
 		.soft_reset   = dsi_display_soft_reset,
 		.pre_kickoff  = dsi_conn_pre_kickoff,
-		.clk_ctrl = dsi_display_clk_ctrl
+		.clk_ctrl = dsi_display_clk_ctrl,
+		.get_topology = dsi_conn_get_topology
 	};
 	static const struct sde_connector_ops wb_ops = {
 		.post_init =    sde_wb_connector_post_init,
@@ -575,7 +576,8 @@
 		.get_modes =    sde_wb_connector_get_modes,
 		.set_property = sde_wb_connector_set_property,
 		.get_info =     sde_wb_get_info,
-		.soft_reset =   NULL
+		.soft_reset =   NULL,
+		.get_topology = sde_wb_get_topology
 	};
 	struct msm_display_info info;
 	struct drm_encoder *encoder;
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index d38a6b9..1f56d73 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -325,11 +325,11 @@
  * sde_kms_info_add_keyint - add integer value to 'sde_kms_info'
  * @info: Pointer to sde_kms_info structure
  * @key: Pointer to key string
- * @value: Signed 32-bit integer value
+ * @value: Signed 64-bit integer value
  */
 void sde_kms_info_add_keyint(struct sde_kms_info *info,
 		const char *key,
-		int32_t value);
+		int64_t value);
 
 /**
  * sde_kms_info_add_keystr - add string value to 'sde_kms_info'
@@ -407,6 +407,14 @@
 		struct sde_rect *result);
 
 /**
+ * sde_kms_rect_merge_rectangles - merge a rectangle list into one rect
+ * @rois: pointer to the list of rois
+ * @result: output rectangle, all 0 on error
+ */
+void sde_kms_rect_merge_rectangles(const struct msm_roi_list *rois,
+		struct sde_rect *result);
+
+/**
  * sde_kms_rect_is_equal - compares two rects
  * @r1: rect value to compare
  * @r2: rect value to compare
diff --git a/drivers/gpu/drm/msm/sde/sde_kms_utils.c b/drivers/gpu/drm/msm/sde/sde_kms_utils.c
index 30e12c9..dcc0bd5 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms_utils.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms_utils.c
@@ -24,14 +24,14 @@
 
 void sde_kms_info_add_keyint(struct sde_kms_info *info,
 		const char *key,
-		int32_t value)
+		int64_t value)
 {
 	uint32_t len;
 
 	if (info && key) {
 		len = snprintf(info->data + info->len,
 				SDE_KMS_INFO_MAX_SIZE - info->len,
-				"%s=%d\n",
+				"%s=%lld\n",
 				key,
 				value);
 
@@ -175,3 +175,46 @@
 		result->h = b - t;
 	}
 }
+
+void sde_kms_rect_merge_rectangles(const struct msm_roi_list *rois,
+		struct sde_rect *result)
+{
+	struct drm_clip_rect bounds;
+	const struct drm_clip_rect *r;
+	int idx;
+
+	if (!rois || !result)
+		return;
+
+	memset(result, 0, sizeof(*result));
+
+	/* start from an inverted (empty) range so any roi can extend it */
+	bounds.x1 = ~0;
+	bounds.y1 = ~0;
+	bounds.x2 = 0;
+	bounds.y2 = 0;
+
+	/* grow the bounding box over every supplied clip rectangle */
+	for (idx = 0; idx < rois->num_rects; idx++) {
+		r = &rois->roi[idx];
+
+		if (r->x1 < bounds.x1)
+			bounds.x1 = r->x1;
+		if (r->y1 < bounds.y1)
+			bounds.y1 = r->y1;
+		if (r->x2 > bounds.x2)
+			bounds.x2 = r->x2;
+		if (r->y2 > bounds.y2)
+			bounds.y2 = r->y2;
+
+		SDE_DEBUG("roi%d (%d,%d),(%d,%d) -> crtc (%d,%d),(%d,%d)\n", idx,
+				r->x1, r->y1,
+				r->x2, r->y2,
+				bounds.x1, bounds.y1,
+				bounds.x2, bounds.y2);
+	}
+
+	/* a zero max edge means no valid roi was seen; leave result zeroed */
+	if (bounds.x2 && bounds.y2) {
+		result->x = bounds.x1;
+		result->y = bounds.y1;
+		result->w = bounds.x2 - bounds.x1;
+		result->h = bounds.y2 - bounds.y1;
+	}
+}
+
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index c408861..deca83e 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -2886,11 +2886,12 @@
 
 		/*
 		 * Check exclusion rect against src rect.
-		 * Cropping is not required as hardware will consider only the
-		 * intersecting region with the src rect.
+		 * it must intersect with source rect.
 		 */
 		sde_kms_rect_intersect(&src, &pstate->excl_rect, &intersect);
-		if (!intersect.w || !intersect.h || SDE_FORMAT_IS_YUV(fmt)) {
+		if (intersect.w != pstate->excl_rect.w ||
+				intersect.h != pstate->excl_rect.h ||
+				SDE_FORMAT_IS_YUV(fmt)) {
 			SDE_ERROR_PLANE(psde,
 				"invalid excl_rect:{%d,%d,%d,%d} src:{%d,%d,%d,%d}, fmt: %4.4s\n",
 				pstate->excl_rect.x, pstate->excl_rect.y,
@@ -3027,6 +3028,7 @@
 		{SDE_DRM_DEINTERLACE, "deinterlace"}
 	};
 	const struct sde_format_extended *format_list;
+	struct sde_format_extended *virt_format_list = NULL;
 	struct sde_kms_info *info;
 	struct sde_plane *psde = to_sde_plane(plane);
 	int zpos_max = 255;
@@ -3165,9 +3167,28 @@
 	format_list = psde->pipe_sblk->format_list;
 
 	if (master_plane_id) {
+		int index, array_size;
+
+		array_size = ARRAY_SIZE(plane_formats)
+					+ ARRAY_SIZE(rgb_10bit_formats);
+		virt_format_list = kcalloc(array_size,
+				sizeof(struct sde_format_extended), GFP_KERNEL);
+		if (!virt_format_list) {
+			SDE_ERROR(
+			"failed to allocate virtual pipe format list\n");
+			return;
+		}
+
+		index = sde_copy_formats(virt_format_list, array_size,
+				0, plane_formats, ARRAY_SIZE(plane_formats));
+		sde_copy_formats(virt_format_list, array_size,
+				index, rgb_10bit_formats,
+				ARRAY_SIZE(rgb_10bit_formats));
+
+		format_list = virt_format_list;
+
 		sde_kms_info_add_keyint(info, "primary_smart_plane_id",
-				master_plane_id);
-		format_list = plane_formats;
+						master_plane_id);
 	}
 
 	if (format_list) {
@@ -3191,10 +3212,13 @@
 			psde->pipe_sblk->maxhdeciexp);
 	sde_kms_info_add_keyint(info, "max_vertical_deci",
 			psde->pipe_sblk->maxvdeciexp);
+	sde_kms_info_add_keyint(info, "max_per_pipe_bw",
+			psde->pipe_sblk->max_per_pipe_bw * 1000LL);
 	msm_property_set_blob(&psde->property_info, &psde->blob_info,
 			info->data, info->len, PLANE_PROP_INFO);
 
 	kfree(info);
+	kfree(virt_format_list);
 
 	if (psde->features & BIT(SDE_SSPP_MEMCOLOR)) {
 		snprintf(feature_name, sizeof(feature_name), "%s%d",
@@ -3912,6 +3936,7 @@
 {
 	struct drm_plane *plane = NULL;
 	const struct sde_format_extended *format_list;
+	struct sde_format_extended *virt_format_list = NULL;
 	struct sde_plane *psde;
 	struct msm_drm_private *priv;
 	struct sde_kms *kms;
@@ -3986,8 +4011,28 @@
 
 	format_list = psde->pipe_sblk->format_list;
 
-	if (master_plane_id)
-		format_list = plane_formats;
+	if (master_plane_id) {
+		int index, array_size;
+
+		array_size = ARRAY_SIZE(plane_formats)
+					+ ARRAY_SIZE(rgb_10bit_formats);
+		virt_format_list = kcalloc(array_size,
+					sizeof(struct sde_format_extended),
+					GFP_KERNEL);
+		if (!virt_format_list) {
+			SDE_ERROR(
+			"failed to allocate virtual pipe format list\n");
+			goto clean_sspp;
+		}
+
+		index = sde_copy_formats(virt_format_list, array_size,
+				0, plane_formats, ARRAY_SIZE(plane_formats));
+		sde_copy_formats(virt_format_list, array_size,
+				index, rgb_10bit_formats,
+				ARRAY_SIZE(rgb_10bit_formats));
+
+		format_list = virt_format_list;
+	}
 
 	psde->nformats = sde_populate_formats(format_list,
 				psde->formats,
@@ -4038,5 +4083,6 @@
 clean_plane:
 	kfree(psde);
 exit:
+	kfree(virt_format_list);
 	return ERR_PTR(ret);
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.c b/drivers/gpu/drm/msm/sde/sde_rm.c
index 66318b3..427a93b 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.c
+++ b/drivers/gpu/drm/msm/sde/sde_rm.c
@@ -32,22 +32,40 @@
 #define RM_RQ_LOCK(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_RESERVE_LOCK))
 #define RM_RQ_CLEAR(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_RESERVE_CLEAR))
 #define RM_RQ_DSPP(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_DSPP))
-#define RM_RQ_PPSPLIT(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_PPSPLIT))
-#define RM_RQ_FORCE_TILING(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_FORCE_TILING))
+#define RM_IS_TOPOLOGY_MATCH(t, r) ((t).num_lm == (r).num_lm && \
+				(t).num_comp_enc == (r).num_enc && \
+				(t).num_intf == (r).num_intf)
+
+/**
+ * struct sde_rm_topology_def - resource requirements for a display topology
+ * @top_name: topology name this entry describes
+ * @num_lm: number of layer mixers required
+ * @num_comp_enc: number of compression (DSC) encoders required
+ * @num_intf: number of hardware interfaces driven
+ * @num_ctl: number of control paths required
+ * @needs_split_display: whether the CTLs must have split-display capability
+ */
+struct sde_rm_topology_def {
+	enum sde_rm_topology_name top_name;
+	int num_lm;
+	int num_comp_enc;
+	int num_intf;
+	int num_ctl;
+	int needs_split_display;
+};
+
+/* per-topology requirements, matched against the encoder-reported topology
+ * via RM_IS_TOPOLOGY_MATCH() (compares num_lm, num_comp_enc and num_intf)
+ */
+static const struct sde_rm_topology_def g_top_table[] = {
+	{   SDE_RM_TOPOLOGY_NONE,                 0, 0, 0, 0, false },
+	{   SDE_RM_TOPOLOGY_SINGLEPIPE,           1, 0, 1, 1, false },
+	{   SDE_RM_TOPOLOGY_SINGLEPIPE_DSC,       1, 1, 1, 1, false },
+	{   SDE_RM_TOPOLOGY_DUALPIPE,             2, 0, 2, 2, true  },
+	{   SDE_RM_TOPOLOGY_DUALPIPE_DSC,         2, 2, 2, 2, true  },
+	{   SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE,     2, 0, 1, 1, false },
+	{   SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_DSC, 2, 1, 1, 1, false },
+	{   SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE,    2, 2, 1, 1, false },
+	{   SDE_RM_TOPOLOGY_PPSPLIT,              1, 0, 2, 1, true  },
+};
 
 /**
  * struct sde_rm_requirements - Reservation requirements parameter bundle
- * @top_name:	DRM<->HW topology use case user is trying to enable
- * @dspp:	Whether the user requires a DSPP
- * @num_lm:	Number of layer mixers needed in the use case
- * @hw_res:	Hardware resources required as reported by the encoders
+ * @top_ctrl:  topology control preference from kernel client
+ * @top:       selected topology for the display
+ * @hw_res:	   Hardware resources required as reported by the encoders
  */
 struct sde_rm_requirements {
-	enum sde_rm_topology_name top_name;
 	uint64_t top_ctrl;
-	int num_lm;
-	int num_ctl;
-	bool needs_split_display;
+	const struct sde_rm_topology_def *topology;
 	struct sde_encoder_hw_resources hw_res;
 };
 
@@ -607,7 +625,7 @@
 	}
 
 	pp_cfg = (struct sde_pingpong_cfg *)((*pp)->catalog);
-	if ((reqs->top_name == SDE_RM_TOPOLOGY_PPSPLIT) &&
+	if ((reqs->topology->top_name == SDE_RM_TOPOLOGY_PPSPLIT) &&
 			!(test_bit(SDE_PINGPONG_SPLIT, &pp_cfg->features))) {
 		SDE_DEBUG("pp %d doesn't support ppsplit\n", pp_cfg->id);
 		*dspp = NULL;
@@ -630,14 +648,15 @@
 	int lm_count = 0;
 	int i, rc = 0;
 
-	if (!reqs->num_lm) {
-		SDE_ERROR("invalid number of lm: %d\n", reqs->num_lm);
+	if (!reqs->topology->num_lm) {
+		SDE_ERROR("invalid number of lm: %d\n", reqs->topology->num_lm);
 		return -EINVAL;
 	}
 
 	/* Find a primary mixer */
 	sde_rm_init_hw_iter(&iter_i, 0, SDE_HW_BLK_LM);
-	while (lm_count != reqs->num_lm && sde_rm_get_hw(rm, &iter_i)) {
+	while (lm_count != reqs->topology->num_lm &&
+			sde_rm_get_hw(rm, &iter_i)) {
 		memset(&lm, 0, sizeof(lm));
 		memset(&dspp, 0, sizeof(dspp));
 		memset(&pp, 0, sizeof(pp));
@@ -655,7 +674,8 @@
 		/* Valid primary mixer found, find matching peers */
 		sde_rm_init_hw_iter(&iter_j, 0, SDE_HW_BLK_LM);
 
-		while (lm_count != reqs->num_lm && sde_rm_get_hw(rm, &iter_j)) {
+		while (lm_count != reqs->topology->num_lm &&
+				sde_rm_get_hw(rm, &iter_j)) {
 			if (iter_i.blk == iter_j.blk)
 				continue;
 
@@ -669,7 +689,7 @@
 		}
 	}
 
-	if (lm_count != reqs->num_lm) {
+	if (lm_count != reqs->topology->num_lm) {
 		SDE_DEBUG("unable to find appropriate mixers\n");
 		return -ENAVAIL;
 	}
@@ -687,7 +707,7 @@
 				dspp[i] ? dspp[i]->id : 0);
 	}
 
-	if (reqs->top_name == SDE_RM_TOPOLOGY_PPSPLIT) {
+	if (reqs->topology->top_name == SDE_RM_TOPOLOGY_PPSPLIT) {
 		/* reserve a free PINGPONG_SLAVE block */
 		rc = -ENAVAIL;
 		sde_rm_init_hw_iter(&iter_i, 0, SDE_HW_BLK_PINGPONG);
@@ -713,7 +733,7 @@
 static int _sde_rm_reserve_ctls(
 		struct sde_rm *rm,
 		struct sde_rm_rsvp *rsvp,
-		struct sde_rm_requirements *reqs)
+		const struct sde_rm_topology_def *top)
 {
 	struct sde_rm_hw_blk *ctls[MAX_BLOCKS];
 	struct sde_rm_hw_iter iter;
@@ -735,23 +755,23 @@
 
 		SDE_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, caps);
 
-		if (reqs->needs_split_display != has_split_display)
+		if (top->needs_split_display != has_split_display)
 			continue;
 
-		if (reqs->top_name == SDE_RM_TOPOLOGY_PPSPLIT && !has_ppsplit)
+		if (top->top_name == SDE_RM_TOPOLOGY_PPSPLIT && !has_ppsplit)
 			continue;
 
 		ctls[i] = iter.blk;
 		SDE_DEBUG("ctl %d match\n", iter.blk->id);
 
-		if (++i == reqs->num_ctl)
+		if (++i == top->num_ctl)
 			break;
 	}
 
-	if (i != reqs->num_ctl)
+	if (i != top->num_ctl)
 		return -ENAVAIL;
 
-	for (i = 0; i < ARRAY_SIZE(ctls) && i < reqs->num_ctl; i++) {
+	for (i = 0; i < ARRAY_SIZE(ctls) && i < top->num_ctl; i++) {
 		ctls[i]->rsvp_nxt = rsvp;
 		SDE_EVT32(ctls[i]->type, rsvp->enc_id, ctls[i]->id);
 	}
@@ -762,13 +782,13 @@
 static int _sde_rm_reserve_dsc(
 		struct sde_rm *rm,
 		struct sde_rm_rsvp *rsvp,
-		struct sde_rm_requirements *reqs)
+		const struct sde_rm_topology_def *top)
 {
 	struct sde_rm_hw_iter iter;
 	int alloc_count = 0;
-	int num_dsc_enc = reqs->num_lm;
+	int num_dsc_enc = top->num_lm;
 
-	if (!reqs->hw_res.needs_dsc)
+	if (!top->num_comp_enc)
 		return 0;
 
 	sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_DSC);
@@ -912,11 +932,12 @@
 		struct sde_rm_requirements *reqs)
 {
 	int ret;
+	struct sde_rm_topology_def topology;
 
 	/* Create reservation info, tag reserved blocks with it as we go */
 	rsvp->seq = ++rm->rsvp_next_seq;
 	rsvp->enc_id = enc->base.id;
-	rsvp->topology = reqs->top_name;
+	rsvp->topology = reqs->topology->top_name;
 	list_add_tail(&rsvp->list, &rm->rsvps);
 
 	/*
@@ -941,10 +962,11 @@
 	 * - Check mixers without Split Display
 	 * - Only then allow to grab from CTLs with split display capability
 	 */
-	_sde_rm_reserve_ctls(rm, rsvp, reqs);
-	if (ret && !reqs->needs_split_display) {
-		reqs->needs_split_display = true;
-		_sde_rm_reserve_ctls(rm, rsvp, reqs);
+	_sde_rm_reserve_ctls(rm, rsvp, reqs->topology);
+	if (ret && !reqs->topology->needs_split_display) {
+		memcpy(&topology, reqs->topology, sizeof(topology));
+		topology.needs_split_display = true;
+		_sde_rm_reserve_ctls(rm, rsvp, &topology);
 	}
 	if (ret) {
 		SDE_ERROR("unable to find appropriate CTL\n");
@@ -956,7 +978,7 @@
 	if (ret)
 		return ret;
 
-	ret = _sde_rm_reserve_dsc(rm, rsvp, reqs);
+	ret = _sde_rm_reserve_dsc(rm, rsvp, reqs->topology);
 	if (ret)
 		return ret;
 
@@ -971,37 +993,7 @@
 		struct sde_rm_requirements *reqs)
 {
 	const struct drm_display_mode *mode = &crtc_state->mode;
-
-	/**
-	 * DRM<->HW Topologies
-	 *
-	 * Name: SINGLEPIPE
-	 * Description: 1 LM, 1 PP, 1 INTF
-	 * Condition: 1 DRM Encoder w/ 1 Display Tiles (Default)
-	 *
-	 * Name: DUALPIPE
-	 * Description: 2 LM, 2 PP, 2 INTF
-	 * Condition: 1 DRM Encoder w/ 2 Display Tiles
-	 *
-	 * Name: PPSPLIT
-	 * Description: 1 LM, 1 PP + 1 Slave PP, 2 INTF
-	 * Condition:
-	 *	1 DRM Encoder w/ 2 Display Tiles
-	 *	topology_control & SDE_TOPREQ_PPSPLIT
-	 *
-	 * Name: DUALPIPEMERGE
-	 * Description: 2 LM, 2 PP, 3DMux, 1 INTF
-	 * Condition:
-	 *	1 DRM Encoder w/ 1 Display Tiles
-	 *	display_info.max_width >= layer_mixer.max_width
-	 *
-	 * Name: DUALPIPEMERGE
-	 * Description: 2 LM, 2 PP, 3DMux, 1 INTF
-	 * Condition:
-	 *	1 DRM Encoder w/ 1 Display Tiles
-	 *	display_info.max_width <= layer_mixer.max_width
-	 *	topology_control & SDE_TOPREQ_FORCE_TILING
-	 */
+	int i;
 
 	memset(reqs, 0, sizeof(*reqs));
 
@@ -1009,63 +1001,32 @@
 			CONNECTOR_PROP_TOPOLOGY_CONTROL);
 	sde_encoder_get_hw_resources(enc, &reqs->hw_res, conn_state);
 
-	/* DSC blocks are hardwired for control path 0 and 1 */
-	if (reqs->hw_res.needs_dsc)
-		reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DSPP);
-
-	/* Base assumption is LMs = h_tiles, conditions below may override */
-	reqs->num_lm = reqs->hw_res.display_num_of_h_tiles;
-
-	if (reqs->num_lm == 2) {
-		if (RM_RQ_PPSPLIT(reqs)) {
-			/* user requests serving dual display with 1 lm */
-			reqs->top_name = SDE_RM_TOPOLOGY_PPSPLIT;
-			reqs->num_lm = 1;
-			reqs->num_ctl = 1;
-			reqs->needs_split_display = true;
-		} else {
-			/* dual display, serve with 2 lms */
-			reqs->top_name = SDE_RM_TOPOLOGY_DUALPIPE;
-			reqs->num_ctl = 2;
-			reqs->needs_split_display = true;
+	for (i = 0; i < SDE_RM_TOPOLOGY_MAX; i++) {
+		if (RM_IS_TOPOLOGY_MATCH(g_top_table[i],
+					reqs->hw_res.topology)) {
+			reqs->topology = &g_top_table[i];
+			break;
 		}
+	}
 
-	} else if (reqs->num_lm == 1) {
-		if (mode->hdisplay > rm->lm_max_width) {
-			/* wide display, must split across 2 lm and merge */
-			reqs->top_name = SDE_RM_TOPOLOGY_DUALPIPEMERGE;
-			reqs->num_lm = 2;
-			reqs->num_ctl = 1;
-			reqs->needs_split_display = false;
-		} else if (RM_RQ_FORCE_TILING(reqs)) {
-			/* thin display, but user requests 2 lm and merge */
-			reqs->top_name = SDE_RM_TOPOLOGY_DUALPIPEMERGE;
-			reqs->num_lm = 2;
-			reqs->num_ctl = 1;
-			reqs->needs_split_display = false;
-		} else {
-			/* thin display, serve with only 1 lm */
-			reqs->top_name = SDE_RM_TOPOLOGY_SINGLEPIPE;
-			reqs->num_ctl = 1;
-			reqs->needs_split_display = false;
-		}
-
-	} else {
-		/* Currently no configurations with # LM > 2 */
-		SDE_ERROR("unsupported # of mixers %d\n", reqs->num_lm);
+	if (!reqs->topology) {
+		SDE_ERROR("invalid topology for the display\n");
 		return -EINVAL;
 	}
 
-	SDE_DEBUG("top_ctrl 0x%llX num_h_tiles %d\n", reqs->top_ctrl,
+	/* DSC blocks are hardwired for control path 0 and 1 */
+	if (reqs->topology->num_comp_enc)
+		reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DSPP);
+
+	SDE_DEBUG("top_ctrl: 0x%llX num_h_tiles: %d\n", reqs->top_ctrl,
 			reqs->hw_res.display_num_of_h_tiles);
-	SDE_DEBUG("display_max_width %d rm->lm_max_width %d\n",
-			mode->hdisplay, rm->lm_max_width);
-	SDE_DEBUG("num_lm %d num_ctl %d topology_name %d\n", reqs->num_lm,
-			reqs->num_ctl, reqs->top_name);
-	SDE_DEBUG("num_lm %d topology_name %d\n", reqs->num_lm,
-			reqs->top_name);
-	SDE_EVT32(mode->hdisplay, rm->lm_max_width, reqs->num_lm,
-			reqs->top_ctrl, reqs->top_name, reqs->num_ctl);
+	SDE_DEBUG("num_lm: %d num_ctl: %d topology: %d split_display: %d\n",
+			reqs->topology->num_lm, reqs->topology->num_ctl,
+			reqs->topology->top_name,
+			reqs->topology->needs_split_display);
+	SDE_EVT32(mode->hdisplay, rm->lm_max_width, reqs->topology->num_lm,
+			reqs->top_ctrl, reqs->topology->top_name,
+			reqs->topology->num_ctl);
 
 	return 0;
 }
@@ -1189,7 +1150,7 @@
 				sde_connector_get_propinfo(conn),
 				sde_connector_get_property_values(conn->state),
 				CONNECTOR_PROP_TOPOLOGY_NAME,
-				SDE_RM_TOPOLOGY_UNKNOWN);
+				SDE_RM_TOPOLOGY_NONE);
 	}
 }
 
@@ -1233,17 +1194,6 @@
 	return ret;
 }
 
-int sde_rm_check_property_topctl(uint64_t val)
-{
-	if ((BIT(SDE_RM_TOPCTL_FORCE_TILING) & val) &&
-			(BIT(SDE_RM_TOPCTL_PPSPLIT) & val)) {
-		SDE_ERROR("ppsplit & force_tiling are incompatible\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
 int sde_rm_reserve(
 		struct sde_rm *rm,
 		struct drm_encoder *enc,
@@ -1310,7 +1260,7 @@
 						conn_state->connector),
 				sde_connector_get_property_values(conn_state),
 				CONNECTOR_PROP_TOPOLOGY_NAME,
-				SDE_RM_TOPOLOGY_UNKNOWN);
+				SDE_RM_TOPOLOGY_NONE);
 	}
 
 	/* Check the proposed reservation, store it in hw's "next" field */
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.h b/drivers/gpu/drm/msm/sde/sde_rm.h
index 4127bc2..059952a 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.h
+++ b/drivers/gpu/drm/msm/sde/sde_rm.h
@@ -22,18 +22,27 @@
 
 /**
  * enum sde_rm_topology_name - HW resource use case in use by connector
- * @SDE_RM_TOPOLOGY_UNKNOWN: No topology in use currently
- * @SDE_RM_TOPOLOGY_SINGLEPIPE: 1 LM, 1 PP, 1 INTF/WB
- * @SDE_RM_TOPOLOGY_DUALPIPE: 2 LM, 2 PP, 2 INTF/WB
- * @SDE_RM_TOPOLOGY_PPSPLIT: 1 LM, 2 PPs, 2 INTF/WB
- * @SDE_RM_TOPOLOGY_DUALPIPEMERGE: 2 LM, 2 PP, 3DMux, 1 INTF/WB
+ * @SDE_RM_TOPOLOGY_NONE:                 No topology in use currently
+ * @SDE_RM_TOPOLOGY_SINGLEPIPE:           1 LM, 1 PP, 1 INTF/WB
+ * @SDE_RM_TOPOLOGY_SINGLEPIPE_DSC:       1 LM, 1 DSC, 1 PP, 1 INTF/WB
+ * @SDE_RM_TOPOLOGY_DUALPIPE:             2 LM, 2 PP, 2 INTF/WB
+ * @SDE_RM_TOPOLOGY_DUALPIPE_DSC:         2 LM, 2 DSC, 2 PP, 2 INTF/WB
+ * @SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE:     2 LM, 2 PP, 3DMux, 1 INTF/WB
+ * @SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_DSC: 2 LM, 2 PP, 3DMux, 1 DSC, 1 INTF/WB
+ * @SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE:    2 LM, 2 PP, 2 DSC Merge, 1 INTF/WB
+ * @SDE_RM_TOPOLOGY_PPSPLIT:              1 LM, 2 PPs, 2 INTF/WB
  */
 enum sde_rm_topology_name {
-	SDE_RM_TOPOLOGY_UNKNOWN = 0,
+	SDE_RM_TOPOLOGY_NONE = 0,
 	SDE_RM_TOPOLOGY_SINGLEPIPE,
+	SDE_RM_TOPOLOGY_SINGLEPIPE_DSC,
 	SDE_RM_TOPOLOGY_DUALPIPE,
+	SDE_RM_TOPOLOGY_DUALPIPE_DSC,
+	SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE,
+	SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_DSC,
+	SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE,
 	SDE_RM_TOPOLOGY_PPSPLIT,
-	SDE_RM_TOPOLOGY_DUALPIPEMERGE,
+	SDE_RM_TOPOLOGY_MAX,
 };
 
 /**
@@ -47,18 +56,11 @@
  *                               Normal behavior would not impact the
  *                               reservation list during the AtomicTest phase.
  * @SDE_RM_TOPCTL_DSPP: Require layer mixers with DSPP capabilities
- * @SDE_RM_TOPCTL_FORCE_TILING: Require kernel to split across multiple layer
- *                              mixers, despite width fitting within capability
- *                              of a single layer mixer.
- * @SDE_RM_TOPCTL_PPSPLIT: Require kernel to use pingpong split pipe
- *                         configuration instead of dual pipe.
  */
 enum sde_rm_topology_control {
 	SDE_RM_TOPCTL_RESERVE_LOCK,
 	SDE_RM_TOPCTL_RESERVE_CLEAR,
 	SDE_RM_TOPCTL_DSPP,
-	SDE_RM_TOPCTL_FORCE_TILING,
-	SDE_RM_TOPCTL_PPSPLIT,
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde_power_handle.h b/drivers/gpu/drm/msm/sde_power_handle.h
index d753f0a..da68139 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.h
+++ b/drivers/gpu/drm/msm/sde_power_handle.h
@@ -16,9 +16,9 @@
 
 #define MAX_CLIENT_NAME_LEN 128
 
-#define SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA	2000000000
+#define SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA	2000000
 #define SDE_POWER_HANDLE_DISABLE_BUS_AB_QUOTA	0
-#define SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA	2000000000
+#define SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA	2000000
 #define SDE_POWER_HANDLE_DISABLE_BUS_IB_QUOTA	0
 
 #include <linux/sde_io_util.h>
diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c
index 3413ee7..50710cd 100644
--- a/drivers/gpu/drm/msm/sde_rsc.c
+++ b/drivers/gpu/drm/msm/sde_rsc.c
@@ -416,6 +416,11 @@
 	if (config)
 		sde_rsc_timer_calculate(rsc, config);
 
+	if (rsc->current_state == SDE_RSC_CMD_STATE) {
+		rc = 0;
+		goto vsync_wait;
+	}
+
 	/* any one client in video state blocks the cmd state switch */
 	list_for_each_entry(client, &rsc->client_list, list)
 		if (client->current_state == SDE_RSC_VID_STATE)
@@ -427,8 +432,10 @@
 			rpmh_mode_solver_set(rsc->disp_rsc, true);
 	}
 
-	/* wait for vsync for vid to cmd state switch */
-	if (!rc && (rsc->current_state == SDE_RSC_VID_STATE))
+vsync_wait:
+	/* wait for vsync for vid to cmd state switch and config update */
+	if (!rc && (rsc->current_state == SDE_RSC_VID_STATE ||
+			rsc->current_state == SDE_RSC_CMD_STATE))
 		drm_wait_one_vblank(rsc->master_drm,
 						rsc->primary_client->crtc_id);
 end:
@@ -470,6 +477,10 @@
 	if (config && (caller_client == rsc->primary_client))
 		sde_rsc_timer_calculate(rsc, config);
 
+	/* early exit without vsync wait for vid state */
+	if (rsc->current_state == SDE_RSC_VID_STATE)
+		goto end;
+
 	/* video state switch should be done immediately */
 	if (rsc->hw_ops.state_update) {
 		rc = rsc->hw_ops.state_update(rsc, SDE_RSC_VID_STATE);
@@ -482,6 +493,8 @@
 			(rsc->current_state == SDE_RSC_CMD_STATE))
 		drm_wait_one_vblank(rsc->master_drm,
 						rsc->primary_client->crtc_id);
+
+end:
 	return rc;
 }
 
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 81df309..7fd496f 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -66,7 +66,9 @@
 #define GAM_GDP_ALPHARANGE_255  BIT(5)
 #define GAM_GDP_AGC_FULL_RANGE  0x00808080
 #define GAM_GDP_PPT_IGNORE      (BIT(1) | BIT(0))
-#define GAM_GDP_SIZE_MAX        0x7FF
+
+#define GAM_GDP_SIZE_MAX_WIDTH  3840
+#define GAM_GDP_SIZE_MAX_HEIGHT 2160
 
 #define GDP_NODE_NB_BANK        2
 #define GDP_NODE_PER_FIELD      2
@@ -633,8 +635,8 @@
 	/* src_x are in 16.16 format */
 	src_x = state->src_x >> 16;
 	src_y = state->src_y >> 16;
-	src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX);
-	src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX);
+	src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX_WIDTH);
+	src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX_HEIGHT);
 
 	format = sti_gdp_fourcc2format(fb->pixel_format);
 	if (format == -1) {
@@ -732,8 +734,8 @@
 	/* src_x are in 16.16 format */
 	src_x = state->src_x >> 16;
 	src_y = state->src_y >> 16;
-	src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX);
-	src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX);
+	src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX_WIDTH);
+	src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX_HEIGHT);
 
 	list = sti_gdp_get_free_nodes(gdp);
 	top_field = list->top_field;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index a6ed9d5..750733a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -66,8 +66,11 @@
 		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
 			goto out_unlock;
 
+		ttm_bo_reference(bo);
 		up_read(&vma->vm_mm->mmap_sem);
 		(void) fence_wait(bo->moving, true);
+		ttm_bo_unreserve(bo);
+		ttm_bo_unref(&bo);
 		goto out_unlock;
 	}
 
@@ -120,8 +123,10 @@
 
 		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
 			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+				ttm_bo_reference(bo);
 				up_read(&vma->vm_mm->mmap_sem);
 				(void) ttm_bo_wait_unreserved(bo);
+				ttm_bo_unref(&bo);
 			}
 
 			return VM_FAULT_RETRY;
@@ -166,6 +171,13 @@
 	ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
 	if (unlikely(ret != 0)) {
 		retval = ret;
+
+		if (retval == VM_FAULT_RETRY &&
+		    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+			/* The BO has already been unreserved. */
+			return retval;
+		}
+
 		goto out_unlock;
 	}
 
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index 14a19a4..e5cfd69 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -381,6 +381,7 @@
 #define A6XX_RBBM_PERFCTR_RBBM_SEL_1             0x508
 #define A6XX_RBBM_PERFCTR_RBBM_SEL_2             0x509
 #define A6XX_RBBM_PERFCTR_RBBM_SEL_3             0x50A
+#define A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED        0x50B
 
 #define A6XX_RBBM_ISDB_CNT                       0x533
 
@@ -710,6 +711,7 @@
 #define A6XX_VBIF_VERSION                       0x3000
 #define A6XX_VBIF_GATE_OFF_WRREQ_EN             0x302A
 #define A6XX_VBIF_XIN_HALT_CTRL0                0x3080
+#define A6XX_VBIF_XIN_HALT_CTRL0_MASK           0xF
 #define A6XX_VBIF_XIN_HALT_CTRL1                0x3081
 #define A6XX_VBIF_PERF_CNT_SEL0                 0x30d0
 #define A6XX_VBIF_PERF_CNT_SEL1                 0x30d1
@@ -798,7 +800,7 @@
 #define A6XX_GMU_GPU_NAP_CTRL			0x1F8E4
 #define A6XX_GMU_RPMH_CTRL			0x1F8E8
 #define A6XX_GMU_RPMH_HYST_CTRL			0x1F8E9
-#define A6XX_GMU_RPMH_POWER_STATE		0x1F8EC
+#define A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE    0x1F8EC
 #define A6XX_GMU_BOOT_KMD_LM_HANDSHAKE		0x1F9F0
 
 /* HFI registers*/
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 68d7653..bf3a91a 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -609,6 +609,14 @@
 	/* Ensure this increment is done before the IRQ status is updated */
 	smp_mb__after_atomic();
 
+	/*
+	 * On A6xx, the GPU can power down once the INT_0_STATUS is read
+	 * below. But there still might be some register reads required
+	 * so force the GMU/GPU into KEEPALIVE mode until done with the ISR.
+	 */
+	if (gpudev->gpu_keepalive)
+		gpudev->gpu_keepalive(adreno_dev, true);
+
 	adreno_readreg(adreno_dev, ADRENO_REG_RBBM_INT_0_STATUS, &status);
 
 	/*
@@ -647,6 +655,13 @@
 		adreno_writereg(adreno_dev, ADRENO_REG_RBBM_INT_CLEAR_CMD,
 				int_bit);
 
+	/* Turn off the KEEPALIVE vote from earlier unless hard fault set */
+	if (gpudev->gpu_keepalive) {
+		/* If hard fault, then let snapshot turn off the keepalive */
+		if (!(adreno_gpu_fault(adreno_dev) & ADRENO_HARD_FAULT))
+			gpudev->gpu_keepalive(adreno_dev, false);
+	}
+
 	/* Make sure the regwrites are done before the decrement */
 	smp_mb__before_atomic();
 	atomic_dec(&adreno_dev->pending_irq_refcnt);
@@ -1251,9 +1266,12 @@
 		return ret;
 
 	/* Put the GPU in a responsive state */
-	ret = kgsl_pwrctrl_change_state(device, KGSL_STATE_AWARE);
-	if (ret)
-		return ret;
+	if (ADRENO_GPUREV(adreno_dev) < 600) {
+		/* No need for newer generation architectures */
+		ret = kgsl_pwrctrl_change_state(device, KGSL_STATE_AWARE);
+		if (ret)
+			return ret;
+	}
 
 	ret = adreno_iommu_init(adreno_dev);
 	if (ret)
@@ -1263,7 +1281,8 @@
 	adreno_fault_detect_init(adreno_dev);
 
 	/* Power down the device */
-	kgsl_pwrctrl_change_state(device, KGSL_STATE_SLUMBER);
+	if (ADRENO_GPUREV(adreno_dev) < 600)
+		kgsl_pwrctrl_change_state(device, KGSL_STATE_SLUMBER);
 
 	if (gpudev->init != NULL)
 		gpudev->init(adreno_dev);
@@ -1329,6 +1348,9 @@
 {
 	int i;
 
+	if (kgsl_gmu_isenabled(device))
+		return false;
+
 	for (i = 0; i < KGSL_MAX_REGULATORS; i++) {
 		struct kgsl_regulator *regulator =
 			&device->pwrctrl.regulators[i];
@@ -1403,7 +1425,7 @@
 	}
 
 	/* GPU comes up in secured mode, make it unsecured by default */
-	if (ADRENO_FEATURE(adreno_dev, ADRENO_CONTENT_PROTECTION))
+	if (adreno_dev->zap_loaded)
 		ret = adreno_switch_to_unsecure_mode(adreno_dev, rb);
 	else
 		adreno_writereg(adreno_dev,
@@ -1466,6 +1488,15 @@
 		goto error_mmu_off;
 	}
 
+	/* Send OOB request to turn on the GX */
+	if (gpudev->oob_set) {
+		status = gpudev->oob_set(adreno_dev, OOB_GPUSTART_SET_MASK,
+				OOB_GPUSTART_CHECK_MASK,
+				OOB_GPUSTART_CLEAR_MASK);
+		if (status)
+			goto error_mmu_off;
+	}
+
 	/* Enable 64 bit gpu addr if feature is set */
 	if (gpudev->enable_64bit &&
 			adreno_support_64bit(adreno_dev))
@@ -1547,7 +1578,7 @@
 
 	status = adreno_ringbuffer_start(adreno_dev, ADRENO_START_COLD);
 	if (status)
-		goto error_mmu_off;
+		goto error_oob_clear;
 
 	/* Start the dispatcher */
 	adreno_dispatcher_start(device);
@@ -1560,8 +1591,16 @@
 		pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
 				pmqos_active_vote);
 
+	/* Send OOB request to allow IFPC */
+	if (gpudev->oob_clear)
+		gpudev->oob_clear(adreno_dev, OOB_GPUSTART_CLEAR_MASK);
+
 	return 0;
 
+error_oob_clear:
+	if (gpudev->oob_clear)
+		gpudev->oob_clear(adreno_dev, OOB_GPUSTART_CLEAR_MASK);
+
 error_mmu_off:
 	kgsl_mmu_stop(&device->mmu);
 
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 530529f..78cecd0 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -23,6 +23,7 @@
 #include "adreno_perfcounter.h"
 #include <linux/stat.h>
 #include <linux/delay.h>
+#include "kgsl_gmu.h"
 
 #include "a4xx_reg.h"
 
@@ -410,6 +411,7 @@
  * @gpu_llc_slice_enable: To enable the GPU system cache slice or not
  * @gpuhtw_llc_slice: GPU pagetables system cache slice descriptor
  * @gpuhtw_llc_slice_enable: To enable the GPUHTW system cache slice or not
+ * @zap_loaded: Used to track if zap was successfully loaded or not
  */
 struct adreno_device {
 	struct kgsl_device dev;    /* Must be first field in this struct */
@@ -473,6 +475,7 @@
 	bool gpu_llc_slice_enable;
 	void *gpuhtw_llc_slice;
 	bool gpuhtw_llc_slice_enable;
+	unsigned int zap_loaded;
 };
 
 /**
@@ -853,6 +856,8 @@
 				unsigned int clear_mask);
 	void (*oob_clear)(struct adreno_device *adreno_dev,
 				unsigned int clear_mask);
+	void (*gpu_keepalive)(struct adreno_device *adreno_dev,
+			bool state);
 	int (*rpmh_gpu_pwrctrl)(struct adreno_device *, unsigned int ops,
 				unsigned int arg1, unsigned int arg2);
 	bool (*hw_isidle)(struct adreno_device *);
@@ -1230,7 +1235,7 @@
 		kgsl_gmu_regread(KGSL_DEVICE(adreno_dev),
 				gpudev->reg_offsets->offsets[offset_name], val);
 	else
-		*val = 0xDEADBEEF;
+		*val = 0;
 }
 
 /*
@@ -1669,4 +1674,37 @@
 	*counter = val;
 	return ret;
 }
+
+static inline int adreno_perfcntr_active_oob_get(
+		struct adreno_device *adreno_dev)
+{
+	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+	int ret;
+
+	ret = kgsl_active_count_get(KGSL_DEVICE(adreno_dev));
+	if (ret)
+		return ret;
+
+	if (gpudev->oob_set) {
+		ret = gpudev->oob_set(adreno_dev, OOB_PERFCNTR_SET_MASK,
+				OOB_PERFCNTR_CHECK_MASK,
+				OOB_PERFCNTR_CLEAR_MASK);
+		if (ret)
+			kgsl_active_count_put(KGSL_DEVICE(adreno_dev));
+	}
+
+	return ret;
+}
+
+static inline void adreno_perfcntr_active_oob_put(
+		struct adreno_device *adreno_dev)
+{
+	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+
+	if (gpudev->oob_clear)
+		gpudev->oob_clear(adreno_dev, OOB_PERFCNTR_CLEAR_MASK);
+
+	kgsl_active_count_put(KGSL_DEVICE(adreno_dev));
+}
+
 #endif /*__ADRENO_H */
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 1e95e38..6c8b677 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -30,7 +30,6 @@
 #include "kgsl_trace.h"
 #include "adreno_a5xx_packets.h"
 
-static int zap_ucode_loaded;
 static int critical_packet_constructed;
 
 static struct kgsl_memdesc crit_pkts;
@@ -2179,7 +2178,7 @@
 	 * appropriate register,
 	 * skip if retention is supported for the CPZ register
 	 */
-	if (zap_ucode_loaded && !(ADRENO_FEATURE(adreno_dev,
+	if (adreno_dev->zap_loaded && !(ADRENO_FEATURE(adreno_dev,
 		ADRENO_CPZ_RETENTION))) {
 		int ret;
 		struct scm_desc desc = {0};
@@ -2197,14 +2196,13 @@
 	}
 
 	/* Load the zap shader firmware through PIL if its available */
-	if (adreno_dev->gpucore->zap_name && !zap_ucode_loaded) {
+	if (adreno_dev->gpucore->zap_name && !adreno_dev->zap_loaded) {
 		ptr = subsystem_get(adreno_dev->gpucore->zap_name);
 
 		/* Return error if the zap shader cannot be loaded */
 		if (IS_ERR_OR_NULL(ptr))
 			return (ptr == NULL) ? -ENODEV : PTR_ERR(ptr);
-
-		zap_ucode_loaded = 1;
+		adreno_dev->zap_loaded = 1;
 	}
 
 	return 0;
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index 585beb9..d04ddb0 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -217,11 +217,13 @@
 static void a6xx_platform_setup(struct adreno_device *adreno_dev)
 {
 	uint64_t addr;
+	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 
 	/* Calculate SP local and private mem addresses */
 	addr = ALIGN(ADRENO_UCHE_GMEM_BASE + adreno_dev->gmem_size, SZ_64K);
 	adreno_dev->sp_local_gpuaddr = addr;
 	adreno_dev->sp_pvt_gpuaddr = addr + SZ_64K;
+	gpudev->vbif_xin_halt_ctrl0_mask = A6XX_VBIF_XIN_HALT_CTRL0_MASK;
 }
 
 static void _update_always_on_regs(struct adreno_device *adreno_dev)
@@ -383,6 +385,10 @@
 
 	adreno_vbif_start(adreno_dev, a6xx_vbif_platforms,
 			ARRAY_SIZE(a6xx_vbif_platforms));
+
+	/* Make all blocks contribute to the GPU BUSY perf counter */
+	kgsl_regwrite(device, A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);
+
 	/*
 	 * Set UCHE_WRITE_THRU_BASE to the UCHE_TRAP_BASE effectively
 	 * disabling L2 bypass
@@ -490,7 +496,7 @@
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
 	uint64_t gpuaddr;
-	static void *zap;
+	void *zap;
 	int ret = 0;
 
 	gpuaddr = fw->memdesc.gpuaddr;
@@ -500,14 +506,15 @@
 				upper_32_bits(gpuaddr));
 
 	/* Load the zap shader firmware through PIL if its available */
-	if (adreno_dev->gpucore->zap_name && !zap) {
+	if (adreno_dev->gpucore->zap_name && !adreno_dev->zap_loaded) {
 		zap = subsystem_get(adreno_dev->gpucore->zap_name);
 
 		/* Return error if the zap shader cannot be loaded */
 		if (IS_ERR_OR_NULL(zap)) {
 			ret = (zap == NULL) ? -ENODEV : PTR_ERR(zap);
 			zap = NULL;
-		}
+		} else
+			adreno_dev->zap_loaded = 1;
 	}
 
 	return ret;
@@ -827,6 +834,10 @@
 	struct gmu_device *gmu = &device->gmu;
 
 	/* Configure registers for idle setting. The setting is cumulative */
+
+	kgsl_gmu_regwrite(device,
+		A6XX_GMU_PWR_COL_INTER_FRAME_CTRL,  0x9C40400);
+
 	switch (gmu->idle_level) {
 	case GPU_HW_MIN_VOLT:
 		kgsl_gmu_regrmw(device, A6XX_GMU_RPMH_CTRL, 0,
@@ -977,6 +988,18 @@
 	trace_kgsl_gmu_oob_clear(clear_mask);
 }
 
+/*
+ * a6xx_gpu_keepalive() - GMU reg write to request GPU stays on
+ * @adreno_dev: Pointer to the adreno device that has the GMU
+ * @state: State to set: true is ON, false is OFF
+ */
+static inline void a6xx_gpu_keepalive(struct adreno_device *adreno_dev,
+		bool state)
+{
+	adreno_write_gmureg(adreno_dev,
+			ADRENO_REG_GMU_PWR_COL_KEEPALIVE, state);
+}
+
 #define SPTPRAC_POWERON_CTRL_MASK	0x00778000
 #define SPTPRAC_POWEROFF_CTRL_MASK	0x00778001
 #define SPTPRAC_POWEROFF_STATUS_MASK	BIT(2)
@@ -1214,10 +1237,12 @@
 	if (ret)
 		dev_err(&gmu->pdev->dev, "OOB set for slumber timed out\n");
 	else {
-		kgsl_gmu_regread(device, A6XX_GMU_RPMH_POWER_STATE, &state);
+		kgsl_gmu_regread(device,
+			A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE, &state);
 		if (state != GPU_HW_SLUMBER) {
 			dev_err(&gmu->pdev->dev,
-					"Failed to prepare for slumber\n");
+					"Failed to prepare for slumber: 0x%x\n",
+					state);
 			ret = -EINVAL;
 		}
 	}
@@ -1231,34 +1256,33 @@
 	struct device *dev = &gmu->pdev->dev;
 	int ret = 0;
 
-	if (device->state != KGSL_STATE_INIT &&
-		device->state != KGSL_STATE_SUSPEND) {
-		/* RSC wake sequence */
-		kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, BIT(1));
+	/* RSC wake sequence */
+	kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, BIT(1));
 
-		/* Write request before polling */
-		wmb();
+	/* Write request before polling */
+	wmb();
 
-		if (timed_poll_check(device,
-				A6XX_GMU_RSCC_CONTROL_ACK,
-				BIT(1),
-				GPU_START_TIMEOUT,
-				BIT(1))) {
-			dev_err(dev, "Failed to do GPU RSC power on\n");
-			return -EINVAL;
-		}
-
-		if (timed_poll_check(device,
-				A6XX_RSCC_SEQ_BUSY_DRV0,
-				0,
-				GPU_START_TIMEOUT,
-				0xFFFFFFFF))
-			goto error_rsc;
-
-		/* Turn on the HM and SPTP head switches */
-		ret = a6xx_hm_sptprac_control(device, true);
+	if (timed_poll_check(device,
+			A6XX_GMU_RSCC_CONTROL_ACK,
+			BIT(1),
+			GPU_START_TIMEOUT,
+			BIT(1))) {
+		dev_err(dev, "Failed to do GPU RSC power on\n");
+		return -EINVAL;
 	}
 
+	if (timed_poll_check(device,
+			A6XX_RSCC_SEQ_BUSY_DRV0,
+			0,
+			GPU_START_TIMEOUT,
+			0xFFFFFFFF))
+		goto error_rsc;
+
+	kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 0);
+
+	/* Turn on the HM and SPTP head switches */
+	ret = a6xx_hm_sptprac_control(device, true);
+
 	return ret;
 
 error_rsc:
@@ -1293,6 +1317,7 @@
 			&val);
 	kgsl_gmu_regread(device, A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_H_DRV0,
 			&val);
+	kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 0);
 
 	kgsl_gmu_regwrite(device, A6XX_GMU_AO_SPARE_CNTL, 0);
 
@@ -1315,16 +1340,21 @@
 	int ret, i;
 
 	if (boot_state == GMU_COLD_BOOT || boot_state == GMU_RESET) {
-		/* Turn on the HM and SPTP head switches */
-		ret = a6xx_hm_sptprac_control(device, true);
-		if (ret)
-			return ret;
 
 		/* Turn on TCM retention */
 		kgsl_gmu_regwrite(device, A6XX_GMU_GENERAL_7, 1);
 
-		if (!test_and_set_bit(GMU_BOOT_INIT_DONE, &gmu->flags))
+		if (!test_and_set_bit(GMU_BOOT_INIT_DONE, &gmu->flags)) {
 			_load_gmu_rpmh_ucode(device);
+			/* Turn on the HM and SPTP head switches */
+			ret = a6xx_hm_sptprac_control(device, true);
+			if (ret)
+				return ret;
+		} else {
+			ret = a6xx_rpmh_power_on_gpu(device);
+			if (ret)
+				return ret;
+		}
 
 		if (gmu->load_mode == TCM_BOOT) {
 			/* Load GMU image via AHB bus */
@@ -1482,7 +1512,9 @@
 
 	kgsl_gmu_regread(KGSL_DEVICE(adreno_dev),
 		A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, &reg);
-	return ((~reg & GPUBUSYIGNAHB) != 0);
+	if (reg & GPUBUSYIGNAHB)
+		return false;
+	return true;
 }
 
 static int a6xx_wait_for_gmu_idle(struct adreno_device *adreno_dev)
@@ -1490,9 +1522,6 @@
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct gmu_device *gmu = &device->gmu;
 
-	/* TODO: Remove this register write when firmware is updated */
-	kgsl_gmu_regwrite(device, A6XX_GMU_CM3_FW_BUSY, 0);
-
 	if (timed_poll_check(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS,
 			0, GMU_START_TIMEOUT, CXGXCPUBUSYIGNAHB)) {
 		dev_err(&gmu->pdev->dev, "GMU is not idling\n");
@@ -2224,6 +2253,10 @@
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI,
 				A6XX_RBBM_PERFCTR_LOAD_VALUE_HI),
 	ADRENO_REG_DEFINE(ADRENO_REG_VBIF_VERSION, A6XX_VBIF_VERSION),
+	ADRENO_REG_DEFINE(ADRENO_REG_VBIF_XIN_HALT_CTRL0,
+				A6XX_VBIF_XIN_HALT_CTRL0),
+	ADRENO_REG_DEFINE(ADRENO_REG_VBIF_XIN_HALT_CTRL1,
+				A6XX_VBIF_XIN_HALT_CTRL1),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
 				A6XX_GMU_ALWAYS_ON_COUNTER_L),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
@@ -2247,7 +2280,7 @@
 	ADRENO_REG_DEFINE(ADRENO_REG_GMU_HFI_SFR_ADDR,
 				A6XX_GMU_HFI_SFR_ADDR),
 	ADRENO_REG_DEFINE(ADRENO_REG_GMU_RPMH_POWER_STATE,
-				A6XX_GMU_RPMH_POWER_STATE),
+				A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE),
 	ADRENO_REG_DEFINE(ADRENO_REG_GMU_GMU2HOST_INTR_CLR,
 				A6XX_GMU_GMU2HOST_INTR_CLR),
 	ADRENO_REG_DEFINE(ADRENO_REG_GMU_GMU2HOST_INTR_INFO,
@@ -2299,6 +2332,7 @@
 	.llc_enable_overrides = a6xx_llc_enable_overrides,
 	.oob_set = a6xx_oob_set,
 	.oob_clear = a6xx_oob_clear,
+	.gpu_keepalive = a6xx_gpu_keepalive,
 	.rpmh_gpu_pwrctrl = a6xx_rpmh_gpu_pwrctrl,
 	.hw_isidle = a6xx_hw_isidle, /* Replaced by NULL if GMU is disabled */
 	.wait_for_gmu_idle = a6xx_wait_for_gmu_idle,
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index 01ecb01..63dbde0 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -180,6 +180,7 @@
 	unsigned int statetype;
 	const unsigned int *regs;
 	unsigned int num_sets;
+	unsigned int offset;
 } a6xx_non_ctx_dbgahb[] = {
 	{ 0x0002F800, 0x40, a6xx_hlsq_non_ctx_registers,
 		ARRAY_SIZE(a6xx_hlsq_non_ctx_registers) / 2 },
@@ -735,10 +736,8 @@
 	return data_size + sizeof(*header);
 }
 
-
-
-static size_t a6xx_snapshot_non_ctx_dbgahb(struct kgsl_device *device, u8 *buf,
-				size_t remain, void *priv)
+static size_t a6xx_legacy_snapshot_non_ctx_dbgahb(struct kgsl_device *device,
+				u8 *buf, size_t remain, void *priv)
 {
 	struct kgsl_snapshot_regs *header =
 				(struct kgsl_snapshot_regs *)buf;
@@ -783,6 +782,57 @@
 	return (count * 8) + sizeof(*header);
 }
 
+static size_t a6xx_snapshot_non_ctx_dbgahb(struct kgsl_device *device, u8 *buf,
+				size_t remain, void *priv)
+{
+	struct kgsl_snapshot_regs *header =
+				(struct kgsl_snapshot_regs *)buf;
+	struct a6xx_non_ctx_dbgahb_registers *regs =
+				(struct a6xx_non_ctx_dbgahb_registers *)priv;
+	unsigned int count = 0;
+	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
+	unsigned int i, k;
+	unsigned int *src;
+
+	if (crash_dump_valid == false)
+		return a6xx_legacy_snapshot_non_ctx_dbgahb(device, buf, remain,
+				regs);
+
+	if (remain < sizeof(*header)) {
+		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
+		return 0;
+	}
+
+	remain -= sizeof(*header);
+
+	src = (unsigned int *)(a6xx_crashdump_registers.hostptr + regs->offset);
+
+	for (i = 0; i < regs->num_sets; i++) {
+		unsigned int start;
+		unsigned int end;
+
+		start = regs->regs[2 * i];
+		end = regs->regs[(2 * i) + 1];
+
+		if (remain < (end - start + 1) * 8) {
+			SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
+			goto out;
+		}
+
+		remain -= ((end - start) + 1) * 8;
+
+		for (k = start; k <= end; k++, count++) {
+			*data++ = k;
+			*data++ = *src++;
+		}
+	}
+out:
+	header->count = count;
+
+	/* Return the size of the section */
+	return (count * 8) + sizeof(*header);
+}
+
 static void a6xx_snapshot_dbgahb_regs(struct kgsl_device *device,
 				struct kgsl_snapshot *snapshot)
 {
@@ -952,6 +1002,12 @@
 	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_C, reg);
 	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_D, reg);
 
+	/*
+	 * A delay of 1 us is needed to allow enough time for the correct
+	 * data to be funneled into the trace buffer
+	 */
+	udelay(1);
+
 	kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
 	val++;
 	kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
@@ -1050,6 +1106,12 @@
 	_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_C, reg);
 	_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_D, reg);
 
+	/*
+	 * A delay of 1 us is needed to allow enough time for the correct
+	 * data to be funneled into the trace buffer
+	 */
+	udelay(1);
+
 	_cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
 	val++;
 	_cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
@@ -1098,8 +1160,8 @@
 
 	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLT,
 		(0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
-		(0x4 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
-		(0x20 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));
+		(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
+		(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));
 
 	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLM,
 		0xf << A6XX_DBGC_CFG_DBGBUS_CTLTM_ENABLE_SHIFT);
@@ -1141,8 +1203,8 @@
 	if (a6xx_cx_dbgc) {
 		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLT,
 		(0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
-		(0x4 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
-		(0x20 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));
+		(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
+		(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));
 
 		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLM,
 			0xf << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE_SHIFT);
@@ -1491,6 +1553,40 @@
 	return qwords;
 }
 
+static int _a6xx_crashdump_init_non_ctx_dbgahb(uint64_t *ptr, uint64_t *offset)
+{
+	int qwords = 0;
+	unsigned int i, k;
+	unsigned int count;
+
+	for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
+		struct a6xx_non_ctx_dbgahb_registers *regs =
+				&a6xx_non_ctx_dbgahb[i];
+
+		regs->offset = *offset;
+
+		/* Program the aperture */
+		ptr[qwords++] = (regs->statetype & 0xff) << 8;
+		ptr[qwords++] =	(((uint64_t)A6XX_HLSQ_DBG_READ_SEL << 44)) |
+					(1 << 21) | 1;
+
+		for (k = 0; k < regs->num_sets; k++) {
+			unsigned int start = regs->regs[2 * k];
+
+			count = REG_PAIR_COUNT(regs->regs, k);
+			ptr[qwords++] =
+				a6xx_crashdump_registers.gpuaddr + *offset;
+			ptr[qwords++] =
+				(((uint64_t)(A6XX_HLSQ_DBG_AHB_READ_APERTURE +
+					start - regs->regbase / 4) << 44)) |
+							count;
+
+			*offset += count * sizeof(unsigned int);
+		}
+	}
+	return qwords;
+}
+
 void a6xx_crashdump_init(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -1578,6 +1674,26 @@
 		}
 	}
 
+	/*
+	 * Calculate the script and data size for non context debug
+	 * AHB registers
+	 */
+	for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
+		struct a6xx_non_ctx_dbgahb_registers *regs =
+				&a6xx_non_ctx_dbgahb[i];
+
+		/* 16 bytes for programming the aperture */
+		script_size += 16;
+
+		/* Reading each pair of registers takes 16 bytes */
+		script_size += 16 * regs->num_sets;
+
+		/* A dword per register read from the cluster list */
+		for (k = 0; k < regs->num_sets; k++)
+			data_size += REG_PAIR_COUNT(regs->regs, k) *
+				sizeof(unsigned int);
+	}
+
 	/* Now allocate the script and data buffers */
 
 	/* The script buffers needs 2 extra qwords on the end */
@@ -1619,6 +1735,8 @@
 
 	ptr += _a6xx_crashdump_init_ctx_dbgahb(ptr, &offset);
 
+	ptr += _a6xx_crashdump_init_non_ctx_dbgahb(ptr, &offset);
+
 	*ptr++ = 0;
 	*ptr++ = 0;
 }
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index 1cb0259..b831d0d 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -2049,6 +2049,7 @@
 static int dispatcher_do_fault(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
 	struct adreno_dispatcher_drawqueue *dispatch_q = NULL, *dispatch_q_temp;
 	struct adreno_ringbuffer *rb;
@@ -2148,6 +2149,10 @@
 
 	do_header_and_snapshot(device, hung_rb, cmdobj);
 
+	/* Turn off the KEEPALIVE vote from the ISR for hard fault */
+	if (gpudev->gpu_keepalive && fault & ADRENO_HARD_FAULT)
+		gpudev->gpu_keepalive(adreno_dev, false);
+
 	/* Terminate the stalled transaction and resume the IOMMU */
 	if (fault & ADRENO_IOMMU_PAGE_FAULT)
 		kgsl_mmu_pagefault_resume(&device->mmu);
diff --git a/drivers/gpu/msm/adreno_ioctl.c b/drivers/gpu/msm/adreno_ioctl.c
index 7325bbb..7c7bfa5 100644
--- a/drivers/gpu/msm/adreno_ioctl.c
+++ b/drivers/gpu/msm/adreno_ioctl.c
@@ -31,14 +31,19 @@
 	 * during start(), so it is not safe to take an
 	 * active count inside that function.
 	 */
-	result = kgsl_active_count_get(device);
 
-	if (result == 0) {
-		result = adreno_perfcounter_get(adreno_dev,
+	result = adreno_perfcntr_active_oob_get(adreno_dev);
+	if (result) {
+		mutex_unlock(&device->mutex);
+		return (long)result;
+	}
+
+	result = adreno_perfcounter_get(adreno_dev,
 			get->groupid, get->countable, &get->offset,
 			&get->offset_hi, PERFCOUNTER_FLAG_NONE);
-		kgsl_active_count_put(device);
-	}
+
+	adreno_perfcntr_active_oob_put(adreno_dev);
+
 	mutex_unlock(&device->mutex);
 
 	return (long) result;
diff --git a/drivers/gpu/msm/adreno_perfcounter.c b/drivers/gpu/msm/adreno_perfcounter.c
index 456856d..cd95003 100644
--- a/drivers/gpu/msm/adreno_perfcounter.c
+++ b/drivers/gpu/msm/adreno_perfcounter.c
@@ -267,7 +267,8 @@
 	}
 
 	mutex_lock(&device->mutex);
-	ret = kgsl_active_count_get(device);
+
+	ret = adreno_perfcntr_active_oob_get(adreno_dev);
 	if (ret) {
 		mutex_unlock(&device->mutex);
 		goto done;
@@ -296,7 +297,8 @@
 		}
 	}
 
-	kgsl_active_count_put(device);
+	adreno_perfcntr_active_oob_put(adreno_dev);
+
 	mutex_unlock(&device->mutex);
 
 	/* write the data */
@@ -709,7 +711,8 @@
 	}
 	reg = &(counters->groups[group].regs[counter]);
 
-	if (test_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv)) {
+	if (!adreno_is_a6xx(adreno_dev) &&
+			test_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv)) {
 		struct adreno_ringbuffer *rb = &adreno_dev->ringbuffers[0];
 		unsigned int buf[4];
 		unsigned int *cmds = buf;
diff --git a/drivers/gpu/msm/adreno_profile.c b/drivers/gpu/msm/adreno_profile.c
index e34957e..2985f24 100644
--- a/drivers/gpu/msm/adreno_profile.c
+++ b/drivers/gpu/msm/adreno_profile.c
@@ -739,7 +739,7 @@
 		goto error_unlock;
 	}
 
-	ret = kgsl_active_count_get(device);
+	ret = adreno_perfcntr_active_oob_get(adreno_dev);
 	if (ret) {
 		size = ret;
 		goto error_unlock;
@@ -786,7 +786,7 @@
 	size = len;
 
 error_put:
-	kgsl_active_count_put(device);
+	adreno_perfcntr_active_oob_put(adreno_dev);
 error_unlock:
 	mutex_unlock(&device->mutex);
 error_free:
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index fbff535..9d847ae 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -86,7 +86,43 @@
 	local_irq_restore(flags);
 }
 
-void adreno_ringbuffer_wptr(struct adreno_device *adreno_dev,
+/*
+ * Wait time before trying to write the register again.
+ * Hopefully the GMU has finished waking up during this delay.
+ */
+#define GMU_WAKEUP_DELAY 50
+/* Max amount of tries to wake up the GMU. */
+#define GMU_WAKEUP_RETRY_MAX 20
+
+/*
+ * Check the WRITEDROPPED0 bit in the
+ * FENCE_STATUS register to check if the write went
+ * through. If it didn't then we retry the write.
+ */
+static inline void _gmu_wptr_update_if_dropped(struct adreno_device *adreno_dev,
+		struct adreno_ringbuffer *rb)
+{
+	unsigned int val, i;
+
+	for (i = 0; i < GMU_WAKEUP_RETRY_MAX; i++) {
+		adreno_read_gmureg(adreno_dev, ADRENO_REG_GMU_AHB_FENCE_STATUS,
+				&val);
+
+		/* If !writedropped, then wptr update was successful */
+		if (!(val & 0x1))
+			return;
+
+		/* Wait a small amount of time before trying again */
+		udelay(GMU_WAKEUP_DELAY);
+
+		/* Try to write WPTR again */
+		adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR, rb->_wptr);
+	}
+
+	dev_err(adreno_dev->dev.dev, "GMU WPTR update timed out\n");
+}
+
+static void adreno_ringbuffer_wptr(struct adreno_device *adreno_dev,
 		struct adreno_ringbuffer *rb)
 {
 	unsigned long flags;
@@ -102,6 +138,14 @@
 			kgsl_pwrscale_busy(KGSL_DEVICE(adreno_dev));
 			adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR,
 				rb->_wptr);
+
+			/*
+			 * If GMU, ensure the write is posted after a possible
+			 * GMU wakeup (write could have dropped during wakeup)
+			 */
+			if (kgsl_gmu_isenabled(KGSL_DEVICE(adreno_dev)))
+				_gmu_wptr_update_if_dropped(adreno_dev, rb);
+
 		}
 	}
 
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index db105c5..876b668 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -548,7 +548,7 @@
 	if (device->ftbl->gmu_regread)
 		device->ftbl->gmu_regread(device, offsetwords, value);
 	else
-		*value = 0xDEADBEEF;
+		*value = 0;
 }
 
 static inline void kgsl_gmu_regwrite(struct kgsl_device *device,
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index 0c821cd..54659fc 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -1332,12 +1332,6 @@
 	 * In v2, this function call shall move ahead
 	 * of hfi_start() to save power.
 	 */
-	ret = gpudev->oob_set(adreno_dev, OOB_CPINIT_SET_MASK,
-			OOB_CPINIT_CHECK_MASK, OOB_CPINIT_CLEAR_MASK);
-	gpudev->oob_clear(adreno_dev, OOB_CPINIT_CLEAR_MASK);
-
-	if (ret)
-		goto error_gpu;
 
 	if (device->state == KGSL_STATE_INIT ||
 			device->state == KGSL_STATE_SUSPEND) {
@@ -1379,19 +1373,20 @@
 	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 	unsigned long t;
 	bool idle = false;
+	unsigned int reg;
 
 	if (!test_bit(GMU_CLK_ON, &gmu->flags))
 		return;
 
-	if (gpudev->hw_isidle) {
-		t = jiffies + msecs_to_jiffies(GMU_IDLE_TIMEOUT);
-		while (!time_after(jiffies, t)) {
-			if (gpudev->hw_isidle(adreno_dev)) {
-				idle = true;
-				break;
-			}
-			cpu_relax();
+	t = jiffies + msecs_to_jiffies(GMU_IDLE_TIMEOUT);
+	while (!time_after(jiffies, t)) {
+		adreno_read_gmureg(ADRENO_DEVICE(device),
+			ADRENO_REG_GMU_RPMH_POWER_STATE, &reg);
+		if (reg == device->gmu.idle_level) {
+			idle = true;
+			break;
 		}
+		cpu_relax();
 	}
 
 	gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_NOTIFY_SLUMBER, 0, 0);
diff --git a/drivers/gpu/msm/kgsl_gmu.h b/drivers/gpu/msm/kgsl_gmu.h
index 7055eb7..4cfc120 100644
--- a/drivers/gpu/msm/kgsl_gmu.h
+++ b/drivers/gpu/msm/kgsl_gmu.h
@@ -70,6 +70,12 @@
 #define OOB_CPINIT_SET_MASK		BIT(16)
 #define OOB_CPINIT_CHECK_MASK		BIT(24)
 #define OOB_CPINIT_CLEAR_MASK		BIT(24)
+#define OOB_PERFCNTR_SET_MASK		BIT(17)
+#define OOB_PERFCNTR_CHECK_MASK		BIT(25)
+#define OOB_PERFCNTR_CLEAR_MASK		BIT(25)
+#define OOB_GPUSTART_SET_MASK		BIT(18)
+#define OOB_GPUSTART_CHECK_MASK		BIT(26)
+#define OOB_GPUSTART_CLEAR_MASK		BIT(26)
 
 /* Bits for the flags field in the gmu structure */
 enum gmu_flags {
@@ -146,7 +152,7 @@
 	GPU_HW_NAP = 0x4,
 	GPU_HW_MIN_VOLT = 0x5,
 	GPU_HW_MIN_DDR = 0x6,
-	GPU_HW_SLUMBER = 0x7
+	GPU_HW_SLUMBER = 0xF
 };
 
 /**
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index a9a3c94..7811079 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -1676,6 +1676,8 @@
 	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
 	int i = 0;
 
+	if (kgsl_gmu_isenabled(device))
+		return;
 	if (test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->ctrl_flags))
 		return;
 
@@ -1785,6 +1787,8 @@
 {
 	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
 
+	if (kgsl_gmu_isenabled(device))
+		return;
 	if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->ctrl_flags))
 		return;
 
@@ -1851,6 +1855,8 @@
 	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
 	int status = 0;
 
+	if (kgsl_gmu_isenabled(device))
+		return 0;
 	if (test_bit(KGSL_PWRFLAGS_POWER_ON, &pwr->ctrl_flags))
 		return 0;
 
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index b99c1df..81853ee 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -2600,7 +2600,7 @@
 
 		/* Check for pwm4 */
 		reg = superio_inb(sioaddr, IT87_SIO_GPIO4_REG);
-		if (!(reg & BIT(2)))
+		if (reg & BIT(2))
 			sio_data->skip_pwm |= BIT(3);
 
 		/* Check for pwm2, fan2 */
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index b04e8da..71e4103 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -979,11 +979,17 @@
 	drvdata->cpu = pdata ? pdata->cpu : 0;
 
 	get_online_cpus();
-	etmdrvdata[drvdata->cpu] = drvdata;
 
-	if (smp_call_function_single(drvdata->cpu,
-				etm4_init_arch_data,  drvdata, 1))
+	ret = smp_call_function_single(drvdata->cpu,
+				       etm4_init_arch_data, drvdata, 1);
+	if (ret) {
 		dev_err(dev, "ETM arch init failed\n");
+		put_online_cpus();
+		return ret;
+	} else if (etm4_arch_supported(drvdata->arch) == false) {
+		put_online_cpus();
+		return -EINVAL;
+	}
 
 	if (!etm4_count++) {
 		cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT4_STARTING,
@@ -999,11 +1005,6 @@
 
 	put_online_cpus();
 
-	if (etm4_arch_supported(drvdata->arch) == false) {
-		ret = -EINVAL;
-		goto err_arch_supported;
-	}
-
 	etm4_init_trace_id(drvdata);
 	etm4_set_default(&drvdata->config);
 
@@ -1026,6 +1027,9 @@
 	}
 
 	pm_runtime_put(&adev->dev);
+
+	etmdrvdata[drvdata->cpu] = drvdata;
+
 	dev_info(dev, "%s initialized\n", (char *)id->data);
 
 	if (boot_enable) {
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 475ea75..9bdde0b 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -940,8 +940,6 @@
 int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
 {
 	unsigned long flags;
-	dma_addr_t paddr;
-	void __iomem *vaddr = NULL;
 
 	/* config types are set a boot time and never change */
 	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
@@ -959,21 +957,12 @@
 		 */
 		tmc_etr_enable_hw(drvdata);
 	} else {
-		/*
-		 * The ETR is not tracing and the buffer was just read.
-		 * As such prepare to free the trace buffer.
-		 */
-		vaddr = drvdata->vaddr;
-		paddr = drvdata->paddr;
-		drvdata->buf = drvdata->vaddr = NULL;
+		tmc_etr_free_mem(drvdata);
+		drvdata->buf = NULL;
 	}
 
 	drvdata->reading = false;
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
-	/* Free allocated memory out side of the spinlock */
-	if (vaddr)
-		dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);
-
 	return 0;
 }
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 01dc5e1..f9449fe 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -64,6 +64,7 @@
 void tmc_enable_hw(struct tmc_drvdata *drvdata)
 {
 	drvdata->enable = true;
+	drvdata->sticky_enable = true;
 	writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL);
 }
 
@@ -77,6 +78,9 @@
 {
 	int ret = 0;
 
+	if (!drvdata->sticky_enable)
+		return -EPERM;
+
 	switch (drvdata->config_type) {
 	case TMC_CONFIG_TYPE_ETB:
 	case TMC_CONFIG_TYPE_ETF:
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 3d6e823..a9de0e8 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -183,6 +183,7 @@
 	struct usb_qdss_ch	*usbch;
 	struct tmc_etr_bam_data	*bamdata;
 	bool			enable_to_bam;
+	bool			sticky_enable;
 
 };
 
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 58e8850..622ccbc 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -20,6 +20,7 @@
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/qcom-geni-se.h>
@@ -50,9 +51,12 @@
 #define SLV_ADDR_MSK		(GENMASK(15, 9))
 #define SLV_ADDR_SHFT		(9)
 
+#define I2C_CORE2X_VOTE		(10000)
+
 struct geni_i2c_dev {
 	struct device *dev;
 	void __iomem *base;
+	unsigned int tx_wm;
 	int irq;
 	int err;
 	struct i2c_adapter adap;
@@ -61,6 +65,7 @@
 	struct se_geni_rsc i2c_rsc;
 	int cur_wr;
 	int cur_rd;
+	struct device *wrapper_dev;
 };
 
 static inline void qcom_geni_i2c_conf(void __iomem *base, int dfs, int div)
@@ -114,7 +119,7 @@
 		}
 	} else if ((m_stat & M_TX_FIFO_WATERMARK_EN) &&
 					!(cur->flags & I2C_M_RD)) {
-		for (j = 0; j < 0x1f; j++) {
+		for (j = 0; j < gi2c->tx_wm; j++) {
 			u32 temp = 0;
 			int p;
 
@@ -163,9 +168,7 @@
 		pm_runtime_set_suspended(gi2c->dev);
 		return ret;
 	}
-	geni_se_init(gi2c->base, FIFO_MODE, 0xF, 0x10);
 	qcom_geni_i2c_conf(gi2c->base, 0, 2);
-	se_config_packing(gi2c->base, 8, 4, true);
 	dev_dbg(gi2c->dev, "i2c xfer:num:%d, msgs:len:%d,flg:%d\n",
 				num, msgs[0].len, msgs[0].flags);
 	for (i = 0; i < num; i++) {
@@ -237,6 +240,8 @@
 {
 	struct geni_i2c_dev *gi2c;
 	struct resource *res;
+	struct platform_device *wrapper_pdev;
+	struct device_node *wrapper_ph_node;
 	int ret;
 
 	gi2c = devm_kzalloc(&pdev->dev, sizeof(*gi2c), GFP_KERNEL);
@@ -249,6 +254,29 @@
 	if (!res)
 		return -EINVAL;
 
+	wrapper_ph_node = of_parse_phandle(pdev->dev.of_node,
+				"qcom,wrapper-core", 0);
+	if (IS_ERR_OR_NULL(wrapper_ph_node)) {
+		ret = PTR_ERR(wrapper_ph_node);
+		dev_err(&pdev->dev, "No wrapper core defined\n");
+		return ret;
+	}
+	wrapper_pdev = of_find_device_by_node(wrapper_ph_node);
+	of_node_put(wrapper_ph_node);
+	if (IS_ERR_OR_NULL(wrapper_pdev)) {
+		ret = PTR_ERR(wrapper_pdev);
+		dev_err(&pdev->dev, "Cannot retrieve wrapper device\n");
+		return ret;
+	}
+	gi2c->wrapper_dev = &wrapper_pdev->dev;
+	gi2c->i2c_rsc.wrapper_dev = &wrapper_pdev->dev;
+	ret = geni_se_resources_init(&gi2c->i2c_rsc, I2C_CORE2X_VOTE,
+				     (DEFAULT_SE_CLK * DEFAULT_BUS_WIDTH));
+	if (ret) {
+		dev_err(gi2c->dev, "geni_se_resources_init\n");
+		return ret;
+	}
+
 	gi2c->i2c_rsc.se_clk = devm_clk_get(&pdev->dev, "se-clk");
 	if (IS_ERR(gi2c->i2c_rsc.se_clk)) {
 		ret = PTR_ERR(gi2c->i2c_rsc.se_clk);
@@ -360,6 +388,14 @@
 	if (ret)
 		return ret;
 
+	if (unlikely(!gi2c->tx_wm)) {
+		int gi2c_tx_depth = get_tx_fifo_depth(gi2c->base);
+
+		gi2c->tx_wm = gi2c_tx_depth - 1;
+		geni_se_init(gi2c->base, gi2c->tx_wm, gi2c_tx_depth);
+		geni_se_select_mode(gi2c->base, FIFO_MODE);
+		se_config_packing(gi2c->base, 8, 4, true);
+	}
 	enable_irq(gi2c->irq);
 	return 0;
 }
diff --git a/drivers/input/misc/qpnp-power-on.c b/drivers/input/misc/qpnp-power-on.c
index e1c16aa..339f94c 100644
--- a/drivers/input/misc/qpnp-power-on.c
+++ b/drivers/input/misc/qpnp-power-on.c
@@ -207,7 +207,7 @@
 	int			pon_power_off_reason;
 	int			num_pon_reg;
 	int			num_pon_config;
-	u32			dbc;
+	u32			dbc_time_us;
 	u32			uvlo;
 	int			warm_reset_poff_type;
 	int			hard_reset_poff_type;
@@ -219,6 +219,8 @@
 	u8			warm_reset_reason2;
 	bool			is_spon;
 	bool			store_hard_reset_reason;
+	bool			kpdpwr_dbc_enable;
+	ktime_t			kpdpwr_last_release_time;
 };
 
 static int pon_ship_mode_en;
@@ -381,7 +383,7 @@
 	int rc = 0;
 	u32 val;
 
-	if (delay == pon->dbc)
+	if (delay == pon->dbc_time_us)
 		goto out;
 
 	if (pon->pon_input)
@@ -409,7 +411,7 @@
 		goto unlock;
 	}
 
-	pon->dbc = delay;
+	pon->dbc_time_us = delay;
 
 unlock:
 	if (pon->pon_input)
@@ -418,12 +420,34 @@
 	return rc;
 }
 
+static int qpnp_pon_get_dbc(struct qpnp_pon *pon, u32 *delay)
+{
+	int rc;
+	unsigned int val;
+
+	rc = regmap_read(pon->regmap, QPNP_PON_DBC_CTL(pon), &val);
+	if (rc) {
+		pr_err("Unable to read pon_dbc_ctl rc=%d\n", rc);
+		return rc;
+	}
+	val &= QPNP_PON_DBC_DELAY_MASK(pon);
+
+	if (is_pon_gen2(pon))
+		*delay = USEC_PER_SEC /
+			(1 << (QPNP_PON_GEN2_DELAY_BIT_SHIFT - val));
+	else
+		*delay = USEC_PER_SEC /
+			(1 << (QPNP_PON_DELAY_BIT_SHIFT - val));
+
+	return rc;
+}
+
 static ssize_t qpnp_pon_dbc_show(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
 	struct qpnp_pon *pon = dev_get_drvdata(dev);
 
-	return snprintf(buf, QPNP_PON_BUFFER_SIZE, "%d\n", pon->dbc);
+	return snprintf(buf, QPNP_PON_BUFFER_SIZE, "%d\n", pon->dbc_time_us);
 }
 
 static ssize_t qpnp_pon_dbc_store(struct device *dev,
@@ -777,6 +801,7 @@
 	u8  pon_rt_bit = 0;
 	u32 key_status;
 	uint pon_rt_sts;
+	u64 elapsed_us;
 
 	cfg = qpnp_get_cfg(pon, pon_type);
 	if (!cfg)
@@ -786,6 +811,15 @@
 	if (!cfg->key_code)
 		return 0;
 
+	if (pon->kpdpwr_dbc_enable && cfg->pon_type == PON_KPDPWR) {
+		elapsed_us = ktime_us_delta(ktime_get(),
+				pon->kpdpwr_last_release_time);
+		if (elapsed_us < pon->dbc_time_us) {
+			pr_debug("Ignoring kpdpwr event - within debounce time\n");
+			return 0;
+		}
+	}
+
 	/* check the RT status to get the current status of the line */
 	rc = regmap_read(pon->regmap, QPNP_PON_RT_STS(pon), &pon_rt_sts);
 	if (rc) {
@@ -814,6 +848,11 @@
 					cfg->key_code, pon_rt_sts);
 	key_status = pon_rt_sts & pon_rt_bit;
 
+	if (pon->kpdpwr_dbc_enable && cfg->pon_type == PON_KPDPWR) {
+		if (!key_status)
+			pon->kpdpwr_last_release_time = ktime_get();
+	}
+
 	/*
 	 * simulate press event in case release event occurred
 	 * without a press event
@@ -2084,6 +2123,9 @@
 		return rc;
 	}
 
+	if (sys_reset)
+		boot_reason = ffs(pon_sts);
+
 	index = ffs(pon_sts) - 1;
 	cold_boot = !qpnp_pon_is_warm_reset();
 	if (index >= ARRAY_SIZE(qpnp_pon_reason) || index < 0) {
@@ -2230,7 +2272,21 @@
 		}
 	} else {
 		rc = qpnp_pon_set_dbc(pon, delay);
+		if (rc) {
+			dev_err(&pdev->dev,
+				"Unable to set PON debounce delay rc=%d\n", rc);
+			return rc;
+		}
 	}
+	rc = qpnp_pon_get_dbc(pon, &pon->dbc_time_us);
+	if (rc) {
+		dev_err(&pdev->dev,
+			"Unable to get PON debounce delay rc=%d\n", rc);
+		return rc;
+	}
+
+	pon->kpdpwr_dbc_enable = of_property_read_bool(pon->pdev->dev.of_node,
+					"qcom,kpdpwr-sw-debounce");
 
 	rc = of_property_read_u32(pon->pdev->dev.of_node,
 				"qcom,warm-reset-poweroff-type",
@@ -2297,8 +2353,6 @@
 		list_add(&pon->list, &spon_dev_list);
 		spin_unlock_irqrestore(&spon_list_slock, flags);
 		pon->is_spon = true;
-	} else {
-		boot_reason = ffs(pon_sts);
 	}
 
 	/* config whether store the hard reset reason */
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index fbab1f1..d52b534 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -421,7 +421,6 @@
 
 config IOMMU_DEBUG_TRACKING
 	bool "Track key IOMMU events"
-	depends on BROKEN
 	select IOMMU_API
 	help
 	  Enables additional debug tracking in the IOMMU framework code.
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index c5ab866..2db0d64 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -432,13 +432,12 @@
 	return ret;
 }
 
-dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size, int prot)
+static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
+		size_t size, int prot)
 {
 	dma_addr_t dma_addr;
 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 	struct iova_domain *iovad = cookie_iovad(domain);
-	phys_addr_t phys = page_to_phys(page) + offset;
 	size_t iova_off = iova_offset(iovad, phys);
 	size_t len = iova_align(iovad, size + iova_off);
 	struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev));
@@ -454,6 +453,12 @@
 	return dma_addr + iova_off;
 }
 
+dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size, int prot)
+{
+	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
+}
+
 void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
 		enum dma_data_direction dir, unsigned long attrs)
 {
@@ -624,6 +629,19 @@
 	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), sg_dma_address(sg));
 }
 
+dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	return __iommu_dma_map(dev, phys, size,
+			dma_direction_to_prot(dir, false) | IOMMU_MMIO);
+}
+
+void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
+}
+
 int iommu_dma_supported(struct device *dev, u64 mask)
 {
 	/*
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index 8ba6da4..ac3059d 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -611,6 +611,55 @@
 	return ret;
 }
 
+static dma_addr_t fast_smmu_dma_map_resource(
+			struct device *dev, phys_addr_t phys_addr,
+			size_t size, enum dma_data_direction dir,
+			unsigned long attrs)
+{
+	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
+	size_t offset = phys_addr & ~FAST_PAGE_MASK;
+	size_t len = round_up(size + offset, FAST_PAGE_SIZE);
+	dma_addr_t dma_addr;
+	int prot;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mapping->lock, flags);
+	dma_addr = __fast_smmu_alloc_iova(mapping, attrs, len);
+	spin_unlock_irqrestore(&mapping->lock, flags);
+
+	if (dma_addr == DMA_ERROR_CODE)
+		return dma_addr;
+
+	prot = __fast_dma_direction_to_prot(dir);
+	prot |= IOMMU_MMIO;
+
+	if (iommu_map(mapping->domain, dma_addr, phys_addr - offset,
+			len, prot)) {
+		spin_lock_irqsave(&mapping->lock, flags);
+		__fast_smmu_free_iova(mapping, dma_addr, len);
+		spin_unlock_irqrestore(&mapping->lock, flags);
+		return DMA_ERROR_CODE;
+	}
+	return dma_addr + offset;
+}
+
+static void fast_smmu_dma_unmap_resource(
+			struct device *dev, dma_addr_t addr,
+			size_t size, enum dma_data_direction dir,
+			unsigned long attrs)
+{
+	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
+	size_t offset = addr & ~FAST_PAGE_MASK;
+	size_t len = round_up(size + offset, FAST_PAGE_SIZE);
+	unsigned long flags;
+
+	iommu_unmap(mapping->domain, addr - offset, len);
+	spin_lock_irqsave(&mapping->lock, flags);
+	__fast_smmu_free_iova(mapping, addr, len);
+	spin_unlock_irqrestore(&mapping->lock, flags);
+}
+
+
 static int fast_smmu_dma_supported(struct device *dev, u64 mask)
 {
 	return mask <= 0xffffffff;
@@ -667,6 +716,8 @@
 	.unmap_sg = fast_smmu_unmap_sg,
 	.sync_sg_for_cpu = fast_smmu_sync_sg_for_cpu,
 	.sync_sg_for_device = fast_smmu_sync_sg_for_device,
+	.map_resource = fast_smmu_dma_map_resource,
+	.unmap_resource = fast_smmu_dma_unmap_resource,
 	.dma_supported = fast_smmu_dma_supported,
 	.mapping_error = fast_smmu_mapping_error,
 };
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
index bea5f03..0c49a64 100644
--- a/drivers/iommu/iommu-debug.c
+++ b/drivers/iommu/iommu-debug.c
@@ -81,330 +81,58 @@
 
 static DEFINE_MUTEX(iommu_debug_attachments_lock);
 static LIST_HEAD(iommu_debug_attachments);
-static struct dentry *debugfs_attachments_dir;
 
+/*
+ * Each group may have more than one domain; but each domain may
+ * only have one group.
+ * Used by debug tools to display the name of the device(s) associated
+ * with a particular domain.
+ */
 struct iommu_debug_attachment {
 	struct iommu_domain *domain;
-	struct device *dev;
-	struct dentry *dentry;
+	struct iommu_group *group;
 	struct list_head list;
-	unsigned long reg_offset;
 };
 
-static int iommu_debug_attachment_info_show(struct seq_file *s, void *ignored)
-{
-	struct iommu_debug_attachment *attach = s->private;
-	int secure_vmid;
-
-	seq_printf(s, "Domain: 0x%p\n", attach->domain);
-
-	seq_puts(s, "SECURE_VMID: ");
-	if (iommu_domain_get_attr(attach->domain,
-				  DOMAIN_ATTR_SECURE_VMID,
-				  &secure_vmid))
-		seq_puts(s, "(Unknown)\n");
-	else
-		seq_printf(s, "%s (0x%x)\n",
-			   msm_secure_vmid_to_string(secure_vmid), secure_vmid);
-
-	return 0;
-}
-
-static int iommu_debug_attachment_info_open(struct inode *inode,
-					    struct file *file)
-{
-	return single_open(file, iommu_debug_attachment_info_show,
-			   inode->i_private);
-}
-
-static const struct file_operations iommu_debug_attachment_info_fops = {
-	.open	 = iommu_debug_attachment_info_open,
-	.read	 = seq_read,
-	.llseek	 = seq_lseek,
-	.release = single_release,
-};
-
-static ssize_t iommu_debug_attachment_reg_offset_write(
-	struct file *file, const char __user *ubuf, size_t count,
-	loff_t *offset)
-{
-	struct iommu_debug_attachment *attach = file->private_data;
-	unsigned long reg_offset;
-
-	if (kstrtoul_from_user(ubuf, count, 0, &reg_offset)) {
-		pr_err("Invalid reg_offset format\n");
-		return -EFAULT;
-	}
-
-	attach->reg_offset = reg_offset;
-
-	return count;
-}
-
-static const struct file_operations iommu_debug_attachment_reg_offset_fops = {
-	.open	= simple_open,
-	.write	= iommu_debug_attachment_reg_offset_write,
-};
-
-static ssize_t iommu_debug_attachment_reg_read_read(
-	struct file *file, char __user *ubuf, size_t count, loff_t *offset)
-{
-	struct iommu_debug_attachment *attach = file->private_data;
-	unsigned long val;
-	char *val_str;
-	ssize_t val_str_len;
-
-	if (*offset)
-		return 0;
-
-	val = iommu_reg_read(attach->domain, attach->reg_offset);
-	val_str = kasprintf(GFP_KERNEL, "0x%lx\n", val);
-	if (!val_str)
-		return -ENOMEM;
-	val_str_len = strlen(val_str);
-
-	if (copy_to_user(ubuf, val_str, val_str_len)) {
-		pr_err("copy_to_user failed\n");
-		val_str_len = -EFAULT;
-		goto out;
-	}
-	*offset = 1;		/* non-zero means we're done */
-
-out:
-	kfree(val_str);
-	return val_str_len;
-}
-
-static const struct file_operations iommu_debug_attachment_reg_read_fops = {
-	.open	= simple_open,
-	.read	= iommu_debug_attachment_reg_read_read,
-};
-
-static ssize_t iommu_debug_attachment_reg_write_write(
-	struct file *file, const char __user *ubuf, size_t count,
-	loff_t *offset)
-{
-	struct iommu_debug_attachment *attach = file->private_data;
-	unsigned long val;
-
-	if (kstrtoul_from_user(ubuf, count, 0, &val)) {
-		pr_err("Invalid val format\n");
-		return -EFAULT;
-	}
-
-	iommu_reg_write(attach->domain, attach->reg_offset, val);
-
-	return count;
-}
-
-static const struct file_operations iommu_debug_attachment_reg_write_fops = {
-	.open	= simple_open,
-	.write	= iommu_debug_attachment_reg_write_write,
-};
-
-/* should be called with iommu_debug_attachments_lock locked */
-static int iommu_debug_attach_add_debugfs(
-	struct iommu_debug_attachment *attach)
-{
-	const char *attach_name;
-	struct device *dev = attach->dev;
-	struct iommu_domain *domain = attach->domain;
-	int is_dynamic;
-
-	if (iommu_domain_get_attr(domain, DOMAIN_ATTR_DYNAMIC, &is_dynamic))
-		is_dynamic = 0;
-
-	if (is_dynamic) {
-		uuid_le uuid;
-
-		uuid_le_gen(&uuid);
-		attach_name = kasprintf(GFP_KERNEL, "%s-%pUl", dev_name(dev),
-					uuid.b);
-		if (!attach_name)
-			return -ENOMEM;
-	} else {
-		attach_name = dev_name(dev);
-	}
-
-	attach->dentry = debugfs_create_dir(attach_name,
-					    debugfs_attachments_dir);
-	if (!attach->dentry) {
-		pr_err("Couldn't create iommu/attachments/%s debugfs directory for domain 0x%p\n",
-		       attach_name, domain);
-		if (is_dynamic)
-			kfree(attach_name);
-		return -EIO;
-	}
-
-	if (is_dynamic)
-		kfree(attach_name);
-
-	if (!debugfs_create_file(
-		    "info", S_IRUSR, attach->dentry, attach,
-		    &iommu_debug_attachment_info_fops)) {
-		pr_err("Couldn't create iommu/attachments/%s/info debugfs file for domain 0x%p\n",
-		       dev_name(dev), domain);
-		goto err_rmdir;
-	}
-
-	if (!debugfs_create_file(
-		    "reg_offset", S_IRUSR, attach->dentry, attach,
-		    &iommu_debug_attachment_reg_offset_fops)) {
-		pr_err("Couldn't create iommu/attachments/%s/reg_offset debugfs file for domain 0x%p\n",
-		       dev_name(dev), domain);
-		goto err_rmdir;
-	}
-
-	if (!debugfs_create_file(
-		    "reg_read", S_IRUSR, attach->dentry, attach,
-		    &iommu_debug_attachment_reg_read_fops)) {
-		pr_err("Couldn't create iommu/attachments/%s/reg_read debugfs file for domain 0x%p\n",
-		       dev_name(dev), domain);
-		goto err_rmdir;
-	}
-
-	if (!debugfs_create_file(
-		    "reg_write", S_IRUSR, attach->dentry, attach,
-		    &iommu_debug_attachment_reg_write_fops)) {
-		pr_err("Couldn't create iommu/attachments/%s/reg_write debugfs file for domain 0x%p\n",
-		       dev_name(dev), domain);
-		goto err_rmdir;
-	}
-
-	return 0;
-
-err_rmdir:
-	debugfs_remove_recursive(attach->dentry);
-	return -EIO;
-}
-
-void iommu_debug_domain_add(struct iommu_domain *domain)
-{
-	struct iommu_debug_attachment *attach;
-
-	mutex_lock(&iommu_debug_attachments_lock);
-
-	attach = kmalloc(sizeof(*attach), GFP_KERNEL);
-	if (!attach)
-		goto out_unlock;
-
-	attach->domain = domain;
-	attach->dev = NULL;
-	list_add(&attach->list, &iommu_debug_attachments);
-
-out_unlock:
-	mutex_unlock(&iommu_debug_attachments_lock);
-}
-
-void iommu_debug_domain_remove(struct iommu_domain *domain)
-{
-	struct iommu_debug_attachment *it;
-
-	mutex_lock(&iommu_debug_attachments_lock);
-	list_for_each_entry(it, &iommu_debug_attachments, list)
-		if (it->domain == domain && it->dev == NULL)
-			break;
-
-	if (&it->list == &iommu_debug_attachments) {
-		WARN(1, "Couldn't find debug attachment for domain=0x%p",
-				domain);
-	} else {
-		list_del(&it->list);
-		kfree(it);
-	}
-	mutex_unlock(&iommu_debug_attachments_lock);
-}
-
 void iommu_debug_attach_device(struct iommu_domain *domain,
 			       struct device *dev)
 {
 	struct iommu_debug_attachment *attach;
+	struct iommu_group *group;
+
+	group = iommu_group_get(dev);
+	if (!group)
+		return;
+
+	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
+	if (!attach)
+		return;
+
+	attach->domain = domain;
+	attach->group = group;
+	INIT_LIST_HEAD(&attach->list);
 
 	mutex_lock(&iommu_debug_attachments_lock);
+	list_add(&attach->list, &iommu_debug_attachments);
+	mutex_unlock(&iommu_debug_attachments_lock);
+}
 
-	list_for_each_entry(attach, &iommu_debug_attachments, list)
-		if (attach->domain == domain && attach->dev == NULL)
-			break;
+void iommu_debug_domain_remove(struct iommu_domain *domain)
+{
+	struct iommu_debug_attachment *it, *tmp;
 
-	if (&attach->list == &iommu_debug_attachments) {
-		WARN(1, "Couldn't find debug attachment for domain=0x%p dev=%s",
-		     domain, dev_name(dev));
-	} else {
-		attach->dev = dev;
-
-		/*
-		 * we might not init until after other drivers start calling
-		 * iommu_attach_device. Only set up the debugfs nodes if we've
-		 * already init'd to avoid polluting the top-level debugfs
-		 * directory (by calling debugfs_create_dir with a NULL
-		 * parent). These will be flushed out later once we init.
-		 */
-
-		if (debugfs_attachments_dir)
-			iommu_debug_attach_add_debugfs(attach);
+	mutex_lock(&iommu_debug_attachments_lock);
+	list_for_each_entry_safe(it, tmp, &iommu_debug_attachments, list) {
+		if (it->domain != domain)
+			continue;
+		list_del(&it->list);
+		iommu_group_put(it->group);
+		kfree(it);
 	}
 
 	mutex_unlock(&iommu_debug_attachments_lock);
 }
 
-void iommu_debug_detach_device(struct iommu_domain *domain,
-			       struct device *dev)
-{
-	struct iommu_debug_attachment *it;
-
-	mutex_lock(&iommu_debug_attachments_lock);
-	list_for_each_entry(it, &iommu_debug_attachments, list)
-		if (it->domain == domain && it->dev == dev)
-			break;
-
-	if (&it->list == &iommu_debug_attachments) {
-		WARN(1, "Couldn't find debug attachment for domain=0x%p dev=%s",
-		     domain, dev_name(dev));
-	} else {
-		/*
-		 * Just remove debugfs entry and mark dev as NULL on
-		 * iommu_detach call. We would remove the actual
-		 * attachment entry from the list only on domain_free call.
-		 * This is to ensure we keep track of unattached domains too.
-		 */
-
-		debugfs_remove_recursive(it->dentry);
-		it->dev = NULL;
-	}
-	mutex_unlock(&iommu_debug_attachments_lock);
-}
-
-static int iommu_debug_init_tracking(void)
-{
-	int ret = 0;
-	struct iommu_debug_attachment *attach;
-
-	mutex_lock(&iommu_debug_attachments_lock);
-	debugfs_attachments_dir = debugfs_create_dir("attachments",
-						     iommu_debugfs_top);
-	if (!debugfs_attachments_dir) {
-		pr_err("Couldn't create iommu/attachments debugfs directory\n");
-		ret = -ENODEV;
-		goto out_unlock;
-	}
-
-	/* set up debugfs entries for attachments made during early boot */
-	list_for_each_entry(attach, &iommu_debug_attachments, list)
-		if (attach->dev)
-			iommu_debug_attach_add_debugfs(attach);
-
-out_unlock:
-	mutex_unlock(&iommu_debug_attachments_lock);
-	return ret;
-}
-
-static void iommu_debug_destroy_tracking(void)
-{
-	debugfs_remove_recursive(debugfs_attachments_dir);
-}
-#else
-static inline int iommu_debug_init_tracking(void) { return 0; }
-static inline void iommu_debug_destroy_tracking(void) { }
 #endif
 
 #ifdef CONFIG_IOMMU_TESTS
@@ -2045,9 +1773,6 @@
 
 static int iommu_debug_init(void)
 {
-	if (iommu_debug_init_tracking())
-		return -ENODEV;
-
 	if (iommu_debug_init_tests())
 		return -ENODEV;
 
@@ -2057,7 +1782,6 @@
 static void iommu_debug_exit(void)
 {
 	platform_driver_unregister(&iommu_debug_driver);
-	iommu_debug_destroy_tracking();
 	iommu_debug_destroy_tests();
 }
 
diff --git a/drivers/iommu/iommu-debug.h b/drivers/iommu/iommu-debug.h
index 94a97bf..91c418d 100644
--- a/drivers/iommu/iommu-debug.h
+++ b/drivers/iommu/iommu-debug.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -16,8 +16,6 @@
 #ifdef CONFIG_IOMMU_DEBUG_TRACKING
 
 void iommu_debug_attach_device(struct iommu_domain *domain, struct device *dev);
-void iommu_debug_detach_device(struct iommu_domain *domain, struct device *dev);
-void iommu_debug_domain_add(struct iommu_domain *domain);
 void iommu_debug_domain_remove(struct iommu_domain *domain);
 
 #else  /* !CONFIG_IOMMU_DEBUG_TRACKING */
@@ -27,15 +25,6 @@
 {
 }
 
-static inline void iommu_debug_detach_device(struct iommu_domain *domain,
-					     struct device *dev)
-{
-}
-
-static inline void iommu_debug_domain_add(struct iommu_domain *domain)
-{
-}
-
 static inline void iommu_debug_domain_remove(struct iommu_domain *domain)
 {
 }
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 7f9d9e1..e81bb48 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1067,8 +1067,6 @@
 	/* Assume all sizes by default; the driver may override this later */
 	domain->pgsize_bitmap  = bus->iommu_ops->pgsize_bitmap;
 
-	iommu_debug_domain_add(domain);
-
 	return domain;
 }
 
@@ -1132,8 +1130,6 @@
 static void __iommu_detach_device(struct iommu_domain *domain,
 				  struct device *dev)
 {
-	iommu_debug_detach_device(domain, dev);
-
 	if (unlikely(domain->ops->detach_dev == NULL))
 		return;
 
diff --git a/drivers/leds/leds-ktd2692.c b/drivers/leds/leds-ktd2692.c
index bf23ba1..45296aa 100644
--- a/drivers/leds/leds-ktd2692.c
+++ b/drivers/leds/leds-ktd2692.c
@@ -270,15 +270,15 @@
 		return -ENXIO;
 
 	led->ctrl_gpio = devm_gpiod_get(dev, "ctrl", GPIOD_ASIS);
-	if (IS_ERR(led->ctrl_gpio)) {
-		ret = PTR_ERR(led->ctrl_gpio);
+	ret = PTR_ERR_OR_ZERO(led->ctrl_gpio);
+	if (ret) {
 		dev_err(dev, "cannot get ctrl-gpios %d\n", ret);
 		return ret;
 	}
 
 	led->aux_gpio = devm_gpiod_get(dev, "aux", GPIOD_ASIS);
-	if (IS_ERR(led->aux_gpio)) {
-		ret = PTR_ERR(led->aux_gpio);
+	ret = PTR_ERR_OR_ZERO(led->aux_gpio);
+	if (ret) {
 		dev_err(dev, "cannot get aux-gpios %d\n", ret);
 		return ret;
 	}
diff --git a/drivers/media/platform/msm/camera/Makefile b/drivers/media/platform/msm/camera/Makefile
index 19de267..2e71d05 100644
--- a/drivers/media/platform/msm/camera/Makefile
+++ b/drivers/media/platform/msm/camera/Makefile
@@ -3,3 +3,8 @@
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_core/
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_sync/
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_smmu/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_cpas/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_cdm/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_isp/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_module/
+obj-$(CONFIG_SPECTRA_CAMERA) += icp/
diff --git a/drivers/media/platform/msm/camera/cam_cdm/Makefile b/drivers/media/platform/msm/camera/cam_cdm/Makefile
new file mode 100644
index 0000000..ad4ec04
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/Makefile
@@ -0,0 +1,9 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_cdm_soc.o cam_cdm_util.o cam_cdm_intf.o \
+				cam_cdm_core_common.o cam_cdm_virtual_core.o \
+				cam_cdm_hw_core.o
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h
new file mode 100644
index 0000000..fc7a493
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h
@@ -0,0 +1,271 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CDM_H_
+#define _CAM_CDM_H_
+
+#include <linux/dma-direction.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/random.h>
+#include <linux/spinlock_types.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/bug.h>
+
+#include "cam_cdm_intf_api.h"
+#include "cam_soc_util.h"
+#include "cam_cpas_api.h"
+#include "cam_hw_intf.h"
+#include "cam_hw.h"
+
+#ifdef CONFIG_CAM_CDM_DBG
+#define CDM_CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CDM_CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+#ifdef CONFIG_CAM_CDM_DUMP_DBG
+#define CDM_DUMP_CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CDM_DUMP_CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+#define CAM_MAX_SW_CDM_VERSION_SUPPORTED  1
+#define CAM_SW_CDM_INDEX                  0
+#define CAM_CDM_INFLIGHT_WORKS            5
+#define CAM_CDM_HW_RESET_TIMEOUT          3000
+
+#define CAM_CDM_HW_ID_MASK      0xF
+#define CAM_CDM_HW_ID_SHIFT     0x5
+#define CAM_CDM_CLIENTS_ID_MASK 0x1F
+
+#define CAM_CDM_GET_HW_IDX(x) (((x) >> CAM_CDM_HW_ID_SHIFT) & \
+	CAM_CDM_HW_ID_MASK)
+#define CAM_CDM_CREATE_CLIENT_HANDLE(hw_idx, client_idx) \
+	((((hw_idx) & CAM_CDM_HW_ID_MASK) << CAM_CDM_HW_ID_SHIFT) | \
+	 ((client_idx) & CAM_CDM_CLIENTS_ID_MASK))
+#define CAM_CDM_GET_CLIENT_IDX(x) ((x) & CAM_CDM_CLIENTS_ID_MASK)
+#define CAM_PER_CDM_MAX_REGISTERED_CLIENTS (CAM_CDM_CLIENTS_ID_MASK + 1)
+#define CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM (CAM_CDM_HW_ID_MASK + 1)
+
+/* enum cam_cdm_reg_attr - read, write, read and write permissions.*/
+enum cam_cdm_reg_attr {
+	CAM_REG_ATTR_READ,
+	CAM_REG_ATTR_WRITE,
+	CAM_REG_ATTR_READ_WRITE,
+};
+
+/* enum cam_cdm_hw_process_intf_cmd - interface commands.*/
+enum cam_cdm_hw_process_intf_cmd {
+	CAM_CDM_HW_INTF_CMD_ACQUIRE,
+	CAM_CDM_HW_INTF_CMD_RELEASE,
+	CAM_CDM_HW_INTF_CMD_SUBMIT_BL,
+	CAM_CDM_HW_INTF_CMD_RESET_HW,
+	CAM_CDM_HW_INTF_CMD_INVALID,
+};
+
+/* enum cam_cdm_regs - CDM driver offset enums.*/
+enum cam_cdm_regs {
+	/*cfg_offsets 0*/
+	CDM_CFG_HW_VERSION,
+	CDM_CFG_TITAN_VERSION,
+	CDM_CFG_RST_CMD,
+	CDM_CFG_CGC_CFG,
+	CDM_CFG_CORE_CFG,
+	CDM_CFG_CORE_EN,
+	CDM_CFG_FE_CFG,
+	/*irq_offsets 7*/
+	CDM_IRQ_MASK,
+	CDM_IRQ_CLEAR,
+	CDM_IRQ_CLEAR_CMD,
+	CDM_IRQ_SET,
+	CDM_IRQ_SET_CMD,
+	CDM_IRQ_STATUS,
+	CDM_IRQ_USR_DATA,
+	/*BL FIFO Registers 14*/
+	CDM_BL_FIFO_BASE_REG,
+	CDM_BL_FIFO_LEN_REG,
+	CDM_BL_FIFO_STORE_REG,
+	CDM_BL_FIFO_CFG,
+	CDM_BL_FIFO_RB,
+	CDM_BL_FIFO_BASE_RB,
+	CDM_BL_FIFO_LEN_RB,
+	CDM_BL_FIFO_PENDING_REQ_RB,
+	/*CDM System Debug Registers 22*/
+	CDM_DBG_WAIT_STATUS,
+	CDM_DBG_SCRATCH_0_REG,
+	CDM_DBG_SCRATCH_1_REG,
+	CDM_DBG_SCRATCH_2_REG,
+	CDM_DBG_SCRATCH_3_REG,
+	CDM_DBG_SCRATCH_4_REG,
+	CDM_DBG_SCRATCH_5_REG,
+	CDM_DBG_SCRATCH_6_REG,
+	CDM_DBG_SCRATCH_7_REG,
+	CDM_DBG_LAST_AHB_ADDR,
+	CDM_DBG_LAST_AHB_DATA,
+	CDM_DBG_CORE_DBUG,
+	CDM_DBG_LAST_AHB_ERR_ADDR,
+	CDM_DBG_LAST_AHB_ERR_DATA,
+	CDM_DBG_CURRENT_BL_BASE,
+	CDM_DBG_CURRENT_BL_LEN,
+	CDM_DBG_CURRENT_USED_AHB_BASE,
+	CDM_DBG_DEBUG_STATUS,
+	/*FE Bus Miser Registers 40*/
+	CDM_BUS_MISR_CFG_0,
+	CDM_BUS_MISR_CFG_1,
+	CDM_BUS_MISR_RD_VAL,
+	/*Performance Counter registers 43*/
+	CDM_PERF_MON_CTRL,
+	CDM_PERF_MON_0,
+	CDM_PERF_MON_1,
+	CDM_PERF_MON_2,
+	/*Spare registers 47*/
+	CDM_SPARE,
+};
+
+/* struct cam_cdm_reg_offset - struct for offset with attribute.*/
+struct cam_cdm_reg_offset {
+	uint32_t offset;
+	enum cam_cdm_reg_attr attribute;
+};
+
+/* struct cam_cdm_reg_offset_table - struct for whole offset table.*/
+struct cam_cdm_reg_offset_table {
+	uint32_t first_offset;
+	uint32_t last_offset;
+	uint32_t reg_count;
+	const struct cam_cdm_reg_offset *offsets;
+	uint32_t offset_max_size;
+};
+
+/* enum cam_cdm_flags - Bit fields for CDM flags used */
+enum cam_cdm_flags {
+	CAM_CDM_FLAG_SHARED_CDM,
+	CAM_CDM_FLAG_PRIVATE_CDM,
+};
+
+/* enum cam_cdm_type - Enum for possible CAM CDM types */
+enum cam_cdm_type {
+	CAM_VIRTUAL_CDM,
+	CAM_HW_CDM,
+};
+
+/* enum cam_cdm_mem_base_index - Enum for possible CAM CDM types */
+enum cam_cdm_mem_base_index {
+	CAM_HW_CDM_BASE_INDEX,
+	CAM_HW_CDM_MAX_INDEX = CAM_SOC_MAX_BLOCK,
+};
+
+/* struct cam_cdm_client - struct for cdm clients data.*/
+struct cam_cdm_client {
+	struct cam_cdm_acquire_data data;
+	void __iomem  *changebase_addr;
+	uint32_t stream_on;
+	uint32_t refcount;
+	struct mutex lock;
+	uint32_t handle;
+};
+
+/* struct cam_cdm_work_payload - struct for cdm work payload data.*/
+struct cam_cdm_work_payload {
+	struct cam_hw_info *hw;
+	uint32_t irq_status;
+	uint32_t irq_data;
+	struct work_struct work;
+};
+
+/* enum cam_cdm_bl_cb_type - Enum for possible CAM CDM cb request types */
+enum cam_cdm_bl_cb_type {
+	CAM_HW_CDM_BL_CB_CLIENT = 1,
+	CAM_HW_CDM_BL_CB_INTERNAL,
+};
+
+/* struct cam_cdm_bl_cb_request_entry - callback entry for work to process.*/
+struct cam_cdm_bl_cb_request_entry {
+	uint8_t bl_tag;
+	enum cam_cdm_bl_cb_type request_type;
+	uint32_t client_hdl;
+	void *userdata;
+	uint32_t cookie;
+	struct list_head entry;
+};
+
+/* struct cam_cdm_hw_intf_cmd_submit_bl - cdm interface submit command.*/
+struct cam_cdm_hw_intf_cmd_submit_bl {
+	uint32_t handle;
+	struct cam_cdm_bl_request *data;
+};
+
+/* struct cam_cdm_hw_mem - CDM hw memory.struct */
+struct cam_cdm_hw_mem {
+	int32_t handle;
+	uint32_t vaddr;
+	uint64_t kmdvaddr;
+	size_t size;
+};
+
+/* struct cam_cdm - CDM hw device struct */
+struct cam_cdm {
+	uint32_t index;
+	char name[128];
+	enum cam_cdm_id id;
+	enum cam_cdm_flags flags;
+	struct completion reset_complete;
+	struct completion bl_complete;
+	struct workqueue_struct *work_queue;
+	struct list_head bl_request_list;
+	struct cam_hw_version version;
+	uint32_t hw_version;
+	uint32_t hw_family_version;
+	struct cam_iommu_handle iommu_hdl;
+	struct cam_cdm_reg_offset_table *offset_tbl;
+	struct cam_cdm_utils_ops *ops;
+	struct cam_cdm_client *clients[CAM_PER_CDM_MAX_REGISTERED_CLIENTS];
+	uint8_t bl_tag;
+	atomic_t error;
+	struct cam_cdm_hw_mem gen_irq;
+	uint32_t cpas_handle;
+};
+
+/* struct cam_cdm_private_dt_data - CDM hw custom dt data */
+struct cam_cdm_private_dt_data {
+	bool dt_cdm_shared;
+	uint32_t dt_num_supported_clients;
+	const char *dt_cdm_client_name[CAM_PER_CDM_MAX_REGISTERED_CLIENTS];
+};
+
+/* struct cam_cdm_intf_devices - CDM mgr interface devices */
+struct cam_cdm_intf_devices {
+	struct mutex lock;
+	uint32_t refcount;
+	struct cam_hw_intf *device;
+	struct cam_cdm_private_dt_data *data;
+};
+
+/* struct cam_cdm_intf_mgr - CDM mgr interface device struct */
+struct cam_cdm_intf_mgr {
+	bool probe_done;
+	struct cam_cdm_intf_devices nodes[CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM];
+	uint32_t cdm_count;
+	uint32_t dt_supported_hw_cdm;
+	int32_t refcount;
+};
+
+int cam_cdm_intf_register_hw_cdm(struct cam_hw_intf *hw,
+	struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
+	uint32_t *index);
+int cam_cdm_intf_deregister_hw_cdm(struct cam_hw_intf *hw,
+	struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
+	uint32_t index);
+
+#endif /* _CAM_CDM_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c
new file mode 100644
index 0000000..341406a
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c
@@ -0,0 +1,547 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-CDM-CORE %s:%d " fmt, __func__, __LINE__
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+#include "cam_soc_util.h"
+#include "cam_smmu_api.h"
+#include "cam_io_util.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_cdm.h"
+#include "cam_cdm_soc.h"
+#include "cam_cdm_core_common.h"
+
+static void cam_cdm_get_client_refcount(struct cam_cdm_client *client)
+{
+	mutex_lock(&client->lock);
+	CDM_CDBG("CDM client get refcount=%d\n",
+		client->refcount);
+	client->refcount++;
+	mutex_unlock(&client->lock);
+}
+
+static void cam_cdm_put_client_refcount(struct cam_cdm_client *client)
+{
+	mutex_lock(&client->lock);
+	CDM_CDBG("CDM client put refcount=%d\n",
+		client->refcount);
+	if (client->refcount > 0) {
+		client->refcount--;
+	} else {
+		pr_err("Refcount put when zero\n");
+		WARN_ON(1);
+	}
+	mutex_unlock(&client->lock);
+}
+
+bool cam_cdm_set_cam_hw_version(
+	uint32_t ver, struct cam_hw_version *cam_version)
+{
+	switch (ver) {
+	case CAM_CDM170_VERSION:
+		cam_version->major    = (ver & 0xF0000000);
+		cam_version->minor    = (ver & 0xFFF0000);
+		cam_version->incr     = (ver & 0xFFFF);
+		cam_version->reserved = 0;
+		return true;
+	default:
+		pr_err("CDM Version=%x not supported in util\n", ver);
+	break;
+	}
+	return false;
+}
+
+void cam_cdm_cpas_cb(int32_t client_handle, void *userdata,
+	enum cam_camnoc_irq_type evt_type, uint32_t evt_data)
+{
+	pr_err("CPAS error callback type=%d with data=%x\n", evt_type,
+		evt_data);
+}
+
+struct cam_cdm_utils_ops *cam_cdm_get_ops(
+	uint32_t ver, struct cam_hw_version *cam_version, bool by_cam_version)
+{
+	if (by_cam_version == false) {
+		switch (ver) {
+		case CAM_CDM170_VERSION:
+			return &CDM170_ops;
+		default:
+			pr_err("CDM Version=%x not supported in util\n", ver);
+		}
+	} else if (cam_version) {
+		if ((cam_version->major == 1) && (cam_version->minor == 0) &&
+			(cam_version->incr == 0))
+			return &CDM170_ops;
+		pr_err("cam_hw_version=%x:%x:%x not supported\n",
+			cam_version->major, cam_version->minor,
+			cam_version->incr);
+	}
+
+	return NULL;
+}
+
+struct cam_cdm_bl_cb_request_entry *cam_cdm_find_request_by_bl_tag(
+	uint32_t tag, struct list_head *bl_list)
+{
+	struct cam_cdm_bl_cb_request_entry *node;
+
+	list_for_each_entry(node, bl_list, entry) {
+		if (node->bl_tag == tag)
+			return node;
+	}
+	pr_err("Could not find the bl request for tag=%d\n", tag);
+
+	return NULL;
+}
+
+int cam_cdm_get_caps(void *hw_priv,
+	void *get_hw_cap_args, uint32_t arg_size)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_cdm *cdm_core;
+
+	if ((cdm_hw) && (cdm_hw->core_info) && (get_hw_cap_args) &&
+		(sizeof(struct cam_iommu_handle) == arg_size)) {
+		cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+		*((struct cam_iommu_handle *)get_hw_cap_args) =
+			cdm_core->iommu_hdl;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+int cam_cdm_find_free_client_slot(struct cam_cdm *hw)
+{
+	int i;
+
+	for (i = 0; i < CAM_PER_CDM_MAX_REGISTERED_CLIENTS; i++) {
+		if (hw->clients[i] == NULL) {
+			CDM_CDBG("Found client slot %d\n", i);
+			return i;
+		}
+	}
+	pr_err("No more client slots\n");
+
+	return -EBUSY;
+}
+
+
+void cam_cdm_notify_clients(struct cam_hw_info *cdm_hw,
+	enum cam_cdm_cb_status status, void *data)
+{
+	int i;
+	struct cam_cdm *core = NULL;
+	struct cam_cdm_client *client = NULL;
+
+	if (!cdm_hw) {
+		pr_err("CDM Notify called with NULL hw info\n");
+		return;
+	}
+	core = (struct cam_cdm *)cdm_hw->core_info;
+
+	if (status == CAM_CDM_CB_STATUS_BL_SUCCESS) {
+		int client_idx;
+		struct cam_cdm_bl_cb_request_entry *node =
+			(struct cam_cdm_bl_cb_request_entry *)data;
+
+		client_idx = CAM_CDM_GET_CLIENT_IDX(node->client_hdl);
+		client = core->clients[client_idx];
+		if ((!client) || (client->handle != node->client_hdl)) {
+			pr_err("Invalid client %pK hdl=%x\n", client,
+				node->client_hdl);
+			return;
+		}
+		cam_cdm_get_client_refcount(client);
+		if (client->data.cam_cdm_callback) {
+			CDM_CDBG("Calling client=%s cb cookie=%d\n",
+				client->data.identifier, node->cookie);
+			client->data.cam_cdm_callback(node->client_hdl,
+				node->userdata, CAM_CDM_CB_STATUS_BL_SUCCESS,
+				node->cookie);
+			CDM_CDBG("Exit client cb cookie=%d\n", node->cookie);
+		} else {
+			pr_err("No cb registered for client hdl=%x\n",
+				node->client_hdl);
+		}
+		cam_cdm_put_client_refcount(client);
+		return;
+	}
+
+	for (i = 0; i < CAM_PER_CDM_MAX_REGISTERED_CLIENTS; i++) {
+		if (core->clients[i] != NULL) {
+			client = core->clients[i];
+			mutex_lock(&client->lock);
+			CDM_CDBG("Found client slot %d\n", i);
+			if (client->data.cam_cdm_callback) {
+				if (status == CAM_CDM_CB_STATUS_PAGEFAULT) {
+					unsigned long iova =
+						(unsigned long)data;
+
+					client->data.cam_cdm_callback(
+						client->handle,
+						client->data.userdata,
+						CAM_CDM_CB_STATUS_PAGEFAULT,
+						(iova & 0xFFFFFFFF));
+				}
+			} else {
+				pr_err("No cb registered for client hdl=%x\n",
+					client->handle);
+			}
+			mutex_unlock(&client->lock);
+		}
+	}
+}
+
+int cam_cdm_stream_ops_internal(void *hw_priv,
+	void *start_args, bool operation)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_cdm *core = NULL;
+	int rc = -1;
+	int client_idx;
+	struct cam_cdm_client *client;
+	uint32_t *handle = start_args;
+
+	if (!hw_priv)
+		return -EINVAL;
+
+	core = (struct cam_cdm *)cdm_hw->core_info;
+	client_idx = CAM_CDM_GET_CLIENT_IDX(*handle);
+	client = core->clients[client_idx];
+	if (!client) {
+		pr_err("Invalid client %pK hdl=%x\n", client, *handle);
+		return -EINVAL;
+	}
+	cam_cdm_get_client_refcount(client);
+	if (*handle != client->handle) {
+		pr_err("client id given handle=%x invalid\n", *handle);
+		cam_cdm_put_client_refcount(client);
+		return -EINVAL;
+	}
+	if (operation == true) {
+		if (true == client->stream_on) {
+			pr_err("Invalid CDM client is already streamed ON\n");
+			cam_cdm_put_client_refcount(client);
+			return rc;
+		}
+	} else {
+		if (client->stream_on == false) {
+			pr_err("Invalid CDM client is already streamed Off\n");
+			cam_cdm_put_client_refcount(client);
+			return rc;
+		}
+	}
+
+	mutex_lock(&cdm_hw->hw_mutex);
+	if (operation == true) {
+		if (!cdm_hw->open_count) {
+			struct cam_ahb_vote ahb_vote;
+			struct cam_axi_vote axi_vote;
+
+			ahb_vote.type = CAM_VOTE_ABSOLUTE;
+			ahb_vote.vote.level = CAM_SVS_VOTE;
+			axi_vote.compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+			axi_vote.uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+
+			rc = cam_cpas_start(core->cpas_handle,
+				&ahb_vote, &axi_vote);
+			if (rc != 0) {
+				pr_err("CPAS start failed\n");
+				goto end;
+			}
+			CDM_CDBG("CDM init first time\n");
+			if (core->id == CAM_CDM_VIRTUAL) {
+				CDM_CDBG("Virtual CDM HW init first time\n");
+				rc = 0;
+			} else {
+				CDM_CDBG("CDM HW init first time\n");
+				rc = cam_hw_cdm_init(hw_priv, NULL, 0);
+				if (rc == 0) {
+					rc = cam_hw_cdm_alloc_genirq_mem(
+						hw_priv);
+					if (rc != 0) {
+						pr_err("Genirqalloc failed\n");
+						cam_hw_cdm_deinit(hw_priv,
+							NULL, 0);
+					}
+				} else {
+					pr_err("CDM HW init failed\n");
+				}
+			}
+			if (rc == 0) {
+				cdm_hw->open_count++;
+				client->stream_on = true;
+			} else {
+				if (cam_cpas_stop(core->cpas_handle))
+					pr_err("CPAS stop failed\n");
+			}
+		} else {
+			cdm_hw->open_count++;
+			CDM_CDBG("CDM HW already ON count=%d\n",
+				cdm_hw->open_count);
+			rc = 0;
+			client->stream_on = true;
+		}
+	} else {
+		if (cdm_hw->open_count) {
+			cdm_hw->open_count--;
+			CDM_CDBG("stream OFF CDM %d\n", cdm_hw->open_count);
+			if (!cdm_hw->open_count) {
+				CDM_CDBG("CDM Deinit now\n");
+				if (core->id == CAM_CDM_VIRTUAL) {
+					CDM_CDBG("Virtual CDM HW Deinit\n");
+					rc = 0;
+				} else {
+					CDM_CDBG("CDM HW Deinit now\n");
+					rc = cam_hw_cdm_deinit(
+						hw_priv, NULL, 0);
+					if (cam_hw_cdm_release_genirq_mem(
+						hw_priv))
+						pr_err("Genirq release failed\n");
+				}
+				if (rc) {
+					pr_err("Deinit failed in streamoff\n");
+				} else {
+					client->stream_on = false;
+					rc = cam_cpas_stop(core->cpas_handle);
+					if (rc)
+						pr_err("CPAS stop failed\n");
+				}
+			} else {
+				client->stream_on = false;
+				CDM_CDBG("Client stream off success =%d\n",
+					cdm_hw->open_count);
+			}
+		} else {
+			CDM_CDBG("stream OFF CDM Invalid %d\n",
+				cdm_hw->open_count);
+			rc = -ENXIO;
+		}
+	}
+end:
+	cam_cdm_put_client_refcount(client);
+	mutex_unlock(&cdm_hw->hw_mutex);
+	return rc;
+}
+
+int cam_cdm_stream_start(void *hw_priv,
+	void *start_args, uint32_t size)
+{
+	int rc = 0;
+
+	if (!hw_priv)
+		return -EINVAL;
+
+	rc = cam_cdm_stream_ops_internal(hw_priv, start_args, true);
+	return rc;
+
+}
+
+int cam_cdm_stream_stop(void *hw_priv,
+	void *start_args, uint32_t size)
+{
+	int rc = 0;
+
+	if (!hw_priv)
+		return -EINVAL;
+
+	rc = cam_cdm_stream_ops_internal(hw_priv, start_args, false);
+	return rc;
+
+}
+
+int cam_cdm_process_cmd(void *hw_priv,
+	uint32_t cmd, void *cmd_args, uint32_t arg_size)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_hw_soc_info *soc_data = NULL;
+	struct cam_cdm *core = NULL;
+	int rc = -EINVAL;
+
+	if ((!hw_priv) || (!cmd_args) ||
+		(cmd >= CAM_CDM_HW_INTF_CMD_INVALID))
+		return rc;
+
+	soc_data = &cdm_hw->soc_info;
+	core = (struct cam_cdm *)cdm_hw->core_info;
+	switch (cmd) {
+	case CAM_CDM_HW_INTF_CMD_SUBMIT_BL: {
+		struct cam_cdm_hw_intf_cmd_submit_bl *req;
+		int idx;
+		struct cam_cdm_client *client;
+
+		if (sizeof(struct cam_cdm_hw_intf_cmd_submit_bl) != arg_size) {
+			pr_err("Invalid CDM cmd %d arg size=%x\n", cmd,
+				arg_size);
+			break;
+		}
+		req = (struct cam_cdm_hw_intf_cmd_submit_bl *)cmd_args;
+		if ((req->data->type < 0) ||
+			(req->data->type > CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA)) {
+			pr_err("Invalid req bl cmd addr type=%d\n",
+				req->data->type);
+			break;
+		}
+		idx = CAM_CDM_GET_CLIENT_IDX(req->handle);
+		client = core->clients[idx];
+		if ((!client) || (req->handle != client->handle)) {
+			pr_err("Invalid client %pK hdl=%x\n", client,
+				req->handle);
+			break;
+		}
+		cam_cdm_get_client_refcount(client);
+		if ((req->data->flag == true) &&
+			(!client->data.cam_cdm_callback)) {
+			pr_err("CDM request cb without registering cb\n");
+			cam_cdm_put_client_refcount(client);
+			break;
+		}
+		if (client->stream_on != true) {
+			pr_err("Invalid CDM needs to be streamed ON first\n");
+			cam_cdm_put_client_refcount(client);
+			break;
+		}
+		if (core->id == CAM_CDM_VIRTUAL)
+			rc = cam_virtual_cdm_submit_bl(cdm_hw, req, client);
+		else
+			rc = cam_hw_cdm_submit_bl(cdm_hw, req, client);
+
+		cam_cdm_put_client_refcount(client);
+		break;
+	}
+	case CAM_CDM_HW_INTF_CMD_ACQUIRE: {
+		struct cam_cdm_acquire_data *data;
+		int idx;
+		struct cam_cdm_client *client;
+
+		if (sizeof(struct cam_cdm_acquire_data) != arg_size) {
+			pr_err("Invalid CDM cmd %d arg size=%x\n", cmd,
+				arg_size);
+			break;
+		}
+
+		mutex_lock(&cdm_hw->hw_mutex);
+		data = (struct cam_cdm_acquire_data *)cmd_args;
+		CDM_CDBG("Trying to acquire client=%s in hw idx=%d\n",
+			data->identifier, core->index);
+		idx = cam_cdm_find_free_client_slot(core);
+		if ((idx < 0) || (core->clients[idx])) {
+			mutex_unlock(&cdm_hw->hw_mutex);
+			pr_err("Failed to client slots for client=%s in hw idx=%d\n",
+			data->identifier, core->index);
+			break;
+		}
+		core->clients[idx] = kzalloc(sizeof(struct cam_cdm_client),
+			GFP_KERNEL);
+		if (!core->clients[idx]) {
+			mutex_unlock(&cdm_hw->hw_mutex);
+			rc = -ENOMEM;
+			break;
+		}
+
+		mutex_unlock(&cdm_hw->hw_mutex);
+		client = core->clients[idx];
+		mutex_init(&client->lock);
+		data->ops = core->ops;
+		if (core->id == CAM_CDM_VIRTUAL) {
+			data->cdm_version.major = 1;
+			data->cdm_version.minor = 0;
+			data->cdm_version.incr = 0;
+			data->cdm_version.reserved = 0;
+			data->ops = cam_cdm_get_ops(0,
+					&data->cdm_version, true);
+			if (!data->ops) {
+				mutex_destroy(&client->lock);
+				mutex_lock(&cdm_hw->hw_mutex);
+				kfree(core->clients[idx]);
+				core->clients[idx] = NULL;
+				mutex_unlock(
+					&cdm_hw->hw_mutex);
+				rc = -1;
+				break;
+			}
+		} else {
+			data->cdm_version = core->version;
+		}
+
+		cam_cdm_get_client_refcount(client);
+		mutex_lock(&client->lock);
+		memcpy(&client->data, data,
+			sizeof(struct cam_cdm_acquire_data));
+		client->handle = CAM_CDM_CREATE_CLIENT_HANDLE(
+					core->index,
+					idx);
+		client->stream_on = false;
+		data->handle = client->handle;
+		CDM_CDBG("Acquired client=%s in hwidx=%d\n",
+			data->identifier, core->index);
+		mutex_unlock(&client->lock);
+		rc = 0;
+		break;
+	}
+	case CAM_CDM_HW_INTF_CMD_RELEASE: {
+		uint32_t *handle = cmd_args;
+		int idx;
+		struct cam_cdm_client *client;
+
+		if (sizeof(uint32_t) != arg_size) {
+			pr_err("Invalid CDM cmd %d size=%x for handle=%x\n",
+				cmd, arg_size, *handle);
+			return -EINVAL;
+		}
+		idx = CAM_CDM_GET_CLIENT_IDX(*handle);
+		mutex_lock(&cdm_hw->hw_mutex);
+		client = core->clients[idx];
+		if ((!client) || (*handle != client->handle)) {
+			pr_err("Invalid client %pK hdl=%x\n", client, *handle);
+			mutex_unlock(&cdm_hw->hw_mutex);
+			break;
+		}
+		cam_cdm_put_client_refcount(client);
+		mutex_lock(&client->lock);
+		if (client->refcount != 0) {
+			pr_err("CDM Client refcount not zero %d",
+				client->refcount);
+			rc = -1;
+			mutex_unlock(&client->lock);
+			mutex_unlock(&cdm_hw->hw_mutex);
+			break;
+		}
+		core->clients[idx] = NULL;
+		mutex_unlock(&client->lock);
+		mutex_destroy(&client->lock);
+		kfree(client);
+		mutex_unlock(&cdm_hw->hw_mutex);
+		rc = 0;
+		break;
+	}
+	case CAM_CDM_HW_INTF_CMD_RESET_HW: {
+		pr_err("CDM HW reset not supported for handle =%x\n",
+			*((uint32_t *)cmd_args));
+		break;
+	}
+	default:
+		pr_err("CDM HW intf command not valid =%d\n", cmd);
+		break;
+	}
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h
new file mode 100644
index 0000000..eb75aaa
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h
@@ -0,0 +1,50 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CDM_CORE_COMMON_H_
+#define _CAM_CDM_CORE_COMMON_H_
+
+#include "cam_mem_mgr.h"
+
+#define CAM_CDM170_VERSION 0x10000000
+
+extern struct cam_cdm_utils_ops CDM170_ops;
+
+int cam_hw_cdm_init(void *hw_priv, void *init_hw_args, uint32_t arg_size);
+int cam_hw_cdm_deinit(void *hw_priv, void *init_hw_args, uint32_t arg_size);
+int cam_hw_cdm_alloc_genirq_mem(void *hw_priv);
+int cam_hw_cdm_release_genirq_mem(void *hw_priv);
+int cam_cdm_get_caps(void *hw_priv, void *get_hw_cap_args, uint32_t arg_size);
+int cam_cdm_stream_ops_internal(void *hw_priv, void *start_args,
+	bool operation);
+int cam_cdm_stream_start(void *hw_priv, void *start_args, uint32_t size);
+int cam_cdm_stream_stop(void *hw_priv, void *start_args, uint32_t size);
+int cam_cdm_process_cmd(void *hw_priv, uint32_t cmd, void *cmd_args,
+	uint32_t arg_size);
+bool cam_cdm_set_cam_hw_version(
+	uint32_t ver, struct cam_hw_version *cam_version);
+void cam_cdm_cpas_cb(int32_t client_handle, void *userdata,
+	enum cam_camnoc_irq_type evt_type, uint32_t evt_data);
+struct cam_cdm_utils_ops *cam_cdm_get_ops(
+	uint32_t ver, struct cam_hw_version *cam_version, bool by_cam_version);
+int cam_virtual_cdm_submit_bl(struct cam_hw_info *cdm_hw,
+	struct cam_cdm_hw_intf_cmd_submit_bl *req,
+	struct cam_cdm_client *client);
+int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw,
+	struct cam_cdm_hw_intf_cmd_submit_bl *req,
+	struct cam_cdm_client *client);
+struct cam_cdm_bl_cb_request_entry *cam_cdm_find_request_by_bl_tag(
+	uint32_t tag, struct list_head *bl_list);
+void cam_cdm_notify_clients(struct cam_hw_info *cdm_hw,
+	enum cam_cdm_cb_status status, void *data);
+
+#endif /* _CAM_CDM_CORE_COMMON_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c
new file mode 100644
index 0000000..7f2c455
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c
@@ -0,0 +1,1025 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-CDM-HW %s:%d " fmt, __func__, __LINE__
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+#include <media/cam_req_mgr.h>
+#include "cam_soc_util.h"
+#include "cam_smmu_api.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_cdm.h"
+#include "cam_cdm_core_common.h"
+#include "cam_cdm_soc.h"
+#include "cam_io_util.h"
+#include "cam_hw_cdm170_reg.h"
+
+
+#define CAM_HW_CDM_CPAS_0_NAME "qcom,cam170-cpas-cdm0"
+#define CAM_HW_CDM_IPE_0_NAME "qcom,cam170-ipe0-cdm"
+#define CAM_HW_CDM_IPE_1_NAME "qcom,cam170-ipe1-cdm"
+#define CAM_HW_CDM_BPS_NAME "qcom,cam170-bps-cdm"
+
+#define CAM_CDM_BL_FIFO_WAIT_TIMEOUT 2000
+
+/* Bottom-half worker; defined below, forward-declared for the IRQ handler */
+static void cam_hw_cdm_work(struct work_struct *work);
+
+/* DT match table entry for all CDM variants */
+static const struct of_device_id msm_cam_hw_cdm_dt_match[] = {
+	{
+		.compatible = CAM_HW_CDM_CPAS_0_NAME,
+		.data = &cam170_cpas_cdm_offset_table,
+	},
+	{}
+};
+
+/* Map a DT compatible string to its CDM id; CAM_CDM_MAX when unknown */
+static enum cam_cdm_id cam_hw_cdm_get_id_by_name(char *name)
+{
+	if (!strcmp(CAM_HW_CDM_CPAS_0_NAME, name))
+		return CAM_CDM_CPAS_0;
+
+	return CAM_CDM_MAX;
+}
+
+/*
+ * Read the number of BLs still pending in the HW FIFO into *pending_bl.
+ * Returns 0 on success, -1 when the register read fails.
+ */
+int cam_hw_cdm_bl_fifo_pending_bl_rb(struct cam_hw_info *cdm_hw,
+	uint32_t *pending_bl)
+{
+	int rc = 0;
+
+	if (cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_PENDING_REQ_RB,
+		pending_bl)) {
+		pr_err("Failed to read CDM pending BL's\n");
+		rc = -1;
+	}
+
+	return rc;
+}
+
+/*
+ * Enable CDM core debug capture (0x10100 sets the AHB-log and BL-FIFO
+ * readback debug bits tested in the dump routine). Returns 0 or -1.
+ */
+int cam_hw_cdm_enable_core_dbg(struct cam_hw_info *cdm_hw)
+{
+	int rc = 0;
+
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_DBG_CORE_DBUG, 0x10100)) {
+		pr_err("Failed to Write CDM HW core debug\n");
+		rc = -1;
+	}
+
+	return rc;
+}
+
+/* Disable CDM core debug capture. Returns 0 on success, -1 on failure. */
+int cam_hw_cdm_disable_core_dbg(struct cam_hw_info *cdm_hw)
+{
+	int rc = 0;
+
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_DBG_CORE_DBUG, 0)) {
+		pr_err("Failed to Write CDM HW core debug\n");
+		rc = -1;
+	}
+
+	return rc;
+}
+
+/* Dump core-enable and all eight scratch registers to the kernel log */
+void cam_hw_cdm_dump_scratch_registors(struct cam_hw_info *cdm_hw)
+{
+	uint32_t dump_reg = 0;
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_CORE_EN, &dump_reg);
+	pr_err("dump core en=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_0_REG, &dump_reg);
+	pr_err("dump scratch0=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_1_REG, &dump_reg);
+	pr_err("dump scratch1=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_2_REG, &dump_reg);
+	pr_err("dump scratch2=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_3_REG, &dump_reg);
+	pr_err("dump scratch3=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_4_REG, &dump_reg);
+	pr_err("dump scratch4=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_5_REG, &dump_reg);
+	pr_err("dump scratch5=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_6_REG, &dump_reg);
+	pr_err("dump scratch6=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_7_REG, &dump_reg);
+	pr_err("dump scratch7=%x\n", dump_reg);
+
+}
+
+/*
+ * Dump the CDM core debug state to the kernel log. Pauses the core
+ * (CORE_EN=0x03), walks the optional AHB-log and BL-FIFO readback dumps
+ * (gated on the debug bits in CDM_DBG_CORE_DBUG), then re-enables the
+ * core. Holds hw_mutex for the duration.
+ */
+void cam_hw_cdm_dump_core_debug_registers(
+	struct cam_hw_info *cdm_hw)
+{
+	uint32_t dump_reg, core_dbg, loop_cnt;
+
+	mutex_lock(&cdm_hw->hw_mutex);
+	cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_CORE_EN, &dump_reg);
+	pr_err("CDM HW core status=%x\n", dump_reg);
+	/* First pause CDM */
+	cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_CORE_EN, 0x03);
+	cam_hw_cdm_bl_fifo_pending_bl_rb(cdm_hw, &dump_reg);
+	pr_err("CDM HW current pending BL=%x\n", dump_reg);
+	loop_cnt = dump_reg;
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_DEBUG_STATUS, &dump_reg);
+	pr_err("CDM HW Debug status reg=%x\n", dump_reg);
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CORE_DBUG, &core_dbg);
+	/* Bit 8: last-AHB-transaction logging enabled */
+	if (core_dbg & 0x100) {
+		cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_LAST_AHB_ADDR, &dump_reg);
+		pr_err("AHB dump reglastaddr=%x\n", dump_reg);
+		cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_LAST_AHB_DATA, &dump_reg);
+		pr_err("AHB dump reglastdata=%x\n", dump_reg);
+	} else {
+		pr_err("CDM HW AHB dump not enable\n");
+	}
+
+	/* Bit 16: BL FIFO readback enabled; dump each pending entry */
+	if (core_dbg & 0x10000) {
+		int i;
+
+		pr_err("CDM HW BL FIFO dump with loop count=%d\n", loop_cnt);
+		for (i = 0 ; i < loop_cnt ; i++) {
+			/* Select FIFO slot i, then read back base and len */
+			cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_RB, i);
+			cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_BASE_RB,
+				&dump_reg);
+			pr_err("BL(%d) base addr =%x\n", i, dump_reg);
+			cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_LEN_RB,
+				&dump_reg);
+			pr_err("BL(%d) len=%d tag=%d\n", i,
+				(dump_reg & 0xFFFFF), (dump_reg & 0xFF000000));
+		}
+	} else {
+		pr_err("CDM HW BL FIFO readback not enable\n");
+	}
+
+	pr_err("CDM HW default dump\n");
+	cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_CORE_CFG, &dump_reg);
+	pr_err("CDM HW core cfg=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_STATUS, &dump_reg);
+	pr_err("CDM HW irq status=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_SET, &dump_reg);
+	pr_err("CDM HW irq set reg=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CURRENT_BL_BASE, &dump_reg);
+	pr_err("CDM HW current BL base=%x\n", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CURRENT_BL_LEN, &dump_reg);
+	pr_err("CDM HW current BL len=%d tag=%d\n", (dump_reg & 0xFFFFF),
+		(dump_reg & 0xFF000000));
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CURRENT_USED_AHB_BASE, &dump_reg);
+	pr_err("CDM HW current AHB base=%x\n", dump_reg);
+
+	cam_hw_cdm_bl_fifo_pending_bl_rb(cdm_hw, &dump_reg);
+	pr_err("CDM HW current pending BL=%x\n", dump_reg);
+
+	/* Enable CDM back */
+	cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_CORE_EN, 1);
+	mutex_unlock(&cdm_hw->hw_mutex);
+
+}
+
+/*
+ * Wait until the HW BL FIFO has room for the requested writes.
+ * Returns a non-negative "write budget" (number of BLs the caller may
+ * push before polling again), or -1 on register-read or wait-timeout
+ * failure. Blocks on bl_complete (signalled from the BL-done IRQ) when
+ * the FIFO is full.
+ *
+ * NOTE(review): when not all bl_count entries fit, the returned budget is
+ * (bl_count - free_slots) rather than free_slots — confirm this is the
+ * intended semantics against the caller's write_count accounting.
+ */
+int cam_hw_cdm_wait_for_bl_fifo(struct cam_hw_info *cdm_hw,
+	uint32_t bl_count)
+{
+	uint32_t pending_bl = 0;
+	int32_t available_bl_slots = 0;
+	int rc = -1;
+	long time_left;
+	struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
+
+	do {
+		if (cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_PENDING_REQ_RB,
+			&pending_bl)) {
+			pr_err("Failed to read CDM pending BL's\n");
+			rc = -1;
+			break;
+		}
+		available_bl_slots = CAM_CDM_HWFIFO_SIZE - pending_bl;
+		if (available_bl_slots < 0) {
+			pr_err("Invalid available slots %d:%d:%d\n",
+				available_bl_slots, CAM_CDM_HWFIFO_SIZE,
+				pending_bl);
+			break;
+		}
+		/* One slot is kept in reserve, hence the -1 throughout */
+		if (bl_count < (available_bl_slots - 1)) {
+			CDM_CDBG("BL slot available_cnt=%d requested=%d\n",
+				(available_bl_slots - 1), bl_count);
+				rc = bl_count;
+				break;
+		} else if (0 == (available_bl_slots - 1)) {
+			/* FIFO full: wait for the HW to drain an entry */
+			time_left = wait_for_completion_timeout(
+				&core->bl_complete, msecs_to_jiffies(
+				CAM_CDM_BL_FIFO_WAIT_TIMEOUT));
+			if (time_left <= 0) {
+				pr_err("CDM HW BL Wait timed out failed\n");
+				rc = -1;
+				break;
+			}
+			rc = 0;
+			CDM_CDBG("CDM HW is ready for data\n");
+		} else {
+			rc = (bl_count - (available_bl_slots - 1));
+			break;
+		}
+	} while (1);
+
+	return rc;
+}
+
+/*
+ * Stage one BL entry (base address, 20-bit length, 8-bit tag) into the
+ * HW FIFO write registers. NOTE the inverted convention: returns true on
+ * FAILURE, false on success.
+ */
+bool cam_hw_cdm_bl_write(struct cam_hw_info *cdm_hw, uint32_t src,
+	uint32_t len, uint32_t tag)
+{
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_BASE_REG, src)) {
+		pr_err("Failed to write CDM base to BL base\n");
+		return true;
+	}
+	/* len occupies bits [19:0], tag bits [27:20] */
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_LEN_REG,
+		((len & 0xFFFFF) | ((tag & 0xFF) << 20)))) {
+		pr_err("Failed to write CDM BL len\n");
+		return true;
+	}
+	return false;
+}
+
+/*
+ * Commit the previously staged BL entry into the HW FIFO.
+ * Returns true on FAILURE, false on success (same inverted convention
+ * as cam_hw_cdm_bl_write).
+ */
+bool cam_hw_cdm_commit_bl_write(struct cam_hw_info *cdm_hw)
+{
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_STORE_REG, 1)) {
+		pr_err("Failed to write CDM commit BL\n");
+		return true;
+	}
+	return false;
+}
+
+/*
+ * Append a gen-irq command BL after a client's last BL so the HW raises
+ * an inline IRQ carrying the current bl_tag. Registers a callback node
+ * on bl_request_list first; the node is removed and freed on any
+ * submission failure. Returns 0 on success, negative on error.
+ */
+int cam_hw_cdm_submit_gen_irq(struct cam_hw_info *cdm_hw,
+	struct cam_cdm_hw_intf_cmd_submit_bl *req)
+{
+	struct cam_cdm_bl_cb_request_entry *node;
+	struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
+	uint32_t len;
+	int rc;
+
+	/* Tags are 6 bits wide in practice; reject anything above 63 */
+	if (core->bl_tag > 63) {
+		pr_err("bl_tag invalid =%d\n", core->bl_tag);
+		rc = -EINVAL;
+		goto end;
+	}
+	CDM_CDBG("CDM write BL last cmd tag=%d total=%d\n",
+		core->bl_tag, req->data->cmd_arrary_count);
+	node = kzalloc(sizeof(struct cam_cdm_bl_cb_request_entry),
+			GFP_KERNEL);
+	if (!node) {
+		rc = -ENOMEM;
+		goto end;
+	}
+	node->request_type = CAM_HW_CDM_BL_CB_CLIENT;
+	node->client_hdl = req->handle;
+	node->cookie = req->data->cookie;
+	node->bl_tag = core->bl_tag;
+	node->userdata = req->data->userdata;
+	list_add_tail(&node->entry, &core->bl_request_list);
+	/* Each tag owns one gen-irq command slot in the shared buffer */
+	len = core->ops->cdm_required_size_genirq() * core->bl_tag;
+	core->ops->cdm_write_genirq(((uint32_t *)core->gen_irq.kmdvaddr + len),
+		core->bl_tag);
+	/* len is in 32-bit words; the HW BL address/length are in bytes */
+	rc = cam_hw_cdm_bl_write(cdm_hw, (core->gen_irq.vaddr + (4*len)),
+		((4 * core->ops->cdm_required_size_genirq()) - 1),
+		core->bl_tag);
+	if (rc) {
+		pr_err("CDM hw bl write failed for gen irq bltag=%d\n",
+			core->bl_tag);
+		list_del_init(&node->entry);
+		kfree(node);
+		rc = -1;
+		goto end;
+	}
+
+	if (cam_hw_cdm_commit_bl_write(cdm_hw)) {
+		pr_err("Cannot commit the genirq BL with tag tag=%d\n",
+			core->bl_tag);
+		list_del_init(&node->entry);
+		kfree(node);
+		rc = -1;
+	}
+
+end:
+	return rc;
+}
+
+/*
+ * Submit a client's array of BLs (buffer lists) to the CDM HW FIFO.
+ * For each command: validate the length, wait for FIFO space, resolve
+ * the HW IOVA (from a mem handle or a caller-supplied IOVA), write and
+ * commit the BL, and — when req->data->flag is set — chain a gen-irq BL
+ * after the last entry so the client gets a completion callback.
+ * Holds hw_mutex and the client lock for the whole submission.
+ * Returns 0 on success, negative on any failure (submission stops at
+ * the first failing entry).
+ */
+int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw,
+	struct cam_cdm_hw_intf_cmd_submit_bl *req,
+	struct cam_cdm_client *client)
+{
+	int i, rc = -1;
+	struct cam_cdm_bl_request *cdm_cmd = req->data;
+	struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
+	uint32_t pending_bl = 0;
+	int write_count = 0;
+
+	if (req->data->cmd_arrary_count > CAM_CDM_HWFIFO_SIZE) {
+		pr_info("requested BL more than max size, cnt=%d max=%d\n",
+			req->data->cmd_arrary_count, CAM_CDM_HWFIFO_SIZE);
+	}
+
+	if (atomic_read(&core->error) != 0) {
+		pr_err("HW in error state, cannot trigger transactions now\n");
+		return rc;
+	}
+
+	mutex_lock(&cdm_hw->hw_mutex);
+	mutex_lock(&client->lock);
+	rc = cam_hw_cdm_bl_fifo_pending_bl_rb(cdm_hw, &pending_bl);
+	if (rc) {
+		pr_err("Cannot read the current BL depth\n");
+		mutex_unlock(&client->lock);
+		mutex_unlock(&cdm_hw->hw_mutex);
+		return rc;
+	}
+
+	for (i = 0; i < req->data->cmd_arrary_count ; i++) {
+		uint64_t hw_vaddr_ptr = 0;
+		size_t len = 0;
+
+		/*
+		 * Reject zero-length or over-1MB BLs. This must be a
+		 * logical OR: with "&&" the check is always false and
+		 * invalid lengths would reach the HW.
+		 */
+		if ((!cdm_cmd->cmd[i].len) ||
+			(cdm_cmd->cmd[i].len > 0x100000)) {
+			pr_err("cmd len(%d) is invalid cnt=%d total cnt=%d\n",
+				cdm_cmd->cmd[i].len, i,
+				req->data->cmd_arrary_count);
+			rc = -1;
+			break;
+		}
+		if (atomic_read(&core->error) != 0) {
+			pr_err("HW in error state cmd_count=%d total cnt=%d\n",
+				i, req->data->cmd_arrary_count);
+			rc = -1;
+			break;
+		}
+		/* Refresh the FIFO write budget only when it is exhausted */
+		if (write_count == 0) {
+			write_count = cam_hw_cdm_wait_for_bl_fifo(cdm_hw,
+				(req->data->cmd_arrary_count - i));
+			if (write_count < 0) {
+				pr_err("wait for bl fifo failed %d:%d\n",
+					i, req->data->cmd_arrary_count);
+				rc = -1;
+				break;
+			}
+		} else {
+			write_count--;
+		}
+
+		if (req->data->type == CAM_CDM_BL_CMD_TYPE_MEM_HANDLE) {
+			rc = cam_mem_get_io_buf(
+				cdm_cmd->cmd[i].bl_addr.mem_handle,
+				core->iommu_hdl.non_secure, &hw_vaddr_ptr,
+				&len);
+		} else if (req->data->type == CAM_CDM_BL_CMD_TYPE_HW_IOVA) {
+			if (!cdm_cmd->cmd[i].bl_addr.hw_iova) {
+				pr_err("Hw bl hw_iova is invalid %d:%d\n",
+					i, req->data->cmd_arrary_count);
+				rc = -1;
+				break;
+			}
+			rc = 0;
+			hw_vaddr_ptr =
+				(uint64_t)cdm_cmd->cmd[i].bl_addr.hw_iova;
+			len = cdm_cmd->cmd[i].len + cdm_cmd->cmd[i].offset;
+		} else {
+			pr_err("Only mem hdl/hw va type is supported %d\n",
+				req->data->type);
+			rc = -1;
+			break;
+		}
+
+		/* The offset must lie inside the resolved buffer */
+		if ((!rc) && (hw_vaddr_ptr) && (len) &&
+			(len >= cdm_cmd->cmd[i].offset)) {
+			CDM_CDBG("Got the HW VA\n");
+			rc = cam_hw_cdm_bl_write(cdm_hw,
+				((uint32_t)hw_vaddr_ptr +
+					cdm_cmd->cmd[i].offset),
+				(cdm_cmd->cmd[i].len - 1), core->bl_tag);
+			if (rc) {
+				pr_err("Hw bl write failed %d:%d\n",
+					i, req->data->cmd_arrary_count);
+				rc = -1;
+				break;
+			}
+		} else {
+			pr_err("Sanity check failed for hdl=%x len=%zu:%d\n",
+				cdm_cmd->cmd[i].bl_addr.mem_handle, len,
+				cdm_cmd->cmd[i].offset);
+			pr_err("Sanity check failed for %d:%d\n",
+				i, req->data->cmd_arrary_count);
+			rc = -1;
+			break;
+		}
+
+		if (!rc) {
+			CDM_CDBG("write BL success for cnt=%d with tag=%d\n",
+				i, core->bl_tag);
+			core->bl_tag++;
+			CDM_CDBG("Now commit the BL\n");
+			if (cam_hw_cdm_commit_bl_write(cdm_hw)) {
+				pr_err("Cannot commit the BL %d tag=%d\n",
+					i, (core->bl_tag - 1));
+				rc = -1;
+				break;
+			}
+			CDM_CDBG("BL commit success BL %d tag=%d\n", i,
+				(core->bl_tag - 1));
+			/* Chain a gen-irq BL after the last entry if asked */
+			if ((req->data->flag == true) &&
+				(i == (req->data->cmd_arrary_count -
+				1))) {
+				rc = cam_hw_cdm_submit_gen_irq(
+					cdm_hw, req);
+				if (rc == 0)
+					core->bl_tag++;
+			}
+			/* Wrap the tag before it reaches the FIFO limit */
+			if (!rc && ((CAM_CDM_HWFIFO_SIZE - 1) ==
+				core->bl_tag))
+				core->bl_tag = 0;
+		}
+	}
+	mutex_unlock(&client->lock);
+	mutex_unlock(&cdm_hw->hw_mutex);
+	return rc;
+
+}
+
+/*
+ * Workqueue bottom half for the CDM IRQ. Dispatches on the IRQ status
+ * bits captured by the hard IRQ handler: inline-IRQ (client BL done),
+ * reset done, BL done, and the three error conditions (invalid command,
+ * AHB bus error, overflow). Frees the payload when done.
+ */
+static void cam_hw_cdm_work(struct work_struct *work)
+{
+	struct cam_cdm_work_payload *payload;
+	struct cam_hw_info *cdm_hw;
+	struct cam_cdm *core;
+
+	payload = container_of(work, struct cam_cdm_work_payload, work);
+	if (payload) {
+		cdm_hw = payload->hw;
+		core = (struct cam_cdm *)cdm_hw->core_info;
+
+		CDM_CDBG("IRQ status=%x\n", payload->irq_status);
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_INFO_INLINE_IRQ_MASK) {
+			struct cam_cdm_bl_cb_request_entry *node;
+
+			CDM_CDBG("inline IRQ data=%x\n",
+				payload->irq_data);
+			mutex_lock(&cdm_hw->hw_mutex);
+			/* irq_data carries the bl_tag of the finished BL */
+			node = cam_cdm_find_request_by_bl_tag(
+					payload->irq_data,
+					&core->bl_request_list);
+			if (node) {
+				if (node->request_type ==
+					CAM_HW_CDM_BL_CB_CLIENT) {
+					cam_cdm_notify_clients(cdm_hw,
+						CAM_CDM_CB_STATUS_BL_SUCCESS,
+						(void *)node);
+				} else if (node->request_type ==
+						CAM_HW_CDM_BL_CB_INTERNAL) {
+					pr_err("Invalid node=%pK %d\n", node,
+						node->request_type);
+				}
+				list_del_init(&node->entry);
+				kfree(node);
+			} else {
+				pr_err("Invalid node for inline irq\n");
+			}
+			mutex_unlock(&cdm_hw->hw_mutex);
+		}
+
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_INFO_RST_DONE_MASK) {
+			CDM_CDBG("CDM HW reset done IRQ\n");
+			complete(&core->reset_complete);
+		}
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_INFO_BL_DONE_MASK) {
+			pr_err("CDM HW BL done IRQ\n");
+			complete(&core->bl_complete);
+		}
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK) {
+			pr_err("Invalid command IRQ, Need HW reset\n");
+			/* Block new submissions while dumping debug state */
+			atomic_inc(&core->error);
+			cam_hw_cdm_dump_core_debug_registers(cdm_hw);
+			atomic_dec(&core->error);
+		}
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_ERROR_AHB_BUS_MASK) {
+			pr_err("AHB IRQ\n");
+			cam_hw_cdm_dump_core_debug_registers(cdm_hw);
+		}
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_ERROR_OVER_FLOW_MASK) {
+			pr_err("Overflow IRQ\n");
+			cam_hw_cdm_dump_core_debug_registers(cdm_hw);
+		}
+		kfree(payload);
+	} else {
+		pr_err("NULL payload\n");
+	}
+
+}
+
+/*
+ * SMMU page-fault callback registered for this CDM. Marks the core as
+ * in-error while dumping debug state and notifying clients of the
+ * faulting IOVA, then clears the error flag.
+ */
+static void cam_hw_cdm_iommu_fault_handler(struct iommu_domain *domain,
+	struct device *dev, unsigned long iova, int flags, void *token)
+{
+	struct cam_hw_info *cdm_hw = NULL;
+	struct cam_cdm *core = NULL;
+
+	if (token) {
+		cdm_hw = (struct cam_hw_info *)token;
+		core = (struct cam_cdm *)cdm_hw->core_info;
+		atomic_inc(&core->error);
+		cam_hw_cdm_dump_core_debug_registers(cdm_hw);
+		pr_err("Page fault iova addr %pK\n", (void *)iova);
+		cam_cdm_notify_clients(cdm_hw, CAM_CDM_CB_STATUS_PAGEFAULT,
+			(void *)iova);
+		atomic_dec(&core->error);
+	} else {
+		pr_err("Invalid token\n");
+	}
+
+}
+
+/*
+ * Hard IRQ handler: snapshots IRQ status (and user data for inline
+ * IRQs) into a GFP_ATOMIC payload, clears the HW IRQ, and queues the
+ * bottom half on the per-CDM workqueue.
+ *
+ * NOTE(review): if the payload allocation fails the IRQ is never
+ * cleared in HW, which could leave the line asserted — confirm whether
+ * the clear should happen outside the payload branch.
+ */
+irqreturn_t cam_hw_cdm_irq(int irq_num, void *data)
+{
+	struct cam_hw_info *cdm_hw = data;
+	struct cam_cdm *cdm_core = cdm_hw->core_info;
+	struct cam_cdm_work_payload *payload;
+	bool work_status;
+
+	CDM_CDBG("Got irq\n");
+	payload = kzalloc(sizeof(struct cam_cdm_work_payload), GFP_ATOMIC);
+	if (payload) {
+		if (cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_STATUS,
+				&payload->irq_status)) {
+			pr_err("Failed to read CDM HW IRQ status\n");
+		}
+		/* Inline IRQs carry the finished BL tag in USR_DATA */
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_INFO_INLINE_IRQ_MASK) {
+			if (cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_USR_DATA,
+				&payload->irq_data)) {
+				pr_err("Failed to read CDM HW IRQ data\n");
+			}
+		}
+		CDM_CDBG("Got payload=%d\n", payload->irq_status);
+		payload->hw = cdm_hw;
+		INIT_WORK((struct work_struct *)&payload->work,
+			cam_hw_cdm_work);
+		/* Ack the interrupt before handing off to the worker */
+		if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_CLEAR,
+			payload->irq_status))
+			pr_err("Failed to Write CDM HW IRQ Clear\n");
+		if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_CLEAR_CMD, 0x01))
+			pr_err("Failed to Write CDM HW IRQ cmd\n");
+		work_status = queue_work(cdm_core->work_queue, &payload->work);
+		if (work_status == false) {
+			pr_err("Failed to queue work for irq=%x\n",
+				payload->irq_status);
+			kfree(payload);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Allocate the shared gen-irq command buffer (8 bytes per FIFO slot)
+ * from the mem manager's non-secure IO region and record its handle,
+ * IOVA (low 32 bits), kernel VA and size in core->gen_irq.
+ * Returns 0 on success, negative errno on failure.
+ */
+int cam_hw_cdm_alloc_genirq_mem(void *hw_priv)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_mem_mgr_request_desc genirq_alloc_cmd;
+	struct cam_mem_mgr_memory_desc genirq_alloc_out;
+	struct cam_cdm *cdm_core = NULL;
+	int rc =  -EINVAL;
+
+	if (!hw_priv)
+		return rc;
+
+	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+	genirq_alloc_cmd.align = 0;
+	genirq_alloc_cmd.size = (8 * CAM_CDM_HWFIFO_SIZE);
+	genirq_alloc_cmd.smmu_hdl = cdm_core->iommu_hdl.non_secure;
+	genirq_alloc_cmd.flags = 0;
+	genirq_alloc_cmd.region = CAM_MEM_MGR_REGION_NON_SECURE_IO;
+	rc = cam_mem_mgr_request_mem(&genirq_alloc_cmd,
+		&genirq_alloc_out);
+	if (rc) {
+		pr_err("Failed to get genirq cmd space rc=%d\n", rc);
+		goto end;
+	}
+	cdm_core->gen_irq.handle = genirq_alloc_out.mem_handle;
+	/* HW takes a 32-bit address; keep only the low word of the IOVA */
+	cdm_core->gen_irq.vaddr = (genirq_alloc_out.iova & 0xFFFFFFFF);
+	cdm_core->gen_irq.kmdvaddr = genirq_alloc_out.kva;
+	cdm_core->gen_irq.size = genirq_alloc_out.len;
+
+end:
+	return rc;
+}
+
+/* Release the gen-irq command buffer back to the mem manager */
+int cam_hw_cdm_release_genirq_mem(void *hw_priv)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_cdm *cdm_core = NULL;
+	struct cam_mem_mgr_memory_desc genirq_release_cmd;
+	int rc =  -EINVAL;
+
+	if (!hw_priv)
+		return rc;
+
+	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+	genirq_release_cmd.mem_handle = cdm_core->gen_irq.handle;
+	rc = cam_mem_mgr_release_mem(&genirq_release_cmd);
+	if (rc)
+		pr_err("Failed to put genirq cmd space for hw\n");
+
+	return rc;
+}
+
+/*
+ * Power up and reset the CDM HW: enable platform resources, unmask
+ * IRQs, trigger a HW reset and wait for the reset-done completion
+ * (signalled from the IRQ bottom half), then program IRQ mask and CGC.
+ * Returns 0 on success; on failure disables platform resources and
+ * returns -1/-EINVAL.
+ */
+int cam_hw_cdm_init(void *hw_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_cdm *cdm_core = NULL;
+	int rc;
+	long time_left;
+
+	if (!hw_priv)
+		return -EINVAL;
+
+	soc_info = &cdm_hw->soc_info;
+	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true, true);
+	if (rc) {
+		pr_err("Enable platform failed\n");
+		goto end;
+	}
+
+	CDM_CDBG("Enable soc done\n");
+
+/* Before triggering the reset to HW, clear the reset complete */
+	reinit_completion(&cdm_core->reset_complete);
+	reinit_completion(&cdm_core->bl_complete);
+
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK, 0x70003)) {
+		pr_err("Failed to Write CDM HW IRQ mask\n");
+		goto disable_return;
+	}
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_RST_CMD, 0x9)) {
+		pr_err("Failed to Write CDM HW reset\n");
+		goto disable_return;
+	}
+
+	CDM_CDBG("Waiting for CDM HW resetdone\n");
+	time_left = wait_for_completion_timeout(&cdm_core->reset_complete,
+			msecs_to_jiffies(CAM_CDM_HW_RESET_TIMEOUT));
+	/* NOTE(review): rc is 0 here, so the timeout log prints a stale
+	 * rc value; time_left would be the meaningful datum.
+	 */
+	if (time_left <= 0) {
+		pr_err("CDM HW reset Wait failed rc=%d\n", rc);
+		goto disable_return;
+	} else {
+		CDM_CDBG("CDM Init success\n");
+		cdm_hw->hw_state = CAM_HW_STATE_POWER_UP;
+		cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK, 0x70003);
+		cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_CGC_CFG, 0x7);
+		rc = 0;
+		goto end;
+	}
+
+disable_return:
+	rc = -1;
+	cam_soc_util_disable_platform_resource(soc_info, true, true);
+end:
+	return rc;
+}
+
+/*
+ * Power down the CDM HW by disabling platform resources and marking
+ * the HW state as powered down. Returns the disable call's status.
+ */
+int cam_hw_cdm_deinit(void *hw_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_cdm *cdm_core = NULL;
+	int rc = 0;
+
+	if (!hw_priv)
+		return -EINVAL;
+
+	soc_info = &cdm_hw->soc_info;
+	cdm_core = cdm_hw->core_info;
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+	if (rc) {
+		pr_err("disable platform failed\n");
+	} else {
+		CDM_CDBG("CDM Deinit success\n");
+		cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	}
+
+	return rc;
+}
+
+/*
+ * Platform probe for a CDM HW instance. Allocates the hw_intf/hw_info/
+ * core triple, reads DT properties, attaches the non-secure SMMU
+ * handle, creates the per-CDM workqueue, requests platform resources,
+ * registers with CPAS, briefly powers the HW up to read its version
+ * and pick the matching ops table, then registers with the CDM
+ * interface layer. Unwinds in reverse order on any failure.
+ */
+int cam_hw_cdm_probe(struct platform_device *pdev)
+{
+	int rc;
+	struct cam_hw_info *cdm_hw = NULL;
+	struct cam_hw_intf *cdm_hw_intf = NULL;
+	struct cam_cdm *cdm_core = NULL;
+	struct cam_cdm_private_dt_data *soc_private = NULL;
+	struct cam_cpas_register_params cpas_parms;
+
+	cdm_hw_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!cdm_hw_intf)
+		return -ENOMEM;
+
+	cdm_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!cdm_hw) {
+		kfree(cdm_hw_intf);
+		return -ENOMEM;
+	}
+
+	cdm_hw->core_info = kzalloc(sizeof(struct cam_cdm), GFP_KERNEL);
+	if (!cdm_hw->core_info) {
+		kfree(cdm_hw);
+		kfree(cdm_hw_intf);
+		return -ENOMEM;
+	}
+
+	cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	cdm_hw->soc_info.pdev = pdev;
+	cdm_hw_intf->hw_type = CAM_HW_CDM;
+	cdm_hw->open_count = 0;
+	mutex_init(&cdm_hw->hw_mutex);
+	spin_lock_init(&cdm_hw->hw_lock);
+	init_completion(&cdm_hw->hw_complete);
+
+	rc = cam_hw_cdm_soc_get_dt_properties(cdm_hw, msm_cam_hw_cdm_dt_match);
+	if (rc) {
+		pr_err("Failed to get dt properties\n");
+		goto release_mem;
+	}
+	cdm_hw_intf->hw_idx = cdm_hw->soc_info.pdev->id;
+	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+	soc_private = (struct cam_cdm_private_dt_data *)
+		cdm_hw->soc_info.soc_private;
+	if (soc_private->dt_cdm_shared == true)
+		cdm_core->flags = CAM_CDM_FLAG_SHARED_CDM;
+	else
+		cdm_core->flags = CAM_CDM_FLAG_PRIVATE_CDM;
+
+	cdm_core->bl_tag = 0;
+	atomic_set(&cdm_core->error, 0);
+	cdm_core->id = cam_hw_cdm_get_id_by_name(cdm_core->name);
+	if (cdm_core->id >= CAM_CDM_MAX) {
+		pr_err("Failed to get CDM HW name for %s\n", cdm_core->name);
+		goto release_private_mem;
+	}
+	INIT_LIST_HEAD(&cdm_core->bl_request_list);
+	init_completion(&cdm_core->reset_complete);
+	init_completion(&cdm_core->bl_complete);
+	cdm_hw_intf->hw_priv = cdm_hw;
+	cdm_hw_intf->hw_ops.get_hw_caps = cam_cdm_get_caps;
+	cdm_hw_intf->hw_ops.init = cam_hw_cdm_init;
+	cdm_hw_intf->hw_ops.deinit = cam_hw_cdm_deinit;
+	cdm_hw_intf->hw_ops.start = cam_cdm_stream_start;
+	cdm_hw_intf->hw_ops.stop = cam_cdm_stream_stop;
+	cdm_hw_intf->hw_ops.read = NULL;
+	cdm_hw_intf->hw_ops.write = NULL;
+	cdm_hw_intf->hw_ops.process_cmd = cam_cdm_process_cmd;
+	mutex_lock(&cdm_hw->hw_mutex);
+
+	CDM_CDBG("type %d index %d\n", cdm_hw_intf->hw_type,
+		cdm_hw_intf->hw_idx);
+
+	platform_set_drvdata(pdev, cdm_hw_intf);
+
+	rc = cam_smmu_get_handle("cpas-cdm0", &cdm_core->iommu_hdl.non_secure);
+	if (rc < 0) {
+		pr_err("cpas-cdm get iommu handle failed\n");
+		goto unlock_release_mem;
+	}
+	cam_smmu_reg_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
+		cam_hw_cdm_iommu_fault_handler, cdm_hw);
+
+	rc = cam_smmu_ops(cdm_core->iommu_hdl.non_secure, CAM_SMMU_ATTACH);
+	if (rc < 0) {
+		pr_err("Attach iommu non secure handle failed\n");
+		goto destroy_non_secure_hdl;
+	}
+	cdm_core->iommu_hdl.secure = -1;
+
+	cdm_core->work_queue = alloc_workqueue(cdm_core->name,
+		WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS,
+		CAM_CDM_INFLIGHT_WORKS);
+	/*
+	 * alloc_workqueue() can fail; without this check the IRQ handler
+	 * would later queue_work() on a NULL workqueue and the error
+	 * paths would flush/destroy NULL.
+	 */
+	if (!cdm_core->work_queue) {
+		pr_err("Workqueue allocation failed\n");
+		rc = -ENOMEM;
+		goto destroy_non_secure_hdl;
+	}
+
+	rc = cam_soc_util_request_platform_resource(&cdm_hw->soc_info,
+			cam_hw_cdm_irq, cdm_hw);
+	if (rc) {
+		pr_err("Failed to request platform resource\n");
+		goto destroy_non_secure_hdl;
+	}
+
+	cpas_parms.cam_cpas_client_cb = cam_cdm_cpas_cb;
+	cpas_parms.cell_index = cdm_hw->soc_info.index;
+	cpas_parms.dev = &pdev->dev;
+	cpas_parms.userdata = cdm_hw_intf;
+	strlcpy(cpas_parms.identifier, "cpas-cdm", CAM_HW_IDENTIFIER_LENGTH);
+	rc = cam_cpas_register_client(&cpas_parms);
+	if (rc) {
+		pr_err("Virtual CDM CPAS registration failed\n");
+		goto release_platform_resource;
+	}
+	CDM_CDBG("CPAS registration successful handle=%d\n",
+		cpas_parms.client_handle);
+	cdm_core->cpas_handle = cpas_parms.client_handle;
+
+	/* Power up just long enough to identify the HW */
+	rc = cam_hw_cdm_init(cdm_hw, NULL, 0);
+	if (rc) {
+		pr_err("Failed to Init CDM HW\n");
+		goto init_failed;
+	}
+	cdm_hw->open_count++;
+
+	if (cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_HW_VERSION,
+		&cdm_core->hw_version)) {
+		pr_err("Failed to read CDM HW Version\n");
+		goto deinit;
+	}
+
+	/*
+	 * NOTE(review): this reads CDM_CFG_HW_VERSION a second time for
+	 * the family version; likely the family/TITAN version register
+	 * was intended — confirm against cam_hw_cdm170_reg.h.
+	 */
+	if (cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_HW_VERSION,
+		&cdm_core->hw_family_version)) {
+		pr_err("Failed to read CDM family Version\n");
+		goto deinit;
+	}
+
+	CDM_CDBG("CDM Hw version read success family =%x hw =%x\n",
+		cdm_core->hw_family_version, cdm_core->hw_version);
+	cdm_core->ops = cam_cdm_get_ops(cdm_core->hw_version, NULL,
+		false);
+	if (!cdm_core->ops) {
+		pr_err("Failed to util ops for hw\n");
+		goto deinit;
+	}
+
+	if (!cam_cdm_set_cam_hw_version(cdm_core->hw_version,
+		&cdm_core->version)) {
+		pr_err("Failed to set cam he version for hw\n");
+		goto deinit;
+	}
+
+	rc = cam_hw_cdm_deinit(cdm_hw, NULL, 0);
+	if (rc) {
+		pr_err("Failed to Deinit CDM HW\n");
+		goto release_platform_resource;
+	}
+
+	rc = cam_cdm_intf_register_hw_cdm(cdm_hw_intf,
+		soc_private, CAM_HW_CDM, &cdm_core->index);
+	if (rc) {
+		pr_err("HW CDM Interface registration failed\n");
+		goto release_platform_resource;
+	}
+	cdm_hw->open_count--;
+	mutex_unlock(&cdm_hw->hw_mutex);
+
+	CDM_CDBG("CDM%d probe successful\n", cdm_hw_intf->hw_idx);
+
+	return rc;
+
+deinit:
+	if (cam_hw_cdm_deinit(cdm_hw, NULL, 0))
+		pr_err("Deinit failed for hw\n");
+	cdm_hw->open_count--;
+init_failed:
+	if (cam_cpas_unregister_client(cdm_core->cpas_handle))
+		pr_err("CPAS unregister failed\n");
+release_platform_resource:
+	if (cam_soc_util_release_platform_resource(&cdm_hw->soc_info))
+		pr_err("Release platform resource failed\n");
+
+	flush_workqueue(cdm_core->work_queue);
+	destroy_workqueue(cdm_core->work_queue);
+destroy_non_secure_hdl:
+	cam_smmu_reg_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
+		NULL, cdm_hw);
+	if (cam_smmu_destroy_handle(cdm_core->iommu_hdl.non_secure))
+		pr_err("Release iommu secure hdl failed\n");
+unlock_release_mem:
+	mutex_unlock(&cdm_hw->hw_mutex);
+release_private_mem:
+	kfree(cdm_hw->soc_info.soc_private);
+release_mem:
+	mutex_destroy(&cdm_hw->hw_mutex);
+	kfree(cdm_hw_intf);
+	kfree(cdm_hw->core_info);
+	kfree(cdm_hw);
+	return rc;
+}
+
+/*
+ * Platform remove: tears down the CDM instance created in probe in
+ * reverse order (HW deinit, CPAS unregister, platform resources,
+ * workqueue, SMMU handle, allocations). Refuses to remove while the
+ * HW is still open. Returns 0 on success, -EBUSY/error otherwise.
+ */
+int cam_hw_cdm_remove(struct platform_device *pdev)
+{
+	int rc = -EBUSY;
+	struct cam_hw_info *cdm_hw = NULL;
+	struct cam_hw_intf *cdm_hw_intf = NULL;
+	struct cam_cdm *cdm_core = NULL;
+
+	cdm_hw_intf = platform_get_drvdata(pdev);
+	if (!cdm_hw_intf) {
+		pr_err("Failed to get dev private data\n");
+		return rc;
+	}
+
+	cdm_hw = cdm_hw_intf->hw_priv;
+	if (!cdm_hw) {
+		pr_err("Failed to get hw private data for type=%d idx=%d\n",
+			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
+		return rc;
+	}
+
+	cdm_core = cdm_hw->core_info;
+	if (!cdm_core) {
+		pr_err("Failed to get hw core data for type=%d idx=%d\n",
+			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
+		return rc;
+	}
+
+	if (cdm_hw->open_count != 0) {
+		pr_err("Hw open count invalid type=%d idx=%d cnt=%d\n",
+			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx,
+			cdm_hw->open_count);
+		return rc;
+	}
+
+	rc = cam_hw_cdm_deinit(cdm_hw, NULL, 0);
+	if (rc) {
+		pr_err("Deinit failed for hw\n");
+		return rc;
+	}
+
+	rc = cam_cpas_unregister_client(cdm_core->cpas_handle);
+	if (rc) {
+		pr_err("CPAS unregister failed\n");
+		return rc;
+	}
+
+	if (cam_soc_util_release_platform_resource(&cdm_hw->soc_info))
+		pr_err("Release platform resource failed\n");
+
+	flush_workqueue(cdm_core->work_queue);
+	destroy_workqueue(cdm_core->work_queue);
+
+	/* NOTE(review): handler is detached after the handle is destroyed;
+	 * probe's error path does these in the opposite order — confirm
+	 * which ordering is intended.
+	 */
+	if (cam_smmu_destroy_handle(cdm_core->iommu_hdl.non_secure))
+		pr_err("Release iommu secure hdl failed\n");
+	cam_smmu_reg_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
+		NULL, cdm_hw);
+
+	mutex_destroy(&cdm_hw->hw_mutex);
+	kfree(cdm_hw->soc_info.soc_private);
+	kfree(cdm_hw_intf);
+	kfree(cdm_hw->core_info);
+	kfree(cdm_hw);
+
+	return 0;
+}
+
+/* Platform driver glue for the real (non-virtual) CDM HW instances */
+static struct platform_driver cam_hw_cdm_driver = {
+	.probe = cam_hw_cdm_probe,
+	.remove = cam_hw_cdm_remove,
+	.driver = {
+		.name = "msm_cam_cdm",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_cam_hw_cdm_dt_match,
+	},
+};
+
+static int __init cam_hw_cdm_init_module(void)
+{
+	return platform_driver_register(&cam_hw_cdm_driver);
+}
+
+static void __exit cam_hw_cdm_exit_module(void)
+{
+	platform_driver_unregister(&cam_hw_cdm_driver);
+}
+
+module_init(cam_hw_cdm_init_module);
+module_exit(cam_hw_cdm_exit_module);
+MODULE_DESCRIPTION("MSM Camera HW CDM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf.c
new file mode 100644
index 0000000..b1b2117
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf.c
@@ -0,0 +1,569 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-CDM-INTF %s:%d " fmt, __func__, __LINE__
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+#include "cam_cdm_intf_api.h"
+#include "cam_cdm.h"
+#include "cam_cdm_virtual.h"
+#include "cam_soc_util.h"
+#include "cam_cdm_soc.h"
+
+/* Singleton CDM interface-manager state; guarded by cam_cdm_mgr_lock. */
+static struct cam_cdm_intf_mgr cdm_mgr;
+static DEFINE_MUTEX(cam_cdm_mgr_lock);
+
+/* Device-tree match table for the CDM interface platform device. */
+static const struct of_device_id msm_cam_cdm_intf_dt_match[] = {
+	{ .compatible = "qcom,cam-cdm-intf", },
+	{}
+};
+
+/*
+ * Take a reference on the CDM interface manager.
+ * Returns 0 on success, -1 if the manager has not been probed yet.
+ */
+static int get_cdm_mgr_refcount(void)
+{
+	int rc = 0;
+
+	mutex_lock(&cam_cdm_mgr_lock);
+	if (cdm_mgr.probe_done == false) {
+		pr_err("CDM intf mgr not probed yet\n");
+		rc = -1;
+	} else {
+		CDM_CDBG("CDM intf mgr get refcount=%d\n",
+			cdm_mgr.refcount);
+		cdm_mgr.refcount++;
+	}
+	mutex_unlock(&cam_cdm_mgr_lock);
+	return rc;
+}
+
+/*
+ * Drop a reference on the CDM interface manager.
+ * Warns (WARN_ON) on an unbalanced put when the count is already zero.
+ */
+static void put_cdm_mgr_refcount(void)
+{
+	mutex_lock(&cam_cdm_mgr_lock);
+	if (cdm_mgr.probe_done == false) {
+		pr_err("CDM intf mgr not probed yet\n");
+	} else {
+		CDM_CDBG("CDM intf mgr put refcount=%d\n",
+			cdm_mgr.refcount);
+		if (cdm_mgr.refcount > 0) {
+			cdm_mgr.refcount--;
+		} else {
+			pr_err("Refcount put when zero\n");
+			WARN_ON(1);
+		}
+	}
+	mutex_unlock(&cam_cdm_mgr_lock);
+}
+
+/*
+ * Query iommu handles from the CDM HW at @hw_idx via its get_hw_caps op.
+ * Returns the op's return code, or -1 when the node has no such op.
+ */
+static int get_cdm_iommu_handle(struct cam_iommu_handle *cdm_handles,
+	uint32_t hw_idx)
+{
+	int rc = -1;
+	struct cam_hw_intf *hw = cdm_mgr.nodes[hw_idx].device;
+
+	if (hw->hw_ops.get_hw_caps) {
+		rc = hw->hw_ops.get_hw_caps(hw->hw_priv, cdm_handles,
+			sizeof(struct cam_iommu_handle));
+	}
+
+	return rc;
+}
+
+/*
+ * Find the CDM node whose device-tree client-name list contains
+ * @identifier.  On success writes the node index to *hw_index and
+ * returns 0; returns -1 when no node matches.
+ *
+ * NOTE(review): @cell_index is only logged here; the match is done on
+ * @identifier alone — confirm whether cell_index was meant to be folded
+ * into the name that is compared.
+ */
+static int get_cdm_index_by_id(char *identifier,
+	uint32_t cell_index, uint32_t *hw_index)
+{
+	int rc = -1, i, j;
+	char client_name[128];
+
+	CDM_CDBG("Looking for HW id of =%s and index=%d\n",
+		identifier, cell_index);
+	snprintf(client_name, sizeof(client_name), "%s", identifier);
+	CDM_CDBG("Looking for HW id of %s count:%d\n", client_name,
+		cdm_mgr.cdm_count);
+	mutex_lock(&cam_cdm_mgr_lock);
+	for (i = 0; i < cdm_mgr.cdm_count; i++) {
+		mutex_lock(&cdm_mgr.nodes[i].lock);
+		CDM_CDBG("dt_num_supported_clients=%d\n",
+			cdm_mgr.nodes[i].data->dt_num_supported_clients);
+
+		for (j = 0; j <
+			cdm_mgr.nodes[i].data->dt_num_supported_clients; j++) {
+			CDM_CDBG("client name:%s\n",
+				cdm_mgr.nodes[i].data->dt_cdm_client_name[j]);
+			if (!strcmp(
+				cdm_mgr.nodes[i].data->dt_cdm_client_name[j],
+				client_name)) {
+				rc = 0;
+				*hw_index = i;
+				break;
+			}
+		}
+		mutex_unlock(&cdm_mgr.nodes[i].lock);
+		if (rc == 0)
+			break;
+	}
+	mutex_unlock(&cam_cdm_mgr_lock);
+
+	return rc;
+}
+
+/*
+ * Public API: look up the iommu handles for the CDM that serves the
+ * client named @identifier.  Scans every registered node's dt client
+ * list; on a name match forwards to get_cdm_iommu_handle().
+ * Returns 0 on success, -1 when no node serves @identifier.
+ */
+int cam_cdm_get_iommu_handle(char *identifier,
+	struct cam_iommu_handle *cdm_handles)
+{
+	int i, j, rc = -1;
+
+	if ((!identifier) || (!cdm_handles))
+		return -EINVAL;
+
+	if (get_cdm_mgr_refcount()) {
+		pr_err("CDM intf mgr get refcount failed\n");
+		return rc;
+	}
+	CDM_CDBG("Looking for Iommu handle of %s\n", identifier);
+
+	for (i = 0; i < cdm_mgr.cdm_count; i++) {
+		mutex_lock(&cdm_mgr.nodes[i].lock);
+		/* Skip slots that were registered without dt data. */
+		if (!cdm_mgr.nodes[i].data) {
+			mutex_unlock(&cdm_mgr.nodes[i].lock);
+			continue;
+		}
+		for (j = 0; j <
+			 cdm_mgr.nodes[i].data->dt_num_supported_clients;
+			j++) {
+			if (!strcmp(
+				cdm_mgr.nodes[i].data->dt_cdm_client_name[j],
+				identifier)) {
+				rc = get_cdm_iommu_handle(cdm_handles, i);
+				break;
+			}
+		}
+		mutex_unlock(&cdm_mgr.nodes[i].lock);
+		if (rc == 0)
+			break;
+	}
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_get_iommu_handle);
+
+/*
+ * Public API: acquire a CDM for the client described in @data.
+ *
+ * Resolves @data->identifier to a HW index and forwards an ACQUIRE
+ * command to that node's process_cmd op, which fills in @data->handle.
+ * Only CAM_CDM_VIRTUAL/CAM_CDM_HW_ANY ids are accepted (see enum order).
+ *
+ * The manager reference taken here is held for the lifetime of the
+ * acquisition on success and dropped on failure; cam_cdm_release()
+ * drops it on the success path.
+ *
+ * Returns 0 on success, negative value on failure.
+ */
+int cam_cdm_acquire(struct cam_cdm_acquire_data *data)
+{
+	int rc = -1;
+	struct cam_hw_intf *hw;
+	uint32_t hw_index = 0;
+
+	if ((!data) || (!data->identifier) || (!data->base_array) ||
+		(!data->base_array_cnt))
+		return -EINVAL;
+
+	if (get_cdm_mgr_refcount()) {
+		pr_err("CDM intf mgr get refcount failed\n");
+		return rc;
+	}
+
+	if (data->id > CAM_CDM_HW_ANY) {
+		pr_err("only CAM_CDM_VIRTUAL/CAM_CDM_HW_ANY is supported\n");
+		rc = -1;
+		goto end;
+	}
+	rc = get_cdm_index_by_id(data->identifier, data->cell_index,
+		&hw_index);
+	/*
+	 * Fix: bail out when the lookup failed OR the index is out of
+	 * range.  The original tested "(rc < 0) && (hw_index < MAX)",
+	 * which would have fallen through to the nodes[] access for an
+	 * out-of-range index.
+	 */
+	if ((rc < 0) || (hw_index >= CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM)) {
+		pr_err("Failed to identify associated hw id\n");
+		goto end;
+	} else {
+		CDM_CDBG("hw_index:%d\n", hw_index);
+		hw = cdm_mgr.nodes[hw_index].device;
+		if (hw && hw->hw_ops.process_cmd) {
+			rc = hw->hw_ops.process_cmd(hw->hw_priv,
+					CAM_CDM_HW_INTF_CMD_ACQUIRE, data,
+					sizeof(struct cam_cdm_acquire_data));
+			if (rc < 0) {
+				pr_err("CDM hw acquire failed\n");
+				goto end;
+			}
+		} else {
+			pr_err("idx %d doesn't have acquire ops\n", hw_index);
+			rc = -1;
+		}
+	}
+end:
+	if (rc < 0) {
+		pr_err("CDM acquire failed for id=%d name=%s, idx=%d\n",
+			data->id, data->identifier, data->cell_index);
+		put_cdm_mgr_refcount();
+	}
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_acquire);
+
+/*
+ * Public API: release a previously acquired CDM handle.
+ *
+ * On success two manager references are dropped deliberately: the one
+ * taken at the top of this function and the one still held since
+ * cam_cdm_acquire() succeeded.
+ */
+int cam_cdm_release(uint32_t handle)
+{
+	uint32_t hw_index;
+	int rc = -1;
+	struct cam_hw_intf *hw;
+
+	if (get_cdm_mgr_refcount()) {
+		pr_err("CDM intf mgr get refcount failed\n");
+		return rc;
+	}
+
+	hw_index = CAM_CDM_GET_HW_IDX(handle);
+	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
+		hw = cdm_mgr.nodes[hw_index].device;
+		if (hw && hw->hw_ops.process_cmd) {
+			rc = hw->hw_ops.process_cmd(hw->hw_priv,
+					CAM_CDM_HW_INTF_CMD_RELEASE, &handle,
+					sizeof(handle));
+			if (rc < 0)
+				pr_err("hw release failed for handle=%x\n",
+					handle);
+		} else
+			pr_err("hw idx %d doesn't have release ops\n",
+				hw_index);
+	}
+	put_cdm_mgr_refcount();
+	/* Second put balances the reference held since acquire. */
+	if (rc == 0)
+		put_cdm_mgr_refcount();
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_release);
+
+
+/*
+ * Public API: submit a batch of base/length (BL) commands to the CDM
+ * identified by @handle.  Wraps @data with the handle and forwards a
+ * SUBMIT_BL command to the node's process_cmd op.
+ * Returns 0 on success, negative value on failure.
+ */
+int cam_cdm_submit_bls(uint32_t handle, struct cam_cdm_bl_request *data)
+{
+	uint32_t hw_index;
+	int rc = -1;
+	struct cam_hw_intf *hw;
+
+	if (!data)
+		return rc;
+
+	if (get_cdm_mgr_refcount()) {
+		pr_err("CDM intf mgr get refcount failed\n");
+		return rc;
+	}
+
+	hw_index = CAM_CDM_GET_HW_IDX(handle);
+	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
+		struct cam_cdm_hw_intf_cmd_submit_bl req;
+
+		hw = cdm_mgr.nodes[hw_index].device;
+		if (hw && hw->hw_ops.process_cmd) {
+			req.data = data;
+			req.handle = handle;
+			rc = hw->hw_ops.process_cmd(hw->hw_priv,
+				CAM_CDM_HW_INTF_CMD_SUBMIT_BL, &req,
+				sizeof(struct cam_cdm_hw_intf_cmd_submit_bl));
+			if (rc < 0)
+				pr_err("hw submit bl failed for handle=%x\n",
+					handle);
+		} else {
+			pr_err("hw idx %d doesn't have submit ops\n",
+				hw_index);
+		}
+	}
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_submit_bls);
+
+/*
+ * Public API: stream ON the CDM backing @handle by calling the node's
+ * start op (the HW layer manages clocks/power from active clients).
+ * Returns 0 on success, negative value on failure.
+ */
+int cam_cdm_stream_on(uint32_t handle)
+{
+	uint32_t hw_index;
+	int rc = -1;
+	struct cam_hw_intf *hw;
+
+	if (get_cdm_mgr_refcount()) {
+		pr_err("CDM intf mgr get refcount failed\n");
+		return rc;
+	}
+
+	hw_index = CAM_CDM_GET_HW_IDX(handle);
+	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
+		hw = cdm_mgr.nodes[hw_index].device;
+		/*
+		 * Fix: the if/else below was double-indented, suggesting
+		 * a nesting level that does not exist; re-indented to
+		 * match cam_cdm_stream_off().
+		 */
+		if (hw && hw->hw_ops.start) {
+			rc = hw->hw_ops.start(hw->hw_priv, &handle,
+					sizeof(uint32_t));
+			if (rc < 0)
+				pr_err("hw start failed handle=%x\n",
+					handle);
+		} else {
+			pr_err("hw idx %d doesn't have start ops\n",
+				hw_index);
+		}
+	}
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_stream_on);
+
+/*
+ * Public API: stream OFF the CDM backing @handle via the node's stop op.
+ * Returns 0 on success, negative value on failure.
+ */
+int cam_cdm_stream_off(uint32_t handle)
+{
+	uint32_t hw_index;
+	int rc = -1;
+	struct cam_hw_intf *hw;
+
+	if (get_cdm_mgr_refcount()) {
+		pr_err("CDM intf mgr get refcount failed\n");
+		return rc;
+	}
+
+	hw_index = CAM_CDM_GET_HW_IDX(handle);
+	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
+		hw = cdm_mgr.nodes[hw_index].device;
+		if (hw && hw->hw_ops.stop) {
+			rc = hw->hw_ops.stop(hw->hw_priv, &handle,
+					sizeof(uint32_t));
+			if (rc < 0)
+				pr_err("hw stop failed handle=%x\n",
+					handle);
+		} else {
+			pr_err("hw idx %d doesn't have stop ops\n",
+				hw_index);
+		}
+	}
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_stream_off);
+
+/*
+ * Public API: reset the CDM backing @handle by forwarding a RESET_HW
+ * command to the node's process_cmd op.
+ * Returns 0 on success, negative value on failure.
+ */
+int cam_cdm_reset_hw(uint32_t handle)
+{
+	uint32_t hw_index;
+	int rc = -1;
+	struct cam_hw_intf *hw;
+
+	if (get_cdm_mgr_refcount()) {
+		pr_err("CDM intf mgr get refcount failed\n");
+		return rc;
+	}
+
+	hw_index = CAM_CDM_GET_HW_IDX(handle);
+	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
+		hw = cdm_mgr.nodes[hw_index].device;
+		if (hw && hw->hw_ops.process_cmd) {
+			rc = hw->hw_ops.process_cmd(hw->hw_priv,
+					CAM_CDM_HW_INTF_CMD_RESET_HW, &handle,
+					sizeof(handle));
+			/* Fix: messages said "release" — copy-paste from
+			 * cam_cdm_release(); this is the reset path.
+			 */
+			if (rc < 0)
+				pr_err("CDM hw reset failed for handle=%x\n",
+					handle);
+		} else {
+			pr_err("hw idx %d doesn't have reset ops\n",
+				hw_index);
+		}
+	}
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_reset_hw);
+
+/*
+ * Register a CDM (virtual or HW) with the interface manager.
+ *
+ * Virtual CDMs occupy the fixed CAM_SW_CDM_INDEX slot; HW CDMs occupy
+ * the next free slot (cdm_count) and require the virtual CDM to have
+ * been registered first (cdm_count > 0).  The assigned slot is returned
+ * through *index.  Returns 0 on success, -EINVAL otherwise.
+ *
+ * NOTE(review): the virtual branch returns *index = cdm_mgr.cdm_count
+ * but stores into nodes[CAM_SW_CDM_INDEX]; these agree only when
+ * cdm_count equals CAM_SW_CDM_INDEX at registration time — confirm.
+ */
+int cam_cdm_intf_register_hw_cdm(struct cam_hw_intf *hw,
+	struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
+	uint32_t *index)
+{
+	int rc = -EINVAL;
+
+	if ((!hw) || (!data) || (!index))
+		return rc;
+
+	if (get_cdm_mgr_refcount()) {
+		pr_err("CDM intf mgr get refcount failed\n");
+		return rc;
+	}
+
+	mutex_lock(&cam_cdm_mgr_lock);
+	if ((type == CAM_VIRTUAL_CDM) &&
+		(!cdm_mgr.nodes[CAM_SW_CDM_INDEX].device)) {
+		mutex_lock(&cdm_mgr.nodes[CAM_SW_CDM_INDEX].lock);
+		cdm_mgr.nodes[CAM_SW_CDM_INDEX].device = hw;
+		cdm_mgr.nodes[CAM_SW_CDM_INDEX].data = data;
+		*index = cdm_mgr.cdm_count;
+		mutex_unlock(&cdm_mgr.nodes[CAM_SW_CDM_INDEX].lock);
+		cdm_mgr.cdm_count++;
+		rc = 0;
+	} else if ((type == CAM_HW_CDM) && (cdm_mgr.cdm_count > 0)) {
+		mutex_lock(&cdm_mgr.nodes[cdm_mgr.cdm_count].lock);
+		cdm_mgr.nodes[cdm_mgr.cdm_count].device = hw;
+		cdm_mgr.nodes[cdm_mgr.cdm_count].data = data;
+		*index = cdm_mgr.cdm_count;
+		mutex_unlock(&cdm_mgr.nodes[cdm_mgr.cdm_count].lock);
+		cdm_mgr.cdm_count++;
+		rc = 0;
+	} else {
+		pr_err("CDM registration failed type=%d count=%d\n",
+			type, cdm_mgr.cdm_count);
+	}
+	mutex_unlock(&cam_cdm_mgr_lock);
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+
+/*
+ * Deregister a CDM from the interface manager.  @hw and @index must
+ * match what registration recorded; mismatches fail with -1.
+ * Returns 0 on success.
+ */
+int cam_cdm_intf_deregister_hw_cdm(struct cam_hw_intf *hw,
+	struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
+	uint32_t index)
+{
+	int rc = -1;
+
+	if ((!hw) || (!data))
+		return -EINVAL;
+
+	if (get_cdm_mgr_refcount()) {
+		pr_err("CDM intf mgr get refcount failed\n");
+		return rc;
+	}
+
+	mutex_lock(&cam_cdm_mgr_lock);
+	if ((type == CAM_VIRTUAL_CDM) &&
+		(hw == cdm_mgr.nodes[CAM_SW_CDM_INDEX].device) &&
+		(index == CAM_SW_CDM_INDEX)) {
+		/*
+		 * Fix: lock the node actually being modified.  The
+		 * original locked nodes[cdm_mgr.cdm_count].lock while
+		 * clearing nodes[CAM_SW_CDM_INDEX].
+		 */
+		mutex_lock(&cdm_mgr.nodes[CAM_SW_CDM_INDEX].lock);
+		cdm_mgr.nodes[CAM_SW_CDM_INDEX].device = NULL;
+		cdm_mgr.nodes[CAM_SW_CDM_INDEX].data = NULL;
+		mutex_unlock(&cdm_mgr.nodes[CAM_SW_CDM_INDEX].lock);
+		rc = 0;
+	} else if ((type == CAM_HW_CDM) &&
+		(hw == cdm_mgr.nodes[index].device)) {
+		mutex_lock(&cdm_mgr.nodes[index].lock);
+		cdm_mgr.nodes[index].device = NULL;
+		cdm_mgr.nodes[index].data = NULL;
+		mutex_unlock(&cdm_mgr.nodes[index].lock);
+		cdm_mgr.cdm_count--;
+		rc = 0;
+	} else {
+		pr_err("CDM Deregistration failed type=%d index=%d\n",
+			type, index);
+	}
+	/* NOTE(review): the virtual branch does not decrement cdm_count
+	 * even though registration incremented it — confirm intended.
+	 */
+	mutex_unlock(&cam_cdm_mgr_lock);
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+
+/*
+ * Probe the CDM interface manager: read dt properties, initialise every
+ * node slot, mark the manager probed, then probe the virtual CDM.  On
+ * virtual-CDM probe failure all manager state is torn down again.
+ */
+static int cam_cdm_intf_probe(struct platform_device *pdev)
+{
+	int i, rc;
+
+	rc = cam_cdm_intf_mgr_soc_get_dt_properties(pdev, &cdm_mgr);
+	if (rc) {
+		pr_err("Failed to get dt properties\n");
+		return rc;
+	}
+	mutex_lock(&cam_cdm_mgr_lock);
+	for (i = 0 ; i < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM; i++) {
+		mutex_init(&cdm_mgr.nodes[i].lock);
+		cdm_mgr.nodes[i].device = NULL;
+		cdm_mgr.nodes[i].data = NULL;
+		cdm_mgr.nodes[i].refcount = 0;
+	}
+	/* probe_done must be set before cam_virtual_cdm_probe() so the
+	 * virtual CDM can register with the manager.
+	 */
+	cdm_mgr.probe_done = true;
+	cdm_mgr.refcount = 0;
+	mutex_unlock(&cam_cdm_mgr_lock);
+	rc = cam_virtual_cdm_probe(pdev);
+	if (rc) {
+		mutex_lock(&cam_cdm_mgr_lock);
+		cdm_mgr.probe_done = false;
+		for (i = 0 ; i < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM; i++) {
+			if (cdm_mgr.nodes[i].device || cdm_mgr.nodes[i].data ||
+				(cdm_mgr.nodes[i].refcount != 0))
+				pr_err("Valid node present in index=%d\n", i);
+			mutex_destroy(&cdm_mgr.nodes[i].lock);
+			cdm_mgr.nodes[i].device = NULL;
+			cdm_mgr.nodes[i].data = NULL;
+			cdm_mgr.nodes[i].refcount = 0;
+		}
+		mutex_unlock(&cam_cdm_mgr_lock);
+	}
+
+	return rc;
+}
+
+/*
+ * Remove the CDM interface manager.  Fails with -EBUSY when the manager
+ * refcount is non-zero or any node is still registered.
+ */
+static int cam_cdm_intf_remove(struct platform_device *pdev)
+{
+	int i, rc = -EBUSY;
+
+	if (get_cdm_mgr_refcount()) {
+		pr_err("CDM intf mgr get refcount failed\n");
+		return rc;
+	}
+
+	if (cam_virtual_cdm_remove(pdev)) {
+		pr_err("Virtual CDM remove failed\n");
+		/*
+		 * Fix: the original jumped to the common exit label here,
+		 * unlocking cam_cdm_mgr_lock without holding it and
+		 * leaking the refcount taken above.
+		 */
+		put_cdm_mgr_refcount();
+		return rc;
+	}
+	put_cdm_mgr_refcount();
+
+	mutex_lock(&cam_cdm_mgr_lock);
+	if (cdm_mgr.refcount != 0) {
+		pr_err("cdm manager refcount not zero %d\n",
+			cdm_mgr.refcount);
+		goto end;
+	}
+
+	for (i = 0 ; i < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM; i++) {
+		if (cdm_mgr.nodes[i].device || cdm_mgr.nodes[i].data ||
+			(cdm_mgr.nodes[i].refcount != 0)) {
+			pr_err("Valid node present in index=%d\n", i);
+			/*
+			 * Fix: the original unlocked here and then jumped
+			 * to "end", which unlocked the mutex a second
+			 * time.  "end" performs the single unlock now.
+			 */
+			goto end;
+		}
+		mutex_destroy(&cdm_mgr.nodes[i].lock);
+		cdm_mgr.nodes[i].device = NULL;
+		cdm_mgr.nodes[i].data = NULL;
+		cdm_mgr.nodes[i].refcount = 0;
+	}
+	cdm_mgr.probe_done = false;
+	rc = 0;
+
+end:
+	mutex_unlock(&cam_cdm_mgr_lock);
+	return rc;
+}
+
+/* Platform driver glue for the CDM interface manager device.
+ * Fix: nested .driver members were not indented, inconsistent with
+ * cam_hw_cdm_driver in cam_hw_cdm.c.
+ */
+static struct platform_driver cam_cdm_intf_driver = {
+	.probe = cam_cdm_intf_probe,
+	.remove = cam_cdm_intf_remove,
+	.driver = {
+		.name = "msm_cam_cdm_intf",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_cam_cdm_intf_dt_match,
+	},
+};
+
+/* Register the CDM interface platform driver at module load. */
+static int __init cam_cdm_intf_init_module(void)
+{
+	return platform_driver_register(&cam_cdm_intf_driver);
+}
+
+/* Unregister the CDM interface platform driver at module unload. */
+static void __exit cam_cdm_intf_exit_module(void)
+{
+	platform_driver_unregister(&cam_cdm_intf_driver);
+}
+
+module_init(cam_cdm_intf_init_module);
+module_exit(cam_cdm_intf_exit_module);
+MODULE_DESCRIPTION("MSM Camera CDM Intf driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf_api.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf_api.h
new file mode 100644
index 0000000..66c75f6
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf_api.h
@@ -0,0 +1,209 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CDM_API_H_
+#define _CAM_CDM_API_H_
+
+#include <media/cam_defs.h>
+#include "cam_cdm_util.h"
+#include "cam_soc_util.h"
+
+/* enum cam_cdm_id - Enum for possible CAM CDM hardwares.
+ * Ordering matters: cam_cdm_acquire() rejects any id greater than
+ * CAM_CDM_HW_ANY, so the acquirable ids must stay first.
+ */
+enum cam_cdm_id {
+	CAM_CDM_VIRTUAL,
+	CAM_CDM_HW_ANY,
+	CAM_CDM_CPAS_0,
+	CAM_CDM_IPE0,
+	CAM_CDM_IPE1,
+	CAM_CDM_BPS,
+	CAM_CDM_VFE,
+	CAM_CDM_MAX
+};
+
+/* enum cam_cdm_cb_status - Enum for possible CAM CDM callback statuses,
+ * delivered through the cam_cdm_callback registered in
+ * struct cam_cdm_acquire_data.
+ */
+enum cam_cdm_cb_status {
+	CAM_CDM_CB_STATUS_BL_SUCCESS,
+	CAM_CDM_CB_STATUS_INVALID_BL_CMD,
+	CAM_CDM_CB_STATUS_PAGEFAULT,
+	CAM_CDM_CB_STATUS_HW_RESET_ONGOING,
+	CAM_CDM_CB_STATUS_HW_RESET_DONE,
+	CAM_CDM_CB_STATUS_UNKNOWN_ERROR,
+};
+
+/* enum cam_cdm_bl_cmd_addr_type - Enum for possible CDM bl cmd addr types.
+ * Selects which member of the bl_addr union in struct cam_cdm_bl_cmd
+ * is valid.
+ */
+enum cam_cdm_bl_cmd_addr_type {
+	CAM_CDM_BL_CMD_TYPE_MEM_HANDLE,
+	CAM_CDM_BL_CMD_TYPE_HW_IOVA,
+	CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA,
+};
+
+/**
+ * struct cam_cdm_acquire_data - Cam CDM acquire data structure
+ *
+ * @identifier : Input identifier string which is the device label from dt
+ *                    like vfe, ife, jpeg etc
+ * @cell_index : Input integer identifier pointing to the cell index from dt
+ *                     of the device. This can be used to form a unique string
+ *                     with @identifier like vfe0, ife1, jpeg0 etc
+ * @id : ID of a specific or any CDM HW which needs to be acquired.
+ * @userdata : Input private data which will be returned as part
+ *             of callback.
+ * @cam_cdm_callback : Input callback pointer for triggering the
+ *                     callbacks from CDM driver
+ *                     @handle : CDM Client handle
+ *                     @userdata : Private data given at the time of acquire
+ *                     @status : Callback status
+ *                     @cookie : Cookie if the callback is gen irq status
+ * @base_array_cnt : Input number of ioremapped address pair pointing
+ *                   in base_array, needed only if selected cdm is a virtual.
+ * @base_array : Input pointer to an ioremapped address pair array,
+ *               needed only if the selected cdm is virtual.
+ * @cdm_version : CDM version is output while acquiring HW cdm and
+ *                it is Input while acquiring virtual cdm, Currently fixing it
+ *                to one version below acquire API.
+ * @ops : Output pointer updated by cdm driver to the CDM
+ *                     util ops for this HW version of CDM acquired.
+ * @handle  : Output Unique handle generated for this acquire
+ *
+ */
+/* See the kernel-doc block above for per-field semantics. */
+struct cam_cdm_acquire_data {
+	char identifier[128];
+	uint32_t cell_index;
+	enum cam_cdm_id id;
+	void *userdata;
+	void (*cam_cdm_callback)(uint32_t handle, void *userdata,
+		enum cam_cdm_cb_status status, uint32_t cookie);
+	uint32_t base_array_cnt;
+	struct cam_soc_reg_map *base_array[CAM_SOC_MAX_BLOCK];
+	struct cam_hw_version cdm_version;
+	struct cam_cdm_utils_ops *ops;
+	uint32_t handle;	/* output: filled in by the acquire path */
+};
+
+/**
+ * struct cam_cdm_bl_cmd - Cam CDM HW bl command
+ *
+ * @bl_addr : Union of all three type for CDM BL commands
+ * @mem_handle : Input mem handle of bl cmd
+ * @offset : Input offset of the actual bl cmd in the memory pointed
+ *           by mem_handle
+ * @len : Input length of the BL command; cannot be more than 1MB and
+ *           will be validated against offset+size of the memory pointed
+ *           to by mem_handle
+ *
+ */
+/* See the kernel-doc block above for per-field semantics. */
+struct cam_cdm_bl_cmd {
+	/* exactly one member is valid, chosen by the request's
+	 * enum cam_cdm_bl_cmd_addr_type
+	 */
+	union {
+		int32_t mem_handle;
+		uint32_t *hw_iova;
+		void *kernel_iova;
+	} bl_addr;
+	uint32_t  offset;
+	uint32_t  len;
+};
+
+/**
+ * struct cam_cdm_bl_request - Cam CDM HW base & length (BL) request
+ *
+ * @flag : 1 for callback needed and 0 for no callback when this BL
+ *            request is done
+ * @userdata :Input private data which will be returned as part
+ *             of callback if request for this bl request in flags.
+ * @cookie : Cookie if the callback is gen irq status
+ * @type : type of the submitted bl cmd address.
+ * @cmd_arrary_count : Input number of BL commands to be submitted to CDM
+ * @bl_cmd_array     : Input payload holding the array of BL commands
+ *                     to be submitted.
+ *
+ */
+/* See the kernel-doc block above for per-field semantics. */
+struct cam_cdm_bl_request {
+	int flag;
+	void *userdata;
+	uint32_t cookie;
+	enum cam_cdm_bl_cmd_addr_type type;
+	uint32_t cmd_arrary_count;
+	/* old-style variable-length tail: callers allocate space for
+	 * cmd_arrary_count entries
+	 */
+	struct cam_cdm_bl_cmd cmd[1];
+};
+
+/**
+ * @brief : API to get the CDM capabilities for a camera device type
+ *
+ * @identifier : Input pointer to a string which is the device label from dt
+ *                   like vfe, ife, jpeg etc, We do not need cell index
+ *                   assuming all devices of a single type maps to one SMMU
+ *                   client
+ * @cdm_handles : Input iommu handle memory pointer to update handles
+ *
+ * @return 0 on success
+ */
+int cam_cdm_get_iommu_handle(char *identifier,
+	struct cam_iommu_handle *cdm_handles);
+
+/**
+ * @brief : API to acquire a CDM
+ *
+ * @data : Input data for the CDM to be acquired
+ *
+ * @return 0 on success
+ */
+int cam_cdm_acquire(struct cam_cdm_acquire_data *data);
+
+/**
+ * @brief : API to release a previously acquired CDM
+ *
+ * @handle : Input handle for the CDM to be released
+ *
+ * @return 0 on success
+ */
+int cam_cdm_release(uint32_t handle);
+
+/**
+ * @brief : API to submit the base & length (BL's) for acquired CDM
+ *
+ * @handle : Input cdm handle to which the BL's need to be submitted.
+ * @data   : Input pointer to the BL's to be submitted
+ *
+ * @return 0 on success
+ */
+int cam_cdm_submit_bls(uint32_t handle, struct cam_cdm_bl_request *data);
+
+/**
+ * @brief : API to stream ON a previously acquired CDM,
+ *          during this we turn on/off clocks/power based on active clients.
+ *
+ * @handle : Input handle for the CDM to be released
+ *
+ * @return 0 on success
+ */
+int cam_cdm_stream_on(uint32_t handle);
+
+/**
+ * @brief : API to stream OFF a previously acquired CDM,
+ *          during this we turn on/off clocks/power based on active clients.
+ *
+ * @handle : Input handle for the CDM to be released
+ *
+ * @return 0 on success
+ */
+int cam_cdm_stream_off(uint32_t handle);
+
+/**
+ * @brief : API to reset a previously acquired CDM;
+ *          this can only be performed when the CDM is private.
+ *
+ * @handle : Input handle of the CDM to reset
+ *
+ * @return 0 on success
+ */
+int cam_cdm_reset_hw(uint32_t handle);
+
+#endif /* _CAM_CDM_API_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.c
new file mode 100644
index 0000000..0f5458c
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.c
@@ -0,0 +1,205 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-CDM-SOC %s:%d " fmt, __func__, __LINE__
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+#include "cam_soc_util.h"
+#include "cam_smmu_api.h"
+#include "cam_cdm.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+
+/* Look up a register's byte offset / attribute in the per-HW offset table. */
+#define CAM_CDM_OFFSET_FROM_REG(x, y) ((x)->offsets[y].offset)
+#define CAM_CDM_ATTR_FROM_REG(x, y) ((x)->offsets[y].attribute)
+
+/*
+ * cam_cdm_read_hw_reg() - Read a CDM register through the offset table.
+ *
+ * Returns false on success with *value set; true on failure with
+ * *value zeroed.  Note the inverted bool convention.
+ */
+bool cam_cdm_read_hw_reg(struct cam_hw_info *cdm_hw,
+	enum cam_cdm_regs reg, uint32_t *value)
+{
+	void __iomem *reg_addr;
+	struct cam_cdm *cdm = (struct cam_cdm *)cdm_hw->core_info;
+	void __iomem *base =
+		cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].mem_base;
+	resource_size_t mem_len =
+		cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].size;
+
+	/* Fix: a second debug print duplicating this info was removed. */
+	CDM_CDBG("E: b=%pK blen=%d reg=%x off=%x\n", (void *)base,
+		(int)mem_len, reg, (CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl,
+		reg)));
+
+	if ((reg > cdm->offset_tbl->offset_max_size) ||
+		(reg > cdm->offset_tbl->last_offset)) {
+		pr_err("CDM accessing invalid reg=%d\n", reg);
+		goto permission_error;
+	}
+
+	reg_addr = base + CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl, reg);
+	/*
+	 * Fix: the whole 32-bit access must end inside the mapped region;
+	 * the original "reg_addr > base + mem_len" permitted a read
+	 * starting exactly at the end of the mapping.
+	 */
+	if ((reg_addr + sizeof(uint32_t)) > (base + mem_len)) {
+		pr_err("accessing invalid mapped region %d\n", reg);
+		goto permission_error;
+	}
+	*value = cam_io_r_mb(reg_addr);
+	CDM_CDBG("X b=%pK reg=%x off=%x val=%x\n",
+		(void *)base, reg, (CAM_CDM_OFFSET_FROM_REG(
+			cdm->offset_tbl, reg)), *value);
+	return false;
+
+permission_error:
+	*value = 0;
+	return true;
+}
+
+/*
+ * cam_cdm_write_hw_reg() - Write a CDM register through the offset table.
+ *
+ * Returns false on success, true on failure (inverted bool convention,
+ * matching cam_cdm_read_hw_reg()).
+ */
+bool cam_cdm_write_hw_reg(struct cam_hw_info *cdm_hw,
+	enum cam_cdm_regs reg, uint32_t value)
+{
+	void __iomem *reg_addr;
+	struct cam_cdm *cdm = (struct cam_cdm *)cdm_hw->core_info;
+	void __iomem *base =
+		cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].mem_base;
+	resource_size_t mem_len =
+		cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].size;
+
+	CDM_CDBG("E: b=%pK reg=%x off=%x val=%x\n", (void *)base,
+		reg, (CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl, reg)), value);
+
+	if ((reg > cdm->offset_tbl->offset_max_size) ||
+		(reg > cdm->offset_tbl->last_offset)) {
+		pr_err("CDM accessing invalid reg=%d\n", reg);
+		goto permission_error;
+	}
+
+	reg_addr = base + CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl, reg);
+	/*
+	 * Fix: the 32-bit store must end inside the mapped region; the
+	 * original check allowed a write starting at base + mem_len
+	 * (same off-by-access-width as cam_cdm_read_hw_reg()).
+	 */
+	if ((reg_addr + sizeof(uint32_t)) > (base + mem_len)) {
+		pr_err("Accessing invalid region %d:%d\n",
+			reg, (CAM_CDM_OFFSET_FROM_REG(
+			cdm->offset_tbl, reg)));
+		goto permission_error;
+	}
+	cam_io_w_mb(value, reg_addr);
+	return false;
+
+permission_error:
+	return true;
+}
+
+/*
+ * Parse the "cdm-client-names" dt property into @ptr.
+ * Returns 0 on success, negative value on failure.
+ *
+ * NOTE(review): when the property is absent (count < 0) or present but
+ * empty, the loop never runs and rc stays -1 even though the state set
+ * here looks intentional — confirm callers treat that as an error on
+ * purpose.
+ */
+int cam_cdm_soc_load_dt_private(struct platform_device *pdev,
+	struct cam_cdm_private_dt_data *ptr)
+{
+	int i, rc = -1;
+
+	ptr->dt_num_supported_clients = of_property_count_strings(
+						pdev->dev.of_node,
+						"cdm-client-names");
+	CDM_CDBG("Num supported cdm_client = %d\n",
+		ptr->dt_num_supported_clients);
+	if (ptr->dt_num_supported_clients >
+		CAM_PER_CDM_MAX_REGISTERED_CLIENTS) {
+		pr_err("Invalid count of client names count=%d\n",
+			ptr->dt_num_supported_clients);
+		rc = -EINVAL;
+		return rc;
+	}
+	/* Negative count means the property is missing or unreadable. */
+	if (ptr->dt_num_supported_clients < 0) {
+		CDM_CDBG("No cdm client names found\n");
+		ptr->dt_num_supported_clients = 0;
+		ptr->dt_cdm_shared = false;
+	} else {
+		ptr->dt_cdm_shared = true;
+	}
+	for (i = 0; i < ptr->dt_num_supported_clients; i++) {
+		rc = of_property_read_string_index(pdev->dev.of_node,
+			"cdm-client-names", i, &(ptr->dt_cdm_client_name[i]));
+		CDM_CDBG("cdm-client-names[%d] = %s\n",	i,
+			ptr->dt_cdm_client_name[i]);
+		if (rc < 0) {
+			pr_err("Reading cdm-client-names failed\n");
+			break;
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * Load SoC + CDM-private dt properties for a HW CDM and resolve its
+ * register-offset table from the of_device_id match data.
+ * On error, soc_private is freed and reset so a retry starts clean.
+ * Returns 0 on success, negative value on failure.
+ */
+int cam_hw_cdm_soc_get_dt_properties(struct cam_hw_info *cdm_hw,
+	const struct of_device_id *table)
+{
+	int rc;
+	struct cam_hw_soc_info *soc_ptr;
+	const struct of_device_id *id;
+
+	/* soc_private must not already be set: this function allocates it. */
+	if (!cdm_hw  || (cdm_hw->soc_info.soc_private)
+		|| !(cdm_hw->soc_info.pdev))
+		return -EINVAL;
+
+	soc_ptr = &cdm_hw->soc_info;
+
+	rc = cam_soc_util_get_dt_properties(soc_ptr);
+	if (rc != 0) {
+		pr_err("Failed to retrieve the CDM dt properties\n");
+	} else {
+		soc_ptr->soc_private = kzalloc(
+				sizeof(struct cam_cdm_private_dt_data),
+				GFP_KERNEL);
+		if (!soc_ptr->soc_private)
+			return -ENOMEM;
+
+		rc = cam_cdm_soc_load_dt_private(soc_ptr->pdev,
+			soc_ptr->soc_private);
+		if (rc != 0) {
+			pr_err("Failed to load CDM dt private data\n");
+			goto error;
+		}
+		id = of_match_node(table, soc_ptr->pdev->dev.of_node);
+		if ((!id) || !(id->data)) {
+			pr_err("Failed to retrieve the CDM id table\n");
+			goto error;
+		}
+		CDM_CDBG("CDM Hw Id compatible =%s\n", id->compatible);
+		/* Match data carries the per-variant register offset table. */
+		((struct cam_cdm *)cdm_hw->core_info)->offset_tbl =
+			(struct cam_cdm_reg_offset_table *)id->data;
+		strlcpy(((struct cam_cdm *)cdm_hw->core_info)->name,
+			id->compatible,
+			sizeof(((struct cam_cdm *)cdm_hw->core_info)->name));
+	}
+
+	return rc;
+
+error:
+	rc = -1;
+	kfree(soc_ptr->soc_private);
+	soc_ptr->soc_private = NULL;
+	return rc;
+}
+
+/*
+ * Read the "num-hw-cdm" dt property into the interface manager.
+ * Returns the of_property_read_u32() result (0 on success).
+ */
+int cam_cdm_intf_mgr_soc_get_dt_properties(
+	struct platform_device *pdev, struct cam_cdm_intf_mgr *mgr)
+{
+	int rc;
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+		"num-hw-cdm", &mgr->dt_supported_hw_cdm);
+	CDM_CDBG("Number of HW cdm supported =%d\n", mgr->dt_supported_hw_cdm);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.h
new file mode 100644
index 0000000..765aba4
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.h
@@ -0,0 +1,28 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CDM_SOC_H_
+#define _CAM_CDM_SOC_H_
+
+int cam_hw_cdm_soc_get_dt_properties(struct cam_hw_info *cdm_hw,
+	const struct of_device_id *table);
+bool cam_cdm_read_hw_reg(struct cam_hw_info *cdm_hw,
+	enum cam_cdm_regs reg, uint32_t *value);
+bool cam_cdm_write_hw_reg(struct cam_hw_info *cdm_hw,
+	enum cam_cdm_regs reg, uint32_t value);
+int cam_cdm_intf_mgr_soc_get_dt_properties(
+	struct platform_device *pdev,
+	struct cam_cdm_intf_mgr *mgr);
+int cam_cdm_soc_load_dt_private(struct platform_device *pdev,
+	struct cam_cdm_private_dt_data *ptr);
+
+#endif /* _CAM_CDM_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c
new file mode 100644
index 0000000..034c782
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c
@@ -0,0 +1,571 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-CDM-UTIL %s:%d " fmt, __func__, __LINE__
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+
+#include "cam_cdm_intf_api.h"
+#include "cam_cdm_util.h"
+#include "cam_cdm.h"
+#include "cam_io_util.h"
+
+#define CAM_CDM_DWORD 4
+
+/*
+ * Command header sizes in 32-bit dwords, indexed by enum cam_cdm_command.
+ * The array is dimensioned for the software (private) command range too,
+ * but only the hardware opcodes (0x0..0xb) are initialized here; the SW
+ * slots are zero.
+ */
+static unsigned int CDMCmdHeaderSizes[
+	CAM_CDM_CMD_PRIVATE_BASE + CAM_CDM_SW_CMD_COUNT] = {
+	0, /* UNUSED */
+	3, /* DMI */
+	0, /* UNUSED (NOT_DEFINED) */
+	2, /* RegContinuous */
+	1, /* RegRandom */
+	2, /* BUFFER_INDIRECT */
+	2, /* GenerateIRQ */
+	3, /* WaitForEvent */
+	1, /* ChangeBase */
+	1, /* PERF_CTRL */
+	3, /* DMI32 */
+	3, /* DMI64 */
+};
+
+/**
+ * struct cdm_regrandom_cmd - Definition for CDM random register command.
+ * @count: Number of register writes
+ * @reserved: reserved bits
+ * @cmd: Command ID (CDMCmd)
+ */
+struct cdm_regrandom_cmd {
+	unsigned int count    : 16;
+	unsigned int reserved : 8;
+	unsigned int cmd      : 8;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_regcontinuous_cmd - Definition for a CDM register range command.
+ * @count: Number of register writes
+ * @reserved0: reserved bits
+ * @cmd: Command ID (CDMCmd)
+ * @offset: Start address of the range of registers
+ * @reserved1: reserved bits
+ */
+struct cdm_regcontinuous_cmd {
+	unsigned int count     : 16;
+	unsigned int reserved0 : 8;
+	unsigned int cmd       : 8;
+	unsigned int offset    : 24;
+	unsigned int reserved1 : 8;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_dmi_cmd - Definition for a CDM DMI command.
+ * @length: Number of bytes in LUT - 1
+ * @reserved: reserved bits
+ * @cmd: Command ID (CDMCmd)
+ * @addr: Address of the LUT in memory
+ * @DMIAddr: Address of the target DMI config register
+ * @DMISel: DMI identifier
+ */
+struct cdm_dmi_cmd {
+	unsigned int length   : 16;
+	unsigned int reserved : 8;
+	unsigned int cmd      : 8;
+	unsigned int addr;
+	unsigned int DMIAddr  : 24;
+	unsigned int DMISel   : 8;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_indirect_cmd - Definition for a CDM indirect buffer command.
+ * @length: Number of bytes in buffer - 1
+ * @reserved: reserved bits
+ * @cmd: Command ID (CDMCmd)
+ * @addr:  Device address of the indirect buffer
+ */
+struct cdm_indirect_cmd {
+	unsigned int length     : 16;
+	unsigned int reserved   : 8;
+	unsigned int cmd        : 8;
+	unsigned int addr;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_changebase_cmd - Definition for CDM base address change command.
+ * @base: Base address to be changed to
+ * @cmd: Command ID (CDMCmd)
+ */
+struct cdm_changebase_cmd {
+	unsigned int base   : 24;
+	unsigned int cmd    : 8;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_wait_event_cmd - Definition for a CDM wait-event command.
+ * @mask: Mask for the events
+ * @id: ID to read back for debug
+ * @iw_reserved: reserved bits
+ * @iw: iw AHB write bit
+ * @cmd: Command ID (CDMCmd)
+ * @offset: Offset to where data is written
+ * @offset_reserved: reserved bits
+ * @data: data returned in IRQ_USR_DATA
+ */
+struct cdm_wait_event_cmd {
+	unsigned int mask             : 8;
+	unsigned int id               : 8;
+	unsigned int iw_reserved      : 7;
+	unsigned int iw               : 1;
+	unsigned int cmd              : 8;
+	unsigned int offset           : 24;
+	unsigned int offset_reserved  : 8;
+	unsigned int data;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_genirq_cmd - Definition for a CDM generate-IRQ command.
+ * @reserved: reserved bits
+ * @cmd: Command ID (CDMCmd)
+ * @userdata: userdata returned in IRQ_USR_DATA
+ */
+struct cdm_genirq_cmd {
+	unsigned int reserved   : 24;
+	unsigned int cmd        : 8;
+	unsigned int userdata;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_perf_ctrl_cmd - Definition for CDM perf control command.
+ * @perf: perf command
+ * @reserved: reserved bits
+ * @cmd: Command ID (CDMCmd)
+ */
+struct cdm_perf_ctrl_cmd {
+	unsigned int perf     : 2;
+	unsigned int reserved : 22;
+	unsigned int cmd      : 8;
+} __attribute__((__packed__));
+
+/* Header size (in dwords) for @command. No bounds check is performed:
+ * callers must pass a valid index below
+ * CAM_CDM_CMD_PRIVATE_BASE + CAM_CDM_SW_CMD_COUNT.
+ */
+uint32_t cdm_get_cmd_header_size(unsigned int command)
+{
+	return CDMCmdHeaderSizes[command];
+}
+
+/* Total size in dwords of a reg-continuous command carrying @numVals. */
+uint32_t cdm_required_size_reg_continuous(uint32_t  numVals)
+{
+	return cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT) + numVals;
+}
+
+/* Total size in dwords of a reg-random command: header plus one
+ * <register, value> dword pair per entry.
+ */
+uint32_t cdm_required_size_reg_random(uint32_t numRegVals)
+{
+	return cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM) +
+		(2 * numRegVals);
+}
+
+/* DMI command header size in dwords (payload lives in the referenced LUT). */
+uint32_t cdm_required_size_dmi(void)
+{
+	return cdm_get_cmd_header_size(CAM_CDM_CMD_DMI);
+}
+
+/* Generate-IRQ command size in dwords. */
+uint32_t cdm_required_size_genirq(void)
+{
+	return cdm_get_cmd_header_size(CAM_CDM_CMD_GEN_IRQ);
+}
+
+/* Indirect-buffer command header size in dwords. */
+uint32_t cdm_required_size_indirect(void)
+{
+	return cdm_get_cmd_header_size(CAM_CDM_CMD_BUFF_INDIRECT);
+}
+
+/* Change-base command size in dwords. */
+uint32_t cdm_required_size_changebase(void)
+{
+	return cdm_get_cmd_header_size(CAM_CDM_CMD_CHANGE_BASE);
+}
+
+/* Byte offset of the buffer-address field inside a DMI command header. */
+uint32_t cdm_offsetof_dmi_addr(void)
+{
+	return offsetof(struct cdm_dmi_cmd, addr);
+}
+
+/* Byte offset of the buffer-address field inside an indirect command. */
+uint32_t cdm_offsetof_indirect_addr(void)
+{
+	return offsetof(struct cdm_indirect_cmd, addr);
+}
+
+/*
+ * cdm_write_regcontinuous() - Emit a reg-continuous command: @numVals
+ * values written to consecutive registers starting at @reg.
+ * Returns a pointer just past the written command.
+ */
+uint32_t *cdm_write_regcontinuous(uint32_t *pCmdBuffer, uint32_t reg,
+	uint32_t numVals, uint32_t *pVals)
+{
+	struct cdm_regcontinuous_cmd *hdr =
+		(struct cdm_regcontinuous_cmd *)pCmdBuffer;
+	uint32_t *payload;
+	uint32_t idx;
+
+	/* Two-dword header describing the register range. */
+	hdr->cmd = CAM_CDM_CMD_REG_CONT;
+	hdr->count = numVals;
+	hdr->offset = reg;
+	hdr->reserved0 = 0;
+	hdr->reserved1 = 0;
+
+	/* One value per register, laid out right after the header. */
+	payload = pCmdBuffer + cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT);
+	for (idx = 0; idx < numVals; idx++)
+		payload[idx] = pVals[idx];
+
+	return payload + numVals;
+}
+
+/*
+ * cdm_write_regrandom() - Emit a reg-random command: @numRegVals
+ * <register, value> pairs copied after a one-dword header.
+ * @pRegVals: even indices are register offsets, odd indices are values.
+ *
+ * Returns a pointer just past the written command (header + 2*numRegVals
+ * dwords), matching the other cdm_write_* helpers.
+ */
+uint32_t *cdm_write_regrandom(uint32_t *pCmdBuffer, uint32_t numRegVals,
+	uint32_t *pRegVals)
+{
+	uint32_t i;
+	uint32_t *dst, *src;
+	struct cdm_regrandom_cmd *pHeader =
+		(struct cdm_regrandom_cmd *)pCmdBuffer;
+
+	pHeader->count = numRegVals;
+	pHeader->cmd = CAM_CDM_CMD_REG_RANDOM;
+	pHeader->reserved = 0;
+
+	pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM);
+	dst = pCmdBuffer;
+	src = pRegVals;
+	for (i = 0; i < numRegVals; i++) {
+		*dst++ = *src++; /* register offset */
+		*dst++ = *src++; /* value */
+	}
+
+	/*
+	 * Fix: return the advanced pointer (past the pairs), not the
+	 * pre-copy pCmdBuffer; otherwise a caller appending another
+	 * command would overwrite the pairs written above.
+	 */
+	return dst;
+}
+
+/*
+ * cdm_write_dmi() - Emit a DMI command referencing a LUT of @length bytes
+ * at device address @dmiBufferAddr. Returns a pointer just past the
+ * three-dword command header.
+ */
+uint32_t *cdm_write_dmi(uint32_t *pCmdBuffer, uint8_t dmiCmd,
+	uint32_t DMIAddr, uint8_t DMISel, uint32_t dmiBufferAddr,
+	uint32_t length)
+{
+	struct cdm_dmi_cmd *dmi = (struct cdm_dmi_cmd *)pCmdBuffer;
+
+	/* Hardware encodes the LUT length as (bytes - 1). */
+	dmi->length = length - 1;
+	dmi->cmd = dmiCmd;
+	dmi->addr = dmiBufferAddr;
+	dmi->DMIAddr = DMIAddr;
+	dmi->DMISel = DMISel;
+
+	return pCmdBuffer + cdm_get_cmd_header_size(CAM_CDM_CMD_DMI);
+}
+
+/*
+ * cdm_write_indirect() - Emit an indirect-buffer command pointing at
+ * @length bytes of commands at device address @indirectBufAddr.
+ * Returns a pointer just past the written command.
+ */
+uint32_t *cdm_write_indirect(uint32_t *pCmdBuffer, uint32_t indirectBufAddr,
+	uint32_t length)
+{
+	struct cdm_indirect_cmd *ind = (struct cdm_indirect_cmd *)pCmdBuffer;
+
+	/* Hardware encodes the buffer length as (bytes - 1). */
+	ind->length = length - 1;
+	ind->cmd = CAM_CDM_CMD_BUFF_INDIRECT;
+	ind->addr = indirectBufAddr;
+
+	return pCmdBuffer + cdm_get_cmd_header_size(CAM_CDM_CMD_BUFF_INDIRECT);
+}
+
+/*
+ * cdm_write_changebase() - Emit a change-base command switching the CDM
+ * to the 24-bit device base @base. Returns a pointer just past the
+ * single-dword command.
+ */
+uint32_t *cdm_write_changebase(uint32_t *pCmdBuffer, uint32_t base)
+{
+	struct cdm_changebase_cmd *cb =
+		(struct cdm_changebase_cmd *)pCmdBuffer;
+
+	cb->base = base;
+	cb->cmd = CAM_CDM_CMD_CHANGE_BASE;
+
+	return pCmdBuffer + cdm_get_cmd_header_size(CAM_CDM_CMD_CHANGE_BASE);
+}
+
+/*
+ * cdm_write_genirq() - Emit a generate-IRQ command; @userdata is the
+ * cookie returned in IRQ_USR_DATA when the IRQ fires.
+ */
+void cdm_write_genirq(uint32_t *pCmdBuffer, uint32_t userdata)
+{
+	struct cdm_genirq_cmd *irq = (struct cdm_genirq_cmd *)pCmdBuffer;
+
+	irq->userdata = userdata;
+	irq->cmd = CAM_CDM_CMD_GEN_IRQ;
+}
+
+/*
+ * CDM 1.70 ops table. Designated initializers keep each function pointer
+ * tied to its named slot even if struct cam_cdm_utils_ops members are
+ * ever reordered (positional init would silently mis-wire the table).
+ */
+struct cam_cdm_utils_ops CDM170_ops = {
+	.cdm_get_cmd_header_size = cdm_get_cmd_header_size,
+	.cdm_required_size_reg_continuous = cdm_required_size_reg_continuous,
+	.cdm_required_size_reg_random = cdm_required_size_reg_random,
+	.cdm_required_size_dmi = cdm_required_size_dmi,
+	.cdm_required_size_genirq = cdm_required_size_genirq,
+	.cdm_required_size_indirect = cdm_required_size_indirect,
+	.cdm_required_size_changebase = cdm_required_size_changebase,
+	.cdm_offsetof_dmi_addr = cdm_offsetof_dmi_addr,
+	.cdm_offsetof_indirect_addr = cdm_offsetof_indirect_addr,
+	.cdm_write_regcontinuous = cdm_write_regcontinuous,
+	.cdm_write_regrandom = cdm_write_regrandom,
+	.cdm_write_dmi = cdm_write_dmi,
+	.cdm_write_indirect = cdm_write_indirect,
+	.cdm_write_changebase = cdm_write_changebase,
+	.cdm_write_genirq = cdm_write_genirq,
+};
+
+/*
+ * cam_cdm_data_alignement_check() - Compile-time check that each packed
+ * command struct is exactly CAM_CDM_DWORD (4) bytes times the header size
+ * declared in CDMCmdHeaderSizes; a mismatch would corrupt every command
+ * stream built by the cdm_write_* helpers.
+ * (Name keeps the existing "alignement" spelling; renaming would break
+ * external callers.)
+ */
+void cam_cdm_data_alignement_check(void)
+{
+	BUILD_BUG_ON(sizeof(struct cdm_dmi_cmd) !=
+		(CAM_CDM_DWORD * cdm_get_cmd_header_size(CAM_CDM_CMD_DMI)));
+	BUILD_BUG_ON(sizeof(struct cdm_regcontinuous_cmd) !=
+		(CAM_CDM_DWORD *
+		cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT)));
+	BUILD_BUG_ON(sizeof(struct cdm_regrandom_cmd) !=
+		(CAM_CDM_DWORD *
+		cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM)));
+	BUILD_BUG_ON(sizeof(struct cdm_indirect_cmd) !=
+		(CAM_CDM_DWORD *
+		cdm_get_cmd_header_size(CAM_CDM_CMD_BUFF_INDIRECT)));
+	BUILD_BUG_ON(sizeof(struct cdm_genirq_cmd) !=
+		(CAM_CDM_DWORD *
+		cdm_get_cmd_header_size(CAM_CDM_CMD_GEN_IRQ)));
+	BUILD_BUG_ON(sizeof(struct cdm_wait_event_cmd) !=
+		(CAM_CDM_DWORD *
+		cdm_get_cmd_header_size(CAM_CDM_CMD_WAIT_EVENT)));
+	BUILD_BUG_ON(sizeof(struct cdm_changebase_cmd) !=
+		(CAM_CDM_DWORD *
+		cdm_get_cmd_header_size(CAM_CDM_CMD_CHANGE_BASE)));
+	BUILD_BUG_ON(sizeof(struct  cdm_perf_ctrl_cmd) !=
+		(CAM_CDM_DWORD *
+		cdm_get_cmd_header_size(CAM_CDM_CMD_PERF_CTRL)));
+	/* DMI32/DMI64 share the cdm_dmi_cmd layout. */
+	BUILD_BUG_ON(sizeof(struct cdm_dmi_cmd) !=
+		(CAM_CDM_DWORD * cdm_get_cmd_header_size(CAM_CDM_CMD_DMI_32)));
+	BUILD_BUG_ON(sizeof(struct cdm_dmi_cmd) !=
+		(CAM_CDM_DWORD * cdm_get_cmd_header_size(CAM_CDM_CMD_DMI_64)));
+}
+
+/*
+ * cam_cdm_get_ioremap_from_base() - Translate a device (CAM) base address
+ * into its ioremapped kernel base.
+ * @hw_base:         device base address to look up
+ * @base_array_size: number of valid entries in @base_table
+ * @base_table:      per-block register maps; entries may be NULL
+ * @device_base:     out: ioremapped base on success, untouched on failure
+ *
+ * Returns 0 on a match, -1 when @hw_base is not found (callers only test
+ * for non-zero). Loop index is unsigned to match @base_array_size and
+ * avoid a signed/unsigned comparison.
+ */
+int cam_cdm_get_ioremap_from_base(uint32_t hw_base,
+	uint32_t base_array_size,
+	struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK],
+	void __iomem **device_base)
+{
+	uint32_t i;
+
+	for (i = 0; i < base_array_size; i++) {
+		if (!base_table[i])
+			continue;
+		CDM_CDBG("In loop %d ioremap for %x addr=%x\n",
+			i, base_table[i]->mem_cam_base, hw_base);
+		if (base_table[i]->mem_cam_base == hw_base) {
+			*device_base = base_table[i]->mem_base;
+			return 0;
+		}
+	}
+
+	return -1;
+}
+
+/*
+ * cam_cdm_util_reg_cont_write() - Software-execute a reg-continuous
+ * command: copy count values from the command payload to consecutive
+ * registers starting at base_addr + offset.
+ * @used_bytes: out: bytes consumed from @cmd_buf (payload + header).
+ *
+ * Returns 0 on success, -EINVAL on a malformed header or bad @base_addr.
+ *
+ * NOTE(review): the first size check compares @cmd_buf_size against the
+ * header size in dwords while the second treats payload size in bytes —
+ * confirm the intended unit of cmd_buf_size against callers.
+ */
+static int cam_cdm_util_reg_cont_write(void __iomem *base_addr,
+	uint32_t *cmd_buf, uint32_t cmd_buf_size, uint32_t *used_bytes)
+{
+	int ret = 0;
+	uint32_t *data;
+	struct cdm_regcontinuous_cmd *reg_cont;
+
+	if ((cmd_buf_size < cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT)) ||
+		(!base_addr)) {
+		pr_err(" invalid base addr and data length  %d %pK\n",
+			cmd_buf_size, base_addr);
+		return -EINVAL;
+	}
+
+	/* Validate count (1..0x10000) and that the payload fits. */
+	reg_cont = (struct cdm_regcontinuous_cmd *)cmd_buf;
+	if ((!reg_cont->count) || (reg_cont->count > 0x10000) ||
+		(((reg_cont->count * sizeof(uint32_t)) +
+			cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT)) >
+			cmd_buf_size)) {
+		pr_err(" buffer size %d is not sufficient for count%d\n",
+			cmd_buf_size, reg_cont->count);
+		return -EINVAL;
+	}
+	/* Payload starts right after the header (pointer math in dwords). */
+	data = cmd_buf + cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT);
+	cam_io_memcpy(base_addr + reg_cont->offset,	data,
+		reg_cont->count * sizeof(uint32_t));
+
+	/* Consumed = payload bytes + header (4 bytes per header dword). */
+	*used_bytes = (reg_cont->count * sizeof(uint32_t)) +
+		(4 * cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT));
+
+	return ret;
+}
+
+/*
+ * cam_cdm_util_reg_random_write() - Software-execute a reg-random
+ * command: write count <offset, value> pairs relative to @base_addr.
+ * @used_bytes: out: bytes consumed from @cmd_buf (pairs + header).
+ *
+ * Returns 0 on success, -EINVAL on a NULL base or malformed header.
+ */
+static int cam_cdm_util_reg_random_write(void __iomem *base_addr,
+	uint32_t *cmd_buf, uint32_t cmd_buf_size, uint32_t *used_bytes)
+{
+	uint32_t i;
+	struct cdm_regrandom_cmd *reg_random;
+	uint32_t *data;
+
+	if (!base_addr) {
+		pr_err("invalid base address\n");
+		return -EINVAL;
+	}
+
+	/* Validate count (1..0x10000) and that all pairs fit the buffer. */
+	reg_random = (struct cdm_regrandom_cmd *) cmd_buf;
+	if ((!reg_random->count) || (reg_random->count > 0x10000) ||
+		(((reg_random->count * (sizeof(uint32_t) * 2)) +
+		cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM)) >
+			cmd_buf_size)) {
+		pr_err("invalid reg_count  %d cmd_buf_size %d\n",
+			reg_random->count, cmd_buf_size);
+		return -EINVAL;
+	}
+	/* Pairs start right after the one-dword header. */
+	data = cmd_buf + cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM);
+
+	for (i = 0; i < reg_random->count; i++) {
+		/* data[0] = register offset, data[1] = value. */
+		CDM_DUMP_CDBG("reg random: offset 0x%llx, value 0x%x\n",
+			((uint64_t) base_addr + data[0]), data[1]);
+		cam_io_w(data[1], base_addr + data[0]);
+		data += 2;
+	}
+
+	/* Consumed = pair bytes + header (4 bytes per header dword). */
+	*used_bytes = ((reg_random->count * (sizeof(uint32_t) * 2)) +
+		(4 * cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM)));
+
+	return 0;
+}
+
+/*
+ * cam_cdm_util_swd_dmi_write() - Software-execute a SW DMI command by
+ * streaming the inline payload into the target DMI data register(s).
+ * @cdm_cmd_type: CAM_CDM_CMD_SWD_DMI_32 or CAM_CDM_CMD_SWD_DMI_64.
+ * @used_bytes: out: bytes consumed from @cmd_buf (header + payload).
+ *
+ * The header's length field encodes (payload bytes - 1). The loops assume
+ * the payload is a whole multiple of 8 (64-bit) or 4 (32-bit) bytes —
+ * a trailing remainder is silently dropped; presumably callers guarantee
+ * alignment. @base_addr is not NULL-checked here; the caller validates it.
+ */
+static int cam_cdm_util_swd_dmi_write(uint32_t cdm_cmd_type,
+	void __iomem *base_addr, uint32_t *cmd_buf, uint32_t cmd_buf_size,
+	uint32_t *used_bytes)
+{
+	uint32_t i;
+	struct cdm_dmi_cmd *swd_dmi;
+	uint32_t *data;
+
+	swd_dmi = (struct cdm_dmi_cmd *)cmd_buf;
+
+	if (cmd_buf_size < (cdm_required_size_dmi() + swd_dmi->length + 1)) {
+		pr_err("invalid CDM_SWD_DMI length %d\n", swd_dmi->length + 1);
+		return -EINVAL;
+	}
+	data = cmd_buf + cdm_required_size_dmi();
+
+	if (cdm_cmd_type == CAM_CDM_CMD_SWD_DMI_64) {
+		/* 64-bit entries: low dword first, then high dword. */
+		for (i = 0; i < (swd_dmi->length + 1)/8; i++) {
+			cam_io_w_mb(data[0], base_addr +
+				swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_LO_OFFSET);
+			cam_io_w_mb(data[1], base_addr +
+				swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_HI_OFFSET);
+			data += 2;
+		}
+	} else {
+		/* 32-bit entries: one dword each. */
+		for (i = 0; i < (swd_dmi->length + 1)/4; i++) {
+			cam_io_w_mb(data[0], base_addr +
+				swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_LO_OFFSET);
+			data += 1;
+		}
+	}
+	*used_bytes = (4 * cdm_required_size_dmi()) + swd_dmi->length + 1;
+
+	return 0;
+}
+
+/*
+ * cam_cdm_util_cmd_buf_write() - Parse a CDM command buffer and execute
+ * each command in software (virtual CDM path).
+ * @current_device_base: in/out: ioremapped register base; updated by
+ *                       CHANGE_BASE commands via @base_table lookup.
+ * @cmd_buf:             command stream (dword array)
+ * @cmd_buf_size:        remaining stream size, decremented as commands
+ *                       are consumed (bytes — see NOTE in the reg-cont
+ *                       helper about unit consistency)
+ * @base_table:          device-base -> ioremap translation table
+ * @bl_tag:              BL tag (currently unused in this parser)
+ *
+ * Supported: REG_CONT, REG_RANDOM, SWD_DMI_32/64, CHANGE_BASE; anything
+ * else aborts with -EINVAL. Returns 0 on success or the first helper
+ * error. NOTE(review): total_cmd_buf_size is assigned but never read.
+ */
+int cam_cdm_util_cmd_buf_write(void __iomem **current_device_base,
+	uint32_t *cmd_buf, uint32_t cmd_buf_size,
+	struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK],
+	uint32_t base_array_size, uint8_t bl_tag)
+{
+	int ret = 0;
+	uint32_t cdm_cmd_type = 0, total_cmd_buf_size = 0;
+	uint32_t used_bytes = 0;
+
+	total_cmd_buf_size = cmd_buf_size;
+
+	while (cmd_buf_size > 0) {
+		CDM_CDBG("cmd data=%x\n", *cmd_buf);
+		/* Opcode lives in bits 31:24 of the first command dword. */
+		cdm_cmd_type = (*cmd_buf >> CAM_CDM_COMMAND_OFFSET);
+		switch (cdm_cmd_type) {
+		case CAM_CDM_CMD_REG_CONT: {
+			ret = cam_cdm_util_reg_cont_write(*current_device_base,
+				cmd_buf, cmd_buf_size, &used_bytes);
+			if (ret)
+				break;
+
+			/* Advance by consumed bytes (cmd_buf is dwords). */
+			if (used_bytes > 0) {
+				cmd_buf_size -= used_bytes;
+				cmd_buf += used_bytes/4;
+			}
+			}
+			break;
+		case CAM_CDM_CMD_REG_RANDOM: {
+			ret = cam_cdm_util_reg_random_write(
+				*current_device_base, cmd_buf, cmd_buf_size,
+				&used_bytes);
+			if (ret)
+				break;
+
+			if (used_bytes > 0) {
+				cmd_buf_size -= used_bytes;
+				cmd_buf += used_bytes / 4;
+			}
+			}
+			break;
+		case CAM_CDM_CMD_SWD_DMI_32:
+		case CAM_CDM_CMD_SWD_DMI_64: {
+			/* DMI needs a valid device base (set by CHANGE_BASE). */
+			if (*current_device_base == 0) {
+				pr_err("Got SWI DMI cmd =%d for invalid hw\n",
+					cdm_cmd_type);
+				ret = -EINVAL;
+				break;
+			}
+			ret = cam_cdm_util_swd_dmi_write(cdm_cmd_type,
+				*current_device_base, cmd_buf, cmd_buf_size,
+				&used_bytes);
+			if (ret)
+				break;
+
+			if (used_bytes > 0) {
+				cmd_buf_size -= used_bytes;
+				cmd_buf += used_bytes / 4;
+			}
+			}
+			break;
+		case CAM_CDM_CMD_CHANGE_BASE: {
+			struct cdm_changebase_cmd *change_base_cmd =
+				(struct cdm_changebase_cmd *)cmd_buf;
+
+			/* Swap *current_device_base to the new block. */
+			ret = cam_cdm_get_ioremap_from_base(
+				change_base_cmd->base, base_array_size,
+				base_table, current_device_base);
+			if (ret != 0) {
+				pr_err("Get ioremap change base failed %x\n",
+					change_base_cmd->base);
+				break;
+			}
+			CDM_CDBG("Got ioremap for %x addr=%pK\n",
+				change_base_cmd->base,
+				current_device_base);
+			cmd_buf_size -= (4 *
+				cdm_required_size_changebase());
+			cmd_buf += cdm_required_size_changebase();
+			}
+			break;
+		default:
+			/* NOTE(review): "0%x" in the message likely meant "0x%x". */
+			pr_err(" unsupported cdm_cmd_type type 0%x\n",
+			cdm_cmd_type);
+			ret = -EINVAL;
+			break;
+		}
+
+		if (ret < 0)
+			break;
+	}
+
+	return ret;
+}
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.h
new file mode 100644
index 0000000..09d0d63
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.h
@@ -0,0 +1,161 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CDM_UTIL_H_
+#define _CAM_CDM_UTIL_H_
+
+#define CAM_CDM_SW_CMD_COUNT    2
+#define CAM_CMD_LENGTH_MASK     0xFFFF
+#define CAM_CDM_COMMAND_OFFSET  24
+
+#define CAM_CDM_DMI_DATA_HI_OFFSET   8
+#define CAM_CDM_DMI_DATA_LO_OFFSET   12
+
+/*
+ * CDM command opcodes, carried in bits 31:24 of the first command dword
+ * (CAM_CDM_COMMAND_OFFSET). 0x0..0xb are hardware opcodes;
+ * CAM_CDM_CMD_PRIVATE_BASE and above are software-only commands executed
+ * by the virtual CDM parser, never submitted to hardware.
+ */
+enum cam_cdm_command {
+	CAM_CDM_CMD_UNUSED = 0x0,
+	CAM_CDM_CMD_DMI = 0x1,
+	CAM_CDM_CMD_NOT_DEFINED = 0x2,
+	CAM_CDM_CMD_REG_CONT = 0x3,
+	CAM_CDM_CMD_REG_RANDOM = 0x4,
+	CAM_CDM_CMD_BUFF_INDIRECT = 0x5,
+	CAM_CDM_CMD_GEN_IRQ = 0x6,
+	CAM_CDM_CMD_WAIT_EVENT = 0x7,
+	CAM_CDM_CMD_CHANGE_BASE = 0x8,
+	CAM_CDM_CMD_PERF_CTRL = 0x9,
+	CAM_CDM_CMD_DMI_32 = 0xa,
+	CAM_CDM_CMD_DMI_64 = 0xb,
+	CAM_CDM_CMD_PRIVATE_BASE = 0xc,
+	CAM_CDM_CMD_SWD_DMI_32 = (CAM_CDM_CMD_PRIVATE_BASE + 0x64),
+	CAM_CDM_CMD_SWD_DMI_64 = (CAM_CDM_CMD_PRIVATE_BASE + 0x65),
+	CAM_CDM_CMD_PRIVATE_BASE_MAX = 0x7F
+};
+
+/**
+ * struct cam_cdm_utils_ops - Camera CDM util ops
+ *
+ * @cdm_get_cmd_header_size: Returns the size of the given command header
+ *                           in DWORDs.
+ *      @command Command ID
+ *      @return Size of the command in DWORDs
+ *
+ * @cdm_required_size_reg_continuous: Calculates the size of a reg-continuous
+ *                                    command in dwords.
+ *      @numVals Number of continuous values
+ *      @return Size in dwords
+ *
+ * @cdm_required_size_reg_random: Calculates the size of a reg-random command
+ *                                in dwords.
+ *      @numRegVals  Number of register/value pairs
+ *      @return Size in dwords
+ *
+ * @cdm_required_size_dmi: Calculates the size of a DMI command in dwords.
+ *      @return Size in dwords
+ *
+ * @cdm_required_size_genirq: Calculates size of a Genirq command in dwords.
+ *      @return Size in dwords
+ *
+ * @cdm_required_size_indirect: Calculates the size of an indirect command
+ *                              in dwords.
+ *      @return Size in dwords
+ *
+ * @cdm_required_size_changebase: Calculates the size of a change-base command
+ *                                in dwords.
+ *      @return Size in dwords
+ *
+ * @cdm_offsetof_dmi_addr: Returns the offset of address field in the DMI
+ *                         command header.
+ *      @return Offset of addr field
+ *
+ * @cdm_offsetof_indirect_addr: Returns the offset of address field in the
+ *                              indirect command header.
+ *      @return Offset of addr field
+ *
+ * @cdm_write_regcontinuous: Writes a command into the command buffer.
+ *      @pCmdBuffer:  Pointer to command buffer
+ *      @reg: Beginning of the register address range where
+ *            values will be written.
+ *      @numVals: Number of values (registers) that will be written
+ *      @pVals : An array of values that will be written
+ *      @return Pointer in command buffer pointing past the written commands
+ *
+ * @cdm_write_regrandom: Writes a command into the command buffer in
+ *                       register/value pairs.
+ *      @pCmdBuffer: Pointer to command buffer
+ *      @numRegVals: Number of register/value pairs that will be written
+ *      @pRegVals: An array of register/value pairs that will be written
+ *                 The even indices are registers and the odd indices
+ *                 arevalues, e.g., {reg1, val1, reg2, val2, ...}.
+ *      @return Pointer in command buffer pointing past the written commands
+ *
+ * @cdm_write_dmi: Writes a DMI command into the command bufferM.
+ *      @pCmdBuffer: Pointer to command buffer
+ *      @dmiCmd: DMI command
+ *      @DMIAddr: Address of the DMI
+ *      @DMISel: Selected bank that the DMI will write to
+ *      @length: Size of data in bytes
+ *      @return Pointer in command buffer pointing past the written commands
+ *
+ * @cdm_write_indirect: Writes a indirect command into the command buffer.
+ *      @pCmdBuffer: Pointer to command buffer
+ *      @indirectBufferAddr: Device address of the indirect cmd buffer.
+ *      @length: Size of data in bytes
+ *      @return Pointer in command buffer pointing past the written commands
+ *
+ * @cdm_write_changebase: Writes a changing CDM (address) base command into
+ *                        the command buffer.
+ *      @pCmdBuffer: Pointer to command buffer
+ *      @base: New base (device) address
+ *      @return Pointer in command buffer pointing past the written commands
+ *
+ * @cdm_write_genirq:  Writes a gen irq command into the command buffer.
+ *      @pCmdBuffer: Pointer to command buffer
+ *      @userdata: userdata or cookie return by hardware during irq.
+ */
+struct cam_cdm_utils_ops {
+	uint32_t (*cdm_get_cmd_header_size)(unsigned int command);
+	uint32_t (*cdm_required_size_reg_continuous)(uint32_t numVals);
+	uint32_t (*cdm_required_size_reg_random)(uint32_t numRegVals);
+	uint32_t (*cdm_required_size_dmi)(void);
+	uint32_t (*cdm_required_size_genirq)(void);
+	uint32_t (*cdm_required_size_indirect)(void);
+	uint32_t (*cdm_required_size_changebase)(void);
+	uint32_t (*cdm_offsetof_dmi_addr)(void);
+	uint32_t (*cdm_offsetof_indirect_addr)(void);
+	uint32_t *(*cdm_write_regcontinuous)(uint32_t *pCmdBuffer,
+		uint32_t reg, uint32_t numVals, uint32_t *pVals);
+	uint32_t *(*cdm_write_regrandom)(uint32_t *pCmdBuffer,
+		uint32_t numRegVals, uint32_t *pRegVals);
+	uint32_t *(*cdm_write_dmi)(uint32_t *pCmdBuffer, uint8_t dmiCmd,
+		uint32_t DMIAddr, uint8_t DMISel, uint32_t dmiBufferAddr,
+		uint32_t length);
+	uint32_t *(*cdm_write_indirect)(uint32_t *pCmdBuffer,
+		uint32_t indirectBufferAddr, uint32_t length);
+	uint32_t *(*cdm_write_changebase)(uint32_t *pCmdBuffer,
+		uint32_t base);
+	void (*cdm_write_genirq)(uint32_t *pCmdBuffer, uint32_t userdata);
+};
+
+#endif /* _CAM_CDM_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual.h
new file mode 100644
index 0000000..ed07218
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CDM_VIRTUAL_H_
+#define _CAM_CDM_VIRTUAL_H_
+
+#include "cam_cdm_intf_api.h"
+
+int cam_virtual_cdm_probe(struct platform_device *pdev);
+int cam_virtual_cdm_remove(struct platform_device *pdev);
+int cam_cdm_util_cmd_buf_write(void __iomem **current_device_base,
+	uint32_t *cmd_buf, uint32_t cmd_buf_size,
+	struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK],
+	uint32_t base_array_size, uint8_t bl_tag);
+
+#endif /* _CAM_CDM_VIRTUAL_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c
new file mode 100644
index 0000000..e34bfc2
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c
@@ -0,0 +1,374 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-CDM-VIRTUAL %s:%d " fmt, __func__, __LINE__
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+#include "cam_soc_util.h"
+#include "cam_smmu_api.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_cdm.h"
+#include "cam_cdm_util.h"
+#include "cam_cdm_virtual.h"
+#include "cam_cdm_core_common.h"
+#include "cam_cdm_soc.h"
+#include "cam_io_util.h"
+
+#define CAM_CDM_VIRTUAL_NAME "qcom,cam_virtual_cdm"
+
+/*
+ * cam_virtual_cdm_work() - Deferred IRQ handler for the virtual CDM.
+ * Dispatches on the payload's irq_status bits: 0x2 (inline/gen IRQ)
+ * completes the matching BL request and notifies the client; 0x1
+ * (reset done) completes the reset waiter. Bit values match the
+ * CAM_CDM_IRQ_STATUS_INFO_* masks. Frees @payload when done.
+ */
+static void cam_virtual_cdm_work(struct work_struct *work)
+{
+	struct cam_cdm_work_payload *payload;
+	struct cam_hw_info *cdm_hw;
+	struct cam_cdm *core;
+
+	payload = container_of(work, struct cam_cdm_work_payload, work);
+	if (payload) {
+		cdm_hw = payload->hw;
+		core = (struct cam_cdm *)cdm_hw->core_info;
+		if (payload->irq_status & 0x2) {
+			struct cam_cdm_bl_cb_request_entry *node;
+
+			CDM_CDBG("CDM HW Gen/inline IRQ with data=%x\n",
+				payload->irq_data);
+			/* hw_mutex protects the pending BL request list. */
+			mutex_lock(&cdm_hw->hw_mutex);
+			node = cam_cdm_find_request_by_bl_tag(
+				payload->irq_data,
+				&core->bl_request_list);
+			if (node) {
+				if (node->request_type ==
+					CAM_HW_CDM_BL_CB_CLIENT) {
+					cam_cdm_notify_clients(cdm_hw,
+						CAM_CDM_CB_STATUS_BL_SUCCESS,
+						(void *)node);
+				} else if (node->request_type ==
+					CAM_HW_CDM_BL_CB_INTERNAL) {
+					/* Internal BLs are not expected here. */
+					pr_err("Invalid node=%pK %d\n", node,
+						node->request_type);
+				}
+				list_del_init(&node->entry);
+				kfree(node);
+			} else {
+				pr_err("Invalid node for inline irq\n");
+			}
+			mutex_unlock(&cdm_hw->hw_mutex);
+		}
+		if (payload->irq_status & 0x1) {
+			CDM_CDBG("CDM HW reset done IRQ\n");
+			complete(&core->reset_complete);
+		}
+		kfree(payload);
+	}
+
+}
+
+/*
+ * cam_virtual_cdm_submit_bl() - Software-execute a client's BL (buffer
+ * list) request: map each entry, parse its command stream via
+ * cam_cdm_util_cmd_buf_write(), and, when a completion callback was
+ * requested, queue a simulated inline-IRQ work item after the LAST
+ * entry. Returns 0 on success, negative on the first failure.
+ */
+int cam_virtual_cdm_submit_bl(struct cam_hw_info *cdm_hw,
+	struct cam_cdm_hw_intf_cmd_submit_bl *req,
+	struct cam_cdm_client *client)
+{
+	int i, rc = -1;
+	struct cam_cdm_bl_request *cdm_cmd = req->data;
+	struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
+
+	mutex_lock(&client->lock);
+	for (i = 0; i < req->data->cmd_arrary_count ; i++) {
+		uint64_t vaddr_ptr = 0;
+		size_t len = 0;
+
+		/*
+		 * Fix: reject a zero OR oversized length. The original
+		 * used &&, which can never be true, so both invalid
+		 * cases slipped through.
+		 */
+		if ((!cdm_cmd->cmd[i].len) ||
+			(cdm_cmd->cmd[i].len > 0x100000)) {
+			pr_err("len(%d) is invalid count=%d total cnt=%d\n",
+				cdm_cmd->cmd[i].len, i,
+				req->data->cmd_arrary_count);
+			rc = -1;
+			break;
+		}
+		if (req->data->type == CAM_CDM_BL_CMD_TYPE_MEM_HANDLE) {
+			rc = cam_mem_get_cpu_buf(
+				cdm_cmd->cmd[i].bl_addr.mem_handle, &vaddr_ptr,
+				&len);
+		} else if (req->data->type ==
+			CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA) {
+			rc = 0;
+			vaddr_ptr =
+				(uint64_t)cdm_cmd->cmd[i].bl_addr.kernel_iova;
+			len = cdm_cmd->cmd[i].offset + cdm_cmd->cmd[i].len;
+		} else {
+			pr_err("Only mem hdl/Kernel va type is supported %d\n",
+				req->data->type);
+			rc = -1;
+			break;
+		}
+
+		if ((!rc) && (vaddr_ptr) && (len) &&
+			(len >= cdm_cmd->cmd[i].offset)) {
+			CDM_CDBG("hdl=%x vaddr=%pK offset=%d cmdlen=%d:%zu\n",
+				cdm_cmd->cmd[i].bl_addr.mem_handle,
+				(void *)vaddr_ptr, cdm_cmd->cmd[i].offset,
+				cdm_cmd->cmd[i].len, len);
+			rc = cam_cdm_util_cmd_buf_write(
+				&client->changebase_addr,
+				((uint32_t *)vaddr_ptr +
+					((cdm_cmd->cmd[i].offset)/4)),
+				cdm_cmd->cmd[i].len, client->data.base_array,
+				client->data.base_array_cnt, core->bl_tag);
+			if (rc) {
+				pr_err("write failed for cnt=%d:%d\n",
+					i, req->data->cmd_arrary_count);
+				break;
+			}
+		} else {
+			pr_err("Sanity check failed for hdl=%x len=%zu:%d\n",
+				cdm_cmd->cmd[i].bl_addr.mem_handle, len,
+				cdm_cmd->cmd[i].offset);
+			pr_err("Sanity check failed for cmd_count=%d cnt=%d\n",
+				i, req->data->cmd_arrary_count);
+			rc = -1;
+			break;
+		}
+		if (!rc) {
+			struct cam_cdm_work_payload *payload;
+
+			CDM_CDBG("write BL success for cnt=%d with tag=%d\n",
+				i, core->bl_tag);
+			/*
+			 * Fix: queue the completion for the LAST entry
+			 * (count - 1). The original compared i against
+			 * cmd_arrary_count, which never matches inside
+			 * the loop, so the callback was never queued.
+			 */
+			if ((true == req->data->flag) &&
+				(i == (req->data->cmd_arrary_count - 1))) {
+				struct cam_cdm_bl_cb_request_entry *node;
+
+				node = kzalloc(sizeof(
+					struct cam_cdm_bl_cb_request_entry),
+					GFP_KERNEL);
+				if (!node) {
+					rc = -ENOMEM;
+					break;
+				}
+				node->request_type = CAM_HW_CDM_BL_CB_CLIENT;
+				node->client_hdl = req->handle;
+				node->cookie = req->data->cookie;
+				node->bl_tag = core->bl_tag;
+				node->userdata = req->data->userdata;
+				mutex_lock(&cdm_hw->hw_mutex);
+				list_add_tail(&node->entry,
+					&core->bl_request_list);
+				mutex_unlock(&cdm_hw->hw_mutex);
+
+				/* Simulate the inline IRQ the HW would raise. */
+				payload = kzalloc(sizeof(
+					struct cam_cdm_work_payload),
+					GFP_ATOMIC);
+				if (payload) {
+					payload->irq_status = 0x2;
+					payload->irq_data = core->bl_tag;
+					payload->hw = cdm_hw;
+					INIT_WORK((struct work_struct *)
+						&payload->work,
+						cam_virtual_cdm_work);
+					queue_work(core->work_queue,
+						&payload->work);
+					}
+			}
+			core->bl_tag++;
+			CDM_CDBG("Now commit the BL nothing for virtual\n");
+			if (!rc && (core->bl_tag == 63))
+				core->bl_tag = 0;
+		}
+	}
+	mutex_unlock(&client->lock);
+	return rc;
+}
+
+/*
+ * cam_virtual_cdm_probe() - Probe the virtual (software) CDM: allocate
+ * hw/core state, load DT private data, create the work queue, register
+ * with CPAS and the CDM interface manager. All failure paths unwind via
+ * the goto ladder at the bottom.
+ *
+ * Fix: alloc_workqueue() can return NULL; previously the result was
+ * unchecked and a NULL queue would later be passed to queue_work()/
+ * flush_workqueue()/destroy_workqueue().
+ */
+int cam_virtual_cdm_probe(struct platform_device *pdev)
+{
+	struct cam_hw_info *cdm_hw = NULL;
+	struct cam_hw_intf *cdm_hw_intf = NULL;
+	struct cam_cdm *cdm_core = NULL;
+	struct cam_cdm_private_dt_data *soc_private = NULL;
+	int rc;
+	struct cam_cpas_register_params cpas_parms;
+
+	cdm_hw_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!cdm_hw_intf)
+		return -ENOMEM;
+
+	cdm_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!cdm_hw) {
+		kfree(cdm_hw_intf);
+		return -ENOMEM;
+	}
+
+	cdm_hw->core_info = kzalloc(sizeof(struct cam_cdm), GFP_KERNEL);
+	if (!cdm_hw->core_info) {
+		kfree(cdm_hw);
+		kfree(cdm_hw_intf);
+		return -ENOMEM;
+	}
+	cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	cdm_hw->soc_info.pdev = pdev;
+	cdm_hw_intf->hw_type = CAM_VIRTUAL_CDM;
+	cdm_hw->soc_info.soc_private = kzalloc(
+			sizeof(struct cam_cdm_private_dt_data), GFP_KERNEL);
+	if (!cdm_hw->soc_info.soc_private) {
+		rc = -ENOMEM;
+		goto soc_load_failed;
+	}
+
+	rc = cam_cdm_soc_load_dt_private(pdev, cdm_hw->soc_info.soc_private);
+	if (rc != 0) {
+		pr_err("Failed to load CDM dt private data\n");
+		rc = -1;
+		kfree(cdm_hw->soc_info.soc_private);
+		cdm_hw->soc_info.soc_private = NULL;
+		goto soc_load_failed;
+	}
+
+	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+	soc_private = (struct cam_cdm_private_dt_data *)
+					cdm_hw->soc_info.soc_private;
+	if (soc_private->dt_cdm_shared == true)
+		cdm_core->flags = CAM_CDM_FLAG_SHARED_CDM;
+	else
+		cdm_core->flags = CAM_CDM_FLAG_PRIVATE_CDM;
+
+	cdm_core->bl_tag = 0;
+	INIT_LIST_HEAD(&cdm_core->bl_request_list);
+	init_completion(&cdm_core->reset_complete);
+	cdm_hw_intf->hw_priv = cdm_hw;
+	cdm_hw_intf->hw_ops.get_hw_caps = cam_cdm_get_caps;
+	cdm_hw_intf->hw_ops.init = NULL;
+	cdm_hw_intf->hw_ops.deinit = NULL;
+	cdm_hw_intf->hw_ops.start = cam_cdm_stream_start;
+	cdm_hw_intf->hw_ops.stop = cam_cdm_stream_stop;
+	cdm_hw_intf->hw_ops.read = NULL;
+	cdm_hw_intf->hw_ops.write = NULL;
+	cdm_hw_intf->hw_ops.process_cmd = cam_cdm_process_cmd;
+
+	CDM_CDBG("type %d index %d\n", cdm_hw_intf->hw_type,
+		cdm_hw_intf->hw_idx);
+
+	platform_set_drvdata(pdev, cdm_hw_intf);
+
+	cdm_hw->open_count = 0;
+	cdm_core->iommu_hdl.non_secure = -1;
+	cdm_core->iommu_hdl.secure = -1;
+	mutex_init(&cdm_hw->hw_mutex);
+	spin_lock_init(&cdm_hw->hw_lock);
+	init_completion(&cdm_hw->hw_complete);
+	mutex_lock(&cdm_hw->hw_mutex);
+	cdm_core->id = CAM_CDM_VIRTUAL;
+	memcpy(cdm_core->name, CAM_CDM_VIRTUAL_NAME,
+		sizeof(CAM_CDM_VIRTUAL_NAME));
+	cdm_core->work_queue = alloc_workqueue(cdm_core->name,
+		WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS,
+		CAM_CDM_INFLIGHT_WORKS);
+	if (!cdm_core->work_queue) {
+		pr_err("Workqueue allocation failed\n");
+		rc = -ENOMEM;
+		goto workqueue_create_failed;
+	}
+	cdm_core->ops = NULL;
+
+	cpas_parms.cam_cpas_client_cb = cam_cdm_cpas_cb;
+	cpas_parms.cell_index = cdm_hw->soc_info.index;
+	cpas_parms.dev = &pdev->dev;
+	cpas_parms.userdata = cdm_hw_intf;
+	strlcpy(cpas_parms.identifier, "cam-cdm-intf",
+		CAM_HW_IDENTIFIER_LENGTH);
+	rc = cam_cpas_register_client(&cpas_parms);
+	if (rc) {
+		pr_err("Virtual CDM CPAS registration failed\n");
+		goto cpas_registration_failed;
+	}
+	CDM_CDBG("CPAS registration successful handle=%d\n",
+		cpas_parms.client_handle);
+	cdm_core->cpas_handle = cpas_parms.client_handle;
+
+	CDM_CDBG("CDM%d probe successful\n", cdm_hw_intf->hw_idx);
+
+	rc = cam_cdm_intf_register_hw_cdm(cdm_hw_intf,
+			soc_private, CAM_VIRTUAL_CDM, &cdm_core->index);
+	if (rc) {
+		pr_err("Virtual CDM Interface registration failed\n");
+		goto intf_registration_failed;
+	}
+	CDM_CDBG("CDM%d registered to intf successful\n", cdm_hw_intf->hw_idx);
+	mutex_unlock(&cdm_hw->hw_mutex);
+
+	return 0;
+intf_registration_failed:
+	cam_cpas_unregister_client(cdm_core->cpas_handle);
+cpas_registration_failed:
+	flush_workqueue(cdm_core->work_queue);
+	destroy_workqueue(cdm_core->work_queue);
+workqueue_create_failed:
+	kfree(cdm_hw->soc_info.soc_private);
+	mutex_unlock(&cdm_hw->hw_mutex);
+	mutex_destroy(&cdm_hw->hw_mutex);
+soc_load_failed:
+	kfree(cdm_hw->core_info);
+	kfree(cdm_hw);
+	kfree(cdm_hw_intf);
+	return rc;
+}
+
+/*
+ * cam_virtual_cdm_remove() - Tear down the virtual CDM: unregister from
+ * CPAS and the CDM interface manager, drain and destroy the work queue,
+ * then free everything allocated in probe. Returns 0 on success or the
+ * first unregister error (-EBUSY if drvdata/core state is missing, in
+ * which case earlier resources are deliberately left alone).
+ */
+int cam_virtual_cdm_remove(struct platform_device *pdev)
+{
+	struct cam_hw_info *cdm_hw = NULL;
+	struct cam_hw_intf *cdm_hw_intf = NULL;
+	struct cam_cdm *cdm_core = NULL;
+	int rc = -EBUSY;
+
+	cdm_hw_intf = platform_get_drvdata(pdev);
+	if (!cdm_hw_intf) {
+		pr_err("Failed to get dev private data\n");
+		return rc;
+	}
+
+	cdm_hw = cdm_hw_intf->hw_priv;
+	if (!cdm_hw) {
+		pr_err("Failed to get virtual private data for type=%d idx=%d\n",
+			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
+		return rc;
+	}
+
+	cdm_core = cdm_hw->core_info;
+	if (!cdm_core) {
+		pr_err("Failed to get virtual core data for type=%d idx=%d\n",
+			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
+		return rc;
+	}
+
+	rc = cam_cpas_unregister_client(cdm_core->cpas_handle);
+	if (rc) {
+		pr_err("CPAS unregister failed\n");
+		return rc;
+	}
+
+	rc = cam_cdm_intf_deregister_hw_cdm(cdm_hw_intf,
+			cdm_hw->soc_info.soc_private, CAM_VIRTUAL_CDM,
+			cdm_core->index);
+	if (rc) {
+		pr_err("Virtual CDM Interface de-registration failed\n");
+		return rc;
+	}
+
+	/* Drain pending cam_virtual_cdm_work items before freeing state. */
+	flush_workqueue(cdm_core->work_queue);
+	destroy_workqueue(cdm_core->work_queue);
+	mutex_destroy(&cdm_hw->hw_mutex);
+	kfree(cdm_hw->soc_info.soc_private);
+	kfree(cdm_hw->core_info);
+	kfree(cdm_hw);
+	kfree(cdm_hw_intf);
+	rc = 0;
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_hw_cdm170_reg.h b/drivers/media/platform/msm/camera/cam_cdm/cam_hw_cdm170_reg.h
new file mode 100644
index 0000000..183b657
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_hw_cdm170_reg.h
@@ -0,0 +1,142 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_HW_CDM170_REG_H_
+#define _CAM_HW_CDM170_REG_H_
+
+#define CAM_CDM_REG_OFFSET_FIRST 0x0
+#define CAM_CDM_REG_OFFSET_LAST 0x200
+#define CAM_CDM_REGS_COUNT 0x30
+#define CAM_CDM_HWFIFO_SIZE 0x40
+
+#define CAM_CDM_OFFSET_HW_VERSION 0x0
+#define CAM_CDM_OFFSET_TITAN_VERSION 0x4
+#define CAM_CDM_OFFSET_RST_CMD 0x10
+#define CAM_CDM_OFFSET_CGC_CFG 0x14
+#define CAM_CDM_OFFSET_CORE_CFG 0x18
+#define CAM_CDM_OFFSET_CORE_EN 0x1c
+#define CAM_CDM_OFFSET_FE_CFG 0x20
+#define CAM_CDM_OFFSET_IRQ_MASK 0x30
+#define CAM_CDM_OFFSET_IRQ_CLEAR 0x34
+#define CAM_CDM_OFFSET_IRQ_CLEAR_CMD 0x38
+#define CAM_CDM_OFFSET_IRQ_SET 0x3c
+#define CAM_CDM_OFFSET_IRQ_SET_CMD 0x40
+
+#define CAM_CDM_OFFSET_IRQ_STATUS 0x44
+#define CAM_CDM_IRQ_STATUS_INFO_RST_DONE_MASK 0x1
+#define CAM_CDM_IRQ_STATUS_INFO_INLINE_IRQ_MASK 0x2
+#define CAM_CDM_IRQ_STATUS_INFO_BL_DONE_MASK 0x4
+#define CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK 0x10000
+#define CAM_CDM_IRQ_STATUS_ERROR_OVER_FLOW_MASK 0x20000
+#define CAM_CDM_IRQ_STATUS_ERROR_AHB_BUS_MASK 0x40000
+
+#define CAM_CDM_OFFSET_BL_FIFO_BASE_REG 0x50
+#define CAM_CDM_OFFSET_BL_FIFO_LEN_REG 0x54
+#define CAM_CDM_OFFSET_BL_FIFO_STORE_REG 0x58
+#define CAM_CDM_OFFSET_BL_FIFO_CFG 0x5c
+#define CAM_CDM_OFFSET_BL_FIFO_RB 0x60
+#define CAM_CDM_OFFSET_BL_FIFO_BASE_RB 0x64
+#define CAM_CDM_OFFSET_BL_FIFO_LEN_RB 0x68
+#define CAM_CDM_OFFSET_BL_FIFO_PENDING_REQ_RB 0x6c
+#define CAM_CDM_OFFSET_IRQ_USR_DATA 0x80
+#define CAM_CDM_OFFSET_WAIT_STATUS 0x84
+#define CAM_CDM_OFFSET_SCRATCH_0_REG 0x90
+#define CAM_CDM_OFFSET_SCRATCH_1_REG 0x94
+#define CAM_CDM_OFFSET_SCRATCH_2_REG 0x98
+#define CAM_CDM_OFFSET_SCRATCH_3_REG 0x9c
+#define CAM_CDM_OFFSET_SCRATCH_4_REG 0xa0
+#define CAM_CDM_OFFSET_SCRATCH_5_REG 0xa4
+#define CAM_CDM_OFFSET_SCRATCH_6_REG 0xa8
+#define CAM_CDM_OFFSET_SCRATCH_7_REG 0xac
+#define CAM_CDM_OFFSET_LAST_AHB_ADDR 0xd0
+#define CAM_CDM_OFFSET_LAST_AHB_DATA 0xd4
+#define CAM_CDM_OFFSET_CORE_DBUG 0xd8
+#define CAM_CDM_OFFSET_LAST_AHB_ERR_ADDR 0xe0
+#define CAM_CDM_OFFSET_LAST_AHB_ERR_DATA 0xe4
+#define CAM_CDM_OFFSET_CURRENT_BL_BASE 0xe8
+#define CAM_CDM_OFFSET_CURRENT_BL_LEN 0xec
+#define CAM_CDM_OFFSET_CURRENT_USED_AHB_BASE 0xf0
+#define CAM_CDM_OFFSET_DEBUG_STATUS 0xf4
+#define CAM_CDM_OFFSET_BUS_MISR_CFG_0 0x100
+#define CAM_CDM_OFFSET_BUS_MISR_CFG_1 0x104
+#define CAM_CDM_OFFSET_BUS_MISR_RD_VAL 0x108
+#define CAM_CDM_OFFSET_PERF_MON_CTRL 0x110
+#define CAM_CDM_OFFSET_PERF_MON_0 0x114
+#define CAM_CDM_OFFSET_PERF_MON_1 0x118
+#define CAM_CDM_OFFSET_PERF_MON_2 0x11c
+#define CAM_CDM_OFFSET_SPARE 0x200
+
+/*
+ * Always make sure the register offsets below stay aligned with the
+ * enum cam_cdm_regs offsets
+ */
+struct cam_cdm_reg_offset cam170_cpas_cdm_register_offsets[] = {
+	{ CAM_CDM_OFFSET_HW_VERSION, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_TITAN_VERSION, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_RST_CMD, CAM_REG_ATTR_WRITE },
+	{ CAM_CDM_OFFSET_CGC_CFG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_CORE_CFG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_CORE_EN, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_FE_CFG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_IRQ_MASK, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_IRQ_CLEAR, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_IRQ_CLEAR_CMD, CAM_REG_ATTR_WRITE },
+	{ CAM_CDM_OFFSET_IRQ_SET, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_IRQ_SET_CMD, CAM_REG_ATTR_WRITE },
+	{ CAM_CDM_OFFSET_IRQ_STATUS, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_IRQ_USR_DATA, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_BL_FIFO_BASE_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_BL_FIFO_LEN_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_BL_FIFO_STORE_REG, CAM_REG_ATTR_WRITE },
+	{ CAM_CDM_OFFSET_BL_FIFO_CFG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_BL_FIFO_RB, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_BL_FIFO_BASE_RB, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_BL_FIFO_LEN_RB, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_BL_FIFO_PENDING_REQ_RB, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_WAIT_STATUS, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_SCRATCH_0_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_SCRATCH_1_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_SCRATCH_2_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_SCRATCH_3_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_SCRATCH_4_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_SCRATCH_5_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_SCRATCH_6_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_SCRATCH_7_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_LAST_AHB_ADDR, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_LAST_AHB_DATA, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_CORE_DBUG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_LAST_AHB_ERR_ADDR, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_LAST_AHB_ERR_DATA, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_CURRENT_BL_BASE, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_CURRENT_BL_LEN, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_CURRENT_USED_AHB_BASE, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_DEBUG_STATUS, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_BUS_MISR_CFG_0, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_BUS_MISR_CFG_1, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_BUS_MISR_RD_VAL, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_PERF_MON_CTRL, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_PERF_MON_0, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_PERF_MON_1, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_PERF_MON_2, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_SPARE, CAM_REG_ATTR_READ_WRITE }
+};
+
+struct cam_cdm_reg_offset_table cam170_cpas_cdm_offset_table = {
+	.first_offset = 0x0,
+	.last_offset = 0x200,
+	.reg_count = 0x30,
+	.offsets = cam170_cpas_cdm_register_offsets,
+	.offset_max_size = (sizeof(cam170_cpas_cdm_register_offsets)/
+		sizeof(struct cam_cdm_reg_offset)),
+};
+
+#endif /* _CAM_HW_CDM170_REG_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_core/Makefile b/drivers/media/platform/msm/camera/cam_core/Makefile
index 417de13..60f94d1 100644
--- a/drivers/media/platform/msm/camera/cam_core/Makefile
+++ b/drivers/media/platform/msm/camera/cam_core/Makefile
@@ -1,3 +1,4 @@
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
 
-obj-$(CONFIG_SPECTRA_CAMERA) += cam_context.o cam_node.o cam_subdev.o
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_context.o cam_context_utils.o cam_node.o cam_subdev.o
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.c b/drivers/media/platform/msm/camera/cam_core/cam_context.c
index 56b34f5..17b3c7c 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.c
@@ -358,4 +358,3 @@
 
 	return 0;
 }
-
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.h b/drivers/media/platform/msm/camera/cam_core/cam_context.h
index c7329cf..37a5c03 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.h
@@ -22,6 +22,8 @@
 
 /* max request number */
 #define CAM_CTX_REQ_MAX              20
+#define CAM_CTX_CFG_MAX              20
+#define CAM_CTX_RES_MAX              20
 
 /**
  * enum cam_ctx_state -  context top level states
@@ -43,13 +45,29 @@
  * @status:                Request status
  * @request_id:            Request id
  * @req_priv:              Derived request object
+ * @hw_update_entries:     Hardware update entries
+ * @num_hw_update_entries: Number of hardware update entries
+ * @in_map_entries:        Entries for in fences
+ * @num_in_map_entries:    Number of in map entries
+ * @out_map_entries:       Entries for out fences
+ * @num_out_map_entries:   Number of out map entries
+ * @num_in_acked:          Number of in fence acked
+ * @num_out_acked:         Number of out fence acked
  *
  */
 struct cam_ctx_request {
-	struct list_head   list;
-	uint32_t           status;
-	uint64_t           request_id;
-	void              *req_priv;
+	struct list_head              list;
+	uint32_t                      status;
+	uint64_t                      request_id;
+	void                          *req_priv;
+	struct cam_hw_update_entry    hw_update_entries[CAM_CTX_CFG_MAX];
+	uint32_t                      num_hw_update_entries;
+	struct cam_hw_fence_map_entry in_map_entries[CAM_CTX_CFG_MAX];
+	uint32_t                      num_in_map_entries;
+	struct cam_hw_fence_map_entry out_map_entries[CAM_CTX_CFG_MAX];
+	uint32_t                      num_out_map_entries;
+	uint32_t                      num_in_acked;
+	uint32_t                      num_out_acked;
 };
 
 /**
@@ -132,6 +150,7 @@
  * @state:                 Current state for top level state machine
  * @state_machine:         Top level state machine
  * @ctx_priv:              Private context pointer
+ * @ctxt_to_hw_map:        Context to hardware mapping pointer
  *
  */
 struct cam_context {
@@ -159,6 +178,7 @@
 	struct cam_ctx_ops          *state_machine;
 
 	void                        *ctx_priv;
+	void                        *ctxt_to_hw_map;
 };
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
new file mode 100644
index 0000000..21a61ff
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
@@ -0,0 +1,481 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CTXT-UTILS %s:%d " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <media/cam_sync.h>
+#include <media/cam_defs.h>
+
+#include "cam_sync_api.h"
+#include "cam_req_mgr_util.h"
+#include "cam_mem_mgr.h"
+#include "cam_node.h"
+#include "cam_context.h"
+
+int cam_context_buf_done_from_hw(struct cam_context *ctx,
+	void *done_event_data, uint32_t bubble_state)
+{
+	int rc = 0;
+	int i, j;
+	struct cam_ctx_request *req;
+	struct cam_hw_done_event_data *done =
+		(struct cam_hw_done_event_data *)done_event_data;
+
+	if (list_empty(&ctx->active_req_list)) {
+		pr_err("Buf done with no active request\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	req = list_first_entry(&ctx->active_req_list,
+		struct cam_ctx_request, list);
+
+	for (i = 0; i < done->num_handles; i++) {
+		for (j = 0; j < req->num_out_map_entries; j++) {
+			if (done->resource_handle[i] ==
+				req->out_map_entries[j].resource_handle)
+				break;
+		}
+
+		if (j == req->num_out_map_entries) {
+			pr_err("Can not find matching lane handle 0x%x\n",
+				done->resource_handle[i]);
+			rc = -EINVAL;
+			continue;
+		}
+
+		cam_sync_signal(req->out_map_entries[j].sync_id,
+			CAM_SYNC_STATE_SIGNALED_SUCCESS);
+		req->num_out_acked++;
+		trace_printk("Sync success req %lld, reset sync id 0x%x\n",
+			req->request_id,
+			req->out_map_entries[j].sync_id);
+
+		req->out_map_entries[j].sync_id = -1;
+	}
+
+	if (req->num_out_acked == req->num_out_map_entries) {
+		list_del_init(&req->list);
+		list_add_tail(&req->list, &ctx->free_req_list);
+	}
+
+end:
+	return rc;
+}
+
+int cam_context_apply_req_to_hw(struct cam_context *ctx,
+	struct cam_req_mgr_apply_request *apply)
+{
+	int rc = 0;
+	struct cam_ctx_request *req;
+	struct cam_hw_config_args cfg;
+
+	if (!ctx->hw_mgr_intf) {
+		pr_err("HW interface is not ready\n");
+		rc = -EFAULT;
+		goto end;
+	}
+
+	if (list_empty(&ctx->pending_req_list)) {
+		pr_err("No available request for Apply id %lld\n",
+			apply->request_id);
+		rc = -EFAULT;
+		goto end;
+	}
+
+	spin_lock(&ctx->lock);
+	req = list_first_entry(&ctx->pending_req_list,
+		struct cam_ctx_request, list);
+	list_del_init(&req->list);
+	spin_unlock(&ctx->lock);
+
+	cfg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+	cfg.hw_update_entries = req->hw_update_entries;
+	cfg.num_hw_update_entries = req->num_hw_update_entries;
+	cfg.out_map_entries = req->out_map_entries;
+	cfg.num_out_map_entries = req->num_out_map_entries;
+	cfg.priv = (void *)&req->request_id;
+	list_add_tail(&req->list, &ctx->active_req_list);
+
+	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
+	if (rc)
+		list_del_init(&req->list);
+
+end:
+	return rc;
+}
+
+int32_t cam_context_release_dev_to_hw(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc = 0;
+	int i;
+	struct cam_hw_release_args arg;
+	struct cam_ctx_request *req;
+
+	if (!ctx->hw_mgr_intf) {
+		pr_err("HW interface is not ready\n");
+		rc = -EFAULT;
+		goto end;
+	}
+
+	if (ctx->ctxt_to_hw_map) {
+		arg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+		ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
+			&arg);
+		ctx->ctxt_to_hw_map = NULL;
+	}
+
+	ctx->session_hdl = 0;
+	ctx->dev_hdl = 0;
+	ctx->link_hdl = 0;
+
+	while (!list_empty(&ctx->active_req_list)) {
+		req = list_first_entry(&ctx->active_req_list,
+			struct cam_ctx_request, list);
+		list_del_init(&req->list);
+		pr_warn("signal fence in active list. fence num %d\n",
+			req->num_out_map_entries);
+		for (i = 0; i < req->num_out_map_entries; i++) {
+			if (req->out_map_entries[i].sync_id != -1)
+				cam_sync_signal(req->out_map_entries[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+		}
+		list_add_tail(&req->list, &ctx->free_req_list);
+	}
+
+	/* flush the pending queue */
+	while (!list_empty(&ctx->pending_req_list)) {
+		req = list_first_entry(&ctx->pending_req_list,
+			struct cam_ctx_request, list);
+		list_del_init(&req->list);
+		pr_debug("signal fence in pending list. fence num %d\n",
+			req->num_out_map_entries);
+		for (i = 0; i < req->num_out_map_entries; i++)
+			if (req->out_map_entries[i].sync_id != -1)
+				cam_sync_signal(req->out_map_entries[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+		list_add_tail(&req->list, &ctx->free_req_list);
+	}
+
+end:
+	return rc;
+}
+
+void cam_context_sync_callback(int32_t sync_obj, int status, void *data)
+{
+	struct cam_context *ctx = data;
+	struct cam_ctx_request *req = NULL;
+	struct cam_req_mgr_apply_request apply;
+
+	spin_lock(&ctx->lock);
+	if (!list_empty(&ctx->pending_req_list))
+		req = list_first_entry(&ctx->pending_req_list,
+			struct cam_ctx_request, list);
+	spin_unlock(&ctx->lock);
+
+	if (!req) {
+		pr_err("No more request obj free\n");
+		return;
+	}
+
+	req->num_in_acked++;
+	if (req->num_in_acked == req->num_in_map_entries) {
+		apply.request_id = req->request_id;
+		trace_printk("async cb for request :%llu",
+			req->request_id);
+		cam_context_apply_req_to_hw(ctx, &apply);
+	}
+}
+
+int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,
+	struct cam_config_dev_cmd *cmd)
+{
+	int rc = 0;
+	struct cam_ctx_request *req = NULL;
+	struct cam_hw_prepare_update_args cfg;
+	uint64_t packet_addr;
+	struct cam_packet *packet;
+	size_t len = 0;
+	int32_t i = 0;
+
+	if (!ctx->hw_mgr_intf) {
+		pr_err("HW interface is not ready\n");
+		rc = -EFAULT;
+		goto end;
+	}
+
+	spin_lock(&ctx->lock);
+	if (!list_empty(&ctx->free_req_list)) {
+		req = list_first_entry(&ctx->free_req_list,
+			struct cam_ctx_request, list);
+		list_del_init(&req->list);
+	}
+	spin_unlock(&ctx->lock);
+
+	if (!req) {
+		pr_err("No more request obj free\n");
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	memset(req, 0, sizeof(*req));
+	INIT_LIST_HEAD(&req->list);
+
+	/* for config dev, only memory handle is supported */
+	/* map packet from the memhandle */
+	rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle,
+		(uint64_t *) &packet_addr,
+		&len);
+	if (rc != 0) {
+		pr_err("Can not get packet address\n");
+		rc = -EINVAL;
+		goto free_req;
+	}
+
+	packet = (struct cam_packet *) (packet_addr + cmd->offset);
+	pr_debug("pack_handle %llx\n", cmd->packet_handle);
+	pr_debug("packet address is 0x%llx\n", packet_addr);
+	pr_debug("packet with length %zu, offset 0x%llx\n",
+		len, cmd->offset);
+	pr_debug("Packet request id 0x%llx\n",
+		packet->header.request_id);
+	pr_debug("Packet size 0x%x\n", packet->header.size);
+	pr_debug("packet op %d\n", packet->header.op_code);
+
+	/* preprocess the configuration */
+	memset(&cfg, 0, sizeof(cfg));
+	cfg.packet = packet;
+	cfg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+	cfg.max_hw_update_entries = CAM_CTX_CFG_MAX;
+	cfg.num_hw_update_entries = req->num_hw_update_entries;
+	cfg.hw_update_entries = req->hw_update_entries;
+	cfg.max_out_map_entries = CAM_CTX_CFG_MAX;
+	cfg.out_map_entries = req->out_map_entries;
+	cfg.max_in_map_entries = CAM_CTX_CFG_MAX;
+	cfg.in_map_entries = req->in_map_entries;
+
+	rc = ctx->hw_mgr_intf->hw_prepare_update(
+		ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
+	if (rc != 0) {
+		pr_err("Prepare config packet failed in HW layer\n");
+		rc = -EFAULT;
+		goto free_req;
+	}
+	req->num_hw_update_entries = cfg.num_hw_update_entries;
+	req->num_out_map_entries = cfg.num_out_map_entries;
+	req->num_in_map_entries = cfg.num_in_map_entries;
+	req->request_id = packet->header.request_id;
+	req->status = 1;
+	req->req_priv = cfg.priv;
+
+	if (req->num_in_map_entries > 0) {
+		spin_lock(&ctx->lock);
+		list_add_tail(&req->list, &ctx->pending_req_list);
+		spin_unlock(&ctx->lock);
+		for (i = 0; i < req->num_in_map_entries; i++) {
+			trace_printk("register in fence callback: %d\n",
+				req->in_map_entries[i].sync_id);
+			rc = cam_sync_register_callback(
+					cam_context_sync_callback,
+					(void *)ctx,
+					req->in_map_entries[i].sync_id);
+			pr_debug("register in fence callback: %d ret = %d\n",
+				req->in_map_entries[i].sync_id, rc);
+		}
+		goto end;
+	}
+
+	return rc;
+
+free_req:
+	spin_lock(&ctx->lock);
+	list_add_tail(&req->list, &ctx->free_req_list);
+	spin_unlock(&ctx->lock);
+end:
+	pr_debug("Config dev successful\n");
+	return rc;
+}
+
+int32_t cam_context_acquire_dev_to_hw(struct cam_context *ctx,
+	struct cam_acquire_dev_cmd *cmd)
+{
+	int rc;
+	struct cam_hw_acquire_args param;
+	struct cam_create_dev_hdl req_hdl_param;
+	struct cam_hw_release_args release;
+
+	if (!ctx->hw_mgr_intf) {
+		pr_err("HW interface is not ready\n");
+		rc = -EFAULT;
+		goto end;
+	}
+
+	pr_debug("acquire cmd: session_hdl 0x%x, num_resources %d\n",
+		cmd->session_handle, cmd->num_resources);
+	pr_debug(" handle type %d, res %lld\n", cmd->handle_type,
+		cmd->resource_hdl);
+
+	if (cmd->num_resources > CAM_CTX_RES_MAX) {
+		pr_err("Too much resources in the acquire\n");
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	/* for now we only support user pointer */
+	if (cmd->handle_type != 1)  {
+		pr_err("Only user pointer is supported");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* fill in parameters */
+	param.context_data = ctx;
+	param.event_cb = ctx->irq_cb_intf;
+	param.num_acq = cmd->num_resources;
+	param.acquire_info = cmd->resource_hdl;
+
+	pr_debug("ctx %pK: acquire hw resource: hw_intf: 0x%pK, priv 0x%pK",
+		ctx, ctx->hw_mgr_intf, ctx->hw_mgr_intf->hw_mgr_priv);
+	pr_debug("acquire_hw_func 0x%pK\n", ctx->hw_mgr_intf->hw_acquire);
+
+	/* call HW manager to reserve the resource */
+	rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
+		&param);
+	if (rc != 0) {
+		pr_err("Acquire device failed\n");
+		goto end;
+	}
+
+	ctx->ctxt_to_hw_map = param.ctxt_to_hw_map;
+
+	/* if hw resource acquire successful, acquire dev handle */
+	req_hdl_param.session_hdl = cmd->session_handle;
+	/* The bridge is not ready for these flags, so keep them false for now */
+	req_hdl_param.v4l2_sub_dev_flag = 0;
+	req_hdl_param.media_entity_flag = 0;
+	req_hdl_param.priv = ctx;
+
+	pr_debug("get device handle from bridge\n");
+	ctx->dev_hdl = cam_create_device_hdl(&req_hdl_param);
+	if (ctx->dev_hdl <= 0) {
+		rc = -EFAULT;
+		pr_err("Can not create device handle\n");
+		goto free_hw;
+	}
+	cmd->dev_handle = ctx->dev_hdl;
+
+	/* store session information */
+	ctx->session_hdl = cmd->session_handle;
+
+	pr_err("dev_handle = %x\n", cmd->dev_handle);
+	return rc;
+
+free_hw:
+	release.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+	ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &release);
+	ctx->ctxt_to_hw_map = NULL;
+end:
+	return rc;
+}
+
+int32_t cam_context_start_dev_to_hw(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc = 0;
+	struct cam_hw_start_args arg;
+
+	if (!ctx->hw_mgr_intf) {
+		pr_err("HW interface is not ready\n");
+		rc = -EFAULT;
+		goto end;
+	}
+
+	if ((cmd->session_handle != ctx->session_hdl) ||
+		(cmd->dev_handle != ctx->dev_hdl)) {
+		pr_err("Invalid session hdl[%d], dev_handle[%d]\n",
+			cmd->session_handle, cmd->dev_handle);
+		rc = -EPERM;
+		goto end;
+	}
+
+	if (ctx->hw_mgr_intf->hw_start) {
+		rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv,
+				&arg);
+		if (rc) {
+			/* HW failure: the user needs to clean up the resource */
+			pr_err("Start HW failed\n");
+			goto end;
+		}
+	}
+
+	pr_debug("start device success\n");
+end:
+	return rc;
+}
+
+int32_t cam_context_stop_dev_to_hw(struct cam_context *ctx)
+{
+	int rc = 0;
+	uint32_t i;
+	struct cam_hw_stop_args stop;
+	struct cam_ctx_request *req;
+
+	if (!ctx->hw_mgr_intf) {
+		pr_err("HW interface is not ready\n");
+		rc = -EFAULT;
+		goto end;
+	}
+
+	/* stop hw first */
+	if (ctx->ctxt_to_hw_map) {
+		stop.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+		if (ctx->hw_mgr_intf->hw_stop)
+			ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
+				&stop);
+	}
+
+	/* flush pending and active queue */
+	while (!list_empty(&ctx->pending_req_list)) {
+		req = list_first_entry(&ctx->pending_req_list,
+				struct cam_ctx_request, list);
+		list_del_init(&req->list);
+		pr_debug("signal fence in pending list. fence num %d\n",
+			req->num_out_map_entries);
+		for (i = 0; i < req->num_out_map_entries; i++)
+			if (req->out_map_entries[i].sync_id != -1)
+				cam_sync_signal(req->out_map_entries[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+		list_add_tail(&req->list, &ctx->free_req_list);
+	}
+
+	while (!list_empty(&ctx->active_req_list)) {
+		req = list_first_entry(&ctx->active_req_list,
+				struct cam_ctx_request, list);
+		list_del_init(&req->list);
+		pr_debug("signal fence in active list. fence num %d\n",
+			req->num_out_map_entries);
+		for (i = 0; i < req->num_out_map_entries; i++)
+			if (req->out_map_entries[i].sync_id != -1)
+				cam_sync_signal(req->out_map_entries[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+		list_add_tail(&req->list, &ctx->free_req_list);
+	}
+
+end:
+	return rc;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.h b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.h
new file mode 100644
index 0000000..f7982eb
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.h
@@ -0,0 +1,32 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CONTEXT_UTILS_H_
+#define _CAM_CONTEXT_UTILS_H_
+
+#include <linux/types.h>
+
+int cam_context_buf_done_from_hw(struct cam_context *ctx,
+	void *done_event_data, uint32_t bubble_state);
+int cam_context_apply_req_to_hw(struct cam_context *ctx,
+	struct cam_req_mgr_apply_request *apply);
+int32_t cam_context_release_dev_to_hw(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd);
+int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,
+	struct cam_config_dev_cmd *cmd);
+int32_t cam_context_acquire_dev_to_hw(struct cam_context *ctx,
+	struct cam_acquire_dev_cmd *cmd);
+int32_t cam_context_start_dev_to_hw(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd);
+int32_t cam_context_stop_dev_to_hw(struct cam_context *ctx);
+
+#endif /* _CAM_CONTEXT_UTILS_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_core_defs.h b/drivers/media/platform/msm/camera/cam_core/cam_core_defs.h
new file mode 100644
index 0000000..3498836
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_core/cam_core_defs.h
@@ -0,0 +1,44 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _CAM_REQ_MGR_CORE_DEFS_H_
+#define _CAM_REQ_MGR_CORE_DEFS_H_
+
+#define CAM_CORE_TRACE_ENABLE 0
+
+#if (CAM_CORE_TRACE_ENABLE == 1)
+	#define CAM_CORE_DBG(fmt, args...) do { \
+	trace_printk("%d: [cam_core_dbg] "fmt"\n", __LINE__, ##args); \
+	pr_debug("%s:%d "fmt"\n", __func__, __LINE__, ##args); \
+	} while (0)
+
+	#define CAM_CORE_WARN(fmt, args...) do { \
+	trace_printk("%d: [cam_core_warn] "fmt"\n", __LINE__, ##args); \
+	pr_warn("%s:%d "fmt"\n", __func__, __LINE__, ##args); \
+	} while (0)
+
+	#define CAM_CORE_ERR(fmt, args...) do { \
+	trace_printk("%d: [cam_core_err] "fmt"\n", __LINE__, ##args); \
+	pr_err("%s:%d "fmt"\n", __func__, __LINE__, ##args);\
+	} while (0)
+#else
+	#define CAM_CORE_DBG(fmt, args...) pr_debug("%s:%d "fmt"\n", \
+	__func__, __LINE__, ##args)
+
+	#define CAM_CORE_WARN(fmt, args...) pr_warn("%s:%d "fmt"\n", \
+	__func__, __LINE__, ##args)
+
+	#define CAM_CORE_ERR(fmt, args...) pr_err("%s:%d "fmt"\n", \
+	__func__, __LINE__, ##args)
+#endif
+
+#endif /* _CAM_REQ_MGR_CORE_DEFS_H_ */
+
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
index db605e7..f72a1d7 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
@@ -36,6 +36,7 @@
  * @offset:                Memory offset
  * @len:                   Size of the configuration
  * @flags:                 Flags for the config entry(eg. DMI)
+ * @addr:                  Address of hardware update entry
  *
  */
 struct cam_hw_update_entry {
@@ -43,6 +44,7 @@
 	uint32_t           offset;
 	uint32_t           len;
 	uint32_t           flags;
+	uint64_t           addr;
 };
 
 /**
@@ -137,6 +139,7 @@
  * @max_in_map_entries:    Maximum input fence mapping supported
  * @in_map_entries:        Actual input fence mapping list (returned)
  * @num_in_map_entries:    Number of acutal input fence mapping (returned)
+ * @priv:                  Private pointer of hw update
  *
  */
 struct cam_hw_prepare_update_args {
@@ -151,6 +154,7 @@
 	uint32_t                        max_in_map_entries;
 	struct cam_hw_fence_map_entry  *in_map_entries;
 	uint32_t                        num_in_map_entries;
+	void                           *priv;
 };
 
 /**
@@ -159,12 +163,18 @@
  * @ctxt_to_hw_map:        HW context from the acquire
  * @num_hw_update_entries: Number of hardware update entries
  * @hw_update_entries:     Hardware update list
+ * @out_map_entries:       Out map info
+ * @num_out_map_entries:   Number of out map entries
+ * @priv:                  Private pointer
  *
  */
 struct cam_hw_config_args {
-	void                        *ctxt_to_hw_map;
-	uint32_t                     num_hw_update_entries;
-	struct cam_hw_update_entry  *hw_update_entries;
+	void                           *ctxt_to_hw_map;
+	uint32_t                        num_hw_update_entries;
+	struct cam_hw_update_entry     *hw_update_entries;
+	struct cam_hw_fence_map_entry  *out_map_entries;
+	uint32_t                        num_out_map_entries;
+	void                           *priv;
 };
 
 /**
@@ -189,6 +199,8 @@
  * @hw_write:              Function pointer for Write hardware registers
  * @hw_cmd:                Function pointer for any customized commands for the
  *                         hardware manager
+ * @download_fw:           Function pointer for firmware downloading
+ * @hw_close:              Function pointer for subdev close
  *
  */
 struct cam_hw_mgr_intf {
@@ -204,6 +216,8 @@
 	int (*hw_read)(void *hw_priv, void *read_args);
 	int (*hw_write)(void *hw_priv, void *write_args);
 	int (*hw_cmd)(void *hw_priv, void *write_args);
+	int (*download_fw)(void *hw_priv, void *fw_download_args);
+	int (*hw_close)(void *hw_priv, void *hw_close_args);
 };
 
 #endif /* _CAM_HW_MGR_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_node.c b/drivers/media/platform/msm/camera/cam_core/cam_node.c
index ef60822..17f6973 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_node.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_node.c
@@ -21,13 +21,16 @@
 {
 	int rc = -EFAULT;
 
-	if (!query)
+	if (!query) {
+		pr_err("%s: Invalid params\n", __func__);
 		return -EINVAL;
+	}
 
 	if (node->hw_mgr_intf.hw_get_caps) {
 		rc = node->hw_mgr_intf.hw_get_caps(
 			node->hw_mgr_intf.hw_mgr_priv, query);
 	}
+
 	return rc;
 }
 
@@ -47,7 +50,6 @@
 		list_del_init(&ctx->list);
 	}
 	mutex_unlock(&node->list_mutex);
-
 	if (!ctx) {
 		rc = -ENOMEM;
 		goto err;
@@ -254,8 +256,8 @@
 		memset(node, 0, sizeof(*node));
 
 	pr_debug("%s: deinit complete!\n", __func__);
-	return 0;
 
+	return 0;
 }
 
 int cam_node_init(struct cam_node *node, struct cam_hw_mgr_intf *hw_mgr_intf,
@@ -274,7 +276,6 @@
 	strlcpy(node->name, name, sizeof(node->name));
 
 	memcpy(&node->hw_mgr_intf, hw_mgr_intf, sizeof(node->hw_mgr_intf));
-
 	node->crm_node_intf.apply_req = __cam_node_apply_req;
 	node->crm_node_intf.get_dev_info = __cam_node_get_dev_info;
 	node->crm_node_intf.link_setup = __cam_node_link_setup;
@@ -318,15 +319,18 @@
 			rc = -EFAULT;
 			break;
 		}
+
 		rc = __cam_node_handle_query_cap(node, &query);
 		if (rc) {
 			pr_err("%s: querycap is failed(rc = %d)\n",
 				__func__,  rc);
 			break;
 		}
+
 		if (copy_to_user((void __user *)cmd->handle, &query,
 			sizeof(query)))
 			rc = -EFAULT;
+
 		break;
 	}
 	case CAM_ACQUIRE_DEV: {
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_subdev.c b/drivers/media/platform/msm/camera/cam_core/cam_subdev.c
index 429474b..a89981d 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_subdev.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_subdev.c
@@ -148,6 +148,7 @@
 	sd->sd_flags =
 		V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
 	sd->ent_function = dev_type;
+
 	rc = cam_register_subdev(sd);
 	if (rc) {
 		pr_err("%s: cam_register_subdev() failed for dev: %s!\n",
diff --git a/drivers/media/platform/msm/camera/cam_cpas/Makefile b/drivers/media/platform/msm/camera/cam_cpas/Makefile
new file mode 100644
index 0000000..63dc58e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/Makefile
@@ -0,0 +1,10 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/cpas_top
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/camss_top
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cpas_top/
+obj-$(CONFIG_SPECTRA_CAMERA) += camss_top/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_cpas_soc.o cam_cpas_intf.o cam_cpas_hw.o
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
new file mode 100644
index 0000000..4f246e1
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
@@ -0,0 +1,1415 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/msm-bus.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+
+#include "cam_cpas_hw.h"
+#include "cam_cpas_hw_intf.h"
+#include "cam_cpas_soc.h"
+
+int cam_cpas_util_get_string_index(const char **strings,
+	uint32_t num_strings, char *matching_string, uint32_t *index)
+{
+	int i;
+
+	for (i = 0; i < num_strings; i++) {
+		if (strnstr(strings[i], matching_string, strlen(strings[i]))) {
+			CPAS_CDBG("matched %s : %d\n", matching_string, i);
+			*index = i;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+int cam_cpas_util_reg_update(struct cam_hw_info *cpas_hw,
+	enum cam_cpas_reg_base reg_base, struct cam_cpas_reg *reg_info)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	uint32_t value;
+	int reg_base_index;
+
+	if (reg_info->enable == false)
+		return 0;
+
+	reg_base_index = cpas_core->regbase_index[reg_base];
+	if (reg_base_index == -1)
+		return -EINVAL;
+
+	if (reg_info->masked_value) {
+		value = cam_io_r_mb(
+			soc_info->reg_map[reg_base_index].mem_base +
+			reg_info->offset);
+		value = value & (~reg_info->mask);
+		value = value | (reg_info->value << reg_info->shift);
+	} else {
+		value = reg_info->value;
+	}
+
+	CPAS_CDBG("Base[%d] Offset[0x%8x] Value[0x%8x]\n",
+		reg_base, reg_info->offset, value);
+
+	cam_io_w_mb(value, soc_info->reg_map[reg_base_index].mem_base +
+		reg_info->offset);
+
+	return 0;
+}
+
+static int cam_cpas_util_vote_bus_client_level(
+	struct cam_cpas_bus_client *bus_client, unsigned int level)
+{
+	if (!bus_client->valid || (bus_client->dyn_vote == true)) {
+		pr_err("Invalid params %d %d\n", bus_client->valid,
+			bus_client->dyn_vote);
+		return -EINVAL;
+	}
+
+	if (level >= bus_client->num_usecases) {
+		pr_err("Invalid vote level=%d, usecases=%d\n", level,
+			bus_client->num_usecases);
+		return -EINVAL;
+	}
+
+	if (level == bus_client->curr_vote_level)
+		return 0;
+
+	CPAS_CDBG("Bus client[%d] index[%d]\n", bus_client->client_id, level);
+	msm_bus_scale_client_update_request(bus_client->client_id, level);
+	bus_client->curr_vote_level = level;
+
+	return 0;
+}
+
+static int cam_cpas_util_vote_bus_client_bw(
+	struct cam_cpas_bus_client *bus_client, uint64_t ab, uint64_t ib)
+{
+	struct msm_bus_paths *path;
+	struct msm_bus_scale_pdata *pdata;
+	int idx = 0;
+
+	if (!bus_client->valid) {
+		pr_err("bus client not valid\n");
+		return -EINVAL;
+	}
+
+	if ((bus_client->num_usecases != 2) ||
+		(bus_client->num_paths != 1) ||
+		(bus_client->dyn_vote != true)) {
+		pr_err("dynamic update not allowed %d %d %d\n",
+			bus_client->num_usecases, bus_client->num_paths,
+			bus_client->dyn_vote);
+		return -EINVAL;
+	}
+
+	mutex_lock(&bus_client->lock);
+
+	if (bus_client->curr_vote_level > 1) {
+		pr_err("curr_vote_level %d cannot be greater than 1\n",
+			bus_client->curr_vote_level);
+		mutex_unlock(&bus_client->lock);
+		return -EINVAL;
+	}
+
+	idx = bus_client->curr_vote_level;
+	idx = 1 - idx;
+	bus_client->curr_vote_level = idx;
+	mutex_unlock(&bus_client->lock);
+
+	pdata = bus_client->pdata;
+	path = &(pdata->usecase[idx]);
+	path->vectors[0].ab = ab;
+	path->vectors[0].ib = ib;
+
+	CPAS_CDBG("Bus client[%d] :ab[%llu] ib[%llu], index[%d]\n",
+		bus_client->client_id, ab, ib, idx);
+	msm_bus_scale_client_update_request(bus_client->client_id, idx);
+
+	return 0;
+}
+
+static int cam_cpas_util_register_bus_client(
+	struct cam_hw_soc_info *soc_info, struct device_node *dev_node,
+	struct cam_cpas_bus_client *bus_client)
+{
+	struct msm_bus_scale_pdata *pdata = NULL;
+	uint32_t client_id;
+	int rc;
+
+	pdata = msm_bus_pdata_from_node(soc_info->pdev,
+		dev_node);
+	if (!pdata) {
+		pr_err("failed get_pdata\n");
+		return -EINVAL;
+	}
+
+	if ((pdata->num_usecases == 0) ||
+		(pdata->usecase[0].num_paths == 0)) {
+		pr_err("usecase=%d\n", pdata->num_usecases);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	client_id = msm_bus_scale_register_client(pdata);
+	if (!client_id) {
+		pr_err("failed in register ahb bus client\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	bus_client->dyn_vote = of_property_read_bool(dev_node,
+		"qcom,msm-bus-vector-dyn-vote");
+
+	if (bus_client->dyn_vote && (pdata->num_usecases != 2)) {
+		pr_err("Excess or less vectors %d\n", pdata->num_usecases);
+		rc = -EINVAL;
+		goto fail_unregister_client;
+	}
+
+	msm_bus_scale_client_update_request(client_id, 0);
+
+	bus_client->src = pdata->usecase[0].vectors[0].src;
+	bus_client->dst = pdata->usecase[0].vectors[0].dst;
+	bus_client->pdata = pdata;
+	bus_client->client_id = client_id;
+	bus_client->num_usecases = pdata->num_usecases;
+	bus_client->num_paths = pdata->usecase[0].num_paths;
+	bus_client->curr_vote_level = 0;
+	bus_client->valid = true;
+	mutex_init(&bus_client->lock);
+
+	CPAS_CDBG("Bus Client : src=%d, dst=%d, bus_client=%d\n",
+		bus_client->src, bus_client->dst, bus_client->client_id);
+
+	return 0;
+fail_unregister_client:
+	msm_bus_scale_unregister_client(bus_client->client_id);
+error:
+	return rc;
+
+}
+
+static int cam_cpas_util_unregister_bus_client(
+	struct cam_cpas_bus_client *bus_client)
+{
+	if (!bus_client->valid)
+		return -EINVAL;
+
+	if (bus_client->dyn_vote)
+		cam_cpas_util_vote_bus_client_bw(bus_client, 0, 0);
+	else
+		cam_cpas_util_vote_bus_client_level(bus_client, 0);
+
+	msm_bus_scale_unregister_client(bus_client->client_id);
+	bus_client->valid = false;
+
+	mutex_destroy(&bus_client->lock);
+
+	return 0;
+}
+
+static int cam_cpas_util_axi_cleanup(struct cam_cpas *cpas_core,
+	struct cam_hw_soc_info *soc_info)
+{
+	struct cam_cpas_private_soc *soc_private =
+		(struct cam_cpas_private_soc *)soc_info->soc_private;
+	struct cam_cpas_axi_port *curr_port;
+	struct cam_cpas_axi_port *temp_port;
+
+	list_for_each_entry_safe(curr_port, temp_port,
+		&cpas_core->axi_ports_list_head, sibling_port) {
+		cam_cpas_util_unregister_bus_client(&curr_port->mnoc_bus);
+		of_node_put(curr_port->axi_port_mnoc_node);
+		if (soc_private->axi_camnoc_based) {
+			cam_cpas_util_unregister_bus_client(
+				&curr_port->camnoc_bus);
+			of_node_put(curr_port->axi_port_camnoc_node);
+		}
+		of_node_put(curr_port->axi_port_node);
+		list_del(&curr_port->sibling_port);
+		mutex_destroy(&curr_port->lock);
+		kfree(curr_port);
+	}
+
+	of_node_put(soc_private->axi_port_list_node);
+
+	return 0;
+}
+
+static int cam_cpas_util_axi_setup(struct cam_cpas *cpas_core,
+	struct cam_hw_soc_info *soc_info)
+{
+	struct cam_cpas_private_soc *soc_private =
+		(struct cam_cpas_private_soc *)soc_info->soc_private;
+	struct cam_cpas_axi_port *axi_port;
+	int rc;
+	struct device_node *axi_port_list_node;
+	struct device_node *axi_port_node = NULL;
+	struct device_node *axi_port_mnoc_node = NULL;
+	struct device_node *axi_port_camnoc_node = NULL;
+
+	INIT_LIST_HEAD(&cpas_core->axi_ports_list_head);
+
+	axi_port_list_node = of_find_node_by_name(soc_info->pdev->dev.of_node,
+		"qcom,axi-port-list");
+	if (!axi_port_list_node) {
+		pr_err("Node qcom,axi-port-list not found.\n");
+		return -EINVAL;
+	}
+
+	soc_private->axi_port_list_node = axi_port_list_node;
+
+	for_each_available_child_of_node(axi_port_list_node, axi_port_node) {
+		axi_port = kzalloc(sizeof(*axi_port), GFP_KERNEL);
+		if (!axi_port) {
+			rc = -ENOMEM;
+			goto error_previous_axi_cleanup;
+		}
+		axi_port->axi_port_node = axi_port_node;
+
+		rc = of_property_read_string_index(axi_port_node,
+			"qcom,axi-port-name", 0,
+			(const char **)&axi_port->axi_port_name);
+		if (rc) {
+			pr_err("failed to read qcom,axi-port-name rc=%d\n", rc);
+			goto port_name_fail;
+		}
+
+		axi_port_mnoc_node = of_find_node_by_name(axi_port_node,
+			"qcom,axi-port-mnoc");
+		if (!axi_port_mnoc_node) {
+			pr_err("Node qcom,axi-port-mnoc not found.\n");
+			rc = -EINVAL;
+			goto mnoc_node_get_fail;
+		}
+		axi_port->axi_port_mnoc_node = axi_port_mnoc_node;
+
+		rc = cam_cpas_util_register_bus_client(soc_info,
+			axi_port_mnoc_node, &axi_port->mnoc_bus);
+		if (rc)
+			goto mnoc_register_fail;
+
+		if (soc_private->axi_camnoc_based) {
+			axi_port_camnoc_node = of_find_node_by_name(
+				axi_port_node, "qcom,axi-port-camnoc");
+			if (!axi_port_camnoc_node) {
+				pr_err("Node qcom,axi-port-camnoc not found\n");
+				rc = -EINVAL;
+				goto camnoc_node_get_fail;
+			}
+			axi_port->axi_port_camnoc_node = axi_port_camnoc_node;
+
+			rc = cam_cpas_util_register_bus_client(soc_info,
+				axi_port_camnoc_node, &axi_port->camnoc_bus);
+			if (rc)
+				goto camnoc_register_fail;
+		}
+
+		mutex_init(&axi_port->lock);
+
+		INIT_LIST_HEAD(&axi_port->sibling_port);
+		list_add_tail(&axi_port->sibling_port,
+			&cpas_core->axi_ports_list_head);
+		INIT_LIST_HEAD(&axi_port->clients_list_head);
+	}
+
+	return 0;
+camnoc_register_fail:
+	of_node_put(axi_port->axi_port_camnoc_node);
+camnoc_node_get_fail:
+	cam_cpas_util_unregister_bus_client(&axi_port->mnoc_bus);
+mnoc_register_fail:
+	of_node_put(axi_port->axi_port_mnoc_node);
+mnoc_node_get_fail:
+port_name_fail:
+	of_node_put(axi_port->axi_port_node);
+	kfree(axi_port);
+error_previous_axi_cleanup:
+	cam_cpas_util_axi_cleanup(cpas_core, soc_info);
+	return rc;
+}
+
+static int cam_cpas_util_vote_default_ahb_axi(struct cam_hw_info *cpas_hw,
+	int enable)
+{
+	int rc;
+	struct cam_cpas *cpas_core = (struct cam_cpas *)cpas_hw->core_info;
+	struct cam_cpas_axi_port *curr_port;
+	struct cam_cpas_axi_port *temp_port;
+	uint64_t camnoc_bw, mnoc_bw;
+	struct cam_cpas_private_soc *soc_private =
+		(struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private;
+
+	rc = cam_cpas_util_vote_bus_client_level(&cpas_core->ahb_bus_client,
+		(enable == true) ? CAM_SVS_VOTE : CAM_SUSPEND_VOTE);
+	if (rc) {
+		pr_err("Failed in AHB vote, enable=%d, rc=%d\n", enable, rc);
+		return rc;
+	}
+
+	if (enable) {
+		mnoc_bw = CAM_CPAS_DEFAULT_AXI_BW;
+		camnoc_bw = CAM_CPAS_DEFAULT_AXI_BW;
+	} else {
+		mnoc_bw = 0;
+		camnoc_bw = 0;
+	}
+
+	list_for_each_entry_safe(curr_port, temp_port,
+		&cpas_core->axi_ports_list_head, sibling_port) {
+		rc = cam_cpas_util_vote_bus_client_bw(&curr_port->mnoc_bus,
+			mnoc_bw, 0);
+		if (rc) {
+			pr_err("Failed in mnoc vote, enable=%d, rc=%d\n",
+				enable, rc);
+			goto remove_ahb_vote;
+		}
+
+		if (soc_private->axi_camnoc_based) {
+			cam_cpas_util_vote_bus_client_bw(
+				&curr_port->camnoc_bus, camnoc_bw, 0);
+			if (rc) {
+				pr_err("Failed in mnoc vote, enable=%d, %d\n",
+					enable, rc);
+				cam_cpas_util_vote_bus_client_bw(
+					&curr_port->mnoc_bus, 0, 0);
+				goto remove_ahb_vote;
+			}
+		}
+	}
+
+	return 0;
+remove_ahb_vote:
+	cam_cpas_util_vote_bus_client_level(&cpas_core->ahb_bus_client,
+		CAM_SUSPEND_VOTE);
+	return rc;
+}
+
+static int cam_cpas_util_insert_client_to_axi_port(struct cam_cpas *cpas_core,
+	struct cam_cpas_private_soc *soc_private,
+	struct cam_cpas_client *cpas_client, int32_t client_indx)
+{
+	struct cam_cpas_axi_port *curr_port;
+	struct cam_cpas_axi_port *temp_port;
+
+	list_for_each_entry_safe(curr_port, temp_port,
+		&cpas_core->axi_ports_list_head, sibling_port) {
+		if (strnstr(curr_port->axi_port_name,
+			soc_private->client_axi_port_name[client_indx],
+			strlen(curr_port->axi_port_name))) {
+
+			cpas_client->axi_port = curr_port;
+			INIT_LIST_HEAD(&cpas_client->axi_sibling_client);
+
+			mutex_lock(&curr_port->lock);
+			list_add_tail(&cpas_client->axi_sibling_client,
+				&cpas_client->axi_port->clients_list_head);
+			mutex_unlock(&curr_port->lock);
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static void cam_cpas_util_remove_client_from_axi_port(
+	struct cam_cpas_client *cpas_client)
+{
+	mutex_lock(&cpas_client->axi_port->lock);
+	list_del(&cpas_client->axi_sibling_client);
+	mutex_unlock(&cpas_client->axi_port->lock);
+}
+
+static int cam_cpas_hw_reg_write(struct cam_hw_info *cpas_hw,
+	uint32_t client_handle, enum cam_cpas_reg_base reg_base,
+	uint32_t offset, bool mb, uint32_t value)
+{
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	int reg_base_index = cpas_core->regbase_index[reg_base];
+	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
+	int rc = 0;
+
+	if (reg_base_index < 0 || reg_base_index >= soc_info->num_reg_map) {
+		pr_err("Invalid reg_base=%d, reg_base_index=%d, num_map=%d\n",
+			reg_base, reg_base_index, soc_info->num_reg_map);
+		return -EINVAL;
+	}
+
+	if (!CAM_CPAS_CLIENT_VALID(client_indx))
+		return -EINVAL;
+
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+
+	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+		pr_err("client has not started%d\n", client_indx);
+		rc = -EPERM;
+		goto unlock_client;
+	}
+
+	if (mb)
+		cam_io_w_mb(value,
+			soc_info->reg_map[reg_base_index].mem_base + offset);
+	else
+		cam_io_w(value,
+			soc_info->reg_map[reg_base_index].mem_base + offset);
+
+unlock_client:
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	return rc;
+}
+
+static int cam_cpas_hw_reg_read(struct cam_hw_info *cpas_hw,
+	uint32_t client_handle, enum cam_cpas_reg_base reg_base,
+	uint32_t offset, bool mb, uint32_t *value)
+{
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	int reg_base_index = cpas_core->regbase_index[reg_base];
+	uint32_t reg_value;
+	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
+	int rc = 0;
+
+	if (!value)
+		return -EINVAL;
+
+	if (reg_base_index < 0 || reg_base_index >= soc_info->num_reg_map) {
+		pr_err("Invalid reg_base=%d, reg_base_index=%d, num_map=%d\n",
+			reg_base, reg_base_index, soc_info->num_reg_map);
+		return -EINVAL;
+	}
+
+	if (!CAM_CPAS_CLIENT_VALID(client_indx))
+		return -EINVAL;
+
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+
+	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+		pr_err("client has not started%d\n", client_indx);
+		rc = -EPERM;
+		goto unlock_client;
+	}
+
+	if (mb)
+		reg_value = cam_io_r_mb(
+			soc_info->reg_map[reg_base_index].mem_base + offset);
+	else
+		reg_value = cam_io_r(
+			soc_info->reg_map[reg_base_index].mem_base + offset);
+
+	*value = reg_value;
+
+unlock_client:
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	return rc;
+}
+
+static int cam_cpas_util_apply_client_axi_vote(
+	struct cam_cpas *cpas_core, struct cam_cpas_private_soc *soc_private,
+	struct cam_cpas_client *cpas_client, struct cam_axi_vote *axi_vote)
+{
+	struct cam_cpas_client *curr_client;
+	struct cam_cpas_client *temp_client;
+	struct cam_axi_vote req_axi_vote = *axi_vote;
+	struct cam_cpas_axi_port *axi_port = cpas_client->axi_port;
+	uint64_t camnoc_bw = 0, mnoc_bw = 0;
+	int rc = 0;
+
+	if (!axi_port) {
+		pr_err("axi port does not exists\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Make sure we use same bw for both compressed, uncompressed
+	 * in case client has requested either of one only
+	 */
+	if (req_axi_vote.compressed_bw == 0)
+		req_axi_vote.compressed_bw = req_axi_vote.uncompressed_bw;
+
+	if (req_axi_vote.uncompressed_bw == 0)
+		req_axi_vote.uncompressed_bw = req_axi_vote.compressed_bw;
+
+	if ((cpas_client->axi_vote.compressed_bw ==
+		req_axi_vote.compressed_bw) &&
+		(cpas_client->axi_vote.uncompressed_bw ==
+		req_axi_vote.uncompressed_bw))
+		return 0;
+
+	mutex_lock(&axi_port->lock);
+	cpas_client->axi_vote = req_axi_vote;
+
+	list_for_each_entry_safe(curr_client, temp_client,
+		&axi_port->clients_list_head, axi_sibling_client) {
+		camnoc_bw += curr_client->axi_vote.uncompressed_bw;
+		mnoc_bw += curr_client->axi_vote.compressed_bw;
+	}
+
+	if ((!soc_private->axi_camnoc_based) && (mnoc_bw < camnoc_bw))
+		mnoc_bw = camnoc_bw;
+
+	CPAS_CDBG("axi[(%d, %d),(%d, %d)] : camnoc_bw[%llu], mnoc_bw[%llu]\n",
+		axi_port->mnoc_bus.src, axi_port->mnoc_bus.dst,
+		axi_port->camnoc_bus.src, axi_port->camnoc_bus.dst,
+		camnoc_bw, mnoc_bw);
+
+	rc = cam_cpas_util_vote_bus_client_bw(&axi_port->mnoc_bus,
+		mnoc_bw, 0);
+	if (rc) {
+		pr_err("Failed in mnoc vote ab[%llu] ib[%llu] rc=%d\n",
+			mnoc_bw, mnoc_bw, rc);
+		goto unlock_axi_port;
+	}
+
+	if (soc_private->axi_camnoc_based) {
+		rc = cam_cpas_util_vote_bus_client_bw(&axi_port->camnoc_bus,
+			camnoc_bw, 0);
+		if (rc) {
+			pr_err("Failed camnoc vote ab[%llu] ib[%llu] rc=%d\n",
+				camnoc_bw, camnoc_bw, rc);
+			goto unlock_axi_port;
+		}
+	}
+
+unlock_axi_port:
+	mutex_unlock(&axi_port->lock);
+	return rc;
+}
+
+static int cam_cpas_hw_update_axi_vote(struct cam_hw_info *cpas_hw,
+	uint32_t client_handle, struct cam_axi_vote *axi_vote)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
+	int rc = 0;
+
+	if (!axi_vote || ((axi_vote->compressed_bw == 0) &&
+		(axi_vote->uncompressed_bw == 0))) {
+		pr_err("Invalid vote, client_handle=%d\n", client_handle);
+		return -EINVAL;
+	}
+
+	if (!CAM_CPAS_CLIENT_VALID(client_indx))
+		return -EINVAL;
+
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+
+	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+		pr_err("client has not started %d\n", client_indx);
+		rc = -EPERM;
+		goto unlock_client;
+	}
+
+	CPAS_CDBG("Client[%d] Requested compressed[%llu], uncompressed[%llu]\n",
+		client_indx, axi_vote->compressed_bw,
+		axi_vote->uncompressed_bw);
+
+	rc = cam_cpas_util_apply_client_axi_vote(cpas_core,
+		cpas_hw->soc_info.soc_private,
+		cpas_core->cpas_client[client_indx], axi_vote);
+
+unlock_client:
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	return rc;
+}
+
+static int cam_cpas_util_apply_client_ahb_vote(struct cam_cpas *cpas_core,
+	struct cam_cpas_client *cpas_client, struct cam_ahb_vote *ahb_vote)
+{
+	struct cam_cpas_bus_client *ahb_bus_client = &cpas_core->ahb_bus_client;
+	enum cam_vote_level required_level;
+	enum cam_vote_level highest_level;
+	int i, rc = 0;
+
+	if (!ahb_bus_client->valid) {
+		pr_err("AHB Bus client not valid\n");
+		return -EINVAL;
+	}
+
+	if (ahb_vote->type == CAM_VOTE_DYNAMIC) {
+		pr_err("Dynamic AHB vote not supported\n");
+		return -EINVAL;
+	}
+
+	required_level = ahb_vote->vote.level;
+
+	if (cpas_client->ahb_level == required_level)
+		return 0;
+
+	mutex_lock(&ahb_bus_client->lock);
+	cpas_client->ahb_level = required_level;
+
+	CPAS_CDBG("Clients required level[%d], curr_level[%d]\n",
+		required_level, ahb_bus_client->curr_vote_level);
+
+	if (required_level == ahb_bus_client->curr_vote_level)
+		goto unlock_bus_client;
+
+	highest_level = required_level;
+	for (i = 0; i < cpas_core->num_clients; i++) {
+		if (cpas_core->cpas_client[i] && (highest_level <
+			cpas_core->cpas_client[i]->ahb_level))
+			highest_level = cpas_core->cpas_client[i]->ahb_level;
+	}
+
+	CPAS_CDBG("Required highest_level[%d]\n", highest_level);
+
+	rc = cam_cpas_util_vote_bus_client_level(ahb_bus_client,
+		highest_level);
+	if (rc)
+		pr_err("Failed in ahb vote, level=%d, rc=%d\n",
+			highest_level, rc);
+
+unlock_bus_client:
+	mutex_unlock(&ahb_bus_client->lock);
+	return rc;
+}
+
+static int cam_cpas_hw_update_ahb_vote(struct cam_hw_info *cpas_hw,
+	uint32_t client_handle, struct cam_ahb_vote *ahb_vote)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
+	int rc = 0;
+
+	if (!ahb_vote || (ahb_vote->vote.level == 0)) {
+		pr_err("Invalid AHB vote, %pK\n", ahb_vote);
+		return -EINVAL;
+	}
+
+	if (!CAM_CPAS_CLIENT_VALID(client_indx))
+		return -EINVAL;
+
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+
+	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+		pr_err("client has not started %d\n", client_indx);
+		rc = -EPERM;
+		goto unlock_client;
+	}
+
+	CPAS_CDBG("client[%d] : type[%d], level[%d], freq[%ld], applied[%d]\n",
+		client_indx, ahb_vote->type, ahb_vote->vote.level,
+		ahb_vote->vote.freq,
+		cpas_core->cpas_client[client_indx]->ahb_level);
+
+	rc = cam_cpas_util_apply_client_ahb_vote(cpas_core,
+		cpas_core->cpas_client[client_indx], ahb_vote);
+
+unlock_client:
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	return rc;
+}
+
+static int cam_cpas_hw_start(void *hw_priv, void *start_args,
+	uint32_t arg_size)
+{
+	struct cam_hw_info *cpas_hw;
+	struct cam_cpas *cpas_core;
+	uint32_t client_indx;
+	struct cam_cpas_hw_cmd_start *cmd_hw_start;
+	struct cam_cpas_client *cpas_client;
+	struct cam_ahb_vote *ahb_vote;
+	struct cam_axi_vote *axi_vote;
+	int rc;
+
+	if (!hw_priv || !start_args) {
+		pr_err("Invalid arguments %pK %pK\n", hw_priv, start_args);
+		return -EINVAL;
+	}
+
+	if (sizeof(struct cam_cpas_hw_cmd_start) != arg_size) {
+		pr_err("HW_CAPS size mismatch %zu %d\n",
+			sizeof(struct cam_cpas_hw_cmd_start), arg_size);
+		return -EINVAL;
+	}
+
+	cpas_hw = (struct cam_hw_info *)hw_priv;
+	cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	cmd_hw_start = (struct cam_cpas_hw_cmd_start *)start_args;
+	client_indx = CAM_CPAS_GET_CLIENT_IDX(cmd_hw_start->client_handle);
+	ahb_vote = cmd_hw_start->ahb_vote;
+	axi_vote = cmd_hw_start->axi_vote;
+
+	if (!ahb_vote || !axi_vote)
+		return -EINVAL;
+
+	if ((ahb_vote->vote.level == 0) || ((axi_vote->compressed_bw == 0) &&
+		(axi_vote->uncompressed_bw == 0))) {
+		pr_err("Invalid vote ahb[%d], axi[%llu], [%llu]\n",
+			ahb_vote->vote.level, axi_vote->compressed_bw,
+			axi_vote->uncompressed_bw);
+		return -EINVAL;
+	}
+
+	if (!CAM_CPAS_CLIENT_VALID(client_indx))
+		return -EINVAL;
+
+	mutex_lock(&cpas_hw->hw_mutex);
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+
+	if (!CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) {
+		pr_err("client is not registered %d\n", client_indx);
+		rc = -EPERM;
+		goto done;
+	}
+
+	if (CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+		pr_err("Client %d is in start state\n", client_indx);
+		rc = -EPERM;
+		goto done;
+	}
+
+	cpas_client = cpas_core->cpas_client[client_indx];
+
+	CPAS_CDBG("AHB :client[%d] type[%d], level[%d], applied[%d]\n",
+		client_indx, ahb_vote->type, ahb_vote->vote.level,
+		cpas_client->ahb_level);
+	rc = cam_cpas_util_apply_client_ahb_vote(cpas_core, cpas_client,
+		ahb_vote);
+	if (rc)
+		goto done;
+
+	CPAS_CDBG("AXI client[%d] compressed_bw[%llu], uncompressed_bw[%llu]\n",
+		client_indx, axi_vote->compressed_bw,
+		axi_vote->uncompressed_bw);
+	rc = cam_cpas_util_apply_client_axi_vote(cpas_core,
+		cpas_hw->soc_info.soc_private, cpas_client, axi_vote);
+	if (rc)
+		goto done;
+
+	if (cpas_core->streamon_clients == 0) {
+		rc = cam_cpas_soc_enable_resources(&cpas_hw->soc_info);
+		if (rc) {
+			pr_err("enable_resorce failed, rc=%d\n", rc);
+			goto done;
+		}
+
+		if (cpas_core->internal_ops.power_on_settings) {
+			rc = cpas_core->internal_ops.power_on_settings(cpas_hw);
+			if (rc) {
+				cam_cpas_soc_disable_resources(
+					&cpas_hw->soc_info);
+				pr_err("failed in power_on settings rc=%d\n",
+					rc);
+				goto done;
+			}
+		}
+		cpas_hw->hw_state = CAM_HW_STATE_POWER_UP;
+	}
+
+	cpas_client->started = true;
+	cpas_core->streamon_clients++;
+
+	CPAS_CDBG("client_indx=%d, streamon_clients=%d\n",
+		client_indx, cpas_core->streamon_clients);
+done:
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	mutex_unlock(&cpas_hw->hw_mutex);
+	return rc;
+}
+
+
+static int cam_cpas_hw_stop(void *hw_priv, void *stop_args,
+	uint32_t arg_size)
+{
+	struct cam_hw_info *cpas_hw;
+	struct cam_cpas *cpas_core;
+	uint32_t client_indx;
+	struct cam_cpas_hw_cmd_stop *cmd_hw_stop;
+	struct cam_cpas_client *cpas_client;
+	struct cam_ahb_vote ahb_vote;
+	struct cam_axi_vote axi_vote;
+	int rc = 0;
+
+	if (!hw_priv || !stop_args) {
+		pr_err("Invalid arguments %pK %pK\n", hw_priv, stop_args);
+		return -EINVAL;
+	}
+
+	if (sizeof(struct cam_cpas_hw_cmd_stop) != arg_size) {
+		pr_err("HW_CAPS size mismatch %zu %d\n",
+			sizeof(struct cam_cpas_hw_cmd_stop), arg_size);
+		return -EINVAL;
+	}
+
+	cpas_hw = (struct cam_hw_info *)hw_priv;
+	cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	cmd_hw_stop = (struct cam_cpas_hw_cmd_stop *)stop_args;
+	client_indx = CAM_CPAS_GET_CLIENT_IDX(cmd_hw_stop->client_handle);
+
+	if (!CAM_CPAS_CLIENT_VALID(client_indx))
+		return -EINVAL;
+
+	mutex_lock(&cpas_hw->hw_mutex);
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+
+	CPAS_CDBG("client_indx=%d, streamon_clients=%d\n",
+		client_indx, cpas_core->streamon_clients);
+
+	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+		pr_err("Client %d is not started\n", client_indx);
+		rc = -EPERM;
+		goto done;
+	}
+
+	cpas_client = cpas_core->cpas_client[client_indx];
+	cpas_client->started = false;
+	cpas_core->streamon_clients--;
+
+	if (cpas_core->streamon_clients == 0) {
+		rc = cam_cpas_soc_disable_resources(&cpas_hw->soc_info);
+		if (rc) {
+			pr_err("disable_resorce failed, rc=%d\n", rc);
+			goto done;
+		}
+		cpas_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	}
+
+	ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	ahb_vote.vote.level = CAM_SUSPEND_VOTE;
+	rc = cam_cpas_util_apply_client_ahb_vote(cpas_core, cpas_client,
+		&ahb_vote);
+	if (rc)
+		goto done;
+
+	axi_vote.uncompressed_bw = 0;
+	axi_vote.compressed_bw = 0;
+	rc = cam_cpas_util_apply_client_axi_vote(cpas_core,
+		cpas_hw->soc_info.soc_private, cpas_client, &axi_vote);
+
+done:
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	mutex_unlock(&cpas_hw->hw_mutex);
+	return rc;
+}
+
+static int cam_cpas_hw_init(void *hw_priv, void *init_hw_args,
+	uint32_t arg_size)
+{
+	struct cam_hw_info *cpas_hw;
+	struct cam_cpas *cpas_core;
+	int rc = 0;
+
+	if (!hw_priv || !init_hw_args) {
+		pr_err("Invalid arguments %pK %pK\n", hw_priv, init_hw_args);
+		return -EINVAL;
+	}
+
+	if (sizeof(struct cam_cpas_hw_caps) != arg_size) {
+		pr_err("INIT HW size mismatch %zu %d\n",
+			sizeof(struct cam_cpas_hw_caps), arg_size);
+		return -EINVAL;
+	}
+
+	cpas_hw = (struct cam_hw_info *)hw_priv;
+	cpas_core = (struct cam_cpas *)cpas_hw->core_info;
+
+	if (cpas_core->internal_ops.init_hw_version) {
+		rc = cpas_core->internal_ops.init_hw_version(cpas_hw,
+			(struct cam_cpas_hw_caps *)init_hw_args);
+	}
+
+	return rc;
+}
+
+static int cam_cpas_hw_register_client(struct cam_hw_info *cpas_hw,
+	struct cam_cpas_register_params *register_params)
+{
+	int rc;
+	struct cam_cpas_client *cpas_client;
+	char client_name[CAM_HW_IDENTIFIER_LENGTH + 3];
+	int32_t client_indx = -1;
+	struct cam_cpas *cpas_core = (struct cam_cpas *)cpas_hw->core_info;
+	struct cam_cpas_private_soc *soc_private =
+		(struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private;
+
+	CPAS_CDBG("Register params : identifier=%s, cell_index=%d\n",
+		register_params->identifier, register_params->cell_index);
+
+	if (soc_private->client_id_based)
+		snprintf(client_name, sizeof(client_name), "%s%d",
+			register_params->identifier,
+			register_params->cell_index);
+	else
+		snprintf(client_name, sizeof(client_name), "%s",
+			register_params->identifier);
+
+	mutex_lock(&cpas_hw->hw_mutex);
+
+	rc = cam_cpas_util_get_string_index(soc_private->client_name,
+		soc_private->num_clients, client_name, &client_indx);
+	if (rc || !CAM_CPAS_CLIENT_VALID(client_indx) ||
+		CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) {
+		pr_err("Invalid Client register : %s %d, %d\n",
+			register_params->identifier,
+			register_params->cell_index, client_indx);
+		mutex_unlock(&cpas_hw->hw_mutex);
+		return -EPERM;
+	}
+
+	cpas_client = kzalloc(sizeof(struct cam_cpas_client), GFP_KERNEL);
+	if (!cpas_client) {
+		mutex_unlock(&cpas_hw->hw_mutex);
+		return -ENOMEM;
+	}
+
+	rc = cam_cpas_util_insert_client_to_axi_port(cpas_core, soc_private,
+		cpas_client, client_indx);
+	if (rc) {
+		pr_err("axi_port_insert failed client_indx=%d, rc=%d\n",
+			client_indx, rc);
+		kfree(cpas_client);
+		mutex_unlock(&cpas_hw->hw_mutex);
+		return -EINVAL;
+	}
+
+	register_params->client_handle =
+		CAM_CPAS_GET_CLIENT_HANDLE(client_indx);
+	memcpy(&cpas_client->data, register_params,
+		sizeof(struct cam_cpas_register_params));
+	cpas_core->cpas_client[client_indx] = cpas_client;
+	cpas_core->registered_clients++;
+
+	mutex_unlock(&cpas_hw->hw_mutex);
+
+	CPAS_CDBG("client_indx=%d, registered_clients=%d\n",
+		client_indx, cpas_core->registered_clients);
+
+	return 0;
+}
+
+static int cam_cpas_hw_unregister_client(struct cam_hw_info *cpas_hw,
+	uint32_t client_handle)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
+	int rc = 0;
+
+	if (!CAM_CPAS_CLIENT_VALID(client_indx))
+		return -EINVAL;
+
+	mutex_lock(&cpas_hw->hw_mutex);
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+
+	if (!CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) {
+		pr_err("client not registered %d\n", client_indx);
+		rc = -EPERM;
+		goto done;
+	}
+
+	if (CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+		pr_err("Client %d is not stopped\n", client_indx);
+		rc = -EPERM;
+		goto done;
+	}
+
+	cam_cpas_util_remove_client_from_axi_port(
+		cpas_core->cpas_client[client_indx]);
+
+	CPAS_CDBG("client_indx=%d, registered_clients=%d\n",
+		client_indx, cpas_core->registered_clients);
+
+	kfree(cpas_core->cpas_client[client_indx]);
+	cpas_core->cpas_client[client_indx] = NULL;
+	cpas_core->registered_clients--;
+done:
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	mutex_unlock(&cpas_hw->hw_mutex);
+	return rc;
+}
+
+static int cam_cpas_hw_get_hw_info(void *hw_priv,
+	void *get_hw_cap_args, uint32_t arg_size)
+{
+	struct cam_hw_info *cpas_hw;
+	struct cam_cpas *cpas_core;
+	struct cam_cpas_hw_caps *hw_caps;
+
+	if (!hw_priv || !get_hw_cap_args) {
+		pr_err("Invalid arguments %pK %pK\n", hw_priv, get_hw_cap_args);
+		return -EINVAL;
+	}
+
+	if (sizeof(struct cam_cpas_hw_caps) != arg_size) {
+		pr_err("HW_CAPS size mismatch %zu %d\n",
+			sizeof(struct cam_cpas_hw_caps), arg_size);
+		return -EINVAL;
+	}
+
+	cpas_hw = (struct cam_hw_info *)hw_priv;
+	cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	hw_caps = (struct cam_cpas_hw_caps *)get_hw_cap_args;
+
+	*hw_caps = cpas_core->hw_caps;
+
+	return 0;
+}
+
+
+static int cam_cpas_hw_process_cmd(void *hw_priv,
+	uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
+{
+	int rc = -EINVAL;
+
+	if (!hw_priv || !cmd_args ||
+		(cmd_type >= CAM_CPAS_HW_CMD_INVALID)) {
+		pr_err("Invalid arguments %pK %pK %d\n", hw_priv, cmd_args,
+			cmd_type);
+		return -EINVAL;
+	}
+
+	switch (cmd_type) {
+	case CAM_CPAS_HW_CMD_REGISTER_CLIENT: {
+		struct cam_cpas_register_params *register_params;
+
+		if (sizeof(struct cam_cpas_register_params) != arg_size) {
+			pr_err("cmd_type %d, size mismatch %d\n",
+				cmd_type, arg_size);
+			break;
+		}
+
+		register_params = (struct cam_cpas_register_params *)cmd_args;
+		rc = cam_cpas_hw_register_client(hw_priv, register_params);
+		break;
+	}
+	case CAM_CPAS_HW_CMD_UNREGISTER_CLIENT: {
+		uint32_t *client_handle;
+
+		if (sizeof(uint32_t) != arg_size) {
+			pr_err("cmd_type %d, size mismatch %d\n",
+				cmd_type, arg_size);
+			break;
+		}
+
+		client_handle = (uint32_t *)cmd_args;
+		rc = cam_cpas_hw_unregister_client(hw_priv, *client_handle);
+		break;
+	}
+	case CAM_CPAS_HW_CMD_REG_WRITE: {
+		struct cam_cpas_hw_cmd_reg_read_write *reg_write;
+
+		if (sizeof(struct cam_cpas_hw_cmd_reg_read_write) !=
+			arg_size) {
+			pr_err("cmd_type %d, size mismatch %d\n",
+				cmd_type, arg_size);
+			break;
+		}
+
+		reg_write =
+			(struct cam_cpas_hw_cmd_reg_read_write *)cmd_args;
+		rc = cam_cpas_hw_reg_write(hw_priv, reg_write->client_handle,
+			reg_write->reg_base, reg_write->offset, reg_write->mb,
+			reg_write->value);
+		break;
+	}
+	case CAM_CPAS_HW_CMD_REG_READ: {
+		struct cam_cpas_hw_cmd_reg_read_write *reg_read;
+
+		if (sizeof(struct cam_cpas_hw_cmd_reg_read_write) !=
+			arg_size) {
+			pr_err("cmd_type %d, size mismatch %d\n",
+				cmd_type, arg_size);
+			break;
+		}
+
+		reg_read =
+			(struct cam_cpas_hw_cmd_reg_read_write *)cmd_args;
+		rc = cam_cpas_hw_reg_read(hw_priv,
+			reg_read->client_handle, reg_read->reg_base,
+			reg_read->offset, reg_read->mb, &reg_read->value);
+
+		break;
+	}
+	case CAM_CPAS_HW_CMD_AHB_VOTE: {
+		struct cam_cpas_hw_cmd_ahb_vote *cmd_ahb_vote;
+
+		if (sizeof(struct cam_cpas_hw_cmd_ahb_vote) != arg_size) {
+			pr_err("cmd_type %d, size mismatch %d\n",
+				cmd_type, arg_size);
+			break;
+		}
+
+		cmd_ahb_vote = (struct cam_cpas_hw_cmd_ahb_vote *)cmd_args;
+		rc = cam_cpas_hw_update_ahb_vote(hw_priv,
+			cmd_ahb_vote->client_handle, cmd_ahb_vote->ahb_vote);
+		break;
+	}
+	case CAM_CPAS_HW_CMD_AXI_VOTE: {
+		struct cam_cpas_hw_cmd_axi_vote *cmd_axi_vote;
+
+		if (sizeof(struct cam_cpas_hw_cmd_axi_vote) != arg_size) {
+			pr_err("cmd_type %d, size mismatch %d\n",
+				cmd_type, arg_size);
+			break;
+		}
+
+		cmd_axi_vote = (struct cam_cpas_hw_cmd_axi_vote *)cmd_args;
+		rc = cam_cpas_hw_update_axi_vote(hw_priv,
+			cmd_axi_vote->client_handle, cmd_axi_vote->axi_vote);
+		break;
+	}
+	default:
+		pr_err("CPAS HW command not valid =%d\n", cmd_type);
+		break;
+	}
+
+	return rc;
+}
+
+static int cam_cpas_util_client_setup(struct cam_hw_info *cpas_hw)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	int i;
+
+	for (i = 0; i < CPAS_MAX_CLIENTS; i++) {
+		mutex_init(&cpas_core->client_mutex[i]);
+		cpas_core->cpas_client[i] = NULL;
+	}
+
+	return 0;
+}
+
+static int cam_cpas_util_client_cleanup(struct cam_hw_info *cpas_hw)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	int i;
+
+	for (i = 0; i < CPAS_MAX_CLIENTS; i++) {
+		if (cpas_core->cpas_client[i]) {
+			cam_cpas_hw_unregister_client(cpas_hw, i);
+			cpas_core->cpas_client[i] = NULL;
+		}
+		mutex_destroy(&cpas_core->client_mutex[i]);
+	}
+
+	return 0;
+}
+
+static int cam_cpas_util_get_internal_ops(struct platform_device *pdev,
+	struct cam_hw_intf *hw_intf, struct cam_cpas_internal_ops *internal_ops)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	int rc;
+	const char *compat_str = NULL;
+
+	rc = of_property_read_string_index(of_node, "arch-compat", 0,
+		(const char **)&compat_str);
+	if (rc) {
+		pr_err("failed to get arch-compat rc=%d\n", rc);
+		return -EINVAL;
+	}
+
+	if (strnstr(compat_str, "camss_top", strlen(compat_str))) {
+		hw_intf->hw_type = CAM_HW_CAMSSTOP;
+		rc = cam_camsstop_get_internal_ops(internal_ops);
+	} else if (strnstr(compat_str, "cpas_top", strlen(compat_str))) {
+		hw_intf->hw_type = CAM_HW_CPASTOP;
+		rc = cam_cpastop_get_internal_ops(internal_ops);
+	} else {
+		pr_err("arch-compat %s not supported\n", compat_str);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+int cam_cpas_hw_probe(struct platform_device *pdev,
+	struct cam_hw_intf **hw_intf)
+{
+	int rc = 0;
+	int i;
+	struct cam_hw_info *cpas_hw = NULL;
+	struct cam_hw_intf *cpas_hw_intf = NULL;
+	struct cam_cpas *cpas_core = NULL;
+	struct cam_cpas_private_soc *soc_private;
+	struct cam_cpas_internal_ops *internal_ops;
+
+	cpas_hw_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!cpas_hw_intf)
+		return -ENOMEM;
+
+	cpas_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!cpas_hw) {
+		kfree(cpas_hw_intf);
+		return -ENOMEM;
+	}
+
+	cpas_core = kzalloc(sizeof(struct cam_cpas), GFP_KERNEL);
+	if (!cpas_core) {
+		kfree(cpas_hw);
+		kfree(cpas_hw_intf);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < CAM_CPAS_REG_MAX; i++)
+		cpas_core->regbase_index[i] = -1;
+
+	cpas_hw_intf->hw_priv = cpas_hw;
+	cpas_hw->core_info = cpas_core;
+
+	cpas_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	cpas_hw->soc_info.pdev = pdev;
+	cpas_hw->open_count = 0;
+	mutex_init(&cpas_hw->hw_mutex);
+	spin_lock_init(&cpas_hw->hw_lock);
+	init_completion(&cpas_hw->hw_complete);
+
+	cpas_hw_intf->hw_ops.get_hw_caps = cam_cpas_hw_get_hw_info;
+	cpas_hw_intf->hw_ops.init = cam_cpas_hw_init;
+	cpas_hw_intf->hw_ops.deinit = NULL;
+	cpas_hw_intf->hw_ops.reset = NULL;
+	cpas_hw_intf->hw_ops.reserve = NULL;
+	cpas_hw_intf->hw_ops.release = NULL;
+	cpas_hw_intf->hw_ops.start = cam_cpas_hw_start;
+	cpas_hw_intf->hw_ops.stop = cam_cpas_hw_stop;
+	cpas_hw_intf->hw_ops.read = NULL;
+	cpas_hw_intf->hw_ops.write = NULL;
+	cpas_hw_intf->hw_ops.process_cmd = cam_cpas_hw_process_cmd;
+
+	internal_ops = &cpas_core->internal_ops;
+	rc = cam_cpas_util_get_internal_ops(pdev, cpas_hw_intf, internal_ops);
+	if (rc != 0)
+		goto release_mem;
+
+	rc = cam_cpas_soc_init_resources(&cpas_hw->soc_info,
+		internal_ops->handle_irq, cpas_hw);
+	if (rc)
+		goto release_mem;
+
+	soc_private = (struct cam_cpas_private_soc *)
+		cpas_hw->soc_info.soc_private;
+	cpas_core->num_clients = soc_private->num_clients;
+
+	if (internal_ops->setup_regbase) {
+		rc = internal_ops->setup_regbase(&cpas_hw->soc_info,
+			cpas_core->regbase_index, CAM_CPAS_REG_MAX);
+		if (rc)
+			goto deinit_platform_res;
+	}
+
+	rc = cam_cpas_util_client_setup(cpas_hw);
+	if (rc) {
+		pr_err("failed in client setup, rc=%d\n", rc);
+		goto deinit_platform_res;
+	}
+
+	rc = cam_cpas_util_register_bus_client(&cpas_hw->soc_info,
+		cpas_hw->soc_info.pdev->dev.of_node,
+		&cpas_core->ahb_bus_client);
+	if (rc) {
+		pr_err("failed in ahb setup, rc=%d\n", rc);
+		goto client_cleanup;
+	}
+
+	rc = cam_cpas_util_axi_setup(cpas_core, &cpas_hw->soc_info);
+	if (rc) {
+		pr_err("failed in axi setup, rc=%d\n", rc);
+		goto ahb_cleanup;
+	}
+
+	/* Need to vote first before enabling clocks */
+	rc = cam_cpas_util_vote_default_ahb_axi(cpas_hw, true);
+	if (rc)
+		goto axi_cleanup;
+
+	rc = cam_cpas_soc_enable_resources(&cpas_hw->soc_info);
+	if (rc) {
+		pr_err("failed in soc_enable_resources, rc=%d\n", rc);
+		goto remove_default_vote;
+	}
+
+	if (internal_ops->get_hw_info) {
+		rc = internal_ops->get_hw_info(cpas_hw, &cpas_core->hw_caps);
+		if (rc) {
+			pr_err("failed in get_hw_info, rc=%d\n", rc);
+			goto disable_soc_res;
+		}
+	} else {
+		pr_err("Invalid get_hw_info\n");
+		goto disable_soc_res;
+	}
+
+	rc = cam_cpas_hw_init(cpas_hw_intf->hw_priv,
+		&cpas_core->hw_caps, sizeof(struct cam_cpas_hw_caps));
+	if (rc)
+		goto disable_soc_res;
+
+	rc = cam_cpas_soc_disable_resources(&cpas_hw->soc_info);
+	if (rc) {
+		pr_err("failed in soc_disable_resources, rc=%d\n", rc);
+		goto remove_default_vote;
+	}
+
+	rc = cam_cpas_util_vote_default_ahb_axi(cpas_hw, false);
+	if (rc)
+		goto axi_cleanup;
+
+	*hw_intf = cpas_hw_intf;
+	return 0;
+
+disable_soc_res:
+	cam_cpas_soc_disable_resources(&cpas_hw->soc_info);
+remove_default_vote:
+	cam_cpas_util_vote_default_ahb_axi(cpas_hw, false);
+axi_cleanup:
+	cam_cpas_util_axi_cleanup(cpas_core, &cpas_hw->soc_info);
+ahb_cleanup:
+	cam_cpas_util_unregister_bus_client(&cpas_core->ahb_bus_client);
+client_cleanup:
+	cam_cpas_util_client_cleanup(cpas_hw);
+deinit_platform_res:
+	cam_cpas_soc_deinit_resources(&cpas_hw->soc_info);
+release_mem:
+	mutex_destroy(&cpas_hw->hw_mutex);
+	kfree(cpas_core);
+	kfree(cpas_hw);
+	kfree(cpas_hw_intf);
+	pr_err("failed in hw probe\n");
+	return rc;
+}
+
+int cam_cpas_hw_remove(struct cam_hw_intf *cpas_hw_intf)
+{
+	struct cam_hw_info *cpas_hw;
+	struct cam_cpas *cpas_core;
+
+	if (!cpas_hw_intf) {
+		pr_err("cpas interface not initialized\n");
+		return -EINVAL;
+	}
+
+	cpas_hw = (struct cam_hw_info *)cpas_hw_intf->hw_priv;
+	cpas_core = (struct cam_cpas *)cpas_hw->core_info;
+
+	if (cpas_hw->hw_state == CAM_HW_STATE_POWER_UP) {
+		pr_err("cpas hw is in power up state\n");
+		return -EINVAL;
+	}
+
+	cam_cpas_util_axi_cleanup(cpas_core, &cpas_hw->soc_info);
+	cam_cpas_util_unregister_bus_client(&cpas_core->ahb_bus_client);
+	cam_cpas_util_client_cleanup(cpas_hw);
+	cam_cpas_soc_deinit_resources(&cpas_hw->soc_info);
+	mutex_destroy(&cpas_hw->hw_mutex);
+	kfree(cpas_core);
+	kfree(cpas_hw);
+	kfree(cpas_hw_intf);
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
new file mode 100644
index 0000000..c181302
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
@@ -0,0 +1,193 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CPAS_HW_H_
+#define _CAM_CPAS_HW_H_
+
+#include "cam_cpas_api.h"
+#include "cam_cpas_hw_intf.h"
+
+#define CPAS_MAX_CLIENTS 20
+
+#define CAM_CPAS_GET_CLIENT_IDX(handle) (handle)
+#define CAM_CPAS_GET_CLIENT_HANDLE(indx) (indx)
+
+#define CAM_CPAS_CLIENT_VALID(indx) ((indx >= 0) && (indx < CPAS_MAX_CLIENTS))
+#define CAM_CPAS_CLIENT_REGISTERED(cpas_core, indx)        \
+	((CAM_CPAS_CLIENT_VALID(indx)) && \
+	(cpas_core->cpas_client[indx]))
+#define CAM_CPAS_CLIENT_STARTED(cpas_core, indx)          \
+	((CAM_CPAS_CLIENT_REGISTERED(cpas_core, indx)) && \
+	(cpas_core->cpas_client[indx]->started))
+
+/**
+ * enum cam_cpas_access_type - Enum for Register access type
+ */
+enum cam_cpas_access_type {
+	CAM_REG_TYPE_READ,
+	CAM_REG_TYPE_WRITE,
+	CAM_REG_TYPE_READ_WRITE,
+};
+
+/**
+ * struct cam_cpas_internal_ops - CPAS Hardware layer internal ops
+ *
+ * @get_hw_info: Function pointer for get hw info
+ * @init_hw_version: Function pointer for hw init based on version
+ * @handle_irq: Function pointer for irq handling
+ * @setup_regbase: Function pointer for setup regbase indices
+ * @power_on_settings: Function pointer for hw core specific power on settings
+ *
+ */
+struct cam_cpas_internal_ops {
+	int (*get_hw_info)(struct cam_hw_info *cpas_hw,
+		struct cam_cpas_hw_caps *hw_caps);
+	int (*init_hw_version)(struct cam_hw_info *cpas_hw,
+		struct cam_cpas_hw_caps *hw_caps);
+	irqreturn_t (*handle_irq)(int irq_num, void *data);
+	int (*setup_regbase)(struct cam_hw_soc_info *soc_info,
+		int32_t regbase_index[], int32_t num_reg_map);
+	int (*power_on_settings)(struct cam_hw_info *cpas_hw);
+};
+
+/**
+ * struct cam_cpas_reg : CPAS register info
+ *
+ * @enable: Whether this reg info needs to be enabled
+ * @access_type: Register access type
+ * @masked_value: Whether this register write/read is based on mask, shift
+ * @mask: Mask for this register value
+ * @shift: Shift for this register value
+ * @value: Register value
+ *
+ */
+struct cam_cpas_reg {
+	bool enable;
+	enum cam_cpas_access_type access_type;
+	bool masked_value;
+	uint32_t offset;
+	uint32_t mask;
+	uint32_t shift;
+	uint32_t value;
+};
+
+/**
+ * struct cam_cpas_client : CPAS Client structure info
+ *
+ * @data: Client register params
+ * @started: Whether client has streamed on
+ * @ahb_level: Determined/Applied ahb level for the client
+ * @axi_vote: Determined/Applied axi vote for the client
+ * @axi_port: Client's parent axi port
+ * @axi_sibling_client: Client's siblings sharing the same axi port
+ *
+ */
+struct cam_cpas_client {
+	struct cam_cpas_register_params data;
+	bool started;
+	enum cam_vote_level ahb_level;
+	struct cam_axi_vote axi_vote;
+	struct cam_cpas_axi_port *axi_port;
+	struct list_head axi_sibling_client;
+};
+
+/**
+ * struct cam_cpas_bus_client : Bus client information
+ *
+ * @src: Bus master/src id
+ * @dst: Bus slave/dst id
+ * @pdata: Bus pdata information
+ * @client_id: Bus client id
+ * @num_usecases: Number of use cases for this client
+ * @num_paths: Number of paths for this client
+ * @curr_vote_level: current voted index
+ * @dyn_vote: Whether dynamic voting enabled
+ * @lock: Mutex lock used while voting on this client
+ * @valid: Whether bus client is valid
+ *
+ */
+struct cam_cpas_bus_client {
+	int src;
+	int dst;
+	struct msm_bus_scale_pdata *pdata;
+	uint32_t client_id;
+	int num_usecases;
+	int num_paths;
+	unsigned int curr_vote_level;
+	bool dyn_vote;
+	struct mutex lock;
+	bool valid;
+};
+
+/**
+ * struct cam_cpas_axi_port : AXI port information
+ *
+ * @sibling_port: Sibling AXI ports
+ * @clients_list_head: List head pointing to list of clients sharing this port
+ * @lock: Mutex lock for accessing this port
+ * @camnoc_bus: CAMNOC bus client info for this port
+ * @mnoc_bus: MNOC bus client info for this port
+ * @axi_port_name: Name of this AXI port
+ * @axi_port_node: Node representing this AXI Port
+ * @axi_port_mnoc_node: Node representing mnoc in this AXI Port
+ * @axi_port_camnoc_node: Node representing camnoc in this AXI Port
+ *
+ */
+struct cam_cpas_axi_port {
+	struct list_head sibling_port;
+	struct list_head clients_list_head;
+	struct mutex lock;
+	struct cam_cpas_bus_client camnoc_bus;
+	struct cam_cpas_bus_client mnoc_bus;
+	const char *axi_port_name;
+	struct device_node *axi_port_node;
+	struct device_node *axi_port_mnoc_node;
+	struct device_node *axi_port_camnoc_node;
+};
+
+/**
+ * struct cam_cpas : CPAS core data structure info
+ *
+ * @hw_caps: CPAS hw capabilities
+ * @cpas_client: Array of pointers to CPAS clients info
+ * @client_mutex: Mutex for accessing client info
+ * @num_clients: Total number of clients that CPAS supports
+ * @registered_clients: Number of Clients registered currently
+ * @streamon_clients: Number of Clients that are in start state currently
+ * @regbase_index: Register base indices for CPAS register base IDs
+ * @ahb_bus_client: AHB Bus client info
+ * @axi_ports_list_head: Head pointing to list of AXI ports
+ * @internal_ops: CPAS HW internal ops
+ *
+ */
+struct cam_cpas {
+	struct cam_cpas_hw_caps hw_caps;
+	struct cam_cpas_client *cpas_client[CPAS_MAX_CLIENTS];
+	struct mutex client_mutex[CPAS_MAX_CLIENTS];
+	uint32_t num_clients;
+	uint32_t registered_clients;
+	uint32_t streamon_clients;
+	int32_t regbase_index[CAM_CPAS_REG_MAX];
+	struct cam_cpas_bus_client ahb_bus_client;
+	struct list_head axi_ports_list_head;
+	struct cam_cpas_internal_ops internal_ops;
+};
+
+int cam_camsstop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops);
+int cam_cpastop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops);
+
+int cam_cpas_util_reg_update(struct cam_hw_info *cpas_hw,
+	enum cam_cpas_reg_base reg_base, struct cam_cpas_reg *reg_info);
+int cam_cpas_util_get_string_index(const char **strings,
+	uint32_t num_strings, char *matching_string, uint32_t *index);
+
+#endif /* _CAM_CPAS_HW_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h
new file mode 100644
index 0000000..d2c3e06
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h
@@ -0,0 +1,137 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CPAS_HW_INTF_H_
+#define _CAM_CPAS_HW_INTF_H_
+
+#include <linux/platform_device.h>
+
+#include "cam_cpas_api.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+
+#ifdef CONFIG_CAM_CPAS_DBG
+#define CPAS_CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CPAS_CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+#undef pr_fmt
+#define pr_fmt(fmt) "CAM-CPAS %s:%d " fmt, __func__, __LINE__
+
+#define BITS_MASK_SHIFT(x, mask, shift) (((x) & (mask)) >> shift)
+
+/**
+ * enum cam_cpas_hw_type - Enum for CPAS HW type
+ */
+enum cam_cpas_hw_type {
+	CAM_HW_CPASTOP,
+	CAM_HW_CAMSSTOP,
+};
+
+/**
+ * enum cam_cpas_hw_cmd_process - Enum for CPAS HW process command type
+ */
+enum cam_cpas_hw_cmd_process {
+	CAM_CPAS_HW_CMD_REGISTER_CLIENT,
+	CAM_CPAS_HW_CMD_UNREGISTER_CLIENT,
+	CAM_CPAS_HW_CMD_REG_WRITE,
+	CAM_CPAS_HW_CMD_REG_READ,
+	CAM_CPAS_HW_CMD_AHB_VOTE,
+	CAM_CPAS_HW_CMD_AXI_VOTE,
+	CAM_CPAS_HW_CMD_INVALID,
+};
+
+/**
+ * struct cam_cpas_hw_cmd_reg_read_write : CPAS cmd struct for reg read, write
+ *
+ * @client_handle: Client handle
+ * @reg_base: Register base type
+ * @offset: Register offset
+ * @value: Register value
+ * @mb: Whether to do operation with memory barrier
+ *
+ */
+struct cam_cpas_hw_cmd_reg_read_write {
+	uint32_t client_handle;
+	enum cam_cpas_reg_base reg_base;
+	uint32_t offset;
+	uint32_t value;
+	bool mb;
+};
+
+/**
+ * struct cam_cpas_hw_cmd_ahb_vote : CPAS cmd struct for AHB vote
+ *
+ * @client_handle: Client handle
+ * @ahb_vote: AHB voting info
+ *
+ */
+struct cam_cpas_hw_cmd_ahb_vote {
+	uint32_t client_handle;
+	struct cam_ahb_vote *ahb_vote;
+};
+
+/**
+ * struct cam_cpas_hw_cmd_axi_vote : CPAS cmd struct for AXI vote
+ *
+ * @client_handle: Client handle
+ * @axi_vote: axi bandwidth vote
+ *
+ */
+struct cam_cpas_hw_cmd_axi_vote {
+	uint32_t client_handle;
+	struct cam_axi_vote *axi_vote;
+};
+
+/**
+ * struct cam_cpas_hw_cmd_start : CPAS cmd struct for start
+ *
+ * @client_handle: Client handle
+ *
+ */
+struct cam_cpas_hw_cmd_start {
+	uint32_t client_handle;
+	struct cam_ahb_vote *ahb_vote;
+	struct cam_axi_vote *axi_vote;
+};
+
+/**
+ * struct cam_cpas_hw_cmd_stop : CPAS cmd struct for stop
+ *
+ * @client_handle: Client handle
+ *
+ */
+struct cam_cpas_hw_cmd_stop {
+	uint32_t client_handle;
+};
+
+/**
+ * struct cam_cpas_hw_caps : CPAS HW capabilities
+ *
+ * @camera_family: Camera family type
+ * @camera_version: Camera version
+ * @cpas_version: CPAS version
+ * @camera_capability: Camera hw capabilities
+ *
+ */
+struct cam_cpas_hw_caps {
+	uint32_t camera_family;
+	struct cam_hw_version camera_version;
+	struct cam_hw_version cpas_version;
+	uint32_t camera_capability;
+};
+
+int cam_cpas_hw_probe(struct platform_device *pdev,
+	struct cam_hw_intf **hw_intf);
+int cam_cpas_hw_remove(struct cam_hw_intf *cpas_hw_intf);
+
+#endif /* _CAM_CPAS_HW_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
new file mode 100644
index 0000000..fdebdc7
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
@@ -0,0 +1,605 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <media/cam_cpas.h>
+#include <media/cam_req_mgr.h>
+
+#include "cam_subdev.h"
+#include "cam_cpas_hw_intf.h"
+
+#define CAM_CPAS_DEV_NAME    "cam-cpas"
+#define CAM_CPAS_INTF_INITIALIZED() (g_cpas_intf && g_cpas_intf->probe_done)
+
+/**
+ * struct cam_cpas_intf : CPAS interface
+ *
+ * @pdev: Platform device
+ * @subdev: Subdev info
+ * @hw_intf: CPAS HW interface
+ * @hw_caps: CPAS HW capabilities
+ * @intf_lock: CPAS interface mutex
+ * @open_cnt: CPAS subdev open count
+ * @probe_done: Whether CPAS probe completed
+ *
+ */
+struct cam_cpas_intf {
+	struct platform_device *pdev;
+	struct cam_subdev subdev;
+	struct cam_hw_intf *hw_intf;
+	struct cam_cpas_hw_caps hw_caps;
+	struct mutex intf_lock;
+	uint32_t open_cnt;
+	bool probe_done;
+};
+
+static struct cam_cpas_intf *g_cpas_intf;
+
+int cam_cpas_get_hw_info(uint32_t *camera_family,
+	struct cam_hw_version *camera_version)
+{
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		pr_err("cpas intf not initialized\n");
+		return -ENODEV;
+	}
+
+	if (!camera_family || !camera_version) {
+		pr_err("invalid input %pK %pK\n", camera_family,
+			camera_version);
+		return -EINVAL;
+	}
+
+	*camera_family = g_cpas_intf->hw_caps.camera_family;
+	*camera_version = g_cpas_intf->hw_caps.camera_version;
+
+	return 0;
+}
+EXPORT_SYMBOL(cam_cpas_get_hw_info);
+
+int cam_cpas_reg_write(uint32_t client_handle,
+	enum cam_cpas_reg_base reg_base, uint32_t offset, bool mb,
+	uint32_t value)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		pr_err("cpas intf not initialized\n");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		struct cam_cpas_hw_cmd_reg_read_write cmd_reg_write;
+
+		cmd_reg_write.client_handle = client_handle;
+		cmd_reg_write.reg_base = reg_base;
+		cmd_reg_write.offset = offset;
+		cmd_reg_write.value = value;
+		cmd_reg_write.mb = mb;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_REG_WRITE, &cmd_reg_write,
+			sizeof(struct cam_cpas_hw_cmd_reg_read_write));
+		if (rc)
+			pr_err("Failed in process_cmd, rc=%d\n", rc);
+	} else {
+		pr_err("Invalid process_cmd ops\n");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_reg_write);
+
+int cam_cpas_reg_read(uint32_t client_handle,
+	enum cam_cpas_reg_base reg_base, uint32_t offset, bool mb,
+	uint32_t *value)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		pr_err("cpas intf not initialized\n");
+		return -ENODEV;
+	}
+
+	if (!value) {
+		pr_err("Invalid arg value\n");
+		return -EINVAL;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		struct cam_cpas_hw_cmd_reg_read_write cmd_reg_read;
+
+		cmd_reg_read.client_handle = client_handle;
+		cmd_reg_read.reg_base = reg_base;
+		cmd_reg_read.offset = offset;
+		cmd_reg_read.mb = mb;
+		cmd_reg_read.value = 0;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_REG_READ, &cmd_reg_read,
+			sizeof(struct cam_cpas_hw_cmd_reg_read_write));
+		if (rc) {
+			pr_err("Failed in process_cmd, rc=%d\n", rc);
+			return rc;
+		}
+
+		*value = cmd_reg_read.value;
+	} else {
+		pr_err("Invalid process_cmd ops\n");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_reg_read);
+
+int cam_cpas_update_axi_vote(uint32_t client_handle,
+	struct cam_axi_vote *axi_vote)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		pr_err("cpas intf not initialized\n");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		struct cam_cpas_hw_cmd_axi_vote cmd_axi_vote;
+
+		cmd_axi_vote.client_handle = client_handle;
+		cmd_axi_vote.axi_vote = axi_vote;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_AXI_VOTE, &cmd_axi_vote,
+			sizeof(struct cam_cpas_hw_cmd_axi_vote));
+		if (rc)
+			pr_err("Failed in process_cmd, rc=%d\n", rc);
+	} else {
+		pr_err("Invalid process_cmd ops\n");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_update_axi_vote);
+
+int cam_cpas_update_ahb_vote(uint32_t client_handle,
+	struct cam_ahb_vote *ahb_vote)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		pr_err("cpas intf not initialized\n");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		struct cam_cpas_hw_cmd_ahb_vote cmd_ahb_vote;
+
+		cmd_ahb_vote.client_handle = client_handle;
+		cmd_ahb_vote.ahb_vote = ahb_vote;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_AHB_VOTE, &cmd_ahb_vote,
+			sizeof(struct cam_cpas_hw_cmd_ahb_vote));
+		if (rc)
+			pr_err("Failed in process_cmd, rc=%d\n", rc);
+	} else {
+		pr_err("Invalid process_cmd ops\n");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_update_ahb_vote);
+
+int cam_cpas_stop(uint32_t client_handle)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		pr_err("cpas intf not initialized\n");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.stop) {
+		struct cam_cpas_hw_cmd_stop cmd_hw_stop;
+
+		cmd_hw_stop.client_handle = client_handle;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.stop(
+			g_cpas_intf->hw_intf->hw_priv, &cmd_hw_stop,
+			sizeof(struct cam_cpas_hw_cmd_stop));
+		if (rc)
+			pr_err("Failed in stop, rc=%d\n", rc);
+	} else {
+		pr_err("Invalid stop ops\n");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_stop);
+
+int cam_cpas_start(uint32_t client_handle,
+	struct cam_ahb_vote *ahb_vote, struct cam_axi_vote *axi_vote)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		pr_err("cpas intf not initialized\n");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.start) {
+		struct cam_cpas_hw_cmd_start cmd_hw_start;
+
+		cmd_hw_start.client_handle = client_handle;
+		cmd_hw_start.ahb_vote = ahb_vote;
+		cmd_hw_start.axi_vote = axi_vote;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.start(
+			g_cpas_intf->hw_intf->hw_priv, &cmd_hw_start,
+			sizeof(struct cam_cpas_hw_cmd_start));
+		if (rc)
+			pr_err("Failed in start, rc=%d\n", rc);
+	} else {
+		pr_err("Invalid start ops\n");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_start);
+
+int cam_cpas_unregister_client(uint32_t client_handle)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		pr_err("cpas intf not initialized\n");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_UNREGISTER_CLIENT,
+			&client_handle, sizeof(uint32_t));
+		if (rc)
+			pr_err("Failed in process_cmd, rc=%d\n", rc);
+	} else {
+		pr_err("Invalid process_cmd ops\n");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_unregister_client);
+
+int cam_cpas_register_client(
+	struct cam_cpas_register_params *register_params)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		pr_err("cpas intf not initialized\n");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_REGISTER_CLIENT, register_params,
+			sizeof(struct cam_cpas_register_params));
+		if (rc)
+			pr_err("Failed in process_cmd, rc=%d\n", rc);
+	} else {
+		pr_err("Invalid process_cmd ops\n");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_register_client);
+
+int cam_cpas_subdev_cmd(struct cam_cpas_intf *cpas_intf,
+	struct cam_control *cmd)
+{
+	int rc;
+
+	if (!cmd) {
+		pr_err("Invalid input cmd\n");
+		return -EINVAL;
+	}
+
+	switch (cmd->op_code) {
+	case CAM_QUERY_CAP: {
+		struct cam_cpas_query_cap query;
+
+		rc = copy_from_user(&query, (void __user *) cmd->handle,
+			sizeof(query));
+		if (rc) {
+			pr_err("Failed in copy from user, rc=%d\n", rc);
+			break;
+		}
+
+		rc = cam_cpas_get_hw_info(&query.camera_family,
+			&query.camera_version);
+		if (rc)
+			break;
+
+		rc = copy_to_user((void __user *) cmd->handle, &query,
+			sizeof(query));
+		if (rc)
+			pr_err("Failed in copy to user, rc=%d\n", rc);
+
+		break;
+	}
+	default:
+		pr_err("Unknown op code %d for CPAS\n", cmd->op_code);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+static int cam_cpas_subdev_open(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
+
+	if (!cpas_intf || !cpas_intf->probe_done) {
+		pr_err("CPAS not initialized\n");
+		return -ENODEV;
+	}
+
+	mutex_lock(&cpas_intf->intf_lock);
+	cpas_intf->open_cnt++;
+	CPAS_CDBG("CPAS Subdev open count %d\n", cpas_intf->open_cnt);
+	mutex_unlock(&cpas_intf->intf_lock);
+
+	return 0;
+}
+
+static int cam_cpas_subdev_close(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
+
+	if (!cpas_intf || !cpas_intf->probe_done) {
+		pr_err("CPAS not initialized\n");
+		return -ENODEV;
+	}
+
+	mutex_lock(&cpas_intf->intf_lock);
+	cpas_intf->open_cnt--;
+	CPAS_CDBG("CPAS Subdev close count %d\n", cpas_intf->open_cnt);
+	mutex_unlock(&cpas_intf->intf_lock);
+
+	return 0;
+}
+
+static long cam_cpas_subdev_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	int32_t rc;
+	struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
+
+	if (!cpas_intf || !cpas_intf->probe_done) {
+		pr_err("CPAS not initialized\n");
+		return -ENODEV;
+	}
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_cpas_subdev_cmd(cpas_intf, (struct cam_control *) arg);
+		break;
+	default:
+		pr_err("Invalid command %d for CPAS!\n", cmd);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static long cam_cpas_subdev_compat_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, unsigned long arg)
+{
+	int32_t rc;
+	struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
+
+	if (!cpas_intf || !cpas_intf->probe_done) {
+		pr_err("CPAS not initialized\n");
+		return -ENODEV;
+	}
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_cpas_subdev_cmd(cpas_intf, (struct cam_control *) arg);
+		break;
+	default:
+		pr_err("Invalid command %d for CPAS!\n", cmd);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+#endif
+
+static struct v4l2_subdev_core_ops cpas_subdev_core_ops = {
+	.ioctl = cam_cpas_subdev_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl32 = cam_cpas_subdev_compat_ioctl,
+#endif
+};
+
+static const struct v4l2_subdev_ops cpas_subdev_ops = {
+	.core = &cpas_subdev_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops cpas_subdev_intern_ops = {
+	.open = cam_cpas_subdev_open,
+	.close = cam_cpas_subdev_close,
+};
+
+static int cam_cpas_subdev_register(struct platform_device *pdev)
+{
+	int rc;
+	struct cam_subdev *subdev;
+
+	if (!g_cpas_intf)
+		return -EINVAL;
+
+	subdev = &g_cpas_intf->subdev;
+
+	subdev->name = CAM_CPAS_DEV_NAME;
+	subdev->pdev = pdev;
+	subdev->ops = &cpas_subdev_ops;
+	subdev->internal_ops = &cpas_subdev_intern_ops;
+	subdev->token = g_cpas_intf;
+	subdev->sd_flags =
+		V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+	subdev->ent_function = CAM_CPAS_DEVICE_TYPE;
+
+	rc = cam_register_subdev(subdev);
+	if (rc) {
+		pr_err("failed register subdev: %s!\n", CAM_CPAS_DEV_NAME);
+		return rc;
+	}
+
+	platform_set_drvdata(g_cpas_intf->pdev, g_cpas_intf);
+	return rc;
+}
+
+static int cam_cpas_dev_probe(struct platform_device *pdev)
+{
+	struct cam_cpas_hw_caps *hw_caps;
+	struct cam_hw_intf *hw_intf;
+	int rc;
+
+	if (g_cpas_intf) {
+		pr_err("cpas dev proble already done\n");
+		return -EALREADY;
+	}
+
+	g_cpas_intf = kzalloc(sizeof(*g_cpas_intf), GFP_KERNEL);
+	if (!g_cpas_intf)
+		return -ENOMEM;
+
+	mutex_init(&g_cpas_intf->intf_lock);
+	g_cpas_intf->pdev = pdev;
+
+	rc = cam_cpas_hw_probe(pdev, &g_cpas_intf->hw_intf);
+	if (rc || (g_cpas_intf->hw_intf == NULL)) {
+		pr_err("Failed in hw probe, rc=%d\n", rc);
+		goto error_destroy_mem;
+	}
+
+	hw_intf = g_cpas_intf->hw_intf;
+	hw_caps = &g_cpas_intf->hw_caps;
+	if (hw_intf->hw_ops.get_hw_caps) {
+		rc = hw_intf->hw_ops.get_hw_caps(hw_intf->hw_priv,
+			hw_caps, sizeof(struct cam_cpas_hw_caps));
+		if (rc) {
+			pr_err("Failed in get_hw_caps, rc=%d\n", rc);
+			goto error_hw_remove;
+		}
+	} else {
+		pr_err("Invalid get_hw_caps ops\n");
+		goto error_hw_remove;
+	}
+
+	rc = cam_cpas_subdev_register(pdev);
+	if (rc)
+		goto error_hw_remove;
+
+	g_cpas_intf->probe_done = true;
+	CPAS_CDBG("CPAS INTF Probe success %d, %d.%d.%d, %d.%d.%d, 0x%x\n",
+		hw_caps->camera_family, hw_caps->camera_version.major,
+		hw_caps->camera_version.minor, hw_caps->camera_version.incr,
+		hw_caps->cpas_version.major, hw_caps->cpas_version.minor,
+		hw_caps->cpas_version.incr, hw_caps->camera_capability);
+
+	return rc;
+
+error_hw_remove:
+	cam_cpas_hw_remove(g_cpas_intf->hw_intf);
+error_destroy_mem:
+	mutex_destroy(&g_cpas_intf->intf_lock);
+	kfree(g_cpas_intf);
+	g_cpas_intf = NULL;
+	pr_err("CPAS probe failed\n");
+	return rc;
+}
+
+static int cam_cpas_dev_remove(struct platform_device *dev)
+{
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		pr_err("cpas intf not initialized\n");
+		return -ENODEV;
+	}
+
+	mutex_lock(&g_cpas_intf->intf_lock);
+	cam_unregister_subdev(&g_cpas_intf->subdev);
+	cam_cpas_hw_remove(g_cpas_intf->hw_intf);
+	mutex_unlock(&g_cpas_intf->intf_lock);
+	mutex_destroy(&g_cpas_intf->intf_lock);
+	kfree(g_cpas_intf);
+	g_cpas_intf = NULL;
+
+	return 0;
+}
+
+static const struct of_device_id cam_cpas_dt_match[] = {
+	{.compatible = "qcom,cam-cpas"},
+	{}
+};
+
+static struct platform_driver cam_cpas_driver = {
+	.probe = cam_cpas_dev_probe,
+	.remove = cam_cpas_dev_remove,
+	.driver = {
+		.name = CAM_CPAS_DEV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = cam_cpas_dt_match,
+	},
+};
+
+static int __init cam_cpas_dev_init_module(void)
+{
+	return platform_driver_register(&cam_cpas_driver);
+}
+
+static void __exit cam_cpas_dev_exit_module(void)
+{
+	platform_driver_unregister(&cam_cpas_driver);
+}
+
+module_init(cam_cpas_dev_init_module);
+module_exit(cam_cpas_dev_exit_module);
+MODULE_DESCRIPTION("MSM CPAS driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
new file mode 100644
index 0000000..0a8e6bb
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
@@ -0,0 +1,174 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "cam_cpas_api.h"
+#include "cam_cpas_hw_intf.h"
+#include "cam_cpas_hw.h"
+#include "cam_cpas_soc.h"
+
+int cam_cpas_get_custom_dt_info(struct platform_device *pdev,
+	struct cam_cpas_private_soc *soc_private)
+{
+	struct device_node *of_node;
+	int count = 0, i = 0, rc = 0;
+
+	if (!soc_private || !pdev) {
+		pr_err("invalid input arg %pK %pK\n", soc_private, pdev);
+		return -EINVAL;
+	}
+
+	of_node = pdev->dev.of_node;
+
+	rc = of_property_read_string_index(of_node, "arch-compat", 0,
+		(const char **)&soc_private->arch_compat);
+	if (rc) {
+		pr_err("device %s failed to read arch-compat\n", pdev->name);
+		return rc;
+	}
+
+	soc_private->client_id_based = of_property_read_bool(of_node,
+		"client-id-based");
+
+	count = of_property_count_strings(of_node, "client-names");
+	if (count <= 0) {
+		pr_err("no client-names found\n");
+		count = 0;
+		return -EINVAL;
+	}
+	soc_private->num_clients = count;
+	CPAS_CDBG("arch-compat=%s, client_id_based = %d, num_clients=%d\n",
+		soc_private->arch_compat, soc_private->client_id_based,
+		soc_private->num_clients);
+
+	for (i = 0; i < soc_private->num_clients; i++) {
+		rc = of_property_read_string_index(of_node,
+			"client-names", i, &soc_private->client_name[i]);
+		if (rc) {
+			pr_err("no client-name at cnt=%d\n", i);
+			return -ENODEV;
+		}
+		CPAS_CDBG("Client[%d] : %s\n", i, soc_private->client_name[i]);
+	}
+
+	count = of_property_count_strings(of_node, "client-axi-port-names");
+	if ((count <= 0) || (count != soc_private->num_clients)) {
+		pr_err("incorrect client-axi-port-names info %d %d\n",
+			count, soc_private->num_clients);
+		count = 0;
+		return -EINVAL;
+	}
+
+	for (i = 0; i < soc_private->num_clients; i++) {
+		rc = of_property_read_string_index(of_node,
+			"client-axi-port-names", i,
+			&soc_private->client_axi_port_name[i]);
+		if (rc) {
+			pr_err("no client-name at cnt=%d\n", i);
+			return -ENODEV;
+		}
+		CPAS_CDBG("Client AXI Port[%d] : %s\n", i,
+			soc_private->client_axi_port_name[i]);
+	}
+
+	soc_private->axi_camnoc_based = of_property_read_bool(of_node,
+		"client-bus-camnoc-based");
+
+	return 0;
+}
+
+int cam_cpas_soc_init_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t irq_handler, void *irq_data)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc) {
+		pr_err("failed in get_dt_properties, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (soc_info->irq_line && !irq_handler) {
+		pr_err("Invalid IRQ handler\n");
+		return -EINVAL;
+	}
+
+	rc = cam_soc_util_request_platform_resource(soc_info, irq_handler,
+		irq_data);
+	if (rc) {
+		pr_err("failed in request_platform_resource, rc=%d\n", rc);
+		return rc;
+	}
+
+	soc_info->soc_private = kzalloc(sizeof(struct cam_cpas_private_soc),
+		GFP_KERNEL);
+	if (!soc_info->soc_private) {
+		rc = -ENOMEM;
+		goto release_res;
+	}
+
+	rc = cam_cpas_get_custom_dt_info(soc_info->pdev, soc_info->soc_private);
+	if (rc) {
+		pr_err("failed in get_custom_info, rc=%d\n", rc);
+		goto free_soc_private;
+	}
+
+	return rc;
+
+free_soc_private:
+	kfree(soc_info->soc_private);
+release_res:
+	cam_soc_util_release_platform_resource(soc_info);
+	return rc;
+}
+
+int cam_cpas_soc_deinit_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc;
+
+	rc = cam_soc_util_release_platform_resource(soc_info);
+	if (rc)
+		pr_err("release platform failed, rc=%d\n", rc);
+
+	kfree(soc_info->soc_private);
+	soc_info->soc_private = NULL;
+
+	return rc;
+}
+
+int cam_cpas_soc_enable_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true, true);
+	if (rc)
+		pr_err("enable platform resource failed, rc=%d\n", rc);
+
+	return rc;
+}
+
+int cam_cpas_soc_disable_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+	if (rc)
+		pr_err("disable platform failed, rc=%d\n", rc);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
new file mode 100644
index 0000000..fdd9386
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
@@ -0,0 +1,47 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CPAS_SOC_H_
+#define _CAM_CPAS_SOC_H_
+
+#include "cam_soc_util.h"
+
+#define CAM_CPAS_MAX_CLIENTS 20
+
+/**
+ * struct cam_cpas_private_soc : CPAS private DT info
+ *
+ * @arch_compat: ARCH compatible string
+ * @client_id_based: Whether clients are id based
+ * @num_clients: Number of clients supported
+ * @client_name: Client names
+ * @axi_camnoc_based: Whether AXI access is camnoc based
+ * @client_axi_port_name: AXI Port name for each client
+ * @axi_port_list_node : Node representing AXI Ports list
+ *
+ */
+struct cam_cpas_private_soc {
+	const char *arch_compat;
+	bool client_id_based;
+	uint32_t num_clients;
+	const char *client_name[CAM_CPAS_MAX_CLIENTS];
+	bool axi_camnoc_based;
+	const char *client_axi_port_name[CAM_CPAS_MAX_CLIENTS];
+	struct device_node *axi_port_list_node;
+};
+
+int cam_cpas_soc_init_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t vfe_irq_handler, void *irq_data);
+int cam_cpas_soc_deinit_resources(struct cam_hw_soc_info *soc_info);
+int cam_cpas_soc_enable_resources(struct cam_hw_soc_info *soc_info);
+int cam_cpas_soc_disable_resources(struct cam_hw_soc_info *soc_info);
+#endif /* _CAM_CPAS_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/camss_top/Makefile b/drivers/media/platform/msm/camera/cam_cpas/camss_top/Makefile
new file mode 100644
index 0000000..bce10cb
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/camss_top/Makefile
@@ -0,0 +1,6 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_camsstop_hw.o
diff --git a/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c b/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c
new file mode 100644
index 0000000..fa8ab89
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c
@@ -0,0 +1,87 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_cpas_hw_intf.h"
+#include "cam_cpas_hw.h"
+#include "cam_cpas_soc.h"
+
+int cam_camsstop_get_hw_info(struct cam_hw_info *cpas_hw,
+	struct cam_cpas_hw_caps *hw_caps)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	int32_t reg_indx = cpas_core->regbase_index[CAM_CPAS_REG_CAMSS];
+	uint32_t reg_value;
+
+	if (reg_indx == -1)
+		return -EINVAL;
+
+	hw_caps->camera_family = CAM_FAMILY_CAMERA_SS;
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x0);
+	hw_caps->camera_version.major =
+		BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1c);
+	hw_caps->camera_version.minor =
+		BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+	hw_caps->camera_version.incr =
+		BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+	CPAS_CDBG("Family %d, version %d.%d.%d\n",
+		hw_caps->camera_family, hw_caps->camera_version.major,
+		hw_caps->camera_version.minor, hw_caps->camera_version.incr);
+
+	return 0;
+}
+
+int cam_camsstop_setup_regbase_indices(struct cam_hw_soc_info *soc_info,
+	int32_t regbase_index[], int32_t num_reg_map)
+{
+	uint32_t index;
+	int rc;
+
+	if (num_reg_map > CAM_CPAS_REG_MAX) {
+		pr_err("invalid num_reg_map=%d\n", num_reg_map);
+		return -EINVAL;
+	}
+
+	if (soc_info->num_mem_block > CAM_SOC_MAX_BLOCK) {
+		pr_err("invalid num_mem_block=%d\n", soc_info->num_mem_block);
+		return -EINVAL;
+	}
+
+	rc = cam_cpas_util_get_string_index(soc_info->mem_block_name,
+		soc_info->num_mem_block, "cam_camss", &index);
+	if ((rc == 0) && (index < num_reg_map)) {
+		regbase_index[CAM_CPAS_REG_CAMSS] = index;
+	} else {
+		pr_err("regbase not found for CAM_CPAS_REG_CAMSS\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int cam_camsstop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops)
+{
+	if (!internal_ops) {
+		pr_err("invalid NULL param\n");
+		return -EINVAL;
+	}
+
+	internal_ops->get_hw_info = cam_camsstop_get_hw_info;
+	internal_ops->init_hw_version = NULL;
+	internal_ops->handle_irq = NULL;
+	internal_ops->setup_regbase = cam_camsstop_setup_regbase_indices;
+	internal_ops->power_on_settings = NULL;
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/Makefile b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/Makefile
new file mode 100644
index 0000000..820a0df
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/Makefile
@@ -0,0 +1,6 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_cpastop_hw.o
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
new file mode 100644
index 0000000..415de47
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
@@ -0,0 +1,301 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/timer.h>
+
+#include "cam_cpas_hw_intf.h"
+#include "cam_cpas_hw.h"
+#include "cam_cpastop_hw.h"
+#include "cam_io_util.h"
+#include "cam_cpas_soc.h"
+#include "cpastop100.h"
+
+struct cam_camnoc_info *camnoc_info;
+
+static int cam_cpastop_get_hw_info(struct cam_hw_info *cpas_hw,
+	struct cam_cpas_hw_caps *hw_caps)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	int32_t reg_indx = cpas_core->regbase_index[CAM_CPAS_REG_CPASTOP];
+	uint32_t reg_value;
+
+	if (reg_indx == -1)
+		return -EINVAL;
+
+	hw_caps->camera_family = CAM_FAMILY_CPAS_SS;
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x0);
+	hw_caps->camera_version.major =
+		BITS_MASK_SHIFT(reg_value, 0xff0000, 0x10);
+	hw_caps->camera_version.minor =
+		BITS_MASK_SHIFT(reg_value, 0xff00, 0x8);
+	hw_caps->camera_version.incr =
+		BITS_MASK_SHIFT(reg_value, 0xff, 0x0);
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x4);
+	hw_caps->cpas_version.major =
+		BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1c);
+	hw_caps->cpas_version.minor =
+		BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+	hw_caps->cpas_version.incr =
+		BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x8);
+	hw_caps->camera_capability = reg_value;
+
+	CPAS_CDBG("Family %d, version %d.%d.%d, cpas %d.%d.%d, cap 0x%x\n",
+		hw_caps->camera_family, hw_caps->camera_version.major,
+		hw_caps->camera_version.minor, hw_caps->camera_version.incr,
+		hw_caps->cpas_version.major, hw_caps->cpas_version.minor,
+		hw_caps->cpas_version.incr, hw_caps->camera_capability);
+
+	return 0;
+}
+
+static int cam_cpastop_setup_regbase_indices(struct cam_hw_soc_info *soc_info,
+	int32_t regbase_index[], int32_t num_reg_map)
+{
+	uint32_t index;
+	int rc;
+
+	if (num_reg_map > CAM_CPAS_REG_MAX) {
+		pr_err("invalid num_reg_map=%d\n", num_reg_map);
+		return -EINVAL;
+	}
+
+	if (soc_info->num_mem_block > CAM_SOC_MAX_BLOCK) {
+		pr_err("invalid num_mem_block=%d\n", soc_info->num_mem_block);
+		return -EINVAL;
+	}
+
+	rc = cam_cpas_util_get_string_index(soc_info->mem_block_name,
+		soc_info->num_mem_block, "cam_cpas_top", &index);
+	if ((rc == 0) && (index < num_reg_map)) {
+		regbase_index[CAM_CPAS_REG_CPASTOP] = index;
+	} else {
+		pr_err("regbase not found for CPASTOP, rc=%d, %d %d\n",
+			rc, index, num_reg_map);
+		return -EINVAL;
+	}
+
+	rc = cam_cpas_util_get_string_index(soc_info->mem_block_name,
+		soc_info->num_mem_block, "cam_camnoc", &index);
+	if ((rc == 0) && (index < num_reg_map)) {
+		regbase_index[CAM_CPAS_REG_CAMNOC] = index;
+	} else {
+		pr_err("regbase not found for CAMNOC, rc=%d, %d %d\n",
+			rc, index, num_reg_map);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int cam_cpastop_handle_errlogger(struct cam_cpas *cpas_core,
+	struct cam_hw_soc_info *soc_info)
+{
+	uint32_t reg_value;
+	int i;
+	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+
+	for (i = 0; i < camnoc_info->error_logger_size; i++) {
+		reg_value = cam_io_r_mb(
+			soc_info->reg_map[camnoc_index].mem_base +
+			camnoc_info->error_logger[i]);
+		pr_err("ErrorLogger[%d] : 0x%x\n", i, reg_value);
+	}
+
+	return 0;
+}
+
+static int cam_cpastop_handle_ubwc_err(struct cam_cpas *cpas_core,
+	struct cam_hw_soc_info *soc_info, int i)
+{
+	uint32_t reg_value;
+	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->irq_err[i].err_status.offset);
+
+	pr_err("Dumping ubwc error status : 0x%x\n", reg_value);
+
+	return 0;
+}
+
+static int cam_cpastop_handle_ahb_timeout_err(struct cam_hw_info *cpas_hw)
+{
+	pr_err("ahb timout error\n");
+
+	return 0;
+}
+
+static int cam_cpastop_disable_test_irq(struct cam_hw_info *cpas_hw)
+{
+	camnoc_info->irq_sbm->sbm_clear.value &= ~0x4;
+	camnoc_info->irq_sbm->sbm_enable.value &= ~0x100;
+	camnoc_info->irq_err[CAM_CAMNOC_HW_IRQ_CAMNOC_TEST].enable = false;
+
+	return 0;
+}
+
+static int cam_cpastop_reset_irq(struct cam_hw_info *cpas_hw)
+{
+	int i;
+
+	cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+		&camnoc_info->irq_sbm->sbm_clear);
+	for (i = 0; i < camnoc_info->irq_err_size; i++) {
+		if (camnoc_info->irq_err[i].enable)
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->irq_err[i].err_clear);
+	}
+
+	cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+		&camnoc_info->irq_sbm->sbm_enable);
+	for (i = 0; i < camnoc_info->irq_err_size; i++) {
+		if (camnoc_info->irq_err[i].enable)
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->irq_err[i].err_enable);
+	}
+
+	return 0;
+}
+
+irqreturn_t cam_cpastop_handle_irq(int irq_num, void *data)
+{
+	uint32_t irq_status;
+	struct cam_hw_info *cpas_hw = (struct cam_hw_info *)data;
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+	int i;
+	enum cam_camnoc_hw_irq_type irq_type;
+
+	irq_status = cam_io_r_mb(soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->irq_sbm->sbm_status.offset);
+
+	pr_err("IRQ callback, irq_status=0x%x\n", irq_status);
+
+	for (i = 0; i < camnoc_info->irq_err_size; i++) {
+		if ((irq_status & camnoc_info->irq_err[i].sbm_port) &&
+			(camnoc_info->irq_err[i].enable)) {
+			irq_type = camnoc_info->irq_err[i].irq_type;
+			pr_err("Error occurred, type=%d\n", irq_type);
+
+			switch (irq_type) {
+			case CAM_CAMNOC_HW_IRQ_SLAVE_ERROR:
+				cam_cpastop_handle_errlogger(cpas_core,
+					soc_info);
+				break;
+			case CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR:
+			case CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR:
+			case CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR:
+			case CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR:
+				cam_cpastop_handle_ubwc_err(cpas_core,
+					soc_info, i);
+				break;
+			case CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT:
+				cam_cpastop_handle_ahb_timeout_err(cpas_hw);
+				break;
+			case CAM_CAMNOC_HW_IRQ_CAMNOC_TEST:
+				CPAS_CDBG("TEST IRQ\n");
+				break;
+			default:
+				break;
+			}
+
+			irq_status &= ~camnoc_info->irq_err[i].sbm_port;
+		}
+	}
+
+	if (irq_status)
+		pr_err("IRQ not handled, irq_status=0x%x\n", irq_status);
+
+	if (TEST_IRQ_ENABLE)
+		cam_cpastop_disable_test_irq(cpas_hw);
+
+	cam_cpastop_reset_irq(cpas_hw);
+
+	return IRQ_HANDLED;
+}
+
+static int cam_cpastop_static_settings(struct cam_hw_info *cpas_hw)
+{
+	int i;
+
+	cam_cpastop_reset_irq(cpas_hw);
+
+	for (i = 0; i < camnoc_info->specific_size; i++) {
+		if (camnoc_info->specific[i].enable) {
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].priority_lut_low);
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].priority_lut_high);
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].urgency);
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].danger_lut);
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].safe_lut);
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].ubwc_ctl);
+		}
+	}
+
+	return 0;
+}
+
+static int cam_cpastop_init_hw_version(struct cam_hw_info *cpas_hw,
+	struct cam_cpas_hw_caps *hw_caps)
+{
+	if ((hw_caps->camera_version.major == 1) &&
+		(hw_caps->camera_version.minor == 7) &&
+		(hw_caps->camera_version.incr == 0)) {
+		if ((hw_caps->cpas_version.major == 1) &&
+			(hw_caps->cpas_version.minor == 0) &&
+			(hw_caps->cpas_version.incr == 0)) {
+			camnoc_info = &cam170_cpas100_camnoc_info;
+		} else {
+			pr_err("CPAS Version not supported %d.%d.%d\n",
+				hw_caps->cpas_version.major,
+				hw_caps->cpas_version.minor,
+				hw_caps->cpas_version.incr);
+			return -EINVAL;
+		}
+	} else {
+		pr_err("Camera Version not supported %d.%d.%d\n",
+			hw_caps->camera_version.major,
+			hw_caps->camera_version.minor,
+			hw_caps->camera_version.incr);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int cam_cpastop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops)
+{
+	if (!internal_ops) {
+		pr_err("invalid NULL param\n");
+		return -EINVAL;
+	}
+
+	internal_ops->get_hw_info = cam_cpastop_get_hw_info;
+	internal_ops->init_hw_version = cam_cpastop_init_hw_version;
+	internal_ops->handle_irq = cam_cpastop_handle_irq;
+	internal_ops->setup_regbase = cam_cpastop_setup_regbase_indices;
+	internal_ops->power_on_settings = cam_cpastop_static_settings;
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h
new file mode 100644
index 0000000..99aae3f
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h
@@ -0,0 +1,171 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CPASTOP_HW_H_
+#define _CAM_CPASTOP_HW_H_
+
+#include "cam_cpas_api.h"
+#include "cam_cpas_hw.h"
+
+/**
+ * enum cam_camnoc_hw_irq_type - Enum for camnoc error types
+ *
+ * @CAM_CAMNOC_HW_IRQ_SLAVE_ERROR: Each slave port in CAMNOC (3 QSB ports and
+ *                                 1 QHB port) has an error logger. The error
+ *                                 observed at any slave port is logged into
+ *                                 the error logger register and an IRQ is
+ *                                 triggered
+ * @CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR  : Triggered if any error
+ *                                               detected in the IFE0 UBWC
+ *                                               encoder instance
+ * @CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR  : Triggered if any error
+ *                                               detected in the IFE1 or IFE3
+ *                                               UBWC encoder instance
+ * @CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR: Triggered if any error
+ *                                               detected in the IPE/BPS
+ *                                               UBWC decoder instance
+ * @CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR: Triggered if any error
+ *                                               detected in the IPE/BPS UBWC
+ *                                               encoder instance
+ * @CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT              : Triggered when the QHS_ICP
+ *                                               slave times out after 4000
+ *                                               AHB cycles
+ * @CAM_CAMNOC_HW_IRQ_RESERVED1                : Reserved
+ * @CAM_CAMNOC_HW_IRQ_RESERVED2                : Reserved
+ * @CAM_CAMNOC_HW_IRQ_CAMNOC_TEST              : To test the IRQ logic
+ */
+enum cam_camnoc_hw_irq_type {
+	CAM_CAMNOC_HW_IRQ_SLAVE_ERROR =
+		CAM_CAMNOC_IRQ_SLAVE_ERROR,
+	CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR =
+		CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR =
+		CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR =
+		CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+	CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR =
+		CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT =
+		CAM_CAMNOC_IRQ_AHB_TIMEOUT,
+	CAM_CAMNOC_HW_IRQ_RESERVED1,
+	CAM_CAMNOC_HW_IRQ_RESERVED2,
+	CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
+};
+
+/**
+ * enum cam_camnoc_port_type - Enum for different camnoc hw ports. All CAMNOC
+ *         settings like QoS, LUT mappings need to be configured for
+ *         each of these ports.
+ *
+ * @CAM_CAMNOC_CDM: Indicates CDM HW connection to camnoc
+ * @CAM_CAMNOC_IFE02: Indicates IFE0, IFE2 HW connection to camnoc
+ * @CAM_CAMNOC_IFE13: Indicates IFE1, IFE3 HW connection to camnoc
+ * @CAM_CAMNOC_IPE_BPS_LRME_READ: Indicates IPE, BPS, LRME Read HW
+ *         connection to camnoc
+ * @CAM_CAMNOC_IPE_BPS_LRME_WRITE: Indicates IPE, BPS, LRME Write HW
+ *         connection to camnoc
+ * @CAM_CAMNOC_JPEG: Indicates JPEG HW connection to camnoc
+ * @CAM_CAMNOC_FD: Indicates FD HW connection to camnoc
+ * @CAM_CAMNOC_ICP: Indicates ICP HW connection to camnoc
+ */
+enum cam_camnoc_port_type {
+	CAM_CAMNOC_CDM,
+	CAM_CAMNOC_IFE02,
+	CAM_CAMNOC_IFE13,
+	CAM_CAMNOC_IPE_BPS_LRME_READ,
+	CAM_CAMNOC_IPE_BPS_LRME_WRITE,
+	CAM_CAMNOC_JPEG,
+	CAM_CAMNOC_FD,
+	CAM_CAMNOC_ICP,
+};
+
+/**
+ * struct cam_camnoc_specific : CPAS camnoc specific settings
+ *
+ * @port_type: Port type
+ * @enable: Whether to enable settings for this connection
+ * @priority_lut_low: Priority Low LUT mapping for this connection
+ * @priority_lut_high: Priority High LUT mapping for this connection
+ * @urgency: Urgency (QoS) settings for this connection
+ * @danger_lut: Danger LUT mapping for this connection
+ * @safe_lut: Safe LUT mapping for this connection
+ * @ubwc_ctl: UBWC control settings for this connection
+ *
+ */
+struct cam_camnoc_specific {
+	enum cam_camnoc_port_type port_type;
+	bool enable;
+	struct cam_cpas_reg priority_lut_low;
+	struct cam_cpas_reg priority_lut_high;
+	struct cam_cpas_reg urgency;
+	struct cam_cpas_reg danger_lut;
+	struct cam_cpas_reg safe_lut;
+	struct cam_cpas_reg ubwc_ctl;
+};
+
+/**
+ * struct cam_camnoc_irq_sbm : Sideband manager settings for all CAMNOC IRQs
+ *
+ * @sbm_enable: SBM settings for IRQ enable
+ * @sbm_status: SBM settings for IRQ status
+ * @sbm_clear: SBM settings for IRQ clear
+ *
+ */
+struct cam_camnoc_irq_sbm {
+	struct cam_cpas_reg sbm_enable;
+	struct cam_cpas_reg sbm_status;
+	struct cam_cpas_reg sbm_clear;
+};
+
+/**
+ * struct cam_camnoc_irq_err : Error settings specific to each CAMNOC IRQ
+ *
+ * @irq_type: Type of IRQ
+ * @enable: Whether to enable error settings for this IRQ
+ * @sbm_port: Corresponding SBM port for this IRQ
+ * @err_enable: Error enable settings for this IRQ
+ * @err_status: Error status settings for this IRQ
+ * @err_clear: Error clear settings for this IRQ
+ *
+ */
+struct cam_camnoc_irq_err {
+	enum cam_camnoc_hw_irq_type irq_type;
+	bool enable;
+	uint32_t sbm_port;
+	struct cam_cpas_reg err_enable;
+	struct cam_cpas_reg err_status;
+	struct cam_cpas_reg err_clear;
+};
+
+/**
+ * struct cam_camnoc_info : Overall CAMNOC settings info
+ *
+ * @specific: Pointer to CAMNOC specific settings
+ * @specific_size: Array size of specific settings
+ * @irq_sbm: Pointer to CAMNOC IRQ SBM settings
+ * @irq_err: Pointer to CAMNOC IRQ Error settings
+ * @irq_err_size: Array size of IRQ Error settings
+ * @error_logger: Pointer to CAMNOC IRQ Error logger read registers
+ * @error_logger_size: Array size of IRQ Error logger
+ *
+ */
+struct cam_camnoc_info {
+	struct cam_camnoc_specific *specific;
+	int specific_size;
+	struct cam_camnoc_irq_sbm *irq_sbm;
+	struct cam_camnoc_irq_err *irq_err;
+	int irq_err_size;
+	uint32_t *error_logger;
+	int error_logger_size;
+};
+
+#endif /* _CAM_CPASTOP_HW_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h
new file mode 100644
index 0000000..12c8e66
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h
@@ -0,0 +1,532 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CPASTOP100_H_
+#define _CPASTOP100_H_
+
+#define TEST_IRQ_ENABLE 0
+
+static struct cam_camnoc_irq_sbm cam_cpas100_irq_sbm = {
+	.sbm_enable = {
+		.access_type = CAM_REG_TYPE_READ_WRITE,
+		.enable = true,
+		.offset = 0x2040, /* SBM_FAULTINEN0_LOW */
+		.value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK*/
+			0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */
+			0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */
+			0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */
+			0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */
+			0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */
+			(TEST_IRQ_ENABLE ?
+			0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */
+			0x0),
+	},
+	.sbm_status = {
+		.access_type = CAM_REG_TYPE_READ,
+		.enable = true,
+		.offset = 0x2048, /* SBM_FAULTINSTATUS0_LOW */
+	},
+	.sbm_clear = {
+		.access_type = CAM_REG_TYPE_WRITE,
+		.enable = true,
+		.offset = 0x2080, /* SBM_FLAGOUTCLR0_LOW */
+		.value = TEST_IRQ_ENABLE ? 0x6 : 0x2,
+	}
+};
+
+static struct cam_camnoc_irq_err
+	cam_cpas100_irq_err[] = {
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR,
+		.enable = true,
+		.sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x2718, /* ERRLOGGER_ERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x5a0, /* SPECIFIC_IFE02_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x590, /* SPECIFIC_IFE02_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x598, /* SPECIFIC_IFE02_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x9a0, /* SPECIFIC_IFE13_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x990, /* SPECIFIC_IFE13_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x998, /* SPECIFIC_IFE13_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0xd20, /* SPECIFIC_IBL_RD_DECERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0xd10, /* SPECIFIC_IBL_RD_DECERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0xd18, /* SPECIFIC_IBL_RD_DECERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x11a0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x1190,
+			/* SPECIFIC_IBL_WR_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x1198, /* SPECIFIC_IBL_WR_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT,
+		.enable = true,
+		.sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
+		.enable = TEST_IRQ_ENABLE ? true : false,
+		.sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x5,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+};
+
+static struct cam_camnoc_specific
+	cam_cpas100_camnoc_specific[] = {
+	{
+		.port_type = CAM_CAMNOC_CDM,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x30, /* SPECIFIC_CDM_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x34, /* SPECIFIC_CDM_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x38, /* SPECIFIC_CDM_URGENCY_LOW */
+			.mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */
+			.shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */
+			.value = 0,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x40, /* SPECIFIC_CDM_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x48, /* SPECIFIC_CDM_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE02,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x430, /* SPECIFIC_IFE02_PRIORITYLUT_LOW */
+			.value = 0x66665433,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x434, /* SPECIFIC_IFE02_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x438, /* SPECIFIC_IFE02_URGENCY_LOW */
+			/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x440, /* SPECIFIC_IFE02_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x448, /* SPECIFIC_IFE02_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x588, /* SPECIFIC_IFE02_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE13,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x830, /* SPECIFIC_IFE13_PRIORITYLUT_LOW */
+			.value = 0x66665433,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x834, /* SPECIFIC_IFE13_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x838, /* SPECIFIC_IFE13_URGENCY_LOW */
+			/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x840, /* SPECIFIC_IFE13_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x848, /* SPECIFIC_IFE13_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x988, /* SPECIFIC_IFE13_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_BPS_LRME_READ,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc30, /* SPECIFIC_IBL_RD_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc34, /* SPECIFIC_IBL_RD_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0xc38, /* SPECIFIC_IBL_RD_URGENCY_LOW */
+			/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_MASK */
+			.mask = 0x7,
+			/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_SHIFT */
+			.shift = 0x0,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc40, /* SPECIFIC_IBL_RD_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc48, /* SPECIFIC_IBL_RD_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xd08, /* SPECIFIC_IBL_RD_DECCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_BPS_LRME_WRITE,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1030, /* SPECIFIC_IBL_WR_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1034, /* SPECIFIC_IBL_WR_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x1038, /* SPECIFIC_IBL_WR_URGENCY_LOW */
+			/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1040, /* SPECIFIC_IBL_WR_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1048, /* SPECIFIC_IBL_WR_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1188, /* SPECIFIC_IBL_WR_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_JPEG,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1430, /* SPECIFIC_JPEG_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1434, /* SPECIFIC_JPEG_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1438, /* SPECIFIC_JPEG_URGENCY_LOW */
+			.value = 0x22,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1440, /* SPECIFIC_JPEG_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1448, /* SPECIFIC_JPEG_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_FD,
+		.enable = false,
+	},
+	{
+		.port_type = CAM_CAMNOC_ICP,
+		.enable = false,
+	}
+};
+
+uint32_t slave_error_logger[] = {
+	0x2700, /* ERRLOGGER_SWID_LOW */
+	0x2704, /* ERRLOGGER_SWID_HIGH */
+	0x2708, /* ERRLOGGER_MAINCTL_LOW */
+	0x2710, /* ERRLOGGER_ERRVLD_LOW */
+	0x2720, /* ERRLOGGER_ERRLOG0_LOW */
+	0x2724, /* ERRLOGGER_ERRLOG0_HIGH */
+	0x2728, /* ERRLOGGER_ERRLOG1_LOW */
+	0x272c, /* ERRLOGGER_ERRLOG1_HIGH */
+	0x2730, /* ERRLOGGER_ERRLOG2_LOW */
+	0x2734, /* ERRLOGGER_ERRLOG2_HIGH */
+	0x2738, /* ERRLOGGER_ERRLOG3_LOW */
+	0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
+};
+
+struct cam_camnoc_info cam170_cpas100_camnoc_info = {
+	.specific = &cam_cpas100_camnoc_specific[0],
+	.specific_size = sizeof(cam_cpas100_camnoc_specific) /
+		sizeof(cam_cpas100_camnoc_specific[0]),
+	.irq_sbm = &cam_cpas100_irq_sbm,
+	.irq_err = &cam_cpas100_irq_err[0],
+	.irq_err_size = sizeof(cam_cpas100_irq_err) /
+		sizeof(cam_cpas100_irq_err[0]),
+	.error_logger = &slave_error_logger[0],
+	.error_logger_size = sizeof(slave_error_logger) /
+		sizeof(slave_error_logger[0]),
+};
+
+#endif /* _CPASTOP100_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
new file mode 100644
index 0000000..f6b0729
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
@@ -0,0 +1,324 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CPAS_API_H_
+#define _CAM_CPAS_API_H_
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+
+#include <media/cam_cpas.h>
+
+#define CAM_HW_IDENTIFIER_LENGTH 128
+
+/* Default AXI Bandwidth vote */
+#define CAM_CPAS_DEFAULT_AXI_BW 1024
+
+/**
+ * enum cam_cpas_reg_base - Enum for register base identifier. These
+ *                          are the identifiers used in generic register
+ *                          write/read APIs provided by cpas driver.
+ */
+enum cam_cpas_reg_base {
+	CAM_CPAS_REG_CPASTOP,
+	CAM_CPAS_REG_CAMNOC,
+	CAM_CPAS_REG_CAMSS,
+	CAM_CPAS_REG_MAX
+};
+
+/**
+ * enum cam_camnoc_irq_type - Enum for camnoc irq types
+ *
+ * @CAM_CAMNOC_IRQ_SLAVE_ERROR: Each slave port in CAMNOC (3 QSB ports and
+ *                              1 QHB port) has an error logger. The error
+ *                              observed at any slave port is logged into
+ *                              the error logger register and an IRQ is
+ *                              triggered
+ * @CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR  : Triggered if any error detected
+ *                                            in the IFE0 UBWC encoder instance
+ * @CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR  : Triggered if any error detected
+ *                                            in the IFE1 or IFE3 UBWC encoder
+ *                                            instance
+ * @CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR: Triggered if any error detected
+ *                                            in the IPE/BPS UBWC decoder
+ *                                            instance
+ * @CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR: Triggered if any error detected
+ *                                            in the IPE/BPS UBWC encoder
+ *                                            instance
+ * @CAM_CAMNOC_IRQ_AHB_TIMEOUT              : Triggered when the QHS_ICP slave
+ *                                            times out after 4000 AHB cycles
+ */
+enum cam_camnoc_irq_type {
+	CAM_CAMNOC_IRQ_SLAVE_ERROR,
+	CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+	CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_IRQ_AHB_TIMEOUT,
+};
+
+/**
+ * struct cam_cpas_register_params : Register params for cpas client
+ *
+ * @identifier        : Input identifier string which is the device label
+ *                      from dt like vfe, ife, jpeg etc
+ * @cell_index        : Input integer identifier pointing to the cell index
+ *                      from dt of the device. This can be used to form a
+ *                      unique string with @identifier like vfe0, ife1,
+ *                      jpeg0, etc
+ * @dev               : device handle
+ * @userdata          : Input private data which will be passed as
+ *                      an argument while callback.
+ * @cam_cpas_client_cb : Input callback pointer for triggering the
+ *                      callbacks from CPAS driver.
+ *                      @client_handle : CPAS client handle
+ *                      @userdata    : User data given at the time of register
+ *                      @event_type  : event type
+ *                      @event_data  : event data
+ * @client_handle       : Output Unique handle generated for this register
+ *
+ */
+struct cam_cpas_register_params {
+	char            identifier[CAM_HW_IDENTIFIER_LENGTH];
+	uint32_t        cell_index;
+	struct device  *dev;
+	void           *userdata;
+	void          (*cam_cpas_client_cb)(
+			int32_t                   client_handle,
+			void                     *userdata,
+			enum cam_camnoc_irq_type  event_type,
+			uint32_t                  event_data);
+	uint32_t        client_handle;
+};
+
+/**
+ * enum cam_vote_type - Enum for voting type
+ *
+ * @CAM_VOTE_ABSOLUTE : Absolute vote
+ * @CAM_VOTE_DYNAMIC  : Dynamic vote
+ */
+enum cam_vote_type {
+	CAM_VOTE_ABSOLUTE,
+	CAM_VOTE_DYNAMIC,
+};
+
+/**
+ * enum cam_vote_level - Enum for voting level
+ *
+ * @CAM_SUSPEND_VOTE : Suspend vote
+ * @CAM_SVS_VOTE     : SVS vote
+ * @CAM_NOMINAL_VOTE : Nominal vote
+ * @CAM_TURBO_VOTE   : Turbo vote
+ */
+enum cam_vote_level {
+	CAM_SUSPEND_VOTE,
+	CAM_SVS_VOTE,
+	CAM_NOMINAL_VOTE,
+	CAM_TURBO_VOTE,
+};
+
+/**
+ * struct cam_ahb_vote : AHB vote
+ *
+ * @type  : AHB voting type.
+ *          CAM_VOTE_ABSOLUTE : vote based on the value 'level' is set
+ *          CAM_VOTE_DYNAMIC  : vote calculated dynamically using 'freq'
+ *                              and 'dev' handle is set
+ * @level : AHB vote level
+ * @freq  : AHB vote dynamic frequency
+ *
+ */
+struct cam_ahb_vote {
+	enum cam_vote_type   type;
+	union {
+		enum cam_vote_level  level;
+		unsigned long        freq;
+	} vote;
+};
+
+/**
+ * struct cam_axi_vote : AXI vote
+ *
+ * @uncompressed_bw : Bus bandwidth required in Bytes for uncompressed data
+ *                    This is the required bandwidth for uncompressed
+ *                    data traffic between hw core and camnoc.
+ * @compressed_bw   : Bus bandwidth required in Bytes for compressed data.
+ *                    This is the required bandwidth for compressed
+ *                    data traffic between camnoc and mmnoc.
+ *
+ * If one of the above is not applicable to a hw client, it has to
+ * fill the same values in both.
+ *
+ */
+struct cam_axi_vote {
+	uint64_t   uncompressed_bw;
+	uint64_t   compressed_bw;
+};
+
+/**
+ * cam_cpas_register_client()
+ *
+ * @brief: API to register cpas client
+ *
+ * @register_params: Input params to register as a client to CPAS
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_register_client(
+	struct cam_cpas_register_params *register_params);
+
+/**
+ * cam_cpas_unregister_client()
+ *
+ * @brief: API to unregister cpas client
+ *
+ * @client_handle: Client handle to be unregistered
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_unregister_client(uint32_t client_handle);
+
+/**
+ * cam_cpas_start()
+ *
+ * @brief: API to start cpas client hw. Clients have to vote for minimal
+ *     bandwidth requirements for AHB, AXI. Use cam_cpas_update_ahb_vote
+ *     to scale bandwidth after start.
+ *
+ * @client_handle: client cpas handle
+ * @ahb_vote     : Pointer to ahb vote info
+ * @axi_vote     : Pointer to axi bandwidth vote info
+ *
+ * If AXI vote is not applicable to a particular client, use the value exposed
+ * by CAM_CPAS_DEFAULT_AXI_BW as the default vote request.
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_start(
+	uint32_t               client_handle,
+	struct cam_ahb_vote   *ahb_vote,
+	struct cam_axi_vote   *axi_vote);
+
+/**
+ * cam_cpas_stop()
+ *
+ * @brief: API to stop cpas client hw. Bandwidth for AHB, AXI votes
+ *     would be removed for this client on this call. Clients should not
+ *     use cam_cpas_update_ahb_vote or cam_cpas_update_axi_vote
+ *     to remove their bandwidth vote.
+ *
+ * @client_handle: client cpas handle
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_stop(uint32_t client_handle);
+
+/**
+ * cam_cpas_update_ahb_vote()
+ *
+ * @brief: API to update AHB vote requirement. Use this function only
+ *     between cam_cpas_start and cam_cpas_stop in case clients wants
+ *     to scale to different vote level. Do not use this function to de-vote,
+ *     removing client's vote is implicit on cam_cpas_stop
+ *
+ * @client_handle : Client cpas handle
+ * @ahb_vote      : Pointer to ahb vote info
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_update_ahb_vote(
+	uint32_t               client_handle,
+	struct cam_ahb_vote   *ahb_vote);
+
+/**
+ * cam_cpas_update_axi_vote()
+ *
+ * @brief: API to update AXI vote requirement. Use this function only
+ *     between cam_cpas_start and cam_cpas_stop in case clients wants
+ *     to scale to different vote level. Do not use this function to de-vote,
+ *     removing client's vote is implicit on cam_cpas_stop
+ *
+ * @client_handle : Client cpas handle
+ * @axi_vote      : Pointer to axi bandwidth vote info
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_update_axi_vote(
+	uint32_t             client_handle,
+	struct cam_axi_vote *axi_vote);
+
+/**
+ * cam_cpas_reg_write()
+ *
+ * @brief: API to write a register value in CPAS register space
+ *
+ * @client_handle : Client cpas handle
+ * @reg_base      : Register base identifier
+ * @offset        : Offset from the register base address
+ * @mb            : Whether to do reg write with memory barrier
+ * @value         : Value to be written in register
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_reg_write(
+	uint32_t                  client_handle,
+	enum cam_cpas_reg_base    reg_base,
+	uint32_t                  offset,
+	bool                      mb,
+	uint32_t                  value);
+
+/**
+ * cam_cpas_reg_read()
+ *
+ * @brief: API to read a register value from CPAS register space
+ *
+ * @client_handle : Client cpas handle
+ * @reg_base      : Register base identifier
+ * @offset        : Offset from the register base address
+ * @mb            : Whether to do reg read with memory barrier
+ * @value         : Value to be read from register
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_reg_read(
+	uint32_t                  client_handle,
+	enum cam_cpas_reg_base    reg_base,
+	uint32_t                  offset,
+	bool                      mb,
+	uint32_t                 *value);
+
+/**
+ * cam_cpas_get_hw_info()
+ *
+ * @brief: API to get camera hw information
+ *
+ * @camera_family  : Camera family type. One of
+ *                   CAM_FAMILY_CAMERA_SS
+ *                   CAM_FAMILY_CPAS_SS
+ * @camera_version : Camera version
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_get_hw_info(
+	uint32_t                 *camera_family,
+	struct cam_hw_version    *camera_version);
+
+#endif /* _CAM_CPAS_API_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/Makefile b/drivers/media/platform/msm/camera/cam_isp/Makefile
new file mode 100644
index 0000000..77ad6fc
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/Makefile
@@ -0,0 +1,8 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += isp_hw_mgr/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_isp_dev.o cam_isp_context.o
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
new file mode 100644
index 0000000..76dd1f3
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
@@ -0,0 +1,1334 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "cam_isp_context.h"
+#include "cam_isp_log.h"
+#include "cam_mem_mgr.h"
+#include "cam_sync_api.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+static int __cam_isp_ctx_handle_buf_done_in_activated_state(
+	struct cam_isp_context *ctx_isp,
+	struct cam_isp_hw_done_event_data *done,
+	uint32_t bubble_state)
+{
+	int rc = 0;
+	int i, j;
+	struct cam_ctx_request  *req;
+	struct cam_isp_ctx_req  *req_isp;
+	struct cam_context *ctx = ctx_isp->base;
+
+	if (list_empty(&ctx->active_req_list)) {
+		CDBG("Buf done with no active request!\n");
+		goto end;
+	}
+
+	CDBG("%s: Enter with bubble_state %d\n", __func__, bubble_state);
+
+	req = list_first_entry(&ctx->active_req_list,
+			struct cam_ctx_request, list);
+	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+	for (i = 0; i < done->num_handles; i++) {
+		for (j = 0; j < req_isp->num_fence_map_out; j++) {
+			if (done->resource_handle[i] ==
+				req_isp->fence_map_out[j].resource_handle)
+			break;
+		}
+
+		if (j == req_isp->num_fence_map_out) {
+			pr_err("Can not find matching lane handle 0x%x!\n",
+				done->resource_handle[i]);
+			rc = -EINVAL;
+			continue;
+		}
+
+		if (!bubble_state) {
+			CDBG("%s: Sync success: fd 0x%x\n", __func__,
+				   req_isp->fence_map_out[j].sync_id);
+			cam_sync_signal(req_isp->fence_map_out[j].sync_id,
+				CAM_SYNC_STATE_SIGNALED_SUCCESS);
+		} else if (!req_isp->bubble_report) {
+			CDBG("%s: Sync failure: fd 0x%x\n", __func__,
+				   req_isp->fence_map_out[j].sync_id);
+			cam_sync_signal(req_isp->fence_map_out[j].sync_id,
+				CAM_SYNC_STATE_SIGNALED_ERROR);
+		} else {
+			/*
+			 * Ignore the buffer done if bubble detect is on
+			 * In most case, active list should be empty when
+			 * bubble detects. But for safety, we just move the
+			 * current active request to the pending list here.
+			 */
+			list_del_init(&req->list);
+			list_add(&req->list, &ctx->pending_req_list);
+			continue;
+		}
+
+		CDBG("%s: req %lld, reset sync id 0x%x\n", __func__,
+			   req->request_id,
+			   req_isp->fence_map_out[j].sync_id);
+		req_isp->num_acked++;
+		req_isp->fence_map_out[j].sync_id = -1;
+	}
+
+	if (req_isp->num_acked == req_isp->num_fence_map_out) {
+		list_del_init(&req->list);
+		list_add_tail(&req->list, &ctx->free_req_list);
+	}
+
+end:
+	return rc;
+}
+
+static int __cam_isp_ctx_reg_upd_in_activated_state(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	int rc = 0;
+	struct cam_ctx_request  *req;
+	struct cam_context      *ctx = ctx_isp->base;
+	struct cam_isp_ctx_req  *req_isp;
+
+	if (list_empty(&ctx->pending_req_list)) {
+		pr_err("Reg upd ack with no pending request\n");
+		goto end;
+	}
+	req = list_first_entry(&ctx->pending_req_list,
+			struct cam_ctx_request, list);
+	list_del_init(&req->list);
+
+	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+	if (req_isp->num_fence_map_out != 0) {
+		CDBG("%s: move request %lld to active list\n", __func__,
+			req->request_id);
+		if (!list_empty(&ctx->active_req_list))
+			pr_err("%s: More than one entry in active list\n",
+				__func__);
+		list_add_tail(&req->list, &ctx->active_req_list);
+	} else {
+		/* no io config, so the request is completed. */
+		list_add_tail(&req->list, &ctx->free_req_list);
+	}
+
+	/*
+	 * This function only called directly from applied and bubble applied
+	 * state so change substate here.
+	 */
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
+	CDBG("%s: next substate %d\n", __func__, ctx_isp->substate_activated);
+
+end:
+	return rc;
+}
+
+static int __cam_isp_ctx_notify_sof_in_actived_state(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	int rc = 0;
+	struct cam_req_mgr_sof_notify  notify;
+	struct cam_context *ctx = ctx_isp->base;
+
+	/* notify reqmgr with sof  signal */
+	if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_sof) {
+		notify.link_hdl = ctx->link_hdl;
+		notify.dev_hdl = ctx->dev_hdl;
+		notify.frame_id = ctx_isp->frame_id;
+
+		ctx->ctx_crm_intf->notify_sof(&notify);
+		CDBG("%s: Notify CRM  SOF frame %lld\n", __func__,
+			ctx_isp->frame_id);
+	} else {
+		pr_err("%s: Can not notify SOF to CRM\n", __func__);
+	}
+
+	return rc;
+}
+
+
+static int __cam_isp_ctx_sof_in_sof(struct cam_isp_context *ctx_isp,
+	void *evt_data)
+{
+	int rc = 0;
+
+	CDBG("%s: Enter\n", __func__);
+	ctx_isp->frame_id++;
+
+	return rc;
+}
+
+static int __cam_isp_ctx_reg_upd_in_sof(struct cam_isp_context *ctx_isp,
+	void *evt_data)
+{
+	int rc = 0;
+	struct cam_ctx_request *req;
+	struct cam_isp_ctx_req *req_isp;
+	struct cam_context *ctx = ctx_isp->base;
+
+	if (ctx->state != CAM_CTX_ACTIVATED) {
+		CDBG("%s: invalid RUP\n", __func__);
+		goto end;
+	}
+
+	/*
+	 * This is for the first update. The initial setting will
+	 * cause the reg_upd in the first frame.
+	 */
+	if (!list_empty(&ctx->pending_req_list)) {
+		req = list_first_entry(&ctx->pending_req_list,
+			struct cam_ctx_request, list);
+		list_del_init(&req->list);
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		if (req_isp->num_fence_map_out == req_isp->num_acked)
+			list_add_tail(&req->list, &ctx->free_req_list);
+		else {
+			/* need to handle the buf done */
+			list_add_tail(&req->list, &ctx->active_req_list);
+			ctx_isp->substate_activated =
+				CAM_ISP_CTX_ACTIVATED_EPOCH;
+		}
+	}
+end:
+	return rc;
+}
+
+static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp,
+	void *evt_data)
+{
+	int rc = 0;
+	struct cam_ctx_request    *req;
+	struct cam_isp_ctx_req    *req_isp;
+	struct cam_context        *ctx = ctx_isp->base;
+
+	if (list_empty(&ctx->pending_req_list)) {
+		/*
+		 * If no pending req in epoch, this is an error case.
+		 * The recovery is to go back to sof state
+		 */
+		pr_err("%s: No pending request\n", __func__);
+		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+		goto end;
+	}
+
+	req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
+		list);
+	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
+
+	CDBG("Report Bubble flag %d\n", req_isp->bubble_report);
+	if (req_isp->bubble_report && ctx->ctx_crm_intf &&
+		ctx->ctx_crm_intf->notify_err) {
+		struct cam_req_mgr_error_notify notify;
+
+		notify.link_hdl = ctx->link_hdl;
+		notify.dev_hdl = ctx->dev_hdl;
+		notify.req_id = req->request_id;
+		notify.error = CRM_KMD_ERR_BUBBLE;
+		ctx->ctx_crm_intf->notify_err(&notify);
+		CDBG("%s: Notify CRM about Bubble frame %lld\n", __func__,
+			ctx_isp->frame_id);
+	} else {
+		/*
+		 * Since can not bubble report, always move the request to
+		 * active list.
+		 */
+		list_del_init(&req->list);
+		list_add_tail(&req->list, &ctx->active_req_list);
+		req_isp->bubble_report = 0;
+	}
+
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
+	CDBG("%s: next substate %d\n", __func__,
+		ctx_isp->substate_activated);
+end:
+	return rc;
+}
+
+
+static int __cam_isp_ctx_buf_done_in_applied(struct cam_isp_context *ctx_isp,
+	void *evt_data)
+{
+	int rc = 0;
+	struct cam_isp_hw_done_event_data *done =
+		(struct cam_isp_hw_done_event_data *) evt_data;
+
+	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0);
+	return rc;
+}
+
+
+static int __cam_isp_ctx_sof_in_epoch(struct cam_isp_context *ctx_isp,
+	void *evt_data)
+{
+	int rc = 0;
+
+	ctx_isp->frame_id++;
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+	pr_err("%s: next substate %d\n", __func__,
+		ctx_isp->substate_activated);
+
+	return rc;
+}
+
+static int __cam_isp_ctx_buf_done_in_epoch(struct cam_isp_context *ctx_isp,
+	void *evt_data)
+{
+	int rc = 0;
+	struct cam_isp_hw_done_event_data *done =
+		(struct cam_isp_hw_done_event_data *) evt_data;
+
+	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0);
+	return rc;
+}
+
+
+static int __cam_isp_ctx_sof_in_bubble(struct cam_isp_context *ctx_isp,
+	void *evt_data)
+{
+	ctx_isp->frame_id++;
+	return 0;
+}
+
+static int __cam_isp_ctx_buf_done_in_bubble(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	int rc = 0;
+	struct cam_isp_hw_done_event_data *done =
+		(struct cam_isp_hw_done_event_data *) evt_data;
+
+	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 1);
+	return rc;
+}
+
+static int __cam_isp_ctx_sof_in_bubble_applied(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	ctx_isp->frame_id++;
+	return 0;
+}
+
+
+static int __cam_isp_ctx_epoch_in_bubble_applied(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	struct cam_ctx_request    *req;
+	struct cam_isp_ctx_req    *req_isp;
+	struct cam_context        *ctx = ctx_isp->base;
+
+	/*
+	 * This means we missed the reg upd ack. So we need to
+	 * transition to BUBBLE state again.
+	 */
+
+	if (list_empty(&ctx->pending_req_list)) {
+		/*
+		 * If no pending req in epoch, this is an error case.
+		 * Just go back to the bubble state.
+		 */
+		pr_err("%s: No pending request.\n", __func__);
+		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
+		goto end;
+	}
+
+	req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
+		list);
+	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
+
+	if (req_isp->bubble_report && ctx->ctx_crm_intf &&
+		ctx->ctx_crm_intf->notify_err) {
+		struct cam_req_mgr_error_notify notify;
+
+		notify.link_hdl = ctx->link_hdl;
+		notify.dev_hdl = ctx->dev_hdl;
+		notify.req_id = req->request_id;
+		notify.error = CRM_KMD_ERR_BUBBLE;
+		ctx->ctx_crm_intf->notify_err(&notify);
+		CDBG("%s: Notify CRM about Bubble frame %lld\n", __func__,
+			ctx_isp->frame_id);
+	} else {
+		/*
+		 * If we can not report bubble, then treat it as if no bubble
+		 * report. Just move the req to active list.
+		 */
+		list_del_init(&req->list);
+		list_add_tail(&req->list, &ctx->active_req_list);
+		req_isp->bubble_report = 0;
+	}
+
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
+	CDBG("%s: next substate %d\n", __func__, ctx_isp->substate_activated);
+end:
+	return 0;
+}
+
+static int __cam_isp_ctx_buf_done_in_bubble_applied(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	int rc = 0;
+	struct cam_isp_hw_done_event_data *done =
+		(struct cam_isp_hw_done_event_data *) evt_data;
+
+	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 1);
+	return rc;
+}
+
+static int __cam_isp_ctx_handle_error(struct cam_isp_context *ctx_isp,
+	void *evt_data)
+{
+	int                              rc = 0;
+	struct cam_ctx_request          *req;
+	struct cam_req_mgr_error_notify  notify;
+
+	struct cam_context *ctx = ctx_isp->base;
+	struct cam_isp_hw_error_event_data  *error_event_data =
+			(struct cam_isp_hw_error_event_data *)evt_data;
+
+	uint32_t error_type = error_event_data->error_type;
+
+	CDBG("%s: Enter error_type = %d\n", __func__, error_type);
+	if ((error_type == CAM_ISP_HW_ERROR_OVERFLOW) ||
+		(error_type == CAM_ISP_HW_ERROR_BUSIF_OVERFLOW))
+		notify.error = CRM_KMD_ERR_FATAL;
+
+	/*
+	 * Need to check the active req
+	 * move all of them to the pending request list
+	 * Note this function needs revisit!
+	 */
+
+	if (list_empty(&ctx->active_req_list)) {
+		pr_err("handling error with no active request!\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	req = list_first_entry(&ctx->active_req_list,
+				struct cam_ctx_request, list);
+
+	if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_err) {
+		notify.link_hdl = ctx->link_hdl;
+		notify.dev_hdl = ctx->dev_hdl;
+		notify.req_id = req->request_id;
+
+		ctx->ctx_crm_intf->notify_err(&notify);
+		pr_err("%s: Notify CRM about ERROR frame %lld\n", __func__,
+			ctx_isp->frame_id);
+	} else {
+		pr_err("%s: Can not notify ERROR to CRM\n", __func__);
+		rc = -EFAULT;
+	}
+
+	list_del_init(&req->list);
+	list_add(&req->list, &ctx->pending_req_list);
+	/* might need to check if active list is empty */
+
+end:
+	CDBG("%s: Exit\n", __func__);
+	return rc;
+}
+
+static struct cam_isp_ctx_irq_ops
+	cam_isp_ctx_activated_state_machine_irq[CAM_ISP_CTX_ACTIVATED_MAX] = {
+	/* SOF */
+	{
+		.irq_ops = {
+			NULL,
+			__cam_isp_ctx_sof_in_sof,
+			__cam_isp_ctx_reg_upd_in_sof,
+			__cam_isp_ctx_notify_sof_in_actived_state,
+			NULL,
+			NULL,
+		},
+	},
+	/* APPLIED */
+	{
+		.irq_ops = {
+			__cam_isp_ctx_handle_error,
+			__cam_isp_ctx_sof_in_sof,
+			__cam_isp_ctx_reg_upd_in_activated_state,
+			__cam_isp_ctx_epoch_in_applied,
+			NULL,
+			__cam_isp_ctx_buf_done_in_applied,
+		},
+	},
+	/* EPOCH */
+	{
+		.irq_ops = {
+			__cam_isp_ctx_handle_error,
+			__cam_isp_ctx_sof_in_epoch,
+			NULL,
+			__cam_isp_ctx_notify_sof_in_actived_state,
+			NULL,
+			__cam_isp_ctx_buf_done_in_epoch,
+		},
+	},
+	/* BUBBLE */
+	{
+		.irq_ops = {
+			NULL,
+			__cam_isp_ctx_sof_in_bubble,
+			NULL,
+			__cam_isp_ctx_notify_sof_in_actived_state,
+			NULL,
+			__cam_isp_ctx_buf_done_in_bubble,
+		},
+	},
+	/* Bubble Applied */
+	{
+		.irq_ops = {
+			NULL,
+			__cam_isp_ctx_sof_in_bubble_applied,
+			__cam_isp_ctx_reg_upd_in_activated_state,
+			__cam_isp_ctx_epoch_in_bubble_applied,
+			NULL,
+			__cam_isp_ctx_buf_done_in_bubble_applied,
+		},
+	},
+	/* HALT */
+	{
+	},
+};
+
+static int __cam_isp_ctx_apply_req_in_activated_state(
+	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply,
+	uint32_t next_state)
+{
+	int rc = 0;
+	int cnt = 0;
+	struct cam_ctx_request          *req;
+	struct cam_isp_ctx_req          *req_isp;
+	struct cam_isp_context          *ctx_isp;
+	struct cam_hw_config_args        cfg;
+
+	if (list_empty(&ctx->pending_req_list)) {
+		pr_err("%s: No available request for Apply id %lld\n",
+			__func__, apply->request_id);
+		rc = -EFAULT;
+		goto end;
+	}
+
+	/*
+	 * When the pipeline has issue, the requests can be queued up in the
+	 * pipeline. In this case, we should reject the additional request.
+	 * The maximum number of request allowed to be outstanding is 2.
+	 *
+	 */
+	list_for_each_entry(req, &ctx->active_req_list, list) {
+		if (++cnt > 2) {
+			pr_err("%s: Apply failed due to pipeline congestion\n",
+				__func__);
+			rc = -EFAULT;
+			goto end;
+		}
+	}
+
+	req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
+		list);
+
+	/*
+	 * Check whether the request id is matching the tip, if not, this means
+	 * we are in the middle of the error handling. Need to reject this apply
+	 */
+	if (req->request_id != apply->request_id) {
+		rc = -EFAULT;
+		goto end;
+	}
+
+	CDBG("%s: Apply request %lld\n", __func__, req->request_id);
+	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+	ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
+
+	req_isp->bubble_report = apply->report_if_bubble;
+
+	cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
+	cfg.hw_update_entries = req_isp->cfg;
+	cfg.num_hw_update_entries = req_isp->num_cfg;
+
+	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
+	if (rc) {
+		pr_err("%s: Can not apply the configuration\n", __func__);
+	} else {
+		spin_lock(&ctx->lock);
+		ctx_isp->substate_activated = next_state;
+		CDBG("%s: new state %d\n", __func__, next_state);
+		spin_unlock(&ctx->lock);
+	}
+end:
+	return rc;
+}
+
+static int __cam_isp_ctx_apply_req_in_sof(
+	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
+{
+	int rc = 0;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	CDBG("%s: current substate %d\n", __func__,
+		ctx_isp->substate_activated);
+	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
+		CAM_ISP_CTX_ACTIVATED_APPLIED);
+	CDBG("%s: new substate %d\n", __func__, ctx_isp->substate_activated);
+
+	return rc;
+}
+
+static int __cam_isp_ctx_apply_req_in_epoch(
+	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
+{
+	int rc = 0;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	CDBG("%s: current substate %d\n", __func__,
+		ctx_isp->substate_activated);
+	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
+		CAM_ISP_CTX_ACTIVATED_APPLIED);
+	CDBG("%s: new substate %d\n", __func__, ctx_isp->substate_activated);
+
+	return rc;
+}
+
+static int __cam_isp_ctx_apply_req_in_bubble(
+	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
+{
+	int rc = 0;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	CDBG("%s: current substate %d\n", __func__,
+		ctx_isp->substate_activated);
+	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
+		CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED);
+	CDBG("%s: new substate %d\n", __func__, ctx_isp->substate_activated);
+
+	return rc;
+}
+
+static struct cam_ctx_ops
+	cam_isp_ctx_activated_state_machine[CAM_ISP_CTX_ACTIVATED_MAX] = {
+	/* SOF */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {
+			.apply_req = __cam_isp_ctx_apply_req_in_sof,
+		},
+		.irq_ops = NULL,
+	},
+	/* APPLIED */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* EPOCH */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {
+			.apply_req = __cam_isp_ctx_apply_req_in_epoch,
+		},
+		.irq_ops = NULL,
+	},
+	/* BUBBLE */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {
+			.apply_req = __cam_isp_ctx_apply_req_in_bubble,
+		},
+		.irq_ops = NULL,
+	},
+	/* Bubble Applied */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* HALT */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+};
+
+
+/* top level state machine */
+static int __cam_isp_ctx_release_dev_in_top_state(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc = 0;
+	int i;
+	struct cam_hw_release_args       rel_arg;
+	struct cam_ctx_request	        *req;
+	struct cam_isp_ctx_req	        *req_isp;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	if (ctx_isp->hw_ctx) {
+		rel_arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
+		ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
+			&rel_arg);
+		ctx_isp->hw_ctx = NULL;
+	}
+
+	ctx->session_hdl = 0;
+	ctx->dev_hdl = 0;
+	ctx->link_hdl = 0;
+	ctx->ctx_crm_intf = NULL;
+	ctx_isp->frame_id = 0;
+
+	/*
+	 * Ideally, we should never have any active request here.
+	 * But we still add some sanity check code here to help the debug
+	 */
+	if (!list_empty(&ctx->active_req_list))
+		pr_err("%s: Active list is not empty.\n", __func__);
+
+	/* flush the pending list */
+	while (!list_empty(&ctx->pending_req_list)) {
+		req = list_first_entry(&ctx->pending_req_list,
+			struct cam_ctx_request, list);
+		list_del_init(&req->list);
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		pr_err("%s: signal fence in pending list. fence num %d\n",
+			__func__, req_isp->num_fence_map_out);
+		for (i = 0; i < req_isp->num_fence_map_out; i++) {
+			if (req_isp->fence_map_out[i].sync_id != -1) {
+				cam_sync_signal(
+					req_isp->fence_map_out[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+			}
+		}
+		list_add_tail(&req->list, &ctx->free_req_list);
+	}
+	ctx->state = CAM_CTX_AVAILABLE;
+	CDBG("%s: next state %d\n", __func__, ctx->state);
+	return rc;
+}
+
+static int __cam_isp_ctx_config_dev_in_top_state(
+	struct cam_context *ctx, struct cam_config_dev_cmd *cmd)
+{
+	int rc = 0;
+	struct cam_ctx_request           *req = NULL;
+	struct cam_isp_ctx_req           *req_isp;
+	uint64_t                          packet_addr;
+	struct cam_packet                *packet;
+	size_t                            len = 0;
+	struct cam_hw_prepare_update_args cfg;
+	struct cam_req_mgr_add_request    add_req;
+	struct cam_isp_context           *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	CDBG("%s: get free request object......\n", __func__);
+
+	/* get free request */
+	spin_lock(&ctx->lock);
+	if (!list_empty(&ctx->free_req_list)) {
+		req = list_first_entry(&ctx->free_req_list,
+				struct cam_ctx_request, list);
+		list_del_init(&req->list);
+	}
+	spin_unlock(&ctx->lock);
+
+	if (!req) {
+		pr_err("%s: No more request obj free\n", __func__);
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+
+	/* for config dev, only memory handle is supported */
+	/* map packet from the memhandle */
+	rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle,
+		(uint64_t *) &packet_addr, &len);
+	if (rc != 0) {
+		pr_err("%s: Can not get packet address\n", __func__);
+		rc = -EINVAL;
+		goto free_req;
+	}
+
+	packet = (struct cam_packet *) (packet_addr + cmd->offset);
+	CDBG("%s: pack_handle %llx\n", __func__, cmd->packet_handle);
+	CDBG("%s: packet address is 0x%llx\n", __func__, packet_addr);
+	CDBG("%s: packet with length %zu, offset 0x%llx\n", __func__,
+		len, cmd->offset);
+	CDBG("%s: Packet request id 0x%llx\n", __func__,
+		packet->header.request_id);
+	CDBG("%s: Packet size 0x%x\n", __func__, packet->header.size);
+	CDBG("%s: packet op %d\n", __func__, packet->header.op_code);
+
+	/* preprocess the configuration */
+	memset(&cfg, 0, sizeof(cfg));
+	cfg.packet = packet;
+	cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
+	cfg.max_hw_update_entries = CAM_ISP_CTX_CFG_MAX;
+	cfg.hw_update_entries = req_isp->cfg;
+	cfg.max_out_map_entries = CAM_ISP_CTX_RES_MAX;
+	cfg.max_in_map_entries = CAM_ISP_CTX_RES_MAX;
+	cfg.out_map_entries = req_isp->fence_map_out;
+	cfg.in_map_entries = req_isp->fence_map_in;
+
+	CDBG("%s: try to prepare config packet......\n", __func__);
+
+	rc = ctx->hw_mgr_intf->hw_prepare_update(
+		ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
+	if (rc != 0) {
+		pr_err("%s: Prepare config packet failed in HW layer\n",
+			__func__);
+		rc = -EFAULT;
+		goto free_req;
+	}
+	req_isp->num_cfg = cfg.num_hw_update_entries;
+	req_isp->num_fence_map_out = cfg.num_out_map_entries;
+	req_isp->num_fence_map_in = cfg.num_in_map_entries;
+	req_isp->num_acked = 0;
+
+	CDBG("%s: num_entry: %d, num fence out: %d, num fence in: %d\n",
+		__func__, req_isp->num_cfg, req_isp->num_fence_map_out,
+		req_isp->num_fence_map_in);
+
+	req->request_id = packet->header.request_id;
+	req->status = 1;
+
+	if (ctx->state == CAM_CTX_ACTIVATED && ctx->ctx_crm_intf->add_req) {
+		add_req.link_hdl = ctx->link_hdl;
+		add_req.dev_hdl  = ctx->dev_hdl;
+		add_req.req_id   = req->request_id;
+		rc = ctx->ctx_crm_intf->add_req(&add_req);
+		if (rc) {
+			pr_err("%s: Error: Adding request id=%llu\n", __func__,
+				req->request_id);
+				goto free_req;
+		}
+	}
+
+	CDBG("%s: Packet request id 0x%llx\n", __func__,
+		packet->header.request_id);
+
+	spin_lock(&ctx->lock);
+	list_add_tail(&req->list, &ctx->pending_req_list);
+	spin_unlock(&ctx->lock);
+
+	CDBG("%s: Preprocessing Config %lld successful\n", __func__,
+		req->request_id);
+
+	return rc;
+
+free_req:
+	spin_lock(&ctx->lock);
+	list_add_tail(&req->list, &ctx->free_req_list);
+	spin_unlock(&ctx->lock);
+end:
+	return rc;
+}
+
+static int __cam_isp_ctx_acquire_dev_in_available(struct cam_context *ctx,
+	struct cam_acquire_dev_cmd *cmd)
+{
+	int rc = 0;
+	struct cam_hw_acquire_args       param;
+	struct cam_isp_resource         *isp_res = NULL;
+	struct cam_create_dev_hdl        req_hdl_param;
+	struct cam_hw_release_args       release;
+	struct cam_isp_context          *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	if (!ctx->hw_mgr_intf) {
+		pr_err("HW interface is not ready!\n");
+		rc = -EFAULT;
+		goto end;
+	}
+
+	CDBG("%s: session_hdl 0x%x, num_resources %d, hdl type %d, res %lld\n",
+		 __func__, cmd->session_handle, cmd->num_resources,
+		cmd->handle_type, cmd->resource_hdl);
+
+	if (cmd->num_resources > CAM_ISP_CTX_RES_MAX) {
+		pr_err("Too much resources in the acquire!\n");
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	/* for now we only support user pointer */
+	if (cmd->handle_type != 1)  {
+		pr_err("%s: Only user pointer is supported!", __func__);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	isp_res = kzalloc(
+		sizeof(*isp_res)*cmd->num_resources, GFP_KERNEL);
+	if (!isp_res) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	CDBG("%s: start copy %d resources from user\n",
+		__func__, cmd->num_resources);
+
+	if (copy_from_user(isp_res, (void __user *)cmd->resource_hdl,
+		sizeof(*isp_res)*cmd->num_resources)) {
+		rc = -EFAULT;
+		goto free_res;
+	}
+
+	param.context_data = ctx;
+	param.event_cb = ctx->irq_cb_intf;
+	param.num_acq = cmd->num_resources;
+	param.acquire_info = (uint64_t) isp_res;
+
+	/* call HW manager to reserve the resource */
+	rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
+		&param);
+	if (rc != 0) {
+		pr_err("Acquire device failed\n");
+		goto free_res;
+	}
+
+	ctx_isp->hw_ctx = param.ctxt_to_hw_map;
+
+	req_hdl_param.session_hdl = cmd->session_handle;
+	/* bridge is not ready for these flags. so false for now */
+	req_hdl_param.v4l2_sub_dev_flag = 0;
+	req_hdl_param.media_entity_flag = 0;
+	req_hdl_param.ops = ctx->crm_ctx_intf;
+	req_hdl_param.priv = ctx;
+
+	CDBG("%s: get device handle form bridge\n", __func__);
+	ctx->dev_hdl = cam_create_device_hdl(&req_hdl_param);
+	if (ctx->dev_hdl <= 0) {
+		rc = -EFAULT;
+		pr_err("Can not create device handle\n");
+		goto free_hw;
+	}
+	cmd->dev_handle = ctx->dev_hdl;
+
+	/* store session information */
+	ctx->session_hdl = cmd->session_handle;
+
+	ctx->state = CAM_CTX_ACQUIRED;
+
+	CDBG("%s:%d: Acquire success.\n", __func__, __LINE__);
+	kfree(isp_res);
+	return rc;
+
+free_hw:
+	release.ctxt_to_hw_map = ctx_isp->hw_ctx;
+	ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &release);
+	ctx_isp->hw_ctx = NULL;
+free_res:
+	kfree(isp_res);
+end:
+	return rc;
+}
+
+static int __cam_isp_ctx_config_dev_in_acquired(struct cam_context *ctx,
+	struct cam_config_dev_cmd *cmd)
+{
+	int rc = 0;
+
+	rc = __cam_isp_ctx_config_dev_in_top_state(ctx, cmd);
+
+	if (!rc && ctx->link_hdl)
+		ctx->state = CAM_CTX_READY;
+
+	CDBG("%s: next state %d\n", __func__, ctx->state);
+	return rc;
+}
+
+static int __cam_isp_ctx_link_in_acquired(struct cam_context *ctx,
+	struct cam_req_mgr_core_dev_link_setup *link)
+{
+	int rc = 0;
+
+	CDBG("%s:%d: Enter.........\n", __func__, __LINE__);
+
+	ctx->link_hdl = link->link_hdl;
+	ctx->ctx_crm_intf = link->crm_cb;
+
+	/* change state only if we had the init config */
+	if (!list_empty(&ctx->pending_req_list))
+		ctx->state = CAM_CTX_READY;
+
+	CDBG("%s: next state %d\n", __func__, ctx->state);
+
+	return rc;
+}
+
+static int __cam_isp_ctx_unlink_in_acquired(struct cam_context *ctx,
+	struct cam_req_mgr_core_dev_link_setup *unlink)
+{
+	int rc = 0;
+
+	ctx->link_hdl = 0;
+	ctx->ctx_crm_intf = NULL;
+
+	return rc;
+}
+
+static int __cam_isp_ctx_get_dev_info_in_acquired(struct cam_context *ctx,
+	struct cam_req_mgr_device_info *dev_info)
+{
+	int rc = 0;
+
+	dev_info->dev_hdl = ctx->dev_hdl;
+	strlcpy(dev_info->name, CAM_ISP_DEV_NAME, sizeof(dev_info->name));
+	dev_info->dev_id = CAM_REQ_MGR_DEVICE_IFE;
+	dev_info->p_delay = 1;
+
+	return rc;
+}
+
+static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc = 0;
+	struct cam_hw_start_args         arg;
+	struct cam_ctx_request          *req;
+	struct cam_isp_ctx_req          *req_isp;
+	struct cam_isp_context          *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	if (cmd->session_handle != ctx->session_hdl ||
+		cmd->dev_handle != ctx->dev_hdl) {
+		rc = -EPERM;
+		goto end;
+	}
+
+	if (list_empty(&ctx->pending_req_list)) {
+		/* should never happen */
+		pr_err("%s: Start device with empty configuration\n",
+			__func__);
+		rc = -EFAULT;
+		goto end;
+	} else {
+		req = list_first_entry(&ctx->pending_req_list,
+			struct cam_ctx_request, list);
+	}
+	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+
+	if (!ctx_isp->hw_ctx) {
+		pr_err("%s:%d: Wrong hw context pointer.\n",
+			__func__, __LINE__);
+		rc = -EFAULT;
+		goto end;
+	}
+	arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
+	arg.hw_update_entries = req_isp->cfg;
+	arg.num_hw_update_entries = req_isp->num_cfg;
+
+	ctx_isp->frame_id = 0;
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+
+	/*
+	 * Only place to change state before calling the hw due to
+	 * hardware tasklet has higher priority that can cause the
+	 * irq handling comes early
+	 */
+	ctx->state = CAM_CTX_ACTIVATED;
+	rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv, &arg);
+	if (rc) {
+		/* HW failure. user need to clean up the resource */
+		pr_err("Start HW failed\n");
+		ctx->state = CAM_CTX_READY;
+		goto end;
+	}
+	CDBG("%s: start device success\n", __func__);
+end:
+	return rc;
+}
+
+static int __cam_isp_ctx_unlink_in_ready(struct cam_context *ctx,
+	struct cam_req_mgr_core_dev_link_setup *unlink)
+{
+	int rc = 0;
+
+	ctx->link_hdl = 0;
+	ctx->ctx_crm_intf = NULL;
+	ctx->state = CAM_CTX_ACQUIRED;
+
+	return rc;
+}
+
+static int __cam_isp_ctx_stop_dev_in_activated_unlock(
+	struct cam_context *ctx)
+{
+	int rc = 0;
+	uint32_t i;
+	struct cam_hw_stop_args          stop;
+	struct cam_ctx_request          *req;
+	struct cam_isp_ctx_req          *req_isp;
+	struct cam_isp_context          *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	/* Mask off all the incoming hardware events */
+	spin_lock(&ctx->lock);
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
+	spin_unlock(&ctx->lock);
+	CDBG("%s: next substate %d", __func__, ctx_isp->substate_activated);
+
+	/* stop hw first */
+	if (ctx_isp->hw_ctx) {
+		stop.ctxt_to_hw_map = ctx_isp->hw_ctx;
+		ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
+			&stop);
+	}
+
+	while (!list_empty(&ctx->pending_req_list)) {
+		req = list_first_entry(&ctx->pending_req_list,
+				struct cam_ctx_request, list);
+		list_del_init(&req->list);
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		CDBG("%s: signal fence in pending list. fence num %d\n",
+			__func__, req_isp->num_fence_map_out);
+		for (i = 0; i < req_isp->num_fence_map_out; i++)
+			if (req_isp->fence_map_out[i].sync_id != -1) {
+				cam_sync_signal(
+					req_isp->fence_map_out[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+			}
+		list_add_tail(&req->list, &ctx->free_req_list);
+	}
+
+	while (!list_empty(&ctx->active_req_list)) {
+		req = list_first_entry(&ctx->active_req_list,
+				struct cam_ctx_request, list);
+		list_del_init(&req->list);
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		CDBG("%s: signal fence in active list. fence num %d\n",
+			__func__, req_isp->num_fence_map_out);
+		for (i = 0; i < req_isp->num_fence_map_out; i++)
+			if (req_isp->fence_map_out[i].sync_id != -1) {
+				cam_sync_signal(
+					req_isp->fence_map_out[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+			}
+		list_add_tail(&req->list, &ctx->free_req_list);
+	}
+	ctx_isp->frame_id = 0;
+
+	CDBG("%s: next state %d", __func__, ctx->state);
+	return rc;
+}
+
+static int __cam_isp_ctx_stop_dev_in_activated(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc = 0;
+
+	__cam_isp_ctx_stop_dev_in_activated_unlock(ctx);
+	ctx->state = CAM_CTX_ACQUIRED;
+	return rc;
+}
+
+static int __cam_isp_ctx_release_dev_in_activated(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc = 0;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	__cam_isp_ctx_stop_dev_in_activated_unlock(ctx);
+
+	if (ctx_isp->hw_ctx) {
+		struct cam_hw_release_args   arg;
+
+		arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
+		ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
+			&arg);
+		ctx_isp->hw_ctx = NULL;
+	}
+
+	ctx->session_hdl = 0;
+	ctx->dev_hdl = 0;
+	ctx->link_hdl = 0;
+	ctx->ctx_crm_intf = NULL;
+
+	ctx->state =  CAM_CTX_AVAILABLE;
+
+	return rc;
+}
+
+/*
+ * CRM apply-request entry for the Activated state: forwards the apply
+ * to the handler registered for the current activated substate, or
+ * fails with -EFAULT when that substate has no apply handler.
+ */
+static int __cam_isp_ctx_apply_req(struct cam_context *ctx,
+	struct cam_req_mgr_apply_request *apply)
+{
+	int rc = 0;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	CDBG("%s: Enter: apply req in Substate %d\n",
+		__func__, ctx_isp->substate_activated);
+	if (ctx_isp->substate_machine[ctx_isp->substate_activated].
+		crm_ops.apply_req) {
+		rc = ctx_isp->substate_machine[ctx_isp->substate_activated].
+			crm_ops.apply_req(ctx, apply);
+	} else {
+		pr_err("%s: No handle function in activated substate %d\n",
+			__func__, ctx_isp->substate_activated);
+		rc = -EFAULT;
+	}
+
+	if (rc)
+		pr_err("%s: Apply failed in active substate %d\n",
+			__func__, ctx_isp->substate_activated);
+	return rc;
+}
+
+
+
+/*
+ * Top-level IRQ event entry for the Activated state.  Dispatches the
+ * event to the handler registered for the current activated substate.
+ * Runs under ctx->lock so substate transitions are serialized with
+ * event handling.
+ */
+static int __cam_isp_ctx_handle_irq_in_activated(void *context,
+	uint32_t evt_id, void *evt_data)
+{
+	int rc = 0;
+	struct cam_context *ctx = (struct cam_context *)context;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *)ctx->ctx_priv;
+
+	/*
+	 * irq_ops[] holds CAM_ISP_HW_EVENT_MAX entries; reject anything
+	 * beyond that before indexing to avoid an out-of-bounds read.
+	 */
+	if (evt_id >= CAM_ISP_HW_EVENT_MAX) {
+		pr_err("%s: Invalid event id %d\n", __func__, evt_id);
+		return -EINVAL;
+	}
+
+	spin_lock(&ctx->lock);
+	CDBG("%s: Enter: State %d, Substate %d, evt id %d\n",
+		__func__, ctx->state, ctx_isp->substate_activated, evt_id);
+	if (ctx_isp->substate_machine_irq[ctx_isp->substate_activated].
+		irq_ops[evt_id]) {
+		rc = ctx_isp->substate_machine_irq[ctx_isp->substate_activated].
+			irq_ops[evt_id](ctx_isp, evt_data);
+	} else {
+		CDBG("%s: No handle function for substate %d\n", __func__,
+			ctx_isp->substate_activated);
+	}
+	CDBG("%s: Exit: State %d Substate %d\n",
+		__func__, ctx->state, ctx_isp->substate_activated);
+	spin_unlock(&ctx->lock);
+	return rc;
+}
+
+/* top state machine */
+static struct cam_ctx_ops
+	cam_isp_ctx_top_state_machine[CAM_CTX_STATE_MAX] = {
+	/* Uninit */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Available */
+	{
+		.ioctl_ops = {
+			.acquire_dev = __cam_isp_ctx_acquire_dev_in_available,
+		},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Acquired */
+	{
+		.ioctl_ops = {
+			.release_dev = __cam_isp_ctx_release_dev_in_top_state,
+			.config_dev = __cam_isp_ctx_config_dev_in_acquired,
+		},
+		.crm_ops = {
+			.link = __cam_isp_ctx_link_in_acquired,
+			.unlink = __cam_isp_ctx_unlink_in_acquired,
+			.get_dev_info = __cam_isp_ctx_get_dev_info_in_acquired,
+		},
+		.irq_ops = NULL,
+	},
+	/* Ready */
+	{
+		.ioctl_ops = {
+			.start_dev = __cam_isp_ctx_start_dev_in_ready,
+			.release_dev = __cam_isp_ctx_release_dev_in_top_state,
+			.config_dev = __cam_isp_ctx_config_dev_in_top_state,
+		},
+		.crm_ops = {
+			.unlink = __cam_isp_ctx_unlink_in_ready,
+		},
+		.irq_ops = NULL,
+	},
+	/* Activated */
+	{
+		.ioctl_ops = {
+			.stop_dev = __cam_isp_ctx_stop_dev_in_activated,
+			.release_dev = __cam_isp_ctx_release_dev_in_activated,
+			.config_dev = __cam_isp_ctx_config_dev_in_top_state,
+		},
+		.crm_ops = {
+			.apply_req = __cam_isp_ctx_apply_req,
+		},
+		.irq_ops = __cam_isp_ctx_handle_irq_in_activated,
+	},
+};
+
+
+/*
+ * Initialize an ISP context and link it with its base camera context.
+ * Wires the activated-substate machines, cross-links the common and
+ * ISP-private request storage, then initializes the base context and
+ * installs the ISP top state machine on it.
+ *
+ * Returns 0 on success, negative on failure.
+ * NOTE(review): the invalid-argument path returns the initial rc of -1
+ * (== -EPERM); a specific errno such as -EINVAL would be clearer.
+ */
+int cam_isp_context_init(struct cam_isp_context *ctx,
+	struct cam_context *ctx_base,
+	struct cam_req_mgr_kmd_ops *crm_node_intf,
+	struct cam_hw_mgr_intf *hw_intf)
+
+{
+	int rc = -1;
+	int i;
+
+	if (!ctx || !ctx_base) {
+		pr_err("%s: Invalid Context\n", __func__);
+		goto err;
+	}
+
+	/* ISP context setup */
+	memset(ctx, 0, sizeof(*ctx));
+
+	ctx->base = ctx_base;
+	ctx->frame_id = 0;
+	ctx->hw_ctx = NULL;
+	ctx->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+	ctx->substate_machine = cam_isp_ctx_activated_state_machine;
+	ctx->substate_machine_irq = cam_isp_ctx_activated_state_machine_irq;
+
+	/* pair each common request object with its ISP-private twin */
+	for (i = 0; i < CAM_CTX_REQ_MAX; i++) {
+		ctx->req_base[i].req_priv = &ctx->req_isp[i];
+		ctx->req_isp[i].base = &ctx->req_base[i];
+	}
+
+	/* camera context setup */
+	rc = cam_context_init(ctx_base, crm_node_intf, hw_intf, ctx->req_base,
+		CAM_CTX_REQ_MAX);
+	if (rc) {
+		pr_err("%s: Camera Context Base init failed\n", __func__);
+		goto err;
+	}
+
+	/* link camera context with isp context */
+	ctx_base->state_machine = cam_isp_ctx_top_state_machine;
+	ctx_base->ctx_priv = ctx;
+
+err:
+	return rc;
+}
+
+/*
+ * Deinitialize an ISP context: tear down the base context first, warn
+ * if the context is not back in the SOF substate (it was not cleanly
+ * stopped), then scrub the whole object.  Always returns 0.
+ */
+int cam_isp_context_deinit(struct cam_isp_context *ctx)
+{
+	int rc = 0;
+
+	if (ctx->base)
+		cam_context_deinit(ctx->base);
+
+	if (ctx->substate_activated != CAM_ISP_CTX_ACTIVATED_SOF)
+		pr_err("%s: ISP context substate is invalid\n", __func__);
+
+	memset(ctx, 0, sizeof(*ctx));
+	return rc;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
new file mode 100644
index 0000000..dae1dda
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
@@ -0,0 +1,151 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_ISP_CONTEXT_H_
+#define _CAM_ISP_CONTEXT_H_
+
+
+#include <linux/spinlock.h>
+#include <uapi/media/cam_isp.h>
+
+#include "cam_context.h"
+#include "cam_isp_hw_mgr_intf.h"
+
+/*
+ * Maximum hw resource - This number is based on the maximum
+ * output port resource. The current maximum resource number
+ * is 20.
+ */
+#define CAM_ISP_CTX_RES_MAX                     20
+
+/*
+ * Maximum configuration entry size - This is based on the
+ * worst case DUAL IFE use case plus some margin.
+ */
+#define CAM_ISP_CTX_CFG_MAX                     20
+
+/* forward declaration */
+struct cam_isp_context;
+
+/* cam isp context irq handling function type */
+typedef int (*cam_isp_hw_event_cb_func)(struct cam_isp_context *ctx_isp,
+	void *evt_data);
+
+/**
+ * enum cam_isp_ctx_activated_substate - sub states for activated
+ *
+ */
+/*
+ * The exact transition rules between these substates are defined by
+ * the substate machine tables in cam_isp_context.c; the enum values
+ * index those tables, so CAM_ISP_CTX_ACTIVATED_MAX must stay last.
+ */
+enum cam_isp_ctx_activated_substate {
+	CAM_ISP_CTX_ACTIVATED_SOF,
+	CAM_ISP_CTX_ACTIVATED_APPLIED,
+	CAM_ISP_CTX_ACTIVATED_EPOCH,
+	CAM_ISP_CTX_ACTIVATED_BUBBLE,
+	CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED,
+	CAM_ISP_CTX_ACTIVATED_HALT,
+	CAM_ISP_CTX_ACTIVATED_MAX,
+};
+
+
+/**
+ * struct cam_isp_ctx_irq_ops - Function table for handling IRQ callbacks
+ *
+ * @irq_ops:               Array of handle function pointers.
+ *
+ */
+struct cam_isp_ctx_irq_ops {
+	/* indexed by the hw event id of the incoming IRQ event */
+	cam_isp_hw_event_cb_func         irq_ops[CAM_ISP_HW_EVENT_MAX];
+};
+
+/**
+ * struct cam_isp_ctx_req - ISP context request object
+ *
+ * @base:                  Common request object pointer
+ * @cfg:                   ISP hardware configuration array
+ * @num_cfg:               Number of ISP hardware configuration entries
+ * @fence_map_out:         Output fence mapping array
+ * @num_fence_map_out:     Number of the output fence map
+ * @fence_map_in:          Input fence mapping array
+ * @num_fence_map_in:      Number of input fence map
+ * @num_acked:             Count to track acked entries for output.
+ *                         If count equals the number of fence out, it means
+ *                         the request has been completed.
+ * @bubble_report:         Flag to track if bubble report is active on
+ *                         current request
+ *
+ */
+struct cam_isp_ctx_req {
+	struct cam_ctx_request          *base;
+
+	struct cam_hw_update_entry       cfg[CAM_ISP_CTX_CFG_MAX];
+	uint32_t                         num_cfg;
+	struct cam_hw_fence_map_entry    fence_map_out[CAM_ISP_CTX_RES_MAX];
+	uint32_t                         num_fence_map_out;
+	struct cam_hw_fence_map_entry    fence_map_in[CAM_ISP_CTX_RES_MAX];
+	uint32_t                         num_fence_map_in;
+	uint32_t                         num_acked;
+	int32_t                          bubble_report;
+};
+
+/**
+ * struct cam_isp_context  - ISP context object
+ *
+ * @base:                  Common context object pointer
+ * @frame_id:              Frame id tracking for the isp context
+ * @substate_activated:    Current substate for the activated state.
+ * @substate_machine:      ISP substate machine for external interface
+ * @substate_machine_irq:  ISP substate machine for irq handling
+ * @req_base:              Common request object storage
+ * @req_isp:               ISP private request object storage
+ * @hw_ctx:                HW object returned by the acquire device command
+ *
+ */
+struct cam_isp_context {
+	struct cam_context              *base;
+
+	int64_t                          frame_id;
+	uint32_t                         substate_activated;
+	struct cam_ctx_ops              *substate_machine;
+	struct cam_isp_ctx_irq_ops      *substate_machine_irq;
+
+	struct cam_ctx_request           req_base[CAM_CTX_REQ_MAX];
+	struct cam_isp_ctx_req           req_isp[CAM_CTX_REQ_MAX];
+
+	void                            *hw_ctx;
+};
+
+/**
+ * cam_isp_context_init()
+ *
+ * @brief:              Initialization function for the ISP context
+ *
+ * @ctx:                ISP context obj to be initialized
+ * @bridge_ops:         Bridge callback function
+ * @hw_intf:            ISP hw manager interface
+ *
+ */
+int cam_isp_context_init(struct cam_isp_context *ctx,
+	struct cam_context *ctx_base,
+	struct cam_req_mgr_kmd_ops *bridge_ops,
+	struct cam_hw_mgr_intf *hw_intf);
+
+/**
+ * cam_isp_context_deinit()
+ *
+ * @brief:               Deinitialize function for the ISP context
+ *
+ * @ctx:                 ISP context obj to be deinitialized
+ *
+ */
+int cam_isp_context_deinit(struct cam_isp_context *ctx);
+
+
+#endif  /* _CAM_ISP_CONTEXT_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c
new file mode 100644
index 0000000..4cebb58
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c
@@ -0,0 +1,132 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+#include <uapi/media/cam_req_mgr.h>
+#include "cam_isp_dev.h"
+#include "cam_isp_log.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_isp_hw_mgr_intf.h"
+#include "cam_node.h"
+
+static struct cam_isp_dev g_isp_dev;
+
+/* OF match table: binds this driver to "qcom,cam-isp" DT nodes */
+static const struct of_device_id cam_isp_dt_match[] = {
+	{
+		.compatible = "qcom,cam-isp"
+	},
+	{}
+};
+
+/*
+ * Platform driver remove: deinitialize every ISP context, unregister
+ * the v4l2 subdevice and scrub the global device state.  Always
+ * returns 0; failures are only logged since remove cannot be aborted.
+ */
+static int cam_isp_dev_remove(struct platform_device *pdev)
+{
+	int rc = 0;
+	int i;
+
+	/* clean up resources */
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		rc = cam_isp_context_deinit(&g_isp_dev.ctx_isp[i]);
+		if (rc)
+			pr_err("%s: ISP context %d deinit failed\n",
+				__func__, i);
+	}
+
+	rc = cam_subdev_remove(&g_isp_dev.sd);
+	if (rc)
+		pr_err("%s: Unregister failed\n", __func__);
+
+	memset(&g_isp_dev, 0, sizeof(g_isp_dev));
+	return 0;
+}
+
+/*
+ * Probe: create the ISP v4l2 subdevice/node, bring up the ISP HW
+ * manager, initialize all ISP contexts and register them with the node.
+ *
+ * Error-path fixes over the original:
+ *  - contexts initialized before a failure are now deinitialized;
+ *  - the probe failure code is no longer clobbered by the return value
+ *    of cam_subdev_remove(), which previously could make a failed
+ *    probe report success.
+ */
+static int cam_isp_dev_probe(struct platform_device *pdev)
+{
+	int rc = -1;
+	int i;
+	struct cam_hw_mgr_intf         hw_mgr_intf;
+	struct cam_node               *node;
+
+	/* Initialize the v4l2 subdevice first. (create cam_node) */
+	rc = cam_subdev_probe(&g_isp_dev.sd, pdev, CAM_ISP_DEV_NAME,
+		CAM_IFE_DEVICE_TYPE);
+	if (rc) {
+		pr_err("%s: ISP cam_subdev_probe failed!\n", __func__);
+		goto err;
+	}
+	node = (struct cam_node *) g_isp_dev.sd.token;
+
+	rc = cam_isp_hw_mgr_init(pdev->dev.of_node, &hw_mgr_intf);
+	if (rc != 0) {
+		pr_err("%s: Can not initialized ISP HW manager!\n", __func__);
+		goto unregister;
+	}
+
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		rc = cam_isp_context_init(&g_isp_dev.ctx_isp[i],
+			&g_isp_dev.ctx[i],
+			&node->crm_node_intf,
+			&node->hw_mgr_intf);
+		if (rc) {
+			pr_err("%s: ISP context init failed!\n", __func__);
+			goto deinit_ctx;
+		}
+	}
+
+	rc = cam_node_init(node, &hw_mgr_intf, g_isp_dev.ctx, CAM_CTX_MAX,
+		CAM_ISP_DEV_NAME);
+	if (rc) {
+		pr_err("%s: ISP node init failed!\n", __func__);
+		goto deinit_ctx;
+	}
+
+	pr_info("%s: Camera ISP probe complete\n", __func__);
+
+	return 0;
+deinit_ctx:
+	/* unwind only the contexts that were successfully initialized */
+	while (--i >= 0)
+		cam_isp_context_deinit(&g_isp_dev.ctx_isp[i]);
+unregister:
+	/* keep the original failure code; ignore the remove result */
+	cam_subdev_remove(&g_isp_dev.sd);
+err:
+	return rc;
+}
+
+
+/* Platform driver glue for the "qcom,cam-isp" device node */
+static struct platform_driver isp_driver = {
+	.probe = cam_isp_dev_probe,
+	.remove = cam_isp_dev_remove,
+	.driver = {
+		.name = "cam_isp",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_isp_dt_match,
+	},
+};
+
+/* Module entry: register the ISP platform driver */
+static int __init cam_isp_dev_init_module(void)
+{
+	return platform_driver_register(&isp_driver);
+}
+
+/* Module exit: unregister the ISP platform driver */
+static void __exit cam_isp_dev_exit_module(void)
+{
+	platform_driver_unregister(&isp_driver);
+}
+
+module_init(cam_isp_dev_init_module);
+module_exit(cam_isp_dev_exit_module);
+MODULE_DESCRIPTION("MSM ISP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.h b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.h
new file mode 100644
index 0000000..95463ca
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.h
@@ -0,0 +1,35 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_ISP_DEV_H_
+#define _CAM_ISP_DEV_H_
+
+#include "cam_subdev.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_context.h"
+#include "cam_isp_context.h"
+
+/**
+ * struct cam_isp_dev - Camera ISP V4l2 device node
+ *
+ * @sd:                    Common camera subdevice node
+ * @ctx:                   Isp base context storage
+ * @ctx_isp:               Isp private context storage
+ *
+ */
+struct cam_isp_dev {
+	struct cam_subdev          sd;
+	struct cam_context         ctx[CAM_CTX_MAX];
+	struct cam_isp_context     ctx_isp[CAM_CTX_MAX];
+};
+
+#endif /* _CAM_ISP_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_log.h b/drivers/media/platform/msm/camera/cam_isp/cam_isp_log.h
new file mode 100644
index 0000000..4f5205e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_log.h
@@ -0,0 +1,26 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_ISP_LOG_H_
+#define _CAM_ISP_LOG_H_
+
+#include <linux/kernel.h>
+
+/* Compile-time trace switch: set to 0 to compile ISP_TRACE out */
+#define ISP_TRACE_ENABLE			1
+
+#if (ISP_TRACE_ENABLE == 1)
+	/* enabled: emits into the ftrace ring buffer via trace_printk */
+	#define ISP_TRACE(args...)		trace_printk(args)
+#else
+	/* disabled: expands to nothing */
+	#define ISP_TRACE(arg...)
+#endif
+
+#endif /* _CAM_ISP_LOG_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/Makefile
new file mode 100644
index 0000000..2c6eaba
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/Makefile
@@ -0,0 +1,13 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += hw_utils/ isp_hw/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_isp_hw_mgr.o cam_ife_hw_mgr.o
+
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
new file mode 100644
index 0000000..259e773
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -0,0 +1,3049 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <uapi/media/cam_isp.h>
+#include "cam_smmu_api.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_isp_hw_mgr_intf.h"
+#include "cam_isp_hw.h"
+#include "cam_ife_csid_hw_intf.h"
+#include "cam_vfe_hw_intf.h"
+#include "cam_isp_packet_parser.h"
+#include "cam_ife_hw_mgr.h"
+#include "cam_cdm_intf_api.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+#define CAM_IFE_HW_ENTRIES_MAX  20
+
+static struct cam_ife_hw_mgr g_ife_hw_mgr;
+
+/*
+ * Query-capability handler: copy the query struct in from userspace,
+ * fill in the SMMU/CDM iommu handles and advertise two IFE devices at
+ * hw version 1.7.0, then copy the result back out.
+ * NOTE(review): assumes query_isp.dev_caps[] holds at least num_dev
+ * entries - confirm against the uapi struct definition.
+ */
+static int cam_ife_mgr_get_hw_caps(void *hw_mgr_priv,
+	void *hw_caps_args)
+{
+	int rc = 0;
+	int i;
+	struct cam_ife_hw_mgr             *hw_mgr = hw_mgr_priv;
+	struct cam_query_cap_cmd          *query = hw_caps_args;
+	struct cam_isp_query_cap_cmd       query_isp;
+
+	CDBG("%s: enter\n", __func__);
+
+	if (copy_from_user(&query_isp, (void __user *)query->caps_handle,
+		sizeof(struct cam_isp_query_cap_cmd))) {
+		rc = -EFAULT;
+		return rc;
+	}
+
+	query_isp.device_iommu.non_secure = hw_mgr->mgr_common.img_iommu_hdl;
+	query_isp.device_iommu.secure = hw_mgr->mgr_common.img_iommu_hdl_secure;
+	query_isp.cdm_iommu.non_secure = hw_mgr->mgr_common.cmd_iommu_hdl;
+	query_isp.cdm_iommu.secure = hw_mgr->mgr_common.cmd_iommu_hdl_secure;
+	query_isp.num_dev = 2;
+	for (i = 0; i < query_isp.num_dev; i++) {
+		query_isp.dev_caps[i].hw_type = CAM_ISP_HW_IFE;
+		query_isp.dev_caps[i].hw_version.major = 1;
+		query_isp.dev_caps[i].hw_version.minor = 7;
+		query_isp.dev_caps[i].hw_version.incr = 0;
+		query_isp.dev_caps[i].hw_version.reserved = 0;
+	}
+
+	if (copy_to_user((void __user *)query->caps_handle, &query_isp,
+		sizeof(struct cam_isp_query_cap_cmd)))
+		rc = -EFAULT;
+
+	CDBG("%s: exit rc :%d !\n", __func__, rc);
+
+	return rc;
+}
+
+/*
+ * Return 1 when the given out-resource id is one of the four RDI
+ * paths, 0 otherwise.  (The parameter is a resource id, despite the
+ * name "format".)
+ */
+static int cam_ife_hw_mgr_is_rdi_res(uint32_t format)
+{
+	int rc = 0;
+
+	switch (format) {
+	case CAM_ISP_IFE_OUT_RES_RDI_0:
+	case CAM_ISP_IFE_OUT_RES_RDI_1:
+	case CAM_ISP_IFE_OUT_RES_RDI_2:
+	case CAM_ISP_IFE_OUT_RES_RDI_3:
+		rc = 1;
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}
+
+/*
+ * Call the init op on every split half of an ISP HW resource.  Stops
+ * at the first failure; halves initialized before the failure are not
+ * rolled back here.  Returns 0 on success.
+ */
+static int cam_ife_hw_mgr_init_hw_res(
+	struct cam_ife_hw_mgr_res   *isp_hw_res)
+{
+	int i;
+	int rc = -1;
+	struct cam_hw_intf      *hw_intf;
+
+	for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+		if (!isp_hw_res->hw_res[i])
+			continue;
+		hw_intf = isp_hw_res->hw_res[i]->hw_intf;
+		CDBG("%s: enabled vfe hardware %d\n", __func__,
+			hw_intf->hw_idx);
+		if (hw_intf->hw_ops.init) {
+			rc = hw_intf->hw_ops.init(hw_intf->hw_priv,
+				isp_hw_res->hw_res[i],
+				sizeof(struct cam_isp_resource_node));
+			if (rc)
+				goto err;
+		}
+	}
+
+	return 0;
+err:
+	pr_err("%s: INIT HW res failed! (type:%d, id:%d)", __func__,
+		isp_hw_res->res_type, isp_hw_res->res_id);
+	return rc;
+}
+
+/*
+ * Start every split half of an ISP HW resource.  A missing start op is
+ * treated as an error; halves started before a failure are not stopped
+ * here.  Returns 0 on success.
+ */
+static int cam_ife_hw_mgr_start_hw_res(
+	struct cam_ife_hw_mgr_res   *isp_hw_res)
+{
+	int i;
+	int rc = -1;
+	struct cam_hw_intf      *hw_intf;
+
+	for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+		if (!isp_hw_res->hw_res[i])
+			continue;
+		hw_intf = isp_hw_res->hw_res[i]->hw_intf;
+		if (hw_intf->hw_ops.start) {
+			rc = hw_intf->hw_ops.start(hw_intf->hw_priv,
+				isp_hw_res->hw_res[i],
+				sizeof(struct cam_isp_resource_node));
+			if (rc) {
+				pr_err("%s: Can not start HW resources!\n",
+					__func__);
+				goto err;
+			}
+		} else {
+			pr_err("%s:function null\n", __func__);
+			goto err;
+		}
+	}
+
+	return 0;
+err:
+	pr_err("%s: Start hw res failed! (type:%d, id:%d)", __func__,
+		isp_hw_res->res_type, isp_hw_res->res_id);
+	return rc;
+}
+
+/*
+ * Stop every split half of an ISP HW resource.  Best effort: stop
+ * results are ignored and a missing stop op is only logged.
+ */
+static void cam_ife_hw_mgr_stop_hw_res(
+	struct cam_ife_hw_mgr_res   *isp_hw_res)
+{
+	int i;
+	struct cam_hw_intf      *hw_intf;
+
+	for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+		if (!isp_hw_res->hw_res[i])
+			continue;
+		hw_intf = isp_hw_res->hw_res[i]->hw_intf;
+		if (hw_intf->hw_ops.stop)
+			hw_intf->hw_ops.stop(hw_intf->hw_priv,
+				isp_hw_res->hw_res[i],
+				sizeof(struct cam_isp_resource_node));
+		else
+			pr_err("%s:stop null\n", __func__);
+	}
+}
+
+/*
+ * Deinit every split half of an ISP HW resource.  A missing deinit op
+ * is silently skipped; deinit results are ignored.
+ */
+static void cam_ife_hw_mgr_deinit_hw_res(
+	struct cam_ife_hw_mgr_res   *isp_hw_res)
+{
+	int i;
+	struct cam_hw_intf      *hw_intf;
+
+	for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+		if (!isp_hw_res->hw_res[i])
+			continue;
+		hw_intf = isp_hw_res->hw_res[i]->hw_intf;
+		if (hw_intf->hw_ops.deinit)
+			hw_intf->hw_ops.deinit(hw_intf->hw_priv,
+				isp_hw_res->hw_res[i],
+				sizeof(struct cam_isp_resource_node));
+	}
+}
+
+/*
+ * Append a hw mgr resource wrapper to @src_list.  A NULL *res is
+ * silently ignored.  Always returns 0.
+ */
+static int cam_ife_hw_mgr_put_res(
+	struct list_head                *src_list,
+	struct cam_ife_hw_mgr_res      **res)
+{
+	int rc                              = 0;
+	struct cam_ife_hw_mgr_res *res_ptr  = NULL;
+
+	res_ptr = *res;
+	if (res_ptr)
+		list_add_tail(&res_ptr->list, src_list);
+
+	return rc;
+}
+
+/*
+ * Pop the first free resource wrapper from @src_list into *res.  When
+ * the list is empty, *res is set to NULL and -1 is returned.
+ */
+static int cam_ife_hw_mgr_get_res(
+	struct list_head                *src_list,
+	struct cam_ife_hw_mgr_res      **res)
+{
+	int rc = 0;
+	struct cam_ife_hw_mgr_res *res_ptr  = NULL;
+
+	if (!list_empty(src_list)) {
+		res_ptr = list_first_entry(src_list,
+			struct cam_ife_hw_mgr_res, list);
+		list_del_init(&res_ptr->list);
+	} else {
+		pr_err("No more free ife hw mgr ctx!\n");
+		rc = -1;
+	}
+	*res = res_ptr;
+
+	return rc;
+}
+
+/*
+ * Release both split halves of an ISP HW resource back to their owning
+ * hw interface, unlink the wrapper from whatever list it is on and
+ * scrub it so it can be returned to the free pool.  Always returns 0;
+ * per-half release failures are only logged.
+ */
+static int cam_ife_hw_mgr_free_hw_res(
+	struct cam_ife_hw_mgr_res   *isp_hw_res)
+{
+	int rc = 0;
+	int i;
+	struct cam_hw_intf      *hw_intf;
+
+	for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+		if (!isp_hw_res->hw_res[i])
+			continue;
+		hw_intf = isp_hw_res->hw_res[i]->hw_intf;
+		if (hw_intf->hw_ops.release) {
+			rc = hw_intf->hw_ops.release(hw_intf->hw_priv,
+				isp_hw_res->hw_res[i],
+				sizeof(struct cam_isp_resource_node));
+			if (rc)
+				pr_err("%s:Release hw resource id %d failed!\n",
+					__func__, isp_hw_res->res_id);
+			isp_hw_res->hw_res[i] = NULL;
+		} else
+			pr_err("%s:Release null\n", __func__);
+	}
+	/* caller should make sure the resource is in a list */
+	list_del_init(&isp_hw_res->list);
+	memset(isp_hw_res, 0, sizeof(*isp_hw_res));
+	/* re-init the list head the memset just cleared */
+	INIT_LIST_HEAD(&isp_hw_res->list);
+
+	return 0;
+}
+
+/*
+ * Stop all CSID resources in @stop_list that live on the hw instance
+ * @base_idx.  Matching resource nodes are collected into a local array
+ * and handed to the hw in a single stop call carrying @stop_cmd.
+ */
+static int cam_ife_mgr_csid_stop_hw(
+	struct cam_ife_hw_mgr_ctx *ctx, struct list_head  *stop_list,
+		uint32_t  base_idx, uint32_t stop_cmd)
+{
+	struct cam_ife_hw_mgr_res      *hw_mgr_res;
+	struct cam_isp_resource_node   *isp_res;
+	struct cam_isp_resource_node   *stop_res[CAM_IFE_PIX_PATH_RES_MAX - 1];
+	struct cam_csid_hw_stop_args    stop;
+	struct cam_hw_intf             *hw_intf;
+	uint32_t i, cnt;
+
+	cnt = 0;
+	list_for_each_entry(hw_mgr_res, stop_list, list) {
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!hw_mgr_res->hw_res[i])
+				continue;
+
+			isp_res = hw_mgr_res->hw_res[i];
+			if (isp_res->hw_intf->hw_idx != base_idx)
+				continue;
+
+			/* bound check so stop_res[] can not overflow */
+			if (cnt >= CAM_IFE_PIX_PATH_RES_MAX - 1) {
+				pr_err("%s: too many resources to stop\n",
+					__func__);
+				return -EINVAL;
+			}
+			stop_res[cnt] = isp_res;
+			cnt++;
+		}
+	}
+
+	if (cnt) {
+		/* every collected node is on the same hw instance */
+		hw_intf =  stop_res[0]->hw_intf;
+		stop.num_res = cnt;
+		stop.node_res = stop_res;
+		stop.stop_cmd = stop_cmd;
+		hw_intf->hw_ops.stop(hw_intf->hw_priv, &stop, sizeof(stop));
+	}
+
+	return 0;
+}
+
+/*
+ * Release every HW resource held by @ife_ctx in leaf-to-root order
+ * (out ports, IFE source, CSID, CID, then the IFE root input),
+ * returning list-based wrappers to the context free list, and clear
+ * the event callbacks.  Always returns 0.
+ */
+static int cam_ife_hw_mgr_release_hw_for_ctx(
+	struct cam_ife_hw_mgr_ctx  *ife_ctx)
+{
+	uint32_t                          i;
+	struct cam_ife_hw_mgr_res        *hw_mgr_res;
+	struct cam_ife_hw_mgr_res        *hw_mgr_res_temp;
+
+	/* ife leaf resource */
+	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++)
+		cam_ife_hw_mgr_free_hw_res(&ife_ctx->res_list_ife_out[i]);
+
+	/* ife source resource */
+	list_for_each_entry_safe(hw_mgr_res, hw_mgr_res_temp,
+		&ife_ctx->res_list_ife_src, list) {
+		cam_ife_hw_mgr_free_hw_res(hw_mgr_res);
+		cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, &hw_mgr_res);
+	}
+
+	/* ife csid resource */
+	list_for_each_entry_safe(hw_mgr_res, hw_mgr_res_temp,
+		&ife_ctx->res_list_ife_csid, list) {
+		cam_ife_hw_mgr_free_hw_res(hw_mgr_res);
+		cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, &hw_mgr_res);
+	}
+
+	/* ife cid resource */
+	list_for_each_entry_safe(hw_mgr_res, hw_mgr_res_temp,
+		&ife_ctx->res_list_ife_cid, list) {
+		cam_ife_hw_mgr_free_hw_res(hw_mgr_res);
+		cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, &hw_mgr_res);
+	}
+
+	/* ife root node */
+	if (ife_ctx->res_list_ife_in.res_type != CAM_IFE_HW_MGR_RES_UNINIT)
+		cam_ife_hw_mgr_free_hw_res(&ife_ctx->res_list_ife_in);
+
+	/* clean up the callback function */
+	ife_ctx->common.cb_priv = NULL;
+	memset(ife_ctx->common.event_cb, 0, sizeof(ife_ctx->common.event_cb));
+
+	CDBG("%s:%d: release context completed ctx id:%d\n",
+		__func__, __LINE__, ife_ctx->ctx_index);
+
+	return 0;
+}
+
+
+/*
+ * Return an IFE hw mgr context to @src_list and NULL out the caller's
+ * pointer; serialized by the global ctx_mutex.  Always returns 0.
+ */
+static int cam_ife_hw_mgr_put_ctx(
+	struct list_head                 *src_list,
+	struct cam_ife_hw_mgr_ctx       **ife_ctx)
+{
+	int rc                              = 0;
+	struct cam_ife_hw_mgr_ctx *ctx_ptr  = NULL;
+
+	mutex_lock(&g_ife_hw_mgr.ctx_mutex);
+	ctx_ptr = *ife_ctx;
+	if (ctx_ptr)
+		list_add_tail(&ctx_ptr->list, src_list);
+	*ife_ctx = NULL;
+	mutex_unlock(&g_ife_hw_mgr.ctx_mutex);
+	return rc;
+}
+
+/*
+ * Pop a free IFE hw mgr context from @src_list, serialized by the
+ * global ctx_mutex.  When none is available, *ife_ctx is set to NULL
+ * and -1 is returned.
+ */
+static int cam_ife_hw_mgr_get_ctx(
+	struct list_head                *src_list,
+	struct cam_ife_hw_mgr_ctx       **ife_ctx)
+{
+	int rc                              = 0;
+	struct cam_ife_hw_mgr_ctx *ctx_ptr  = NULL;
+
+	mutex_lock(&g_ife_hw_mgr.ctx_mutex);
+	if (!list_empty(src_list)) {
+		ctx_ptr = list_first_entry(src_list,
+			struct cam_ife_hw_mgr_ctx, list);
+		list_del_init(&ctx_ptr->list);
+	} else {
+		pr_err("No more free ife hw mgr ctx!\n");
+		rc = -1;
+	}
+	*ife_ctx = ctx_ptr;
+	mutex_unlock(&g_ife_hw_mgr.ctx_mutex);
+
+	return rc;
+}
+
+/*
+ * Record (split_id, base_idx) in the per-context base table.  A new
+ * base index appends an entry; an existing entry may have its split id
+ * upgraded from SPLIT_MAX ("don't care") to a concrete split.
+ */
+static void cam_ife_mgr_add_base_info(
+	struct cam_ife_hw_mgr_ctx       *ctx,
+	enum cam_isp_hw_split_id         split_id,
+	uint32_t                         base_idx)
+{
+	uint32_t    i;
+
+	if (!ctx->num_base) {
+		CDBG("%s: Add split id = %d for base idx = %d\n", __func__,
+			split_id, base_idx);
+		ctx->base[0].split_id = split_id;
+		ctx->base[0].idx      = base_idx;
+		ctx->num_base++;
+	} else {
+		/*
+		 * Check if the base index already exists in the list.
+		 * NOTE(review): this scans the full array, not just the
+		 * first num_base entries - a zero-initialized slot could
+		 * match base_idx 0; confirm intended.
+		 */
+		for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
+			if (ctx->base[i].idx == base_idx) {
+				if (split_id != CAM_ISP_HW_SPLIT_MAX &&
+					ctx->base[i].split_id ==
+						CAM_ISP_HW_SPLIT_MAX)
+					ctx->base[i].split_id = split_id;
+
+				break;
+			}
+		}
+
+		if (i == CAM_IFE_HW_NUM_MAX) {
+			CDBG("%s: Add split id = %d for base idx = %d\n",
+				__func__, split_id, base_idx);
+			ctx->base[ctx->num_base].split_id = split_id;
+			ctx->base[ctx->num_base].idx      = base_idx;
+			ctx->num_base++;
+		}
+	}
+}
+
+/*
+ * Build the per-context base table from the acquired IFE mux (src)
+ * resources: CAMIF halves record their split index as the split id,
+ * everything else is recorded with SPLIT_MAX as a "don't care" split.
+ * Returns -ENODEV when no mux resources were acquired.
+ */
+static int cam_ife_mgr_process_base_info(
+	struct cam_ife_hw_mgr_ctx        *ctx)
+{
+	struct cam_ife_hw_mgr_res        *hw_mgr_res;
+	struct cam_isp_resource_node     *res = NULL;
+	uint32_t i;
+
+	if (list_empty(&ctx->res_list_ife_src)) {
+		pr_err("%s: Error! Mux List empty\n", __func__);
+		return -ENODEV;
+	}
+
+	/* IFE mux in resources */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+		if (hw_mgr_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
+			continue;
+
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!hw_mgr_res->hw_res[i])
+				continue;
+			res = hw_mgr_res->hw_res[i];
+			if (res->res_id == CAM_ISP_HW_VFE_IN_CAMIF)
+				cam_ife_mgr_add_base_info(ctx, i,
+					res->hw_intf->hw_idx);
+
+			else
+				cam_ife_mgr_add_base_info(ctx,
+						CAM_ISP_HW_SPLIT_MAX,
+						res->hw_intf->hw_idx);
+		}
+	}
+	CDBG("%s: ctx base num = %d\n", __func__, ctx->num_base);
+
+	return 0;
+}
+
+/*
+ * Acquire the VFE out resource matching an RDI input path.  Only the
+ * left half (hw_res[0]) is used here.  The out-port requested by
+ * userspace must match the out resource implied by the RDI input, or
+ * the acquire fails.
+ */
+static int cam_ife_hw_mgr_acquire_res_ife_out_rdi(
+	struct cam_ife_hw_mgr_ctx       *ife_ctx,
+	struct cam_ife_hw_mgr_res       *ife_src_res,
+	struct cam_isp_in_port_info     *in_port)
+{
+	int rc = -EINVAL;
+	struct cam_vfe_acquire_args               vfe_acquire;
+	struct cam_isp_out_port_info             *out_port = NULL;
+	struct cam_ife_hw_mgr_res                *ife_out_res;
+	struct cam_hw_intf                       *hw_intf;
+	uint32_t  i, vfe_out_res_id, vfe_in_res_id;
+
+	/* take left resource */
+	vfe_in_res_id = ife_src_res->hw_res[0]->res_id;
+
+	/* map the RDI input path to its corresponding out resource id */
+	switch (vfe_in_res_id) {
+	case CAM_ISP_HW_VFE_IN_RDI0:
+		vfe_out_res_id = CAM_ISP_IFE_OUT_RES_RDI_0;
+		break;
+	case CAM_ISP_HW_VFE_IN_RDI1:
+		vfe_out_res_id = CAM_ISP_IFE_OUT_RES_RDI_1;
+		break;
+	case CAM_ISP_HW_VFE_IN_RDI2:
+		vfe_out_res_id = CAM_ISP_IFE_OUT_RES_RDI_2;
+		break;
+	case CAM_ISP_HW_VFE_IN_RDI3:
+		vfe_out_res_id = CAM_ISP_IFE_OUT_RES_RDI_3;
+		break;
+	default:
+		pr_err("%s: invalid resource type\n", __func__);
+		goto err;
+	}
+
+	vfe_acquire.rsrc_type = CAM_ISP_RESOURCE_VFE_OUT;
+	vfe_acquire.tasklet = ife_ctx->common.tasklet_info;
+
+	/* low byte of the out res id is the res_list_ife_out[] slot */
+	ife_out_res = &ife_ctx->res_list_ife_out[vfe_out_res_id & 0xFF];
+	for (i = 0; i < in_port->num_out_res; i++) {
+		out_port = &in_port->data[i];
+
+		if (vfe_out_res_id != out_port->res_type)
+			continue;
+
+		vfe_acquire.vfe_out.cdm_ops = ife_ctx->cdm_ops;
+		vfe_acquire.vfe_out.out_port_info = out_port;
+		vfe_acquire.vfe_out.split_id = CAM_ISP_HW_SPLIT_LEFT;
+		vfe_acquire.vfe_out.unique_id = ife_ctx->ctx_index;
+		hw_intf = ife_src_res->hw_res[0]->hw_intf;
+		rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
+			&vfe_acquire,
+			sizeof(struct cam_vfe_acquire_args));
+		if (rc) {
+			pr_err("%s: Can not acquire out resource 0x%x\n",
+				__func__, out_port->res_type);
+			goto err;
+		}
+		break;
+	}
+
+	if (i == in_port->num_out_res) {
+		pr_err("%s: Can not acquire out resource\n", __func__);
+		goto err;
+	}
+
+	ife_out_res->hw_res[0] = vfe_acquire.vfe_out.rsrc_node;
+	ife_out_res->is_dual_vfe = 0;
+	ife_out_res->res_id = vfe_out_res_id;
+	ife_out_res->res_type = CAM_ISP_RESOURCE_VFE_OUT;
+
+	return 0;
+err:
+	return rc;
+}
+
+/*
+ * Acquire VFE out resources for every non-RDI out port of @in_port.
+ * In dual-VFE mode both split halves of the source are reserved (left
+ * half as master); each acquired out resource is linked as a child of
+ * the IFE source resource.  On error, cleanup is done by the caller.
+ */
+static int cam_ife_hw_mgr_acquire_res_ife_out_pixel(
+	struct cam_ife_hw_mgr_ctx       *ife_ctx,
+	struct cam_ife_hw_mgr_res       *ife_src_res,
+	struct cam_isp_in_port_info     *in_port)
+{
+	int rc = -1;
+	uint32_t  i, j, k;
+	struct cam_vfe_acquire_args               vfe_acquire;
+	struct cam_isp_out_port_info             *out_port;
+	struct cam_ife_hw_mgr_res                *ife_out_res;
+	struct cam_hw_intf                       *hw_intf;
+
+	for (i = 0; i < in_port->num_out_res; i++) {
+		out_port = &in_port->data[i];
+		/* low byte of the out res id is the res_list_ife_out slot */
+		k = out_port->res_type & 0xFF;
+		if (k >= CAM_IFE_HW_OUT_RES_MAX) {
+			pr_err("%s: invalid output resource type 0x%x\n",
+				__func__, out_port->res_type);
+			continue;
+		}
+
+		if (cam_ife_hw_mgr_is_rdi_res(out_port->res_type))
+			continue;
+
+		CDBG("%s: res_type 0x%x\n",
+			__func__, out_port->res_type);
+
+		ife_out_res = &ife_ctx->res_list_ife_out[k];
+		ife_out_res->is_dual_vfe = in_port->usage_type;
+
+		vfe_acquire.rsrc_type = CAM_ISP_RESOURCE_VFE_OUT;
+		vfe_acquire.tasklet = ife_ctx->common.tasklet_info;
+		vfe_acquire.vfe_out.cdm_ops = ife_ctx->cdm_ops;
+		vfe_acquire.vfe_out.out_port_info =  out_port;
+		vfe_acquire.vfe_out.is_dual       = ife_src_res->is_dual_vfe;
+		vfe_acquire.vfe_out.unique_id     = ife_ctx->ctx_index;
+
+		for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) {
+			if (!ife_src_res->hw_res[j])
+				continue;
+
+			if (j == CAM_ISP_HW_SPLIT_LEFT) {
+				vfe_acquire.vfe_out.split_id  =
+					CAM_ISP_HW_SPLIT_LEFT;
+				if (ife_src_res->is_dual_vfe) {
+					/*TBD */
+					vfe_acquire.vfe_out.is_master     = 1;
+					vfe_acquire.vfe_out.dual_slave_core =
+						1;
+				} else {
+					vfe_acquire.vfe_out.is_master   = 0;
+					vfe_acquire.vfe_out.dual_slave_core =
+						0;
+				}
+			} else {
+				vfe_acquire.vfe_out.split_id  =
+					CAM_ISP_HW_SPLIT_RIGHT;
+				vfe_acquire.vfe_out.is_master       = 0;
+				vfe_acquire.vfe_out.dual_slave_core = 0;
+			}
+
+			hw_intf = ife_src_res->hw_res[j]->hw_intf;
+			rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
+				&vfe_acquire,
+				sizeof(struct cam_vfe_acquire_args));
+			if (rc) {
+				pr_err("%s:Can not acquire out resource 0x%x\n",
+					__func__, out_port->res_type);
+				goto err;
+			}
+
+			ife_out_res->hw_res[j] =
+				vfe_acquire.vfe_out.rsrc_node;
+			CDBG("%s: resource type :0x%x res id:0x%x\n",
+				__func__, ife_out_res->hw_res[j]->res_type,
+				ife_out_res->hw_res[j]->res_id);
+
+		}
+		ife_out_res->res_type = CAM_ISP_RESOURCE_VFE_OUT;
+		ife_out_res->res_id = out_port->res_type;
+		ife_out_res->parent = ife_src_res;
+		ife_src_res->child[ife_src_res->num_children++] = ife_out_res;
+	}
+
+	return 0;
+err:
+	/* release resource at the entry function */
+	return rc;
+}
+
+/*
+ * Walk all leaf IFE source resources of the context and acquire the
+ * matching OUT resources: pixel path for CAMIF sources, RDI path for
+ * RDI sources.  Stops on the first failure.
+ */
+static int cam_ife_hw_mgr_acquire_res_ife_out(
+	struct cam_ife_hw_mgr_ctx     *ife_ctx,
+	struct cam_isp_in_port_info   *in_port)
+{
+	struct cam_ife_hw_mgr_res *src_res;
+	int rc = -EINVAL;
+
+	list_for_each_entry(src_res, &ife_ctx->res_list_ife_src, list) {
+		/* only leaf sources still need out resources */
+		if (src_res->num_children)
+			continue;
+
+		if (src_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF)
+			rc = cam_ife_hw_mgr_acquire_res_ife_out_pixel(ife_ctx,
+				src_res, in_port);
+		else if (src_res->res_id == CAM_ISP_HW_VFE_IN_RDI0 ||
+			src_res->res_id == CAM_ISP_HW_VFE_IN_RDI1 ||
+			src_res->res_id == CAM_ISP_HW_VFE_IN_RDI2 ||
+			src_res->res_id == CAM_ISP_HW_VFE_IN_RDI3)
+			rc = cam_ife_hw_mgr_acquire_res_ife_out_rdi(ife_ctx,
+				src_res, in_port);
+		else
+			pr_err("%s: Fatal: Unknown IFE SRC resource!\n",
+				__func__);
+
+		if (rc)
+			goto err;
+	}
+
+	return 0;
+err:
+	/* release resource on entry function */
+	return rc;
+}
+
+/*
+ * Acquire a VFE IN (mux/source) resource for every leaf CSID path in the
+ * context.  For dual VFE, the left split is reserved as SYNC_MASTER and
+ * the right split as SYNC_SLAVE.
+ *
+ * Fixes vs. previous revision:
+ *  - removed the duplicated vfe_acquire.rsrc_type assignment;
+ *  - the CSID resource was linked to its source child twice (child[0]
+ *    and child[num_children++]), leaving num_children == 2 for a single
+ *    child; the mapping is one-to-one, so link exactly once.
+ *
+ * Returns 0 on success; on failure resources are released by the
+ * acquire entry function.
+ */
+static int cam_ife_hw_mgr_acquire_res_ife_src(
+	struct cam_ife_hw_mgr_ctx     *ife_ctx,
+	struct cam_isp_in_port_info   *in_port)
+{
+	int rc                = -1;
+	int i;
+	struct cam_ife_hw_mgr_res                  *csid_res;
+	struct cam_ife_hw_mgr_res                  *ife_src_res;
+	struct cam_vfe_acquire_args                 vfe_acquire;
+	struct cam_hw_intf                         *hw_intf;
+	struct cam_ife_hw_mgr                      *ife_hw_mgr;
+
+	ife_hw_mgr = ife_ctx->hw_mgr;
+
+	list_for_each_entry(csid_res, &ife_ctx->res_list_ife_csid, list) {
+		/* skip CSID paths that already have a source attached */
+		if (csid_res->num_children)
+			continue;
+
+		rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list,
+			&ife_src_res);
+		if (rc) {
+			pr_err("%s: No more free hw mgr resource!\n", __func__);
+			goto err;
+		}
+		cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_src,
+			&ife_src_res);
+
+		vfe_acquire.rsrc_type = CAM_ISP_RESOURCE_VFE_IN;
+		vfe_acquire.tasklet = ife_ctx->common.tasklet_info;
+		vfe_acquire.vfe_in.cdm_ops = ife_ctx->cdm_ops;
+
+		switch (csid_res->res_id) {
+		case CAM_IFE_PIX_PATH_RES_IPP:
+			vfe_acquire.vfe_in.res_id = CAM_ISP_HW_VFE_IN_CAMIF;
+			vfe_acquire.vfe_in.in_port = in_port;
+			if (csid_res->is_dual_vfe)
+				vfe_acquire.vfe_in.sync_mode =
+				CAM_ISP_HW_SYNC_MASTER;
+			else
+				vfe_acquire.vfe_in.sync_mode =
+				CAM_ISP_HW_SYNC_NONE;
+
+			break;
+		case CAM_IFE_PIX_PATH_RES_RDI_0:
+			vfe_acquire.vfe_in.res_id = CAM_ISP_HW_VFE_IN_RDI0;
+			vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE;
+			break;
+		case CAM_IFE_PIX_PATH_RES_RDI_1:
+			vfe_acquire.vfe_in.res_id = CAM_ISP_HW_VFE_IN_RDI1;
+			vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE;
+			break;
+		case CAM_IFE_PIX_PATH_RES_RDI_2:
+			vfe_acquire.vfe_in.res_id = CAM_ISP_HW_VFE_IN_RDI2;
+			vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE;
+			break;
+		case CAM_IFE_PIX_PATH_RES_RDI_3:
+			vfe_acquire.vfe_in.res_id = CAM_ISP_HW_VFE_IN_RDI3;
+			vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE;
+			break;
+		default:
+			pr_err("%s: Wrong IFE CSID Resource Node!\n",
+				__func__);
+			goto err;
+		}
+		ife_src_res->res_type = vfe_acquire.rsrc_type;
+		ife_src_res->res_id = vfe_acquire.vfe_in.res_id;
+		ife_src_res->is_dual_vfe = csid_res->is_dual_vfe;
+
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!csid_res->hw_res[i])
+				continue;
+
+			hw_intf = ife_hw_mgr->ife_devices[
+				csid_res->hw_res[i]->hw_intf->hw_idx];
+
+			/* the right split of a dual VFE is the sync slave */
+			if (i == CAM_ISP_HW_SPLIT_RIGHT &&
+				ife_src_res->is_dual_vfe)
+				vfe_acquire.vfe_in.sync_mode =
+				CAM_ISP_HW_SYNC_SLAVE;
+
+			rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
+					&vfe_acquire,
+					sizeof(struct cam_vfe_acquire_args));
+			if (rc) {
+				pr_err("%s:Can not acquire IFE HW res %d!\n",
+					__func__, csid_res->res_id);
+				goto err;
+			}
+			ife_src_res->hw_res[i] = vfe_acquire.vfe_in.rsrc_node;
+			CDBG("%s:acquire success res type :0x%x res id:0x%x\n",
+				__func__, ife_src_res->hw_res[i]->res_type,
+				ife_src_res->hw_res[i]->res_id);
+
+		}
+
+		/*
+		 * One-to-one mapping between the CSID resource and the IFE
+		 * source resource: link the child exactly once.
+		 */
+		csid_res->child[0] = ife_src_res;
+		csid_res->num_children = 1;
+		ife_src_res->parent = csid_res;
+	}
+
+	return 0;
+err:
+	/* release resource at the entry function */
+	return rc;
+}
+
+/*
+ * Acquire a CSID IPP (pixel) path resource for @in_port on the first
+ * CSID hw that accepts the reservation.  For dual VFE usage, a second
+ * CSID (higher index than the first) is additionally reserved as the
+ * SYNC_SLAVE right path.  The acquired resource is linked as a child of
+ * the context's IFE IN root resource.
+ *
+ * @cid_res_id: CID (DT id) obtained from the earlier CID acquire.
+ *
+ * Returns 0 on success; caller (the acquire entry function) releases
+ * partially acquired resources on failure.
+ */
+static int cam_ife_hw_mgr_acquire_res_ife_csid_ipp(
+	struct cam_ife_hw_mgr_ctx          *ife_ctx,
+	struct cam_isp_in_port_info        *in_port,
+	uint32_t                            cid_res_id)
+{
+	int rc = -1;
+	int i, j;
+
+	struct cam_ife_hw_mgr               *ife_hw_mgr;
+	struct cam_ife_hw_mgr_res           *csid_res;
+	struct cam_hw_intf                   *hw_intf;
+	struct cam_csid_hw_reserve_resource_args  csid_acquire;
+
+	ife_hw_mgr = ife_ctx->hw_mgr;
+
+	rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, &csid_res);
+	if (rc) {
+		pr_err("%s: No more free hw mgr resource!\n", __func__);
+		goto err;
+	}
+	cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_csid, &csid_res);
+
+	csid_acquire.res_type = CAM_ISP_RESOURCE_PIX_PATH;
+	csid_acquire.res_id = CAM_IFE_PIX_PATH_RES_IPP;
+	csid_acquire.cid = cid_res_id;
+	csid_acquire.in_port = in_port;
+
+	/* dual VFE usage: the first (left) path is the sync master */
+	if (in_port->usage_type)
+		csid_acquire.sync_mode = CAM_ISP_HW_SYNC_MASTER;
+	else
+		csid_acquire.sync_mode = CAM_ISP_HW_SYNC_NONE;
+
+
+
+	/* try each CSID device until one accepts the reservation */
+	for (i = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) {
+		if (!ife_hw_mgr->csid_devices[i])
+			continue;
+
+		hw_intf = ife_hw_mgr->csid_devices[i];
+		rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, &csid_acquire,
+			sizeof(csid_acquire));
+		if (rc)
+			continue;
+		else
+			break;
+	}
+
+	if (i == CAM_IFE_CSID_HW_NUM_MAX) {
+		pr_err("%s: Can not acquire ife csid ipp resrouce!\n",
+			__func__);
+		goto err;
+	}
+
+	CDBG("%s: acquired csid(%d) left ipp resrouce successfully!\n",
+		__func__, i);
+
+	csid_res->res_type = CAM_ISP_RESOURCE_PIX_PATH;
+	csid_res->res_id = CAM_IFE_PIX_PATH_RES_IPP;
+	csid_res->is_dual_vfe = in_port->usage_type;
+	csid_res->hw_res[0] = csid_acquire.node_res;
+	csid_res->hw_res[1] = NULL;
+
+	if (csid_res->is_dual_vfe) {
+		/* reserve the right (slave) path on a later CSID device */
+		csid_acquire.sync_mode = CAM_ISP_HW_SYNC_SLAVE;
+
+		for (j = i + 1; j < CAM_IFE_CSID_HW_NUM_MAX; j++) {
+			if (!ife_hw_mgr->csid_devices[j])
+				continue;
+
+			hw_intf = ife_hw_mgr->csid_devices[j];
+			rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
+				&csid_acquire, sizeof(csid_acquire));
+			if (rc)
+				continue;
+			else
+				break;
+		}
+
+		if (j == CAM_IFE_CSID_HW_NUM_MAX) {
+			pr_err("%s: Can not acquire ife csid rdi resrouce!\n",
+				__func__);
+			goto err;
+		}
+		csid_res->hw_res[1] = csid_acquire.node_res;
+
+		CDBG("%s:acquired csid(%d)right ipp resrouce successfully!\n",
+			__func__, j);
+
+	}
+	/* link the CSID path under the IFE IN root resource */
+	csid_res->parent = &ife_ctx->res_list_ife_in;
+	ife_ctx->res_list_ife_in.child[
+		ife_ctx->res_list_ife_in.num_children++] = csid_res;
+
+	return 0;
+err:
+	return rc;
+}
+
+/*
+ * Map an RDI out-port type to the corresponding CSID pix-path resource
+ * id.  Non-RDI types map to CAM_IFE_PIX_PATH_RES_MAX.
+ */
+static enum cam_ife_pix_path_res_id
+	cam_ife_hw_mgr_get_ife_csid_rdi_res_type(
+	uint32_t                 out_port_type)
+{
+	if (out_port_type == CAM_ISP_IFE_OUT_RES_RDI_0)
+		return CAM_IFE_PIX_PATH_RES_RDI_0;
+
+	if (out_port_type == CAM_ISP_IFE_OUT_RES_RDI_1)
+		return CAM_IFE_PIX_PATH_RES_RDI_1;
+
+	if (out_port_type == CAM_ISP_IFE_OUT_RES_RDI_2)
+		return CAM_IFE_PIX_PATH_RES_RDI_2;
+
+	if (out_port_type == CAM_ISP_IFE_OUT_RES_RDI_3)
+		return CAM_IFE_PIX_PATH_RES_RDI_3;
+
+	CDBG("%s: maximum rdi output type exceeded\n", __func__);
+	return CAM_IFE_PIX_PATH_RES_MAX;
+}
+
+/*
+ * Acquire one CSID RDI path resource for every RDI out port of
+ * @in_port.  The out-port RDI type maps one-to-one to the CSID RDI
+ * path id.  Each acquired resource is linked as a child of the
+ * context's IFE IN root resource.
+ *
+ * Returns 0 on success; on failure the acquire entry function releases
+ * any partially acquired resources.
+ */
+static int cam_ife_hw_mgr_acquire_res_ife_csid_rdi(
+	struct cam_ife_hw_mgr_ctx     *ife_ctx,
+	struct cam_isp_in_port_info   *in_port,
+	uint32_t                       cid_res_id)
+{
+	int rc = -1;
+	int i, j;
+
+	struct cam_ife_hw_mgr               *ife_hw_mgr;
+	struct cam_ife_hw_mgr_res           *csid_res;
+	struct cam_hw_intf                   *hw_intf;
+	struct cam_isp_out_port_info        *out_port;
+	struct cam_csid_hw_reserve_resource_args  csid_acquire;
+
+	ife_hw_mgr = ife_ctx->hw_mgr;
+
+	for (i = 0; i < in_port->num_out_res; i++) {
+		out_port = &in_port->data[i];
+		/* only RDI out ports are handled here */
+		if (!cam_ife_hw_mgr_is_rdi_res(out_port->res_type))
+			continue;
+
+		rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list,
+			&csid_res);
+		if (rc) {
+			pr_err("%s: No more free hw mgr resource!\n",
+				__func__);
+			goto err;
+		}
+		cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_csid, &csid_res);
+
+		/*
+		 * no need to check since we are doing one to one mapping
+		 * between the csid rdi type and out port rdi type
+		 */
+
+		csid_acquire.res_id =
+			cam_ife_hw_mgr_get_ife_csid_rdi_res_type(
+				out_port->res_type);
+
+		csid_acquire.res_type = CAM_ISP_RESOURCE_PIX_PATH;
+		csid_acquire.cid = cid_res_id;
+		csid_acquire.in_port = in_port;
+		csid_acquire.sync_mode = CAM_ISP_HW_SYNC_NONE;
+
+		/* try each CSID device until one accepts the reservation */
+		for (j = 0; j < CAM_IFE_CSID_HW_NUM_MAX; j++) {
+			if (!ife_hw_mgr->csid_devices[j])
+				continue;
+
+			hw_intf = ife_hw_mgr->csid_devices[j];
+			rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
+				&csid_acquire, sizeof(csid_acquire));
+			if (rc)
+				continue;
+			else
+				break;
+		}
+
+		if (j == CAM_IFE_CSID_HW_NUM_MAX) {
+			pr_err("%s: Can not acquire ife csid rdi resrouce!\n",
+				__func__);
+			goto err;
+		}
+
+		csid_res->res_type = CAM_ISP_RESOURCE_PIX_PATH;
+		csid_res->res_id = csid_acquire.res_id;
+		csid_res->is_dual_vfe = 0;
+		csid_res->hw_res[0] = csid_acquire.node_res;
+		csid_res->hw_res[1] = NULL;
+
+		/* link the RDI path under the IFE IN root resource */
+		csid_res->parent = &ife_ctx->res_list_ife_in;
+		ife_ctx->res_list_ife_in.child[
+			ife_ctx->res_list_ife_in.num_children++] = csid_res;
+	}
+
+	return 0;
+err:
+	/* release resources at the entry function */
+	return rc;
+}
+
+/*
+ * Acquire the IFE IN root resource.  The first acquire initializes the
+ * root from @in_port; any later acquire must use the same input
+ * resource type, otherwise the context cannot serve it.
+ */
+static int cam_ife_hw_mgr_acquire_res_root(
+	struct cam_ife_hw_mgr_ctx          *ife_ctx,
+	struct cam_isp_in_port_info        *in_port)
+{
+	if (ife_ctx->res_list_ife_in.res_type == CAM_IFE_HW_MGR_RES_UNINIT) {
+		/* first acquire: set up the root node */
+		ife_ctx->res_list_ife_in.res_type = CAM_IFE_HW_MGR_RES_ROOT;
+		ife_ctx->res_list_ife_in.res_id = in_port->res_type;
+		ife_ctx->res_list_ife_in.is_dual_vfe = in_port->usage_type;
+		return 0;
+	}
+
+	/* subsequent acquires must match the existing root input */
+	if (ife_ctx->res_list_ife_in.res_id == in_port->res_type)
+		return 0;
+
+	pr_err("%s: No Free resource for this context!\n", __func__);
+	return -1;
+}
+
+/*
+ * Count the RDI and non-RDI (pixel) out ports of @in_port.
+ *
+ * @pixel_count: out, number of pixel-path out ports.
+ * @rdi_count:   out, number of RDI out ports.
+ *
+ * Always returns 0.  (The unused local ife_hw_mgr lookup was removed;
+ * @ife_ctx is kept for interface compatibility.)
+ */
+static int cam_ife_hw_mgr_preprocess_out_port(
+	struct cam_ife_hw_mgr_ctx   *ife_ctx,
+	struct cam_isp_in_port_info *in_port,
+	int                         *pixel_count,
+	int                         *rdi_count)
+{
+	int pixel_num      = 0;
+	int rdi_num        = 0;
+	uint32_t i;
+	struct cam_isp_out_port_info      *out_port;
+
+	for (i = 0; i < in_port->num_out_res; i++) {
+		out_port = &in_port->data[i];
+		if (cam_ife_hw_mgr_is_rdi_res(out_port->res_type))
+			rdi_num++;
+		else
+			pixel_num++;
+	}
+
+	*pixel_count = pixel_num;
+	*rdi_count = rdi_num;
+
+	return 0;
+}
+
+/*
+ * Acquire a CSID CID resource for @in_port on the first CSID hw that
+ * accepts the reservation; for dual VFE a second CID is reserved on a
+ * later CSID device.  The CID (DT id) of the first acquired device is
+ * returned through @cid_res_id for the subsequent path acquires.
+ *
+ * Fix vs. previous revision: csid_acquire.node_res was checked after
+ * the first reserve loop without ever being initialized, so a reserve
+ * op that succeeded without populating it would leave stack garbage
+ * behind the check; initialize it to NULL first (the dual-VFE branch
+ * already did this for the second reserve).
+ */
+static int cam_ife_mgr_acquire_cid_res(
+	struct cam_ife_hw_mgr_ctx          *ife_ctx,
+	struct cam_isp_in_port_info        *in_port,
+	uint32_t                           *cid_res_id)
+{
+	int rc = -1;
+	int i, j;
+	struct cam_ife_hw_mgr               *ife_hw_mgr;
+	struct cam_ife_hw_mgr_res           *cid_res;
+	struct cam_hw_intf                  *hw_intf;
+	struct cam_csid_hw_reserve_resource_args  csid_acquire;
+
+	/* no dual vfe for TPG */
+	if ((in_port->res_type == CAM_ISP_IFE_IN_RES_TPG) &&
+		(in_port->usage_type != 0)) {
+		pr_err("%s: No Dual VFE on TPG input!\n", __func__);
+		goto err;
+	}
+
+	ife_hw_mgr = ife_ctx->hw_mgr;
+
+	rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, &cid_res);
+	if (rc) {
+		pr_err("%s: No more free hw mgr resource!\n", __func__);
+		goto err;
+	}
+	cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_cid, &cid_res);
+
+	csid_acquire.res_type = CAM_ISP_RESOURCE_CID;
+	csid_acquire.in_port = in_port;
+	/* must not be stack garbage when checked after the loop */
+	csid_acquire.node_res = NULL;
+
+	for (i = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) {
+		if (!ife_hw_mgr->csid_devices[i])
+			continue;
+
+		hw_intf = ife_hw_mgr->csid_devices[i];
+		rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, &csid_acquire,
+			sizeof(csid_acquire));
+		if (rc)
+			continue;
+		else
+			break;
+	}
+
+	if (i == CAM_IFE_CSID_HW_NUM_MAX || !csid_acquire.node_res) {
+		pr_err("%s: Can not acquire ife csid rdi resrouce!\n",
+			__func__);
+		goto err;
+	}
+
+	cid_res->res_type = CAM_IFE_HW_MGR_RES_CID;
+	cid_res->res_id = csid_acquire.node_res->res_id;
+	cid_res->is_dual_vfe = in_port->usage_type;
+	cid_res->hw_res[0] = csid_acquire.node_res;
+	cid_res->hw_res[1] = NULL;
+	/* CID(DT_ID) value of acquire device, require for path */
+	*cid_res_id = csid_acquire.node_res->res_id;
+
+	if (cid_res->is_dual_vfe) {
+		/* reserve the second CID on a later CSID device */
+		csid_acquire.node_res = NULL;
+		csid_acquire.res_type = CAM_ISP_RESOURCE_CID;
+		csid_acquire.in_port = in_port;
+		for (j = i + 1; j < CAM_IFE_CSID_HW_NUM_MAX; j++) {
+			if (!ife_hw_mgr->csid_devices[j])
+				continue;
+
+			hw_intf = ife_hw_mgr->csid_devices[j];
+			rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
+				&csid_acquire, sizeof(csid_acquire));
+			if (rc)
+				continue;
+			else
+				break;
+		}
+
+		if (j == CAM_IFE_CSID_HW_NUM_MAX) {
+			pr_err("%s: Can not acquire ife csid rdi resrouce!\n",
+				__func__);
+			goto err;
+		}
+		cid_res->hw_res[1] = csid_acquire.node_res;
+	}
+	/* link the CID under the IFE IN root resource */
+	cid_res->parent = &ife_ctx->res_list_ife_in;
+	ife_ctx->res_list_ife_in.child[
+		ife_ctx->res_list_ife_in.num_children++] = cid_res;
+
+	return 0;
+err:
+	return rc;
+
+}
+/*
+ * Acquire the full resource chain for one input port of a context, in
+ * dependency order: root -> CID -> CSID paths (IPP and/or RDI) ->
+ * IFE source -> IFE out.
+ *
+ * Returns 0 on success.  On failure, resources already acquired are
+ * released by the acquire entry function.
+ */
+static int cam_ife_mgr_acquire_hw_for_ctx(
+	struct cam_ife_hw_mgr_ctx          *ife_ctx,
+	struct cam_isp_in_port_info        *in_port)
+{
+	int rc                                    = -1;
+	int is_dual_vfe                           = 0;
+	int pixel_count                           = 0;
+	int rdi_count                             = 0;
+	uint32_t                                cid_res_id = 0;
+
+	is_dual_vfe = in_port->usage_type;
+
+	/* get root node resource */
+	rc = cam_ife_hw_mgr_acquire_res_root(ife_ctx, in_port);
+	if (rc) {
+		pr_err("%s:%d: Can not acquire csid rx resource!\n",
+			__func__, __LINE__);
+		goto err;
+	}
+
+	/* get cid resource */
+	rc = cam_ife_mgr_acquire_cid_res(ife_ctx, in_port, &cid_res_id);
+	if (rc) {
+		pr_err("%s%d: Acquire IFE CID resource Failed!\n",
+			__func__, __LINE__);
+		goto err;
+	}
+
+	/* count pixel vs RDI out ports to decide which paths to acquire */
+	cam_ife_hw_mgr_preprocess_out_port(ife_ctx, in_port,
+		&pixel_count, &rdi_count);
+
+	if (!pixel_count && !rdi_count) {
+		pr_err("%s: Error! no PIX or RDI resource\n", __func__);
+		return -EINVAL;
+	}
+
+	if (pixel_count) {
+		/* get ife csid IPP resource */
+		rc = cam_ife_hw_mgr_acquire_res_ife_csid_ipp(ife_ctx, in_port,
+				cid_res_id);
+		if (rc) {
+			pr_err("%s%d: Acquire IFE CSID IPP resource Failed!\n",
+				__func__, __LINE__);
+			goto err;
+		}
+	}
+
+	if (rdi_count) {
+		/* get ife csid rdi resource */
+		rc = cam_ife_hw_mgr_acquire_res_ife_csid_rdi(ife_ctx, in_port,
+			cid_res_id);
+		if (rc) {
+			pr_err("%s%d: Acquire IFE CSID RDI resource Failed!\n",
+				__func__, __LINE__);
+			goto err;
+		}
+	}
+
+	/* get ife src resource */
+	rc = cam_ife_hw_mgr_acquire_res_ife_src(ife_ctx, in_port);
+	if (rc) {
+		pr_err("%s%d: Acquire IFE SRC resource Failed!\n",
+			__func__, __LINE__);
+		goto err;
+	}
+
+	/* get ife out resources for all acquired sources */
+	rc = cam_ife_hw_mgr_acquire_res_ife_out(ife_ctx, in_port);
+	if (rc) {
+		pr_err("%s%d: Acquire IFE OUT resource Failed!\n",
+			__func__, __LINE__);
+		goto err;
+	}
+
+	return 0;
+err:
+	/* release resource at the acquire entry function */
+	return rc;
+}
+
+/*
+ * CDM completion callback registered at acquire time; currently only
+ * traces the callback parameters at debug level.
+ */
+void cam_ife_cam_cdm_callback(uint32_t handle, void *userdata,
+	enum cam_cdm_cb_status status, uint32_t cookie)
+{
+	CDBG("%s: Called by CDM hdl=%x, udata=%pK, status=%d, cookie=%d\n",
+		__func__, handle, userdata, status, cookie);
+}
+
+
+/* entry function: acquire_hw */
+static int cam_ife_mgr_acquire_hw(void *hw_mgr_priv,
+					void *acquire_hw_args)
+{
+	struct cam_ife_hw_mgr *ife_hw_mgr            = hw_mgr_priv;
+	struct cam_hw_acquire_args *acquire_args     = acquire_hw_args;
+	int rc                                       = -1;
+	int i, j;
+	struct cam_ife_hw_mgr_ctx         *ife_ctx;
+	struct cam_isp_in_port_info       *in_port = NULL;
+	struct cam_isp_resource           *isp_resource = NULL;
+	struct cam_cdm_acquire_data cdm_acquire;
+
+	CDBG("%s: Enter...\n", __func__);
+
+	if (!acquire_args || acquire_args->num_acq <= 0) {
+		pr_err("%s: Nothing to acquire. Seems like error\n", __func__);
+		return -EINVAL;
+	}
+
+	/* get the ife ctx */
+	rc = cam_ife_hw_mgr_get_ctx(&ife_hw_mgr->free_ctx_list, &ife_ctx);
+	if (rc || !ife_ctx) {
+		pr_err("Get ife hw context failed!\n");
+		goto err;
+	}
+
+	ife_ctx->common.cb_priv = acquire_args->context_data;
+	for (i = 0; i < CAM_ISP_HW_EVENT_MAX; i++)
+		ife_ctx->common.event_cb[i] = acquire_args->event_cb;
+
+	ife_ctx->hw_mgr = ife_hw_mgr;
+
+
+	memcpy(cdm_acquire.identifier, "ife", sizeof("ife"));
+	cdm_acquire.cell_index = 0;
+	cdm_acquire.handle = 0;
+	cdm_acquire.userdata = ife_ctx;
+	cdm_acquire.base_array_cnt = CAM_IFE_HW_NUM_MAX;
+	for (i = 0, j = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
+		if (ife_hw_mgr->cdm_reg_map[i])
+			cdm_acquire.base_array[j++] =
+				ife_hw_mgr->cdm_reg_map[i];
+	}
+	cdm_acquire.base_array_cnt = j;
+
+
+	cdm_acquire.id = CAM_CDM_VIRTUAL;
+	cdm_acquire.cam_cdm_callback = cam_ife_cam_cdm_callback;
+	if (!cam_cdm_acquire(&cdm_acquire)) {
+		CDBG("Successfully acquired the CDM HW hdl=%x\n",
+			cdm_acquire.handle);
+		ife_ctx->cdm_handle = cdm_acquire.handle;
+		ife_ctx->cdm_ops = cdm_acquire.ops;
+	} else {
+		pr_err("Failed to acquire the CDM HW\n");
+		goto err;
+	}
+
+	isp_resource = (struct cam_isp_resource *)acquire_args->acquire_info;
+
+	/* acquire HW resources */
+	for (i = 0; i < acquire_args->num_acq; i++) {
+		if (isp_resource[i].resource_id != CAM_ISP_RES_ID_PORT)
+			continue;
+
+		CDBG("%s: start copy from user handle %lld with len = %d\n",
+			__func__, isp_resource[i].res_hdl,
+			isp_resource[i].length);
+
+		in_port = memdup_user((void __user *)isp_resource[i].res_hdl,
+			isp_resource[i].length);
+		if (in_port > 0) {
+			rc = cam_ife_mgr_acquire_hw_for_ctx(ife_ctx, in_port);
+			kfree(in_port);
+			if (rc) {
+				pr_err("%s: can not acquire resource!\n",
+					__func__);
+				goto free_res;
+			}
+		} else {
+			pr_err("%s: copy from user failed with in_port = %pK",
+				__func__, in_port);
+			rc = -EFAULT;
+			goto free_res;
+		}
+	}
+	/* Process base info */
+	rc = cam_ife_mgr_process_base_info(ife_ctx);
+	if (rc) {
+		pr_err("%s: Error process) base info!\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	acquire_args->ctxt_to_hw_map = ife_ctx;
+	ife_ctx->ctx_in_use = 1;
+
+	cam_ife_hw_mgr_put_ctx(&ife_hw_mgr->used_ctx_list, &ife_ctx);
+
+	CDBG("%s: Exit...(success)!\n", __func__);
+
+	return 0;
+free_res:
+	cam_ife_hw_mgr_release_hw_for_ctx(ife_ctx);
+	cam_ife_hw_mgr_put_ctx(&ife_hw_mgr->free_ctx_list, &ife_ctx);
+err:
+	CDBG("%s: Exit...(rc=%d)!\n", __func__, rc);
+	return rc;
+}
+
+/*
+ * Entry function: config_hw.  Packages the context's hw update entries
+ * into a CDM BL request and submits it.
+ *
+ * Fix vs. previous revision: the copy loop iterated with
+ * "i <= num_hw_update_entries", reading one entry past the update
+ * array and submitting one more command than cmd_arrary_count claims.
+ * The bound is now exclusive.
+ */
+static int cam_ife_mgr_config_hw(void *hw_mgr_priv,
+					void *config_hw_args)
+{
+	int rc = -1, i;
+	struct cam_hw_start_args *cfg;
+	struct cam_hw_update_entry *cmd;
+	struct cam_cdm_bl_request *cdm_cmd;
+	struct cam_ife_hw_mgr_ctx *ctx;
+
+	CDBG("%s: Enter\n", __func__);
+	if (!hw_mgr_priv || !config_hw_args) {
+		pr_err("%s%d: Invalid arguments\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	cfg = config_hw_args;
+	ctx = (struct cam_ife_hw_mgr_ctx *)cfg->ctxt_to_hw_map;
+	if (!ctx) {
+		pr_err("%s: Fatal: Invalid context is used!\n", __func__);
+		return -EPERM;
+	}
+
+	if (!ctx->ctx_in_use || !ctx->cdm_cmd) {
+		pr_err("%s: Invalid context parameters !\n", __func__);
+		return -EPERM;
+	}
+
+	CDBG("%s%d: Enter...ctx id:%d\n", __func__, __LINE__, ctx->ctx_index);
+
+	if (cfg->num_hw_update_entries > 0) {
+		cdm_cmd = ctx->cdm_cmd;
+		cdm_cmd->cmd_arrary_count = cfg->num_hw_update_entries;
+		cdm_cmd->type = CAM_CDM_BL_CMD_TYPE_MEM_HANDLE;
+		cdm_cmd->flag = false;
+		cdm_cmd->userdata = NULL;
+		cdm_cmd->cookie = 0;
+
+		/* copy exactly num_hw_update_entries entries */
+		for (i = 0 ; i < cfg->num_hw_update_entries; i++) {
+			cmd = (cfg->hw_update_entries + i);
+			cdm_cmd->cmd[i].bl_addr.mem_handle = cmd->handle;
+			cdm_cmd->cmd[i].offset = cmd->offset;
+			cdm_cmd->cmd[i].len = cmd->len;
+		}
+
+		rc = cam_cdm_submit_bls(ctx->cdm_handle, cdm_cmd);
+		if (rc)
+			pr_err("Failed to apply the configs\n");
+	} else {
+		pr_err("No commands to config\n");
+	}
+	CDBG("%s: Exit\n", __func__);
+
+	return rc;
+}
+
+/*
+ * Stop the context's hardware immediately (overflow recovery path):
+ * CIDs first (master base first), then CSID paths, then IFE mux and
+ * out resources.  Unlike the normal stop, this does not stop the CDM,
+ * the tasklet, or deinit the resources.
+ *
+ * Fix vs. previous revision: the "skip the master" loops compared the
+ * loop index i against master_base_idx, but master_base_idx is a
+ * hardware base index (ctx->base[i].idx), not an array position; the
+ * master could be stopped twice and a non-master skipped.  Compare
+ * ctx->base[i].idx instead.
+ */
+static int cam_ife_mgr_stop_hw_in_overflow(void *hw_mgr_priv,
+		void *stop_hw_args)
+{
+	int                               rc        = 0;
+	struct cam_hw_stop_args          *stop_args = stop_hw_args;
+	struct cam_ife_hw_mgr_res        *hw_mgr_res;
+	struct cam_ife_hw_mgr_ctx        *ctx;
+	uint32_t                          i, master_base_idx = 0;
+
+	if (!hw_mgr_priv || !stop_hw_args) {
+		pr_err("%s%d: Invalid arguments\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+	ctx = (struct cam_ife_hw_mgr_ctx *)stop_args->ctxt_to_hw_map;
+	if (!ctx || !ctx->ctx_in_use) {
+		pr_err("%s: Fatal: Invalid context is used!\n", __func__);
+		return -EPERM;
+	}
+
+	CDBG("%s%d: Enter...ctx id:%d\n", __func__, __LINE__,
+		ctx->ctx_index);
+
+	/* stop resource will remove the irq mask from the hardware */
+	if (!ctx->num_base) {
+		pr_err("%s%d: error number of bases are zero\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+
+	/* get master base index first */
+	for (i = 0; i < ctx->num_base; i++) {
+		if (ctx->base[i].split_id == CAM_ISP_HW_SPLIT_LEFT) {
+			master_base_idx = ctx->base[i].idx;
+			break;
+		}
+	}
+
+	/*
+	 * if Context does not have PIX resources and has only RDI resource
+	 * then take the first base index.
+	 */
+
+	if (i == ctx->num_base)
+		master_base_idx = ctx->base[0].idx;
+
+	/* stop the master CIDs first */
+	cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
+			master_base_idx, CAM_CSID_HALT_IMMEDIATELY);
+
+	/* stop rest of the CIDs  */
+	for (i = 0; i < ctx->num_base; i++) {
+		if (ctx->base[i].idx == master_base_idx)
+			continue;
+		cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
+			ctx->base[i].idx, CAM_CSID_HALT_IMMEDIATELY);
+	}
+
+	/* stop the master CSID path first */
+	cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid,
+			master_base_idx, CAM_CSID_HALT_IMMEDIATELY);
+
+	/* Stop rest of the CSID paths  */
+	for (i = 0; i < ctx->num_base; i++) {
+		if (ctx->base[i].idx == master_base_idx)
+			continue;
+
+		cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid,
+			ctx->base[i].idx, CAM_CSID_HALT_IMMEDIATELY);
+	}
+
+	/* IFE mux in resources */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+		cam_ife_hw_mgr_stop_hw_res(hw_mgr_res);
+	}
+
+	/* IFE out resources */
+	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++)
+		cam_ife_hw_mgr_stop_hw_res(&ctx->res_list_ife_out[i]);
+
+	/* update vote bandwidth should be done at the HW layer */
+
+	CDBG("%s%d Exit...ctx id:%d rc :%d\n", __func__, __LINE__,
+		ctx->ctx_index, rc);
+
+	return rc;
+}
+
+/*
+ * Entry function: stop_hw.  Halts the context's hardware at frame
+ * boundary: CIDs first (master base first), then CSID paths, CDM
+ * stream, IFE mux and out resources, the tasklet, and finally deinits
+ * every acquired resource.
+ *
+ * Fix vs. previous revision: the "skip the master" loops compared the
+ * loop index i against master_base_idx, but master_base_idx is a
+ * hardware base index (ctx->base[i].idx), not an array position; the
+ * master could be stopped twice and a non-master skipped.  Compare
+ * ctx->base[i].idx instead.
+ */
+static int cam_ife_mgr_stop_hw(void *hw_mgr_priv, void *stop_hw_args)
+{
+	int                               rc        = 0;
+	struct cam_hw_stop_args          *stop_args = stop_hw_args;
+	struct cam_ife_hw_mgr_res        *hw_mgr_res;
+	struct cam_ife_hw_mgr_ctx        *ctx;
+	uint32_t                          i, master_base_idx = 0;
+
+	if (!hw_mgr_priv || !stop_hw_args) {
+		pr_err("%s%d: Invalid arguments\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+	ctx = (struct cam_ife_hw_mgr_ctx *)stop_args->ctxt_to_hw_map;
+	if (!ctx || !ctx->ctx_in_use) {
+		pr_err("%s: Fatal: Invalid context is used!\n", __func__);
+		return -EPERM;
+	}
+
+	CDBG("%s%d: Enter...ctx id:%d\n", __func__, __LINE__,
+		ctx->ctx_index);
+
+	/* Note:stop resource will remove the irq mask from the hardware */
+
+	if (!ctx->num_base) {
+		pr_err("%s%d: error number of bases are zero\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+
+	/* get master base index first */
+	for (i = 0; i < ctx->num_base; i++) {
+		if (ctx->base[i].split_id == CAM_ISP_HW_SPLIT_LEFT) {
+			master_base_idx = ctx->base[i].idx;
+			break;
+		}
+	}
+
+	/*
+	 * If Context does not have PIX resources and has only RDI resource
+	 * then take the first base index.
+	 */
+	if (i == ctx->num_base)
+		master_base_idx = ctx->base[0].idx;
+
+	/* Stop the master CIDs first */
+	cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
+			master_base_idx, CAM_CSID_HALT_AT_FRAME_BOUNDARY);
+
+	/* stop rest of the CIDs  */
+	for (i = 0; i < ctx->num_base; i++) {
+		if (ctx->base[i].idx == master_base_idx)
+			continue;
+		cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
+			ctx->base[i].idx, CAM_CSID_HALT_AT_FRAME_BOUNDARY);
+	}
+
+
+	/* Stop the master CSID path first */
+	cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid,
+			master_base_idx, CAM_CSID_HALT_AT_FRAME_BOUNDARY);
+
+	/* stop rest of the CSID paths  */
+	for (i = 0; i < ctx->num_base; i++) {
+		if (ctx->base[i].idx == master_base_idx)
+			continue;
+
+		cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid,
+			ctx->base[i].idx, CAM_CSID_HALT_AT_FRAME_BOUNDARY);
+	}
+
+	if (cam_cdm_stream_off(ctx->cdm_handle))
+		pr_err("%s%d: CDM stream off failed %d\n",
+			__func__, __LINE__, ctx->cdm_handle);
+
+	/* IFE mux in resources */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+		cam_ife_hw_mgr_stop_hw_res(hw_mgr_res);
+	}
+
+	/* IFE out resources */
+	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++)
+		cam_ife_hw_mgr_stop_hw_res(&ctx->res_list_ife_out[i]);
+
+	/* Update vote bandwidth should be done at the HW layer */
+
+	cam_tasklet_stop(ctx->common.tasklet_info);
+
+	/* Deinit IFE root node: do nothing */
+
+	/* Deinit IFE CID */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
+		cam_ife_hw_mgr_deinit_hw_res(hw_mgr_res);
+	}
+
+	/* Deinit IFE CSID */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
+		cam_ife_hw_mgr_deinit_hw_res(hw_mgr_res);
+	}
+
+	/* Deinit IFE MUX(SRC) */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+		cam_ife_hw_mgr_deinit_hw_res(hw_mgr_res);
+	}
+
+	/* Deinit IFE OUT */
+	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++)
+		cam_ife_hw_mgr_deinit_hw_res(&ctx->res_list_ife_out[i]);
+
+	CDBG("%s%d Exit...ctx id:%d rc :%d\n", __func__, __LINE__,
+		ctx->ctx_index, rc);
+
+	return rc;
+}
+
+/*
+ * Reset the CSID (global reset) and the VFE whose hardware index
+ * matches @hw_idx.
+ *
+ * Fix vs. previous revision: the device arrays may be sparsely
+ * populated (every other loop in this file guards with a NULL check
+ * before dereferencing); dereferencing ->hw_idx on a NULL slot would
+ * crash.  Skip NULL entries.
+ */
+static int cam_ife_mgr_reset_hw(struct cam_ife_hw_mgr *hw_mgr,
+			uint32_t hw_idx)
+{
+	uint32_t i = 0;
+	struct cam_hw_intf             *csid_hw_intf;
+	struct cam_hw_intf             *vfe_hw_intf;
+	struct cam_csid_reset_cfg_args  csid_reset_args;
+
+	if (!hw_mgr) {
+		CDBG("%s: Invalid arguments\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Reset IFE CSID HW */
+	csid_reset_args.reset_type = CAM_IFE_CSID_RESET_GLOBAL;
+
+	for (i = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) {
+		if (!hw_mgr->csid_devices[i])
+			continue;
+
+		if (hw_idx != hw_mgr->csid_devices[i]->hw_idx)
+			continue;
+
+		csid_hw_intf = hw_mgr->csid_devices[i];
+		csid_hw_intf->hw_ops.reset(csid_hw_intf->hw_priv,
+			&csid_reset_args,
+			sizeof(struct cam_csid_reset_cfg_args));
+		break;
+	}
+
+	/* Reset VFE HW*/
+	for (i = 0; i < CAM_VFE_HW_NUM_MAX; i++) {
+		if (!hw_mgr->ife_devices[i])
+			continue;
+
+		if (hw_idx != hw_mgr->ife_devices[i]->hw_idx)
+			continue;
+		CDBG("%d:VFE (id = %d) reset\n", __LINE__, hw_idx);
+		vfe_hw_intf = hw_mgr->ife_devices[i];
+		vfe_hw_intf->hw_ops.reset(vfe_hw_intf->hw_priv, NULL, 0);
+		break;
+	}
+
+	CDBG("%d: Exit Successfully\n", __LINE__);
+	return 0;
+}
+
+/*
+ * Restart an already-initialized context after a reset: start the
+ * resources in consumer-to-producer order (OUT, then SRC/mux, then
+ * CSID paths, then CIDs) so downstream stages are ready before the
+ * upstream ones begin streaming.  On failure, the normal stop path is
+ * invoked to halt whatever was started.
+ */
+static int cam_ife_mgr_restart_hw(void *hw_mgr_priv,
+		void *start_hw_args)
+{
+	int                               rc = -1;
+	struct cam_hw_start_args         *start_args = start_hw_args;
+	struct cam_ife_hw_mgr_ctx        *ctx;
+	struct cam_ife_hw_mgr_res        *hw_mgr_res;
+	uint32_t                          i;
+
+	if (!hw_mgr_priv || !start_hw_args) {
+		pr_err("%s%d: Invalid arguments\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_ife_hw_mgr_ctx *)start_args->ctxt_to_hw_map;
+	if (!ctx || !ctx->ctx_in_use) {
+		pr_err("%s: Invalid context is used!\n", __func__);
+		return -EPERM;
+	}
+
+	CDBG("%s%d Enter... ctx id:%d\n", __func__, __LINE__,
+		ctx->ctx_index);
+
+	CDBG("%s%d START IFE OUT ... in ctx id:%d\n", __func__, __LINE__,
+		ctx->ctx_index);
+	/* start the IFE out devices */
+	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
+		rc = cam_ife_hw_mgr_start_hw_res(&ctx->res_list_ife_out[i]);
+		if (rc) {
+			pr_err("%s: Can not start IFE OUT (%d)!\n",
+				__func__, i);
+			goto err;
+		}
+	}
+
+	CDBG("%s%d START IFE SRC ... in ctx id:%d\n", __func__, __LINE__,
+		ctx->ctx_index);
+	/* Start the IFE mux in devices */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
+		if (rc) {
+			pr_err("%s: Can not start IFE MUX (%d)!\n",
+				__func__, hw_mgr_res->res_id);
+			goto err;
+		}
+	}
+
+	CDBG("%s:%d: START CSID HW ... in ctx id:%d\n", __func__, __LINE__,
+		ctx->ctx_index);
+	/* Start the IFE CSID HW devices */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
+		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
+		if (rc) {
+			pr_err("%s: Can not start IFE CSID (%d)!\n",
+				__func__, hw_mgr_res->res_id);
+			goto err;
+		}
+	}
+
+	CDBG("%s%d START CID SRC ... in ctx id:%d\n", __func__, __LINE__,
+		ctx->ctx_index);
+	/* Start the IFE CID HW devices */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
+		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
+		if (rc) {
+			pr_err("%s: Can not start IFE CSID (%d)!\n",
+				__func__, hw_mgr_res->res_id);
+			goto err;
+		}
+	}
+
+	/* Start IFE root node: do nothing */
+	CDBG("%s: Exit...(success)\n", __func__);
+	return 0;
+
+err:
+	cam_ife_mgr_stop_hw(hw_mgr_priv, start_hw_args);
+	CDBG("%s: Exit...(rc=%d)\n", __func__, rc);
+	return rc;
+}
+
+/*
+ * cam_ife_mgr_start_hw - initialize, configure and start all HW
+ * resources acquired by an IFE context.
+ *
+ * Init order is CID -> CSID -> IFE SRC -> IFE OUT, after which the CDM
+ * is streamed on and the initial configuration is applied.  Start order
+ * is the reverse (OUT -> SRC -> CSID -> CID) so that each consumer is
+ * ready before its producer begins streaming.
+ *
+ * On any failure, everything started so far is torn down through
+ * cam_ife_mgr_stop_hw() and the error code is returned.
+ */
+static int cam_ife_mgr_start_hw(void *hw_mgr_priv, void *start_hw_args)
+{
+	int                               rc = -1;
+	struct cam_hw_start_args         *start_args = start_hw_args;
+	struct cam_ife_hw_mgr_ctx        *ctx;
+	struct cam_ife_hw_mgr_res        *hw_mgr_res;
+	uint32_t                          i;
+
+	if (!hw_mgr_priv || !start_hw_args) {
+		pr_err("%s%d: Invalid arguments\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_ife_hw_mgr_ctx *)start_args->ctxt_to_hw_map;
+	if (!ctx || !ctx->ctx_in_use) {
+		pr_err("%s: Invalid context is used!\n", __func__);
+		return -EPERM;
+	}
+
+	CDBG("%s%d Enter... ctx id:%d\n", __func__, __LINE__,
+		ctx->ctx_index);
+
+	/* update Bandwidth should be done at the hw layer */
+
+	cam_tasklet_start(ctx->common.tasklet_info);
+
+	/* INIT IFE Root: do nothing */
+
+	CDBG("%s%d INIT IFE CID ... in ctx id:%d\n", __func__, __LINE__,
+		ctx->ctx_index);
+	/* INIT IFE CID */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
+		rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res);
+		if (rc) {
+			pr_err("%s: Can not INIT IFE CID.(id :%d)!\n",
+				__func__, hw_mgr_res->res_id);
+			goto err;
+		}
+	}
+
+
+	CDBG("%s%d INIT IFE csid ... in ctx id:%d\n", __func__, __LINE__,
+		ctx->ctx_index);
+
+	/* INIT IFE csid */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
+		rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res);
+		if (rc) {
+			pr_err("%s: Can not INIT IFE CSID.(id :%d)!\n",
+				__func__, hw_mgr_res->res_id);
+			goto err;
+		}
+	}
+
+	/* INIT IFE SRC */
+	CDBG("%s%d INIT IFE SRC in ctx id:%d\n", __func__, __LINE__,
+		ctx->ctx_index);
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+		rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res);
+		if (rc) {
+			pr_err("%s: Can not INIT IFE SRC (%d)!\n",
+				__func__, hw_mgr_res->res_id);
+			goto err;
+		}
+	}
+
+	/* INIT IFE OUT */
+	CDBG("%s%d INIT IFE OUT RESOURCES in ctx id:%d\n", __func__,
+		__LINE__, ctx->ctx_index);
+
+	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
+		rc = cam_ife_hw_mgr_init_hw_res(&ctx->res_list_ife_out[i]);
+		if (rc) {
+			pr_err("%s: Can not INIT IFE OUT (%d)!\n",
+				__func__, ctx->res_list_ife_out[i].res_id);
+			goto err;
+		}
+	}
+
+	CDBG("%s: start cdm interface\n", __func__);
+	rc = cam_cdm_stream_on(ctx->cdm_handle);
+	if (rc) {
+		pr_err("%s: Can not start cdm (%d)!\n",
+			__func__, ctx->cdm_handle);
+		goto err;
+	}
+
+	/* Apply initial configuration */
+	CDBG("%s: Config HW\n", __func__);
+	rc = cam_ife_mgr_config_hw(hw_mgr_priv, start_hw_args);
+	if (rc) {
+		pr_err("%s: Config HW failed\n", __func__);
+		goto err;
+	}
+
+	CDBG("%s%d START IFE OUT ... in ctx id:%d\n", __func__, __LINE__,
+		ctx->ctx_index);
+	/* start the IFE out devices */
+	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
+		rc = cam_ife_hw_mgr_start_hw_res(&ctx->res_list_ife_out[i]);
+		if (rc) {
+			pr_err("%s: Can not start IFE OUT (%d)!\n",
+				__func__, i);
+			goto err;
+		}
+	}
+
+	CDBG("%s%d START IFE SRC ... in ctx id:%d\n", __func__, __LINE__,
+		ctx->ctx_index);
+	/* Start the IFE mux in devices */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
+		if (rc) {
+			pr_err("%s: Can not start IFE MUX (%d)!\n",
+				__func__, hw_mgr_res->res_id);
+			goto err;
+		}
+	}
+
+	CDBG("%s:%d: START CSID HW ... in ctx id:%d\n", __func__, __LINE__,
+		ctx->ctx_index);
+	/* Start the IFE CSID HW devices */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
+		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
+		if (rc) {
+			pr_err("%s: Can not start IFE CSID (%d)!\n",
+				__func__, hw_mgr_res->res_id);
+			goto err;
+		}
+	}
+
+	CDBG("%s%d START CID SRC ... in ctx id:%d\n", __func__, __LINE__,
+		ctx->ctx_index);
+	/* Start the IFE CID HW devices */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
+		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
+		if (rc) {
+			/* fixed copy-paste: this loop starts CID, not CSID */
+			pr_err("%s: Can not start IFE CID (%d)!\n",
+				__func__, hw_mgr_res->res_id);
+			goto err;
+		}
+	}
+
+	/* Start IFE root node: do nothing */
+	CDBG("%s: Exit...(success)\n", __func__);
+	return 0;
+err:
+	cam_ife_mgr_stop_hw(hw_mgr_priv, start_hw_args);
+	CDBG("%s: Exit...(rc=%d)\n", __func__, rc);
+	return rc;
+}
+
+/* Direct read from IFE HW is not supported; always returns -EPERM. */
+static int cam_ife_mgr_read(void *hw_mgr_priv, void *read_args)
+{
+	return -EPERM;
+}
+
+/* Direct write to IFE HW is not supported; always returns -EPERM. */
+static int cam_ife_mgr_write(void *hw_mgr_priv, void *write_args)
+{
+	return -EPERM;
+}
+
+/*
+ * cam_ife_mgr_release_hw - release every HW resource held by a context,
+ * release its CDM handle, and return the context to the free list.
+ *
+ * The caller must have stopped the HW for this context beforehand.
+ */
+static int cam_ife_mgr_release_hw(void *hw_mgr_priv,
+					void *release_hw_args)
+{
+	int                               rc           = 0;
+	struct cam_hw_release_args       *release_args = release_hw_args;
+	struct cam_ife_hw_mgr            *hw_mgr       = hw_mgr_priv;
+	struct cam_ife_hw_mgr_ctx        *ctx;
+
+	if (!hw_mgr_priv || !release_hw_args) {
+		pr_err("%s%d: Invalid arguments\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_ife_hw_mgr_ctx *)release_args->ctxt_to_hw_map;
+	if (!ctx || !ctx->ctx_in_use) {
+		pr_err("%s: Fatal: Invalid context is used!\n", __func__);
+		return -EPERM;
+	}
+
+	CDBG("%s%d Enter...ctx id:%d\n", __func__, __LINE__,
+		ctx->ctx_index);
+
+	/* the HW must already have been stopped before reaching here */
+	cam_ife_hw_mgr_release_hw_for_ctx(ctx);
+
+	/* reset base info */
+	ctx->num_base = 0;
+	memset(ctx->base, 0, sizeof(ctx->base));
+
+	/* release cdm handle */
+	cam_cdm_release(ctx->cdm_handle);
+
+	/* clean context: unlink from the used list and mark unused */
+	list_del_init(&ctx->list);
+	ctx->ctx_in_use = 0;
+	CDBG("%s%d Exit...ctx id:%d\n", __func__, __LINE__,
+		ctx->ctx_index);
+	cam_ife_hw_mgr_put_ctx(&hw_mgr->free_ctx_list, &ctx);
+	return rc;
+}
+
+/*
+ * cam_ife_mgr_prepare_hw_update - translate a validated user packet
+ * into HW update entries for every base (VFE core) of the context.
+ *
+ * Pass 1, per base: change-base command, per-split command buffers,
+ * and IO buffer configuration (fence map entries are filled only for
+ * the first base via the fill_fence flag).
+ * Pass 2, per base: change-base command followed by a reg-update
+ * command.
+ *
+ * Returns 0 on success or the first error from any helper.
+ */
+static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
+	void *prepare_hw_update_args)
+{
+	int rc = 0;
+	struct cam_hw_prepare_update_args *prepare =
+		(struct cam_hw_prepare_update_args *) prepare_hw_update_args;
+	struct cam_ife_hw_mgr_ctx        *ctx;
+	struct cam_ife_hw_mgr            *hw_mgr;
+	struct cam_isp_kmd_buf_info       kmd_buf;
+	uint32_t                          i;
+	bool                              fill_fence = true;
+
+	if (!hw_mgr_priv || !prepare_hw_update_args) {
+		pr_err("%s: Invalid args\n", __func__);
+		return -EINVAL;
+	}
+
+	CDBG("%s:%d enter\n", __func__, __LINE__);
+
+	ctx = (struct cam_ife_hw_mgr_ctx *) prepare->ctxt_to_hw_map;
+	hw_mgr = (struct cam_ife_hw_mgr *)hw_mgr_priv;
+
+	rc = cam_isp_validate_packet(prepare->packet);
+	if (rc)
+		return rc;
+
+	CDBG("%s:%d enter\n", __func__, __LINE__);
+	/* Pre parse the packet*/
+	rc = cam_isp_get_kmd_buffer(prepare->packet, &kmd_buf);
+	if (rc)
+		return rc;
+
+	prepare->num_hw_update_entries = 0;
+	prepare->num_in_map_entries = 0;
+	prepare->num_out_map_entries = 0;
+
+	for (i = 0; i < ctx->num_base; i++) {
+		CDBG("%s: process cmd buffer for device %d\n", __func__, i);
+
+		/* Add change base */
+		rc = cam_isp_add_change_base(prepare, &ctx->res_list_ife_src,
+			ctx->base[i].idx, &kmd_buf);
+		if (rc)
+			return rc;
+
+		/* get command buffers */
+		if (ctx->base[i].split_id != CAM_ISP_HW_SPLIT_MAX) {
+			rc = cam_isp_add_command_buffers(prepare,
+				ctx->base[i].split_id);
+			if (rc)
+				return rc;
+		}
+
+		/* get IO buffers */
+		rc = cam_isp_add_io_buffers(hw_mgr->mgr_common.img_iommu_hdl,
+				prepare, ctx->base[i].idx,
+			&kmd_buf, ctx->res_list_ife_out,
+			CAM_IFE_HW_OUT_RES_MAX, fill_fence);
+
+		if (rc)
+			return rc;
+
+		/* fence map table entries need to fill only once in the loop */
+		if (fill_fence)
+			fill_fence = false;
+	}
+
+	/* add reg update commands */
+	for (i = 0; i < ctx->num_base; i++) {
+		/* Add change base */
+		rc = cam_isp_add_change_base(prepare, &ctx->res_list_ife_src,
+			ctx->base[i].idx, &kmd_buf);
+		if (rc)
+			return rc;
+
+		/*Add reg update */
+		rc = cam_isp_add_reg_update(prepare, &ctx->res_list_ife_src,
+			ctx->base[i].idx, &kmd_buf);
+		if (rc)
+			return rc;
+	}
+
+	return rc;
+}
+
+/*
+ * cam_ife_mgr_process_recovery_cb - workqueue callback that performs
+ * overflow recovery: reset every affected VFE core, then restart the
+ * HW for every affected context.
+ *
+ * @priv points to a heap-allocated cam_hw_event_recovery_data which
+ * is freed here on every path before returning.
+ */
+static int cam_ife_mgr_process_recovery_cb(void *priv, void *data)
+{
+	int32_t rc = 0;
+	struct cam_hw_event_recovery_data   *recovery_data = priv;
+	struct cam_hw_start_args     start_args;
+	struct cam_ife_hw_mgr   *ife_hw_mgr = NULL;
+	/*
+	 * NOTE(review): hw_mgr_priv is an uninitialized local whose
+	 * address is passed to cam_ife_mgr_restart_hw() below - confirm
+	 * the callee treats it as an opaque, unused token.
+	 */
+	uint32_t   hw_mgr_priv;
+	uint32_t i = 0;
+
+	uint32_t error_type = recovery_data->error_type;
+	struct cam_ife_hw_mgr_ctx        *ctx = NULL;
+
+	/* Here recovery is performed */
+	CDBG("%s:Enter: ErrorType = %d\n", __func__, error_type);
+
+	switch (error_type) {
+	case CAM_ISP_HW_ERROR_OVERFLOW:
+	case CAM_ISP_HW_ERROR_BUSIF_OVERFLOW:
+		if (!recovery_data->affected_ctx[0]) {
+			pr_err("No context is affected but recovery called\n");
+			kfree(recovery_data);
+			return 0;
+		}
+
+		ctx = recovery_data->affected_ctx[0];
+		ife_hw_mgr = ctx->hw_mgr;
+
+		/* reset every VFE core flagged as affected */
+		for (i = 0; i < CAM_VFE_HW_NUM_MAX; i++) {
+			if (recovery_data->affected_core[i])
+				rc = cam_ife_mgr_reset_hw(ife_hw_mgr, i);
+		}
+
+		/* restart HW for each affected context */
+		for (i = 0; i < recovery_data->no_of_context; i++) {
+			start_args.ctxt_to_hw_map =
+				recovery_data->affected_ctx[i];
+			rc = cam_ife_mgr_restart_hw(&hw_mgr_priv, &start_args);
+		}
+
+		break;
+
+	case CAM_ISP_HW_ERROR_P2I_ERROR:
+		break;
+
+	case CAM_ISP_HW_ERROR_VIOLATION:
+		break;
+
+	default:
+		pr_err("%s: Invalid Error\n", __func__);
+	}
+	CDBG("%s:Exit: ErrorType = %d\n", __func__, error_type);
+
+	kfree(recovery_data);
+	return rc;
+}
+
+/*
+ * cam_ife_hw_mgr_do_error_recovery - clone the recovery data and queue
+ * it to the hw-mgr workqueue so cam_ife_mgr_process_recovery_cb() can
+ * run outside IRQ/tasklet context.
+ */
+static int cam_ife_hw_mgr_do_error_recovery(
+		struct cam_hw_event_recovery_data  *ife_mgr_recovery_data)
+{
+	int32_t rc = 0;
+	struct crm_workq_task        *task = NULL;
+	struct cam_hw_event_recovery_data  *recovery_data = NULL;
+
+	/*
+	 * NOTE(review): this unconditional return disables recovery;
+	 * everything below is currently dead code.  Presumably left in
+	 * deliberately for bring-up - confirm before removing.
+	 */
+	return 0;
+
+	recovery_data = kzalloc(sizeof(struct cam_hw_event_recovery_data),
+		GFP_ATOMIC);
+	if (!recovery_data)
+		return -ENOMEM;
+
+	memcpy(recovery_data, ife_mgr_recovery_data,
+			sizeof(struct cam_hw_event_recovery_data));
+
+	CDBG("%s: Enter: error_type (%d)\n", __func__,
+		recovery_data->error_type);
+
+	task = cam_req_mgr_workq_get_task(g_ife_hw_mgr.workq);
+	if (!task) {
+		pr_err("%s: No empty task frame\n", __func__);
+		kfree(recovery_data);
+		return -ENOMEM;
+	}
+
+	task->process_cb = &cam_ife_mgr_process_recovery_cb;
+	rc = cam_req_mgr_workq_enqueue_task(task, recovery_data,
+		CRM_TASK_PRIORITY_0);
+
+	return rc;
+}
+
+/*
+ * Check whether any valid entry in affected_core[] belongs to this
+ * context.  If so, mark every other core used by this context in
+ * affected_core[] as well and return 0 (match found); otherwise
+ * return -EPERM.
+ */
+static int cam_ife_hw_mgr_match_hw_idx(
+	struct cam_ife_hw_mgr_ctx   *ife_hwr_mgr_ctx,
+	uint32_t *affected_core)
+{
+
+	int32_t rc = -EPERM;
+	uint32_t base_idx = 0, num_unmatched = 0;
+	uint32_t max_idx =  ife_hwr_mgr_ctx->num_base;
+	uint32_t ctx_affected_core_idx[CAM_IFE_HW_NUM_MAX] = {0};
+
+	CDBG("%s:Enter:max_idx = %d\n", __func__, max_idx);
+
+	/* Scan the bases: record a hit, or stash the unmatched core. */
+	for (base_idx = 0; base_idx < max_idx; base_idx++) {
+		uint32_t core = ife_hwr_mgr_ctx->base[base_idx].idx;
+
+		if (affected_core[core])
+			rc = 0;
+		else
+			ctx_affected_core_idx[num_unmatched++] = core;
+	}
+
+	/* On a match, flag the context's remaining cores as affected. */
+	if (rc == 0) {
+		while (num_unmatched) {
+			num_unmatched--;
+			affected_core[ctx_affected_core_idx[num_unmatched]] = 1;
+		}
+	}
+	CDBG("%s:Exit\n", __func__);
+	return rc;
+}
+
+/*
+ *  Loop through each context
+ *  a. match core_idx
+ *  b. For each context from ctx_list Stop the acquired resources
+ *  c. Notify CRM with fatal error for the affected isp context
+ *  d. For any dual VFE context, if companion VFE is also serving
+ *     other context it should also notify the CRM with fatal error
+ */
+static int  cam_ife_hw_mgr_handle_overflow(
+	struct cam_ife_hw_mgr_ctx   *curr_ife_hwr_mgr_ctx,
+	struct cam_isp_hw_error_event_data *error_event_data,
+	uint32_t curr_core_idx,
+	struct cam_hw_event_recovery_data  *recovery_data)
+{
+	uint32_t affected_core[CAM_IFE_HW_NUM_MAX] = {0};
+	struct cam_ife_hw_mgr_ctx   *ife_hwr_mgr_ctx = NULL;
+	cam_hw_event_cb_func	         ife_hwr_irq_err_cb;
+	struct cam_ife_hw_mgr		*ife_hwr_mgr = NULL;
+	uint32_t                            hw_mgr_priv = 1;
+	struct cam_hw_stop_args          stop_args;
+	uint32_t i = 0;
+
+	CDBG("%s:Enter\n", __func__);
+	/*
+	 * NOTE(review): this unconditional return disables overflow
+	 * handling; the rest of the function is currently dead code.
+	 * Presumably intentional for bring-up - confirm before removing.
+	 */
+	return 0;
+
+	if (!recovery_data) {
+		pr_err("%s: recovery_data parameter is NULL\n",
+			__func__);
+		return -EINVAL;
+	}
+	recovery_data->no_of_context = 0;
+	/* affected_core is indexed by core_idx*/
+	affected_core[curr_core_idx] = 1;
+
+	ife_hwr_mgr = curr_ife_hwr_mgr_ctx->hw_mgr;
+
+	list_for_each_entry(ife_hwr_mgr_ctx,
+		&ife_hwr_mgr->used_ctx_list, list) {
+
+		/*
+		 * Check if current core_idx matches the HW associated
+		 * with this context
+		 */
+		CDBG("%s:Calling match Hw idx\n", __func__);
+		if (cam_ife_hw_mgr_match_hw_idx(ife_hwr_mgr_ctx, affected_core))
+			continue;
+
+		ife_hwr_irq_err_cb =
+		ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_ERROR];
+
+		stop_args.ctxt_to_hw_map = ife_hwr_mgr_ctx;
+
+		/* Add affected_context in list of recovery data*/
+		CDBG("%s:Add new entry in affected_ctx_list\n", __func__);
+		if (recovery_data->no_of_context < CAM_CTX_MAX)
+			recovery_data->affected_ctx[
+				recovery_data->no_of_context++] =
+				ife_hwr_mgr_ctx;
+
+		/*
+		 * Stop the hw resources associated with this context
+		 * and call the error callback. In the call back function
+		 * corresponding ISP context will update CRM about fatal Error
+		 */
+		if (!cam_ife_mgr_stop_hw_in_overflow(&hw_mgr_priv,
+			&stop_args)) {
+			CDBG("%s:Calling Error handler CB\n", __func__);
+			ife_hwr_irq_err_cb(ife_hwr_mgr_ctx->common.cb_priv,
+				CAM_ISP_HW_EVENT_ERROR, error_event_data);
+		}
+	}
+	/* fill the affected_core in recovery data */
+	for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
+		recovery_data->affected_core[i] = affected_core[i];
+		CDBG("%s: Vfe core %d is affected (%d)\n",
+			__func__, i, recovery_data->affected_core[i]);
+	}
+	CDBG("%s:Exit\n", __func__);
+	return 0;
+}
+
+/*
+ * cam_ife_hw_mgr_handle_camif_error - classify a CAMIF error payload
+ * and kick off overflow handling plus deferred recovery.
+ *
+ * All three HW error types (overflow, P2I, violation) are reported to
+ * the overflow handler as CAM_ISP_HW_ERROR_OVERFLOW for this first
+ * phase, as set explicitly below.
+ *
+ * Returns the payload's error_type (non-zero for real errors).
+ */
+static int  cam_ife_hw_mgr_handle_camif_error(
+	void                              *handler_priv,
+	void                              *payload)
+{
+	int32_t  rc = 0;
+	uint32_t core_idx;
+	struct cam_ife_hw_mgr_ctx               *ife_hwr_mgr_ctx;
+	struct cam_vfe_top_irq_evt_payload      *evt_payload;
+	struct cam_isp_hw_error_event_data       error_event_data = {0};
+	struct cam_hw_event_recovery_data        recovery_data = {0};
+
+	ife_hwr_mgr_ctx = handler_priv;
+	evt_payload = payload;
+	core_idx = evt_payload->core_index;
+
+	rc = evt_payload->error_type;
+	CDBG("%s: Enter: error_type (%d)\n", __func__, evt_payload->error_type);
+	switch (evt_payload->error_type) {
+	case CAM_ISP_HW_ERROR_OVERFLOW:
+	case CAM_ISP_HW_ERROR_P2I_ERROR:
+	case CAM_ISP_HW_ERROR_VIOLATION:
+
+		error_event_data.error_type =
+				CAM_ISP_HW_ERROR_OVERFLOW;
+
+		cam_ife_hw_mgr_handle_overflow(ife_hwr_mgr_ctx,
+				&error_event_data,
+				core_idx,
+				&recovery_data);
+
+		/* Trigger for recovery */
+		recovery_data.error_type = CAM_ISP_HW_ERROR_OVERFLOW;
+		cam_ife_hw_mgr_do_error_recovery(&recovery_data);
+		break;
+	default:
+		CDBG("%s: None error. Error type (%d)\n", __func__,
+			evt_payload->error_type);
+	}
+
+	CDBG("%s: Exit (%d)\n", __func__, rc);
+	return rc;
+}
+
+/*
+ * Reg-update (RUP) bottom-half handler for CAMIF/RDI resources.
+ *
+ * DUAL VFE is valid for PIX processing path.
+ * This function assumes hw_res[0] is master in case of dual VFE
+ * (the slave, hw_res[1], is the one checked for RUP ACK).
+ * RDI path does not support DUAL VFE.
+ *
+ * Only a successful pixel-path (CAMIF) reg update triggers the
+ * registered callback for now.
+ */
+static int cam_ife_hw_mgr_handle_rup_for_camif_hw_res(
+	void                              *handler_priv,
+	void                              *payload)
+{
+	struct cam_isp_resource_node            *hw_res;
+	struct cam_ife_hw_mgr_ctx               *ife_hwr_mgr_ctx;
+	struct cam_vfe_top_irq_evt_payload      *evt_payload;
+	struct cam_ife_hw_mgr_res               *isp_ife_camif_res = NULL;
+	cam_hw_event_cb_func                     ife_hwr_irq_rup_cb;
+	struct cam_isp_hw_reg_update_event_data  rup_event_data;
+	uint32_t  core_idx;
+	int32_t   rup_status = -EINVAL;
+
+	CDBG("%s: Enter\n", __func__);
+
+	/* validate before any use of the parameters */
+	if (!handler_priv || !payload) {
+		pr_err("%s: Invalid Parameter\n", __func__);
+		return -EPERM;
+	}
+
+	ife_hwr_mgr_ctx = handler_priv;
+	evt_payload = payload;
+
+	core_idx = evt_payload->core_index;
+	ife_hwr_irq_rup_cb =
+		ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_REG_UPDATE];
+
+	evt_payload->evt_id = CAM_ISP_HW_EVENT_REG_UPDATE;
+	list_for_each_entry(isp_ife_camif_res,
+			&ife_hwr_mgr_ctx->res_list_ife_src, list) {
+
+		if (isp_ife_camif_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
+			continue;
+
+		CDBG("%s: camif resource id = %d, curr_core_idx = %d\n",
+			__func__, isp_ife_camif_res->res_id, core_idx);
+		switch (isp_ife_camif_res->res_id) {
+		case CAM_ISP_HW_VFE_IN_CAMIF:
+			if (isp_ife_camif_res->is_dual_vfe)
+				/* It checks for slave core RUP ACK*/
+				hw_res = isp_ife_camif_res->hw_res[1];
+			else
+				hw_res = isp_ife_camif_res->hw_res[0];
+
+			if (!hw_res) {
+				pr_err("%s: CAMIF device is NULL\n", __func__);
+				break;
+			}
+			CDBG("%s: current_core_id = %d , core_idx res = %d\n",
+					__func__, core_idx,
+					hw_res->hw_intf->hw_idx);
+
+			if (core_idx == hw_res->hw_intf->hw_idx) {
+				rup_status = hw_res->bottom_half_handler(
+					hw_res, evt_payload);
+			}
+			break;
+
+		case CAM_ISP_HW_VFE_IN_RDI0:
+		case CAM_ISP_HW_VFE_IN_RDI1:
+		case CAM_ISP_HW_VFE_IN_RDI2:
+			hw_res = isp_ife_camif_res->hw_res[0];
+
+			if (!hw_res) {
+				pr_err("%s: RDI Device is NULL\n", __func__);
+				break;
+			}
+			if (core_idx == hw_res->hw_intf->hw_idx)
+				/* Need to process rdi reg update */
+				rup_status = -EINVAL;
+			break;
+		default:
+			pr_err("%s: invalid resource id (%d)", __func__,
+				isp_ife_camif_res->res_id);
+		}
+
+		/* only do callback for pixel reg update for now */
+		if (!rup_status && (isp_ife_camif_res->res_id ==
+			CAM_ISP_HW_VFE_IN_CAMIF)) {
+			ife_hwr_irq_rup_cb(ife_hwr_mgr_ctx->common.cb_priv,
+				CAM_ISP_HW_EVENT_REG_UPDATE, &rup_event_data);
+		}
+
+	}
+
+	CDBG("%s: Exit (rup_status = %d)!\n", __func__, rup_status);
+	return 0;
+}
+
+/*
+ * cam_ife_hw_mgr_check_epoch_for_dual_vfe - pair up EPOCH events from
+ * the two cores of a dual-VFE context.
+ *
+ * Returns 0 (and clears both counters) when both cores have reported
+ * the same number of EPOCHs; -1 when only one side has reported so far
+ * or when the counters have drifted apart by more than one frame.
+ */
+static int cam_ife_hw_mgr_check_epoch_for_dual_vfe(
+	struct cam_ife_hw_mgr_ctx   *ife_hw_mgr_ctx,
+	uint32_t                     core_idx0,
+	uint32_t                     core_idx1)
+{
+	int32_t rc = -1;
+	uint32_t *epoch_cnt = ife_hw_mgr_ctx->epoch_cnt;
+
+	if (epoch_cnt[core_idx0] ==
+			epoch_cnt[core_idx1]) {
+
+		epoch_cnt[core_idx0] = 0;
+		epoch_cnt[core_idx1] = 0;
+
+		rc = 0;
+		return rc;
+	}
+
+	/*
+	 * Wrap-safe drift check: the former (a - b > 1) test on
+	 * unsigned counters wrapped around when a < b, so a difference
+	 * of exactly one frame falsely hit the error branch.
+	 */
+	if ((epoch_cnt[core_idx0] > epoch_cnt[core_idx1] + 1) ||
+		(epoch_cnt[core_idx1] > epoch_cnt[core_idx0] + 1)) {
+
+		pr_warn("%s:One of the VFE of dual VFE cound not generate error\n",
+			__func__);
+		rc = -1;
+		return rc;
+	}
+
+	CDBG("Only one core_index has given EPOCH\n");
+
+	return rc;
+}
+
+/*
+ * EPOCH bottom-half handler for CAMIF resources.
+ *
+ * Single VFE: dispatch to the left core's handler and, on success,
+ * invoke the registered EPOCH callback directly.
+ * Dual VFE: count EPOCHs per core and fire the callback only when
+ * cam_ife_hw_mgr_check_epoch_for_dual_vfe() reports both cores in
+ * sync.
+ */
+static int cam_ife_hw_mgr_handle_epoch_for_camif_hw_res(
+	void                              *handler_priv,
+	void                              *payload)
+{
+	int32_t rc = -EINVAL;
+	struct cam_isp_resource_node         *hw_res_l;
+	struct cam_isp_resource_node         *hw_res_r;
+	struct cam_ife_hw_mgr_ctx            *ife_hwr_mgr_ctx;
+	struct cam_vfe_top_irq_evt_payload   *evt_payload;
+	struct cam_ife_hw_mgr_res            *isp_ife_camif_res = NULL;
+	cam_hw_event_cb_func                  ife_hwr_irq_epoch_cb;
+	struct cam_isp_hw_epoch_event_data    epoch_done_event_data;
+	uint32_t  core_idx;
+	/* unsigned on purpose? stores -EINVAL but is only tested with ! */
+	uint32_t  epoch_status = -EINVAL;
+	uint32_t  core_index0;
+	uint32_t  core_index1;
+
+	CDBG("%s:Enter\n", __func__);
+
+	ife_hwr_mgr_ctx = handler_priv;
+	evt_payload = payload;
+	ife_hwr_irq_epoch_cb =
+		ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_EPOCH];
+	core_idx = evt_payload->core_index;
+
+	evt_payload->evt_id = CAM_ISP_HW_EVENT_EPOCH;
+
+	/* only CAMIF sources generate EPOCH; skip everything else */
+	list_for_each_entry(isp_ife_camif_res,
+		&ife_hwr_mgr_ctx->res_list_ife_src, list) {
+		if ((isp_ife_camif_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
+			|| (isp_ife_camif_res->res_id !=
+			CAM_ISP_HW_VFE_IN_CAMIF))
+			continue;
+
+		hw_res_l = isp_ife_camif_res->hw_res[0];
+		hw_res_r = isp_ife_camif_res->hw_res[1];
+
+		switch (isp_ife_camif_res->is_dual_vfe) {
+		/* Handling Single VFE Scenario */
+		case 0:
+			/* EPOCH check for Left side VFE */
+			if (!hw_res_l) {
+				pr_err("%s: Left Device is NULL\n",
+					__func__);
+				break;
+			}
+
+			if (core_idx == hw_res_l->hw_intf->hw_idx) {
+				epoch_status = hw_res_l->bottom_half_handler(
+					hw_res_l, evt_payload);
+				if (!epoch_status)
+					ife_hwr_irq_epoch_cb(
+						ife_hwr_mgr_ctx->common.cb_priv,
+						CAM_ISP_HW_EVENT_EPOCH,
+						&epoch_done_event_data);
+			}
+
+			break;
+
+		/* Handling Dual VFE Scenario */
+		case 1:
+			/* SOF check for Left side VFE (Master)*/
+
+			if ((!hw_res_l) || (!hw_res_r)) {
+				pr_err("%s: Dual VFE Device is NULL\n",
+					__func__);
+				break;
+			}
+			if (core_idx == hw_res_l->hw_intf->hw_idx) {
+				epoch_status = hw_res_l->bottom_half_handler(
+					hw_res_l, evt_payload);
+
+				if (!epoch_status)
+					ife_hwr_mgr_ctx->epoch_cnt[core_idx]++;
+			}
+
+			/* SOF check for Right side VFE */
+			if (core_idx == hw_res_r->hw_intf->hw_idx) {
+				epoch_status = hw_res_r->bottom_half_handler(
+					hw_res_r, evt_payload);
+
+				if (!epoch_status)
+					ife_hwr_mgr_ctx->epoch_cnt[core_idx]++;
+			}
+
+			core_index0 = hw_res_l->hw_intf->hw_idx;
+			core_index1 = hw_res_r->hw_intf->hw_idx;
+
+			/* fire the callback only when both cores are in sync */
+			rc = cam_ife_hw_mgr_check_epoch_for_dual_vfe(
+					ife_hwr_mgr_ctx,
+					core_index0,
+					core_index1);
+
+			if (!rc)
+				ife_hwr_irq_epoch_cb(
+					ife_hwr_mgr_ctx->common.cb_priv,
+					CAM_ISP_HW_EVENT_EPOCH,
+					&epoch_done_event_data);
+
+			break;
+
+		/* Error */
+		default:
+			pr_err("%s: error with hw_res\n", __func__);
+
+		}
+	}
+
+	CDBG("%s: Exit (epoch_status = %d)!\n", __func__, epoch_status);
+	return 0;
+}
+
+/*
+ * cam_ife_hw_mgr_check_sof_for_dual_vfe - pair up SOF events from the
+ * two cores of a dual-VFE context.
+ *
+ * Returns 0 (and clears both counters) when both cores have reported
+ * the same number of SOFs; -1 when only one side has reported so far
+ * or when the counters have drifted apart by more than one frame.
+ */
+static int cam_ife_hw_mgr_check_sof_for_dual_vfe(
+	struct cam_ife_hw_mgr_ctx   *ife_hwr_mgr_ctx,
+	uint32_t                     core_idx0,
+	uint32_t                     core_idx1)
+{
+	uint32_t *sof_cnt = ife_hwr_mgr_ctx->sof_cnt;
+	int32_t rc = -1;
+
+	if (sof_cnt[core_idx0] ==
+			sof_cnt[core_idx1]) {
+
+		sof_cnt[core_idx0] = 0;
+		sof_cnt[core_idx1] = 0;
+
+		rc = 0;
+		return rc;
+	}
+
+	/*
+	 * Wrap-safe drift check: the former (a - b > 1) test on
+	 * unsigned counters wrapped around when a < b, so a difference
+	 * of exactly one frame falsely hit the error branch.
+	 */
+	if ((sof_cnt[core_idx0] > sof_cnt[core_idx1] + 1) ||
+		(sof_cnt[core_idx1] > sof_cnt[core_idx0] + 1)) {
+
+		pr_err("%s: One VFE of dual VFE cound not generate SOF\n",
+					__func__);
+		rc = -1;
+		return rc;
+	}
+
+	pr_info("Only one core_index has given SOF\n");
+
+	return rc;
+}
+
+/*
+ * SOF bottom-half handler for CAMIF resources.
+ *
+ * Single VFE: dispatch to the left core's handler and, on success,
+ * invoke the registered SOF callback directly.
+ * Dual VFE: count SOFs per core and fire the callback only when
+ * cam_ife_hw_mgr_check_sof_for_dual_vfe() reports both cores in sync.
+ */
+static int cam_ife_hw_mgr_handle_sof_for_camif_hw_res(
+	void                              *handler_priv,
+	void                              *payload)
+{
+	int32_t rc = -1;
+	struct cam_isp_resource_node         *hw_res_l = NULL;
+	struct cam_isp_resource_node         *hw_res_r = NULL;
+	struct cam_ife_hw_mgr_ctx            *ife_hwr_mgr_ctx;
+	struct cam_vfe_top_irq_evt_payload   *evt_payload;
+	struct cam_ife_hw_mgr_res            *isp_ife_camif_res = NULL;
+	cam_hw_event_cb_func                  ife_hwr_irq_sof_cb;
+	struct cam_isp_hw_sof_event_data      sof_done_event_data;
+	uint32_t  core_idx;
+	uint32_t  sof_status = 0;
+	uint32_t  core_index0;
+	uint32_t  core_index1;
+
+	CDBG("%s:Enter\n", __func__);
+
+	ife_hwr_mgr_ctx = handler_priv;
+	evt_payload = payload;
+	/* NOTE(review): returns IRQ_HANDLED (not an errno) on NULL payload */
+	if (!evt_payload) {
+		pr_err("%s: no payload\n", __func__);
+		return IRQ_HANDLED;
+	}
+	core_idx = evt_payload->core_index;
+	ife_hwr_irq_sof_cb =
+		ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_SOF];
+
+	evt_payload->evt_id = CAM_ISP_HW_EVENT_SOF;
+
+	/* only CAMIF sources generate SOF; skip everything else */
+	list_for_each_entry(isp_ife_camif_res,
+		&ife_hwr_mgr_ctx->res_list_ife_src, list) {
+
+		if ((isp_ife_camif_res->res_type ==
+			CAM_IFE_HW_MGR_RES_UNINIT) ||
+			(isp_ife_camif_res->res_id != CAM_ISP_HW_VFE_IN_CAMIF))
+			continue;
+
+		hw_res_l = isp_ife_camif_res->hw_res[0];
+		hw_res_r = isp_ife_camif_res->hw_res[1];
+
+		CDBG("%s:is_dual_vfe ? = %d\n", __func__,
+			isp_ife_camif_res->is_dual_vfe);
+		switch (isp_ife_camif_res->is_dual_vfe) {
+		/* Handling Single VFE Scenario */
+		case 0:
+			/* SOF check for Left side VFE */
+			if (!hw_res_l) {
+				pr_err("%s: VFE Device is NULL\n",
+					__func__);
+				break;
+			}
+			CDBG("%s: curr_core_idx = %d, core idx hw = %d\n",
+					__func__, core_idx,
+					hw_res_l->hw_intf->hw_idx);
+
+			if (core_idx == hw_res_l->hw_intf->hw_idx) {
+				sof_status = hw_res_l->bottom_half_handler(
+					hw_res_l, evt_payload);
+				if (!sof_status)
+					ife_hwr_irq_sof_cb(
+						ife_hwr_mgr_ctx->common.cb_priv,
+						CAM_ISP_HW_EVENT_SOF,
+						&sof_done_event_data);
+			}
+
+			break;
+
+		/* Handling Dual VFE Scenario */
+		case 1:
+			/* SOF check for Left side VFE */
+
+			if (!hw_res_l) {
+				pr_err("%s: VFE Device is NULL\n",
+					__func__);
+				break;
+			}
+			CDBG("%s: curr_core_idx = %d, idx associated hw = %d\n",
+					__func__, core_idx,
+					hw_res_l->hw_intf->hw_idx);
+
+			if (core_idx == hw_res_l->hw_intf->hw_idx) {
+				sof_status = hw_res_l->bottom_half_handler(
+					hw_res_l, evt_payload);
+				if (!sof_status)
+					ife_hwr_mgr_ctx->sof_cnt[core_idx]++;
+			}
+
+			/* SOF check for Right side VFE */
+			if (!hw_res_r) {
+				pr_err("%s: VFE Device is NULL\n",
+					__func__);
+				break;
+			}
+			CDBG("%s: curr_core_idx = %d, idx associated hw = %d\n",
+					__func__, core_idx,
+					hw_res_r->hw_intf->hw_idx);
+			if (core_idx == hw_res_r->hw_intf->hw_idx) {
+				sof_status = hw_res_r->bottom_half_handler(
+					hw_res_r, evt_payload);
+				if (!sof_status)
+					ife_hwr_mgr_ctx->sof_cnt[core_idx]++;
+			}
+
+			core_index0 = hw_res_l->hw_intf->hw_idx;
+			core_index1 = hw_res_r->hw_intf->hw_idx;
+
+			/* fire the callback only when both cores are in sync */
+			rc = cam_ife_hw_mgr_check_sof_for_dual_vfe(
+				ife_hwr_mgr_ctx, core_index0, core_index1);
+
+			if (!rc)
+				ife_hwr_irq_sof_cb(
+					ife_hwr_mgr_ctx->common.cb_priv,
+					CAM_ISP_HW_EVENT_SOF,
+					&sof_done_event_data);
+
+			break;
+
+		default:
+			pr_err("%s: error with hw_res\n", __func__);
+		}
+	}
+
+	CDBG("%s: Exit (sof_status = %d)!\n", __func__, sof_status);
+	return 0;
+}
+
+/*
+ * Buffer-done bottom-half handler: walk every active IFE out resource,
+ * dispatch the payload to the master (index 0) write-master handler,
+ * then either report a successful buf-done to the registered callback
+ * or escalate composite/overwrite errors through the overflow path.
+ */
+static int cam_ife_hw_mgr_handle_buf_done_for_hw_res(
+	void                              *handler_priv,
+	void                              *payload)
+
+{
+	int32_t                              buf_done_status = 0;
+	int32_t                              i = 0;
+	int32_t                              rc = 0;
+	cam_hw_event_cb_func                 ife_hwr_irq_wm_done_cb;
+	struct cam_isp_resource_node        *hw_res_l = NULL;
+	struct cam_ife_hw_mgr_ctx           *ife_hwr_mgr_ctx = handler_priv;
+	struct cam_vfe_bus_irq_evt_payload  *evt_payload = payload;
+	struct cam_ife_hw_mgr_res           *isp_ife_out_res = NULL;
+	/*
+	 * NOTE(review): recovery_data is not zero-initialized here; it is
+	 * only passed to cam_ife_hw_mgr_handle_overflow(), which fills it
+	 * (and currently returns early anyway) - confirm.
+	 */
+	struct cam_hw_event_recovery_data    recovery_data;
+	struct cam_isp_hw_done_event_data    buf_done_event_data = {0};
+	struct cam_isp_hw_error_event_data   error_event_data = {0};
+	/* collected error handles; only the count is acted upon below */
+	uint32_t  error_resc_handle[CAM_IFE_HW_OUT_RES_MAX];
+	uint32_t  num_of_error_handles = 0;
+
+	CDBG("%s:Enter\n", __func__);
+
+	ife_hwr_irq_wm_done_cb =
+		ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_DONE];
+
+	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
+		isp_ife_out_res = &ife_hwr_mgr_ctx->res_list_ife_out[i];
+
+		if (isp_ife_out_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
+			continue;
+
+		hw_res_l = isp_ife_out_res->hw_res[0];
+
+		/*
+		 * DUAL VFE: Index 0 is always a master. In case of composite
+		 * Error, if the error is not in master, it needs to be checked
+		 * in slave (for debuging purpose only) For other cases:
+		 * Index zero is valid
+		 */
+
+		if (hw_res_l && (evt_payload->core_index ==
+			hw_res_l->hw_intf->hw_idx))
+			buf_done_status = hw_res_l->bottom_half_handler(
+				hw_res_l, evt_payload);
+
+		switch (buf_done_status) {
+		case CAM_VFE_IRQ_STATUS_ERR_COMP:
+			/*
+			 * Write interface can pipeline upto 2 buffer done
+			 * strobes from each write client. If any of the client
+			 * triggers a third buffer done strobe before a
+			 * composite interrupt based on the first buffer doneis
+			 * triggered an error irq is set. This scenario can
+			 * only happen if a client is 3 frames ahead of the
+			 * other clients enabled in the same composite mask.
+			 */
+		case CAM_VFE_IRQ_STATUS_COMP_OWRT:
+			/*
+			 * It is an indication that bandwidth is not sufficient
+			 * to generate composite done irq within the VBI time.
+			 */
+
+			error_resc_handle[num_of_error_handles++] =
+					isp_ife_out_res->res_id;
+
+			if (num_of_error_handles > 0) {
+				error_event_data.error_type =
+					CAM_ISP_HW_ERROR_BUSIF_OVERFLOW;
+				goto err;
+			}
+
+			break;
+		case CAM_VFE_IRQ_STATUS_ERR:
+			break;
+		case CAM_VFE_IRQ_STATUS_SUCCESS:
+			buf_done_event_data.num_handles = 1;
+			buf_done_event_data.resource_handle[0] =
+				isp_ife_out_res->res_id;
+
+			/* Report for Successful buf_done event if any */
+			if (buf_done_event_data.num_handles > 0 &&
+				ife_hwr_irq_wm_done_cb) {
+				CDBG("%s: notify isp context\n", __func__);
+				ife_hwr_irq_wm_done_cb(
+					ife_hwr_mgr_ctx->common.cb_priv,
+					CAM_ISP_HW_EVENT_DONE,
+					&buf_done_event_data);
+			}
+
+			break;
+		default:
+			/* Do NOTHING */
+			error_resc_handle[num_of_error_handles++] =
+				isp_ife_out_res->res_id;
+			if (num_of_error_handles > 0) {
+				error_event_data.error_type =
+					CAM_ISP_HW_ERROR_BUSIF_OVERFLOW;
+				goto err;
+			}
+			break;
+		}
+		CDBG("%s:buf_done status:(%d),isp_ife_out_res->res_id : 0x%x\n",
+			__func__, buf_done_status, isp_ife_out_res->res_id);
+	}
+
+
+	CDBG("%s: Exit (buf_done_status (Success) = %d)!\n", __func__,
+			buf_done_status);
+	return rc;
+
+err:
+	/*
+	 * Report for error if any.
+	 * For the first phase, Error is reported as overflow, for all
+	 * the affected context and any successful buf_done event is not
+	 * reported.
+	 */
+	rc = cam_ife_hw_mgr_handle_overflow(ife_hwr_mgr_ctx,
+		&error_event_data, evt_payload->core_index,
+		&recovery_data);
+
+	/*
+	 * We can temporarily return from here as
+	 * for the first phase, we are going to reset entire HW.
+	 */
+
+	CDBG("%s: Exit (buf_done_status (Error) = %d)!\n", __func__,
+			buf_done_status);
+	return rc;
+}
+
+/*
+ * cam_ife_mgr_do_tasklet_buf_done - tasklet entry for bus (buf-done)
+ * IRQ payloads: handle pending errors first, then dispatch buf-done.
+ */
+int cam_ife_mgr_do_tasklet_buf_done(void *handler_priv,
+	void *evt_payload_priv)
+{
+	struct cam_ife_hw_mgr_ctx               *ife_hwr_mgr_ctx = handler_priv;
+	struct cam_vfe_bus_irq_evt_payload      *evt_payload;
+	int rc = -EINVAL;
+
+	/* the payload is dereferenced below, so both pointers must be valid */
+	if (!handler_priv || !evt_payload_priv)
+		return rc;
+
+	evt_payload = evt_payload_priv;
+	ife_hwr_mgr_ctx = (struct cam_ife_hw_mgr_ctx *)handler_priv;
+
+	CDBG("addr of evt_payload = %llx\n", (uint64_t)evt_payload);
+	CDBG("bus_irq_status_0: = %x\n", evt_payload->irq_reg_val[0]);
+	CDBG("bus_irq_status_1: = %x\n", evt_payload->irq_reg_val[1]);
+	CDBG("bus_irq_status_2: = %x\n", evt_payload->irq_reg_val[2]);
+	CDBG("bus_irq_comp_err: = %x\n", evt_payload->irq_reg_val[3]);
+	CDBG("bus_irq_comp_owrt: = %x\n", evt_payload->irq_reg_val[4]);
+	CDBG("bus_irq_dual_comp_err: = %x\n", evt_payload->irq_reg_val[5]);
+	CDBG("bus_irq_dual_comp_owrt: = %x\n", evt_payload->irq_reg_val[6]);
+
+	/*
+	 * If overflow/overwrite/error/violation are pending
+	 * for this context it needs to be handled remaining
+	 * interrupts are ignored.
+	 */
+	rc = cam_ife_hw_mgr_handle_camif_error(ife_hwr_mgr_ctx,
+		evt_payload_priv);
+	if (rc) {
+		pr_err("%s: Encountered Error (%d), ignoring other irqs\n",
+			__func__, rc);
+		return IRQ_HANDLED;
+	}
+
+	CDBG("%s: Calling Buf_done\n", __func__);
+	/* WM Done */
+	return cam_ife_hw_mgr_handle_buf_done_for_hw_res(ife_hwr_mgr_ctx,
+		evt_payload_priv);
+}
+
+/*
+ * cam_ife_mgr_do_tasklet - tasklet entry for top (camif) IRQ payloads:
+ * handle pending errors first, then dispatch SOF, RUP and EPOCH.
+ */
+int cam_ife_mgr_do_tasklet(void *handler_priv, void *evt_payload_priv)
+{
+	struct cam_ife_hw_mgr_ctx            *ife_hwr_mgr_ctx = handler_priv;
+	struct cam_vfe_top_irq_evt_payload   *evt_payload;
+	int rc = -EINVAL;
+
+	/* the payload is dereferenced below, so both pointers must be valid */
+	if (!handler_priv || !evt_payload_priv)
+		return rc;
+
+	evt_payload = evt_payload_priv;
+	ife_hwr_mgr_ctx = (struct cam_ife_hw_mgr_ctx *)handler_priv;
+
+	CDBG("addr of evt_payload = %llx\n", (uint64_t)evt_payload);
+	CDBG("irq_status_0: = %x\n", evt_payload->irq_reg_val[0]);
+	CDBG("irq_status_1: = %x\n", evt_payload->irq_reg_val[1]);
+	CDBG("Violation register: = %x\n", evt_payload->irq_reg_val[2]);
+
+	/*
+	 * If overflow/overwrite/error/violation are pending
+	 * for this context it needs to be handled remaining
+	 * interrupts are ignored.
+	 */
+	rc = cam_ife_hw_mgr_handle_camif_error(ife_hwr_mgr_ctx,
+		evt_payload_priv);
+	if (rc) {
+		pr_err("%s: Encountered Error (%d), ignoring other irqs\n",
+			__func__, rc);
+		return IRQ_HANDLED;
+	}
+
+	CDBG("%s: Calling SOF\n", __func__);
+	/* SOF IRQ */
+	cam_ife_hw_mgr_handle_sof_for_camif_hw_res(ife_hwr_mgr_ctx,
+		evt_payload_priv);
+
+	CDBG("%s: Calling RUP\n", __func__);
+	/* REG UPDATE */
+	cam_ife_hw_mgr_handle_rup_for_camif_hw_res(ife_hwr_mgr_ctx,
+		evt_payload_priv);
+
+	CDBG("%s: Calling EPOCH\n", __func__);
+	/* EPOCH IRQ */
+	cam_ife_hw_mgr_handle_epoch_for_camif_hw_res(ife_hwr_mgr_ctx,
+		evt_payload_priv);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * cam_ife_hw_mgr_sort_dev_with_caps() - cache per-core capabilities.
+ *
+ * Queries get_hw_caps() on every probed CSID and IFE device and stores
+ * the results in the manager's ife_csid_dev_caps[] / ife_dev_caps[]
+ * arrays, indexed by core.  Devices without a get_hw_caps op (or not
+ * probed) are silently skipped.  Always returns 0.
+ */
+static int cam_ife_hw_mgr_sort_dev_with_caps(
+	struct cam_ife_hw_mgr *ife_hw_mgr)
+{
+	int i;
+
+	/* get caps for csid devices */
+	for (i = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) {
+		if (!ife_hw_mgr->csid_devices[i])
+			continue;
+		if (ife_hw_mgr->csid_devices[i]->hw_ops.get_hw_caps) {
+			ife_hw_mgr->csid_devices[i]->hw_ops.get_hw_caps(
+				ife_hw_mgr->csid_devices[i]->hw_priv,
+				&ife_hw_mgr->ife_csid_dev_caps[i],
+				sizeof(ife_hw_mgr->ife_csid_dev_caps[i]));
+		}
+	}
+
+	/* get caps for ife devices */
+	for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
+		if (!ife_hw_mgr->ife_devices[i])
+			continue;
+		if (ife_hw_mgr->ife_devices[i]->hw_ops.get_hw_caps) {
+			ife_hw_mgr->ife_devices[i]->hw_ops.get_hw_caps(
+				ife_hw_mgr->ife_devices[i]->hw_priv,
+				&ife_hw_mgr->ife_dev_caps[i],
+				sizeof(ife_hw_mgr->ife_dev_caps[i]));
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * cam_ife_hw_mgr_init() - one-time initialization of the IFE HW manager.
+ *
+ * @hw_mgr_intf: interface structure filled with the manager's ops on
+ *               success
+ *
+ * Probes all VFE and CSID cores, caches their capabilities, acquires the
+ * image/CDM iommu handles, and builds the context/resource pools and the
+ * worker queue.  Returns 0 on success or a negative errno; on failure the
+ * per-context cdm_cmd allocations are released.
+ */
+int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf)
+{
+	int rc = -EFAULT;
+	int i, j;
+	struct cam_iommu_handle cdm_handles;
+
+	pr_info("%s: Enter\n", __func__);
+
+	memset(&g_ife_hw_mgr, 0, sizeof(g_ife_hw_mgr));
+
+	mutex_init(&g_ife_hw_mgr.ctx_mutex);
+
+	if (CAM_IFE_HW_NUM_MAX != CAM_IFE_CSID_HW_NUM_MAX) {
+		pr_err("%s: Fatal, CSID num is different then IFE num!\n",
+			__func__);
+		goto end;
+	}
+
+	/* fill ife hw intf information */
+	for (i = 0, j = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
+		rc = cam_vfe_hw_init(&g_ife_hw_mgr.ife_devices[i], i);
+		if (!rc) {
+			struct cam_hw_info *vfe_hw =
+				(struct cam_hw_info *)
+				g_ife_hw_mgr.ife_devices[i]->hw_priv;
+			struct cam_hw_soc_info *soc_info = &vfe_hw->soc_info;
+
+			j++;
+
+			g_ife_hw_mgr.cdm_reg_map[i] = &soc_info->reg_map[0];
+			CDBG("reg_map: mem base = 0x%llx, cam_base = 0x%llx\n",
+				(uint64_t) soc_info->reg_map[0].mem_base,
+				(uint64_t) soc_info->reg_map[0].mem_cam_base);
+		} else {
+			g_ife_hw_mgr.cdm_reg_map[i] = NULL;
+		}
+	}
+	if (j == 0) {
+		/* rc is non-zero here: had the last probe succeeded, j > 0 */
+		pr_err("%s: no valid IFE HW!\n", __func__);
+		goto end;
+	}
+
+	/* fill csid hw intf information */
+	for (i = 0, j = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) {
+		rc = cam_ife_csid_hw_init(&g_ife_hw_mgr.csid_devices[i], i);
+		if (!rc)
+			j++;
+	}
+	if (!j) {
+		pr_err("%s: no valid IFE CSID HW!\n", __func__);
+		goto end;
+	}
+
+	cam_ife_hw_mgr_sort_dev_with_caps(&g_ife_hw_mgr);
+
+	/* setup ife context list */
+	INIT_LIST_HEAD(&g_ife_hw_mgr.free_ctx_list);
+	INIT_LIST_HEAD(&g_ife_hw_mgr.used_ctx_list);
+
+	/*
+	 *  for now, we only support one iommu handle. later
+	 *  we will need to setup more iommu handle for other
+	 *  use cases.
+	 *  Also, we have to release them once we have the
+	 *  deinit support
+	 */
+	if (cam_smmu_get_handle("ife",
+		&g_ife_hw_mgr.mgr_common.img_iommu_hdl)) {
+		pr_err("%s: Can not get iommu handle.\n", __func__);
+		/*
+		 * rc may still hold 0 from the last csid probe above;
+		 * force an error so the caller sees the failure and the
+		 * cleanup at "end" actually runs.
+		 */
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (cam_smmu_ops(g_ife_hw_mgr.mgr_common.img_iommu_hdl,
+		CAM_SMMU_ATTACH)) {
+		pr_err("%s: Attach iommu handle failed.\n", __func__);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	CDBG("got iommu_handle=%d\n", g_ife_hw_mgr.mgr_common.img_iommu_hdl);
+	g_ife_hw_mgr.mgr_common.img_iommu_hdl_secure = -1;
+
+	if (!cam_cdm_get_iommu_handle("ife", &cdm_handles)) {
+		CDBG("Successfully acquired the CDM iommu handles\n");
+		g_ife_hw_mgr.mgr_common.cmd_iommu_hdl = cdm_handles.non_secure;
+		g_ife_hw_mgr.mgr_common.cmd_iommu_hdl_secure =
+			cdm_handles.secure;
+	} else {
+		/* CDM handles are optional; fall back to invalid markers */
+		CDBG("Failed to acquire the CDM iommu handles\n");
+		g_ife_hw_mgr.mgr_common.cmd_iommu_hdl = -1;
+		g_ife_hw_mgr.mgr_common.cmd_iommu_hdl_secure = -1;
+	}
+
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		memset(&g_ife_hw_mgr.ctx_pool[i], 0,
+			sizeof(g_ife_hw_mgr.ctx_pool[i]));
+		INIT_LIST_HEAD(&g_ife_hw_mgr.ctx_pool[i].list);
+
+		INIT_LIST_HEAD(&g_ife_hw_mgr.ctx_pool[i].res_list_ife_in.list);
+		INIT_LIST_HEAD(&g_ife_hw_mgr.ctx_pool[i].res_list_ife_cid);
+		INIT_LIST_HEAD(&g_ife_hw_mgr.ctx_pool[i].res_list_ife_csid);
+		INIT_LIST_HEAD(&g_ife_hw_mgr.ctx_pool[i].res_list_ife_src);
+		for (j = 0; j < CAM_IFE_HW_OUT_RES_MAX; j++) {
+			INIT_LIST_HEAD(&g_ife_hw_mgr.ctx_pool[i].
+				res_list_ife_out[j].list);
+		}
+
+		/* init context pool */
+		INIT_LIST_HEAD(&g_ife_hw_mgr.ctx_pool[i].free_res_list);
+		for (j = 0; j < CAM_IFE_HW_RES_POOL_MAX; j++) {
+			INIT_LIST_HEAD(
+				&g_ife_hw_mgr.ctx_pool[i].res_pool[j].list);
+			list_add_tail(
+				&g_ife_hw_mgr.ctx_pool[i].res_pool[j].list,
+				&g_ife_hw_mgr.ctx_pool[i].free_res_list);
+		}
+
+		/* one header plus CAM_IFE_HW_ENTRIES_MAX trailing bl cmds */
+		g_ife_hw_mgr.ctx_pool[i].cdm_cmd =
+			kzalloc(((sizeof(struct cam_cdm_bl_request)) +
+				((CAM_IFE_HW_ENTRIES_MAX - 1) *
+				 sizeof(struct cam_cdm_bl_cmd))), GFP_KERNEL);
+		if (!g_ife_hw_mgr.ctx_pool[i].cdm_cmd) {
+			rc = -ENOMEM;
+			pr_err("Allocation Failed for cdm command\n");
+			goto end;
+		}
+
+		g_ife_hw_mgr.ctx_pool[i].ctx_index = i;
+		g_ife_hw_mgr.ctx_pool[i].hw_mgr = &g_ife_hw_mgr;
+
+		/*
+		 * NOTE(review): cam_tasklet_init() return value is ignored -
+		 * confirm it cannot fail here.
+		 */
+		cam_tasklet_init(&g_ife_hw_mgr.mgr_common.tasklet_pool[i],
+			&g_ife_hw_mgr.ctx_pool[i], i);
+		g_ife_hw_mgr.ctx_pool[i].common.tasklet_info =
+			g_ife_hw_mgr.mgr_common.tasklet_pool[i];
+
+		list_add_tail(&g_ife_hw_mgr.ctx_pool[i].list,
+			&g_ife_hw_mgr.free_ctx_list);
+	}
+
+	/* Create Worker for ife_hw_mgr with 10 tasks */
+	rc = cam_req_mgr_workq_create("cam_ife_worker", 10,
+			&g_ife_hw_mgr.workq);
+
+	if (rc < 0) {
+		pr_err("%s: Unable to create worker\n", __func__);
+		goto end;
+	}
+
+	/* fill return structure */
+	hw_mgr_intf->hw_mgr_priv = &g_ife_hw_mgr;
+	hw_mgr_intf->hw_get_caps = cam_ife_mgr_get_hw_caps;
+	hw_mgr_intf->hw_acquire = cam_ife_mgr_acquire_hw;
+	hw_mgr_intf->hw_start = cam_ife_mgr_start_hw;
+	hw_mgr_intf->hw_stop = cam_ife_mgr_stop_hw;
+	hw_mgr_intf->hw_read = cam_ife_mgr_read;
+	hw_mgr_intf->hw_write = cam_ife_mgr_write;
+	hw_mgr_intf->hw_release = cam_ife_mgr_release_hw;
+	hw_mgr_intf->hw_prepare_update = cam_ife_mgr_prepare_hw_update;
+	hw_mgr_intf->hw_config = cam_ife_mgr_config_hw;
+
+	pr_info("%s: Exit\n", __func__);
+	return 0;
+end:
+	if (rc) {
+		for (i = 0; i < CAM_CTX_MAX; i++)
+			kfree(g_ife_hw_mgr.ctx_pool[i].cdm_cmd);
+	}
+	return rc;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
new file mode 100644
index 0000000..174d2ce
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
@@ -0,0 +1,210 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_IFE_HW_MGR_H_
+#define _CAM_IFE_HW_MGR_H_
+
+#include "cam_isp_hw_mgr.h"
+#include "cam_vfe_hw_intf.h"
+#include "cam_ife_csid_hw_intf.h"
+#include "cam_tasklet_util.h"
+
+/* MAX IFE instance */
+#define CAM_IFE_HW_NUM_MAX                       4
+
+/* enum cam_ife_hw_mgr_res_type - manager resource node type */
+enum cam_ife_hw_mgr_res_type {
+	CAM_IFE_HW_MGR_RES_UNINIT,
+	CAM_IFE_HW_MGR_RES_ROOT,
+	CAM_IFE_HW_MGR_RES_CID,
+	CAM_IFE_HW_MGR_RES_CSID,
+	CAM_IFE_HW_MGR_RES_IFE_SRC,
+	CAM_IFE_HW_MGR_RES_IFE_OUT,
+};
+
+/* IFE resource constants */
+#define CAM_IFE_HW_IN_RES_MAX            (CAM_ISP_IFE_IN_RES_MAX & 0xFF)
+#define CAM_IFE_HW_OUT_RES_MAX           (CAM_ISP_IFE_OUT_RES_MAX & 0xFF)
+#define CAM_IFE_HW_RES_POOL_MAX          64
+
+/**
+ * struct cam_ife_hw_mgr_res - HW resources for the IFE manager
+ *
+ * @list:                used by the resource list
+ * @res_type:            IFE manager resource type
+ * @res_id:              resource id based on the resource type for root or
+ *                       leaf resource, it matches the KMD interface port id.
+ *                       For branch resource, it is defined by the ISP HW
+ *                       layer
+ * @is_dual_vfe:         non-zero when this resource spans two VFE cores
+ * @hw_res:              hw layer resource array. For single VFE, only one VFE
+ *                       hw resource will be acquired. For dual VFE, two hw
+ *                       resources from different VFE HW device will be
+ *                       acquired
+ * @parent:              points to the parent resource node.
+ * @child:               points to the children resource nodes
+ * @num_children:        number of the child resource nodes.
+ *
+ */
+struct cam_ife_hw_mgr_res {
+	struct list_head                 list;
+	enum cam_ife_hw_mgr_res_type     res_type;
+	uint32_t                         res_id;
+	uint32_t                         is_dual_vfe;
+	struct cam_isp_resource_node    *hw_res[CAM_ISP_HW_SPLIT_MAX];
+
+	/* graph */
+	struct cam_ife_hw_mgr_res       *parent;
+	struct cam_ife_hw_mgr_res       *child[CAM_IFE_HW_OUT_RES_MAX];
+	uint32_t                         num_children;
+};
+
+
+/**
+ * struct ctx_base_info - base hardware information for the context
+ *
+ * @idx:                 Base resource index (IFE HW core index)
+ * @split_id:            split info (left/right/max) for the base resource
+ *
+ */
+struct ctx_base_info {
+	uint32_t                       idx;
+	enum cam_isp_hw_split_id       split_id;
+};
+
+/**
+ * struct cam_ife_hw_mgr_ctx - IFE HW manager Context object
+ *
+ * @list:                   used by the ctx list.
+ * @common:                 common acquired context data
+ * @ctx_index:              acquired context id.
+ * @hw_mgr:                 IFE hw mgr which owns this context
+ * @ctx_in_use:             flag to tell whether context is active
+ * @res_list_ife_in:        Starting resource(TPG,PHY0, PHY1...) Can only be
+ *                          one.
+ * @res_list_ife_cid:       CID resource list
+ * @res_list_ife_csid:      CSID resource list
+ * @res_list_ife_src:       IFE input resource list
+ * @res_list_ife_out:       IFE output resources array
+ * @free_res_list:          Free resources list for the branch node
+ * @res_pool:               memory storage for the free resource list
+ * @irq_status0_mask:       irq_status0_mask for the context
+ * @irq_status1_mask:       irq_status1_mask for the context
+ * @base                    device base index array contain the all IFE HW
+ *                          instance associated with this context.
+ * @num_base                number of valid base data in the base array
+ * @cdm_handle              cdm hw acquire handle
+ * @cdm_ops                 cdm util operation pointer for building
+ *                          cdm commands
+ * @cdm_cmd                 cdm base and length request pointer
+ * @sof_cnt                 sof count value per core, used for dual VFE
+ * @epoch_cnt               epoch count value per core, used for dual VFE
+ * @overflow_pending        flag to specify the overflow is pending for the
+ *                          context
+ */
+struct cam_ife_hw_mgr_ctx {
+	struct list_head                list;
+	struct cam_isp_hw_mgr_ctx       common;
+
+	uint32_t                        ctx_index;
+	struct cam_ife_hw_mgr          *hw_mgr;
+	uint32_t                        ctx_in_use;
+
+	struct cam_ife_hw_mgr_res       res_list_ife_in;
+	struct list_head                res_list_ife_cid;
+	struct list_head                res_list_ife_csid;
+	struct list_head                res_list_ife_src;
+	struct cam_ife_hw_mgr_res       res_list_ife_out[
+						CAM_IFE_HW_OUT_RES_MAX];
+
+	struct list_head                free_res_list;
+	struct cam_ife_hw_mgr_res       res_pool[CAM_IFE_HW_RES_POOL_MAX];
+
+	uint32_t                        irq_status0_mask[CAM_IFE_HW_NUM_MAX];
+	uint32_t                        irq_status1_mask[CAM_IFE_HW_NUM_MAX];
+	struct ctx_base_info            base[CAM_IFE_HW_NUM_MAX];
+	uint32_t                        num_base;
+	uint32_t                        cdm_handle;
+	struct cam_cdm_utils_ops       *cdm_ops;
+	struct cam_cdm_bl_request      *cdm_cmd;
+
+	uint32_t                        sof_cnt[CAM_IFE_HW_NUM_MAX];
+	uint32_t                        epoch_cnt[CAM_IFE_HW_NUM_MAX];
+	atomic_t                        overflow_pending;
+
+};
+
+/**
+ * struct cam_ife_hw_mgr - IFE HW Manager
+ *
+ * @mgr_common:            common data for all HW managers
+ * @csid_devices:          csid device instances array. This will be filled by
+ *                         HW manager during the initialization.
+ * @ife_devices:           IFE device instances array. This will be filled by
+ *                         HW layer during initialization
+ * @cdm_reg_map:           per-core register map used for CDM change-base
+ * @ctx_mutex:             mutex for the hw context pool
+ * @free_ctx_list:         free hw context list
+ * @used_ctx_list:         used hw context list
+ * @ctx_pool:              context storage
+ * @ife_csid_dev_caps      csid device capability stored per core
+ * @ife_dev_caps           ife device capability per core
+ * @workq                  work queue for IFE hw manager
+ */
+struct cam_ife_hw_mgr {
+	struct cam_isp_hw_mgr          mgr_common;
+	struct cam_hw_intf            *csid_devices[CAM_IFE_CSID_HW_NUM_MAX];
+	struct cam_hw_intf            *ife_devices[CAM_IFE_HW_NUM_MAX];
+	struct cam_soc_reg_map        *cdm_reg_map[CAM_IFE_HW_NUM_MAX];
+
+	struct mutex                   ctx_mutex;
+	struct list_head               free_ctx_list;
+	struct list_head               used_ctx_list;
+	struct cam_ife_hw_mgr_ctx      ctx_pool[CAM_CTX_MAX];
+
+	struct cam_ife_csid_hw_caps    ife_csid_dev_caps[
+						CAM_IFE_CSID_HW_NUM_MAX];
+	struct cam_vfe_hw_get_hw_cap   ife_dev_caps[CAM_IFE_HW_NUM_MAX];
+	struct cam_req_mgr_core_workq  *workq;
+};
+
+/**
+ * cam_ife_hw_mgr_init()
+ *
+ * @brief:              Initialize the IFE hardware manager. This is the
+ *                      entry function for the IFE HW manager.
+ *
+ * @hw_mgr_intf:        IFE hardware manager object returned
+ *
+ */
+int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf);
+
+/**
+ * cam_ife_mgr_do_tasklet_buf_done()
+ *
+ * @brief:              Main tasklet handle function for the buf done event
+ *
+ * @handler_priv:       Tasklet information handle
+ * @evt_payload_priv:   Event payload for the handler function
+ *
+ */
+int cam_ife_mgr_do_tasklet_buf_done(void *handler_priv, void *evt_payload_priv);
+
+/**
+ * cam_ife_mgr_do_tasklet()
+ *
+ * @brief:              Main tasklet handle function for mux resource events
+ *
+ * @handler_priv:       Tasklet information handle
+ * @evt_payload_priv:   Event payload for the handler function
+ *
+ */
+int cam_ife_mgr_do_tasklet(void *handler_priv, void *evt_payload_priv);
+
+#endif /* _CAM_IFE_HW_MGR_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c
new file mode 100644
index 0000000..2e23222
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c
@@ -0,0 +1,35 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_isp_hw_mgr_intf.h"
+#include "cam_ife_hw_mgr.h"
+
+
+/*
+ * cam_isp_hw_mgr_init() - select and initialize the ISP hw manager
+ * implementation based on the "arch-compat" device-tree string.
+ *
+ * @of_node: ISP device node carrying the "arch-compat" property
+ * @hw_mgr:  hw manager interface filled in by the selected backend
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int cam_isp_hw_mgr_init(struct device_node *of_node,
+	struct cam_hw_mgr_intf *hw_mgr)
+{
+	int rc = 0;
+	const char *compat_str = NULL;
+
+	rc = of_property_read_string_index(of_node, "arch-compat", 0,
+		(const char **)&compat_str);
+	if (rc) {
+		/*
+		 * Without this check a missing/invalid property would leave
+		 * compat_str NULL and crash in strnstr() below.
+		 */
+		pr_err("%s: Failed to read arch-compat rc:%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	if (strnstr(compat_str, "ife", strlen(compat_str))) {
+		rc = cam_ife_hw_mgr_init(hw_mgr);
+	} else {
+		pr_err("%s: Invalid ISP hw type\n", __func__);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.h
new file mode 100644
index 0000000..2810dbd
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.h
@@ -0,0 +1,74 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_ISP_HW_MGR_H_
+#define _CAM_ISP_HW_MGR_H_
+
+#include "cam_isp_hw_mgr_intf.h"
+#include "cam_tasklet_util.h"
+
+#define CAM_ISP_HW_NUM_MAX                       4
+
+/**
+ * struct cam_isp_hw_mgr_ctx - common acquired context for managers
+ *
+ * @tasklet_info:          associated tasklet
+ * @event_cb:              call back interface to ISP context. Set during
+ *                         acquire device
+ * @cb_priv:               first argument for the call back function
+ *                         set during acquire device
+ *
+ */
+struct cam_isp_hw_mgr_ctx {
+	void                           *tasklet_info;
+	cam_hw_event_cb_func            event_cb[CAM_ISP_HW_EVENT_MAX];
+	void                           *cb_priv;
+};
+
+/**
+ * struct cam_isp_hw_mgr - ISP HW Manager common object
+ *
+ * @tasklet_pool:             Tasklet pool
+ * @img_iommu_hdl:            iommu memory handle for regular image buffer
+ * @img_iommu_hdl_secure:     iommu memory handle for secure image buffer
+ * @cmd_iommu_hdl:            iommu memory handle for regular command buffer
+ * @cmd_iommu_hdl_secure:     iommu memory handle for secure command buffer
+ * @scratch_buf_range:        scratch buffer range (not for IFE)
+ * @scratch_buf_addr:         scratch buffer address (not for IFE)
+ *
+ */
+struct cam_isp_hw_mgr {
+	void                           *tasklet_pool[CAM_CTX_MAX];
+	int                             img_iommu_hdl;
+	int                             img_iommu_hdl_secure;
+	int                             cmd_iommu_hdl;
+	int                             cmd_iommu_hdl_secure;
+	uint32_t                        scratch_buf_range;
+	dma_addr_t                      scratch_buf_addr;
+};
+
+/**
+ * struct cam_hw_event_recovery_data - Payload for the recovery procedure
+ *
+ * @error_type:               Error type that causes the recovery
+ * @affected_core:            Array of the hardware cores that are affected
+ * @affected_ctx:             Array of the hardware contexts that are affected
+ * @no_of_context:            Actual number of the affected contexts
+ *
+ */
+struct cam_hw_event_recovery_data {
+	uint32_t                   error_type;
+	uint32_t                   affected_core[CAM_ISP_HW_NUM_MAX];
+	struct cam_ife_hw_mgr_ctx *affected_ctx[CAM_CTX_MAX];
+	uint32_t                   no_of_context;
+};
+#endif /* _CAM_ISP_HW_MGR_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/Makefile
new file mode 100644
index 0000000..19da180
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/Makefile
@@ -0,0 +1,11 @@
+# Builds the ISP hw-manager helper objects (tasklet utils and packet
+# parser) plus the irq_controller sub-directory.
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_tasklet_util.o cam_isp_packet_parser.o
+obj-$(CONFIG_SPECTRA_CAMERA) += irq_controller/
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
new file mode 100644
index 0000000..b608320
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
@@ -0,0 +1,608 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <uapi/media/cam_defs.h>
+#include <uapi/media/cam_isp.h>
+#include "cam_mem_mgr.h"
+#include "cam_vfe_hw_intf.h"
+#include "cam_isp_packet_parser.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+/*
+ * cam_isp_get_cmd_mem_addr() - resolve a mem-mgr handle to a kernel
+ * virtual address and length.
+ *
+ * @handle:   mem-mgr buffer handle
+ * @buf_addr: out, CPU address of the buffer
+ * @len:      out, buffer length in bytes
+ *
+ * Returns 0 on success, -ENOMEM when the handle cannot be mapped or maps
+ * to a NULL address / zero length.
+ */
+static int cam_isp_get_cmd_mem_addr(int handle, uint32_t **buf_addr,
+	size_t *len)
+{
+	int rc = 0;
+	uint64_t kmd_buf_addr = 0;
+
+	rc = cam_mem_get_cpu_buf(handle, &kmd_buf_addr, len);
+	if (rc) {
+		pr_err("%s:%d Unable to get the virtual address rc:%d\n",
+			__func__, __LINE__, rc);
+		return -ENOMEM;
+	}
+
+	if (!kmd_buf_addr || !*len) {
+		/* %zu is the correct printk specifier for size_t */
+		pr_err("%s:%d Invalid addr and length :%zu\n",
+			__func__, __LINE__, *len);
+		return -ENOMEM;
+	}
+
+	*buf_addr = (uint32_t *)kmd_buf_addr;
+	return 0;
+}
+
+/*
+ * cam_isp_validate_cmd_desc() - sanity-check one command buffer
+ * descriptor: length must fit inside the buffer size and the mem handle
+ * must be valid.  Returns 0 if the descriptor is usable, -EINVAL
+ * otherwise.
+ */
+static int cam_isp_validate_cmd_desc(
+	struct cam_cmd_buf_desc *cmd_desc)
+{
+	if (cmd_desc->length > cmd_desc->size ||
+		(cmd_desc->mem_handle <= 0)) {
+		pr_err("%s:%d invalid cmd arg %d %d %d %d\n",
+			__func__, __LINE__, cmd_desc->offset,
+			cmd_desc->length, cmd_desc->mem_handle,
+			cmd_desc->size);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * cam_isp_validate_packet() - sanity-check a UMD packet: the KMD command
+ * buffer index must reference an existing command buffer, and the command
+ * and io-config offsets must fall inside the declared header size.
+ * Returns 0 when the packet is usable, -EINVAL otherwise.
+ */
+int cam_isp_validate_packet(struct cam_packet *packet)
+{
+	if (!packet)
+		return -EINVAL;
+
+	CDBG("%s:%d num cmd buf:%d num of io config:%d kmd buf index:%d\n",
+		__func__, __LINE__, packet->num_cmd_buf,
+		packet->num_io_configs, packet->kmd_cmd_buf_index);
+
+	if (packet->kmd_cmd_buf_index >= packet->num_cmd_buf ||
+		(!packet->header.size) ||
+		packet->cmd_buf_offset > packet->header.size ||
+		packet->io_configs_offset > packet->header.size)  {
+		pr_err("%s:%d invalid packet:%d %d %d %d %d\n",
+			__func__, __LINE__, packet->kmd_cmd_buf_index,
+			packet->num_cmd_buf, packet->cmd_buf_offset,
+			packet->io_configs_offset, packet->header.size);
+		return -EINVAL;
+	}
+
+	CDBG("%s:%d exit\n", __func__, __LINE__);
+	return 0;
+}
+
+/*
+ * cam_isp_get_kmd_buffer() - locate the KMD-reserved region inside the
+ * packet's designated command buffer and fill @kmd_buf with its CPU
+ * address, handle, offset, and remaining size.
+ *
+ * @packet:  validated UMD packet (see cam_isp_validate_packet())
+ * @kmd_buf: out, KMD scratch buffer descriptor
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int cam_isp_get_kmd_buffer(struct cam_packet *packet,
+	struct cam_isp_kmd_buf_info *kmd_buf)
+{
+	int                      rc = 0;
+	size_t                   len = 0;
+	struct cam_cmd_buf_desc *cmd_desc;
+	uint32_t                *cpu_addr;
+
+	if (!packet || !kmd_buf) {
+		pr_err("%s:%d Invalid arg\n", __func__, __LINE__);
+		rc = -EINVAL;
+		return rc;
+	}
+
+	/* Take first command descriptor and add offset to it for kmd*/
+	cmd_desc = (struct cam_cmd_buf_desc *) ((uint8_t *)
+			&packet->payload + packet->cmd_buf_offset);
+	cmd_desc += packet->kmd_cmd_buf_index;
+
+	CDBG("%s:%d enter\n", __func__, __LINE__);
+	rc = cam_isp_validate_cmd_desc(cmd_desc);
+	if (rc)
+		return rc;
+
+	CDBG("%s:%d enter\n", __func__, __LINE__);
+	rc = cam_isp_get_cmd_mem_addr(cmd_desc->mem_handle, &cpu_addr,
+		&len);
+	if (rc)
+		return rc;
+
+	if (len < cmd_desc->size) {
+		/* %zu is the correct printk specifier for size_t */
+		pr_err("%s:%d invalid memory len:%zu and cmd desc size:%d\n",
+			__func__, __LINE__, len, cmd_desc->size);
+		return -EINVAL;
+	}
+
+	/* offsets are byte counts; cpu_addr is a uint32_t pointer */
+	cpu_addr += cmd_desc->offset/4 + packet->kmd_cmd_buf_offset/4;
+	CDBG("%s:%d total size %d, cmd size: %d, KMD buffer size: %d\n",
+		__func__, __LINE__, cmd_desc->size, cmd_desc->length,
+		cmd_desc->size - cmd_desc->length);
+	CDBG("%s:%d: handle 0x%x, cmd offset %d, kmd offset %d, addr 0x%pK\n",
+		__func__, __LINE__, cmd_desc->mem_handle, cmd_desc->offset,
+		packet->kmd_cmd_buf_offset, cpu_addr);
+
+	kmd_buf->cpu_addr   = cpu_addr;
+	kmd_buf->handle     = cmd_desc->mem_handle;
+	kmd_buf->offset     = cmd_desc->offset + packet->kmd_cmd_buf_offset;
+	kmd_buf->size       = cmd_desc->size - cmd_desc->length;
+	kmd_buf->used_bytes = 0;
+
+	return rc;
+}
+
+/*
+ * cam_isp_add_change_base() - generate a CDM change-base command for the
+ * ISP source resource running on core @base_idx and append it as one hw
+ * update entry, consuming space from @kmd_buf_info.
+ *
+ * Returns 0 once the first matching resource has been processed (only one
+ * change-base entry is emitted per call), a negative errno on failure, or
+ * -EINVAL if no resource on @base_idx was found.
+ */
+int cam_isp_add_change_base(
+	struct cam_hw_prepare_update_args      *prepare,
+	struct list_head                       *res_list_isp_src,
+	uint32_t                                base_idx,
+	struct cam_isp_kmd_buf_info            *kmd_buf_info)
+{
+	int rc = -EINVAL;
+	struct cam_ife_hw_mgr_res       *hw_mgr_res;
+	struct cam_isp_resource_node    *res;
+	struct cam_isp_hw_get_cdm_args   get_base;
+	struct cam_hw_update_entry      *hw_entry;
+	uint32_t                         num_ent, i;
+
+	hw_entry = prepare->hw_update_entries;
+	num_ent = prepare->num_hw_update_entries;
+
+	/* Max one hw entries required for each base */
+	if (num_ent + 1 >= prepare->max_hw_update_entries) {
+		pr_err("%s:%d Insufficient  HW entries :%d %d\n",
+			__func__, __LINE__, num_ent,
+			prepare->max_hw_update_entries);
+		return -EINVAL;
+	}
+
+	list_for_each_entry(hw_mgr_res, res_list_isp_src, list) {
+		if (hw_mgr_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
+			continue;
+
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!hw_mgr_res->hw_res[i])
+				continue;
+
+			res = hw_mgr_res->hw_res[i];
+			if (res->hw_intf->hw_idx != base_idx)
+				continue;
+
+			/* point the hw at the unused tail of the KMD buffer */
+			get_base.res  = res;
+			get_base.cmd_buf_addr = kmd_buf_info->cpu_addr +
+				kmd_buf_info->used_bytes/4;
+			get_base.size  = kmd_buf_info->size -
+					kmd_buf_info->used_bytes;
+
+			rc = res->hw_intf->hw_ops.process_cmd(
+				res->hw_intf->hw_priv,
+				CAM_VFE_HW_CMD_GET_CHANGE_BASE, &get_base,
+				sizeof(struct cam_isp_hw_get_cdm_args));
+			if (rc)
+				return rc;
+
+			hw_entry[num_ent].handle = kmd_buf_info->handle;
+			hw_entry[num_ent].len    = get_base.used_bytes;
+			hw_entry[num_ent].offset = kmd_buf_info->offset;
+
+			kmd_buf_info->used_bytes += get_base.used_bytes;
+			kmd_buf_info->offset     += get_base.used_bytes;
+			num_ent++;
+			prepare->num_hw_update_entries = num_ent;
+
+			/* return success */
+			return 0;
+		}
+	}
+
+	return rc;
+}
+
+
+/*
+ * cam_isp_add_command_buffers() - walk the packet's command buffer
+ * descriptors and append the ones matching @split_id (left/right) plus
+ * all common buffers as hw update entries.  DMI meta types additionally
+ * set entry flags to 0x1.  Returns 0 on success, -EINVAL on a bad
+ * descriptor, unknown meta type, or hw-entry overflow.
+ */
+int cam_isp_add_command_buffers(
+	struct cam_hw_prepare_update_args  *prepare,
+	uint32_t                            split_id)
+{
+	int rc = 0;
+	uint32_t  cmd_meta_data, num_ent, i;
+	struct cam_cmd_buf_desc       *cmd_desc = NULL;
+	struct cam_hw_update_entry    *hw_entry;
+
+	hw_entry = prepare->hw_update_entries;
+	num_ent = prepare->num_hw_update_entries;
+	/*
+	 * set the cmd_desc to point the first command descriptor in the
+	 * packet
+	 */
+	cmd_desc = (struct cam_cmd_buf_desc *)
+			((uint8_t *)&prepare->packet->payload +
+			prepare->packet->cmd_buf_offset);
+
+	CDBG("%s:%d split id = %d, number of command buffers:%d\n", __func__,
+		__LINE__, split_id, prepare->packet->num_cmd_buf);
+
+	for (i = 0; i < prepare->packet->num_cmd_buf; i++) {
+		if (!cmd_desc[i].length)
+			continue;
+
+		/* One hw entry space required for left or right or common */
+		if (num_ent + 1 >= prepare->max_hw_update_entries) {
+			pr_err("%s:%d Insufficient  HW entries :%d %d\n",
+				__func__, __LINE__, num_ent,
+				prepare->max_hw_update_entries);
+			return -EINVAL;
+		}
+
+		rc = cam_isp_validate_cmd_desc(&cmd_desc[i]);
+		if (rc)
+			return rc;
+
+		cmd_meta_data = cmd_desc[i].meta_data;
+
+		CDBG("%s:%d meta type: %d, split_id: %d\n", __func__, __LINE__,
+			cmd_meta_data, split_id);
+
+		switch (cmd_meta_data) {
+		case CAM_ISP_PACKET_META_BASE:
+		case CAM_ISP_PACKET_META_LEFT:
+		case CAM_ISP_PACKET_META_DMI_LEFT:
+			/* left-only buffers: taken only for the left split */
+			if (split_id == CAM_ISP_HW_SPLIT_LEFT) {
+				hw_entry[num_ent].len = cmd_desc[i].length;
+				hw_entry[num_ent].handle =
+					cmd_desc[i].mem_handle;
+				hw_entry[num_ent].offset = cmd_desc[i].offset;
+
+				if (cmd_meta_data ==
+					CAM_ISP_PACKET_META_DMI_LEFT)
+					hw_entry[num_ent].flags = 0x1;
+
+				num_ent++;
+			}
+			break;
+		case CAM_ISP_PACKET_META_RIGHT:
+		case CAM_ISP_PACKET_META_DMI_RIGHT:
+			if (split_id == CAM_ISP_HW_SPLIT_RIGHT) {
+				hw_entry[num_ent].len = cmd_desc[i].length;
+				hw_entry[num_ent].handle =
+					cmd_desc[i].mem_handle;
+				hw_entry[num_ent].offset = cmd_desc[i].offset;
+
+				if (cmd_meta_data ==
+					CAM_ISP_PACKET_META_DMI_RIGHT)
+					hw_entry[num_ent].flags = 0x1;
+				num_ent++;
+			}
+			break;
+		case CAM_ISP_PACKET_META_COMMON:
+		case CAM_ISP_PACKET_META_DMI_COMMON:
+			/* common buffers: taken regardless of split_id */
+			hw_entry[num_ent].len = cmd_desc[i].length;
+			hw_entry[num_ent].handle =
+				cmd_desc[i].mem_handle;
+			hw_entry[num_ent].offset = cmd_desc[i].offset;
+
+			if (cmd_meta_data == CAM_ISP_PACKET_META_DMI_COMMON)
+				hw_entry[num_ent].flags = 0x1;
+
+			num_ent++;
+			break;
+		default:
+			pr_err("%s:%d invalid cdm command meta data %d\n",
+			__func__, __LINE__, cmd_meta_data);
+			return -EINVAL;
+		}
+	}
+
+	prepare->num_hw_update_entries = num_ent;
+
+	return rc;
+}
+
+
+int cam_isp_add_io_buffers(
+	int                                   iommu_hdl,
+	struct cam_hw_prepare_update_args    *prepare,
+	uint32_t                              base_idx,
+	struct cam_isp_kmd_buf_info          *kmd_buf_info,
+	struct cam_ife_hw_mgr_res            *res_list_isp_out,
+	uint32_t                              size_isp_out,
+	bool                                  fill_fence)
+{
+	int rc = 0;
+	uint64_t                            io_addr[CAM_PACKET_MAX_PLANES];
+	struct cam_buf_io_cfg              *io_cfg;
+	struct cam_isp_resource_node       *res;
+	struct cam_ife_hw_mgr_res          *hw_mgr_res;
+	struct cam_isp_hw_get_buf_update    update_buf;
+	uint32_t kmd_buf_remain_size,  i, j, k, out_buf, in_buf,
+		res_id_out, res_id_in, num_plane, io_cfg_used_bytes, num_ent;
+	size_t size;
+
+	io_cfg = (struct cam_buf_io_cfg *) ((uint8_t *)
+			&prepare->packet->payload +
+			prepare->packet->io_configs_offset);
+	out_buf = 0;
+	in_buf  = 0;
+	io_cfg_used_bytes = 0;
+
+	/* Max one hw entries required for each base */
+	if (prepare->num_hw_update_entries + 1 >=
+			prepare->max_hw_update_entries) {
+		pr_err("%s:%d Insufficient  HW entries :%d %d\n",
+			__func__, __LINE__, prepare->num_hw_update_entries,
+			prepare->max_hw_update_entries);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < prepare->packet->num_io_configs; i++) {
+		CDBG("%s:%d ======= io config idx %d ============\n",
+			__func__, __LINE__, i);
+		CDBG("%s:%d resource_type:%d fence:%d\n", __func__, __LINE__,
+			io_cfg[i].resource_type, io_cfg[i].fence);
+		CDBG("%s:%d format: %d\n", __func__, __LINE__,
+			io_cfg[i].format);
+		CDBG("%s:%d direction %d\n", __func__, __LINE__,
+			io_cfg[i].direction);
+
+		if (io_cfg[i].direction == CAM_BUF_OUTPUT) {
+			res_id_out = io_cfg[i].resource_type & 0xFF;
+			if (res_id_out >= size_isp_out) {
+				pr_err("%s:%d invalid out restype:%x\n",
+					__func__, __LINE__,
+					io_cfg[i].resource_type);
+				return -EINVAL;
+			}
+
+			CDBG("%s:%d configure output io with fill fence %d\n",
+				__func__, __LINE__, fill_fence);
+			if (fill_fence) {
+				if (out_buf < prepare->max_out_map_entries) {
+					prepare->out_map_entries[out_buf].
+						resource_handle =
+							io_cfg[i].resource_type;
+					prepare->out_map_entries[out_buf].
+						sync_id = io_cfg[i].fence;
+					out_buf++;
+				} else {
+					pr_err("%s:%d ln_out:%d max_ln:%d\n",
+						__func__, __LINE__,
+						out_buf,
+						prepare->max_out_map_entries);
+					return -EINVAL;
+				}
+			}
+
+			hw_mgr_res = &res_list_isp_out[res_id_out];
+			if (hw_mgr_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT) {
+				pr_err("%s:%d io res id:%d not valid\n",
+					__func__, __LINE__,
+					io_cfg[i].resource_type);
+				return -EINVAL;
+			}
+		} else if (io_cfg[i].direction == CAM_BUF_INPUT) {
+			res_id_in = io_cfg[i].resource_type & 0xFF;
+			CDBG("%s:%d configure input io with fill fence %d\n",
+				__func__, __LINE__, fill_fence);
+			if (fill_fence) {
+				if (in_buf < prepare->max_in_map_entries) {
+					prepare->in_map_entries[in_buf].
+						resource_handle =
+							io_cfg[i].resource_type;
+					prepare->in_map_entries[in_buf].
+						sync_id =
+							io_cfg[i].fence;
+					in_buf++;
+				} else {
+					pr_err("%s:%d ln_in:%d imax_ln:%d\n",
+						__func__, __LINE__,
+						in_buf,
+						prepare->max_in_map_entries);
+					return -EINVAL;
+				}
+			}
+			/*TO DO get the input FE address and add to list */
+			continue;
+		} else {
+			pr_err("%s:%d Invalid io config direction :%d\n",
+				__func__, __LINE__,
+				io_cfg[i].direction);
+			return -EINVAL;
+		}
+
+		CDBG("%s:%d setup mem io\n", __func__, __LINE__);
+		for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) {
+			if (!hw_mgr_res->hw_res[j])
+				continue;
+
+			if (hw_mgr_res->hw_res[j]->hw_intf->hw_idx != base_idx)
+				continue;
+
+			res = hw_mgr_res->hw_res[j];
+			if (res->res_id != io_cfg[i].resource_type) {
+				pr_err("%s:%d wm err res id:%d io res id:%d\n",
+					__func__, __LINE__, res->res_id,
+					io_cfg[i].resource_type);
+				return -EINVAL;
+			}
+
+			memset(io_addr, 0, sizeof(io_addr));
+			num_plane = 0;
+			for (k = 0; k < CAM_PACKET_MAX_PLANES; k++) {
+				if (!io_cfg[i].mem_handle[k])
+					continue;
+
+				rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[k],
+					iommu_hdl, &io_addr[num_plane], &size);
+				if (rc) {
+					pr_err("%s:%d no io addr for plane%d\n",
+						__func__, __LINE__, k);
+					rc = -ENOMEM;
+					return rc;
+				}
+				/* need to update with offset */
+				io_addr[num_plane] += io_cfg->offsets[k];
+				CDBG("%s: get io_addr for plane %d: 0x%llx\n",
+					__func__, num_plane,
+					io_addr[num_plane]);
+				num_plane++;
+			}
+			if (!num_plane) {
+				pr_err("%s:%d No valid planes for res%d\n",
+					__func__, __LINE__, res->res_id);
+				rc = -ENOMEM;
+				return rc;
+			}
+
+			if ((kmd_buf_info->used_bytes + io_cfg_used_bytes) <
+				kmd_buf_info->size) {
+				kmd_buf_remain_size = kmd_buf_info->size -
+					(kmd_buf_info->used_bytes +
+					io_cfg_used_bytes);
+			} else {
+				pr_err("%s:%d no free kmd memory for base %d\n",
+					__func__, __LINE__, base_idx);
+				rc = -ENOMEM;
+				return rc;
+			}
+			update_buf.cdm.res = res;
+			update_buf.cdm.cmd_buf_addr = kmd_buf_info->cpu_addr +
+				kmd_buf_info->used_bytes/4 +
+					io_cfg_used_bytes/4;
+			update_buf.cdm.size = kmd_buf_remain_size;
+			update_buf.image_buf = io_addr;
+			update_buf.num_buf   = num_plane;
+
+			CDBG("%s:%d: cmd buffer 0x%pK, size %d\n", __func__,
+				__LINE__, update_buf.cdm.cmd_buf_addr,
+				update_buf.cdm.size);
+			rc = res->hw_intf->hw_ops.process_cmd(
+				res->hw_intf->hw_priv,
+				CAM_VFE_HW_CMD_GET_BUF_UPDATE, &update_buf,
+				sizeof(struct cam_isp_hw_get_buf_update));
+
+			if (rc) {
+				pr_err("%s:%d get buf cmd error:%d\n",
+					__func__, __LINE__, res->res_id);
+				rc = -ENOMEM;
+				return rc;
+			}
+			io_cfg_used_bytes += update_buf.cdm.used_bytes;
+		}
+	}
+
+	CDBG("%s: io_cfg_used_bytes %d, fill_fence %d\n", __func__,
+		io_cfg_used_bytes, fill_fence);
+	if (io_cfg_used_bytes) {
+		/* Update the HW entries */
+		num_ent = prepare->num_hw_update_entries;
+		prepare->hw_update_entries[num_ent].handle =
+					kmd_buf_info->handle;
+		prepare->hw_update_entries[num_ent].len = io_cfg_used_bytes;
+		prepare->hw_update_entries[num_ent].offset =
+			kmd_buf_info->offset;
+		num_ent++;
+
+		kmd_buf_info->used_bytes += io_cfg_used_bytes;
+		kmd_buf_info->offset     += io_cfg_used_bytes;
+		prepare->num_hw_update_entries = num_ent;
+	}
+
+	if (fill_fence) {
+		prepare->num_out_map_entries = out_buf;
+		prepare->num_in_map_entries  = in_buf;
+	}
+
+	return rc;
+}
+
+
+/*
+ * cam_isp_add_reg_update()
+ *
+ * Walk the ISP source resource list and, for every initialized resource on
+ * the given base (hw_idx), ask the HW layer to write a reg-update command
+ * into the KMD buffer. On success a single HW update entry covering all
+ * generated commands is appended to @prepare.
+ *
+ * Returns 0 on success; -EINVAL if no HW entry slot is free, no KMD buffer
+ * space remains, or no matching resource produced a reg update.
+ */
+int cam_isp_add_reg_update(
+	struct cam_hw_prepare_update_args    *prepare,
+	struct list_head                     *res_list_isp_src,
+	uint32_t                              base_idx,
+	struct cam_isp_kmd_buf_info          *kmd_buf_info)
+{
+	int rc = -EINVAL;
+	struct cam_isp_resource_node         *res;
+	struct cam_ife_hw_mgr_res            *hw_mgr_res;
+	struct cam_isp_hw_get_cdm_args        get_regup;
+	uint32_t kmd_buf_remain_size, num_ent, i, reg_update_size;
+
+	/* Max one hw entries required for each base */
+	if (prepare->num_hw_update_entries + 1 >=
+				prepare->max_hw_update_entries) {
+		pr_err("%s:%d Insufficient  HW entries :%d %d\n",
+			__func__, __LINE__,
+			prepare->num_hw_update_entries,
+			prepare->max_hw_update_entries);
+		return -EINVAL;
+	}
+
+	reg_update_size = 0;
+	list_for_each_entry(hw_mgr_res, res_list_isp_src, list) {
+		if (hw_mgr_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
+			continue;
+
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!hw_mgr_res->hw_res[i])
+				continue;
+
+			res = hw_mgr_res->hw_res[i];
+			/* Only resources on the requested base contribute */
+			if (res->hw_intf->hw_idx != base_idx)
+				continue;
+
+			if (kmd_buf_info->size > (kmd_buf_info->used_bytes +
+				reg_update_size)) {
+				kmd_buf_remain_size =  kmd_buf_info->size -
+					(kmd_buf_info->used_bytes +
+					reg_update_size);
+			} else {
+				pr_err("%s:%d no free mem %d %d %d\n",
+					__func__, __LINE__, base_idx,
+					kmd_buf_info->size,
+					kmd_buf_info->used_bytes +
+					reg_update_size);
+				rc = -EINVAL;
+				return rc;
+			}
+
+			/* cpu_addr is a uint32_t pointer, hence byte/4 */
+			get_regup.cmd_buf_addr = kmd_buf_info->cpu_addr +
+				kmd_buf_info->used_bytes/4 +
+				reg_update_size/4;
+			get_regup.size = kmd_buf_remain_size;
+			get_regup.res = res;
+
+			rc = res->hw_intf->hw_ops.process_cmd(
+				res->hw_intf->hw_priv,
+				CAM_VFE_HW_CMD_GET_REG_UPDATE, &get_regup,
+				sizeof(struct cam_isp_hw_get_cdm_args));
+			if (rc)
+				return rc;
+
+			reg_update_size += get_regup.used_bytes;
+		}
+	}
+
+	if (reg_update_size) {
+		/* Update the HW entries */
+		num_ent = prepare->num_hw_update_entries;
+		prepare->hw_update_entries[num_ent].handle =
+					kmd_buf_info->handle;
+		prepare->hw_update_entries[num_ent].len = reg_update_size;
+		prepare->hw_update_entries[num_ent].offset =
+			kmd_buf_info->offset;
+		num_ent++;
+
+		kmd_buf_info->used_bytes += reg_update_size;
+		kmd_buf_info->offset     += reg_update_size;
+		prepare->num_hw_update_entries = num_ent;
+		/* reg update is success return status 0 */
+		rc = 0;
+	}
+
+	return rc;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c
new file mode 100644
index 0000000..ecc71b3
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c
@@ -0,0 +1,322 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/ratelimit.h>
+#include "cam_tasklet_util.h"
+#include "cam_irq_controller.h"
+
+#undef  CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+#define CAM_TASKLETQ_SIZE              256
+
+static void cam_tasklet_action(unsigned long data);
+
+/**
+ * struct cam_tasklet_queue_cmd:
+ * @Brief:                  Structure associated with each slot in the
+ *                          tasklet queue
+ *
+ * @list:                   list_head member for each entry in queue
+ * @payload:                Payload structure for the event. This will be
+ *                          passed to the handler function
+ * @bottom_half_handler:    Function pointer for event handler in bottom
+ *                          half context
+ *
+ */
+struct cam_tasklet_queue_cmd {
+	struct list_head                   list; /* node in free_cmd_list or used_cmd_list */
+	void                              *payload;
+	CAM_IRQ_HANDLER_BOTTOM_HALF        bottom_half_handler;
+};
+
+/**
+ * struct cam_tasklet_info:
+ * @Brief:                  Tasklet private structure
+ *
+ * @list:                   list_head member for each tasklet
+ * @index:                  Instance id for the tasklet
+ * @tasklet_lock:           Spin lock protecting free_cmd_list and
+ *                          used_cmd_list (taken with irqsave)
+ * @tasklet_active:         Atomic variable to control tasklet state
+ * @tasklet:                Tasklet structure used to schedule bottom half
+ * @free_cmd_list:          List of free tasklet queue cmd for use
+ * @used_cmd_list:          List of used tasklet queue cmd
+ * @cmd_queue:              Array of tasklet cmd for storage
+ * @ctx_priv:               Private data passed to the handling function
+ *
+ */
+struct cam_tasklet_info {
+	struct list_head                   list;
+	uint32_t                           index;
+	spinlock_t                         tasklet_lock;
+	atomic_t                           tasklet_active;
+	struct tasklet_struct              tasklet;
+
+	/* All cmd_queue slots live on exactly one of these two lists */
+	struct list_head                   free_cmd_list;
+	struct list_head                   used_cmd_list;
+	struct cam_tasklet_queue_cmd       cmd_queue[CAM_TASKLETQ_SIZE];
+
+	void                              *ctx_priv;
+};
+
+/**
+ * cam_tasklet_get_cmd()
+ *
+ * @brief:              Pop a free cmd slot off the tasklet's free list
+ *
+ * @tasklet:            Tasklet Info structure to get cmd from
+ * @tasklet_cmd:        Set to the acquired cmd on success, NULL otherwise
+ *
+ * @return:             0: Success
+ *                      Negative: Failure (-EPIPE inactive, -ENODEV empty)
+ */
+static int cam_tasklet_get_cmd(
+	struct cam_tasklet_info        *tasklet,
+	struct cam_tasklet_queue_cmd  **tasklet_cmd)
+{
+	unsigned long flags;
+	int           rc = 0;
+
+	*tasklet_cmd = NULL;
+
+	/* Reject requests once the tasklet has been stopped */
+	if (!atomic_read(&tasklet->tasklet_active)) {
+		pr_err_ratelimited("Tasklet is not active!\n");
+		return -EPIPE;
+	}
+
+	spin_lock_irqsave(&tasklet->tasklet_lock, flags);
+	if (list_empty(&tasklet->free_cmd_list)) {
+		pr_err_ratelimited("No more free tasklet cmd!\n");
+		rc = -ENODEV;
+	} else {
+		*tasklet_cmd = list_first_entry(&tasklet->free_cmd_list,
+			struct cam_tasklet_queue_cmd, list);
+		list_del_init(&(*tasklet_cmd)->list);
+	}
+	spin_unlock_irqrestore(&tasklet->tasklet_lock, flags);
+
+	return rc;
+}
+
+/**
+ * cam_tasklet_put_cmd()
+ *
+ * @brief:              Return a cmd slot to the tasklet's free list
+ *
+ * @tasklet:            Tasklet Info structure to put cmd into
+ * @tasklet_cmd:        tasklet_cmd pointer that needs to be put back
+ *
+ * @return:             Void
+ */
+static void cam_tasklet_put_cmd(
+	struct cam_tasklet_info        *tasklet,
+	struct cam_tasklet_queue_cmd  **tasklet_cmd)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&tasklet->tasklet_lock, irq_flags);
+	list_add_tail(&(*tasklet_cmd)->list, &tasklet->free_cmd_list);
+	spin_unlock_irqrestore(&tasklet->tasklet_lock, irq_flags);
+}
+
+/**
+ * cam_tasklet_dequeue_cmd()
+ *
+ * @brief:              Dequeue the next pending cmd from the used list.
+ *                      Called from tasklet (bottom half) context.
+ *
+ * @tasklet:            Tasklet Info structure to dequeue cmd from
+ * @tasklet_cmd:        Set to the dequeued cmd on success, NULL otherwise
+ *
+ * @return:             0: Success
+ *                      Negative: Failure (-EPIPE if tasklet is inactive,
+ *                      -ENODEV if the used list is empty)
+ */
+static int cam_tasklet_dequeue_cmd(
+	struct cam_tasklet_info        *tasklet,
+	struct cam_tasklet_queue_cmd  **tasklet_cmd)
+{
+	int rc = 0;
+	unsigned long flags;
+
+	*tasklet_cmd = NULL;
+
+	if (!atomic_read(&tasklet->tasklet_active)) {
+		pr_err("Tasklet is not active!\n");
+		rc = -EPIPE;
+		return rc;
+	}
+
+	CDBG("Dequeue before lock.\n");
+	spin_lock_irqsave(&tasklet->tasklet_lock, flags);
+	if (list_empty(&tasklet->used_cmd_list)) {
+		CDBG("End of list reached. Exit\n");
+		rc = -ENODEV;
+		goto spin_unlock;
+	} else {
+		*tasklet_cmd = list_first_entry(&tasklet->used_cmd_list,
+			struct cam_tasklet_queue_cmd, list);
+		list_del_init(&(*tasklet_cmd)->list);
+		CDBG("Dequeue Successful\n");
+	}
+
+spin_unlock:
+	spin_unlock_irqrestore(&tasklet->tasklet_lock, flags);
+	return rc;
+}
+
+/*
+ * cam_tasklet_enqueue_cmd()
+ *
+ * Grab a free cmd slot, fill it with the event payload and bottom half
+ * handler, append it to the used list and schedule the tasklet. Safe to
+ * call from IRQ (top half) context.
+ *
+ * Note: handler_priv is currently unused; the ctx_priv stored at init time
+ * is passed to the handler instead (see cam_tasklet_action).
+ *
+ * Returns 0 on success; negative error from cam_tasklet_get_cmd otherwise.
+ */
+int cam_tasklet_enqueue_cmd(
+	void                              *bottom_half,
+	void                              *handler_priv,
+	void                              *evt_payload_priv,
+	CAM_IRQ_HANDLER_BOTTOM_HALF        bottom_half_handler)
+{
+	struct cam_tasklet_info       *tasklet = bottom_half;
+	struct cam_tasklet_queue_cmd  *tasklet_cmd = NULL;
+	unsigned long                  flags;
+	int                            rc;
+
+	if (!bottom_half) {
+		pr_err("NULL bottom half\n");
+		return -EINVAL;
+	}
+
+	rc = cam_tasklet_get_cmd(tasklet, &tasklet_cmd);
+
+	if (tasklet_cmd) {
+		/* pr_fmt already prefixes function name and line */
+		CDBG("Enqueue tasklet cmd\n");
+		tasklet_cmd->bottom_half_handler = bottom_half_handler;
+		tasklet_cmd->payload = evt_payload_priv;
+		spin_lock_irqsave(&tasklet->tasklet_lock, flags);
+		list_add_tail(&tasklet_cmd->list,
+			&tasklet->used_cmd_list);
+		spin_unlock_irqrestore(&tasklet->tasklet_lock, flags);
+		tasklet_schedule(&tasklet->tasklet);
+	} else {
+		pr_err("tasklet cmd is NULL!\n");
+	}
+
+	return rc;
+}
+
+/*
+ * cam_tasklet_init()
+ *
+ * Allocate and initialize a tasklet instance: all CAM_TASKLETQ_SIZE cmd
+ * slots start on the free list and the tasklet is created in the stopped
+ * (disabled) state; the caller must invoke cam_tasklet_start() before
+ * enqueueing. On success *tasklet_info holds the new instance.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ */
+int cam_tasklet_init(
+	void                    **tasklet_info,
+	void                     *hw_mgr_ctx,
+	uint32_t                  idx)
+{
+	int i;
+	struct cam_tasklet_info  *tasklet = NULL;
+
+	tasklet = kzalloc(sizeof(struct cam_tasklet_info), GFP_KERNEL);
+	if (!tasklet) {
+		pr_err("Error! Unable to allocate memory for tasklet\n");
+		*tasklet_info = NULL;
+		return -ENOMEM;
+	}
+
+	tasklet->ctx_priv = hw_mgr_ctx;
+	tasklet->index = idx;
+	spin_lock_init(&tasklet->tasklet_lock);
+	/* cmd_queue is already zeroed by kzalloc */
+	INIT_LIST_HEAD(&tasklet->free_cmd_list);
+	INIT_LIST_HEAD(&tasklet->used_cmd_list);
+	for (i = 0; i < CAM_TASKLETQ_SIZE; i++) {
+		INIT_LIST_HEAD(&tasklet->cmd_queue[i].list);
+		list_add_tail(&tasklet->cmd_queue[i].list,
+			&tasklet->free_cmd_list);
+	}
+	tasklet_init(&tasklet->tasklet, cam_tasklet_action,
+		(unsigned long)tasklet);
+	/* Start disabled; cam_tasklet_start() enables scheduling */
+	cam_tasklet_stop(tasklet);
+
+	*tasklet_info = tasklet;
+
+	return 0;
+}
+
+/*
+ * cam_tasklet_deinit()
+ *
+ * Deactivate and free a tasklet instance created by cam_tasklet_init().
+ * tasklet_kill() waits for any in-flight run to finish before the memory
+ * is released. Safe to call with a NULL or already-deinited handle.
+ */
+void cam_tasklet_deinit(void    **tasklet_info)
+{
+	struct cam_tasklet_info *tasklet;
+
+	/* Guard against NULL handle or double deinit */
+	if (!tasklet_info || !*tasklet_info)
+		return;
+
+	tasklet = *tasklet_info;
+	atomic_set(&tasklet->tasklet_active, 0);
+	tasklet_kill(&tasklet->tasklet);
+	kfree(tasklet);
+	*tasklet_info = NULL;
+}
+
+/*
+ * cam_tasklet_start()
+ *
+ * Flush any stale cmds left on the used list back to the free list, mark
+ * the tasklet active and enable scheduling. The flush is done under
+ * tasklet_lock and BEFORE setting tasklet_active so a concurrent
+ * cam_tasklet_enqueue_cmd() from IRQ context cannot race with the list
+ * manipulation.
+ *
+ * Returns 0 on success, -EBUSY if already active.
+ */
+int cam_tasklet_start(void  *tasklet_info)
+{
+	struct cam_tasklet_info       *tasklet = tasklet_info;
+	struct cam_tasklet_queue_cmd  *tasklet_cmd;
+	struct cam_tasklet_queue_cmd  *tasklet_cmd_temp;
+	unsigned long                  flags;
+
+	if (atomic_read(&tasklet->tasklet_active)) {
+		pr_err("Tasklet already active. idx = %d\n", tasklet->index);
+		return -EBUSY;
+	}
+
+	/* flush the command queue first */
+	spin_lock_irqsave(&tasklet->tasklet_lock, flags);
+	list_for_each_entry_safe(tasklet_cmd, tasklet_cmd_temp,
+		&tasklet->used_cmd_list, list) {
+		list_del_init(&tasklet_cmd->list);
+		list_add_tail(&tasklet_cmd->list, &tasklet->free_cmd_list);
+	}
+	spin_unlock_irqrestore(&tasklet->tasklet_lock, flags);
+
+	atomic_set(&tasklet->tasklet_active, 1);
+
+	tasklet_enable(&tasklet->tasklet);
+
+	return 0;
+}
+
+void cam_tasklet_stop(void  *tasklet_info)
+{
+	struct cam_tasklet_info  *tasklet = tasklet_info;
+
+	/* Mark inactive first so enqueue/dequeue reject new work,
+	 * then disable scheduling of the tasklet itself.
+	 */
+	atomic_set(&tasklet->tasklet_active, 0);
+	tasklet_disable(&tasklet->tasklet);
+}
+
+/*
+ * cam_tasklet_action()
+ *
+ * @brief:              Tasklet callback; drains the used cmd list, invoking
+ *                      each cmd's bottom half handler and recycling the slot
+ *
+ * @data:               Tasklet Info structure that is passed in tasklet_init
+ *
+ * @return:             Void
+ */
+static void cam_tasklet_action(unsigned long data)
+{
+	struct cam_tasklet_info       *tasklet =
+		(struct cam_tasklet_info *)data;
+	struct cam_tasklet_queue_cmd  *cmd = NULL;
+
+	/* Process every queued event, returning each slot when done */
+	while (cam_tasklet_dequeue_cmd(tasklet, &cmd) == 0) {
+		cmd->bottom_half_handler(tasklet->ctx_priv, cmd->payload);
+		cam_tasklet_put_cmd(tasklet, &cmd);
+	}
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h
new file mode 100644
index 0000000..9730fc2
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h
@@ -0,0 +1,140 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_ISP_HW_PARSER_H_
+#define _CAM_ISP_HW_PARSER_H_
+
+#include <linux/types.h>
+#include <uapi/media/cam_isp.h>
+#include "cam_isp_hw_mgr_intf.h"
+#include "cam_ife_hw_mgr.h"
+#include "cam_hw_intf.h"
+
+/**
+ * @brief                  KMD scratch buffer information
+ *
+ * @handle:                Memory handle
+ * @cpu_addr:              Cpu address (uint32_t pointer; consumers index it
+ *                         in 4-byte words)
+ * @offset:                Offset from the start of the buffer
+ * @size:                  Size of the buffer
+ * @used_bytes:            Used memory in bytes
+ *
+ */
+struct cam_isp_kmd_buf_info {
+	int        handle;
+	uint32_t  *cpu_addr;
+	uint32_t   offset;
+	uint32_t   size;
+	uint32_t   used_bytes;
+};
+
+
+/**
+ * @brief                  Validate the packet
+ *
+ * @packet:                Packet to be validated
+ *
+ * @return:                0 for success
+ *                         -EINVAL for Fail
+ */
+int cam_isp_validate_packet(struct cam_packet *packet);
+
+/**
+ * @brief                  Get the kmd buffer from the packet command descriptor
+ *
+ * @packet:                Packet data
+ * @kmd_buf:               Extracted the KMD buffer information
+ *
+ * @return:                0 for success
+ *                         -EINVAL for Fail
+ */
+int cam_isp_get_kmd_buffer(struct cam_packet *packet,
+	struct cam_isp_kmd_buf_info *kmd_buf_info);
+
+/**
+ * @brief                  Add change base in the hw entries list
+ *                         Process the ISP source list and get the change base
+ *                         from the ISP HW instance
+ *
+ * @prepare:               Contain the  packet and HW update variables
+ * @res_list_isp_src:      Resource list for IFE/VFE source
+ * @base_idx:              Base or dev index of the IFE/VFE HW instance for
+ *                         which the change base needs to be added
+ * @kmd_buf_info:          Kmd buffer to store the change base command
+ * @return:                0 for success
+ *                         -EINVAL for Fail
+ */
+int cam_isp_add_change_base(
+	struct cam_hw_prepare_update_args     *prepare,
+	struct list_head                      *res_list_isp_src,
+	uint32_t                               base_idx,
+	struct cam_isp_kmd_buf_info           *kmd_buf_info);
+
+/**
+ * @brief                  Add command buffer in the HW entries list for given
+ *                         left or right VFE/IFE instance.
+ *
+ * @prepare:               Contain the  packet and HW update variables
+ * @dual_type:             Left of right command buffers to be extracted
+ *
+ * @return:                0 for success
+ *                         -EINVAL for Fail
+ */
+int cam_isp_add_command_buffers(
+	struct cam_hw_prepare_update_args    *prepare,
+	enum cam_isp_hw_split_id              split_id);
+
+/**
+ * @brief                  Add io buffer configurations in the HW entries list
+ *                         Process the io configurations based on the base
+ *                         index and update the HW entries list
+ *
+ * @iommu_hdl:             Iommu handle to get the IO buf from memory manager
+ * @prepare:               Contain the  packet and HW update variables
+ * @base_idx:              Base or dev index of the IFE/VFE HW instance
+ * @kmd_buf_info:          Kmd buffer to store the change base command
+ * @res_list_isp_out:      IFE /VFE out resource list
+ * @size_isp_out:          Size of the res_list_isp_out array
+ * @fill_fence:            If true, Fence map table will be filled
+ *
+ * @return:                0 for success
+ *                         -EINVAL for Fail
+ */
+int cam_isp_add_io_buffers(int	 iommu_hdl,
+	struct cam_hw_prepare_update_args    *prepare,
+	uint32_t                              base_idx,
+	struct cam_isp_kmd_buf_info          *kmd_buf_info,
+	struct cam_ife_hw_mgr_res            *res_list_isp_out,
+	uint32_t                              size_isp_out,
+	bool                                  fill_fence);
+
+
+/**
+ * @brief                  Add reg update in the hw entries list
+ *                         Process the ISP source list and get the reg update
+ *                         from the ISP HW instance
+ *
+ * @prepare:               Contain the  packet and HW update variables
+ * @res_list_isp_src:      Resource list for IFE/VFE source
+ * @base_idx:              Base or dev index of the IFE/VFE HW instance
+ * @kmd_buf_info:          Kmd buffer to store the change base command
+ * @return:                0 for success
+ *                         -EINVAL for Fail
+ */
+int cam_isp_add_reg_update(
+	struct cam_hw_prepare_update_args    *prepare,
+	struct list_head                     *res_list_isp_src,
+	uint32_t                              base_idx,
+	struct cam_isp_kmd_buf_info          *kmd_buf_info);
+
+
+#endif /* _CAM_ISP_HW_PARSER_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_tasklet_util.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_tasklet_util.h
new file mode 100644
index 0000000..0e4bf12
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_tasklet_util.h
@@ -0,0 +1,95 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_TASKLET_UTIL_H_
+#define _CAM_TASKLET_UTIL_H_
+
+#include "cam_irq_controller.h"
+
+/*
+ * cam_tasklet_init()
+ *
+ * @brief:              Initialize the tasklet info structure
+ *
+ * @tasklet:            Tasklet to initialize
+ * @hw_mgr_ctx:         Private Ctx data that will be passed to the handler
+ *                      function
+ * @idx:                Index of tasklet used as identity
+ *
+ * @return:             0: Success
+ *                      Negative: Failure
+ */
+int cam_tasklet_init(
+	void                   **tasklet,
+	void                    *hw_mgr_ctx,
+	uint32_t                 idx);
+
+/*
+ * cam_tasklet_deinit()
+ *
+ * @brief:              Deinitialize the tasklet info structure
+ *
+ * @tasklet:            Tasklet to deinitialize
+ *
+ * @return:             Void
+ */
+void cam_tasklet_deinit(void   **tasklet);
+
+/*
+ * cam_tasklet_start()
+ *
+ * @brief:              Enable the tasklet to be scheduled and run.
+ *                      Caller should make sure this function is called
+ *                      before trying to enqueue.
+ *
+ * @tasklet:            Tasklet to start
+ *
+ * @return:             0: Success
+ *                      Negative: Failure
+ */
+int cam_tasklet_start(void    *tasklet);
+
+/*
+ * cam_tasklet_stop()
+ *
+ * @brief:              Disable the tasklet so it can no longer be scheduled.
+ *                      Need to enable again to run.
+ *
+ * @tasklet:            Tasklet to stop
+ *
+ * @return:             Void
+ */
+void cam_tasklet_stop(void    *tasklet);
+
+/*
+ * cam_tasklet_enqueue_cmd()
+ *
+ * @brief:               Enqueue the tasklet_cmd to used list
+ *
+ * @bottom_half:         Tasklet info to enqueue onto
+ * @handler_priv:        Private Handler data that will be passed to the
+ *                       handler function
+ * @evt_payload_priv:    Event payload that will be passed to the handler
+ *                       function
+ * @bottom_half_handler: Callback function that will be called by tasklet
+ *                       for handling event
+ *
+ * @return:              0: Success
+ *                       Negative: Failure
+ */
+int cam_tasklet_enqueue_cmd(
+	void                              *bottom_half,
+	void                              *handler_priv,
+	void                              *evt_payload_priv,
+	CAM_IRQ_HANDLER_BOTTOM_HALF        bottom_half_handler);
+
+#endif /* _CAM_TASKLET_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/Makefile
new file mode 100644
index 0000000..1fc0dd2
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/Makefile
@@ -0,0 +1,3 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_irq_controller.o
\ No newline at end of file
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
new file mode 100644
index 0000000..bf4d174
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
@@ -0,0 +1,533 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include "cam_io_util.h"
+#include "cam_irq_controller.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+/**
+ * struct cam_irq_evt_handler:
+ * @Brief:                  Event handler information
+ *
+ * @priority:               Priority level of this event
+ * @evt_bit_mask_arr:       evt_bit_mask that has the bits set for IRQs to
+ *                          subscribe for
+ * @handler_priv:           Private data that will be passed to the Top/Bottom
+ *                          Half handler function
+ * @top_half_handler:       Top half Handler callback function
+ * @bottom_half_handler:    Bottom half Handler callback function
+ * @bottom_half:            Pointer to bottom_half implementation on which to
+ *                          enqueue the event for further handling
+ * @bottom_half_enqueue_func:
+ *                          Function used to enqueue the bottom_half event
+ * @list_node:              list_head struct used for overall handler List
+ * @th_list_node:           list_head struct used for top half handler List
+ */
+struct cam_irq_evt_handler {
+	enum cam_irq_priority_level        priority;
+	uint32_t                          *evt_bit_mask_arr;
+	void                              *handler_priv;
+	CAM_IRQ_HANDLER_TOP_HALF           top_half_handler;
+	CAM_IRQ_HANDLER_BOTTOM_HALF        bottom_half_handler;
+	void                              *bottom_half;
+	CAM_IRQ_BOTTOM_HALF_ENQUEUE_FUNC   bottom_half_enqueue_func;
+	struct list_head                   list_node;
+	struct list_head                   th_list_node;
+	int                                index;
+};
+
+/**
+ * struct cam_irq_register_obj:
+ * @Brief:                  Structure containing information related to
+ *                          a particular register Set
+ *
+ * @index:                  Index of set in Array
+ * @mask_reg_offset:        Offset of IRQ MASK register
+ * @clear_reg_offset:       Offset of IRQ CLEAR register
+ * @status_reg_offset:      Offset of IRQ STATUS register
+ * @top_half_enable_mask:   Array of enabled bit_mask sorted by priority
+ */
+struct cam_irq_register_obj {
+	uint32_t                     index;
+	uint32_t                     mask_reg_offset;
+	uint32_t                     clear_reg_offset;
+	uint32_t                     status_reg_offset;
+	uint32_t                     top_half_enable_mask[CAM_IRQ_PRIORITY_MAX];
+};
+
+/**
+ * struct cam_irq_controller:
+ *
+ * @brief:                  IRQ Controller structure.
+ *
+ * @name:                   Name of IRQ Controller block
+ * @mem_base:               Mapped base address of register space to which
+ *                          register offsets are added to access registers
+ * @num_registers:          Number of sets(mask/clear/status) of IRQ registers
+ * @irq_register_arr:       Array of Register object associated with this
+ *                          Controller
+ * @irq_status_arr:         Array of IRQ Status values
+ * @global_clear_offset:    Offset of Global IRQ Clear register. This register
+ *                          contains the BIT that needs to be set for the CLEAR
+ *                          to take effect
+ * @global_clear_bitmask:   Bitmask needed to be used in Global Clear register
+ *                          for Clear IRQ cmd to take effect
+ * @evt_handler_list_head:  List of all event handlers
+ * @th_list_head:           List of handlers sorted by priority
+ * @hdl_idx:                Unique identity of handler assigned on Subscribe.
+ *                          Used to Unsubscribe.
+ * @rw_lock:                Read-Write Lock for use by controller
+ */
+struct cam_irq_controller {
+	const char                     *name;
+	void __iomem                   *mem_base;
+	uint32_t                        num_registers;
+	struct cam_irq_register_obj    *irq_register_arr;
+	uint32_t                       *irq_status_arr;
+	uint32_t                        global_clear_offset;
+	uint32_t                        global_clear_bitmask;
+	struct list_head                evt_handler_list_head;
+	struct list_head                th_list_head[CAM_IRQ_PRIORITY_MAX];
+	uint32_t                        hdl_idx;
+	rwlock_t                        rw_lock;
+	struct cam_irq_th_payload       th_payload;
+};
+
+int cam_irq_controller_deinit(void **irq_controller)
+{
+	struct cam_irq_controller *controller;
+	struct cam_irq_evt_handler *evt_handler = NULL;
+
+	/*
+	 * Guard against a double deinit or an init that never succeeded;
+	 * the original dereferenced *irq_controller unconditionally.
+	 */
+	if (!irq_controller || !*irq_controller) {
+		pr_err("Invalid IRQ Controller\n");
+		return -EINVAL;
+	}
+
+	controller = *irq_controller;
+
+	/* Free any handlers that were never unsubscribed */
+	while (!list_empty(&controller->evt_handler_list_head)) {
+		evt_handler = list_first_entry(
+			&controller->evt_handler_list_head,
+			struct cam_irq_evt_handler, list_node);
+		list_del_init(&evt_handler->list_node);
+		kfree(evt_handler->evt_bit_mask_arr);
+		kfree(evt_handler);
+	}
+
+	kfree(controller->th_payload.evt_status_arr);
+	kfree(controller->irq_status_arr);
+	kfree(controller->irq_register_arr);
+	kfree(controller);
+	/* Clear the caller's reference to prevent use-after-free */
+	*irq_controller = NULL;
+	return 0;
+}
+
+int cam_irq_controller_init(const char       *name,
+	void __iomem                         *mem_base,
+	struct cam_irq_controller_reg_info   *register_info,
+	void                                **irq_controller)
+{
+	struct cam_irq_controller *controller = NULL;
+	int i, rc = 0;
+
+	*irq_controller = NULL;
+
+	if (!register_info->num_registers || !register_info->irq_reg_set ||
+		!name || !mem_base) {
+		pr_err("Invalid parameters\n");
+		rc = -EINVAL;
+		return rc;
+	}
+
+	controller = kzalloc(sizeof(struct cam_irq_controller), GFP_KERNEL);
+	if (!controller) {
+		CDBG("Failed to allocate IRQ Controller\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * kcalloc is used for the per-register arrays so the count * size
+	 * multiplication is overflow-checked by the allocator instead of
+	 * being computed unchecked at the call site.
+	 */
+	controller->irq_register_arr = kcalloc(register_info->num_registers,
+		sizeof(struct cam_irq_register_obj), GFP_KERNEL);
+	if (!controller->irq_register_arr) {
+		CDBG("Failed to allocate IRQ register Arr\n");
+		rc = -ENOMEM;
+		goto reg_alloc_error;
+	}
+
+	controller->irq_status_arr = kcalloc(register_info->num_registers,
+		sizeof(uint32_t), GFP_KERNEL);
+	if (!controller->irq_status_arr) {
+		CDBG("Failed to allocate IRQ status Arr\n");
+		rc = -ENOMEM;
+		goto status_alloc_error;
+	}
+
+	controller->th_payload.evt_status_arr =
+		kcalloc(register_info->num_registers, sizeof(uint32_t),
+		GFP_KERNEL);
+	if (!controller->th_payload.evt_status_arr) {
+		CDBG("Failed to allocate BH payload bit mask Arr\n");
+		rc = -ENOMEM;
+		goto evt_mask_alloc_error;
+	}
+
+	/*
+	 * NOTE(review): only the pointer is stored, not a copy — the caller
+	 * must keep @name alive for the controller's lifetime; confirm all
+	 * callers pass string literals or similarly long-lived storage.
+	 */
+	controller->name = name;
+
+	CDBG("num_registers: %d\n", register_info->num_registers);
+	for (i = 0; i < register_info->num_registers; i++) {
+		controller->irq_register_arr[i].index = i;
+		controller->irq_register_arr[i].mask_reg_offset =
+			register_info->irq_reg_set[i].mask_reg_offset;
+		controller->irq_register_arr[i].clear_reg_offset =
+			register_info->irq_reg_set[i].clear_reg_offset;
+		controller->irq_register_arr[i].status_reg_offset =
+			register_info->irq_reg_set[i].status_reg_offset;
+		CDBG("i %d mask_reg_offset: 0x%x\n", i,
+			controller->irq_register_arr[i].mask_reg_offset);
+		CDBG("i %d clear_reg_offset: 0x%x\n", i,
+			controller->irq_register_arr[i].clear_reg_offset);
+		CDBG("i %d status_reg_offset: 0x%x\n", i,
+			controller->irq_register_arr[i].status_reg_offset);
+	}
+	controller->num_registers        = register_info->num_registers;
+	controller->global_clear_bitmask = register_info->global_clear_bitmask;
+	controller->global_clear_offset  = register_info->global_clear_offset;
+	controller->mem_base             = mem_base;
+
+	CDBG("global_clear_bitmask: 0x%x\n",
+		controller->global_clear_bitmask);
+	CDBG("global_clear_offset: 0x%x\n",
+		controller->global_clear_offset);
+	/* %pK instead of a uint64_t cast: correct width and no address leak */
+	CDBG("mem_base: %pK\n", controller->mem_base);
+
+	INIT_LIST_HEAD(&controller->evt_handler_list_head);
+	for (i = 0; i < CAM_IRQ_PRIORITY_MAX; i++)
+		INIT_LIST_HEAD(&controller->th_list_head[i]);
+
+	rwlock_init(&controller->rw_lock);
+
+	/* Handle 0 is never issued so it can act as an invalid sentinel */
+	controller->hdl_idx = 1;
+	*irq_controller = controller;
+
+	return rc;
+
+evt_mask_alloc_error:
+	kfree(controller->irq_status_arr);
+status_alloc_error:
+	kfree(controller->irq_register_arr);
+reg_alloc_error:
+	kfree(controller);
+
+	return rc;
+}
+
+int cam_irq_controller_subscribe_irq(void *irq_controller,
+	enum cam_irq_priority_level        priority,
+	uint32_t                          *evt_bit_mask_arr,
+	void                              *handler_priv,
+	CAM_IRQ_HANDLER_TOP_HALF           top_half_handler,
+	CAM_IRQ_HANDLER_BOTTOM_HALF        bottom_half_handler,
+	void                              *bottom_half,
+	CAM_IRQ_BOTTOM_HALF_ENQUEUE_FUNC   bottom_half_enqueue_func)
+{
+	struct cam_irq_controller  *controller  = irq_controller;
+	struct cam_irq_evt_handler *evt_handler = NULL;
+	int                         i;
+	int                         rc = 0;
+	uint32_t                    irq_mask;
+	unsigned long               flags;
+
+	if (!controller || !handler_priv || !evt_bit_mask_arr) {
+		pr_err("Invalid params: ctlr=%pK handler_priv=%pK bit_mask_arr = %pK\n",
+			controller, handler_priv, evt_bit_mask_arr);
+		return -EINVAL;
+	}
+
+	/* Top half is mandatory; th_processing relies on it being set */
+	if (!top_half_handler) {
+		pr_err("Missing top half handler\n");
+		return -EINVAL;
+	}
+
+	/* A bottom half handler needs both a queue and an enqueue function */
+	if (bottom_half_handler &&
+		(!bottom_half || !bottom_half_enqueue_func)) {
+		pr_err("Invalid params: bh_handler=%pK bh=%pK bh_enq_f=%pK\n",
+			bottom_half_handler,
+			bottom_half,
+			bottom_half_enqueue_func);
+		return -EINVAL;
+	}
+
+	if (priority >= CAM_IRQ_PRIORITY_MAX) {
+		pr_err("Invalid priority=%u, max=%u\n", priority,
+			CAM_IRQ_PRIORITY_MAX);
+		return -EINVAL;
+	}
+
+	/*
+	 * The original sizeof(evt_bit_mask_arr) length check was removed:
+	 * sizeof applied to a pointer parameter yields the pointer size,
+	 * not the caller's array length, so the check compared a constant
+	 * 4 or 8 against 4 * num_registers and could never validate the
+	 * array. Callers must supply num_registers entries; that cannot
+	 * be verified here.
+	 */
+
+	evt_handler = kzalloc(sizeof(struct cam_irq_evt_handler), GFP_KERNEL);
+	if (!evt_handler) {
+		CDBG("Error allocating hlist_node\n");
+		return -ENOMEM;
+	}
+
+	evt_handler->evt_bit_mask_arr = kzalloc(sizeof(uint32_t) *
+		controller->num_registers, GFP_KERNEL);
+	if (!evt_handler->evt_bit_mask_arr) {
+		CDBG("Error allocating hlist_node\n");
+		rc = -ENOMEM;
+		goto free_evt_handler;
+	}
+
+	INIT_LIST_HEAD(&evt_handler->list_node);
+	INIT_LIST_HEAD(&evt_handler->th_list_node);
+
+	/* Keep a private copy so the caller's array need not stay alive */
+	for (i = 0; i < controller->num_registers; i++)
+		evt_handler->evt_bit_mask_arr[i] = evt_bit_mask_arr[i];
+
+	evt_handler->priority                 = priority;
+	evt_handler->handler_priv             = handler_priv;
+	evt_handler->top_half_handler         = top_half_handler;
+	evt_handler->bottom_half_handler      = bottom_half_handler;
+	evt_handler->bottom_half              = bottom_half;
+	evt_handler->bottom_half_enqueue_func = bottom_half_enqueue_func;
+	/* Index doubles as the unsubscribe handle; wrap before going huge */
+	evt_handler->index                    = controller->hdl_idx++;
+	if (controller->hdl_idx > 0x3FFFFFFF)
+		controller->hdl_idx = 1;
+
+	/* Update software priority masks and hardware mask registers */
+	write_lock_irqsave(&controller->rw_lock, flags);
+	for (i = 0; i < controller->num_registers; i++) {
+		controller->irq_register_arr[i].top_half_enable_mask[priority]
+			|= evt_bit_mask_arr[i];
+
+		irq_mask = cam_io_r_mb(controller->mem_base +
+			controller->irq_register_arr[i].mask_reg_offset);
+		irq_mask |= evt_bit_mask_arr[i];
+
+		cam_io_w_mb(irq_mask, controller->mem_base +
+			controller->irq_register_arr[i].mask_reg_offset);
+	}
+	write_unlock_irqrestore(&controller->rw_lock, flags);
+
+	list_add_tail(&evt_handler->list_node,
+		&controller->evt_handler_list_head);
+	list_add_tail(&evt_handler->th_list_node,
+		&controller->th_list_head[priority]);
+
+	return evt_handler->index;
+
+free_evt_handler:
+	kfree(evt_handler);
+	evt_handler = NULL;
+
+	return rc;
+}
+
+/*
+ * cam_irq_controller_unsubscribe_irq()
+ *
+ * @brief: Look up the handler registered under @handle, unlink it from the
+ *         handler lists, drop its bits from the software priority masks and
+ *         the hardware MASK registers, clear any pending bits it owned, and
+ *         free it. Returns 0 on success, -EINVAL if @handle is unknown.
+ */
+int cam_irq_controller_unsubscribe_irq(void *irq_controller,
+	uint32_t handle)
+{
+	struct cam_irq_controller  *controller  = irq_controller;
+	struct cam_irq_evt_handler *evt_handler = NULL;
+	struct cam_irq_evt_handler *evt_handler_temp;
+	uint32_t                    i;
+	uint32_t                    found = 0;
+	uint32_t                    irq_mask;
+	unsigned long               flags;
+	int                         rc = -EINVAL;
+
+	/*
+	 * Safe-iteration variant because the matching node is unlinked
+	 * inside the loop. evt_handler keeps pointing at the unlinked
+	 * node after the break so it can be torn down below.
+	 * NOTE(review): the lists are modified without holding rw_lock —
+	 * confirm callers serialize this against list readers.
+	 */
+	list_for_each_entry_safe(evt_handler, evt_handler_temp,
+		&controller->evt_handler_list_head, list_node) {
+		if (evt_handler->index == handle) {
+			CDBG("unsubscribe item %d\n", handle);
+			list_del_init(&evt_handler->list_node);
+			list_del_init(&evt_handler->th_list_node);
+			found = 1;
+			rc = 0;
+			break;
+		}
+	}
+
+	if (found) {
+		write_lock_irqsave(&controller->rw_lock, flags);
+		for (i = 0; i < controller->num_registers; i++) {
+			/* Drop this handler's bits from its priority mask */
+			controller->irq_register_arr[i].
+				top_half_enable_mask[evt_handler->priority] &=
+				~(evt_handler->evt_bit_mask_arr[i]);
+
+			/* Disable the bits in the hardware MASK register */
+			irq_mask = cam_io_r_mb(controller->mem_base +
+				controller->irq_register_arr[i].
+				mask_reg_offset);
+			irq_mask &= ~(evt_handler->evt_bit_mask_arr[i]);
+
+			cam_io_w_mb(irq_mask, controller->mem_base +
+				controller->irq_register_arr[i].
+				mask_reg_offset);
+
+			/* Clear the IRQ bits of this handler */
+			cam_io_w_mb(evt_handler->evt_bit_mask_arr[i],
+				controller->mem_base +
+				controller->irq_register_arr[i].
+				clear_reg_offset);
+			/* Strobe global clear so the CLEAR write takes effect */
+			if (controller->global_clear_offset)
+				cam_io_w_mb(
+					controller->global_clear_bitmask,
+					controller->mem_base +
+					controller->global_clear_offset);
+		}
+		write_unlock_irqrestore(&controller->rw_lock, flags);
+
+		kfree(evt_handler->evt_bit_mask_arr);
+		kfree(evt_handler);
+	}
+
+	return rc;
+}
+
+/**
+ * cam_irq_controller_match_bit_mask()
+ *
+ * @Brief:                Test whether any IRQ bit this handler subscribed
+ *                        to is present in the status values last latched
+ *                        by the controller.
+ *
+ * @controller:           IRQ Controller structure
+ * @evt_handler:          Event handler structure
+ *
+ * @Return:               True: If any interested IRQ Bit is Set
+ *                        False: Otherwise
+ */
+static bool cam_irq_controller_match_bit_mask(
+	struct cam_irq_controller   *controller,
+	struct cam_irq_evt_handler  *evt_handler)
+{
+	int reg;
+
+	for (reg = 0; reg < controller->num_registers; reg++) {
+		uint32_t pending = controller->irq_status_arr[reg];
+
+		if (pending & evt_handler->evt_bit_mask_arr[reg])
+			return true;
+	}
+
+	return false;
+}
+
+/*
+ * cam_irq_controller_th_processing()
+ *
+ * @brief: For every handler on @th_list_head whose subscribed bits
+ *         intersect the latched status values, build the top-half payload,
+ *         invoke the top-half handler, and on success hand the event to
+ *         the handler's bottom half (if one was registered).
+ */
+static void cam_irq_controller_th_processing(
+	struct cam_irq_controller      *controller,
+	struct list_head               *th_list_head)
+{
+	struct cam_irq_evt_handler     *evt_handler = NULL;
+	struct cam_irq_th_payload      *th_payload = &controller->th_payload;
+	bool                            is_irq_match;
+	int                             rc = -EINVAL;
+	int                             i;
+
+	CDBG("Enter\n");
+
+	if (list_empty(th_list_head))
+		return;
+
+	list_for_each_entry(evt_handler, th_list_head, th_list_node) {
+		is_irq_match = cam_irq_controller_match_bit_mask(controller,
+			evt_handler);
+
+		if (!is_irq_match)
+			continue;
+
+		CDBG("match found\n");
+
+		/*
+		 * th_payload is shared controller state, reset per handler.
+		 * Only the bits this handler subscribed to are exposed.
+		 */
+		cam_irq_th_payload_init(th_payload);
+		th_payload->handler_priv  = evt_handler->handler_priv;
+		th_payload->num_registers = controller->num_registers;
+		for (i = 0; i < controller->num_registers; i++) {
+			th_payload->evt_status_arr[i] =
+				controller->irq_status_arr[i] &
+				evt_handler->evt_bit_mask_arr[i];
+		}
+
+		/*
+		 * irq_status_arr[0] is dummy argument passed. the entire
+		 * status array is passed in th_payload.
+		 */
+		if (evt_handler->top_half_handler)
+			rc = evt_handler->top_half_handler(
+				controller->irq_status_arr[0],
+				(void *)th_payload);
+
+		/*
+		 * NOTE(review): rc is not reset between handlers; subscribe
+		 * rejects a NULL top_half_handler, so rc should always be
+		 * freshly assigned above — confirm that invariant holds.
+		 */
+		if (!rc && evt_handler->bottom_half_handler) {
+			CDBG("Enqueuing bottom half\n");
+			if (evt_handler->bottom_half_enqueue_func) {
+				evt_handler->bottom_half_enqueue_func(
+					evt_handler->bottom_half,
+					evt_handler->handler_priv,
+					th_payload->evt_payload_priv,
+					evt_handler->bottom_half_handler);
+			}
+		}
+	}
+
+	CDBG("Exit\n");
+}
+
+irqreturn_t cam_irq_controller_handle_irq(int irq_num, void *priv)
+{
+	struct cam_irq_controller  *controller  = priv;
+	bool         need_th_processing[CAM_IRQ_PRIORITY_MAX] = {false};
+	int          i;
+	int          j;
+
+	if (!controller)
+		return IRQ_NONE;
+
+	/*
+	 * Latch and clear all status registers under the read lock so
+	 * subscribe/unsubscribe (write lock) cannot change the enable
+	 * masks while pending bits are being evaluated.
+	 */
+	read_lock(&controller->rw_lock);
+	for (i = 0; i < controller->num_registers; i++) {
+		controller->irq_status_arr[i] = cam_io_r_mb(
+			controller->mem_base +
+			controller->irq_register_arr[i].status_reg_offset);
+		cam_io_w_mb(controller->irq_status_arr[i],
+			controller->mem_base +
+			controller->irq_register_arr[i].clear_reg_offset);
+		CDBG("Read irq status%d = 0x%x\n", i,
+			controller->irq_status_arr[i]);
+		for (j = 0; j < CAM_IRQ_PRIORITY_MAX; j++) {
+			/*
+			 * Braces added: the debug print below was indented
+			 * as if inside this if, but always executed. It is
+			 * kept unconditional to preserve behavior.
+			 */
+			if (controller->irq_register_arr[i].
+				top_half_enable_mask[j] &
+				controller->irq_status_arr[i])
+				need_th_processing[j] = true;
+
+			CDBG("i %d j %d need_th_processing = %d\n",
+				i, j, need_th_processing[j]);
+		}
+	}
+	read_unlock(&controller->rw_lock);
+
+	CDBG("Status Registers read Successful\n");
+
+	/* Strobe global clear so the per-register CLEAR writes take effect */
+	if (controller->global_clear_offset)
+		cam_io_w_mb(controller->global_clear_bitmask,
+			controller->mem_base + controller->global_clear_offset);
+
+	CDBG("Status Clear done\n");
+
+	/* Service priorities in ascending order (0 = highest priority) */
+	for (i = 0; i < CAM_IRQ_PRIORITY_MAX; i++) {
+		if (need_th_processing[i]) {
+			/* pr_fmt already prefixes __func__; don't repeat it */
+			CDBG("Invoke TH processing\n");
+			cam_irq_controller_th_processing(controller,
+				&controller->th_list_head[i]);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.h
new file mode 100644
index 0000000..1990c51
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.h
@@ -0,0 +1,217 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_IRQ_CONTROLLER_H_
+#define _CAM_IRQ_CONTROLLER_H_
+
+#include <linux/interrupt.h>
+
+#define CAM_IRQ_BITS_PER_REGISTER      32
+
+/*
+ * enum cam_irq_priority_level:
+ * @Brief:                  Priority levels for IRQ events.
+ *                          Priority_0 events will be serviced before
+ *                          Priority_1 if these bits are set in the same
+ *                          Status Read. And so on up to Priority_4.
+ *
+ *                          Default Priority is Priority_4.
+ */
+enum cam_irq_priority_level {
+	CAM_IRQ_PRIORITY_0,
+	CAM_IRQ_PRIORITY_1,
+	CAM_IRQ_PRIORITY_2,
+	CAM_IRQ_PRIORITY_3,
+	CAM_IRQ_PRIORITY_4,
+	CAM_IRQ_PRIORITY_MAX,
+};
+
+/*
+ * struct cam_irq_register_set:
+ * @Brief:                  Structure containing offsets of IRQ related
+ *                          registers belonging to a Set
+ *
+ * @mask_reg_offset:        Offset of IRQ MASK register
+ * @clear_reg_offset:       Offset of IRQ CLEAR register
+ * @status_reg_offset:      Offset of IRQ STATUS register
+ */
+struct cam_irq_register_set {
+	uint32_t                       mask_reg_offset;
+	uint32_t                       clear_reg_offset;
+	uint32_t                       status_reg_offset;
+};
+
+/*
+ * struct cam_irq_controller_reg_info:
+ * @Brief:                  Structure describing the IRQ registers
+ *
+ * @num_registers:          Number of sets(mask/clear/status) of IRQ registers
+ * @irq_reg_set:            Array of Register Set Offsets.
+ *                          Length of array = num_registers
+ * @global_clear_offset:    Offset of Global IRQ Clear register. This register
+ *                          contains the BIT that needs to be set for the CLEAR
+ *                          to take effect
+ * @global_clear_bitmask:   Bitmask needed to be used in Global Clear register
+ *                          for Clear IRQ cmd to take effect
+ */
+struct cam_irq_controller_reg_info {
+	uint32_t                      num_registers;
+	struct cam_irq_register_set  *irq_reg_set;
+	uint32_t                      global_clear_offset;
+	uint32_t                      global_clear_bitmask;
+};
+
+/*
+ * struct cam_irq_th_payload:
+ * @Brief:                  Event payload structure. This structure will be
+ *                          passed to the Top Half handler functions.
+ *
+ * @handler_priv:           Private Data of handling object set when
+ *                          subscribing to IRQ event.
+ * @num_registers:          Length of evt_bit_mask Array below
+ * @evt_status_arr:         Array of Status bitmask read from registers.
+ *                          Length of array = num_registers
+ * @evt_payload_priv:       Private payload pointer which can be set by Top
+ *                          Half handler for use in Bottom Half.
+ */
+struct cam_irq_th_payload {
+	void       *handler_priv;
+	uint32_t    num_registers;
+	uint32_t   *evt_status_arr;
+	void       *evt_payload_priv;
+};
+
+/*
+ * cam_irq_th_payload_init()
+ *
+ * @brief:              Reset the fields of the shared top half payload that
+ *                      a Top Half handler is expected to fill in, before the
+ *                      payload is reused for the next dispatched event.
+ *
+ * @th_payload:         Top Half payload structure to initialize
+ *
+ * @return:             Void
+ */
+static inline void cam_irq_th_payload_init(
+	struct cam_irq_th_payload *th_payload)
+{
+	th_payload->handler_priv = NULL;
+	th_payload->evt_payload_priv = NULL;
+}
+
+typedef int (*CAM_IRQ_HANDLER_TOP_HALF)(uint32_t evt_id,
+	struct cam_irq_th_payload *th_payload);
+
+typedef int (*CAM_IRQ_HANDLER_BOTTOM_HALF)(void *handler_priv,
+	void *evt_payload_priv);
+
+typedef int (*CAM_IRQ_BOTTOM_HALF_ENQUEUE_FUNC)(void *bottom_half,
+	void *handler_priv, void *evt_payload_priv,
+	CAM_IRQ_HANDLER_BOTTOM_HALF);
+
+/*
+ * cam_irq_controller_init()
+ *
+ * @brief:              Create and Initialize IRQ Controller.
+ *
+ * @name:               Name of IRQ Controller block
+ * @mem_base:           Mapped base address of register space to which
+ *                      register offsets are added to access registers
+ * @register_info:      Register Info structure associated with this Controller
+ * @irq_controller:     Pointer to IRQ Controller that will be filled if
+ *                      initialization is successful
+ *
+ * @return:             0: Success
+ *                      Negative: Failure
+ */
+int cam_irq_controller_init(const char       *name,
+	void __iomem                         *mem_base,
+	struct cam_irq_controller_reg_info   *register_info,
+	void                                **irq_controller);
+
+/*
+ * cam_irq_controller_subscribe_irq()
+ *
+ * @brief:               Subscribe to certain IRQ events.
+ *
+ * @irq_controller:      Pointer to IRQ Controller that controls this event IRQ
+ * @priority:            Priority level of these events used if multiple events
+ *                       are SET in the Status Register
+ * @evt_bit_mask_arr:    evt_bit_mask that has the bits set for IRQs to
+ *                       subscribe for
+ * @handler_priv:        Private data that will be passed to the Top/Bottom Half
+ *                       handler function
+ * @top_half_handler:    Top half Handler callback function
+ * @bottom_half_handler: Bottom half Handler callback function
+ * @bottom_half:         Pointer to bottom_half implementation on which to
+ *                       enqueue the event for further handling
+ * @bottom_half_enqueue_func:
+ *                       Function used to enqueue the bottom_half event
+ *
+ * @return:              Positive: Success. Value represents handle which is
+ *                                 to be used to unsubscribe
+ *                       Negative: Failure
+ */
+int cam_irq_controller_subscribe_irq(void *irq_controller,
+	enum cam_irq_priority_level        priority,
+	uint32_t                          *evt_bit_mask_arr,
+	void                              *handler_priv,
+	CAM_IRQ_HANDLER_TOP_HALF           top_half_handler,
+	CAM_IRQ_HANDLER_BOTTOM_HALF        bottom_half_handler,
+	void                              *bottom_half,
+	CAM_IRQ_BOTTOM_HALF_ENQUEUE_FUNC   bottom_half_enqueue_func);
+
+/*
+ * cam_irq_controller_unsubscribe_irq()
+ *
+ * @brief:               Unsubscribe to IRQ events previously subscribed to.
+ *
+ * @irq_controller:      Pointer to IRQ Controller that controls this event IRQ
+ * @handle:              Handle returned on successful subscribe used to
+ *                       identify the handler object
+ *
+ * @return:              0: Success
+ *                       Negative: Failure
+ */
+int cam_irq_controller_unsubscribe_irq(void *irq_controller,
+	uint32_t handle);
+
+/*
+ * cam_irq_controller_deinit()
+ *
+ * @brief:              Deinitialize IRQ Controller.
+ *
+ * @irq_controller:     Pointer to IRQ Controller that needs to be
+ *                      deinitialized
+ *
+ * @return:             0: Success
+ *                      Negative: Failure
+ */
+int cam_irq_controller_deinit(void **irq_controller);
+
+/*
+ * cam_irq_controller_handle_irq()
+ *
+ * @brief:              Function that should be registered with the IRQ line.
+ *                      This is the first function to be called when the IRQ
+ *                      is fired. It will read the Status register and Clear
+ *                      the IRQ bits. It will then call the top_half handlers
+ *                      and enqueue the result to bottom_half.
+ *
+ * @irq_num:            Number of IRQ line that was set that lead to this
+ *                      function being called
+ * @priv:               Private data registered with request_irq is passed back
+ *                      here. This private data should be the irq_controller
+ *                      structure.
+ *
+ * @return:             IRQ_HANDLED/IRQ_NONE
+ */
+irqreturn_t cam_irq_controller_handle_irq(int irq_num, void *priv);
+
+#endif /* _CAM_IRQ_CONTROLLER_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
new file mode 100644
index 0000000..9f2204b4
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
@@ -0,0 +1,131 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_ISP_HW_MGR_INTF_H_
+#define _CAM_ISP_HW_MGR_INTF_H_
+
+#include <linux/of.h>
+#include <linux/time.h>
+#include <linux/list.h>
+#include <uapi/media/cam_isp.h>
+#include "cam_hw_mgr_intf.h"
+
+/**
+ *  enum cam_isp_hw_event_type - Collection of the ISP hardware events
+ */
+enum cam_isp_hw_event_type {
+	CAM_ISP_HW_EVENT_ERROR,
+	CAM_ISP_HW_EVENT_SOF,
+	CAM_ISP_HW_EVENT_REG_UPDATE,
+	CAM_ISP_HW_EVENT_EPOCH,
+	CAM_ISP_HW_EVENT_EOF,
+	CAM_ISP_HW_EVENT_DONE,
+	CAM_ISP_HW_EVENT_MAX
+};
+
+
+/**
+ * enum cam_isp_hw_err_type - Collection of the ISP error types for
+ *                         ISP hardware event CAM_ISP_HW_EVENT_ERROR
+ */
+enum cam_isp_hw_err_type {
+	CAM_ISP_HW_ERROR_NONE,
+	CAM_ISP_HW_ERROR_OVERFLOW,
+	CAM_ISP_HW_ERROR_P2I_ERROR,
+	CAM_ISP_HW_ERROR_VIOLATION,
+	CAM_ISP_HW_ERROR_BUSIF_OVERFLOW,
+	CAM_ISP_HW_ERROR_MAX,
+};
+
+
+/**
+ * struct cam_isp_hw_sof_event_data - Event payload for CAM_HW_EVENT_SOF
+ *
+ * @timestamp:             Timestamp for the buf done event
+ *
+ */
+struct cam_isp_hw_sof_event_data {
+	struct timeval       timestamp;
+};
+
+/**
+ * struct cam_isp_hw_reg_update_event_data - Event payload for
+ *                         CAM_HW_EVENT_REG_UPDATE
+ *
+ * @timestamp:             Timestamp for the buf done event
+ *
+ */
+struct cam_isp_hw_reg_update_event_data {
+	struct timeval       timestamp;
+};
+
+/**
+ * struct cam_isp_hw_epoch_event_data - Event payload for CAM_HW_EVENT_EPOCH
+ *
+ * @timestamp:             Timestamp for the buf done event
+ *
+ */
+struct cam_isp_hw_epoch_event_data {
+	struct timeval       timestamp;
+};
+
+/**
+ * struct cam_isp_hw_done_event_data - Event payload for CAM_HW_EVENT_DONE
+ *
+ * @num_handles:           Number of valid entries in @resource_handle
+ * @resource_handle:       Resource handle array (fixed capacity
+ *                         CAM_NUM_OUT_PER_COMP_IRQ_MAX)
+ * @timestamp:             Timestamp for the buf done event
+ *
+ */
+struct cam_isp_hw_done_event_data {
+	uint32_t             num_handles;
+	uint32_t             resource_handle[
+				CAM_NUM_OUT_PER_COMP_IRQ_MAX];
+	struct timeval       timestamp;
+};
+
+/**
+ * struct cam_isp_hw_eof_event_data - Event payload for CAM_HW_EVENT_EOF
+ *
+ * @timestamp:             Timestamp for the buf done event
+ *
+ */
+struct cam_isp_hw_eof_event_data {
+	struct timeval       timestamp;
+};
+
+/**
+ * struct cam_isp_hw_error_event_data - Event payload for CAM_HW_EVENT_ERROR
+ *
+ * @error_type:            error type for the error event
+ * @timestamp:             Timestamp for the buf done event
+ *
+ */
+struct cam_isp_hw_error_event_data {
+	uint32_t             error_type;
+	struct timeval       timestamp;
+};
+
+/**
+ * cam_isp_hw_mgr_init()
+ *
+ * @brief:              Initialization function for the ISP hardware manager
+ *
+ * @of_node:            Device node input
+ * @hw_mgr:             Input/output structure for the ISP hardware manager
+ *                          initialization
+ *
+ */
+int cam_isp_hw_mgr_init(struct device_node *of_node,
+	struct cam_hw_mgr_intf *hw_mgr);
+
+#endif /* _CAM_ISP_HW_MGR_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/Makefile
new file mode 100644
index 0000000..4bf4a0e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_SPECTRA_CAMERA) += ife_csid_hw/
+obj-$(CONFIG_SPECTRA_CAMERA) += vfe_hw/
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/Makefile
new file mode 100644
index 0000000..1615d21f
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/Makefile
@@ -0,0 +1,11 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_ife_csid_dev.o cam_ife_csid_soc.o cam_ife_csid_core.o
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_ife_csid170.o cam_ife_csid_lite170.o
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.c
new file mode 100644
index 0000000..bdd59d2
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.c
@@ -0,0 +1,60 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/module.h>
+#include "cam_ife_csid_core.h"
+#include "cam_ife_csid170.h"
+#include "cam_ife_csid_dev.h"
+
+#define CAM_CSID_DRV_NAME                    "csid_170"
+#define CAM_CSID_VERSION_V170                 0x10070000
+
+static struct cam_ife_csid_hw_info cam_ife_csid170_hw_info = {
+	.csid_reg = &cam_ife_csid_170_reg_offset,
+	.hw_dts_version = CAM_CSID_VERSION_V170,
+};
+
+static const struct of_device_id cam_ife_csid170_dt_match[] = {
+	{
+		.compatible = "qcom,csid170",
+		.data = &cam_ife_csid170_hw_info,
+	},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, cam_ife_csid170_dt_match);
+
+static struct platform_driver cam_ife_csid170_driver = {
+	.probe = cam_ife_csid_probe,
+	.remove = cam_ife_csid_remove,
+	.driver = {
+		.name = CAM_CSID_DRV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = cam_ife_csid170_dt_match,
+	},
+};
+
+static int __init cam_ife_csid170_init_module(void)
+{
+	return platform_driver_register(&cam_ife_csid170_driver);
+}
+
+static void __exit cam_ife_csid170_exit_module(void)
+{
+	platform_driver_unregister(&cam_ife_csid170_driver);
+}
+
+module_init(cam_ife_csid170_init_module);
+module_exit(cam_ife_csid170_exit_module);
+MODULE_DESCRIPTION("CAM IFE_CSID170 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h
new file mode 100644
index 0000000..8ff2a55
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h
@@ -0,0 +1,295 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_IFE_CSID_170_H_
+#define _CAM_IFE_CSID_170_H_
+
+#include "cam_ife_csid_core.h"
+
+static struct cam_ife_csid_ipp_reg_offset  cam_ife_csid_170_ipp_reg_offset = {
+	.csid_ipp_irq_status_addr            = 0x30,
+	.csid_ipp_irq_mask_addr              = 0x34,
+	.csid_ipp_irq_clear_addr             = 0x38,
+	.csid_ipp_irq_set_addr               = 0x3c,
+
+	.csid_ipp_cfg0_addr                  = 0x200,
+	.csid_ipp_cfg1_addr                  = 0x204,
+	.csid_ipp_ctrl_addr                  = 0x208,
+	.csid_ipp_frm_drop_pattern_addr      = 0x20c,
+	.csid_ipp_frm_drop_period_addr       = 0x210,
+	.csid_ipp_irq_subsample_pattern_addr = 0x214,
+	.csid_ipp_irq_subsample_period_addr  = 0x218,
+	.csid_ipp_hcrop_addr                 = 0x21c,
+	.csid_ipp_vcrop_addr                 = 0x220,
+	.csid_ipp_pix_drop_pattern_addr      = 0x224,
+	.csid_ipp_pix_drop_period_addr       = 0x228,
+	.csid_ipp_line_drop_pattern_addr     = 0x22c,
+	.csid_ipp_line_drop_period_addr      = 0x230,
+	.csid_ipp_rst_strobes_addr           = 0x240,
+	.csid_ipp_status_addr                = 0x254,
+	.csid_ipp_misr_val_addr              = 0x258,
+	.csid_ipp_format_measure_cfg0_addr   = 0x270,
+	.csid_ipp_format_measure_cfg1_addr   = 0x274,
+	.csid_ipp_format_measure0_addr       = 0x278,
+	.csid_ipp_format_measure1_addr       = 0x27c,
+	.csid_ipp_format_measure2_addr       = 0x280,
+	.csid_ipp_timestamp_curr0_sof_addr   = 0x290,
+	.csid_ipp_timestamp_curr1_sof_addr   = 0x294,
+	.csid_ipp_timestamp_perv0_sof_addr   = 0x298,
+	.csid_ipp_timestamp_perv1_sof_addr   = 0x29c,
+	.csid_ipp_timestamp_curr0_eof_addr   = 0x2a0,
+	.csid_ipp_timestamp_curr1_eof_addr   = 0x2a4,
+	.csid_ipp_timestamp_perv0_eof_addr   = 0x2a8,
+	.csid_ipp_timestamp_perv1_eof_addr   = 0x2ac,
+	/* configurations */
+	.pix_store_en_shift_val              = 7,
+};
+
+static struct cam_ife_csid_rdi_reg_offset cam_ife_csid_170_rdi_0_reg_offset = {
+	.csid_rdi_irq_status_addr                 = 0x40,
+	.csid_rdi_irq_mask_addr                   = 0x44,
+	.csid_rdi_irq_clear_addr                  = 0x48,
+	.csid_rdi_irq_set_addr                    = 0x4c,
+	.csid_rdi_cfg0_addr                       = 0x300,
+	.csid_rdi_cfg1_addr                       = 0x304,
+	.csid_rdi_ctrl_addr                       = 0x308,
+	.csid_rdi_frm_drop_pattern_addr           = 0x30c,
+	.csid_rdi_frm_drop_period_addr            = 0x310,
+	.csid_rdi_irq_subsample_pattern_addr      = 0x314,
+	.csid_rdi_irq_subsample_period_addr       = 0x318,
+	.csid_rdi_rpp_hcrop_addr                  = 0x31c,
+	.csid_rdi_rpp_vcrop_addr                  = 0x320,
+	.csid_rdi_rpp_pix_drop_pattern_addr       = 0x324,
+	.csid_rdi_rpp_pix_drop_period_addr        = 0x328,
+	.csid_rdi_rpp_line_drop_pattern_addr      = 0x32c,
+	.csid_rdi_rpp_line_drop_period_addr       = 0x330,
+	.csid_rdi_rst_strobes_addr                = 0x340,
+	.csid_rdi_status_addr                     = 0x350,
+	.csid_rdi_misr_val0_addr                  = 0x354,
+	.csid_rdi_misr_val1_addr                  = 0x358,
+	.csid_rdi_misr_val2_addr                  = 0x35c,
+	.csid_rdi_misr_val3_addr                  = 0x360,
+	.csid_rdi_format_measure_cfg0_addr        = 0x370,
+	.csid_rdi_format_measure_cfg1_addr        = 0x374,
+	.csid_rdi_format_measure0_addr            = 0x378,
+	.csid_rdi_format_measure1_addr            = 0x37c,
+	.csid_rdi_format_measure2_addr            = 0x380,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x390,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x394,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x398,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x39c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x3a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x3a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x3a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x3ac,
+	.csid_rdi_byte_cntr_ping_addr             = 0x3e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x3e4,
+};
+
+static struct cam_ife_csid_rdi_reg_offset cam_ife_csid_170_rdi_1_reg_offset = {
+	.csid_rdi_irq_status_addr                 = 0x50,
+	.csid_rdi_irq_mask_addr                   = 0x54,
+	.csid_rdi_irq_clear_addr                  = 0x58,
+	.csid_rdi_irq_set_addr                    = 0x5c,
+	.csid_rdi_cfg0_addr                       = 0x400,
+	.csid_rdi_cfg1_addr                       = 0x404,
+	.csid_rdi_ctrl_addr                       = 0x408,
+	.csid_rdi_frm_drop_pattern_addr           = 0x40c,
+	.csid_rdi_frm_drop_period_addr            = 0x410,
+	.csid_rdi_irq_subsample_pattern_addr      = 0x414,
+	.csid_rdi_irq_subsample_period_addr       = 0x418,
+	.csid_rdi_rpp_hcrop_addr                  = 0x41c,
+	.csid_rdi_rpp_vcrop_addr                  = 0x420,
+	.csid_rdi_rpp_pix_drop_pattern_addr       = 0x424,
+	.csid_rdi_rpp_pix_drop_period_addr        = 0x428,
+	.csid_rdi_rpp_line_drop_pattern_addr      = 0x42c,
+	.csid_rdi_rpp_line_drop_period_addr       = 0x430,
+	.csid_rdi_rst_strobes_addr                = 0x440,
+	.csid_rdi_status_addr                     = 0x450,
+	.csid_rdi_misr_val0_addr                  = 0x454,
+	.csid_rdi_misr_val1_addr                  = 0x458,
+	.csid_rdi_misr_val2_addr                  = 0x45c,
+	.csid_rdi_misr_val3_addr                  = 0x460,
+	.csid_rdi_format_measure_cfg0_addr        = 0x470,
+	.csid_rdi_format_measure_cfg1_addr        = 0x474,
+	.csid_rdi_format_measure0_addr            = 0x478,
+	.csid_rdi_format_measure1_addr            = 0x47c,
+	.csid_rdi_format_measure2_addr            = 0x480,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x490,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x494,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x498,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x49c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x4a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x4a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x4a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x4ac,
+	.csid_rdi_byte_cntr_ping_addr             = 0x4e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x4e4,
+};
+
+static struct cam_ife_csid_rdi_reg_offset cam_ife_csid_170_rdi_2_reg_offset = {
+	.csid_rdi_irq_status_addr                 = 0x60,
+	.csid_rdi_irq_mask_addr                   = 0x64,
+	.csid_rdi_irq_clear_addr                  = 0x68,
+	.csid_rdi_irq_set_addr                    = 0x6c,
+	.csid_rdi_cfg0_addr                       = 0x500,
+	.csid_rdi_cfg1_addr                       = 0x504,
+	.csid_rdi_ctrl_addr                       = 0x508,
+	.csid_rdi_frm_drop_pattern_addr           = 0x50c,
+	.csid_rdi_frm_drop_period_addr            = 0x510,
+	.csid_rdi_irq_subsample_pattern_addr      = 0x514,
+	.csid_rdi_irq_subsample_period_addr       = 0x518,
+	.csid_rdi_rpp_hcrop_addr                  = 0x51c,
+	.csid_rdi_rpp_vcrop_addr                  = 0x520,
+	.csid_rdi_rpp_pix_drop_pattern_addr       = 0x524,
+	.csid_rdi_rpp_pix_drop_period_addr        = 0x528,
+	.csid_rdi_rpp_line_drop_pattern_addr      = 0x52c,
+	.csid_rdi_rpp_line_drop_period_addr       = 0x530,
+	.csid_rdi_yuv_chroma_conversion_addr      = 0x534,
+	.csid_rdi_rst_strobes_addr                = 0x540,
+	.csid_rdi_status_addr                     = 0x550,
+	.csid_rdi_misr_val0_addr                  = 0x554,
+	.csid_rdi_misr_val1_addr                  = 0x558,
+	.csid_rdi_misr_val2_addr                  = 0x55c,
+	.csid_rdi_misr_val3_addr                  = 0x560,
+	.csid_rdi_format_measure_cfg0_addr        = 0x570,
+	.csid_rdi_format_measure_cfg1_addr        = 0x574,
+	.csid_rdi_format_measure0_addr            = 0x578,
+	.csid_rdi_format_measure1_addr            = 0x57c,
+	.csid_rdi_format_measure2_addr            = 0x580,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x590,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x594,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x598,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x59c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x5a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x5a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x5a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x5ac,
+	.csid_rdi_byte_cntr_ping_addr             = 0x5e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x5e4,
+};
+
+static struct cam_ife_csid_csi2_rx_reg_offset
+			cam_ife_csid_170_csi2_reg_offset = {
+	.csid_csi2_rx_irq_status_addr                 = 0x20,
+	.csid_csi2_rx_irq_mask_addr                   = 0x24,
+	.csid_csi2_rx_irq_clear_addr                  = 0x28,
+	.csid_csi2_rx_irq_set_addr                    = 0x2c,
+
+	/*CSI2 rx control */
+	.csid_csi2_rx_cfg0_addr                       = 0x100,
+	.csid_csi2_rx_cfg1_addr                       = 0x104,
+	.csid_csi2_rx_capture_ctrl_addr               = 0x108,
+	.csid_csi2_rx_rst_strobes_addr                = 0x110,
+	.csid_csi2_rx_de_scramble_cfg0_addr           = 0x114,
+	.csid_csi2_rx_de_scramble_cfg1_addr           = 0x118,
+	.csid_csi2_rx_cap_unmap_long_pkt_hdr_0_addr   = 0x120,
+	.csid_csi2_rx_cap_unmap_long_pkt_hdr_1_addr   = 0x124,
+	.csid_csi2_rx_captured_short_pkt_0_addr       = 0x128,
+	.csid_csi2_rx_captured_short_pkt_1_addr       = 0x12c,
+	.csid_csi2_rx_captured_long_pkt_0_addr        = 0x130,
+	.csid_csi2_rx_captured_long_pkt_1_addr        = 0x134,
+	.csid_csi2_rx_captured_long_pkt_ftr_addr      = 0x138,
+	.csid_csi2_rx_captured_cphy_pkt_ftr_addr      = 0x13c,
+	.csid_csi2_rx_lane0_misr_addr                 = 0x150,
+	.csid_csi2_rx_lane1_misr_addr                 = 0x154,
+	.csid_csi2_rx_lane2_misr_addr                 = 0x158,
+	.csid_csi2_rx_lane3_misr_addr                 = 0x15c,
+	.csid_csi2_rx_total_pkts_rcvd_addr            = 0x160,
+	.csid_csi2_rx_stats_ecc_addr                  = 0x164,
+	.csid_csi2_rx_total_crc_err_addr              = 0x168,
+
+	.csi2_rst_srb_all                             = 0x3FFF,
+	.csi2_rst_done_shift_val                      = 27,
+	.csi2_irq_mask_all                            = 0xFFFFFFF,
+	.csi2_misr_enable_shift_val                   = 6,
+	.csi2_vc_mode_shift_val                       = 2,
+};
+
+static struct cam_ife_csid_csi2_tpg_reg_offset
+			cam_ife_csid_170_tpg_reg_offset = {
+	/*CSID TPG control */
+	.csid_tpg_ctrl_addr                           = 0x600,
+	.csid_tpg_vc_cfg0_addr                        = 0x604,
+	.csid_tpg_vc_cfg1_addr                        = 0x608,
+	.csid_tpg_lfsr_seed_addr                      = 0x60c,
+	.csid_tpg_dt_n_cfg_0_addr                     = 0x610,
+	.csid_tpg_dt_n_cfg_1_addr                     = 0x614,
+	.csid_tpg_dt_n_cfg_2_addr                     = 0x618,
+	.csid_tpg_color_bars_cfg_addr                 = 0x640,
+	.csid_tpg_color_box_cfg_addr                  = 0x644,
+	.csid_tpg_common_gen_cfg_addr                 = 0x648,
+	.csid_tpg_cgen_n_cfg_addr                     = 0x650,
+	.csid_tpg_cgen_n_x0_addr                      = 0x654,
+	.csid_tpg_cgen_n_x1_addr                      = 0x658,
+	.csid_tpg_cgen_n_x2_addr                      = 0x65c,
+	.csid_tpg_cgen_n_xy_addr                      = 0x660,
+	.csid_tpg_cgen_n_y1_addr                      = 0x664,
+	.csid_tpg_cgen_n_y2_addr                      = 0x668,
+
+	/*configurations */
+	.tpg_dtn_cfg_offset                           = 0xc,
+	.tpg_cgen_cfg_offset                          = 0x20,
+};
+
+static struct cam_ife_csid_common_reg_offset
+			cam_ife_csid_170_cmn_reg_offset = {
+	.csid_hw_version_addr                         = 0x0,
+	.csid_cfg0_addr                               = 0x4,
+	.csid_ctrl_addr                               = 0x8,
+	.csid_reset_addr                              = 0xc,
+	.csid_rst_strobes_addr                        = 0x10,
+
+	.csid_test_bus_ctrl_addr                      = 0x14,
+	.csid_top_irq_status_addr                     = 0x70,
+	.csid_top_irq_mask_addr                       = 0x74,
+	.csid_top_irq_clear_addr                      = 0x78,
+	.csid_top_irq_set_addr                        = 0x7c,
+	.csid_irq_cmd_addr                            = 0x80,
+
+	/*configurations */
+	.major_version                                = 1,
+	.minor_version                                = 7,
+	.version_incr                                 = 0,
+	.no_rdis                                      = 3,
+	.no_pix                                       = 1,
+	.csid_rst_stb                                 = 0x1e,
+	.csid_rst_stb_sw_all                          = 0x1f,
+	.path_rst_stb_all                             = 0x7f,
+	.path_rst_done_shift_val                      = 1,
+	.path_en_shift_val                            = 31,
+	.dt_id_shift_val                              = 27,
+	.vc_shift_val                                 = 22,
+	.dt_shift_val                                 = 16,
+	.fmt_shift_val                                = 12,
+	.plain_fmt_shit_val                           = 10,
+	.crop_v_en_shift_val                          = 6,
+	.crop_h_en_shift_val                          = 5,
+	.crop_shift                                   = 16,
+	.ipp_irq_mask_all                             = 0x7FFF,
+	.rdi_irq_mask_all                             = 0x7FFF,
+};
+
+struct cam_ife_csid_reg_offset cam_ife_csid_170_reg_offset = {
+	.cmn_reg          = &cam_ife_csid_170_cmn_reg_offset,
+	.csi2_reg         = &cam_ife_csid_170_csi2_reg_offset,
+	.ipp_reg          = &cam_ife_csid_170_ipp_reg_offset,
+	.rdi_reg = {
+		&cam_ife_csid_170_rdi_0_reg_offset,
+		&cam_ife_csid_170_rdi_1_reg_offset,
+		&cam_ife_csid_170_rdi_2_reg_offset,
+		NULL,
+		},
+	.tpg_reg = &cam_ife_csid_170_tpg_reg_offset,
+};
+
+#endif /*_CAM_IFE_CSID_170_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
new file mode 100644
index 0000000..6306df3
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
@@ -0,0 +1,2554 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/iopoll.h>
+#include <linux/slab.h>
+#include <uapi/media/cam_isp.h>
+#include <uapi/media/cam_defs.h>
+
+#include "cam_ife_csid_core.h"
+#include "cam_isp_hw.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+
+/* Timeout value in msec */
+#define IFE_CSID_TIMEOUT                               1000
+
+/* TPG VC/DT values */
+#define CAM_IFE_CSID_TPG_VC_VAL                        0xA
+#define CAM_IFE_CSID_TPG_DT_VAL                        0x2B
+
+/* Timeout values in usec */
+#define CAM_IFE_CSID_TIMEOUT_SLEEP_US                  1000
+#define CAM_IFE_CSID_TIMEOUT_ALL_US                    1000000
+
+static int cam_ife_csid_is_ipp_format_supported(
+				uint32_t decode_fmt)
+{
+	int rc = -EINVAL;
+
+	switch (decode_fmt) {
+	case CAM_FORMAT_MIPI_RAW_6:
+	case CAM_FORMAT_MIPI_RAW_8:
+	case CAM_FORMAT_MIPI_RAW_10:
+	case CAM_FORMAT_MIPI_RAW_12:
+	case CAM_FORMAT_MIPI_RAW_14:
+	case CAM_FORMAT_MIPI_RAW_16:
+	case CAM_FORMAT_MIPI_RAW_20:
+	case CAM_FORMAT_DPCM_10_6_10:
+	case CAM_FORMAT_DPCM_10_8_10:
+	case CAM_FORMAT_DPCM_12_6_12:
+	case CAM_FORMAT_DPCM_12_8_12:
+	case CAM_FORMAT_DPCM_14_8_14:
+	case CAM_FORMAT_DPCM_14_10_14:
+		rc = 0;
+		break;
+	default:
+		break;
+	}
+	return rc;
+}
+
+static int cam_ife_csid_get_format(uint32_t  res_id,
+	uint32_t decode_fmt, uint32_t *path_fmt, uint32_t *plain_fmt)
+{
+	int rc = 0;
+	if (res_id >= CAM_IFE_PIX_PATH_RES_RDI_0 &&
+		res_id <= CAM_IFE_PIX_PATH_RES_RDI_3) {
+		*path_fmt = 0xf;
+		*plain_fmt = 0; /* keep output deterministic for RDI path */
+		return 0;
+	}
+
+	switch (decode_fmt) {
+	case CAM_FORMAT_MIPI_RAW_6:
+		*path_fmt  = 0;
+		*plain_fmt = 0;
+		break;
+	case CAM_FORMAT_MIPI_RAW_8:
+		*path_fmt  = 1;
+		*plain_fmt = 0;
+		break;
+	case CAM_FORMAT_MIPI_RAW_10:
+		*path_fmt  = 2;
+		*plain_fmt = 1;
+		break;
+	case CAM_FORMAT_MIPI_RAW_12:
+		*path_fmt  = 3;
+		*plain_fmt = 1;
+		break;
+	case CAM_FORMAT_MIPI_RAW_14:
+		*path_fmt  = 4;
+		*plain_fmt = 1;
+		break;
+	case CAM_FORMAT_MIPI_RAW_16:
+		*path_fmt  = 5;
+		*plain_fmt = 1;
+		break;
+	case CAM_FORMAT_MIPI_RAW_20:
+		*path_fmt  = 6;
+		*plain_fmt = 2;
+		break;
+	case CAM_FORMAT_DPCM_10_6_10:
+		*path_fmt  = 7;
+		*plain_fmt = 1;
+		break;
+	case CAM_FORMAT_DPCM_10_8_10:
+		*path_fmt  = 8;
+		*plain_fmt = 1;
+		break;
+	case CAM_FORMAT_DPCM_12_6_12:
+		*path_fmt  = 9;
+		*plain_fmt = 1;
+		break;
+	case CAM_FORMAT_DPCM_12_8_12:
+		*path_fmt  = 0xA;
+		*plain_fmt = 1;
+		break;
+	case CAM_FORMAT_DPCM_14_8_14:
+		*path_fmt  = 0xB;
+		*plain_fmt = 1;
+		break;
+	case CAM_FORMAT_DPCM_14_10_14:
+		*path_fmt  = 0xC;
+		*plain_fmt = 1;
+		break;
+	default:
+		pr_err("%s:%d: unsupported format %d\n",
+			__func__, __LINE__, decode_fmt);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static int cam_ife_csid_cid_get(struct cam_ife_csid_hw *csid_hw,
+	struct cam_isp_resource_node **res, int32_t vc, uint32_t dt,
+	uint32_t res_type)
+{
+	int  rc = 0;
+	struct cam_ife_csid_cid_data    *cid_data;
+	uint32_t  i = 0, j = 0;
+
+	for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++) {
+		if (csid_hw->cid_res[i].res_state >=
+			CAM_ISP_RESOURCE_STATE_RESERVED) {
+			cid_data = (struct cam_ife_csid_cid_data *)
+				csid_hw->cid_res[i].res_priv;
+			if (res_type == CAM_ISP_IFE_IN_RES_TPG) {
+				if (cid_data->tpg_set) {
+					cid_data->cnt++;
+					*res = &csid_hw->cid_res[i];
+					break;
+				}
+			} else {
+				if (cid_data->vc == vc && cid_data->dt == dt) {
+					cid_data->cnt++;
+					*res = &csid_hw->cid_res[i];
+					break;
+				}
+			}
+		}
+	}
+
+	if (i == CAM_IFE_CSID_CID_RES_MAX) {
+		if (res_type == CAM_ISP_IFE_IN_RES_TPG) {
+			pr_err("%s:%d:CSID:%d TPG CID not available\n",
+				__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+			return -EINVAL; /* don't fall through and reserve a CID */
+		}
+
+		for (j = 0; j < CAM_IFE_CSID_CID_RES_MAX; j++) {
+			if (csid_hw->cid_res[j].res_state ==
+				CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+				cid_data = (struct cam_ife_csid_cid_data *)
+					csid_hw->cid_res[j].res_priv;
+				cid_data->vc  = vc;
+				cid_data->dt  = dt;
+				cid_data->cnt = 1;
+				csid_hw->cid_res[j].res_state =
+					CAM_ISP_RESOURCE_STATE_RESERVED;
+				*res = &csid_hw->cid_res[j];
+				CDBG("%s:%d:CSID:%d CID %d allocated\n",
+					__func__, __LINE__,
+					csid_hw->hw_intf->hw_idx,
+					csid_hw->cid_res[j].res_id);
+				break;
+			}
+		}
+
+		if (j == CAM_IFE_CSID_CID_RES_MAX) {
+			pr_err("%s:%d:CSID:%d Free cid is not available\n",
+				__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+			rc = -EINVAL;
+		}
+	}
+
+	return rc;
+}
+
+
+static int cam_ife_csid_global_reset(struct cam_ife_csid_hw *csid_hw)
+{
+	struct cam_hw_soc_info          *soc_info;
+	struct cam_ife_csid_reg_offset  *csid_reg;
+	int rc = 0;
+	uint32_t i, irq_mask_rx, irq_mask_ipp = 0,
+		irq_mask_rdi[CAM_IFE_CSID_RDI_MAX];
+
+	soc_info = &csid_hw->hw_info->soc_info;
+	csid_reg = csid_hw->csid_info->csid_reg;
+
+	if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+		pr_err("%s:%d:CSID:%d Invalid HW State:%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx,
+			csid_hw->hw_info->hw_state);
+		return -EINVAL;
+	}
+
+	CDBG("%s:%d:CSID:%d Csid reset\n", __func__, __LINE__,
+		csid_hw->hw_intf->hw_idx);
+
+	init_completion(&csid_hw->csid_top_complete);
+
+	/* Save interrupt mask registers values*/
+	irq_mask_rx = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
+
+	if (csid_reg->cmn_reg->no_pix)
+		irq_mask_ipp = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+
+	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++) {
+		irq_mask_rdi[i] = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[i]->csid_rdi_irq_mask_addr);
+	}
+
+	/* Mask all interrupts */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
+
+	if (csid_reg->cmn_reg->no_pix)
+		cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+
+	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++)
+		cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[i]->csid_rdi_irq_mask_addr);
+
+	/* clear all interrupts */
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_clear_addr);
+
+	cam_io_w_mb(csid_reg->csi2_reg->csi2_irq_mask_all,
+		soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_clear_addr);
+
+	if (csid_reg->cmn_reg->no_pix)
+		cam_io_w_mb(csid_reg->cmn_reg->ipp_irq_mask_all,
+			soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_irq_clear_addr);
+
+	for (i = 0 ; i < csid_reg->cmn_reg->no_rdis; i++)
+		cam_io_w_mb(csid_reg->cmn_reg->rdi_irq_mask_all,
+			soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[i]->csid_rdi_irq_clear_addr);
+
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_irq_cmd_addr);
+
+	cam_io_w_mb(0x80, soc_info->reg_map[0].mem_base +
+		csid_hw->csid_info->csid_reg->csi2_reg->csid_csi2_rx_cfg1_addr);
+
+	/* enable the IPP and RDI format measure */
+	if (csid_reg->cmn_reg->no_pix)
+		cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_cfg0_addr);
+
+	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++)
+		cam_io_w_mb(0x2, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[i]->csid_rdi_cfg0_addr);
+
+	/* perform the top CSID HW reset */
+	cam_io_w_mb(csid_reg->cmn_reg->csid_rst_stb,
+		soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_rst_strobes_addr);
+
+	CDBG("%s:%d: Waiting for reset complete from irq handler\n",
+		__func__, __LINE__);
+
+	rc = wait_for_completion_timeout(&csid_hw->csid_top_complete,
+		msecs_to_jiffies(IFE_CSID_TIMEOUT));
+	if (rc <= 0) {
+		pr_err("%s:%d:CSID:%d reset completion in fail rc = %d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx, rc);
+		if (rc == 0)
+			rc = -ETIMEDOUT;
+	} else {
+		rc = 0;
+	}
+
+	/*restore all interrupt masks */
+	cam_io_w_mb(irq_mask_rx, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
+
+	if (csid_reg->cmn_reg->no_pix)
+		cam_io_w_mb(irq_mask_ipp, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+
+	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++)
+		cam_io_w_mb(irq_mask_rdi[i], soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[i]->csid_rdi_irq_mask_addr);
+
+	return rc;
+}
+
+static int cam_ife_csid_path_reset(struct cam_ife_csid_hw *csid_hw,
+	struct cam_csid_reset_cfg_args  *reset)
+{
+	int rc = 0;
+	struct cam_hw_soc_info              *soc_info;
+	struct cam_isp_resource_node        *res;
+	struct cam_ife_csid_reg_offset      *csid_reg;
+	uint32_t  reset_strb_addr, reset_strb_val, val, id;
+	struct completion  *complete;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	res      = reset->node_res;
+
+	if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+		pr_err("%s:%d:CSID:%d Invalid hw state :%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx,
+			csid_hw->hw_info->hw_state);
+		return -EINVAL;
+	}
+
+	if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
+		CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	CDBG("%s:%d:CSID:%d resource:%d\n", __func__, __LINE__,
+		csid_hw->hw_intf->hw_idx, res->res_id);
+
+	if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
+		if (!csid_reg->ipp_reg) {
+			pr_err("%s:%d:CSID:%d IPP not supported :%d\n",
+				__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+				res->res_id);
+			return -EINVAL;
+		}
+
+		reset_strb_addr = csid_reg->ipp_reg->csid_ipp_rst_strobes_addr;
+		complete = &csid_hw->csid_ipp_complete;
+
+		/* Enable path reset done interrupt */
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+		val |= CSID_PATH_INFO_RST_DONE;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			 csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+
+	} else {
+		id = res->res_id;
+		if (!csid_reg->rdi_reg[id]) {
+			pr_err("%s:%d:CSID:%d RDI res not supported :%d\n",
+				__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+				res->res_id);
+			return -EINVAL;
+		}
+
+		reset_strb_addr =
+			csid_reg->rdi_reg[id]->csid_rdi_rst_strobes_addr;
+		complete =
+			&csid_hw->csid_rdin_complete[id];
+
+		/* Enable path reset done interrupt */
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+		val |= CSID_PATH_INFO_RST_DONE;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+	}
+
+	init_completion(complete);
+	reset_strb_val = csid_reg->cmn_reg->path_rst_stb_all;
+
+	/* Enable the Test gen before reset */
+	cam_io_w_mb(1,	csid_hw->hw_info->soc_info.reg_map[0].mem_base +
+		csid_reg->tpg_reg->csid_tpg_ctrl_addr);
+
+	/* Reset the corresponding ife csid path */
+	cam_io_w_mb(reset_strb_val, soc_info->reg_map[0].mem_base +
+				reset_strb_addr);
+
+	rc = wait_for_completion_timeout(complete,
+		msecs_to_jiffies(IFE_CSID_TIMEOUT));
+	if (rc <= 0) {
+		pr_err("%s:%d CSID:%d Res id %d fail rc = %d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_id,  rc);
+		rc = (rc == 0) ? -ETIMEDOUT : rc;
+	} else /* success returns leftover jiffies; normalize to 0 */
+		rc = 0;
+
+	/* Disable Test Gen after reset*/
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->tpg_reg->csid_tpg_ctrl_addr);
+
+end:
+	return rc;
+
+}
+
+static int cam_ife_csid_cid_reserve(struct cam_ife_csid_hw *csid_hw,
+	struct cam_csid_hw_reserve_resource_args  *cid_reserv)
+{
+	int rc = 0;
+	struct cam_ife_csid_cid_data       *cid_data;
+
+	CDBG("%s:%d CSID:%d res_sel:%d Lane type:%d lane_num:%d dt:%d vc:%d\n",
+		__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+		cid_reserv->in_port->res_type,
+		cid_reserv->in_port->lane_type,
+		cid_reserv->in_port->lane_num,
+		cid_reserv->in_port->dt,
+		cid_reserv->in_port->vc);
+
+	if (cid_reserv->in_port->res_type >= CAM_ISP_IFE_IN_RES_MAX) {
+		pr_err("%s:%d:CSID:%d  Invalid phy sel %d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx,
+			cid_reserv->in_port->res_type);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (cid_reserv->in_port->lane_type >= CAM_ISP_LANE_TYPE_MAX &&
+		cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
+		pr_err("%s:%d:CSID:%d  Invalid lane type %d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx,
+			cid_reserv->in_port->lane_type);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if ((cid_reserv->in_port->lane_type ==  CAM_ISP_LANE_TYPE_DPHY &&
+		cid_reserv->in_port->lane_num > 4) &&
+		cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
+		pr_err("%s:%d:CSID:%d Invalid lane num %d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx,
+			cid_reserv->in_port->lane_num);
+		rc = -EINVAL;
+		goto end;
+	}
+	if ((cid_reserv->in_port->lane_type == CAM_ISP_LANE_TYPE_CPHY &&
+		cid_reserv->in_port->lane_num > 3) &&
+		cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
+		pr_err("%s:%d: CSID:%d Invalid lane type %d & num %d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			cid_reserv->in_port->lane_type,
+			cid_reserv->in_port->lane_num);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* CSID CSI2 v2.0 supports vc 0..0x1f and dt 0..0x3f */
+	if (cid_reserv->in_port->dt > 0x3f ||
+		cid_reserv->in_port->vc > 0x1f) {
+		pr_err("%s:%d:CSID:%d Invalid vc:%d dt %d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx,
+			cid_reserv->in_port->vc, cid_reserv->in_port->dt);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (cid_reserv->in_port->res_type == CAM_ISP_IFE_IN_RES_TPG &&
+		(cid_reserv->in_port->format < CAM_FORMAT_MIPI_RAW_8 ||
+		cid_reserv->in_port->format > CAM_FORMAT_MIPI_RAW_16)) {
+		pr_err("%s:%d: CSID:%d Invalid tpg decode fmt %d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			cid_reserv->in_port->format);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (csid_hw->csi2_reserve_cnt) {
+		/* current configure res type should match requested res type */
+		if (csid_hw->res_type != cid_reserv->in_port->res_type) {
+			rc = -EINVAL;
+			goto end;
+		}
+
+		if (cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
+			if (csid_hw->csi2_rx_cfg.lane_cfg !=
+				cid_reserv->in_port->lane_cfg  ||
+				csid_hw->csi2_rx_cfg.lane_type !=
+				cid_reserv->in_port->lane_type ||
+				csid_hw->csi2_rx_cfg.lane_num !=
+				cid_reserv->in_port->lane_num) {
+				rc = -EINVAL;
+				goto end;
+				}
+		} else {
+			if (csid_hw->tpg_cfg.decode_fmt !=
+				cid_reserv->in_port->format     ||
+				csid_hw->tpg_cfg.width !=
+				cid_reserv->in_port->left_width ||
+				csid_hw->tpg_cfg.height !=
+				cid_reserv->in_port->height     ||
+				csid_hw->tpg_cfg.test_pattern !=
+				cid_reserv->in_port->test_pattern) {
+				rc = -EINVAL;
+				goto end;
+			}
+		}
+	}
+
+	if (!csid_hw->csi2_reserve_cnt) {
+		csid_hw->res_type = cid_reserv->in_port->res_type;
+		/* Take the first CID resource*/
+		csid_hw->cid_res[0].res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+		cid_data = (struct cam_ife_csid_cid_data *)
+				csid_hw->cid_res[0].res_priv;
+
+		csid_hw->csi2_rx_cfg.lane_cfg =
+			cid_reserv->in_port->lane_cfg;
+		csid_hw->csi2_rx_cfg.lane_type =
+			cid_reserv->in_port->lane_type;
+		csid_hw->csi2_rx_cfg.lane_num =
+			cid_reserv->in_port->lane_num;
+
+		if (cid_reserv->in_port->res_type == CAM_ISP_IFE_IN_RES_TPG) {
+			csid_hw->csi2_rx_cfg.phy_sel = 0;
+			if (cid_reserv->in_port->format >
+			    CAM_FORMAT_MIPI_RAW_16) {
+				pr_err("%s:%d: Wrong TPG format\n", __func__,
+					__LINE__);
+				rc = -EINVAL;
+				goto end;
+			}
+			csid_hw->tpg_cfg.decode_fmt =
+				cid_reserv->in_port->format;
+			csid_hw->tpg_cfg.width =
+				cid_reserv->in_port->left_width;
+			csid_hw->tpg_cfg.height = cid_reserv->in_port->height;
+			csid_hw->tpg_cfg.test_pattern =
+				cid_reserv->in_port->test_pattern;
+			cid_data->tpg_set = 1;
+		} else {
+			csid_hw->csi2_rx_cfg.phy_sel =
+				(cid_reserv->in_port->res_type & 0xFF) - 1;
+		}
+
+		cid_data->vc = cid_reserv->in_port->vc;
+		cid_data->dt = cid_reserv->in_port->dt;
+		cid_data->cnt = 1;
+		cid_reserv->node_res = &csid_hw->cid_res[0];
+		csid_hw->csi2_reserve_cnt++;
+
+		CDBG("%s:%d:CSID:%d CID :%d resource acquired successfully\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			cid_reserv->node_res->res_id);
+	} else {
+		rc = cam_ife_csid_cid_get(csid_hw, &cid_reserv->node_res,
+			cid_reserv->in_port->vc, cid_reserv->in_port->dt,
+			cid_reserv->in_port->res_type);
+		/* if success then increment the reserve count */
+		if (!rc) {
+			if (csid_hw->csi2_reserve_cnt == UINT_MAX) {
+				pr_err("%s:%d:CSID%d reserve cnt reached max\n",
+					__func__, __LINE__,
+					csid_hw->hw_intf->hw_idx);
+				rc = -EINVAL;
+			} else {
+				csid_hw->csi2_reserve_cnt++;
+				CDBG("%s:%d:CSID:%d CID:%d acquired\n",
+					__func__, __LINE__,
+					csid_hw->hw_intf->hw_idx,
+					cid_reserv->node_res->res_id);
+			}
+		}
+	}
+
+end:
+	return rc;
+}
+
+
+/*
+ * cam_ife_csid_path_reserve() - reserve the IPP or an RDI pixel path
+ *
+ * Validates the requested vc/dt/sync mode, checks that the requested path
+ * resource node is still AVAILABLE, then fills its private path config
+ * (cid, decode format, crop window, master/slave sync role) and returns
+ * the node through reserve->node_res.
+ *
+ * Returns 0 on success, -EINVAL on any validation failure.
+ */
+static int cam_ife_csid_path_reserve(struct cam_ife_csid_hw *csid_hw,
+	struct cam_csid_hw_reserve_resource_args  *reserve)
+{
+	int rc = 0;
+	struct cam_ife_csid_path_cfg    *path_data;
+	struct cam_isp_resource_node    *res;
+
+	/* CSID  CSI2 v2.0 supports 31 vc */
+	if (reserve->in_port->dt > 0x3f || reserve->in_port->vc > 0x1f ||
+		(reserve->sync_mode >= CAM_ISP_HW_SYNC_MAX)) {
+		pr_err("%s:%d:CSID:%d Invalid vc:%d dt %d mode:%d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			reserve->in_port->vc, reserve->in_port->dt,
+			reserve->sync_mode);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* select the resource node for the requested path id */
+	switch (reserve->res_id) {
+	case CAM_IFE_PIX_PATH_RES_IPP:
+		if (csid_hw->ipp_res.res_state !=
+			CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+			CDBG("%s:%d:CSID:%d IPP resource not available %d\n",
+				__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+				csid_hw->ipp_res.res_state);
+			rc = -EINVAL;
+			goto end;
+		}
+
+		/* non-zero return from the helper means unsupported format */
+		if (cam_ife_csid_is_ipp_format_supported(
+				reserve->in_port->format)) {
+			pr_err("%s:%d:CSID:%d res id:%d un support format %d\n",
+				__func__, __LINE__,
+				csid_hw->hw_intf->hw_idx, reserve->res_id,
+				reserve->in_port->format);
+			rc = -EINVAL;
+			goto end;
+		}
+
+		/* assign the IPP resource */
+		res = &csid_hw->ipp_res;
+		CDBG("%s:%d:CSID:%d IPP resource:%d acquired successfully\n",
+			__func__, __LINE__,
+			csid_hw->hw_intf->hw_idx, res->res_id);
+
+			break;
+	case CAM_IFE_PIX_PATH_RES_RDI_0:
+	case CAM_IFE_PIX_PATH_RES_RDI_1:
+	case CAM_IFE_PIX_PATH_RES_RDI_2:
+	case CAM_IFE_PIX_PATH_RES_RDI_3:
+		if (csid_hw->rdi_res[reserve->res_id].res_state !=
+			CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+			CDBG("%s:%d:CSID:%d RDI:%d resource not available %d\n",
+				__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+				reserve->res_id,
+				csid_hw->rdi_res[reserve->res_id].res_state);
+			rc = -EINVAL;
+			goto end;
+		} else {
+			res = &csid_hw->rdi_res[reserve->res_id];
+			CDBG("%s:%d:CSID:%d RDI resource:%d acquire success\n",
+				__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+				res->res_id);
+		}
+
+		break;
+	default:
+		pr_err("%s:%d:CSID:%d Invalid res id:%d\n",
+			__func__, __LINE__,
+			csid_hw->hw_intf->hw_idx, reserve->res_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+	path_data = (struct cam_ife_csid_path_cfg   *)res->res_priv;
+
+	path_data->cid = reserve->cid;
+	path_data->decode_fmt = reserve->in_port->format;
+	path_data->master_idx = reserve->master_idx;
+	path_data->sync_mode = reserve->sync_mode;
+	path_data->height  = reserve->in_port->height;
+	path_data->start_line = reserve->in_port->line_start;
+	/* TPG input always streams on the fixed TPG vc/dt constants */
+	if (reserve->in_port->res_type == CAM_ISP_IFE_IN_RES_TPG) {
+		path_data->dt = CAM_IFE_CSID_TPG_DT_VAL;
+		path_data->vc = CAM_IFE_CSID_TPG_VC_VAL;
+	} else {
+		path_data->dt = reserve->in_port->dt;
+		path_data->vc = reserve->in_port->vc;
+	}
+
+	/*
+	 * Master crops the left half of the frame, slave the right half
+	 * (presumably the dual-IFE split case — confirm against callers);
+	 * no sync mode means no cropping.
+	 */
+	if (reserve->sync_mode == CAM_ISP_HW_SYNC_MASTER) {
+		path_data->crop_enable = 1;
+		path_data->start_pixel = reserve->in_port->left_start;
+		path_data->width  = reserve->in_port->left_width;
+	} else if (reserve->sync_mode == CAM_ISP_HW_SYNC_SLAVE) {
+		path_data->crop_enable = 1;
+		path_data->start_pixel = reserve->in_port->right_start;
+		path_data->width  = reserve->in_port->right_width;
+	} else
+		path_data->crop_enable = 0;
+
+	reserve->node_res = res;
+
+end:
+	return rc;
+}
+
+/*
+ * cam_ife_csid_enable_hw() - power up and initialize the CSID core
+ *
+ * Reference counted through hw_info->open_count: only the first caller
+ * enables SOC resources, resets the core and re-arms the top IRQ; later
+ * callers just take a reference and return immediately.
+ *
+ * Sequence for the first caller: enable SOC resources -> mark POWER_UP
+ * -> unmask top IRQ -> global reset -> SW register reset (polled via the
+ * top IRQ status, since the SW reset also clears the IRQ masks) -> clear
+ * all pending IRQs -> re-enable top IRQ -> log the HW version.
+ *
+ * Returns 0 on success, -EINVAL/-ETIMEDOUT on failure; the reference
+ * count is rolled back on the error paths.
+ */
+static int cam_ife_csid_enable_hw(struct cam_ife_csid_hw  *csid_hw)
+{
+	int rc = 0;
+	struct cam_ife_csid_reg_offset      *csid_reg;
+	struct cam_hw_soc_info              *soc_info;
+	uint32_t i, status, val;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	/* overflow check before increment */
+	if (csid_hw->hw_info->open_count == UINT_MAX) {
+		pr_err("%s:%d:CSID:%d Open count reached max\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx);
+		return -EINVAL;
+	}
+
+	/* Increment ref Count */
+	csid_hw->hw_info->open_count++;
+	if (csid_hw->hw_info->open_count > 1) {
+		CDBG("%s:%d: CSID hw has already been enabled\n",
+			__func__, __LINE__);
+		return rc;
+	}
+
+	CDBG("%s:%d:CSID:%d init CSID HW\n", __func__, __LINE__,
+		csid_hw->hw_intf->hw_idx);
+
+	rc = cam_ife_csid_enable_soc_resources(soc_info);
+	if (rc) {
+		pr_err("%s:%d:CSID:%d Enable SOC failed\n", __func__, __LINE__,
+			csid_hw->hw_intf->hw_idx);
+		goto err;
+	}
+
+
+	CDBG("%s:%d:CSID:%d enable top irq interrupt\n", __func__, __LINE__,
+		csid_hw->hw_intf->hw_idx);
+
+	csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_UP;
+	/* Enable the top IRQ interrupt */
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_mask_addr);
+
+	rc = cam_ife_csid_global_reset(csid_hw);
+	if (rc) {
+		pr_err("%s:%d CSID:%d csid_reset fail rc = %d\n",
+			 __func__, __LINE__, csid_hw->hw_intf->hw_idx, rc);
+		rc = -ETIMEDOUT;
+		goto disable_soc;
+	}
+
+	/*
+	 * Reset the SW registers
+	 * SW register reset also reset the mask irq, so poll the irq status
+	 * to check the reset complete.
+	 */
+	CDBG("%s:%d:CSID:%d Reset Software registers\n", __func__, __LINE__,
+			csid_hw->hw_intf->hw_idx);
+
+	cam_io_w_mb(csid_reg->cmn_reg->csid_rst_stb_sw_all,
+		soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_rst_strobes_addr);
+
+	/* bit 0 of the top IRQ status signals reset completion */
+	rc = readl_poll_timeout(soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_status_addr,
+			status, (status & 0x1) == 0x1,
+		CAM_IFE_CSID_TIMEOUT_SLEEP_US, CAM_IFE_CSID_TIMEOUT_ALL_US);
+	if (rc < 0) {
+		pr_err("%s:%d: software register reset timeout.....\n",
+			__func__, __LINE__);
+		rc = -ETIMEDOUT;
+		goto disable_soc;
+	}
+
+	/* clear all interrupts */
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_clear_addr);
+
+	cam_io_w_mb(csid_reg->csi2_reg->csi2_irq_mask_all,
+		soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_clear_addr);
+
+	if (csid_reg->cmn_reg->no_pix)
+		cam_io_w_mb(csid_reg->cmn_reg->ipp_irq_mask_all,
+			soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_irq_clear_addr);
+
+	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++)
+		cam_io_w_mb(csid_reg->cmn_reg->rdi_irq_mask_all,
+			soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[i]->csid_rdi_irq_clear_addr);
+
+	/* latch the IRQ clears via the IRQ command register */
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_irq_cmd_addr);
+
+	/* Enable the top IRQ interrupt */
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+			csid_reg->cmn_reg->csid_top_irq_mask_addr);
+
+	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->cmn_reg->csid_hw_version_addr);
+	CDBG("%s:%d:CSID:%d CSID HW version: 0x%x\n", __func__, __LINE__,
+		csid_hw->hw_intf->hw_idx, val);
+
+	return 0;
+
+disable_soc:
+	cam_ife_csid_disable_soc_resources(soc_info);
+	csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
+err:
+	csid_hw->hw_info->open_count--;
+	return rc;
+}
+
+/*
+ * cam_ife_csid_disable_hw() - drop one reference and power down the CSID
+ *
+ * Counterpart of cam_ife_csid_enable_hw(): only the last reference masks
+ * the top IRQ, disables SOC resources and marks the HW POWER_DOWN.
+ * Returns 0, or the error from disabling SOC resources.
+ */
+static int cam_ife_csid_disable_hw(struct cam_ife_csid_hw *csid_hw)
+{
+	int rc = 0;
+	struct cam_hw_soc_info             *soc_info;
+	struct cam_ife_csid_reg_offset     *csid_reg;
+
+
+	/*  Decrement ref Count */
+	if (csid_hw->hw_info->open_count)
+		csid_hw->hw_info->open_count--;
+	if (csid_hw->hw_info->open_count)
+		return rc;
+
+	soc_info = &csid_hw->hw_info->soc_info;
+	csid_reg = csid_hw->csid_info->csid_reg;
+
+	CDBG("%s:%d:CSID:%d De-init CSID HW\n", __func__, __LINE__,
+		csid_hw->hw_intf->hw_idx);
+
+	/*disable the top IRQ interrupt */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_mask_addr);
+
+	rc = cam_ife_csid_disable_soc_resources(soc_info);
+	if (rc)
+		pr_err("%s:%d:CSID:%d Disable CSID SOC failed\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx);
+
+	csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
+	return rc;
+}
+
+
+/*
+ * cam_ife_csid_tpg_start() - reference-counted start of the test
+ * pattern generator
+ *
+ * Only the first caller (tpg_start_cnt transitions 0 -> 1) programs the
+ * TPG control register; later callers just bump the count.  The inner
+ * braced block is a debug dump of the TPG/IPP/RX register windows and
+ * has no functional effect.  Always returns 0.
+ */
+static int cam_ife_csid_tpg_start(struct cam_ife_csid_hw   *csid_hw,
+	struct cam_isp_resource_node       *res)
+{
+	uint32_t  val = 0;
+	struct cam_hw_soc_info    *soc_info;
+
+	csid_hw->tpg_start_cnt++;
+	if (csid_hw->tpg_start_cnt == 1) {
+		/*Enable the TPG */
+		CDBG("%s:%d CSID:%d start CSID TPG\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx);
+
+		soc_info = &csid_hw->hw_info->soc_info;
+		{
+			/* NOTE(review): inner val shadows the outer val */
+			uint32_t val;
+			uint32_t i;
+			uint32_t base = 0x600;
+
+			CDBG("%s:%d: ================== TPG ===============\n",
+				__func__, __LINE__);
+			for (i = 0; i < 16; i++) {
+				val = cam_io_r_mb(
+					soc_info->reg_map[0].mem_base +
+					base + i * 4);
+				CDBG("%s:%d reg 0x%x = 0x%x\n",
+					__func__, __LINE__,
+					(base + i*4), val);
+			}
+
+			CDBG("%s:%d: ================== IPP ===============\n",
+				__func__, __LINE__);
+			base = 0x200;
+			for (i = 0; i < 10; i++) {
+				val = cam_io_r_mb(
+					soc_info->reg_map[0].mem_base +
+					base + i * 4);
+				CDBG("%s:%d reg 0x%x = 0x%x\n",
+					__func__, __LINE__,
+					(base + i*4), val);
+			}
+
+			CDBG("%s:%d: ================== RX ===============\n",
+				__func__, __LINE__);
+			base = 0x100;
+			for (i = 0; i < 5; i++) {
+				val = cam_io_r_mb(
+					soc_info->reg_map[0].mem_base +
+					base + i * 4);
+				CDBG("%s:%d reg 0x%x = 0x%x\n",
+					__func__, __LINE__,
+					(base + i*4), val);
+			}
+		}
+
+		CDBG("%s:%d: =============== TPG control ===============\n",
+			__func__, __LINE__);
+		/*
+		 * TPG control: lane count field is (lane_num - 1) in bits
+		 * [5:4]; the other magic fields (4 << 20, 0x80 << 8, low
+		 * enable bits 0x7) are per the TPG HW spec — verify there.
+		 */
+		val = (4 << 20);
+		val |= (0x80 << 8);
+		val |= (((csid_hw->csi2_rx_cfg.lane_num - 1) & 0x3) << 4);
+		val |= 7;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_hw->csid_info->csid_reg->tpg_reg->
+			csid_tpg_ctrl_addr);
+
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base + 0x600);
+		CDBG("%s:%d reg 0x%x = 0x%x\n", __func__, __LINE__,
+			0x600, val);
+	}
+
+	return 0;
+}
+
+/*
+ * cam_ife_csid_tpg_stop() - reference-counted stop of the test pattern
+ * generator
+ *
+ * The TPG is shared between paths: drop one reference and only touch
+ * the hardware when the last user goes away.  Always returns 0.
+ *
+ * Fix: the original re-checked !tpg_start_cnt before disabling, but
+ * that condition is always true after the early return above — the
+ * redundant test is removed.
+ */
+static int cam_ife_csid_tpg_stop(struct cam_ife_csid_hw   *csid_hw,
+	struct cam_isp_resource_node       *res)
+{
+	struct cam_hw_soc_info    *soc_info;
+
+	if (csid_hw->tpg_start_cnt)
+		csid_hw->tpg_start_cnt--;
+
+	/* other users still active: leave the TPG running */
+	if (csid_hw->tpg_start_cnt)
+		return 0;
+
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	CDBG("%s:%d CSID:%d stop CSID TPG\n", __func__,
+		__LINE__, csid_hw->hw_intf->hw_idx);
+
+	/* stop the TPG by clearing its control register */
+	cam_io_w_mb(0,  soc_info->reg_map[0].mem_base +
+	csid_hw->csid_info->csid_reg->tpg_reg->csid_tpg_ctrl_addr);
+
+	return 0;
+}
+
+
+/*
+ * cam_ife_csid_config_tpg() - program the TPG frame geometry and pattern
+ *
+ * Writes the TPG vc config (one DT, infinite frames), blanking counts,
+ * LFSR seed, frame width/height, DT value, decode format, color-bar
+ * rotate period and the requested test pattern from csid_hw->tpg_cfg.
+ * Always returns 0.
+ */
+static int cam_ife_csid_config_tpg(struct cam_ife_csid_hw   *csid_hw,
+	struct cam_isp_resource_node       *res)
+{
+	struct cam_ife_csid_reg_offset *csid_reg;
+	struct cam_hw_soc_info         *soc_info;
+	uint32_t val = 0;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	CDBG("%s:%d CSID:%d TPG config\n", __func__,
+		__LINE__, csid_hw->hw_intf->hw_idx);
+
+	/* configure one DT, infinite frames */
+	val = (0 << 16) | (1 << 10) | CAM_IFE_CSID_TPG_VC_VAL;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->tpg_reg->csid_tpg_vc_cfg0_addr);
+
+	/* vertical blanking count = 0x740, horzontal blanking count = 0x740*/
+	val = (0x740 << 12) | 0x740;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->tpg_reg->csid_tpg_vc_cfg1_addr);
+
+	/* arbitrary fixed seed for the pseudo-random pattern generator */
+	cam_io_w_mb(0x12345678, soc_info->reg_map[0].mem_base +
+		csid_hw->csid_info->csid_reg->tpg_reg->csid_tpg_lfsr_seed_addr);
+
+	val = csid_hw->tpg_cfg.width << 16 |
+		csid_hw->tpg_cfg.height;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->tpg_reg->csid_tpg_dt_n_cfg_0_addr);
+
+	cam_io_w_mb(CAM_IFE_CSID_TPG_DT_VAL, soc_info->reg_map[0].mem_base +
+		csid_reg->tpg_reg->csid_tpg_dt_n_cfg_1_addr);
+
+	/*
+	 * decode_fmt is the same as the input resource format.
+	 * it is one larger than the register spec format.
+	 */
+	val = ((csid_hw->tpg_cfg.decode_fmt - 1) << 16) | 0x8;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->tpg_reg->csid_tpg_dt_n_cfg_2_addr);
+
+	/* select rotate period as  5 frame */
+	val =  5 << 8;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->tpg_reg->csid_tpg_color_bars_cfg_addr);
+	/* config pix pattern */
+	cam_io_w_mb(csid_hw->tpg_cfg.test_pattern,
+		soc_info->reg_map[0].mem_base +
+		csid_reg->tpg_reg->csid_tpg_common_gen_cfg_addr);
+
+	return 0;
+}
+
+/*
+ * cam_ife_csid_enable_csi2() - configure and enable the CSI2 RX block
+ *
+ * Reference counted via csi2_cfg_cnt: only the first caller programs
+ * rx cfg0/cfg1, the TPG (for TPG input) and the RX error/IRQ mask;
+ * later callers just mark the resource STREAMING and take a reference.
+ *
+ * Returns 0 on success, -EINVAL on counter overflow, or the TPG config
+ * error (resource state rolled back to RESERVED in that case).
+ */
+static int cam_ife_csid_enable_csi2(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	int rc = 0;
+	struct cam_ife_csid_reg_offset       *csid_reg;
+	struct cam_hw_soc_info               *soc_info;
+	struct cam_ife_csid_cid_data         *cid_data;
+	uint32_t val = 0;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	CDBG("%s:%d CSID:%d count:%d config csi2 rx\n", __func__,
+		__LINE__, csid_hw->hw_intf->hw_idx, csid_hw->csi2_cfg_cnt);
+
+	/*
+	 * overflow check before increment
+	 * NOTE(review): message says "Open count" but this guards
+	 * csi2_cfg_cnt — looks like a copy-paste from enable_hw.
+	 */
+	if (csid_hw->csi2_cfg_cnt == UINT_MAX) {
+		pr_err("%s:%d:CSID:%d Open count reached max\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx);
+		return -EINVAL;
+	}
+
+	cid_data = (struct cam_ife_csid_cid_data *)res->res_priv;
+
+	res->res_state  = CAM_ISP_RESOURCE_STATE_STREAMING;
+	csid_hw->csi2_cfg_cnt++;
+	if (csid_hw->csi2_cfg_cnt > 1)
+		return rc;
+
+	/* rx cfg0: lane count (n-1), lane mapping, lane type, phy select */
+	val = (csid_hw->csi2_rx_cfg.lane_num - 1)  |
+		(csid_hw->csi2_rx_cfg.lane_cfg << 4) |
+		(csid_hw->csi2_rx_cfg.lane_type << 24);
+	val |= csid_hw->csi2_rx_cfg.phy_sel & 0x3;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_cfg0_addr);
+
+	/* rx cfg1*/
+	val = (1 << csid_reg->csi2_reg->csi2_misr_enable_shift_val);
+	/* if VC value is more than 3 than set full width of VC */
+	if (cid_data->vc > 3)
+		val |= (1 << csid_reg->csi2_reg->csi2_vc_mode_shift_val);
+
+	/* enable packet ecc correction */
+	val |= 1;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_cfg1_addr);
+
+	if (csid_hw->res_type == CAM_ISP_IFE_IN_RES_TPG) {
+		/* Config the TPG */
+		rc = cam_ife_csid_config_tpg(csid_hw, res);
+		if (rc) {
+			res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+			return rc;
+		}
+	}
+
+	/*Enable the CSI2 rx inerrupts */
+	val = CSID_CSI2_RX_INFO_RST_DONE |
+		CSID_CSI2_RX_ERROR_TG_FIFO_OVERFLOW |
+		CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW |
+		CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW |
+		CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW |
+		CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW |
+		CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION |
+		CSID_CSI2_RX_ERROR_CPHY_SOT_RECEPTION |
+		CSID_CSI2_RX_ERROR_CPHY_PH_CRC;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
+
+	return 0;
+}
+
+/*
+ * cam_ife_csid_disable_csi2() - drop one CSI2 RX reference
+ *
+ * Counterpart of cam_ife_csid_enable_csi2(): only when the last user
+ * drops its reference are the RX interrupts masked and the resource
+ * moved back to RESERVED.  Returns 0, or -EINVAL for a bad res id.
+ */
+static int cam_ife_csid_disable_csi2(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	struct cam_ife_csid_reg_offset      *csid_reg;
+	struct cam_hw_soc_info              *soc_info;
+
+	if (res->res_id >= CAM_IFE_CSID_CID_MAX) {
+		pr_err("%s:%d CSID:%d Invalid res id :%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+		return -EINVAL;
+	}
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	CDBG("%s:%d CSID:%d cnt : %d Disable csi2 rx\n", __func__,
+		__LINE__, csid_hw->hw_intf->hw_idx, csid_hw->csi2_cfg_cnt);
+
+	if (csid_hw->csi2_cfg_cnt)
+		csid_hw->csi2_cfg_cnt--;
+
+	/* other users still streaming: keep the RX block enabled */
+	if (csid_hw->csi2_cfg_cnt)
+		return 0;
+
+	/*Disable the CSI2 rx inerrupts */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+
+	return 0;
+}
+
+/*
+ * cam_ife_csid_init_config_ipp_path() - program the IPP path registers
+ *
+ * Writes vc/dt/cid/format and crop enables into cfg0, the crop window,
+ * frame/IRQ/pixel/line drop patterns (disabled: pattern 0, period 1),
+ * the halt mode (master/slave/internal) and finally sets the path
+ * enable bit.  Moves the resource to INIT_HW on success.
+ *
+ * Fixes two operator-precedence bugs in the original:
+ *  - "crop_enable & 1 << shift" parsed as "crop_enable & (1 << shift)",
+ *    so the crop enable bits were never set for non-zero shifts; the
+ *    intent is "(crop_enable & 1) << shift".
+ *  - "x & 0xFFFF << crop_shift" parsed as "x & (0xFFFF << crop_shift)",
+ *    corrupting the crop end-coordinate field; the intent is
+ *    "((x & 0xFFFF) << crop_shift)".
+ *
+ * Returns 0 on success, -EINVAL for missing IPP support or a format
+ * lookup failure.
+ */
+static int cam_ife_csid_init_config_ipp_path(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	int rc = 0;
+	struct cam_ife_csid_path_cfg           *path_data;
+	struct cam_ife_csid_reg_offset         *csid_reg;
+	struct cam_hw_soc_info                 *soc_info;
+	uint32_t path_format = 0, plain_format = 0, val = 0;
+
+	path_data = (struct cam_ife_csid_path_cfg  *) res->res_priv;
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	if (!csid_reg->ipp_reg) {
+		pr_err("%s:%d CSID:%d IPP:%d is not supported on HW\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_id);
+		return -EINVAL;
+	}
+
+	CDBG("%s:%d: Enabled IPP Path.......\n", __func__, __LINE__);
+	rc = cam_ife_csid_get_format(res->res_id,
+		path_data->decode_fmt, &path_format, &plain_format);
+	if (rc)
+		return rc;
+
+	/**
+	 * configure the IPP and enable the time stamp capture.
+	 * enable the HW measrurement blocks
+	 */
+	val = (path_data->vc << csid_reg->cmn_reg->vc_shift_val) |
+		(path_data->dt << csid_reg->cmn_reg->dt_shift_val) |
+		(path_data->cid << csid_reg->cmn_reg->dt_id_shift_val) |
+		(path_format << csid_reg->cmn_reg->fmt_shift_val) |
+		((path_data->crop_enable & 1) <<
+		csid_reg->cmn_reg->crop_h_en_shift_val) |
+		((path_data->crop_enable & 1) <<
+		csid_reg->cmn_reg->crop_v_en_shift_val) |
+		(1 << 1) | 1;
+	val |= (1 << csid_reg->ipp_reg->pix_store_en_shift_val);
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_cfg0_addr);
+
+	if (path_data->crop_enable) {
+		/* crop register: end coordinate in the high field, start
+		 * coordinate in the low 16 bits
+		 */
+		val = (((path_data->width +
+			path_data->start_pixel) & 0xFFFF) <<
+			csid_reg->cmn_reg->crop_shift) |
+			(path_data->start_pixel & 0xFFFF);
+
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_hcrop_addr);
+
+		val = (((path_data->height +
+			path_data->start_line) & 0xFFFF) <<
+			csid_reg->cmn_reg->crop_shift) |
+			(path_data->start_line & 0xFFFF);
+
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_vcrop_addr);
+	}
+
+	/* set frame drop pattern to 0 and period to 1 */
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_frm_drop_period_addr);
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_frm_drop_pattern_addr);
+	/* set irq sub sample pattern to 0 and period to 1 */
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_irq_subsample_period_addr);
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_irq_subsample_pattern_addr);
+	/* set pixel drop pattern to 0 and period to 1 */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_pix_drop_pattern_addr);
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_pix_drop_period_addr);
+	/* set line drop pattern to 0 and period to 1 */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_line_drop_pattern_addr);
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_line_drop_period_addr);
+
+	/*Set master or slave IPP */
+	if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER)
+		/*Set halt mode as master */
+		val = CSID_HALT_MODE_MASTER << 2;
+	else if (path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
+		/*Set halt mode as slave and set master idx */
+		val = path_data->master_idx  << 4 | CSID_HALT_MODE_SLAVE << 2;
+	else
+		/* Default is internal halt mode */
+		val = 0;
+
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_ctrl_addr);
+
+	/* Enable the IPP path */
+	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_cfg0_addr);
+	val |= (1 << csid_reg->cmn_reg->path_en_shift_val);
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_cfg0_addr);
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
+
+	return rc;
+}
+
+/*
+ * cam_ife_csid_deinit_ipp_path() - disable the IPP path enable bit and
+ * return the resource to RESERVED state
+ *
+ * Fix: the original set rc = -EINVAL on a wrong resource state or on a
+ * missing ipp_reg but then FELL THROUGH and dereferenced csid_reg->
+ * ipp_reg anyway — a NULL pointer dereference when the HW has no IPP.
+ * Both checks now return early without touching the hardware.
+ *
+ * Returns 0 on success, -EINVAL on validation failure.
+ */
+static int cam_ife_csid_deinit_ipp_path(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	struct cam_ife_csid_reg_offset      *csid_reg;
+	struct cam_hw_soc_info              *soc_info;
+	uint32_t val = 0;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW) {
+		pr_err("%s:%d:CSID:%d Res type %d res_id:%d in wrong state %d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_type, res->res_id, res->res_state);
+		return -EINVAL;
+	}
+
+	if (!csid_reg->ipp_reg) {
+		pr_err("%s:%d:CSID:%d IPP %d is not supported on HW\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_id);
+		return -EINVAL;
+	}
+
+	/* Disable the IPP path: clear the path enable bit in cfg0 */
+	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_cfg0_addr);
+	val &= ~(1 << csid_reg->cmn_reg->path_en_shift_val);
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_cfg0_addr);
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+	return 0;
+}
+
+/*
+ * cam_ife_csid_enable_ipp_path() - start streaming on the IPP path
+ *
+ * Resumes the path at the next frame boundary (master writes the resume
+ * bits read-modify-write into ctrl; a standalone path writes them
+ * directly; a slave resumes via its master and is not touched here),
+ * then unmasks the required IPP interrupts and marks the resource
+ * STREAMING.
+ *
+ * Returns 0 on success, -EINVAL for a wrong resource state or missing
+ * IPP support.
+ */
+static int cam_ife_csid_enable_ipp_path(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	struct cam_ife_csid_reg_offset    *csid_reg;
+	struct cam_hw_soc_info            *soc_info;
+	struct cam_ife_csid_path_cfg      *path_data;
+	uint32_t val = 0;
+
+	path_data = (struct cam_ife_csid_path_cfg   *) res->res_priv;
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW) {
+		pr_err("%s:%d:CSID:%d res type:%d res_id:%d Invalid state%d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_type, res->res_id, res->res_state);
+		return -EINVAL;
+	}
+
+	if (!csid_reg->ipp_reg) {
+		pr_err("%s:%d:CSID:%d IPP %d not supported on HW\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_id);
+		return -EINVAL;
+	}
+
+	CDBG("%s:%d: enable IPP path.......\n", __func__, __LINE__);
+
+	/*Resume at frame boundary */
+	if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER) {
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_ctrl_addr);
+		val |= CAM_CSID_RESUME_AT_FRAME_BOUNDARY;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_ctrl_addr);
+	} else if (path_data->sync_mode == CAM_ISP_HW_SYNC_NONE) {
+		cam_io_w_mb(CAM_CSID_RESUME_AT_FRAME_BOUNDARY,
+			soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_ctrl_addr);
+	}
+	/* for slave mode, not need to resume for slave device */
+
+	/* Enable the required ipp interrupts */
+	val = CSID_PATH_INFO_RST_DONE | CSID_PATH_ERROR_FIFO_OVERFLOW|
+		CSID_PATH_INFO_INPUT_SOF|CSID_PATH_INFO_INPUT_EOF;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
+	return 0;
+}
+
+/*
+ * cam_ife_csid_disable_ipp_path() - halt the IPP path
+ *
+ * Programs the requested halt command (at frame boundary or immediate)
+ * into the path ctrl register for master/standalone paths (a slave
+ * takes the halt from its master), then adjusts the IPP IRQ mask: for
+ * a frame-boundary halt the EOF interrupt is enabled so the halt can
+ * be waited on via csid_ipp_complete; for an immediate halt the
+ * RST_DONE/FIFO_OVERFLOW bits are cleared from the mask.
+ *
+ * Fix: in the immediate-halt branch the original did "val &= ~(...)"
+ * on a stale val (the ctrl-register value left over from the halt
+ * programming above, or 0) and wrote that to the IRQ mask register.
+ * The current mask is now read back before clearing bits.
+ *
+ * Returns 0 on success (including the already-stopped no-op case),
+ * -EINVAL on validation failure.
+ */
+static int cam_ife_csid_disable_ipp_path(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res,
+	enum cam_ife_csid_halt_cmd       stop_cmd)
+{
+	int rc = 0;
+	struct cam_ife_csid_reg_offset       *csid_reg;
+	struct cam_hw_soc_info               *soc_info;
+	struct cam_ife_csid_path_cfg         *path_data;
+	uint32_t val = 0;
+
+	path_data = (struct cam_ife_csid_path_cfg   *) res->res_priv;
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
+		CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+		return -EINVAL;
+	}
+
+	if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
+		res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CDBG("%s:%d:CSID:%d Res:%d already in stopped state:%d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_id, res->res_state);
+		return rc;
+	}
+
+	if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
+		CDBG("%s:%d:CSID:%d Res:%d Invalid state%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
+			res->res_state);
+		return -EINVAL;
+	}
+
+	if (!csid_reg->ipp_reg) {
+		pr_err("%s:%d:CSID:%d IPP%d is not supported on HW\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+		return -EINVAL;
+	}
+
+	if (stop_cmd != CAM_CSID_HALT_AT_FRAME_BOUNDARY &&
+		stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
+		pr_err("%s:%d:CSID:%d un supported stop command:%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, stop_cmd);
+		return -EINVAL;
+	}
+
+	CDBG("%s:%d CSID:%d res_id:%d\n", __func__, __LINE__,
+		csid_hw->hw_intf->hw_idx, res->res_id);
+
+	if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER) {
+		/* configure Halt: keep the rest of ctrl, replace the
+		 * low two halt-command bits
+		 */
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_ctrl_addr);
+		val &= ~0x3;
+		val |= stop_cmd;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_ctrl_addr);
+	} else if (path_data->sync_mode == CAM_ISP_HW_SYNC_NONE)
+		cam_io_w_mb(stop_cmd, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_ctrl_addr);
+
+	/* For slave mode, halt command should take it from master */
+
+	/* Enable the EOF interrupt for resume at boundary case */
+	if (stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
+		init_completion(&csid_hw->csid_ipp_complete);
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+				csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+		val |= CSID_PATH_INFO_INPUT_EOF;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+	} else {
+		/* read the current mask before clearing bits (the
+		 * original clobbered the mask with a stale value)
+		 */
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+				csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+		val &= ~(CSID_PATH_INFO_RST_DONE |
+				CSID_PATH_ERROR_FIFO_OVERFLOW);
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+	}
+
+	return rc;
+}
+
+
+/*
+ * cam_ife_csid_init_config_rdi_path() - program an RDI path's registers
+ *
+ * Writes vc/dt/cid/format/plain-format and crop enables into cfg0, the
+ * crop window, drop patterns (disabled: pattern 0, period 1), internal
+ * halt mode, and finally sets the path enable bit.  Moves the resource
+ * to INIT_HW on success.
+ *
+ * Fixes the same two operator-precedence bugs as the IPP init path:
+ * "(crop_enable & 1) << shift" and "((x & 0xFFFF) << crop_shift)" were
+ * originally parsed as "crop_enable & (1 << shift)" and
+ * "x & (0xFFFF << crop_shift)" respectively.
+ *
+ * Returns 0 on success, -EINVAL for missing RDI support or a format
+ * lookup failure.
+ */
+static int cam_ife_csid_init_config_rdi_path(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	int rc = 0;
+	struct cam_ife_csid_path_cfg           *path_data;
+	struct cam_ife_csid_reg_offset         *csid_reg;
+	struct cam_hw_soc_info                 *soc_info;
+	uint32_t path_format = 0, plain_fmt = 0, val = 0, id;
+
+	path_data = (struct cam_ife_csid_path_cfg   *) res->res_priv;
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	id = res->res_id;
+	if (!csid_reg->rdi_reg[id]) {
+		pr_err("%s:%d CSID:%d RDI:%d is not supported on HW\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx, id);
+		return -EINVAL;
+	}
+
+	rc = cam_ife_csid_get_format(res->res_id,
+		path_data->decode_fmt, &path_format, &plain_fmt);
+	if (rc)
+		return rc;
+
+	/**
+	 * RDI path config and enable the time stamp capture
+	 * Enable the measurement blocks
+	 */
+	val = (path_data->vc << csid_reg->cmn_reg->vc_shift_val) |
+		(path_data->dt << csid_reg->cmn_reg->dt_shift_val) |
+		(path_data->cid << csid_reg->cmn_reg->dt_id_shift_val) |
+		(path_format << csid_reg->cmn_reg->fmt_shift_val) |
+		(plain_fmt << csid_reg->cmn_reg->plain_fmt_shit_val) |
+		((path_data->crop_enable & 1) <<
+			csid_reg->cmn_reg->crop_h_en_shift_val) |
+		((path_data->crop_enable & 1) <<
+		csid_reg->cmn_reg->crop_v_en_shift_val) |
+		(1 << 2) | 3;
+
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+
+	if (path_data->crop_enable) {
+		/* crop register: end coordinate in the high field, start
+		 * coordinate in the low 16 bits
+		 */
+		val = (((path_data->width +
+			path_data->start_pixel) & 0xFFFF) <<
+			csid_reg->cmn_reg->crop_shift) |
+			(path_data->start_pixel & 0xFFFF);
+
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_rpp_hcrop_addr);
+
+		val = (((path_data->height +
+			path_data->start_line) & 0xFFFF) <<
+			csid_reg->cmn_reg->crop_shift) |
+			(path_data->start_line & 0xFFFF);
+
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_rpp_vcrop_addr);
+	}
+	/* set frame drop pattern to 0 and period to 1 */
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_frm_drop_period_addr);
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_frm_drop_pattern_addr);
+	/* set IRQ sum sabmple */
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_irq_subsample_period_addr);
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_irq_subsample_pattern_addr);
+
+	/* set pixel drop pattern to 0 and period to 1 */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_rpp_pix_drop_pattern_addr);
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_rpp_pix_drop_period_addr);
+	/* set line drop pattern to 0 and period to 1 */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_rpp_line_drop_pattern_addr);
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_rpp_line_drop_period_addr);
+
+	/* Configure the halt mode */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
+
+	/* Enable the RPP path */
+	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+	val |= (1 << csid_reg->cmn_reg->path_en_shift_val);
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
+
+	return rc;
+}
+
+/*
+ * cam_ife_csid_deinit_rdi_path() - disable an RDI path's enable bit and
+ * return the resource to RESERVED state
+ *
+ * Validates res id / state / register support in one check (returning
+ * early, so no NULL rdi_reg dereference), then clears the path enable
+ * bit in cfg0.  Returns 0 on success, -EINVAL on validation failure.
+ */
+static int cam_ife_csid_deinit_rdi_path(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	int rc = 0;
+	struct cam_ife_csid_reg_offset      *csid_reg;
+	struct cam_hw_soc_info              *soc_info;
+	uint32_t val = 0, id;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	id = res->res_id;
+
+	if (res->res_id > CAM_IFE_PIX_PATH_RES_RDI_3 ||
+		res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW ||
+		!csid_reg->rdi_reg[id]) {
+		pr_err("%s:%d:CSID:%d Invalid res id%d state:%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
+			res->res_state);
+		return -EINVAL;
+	}
+
+	/* Disable the RDI path */
+	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+	val &= ~(1 << csid_reg->cmn_reg->path_en_shift_val);
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+	return rc;
+}
+
+static int cam_ife_csid_enable_rdi_path(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	struct cam_ife_csid_reg_offset      *csid_reg;
+	struct cam_hw_soc_info              *soc_info;
+	uint32_t id, val;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	id = res->res_id;
+
+	if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW ||
+		res->res_id > CAM_IFE_PIX_PATH_RES_RDI_3 ||
+		!csid_reg->rdi_reg[id]) {
+		pr_err("%s:%d:CSID:%d invalid res type:%d res_id:%d state%d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_type, res->res_id, res->res_state);
+		return -EINVAL;
+	}
+
+	/* resume at frame boundary */
+	cam_io_w_mb(CAM_CSID_RESUME_AT_FRAME_BOUNDARY,
+			soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
+
+	/* Enable the required RDI interrupts */
+	val = (CSID_PATH_INFO_RST_DONE | CSID_PATH_ERROR_FIFO_OVERFLOW|
+		CSID_PATH_INFO_INPUT_SOF | CSID_PATH_INFO_INPUT_EOF);
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
+	return 0;
+}
+
+
+static int cam_ife_csid_disable_rdi_path(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res,
+	enum cam_ife_csid_halt_cmd                stop_cmd)
+{
+	int rc = 0;
+	struct cam_ife_csid_reg_offset       *csid_reg;
+	struct cam_hw_soc_info               *soc_info;
+	uint32_t  val = 0, id;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	id = res->res_id;
+
+	if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX ||
+		!csid_reg->rdi_reg[res->res_id]) {
+		CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+		return -EINVAL;
+	}
+
+	if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
+		res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CDBG("%s:%d:CSID:%d Res:%d already in stopped state:%d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_id, res->res_state);
+		return rc;
+	}
+
+	if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
+		CDBG("%s:%d:CSID:%d Res:%d Invalid res_state%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
+			res->res_state);
+		return -EINVAL;
+	}
+
+	if (stop_cmd != CAM_CSID_HALT_AT_FRAME_BOUNDARY &&
+		stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
+		pr_err("%s:%d:CSID:%d un supported stop command:%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, stop_cmd);
+		return -EINVAL;
+	}
+
+
+	CDBG("%s:%d CSID:%d res_id:%d\n", __func__, __LINE__,
+		csid_hw->hw_intf->hw_idx, res->res_id);
+
+	init_completion(&csid_hw->csid_rdin_complete[id]);
+
+	if (stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+		val |= CSID_PATH_INFO_INPUT_EOF;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+	} else {
+		val &= ~(CSID_PATH_INFO_RST_DONE |
+				CSID_PATH_ERROR_FIFO_OVERFLOW);
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+	}
+
+	/*Halt the RDI path */
+	cam_io_w_mb(stop_cmd, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
+
+	return rc;
+}
+
+static int cam_ife_csid_get_time_stamp(
+		struct cam_ife_csid_hw   *csid_hw, void *cmd_args)
+{
+	struct cam_csid_get_time_stamp_args  *time_stamp;
+	struct cam_isp_resource_node         *res;
+	struct cam_ife_csid_reg_offset       *csid_reg;
+	struct cam_hw_soc_info               *soc_info;
+	uint32_t  time_32, id;
+
+	time_stamp = (struct cam_csid_get_time_stamp_args  *)cmd_args;
+	res = time_stamp->node_res;
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH ||
+		res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
+		CDBG("%s:%d:CSID:%d Invalid res_type:%d res id%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		return -EINVAL;
+	}
+
+	if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+		pr_err("%s:%d:CSID:%d Invalid dev state :%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx,
+			csid_hw->hw_info->hw_state);
+		return -EINVAL;
+	}
+
+	if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
+		time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_timestamp_curr1_sof_addr);
+		time_stamp->time_stamp_val = time_32;
+		time_stamp->time_stamp_val = time_stamp->time_stamp_val << 32;
+		time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_timestamp_curr0_sof_addr);
+		time_stamp->time_stamp_val |= time_32;
+	} else {
+		id = res->res_id;
+		time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->
+			csid_rdi_timestamp_curr1_sof_addr);
+		time_stamp->time_stamp_val = time_32;
+		time_stamp->time_stamp_val = time_stamp->time_stamp_val << 32;
+
+		time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->
+			csid_rdi_timestamp_curr0_sof_addr);
+		time_stamp->time_stamp_val |= time_32;
+	}
+
+	return 0;
+}
+static int cam_ife_csid_res_wait_for_halt(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	int rc = 0;
+	struct cam_ife_csid_reg_offset      *csid_reg;
+	struct cam_hw_soc_info              *soc_info;
+
+	struct completion  *complete;
+	uint32_t val = 0, id;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
+		CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+		return -EINVAL;
+	}
+
+	if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
+		res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CDBG("%s:%d:CSID:%d Res:%d already in stopped state:%d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_id, res->res_state);
+		return rc;
+	}
+
+	if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
+		CDBG("%s:%d:CSID:%d Res:%d Invalid state%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
+			res->res_state);
+		return -EINVAL;
+	}
+
+	if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP)
+		complete = &csid_hw->csid_ipp_complete;
+	else
+		complete =  &csid_hw->csid_rdin_complete[res->res_id];
+
+	rc = wait_for_completion_timeout(complete,
+		msecs_to_jiffies(IFE_CSID_TIMEOUT));
+	if (rc <= 0) {
+		pr_err("%s:%d:CSID%d stop at frame boundary failid:%drc:%d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_id, rc);
+		if (rc == 0)
+			/* continue anyway so halt can proceed after timeout */
+			rc = -ETIMEDOUT;
+	}
+
+	/* Disable the interrupt */
+	if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+				csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+		val &= ~(CSID_PATH_INFO_INPUT_EOF | CSID_PATH_INFO_RST_DONE |
+				CSID_PATH_ERROR_FIFO_OVERFLOW);
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+	} else {
+		id = res->res_id;
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+		val &= ~(CSID_PATH_INFO_INPUT_EOF | CSID_PATH_INFO_RST_DONE |
+			CSID_PATH_ERROR_FIFO_OVERFLOW);
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+	}
+	/* set state to init HW */
+	res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
+	return rc;
+}
+
+static int cam_ife_csid_get_hw_caps(void *hw_priv,
+	void *get_hw_cap_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_ife_csid_hw_caps     *hw_caps;
+	struct cam_ife_csid_hw          *csid_hw;
+	struct cam_hw_info              *csid_hw_info;
+	struct cam_ife_csid_reg_offset  *csid_reg;
+
+	if (!hw_priv || !get_hw_cap_args) {
+		pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+	csid_reg = csid_hw->csid_info->csid_reg;
+	hw_caps = (struct cam_ife_csid_hw_caps *) get_hw_cap_args;
+
+	hw_caps->no_rdis = csid_reg->cmn_reg->no_rdis;
+	hw_caps->no_pix = csid_reg->cmn_reg->no_pix;
+	hw_caps->major_version = csid_reg->cmn_reg->major_version;
+	hw_caps->minor_version = csid_reg->cmn_reg->minor_version;
+	hw_caps->version_incr = csid_reg->cmn_reg->version_incr;
+
+	CDBG("%s:%d:CSID:%d No rdis:%d, no pix:%d, major:%d minor:%d ver :%d\n",
+		__func__, __LINE__, csid_hw->hw_intf->hw_idx, hw_caps->no_rdis,
+		hw_caps->no_pix, hw_caps->major_version, hw_caps->minor_version,
+		hw_caps->version_incr);
+
+	return rc;
+}
+
+static int cam_ife_csid_reset(void *hw_priv,
+	void *reset_args, uint32_t arg_size)
+{
+	struct cam_ife_csid_hw          *csid_hw;
+	struct cam_hw_info              *csid_hw_info;
+	struct cam_csid_reset_cfg_args  *reset;
+	int rc = 0;
+
+	if (!hw_priv || !reset_args || (arg_size !=
+		sizeof(struct cam_csid_reset_cfg_args))) {
+		pr_err("%s:%d:CSID:Invalid args\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+	reset   = (struct cam_csid_reset_cfg_args  *)reset_args;
+
+	switch (reset->reset_type) {
+	case CAM_IFE_CSID_RESET_GLOBAL:
+		rc = cam_ife_csid_global_reset(csid_hw);
+		break;
+	case CAM_IFE_CSID_RESET_PATH:
+		rc = cam_ife_csid_path_reset(csid_hw, reset);
+		break;
+	default:
+		pr_err("%s:%d:CSID:Invalid reset type :%d\n", __func__,
+			__LINE__, reset->reset_type);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+static int cam_ife_csid_reserve(void *hw_priv,
+	void *reserve_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_ife_csid_hw                    *csid_hw;
+	struct cam_hw_info                        *csid_hw_info;
+	struct cam_csid_hw_reserve_resource_args  *reserv;
+
+	if (!hw_priv || !reserve_args || (arg_size !=
+		sizeof(struct cam_csid_hw_reserve_resource_args))) {
+		pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+	reserv = (struct cam_csid_hw_reserve_resource_args  *)reserve_args;
+
+	mutex_lock(&csid_hw->hw_info->hw_mutex);
+	switch (reserv->res_type) {
+	case CAM_ISP_RESOURCE_CID:
+		rc = cam_ife_csid_cid_reserve(csid_hw, reserv);
+		break;
+	case CAM_ISP_RESOURCE_PIX_PATH:
+		rc = cam_ife_csid_path_reserve(csid_hw, reserv);
+		break;
+	default:
+		pr_err("%s:%d:CSID:%d Invalid res type :%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, reserv->res_type);
+		rc = -EINVAL;
+		break;
+	}
+	mutex_unlock(&csid_hw->hw_info->hw_mutex);
+	return rc;
+}
+
+static int cam_ife_csid_release(void *hw_priv,
+	void *release_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_ife_csid_hw          *csid_hw;
+	struct cam_hw_info              *csid_hw_info;
+	struct cam_isp_resource_node    *res;
+	struct cam_ife_csid_cid_data    *cid_data;
+
+	if (!hw_priv || !release_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+	res = (struct cam_isp_resource_node *)release_args;
+
+	mutex_lock(&csid_hw->hw_info->hw_mutex);
+	if ((res->res_type == CAM_ISP_RESOURCE_CID &&
+		res->res_id >= CAM_IFE_CSID_CID_MAX) ||
+		(res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
+		res->res_id >= CAM_IFE_PIX_PATH_RES_MAX)) {
+		pr_err("%s:%d:CSID:%d Invalid res type:%d res id%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+		CDBG("%s:%d:CSID:%d res type:%d Res %d  in released state\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_type, res->res_id);
+		goto end;
+	}
+
+	if (res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
+		res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CDBG("%s:%d:CSID:%d res type:%d Res id:%d invalid state:%d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_type, res->res_id, res->res_state);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	CDBG("%s:%d:CSID:%d res type :%d Resource id:%d\n", __func__, __LINE__,
+			csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
+
+	switch (res->res_type) {
+	case CAM_ISP_RESOURCE_CID:
+		cid_data = (struct cam_ife_csid_cid_data    *) res->res_priv;
+		if (cid_data->cnt)
+			cid_data->cnt--;
+
+		if (!cid_data->cnt)
+			res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+
+		if (csid_hw->csi2_reserve_cnt)
+			csid_hw->csi2_reserve_cnt--;
+
+		if (!csid_hw->csi2_reserve_cnt)
+			memset(&csid_hw->csi2_rx_cfg, 0,
+				sizeof(struct cam_ife_csid_csi2_rx_cfg));
+
+		CDBG("%s:%d:CSID:%d res id :%d cnt:%d reserv cnt:%d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_id, cid_data->cnt, csid_hw->csi2_reserve_cnt);
+
+		break;
+	case CAM_ISP_RESOURCE_PIX_PATH:
+		res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+		break;
+	default:
+		pr_err("%s:%d:CSID:%d Invalid res type:%d res id%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		rc = -EINVAL;
+		break;
+	}
+
+end:
+	mutex_unlock(&csid_hw->hw_info->hw_mutex);
+	return rc;
+}
+
+static int cam_ife_csid_init_hw(void *hw_priv,
+	void *init_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_ife_csid_hw                 *csid_hw;
+	struct cam_hw_info                     *csid_hw_info;
+	struct cam_isp_resource_node           *res;
+	struct cam_ife_csid_reg_offset         *csid_reg;
+
+	if (!hw_priv || !init_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+	res      = (struct cam_isp_resource_node *)init_args;
+	csid_reg = csid_hw->csid_info->csid_reg;
+
+	mutex_lock(&csid_hw->hw_info->hw_mutex);
+	if ((res->res_type == CAM_ISP_RESOURCE_CID &&
+		res->res_id >= CAM_IFE_CSID_CID_MAX) ||
+		(res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
+		res->res_id >= CAM_IFE_PIX_PATH_RES_MAX)) {
+		pr_err("%s:%d:CSID:%d Invalid res tpe:%d res id%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+
+	if ((res->res_type == CAM_ISP_RESOURCE_PIX_PATH) &&
+		(res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED)) {
+		pr_err("%s:%d:CSID:%d res type:%d res_id:%dInvalid state %d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_type, res->res_id, res->res_state);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	CDBG("%s:%d CSID:%d res type :%d res_id:%d\n", __func__, __LINE__,
+		csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
+
+
+	/* Initialize the csid hardware */
+	rc = cam_ife_csid_enable_hw(csid_hw);
+	if (rc)
+		goto end;
+
+	switch (res->res_type) {
+	case CAM_ISP_RESOURCE_CID:
+		rc = cam_ife_csid_enable_csi2(csid_hw, res);
+		break;
+	case CAM_ISP_RESOURCE_PIX_PATH:
+		if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP)
+			rc = cam_ife_csid_init_config_ipp_path(csid_hw, res);
+		else
+			rc = cam_ife_csid_init_config_rdi_path(csid_hw, res);
+
+		break;
+	default:
+		pr_err("%s:%d:CSID:%d Invalid res type state %d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_type);
+		break;
+	}
+
+	if (rc)
+		cam_ife_csid_disable_hw(csid_hw);
+end:
+	mutex_unlock(&csid_hw->hw_info->hw_mutex);
+	return rc;
+}
+
+static int cam_ife_csid_deinit_hw(void *hw_priv,
+	void *deinit_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_ife_csid_hw                 *csid_hw;
+	struct cam_hw_info                     *csid_hw_info;
+	struct cam_isp_resource_node           *res;
+
+	if (!hw_priv || !deinit_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		pr_err("%s:%d:CSID:Invalid arguments\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	res = (struct cam_isp_resource_node *)deinit_args;
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+
+	mutex_lock(&csid_hw->hw_info->hw_mutex);
+	if (res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CDBG("%s:%d:CSID:%d Res:%d already in De-init state\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_id);
+		goto end;
+	}
+
+	switch (res->res_type) {
+	case CAM_ISP_RESOURCE_CID:
+		rc = cam_ife_csid_disable_csi2(csid_hw, res);
+		break;
+	case CAM_ISP_RESOURCE_PIX_PATH:
+		if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP)
+			rc = cam_ife_csid_deinit_ipp_path(csid_hw, res);
+		else
+			rc = cam_ife_csid_deinit_rdi_path(csid_hw, res);
+
+		break;
+	default:
+		pr_err("%s:%d:CSID:%d Invalid Res type %d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_type);
+		goto end;
+	}
+
+	/* Disable CSID HW */
+	cam_ife_csid_disable_hw(csid_hw);
+
+end:
+	mutex_unlock(&csid_hw->hw_info->hw_mutex);
+	return rc;
+}
+
+static int cam_ife_csid_start(void *hw_priv, void *start_args,
+			uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_ife_csid_hw                 *csid_hw;
+	struct cam_hw_info                     *csid_hw_info;
+	struct cam_isp_resource_node           *res;
+	struct cam_ife_csid_reg_offset         *csid_reg;
+
+	if (!hw_priv || !start_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+	res = (struct cam_isp_resource_node *)start_args;
+	csid_reg = csid_hw->csid_info->csid_reg;
+
+	mutex_lock(&csid_hw->hw_info->hw_mutex);
+	if ((res->res_type == CAM_ISP_RESOURCE_CID &&
+		res->res_id >= CAM_IFE_CSID_CID_MAX) ||
+		(res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
+		res->res_id >= CAM_IFE_PIX_PATH_RES_MAX)) {
+		CDBG("%s:%d:CSID:%d Invalid res tpe:%d res id:%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	CDBG("%s:%d CSID:%d res_type :%d res_id:%d\n", __func__, __LINE__,
+		csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
+
+	switch (res->res_type) {
+	case CAM_ISP_RESOURCE_CID:
+		if (csid_hw->res_type ==  CAM_ISP_IFE_IN_RES_TPG)
+			rc = cam_ife_csid_tpg_start(csid_hw, res);
+		break;
+	case CAM_ISP_RESOURCE_PIX_PATH:
+		if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP)
+			rc = cam_ife_csid_enable_ipp_path(csid_hw, res);
+		else
+			rc = cam_ife_csid_enable_rdi_path(csid_hw, res);
+		break;
+	default:
+		pr_err("%s:%d:CSID:%d Invalid res type%d\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			res->res_type);
+		break;
+	}
+end:
+	mutex_unlock(&csid_hw->hw_info->hw_mutex);
+	return rc;
+}
+
+static int cam_ife_csid_stop(void *hw_priv,
+	void *stop_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_ife_csid_hw               *csid_hw;
+	struct cam_hw_info                   *csid_hw_info;
+	struct cam_isp_resource_node         *res;
+	struct cam_csid_hw_stop_args         *csid_stop;
+	uint32_t  i;
+
+	if (!hw_priv || !stop_args ||
+		(arg_size != sizeof(struct cam_csid_hw_stop_args))) {
+		pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+	csid_stop = (struct cam_csid_hw_stop_args  *) stop_args;
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+
+	mutex_lock(&csid_hw->hw_info->hw_mutex);
+	/* Stop the resource first */
+	for (i = 0; i < csid_stop->num_res; i++) {
+		res = csid_stop->node_res[i];
+		switch (res->res_type) {
+		case CAM_ISP_RESOURCE_CID:
+			if (csid_hw->res_type == CAM_ISP_IFE_IN_RES_TPG)
+				rc = cam_ife_csid_tpg_stop(csid_hw, res);
+			break;
+		case CAM_ISP_RESOURCE_PIX_PATH:
+			if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP)
+				rc = cam_ife_csid_disable_ipp_path(csid_hw,
+						res, csid_stop->stop_cmd);
+			else
+				rc = cam_ife_csid_disable_rdi_path(csid_hw,
+						res, csid_stop->stop_cmd);
+
+			break;
+		default:
+			pr_err("%s:%d:CSID:%d Invalid res type%d\n", __func__,
+				__LINE__, csid_hw->hw_intf->hw_idx,
+				res->res_type);
+			break;
+		}
+	}
+
+	/* wait for each stopped path to halt */
+	for (i = 0; i < csid_stop->num_res; i++) {
+		res = csid_stop->node_res[i];
+		if (res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
+			csid_stop->stop_cmd == CAM_CSID_HALT_AT_FRAME_BOUNDARY)
+			rc = cam_ife_csid_res_wait_for_halt(csid_hw, res);
+	}
+
+	mutex_unlock(&csid_hw->hw_info->hw_mutex);
+	return rc;
+
+}
+
+static int cam_ife_csid_read(void *hw_priv,
+	void *read_args, uint32_t arg_size)
+{
+	pr_err("%s:%d:CSID: un supported\n", __func__, __LINE__);
+
+	return -EINVAL;
+}
+
+static int cam_ife_csid_write(void *hw_priv,
+	void *write_args, uint32_t arg_size)
+{
+	pr_err("%s:%d:CSID: un supported\n", __func__, __LINE__);
+	return -EINVAL;
+}
+
+static int cam_ife_csid_process_cmd(void *hw_priv,
+	uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_ife_csid_hw               *csid_hw;
+	struct cam_hw_info                   *csid_hw_info;
+
+	if (!hw_priv || !cmd_args) {
+		pr_err("%s:%d:CSID: Invalid arguments\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+
+	mutex_lock(&csid_hw->hw_info->hw_mutex);
+	switch (cmd_type) {
+	case CAM_IFE_CSID_CMD_GET_TIME_STAMP:
+		rc = cam_ife_csid_get_time_stamp(csid_hw, cmd_args);
+		break;
+	default:
+		pr_err("%s:%d:CSID:%d un supported cmd:%d\n", __func__,
+			__LINE__, csid_hw->hw_intf->hw_idx, cmd_type);
+		rc = -EINVAL;
+		break;
+	}
+	mutex_unlock(&csid_hw->hw_info->hw_mutex);
+
+	return rc;
+
+}
+
+irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
+{
+	struct cam_ife_csid_hw          *csid_hw;
+	struct cam_hw_soc_info          *soc_info;
+	struct cam_ife_csid_reg_offset  *csid_reg;
+	uint32_t i, irq_status_top, irq_status_rx, irq_status_ipp = 0,
+		irq_status_rdi[4];
+
+	if (!data) {
+		pr_err("%s:%d:CSID: Invalid arguments\n", __func__, __LINE__);
+		return IRQ_HANDLED;
+	}
+
+	csid_hw = (struct cam_ife_csid_hw *)data;
+
+	CDBG("%s:%d:CSID %d IRQ Handling\n", __func__, __LINE__,
+		csid_hw->hw_intf->hw_idx);
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	/* read */
+	irq_status_top = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_status_addr);
+
+	irq_status_rx = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_status_addr);
+
+	if (csid_reg->cmn_reg->no_pix)
+		irq_status_ipp = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_irq_status_addr);
+
+
+	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++)
+		irq_status_rdi[i] = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[i]->csid_rdi_irq_status_addr);
+
+	/* clear */
+	cam_io_w_mb(irq_status_top, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_clear_addr);
+	cam_io_w_mb(irq_status_rx, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_clear_addr);
+	if (csid_reg->cmn_reg->no_pix)
+		cam_io_w_mb(irq_status_ipp, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_irq_clear_addr);
+
+	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++) {
+		cam_io_w_mb(irq_status_rdi[i], soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[i]->csid_rdi_irq_clear_addr);
+	}
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_irq_cmd_addr);
+
+	CDBG("%s:%d: irq_status_rx = 0x%x\n", __func__, __LINE__,
+		irq_status_rx);
+	CDBG("%s:%d: irq_status_ipp = 0x%x\n", __func__, __LINE__,
+		irq_status_ipp);
+
+	if (irq_status_top) {
+		CDBG("%s:%d: CSID global reset complete......Exit\n",
+			__func__, __LINE__);
+		complete(&csid_hw->csid_top_complete);
+		return IRQ_HANDLED;
+	}
+
+
+	if (irq_status_rx & BIT(csid_reg->csi2_reg->csi2_rst_done_shift_val)) {
+		CDBG("%s:%d: csi rx reset complete\n", __func__, __LINE__);
+		complete(&csid_hw->csid_csi2_complete);
+	}
+
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW) {
+		pr_err_ratelimited("%s:%d:CSID:%d lane 0 over flow\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+	}
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW) {
+		pr_err_ratelimited("%s:%d:CSID:%d lane 1 over flow\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+	}
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW) {
+		pr_err_ratelimited("%s:%d:CSID:%d lane 2 over flow\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+	}
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW) {
+		pr_err_ratelimited("%s:%d:CSID:%d lane 3 over flow\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+	}
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_TG_FIFO_OVERFLOW) {
+		pr_err_ratelimited("%s:%d:CSID:%d TG OVER  FLOW\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+	}
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION) {
+		pr_err_ratelimited("%s:%d:CSID:%d CPHY_EOT_RECEPTION\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+	}
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_SOT_RECEPTION) {
+		pr_err_ratelimited("%s:%d:CSID:%d CPHY_SOT_RECEPTION\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+	}
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_PH_CRC) {
+		pr_err_ratelimited("%s:%d:CSID:%d CPHY_PH_CRC\n",
+			__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+	}
+
+	/*read the IPP errors */
+	if (csid_reg->cmn_reg->no_pix) {
+		/* IPP reset done bit */
+		if (irq_status_ipp &
+			BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
+			CDBG("%s%d: CSID IPP reset complete\n",
+				__func__, __LINE__);
+			complete(&csid_hw->csid_ipp_complete);
+		}
+		if (irq_status_ipp & CSID_PATH_INFO_INPUT_SOF)
+			CDBG("%s: CSID IPP SOF received\n", __func__);
+		if (irq_status_ipp & CSID_PATH_INFO_INPUT_SOL)
+			CDBG("%s: CSID IPP SOL received\n", __func__);
+		if (irq_status_ipp & CSID_PATH_INFO_INPUT_EOL)
+			CDBG("%s: CSID IPP EOL received\n", __func__);
+		if (irq_status_ipp & CSID_PATH_INFO_INPUT_EOF)
+			CDBG("%s: CSID IPP EOF received\n", __func__);
+
+		if (irq_status_ipp & CSID_PATH_INFO_INPUT_EOF)
+			complete(&csid_hw->csid_ipp_complete);
+
+		if (irq_status_ipp & CSID_PATH_ERROR_FIFO_OVERFLOW) {
+			pr_err("%s:%d:CSID:%d IPP fifo over flow\n",
+				__func__, __LINE__,
+				csid_hw->hw_intf->hw_idx);
+			/*Stop IPP path immediately */
+			cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY,
+				soc_info->reg_map[0].mem_base +
+				csid_reg->ipp_reg->csid_ipp_ctrl_addr);
+		}
+	}
+
+	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++) {
+		if (irq_status_rdi[i] &
+			BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
+			CDBG("%s:%d: CSID rdi%d reset complete\n",
+				__func__, __LINE__, i);
+			complete(&csid_hw->csid_rdin_complete[i]);
+		}
+
+		if (irq_status_rdi[i] & CSID_PATH_INFO_INPUT_EOF)
+			complete(&csid_hw->csid_rdin_complete[i]);
+
+		if (irq_status_rdi[i] & CSID_PATH_ERROR_FIFO_OVERFLOW) {
+			pr_err("%s:%d:CSID:%d RDI fifo over flow\n",
+				__func__, __LINE__,
+				csid_hw->hw_intf->hw_idx);
+			/*Stop RDI path immediately */
+			cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY,
+				soc_info->reg_map[0].mem_base +
+				csid_reg->rdi_reg[i]->csid_rdi_ctrl_addr);
+		}
+	}
+
+	CDBG("%s:%d:IRQ Handling exit\n", __func__, __LINE__);
+	return IRQ_HANDLED;
+}
+
+int cam_ife_csid_hw_probe_init(struct cam_hw_intf  *csid_hw_intf,
+	uint32_t csid_idx)
+{
+	int rc = -EINVAL;
+	uint32_t i;
+	struct cam_ife_csid_path_cfg         *path_data;
+	struct cam_ife_csid_cid_data         *cid_data;
+	struct cam_hw_info                   *csid_hw_info;
+	struct cam_ife_csid_hw               *ife_csid_hw = NULL;
+
+	if (csid_idx >= CAM_IFE_CSID_HW_RES_MAX) {
+		pr_err("%s:%d: Invalid csid index:%d\n", __func__, __LINE__,
+			csid_idx);
+		return rc;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *) csid_hw_intf->hw_priv;
+	ife_csid_hw  = (struct cam_ife_csid_hw  *) csid_hw_info->core_info;
+
+	ife_csid_hw->hw_intf = csid_hw_intf;
+	ife_csid_hw->hw_info = csid_hw_info;
+
+	CDBG("%s:%d: type %d index %d\n", __func__, __LINE__,
+		ife_csid_hw->hw_intf->hw_type, csid_idx);
+
+
+	ife_csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
+	mutex_init(&ife_csid_hw->hw_info->hw_mutex);
+	spin_lock_init(&ife_csid_hw->hw_info->hw_lock);
+	init_completion(&ife_csid_hw->hw_info->hw_complete);
+
+	init_completion(&ife_csid_hw->csid_top_complete);
+	init_completion(&ife_csid_hw->csid_csi2_complete);
+	init_completion(&ife_csid_hw->csid_ipp_complete);
+	for (i = 0; i < CAM_IFE_CSID_RDI_MAX; i++)
+		init_completion(&ife_csid_hw->csid_rdin_complete[i]);
+
+
+	rc = cam_ife_csid_init_soc_resources(&ife_csid_hw->hw_info->soc_info,
+			cam_ife_csid_irq, ife_csid_hw);
+	if (rc < 0) {
+		pr_err("%s:%d:CSID:%d Failed to init_soc\n", __func__, __LINE__,
+			csid_idx);
+		goto err;
+	}
+
+	ife_csid_hw->hw_intf->hw_ops.get_hw_caps = cam_ife_csid_get_hw_caps;
+	ife_csid_hw->hw_intf->hw_ops.init        = cam_ife_csid_init_hw;
+	ife_csid_hw->hw_intf->hw_ops.deinit      = cam_ife_csid_deinit_hw;
+	ife_csid_hw->hw_intf->hw_ops.reset       = cam_ife_csid_reset;
+	ife_csid_hw->hw_intf->hw_ops.reserve     = cam_ife_csid_reserve;
+	ife_csid_hw->hw_intf->hw_ops.release     = cam_ife_csid_release;
+	ife_csid_hw->hw_intf->hw_ops.start       = cam_ife_csid_start;
+	ife_csid_hw->hw_intf->hw_ops.stop        = cam_ife_csid_stop;
+	ife_csid_hw->hw_intf->hw_ops.read        = cam_ife_csid_read;
+	ife_csid_hw->hw_intf->hw_ops.write       = cam_ife_csid_write;
+	ife_csid_hw->hw_intf->hw_ops.process_cmd = cam_ife_csid_process_cmd;
+
+	/* Initialize the CID resource */
+	for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++) {
+		ife_csid_hw->cid_res[i].res_type = CAM_ISP_RESOURCE_CID;
+		ife_csid_hw->cid_res[i].res_id = i;
+		ife_csid_hw->cid_res[i].res_state  =
+					CAM_ISP_RESOURCE_STATE_AVAILABLE;
+		ife_csid_hw->cid_res[i].hw_intf = ife_csid_hw->hw_intf;
+
+		cid_data = kzalloc(sizeof(struct cam_ife_csid_cid_data),
+					GFP_KERNEL);
+		if (!cid_data) {
+			rc = -ENOMEM;
+			goto err;
+		}
+		ife_csid_hw->cid_res[i].res_priv = cid_data;
+	}
+
+	/* Initialize the IPP resources */
+	if (ife_csid_hw->csid_info->csid_reg->cmn_reg->no_pix) {
+		ife_csid_hw->ipp_res.res_type = CAM_ISP_RESOURCE_PIX_PATH;
+		ife_csid_hw->ipp_res.res_id = CAM_IFE_PIX_PATH_RES_IPP;
+		ife_csid_hw->ipp_res.res_state =
+			CAM_ISP_RESOURCE_STATE_AVAILABLE;
+		ife_csid_hw->ipp_res.hw_intf = ife_csid_hw->hw_intf;
+		path_data = kzalloc(sizeof(struct cam_ife_csid_path_cfg),
+					GFP_KERNEL);
+		if (!path_data) {
+			rc = -ENOMEM;
+			goto err;
+		}
+		ife_csid_hw->ipp_res.res_priv = path_data;
+	}
+
+	/* Initialize the RDI resource */
+	for (i = 0; i < ife_csid_hw->csid_info->csid_reg->cmn_reg->no_rdis;
+				i++) {
+		/* res type is from RDI 0 to RDI3 */
+		ife_csid_hw->rdi_res[i].res_type =
+			CAM_ISP_RESOURCE_PIX_PATH;
+		ife_csid_hw->rdi_res[i].res_id = i;
+		ife_csid_hw->rdi_res[i].res_state =
+			CAM_ISP_RESOURCE_STATE_AVAILABLE;
+		ife_csid_hw->rdi_res[i].hw_intf = ife_csid_hw->hw_intf;
+
+		path_data = kzalloc(sizeof(struct cam_ife_csid_path_cfg),
+					GFP_KERNEL);
+		if (!path_data) {
+			rc = -ENOMEM;
+			goto err;
+		}
+		ife_csid_hw->rdi_res[i].res_priv = path_data;
+	}
+
+	return 0;
+err:
+	if (rc) {
+		kfree(ife_csid_hw->ipp_res.res_priv);
+		for (i = 0; i <
+			ife_csid_hw->csid_info->csid_reg->cmn_reg->no_rdis; i++)
+			kfree(ife_csid_hw->rdi_res[i].res_priv);
+
+		for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++)
+			kfree(ife_csid_hw->cid_res[i].res_priv);
+
+	}
+
+	return rc;
+}
+
+
+int cam_ife_csid_hw_deinit(struct cam_ife_csid_hw *ife_csid_hw)
+{
+	int rc = -EINVAL;
+	uint32_t i;
+
+	if (!ife_csid_hw) {
+		pr_err("%s:%d: Invalid param\n", __func__, __LINE__);
+		return rc;
+	}
+
+	/* release the private data memory held by the resources */
+	kfree(ife_csid_hw->ipp_res.res_priv);
+	for (i = 0; i <
+		ife_csid_hw->csid_info->csid_reg->cmn_reg->no_rdis;
+		i++) {
+		kfree(ife_csid_hw->rdi_res[i].res_priv);
+	}
+	for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++)
+		kfree(ife_csid_hw->cid_res[i].res_priv);
+
+
+	return 0;
+}
+
+
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
new file mode 100644
index 0000000..d36c576
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
@@ -0,0 +1,419 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_IFE_CSID_HW_H_
+#define _CAM_IFE_CSID_HW_H_
+
+#include "cam_hw.h"
+#include "cam_ife_csid_hw_intf.h"
+#include "cam_ife_csid_soc.h"
+
+#define CAM_IFE_CSID_HW_RES_MAX      4
+#define CAM_IFE_CSID_CID_RES_MAX     4
+#define CAM_IFE_CSID_RDI_MAX         4
+
+#define CSID_CSI2_RX_INFO_PHY_DL0_EOT_CAPTURED    BIT(0)
+#define CSID_CSI2_RX_NFO_PHY_DL1_EOT_CAPTURED     BIT(1)
+#define CSID_CSI2_RX_INFO_PHY_DL2_EOT_CAPTURED    BIT(2)
+#define CSID_CSI2_RX_INFO_PHY_DL3_EOT_CAPTURED    BIT(3)
+#define CSID_CSI2_RX_INFO_PHY_DL0_SOT_CAPTURED    BIT(4)
+#define CSID_CSI2_RX_INFO_PHY_DL1_SOT_CAPTURED    BIT(5)
+#define CSID_CSI2_RX_INFO_PHY_DL2_SOT_CAPTURED    BIT(6)
+#define CSID_CSI2_RX_INFO_PHY_DL3_SOT_CAPTURED    BIT(7)
+#define CSID_CSI2_RX_INFO_LONG_PKT_CAPTURED       BIT(8)
+#define CSID_CSI2_RX_INFO_SHORT_PKT_CAPTURED      BIT(9)
+#define CSID_CSI2_RX_INFO_CPHY_PKT_HDR_CAPTURED   BIT(10)
+#define CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION     BIT(11)
+#define CSID_CSI2_RX_ERROR_CPHY_SOT_RECEPTION     BIT(12)
+#define CSID_CSI2_RX_ERROR_CPHY_PH_CRC            BIT(13)
+#define CSID_CSI2_RX_WARNING_ECC                  BIT(14)
+#define CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW    BIT(15)
+#define CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW    BIT(16)
+#define CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW    BIT(17)
+#define CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW    BIT(18)
+#define CSID_CSI2_RX_ERROR_CRC                    BIT(19)
+#define CSID_CSI2_RX_ERROR_ECC                    BIT(20)
+#define CSID_CSI2_RX_ERROR_MMAPPED_VC_DT          BIT(21)
+#define CSID_CSI2_RX_ERROR_UNMAPPED_VC_DT         BIT(22)
+#define CSID_CSI2_RX_ERROR_STREAM_UNDERFLOW       BIT(23)
+#define CSID_CSI2_RX_ERROR_UNBOUNDED_FRAME        BIT(24)
+#define CSID_CSI2_RX_INFO_TG_DONE                 BIT(25)
+#define CSID_CSI2_RX_ERROR_TG_FIFO_OVERFLOW       BIT(26)
+#define CSID_CSI2_RX_INFO_RST_DONE                BIT(27)
+
+#define CSID_PATH_INFO_RST_DONE                   BIT(1)
+#define CSID_PATH_ERROR_FIFO_OVERFLOW             BIT(2)
+#define CSID_PATH_INFO_SUBSAMPLED_EOF             BIT(3)
+#define CSID_PATH_INFO_SUBSAMPLED_SOF             BIT(4)
+#define CSID_PATH_INFO_FRAME_DROP_EOF             BIT(5)
+#define CSID_PATH_INFO_FRAME_DROP_EOL             BIT(6)
+#define CSID_PATH_INFO_FRAME_DROP_SOL             BIT(7)
+#define CSID_PATH_INFO_FRAME_DROP_SOF             BIT(8)
+#define CSID_PATH_INFO_INPUT_EOF                  BIT(9)
+#define CSID_PATH_INFO_INPUT_EOL                  BIT(10)
+#define CSID_PATH_INFO_INPUT_SOL                  BIT(11)
+#define CSID_PATH_INFO_INPUT_SOF                  BIT(12)
+#define CSID_PATH_ERROR_PIX_COUNT                 BIT(13)
+#define CSID_PATH_ERROR_LINE_COUNT                BIT(14)
+
+enum cam_csid_path_halt_mode {
+	CSID_HALT_MODE_INTERNAL,
+	CSID_HALT_MODE_GLOBAL,
+	CSID_HALT_MODE_MASTER,
+	CSID_HALT_MODE_SLAVE,
+};
+
+
+struct cam_ife_csid_ipp_reg_offset {
+	/*Image pixel path register offsets*/
+	uint32_t csid_ipp_irq_status_addr;
+	uint32_t csid_ipp_irq_mask_addr;
+	uint32_t csid_ipp_irq_clear_addr;
+	uint32_t csid_ipp_irq_set_addr;
+
+	uint32_t csid_ipp_cfg0_addr;
+	uint32_t csid_ipp_cfg1_addr;
+	uint32_t csid_ipp_ctrl_addr;
+	uint32_t csid_ipp_frm_drop_pattern_addr;
+	uint32_t csid_ipp_frm_drop_period_addr;
+	uint32_t csid_ipp_irq_subsample_pattern_addr;
+	uint32_t csid_ipp_irq_subsample_period_addr;
+	uint32_t csid_ipp_hcrop_addr;
+	uint32_t csid_ipp_vcrop_addr;
+	uint32_t csid_ipp_pix_drop_pattern_addr;
+	uint32_t csid_ipp_pix_drop_period_addr;
+	uint32_t csid_ipp_line_drop_pattern_addr;
+	uint32_t csid_ipp_line_drop_period_addr;
+	uint32_t csid_ipp_rst_strobes_addr;
+	uint32_t csid_ipp_status_addr;
+	uint32_t csid_ipp_misr_val_addr;
+	uint32_t csid_ipp_format_measure_cfg0_addr;
+	uint32_t csid_ipp_format_measure_cfg1_addr;
+	uint32_t csid_ipp_format_measure0_addr;
+	uint32_t csid_ipp_format_measure1_addr;
+	uint32_t csid_ipp_format_measure2_addr;
+	uint32_t csid_ipp_timestamp_curr0_sof_addr;
+	uint32_t csid_ipp_timestamp_curr1_sof_addr;
+	uint32_t csid_ipp_timestamp_perv0_sof_addr;
+	uint32_t csid_ipp_timestamp_perv1_sof_addr;
+	uint32_t csid_ipp_timestamp_curr0_eof_addr;
+	uint32_t csid_ipp_timestamp_curr1_eof_addr;
+	uint32_t csid_ipp_timestamp_perv0_eof_addr;
+	uint32_t csid_ipp_timestamp_perv1_eof_addr;
+
+	/* configuration */
+	uint32_t  pix_store_en_shift_val;
+};
+
+struct cam_ife_csid_rdi_reg_offset {
+	uint32_t csid_rdi_irq_status_addr;
+	uint32_t csid_rdi_irq_mask_addr;
+	uint32_t csid_rdi_irq_clear_addr;
+	uint32_t csid_rdi_irq_set_addr;
+
+	/*RDI N register address */
+	uint32_t csid_rdi_cfg0_addr;
+	uint32_t csid_rdi_cfg1_addr;
+	uint32_t csid_rdi_ctrl_addr;
+	uint32_t csid_rdi_frm_drop_pattern_addr;
+	uint32_t csid_rdi_frm_drop_period_addr;
+	uint32_t csid_rdi_irq_subsample_pattern_addr;
+	uint32_t csid_rdi_irq_subsample_period_addr;
+	uint32_t csid_rdi_rpp_hcrop_addr;
+	uint32_t csid_rdi_rpp_vcrop_addr;
+	uint32_t csid_rdi_rpp_pix_drop_pattern_addr;
+	uint32_t csid_rdi_rpp_pix_drop_period_addr;
+	uint32_t csid_rdi_rpp_line_drop_pattern_addr;
+	uint32_t csid_rdi_rpp_line_drop_period_addr;
+	uint32_t csid_rdi_yuv_chroma_conversion_addr;
+	uint32_t csid_rdi_rst_strobes_addr;
+	uint32_t csid_rdi_status_addr;
+	uint32_t csid_rdi_misr_val0_addr;
+	uint32_t csid_rdi_misr_val1_addr;
+	uint32_t csid_rdi_misr_val2_addr;
+	uint32_t csid_rdi_misr_val3_addr;
+	uint32_t csid_rdi_format_measure_cfg0_addr;
+	uint32_t csid_rdi_format_measure_cfg1_addr;
+	uint32_t csid_rdi_format_measure0_addr;
+	uint32_t csid_rdi_format_measure1_addr;
+	uint32_t csid_rdi_format_measure2_addr;
+	uint32_t csid_rdi_timestamp_curr0_sof_addr;
+	uint32_t csid_rdi_timestamp_curr1_sof_addr;
+	uint32_t csid_rdi_timestamp_prev0_sof_addr;
+	uint32_t csid_rdi_timestamp_prev1_sof_addr;
+	uint32_t csid_rdi_timestamp_curr0_eof_addr;
+	uint32_t csid_rdi_timestamp_curr1_eof_addr;
+	uint32_t csid_rdi_timestamp_prev0_eof_addr;
+	uint32_t csid_rdi_timestamp_prev1_eof_addr;
+	uint32_t csid_rdi_byte_cntr_ping_addr;
+	uint32_t csid_rdi_byte_cntr_pong_addr;
+};
+
+struct cam_ife_csid_csi2_rx_reg_offset {
+	uint32_t csid_csi2_rx_irq_status_addr;
+	uint32_t csid_csi2_rx_irq_mask_addr;
+	uint32_t csid_csi2_rx_irq_clear_addr;
+	uint32_t csid_csi2_rx_irq_set_addr;
+	uint32_t csid_csi2_rx_cfg0_addr;
+	uint32_t csid_csi2_rx_cfg1_addr;
+	uint32_t csid_csi2_rx_capture_ctrl_addr;
+	uint32_t csid_csi2_rx_rst_strobes_addr;
+	uint32_t csid_csi2_rx_de_scramble_cfg0_addr;
+	uint32_t csid_csi2_rx_de_scramble_cfg1_addr;
+	uint32_t csid_csi2_rx_cap_unmap_long_pkt_hdr_0_addr;
+	uint32_t csid_csi2_rx_cap_unmap_long_pkt_hdr_1_addr;
+	uint32_t csid_csi2_rx_captured_short_pkt_0_addr;
+	uint32_t csid_csi2_rx_captured_short_pkt_1_addr;
+	uint32_t csid_csi2_rx_captured_long_pkt_0_addr;
+	uint32_t csid_csi2_rx_captured_long_pkt_1_addr;
+	uint32_t csid_csi2_rx_captured_long_pkt_ftr_addr;
+	uint32_t csid_csi2_rx_captured_cphy_pkt_ftr_addr;
+	uint32_t csid_csi2_rx_lane0_misr_addr;
+	uint32_t csid_csi2_rx_lane1_misr_addr;
+	uint32_t csid_csi2_rx_lane2_misr_addr;
+	uint32_t csid_csi2_rx_lane3_misr_addr;
+	uint32_t csid_csi2_rx_total_pkts_rcvd_addr;
+	uint32_t csid_csi2_rx_stats_ecc_addr;
+	uint32_t csid_csi2_rx_total_crc_err_addr;
+
+	/*configurations */
+	uint32_t csi2_rst_srb_all;
+	uint32_t csi2_rst_done_shift_val;
+	uint32_t csi2_irq_mask_all;
+	uint32_t csi2_misr_enable_shift_val;
+	uint32_t csi2_vc_mode_shift_val;
+};
+
+struct cam_ife_csid_csi2_tpg_reg_offset {
+	uint32_t csid_tpg_ctrl_addr;
+	uint32_t csid_tpg_vc_cfg0_addr;
+	uint32_t csid_tpg_vc_cfg1_addr;
+	uint32_t csid_tpg_lfsr_seed_addr;
+	uint32_t csid_tpg_dt_n_cfg_0_addr;
+	uint32_t csid_tpg_dt_n_cfg_1_addr;
+	uint32_t csid_tpg_dt_n_cfg_2_addr;
+	uint32_t csid_tpg_color_bars_cfg_addr;
+	uint32_t csid_tpg_color_box_cfg_addr;
+	uint32_t csid_tpg_common_gen_cfg_addr;
+	uint32_t csid_tpg_cgen_n_cfg_addr;
+	uint32_t csid_tpg_cgen_n_x0_addr;
+	uint32_t csid_tpg_cgen_n_x1_addr;
+	uint32_t csid_tpg_cgen_n_x2_addr;
+	uint32_t csid_tpg_cgen_n_xy_addr;
+	uint32_t csid_tpg_cgen_n_y1_addr;
+	uint32_t csid_tpg_cgen_n_y2_addr;
+
+	/*configurations */
+	uint32_t tpg_dtn_cfg_offset;
+	uint32_t tpg_cgen_cfg_offset;
+
+};
+struct cam_ife_csid_common_reg_offset {
+	/* MIPI CSID registers */
+	uint32_t csid_hw_version_addr;
+	uint32_t csid_cfg0_addr;
+	uint32_t csid_ctrl_addr;
+	uint32_t csid_reset_addr;
+	uint32_t csid_rst_strobes_addr;
+
+	uint32_t csid_test_bus_ctrl_addr;
+	uint32_t csid_top_irq_status_addr;
+	uint32_t csid_top_irq_mask_addr;
+	uint32_t csid_top_irq_clear_addr;
+	uint32_t csid_top_irq_set_addr;
+	uint32_t csid_irq_cmd_addr;
+
+	/*configurations */
+	uint32_t major_version;
+	uint32_t minor_version;
+	uint32_t version_incr;
+	uint32_t no_rdis;
+	uint32_t no_pix;
+	uint32_t csid_rst_stb;
+	uint32_t csid_rst_stb_sw_all;
+	uint32_t path_rst_stb_all;
+	uint32_t path_rst_done_shift_val;
+	uint32_t path_en_shift_val;
+	uint32_t dt_id_shift_val;
+	uint32_t vc_shift_val;
+	uint32_t dt_shift_val;
+	uint32_t fmt_shift_val;
+	uint32_t plain_fmt_shit_val;
+	uint32_t crop_v_en_shift_val;
+	uint32_t crop_h_en_shift_val;
+	uint32_t crop_shift;
+	uint32_t ipp_irq_mask_all;
+	uint32_t rdi_irq_mask_all;
+};
+
+/**
+ * struct cam_ife_csid_reg_offset- CSID instance register info
+ *
+ * @cmn_reg:  csid common registers info
+ * @ipp_reg:  ipp register offset information
+ * @rdi_reg:  rdi register offset information
+ *
+ */
+struct cam_ife_csid_reg_offset {
+	struct cam_ife_csid_common_reg_offset   *cmn_reg;
+	struct cam_ife_csid_csi2_rx_reg_offset  *csi2_reg;
+	struct cam_ife_csid_ipp_reg_offset      *ipp_reg;
+	struct cam_ife_csid_rdi_reg_offset      *rdi_reg[CAM_IFE_CSID_RDI_MAX];
+	struct cam_ife_csid_csi2_tpg_reg_offset *tpg_reg;
+};
+
+
+/**
+ * struct cam_ife_csid_hw_info- CSID HW info
+ *
+ * @csid_reg:        csid register offsets
+ * @hw_dts_version:  HW DTS version
+ * @csid_max_clk:    maximum csid clock
+ *
+ */
+struct cam_ife_csid_hw_info {
+	struct cam_ife_csid_reg_offset      *csid_reg;
+	uint32_t                             hw_dts_version;
+	uint32_t                             csid_max_clk;
+
+};
+
+
+
+/**
+ * struct cam_ife_csid_csi2_rx_cfg- csid csi2 rx configuration data
+ * @phy_sel:     input resource type for sensor only
+ * @lane_type:   lane type: c-phy or d-phy
+ * @lane_num :   active lane number
+ * @lane_cfg:    lane configurations: 4 bits per lane
+ *
+ */
+struct cam_ife_csid_csi2_rx_cfg  {
+	uint32_t                        phy_sel;
+	uint32_t                        lane_type;
+	uint32_t                        lane_num;
+	uint32_t                        lane_cfg;
+};
+
+/**
+ * struct             cam_ife_csid_tpg_cfg- csid tpg configuration data
+ * @width:            width
+ * @height:           height
+ * @test_pattern:     pattern
+ * @decode_fmt:       decode format
+ *
+ */
+struct cam_ife_csid_tpg_cfg  {
+	uint32_t                        width;
+	uint32_t                        height;
+	uint32_t                        test_pattern;
+	uint32_t                        decode_fmt;
+};
+
+/**
+ * struct cam_ife_csid_cid_data- cid configuration private data
+ *
+ * @vc:      virtual channel
+ * @dt:      Data type
+ * @cnt:     cid resource reference count.
+ * @tpg_set: tpg used for this cid resource
+ *
+ */
+struct cam_ife_csid_cid_data {
+	uint32_t                     vc;
+	uint32_t                     dt;
+	uint32_t                     cnt;
+	uint32_t                     tpg_set;
+};
+
+
+/**
+ * struct cam_ife_csid_path_cfg- csid path configuration details. It is stored
+ *                          as private data for IPP/ RDI paths
+ * @vc:             Virtual channel number
+ * @dt:             Data type number
+ * @cid:            cid number, it is same as DT_ID number in HW
+ * @decode_fmt:     input decode format
+ * @crop_enable:    crop is enable or disabled, if enabled
+ *                  then remaining parameters are valid.
+ * @start_pixel:    start pixel
+ * @width:          width
+ * @start_line:     start line
+ * @height:         height
+ * @sync_mode:       Applicable for IPP/RDI path reservation
+ *                  Reserving the path for master IPP or slave IPP
+ *                  master (set value 1), Slave ( set value 2)
+ *                  for RDI, set  mode to none
+ * @master_idx:     For Slave reservation, Give master IFE instance Index.
+ *                  Slave will synchronize with master Start and stop operations
+ *
+ */
+struct cam_ife_csid_path_cfg {
+	uint32_t                        vc;
+	uint32_t                        dt;
+	uint32_t                        cid;
+	uint32_t                        decode_fmt;
+	bool                            crop_enable;
+	uint32_t                        start_pixel;
+	uint32_t                        width;
+	uint32_t                        start_line;
+	uint32_t                        height;
+	enum cam_isp_hw_sync_mode       sync_mode;
+	uint32_t                        master_idx;
+};
+
+/**
+ * struct cam_ife_csid_hw- csid hw device resources data
+ *
+ * @hw_intf:                  contain the csid hw interface information
+ * @hw_info:                  csid hw device information
+ * @csid_info:                csid hw specific information
+ * @res_type:                 CSID in resource type
+ * @csi2_rx_cfg:              Csi2 rx decoder configuration for csid
+ * @tpg_cfg:                  TPG configuration
+ * @csi2_reserve_cnt:         CSI2 reservations count value
+ * @csi2_cfg_cnt:             csi2 configuration count
+ * @tpg_start_cnt:            tpg start count
+ * @ipp_res:                  image pixel path resource
+ * @rdi_res:                  raw dump image path resources
+ * @cid_res:                  cid resources state
+ * @csid_top_complete:        csid top reset completion
+ * @csid_csi2_complete:       csi2 reset completion
+ * @csid_ipp_complete:        ipp reset completion
+ * @csid_rdin_complete:       rdi n completion
+ *
+ */
+struct cam_ife_csid_hw {
+	struct cam_hw_intf              *hw_intf;
+	struct cam_hw_info              *hw_info;
+	struct cam_ife_csid_hw_info     *csid_info;
+	uint32_t                         res_type;
+	struct cam_ife_csid_csi2_rx_cfg  csi2_rx_cfg;
+	struct cam_ife_csid_tpg_cfg      tpg_cfg;
+	uint32_t                         csi2_reserve_cnt;
+	uint32_t                         csi2_cfg_cnt;
+	uint32_t                         tpg_start_cnt;
+	struct cam_isp_resource_node     ipp_res;
+	struct cam_isp_resource_node     rdi_res[CAM_IFE_CSID_RDI_MAX];
+	struct cam_isp_resource_node     cid_res[CAM_IFE_CSID_CID_RES_MAX];
+	struct completion                csid_top_complete;
+	struct completion                csid_csi2_complete;
+	struct completion                csid_ipp_complete;
+	struct completion    csid_rdin_complete[CAM_IFE_CSID_RDI_MAX];
+};
+
+int cam_ife_csid_hw_probe_init(struct cam_hw_intf  *csid_hw_intf,
+	uint32_t csid_idx);
+
+int cam_ife_csid_hw_deinit(struct cam_ife_csid_hw *ife_csid_hw);
+
+#endif /* _CAM_IFE_CSID_HW_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.c
new file mode 100644
index 0000000..003d83f
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.c
@@ -0,0 +1,141 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include "cam_ife_csid_core.h"
+#include "cam_ife_csid_dev.h"
+#include "cam_ife_csid_hw_intf.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+static struct cam_hw_intf *cam_ife_csid_hw_list[CAM_IFE_CSID_HW_RES_MAX] = {
+	0, 0, 0, 0};
+
+int cam_ife_csid_probe(struct platform_device *pdev)
+{
+
+	struct cam_hw_intf             *csid_hw_intf;
+	struct cam_hw_info             *csid_hw_info;
+	struct cam_ife_csid_hw         *csid_dev = NULL;
+	const struct of_device_id      *match_dev = NULL;
+	struct cam_ife_csid_hw_info    *csid_hw_data = NULL;
+	uint32_t                        csid_dev_idx;
+	int                             rc = 0;
+
+	CDBG("%s:%d probe called\n", __func__, __LINE__);
+
+	csid_hw_intf = kzalloc(sizeof(*csid_hw_intf), GFP_KERNEL);
+	if (!csid_hw_intf) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	csid_hw_info = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!csid_hw_info) {
+		rc = -ENOMEM;
+		goto free_hw_intf;
+	}
+
+	csid_dev = kzalloc(sizeof(struct cam_ife_csid_hw), GFP_KERNEL);
+	if (!csid_dev) {
+		rc = -ENOMEM;
+		goto free_hw_info;
+	}
+
+	/* get ife csid hw index */
+	of_property_read_u32(pdev->dev.of_node, "cell-index", &csid_dev_idx);
+	/* get ife csid hw information */
+	match_dev = of_match_device(pdev->dev.driver->of_match_table,
+		&pdev->dev);
+	if (!match_dev) {
+		pr_err("%s:%d No matching table for the IFE CSID HW!\n",
+			__func__, __LINE__);
+		rc = -EINVAL;
+		goto free_dev;
+	}
+
+	csid_hw_intf->hw_idx = csid_dev_idx;
+	csid_hw_intf->hw_type = CAM_ISP_HW_TYPE_IFE_CSID;
+	csid_hw_intf->hw_priv = csid_hw_info;
+
+	csid_hw_info->core_info = csid_dev;
+	csid_hw_info->soc_info.pdev = pdev;
+
+	csid_hw_data = (struct cam_ife_csid_hw_info  *)match_dev->data;
+	/* need to setup the pdev before calling the ife hw probe init */
+	csid_dev->csid_info = csid_hw_data;
+
+	rc = cam_ife_csid_hw_probe_init(csid_hw_intf, csid_dev_idx);
+	if (rc)
+		goto free_dev;
+
+	platform_set_drvdata(pdev, csid_dev);
+	CDBG("%s:%d CSID:%d probe successful\n", __func__, __LINE__,
+		csid_hw_intf->hw_idx);
+
+
+	if (csid_hw_intf->hw_idx < CAM_IFE_CSID_HW_RES_MAX)
+		cam_ife_csid_hw_list[csid_hw_intf->hw_idx] = csid_hw_intf;
+	else
+		goto free_dev;
+
+	return 0;
+
+free_dev:
+	kfree(csid_dev);
+free_hw_info:
+	kfree(csid_hw_info);
+free_hw_intf:
+	kfree(csid_hw_intf);
+err:
+	return rc;
+}
+
+int cam_ife_csid_remove(struct platform_device *pdev)
+{
+	struct cam_ife_csid_hw         *csid_dev = NULL;
+	struct cam_hw_intf             *csid_hw_intf;
+	struct cam_hw_info             *csid_hw_info;
+
+	csid_dev = (struct cam_ife_csid_hw *)platform_get_drvdata(pdev);
+	csid_hw_intf = csid_dev->hw_intf;
+	csid_hw_info = csid_dev->hw_info;
+
+	CDBG("%s:%d CSID:%d remove\n", __func__, __LINE__,
+		csid_dev->hw_intf->hw_idx);
+
+	cam_ife_csid_hw_deinit(csid_dev);
+
+	/*release the csid device memory */
+	kfree(csid_dev);
+	kfree(csid_hw_info);
+	kfree(csid_hw_intf);
+	return 0;
+}
+
+int cam_ife_csid_hw_init(struct cam_hw_intf **ife_csid_hw,
+	uint32_t hw_idx)
+{
+	int rc = 0;
+
+	if (cam_ife_csid_hw_list[hw_idx]) {
+		*ife_csid_hw = cam_ife_csid_hw_list[hw_idx];
+	} else {
+		*ife_csid_hw = NULL;
+		rc = -1;
+	}
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.h
new file mode 100644
index 0000000..3b213e2
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.h
@@ -0,0 +1,23 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_IFE_CSID_DEV_H_
+#define _CAM_IFE_CSID_DEV_H_
+
+#include "cam_isp_hw.h"
+
+irqreturn_t cam_ife_csid_irq(int irq_num, void *data);
+
+int cam_ife_csid_probe(struct platform_device *pdev);
+int cam_ife_csid_remove(struct platform_device *pdev);
+
+#endif /*_CAM_IFE_CSID_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.c
new file mode 100644
index 0000000..4ed4da5
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.c
@@ -0,0 +1,59 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include "cam_ife_csid_lite170.h"
+#include "cam_ife_csid_core.h"
+#include "cam_ife_csid_dev.h"
+
+
+#define CAM_CSID_LITE_DRV_NAME                    "csid_lite_170"
+#define CAM_CSID_LITE_VERSION_V170                 0x10070000
+
+static struct cam_ife_csid_hw_info cam_ife_csid_lite170_hw_info = {
+	.csid_reg = &cam_ife_csid_lite_170_reg_offset,
+	.hw_dts_version = CAM_CSID_LITE_VERSION_V170,
+};
+
+static const struct of_device_id cam_ife_csid_lite170_dt_match[] = {
+	{
+		.compatible = "qcom,csid-lite170",
+		.data = &cam_ife_csid_lite170_hw_info,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, cam_ife_csid_lite170_dt_match);
+
+static struct platform_driver cam_ife_csid_lite170_driver = {
+	.probe = cam_ife_csid_probe,
+	.remove = cam_ife_csid_remove,
+	.driver = {
+		.name = CAM_CSID_LITE_DRV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = cam_ife_csid_lite170_dt_match,
+	},
+};
+
+static int __init cam_ife_csid_lite170_init_module(void)
+{
+	return platform_driver_register(&cam_ife_csid_lite170_driver);
+}
+
+static void __exit cam_ife_csid_lite170_exit_module(void)
+{
+	platform_driver_unregister(&cam_ife_csid_lite170_driver);
+}
+
+module_init(cam_ife_csid_lite170_init_module);
+module_exit(cam_ife_csid_lite170_exit_module);
+MODULE_DESCRIPTION("CAM IFE_CSID_LITE170 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.h
new file mode 100644
index 0000000..e857f8b
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.h
@@ -0,0 +1,310 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_IFE_CSID_LITE170_H_
+#define _CAM_IFE_CSID_LITE170_H_
+#include "cam_ife_csid_core.h"
+
+static struct cam_ife_csid_rdi_reg_offset
+	cam_ife_csid_lite_170_rdi_0_reg_offset = {
+
+	.csid_rdi_irq_status_addr                 = 0x30,
+	.csid_rdi_irq_mask_addr                   = 0x34,
+	.csid_rdi_irq_clear_addr                  = 0x38,
+	.csid_rdi_irq_set_addr                    = 0x3c,
+	.csid_rdi_cfg0_addr                       = 0x200,
+	.csid_rdi_cfg1_addr                       = 0x204,
+	.csid_rdi_ctrl_addr                       = 0x208,
+	.csid_rdi_frm_drop_pattern_addr           = 0x20c,
+	.csid_rdi_frm_drop_period_addr            = 0x210,
+	.csid_rdi_irq_subsample_pattern_addr      = 0x214,
+	.csid_rdi_irq_subsample_period_addr       = 0x218,
+	.csid_rdi_rpp_hcrop_addr                  = 0x21c,
+	.csid_rdi_rpp_vcrop_addr                  = 0x220,
+	.csid_rdi_rpp_pix_drop_pattern_addr       = 0x224,
+	.csid_rdi_rpp_pix_drop_period_addr        = 0x228,
+	.csid_rdi_rpp_line_drop_pattern_addr      = 0x22c,
+	.csid_rdi_rpp_line_drop_period_addr       = 0x230,
+	.csid_rdi_rst_strobes_addr                = 0x240,
+	.csid_rdi_status_addr                     = 0x250,
+	.csid_rdi_misr_val0_addr                  = 0x254,
+	.csid_rdi_misr_val1_addr                  = 0x258,
+	.csid_rdi_misr_val2_addr                  = 0x25c,
+	.csid_rdi_misr_val3_addr                  = 0x260,
+	.csid_rdi_format_measure_cfg0_addr        = 0x270,
+	.csid_rdi_format_measure_cfg1_addr        = 0x274,
+	.csid_rdi_format_measure0_addr            = 0x278,
+	.csid_rdi_format_measure1_addr            = 0x27c,
+	.csid_rdi_format_measure2_addr            = 0x280,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x290,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x294,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x298,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x29c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x2a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x2a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x2a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x2ac,
+	.csid_rdi_byte_cntr_ping_addr             = 0x2e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x2e4,
+};
+
+static struct cam_ife_csid_rdi_reg_offset
+	cam_ife_csid_lite_170_rdi_1_reg_offset = {
+
+	.csid_rdi_irq_status_addr                 = 0x40,
+	.csid_rdi_irq_mask_addr                   = 0x44,
+	.csid_rdi_irq_clear_addr                  = 0x48,
+	.csid_rdi_irq_set_addr                    = 0x4c,
+	.csid_rdi_cfg0_addr                       = 0x300,
+	.csid_rdi_cfg1_addr                       = 0x304,
+	.csid_rdi_ctrl_addr                       = 0x308,
+	.csid_rdi_frm_drop_pattern_addr           = 0x30c,
+	.csid_rdi_frm_drop_period_addr            = 0x310,
+	.csid_rdi_irq_subsample_pattern_addr      = 0x314,
+	.csid_rdi_irq_subsample_period_addr       = 0x318,
+	.csid_rdi_rpp_hcrop_addr                  = 0x31c,
+	.csid_rdi_rpp_vcrop_addr                  = 0x320,
+	.csid_rdi_rpp_pix_drop_pattern_addr       = 0x324,
+	.csid_rdi_rpp_pix_drop_period_addr        = 0x328,
+	.csid_rdi_rpp_line_drop_pattern_addr      = 0x32c,
+	.csid_rdi_rpp_line_drop_period_addr       = 0x330,
+	.csid_rdi_rst_strobes_addr                = 0x340,
+	.csid_rdi_status_addr                     = 0x350,
+	.csid_rdi_misr_val0_addr                  = 0x354,
+	.csid_rdi_misr_val1_addr                  = 0x358,
+	.csid_rdi_misr_val2_addr                  = 0x35c,
+	.csid_rdi_misr_val3_addr                  = 0x360,
+	.csid_rdi_format_measure_cfg0_addr        = 0x370,
+	.csid_rdi_format_measure_cfg1_addr        = 0x374,
+	.csid_rdi_format_measure0_addr            = 0x378,
+	.csid_rdi_format_measure1_addr            = 0x37c,
+	.csid_rdi_format_measure2_addr            = 0x380,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x390,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x394,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x398,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x39c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x3a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x3a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x3a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x3ac,
+	.csid_rdi_byte_cntr_ping_addr             = 0x3e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x3e4,
+};
+
+static struct cam_ife_csid_rdi_reg_offset
+	cam_ife_csid_lite_170_rdi_2_reg_offset = {
+
+	.csid_rdi_irq_status_addr                 = 0x50,
+	.csid_rdi_irq_mask_addr                   = 0x54,
+	.csid_rdi_irq_clear_addr                  = 0x58,
+	.csid_rdi_irq_set_addr                    = 0x5c,
+	.csid_rdi_cfg0_addr                       = 0x400,
+	.csid_rdi_cfg1_addr                       = 0x404,
+	.csid_rdi_ctrl_addr                       = 0x408,
+	.csid_rdi_frm_drop_pattern_addr           = 0x40c,
+	.csid_rdi_frm_drop_period_addr            = 0x410,
+	.csid_rdi_irq_subsample_pattern_addr      = 0x414,
+	.csid_rdi_irq_subsample_period_addr       = 0x418,
+	.csid_rdi_rpp_hcrop_addr                  = 0x41c,
+	.csid_rdi_rpp_vcrop_addr                  = 0x420,
+	.csid_rdi_rpp_pix_drop_pattern_addr       = 0x424,
+	.csid_rdi_rpp_pix_drop_period_addr        = 0x428,
+	.csid_rdi_rpp_line_drop_pattern_addr      = 0x42c,
+	.csid_rdi_rpp_line_drop_period_addr       = 0x430,
+	.csid_rdi_yuv_chroma_conversion_addr      = 0x434,
+	.csid_rdi_rst_strobes_addr                = 0x440,
+	.csid_rdi_status_addr                     = 0x450,
+	.csid_rdi_misr_val0_addr                  = 0x454,
+	.csid_rdi_misr_val1_addr                  = 0x458,
+	.csid_rdi_misr_val2_addr                  = 0x45c,
+	.csid_rdi_misr_val3_addr                  = 0x460,
+	.csid_rdi_format_measure_cfg0_addr        = 0x470,
+	.csid_rdi_format_measure_cfg1_addr        = 0x474,
+	.csid_rdi_format_measure0_addr            = 0x478,
+	.csid_rdi_format_measure1_addr            = 0x47c,
+	.csid_rdi_format_measure2_addr            = 0x480,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x490,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x494,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x498,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x49c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x4a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x4a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x4a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x4ac,
+	.csid_rdi_byte_cntr_ping_addr             = 0x4e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x4e4,
+};
+
+static struct cam_ife_csid_rdi_reg_offset
+	cam_ife_csid_lite_170_rdi_3_reg_offset = {
+
+	.csid_rdi_irq_status_addr                 = 0x60,
+	.csid_rdi_irq_mask_addr                   = 0x64,
+	.csid_rdi_irq_clear_addr                  = 0x68,
+	.csid_rdi_irq_set_addr                    = 0x6c,
+	.csid_rdi_cfg0_addr                       = 0x500,
+	.csid_rdi_cfg1_addr                       = 0x504,
+	.csid_rdi_ctrl_addr                       = 0x508,
+	.csid_rdi_frm_drop_pattern_addr           = 0x50c,
+	.csid_rdi_frm_drop_period_addr            = 0x510,
+	.csid_rdi_irq_subsample_pattern_addr      = 0x514,
+	.csid_rdi_irq_subsample_period_addr       = 0x518,
+	.csid_rdi_rpp_hcrop_addr                  = 0x51c,
+	.csid_rdi_rpp_vcrop_addr                  = 0x520,
+	.csid_rdi_rpp_pix_drop_pattern_addr       = 0x524,
+	.csid_rdi_rpp_pix_drop_period_addr        = 0x528,
+	.csid_rdi_rpp_line_drop_pattern_addr      = 0x52c,
+	.csid_rdi_rpp_line_drop_period_addr       = 0x530,
+	.csid_rdi_yuv_chroma_conversion_addr      = 0x534,
+	.csid_rdi_rst_strobes_addr                = 0x540,
+	.csid_rdi_status_addr                     = 0x550,
+	.csid_rdi_misr_val0_addr                  = 0x554,
+	.csid_rdi_misr_val1_addr                  = 0x558,
+	.csid_rdi_misr_val2_addr                  = 0x55c,
+	.csid_rdi_misr_val3_addr                  = 0x560,
+	.csid_rdi_format_measure_cfg0_addr        = 0x570,
+	.csid_rdi_format_measure_cfg1_addr        = 0x574,
+	.csid_rdi_format_measure0_addr            = 0x578,
+	.csid_rdi_format_measure1_addr            = 0x57c,
+	.csid_rdi_format_measure2_addr            = 0x580,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x590,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x594,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x598,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x59c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x5a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x5a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x5a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x5ac,
+	.csid_rdi_byte_cntr_ping_addr             = 0x5e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x5e4,
+};
+
+static struct cam_ife_csid_csi2_rx_reg_offset
+	cam_ife_csid_lite_170_csi2_reg_offset = {
+
+	.csid_csi2_rx_irq_status_addr                 = 0x20,
+	.csid_csi2_rx_irq_mask_addr                   = 0x24,
+	.csid_csi2_rx_irq_clear_addr                  = 0x28,
+	.csid_csi2_rx_irq_set_addr                    = 0x2c,
+
+	/*CSI2 rx control */
+	.csid_csi2_rx_cfg0_addr                       = 0x100,
+	.csid_csi2_rx_cfg1_addr                       = 0x104,
+	.csid_csi2_rx_capture_ctrl_addr               = 0x108,
+	.csid_csi2_rx_rst_strobes_addr                = 0x110,
+	.csid_csi2_rx_de_scramble_cfg0_addr           = 0x114,
+	.csid_csi2_rx_de_scramble_cfg1_addr           = 0x118,
+	.csid_csi2_rx_cap_unmap_long_pkt_hdr_0_addr   = 0x120,
+	.csid_csi2_rx_cap_unmap_long_pkt_hdr_1_addr   = 0x124,
+	.csid_csi2_rx_captured_short_pkt_0_addr       = 0x128,
+	.csid_csi2_rx_captured_short_pkt_1_addr       = 0x12c,
+	.csid_csi2_rx_captured_long_pkt_0_addr        = 0x130,
+	.csid_csi2_rx_captured_long_pkt_1_addr        = 0x134,
+	.csid_csi2_rx_captured_long_pkt_ftr_addr      = 0x138,
+	.csid_csi2_rx_captured_cphy_pkt_ftr_addr      = 0x13c,
+	.csid_csi2_rx_lane0_misr_addr                 = 0x150,
+	.csid_csi2_rx_lane1_misr_addr                 = 0x154,
+	.csid_csi2_rx_lane2_misr_addr                 = 0x158,
+	.csid_csi2_rx_lane3_misr_addr                 = 0x15c,
+	.csid_csi2_rx_total_pkts_rcvd_addr            = 0x160,
+	.csid_csi2_rx_stats_ecc_addr                  = 0x164,
+	.csid_csi2_rx_total_crc_err_addr              = 0x168,
+
+	.csi2_rst_srb_all                             = 0x3FFF,
+	.csi2_rst_done_shift_val                      = 27,
+	.csi2_irq_mask_all                            = 0xFFFFFFF,
+	.csi2_misr_enable_shift_val                   = 6,
+	.csi2_vc_mode_shift_val                       = 2,
+};
+
+
+static struct cam_ife_csid_csi2_tpg_reg_offset
+	cam_ife_csid_lite_170_tpg_reg_offset = {
+
+	/*CSID TPG control */
+	.csid_tpg_ctrl_addr                           = 0x600,
+	.csid_tpg_vc_cfg0_addr                        = 0x604,
+	.csid_tpg_vc_cfg1_addr                        = 0x608,
+	.csid_tpg_lfsr_seed_addr                      = 0x60c,
+	.csid_tpg_dt_n_cfg_0_addr                     = 0x610,
+	.csid_tpg_dt_n_cfg_1_addr                     = 0x614,
+	.csid_tpg_dt_n_cfg_2_addr                     = 0x618,
+	.csid_tpg_color_bars_cfg_addr                 = 0x640,
+	.csid_tpg_color_box_cfg_addr                  = 0x644,
+	.csid_tpg_common_gen_cfg_addr                 = 0x648,
+	.csid_tpg_cgen_n_cfg_addr                     = 0x650,
+	.csid_tpg_cgen_n_x0_addr                      = 0x654,
+	.csid_tpg_cgen_n_x1_addr                      = 0x658,
+	.csid_tpg_cgen_n_x2_addr                      = 0x65c,
+	.csid_tpg_cgen_n_xy_addr                      = 0x660,
+	.csid_tpg_cgen_n_y1_addr                      = 0x664,
+	.csid_tpg_cgen_n_y2_addr                      = 0x668,
+
+	/*configurations */
+	.tpg_dtn_cfg_offset                           = 0xc,
+	.tpg_cgen_cfg_offset                          = 0x20,
+};
+
+
+static struct cam_ife_csid_common_reg_offset
+	cam_csid_lite_170_cmn_reg_offset = {
+
+	.csid_hw_version_addr                         = 0x0,
+	.csid_cfg0_addr                               = 0x4,
+	.csid_ctrl_addr                               = 0x8,
+	.csid_reset_addr                              = 0xc,
+	.csid_rst_strobes_addr                        = 0x10,
+
+	.csid_test_bus_ctrl_addr                      = 0x14,
+	.csid_top_irq_status_addr                     = 0x70,
+	.csid_top_irq_mask_addr                       = 0x74,
+	.csid_top_irq_clear_addr                      = 0x78,
+	.csid_top_irq_set_addr                        = 0x7c,
+	.csid_irq_cmd_addr                            = 0x80,
+
+	/*configurations */
+	.major_version                                = 1,
+	.minor_version                                = 7,
+	.version_incr                                 = 0,
+	.no_rdis                                      = 4,
+	.no_pix                                       = 0,
+	.csid_rst_stb                                 = 0x1e,
+	.csid_rst_stb_sw_all                          = 0x1f,
+	.path_rst_stb_all                             = 0x7f,
+	.path_rst_done_shift_val                      = 1,
+	.path_en_shift_val                            = 31,
+	.dt_id_shift_val                              = 27,
+	.vc_shift_val                                 = 22,
+	.dt_shift_val                                 = 16,
+	.fmt_shift_val                                = 12,
+	.plain_fmt_shit_val                           = 10,
+	.crop_v_en_shift_val                          = 6,
+	.crop_h_en_shift_val                          = 5,
+	.crop_shift                                   = 16,
+	.ipp_irq_mask_all                             = 0x7FFF,
+	.rdi_irq_mask_all                             = 0x7FFF,
+};
+
+struct cam_ife_csid_reg_offset cam_ife_csid_lite_170_reg_offset = {
+	.cmn_reg          = &cam_csid_lite_170_cmn_reg_offset,
+	.csi2_reg         = &cam_ife_csid_lite_170_csi2_reg_offset,
+	.ipp_reg          = NULL,
+	.rdi_reg = {
+		&cam_ife_csid_lite_170_rdi_0_reg_offset,
+		&cam_ife_csid_lite_170_rdi_1_reg_offset,
+		&cam_ife_csid_lite_170_rdi_2_reg_offset,
+		&cam_ife_csid_lite_170_rdi_3_reg_offset,
+		},
+	.tpg_reg = &cam_ife_csid_lite_170_tpg_reg_offset,
+};
+
+#endif /*_CAM_IFE_CSID_LITE170_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
new file mode 100644
index 0000000..f07c45e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
@@ -0,0 +1,94 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_ife_csid_soc.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+static int cam_ife_csid_get_dt_properties(struct cam_hw_soc_info *soc_info)
+{
+	struct device_node *of_node = NULL;
+	struct csid_device_soc_info *csid_soc_info = NULL;
+	int rc = 0;
+
+	of_node = soc_info->pdev->dev.of_node;
+	csid_soc_info = (struct csid_device_soc_info *)soc_info->soc_private;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc)
+		return rc;
+
+	return rc;
+}
+
+static int cam_ife_csid_request_platform_resource(
+	struct cam_hw_soc_info *soc_info,
+	irq_handler_t csid_irq_handler,
+	void *irq_data)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_request_platform_resource(soc_info, csid_irq_handler,
+		irq_data);
+	if (rc)
+		return rc;
+
+	return rc;
+}
+
+int cam_ife_csid_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t csid_irq_handler, void *irq_data)
+{
+	int rc = 0;
+
+	rc = cam_ife_csid_get_dt_properties(soc_info);
+	if (rc < 0)
+		return rc;
+
+	/* Need to see if we want post process the clock list */
+
+	rc = cam_ife_csid_request_platform_resource(soc_info, csid_irq_handler,
+		irq_data);
+	if (rc < 0)
+		return rc;
+
+	CDBG("%s: mem_base is 0x%llx\n", __func__,
+		(uint64_t) soc_info->reg_map[0].mem_base);
+
+	return rc;
+}
+
+int cam_ife_csid_enable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true, true);
+	if (rc) {
+		pr_err("%s: enable platform failed\n", __func__);
+		return rc;
+	}
+
+	return rc;
+}
+
+int cam_ife_csid_disable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+	if (rc)
+		pr_err("%s: Disable platform failed\n", __func__);
+
+	return rc;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.h
new file mode 100644
index 0000000..218e05a
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.h
@@ -0,0 +1,61 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_IFE_CSID_SOC_H_
+#define _CAM_IFE_CSID_SOC_H_
+
+#include "cam_isp_hw.h"
+
+/**
+ * struct csid_device_soc_info - CSID SOC info object
+ *
+ * @csi_vdd_voltage:       csi vdd voltage value
+ *
+ */
+struct csid_device_soc_info {
+	int                             csi_vdd_voltage;
+};
+
+/**
+ * cam_ife_csid_init_soc_resources()
+ *
+ * @brief:                 csid initialization function for the soc info
+ *
+ * @soc_info:              soc info structure pointer
+ * @csid_irq_handler:      irq handler function to be registered
+ * @irq_data:              irq data for the callback function
+ *
+ */
+int cam_ife_csid_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t csid_irq_handler, void *irq_data);
+
+/**
+ * cam_ife_csid_enable_soc_resources()
+ *
+ * @brief:                 csid soc resource enable function
+ *
+ * @soc_info:              soc info structure pointer
+ *
+ */
+int cam_ife_csid_enable_soc_resources(struct cam_hw_soc_info  *soc_info);
+
+/**
+ * cam_ife_csid_disable_soc_resources()
+ *
+ * @brief:                 csid soc resource disable function
+ *
+ * @soc_info:              soc info structure pointer
+ *
+ */
+int cam_ife_csid_disable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+#endif
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h
new file mode 100644
index 0000000..ecc6f0e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h
@@ -0,0 +1,171 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CAM_CSID_HW_INTF_H__
+#define __CAM_CSID_HW_INTF_H__
+
+#include "cam_isp_hw.h"
+#include "cam_hw_intf.h"
+
+/* MAX IFE CSID instance */
+#define CAM_IFE_CSID_HW_NUM_MAX                        4
+
+
+/**
+ * enum cam_ife_pix_path_res_id - Specify the csid path
+ */
+enum cam_ife_pix_path_res_id {
+	CAM_IFE_PIX_PATH_RES_RDI_0,
+	CAM_IFE_PIX_PATH_RES_RDI_1,
+	CAM_IFE_PIX_PATH_RES_RDI_2,
+	CAM_IFE_PIX_PATH_RES_RDI_3,
+	CAM_IFE_PIX_PATH_RES_IPP,
+	CAM_IFE_PIX_PATH_RES_MAX,
+};
+
+/**
+ * enum cam_ife_cid_res_id - Specify the csid cid
+ */
+enum cam_ife_cid_res_id {
+	CAM_IFE_CSID_CID_0,
+	CAM_IFE_CSID_CID_1,
+	CAM_IFE_CSID_CID_2,
+	CAM_IFE_CSID_CID_3,
+	CAM_IFE_CSID_CID_MAX,
+};
+
+
+/**
+ * struct cam_ife_csid_hw_caps- get the CSID hw capability
+ * @no_rdis :       number of rdis supported by CSID HW device
+ * @no_pix:         number of pixel path supported by CSID HW device
+ * @major_version : major version
+ * @minor_version:  minor version
+ * @version_incr:   version increment
+ *
+ */
+struct cam_ife_csid_hw_caps {
+	uint32_t      no_rdis;
+	uint32_t      no_pix;
+	uint32_t      major_version;
+	uint32_t      minor_version;
+	uint32_t      version_incr;
+};
+
+
+/**
+ * struct cam_csid_hw_reserve_resource- hw reserve
+ * @res_type :  resource type CID or PATH
+ *              if type is CID, then res_id is not required,
+ *              if type is path then res id need to be filled
+ * @res_id  :  res id to be reserved
+ * @in_port : input port resource info
+ * @sync_mode : Sync mode
+ *              Sync mode could be master, slave or none
+ * @master_idx: master device index to be configured in the slave path
+ *              for master path, this value is not required.
+ *              only slave need to configure the master index value
+ * @cid:        cid (DT_ID) value for path, this is applicable for CSID path
+ *              reserve
+ * @node_res :  reserved resource structure pointer
+ *
+ */
+struct cam_csid_hw_reserve_resource_args {
+	enum cam_isp_resource_type                res_type;
+	uint32_t                                  res_id;
+	struct cam_isp_in_port_info              *in_port;
+	enum cam_isp_hw_sync_mode                 sync_mode;
+	uint32_t                                  master_idx;
+	uint32_t                                  cid;
+	struct cam_isp_resource_node             *node_res;
+
+};
+
+
+/**
+ *  enum cam_ife_csid_halt_cmd - Specify the halt command type
+ */
+enum cam_ife_csid_halt_cmd {
+	CAM_CSID_HALT_AT_FRAME_BOUNDARY,
+	CAM_CSID_RESUME_AT_FRAME_BOUNDARY,
+	CAM_CSID_HALT_IMMEDIATELY,
+	CAM_CSID_HALT_MAX,
+};
+
+/**
+ * struct cam_csid_hw_stop- stop all resources
+ * @stop_cmd : Applicable only for PATH resources
+ *             if stop command set to Halt immediately, driver will stop
+ *             path immediately; manager needs to reset the path after HI
+ *             if stop command set to halt at frame boundary, driver will set
+ *             halt at frame boundary and wait for frame boundary
+ * @node_res :  resource pointer array (ie cid or CSID)
+ * @num_res :   number of resources to be stopped
+ *
+ */
+struct cam_csid_hw_stop_args {
+	enum cam_ife_csid_halt_cmd                stop_cmd;
+	struct cam_isp_resource_node            **node_res;
+	uint32_t                                  num_res;
+};
+
+/**
+ * enum cam_ife_csid_reset_type - Specify the reset type
+ */
+enum cam_ife_csid_reset_type {
+	CAM_IFE_CSID_RESET_GLOBAL,
+	CAM_IFE_CSID_RESET_PATH,
+	CAM_IFE_CSID_RESET_MAX,
+};
+
+/**
+ * struct cam_csid_reset_cfg_args - csid reset configuration
+ * @reset_type : Global reset or path reset
+ * @node_res :  resource that needs to be reset
+ *
+ */
+struct cam_csid_reset_cfg_args {
+	enum cam_ife_csid_reset_type   reset_type;
+	struct cam_isp_resource_node  *node_res;
+};
+
+/**
+ * struct cam_csid_get_time_stamp_args - time stamp capture arguments
+ * @node_res :   resource to get the time stamp
+ * @time_stamp_val : captured time stamp
+ *
+ */
+struct cam_csid_get_time_stamp_args {
+	struct cam_isp_resource_node      *node_res;
+	uint64_t                           time_stamp_val;
+};
+
+/**
+ * enum cam_ife_csid_cmd_type - Specify the csid command
+ */
+enum cam_ife_csid_cmd_type {
+	CAM_IFE_CSID_CMD_GET_TIME_STAMP,
+	CAM_IFE_CSID_CMD_MAX,
+};
+
+/**
+ * cam_ife_csid_hw_init()
+ *
+ * @brief:               Initialize function for the CSID hardware
+ *
+ * @ife_csid_hw:         CSID hardware instance returned
+ * @hw_idx:              CSID hardware instance id
+ */
+int cam_ife_csid_hw_init(struct cam_hw_intf **ife_csid_hw,
+	uint32_t hw_idx);
+
+#endif /* __CAM_CSID_HW_INTF_H__ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
new file mode 100644
index 0000000..ea34406
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
@@ -0,0 +1,162 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_ISP_HW_H_
+#define _CAM_ISP_HW_H_
+
+#include <linux/completion.h>
+#include "cam_hw.h"
+#include "cam_soc_util.h"
+#include "cam_irq_controller.h"
+
+/*
+ * struct cam_isp_timestamp:
+ *
+ * @mono_time:          Monotonic boot time
+ * @vt_time:            AV Timer time
+ * @ticks:              Qtimer ticks
+ */
+struct cam_isp_timestamp {
+	struct timeval          mono_time;
+	struct timeval          vt_time;
+	uint64_t                ticks;
+};
+
+/*
+ * cam_isp_hw_get_timestamp()
+ *
+ * @Brief:              Get timestamp values
+ *
+ * @time_stamp:         Structure that holds different time values
+ *
+ * @Return:             Void
+ */
+void cam_isp_hw_get_timestamp(struct cam_isp_timestamp *time_stamp);
+
+enum cam_isp_hw_type {
+	CAM_ISP_HW_TYPE_CSID        = 0,
+	CAM_ISP_HW_TYPE_ISPIF       = 1,
+	CAM_ISP_HW_TYPE_VFE         = 2,
+	CAM_ISP_HW_TYPE_IFE_CSID    = 3,
+	CAM_ISP_HW_TYPE_MAX         = 4,
+};
+
+enum cam_isp_hw_split_id {
+	CAM_ISP_HW_SPLIT_LEFT       = 0,
+	CAM_ISP_HW_SPLIT_RIGHT,
+	CAM_ISP_HW_SPLIT_MAX,
+};
+
+enum cam_isp_hw_sync_mode {
+	CAM_ISP_HW_SYNC_NONE,
+	CAM_ISP_HW_SYNC_MASTER,
+	CAM_ISP_HW_SYNC_SLAVE,
+	CAM_ISP_HW_SYNC_MAX,
+};
+
+enum cam_isp_resource_state {
+	CAM_ISP_RESOURCE_STATE_UNAVAILABLE   = 0,
+	CAM_ISP_RESOURCE_STATE_AVAILABLE     = 1,
+	CAM_ISP_RESOURCE_STATE_RESERVED      = 2,
+	CAM_ISP_RESOURCE_STATE_INIT_HW       = 3,
+	CAM_ISP_RESOURCE_STATE_STREAMING     = 4,
+};
+
+enum cam_isp_resource_type {
+	CAM_ISP_RESOURCE_UNINT,
+	CAM_ISP_RESOURCE_SRC,
+	CAM_ISP_RESOURCE_CID,
+	CAM_ISP_RESOURCE_PIX_PATH,
+	CAM_ISP_RESOURCE_VFE_IN,
+	CAM_ISP_RESOURCE_VFE_OUT,
+	CAM_ISP_RESOURCE_MAX,
+};
+
+/*
+ * struct cam_isp_resource_node:
+ *
+ * @Brief:                        Structure representing HW resource object
+ *
+ * @res_type:                     Resource Type
+ * @res_id:                       Unique resource ID within res_type objects
+ *                                for a particular HW
+ * @res_state:                    State of the resource
+ * @hw_intf:                      HW Interface of HW to which this resource
+ *                                belongs
+ * @res_priv:                     Private data of the resource
+ * @list:                         list_head node for this resource
+ * @cdm_ops:                      CDM operation functions
+ * @tasklet_info:                 Tasklet structure that will be used to
+ *                                schedule IRQ events related to this resource
+ * @irq_handle:                   handle returned on subscribing for IRQ event
+ * @start:                        function pointer to start the HW resource
+ * @stop:                         function pointer to stop the HW resource
+ * @process_cmd:                  function pointer for processing commands
+ *                                specific to the resource
+ * @top_half_handler:             Top Half handler function
+ * @bottom_half_handler:          Bottom Half handler function
+ */
+struct cam_isp_resource_node {
+	enum cam_isp_resource_type     res_type;
+	uint32_t                       res_id;
+	enum cam_isp_resource_state    res_state;
+	struct cam_hw_intf            *hw_intf;
+	void                          *res_priv;
+	struct list_head               list;
+	void                          *cdm_ops;
+	void                          *tasklet_info;
+	int                            irq_handle;
+
+	int (*start)(struct cam_isp_resource_node *rsrc_node);
+	int (*stop)(struct cam_isp_resource_node *rsrc_node);
+	int (*process_cmd)(struct cam_isp_resource_node *rsrc_node,
+		uint32_t cmd_type, void *cmd_args, uint32_t arg_size);
+	CAM_IRQ_HANDLER_TOP_HALF       top_half_handler;
+	CAM_IRQ_HANDLER_BOTTOM_HALF    bottom_half_handler;
+};
+
+/*
+ * struct cam_isp_hw_get_cdm_args:
+ *
+ * @Brief:           Contain the command buffer information
+ *                   to store the CDM commands.
+ *
+ * @res:             Resource node
+ * @cmd_buf_addr:    Command buffer to store the change base command
+ * @size:            Size of the buffer in bytes
+ * @used_bytes:      Consumed bytes in the command buffer
+ *
+ */
+struct cam_isp_hw_get_cdm_args {
+	struct cam_isp_resource_node   *res;
+	uint32_t                       *cmd_buf_addr;
+	uint32_t                        size;
+	uint32_t                        used_bytes;
+};
+
+/*
+ * struct cam_isp_hw_get_buf_update:
+ *
+ * @Brief:         Get cdm commands for buffer updates.
+ *
+ * @cdm:           Command buffer information
+ * @image_buf:     Contain the image buffer information
+ * @num_buf:       Number of buffers in the image_buf array
+ *
+ */
+struct cam_isp_hw_get_buf_update {
+	struct cam_isp_hw_get_cdm_args  cdm;
+	uint64_t                       *image_buf;
+	uint32_t                        num_buf;
+};
+
+#endif /* _CAM_ISP_HW_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
new file mode 100644
index 0000000..15db6a6
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
@@ -0,0 +1,255 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_VFE_HW_INTF_H_
+#define _CAM_VFE_HW_INTF_H_
+
+#include "cam_isp_hw.h"
+
+#define CAM_VFE_HW_NUM_MAX                       4
+
+#define VFE_CORE_BASE_IDX             0
+/*
+ * VBIF and BUS do not exist on same HW.
+ * Hence both can be 1 below.
+ */
+#define VFE_VBIF_BASE_IDX             1
+#define VFE_BUS_BASE_IDX              1
+
+enum cam_isp_hw_vfe_in_mux {
+	CAM_ISP_HW_VFE_IN_CAMIF     = 0,
+	CAM_ISP_HW_VFE_IN_TESTGEN   = 1,
+	CAM_ISP_HW_VFE_IN_BUS_RD    = 2,
+	CAM_ISP_HW_VFE_IN_RDI0      = 3,
+	CAM_ISP_HW_VFE_IN_RDI1      = 4,
+	CAM_ISP_HW_VFE_IN_RDI2      = 5,
+	CAM_ISP_HW_VFE_IN_RDI3      = 6,
+	CAM_ISP_HW_VFE_IN_MAX,
+};
+
+enum cam_isp_hw_vfe_core {
+	CAM_ISP_HW_VFE_CORE_0,
+	CAM_ISP_HW_VFE_CORE_1,
+	CAM_ISP_HW_VFE_CORE_2,
+	CAM_ISP_HW_VFE_CORE_3,
+	CAM_ISP_HW_VFE_CORE_MAX,
+};
+
+enum cam_vfe_hw_cmd_type {
+	CAM_VFE_HW_CMD_GET_CHANGE_BASE,
+	CAM_VFE_HW_CMD_GET_BUF_UPDATE,
+	CAM_VFE_HW_CMD_GET_REG_UPDATE,
+	CAM_VFE_HW_CMD_MAX,
+};
+
+enum cam_vfe_hw_irq_status {
+	CAM_VFE_IRQ_STATUS_ERR_COMP             = -3,
+	CAM_VFE_IRQ_STATUS_COMP_OWRT            = -2,
+	CAM_VFE_IRQ_STATUS_ERR                  = -1,
+	CAM_VFE_IRQ_STATUS_SUCCESS              = 0,
+	CAM_VFE_IRQ_STATUS_MAX,
+};
+
+enum cam_vfe_hw_irq_regs {
+	CAM_IFE_IRQ_CAMIF_REG_STATUS0           = 0,
+	CAM_IFE_IRQ_CAMIF_REG_STATUS1           = 1,
+	CAM_IFE_IRQ_VIOLATION_STATUS            = 2,
+	CAM_IFE_IRQ_REGISTERS_MAX,
+};
+
+enum cam_vfe_bus_irq_regs {
+	CAM_IFE_IRQ_BUS_REG_STATUS0             = 0,
+	CAM_IFE_IRQ_BUS_REG_STATUS1             = 1,
+	CAM_IFE_IRQ_BUS_REG_STATUS2             = 2,
+	CAM_IFE_IRQ_BUS_REG_COMP_ERR            = 3,
+	CAM_IFE_IRQ_BUS_REG_COMP_OWRT           = 4,
+	CAM_IFE_IRQ_BUS_DUAL_COMP_ERR           = 5,
+	CAM_IFE_IRQ_BUS_DUAL_COMP_OWRT          = 6,
+	CAM_IFE_BUS_IRQ_REGISTERS_MAX,
+};
+
+/*
+ * struct cam_vfe_hw_get_hw_cap:
+ *
+ * @max_width:               Max width supported by HW
+ * @max_height:              Max height supported by HW
+ * @max_pixel_num:           Max Pixel channels available
+ * @max_rdi_num:             Max Raw channels available
+ */
+struct cam_vfe_hw_get_hw_cap {
+	uint32_t                max_width;
+	uint32_t                max_height;
+	uint32_t                max_pixel_num;
+	uint32_t                max_rdi_num;
+};
+
+/*
+ * struct cam_vfe_hw_vfe_out_acquire_args:
+ *
+ * @rsrc_node:               Pointer to Resource Node object, filled if acquire
+ *                           is successful
+ * @out_port_info:           Output Port details to acquire
+ * @unique_id:               Unique Identity of Context to associate with this
+ *                           resource. Used for composite grouping of multiple
+ *                           resources in the same context
+ * @is_dual:                 Dual VFE or not
+ * @split_id:                In case of Dual VFE, this is Left or Right.
+ *                           (Default is Left if Single VFE)
+ * @is_master:               In case of Dual VFE, this is Master or Slave.
+ *                           (Default is Master in case of Single VFE)
+ * @dual_slave_core:         If Master and Slave exists, HW Index of Slave
+ * @cdm_ops:                 CDM operations
+ */
+struct cam_vfe_hw_vfe_out_acquire_args {
+	struct cam_isp_resource_node      *rsrc_node;
+	struct cam_isp_out_port_info      *out_port_info;
+	uint32_t                           unique_id;
+	uint32_t                           is_dual;
+	enum cam_isp_hw_split_id           split_id;
+	uint32_t                           is_master;
+	uint32_t                           dual_slave_core;
+	struct cam_cdm_utils_ops          *cdm_ops;
+};
+
+/*
+ * struct cam_vfe_hw_vfe_in_acquire_args:
+ *
+ * @rsrc_node:               Pointer to Resource Node object, filled if acquire
+ *                           is successful
+ * @res_id:                  Resource ID of resource to acquire if specific,
+ *                           else CAM_ISP_HW_VFE_IN_MAX
+ * @cdm_ops:                 CDM operations
+ * @sync_mode:               In case of Dual VFE, this is Master or Slave.
+ *                           (Default is Master in case of Single VFE)
+ * @in_port:                 Input port details to acquire
+ */
+struct cam_vfe_hw_vfe_in_acquire_args {
+	struct cam_isp_resource_node         *rsrc_node;
+	uint32_t                              res_id;
+	void                                 *cdm_ops;
+	enum cam_isp_hw_sync_mode             sync_mode;
+	struct cam_isp_in_port_info          *in_port;
+};
+
+/*
+ * struct cam_vfe_acquire_args:
+ *
+ * @rsrc_type:               Type of Resource (OUT/IN) to acquire
+ * @tasklet:                 Tasklet to associate with this resource. This is
+ *                           used to schedule bottom of IRQ events associated
+ *                           with this resource.
+ * @vfe_out:                 Acquire args for VFE_OUT
+ * @vfe_in:                  Acquire args for VFE_IN
+ */
+struct cam_vfe_acquire_args {
+	enum cam_isp_resource_type           rsrc_type;
+	void                                *tasklet;
+	union {
+		struct cam_vfe_hw_vfe_out_acquire_args  vfe_out;
+		struct cam_vfe_hw_vfe_in_acquire_args   vfe_in;
+	};
+};
+
+/*
+ * struct cam_vfe_top_irq_evt_payload:
+ *
+ * @Brief:                   This structure is used to save payload for IRQ
+ *                           related to VFE_TOP resources
+ *
+ * @list:                    list_head node for the payload
+ * @core_index:              Index of VFE HW that generated this IRQ event
+ * @core_info:               Private data of handler in bottom half context
+ * @evt_id:                  IRQ event
+ * @irq_reg_val:             IRQ and Error register values, read when IRQ was
+ *                           handled
+ * @error_type:              Identify different errors
+ * @ts:                      Timestamp
+ */
+struct cam_vfe_top_irq_evt_payload {
+	struct list_head           list;
+	uint32_t                   core_index;
+	void                      *core_info;
+	uint32_t                   evt_id;
+	uint32_t                   irq_reg_val[CAM_IFE_IRQ_REGISTERS_MAX];
+	uint32_t                   error_type;
+	struct cam_isp_timestamp   ts;
+};
+
+/*
+ * struct cam_vfe_bus_irq_evt_payload:
+ *
+ * @Brief:                   This structure is used to save payload for IRQ
+ *                           related to VFE_BUS resources
+ *
+ * @list:                    list_head node for the payload
+ * @core_index:              Index of VFE HW that generated this IRQ event
+ * @core_info:               Private data of handler in bottom half context
+ * @evt_id:                  IRQ event
+ * @irq_reg_val:             IRQ and Error register values, read when IRQ was
+ *                           handled
+ * @error_type:              Identify different errors
+ * @ts:                      Timestamp
+ */
+struct cam_vfe_bus_irq_evt_payload {
+	struct list_head             list;
+	uint32_t                     core_index;
+	void                        *core_info;
+	uint32_t                     evt_id;
+	uint32_t                     irq_reg_val[CAM_IFE_BUS_IRQ_REGISTERS_MAX];
+	uint32_t                     error_type;
+	struct cam_vfe_bus_ver2_priv *bus_priv;
+	struct cam_isp_timestamp     ts;
+};
+
+/*
+ * struct cam_vfe_irq_handler_priv:
+ *
+ * @Brief:                   This structure is used as private data to
+ *                           register with IRQ controller. It has information
+ *                           needed by top half and bottom half.
+ *
+ * @core_index:              Index of VFE HW that generated this IRQ event
+ * @core_info:               Private data of handler in bottom half context
+ * @mem_base:                Mapped base address of the register space
+ * @reset_complete:          Completion structure to be signaled if Reset IRQ
+ *                           is Set
+ */
+struct cam_vfe_irq_handler_priv {
+	uint32_t                     core_index;
+	void                        *core_info;
+	void __iomem                *mem_base;
+	struct completion           *reset_complete;
+};
+
+/*
+ * cam_vfe_hw_init()
+ *
+ * @Brief:                  Initialize VFE HW device
+ *
+ * @vfe_hw:                 vfe_hw interface to fill in and return on
+ *                          successful initialization
+ * @hw_idx:                 Index of VFE HW
+ */
+int cam_vfe_hw_init(struct cam_hw_intf **vfe_hw, uint32_t hw_idx);
+
+/*
+ * cam_vfe_put_evt_payload()
+ *
+ * @Brief:                  Put the evt payload back to free list
+ *
+ * @core_info:              VFE HW core_info
+ * @evt_payload:            Event payload data
+ */
+int cam_vfe_put_evt_payload(void             *core_info,
+	struct cam_vfe_top_irq_evt_payload  **evt_payload);
+
+#endif /* _CAM_VFE_HW_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/Makefile
new file mode 100644
index 0000000..5a67efa
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/Makefile
@@ -0,0 +1,15 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include
+
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_vfe_soc.o cam_vfe_dev.o cam_vfe_core.o
+obj-$(CONFIG_SPECTRA_CAMERA) += vfe_bus/ vfe_top/ vfe170/
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
new file mode 100644
index 0000000..739a1e7
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
@@ -0,0 +1,635 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+#include <linux/ratelimit.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include "cam_ife_hw_mgr.h"
+#include "cam_isp_hw_mgr_intf.h"
+#include "cam_tasklet_util.h"
+#include "cam_vfe_bus.h"
+#include "cam_vfe_core.h"
+#include "cam_vfe_soc.h"
+#include "cam_vfe_top.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+static const char drv_name[] = "vfe";
+
+/*
+ * Offsets of the IRQ status_0/status_1/violation registers read in the
+ * top half to snapshot IRQ state (see cam_vfe_irq_top_half()).
+ * NOTE(review): raw offsets for this VFE version -- confirm against the
+ * VFE170 register map.
+ */
+static uint32_t irq_reg_offset[CAM_IFE_IRQ_REGISTERS_MAX] = {
+	0x0000006C,
+	0x00000070,
+	0x0000007C,
+};
+
+/* IRQ bits subscribed when a CAMIF input resource is started */
+static uint32_t camif_irq_reg_mask[CAM_IFE_IRQ_REGISTERS_MAX] = {
+	0x0003FD1F,
+	0x0FFF7EB3,
+};
+
+/* IRQ bits subscribed when an RDI input resource is started */
+static uint32_t rdi_irq_reg_mask[CAM_IFE_IRQ_REGISTERS_MAX] = {
+	0x780000e0,
+	0x00000000,
+};
+
+/* Reset-done bit (bit 31 of status_0), see cam_vfe_reset_irq_top_half() */
+static uint32_t top_reset_irq_reg_mask[CAM_IFE_IRQ_REGISTERS_MAX] = {
+	0x80000000,
+	0x00000000,
+};
+
+/* Bus (buf-done) IRQ bit subscribed for VFE_OUT resources */
+static uint32_t bus_irq_reg_mask[CAM_IFE_IRQ_REGISTERS_MAX] = {
+	0x00000200,
+	0x00000000,
+};
+
+/*
+ * Pop one event payload off the free list, called from IRQ top half.
+ * Returns 0 with *evt_payload set, or -ENODEV when the pool is empty.
+ */
+static int cam_vfe_get_evt_payload(struct cam_vfe_hw_core_info *core_info,
+	struct cam_vfe_top_irq_evt_payload    **evt_payload)
+{
+	int rc = 0;
+
+	spin_lock(&core_info->spin_lock);
+	if (!list_empty(&core_info->free_payload_list)) {
+		*evt_payload = list_first_entry(&core_info->free_payload_list,
+			struct cam_vfe_top_irq_evt_payload, list);
+		list_del_init(&(*evt_payload)->list);
+	} else {
+		*evt_payload = NULL;
+		rc = -ENODEV;
+	}
+	spin_unlock(&core_info->spin_lock);
+
+	if (rc)
+		pr_err_ratelimited("No free payload, core info 0x%x\n",
+			core_info->cpas_handle);
+
+	return rc;
+}
+
+/*
+ * Return an event payload to the free list (bottom-half context).
+ *
+ * Fix: guard against a NULL evt_payload pointer before dereferencing
+ * it; previously only *evt_payload was validated, so passing NULL
+ * crashed. Also terminate the first error message with '\n'.
+ *
+ * Returns 0 on success, -EINVAL on invalid arguments.
+ */
+int cam_vfe_put_evt_payload(void             *core_info,
+	struct cam_vfe_top_irq_evt_payload  **evt_payload)
+{
+	struct cam_vfe_hw_core_info        *vfe_core_info = core_info;
+	unsigned long                       flags;
+
+	if (!core_info) {
+		pr_err("Invalid param core_info NULL\n");
+		return -EINVAL;
+	}
+	if (!evt_payload || *evt_payload == NULL) {
+		pr_err("No payload to put\n");
+		return -EINVAL;
+	}
+
+	/* irqsave: may race with the top half taking the same lock */
+	spin_lock_irqsave(&vfe_core_info->spin_lock, flags);
+	list_add_tail(&(*evt_payload)->list, &vfe_core_info->free_payload_list);
+	spin_unlock_irqrestore(&vfe_core_info->spin_lock, flags);
+
+	/* Clear caller's pointer so the payload cannot be reused */
+	*evt_payload = NULL;
+	return 0;
+}
+
+/*
+ * Query HW capabilities by delegating to the top sub-module, when it
+ * provides a get_hw_caps op. Always returns 0 for valid arguments.
+ */
+int cam_vfe_get_hw_caps(void *hw_priv, void *get_hw_cap_args, uint32_t arg_size)
+{
+	struct cam_hw_info          *vfe_dev = hw_priv;
+	struct cam_vfe_hw_core_info *core_info;
+
+	CDBG("Enter\n");
+	if (!hw_priv) {
+		pr_err("%s: Invalid arguments\n", __func__);
+		return -EINVAL;
+	}
+
+	core_info = (struct cam_vfe_hw_core_info *)vfe_dev->core_info;
+	if (core_info->vfe_top->hw_ops.get_hw_caps)
+		core_info->vfe_top->hw_ops.get_hw_caps(
+			core_info->vfe_top->top_priv,
+			get_hw_cap_args, arg_size);
+
+	CDBG("Exit\n");
+	return 0;
+}
+
+/*
+ * Top-half handler subscribed only for the duration of a HW reset
+ * (see cam_vfe_reset()). Signals the reset completion when the
+ * reset-done bit (bit 31 of IRQ status_0) is set.
+ *
+ * Returns 0 when the reset IRQ was consumed, -EINVAL otherwise.
+ */
+int cam_vfe_reset_irq_top_half(uint32_t    evt_id,
+	struct cam_irq_th_payload         *th_payload)
+{
+	int32_t                            rc = -EINVAL;
+	struct cam_vfe_irq_handler_priv   *handler_priv;
+
+	handler_priv = th_payload->handler_priv;
+
+	CDBG("Enter\n");
+	CDBG("IRQ status_0 = 0x%x\n", th_payload->evt_status_arr[0]);
+
+	if (th_payload->evt_status_arr[0] & (1<<31)) {
+		CDBG("Calling Complete for RESET CMD\n");
+		complete(handler_priv->reset_complete);
+
+		/*
+		 * Clear All IRQs to avoid spurious IRQs immediately
+		 * after Reset Done.
+		 * NOTE(review): 0x64/0x68 look like IRQ_CLEAR_0/1 and
+		 * 0x58 like the IRQ_CMD trigger -- confirm against the
+		 * VFE register map.
+		 */
+		cam_io_w(0xFFFFFFFF, handler_priv->mem_base + 0x64);
+		cam_io_w(0xFFFFFFFF, handler_priv->mem_base + 0x68);
+		cam_io_w(0x1, handler_priv->mem_base + 0x58);
+
+		rc = 0;
+	}
+
+	CDBG("Exit\n");
+	return rc;
+}
+
+/*
+ * Power up and reset the VFE HW. Reference-counted through open_count:
+ * only the first caller enables SOC resources and performs the reset;
+ * later callers just bump the count.
+ *
+ * NOTE(review): the mutex is dropped after incrementing open_count but
+ * before SOC enable/reset completes, so a concurrent second caller can
+ * return 0 while initialization is still in flight -- confirm callers
+ * serialize init externally.
+ *
+ * Returns 0 on success, negative errno on failure (count rolled back).
+ */
+int cam_vfe_init_hw(void *hw_priv, void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info                *vfe_hw = hw_priv;
+	struct cam_hw_soc_info            *soc_info = NULL;
+	struct cam_vfe_hw_core_info       *core_info = NULL;
+	int rc = 0;
+
+	CDBG("Enter\n");
+	if (!hw_priv) {
+		pr_err("Invalid arguments\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&vfe_hw->hw_mutex);
+	vfe_hw->open_count++;
+	if (vfe_hw->open_count > 1) {
+		mutex_unlock(&vfe_hw->hw_mutex);
+		CDBG("VFE has already been initialized cnt %d\n",
+			vfe_hw->open_count);
+		return 0;
+	}
+	mutex_unlock(&vfe_hw->hw_mutex);
+
+	soc_info = &vfe_hw->soc_info;
+	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
+
+	/* Turn ON Regulators, Clocks and other SOC resources */
+	rc = cam_vfe_enable_soc_resources(soc_info);
+	if (rc) {
+		pr_err("Enable SOC failed\n");
+		rc = -EFAULT;
+		goto decrement_open_cnt;
+	}
+
+	CDBG("Enable soc done\n");
+
+	/* Do HW Reset */
+	rc = cam_vfe_reset(hw_priv, NULL, 0);
+	if (rc) {
+		pr_err("Reset Failed\n");
+		goto disable_soc;
+	}
+
+	return 0;
+
+disable_soc:
+	cam_vfe_disable_soc_resources(soc_info);
+decrement_open_cnt:
+	mutex_lock(&vfe_hw->hw_mutex);
+	vfe_hw->open_count--;
+	mutex_unlock(&vfe_hw->hw_mutex);
+	return rc;
+}
+
+/*
+ * Power down the VFE HW. Counterpart of cam_vfe_init_hw(): decrements
+ * open_count and only the last caller actually disables SOC resources.
+ *
+ * Returns 0 on success, -EFAULT on unbalanced deinit, or the SOC
+ * disable result.
+ */
+int cam_vfe_deinit_hw(void *hw_priv, void *deinit_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info                *vfe_hw = hw_priv;
+	struct cam_hw_soc_info            *soc_info = NULL;
+	int rc = 0;
+
+	CDBG("Enter\n");
+	if (!hw_priv) {
+		pr_err("%s: Invalid arguments\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&vfe_hw->hw_mutex);
+	if (!vfe_hw->open_count) {
+		mutex_unlock(&vfe_hw->hw_mutex);
+		pr_err("Error! Unbalanced deinit\n");
+		return -EFAULT;
+	}
+	vfe_hw->open_count--;
+	if (vfe_hw->open_count) {
+		mutex_unlock(&vfe_hw->hw_mutex);
+		CDBG("open_cnt non-zero =%d\n", vfe_hw->open_count);
+		return 0;
+	}
+	mutex_unlock(&vfe_hw->hw_mutex);
+
+	soc_info = &vfe_hw->soc_info;
+
+	/* Turn OFF Regulators, Clocks and other SOC resources */
+	CDBG("Disable SOC resource\n");
+	rc = cam_vfe_disable_soc_resources(soc_info);
+	if (rc)
+		pr_err("Disable SOC failed\n");
+
+	vfe_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+
+	CDBG("Exit\n");
+	return rc;
+}
+
+/*
+ * Reset the VFE HW: subscribe a temporary top-half for the reset-done
+ * IRQ, trigger reset through the top hw_ops, and wait up to 500ms for
+ * the completion signalled by cam_vfe_reset_irq_top_half().
+ *
+ * Fixes: wait_for_completion_timeout() takes jiffies, not ms, so the
+ * bare "500" made the timeout HZ-dependent; and the timeout result was
+ * overwritten by the unsubscribe rc, so callers never saw a failure.
+ *
+ * Returns 0 on success, -ETIMEDOUT if reset-done never fired,
+ * -EINVAL/-EFAULT on bad arguments or subscribe failure.
+ */
+int cam_vfe_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size)
+{
+	struct cam_hw_info                *vfe_hw  = hw_priv;
+	struct cam_hw_soc_info            *soc_info = NULL;
+	struct cam_vfe_hw_core_info       *core_info = NULL;
+	int rc;
+
+	CDBG("Enter\n");
+
+	if (!hw_priv) {
+		pr_err("Invalid input arguments\n");
+		return -EINVAL;
+	}
+
+	soc_info = &vfe_hw->soc_info;
+	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
+
+	/* Private data handed to the one-shot reset top-half */
+	core_info->irq_payload.core_index = soc_info->index;
+	core_info->irq_payload.mem_base =
+		vfe_hw->soc_info.reg_map[VFE_CORE_BASE_IDX].mem_base;
+	core_info->irq_payload.core_info = core_info;
+	core_info->irq_payload.reset_complete = &vfe_hw->hw_complete;
+
+	core_info->irq_handle = cam_irq_controller_subscribe_irq(
+		core_info->vfe_irq_controller, CAM_IRQ_PRIORITY_0,
+		top_reset_irq_reg_mask, &core_info->irq_payload,
+		cam_vfe_reset_irq_top_half, NULL, NULL, NULL);
+	if (core_info->irq_handle < 0) {
+		pr_err("subscribe irq controller failed\n");
+		return -EFAULT;
+	}
+
+	reinit_completion(&vfe_hw->hw_complete);
+
+	CDBG("calling RESET\n");
+	core_info->vfe_top->hw_ops.reset(core_info->vfe_top->top_priv, NULL, 0);
+	CDBG("waiting for vfe reset complete\n");
+	/* Wait for Completion or Timeout of 500ms */
+	rc = wait_for_completion_timeout(&vfe_hw->hw_complete,
+		msecs_to_jiffies(500));
+	if (!rc) {
+		pr_err("Error! Reset Timeout\n");
+		rc = -ETIMEDOUT;
+	} else {
+		CDBG("reset complete done (%d)\n", rc);
+		rc = 0;
+	}
+
+	/* The reset handler is one-shot; drop it regardless of outcome */
+	if (cam_irq_controller_unsubscribe_irq(
+		core_info->vfe_irq_controller, core_info->irq_handle))
+		pr_err("Error! Unsubscribe failed\n");
+
+	CDBG("Exit\n");
+	return rc;
+}
+
+/*
+ * Fill time_stamp->mono_time with the current monotonic boot time,
+ * converting nanoseconds to microseconds.
+ */
+void cam_isp_hw_get_timestamp(struct cam_isp_timestamp *time_stamp)
+{
+	struct timespec now;
+
+	get_monotonic_boottime(&now);
+	time_stamp->mono_time.tv_sec  = now.tv_sec;
+	time_stamp->mono_time.tv_usec = now.tv_nsec / 1000;
+}
+
+
+/*
+ * Common top-half for CAMIF/RDI IRQs: grab a free event payload,
+ * snapshot the IRQ status registers into it and hand it to the bottom
+ * half through th_payload->evt_payload_priv.
+ *
+ * Returns 0 on success, or the error from payload allocation.
+ */
+int cam_vfe_irq_top_half(uint32_t    evt_id,
+	struct cam_irq_th_payload   *th_payload)
+{
+	int32_t                              rc;
+	int                                  i;
+	struct cam_vfe_irq_handler_priv     *handler_priv;
+	struct cam_vfe_top_irq_evt_payload  *evt_payload;
+
+	handler_priv = th_payload->handler_priv;
+
+	CDBG("IRQ status_0 = %x\n", th_payload->evt_status_arr[0]);
+	CDBG("IRQ status_1 = %x\n", th_payload->evt_status_arr[1]);
+
+	rc  = cam_vfe_get_evt_payload(handler_priv->core_info, &evt_payload);
+	if (rc) {
+		pr_err_ratelimited("No tasklet_cmd is free in queue\n");
+		return rc;
+	}
+
+	cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+	evt_payload->core_index = handler_priv->core_index;
+	evt_payload->core_info  = handler_priv->core_info;
+	evt_payload->evt_id  = evt_id;
+
+	/* Copy statuses already latched by the IRQ controller ... */
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+	/* ... and read the remaining registers (e.g. violation) from HW */
+	for (; i < CAM_IFE_IRQ_REGISTERS_MAX; i++) {
+		evt_payload->irq_reg_val[i] = cam_io_r(handler_priv->mem_base +
+			irq_reg_offset[i]);
+	}
+	CDBG("Violation status = %x\n", evt_payload->irq_reg_val[2]);
+
+	/*
+	 *  need to handle overflow condition here, otherwise irq storm
+	 *  will block everything.
+	 *  NOTE(review): 0x60/0x5C look like IRQ_MASK_1/0 -- confirm
+	 *  against the VFE register map.
+	 */
+	if (evt_payload->irq_reg_val[1]) {
+		pr_err("Mask all the interrupts\n");
+		cam_io_w(0, handler_priv->mem_base + 0x60);
+		cam_io_w(0, handler_priv->mem_base + 0x5C);
+
+		evt_payload->error_type = CAM_ISP_HW_ERROR_OVERFLOW;
+	}
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	CDBG("Exit\n");
+	return rc;
+}
+
+/*
+ * Reserve (acquire) an input (top) or output (bus) resource.
+ *
+ * Fix: the top reserve op was passed sizeof(acquire) -- the size of a
+ * pointer -- instead of the size of the acquire structure.
+ *
+ * Returns the sub-module's result, -EINVAL on bad arguments, or
+ * -ENODEV for an unknown resource type.
+ */
+int cam_vfe_reserve(void *hw_priv, void *reserve_args, uint32_t arg_size)
+{
+	struct cam_vfe_hw_core_info       *core_info = NULL;
+	struct cam_hw_info                *vfe_hw  = hw_priv;
+	struct cam_vfe_acquire_args       *acquire;
+	int rc = -ENODEV;
+
+	if (!hw_priv || !reserve_args || (arg_size !=
+		sizeof(struct cam_vfe_acquire_args))) {
+		pr_err("Invalid input arguments\n");
+		return -EINVAL;
+	}
+	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
+	acquire = (struct cam_vfe_acquire_args   *)reserve_args;
+
+	mutex_lock(&vfe_hw->hw_mutex);
+	if (acquire->rsrc_type == CAM_ISP_RESOURCE_VFE_IN)
+		rc = core_info->vfe_top->hw_ops.reserve(
+			core_info->vfe_top->top_priv,
+			acquire,
+			sizeof(struct cam_vfe_acquire_args));
+	else if (acquire->rsrc_type == CAM_ISP_RESOURCE_VFE_OUT)
+		rc = core_info->vfe_bus->acquire_resource(
+			core_info->vfe_bus->bus_priv, acquire);
+	else
+		pr_err("Invalid res type:%d\n", acquire->rsrc_type);
+
+	mutex_unlock(&vfe_hw->hw_mutex);
+
+	return rc;
+}
+
+
+/*
+ * Release a previously reserved input (top) or output (bus) resource.
+ * Returns the sub-module's result, -EINVAL on bad arguments, or
+ * -ENODEV for an unknown resource type.
+ */
+int cam_vfe_release(void *hw_priv, void *release_args, uint32_t arg_size)
+{
+	struct cam_hw_info           *vfe_hw = hw_priv;
+	struct cam_vfe_hw_core_info  *core_info;
+	struct cam_isp_resource_node *isp_res;
+	int rc = -ENODEV;
+
+	if (!hw_priv || !release_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		pr_err("Invalid input arguments\n");
+		return -EINVAL;
+	}
+
+	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
+	isp_res = (struct cam_isp_resource_node *)release_args;
+
+	mutex_lock(&vfe_hw->hw_mutex);
+	switch (isp_res->res_type) {
+	case CAM_ISP_RESOURCE_VFE_IN:
+		rc = core_info->vfe_top->hw_ops.release(
+			core_info->vfe_top->top_priv, isp_res,
+			sizeof(struct cam_isp_resource_node));
+		break;
+	case CAM_ISP_RESOURCE_VFE_OUT:
+		rc = core_info->vfe_bus->release_resource(
+			core_info->vfe_bus->bus_priv, isp_res);
+		break;
+	default:
+		pr_err("Invalid res type:%d\n", isp_res->res_type);
+		break;
+	}
+	mutex_unlock(&vfe_hw->hw_mutex);
+
+	return rc;
+}
+
+
+/*
+ * Start a reserved resource: subscribe its IRQ bits (CAMIF/RDI masks
+ * for inputs, bus mask for outputs) and kick the corresponding
+ * sub-module's start op.
+ *
+ * NOTE(review): success here is tested as irq_handle > 0, while
+ * cam_vfe_reset() treats only < 0 as failure -- confirm whether a
+ * handle of 0 is valid; if so, these checks are inconsistent.
+ *
+ * Returns the sub-module's result, -EINVAL on bad arguments, or
+ * -ENODEV for an unknown resource type or subscribe failure.
+ */
+int cam_vfe_start(void *hw_priv, void *start_args, uint32_t arg_size)
+{
+	struct cam_vfe_hw_core_info       *core_info = NULL;
+	struct cam_hw_info                *vfe_hw  = hw_priv;
+	struct cam_isp_resource_node      *isp_res;
+	int rc = -ENODEV;
+
+	if (!hw_priv || !start_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		pr_err("Invalid input arguments\n");
+		return -EINVAL;
+	}
+
+	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
+	isp_res = (struct cam_isp_resource_node  *)start_args;
+
+	mutex_lock(&vfe_hw->hw_mutex);
+	if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_IN) {
+		/* CAMIF and RDI inputs differ only in the mask subscribed */
+		if (isp_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF)
+			isp_res->irq_handle = cam_irq_controller_subscribe_irq(
+				core_info->vfe_irq_controller,
+				CAM_IRQ_PRIORITY_2,
+				camif_irq_reg_mask, &core_info->irq_payload,
+				cam_vfe_irq_top_half, cam_ife_mgr_do_tasklet,
+				isp_res->tasklet_info, cam_tasklet_enqueue_cmd);
+		else
+			isp_res->irq_handle = cam_irq_controller_subscribe_irq(
+				core_info->vfe_irq_controller,
+				CAM_IRQ_PRIORITY_2,
+				rdi_irq_reg_mask, &core_info->irq_payload,
+				cam_vfe_irq_top_half, cam_ife_mgr_do_tasklet,
+				isp_res->tasklet_info, cam_tasklet_enqueue_cmd);
+
+		if (isp_res->irq_handle > 0)
+			rc = core_info->vfe_top->hw_ops.start(
+				core_info->vfe_top->top_priv, isp_res,
+				sizeof(struct cam_isp_resource_node));
+		else
+			pr_err("Error! subscribe irq controller failed\n");
+	} else if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_OUT) {
+		isp_res->irq_handle = cam_irq_controller_subscribe_irq(
+			core_info->vfe_irq_controller, CAM_IRQ_PRIORITY_1,
+			bus_irq_reg_mask, &core_info->irq_payload,
+			core_info->vfe_bus->top_half_handler,
+			cam_ife_mgr_do_tasklet_buf_done,
+			isp_res->tasklet_info, cam_tasklet_enqueue_cmd);
+		if (isp_res->irq_handle > 0)
+			rc = core_info->vfe_bus->start_resource(isp_res);
+		else
+			pr_err("Error! subscribe irq controller failed\n");
+	} else {
+		pr_err("Invalid res type:%d\n", isp_res->res_type);
+	}
+
+	mutex_unlock(&vfe_hw->hw_mutex);
+
+	return rc;
+}
+
+/*
+ * Stop a started resource: unsubscribe the IRQ handle taken in
+ * cam_vfe_start() and invoke the matching sub-module stop op.
+ * Returns the sub-module's result or -EINVAL on bad arguments.
+ */
+int cam_vfe_stop(void *hw_priv, void *stop_args, uint32_t arg_size)
+{
+	struct cam_hw_info           *vfe_hw = hw_priv;
+	struct cam_vfe_hw_core_info  *core_info;
+	struct cam_isp_resource_node *isp_res;
+	int rc = -EINVAL;
+
+	if (!hw_priv || !stop_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		pr_err("Invalid input arguments\n");
+		return -EINVAL;
+	}
+
+	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
+	isp_res = (struct cam_isp_resource_node *)stop_args;
+
+	mutex_lock(&vfe_hw->hw_mutex);
+	switch (isp_res->res_type) {
+	case CAM_ISP_RESOURCE_VFE_IN:
+		cam_irq_controller_unsubscribe_irq(
+			core_info->vfe_irq_controller, isp_res->irq_handle);
+		rc = core_info->vfe_top->hw_ops.stop(
+			core_info->vfe_top->top_priv, isp_res,
+			sizeof(struct cam_isp_resource_node));
+		break;
+	case CAM_ISP_RESOURCE_VFE_OUT:
+		cam_irq_controller_unsubscribe_irq(
+			core_info->vfe_irq_controller, isp_res->irq_handle);
+		rc = core_info->vfe_bus->stop_resource(isp_res);
+		break;
+	default:
+		pr_err("Invalid res type:%d\n", isp_res->res_type);
+		break;
+	}
+	mutex_unlock(&vfe_hw->hw_mutex);
+
+	return rc;
+}
+
+/* Direct register read is not supported; use process_cmd instead */
+int cam_vfe_read(void *hw_priv, void *read_args, uint32_t arg_size)
+{
+	return -EPERM;
+}
+
+/* Direct register write is not supported; use process_cmd instead */
+int cam_vfe_write(void *hw_priv, void *write_args, uint32_t arg_size)
+{
+	return -EPERM;
+}
+
+/*
+ * Dispatch a HW-layer command to the top sub-module (change base /
+ * reg update) or the bus sub-module (buf update).
+ *
+ * Cleanup: removed the unused soc_info and hw_info locals that were
+ * assigned but never read.
+ *
+ * Returns the sub-module's result, or -EINVAL for an unknown cmd_type.
+ */
+int cam_vfe_process_cmd(void *hw_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_hw_info                *vfe_hw = hw_priv;
+	struct cam_vfe_hw_core_info       *core_info = NULL;
+	int rc = 0;
+
+	if (!hw_priv) {
+		pr_err("Invalid arguments\n");
+		return -EINVAL;
+	}
+
+	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
+
+	switch (cmd_type) {
+	case CAM_VFE_HW_CMD_GET_CHANGE_BASE:
+	case CAM_VFE_HW_CMD_GET_REG_UPDATE:
+		rc = core_info->vfe_top->hw_ops.process_cmd(
+			core_info->vfe_top->top_priv, cmd_type, cmd_args,
+			arg_size);
+		break;
+	case CAM_VFE_HW_CMD_GET_BUF_UPDATE:
+		rc = core_info->vfe_bus->process_cmd(
+			core_info->vfe_bus->bus_priv, cmd_type, cmd_args,
+			arg_size);
+		break;
+	default:
+		pr_err("Invalid cmd type:%d\n", cmd_type);
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+/*
+ * Hard IRQ entry point registered at probe; forwards the interrupt to
+ * the per-core IRQ controller for demultiplexing.
+ */
+irqreturn_t cam_vfe_irq(int irq_num, void *data)
+{
+	struct cam_hw_info          *vfe_hw = data;
+	struct cam_vfe_hw_core_info *core_info;
+
+	if (!vfe_hw)
+		return IRQ_NONE;
+
+	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
+	return cam_irq_controller_handle_irq(irq_num,
+		core_info->vfe_irq_controller);
+}
+
+/*
+ * One-time core setup: create the IRQ controller, the top and bus
+ * sub-modules, and seed the free event-payload pool.
+ *
+ * NOTE(review): if top or bus init fails, the already-created IRQ
+ * controller (and top) are not torn down -- resources leak on the
+ * error path; confirm whether a deinit API exists to unwind here.
+ *
+ * Returns 0 on success, negative errno on any sub-init failure.
+ */
+int cam_vfe_core_init(struct cam_vfe_hw_core_info  *core_info,
+	struct cam_hw_soc_info                     *soc_info,
+	struct cam_hw_intf                         *hw_intf,
+	struct cam_vfe_hw_info                     *vfe_hw_info)
+{
+	int rc = -EINVAL;
+	int i;
+
+	CDBG("Enter");
+
+	rc = cam_irq_controller_init(drv_name,
+		CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX),
+		vfe_hw_info->irq_reg_info, &core_info->vfe_irq_controller);
+	if (rc) {
+		pr_err("Error! cam_irq_controller_init failed\n");
+		return rc;
+	}
+
+	rc = cam_vfe_top_init(vfe_hw_info->top_version,
+		soc_info, hw_intf, vfe_hw_info->top_hw_info,
+		&core_info->vfe_top);
+	if (rc) {
+		pr_err("Error! cam_vfe_top_init failed\n");
+		return rc;
+	}
+
+	rc = cam_vfe_bus_init(vfe_hw_info->bus_version,
+		soc_info->reg_map[0].mem_base, hw_intf,
+		vfe_hw_info->bus_hw_info, NULL, &core_info->vfe_bus);
+	if (rc) {
+		pr_err("Error! cam_vfe_bus_init failed\n");
+		return rc;
+	}
+
+	/* All payloads start on the free list; top half pops, BH pushes */
+	INIT_LIST_HEAD(&core_info->free_payload_list);
+	for (i = 0; i < CAM_VFE_EVT_MAX; i++) {
+		INIT_LIST_HEAD(&core_info->evt_payload[i].list);
+		list_add_tail(&core_info->evt_payload[i].list,
+			&core_info->free_payload_list);
+	}
+
+	spin_lock_init(&core_info->spin_lock);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h
new file mode 100644
index 0000000..94b4cf0
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h
@@ -0,0 +1,92 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_VFE_CORE_H_
+#define _CAM_VFE_CORE_H_
+
+#include <linux/spinlock.h>
+#include "cam_hw_intf.h"
+#include "cam_vfe_top.h"
+#include "cam_vfe_bus.h"
+#include "cam_vfe_hw_intf.h"
+
+/*
+ * struct cam_vfe_hw_info:
+ *
+ * @Brief:             Per-version static description of one VFE HW,
+ *                     matched from the of_device_id table at probe.
+ *
+ * @irq_reg_info:      Register layout handed to the IRQ controller
+ * @bus_version:       Bus sub-module version (selects implementation)
+ * @bus_hw_info:       Version-specific bus HW description
+ * @top_version:       Top sub-module version (selects implementation)
+ * @top_hw_info:       Version-specific top HW description
+ * @camif_version:     CAMIF block version
+ * @camif_reg:         CAMIF register description
+ * @testgen_version:   Test-pattern generator version
+ * @testgen_reg:       Test-pattern generator register description
+ * @num_qos_settings:  Number of entries in @qos_settings
+ * @qos_settings:      QoS reg/value pairs applied at init
+ * @num_ds_settings:   Number of entries in @ds_settings
+ * @ds_settings:       Danger/safe reg/value pairs applied at init
+ * @num_vbif_settings: Number of entries in @vbif_settings
+ * @vbif_settings:     VBIF reg/value pairs applied at init
+ */
+struct cam_vfe_hw_info {
+	struct cam_irq_controller_reg_info *irq_reg_info;
+
+	uint32_t                          bus_version;
+	void                             *bus_hw_info;
+
+	uint32_t                          top_version;
+	void                             *top_hw_info;
+	uint32_t                          camif_version;
+	void                             *camif_reg;
+
+	uint32_t                          testgen_version;
+	void                             *testgen_reg;
+
+	uint32_t                          num_qos_settings;
+	struct cam_isp_reg_val_pair      *qos_settings;
+
+	uint32_t                          num_ds_settings;
+	struct cam_isp_reg_val_pair      *ds_settings;
+
+	uint32_t                          num_vbif_settings;
+	struct cam_isp_reg_val_pair      *vbif_settings;
+};
+
+/* Size of the preallocated event-payload pool per VFE core */
+#define CAM_VFE_EVT_MAX                    256
+
+/*
+ * struct cam_vfe_hw_core_info:
+ *
+ * @Brief:               Runtime state of one VFE core, allocated at probe.
+ *
+ * @vfe_hw_info:         Static HW description matched from DT
+ * @vfe_irq_controller:  Opaque handle from cam_irq_controller_init()
+ * @vfe_top:             Top sub-module instance
+ * @vfe_bus:             Bus sub-module instance
+ * @evt_payload:         Preallocated payload pool backing @free_payload_list
+ * @free_payload_list:   Free payloads, guarded by @spin_lock
+ * @irq_payload:         Private data passed to subscribed IRQ handlers
+ * @cpas_handle:         CPAS client handle (used in error logs)
+ * @irq_handle:          Handle of the temporary reset IRQ subscription
+ * @spin_lock:           Protects @free_payload_list
+ */
+struct cam_vfe_hw_core_info {
+	struct cam_vfe_hw_info             *vfe_hw_info;
+	void                               *vfe_irq_controller;
+	struct cam_vfe_top                 *vfe_top;
+	struct cam_vfe_bus                 *vfe_bus;
+
+	struct cam_vfe_top_irq_evt_payload  evt_payload[CAM_VFE_EVT_MAX];
+	struct list_head                    free_payload_list;
+	struct cam_vfe_irq_handler_priv     irq_payload;
+	uint32_t                            cpas_handle;
+	int                                 irq_handle;
+	spinlock_t                          spin_lock;
+};
+
+int cam_vfe_get_hw_caps(void *device_priv,
+	void *get_hw_cap_args, uint32_t arg_size);
+int cam_vfe_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size);
+int cam_vfe_deinit_hw(void *hw_priv,
+	void *deinit_hw_args, uint32_t arg_size);
+int cam_vfe_reset(void *device_priv,
+	void *reset_core_args, uint32_t arg_size);
+int cam_vfe_reserve(void *device_priv,
+	void *reserve_args, uint32_t arg_size);
+int cam_vfe_release(void *device_priv,
+	void *reserve_args, uint32_t arg_size);
+int cam_vfe_start(void *device_priv,
+	void *start_args, uint32_t arg_size);
+int cam_vfe_stop(void *device_priv,
+	void *stop_args, uint32_t arg_size);
+int cam_vfe_read(void *device_priv,
+	void *read_args, uint32_t arg_size);
+int cam_vfe_write(void *device_priv,
+	void *write_args, uint32_t arg_size);
+int cam_vfe_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size);
+
+irqreturn_t cam_vfe_irq(int irq_num, void *data);
+
+int cam_vfe_core_init(struct cam_vfe_hw_core_info *core_info,
+	struct cam_hw_soc_info             *soc_info,
+	struct cam_hw_intf                 *hw_intf,
+	struct cam_vfe_hw_info             *vfe_hw_info);
+
+#endif /* _CAM_VFE_CORE_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.c
new file mode 100644
index 0000000..40279ae
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.c
@@ -0,0 +1,140 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include "cam_vfe_dev.h"
+#include "cam_vfe_core.h"
+#include "cam_vfe_soc.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+static struct cam_hw_intf *cam_vfe_hw_list[CAM_VFE_HW_NUM_MAX] = {0, 0, 0, 0};
+
+/*
+ * Platform probe: allocate the hw_intf/hw_info/core_info trio, wire up
+ * the hw_ops table, init SOC resources and the core, then do one
+ * init/deinit power cycle as a sanity check before registering the
+ * interface in cam_vfe_hw_list[].
+ *
+ * NOTE(review): the deinit_soc: label releases nothing -- on core-init
+ * failure the SOC/platform resources acquired by
+ * cam_vfe_init_soc_resources() leak. Also the probe-time
+ * cam_vfe_init_hw()/cam_vfe_deinit_hw() results are ignored, and the
+ * of_property_read_u32() return is unchecked -- confirm intent.
+ *
+ * Returns 0 on success, negative errno on failure (allocations freed).
+ */
+int cam_vfe_probe(struct platform_device *pdev)
+{
+	struct cam_hw_info                *vfe_hw = NULL;
+	struct cam_hw_intf                *vfe_hw_intf = NULL;
+	const struct of_device_id         *match_dev = NULL;
+	struct cam_vfe_hw_core_info       *core_info = NULL;
+	struct cam_vfe_hw_info            *hw_info = NULL;
+	int                                rc = 0;
+
+	vfe_hw_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!vfe_hw_intf) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	/* hw_idx stays 0 if "cell-index" is absent (kzalloc'd) */
+	of_property_read_u32(pdev->dev.of_node,
+		"cell-index", &vfe_hw_intf->hw_idx);
+
+	vfe_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!vfe_hw) {
+		rc = -ENOMEM;
+		goto free_vfe_hw_intf;
+	}
+	vfe_hw->soc_info.pdev = pdev;
+	vfe_hw_intf->hw_priv = vfe_hw;
+	vfe_hw_intf->hw_ops.get_hw_caps = cam_vfe_get_hw_caps;
+	vfe_hw_intf->hw_ops.init = cam_vfe_init_hw;
+	vfe_hw_intf->hw_ops.deinit = cam_vfe_deinit_hw;
+	vfe_hw_intf->hw_ops.reset = cam_vfe_reset;
+	vfe_hw_intf->hw_ops.reserve = cam_vfe_reserve;
+	vfe_hw_intf->hw_ops.release = cam_vfe_release;
+	vfe_hw_intf->hw_ops.start = cam_vfe_start;
+	vfe_hw_intf->hw_ops.stop = cam_vfe_stop;
+	vfe_hw_intf->hw_ops.read = cam_vfe_read;
+	vfe_hw_intf->hw_ops.write = cam_vfe_write;
+	vfe_hw_intf->hw_ops.process_cmd = cam_vfe_process_cmd;
+	vfe_hw_intf->hw_type = CAM_ISP_HW_TYPE_VFE;
+
+	CDBG("type %d index %d\n", vfe_hw_intf->hw_type, vfe_hw_intf->hw_idx);
+
+	platform_set_drvdata(pdev, vfe_hw_intf);
+
+	vfe_hw->core_info = kzalloc(sizeof(struct cam_vfe_hw_core_info),
+		GFP_KERNEL);
+	if (!vfe_hw->core_info) {
+		CDBG("Failed to alloc for core\n");
+		rc = -ENOMEM;
+		goto free_vfe_hw;
+	}
+	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
+
+	/* Pick the version-specific HW description from the match table */
+	match_dev = of_match_device(pdev->dev.driver->of_match_table,
+		&pdev->dev);
+	if (!match_dev) {
+		pr_err("Of_match Failed\n");
+		rc = -EINVAL;
+		goto free_core_info;
+	}
+	hw_info = (struct cam_vfe_hw_info *)match_dev->data;
+	core_info->vfe_hw_info = hw_info;
+
+	rc = cam_vfe_init_soc_resources(&vfe_hw->soc_info, cam_vfe_irq,
+		vfe_hw);
+	if (rc < 0) {
+		pr_err("Failed to init soc\n");
+		goto free_core_info;
+	}
+
+	rc = cam_vfe_core_init(core_info, &vfe_hw->soc_info,
+		vfe_hw_intf, hw_info);
+	if (rc < 0) {
+		pr_err("Failed to init core\n");
+		goto deinit_soc;
+	}
+
+	vfe_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	mutex_init(&vfe_hw->hw_mutex);
+	spin_lock_init(&vfe_hw->hw_lock);
+	init_completion(&vfe_hw->hw_complete);
+
+	if (vfe_hw_intf->hw_idx < CAM_VFE_HW_NUM_MAX)
+		cam_vfe_hw_list[vfe_hw_intf->hw_idx] = vfe_hw_intf;
+
+	/* Probe-time power cycle as a HW sanity check; rc not checked */
+	cam_vfe_init_hw(vfe_hw, NULL, 0);
+	cam_vfe_deinit_hw(vfe_hw, NULL, 0);
+
+	CDBG("VFE%d probe successful\n", vfe_hw_intf->hw_idx);
+
+	return rc;
+
+deinit_soc:
+free_core_info:
+	kfree(vfe_hw->core_info);
+free_vfe_hw:
+	kfree(vfe_hw);
+free_vfe_hw_intf:
+	kfree(vfe_hw_intf);
+end:
+	return rc;
+}
+
+/*
+ * Look up the probed VFE interface for hw_idx.
+ *
+ * Fix: validate the out-pointer and bounds-check hw_idx before
+ * indexing cam_vfe_hw_list[] (previously an out-of-range index read
+ * past the array).
+ *
+ * Returns 0 with *vfe_hw set, -EINVAL on bad arguments, -ENODEV if
+ * no VFE with that index was probed.
+ */
+int cam_vfe_hw_init(struct cam_hw_intf **vfe_hw, uint32_t hw_idx)
+{
+	int rc = 0;
+
+	if (!vfe_hw)
+		return -EINVAL;
+
+	if (hw_idx < CAM_VFE_HW_NUM_MAX && cam_vfe_hw_list[hw_idx]) {
+		*vfe_hw = cam_vfe_hw_list[hw_idx];
+		rc = 0;
+	} else {
+		*vfe_hw = NULL;
+		rc = -ENODEV;
+	}
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.h
new file mode 100644
index 0000000..ca54d81
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.h
@@ -0,0 +1,30 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_VFE_DEV_H_
+#define _CAM_VFE_DEV_H_
+
+#include <linux/platform_device.h>
+
+/*
+ * cam_vfe_probe()
+ *
+ * @brief:                   Driver probe function called on Boot
+ *
+ * @pdev:                    Platform Device pointer
+ *
+ * @Return:                  0: Success
+ *                           Non-zero: Failure
+ */
+int cam_vfe_probe(struct platform_device *pdev);
+
+#endif /* _CAM_VFE_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
new file mode 100644
index 0000000..3670ca9
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
@@ -0,0 +1,165 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/slab.h>
+#include "cam_cpas_api.h"
+#include "cam_vfe_soc.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+/* Parse generic camera DT properties for this device; log on failure */
+static int cam_vfe_get_dt_properties(struct cam_hw_soc_info *soc_info)
+{
+	int rc = cam_soc_util_get_dt_properties(soc_info);
+
+	if (rc)
+		pr_err("Error! get DT properties failed\n");
+
+	return rc;
+}
+
+/* Claim clocks, regulators and the IRQ line described in soc_info */
+static int cam_vfe_request_platform_resource(
+	struct cam_hw_soc_info *soc_info,
+	irq_handler_t vfe_irq_handler, void *irq_data)
+{
+	return cam_soc_util_request_platform_resource(soc_info,
+		vfe_irq_handler, irq_data);
+}
+
+/*
+ * Allocate private SOC data, parse DT, request platform resources and
+ * register with CPAS.
+ *
+ * Fix: on failure, soc_info->soc_private previously kept pointing at
+ * the freed allocation; it is now cleared to avoid a dangling pointer.
+ *
+ * Returns 0 on success, negative errno on failure (fully unwound).
+ */
+int cam_vfe_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t vfe_irq_handler, void *irq_data)
+{
+	int                               rc = 0;
+	struct cam_vfe_soc_private       *soc_private;
+	struct cam_cpas_register_params   cpas_register_param;
+
+	soc_private = kzalloc(sizeof(struct cam_vfe_soc_private),
+		GFP_KERNEL);
+	if (!soc_private) {
+		CDBG("Error! soc_private Alloc Failed\n");
+		return -ENOMEM;
+	}
+	soc_info->soc_private = soc_private;
+
+	rc = cam_vfe_get_dt_properties(soc_info);
+	if (rc < 0) {
+		pr_err("Error! Get DT properties failed\n");
+		goto free_soc_private;
+	}
+
+	rc = cam_vfe_request_platform_resource(soc_info, vfe_irq_handler,
+		irq_data);
+	if (rc < 0) {
+		pr_err("Error! Request platform resources failed\n");
+		goto free_soc_private;
+	}
+
+	memset(&cpas_register_param, 0, sizeof(cpas_register_param));
+	strlcpy(cpas_register_param.identifier, "ife",
+		CAM_HW_IDENTIFIER_LENGTH);
+	cpas_register_param.cell_index = soc_info->index;
+	cpas_register_param.dev = &soc_info->pdev->dev;
+	rc = cam_cpas_register_client(&cpas_register_param);
+	if (rc) {
+		pr_err("CPAS registration failed\n");
+		goto release_soc;
+	}
+	soc_private->cpas_handle = cpas_register_param.client_handle;
+
+	return rc;
+
+release_soc:
+	cam_soc_util_release_platform_resource(soc_info);
+free_soc_private:
+	kfree(soc_private);
+	/* Don't leave a dangling pointer to the freed private data */
+	soc_info->soc_private = NULL;
+
+	return rc;
+}
+
+/*
+ * Vote AHB/AXI bandwidth through CPAS and turn on clocks/regulators.
+ * On platform-enable failure the CPAS vote is rolled back.
+ */
+int cam_vfe_enable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	struct cam_vfe_soc_private *soc_private;
+	struct cam_ahb_vote         ahb_vote;
+	struct cam_axi_vote         axi_vote;
+	int                         rc;
+
+	if (!soc_info) {
+		pr_err("Error! Invalid params\n");
+		return -EINVAL;
+	}
+	soc_private = soc_info->soc_private;
+
+	ahb_vote.type       = CAM_VOTE_ABSOLUTE;
+	ahb_vote.vote.level = CAM_SVS_VOTE;
+	axi_vote.compressed_bw   = 640000000;
+	axi_vote.uncompressed_bw = 640000000;
+
+	rc = cam_cpas_start(soc_private->cpas_handle, &ahb_vote, &axi_vote);
+	if (rc) {
+		pr_err("Error! CPAS start failed.\n");
+		return -EFAULT;
+	}
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true, true);
+	if (rc) {
+		pr_err("Error! enable platform failed\n");
+		cam_cpas_stop(soc_private->cpas_handle);
+		return rc;
+	}
+
+	return 0;
+}
+
+
+/*
+ * Turn off clocks/regulators and drop the CPAS vote taken in
+ * cam_vfe_enable_soc_resources().
+ */
+int cam_vfe_disable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	struct cam_vfe_soc_private *soc_private;
+	int                         rc;
+
+	if (!soc_info) {
+		pr_err("Error! Invalid params\n");
+		return -EINVAL;
+	}
+	soc_private = soc_info->soc_private;
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+	if (rc) {
+		pr_err("%s: disable platform failed\n", __func__);
+		return rc;
+	}
+
+	rc = cam_cpas_stop(soc_private->cpas_handle);
+	if (rc)
+		pr_err("Error! CPAS stop failed.\n");
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.h
new file mode 100644
index 0000000..27fb192
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.h
@@ -0,0 +1,71 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_VFE_SOC_H_
+#define _CAM_VFE_SOC_H_
+
+#include "cam_soc_util.h"
+#include "cam_isp_hw.h"
+
+/*
+ * struct cam_vfe_soc_private:
+ *
+ * @Brief:                   Private SOC data specific to VFE HW Driver
+ *
+ * @cpas_handle:             Handle returned on registering with CPAS driver.
+ *                           This handle is used for all further interface
+ *                           with CPAS.
+ */
+struct cam_vfe_soc_private {
+	uint32_t cpas_handle;
+};
+
+/*
+ * cam_vfe_init_soc_resources()
+ *
+ * @Brief:                   Initialize SOC resources including private data
+ *
+ * @soc_info:                Device soc information
+ * @vfe_irq_handler:         Irq handler function pointer
+ * @irq_data:                Irq handler function callback data
+ *
+ * @Return:                  0: Success
+ *                           Non-zero: Failure
+ */
+int cam_vfe_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t vfe_irq_handler, void *irq_data);
+
+/*
+ * cam_vfe_enable_soc_resources()
+ *
+ * @Brief:                   Enable regulator, irq resources, start CPAS
+ *
+ * @soc_info:                Device soc information
+ *
+ * @Return:                  0: Success
+ *                           Non-zero: Failure
+ */
+int cam_vfe_enable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+/*
+ * cam_vfe_disable_soc_resources()
+ *
+ * @Brief:                   Disable regulator, irq resources, stop CPAS
+ *
+ * @soc_info:                Device soc information
+ *
+ * @Return:                  0: Success
+ *                           Non-zero: Failure
+ */
+int cam_vfe_disable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+#endif /* _CAM_VFE_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/Makefile
new file mode 100644
index 0000000..77e4eb3
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/Makefile
@@ -0,0 +1,14 @@
+# Header search paths for the shared camera utils/core code and the ISP
+# hw-manager interfaces that the VFE170 hw description depends on.
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_vfe170.o
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.c
new file mode 100644
index 0000000..2245ab1
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.c
@@ -0,0 +1,53 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include "cam_vfe170.h"
+#include "cam_vfe_hw_intf.h"
+#include "cam_vfe_core.h"
+#include "cam_vfe_dev.h"
+
+/*
+ * VFE170 platform driver glue: binds the "qcom,vfe170" DT compatible to
+ * the common VFE probe code and hands it the VFE170 register tables via
+ * the of_device_id .data pointer.
+ * (The unused local CDBG macro was removed; nothing in this file logged
+ * through it.)
+ */
+static const struct of_device_id cam_vfe170_dt_match[] = {
+	{
+		.compatible = "qcom,vfe170",
+		.data = &cam_vfe170_hw_info,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, cam_vfe170_dt_match);
+
+static struct platform_driver cam_vfe170_driver = {
+	.probe = cam_vfe_probe,
+	.driver = {
+		.name = "cam_vfe170",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_vfe170_dt_match,
+	},
+};
+
+/* Module entry: register the VFE170 platform driver. */
+static int __init cam_vfe170_init_module(void)
+{
+	return platform_driver_register(&cam_vfe170_driver);
+}
+
+/* Module exit: unregister the VFE170 platform driver. */
+static void __exit cam_vfe170_exit_module(void)
+{
+	platform_driver_unregister(&cam_vfe170_driver);
+}
+
+module_init(cam_vfe170_init_module);
+module_exit(cam_vfe170_exit_module);
+MODULE_DESCRIPTION("CAM VFE170 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
new file mode 100644
index 0000000..b550071
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
@@ -0,0 +1,781 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_VFE170_H_
+#define _CAM_VFE170_H_
+
+#include "cam_vfe_camif_ver2.h"
+#include "cam_vfe_bus_ver2.h"
+#include "cam_irq_controller.h"
+#include "cam_vfe_top_ver2.h"
+#include "cam_vfe_core.h"
+
+/* Top-level IRQ banks 0 and 1: mask/clear/status register offsets. */
+static struct cam_irq_register_set vfe170_top_irq_reg_set[2] = {
+	{
+		.mask_reg_offset   = 0x0000005C,
+		.clear_reg_offset  = 0x00000064,
+		.status_reg_offset = 0x0000006C,
+	},
+	{
+		.mask_reg_offset   = 0x00000060,
+		.clear_reg_offset  = 0x00000068,
+		.status_reg_offset = 0x00000070,
+	},
+};
+
+/* Top IRQ controller: the two banks above plus a global-clear command. */
+static struct cam_irq_controller_reg_info vfe170_top_irq_reg_info = {
+	.num_registers = 2,
+	.irq_reg_set = vfe170_top_irq_reg_set,
+	.global_clear_offset  = 0x00000058,
+	.global_clear_bitmask = 0x00000001,
+};
+
+/* CAMIF (camera interface) register offsets. */
+static struct cam_vfe_camif_ver2_reg vfe170_camif_reg = {
+	.camif_cmd                = 0x00000478,
+	.camif_config             = 0x0000047C,
+	.line_skip_pattern        = 0x00000488,
+	.pixel_skip_pattern       = 0x0000048C,
+	.skip_period              = 0x00000490,
+	.irq_subsample_pattern    = 0x0000049C,
+	.epoch_irq                = 0x000004A0,
+	.raw_crop_width_cfg       = 0x00000CE4,
+	.raw_crop_height_cfg      = 0x00000CE8,
+};
+
+/*
+ * Shift/mask values used to program and parse the CAMIF registers above,
+ * plus the IRQ mask bits for SOF / EPOCH0 / reg-update events.
+ */
+static struct cam_vfe_camif_reg_data vfe_170_camif_reg_data = {
+	.raw_crop_first_pixel_shift      = 16,
+	.raw_crop_first_pixel_mask       = 0xFFFF,
+	.raw_crop_last_pixel_shift       = 0x0,
+	.raw_crop_last_pixel_mask        = 0x3FFF,
+	.raw_crop_first_line_shift       = 16,
+	.raw_crop_first_line_mask        = 0xFFFF,
+	.raw_crop_last_line_shift        = 0,
+	.raw_crop_last_line_mask         = 0x3FFF,
+	.input_mux_sel_shift             = 5,
+	.input_mux_sel_mask              = 0x3,
+	.extern_reg_update_shift         = 4,
+	.extern_reg_update_mask          = 1,
+	.pixel_pattern_shift             = 0,
+	.pixel_pattern_mask              = 0x7,
+	.epoch_line_cfg                  = 0x140000,
+	.sof_irq_mask                    = 0x00000001,
+	.epoch0_irq_mask                 = 0x00000004,
+	.reg_update_irq_mask             = 0x00000010,
+};
+
+/*
+ * Per-module-group control registers (reset / clock-gating override /
+ * enable) for the lens, stats, color and zoom processing groups.
+ *
+ * Declared static: they are referenced only by vfe170_top_common_reg in
+ * this header, and non-static definitions in a header would produce
+ * duplicate symbols if it were ever included from a second translation
+ * unit. Every other table in this header is already static.
+ */
+static struct cam_vfe_top_ver2_reg_offset_module_ctrl lens_170_reg = {
+	.reset    = 0x0000001C,
+	.cgc_ovd  = 0x0000002C,
+	.enable   = 0x00000040,
+};
+
+static struct cam_vfe_top_ver2_reg_offset_module_ctrl stats_170_reg = {
+	.reset    = 0x00000020,
+	.cgc_ovd  = 0x00000030,
+	.enable   = 0x00000044,
+};
+
+static struct cam_vfe_top_ver2_reg_offset_module_ctrl color_170_reg = {
+	.reset    = 0x00000024,
+	.cgc_ovd  = 0x00000034,
+	.enable   = 0x00000048,
+};
+
+static struct cam_vfe_top_ver2_reg_offset_module_ctrl zoom_170_reg = {
+	.reset    = 0x00000028,
+	.cgc_ovd  = 0x00000038,
+	.enable   = 0x0000004C,
+};
+
+/*
+ * VFE top common registers; module_ctrl points at the per-group control
+ * tables defined above (lens, stats, color, zoom — in that order).
+ */
+static struct cam_vfe_top_ver2_reg_offset_common vfe170_top_common_reg = {
+	.hw_version               = 0x00000000,
+	.hw_capability            = 0x00000004,
+	.lens_feature             = 0x00000008,
+	.stats_feature            = 0x0000000C,
+	.color_feature            = 0x00000010,
+	.zoom_feature             = 0x00000014,
+	.global_reset_cmd         = 0x00000018,
+	.module_ctrl              = {
+		&lens_170_reg,
+		&stats_170_reg,
+		&color_170_reg,
+		&zoom_170_reg,
+	},
+	.bus_cgc_ovd              = 0x0000003C,
+	.core_cfg                 = 0x00000050,
+	.three_D_cfg              = 0x00000054,
+	.violation_status         = 0x0000007C,
+	.reg_update_cmd           = 0x000004AC,
+};
+
+/* VFE170 top description: one CAMIF input mux plus three RDI muxes. */
+static struct cam_vfe_top_ver2_hw_info vfe170_top_hw_info = {
+	.common_reg = &vfe170_top_common_reg,
+	.camif_hw_info = {
+		.common_reg = &vfe170_top_common_reg,
+		.camif_reg =  &vfe170_camif_reg,
+		.reg_data  =  &vfe_170_camif_reg_data,
+		},
+	.mux_type = {
+		CAM_VFE_CAMIF_VER_2_0,
+		CAM_VFE_RDI_VER_1_0,
+		CAM_VFE_RDI_VER_1_0,
+		CAM_VFE_RDI_VER_1_0,
+	},
+};
+
+/* BUS IRQ banks 0-2: mask/clear/status register offsets. */
+static struct cam_irq_register_set vfe170_bus_irq_reg[3] = {
+		{
+			.mask_reg_offset   = 0x00002044,
+			.clear_reg_offset  = 0x00002050,
+			.status_reg_offset = 0x0000205C,
+		},
+		{
+			.mask_reg_offset   = 0x00002048,
+			.clear_reg_offset  = 0x00002054,
+			.status_reg_offset = 0x00002060,
+		},
+		{
+			.mask_reg_offset   = 0x0000204C,
+			.clear_reg_offset  = 0x00002058,
+			.status_reg_offset = 0x00002064,
+		},
+};
+
+/* UBWC (bandwidth-compression) registers for bus client 3 (base 0x2500). */
+static struct cam_vfe_bus_ver2_reg_offset_ubwc_client ubwc_regs_client_3 = {
+	.tile_cfg         = 0x0000252C,
+	.h_init           = 0x00002530,
+	.v_init           = 0x00002534,
+	.meta_addr        = 0x00002538,
+	.meta_offset      = 0x0000253C,
+	.meta_stride      = 0x00002540,
+	.mode_cfg         = 0x00002544,
+};
+
+/* UBWC registers for bus client 4 (base 0x2600). */
+static struct cam_vfe_bus_ver2_reg_offset_ubwc_client ubwc_regs_client_4 = {
+	.tile_cfg         = 0x0000262C,
+	.h_init           = 0x00002630,
+	.v_init           = 0x00002634,
+	.meta_addr        = 0x00002638,
+	.meta_offset      = 0x0000263C,
+	.meta_stride      = 0x00002640,
+	.mode_cfg         = 0x00002644,
+};
+
+static struct cam_vfe_bus_ver2_hw_info vfe170_bus_hw_info = {
+	.common_reg = {
+		.hw_version                   = 0x00002000,
+		.hw_capability                = 0x00002004,
+		.sw_reset                     = 0x00002008,
+		.cgc_ovd                      = 0x0000200C,
+		.pwr_iso_cfg                  = 0x000020CC,
+		.dual_master_comp_cfg         = 0x00002028,
+		.irq_reg_info = {
+			.num_registers        = 3,
+			.irq_reg_set          = vfe170_bus_irq_reg,
+			.global_clear_offset  = 0x00002068,
+			.global_clear_bitmask = 0x00000001,
+		},
+		.comp_error_status            = 0x0000206C,
+		.comp_ovrwr_status            = 0x00002070,
+		.dual_comp_error_status       = 0x00002074,
+		.dual_comp_error_status       = 0x00002078,
+	},
+	.bus_client_reg = {
+		/* BUS Client 0 */
+		{
+			.status0                  = 0x00002200,
+			.status1                  = 0x00002204,
+			.cfg                      = 0x00002208,
+			.header_addr              = 0x0000220C,
+			.header_cfg               = 0x00002210,
+			.image_addr               = 0x00002214,
+			.image_addr_offset        = 0x00002218,
+			.buffer_width_cfg         = 0x0000221C,
+			.buffer_height_cfg        = 0x00002220,
+			.packer_cfg               = 0x00002224,
+			.stride                   = 0x00002228,
+			.irq_subsample_period     = 0x00002248,
+			.irq_subsample_pattern    = 0x0000224C,
+			.framedrop_period         = 0x00002250,
+			.framedrop_pattern        = 0x00002254,
+			.frame_inc                = 0x00002258,
+			.burst_limit              = 0x0000225C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 1 */
+		{
+			.status0                  = 0x00002300,
+			.status1                  = 0x00002304,
+			.cfg                      = 0x00002308,
+			.header_addr              = 0x0000230C,
+			.header_cfg               = 0x00002310,
+			.image_addr               = 0x00002314,
+			.image_addr_offset        = 0x00002318,
+			.buffer_width_cfg         = 0x0000231C,
+			.buffer_height_cfg        = 0x00002320,
+			.packer_cfg               = 0x00002324,
+			.stride                   = 0x00002328,
+			.irq_subsample_period     = 0x00002348,
+			.irq_subsample_pattern    = 0x0000234C,
+			.framedrop_period         = 0x00002350,
+			.framedrop_pattern        = 0x00002354,
+			.frame_inc                = 0x00002358,
+			.burst_limit              = 0x0000235C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 2 */
+		{
+			.status0                  = 0x00002400,
+			.status1                  = 0x00002404,
+			.cfg                      = 0x00002408,
+			.header_addr              = 0x0000240C,
+			.header_cfg               = 0x00002410,
+			.image_addr               = 0x00002414,
+			.image_addr_offset        = 0x00002418,
+			.buffer_width_cfg         = 0x0000241C,
+			.buffer_height_cfg        = 0x00002420,
+			.packer_cfg               = 0x00002424,
+			.stride                   = 0x00002428,
+			.irq_subsample_period     = 0x00002448,
+			.irq_subsample_pattern    = 0x0000244C,
+			.framedrop_period         = 0x00002450,
+			.framedrop_pattern        = 0x00002454,
+			.frame_inc                = 0x00002458,
+			.burst_limit              = 0x0000245C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 3 */
+		{
+			.status0                  = 0x00002500,
+			.status1                  = 0x00002504,
+			.cfg                      = 0x00002508,
+			.header_addr              = 0x0000250C,
+			.header_cfg               = 0x00002510,
+			.image_addr               = 0x00002514,
+			.image_addr_offset        = 0x00002518,
+			.buffer_width_cfg         = 0x0000251C,
+			.buffer_height_cfg        = 0x00002520,
+			.packer_cfg               = 0x00002524,
+			.stride                   = 0x00002528,
+			.irq_subsample_period     = 0x00002548,
+			.irq_subsample_pattern    = 0x0000254C,
+			.framedrop_period         = 0x00002550,
+			.framedrop_pattern        = 0x00002554,
+			.frame_inc                = 0x00002558,
+			.burst_limit              = 0x0000255C,
+			.ubwc_regs                = &ubwc_regs_client_3,
+		},
+		/* BUS Client 4 */
+		{
+			.status0                  = 0x00002600,
+			.status1                  = 0x00002604,
+			.cfg                      = 0x00002608,
+			.header_addr              = 0x0000260C,
+			.header_cfg               = 0x00002610,
+			.image_addr               = 0x00002614,
+			.image_addr_offset        = 0x00002618,
+			.buffer_width_cfg         = 0x0000261C,
+			.buffer_height_cfg        = 0x00002620,
+			.packer_cfg               = 0x00002624,
+			.stride                   = 0x00002628,
+			.irq_subsample_period     = 0x00002648,
+			.irq_subsample_pattern    = 0x0000264C,
+			.framedrop_period         = 0x00002650,
+			.framedrop_pattern        = 0x00002654,
+			.frame_inc                = 0x00002658,
+			.burst_limit              = 0x0000265C,
+			.ubwc_regs                = &ubwc_regs_client_4,
+		},
+		/* BUS Client 5 */
+		{
+			.status0                  = 0x00002700,
+			.status1                  = 0x00002704,
+			.cfg                      = 0x00002708,
+			.header_addr              = 0x0000270C,
+			.header_cfg               = 0x00002710,
+			.image_addr               = 0x00002714,
+			.image_addr_offset        = 0x00002718,
+			.buffer_width_cfg         = 0x0000271C,
+			.buffer_height_cfg        = 0x00002720,
+			.packer_cfg               = 0x00002724,
+			.stride                   = 0x00002728,
+			.irq_subsample_period     = 0x00002748,
+			.irq_subsample_pattern    = 0x0000274C,
+			.framedrop_period         = 0x00002750,
+			.framedrop_pattern        = 0x00002754,
+			.frame_inc                = 0x00002758,
+			.burst_limit              = 0x0000275C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 6 */
+		{
+			.status0                  = 0x00002800,
+			.status1                  = 0x00002804,
+			.cfg                      = 0x00002808,
+			.header_addr              = 0x0000280C,
+			.header_cfg               = 0x00002810,
+			.image_addr               = 0x00002814,
+			.image_addr_offset        = 0x00002818,
+			.buffer_width_cfg         = 0x0000281C,
+			.buffer_height_cfg        = 0x00002820,
+			.packer_cfg               = 0x00002824,
+			.stride                   = 0x00002828,
+			.irq_subsample_period     = 0x00002848,
+			.irq_subsample_pattern    = 0x0000284C,
+			.framedrop_period         = 0x00002850,
+			.framedrop_pattern        = 0x00002854,
+			.frame_inc                = 0x00002858,
+			.burst_limit              = 0x0000285C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 7 */
+		{
+			.status0                  = 0x00002900,
+			.status1                  = 0x00002904,
+			.cfg                      = 0x00002908,
+			.header_addr              = 0x0000290C,
+			.header_cfg               = 0x00002910,
+			.image_addr               = 0x00002914,
+			.image_addr_offset        = 0x00002918,
+			.buffer_width_cfg         = 0x0000291C,
+			.buffer_height_cfg        = 0x00002920,
+			.packer_cfg               = 0x00002924,
+			.stride                   = 0x00002928,
+			.irq_subsample_period     = 0x00002948,
+			.irq_subsample_pattern    = 0x0000294C,
+			.framedrop_period         = 0x00002950,
+			.framedrop_pattern        = 0x00002954,
+			.frame_inc                = 0x00002958,
+			.burst_limit              = 0x0000295C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 8 */
+		{
+			.status0                  = 0x00002A00,
+			.status1                  = 0x00002A04,
+			.cfg                      = 0x00002A08,
+			.header_addr              = 0x00002A0C,
+			.header_cfg               = 0x00002A10,
+			.image_addr               = 0x00002A14,
+			.image_addr_offset        = 0x00002A18,
+			.buffer_width_cfg         = 0x00002A1C,
+			.buffer_height_cfg        = 0x00002A20,
+			.packer_cfg               = 0x00002A24,
+			.stride                   = 0x00002A28,
+			.irq_subsample_period     = 0x00002A48,
+			.irq_subsample_pattern    = 0x00002A4C,
+			.framedrop_period         = 0x00002A50,
+			.framedrop_pattern        = 0x00002A54,
+			.frame_inc                = 0x00002A58,
+			.burst_limit              = 0x00002A5C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 9 */
+		{
+			.status0                  = 0x00002B00,
+			.status1                  = 0x00002B04,
+			.cfg                      = 0x00002B08,
+			.header_addr              = 0x00002B0C,
+			.header_cfg               = 0x00002B10,
+			.image_addr               = 0x00002B14,
+			.image_addr_offset        = 0x00002B18,
+			.buffer_width_cfg         = 0x00002B1C,
+			.buffer_height_cfg        = 0x00002B20,
+			.packer_cfg               = 0x00002B24,
+			.stride                   = 0x00002B28,
+			.irq_subsample_period     = 0x00002B48,
+			.irq_subsample_pattern    = 0x00002B4C,
+			.framedrop_period         = 0x00002B50,
+			.framedrop_pattern        = 0x00002B54,
+			.frame_inc                = 0x00002B58,
+			.burst_limit              = 0x00002B5C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 10 */
+		{
+			.status0                  = 0x00002C00,
+			.status1                  = 0x00002C04,
+			.cfg                      = 0x00002C08,
+			.header_addr              = 0x00002C0C,
+			.header_cfg               = 0x00002C10,
+			.image_addr               = 0x00002C14,
+			.image_addr_offset        = 0x00002C18,
+			.buffer_width_cfg         = 0x00002C1C,
+			.buffer_height_cfg        = 0x00002C20,
+			.packer_cfg               = 0x00002C24,
+			.stride                   = 0x00002C28,
+			.irq_subsample_period     = 0x00002C48,
+			.irq_subsample_pattern    = 0x00002C4C,
+			.framedrop_period         = 0x00002C50,
+			.framedrop_pattern        = 0x00002C54,
+			.frame_inc                = 0x00002C58,
+			.burst_limit              = 0x00002C5C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 11 */
+		{
+			.status0                  = 0x00002D00,
+			.status1                  = 0x00002D04,
+			.cfg                      = 0x00002D08,
+			.header_addr              = 0x00002D0C,
+			.header_cfg               = 0x00002D10,
+			.image_addr               = 0x00002D14,
+			.image_addr_offset        = 0x00002D18,
+			.buffer_width_cfg         = 0x00002D1C,
+			.buffer_height_cfg        = 0x00002D20,
+			.packer_cfg               = 0x00002D24,
+			.stride                   = 0x00002D28,
+			.irq_subsample_period     = 0x00002D48,
+			.irq_subsample_pattern    = 0x00002D4C,
+			.framedrop_period         = 0x00002D50,
+			.framedrop_pattern        = 0x00002D54,
+			.frame_inc                = 0x00002D58,
+			.burst_limit              = 0x00002D5C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 12 */
+		{
+			.status0                  = 0x00002E00,
+			.status1                  = 0x00002E04,
+			.cfg                      = 0x00002E08,
+			.header_addr              = 0x00002E0C,
+			.header_cfg               = 0x00002E10,
+			.image_addr               = 0x00002E14,
+			.image_addr_offset        = 0x00002E18,
+			.buffer_width_cfg         = 0x00002E1C,
+			.buffer_height_cfg        = 0x00002E20,
+			.packer_cfg               = 0x00002E24,
+			.stride                   = 0x00002E28,
+			.irq_subsample_period     = 0x00002E48,
+			.irq_subsample_pattern    = 0x00002E4C,
+			.framedrop_period         = 0x00002E50,
+			.framedrop_pattern        = 0x00002E54,
+			.frame_inc                = 0x00002E58,
+			.burst_limit              = 0x00002E5C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 13 */
+		{
+			.status0                  = 0x00002F00,
+			.status1                  = 0x00002F04,
+			.cfg                      = 0x00002F08,
+			.header_addr              = 0x00002F0C,
+			.header_cfg               = 0x00002F10,
+			.image_addr               = 0x00002F14,
+			.image_addr_offset        = 0x00002F18,
+			.buffer_width_cfg         = 0x00002F1C,
+			.buffer_height_cfg        = 0x00002F20,
+			.packer_cfg               = 0x00002F24,
+			.stride                   = 0x00002F28,
+			.irq_subsample_period     = 0x00002F48,
+			.irq_subsample_pattern    = 0x00002F4C,
+			.framedrop_period         = 0x00002F50,
+			.framedrop_pattern        = 0x00002F54,
+			.frame_inc                = 0x00002F58,
+			.burst_limit              = 0x00002F5C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 14 */
+		{
+			.status0                  = 0x00003000,
+			.status1                  = 0x00003004,
+			.cfg                      = 0x00003008,
+			.header_addr              = 0x0000300C,
+			.header_cfg               = 0x00003010,
+			.image_addr               = 0x00003014,
+			.image_addr_offset        = 0x00003018,
+			.buffer_width_cfg         = 0x0000301C,
+			.buffer_height_cfg        = 0x00003020,
+			.packer_cfg               = 0x00003024,
+			.stride                   = 0x00003028,
+			.irq_subsample_period     = 0x00003048,
+			.irq_subsample_pattern    = 0x0000304C,
+			.framedrop_period         = 0x00003050,
+			.framedrop_pattern        = 0x00003054,
+			.frame_inc                = 0x00003058,
+			.burst_limit              = 0x0000305C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 15 */
+		{
+			.status0                  = 0x00003100,
+			.status1                  = 0x00003104,
+			.cfg                      = 0x00003108,
+			.header_addr              = 0x0000310C,
+			.header_cfg               = 0x00003110,
+			.image_addr               = 0x00003114,
+			.image_addr_offset        = 0x00003118,
+			.buffer_width_cfg         = 0x0000311C,
+			.buffer_height_cfg        = 0x00003120,
+			.packer_cfg               = 0x00003124,
+			.stride                   = 0x00003128,
+			.irq_subsample_period     = 0x00003148,
+			.irq_subsample_pattern    = 0x0000314C,
+			.framedrop_period         = 0x00003150,
+			.framedrop_pattern        = 0x00003154,
+			.frame_inc                = 0x00003158,
+			.burst_limit              = 0x0000315C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 16 */
+		{
+			.status0                  = 0x00003200,
+			.status1                  = 0x00003204,
+			.cfg                      = 0x00003208,
+			.header_addr              = 0x0000320C,
+			.header_cfg               = 0x00003210,
+			.image_addr               = 0x00003214,
+			.image_addr_offset        = 0x00003218,
+			.buffer_width_cfg         = 0x0000321C,
+			.buffer_height_cfg        = 0x00003220,
+			.packer_cfg               = 0x00003224,
+			.stride                   = 0x00003228,
+			.irq_subsample_period     = 0x00003248,
+			.irq_subsample_pattern    = 0x0000324C,
+			.framedrop_period         = 0x00003250,
+			.framedrop_pattern        = 0x00003254,
+			.frame_inc                = 0x00003258,
+			.burst_limit              = 0x0000325C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 17 */
+		{
+			.status0                  = 0x00003300,
+			.status1                  = 0x00003304,
+			.cfg                      = 0x00003308,
+			.header_addr              = 0x0000330C,
+			.header_cfg               = 0x00003310,
+			.image_addr               = 0x00003314,
+			.image_addr_offset        = 0x00003318,
+			.buffer_width_cfg         = 0x0000331C,
+			.buffer_height_cfg        = 0x00003320,
+			.packer_cfg               = 0x00003324,
+			.stride                   = 0x00003328,
+			.irq_subsample_period     = 0x00003348,
+			.irq_subsample_pattern    = 0x0000334C,
+			.framedrop_period         = 0x00003350,
+			.framedrop_pattern        = 0x00003354,
+			.frame_inc                = 0x00003358,
+			.burst_limit              = 0x0000335C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 18 */
+		{
+			.status0                  = 0x00003400,
+			.status1                  = 0x00003404,
+			.cfg                      = 0x00003408,
+			.header_addr              = 0x0000340C,
+			.header_cfg               = 0x00003410,
+			.image_addr               = 0x00003414,
+			.image_addr_offset        = 0x00003418,
+			.buffer_width_cfg         = 0x0000341C,
+			.buffer_height_cfg        = 0x00003420,
+			.packer_cfg               = 0x00003424,
+			.stride                   = 0x00003428,
+			.irq_subsample_period     = 0x00003448,
+			.irq_subsample_pattern    = 0x0000344C,
+			.framedrop_period         = 0x00003450,
+			.framedrop_pattern        = 0x00003454,
+			.frame_inc                = 0x00003458,
+			.burst_limit              = 0x0000345C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 19 */
+		{
+			.status0                  = 0x00003500,
+			.status1                  = 0x00003504,
+			.cfg                      = 0x00003508,
+			.header_addr              = 0x0000350C,
+			.header_cfg               = 0x00003510,
+			.image_addr               = 0x00003514,
+			.image_addr_offset        = 0x00003518,
+			.buffer_width_cfg         = 0x0000351C,
+			.buffer_height_cfg        = 0x00003520,
+			.packer_cfg               = 0x00003524,
+			.stride                   = 0x00003528,
+			.irq_subsample_period     = 0x00003548,
+			.irq_subsample_pattern    = 0x0000354C,
+			.framedrop_period         = 0x00003550,
+			.framedrop_pattern        = 0x00003554,
+			.frame_inc                = 0x00003558,
+			.burst_limit              = 0x0000355C,
+			.ubwc_regs                = NULL,
+		},
+	},
+	.comp_grp_reg = {
+		/* CAM_VFE_BUS_VER2_COMP_GRP_0 */
+		{
+			.comp_mask                    = 0x00002010,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_1 */
+		{
+			.comp_mask                    = 0x00002014,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_2 */
+		{
+			.comp_mask                    = 0x00002018,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_3 */
+		{
+			.comp_mask                    = 0x0000201C,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_4 */
+		{
+			.comp_mask                    = 0x00002020,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_5 */
+		{
+			.comp_mask                    = 0x00002024,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 */
+		{
+			.comp_mask                    = 0x0000202C,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_1 */
+		{
+			.comp_mask                    = 0x00002030,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_2 */
+		{
+			.comp_mask                    = 0x00002034,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_3 */
+		{
+			.comp_mask                    = 0x00002038,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_4 */
+		{
+			.comp_mask                    = 0x0000203C,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5 */
+		{
+			.comp_mask                    = 0x00002040,
+		},
+	},
+	.vfe_out_hw_info = {
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER2_VFE_OUT_FULL,
+			.max_width     = 4096,
+			.max_height    = 4096,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER2_VFE_OUT_DS4,
+			.max_width     = 1920,
+			.max_height    = 1080,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER2_VFE_OUT_DS16,
+			.max_width     = 1920,
+			.max_height    = 1080,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER2_VFE_OUT_RAW_DUMP,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER2_VFE_OUT_FD,
+			.max_width     = 1920,
+			.max_height    = 1080,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER2_VFE_OUT_PDAF,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER2_VFE_OUT_RDI0,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER2_VFE_OUT_RDI1,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER2_VFE_OUT_RDI2,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST,
+			.max_width     = 1920,
+			.max_height    = 1080,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER2_VFE_OUT_STATS_TL_BG,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+	},
+};
+
+/*
+ * Top-level VFE170 hardware description, consumed by the common VFE
+ * driver through the of_device_id .data pointer in cam_vfe170.c.
+ * NOTE(review): deliberately non-static — cam_vfe170.c references this
+ * symbol after including this header.
+ */
+struct cam_vfe_hw_info cam_vfe170_hw_info = {
+	.irq_reg_info                  = &vfe170_top_irq_reg_info,
+
+	.bus_version                   = CAM_VFE_BUS_VER_2_0,
+	.bus_hw_info                   = &vfe170_bus_hw_info,
+
+	.top_version                   = CAM_VFE_TOP_VER_2_0,
+	.top_hw_info                   = &vfe170_top_hw_info,
+
+	.camif_version                 = CAM_VFE_CAMIF_VER_2_0,
+	.camif_reg                     = &vfe170_camif_reg,
+
+};
+
+#endif /* _CAM_VFE170_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/Makefile
new file mode 100644
index 0000000..cea1137
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/Makefile
@@ -0,0 +1,10 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_vfe_bus.o cam_vfe_bus_ver2.o
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus.c
new file mode 100644
index 0000000..50952f8
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus.c
@@ -0,0 +1,39 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include "cam_vfe_bus.h"
+#include "cam_vfe_bus_ver1.h"
+#include "cam_vfe_bus_ver2.h"
+
+/*
+ * cam_vfe_bus_init()
+ *
+ * @Brief: Instantiate the bus layer that matches @bus_version and
+ *         return it through @vfe_bus. Only bus version 2.0 is
+ *         currently supported; anything else fails with -ENODEV.
+ */
+int cam_vfe_bus_init(uint32_t          bus_version,
+	void __iomem                  *mem_base,
+	struct cam_hw_intf            *hw_intf,
+	void                          *bus_hw_info,
+	void                          *vfe_irq_controller,
+	struct cam_vfe_bus            **vfe_bus)
+{
+	int rc;
+
+	if (bus_version == CAM_VFE_BUS_VER_2_0) {
+		rc = cam_vfe_bus_ver2_init(mem_base, hw_intf, bus_hw_info,
+			vfe_irq_controller, vfe_bus);
+	} else {
+		pr_err("Unsupported Bus Version %x\n", bus_version);
+		rc = -ENODEV;
+	}
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver1.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver1.h
new file mode 100644
index 0000000..3572451
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver1.h
@@ -0,0 +1,120 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_VFE_BUS_VER1_H_
+#define _CAM_VFE_BUS_VER1_H_
+
+enum cam_vfe_bus_ver1_pingpong_id {
+	CAM_VFE_BUS_VER1_PING,
+	CAM_VFE_BUS_VER1_PONG,
+	CAM_VFE_BUS_VER1_PINGPONG_MAX,
+};
+
+enum cam_vfe_bus_ver1_wm_type {
+	CAM_VFE_BUS_WM_TYPE_IMAGE,
+	CAM_VFE_BUS_WM_TYPE_STATS,
+	CAM_VFE_BUS_WM_TYPE_MAX,
+};
+
+enum cam_vfe_bus_ver1_comp_grp_type {
+	CAM_VFE_BUS_VER1_COMP_GRP_IMG0,
+	CAM_VFE_BUS_VER1_COMP_GRP_IMG1,
+	CAM_VFE_BUS_VER1_COMP_GRP_IMG2,
+	CAM_VFE_BUS_VER1_COMP_GRP_IMG3,
+	CAM_VFE_BUS_VER1_COMP_GRP_STATS0,
+	CAM_VFE_BUS_VER1_COMP_GRP_STATS1,
+	CAM_VFE_BUS_VER1_COMP_GRP_MAX,
+};
+
+struct cam_vfe_bus_ver1_common_reg {
+	uint32_t cmd_offset;
+	uint32_t cfg_offset;
+	uint32_t io_fmt_offset;
+	uint32_t argb_cfg_offset;
+	uint32_t xbar_cfg0_offset;
+	uint32_t xbar_cfg1_offset;
+	uint32_t xbar_cfg2_offset;
+	uint32_t xbar_cfg3_offset;
+	uint32_t ping_pong_status_reg;
+};
+
+struct cam_vfe_bus_ver1_wm_reg {
+	uint32_t wm_cfg_offset;
+	uint32_t ping_addr_offset;
+	uint32_t ping_max_addr_offset;
+	uint32_t pong_addr_offset;
+	uint32_t pong_max_addr_offset;
+	uint32_t addr_cfg_offset;
+	uint32_t ub_cfg_offset;
+	uint32_t image_size_offset;
+	uint32_t buffer_cfg_offset;
+	uint32_t framedrop_pattern_offset;
+	uint32_t irq_subsample_pattern_offset;
+	uint32_t ping_pong_status_bit; /* 0 - 31 */
+	uint32_t composite_bit; /* 0 -31 */
+};
+
+struct cam_vfe_bus_ver1_wm_resource_data {
+	uint32_t             index;
+	uint32_t             wm_type;
+	uint32_t             res_type;
+
+	uint32_t             offset;
+	uint32_t             width;
+	uint32_t             height;
+	uint32_t             stride;
+	uint32_t             scanline;
+
+	uint32_t             burst_len;
+
+	uint32_t             framedrop_period;
+	uint32_t             framedrop_pattern;
+
+	uint32_t             buf_valid[CAM_VFE_BUS_VER1_PINGPONG_MAX];
+	uint32_t             ub_size;
+	uint32_t             ub_offset;
+
+	struct cam_vfe_bus_ver1_wm_reg  hw_regs;
+};
+
+struct cam_vfe_bus_ver1_comp_grp_reg {
+	enum cam_vfe_bus_ver1_comp_grp_type comp_grp_type;
+	uint32_t             comp_grp_offset;
+};
+
+struct cam_vfe_bus_ver1_comp_grp {
+	struct cam_vfe_bus_ver1_comp_grp_reg reg_info;
+	struct list_head     wm_list;
+	uint32_t             cur_bit_mask;
+};
+
+/*
+ * cam_vfe_bus_ver1_init()
+ *
+ * @Brief:                   Initialize Bus layer
+ *
+ * @mem_base:                Mapped base address of register space
+ * @hw_intf:                 HW Interface of HW to which this resource belongs
+ * @bus_hw_info:             BUS HW info that contains details of BUS registers
+ * @vfe_irq_controller:      VFE IRQ Controller to use for subscribing to Top
+ *                           level IRQs
+ * @vfe_bus:                 Pointer to vfe_bus structure which will be filled
+ *                           and returned on successful initialize
+ */
+int cam_vfe_bus_ver1_init(
+	void __iomem                         *mem_base,
+	struct cam_hw_intf                   *hw_intf,
+	void                                 *bus_hw_info,
+	void                                 *vfe_irq_controller,
+	struct cam_vfe_bus                  **vfe_bus);
+
+#endif /* _CAM_VFE_BUS_VER1_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
new file mode 100644
index 0000000..6e62dcf
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
@@ -0,0 +1,1796 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/ratelimit.h>
+#include <linux/slab.h>
+#include "cam_io_util.h"
+#include "cam_cdm_util.h"
+#include "cam_hw_intf.h"
+#include "cam_vfe_hw_intf.h"
+#include "cam_irq_controller.h"
+#include "cam_vfe_bus.h"
+#include "cam_vfe_bus_ver2.h"
+#include "cam_vfe_core.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+#define FRAME_BASED_EN 0
+
+/*
+ * Offsets of the bus IRQ registers, one per
+ * CAM_IFE_BUS_IRQ_REGISTERS_MAX entry.
+ * NOTE(review): presumed to be the bus IRQ mask/clear/status banks in
+ * the 0x2000 bus register space -- confirm against the VFE register
+ * spec; the meaning of each slot is not visible from this file.
+ */
+static uint32_t irq_reg_offset[CAM_IFE_BUS_IRQ_REGISTERS_MAX] = {
+	0x0000205C,
+	0x00002060,
+	0x00002064,
+	0x0000206C,
+	0x00002070,
+	0x00002074,
+	0x00002078,
+};
+
+/*
+ * HW packer output formats for a bus write master (4-bit field).
+ * NOTE(review): PACKER_FMT_MAX aliases the last valid encoding (0xF)
+ * and is used in this file as a bit mask in debug logs
+ * ("pack_fmt & PACKER_FMT_MAX"), not as an entry count.
+ */
+enum cam_vfe_bus_packer_format {
+	PACKER_FMT_PLAIN_128                   = 0x0,
+	PACKER_FMT_PLAIN_8                     = 0x1,
+	PACKER_FMT_PLAIN_16_10BPP              = 0x2,
+	PACKER_FMT_PLAIN_16_12BPP              = 0x3,
+	PACKER_FMT_PLAIN_16_14BPP              = 0x4,
+	PACKER_FMT_PLAIN_16_16BPP              = 0x5,
+	PACKER_FMT_ARGB_10                     = 0x6,
+	PACKER_FMT_ARGB_12                     = 0x7,
+	PACKER_FMT_ARGB_14                     = 0x8,
+	PACKER_FMT_PLAIN_32_20BPP              = 0x9,
+	PACKER_FMT_PLAIN_64                    = 0xA,
+	PACKER_FMT_TP_10                       = 0xB,
+	PACKER_FMT_PLAIN_32_32BPP              = 0xC,
+	PACKER_FMT_PLAIN_8_ODD_EVEN            = 0xD,
+	PACKER_FMT_PLAIN_8_LSB_MSB_10          = 0xE,
+	PACKER_FMT_PLAIN_8_LSB_MSB_10_ODD_EVEN = 0xF,
+	PACKER_FMT_MAX                         = 0xF,
+};
+
+/*
+ * State shared by every bus ver2 resource: mapped register base, the
+ * owning HW interface, the bus-local and top-level IRQ controllers,
+ * and the common register offset table.
+ */
+struct cam_vfe_bus_ver2_common_data {
+	void __iomem                               *mem_base;
+	struct cam_hw_intf                         *hw_intf;
+	void                                       *bus_irq_controller;
+	void                                       *vfe_irq_controller;
+	struct cam_vfe_bus_ver2_reg_offset_common  *common_reg;
+};
+
+/*
+ * Private data of one bus write master (WM/client). The fields from
+ * irq_enabled down are per-acquire configuration and are zeroed again
+ * in cam_vfe_bus_release_wm().
+ */
+struct cam_vfe_bus_ver2_wm_resource_data {
+	uint32_t             index;  /* client index, also the IRQ/reset bit */
+	struct cam_vfe_bus_ver2_common_data            *common_data;
+	struct cam_vfe_bus_ver2_reg_offset_bus_client  *hw_regs;
+
+	uint32_t             irq_enabled;
+
+	uint32_t             offset;
+	uint32_t             width;
+	uint32_t             height;
+	uint32_t             stride;
+	uint32_t             format;
+	enum cam_vfe_bus_packer_format pack_fmt;
+
+	uint32_t             burst_len;
+	uint32_t             frame_based;
+
+	uint32_t             irq_subsample_period;
+	uint32_t             irq_subsample_pattern;
+	uint32_t             framedrop_period;
+	uint32_t             framedrop_pattern;
+};
+
+/*
+ * Private data of one composite group. composite_mask accumulates the
+ * done-bits of the WMs attached to the group; the dual_* and
+ * intra_client fields are only meaningful for dual-VFE groups.
+ */
+struct cam_vfe_bus_ver2_comp_grp_data {
+	enum cam_vfe_bus_ver2_comp_grp_type          comp_grp_type;
+	struct cam_vfe_bus_ver2_common_data         *common_data;
+	struct cam_vfe_bus_ver2_reg_offset_comp_grp *hw_regs;
+
+	uint32_t                         irq_enabled;
+	uint32_t                         comp_grp_local_idx; /* caller's id */
+	uint32_t                         unique_id;          /* owner context */
+
+	uint32_t                         is_master;
+	uint32_t                         dual_slave_core;
+	uint32_t                         intra_client_mask;
+	uint32_t                         composite_mask;
+};
+
+/*
+ * Private data of one logical VFE output port: up to PLANE_MAX write
+ * masters plus the composite group that aggregates their done IRQs.
+ */
+struct cam_vfe_bus_ver2_vfe_out_data {
+	uint32_t                              out_type;
+	struct cam_vfe_bus_ver2_common_data  *common_data;
+
+	uint32_t                         num_wm;
+	struct cam_isp_resource_node    *wm_res[PLANE_MAX];
+
+	struct cam_isp_resource_node    *comp_grp;
+	enum cam_isp_hw_sync_mode        dual_comp_sync_mode;
+	uint32_t                         dual_hw_alternate_vfe_id;
+	struct list_head                 vfe_out_list;
+
+	uint32_t                         format;
+	uint32_t                         max_width;
+	uint32_t                         max_height;
+	struct cam_cdm_utils_ops        *cdm_util_ops;
+};
+
+
+/*
+ * Top-level private data of the bus ver2 layer: all client/comp-grp/
+ * out resources plus the free/used bookkeeping lists.
+ * NOTE(review): the pool of 128 event payloads is a fixed size whose
+ * rationale is not visible here -- confirm it covers worst-case IRQ
+ * bursts.
+ */
+struct cam_vfe_bus_ver2_priv {
+	struct cam_vfe_bus_ver2_common_data common_data;
+
+	struct cam_isp_resource_node  bus_client[CAM_VFE_BUS_VER2_MAX_CLIENTS];
+	struct cam_isp_resource_node  comp_grp[CAM_VFE_BUS_VER2_COMP_GRP_MAX];
+	struct cam_isp_resource_node  vfe_out[CAM_VFE_BUS_VER2_VFE_OUT_MAX];
+
+	struct list_head                    free_comp_grp;
+	struct list_head                    free_dual_comp_grp;
+	struct list_head                    used_comp_grp;
+
+	struct cam_vfe_bus_irq_evt_payload  evt_payload[128];
+	struct list_head                    free_payload_list;
+};
+
+static int cam_vfe_bus_put_evt_payload(void     *core_info,
+	struct cam_vfe_bus_irq_evt_payload     **evt_payload);
+
+/*
+ * cam_vfe_bus_ver2_get_intra_client_mask()
+ *
+ * @Brief: Compute the intra-client mask for a dual-VFE pairing of
+ *         @current_core (master) and @dual_slave_core (slave). The
+ *         master's lower-numbered peer maps to 0x1 and its
+ *         higher-numbered peer to 0x2. Returns -EINVAL (mask left 0)
+ *         for an identical or out-of-range core pair.
+ */
+static int cam_vfe_bus_ver2_get_intra_client_mask(
+	enum cam_vfe_bus_ver2_vfe_core_id  dual_slave_core,
+	enum cam_vfe_bus_ver2_vfe_core_id  current_core,
+	uint32_t                          *intra_client_mask)
+{
+	*intra_client_mask = 0;
+
+	if (dual_slave_core == current_core) {
+		pr_err("Invalid params. Same core as Master and Slave\n");
+		return -EINVAL;
+	}
+
+	if (current_core != CAM_VFE_BUS_VER2_VFE_CORE_0 &&
+		current_core != CAM_VFE_BUS_VER2_VFE_CORE_1 &&
+		current_core != CAM_VFE_BUS_VER2_VFE_CORE_2) {
+		pr_err("Invalid value for master core %u\n", current_core);
+		return -EINVAL;
+	}
+
+	if (dual_slave_core != CAM_VFE_BUS_VER2_VFE_CORE_0 &&
+		dual_slave_core != CAM_VFE_BUS_VER2_VFE_CORE_1 &&
+		dual_slave_core != CAM_VFE_BUS_VER2_VFE_CORE_2) {
+		pr_err("Invalid value for slave core %u\n", dual_slave_core);
+		return -EINVAL;
+	}
+
+	/*
+	 * Same table as the per-core switches this replaces: the slave
+	 * gets 0x1 when it is the master's lower-numbered peer (i.e. it
+	 * is core 0, or it is core 1 while the master is core 0), else
+	 * 0x2.
+	 */
+	if (dual_slave_core == CAM_VFE_BUS_VER2_VFE_CORE_0 ||
+		(current_core == CAM_VFE_BUS_VER2_VFE_CORE_0 &&
+		dual_slave_core == CAM_VFE_BUS_VER2_VFE_CORE_1))
+		*intra_client_mask = 0x1;
+	else
+		*intra_client_mask = 0x2;
+
+	return 0;
+}
+
+/*
+ * cam_vfe_bus_get_out_res_id()
+ *
+ * @Brief: Translate a CAM_ISP_IFE_OUT_RES_* resource id into the
+ *         bus-internal VFE out enum. Unknown ids translate to
+ *         CAM_VFE_BUS_VER2_VFE_OUT_MAX.
+ */
+static enum cam_vfe_bus_ver2_vfe_out_type
+	cam_vfe_bus_get_out_res_id(uint32_t res_type)
+{
+	enum cam_vfe_bus_ver2_vfe_out_type out_type =
+		CAM_VFE_BUS_VER2_VFE_OUT_MAX;
+
+	switch (res_type) {
+	case CAM_ISP_IFE_OUT_RES_FULL:
+		out_type = CAM_VFE_BUS_VER2_VFE_OUT_FULL;
+		break;
+	case CAM_ISP_IFE_OUT_RES_DS4:
+		out_type = CAM_VFE_BUS_VER2_VFE_OUT_DS4;
+		break;
+	case CAM_ISP_IFE_OUT_RES_DS16:
+		out_type = CAM_VFE_BUS_VER2_VFE_OUT_DS16;
+		break;
+	case CAM_ISP_IFE_OUT_RES_FD:
+		out_type = CAM_VFE_BUS_VER2_VFE_OUT_FD;
+		break;
+	case CAM_ISP_IFE_OUT_RES_RAW_DUMP:
+		out_type = CAM_VFE_BUS_VER2_VFE_OUT_RAW_DUMP;
+		break;
+	case CAM_ISP_IFE_OUT_RES_PDAF:
+		out_type = CAM_VFE_BUS_VER2_VFE_OUT_PDAF;
+		break;
+	case CAM_ISP_IFE_OUT_RES_RDI_0:
+		out_type = CAM_VFE_BUS_VER2_VFE_OUT_RDI0;
+		break;
+	case CAM_ISP_IFE_OUT_RES_RDI_1:
+		out_type = CAM_VFE_BUS_VER2_VFE_OUT_RDI1;
+		break;
+	case CAM_ISP_IFE_OUT_RES_RDI_2:
+		out_type = CAM_VFE_BUS_VER2_VFE_OUT_RDI2;
+		break;
+	case CAM_ISP_IFE_OUT_RES_STATS_HDR_BE:
+		out_type = CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE;
+		break;
+	case CAM_ISP_IFE_OUT_RES_STATS_HDR_BHIST:
+		out_type = CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST;
+		break;
+	case CAM_ISP_IFE_OUT_RES_STATS_TL_BG:
+		out_type = CAM_VFE_BUS_VER2_VFE_OUT_STATS_TL_BG;
+		break;
+	case CAM_ISP_IFE_OUT_RES_STATS_BF:
+		out_type = CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF;
+		break;
+	case CAM_ISP_IFE_OUT_RES_STATS_AWB_BG:
+		out_type = CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG;
+		break;
+	case CAM_ISP_IFE_OUT_RES_STATS_BHIST:
+		out_type = CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST;
+		break;
+	case CAM_ISP_IFE_OUT_RES_STATS_RS:
+		out_type = CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS;
+		break;
+	case CAM_ISP_IFE_OUT_RES_STATS_CS:
+		out_type = CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS;
+		break;
+	case CAM_ISP_IFE_OUT_RES_STATS_IHIST:
+		out_type = CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST;
+		break;
+	default:
+		break;
+	}
+
+	return out_type;
+}
+
+/*
+ * cam_vfe_bus_get_num_wm()
+ *
+ * @Brief: Return how many write masters (planes) are required to write
+ *         out @format on @res_type.
+ *
+ * @Return: 1 or 2 on a supported combination, -EINVAL otherwise.
+ */
+static int cam_vfe_bus_get_num_wm(
+	enum cam_vfe_bus_ver2_vfe_out_type    res_type,
+	uint32_t                              format)
+{
+	switch (res_type) {
+	case CAM_VFE_BUS_VER2_VFE_OUT_RDI0:
+	case CAM_VFE_BUS_VER2_VFE_OUT_RDI1:
+	case CAM_VFE_BUS_VER2_VFE_OUT_RDI2:
+		/* RDI dumps a single packed plane. */
+		switch (format) {
+		case CAM_FORMAT_MIPI_RAW_8:
+		case CAM_FORMAT_MIPI_RAW_10:
+		case CAM_FORMAT_MIPI_RAW_12:
+		case CAM_FORMAT_MIPI_RAW_14:
+		case CAM_FORMAT_MIPI_RAW_16:
+		case CAM_FORMAT_MIPI_RAW_20:
+		case CAM_FORMAT_PLAIN128:
+			return 1;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_FULL:
+		/* Y/C planar outputs need separate luma and chroma WMs. */
+		switch (format) {
+		case CAM_FORMAT_NV21:
+		case CAM_FORMAT_NV12:
+		case CAM_FORMAT_MIPI_RAW_8:
+		case CAM_FORMAT_PLAIN8:
+		case CAM_FORMAT_TP10:
+		case CAM_FORMAT_UBWC_NV12:
+		case CAM_FORMAT_UBWC_NV12_4R:
+		case CAM_FORMAT_UBWC_TP10:
+		case CAM_FORMAT_UBWC_P010:
+			return 2;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_FD:
+		switch (format) {
+		case CAM_FORMAT_NV21:
+		case CAM_FORMAT_NV12:
+		case CAM_FORMAT_PLAIN8:
+		case CAM_FORMAT_TP10:
+		case CAM_FORMAT_PLAIN16_10:
+			return 2;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_DS4:
+	case CAM_VFE_BUS_VER2_VFE_OUT_DS16:
+		switch (format) {
+		case CAM_FORMAT_PD8:
+		case CAM_FORMAT_PD10:
+			return 1;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_RAW_DUMP:
+		switch (format) {
+		case CAM_FORMAT_ARGB_14:
+		case CAM_FORMAT_PLAIN8:
+		case CAM_FORMAT_PLAIN16_10:
+		case CAM_FORMAT_PLAIN16_12:
+		case CAM_FORMAT_PLAIN16_14:
+			return 1;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_PDAF:
+		switch (format) {
+		case CAM_FORMAT_PLAIN8:
+		case CAM_FORMAT_PLAIN16_10:
+		case CAM_FORMAT_PLAIN16_12:
+		case CAM_FORMAT_PLAIN16_14:
+			return 1;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_TL_BG:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS:
+		switch (format) {
+		case CAM_FORMAT_PLAIN64:
+			return 1;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST:
+		switch (format) {
+		case CAM_FORMAT_PLAIN16_16:
+			return 1;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+
+	/* Fix: add the trailing newline used by every other pr_err here. */
+	pr_err("Unsupported format %u for resource_type %u\n",
+		format, res_type);
+
+	return -EINVAL;
+}
+
+/*
+ * cam_vfe_bus_get_wm_idx()
+ *
+ * @Brief: Fixed mapping from (VFE out port, plane) to the bus ver2
+ *         write master index. Cases are ordered by WM index:
+ *         RDI0-2 -> 0-2, FULL -> 3/4, DS4/DS16 -> 5/6, FD -> 7/8,
+ *         RAW_DUMP -> 9, PDAF -> 10, stats -> 11-19.
+ *
+ * @Return: WM index, or -1 when the port has no WM for @plane.
+ */
+static int cam_vfe_bus_get_wm_idx(
+	enum cam_vfe_bus_ver2_vfe_out_type vfe_out_res_id,
+	enum cam_vfe_bus_plane_type plane)
+{
+	int wm_idx = -1;
+
+	switch (vfe_out_res_id) {
+	case CAM_VFE_BUS_VER2_VFE_OUT_RDI0:
+		if (plane == PLANE_Y)
+			wm_idx = 0;
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_RDI1:
+		if (plane == PLANE_Y)
+			wm_idx = 1;
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_RDI2:
+		if (plane == PLANE_Y)
+			wm_idx = 2;
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_FULL:
+		if (plane == PLANE_Y)
+			wm_idx = 3;
+		else if (plane == PLANE_C)
+			wm_idx = 4;
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_DS4:
+		if (plane == PLANE_Y)
+			wm_idx = 5;
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_DS16:
+		if (plane == PLANE_Y)
+			wm_idx = 6;
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_FD:
+		if (plane == PLANE_Y)
+			wm_idx = 7;
+		else if (plane == PLANE_C)
+			wm_idx = 8;
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_RAW_DUMP:
+		if (plane == PLANE_Y)
+			wm_idx = 9;
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_PDAF:
+		if (plane == PLANE_Y)
+			wm_idx = 10;
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE:
+		if (plane == PLANE_Y)
+			wm_idx = 11;
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST:
+		if (plane == PLANE_Y)
+			wm_idx = 12;
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_TL_BG:
+		if (plane == PLANE_Y)
+			wm_idx = 13;
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF:
+		if (plane == PLANE_Y)
+			wm_idx = 14;
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG:
+		if (plane == PLANE_Y)
+			wm_idx = 15;
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST:
+		if (plane == PLANE_Y)
+			wm_idx = 16;
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS:
+		if (plane == PLANE_Y)
+			wm_idx = 17;
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS:
+		if (plane == PLANE_Y)
+			wm_idx = 18;
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST:
+		if (plane == PLANE_Y)
+			wm_idx = 19;
+		break;
+	default:
+		break;
+	}
+
+	return wm_idx;
+}
+
+/*
+ * cam_vfe_bus_get_packer_fmt()
+ *
+ * @Brief: Map an output pixel format to the HW packer format. Only
+ *         NV12/NV21 are mapped today; everything else yields
+ *         PACKER_FMT_MAX.
+ */
+static enum cam_vfe_bus_packer_format
+	cam_vfe_bus_get_packer_fmt(uint32_t out_fmt)
+{
+	if (out_fmt == CAM_FORMAT_NV21 || out_fmt == CAM_FORMAT_NV12)
+		return PACKER_FMT_PLAIN_8;
+
+	return PACKER_FMT_MAX;
+}
+
+/*
+ * cam_vfe_bus_acquire_wm()
+ *
+ * @Brief: Reserve the fixed write master for (@vfe_out_res_id, @plane)
+ *         and seed its per-acquire configuration from @out_port_info.
+ *         On success *wm_res holds the reserved node and
+ *         *client_done_mask the WM's done bit.
+ *
+ * @Return: 0 on success, -EINVAL for an unsupported out/plane pair.
+ */
+static int cam_vfe_bus_acquire_wm(
+	struct cam_vfe_bus_ver2_priv          *ver2_bus_priv,
+	struct cam_isp_out_port_info          *out_port_info,
+	enum cam_vfe_bus_ver2_vfe_out_type     vfe_out_res_id,
+	enum cam_vfe_bus_plane_type            plane,
+	enum cam_isp_hw_split_id               split_id,
+	uint32_t                               subscribe_irq,
+	struct cam_isp_resource_node         **wm_res,
+	uint32_t                              *client_done_mask)
+{
+	/*
+	 * Fix: signed on purpose. cam_vfe_bus_get_wm_idx() returns -1 for
+	 * an unsupported pair, which the former uint32_t silently turned
+	 * into a huge value and made the "< 0" check dead code.
+	 */
+	int wm_idx = 0;
+	struct cam_isp_resource_node              *wm_res_local = NULL;
+	struct cam_vfe_bus_ver2_wm_resource_data  *rsrc_data = NULL;
+
+	*wm_res = NULL;
+	*client_done_mask = 0;
+
+	/* No need to allocate for BUS VER2. VFE OUT to WM is fixed. */
+	wm_idx = cam_vfe_bus_get_wm_idx(vfe_out_res_id, plane);
+	if (wm_idx < 0 || wm_idx >= CAM_VFE_BUS_VER2_MAX_CLIENTS) {
+		pr_err("Unsupported VFE out %d plane %d\n",
+			vfe_out_res_id, plane);
+		return -EINVAL;
+	}
+
+	wm_res_local = &ver2_bus_priv->bus_client[wm_idx];
+	wm_res_local->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+
+	rsrc_data = wm_res_local->res_priv;
+	rsrc_data->irq_enabled = subscribe_irq;
+	rsrc_data->format = out_port_info->format;
+	rsrc_data->pack_fmt = cam_vfe_bus_get_packer_fmt(rsrc_data->format);
+
+	rsrc_data->width = out_port_info->width;
+	rsrc_data->height = out_port_info->height;
+	if (plane == PLANE_C) {
+		switch (rsrc_data->format) {
+		case CAM_FORMAT_NV21:
+		case CAM_FORMAT_NV12:
+			/* 4:2:0 chroma plane is half the luma height. */
+			rsrc_data->height /= 2;
+			break;
+		default:
+			break;
+		}
+	}
+
+	/*
+	 * Fix: compare against the bus ver2 out enums. vfe_out_res_id is
+	 * an enum cam_vfe_bus_ver2_vfe_out_type; the old comparison used
+	 * CAM_ISP_IFE_OUT_RES_RDI_* ids from a different enum namespace,
+	 * so RDI clients were not reliably marked frame based.
+	 */
+	if (vfe_out_res_id == CAM_VFE_BUS_VER2_VFE_OUT_RDI0 ||
+		vfe_out_res_id == CAM_VFE_BUS_VER2_VFE_OUT_RDI1 ||
+		vfe_out_res_id == CAM_VFE_BUS_VER2_VFE_OUT_RDI2)
+		rsrc_data->frame_based = 1;
+
+	*client_done_mask = (1 << wm_idx);
+	*wm_res = wm_res_local;
+
+	return 0;
+}
+
+/*
+ * cam_vfe_bus_release_wm()
+ *
+ * @Brief: Return a write master to the available pool, clearing every
+ *         per-acquire field. index, hw_regs and common_data are static
+ *         and are deliberately left untouched.
+ */
+static int cam_vfe_bus_release_wm(void   *bus_priv,
+	struct cam_isp_resource_node     *wm_res)
+{
+	struct cam_vfe_bus_ver2_wm_resource_data *wm_data = wm_res->res_priv;
+
+	wm_data->irq_enabled           = 0;
+	wm_data->offset                = 0;
+	wm_data->width                 = 0;
+	wm_data->height                = 0;
+	wm_data->stride                = 0;
+	wm_data->format                = 0;
+	wm_data->pack_fmt              = 0;
+	wm_data->burst_len             = 0;
+	wm_data->frame_based           = 0;
+	wm_data->irq_subsample_period  = 0;
+	wm_data->irq_subsample_pattern = 0;
+	wm_data->framedrop_period      = 0;
+	wm_data->framedrop_pattern     = 0;
+
+	wm_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+
+	return 0;
+}
+
+/*
+ * cam_vfe_bus_start_wm()
+ *
+ * @Brief: Program one bus write master (address/size/packer/framedrop
+ *         registers), enable it, and mark the resource streaming.
+ *         Geometry comes from the values captured at acquire time.
+ */
+static int cam_vfe_bus_start_wm(struct cam_isp_resource_node *wm_res)
+{
+	int rc = 0;
+	struct cam_vfe_bus_ver2_wm_resource_data   *rsrc_data =
+		wm_res->res_priv;
+	struct cam_vfe_bus_ver2_common_data        *common_data =
+		rsrc_data->common_data;
+	uint32_t                                    width;
+	uint32_t                                    height;
+	uint32_t                                    pack_fmt;
+	uint32_t                                    stride;
+	uint32_t                                    en_cfg;
+
+	/*
+	 * NOTE(review): this first log labels rsrc_data->width as
+	 * "stride" before the real stride is computed below; the second
+	 * log block after configuration prints the authoritative values.
+	 */
+	CDBG("WM res %d width = %d, height = %d\n", rsrc_data->index,
+		rsrc_data->width, rsrc_data->height);
+	CDBG("WM res %d pk_fmt = %d\n", rsrc_data->index,
+		rsrc_data->pack_fmt & PACKER_FMT_MAX);
+	CDBG("WM res %d stride = %d, burst len = %d\n",
+		rsrc_data->index, rsrc_data->width, 0xf);
+
+	/* Clear header/frame-inc state and cap the burst length at 0xf. */
+	cam_io_w_mb(0, common_data->mem_base + rsrc_data->hw_regs->header_addr);
+	cam_io_w_mb(0, common_data->mem_base + rsrc_data->hw_regs->header_cfg);
+	cam_io_w_mb(0, common_data->mem_base + rsrc_data->hw_regs->frame_inc);
+	cam_io_w(0xf, common_data->mem_base + rsrc_data->hw_regs->burst_limit);
+
+	/*
+	 * Per-client geometry. Clients 0-2 are the RDI WMs and 3-4 the
+	 * FULL-path Y/C WMs per cam_vfe_bus_get_wm_idx().
+	 * NOTE(review): the 5/4 scaling for RDI presumably flattens
+	 * 10bpp-packed data into one line (en_cfg 0x3 looks like
+	 * enable + frame-based mode) -- confirm against the HW spec.
+	 */
+	if (rsrc_data->index < 3) {
+		width = rsrc_data->width * 5/4 * rsrc_data->height;
+		height = 1;
+		pack_fmt = 0x0;
+		stride = rsrc_data->width * 5/4 * rsrc_data->height;
+		en_cfg = 0x3;
+	} else if (rsrc_data->index < 5) {
+		width = rsrc_data->width;
+		height = rsrc_data->height;
+		pack_fmt = 0xE;
+		stride = rsrc_data->width;
+		en_cfg = 0x1;
+	} else {
+		width = rsrc_data->width * 4;
+		height = rsrc_data->height / 2;
+		pack_fmt = 0x0;
+		stride = rsrc_data->width * 4;
+		en_cfg = 0x1;
+	}
+
+	cam_io_w_mb(width,
+		common_data->mem_base + rsrc_data->hw_regs->buffer_width_cfg);
+	cam_io_w(height,
+		common_data->mem_base + rsrc_data->hw_regs->buffer_height_cfg);
+	cam_io_w(pack_fmt,
+		common_data->mem_base + rsrc_data->hw_regs->packer_cfg);
+	cam_io_w(stride,
+		common_data->mem_base + rsrc_data->hw_regs->stride);
+
+	/* All-ones pattern with period 0: fire on every frame. */
+	cam_io_w(0xFFFFFFFF, common_data->mem_base +
+		rsrc_data->hw_regs->irq_subsample_pattern);
+	cam_io_w(0x0, common_data->mem_base +
+		rsrc_data->hw_regs->irq_subsample_period);
+
+	/* All-ones pattern with period 0: no frames dropped. */
+	cam_io_w(0xFFFFFFFF,
+		common_data->mem_base + rsrc_data->hw_regs->framedrop_pattern);
+	cam_io_w(0x0,
+		common_data->mem_base + rsrc_data->hw_regs->framedrop_period);
+
+	/* UBWC registers */
+	switch (rsrc_data->format) {
+	case CAM_FORMAT_UBWC_NV12:
+		/* Program UBWC registers */
+		break;
+	default:
+		break;
+	}
+
+	/* Subscribe IRQ */
+	if (rsrc_data->irq_enabled) {
+		/*
+		 * Currently all WM IRQ are subscribed in one place. Need to
+		 * make it dynamic later.
+		 */
+	}
+
+	/* Enable WM */
+	cam_io_w_mb(en_cfg, common_data->mem_base + rsrc_data->hw_regs->cfg);
+
+	CDBG("WM res %d width = %d, height = %d\n", rsrc_data->index,
+		width, height);
+	CDBG("WM res %d pk_fmt = %d\n", rsrc_data->index,
+		pack_fmt & PACKER_FMT_MAX);
+	CDBG("WM res %d stride = %d, burst len = %d\n",
+		rsrc_data->index, stride, 0xf);
+	CDBG("enable WM res %d offset 0x%x val 0x%x\n", rsrc_data->index,
+		(uint32_t) rsrc_data->hw_regs->cfg, en_cfg);
+
+	wm_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
+	return rc;
+}
+
+/*
+ * cam_vfe_bus_stop_wm()
+ *
+ * @Brief: Disable one write master, reset it through the bus SW reset
+ *         register, and drop the resource back to RESERVED.
+ */
+static int cam_vfe_bus_stop_wm(struct cam_isp_resource_node *wm_res)
+{
+	struct cam_vfe_bus_ver2_wm_resource_data *wm_data = wm_res->res_priv;
+	struct cam_vfe_bus_ver2_common_data      *cdata = wm_data->common_data;
+
+	/* Disable the write master before resetting it. */
+	cam_io_w_mb(0x0, cdata->mem_base + wm_data->hw_regs->cfg);
+
+	CDBG("irq_enabled %d", wm_data->irq_enabled);
+	/*
+	 * IRQ unsubscribe is still handled centrally; nothing to do
+	 * per-WM yet.
+	 */
+
+	/* Halt & reset this WM: its index is its SW reset bit. */
+	cam_io_w_mb(BIT(wm_data->index),
+		cdata->mem_base + cdata->common_reg->sw_reset);
+
+	wm_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+
+	return 0;
+}
+
+/*
+ * cam_vfe_bus_handle_wm_done_top_half()
+ *
+ * @Brief: Per-WM top half stub. WM done IRQs are not dispatched
+ *         through individual top halves yet.
+ *         NOTE(review): -EPERM presumably tells the IRQ controller
+ *         this handler did not consume the event -- confirm against
+ *         cam_irq_controller semantics.
+ */
+static int cam_vfe_bus_handle_wm_done_top_half(uint32_t evt_id,
+	struct cam_irq_th_payload *th_payload)
+{
+	return -EPERM;
+}
+
+/*
+ * cam_vfe_bus_handle_wm_done_bottom_half()
+ *
+ * @Brief: Bottom half for a WM done event: if this WM's bit is set in
+ *         bus IRQ STATUS1, consume the bit, recycle the payload, and
+ *         report success; otherwise report error without touching the
+ *         payload.
+ */
+static int cam_vfe_bus_handle_wm_done_bottom_half(void *wm_node,
+	void *evt_payload_priv)
+{
+	struct cam_isp_resource_node             *wm_res = wm_node;
+	struct cam_vfe_bus_irq_evt_payload       *payload = evt_payload_priv;
+	struct cam_vfe_bus_ver2_wm_resource_data *wm_data;
+	uint32_t                                  status1;
+
+	wm_data = wm_res ? wm_res->res_priv : NULL;
+	if (!payload || !wm_data)
+		return CAM_VFE_IRQ_STATUS_ERR;
+
+	status1 = payload->irq_reg_val[CAM_IFE_IRQ_BUS_REG_STATUS1];
+	if (!(status1 & BIT(wm_data->index)))
+		return CAM_VFE_IRQ_STATUS_ERR;
+
+	/* Consume this WM's done bit and return the payload to the pool. */
+	payload->irq_reg_val[CAM_IFE_IRQ_BUS_REG_STATUS1] &=
+		~BIT(wm_data->index);
+	cam_vfe_bus_put_evt_payload(payload->core_info, &payload);
+
+	return CAM_VFE_IRQ_STATUS_SUCCESS;
+}
+
+/*
+ * cam_vfe_bus_init_wm_resource()
+ *
+ * @Brief: Allocate the private data for the write master at @index and
+ *         wire up its register block, ops and owning HW interface.
+ *
+ * @Return: 0 on success, -ENOMEM when the private data cannot be
+ *          allocated.
+ */
+static int cam_vfe_bus_init_wm_resource(uint32_t index,
+	struct cam_vfe_bus_ver2_priv    *ver2_bus_priv,
+	struct cam_vfe_bus_ver2_hw_info *ver2_hw_info,
+	struct cam_isp_resource_node    *wm_res)
+{
+	struct cam_vfe_bus_ver2_wm_resource_data *rsrc_data;
+
+	rsrc_data = kzalloc(sizeof(*rsrc_data), GFP_KERNEL);
+	if (!rsrc_data) {
+		/*
+		 * Fix: allocation failure aborts bringing up this WM, so
+		 * report it at error level instead of the former
+		 * debug-only CDBG.
+		 */
+		pr_err("Failed to alloc for wm res priv\n");
+		return -ENOMEM;
+	}
+	wm_res->res_priv = rsrc_data;
+
+	rsrc_data->index = index;
+	rsrc_data->hw_regs = &ver2_hw_info->bus_client_reg[index];
+	rsrc_data->common_data = &ver2_bus_priv->common_data;
+
+	wm_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+	INIT_LIST_HEAD(&wm_res->list);
+
+	wm_res->start = cam_vfe_bus_start_wm;
+	wm_res->stop = cam_vfe_bus_stop_wm;
+	wm_res->top_half_handler = cam_vfe_bus_handle_wm_done_top_half;
+	wm_res->bottom_half_handler = cam_vfe_bus_handle_wm_done_bottom_half;
+	wm_res->hw_intf = ver2_bus_priv->common_data.hw_intf;
+
+	return 0;
+}
+
+/*
+ * cam_vfe_bus_add_wm_to_comp_grp()
+ *
+ * @Brief: Accumulate a write master's done bit(s) into the composite
+ *         group's composite mask.
+ */
+static void cam_vfe_bus_add_wm_to_comp_grp(
+	struct cam_isp_resource_node    *comp_grp,
+	uint32_t                         composite_mask)
+{
+	struct cam_vfe_bus_ver2_comp_grp_data *grp_data = comp_grp->res_priv;
+
+	grp_data->composite_mask |= composite_mask;
+}
+
+/*
+ * cam_vfe_bus_match_comp_grp()
+ *
+ * @Brief: Find an already-acquired composite group matching the
+ *         caller's local index and unique id. *comp_grp is NULL when
+ *         no used group matches.
+ */
+static void cam_vfe_bus_match_comp_grp(
+	struct cam_vfe_bus_ver2_priv  *ver2_bus_priv,
+	struct cam_isp_resource_node **comp_grp,
+	uint32_t                       comp_grp_local_idx,
+	uint32_t                       unique_id)
+{
+	struct cam_isp_resource_node          *node = NULL;
+	struct cam_vfe_bus_ver2_comp_grp_data *grp_data = NULL;
+
+	*comp_grp = NULL;
+
+	list_for_each_entry(node, &ver2_bus_priv->used_comp_grp, list) {
+		grp_data = node->res_priv;
+		if (grp_data->comp_grp_local_idx == comp_grp_local_idx &&
+			grp_data->unique_id == unique_id) {
+			*comp_grp = node;
+			break;
+		}
+	}
+}
+
+/*
+ * cam_vfe_bus_acquire_comp_grp()
+ *
+ * @Brief: Acquire a composite group for @out_port_info, or attach to
+ *         one already acquired with the same (comp_grp_id, unique_id).
+ *         Dual-VFE acquisitions come from the dual free list and also
+ *         program the intra-client mask.
+ *
+ * @Return: 0 on success, -ENODEV when no free group exists, -EINVAL on
+ *          a bad dual core pairing, -EBUSY when re-attaching to a
+ *          streaming group.
+ */
+static int cam_vfe_bus_acquire_comp_grp(
+	struct cam_vfe_bus_ver2_priv        *ver2_bus_priv,
+	struct cam_isp_out_port_info        *out_port_info,
+	uint32_t                             unique_id,
+	uint32_t                             is_dual,
+	uint32_t                             is_master,
+	enum cam_vfe_bus_ver2_vfe_core_id    dual_slave_core,
+	struct cam_isp_resource_node       **comp_grp)
+{
+	int rc = 0;
+	struct cam_isp_resource_node           *comp_grp_local = NULL;
+	struct cam_vfe_bus_ver2_comp_grp_data  *rsrc_data = NULL;
+
+	/* Check if matching comp_grp already acquired */
+	cam_vfe_bus_match_comp_grp(ver2_bus_priv, &comp_grp_local,
+		out_port_info->comp_grp_id, unique_id);
+
+	if (!comp_grp_local) {
+		/* First find a free group */
+		if (is_dual) {
+			if (list_empty(&ver2_bus_priv->free_dual_comp_grp)) {
+				pr_err("No Free Composite Group\n");
+				return -ENODEV;
+			}
+			comp_grp_local = list_first_entry(
+				&ver2_bus_priv->free_dual_comp_grp,
+				struct cam_isp_resource_node, list);
+			rsrc_data = comp_grp_local->res_priv;
+			rc = cam_vfe_bus_ver2_get_intra_client_mask(
+				dual_slave_core,
+				comp_grp_local->hw_intf->hw_idx,
+				&rsrc_data->intra_client_mask);
+			/*
+			 * Fix: bail out on an invalid master/slave
+			 * pairing. Previously the error was computed but
+			 * the group was still reserved and moved to the
+			 * used list, leaking it for the failed acquire.
+			 */
+			if (rc)
+				return rc;
+		} else {
+			if (list_empty(&ver2_bus_priv->free_comp_grp)) {
+				pr_err("No Free Composite Group\n");
+				return -ENODEV;
+			}
+			comp_grp_local = list_first_entry(
+				&ver2_bus_priv->free_comp_grp,
+				struct cam_isp_resource_node, list);
+			rsrc_data = comp_grp_local->res_priv;
+		}
+
+		list_del(&comp_grp_local->list);
+		comp_grp_local->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+
+		rsrc_data->is_master = is_master;
+		rsrc_data->composite_mask = 0;
+		rsrc_data->unique_id = unique_id;
+		rsrc_data->comp_grp_local_idx = out_port_info->comp_grp_id;
+
+		list_add_tail(&comp_grp_local->list,
+			&ver2_bus_priv->used_comp_grp);
+
+	} else {
+		rsrc_data = comp_grp_local->res_priv;
+		/* Do not support runtime change in composite mask */
+		if (comp_grp_local->res_state ==
+			CAM_ISP_RESOURCE_STATE_STREAMING) {
+			pr_err("Invalid State %d Comp Grp %u\n",
+				comp_grp_local->res_state,
+				rsrc_data->comp_grp_type);
+			return -EBUSY;
+		}
+	}
+
+	*comp_grp = comp_grp_local;
+
+	return rc;
+}
+
+/*
+ * Release a previously acquired composite group.
+ *
+ * Validates that @in_comp_grp is on the used list, then returns it to the
+ * matching free list (dual or single, chosen by comp_grp_type) and clears
+ * its acquisition state. Releasing an already-available group is a no-op.
+ *
+ * Returns 0 on success, -EINVAL on NULL input, -ENODEV if the group is
+ * not on the used list.
+ */
+static int cam_vfe_bus_release_comp_grp(
+	struct cam_vfe_bus_ver2_priv         *ver2_bus_priv,
+	struct cam_isp_resource_node         *in_comp_grp)
+{
+	struct cam_isp_resource_node           *comp_grp = NULL;
+	struct cam_vfe_bus_ver2_comp_grp_data  *in_rsrc_data = NULL;
+	int match_found = 0;
+
+	if (!in_comp_grp) {
+		/* Log the argument being validated; the original logged the
+		 * still-NULL local in_rsrc_data instead.
+		 */
+		pr_err("Invalid Params Comp Grp %pK\n", in_comp_grp);
+		return -EINVAL;
+	}
+
+	if (in_comp_grp->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+		/* Already Released. Do Nothing */
+		return 0;
+	}
+
+	in_rsrc_data = in_comp_grp->res_priv;
+
+	/* The group must currently be tracked on the used list */
+	list_for_each_entry(comp_grp, &ver2_bus_priv->used_comp_grp, list) {
+		if (comp_grp == in_comp_grp) {
+			match_found = 1;
+			break;
+		}
+	}
+
+	if (!match_found) {
+		pr_err("Could not find matching Comp Grp type %u\n",
+			in_rsrc_data->comp_grp_type);
+		return -ENODEV;
+	}
+
+	/* Return the group to the free list it originally came from */
+	list_del(&comp_grp->list);
+	if (in_rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 &&
+		in_rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5)
+		list_add_tail(&comp_grp->list,
+			&ver2_bus_priv->free_dual_comp_grp);
+	else if (in_rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_0
+		&& in_rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_5)
+		list_add_tail(&comp_grp->list, &ver2_bus_priv->free_comp_grp);
+
+	in_rsrc_data->unique_id = 0;
+	in_rsrc_data->comp_grp_local_idx = 0;
+	in_rsrc_data->composite_mask = 0;
+	in_rsrc_data->dual_slave_core = CAM_VFE_BUS_VER2_VFE_CORE_MAX;
+
+	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+
+	return 0;
+}
+
+/*
+ * Start a composite group: program the composite mask register and, for a
+ * dual-VFE master group, enable the intra-client bits in the dual master
+ * composite config register. Moves the resource to STREAMING state.
+ */
+static int cam_vfe_bus_start_comp_grp(struct cam_isp_resource_node *comp_grp)
+{
+	int rc = 0;
+	struct cam_vfe_bus_ver2_comp_grp_data      *rsrc_data =
+		comp_grp->res_priv;
+	struct cam_vfe_bus_ver2_common_data        *common_data =
+		rsrc_data->common_data;
+
+	/*
+	 * Individual Comp_Grp Subscribe IRQ can be done here once
+	 * dynamic IRQ enable support is added.
+	 */
+
+	cam_io_w_mb(rsrc_data->composite_mask, common_data->mem_base +
+		rsrc_data->hw_regs->comp_mask);
+
+	CDBG("composite_mask is 0x%x\n", rsrc_data->composite_mask);
+	CDBG("composite_mask addr 0x%x\n",  rsrc_data->hw_regs->comp_mask);
+
+	if (rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 &&
+		rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5 &&
+		rsrc_data->is_master) {
+		int dual_comp_grp = (rsrc_data->comp_grp_type -
+			CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0);
+		/* Read-modify-write: other dual groups' bits are preserved */
+		int intra_client_en = cam_io_r_mb(common_data->mem_base +
+			common_data->common_reg->dual_master_comp_cfg);
+
+		/* 2 Bits per comp_grp. Hence left shift by comp_grp * 2 */
+		intra_client_en |=
+			(rsrc_data->intra_client_mask << dual_comp_grp * 2);
+
+		cam_io_w_mb(intra_client_en, common_data->mem_base +
+			common_data->common_reg->dual_master_comp_cfg);
+	}
+
+	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+	return rc;
+}
+
+/*
+ * Stop a composite group: for a dual-VFE master group, clear its
+ * intra-client bits in the dual master composite config register. Moves
+ * the resource back to RESERVED state.
+ *
+ * NOTE(review): the comp_mask register is re-written with composite_mask
+ * here (same value as on start), not cleared to 0 — confirm this is the
+ * intended stop-time programming for this HW version.
+ */
+static int cam_vfe_bus_stop_comp_grp(struct cam_isp_resource_node *comp_grp)
+{
+	int rc = 0;
+	struct cam_vfe_bus_ver2_comp_grp_data      *rsrc_data =
+		comp_grp->res_priv;
+	struct cam_vfe_bus_ver2_common_data        *common_data =
+		rsrc_data->common_data;
+
+	/* Unsubscribe IRQ */
+
+	cam_io_w_mb(rsrc_data->composite_mask, common_data->mem_base +
+		rsrc_data->hw_regs->comp_mask);
+	if (rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 &&
+		rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5 &&
+		rsrc_data->is_master) {
+		int dual_comp_grp = (rsrc_data->comp_grp_type -
+			CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0);
+		/* Read-modify-write: only clear this group's two bits */
+		int intra_client_en = cam_io_r_mb(common_data->mem_base +
+			common_data->common_reg->dual_master_comp_cfg);
+
+		/* 2 Bits per comp_grp. Hence left shift by comp_grp * 2 */
+		intra_client_en &=
+			~(rsrc_data->intra_client_mask << dual_comp_grp * 2);
+
+		cam_io_w_mb(intra_client_en, common_data->mem_base +
+			common_data->common_reg->dual_master_comp_cfg);
+	}
+
+	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+
+	return rc;
+}
+
+/*
+ * Top half for composite-group done IRQ. Per-group IRQ subscription is
+ * not enabled yet (see cam_vfe_bus_start_comp_grp); the bus-level top
+ * half handles dispatch instead, so this handler just rejects the call.
+ */
+static int cam_vfe_bus_handle_comp_done_top_half(uint32_t evt_id,
+	struct cam_irq_th_payload *th_payload)
+{
+	return -EPERM;
+}
+
+/*
+ * Bottom half for composite-group buf done.
+ *
+ * Decodes the saved IRQ status registers for this group's type: regular
+ * groups use STATUS0 (error bit 11, overwrite bit 12, done bits 5..10),
+ * dual groups use STATUS2 (error bit 6, overwrite bit 7, done bits 0..5).
+ * On success the handled done bit is cleared from the saved status and
+ * the payload is returned to the free pool.
+ *
+ * Returns a CAM_VFE_IRQ_STATUS_* code.
+ */
+static int cam_vfe_bus_handle_comp_done_bottom_half(
+	void                *handler_priv,
+	void                *evt_payload_priv)
+{
+	int rc = CAM_VFE_IRQ_STATUS_ERR;
+	struct cam_isp_resource_node          *comp_grp = handler_priv;
+	struct cam_vfe_bus_irq_evt_payload    *evt_payload = evt_payload_priv;
+	struct cam_vfe_bus_ver2_comp_grp_data *rsrc_data = comp_grp->res_priv;
+	uint32_t                              *cam_ife_irq_regs;
+	uint32_t                               status_reg;
+	uint32_t                               comp_err_reg;
+	uint32_t                               comp_grp_id;
+
+	if (!evt_payload)
+		return rc;
+
+	cam_ife_irq_regs = evt_payload->irq_reg_val;
+
+	CDBG("comp grp type %d\n", rsrc_data->comp_grp_type);
+	switch (rsrc_data->comp_grp_type) {
+	case CAM_VFE_BUS_VER2_COMP_GRP_0:
+	case CAM_VFE_BUS_VER2_COMP_GRP_1:
+	case CAM_VFE_BUS_VER2_COMP_GRP_2:
+	case CAM_VFE_BUS_VER2_COMP_GRP_3:
+	case CAM_VFE_BUS_VER2_COMP_GRP_4:
+	case CAM_VFE_BUS_VER2_COMP_GRP_5:
+		comp_grp_id = (rsrc_data->comp_grp_type -
+			CAM_VFE_BUS_VER2_COMP_GRP_0);
+
+		/* Check for Regular composite error */
+		status_reg = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS0];
+
+		comp_err_reg = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_COMP_ERR];
+		if ((status_reg & BIT(11)) &&
+			(comp_err_reg & rsrc_data->composite_mask)) {
+			/* Check for Regular composite error */
+			rc = CAM_VFE_IRQ_STATUS_ERR_COMP;
+			break;
+		}
+
+		comp_err_reg = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_COMP_OWRT];
+		/* Check for Regular composite Overwrite */
+		if ((status_reg & BIT(12)) &&
+			(comp_err_reg & rsrc_data->composite_mask)) {
+			rc = CAM_VFE_IRQ_STATUS_COMP_OWRT;
+			break;
+		}
+
+		/* Regular Composite SUCCESS */
+		if (status_reg & BIT(comp_grp_id + 5)) {
+			cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS0] &=
+				~BIT(comp_grp_id + 5);
+			rc = CAM_VFE_IRQ_STATUS_SUCCESS;
+		}
+
+		CDBG("status reg = 0x%x, bit index = %d\n",
+			status_reg, (comp_grp_id + 5));
+		break;
+
+	case CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0:
+	case CAM_VFE_BUS_VER2_COMP_GRP_DUAL_1:
+	case CAM_VFE_BUS_VER2_COMP_GRP_DUAL_2:
+	case CAM_VFE_BUS_VER2_COMP_GRP_DUAL_3:
+	case CAM_VFE_BUS_VER2_COMP_GRP_DUAL_4:
+	case CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5:
+		comp_grp_id = (rsrc_data->comp_grp_type -
+			CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0);
+
+		/* Check for DUAL composite error */
+		status_reg = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS2];
+
+		comp_err_reg = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_DUAL_COMP_ERR];
+		if ((status_reg & BIT(6)) &&
+			(comp_err_reg & rsrc_data->composite_mask)) {
+			/* Check for DUAL composite error */
+			rc = CAM_VFE_IRQ_STATUS_ERR_COMP;
+			break;
+		}
+
+		/* Check for Dual composite Overwrite */
+		comp_err_reg = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_DUAL_COMP_OWRT];
+		if ((status_reg & BIT(7)) &&
+			(comp_err_reg & rsrc_data->composite_mask)) {
+			rc = CAM_VFE_IRQ_STATUS_COMP_OWRT;
+			break;
+		}
+
+		/* DUAL Composite SUCCESS */
+		if (status_reg & BIT(comp_grp_id)) {
+			/* Clear the same bit that was tested; the original
+			 * cleared BIT(comp_grp_id + 5), copy-pasted from the
+			 * regular-group case, leaving the handled dual done
+			 * bit set and clearing an unrelated bit.
+			 */
+			cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS2] &=
+				~BIT(comp_grp_id);
+			rc = CAM_VFE_IRQ_STATUS_SUCCESS;
+		}
+
+		break;
+	default:
+		rc = CAM_VFE_IRQ_STATUS_ERR;
+		break;
+	}
+
+	if (rc == CAM_VFE_IRQ_STATUS_SUCCESS)
+		cam_vfe_bus_put_evt_payload(evt_payload->core_info,
+			&evt_payload);
+
+	return rc;
+}
+
+/*
+ * Initialize one composite group resource at @index: allocate its private
+ * data, wire register offsets and ops, and add it to the appropriate free
+ * list (dual or single) based on the group type.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ */
+static int cam_vfe_bus_init_comp_grp(uint32_t index,
+	struct cam_vfe_bus_ver2_priv    *ver2_bus_priv,
+	struct cam_vfe_bus_ver2_hw_info *ver2_hw_info,
+	struct cam_isp_resource_node    *comp_grp)
+{
+	/* Original read the uninitialized comp_grp->res_priv into rsrc_data
+	 * only to overwrite it immediately; start from NULL instead.
+	 */
+	struct cam_vfe_bus_ver2_comp_grp_data *rsrc_data = NULL;
+
+	rsrc_data = kzalloc(sizeof(struct cam_vfe_bus_ver2_comp_grp_data),
+		GFP_KERNEL);
+	if (!rsrc_data) {
+		CDBG("Failed to alloc for comp_grp_priv\n");
+		return -ENOMEM;
+	}
+	comp_grp->res_priv = rsrc_data;
+
+	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+	INIT_LIST_HEAD(&comp_grp->list);
+
+	rsrc_data->comp_grp_type   = index;
+	rsrc_data->common_data     = &ver2_bus_priv->common_data;
+	rsrc_data->hw_regs         = &ver2_hw_info->comp_grp_reg[index];
+	rsrc_data->dual_slave_core = CAM_VFE_BUS_VER2_VFE_CORE_MAX;
+
+	/* Park the group on the free list matching its type */
+	if (rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 &&
+		rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5)
+		list_add_tail(&comp_grp->list,
+			&ver2_bus_priv->free_dual_comp_grp);
+	else if (rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_0
+		&& rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_5)
+		list_add_tail(&comp_grp->list, &ver2_bus_priv->free_comp_grp);
+
+	comp_grp->start = cam_vfe_bus_start_comp_grp;
+	comp_grp->stop = cam_vfe_bus_stop_comp_grp;
+	comp_grp->top_half_handler = cam_vfe_bus_handle_comp_done_top_half;
+	comp_grp->bottom_half_handler =
+		cam_vfe_bus_handle_comp_done_bottom_half;
+	comp_grp->hw_intf = ver2_bus_priv->common_data.hw_intf;
+
+	return 0;
+}
+
+/*
+ * Acquire a VFE bus output resource.
+ *
+ * Maps the requested output port to a vfe_out resource, reserves a
+ * composite group when more than one write master is needed (or a group
+ * id is explicitly requested), then reserves the individual write
+ * masters. WM-level IRQ subscription is only used when no composite
+ * group is involved. On WM failure, already-reserved WMs and the
+ * composite group are rolled back.
+ *
+ * Returns 0 on success, negative errno on failure.
+ */
+static int cam_vfe_bus_acquire_vfe_out(void *bus_priv, void *acquire_args)
+{
+	int                                     rc = -ENODEV;
+	int                                     i;
+	enum cam_vfe_bus_ver2_vfe_out_type      vfe_out_res_id;
+	uint32_t                                format;
+	uint32_t                                num_wm;
+	uint32_t                                subscribe_irq;
+	uint32_t                                client_done_mask;
+	struct cam_vfe_bus_ver2_priv           *ver2_bus_priv = bus_priv;
+	struct cam_vfe_acquire_args            *acq_args = acquire_args;
+	struct cam_vfe_hw_vfe_out_acquire_args *out_acquire_args;
+	struct cam_isp_resource_node           *rsrc_node = NULL;
+	struct cam_vfe_bus_ver2_vfe_out_data   *rsrc_data = NULL;
+
+	if (!bus_priv || !acquire_args) {
+		pr_err("Invalid Param");
+		return -EINVAL;
+	}
+
+	out_acquire_args = &acq_args->vfe_out;
+	format = out_acquire_args->out_port_info->format;
+
+	CDBG("Acquiring resource type 0x%x\n",
+		out_acquire_args->out_port_info->res_type);
+
+	vfe_out_res_id = cam_vfe_bus_get_out_res_id(
+		out_acquire_args->out_port_info->res_type);
+	if (vfe_out_res_id == CAM_VFE_BUS_VER2_VFE_OUT_MAX)
+		return -ENODEV;
+
+	/* Number of write masters depends on output type and pixel format */
+	num_wm = cam_vfe_bus_get_num_wm(vfe_out_res_id, format);
+	if (num_wm < 1)
+		return -EINVAL;
+
+	rsrc_node = &ver2_bus_priv->vfe_out[vfe_out_res_id];
+	if (rsrc_node->res_state != CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+		pr_err("Resource not available: Res_id %d state:%d\n",
+			vfe_out_res_id, rsrc_node->res_state);
+		return -EBUSY;
+	}
+
+	rsrc_data = rsrc_node->res_priv;
+	rsrc_data->num_wm = num_wm;
+	rsrc_node->res_id = out_acquire_args->out_port_info->res_type;
+	rsrc_node->tasklet_info = acq_args->tasklet;
+	rsrc_node->cdm_ops = out_acquire_args->cdm_ops;
+	rsrc_data->cdm_util_ops = out_acquire_args->cdm_ops;
+
+	/* Reserve Composite Group */
+	if (num_wm > 1 || (out_acquire_args->out_port_info->comp_grp_id >
+		CAM_ISP_RES_COMP_GROUP_NONE &&
+		out_acquire_args->out_port_info->comp_grp_id <
+		CAM_ISP_RES_COMP_GROUP_ID_MAX)) {
+		rc = cam_vfe_bus_acquire_comp_grp(ver2_bus_priv,
+			out_acquire_args->out_port_info,
+			out_acquire_args->unique_id,
+			out_acquire_args->is_dual,
+			out_acquire_args->is_master,
+			out_acquire_args->dual_slave_core,
+			&rsrc_data->comp_grp);
+		if (rc < 0)
+			return rc;
+
+		/* Composite group handles buf done; skip per-WM IRQ */
+		subscribe_irq = 0;
+	} else
+		subscribe_irq = 1;
+
+	/* Reserve WM */
+	for (i = 0; i < num_wm; i++) {
+		rc = cam_vfe_bus_acquire_wm(ver2_bus_priv,
+			out_acquire_args->out_port_info,
+			vfe_out_res_id,
+			i,
+			out_acquire_args->split_id,
+			subscribe_irq,
+			&rsrc_data->wm_res[i],
+			&client_done_mask);
+		if (rc < 0)
+			goto release_wm;
+
+		if (rsrc_data->comp_grp)
+			cam_vfe_bus_add_wm_to_comp_grp(rsrc_data->comp_grp,
+				client_done_mask);
+	}
+
+	rsrc_node->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+	out_acquire_args->rsrc_node = rsrc_node;
+
+	CDBG("Acquire successful\n");
+	return rc;
+
+release_wm:
+	/* Roll back WMs reserved so far, then the composite group.
+	 * NOTE(review): release_comp_grp is called even when no group was
+	 * acquired (comp_grp NULL) and will log an error — confirm benign.
+	 */
+	for (i--; i >= 0; i--)
+		cam_vfe_bus_release_wm(ver2_bus_priv, rsrc_data->wm_res[i]);
+
+	cam_vfe_bus_release_comp_grp(ver2_bus_priv,
+		rsrc_data->comp_grp);
+
+	return rc;
+}
+
+/*
+ * Release a VFE bus output resource: release all its write masters and
+ * its composite group (if any), clear the CDM/tasklet hookups, and move
+ * the resource back to AVAILABLE. A non-RESERVED state is logged but the
+ * teardown still proceeds.
+ */
+static int cam_vfe_bus_release_vfe_out(void *bus_priv,
+	struct cam_isp_resource_node        *vfe_out)
+{
+	uint32_t i;
+	struct cam_vfe_bus_ver2_vfe_out_data  *rsrc_data = vfe_out->res_priv;
+
+	if (vfe_out->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
+		pr_err("Error! Invalid resource state:%d\n",
+			vfe_out->res_state);
+	}
+
+	for (i = 0; i < rsrc_data->num_wm; i++)
+		cam_vfe_bus_release_wm(bus_priv, rsrc_data->wm_res[i]);
+	rsrc_data->num_wm = 0;
+
+	if (rsrc_data->comp_grp)
+		cam_vfe_bus_release_comp_grp(bus_priv, rsrc_data->comp_grp);
+	rsrc_data->comp_grp = NULL;
+
+	vfe_out->tasklet_info = NULL;
+	vfe_out->cdm_ops = NULL;
+	rsrc_data->cdm_util_ops = NULL;
+
+	/* Only transition to AVAILABLE from a valid RESERVED state */
+	if (vfe_out->res_state == CAM_ISP_RESOURCE_STATE_RESERVED)
+		vfe_out->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+
+	return 0;
+}
+
+/*
+ * Start a reserved VFE bus output: enable bus IRQ masks, start all its
+ * write masters and composite group, then program one-time bus config
+ * (CGC overrides, address-sync, test bus).
+ *
+ * NOTE(review): IRQ mask and bus config register offsets (0x2044 etc.)
+ * are hard-coded here rather than taken from common_reg — presumably
+ * interim bring-up code; confirm against the register map.
+ */
+static int cam_vfe_bus_start_vfe_out(struct cam_isp_resource_node *vfe_out)
+{
+	int rc = 0, i;
+	struct cam_vfe_bus_ver2_vfe_out_data  *rsrc_data = vfe_out->res_priv;
+	struct cam_vfe_bus_ver2_common_data   *common_data =
+		rsrc_data->common_data;
+
+	CDBG("Start resource index %d\n", rsrc_data->out_type);
+
+	if (vfe_out->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
+		pr_err("Error! Invalid resource state:%d\n",
+			vfe_out->res_state);
+		return -EACCES;
+	}
+
+	/* Enable IRQ Mask */
+	cam_io_w_mb(0x00001FE0, common_data->mem_base + 0x2044);
+	cam_io_w_mb(0x000FFFE7, common_data->mem_base + 0x2048);
+	cam_io_w_mb(0x000000FF, common_data->mem_base + 0x204c);
+
+	for (i = 0; i < rsrc_data->num_wm; i++)
+		rc = cam_vfe_bus_start_wm(rsrc_data->wm_res[i]);
+
+	if (rsrc_data->comp_grp)
+		rc = cam_vfe_bus_start_comp_grp(rsrc_data->comp_grp);
+
+	/* VFE_MODULE_BUS_CGC_OVERRIDE */
+	cam_io_w_mb(0xFFFFFFFF, rsrc_data->common_data->mem_base + 0x0000003C);
+	/* VFE_MODULE_COLOR_CGC_OVERRIDE */
+	cam_io_w_mb(0xFFFFFFFF, rsrc_data->common_data->mem_base + 0x00000034);
+	/* VFE_MODULE_ZOOM_CGC_OVERRIDE */
+	cam_io_w_mb(0xFFFFFFFF, rsrc_data->common_data->mem_base + 0x38);
+	/* VFE_MODULE_LENS_CGC_OVERRIDE */
+	cam_io_w_mb(0xFFFFFFFF, rsrc_data->common_data->mem_base + 0x0000002C);
+	/* VFE_MODULE_STATS_CGC_OVERRIDE */
+	cam_io_w_mb(0xFFFFFFFF, rsrc_data->common_data->mem_base + 0x00000030);
+
+	/* BUS_WR_INPUT_IF_ADDR_SYNC_CFG */
+	cam_io_w_mb(0x0, rsrc_data->common_data->mem_base + 0x0000207C);
+	/*  BUS_WR_INPUT_IF_ADDR_SYNC_FRAME_HEADER */
+	cam_io_w_mb(0x0, rsrc_data->common_data->mem_base + 0x00002080);
+	/* BUS_WR_INPUT_IF_ADDR_SYNC_NO_SYNC */
+	cam_io_w_mb(0xFFFFF, rsrc_data->common_data->mem_base + 0x00002084);
+	/*  BUS_WR_INPUT_IF_ADDR_SYNC_0 */
+	cam_io_w_mb(0x0, rsrc_data->common_data->mem_base + 0x00002088);
+	cam_io_w_mb(0x0, rsrc_data->common_data->mem_base + 0x0000208c);
+	cam_io_w_mb(0x0, rsrc_data->common_data->mem_base + 0x00002090);
+	cam_io_w_mb(0x0, rsrc_data->common_data->mem_base + 0x00002094);
+	cam_io_w_mb(0x0, rsrc_data->common_data->mem_base + 0x00002098);
+	cam_io_w_mb(0x0, rsrc_data->common_data->mem_base + 0x0000209c);
+	cam_io_w_mb(0x0, rsrc_data->common_data->mem_base + 0x000020a0);
+	cam_io_w_mb(0x0, rsrc_data->common_data->mem_base + 0x000020a4);
+
+	/* no clock gating at bus input */
+	cam_io_w_mb(0xFFFFF, rsrc_data->common_data->mem_base + 0x0000200C);
+
+	/* BUS_WR_TEST_BUS_CTRL */
+	cam_io_w_mb(0x0, rsrc_data->common_data->mem_base + 0x0000211C);
+
+	return rc;
+}
+
+/*
+ * Stop a streaming VFE bus output: stop the composite group (if any) and
+ * all write masters, then move the resource back to RESERVED. A resource
+ * that is not streaming is left untouched.
+ */
+static int cam_vfe_bus_stop_vfe_out(struct cam_isp_resource_node *vfe_out)
+{
+	int rc = 0, i;
+	struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = vfe_out->res_priv;
+
+	if (vfe_out->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE ||
+		vfe_out->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
+		return rc;
+	}
+
+	if (rsrc_data->comp_grp)
+		rc = cam_vfe_bus_stop_comp_grp(rsrc_data->comp_grp);
+
+	for (i = 0; i < rsrc_data->num_wm; i++)
+		rc = cam_vfe_bus_stop_wm(rsrc_data->wm_res[i]);
+
+	/* Original assigned STREAMING here and immediately overwrote it
+	 * with RESERVED; the dead store is dropped.
+	 */
+	vfe_out->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+	return rc;
+}
+
+/*
+ * Top half for vfe_out buf done. Dispatch happens at the bus level
+ * (cam_vfe_bus_ver2_handle_irq); this per-resource handler is not used
+ * and rejects the call.
+ */
+static int cam_vfe_bus_handle_vfe_out_done_top_half(uint32_t evt_id,
+	struct cam_irq_th_payload *th_payload)
+{
+	return -EPERM;
+}
+
+/*
+ * Bottom half for vfe_out buf done: forwards the event payload to either
+ * the composite group's handler (multi-WM case) or the single WM's
+ * handler, and returns that handler's status code.
+ */
+static int cam_vfe_bus_handle_vfe_out_done_bottom_half(
+	void                *handler_priv,
+	void                *evt_payload_priv)
+{
+	int rc = -EINVAL;
+	struct cam_isp_resource_node         *vfe_out = handler_priv;
+	struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = vfe_out->res_priv;
+
+	/*
+	 * If this resource has Composite Group then we only handle
+	 * Composite done. We acquire Composite if number of WM > 1.
+	 * So Else case is only one individual buf_done = WM[0].
+	 */
+	if (rsrc_data->comp_grp) {
+		rc = rsrc_data->comp_grp->bottom_half_handler(
+			rsrc_data->comp_grp, evt_payload_priv);
+	} else {
+		rc = rsrc_data->wm_res[0]->bottom_half_handler(
+			rsrc_data->wm_res[0], evt_payload_priv);
+	}
+
+	return rc;
+}
+
+/*
+ * Initialize one vfe_out resource at @index: allocate its private data,
+ * copy HW capability limits from @ver2_hw_info, and wire the resource
+ * ops. Returns 0 on success, -ENOMEM on allocation failure.
+ */
+static int cam_vfe_bus_init_vfe_out_resource(uint32_t index,
+	struct cam_vfe_bus_ver2_priv    *ver2_bus_priv,
+	struct cam_vfe_bus_ver2_hw_info *ver2_hw_info,
+	struct cam_isp_resource_node    *vfe_out)
+{
+	struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = NULL;
+
+	rsrc_data = kzalloc(sizeof(struct cam_vfe_bus_ver2_vfe_out_data),
+		GFP_KERNEL);
+	if (!rsrc_data) {
+		CDBG("Error! Failed to alloc for vfe out priv\n");
+		/* Redundant rc round-trip removed; fail directly */
+		return -ENOMEM;
+	}
+	vfe_out->res_priv = rsrc_data;
+
+	vfe_out->res_type = CAM_ISP_RESOURCE_VFE_OUT;
+	vfe_out->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+	INIT_LIST_HEAD(&vfe_out->list);
+
+	rsrc_data->out_type    = index;
+	rsrc_data->common_data = &ver2_bus_priv->common_data;
+	rsrc_data->max_width   =
+		ver2_hw_info->vfe_out_hw_info[index].max_width;
+	rsrc_data->max_height  =
+		ver2_hw_info->vfe_out_hw_info[index].max_height;
+
+	vfe_out->start = cam_vfe_bus_start_vfe_out;
+	vfe_out->stop = cam_vfe_bus_stop_vfe_out;
+	vfe_out->top_half_handler = cam_vfe_bus_handle_vfe_out_done_top_half;
+	vfe_out->bottom_half_handler =
+		cam_vfe_bus_handle_vfe_out_done_bottom_half;
+	vfe_out->hw_intf = ver2_bus_priv->common_data.hw_intf;
+
+	return 0;
+}
+
+/*
+ * Pop a free IRQ event payload from the bus free list. Sets *evt_payload
+ * to NULL and returns -ENODEV when the pool is exhausted; 0 on success.
+ */
+static int cam_vfe_bus_get_evt_payload(
+	struct cam_vfe_bus_ver2_priv         *bus_priv,
+	struct cam_vfe_bus_irq_evt_payload  **evt_payload)
+{
+	if (list_empty(&bus_priv->free_payload_list)) {
+		*evt_payload = NULL;
+		pr_err("No free payload\n");
+		return -ENODEV;
+	}
+
+	*evt_payload = list_first_entry(&bus_priv->free_payload_list,
+		struct cam_vfe_bus_irq_evt_payload, list);
+	list_del_init(&(*evt_payload)->list);
+	return 0;
+}
+
+/*
+ * Return an IRQ event payload to the bus free pool and NULL the caller's
+ * pointer. The payload is retained (not freed) while any saved STATUS0/
+ * STATUS1 bits remain, so other handlers can still consume it.
+ *
+ * Returns 0 on success or when the payload is retained; -EINVAL on NULL
+ * core_info or payload.
+ */
+static int cam_vfe_bus_put_evt_payload(void     *core_info,
+	struct cam_vfe_bus_irq_evt_payload     **evt_payload)
+{
+	struct cam_vfe_bus_ver2_priv         *bus_priv = NULL;
+	uint32_t  *cam_ife_irq_regs;
+	uint32_t   status_reg0, status_reg1;
+
+	/* Validate inputs before touching *evt_payload; the original read
+	 * (*evt_payload)->irq_reg_val before these NULL checks.
+	 */
+	if (!core_info) {
+		pr_err("Invalid param core_info NULL");
+		return -EINVAL;
+	}
+	if (*evt_payload == NULL) {
+		pr_err("No payload to put\n");
+		return -EINVAL;
+	}
+
+	cam_ife_irq_regs = (*evt_payload)->irq_reg_val;
+	status_reg0 = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS0];
+	status_reg1 = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS1];
+
+	if (status_reg0 || status_reg1) {
+		CDBG("status0 0x%x status1 0x%x\n", status_reg0, status_reg1);
+		return 0;
+	}
+
+	bus_priv = (*evt_payload)->bus_priv;
+	list_add_tail(&(*evt_payload)->list, &bus_priv->free_payload_list);
+	*evt_payload = NULL;
+	return 0;
+}
+
+/*
+ * Bus-level IRQ top half: snapshot all bus status registers (masked by
+ * their enable masks) into a free event payload, clear the status
+ * registers plus the global clear bit, and hand the payload to the
+ * bottom half via th_payload->evt_payload_priv.
+ *
+ * Returns 0 on success, or the error from payload allocation.
+ */
+static int cam_vfe_bus_ver2_handle_irq(uint32_t    evt_id,
+	struct cam_irq_th_payload                 *th_payload)
+{
+	int32_t                                rc;
+	int                                    i;
+	struct cam_vfe_irq_handler_priv       *handler_priv;
+	struct cam_vfe_hw_core_info           *core_info;
+	struct cam_vfe_bus_irq_evt_payload    *evt_payload;
+	struct cam_vfe_bus                    *bus_info;
+	struct cam_vfe_bus_ver2_priv          *bus_priv;
+	struct cam_irq_controller_reg_info    *reg_info;
+	uint32_t                               irq_mask;
+
+	handler_priv = th_payload->handler_priv;
+	core_info    = handler_priv->core_info;
+	bus_info     = core_info->vfe_bus;
+	bus_priv     = bus_info->bus_priv;
+	reg_info     = &bus_priv->common_data.common_reg->irq_reg_info;
+
+	/*
+	 *  add reset ack handling here once supported.
+	 *  Just clear all the bus irq status registers and ignore the reset.
+	 */
+
+	CDBG("Enter\n");
+	rc  = cam_vfe_bus_get_evt_payload(bus_priv, &evt_payload);
+	if (rc) {
+		pr_err("No tasklet_cmd is free in queue\n");
+		return rc;
+	}
+
+	cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+	evt_payload->core_index = handler_priv->core_index;
+	evt_payload->core_info  = handler_priv->core_info;
+	evt_payload->bus_priv   = bus_priv;
+	CDBG("core_idx %d, core_info %llx\n", handler_priv->core_index,
+			(uint64_t)handler_priv->core_info);
+
+	/* Save status & mask for each bus IRQ register.
+	 * NOTE(review): the mask register is located via the fixed offset
+	 * irq_reg_offset[i] - 0x18 relative to status — confirm against the
+	 * register map rather than relying on this layout assumption.
+	 */
+	for (i = 0; i < CAM_IFE_BUS_IRQ_REGISTERS_MAX; i++) {
+		irq_mask = cam_io_r(handler_priv->mem_base +
+			irq_reg_offset[i] - (0xC * 2));
+		evt_payload->irq_reg_val[i] = irq_mask &
+			cam_io_r(handler_priv->mem_base + irq_reg_offset[i]);
+		CDBG("irq_status%d = 0x%x\n", i, evt_payload->irq_reg_val[i]);
+	}
+	/* Ack exactly the bits we captured, STATUS0..STATUS2 */
+	for (i = 0; i <= CAM_IFE_IRQ_BUS_REG_STATUS2; i++) {
+		cam_io_w(evt_payload->irq_reg_val[i], handler_priv->mem_base +
+			reg_info->irq_reg_set[i].clear_reg_offset);
+		CDBG("Clear irq_status%d = 0x%x offset 0x%x\n", i,
+			evt_payload->irq_reg_val[i],
+			reg_info->irq_reg_set[i].clear_reg_offset);
+	}
+	cam_io_w(reg_info->global_clear_bitmask, handler_priv->mem_base +
+		reg_info->global_clear_offset);
+	CDBG("Global clear bitmask = 0x%x offset 0x%x\n",
+			reg_info->global_clear_bitmask,
+			reg_info->global_clear_offset);
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	return rc;
+}
+
+/*
+ * Build a CDM reg-random command that programs each write master's
+ * image_addr register with the corresponding buffer from the request.
+ *
+ * Returns 0 on success; -EINVAL on bad data; -ENOMEM when the supplied
+ * CDM buffer (or the local scratch array) is too small.
+ */
+static int cam_vfe_bus_update_buf(void *priv, void *cmd_args,
+	uint32_t arg_size)
+{
+	struct cam_vfe_bus_ver2_priv             *bus_priv;
+	struct cam_isp_hw_get_buf_update         *update_buf;
+	struct cam_vfe_bus_ver2_vfe_out_data     *vfe_out_data = NULL;
+	struct cam_vfe_bus_ver2_wm_resource_data *wm_data = NULL;
+	uint32_t  reg_val_pair[8];
+	uint32_t i, size = 0;
+
+	/*
+	 * Need the entire buf io config so we can get the stride info
+	 * for the wm.
+	 */
+
+	bus_priv = (struct cam_vfe_bus_ver2_priv  *) priv;
+	update_buf =  (struct cam_isp_hw_get_buf_update *) cmd_args;
+
+	vfe_out_data = (struct cam_vfe_bus_ver2_vfe_out_data *)
+		update_buf->cdm.res->res_priv;
+
+	if (!vfe_out_data || !vfe_out_data->cdm_util_ops) {
+		pr_err("Failed! Invalid data\n");
+		return -EINVAL;
+	}
+
+	if (update_buf->num_buf < vfe_out_data->num_wm) {
+		pr_err("Failed! Invalid number buffers:%d required:%d\n",
+			update_buf->num_buf, vfe_out_data->num_wm);
+		return -ENOMEM;
+	}
+
+	/* reg_val_pair holds one addr/value pair per WM; guard against an
+	 * out-of-bounds stack write if num_wm ever exceeds its capacity.
+	 */
+	if (vfe_out_data->num_wm * 2 >
+		sizeof(reg_val_pair) / sizeof(reg_val_pair[0])) {
+		pr_err("Failed! Too many WM:%d for reg_val_pair\n",
+			vfe_out_data->num_wm);
+		return -ENOMEM;
+	}
+
+	size = vfe_out_data->cdm_util_ops->cdm_required_size_reg_random(
+		vfe_out_data->num_wm);
+
+	/* cdm util returns dwords, need to convert to bytes */
+	if ((size * 4) > update_buf->cdm.size) {
+		pr_err("Failed! Buf size:%d insufficient, expected size:%d\n",
+			update_buf->cdm.size, size);
+		return -ENOMEM;
+	}
+
+	for (i = 0 ; i < vfe_out_data->num_wm; i++) {
+		wm_data = vfe_out_data->wm_res[i]->res_priv;
+		reg_val_pair[2 * i] = wm_data->hw_regs->image_addr;
+		reg_val_pair[2 * i + 1] = update_buf->image_buf[i];
+		CDBG("offset 0x%x, value 0x%llx\n",
+			wm_data->hw_regs->image_addr,
+			(uint64_t) update_buf->image_buf[i]);
+	}
+
+	vfe_out_data->cdm_util_ops->cdm_write_regrandom(
+		update_buf->cdm.cmd_buf_addr,
+		vfe_out_data->num_wm, reg_val_pair);
+	/* cdm util returns dwords, need to convert to bytes */
+	update_buf->cdm.used_bytes = size * 4;
+
+	return 0;
+}
+
+/*
+ * Dispatch bus-specific commands. Currently only
+ * CAM_VFE_HW_CMD_GET_BUF_UPDATE is supported; unknown commands return
+ * -EINVAL with a rate-limited error log.
+ */
+static int cam_vfe_bus_process_cmd(void *priv,
+	uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
+{
+	int rc = -EINVAL;
+
+	if (!priv || !cmd_args) {
+		pr_err_ratelimited("Error! Invalid input arguments\n");
+		return -EINVAL;
+	}
+
+	switch (cmd_type) {
+	case CAM_VFE_HW_CMD_GET_BUF_UPDATE:
+		rc = cam_vfe_bus_update_buf(priv, cmd_args, arg_size);
+		break;
+	default:
+		pr_err_ratelimited("Error! Invalid camif process command:%d\n",
+			cmd_type);
+		break;
+	}
+
+	return rc;
+}
+
+/*
+ * Initialize the ver2 bus layer: allocate the bus object and private
+ * data, initialize all WM clients, composite groups and vfe_out
+ * resources, seed the IRQ event payload pool, and wire the bus ops.
+ *
+ * NOTE(review): the error paths free only bus_priv and the bus object;
+ * res_priv allocations made by the per-resource init helpers before the
+ * failure are leaked — a deinit pass would be needed to reclaim them.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure, or the error from
+ * a resource init helper.
+ */
+int cam_vfe_bus_ver2_init(
+	void __iomem                         *mem_base,
+	struct cam_hw_intf                   *hw_intf,
+	void                                 *bus_hw_info,
+	void                                 *vfe_irq_controller,
+	struct cam_vfe_bus                  **vfe_bus)
+{
+	int i, rc = 0;
+	struct cam_vfe_bus_ver2_priv    *bus_priv = NULL;
+	struct cam_vfe_bus              *vfe_bus_local;
+	struct cam_vfe_bus_ver2_hw_info *ver2_hw_info = bus_hw_info;
+
+	CDBG("Enter\n");
+
+	vfe_bus_local = kzalloc(sizeof(struct cam_vfe_bus), GFP_KERNEL);
+	if (!vfe_bus_local) {
+		CDBG("Failed to alloc for vfe_bus\n");
+		rc = -ENOMEM;
+		goto err_alloc_bus;
+	}
+
+	bus_priv = kzalloc(sizeof(struct cam_vfe_bus_ver2_priv),
+		GFP_KERNEL);
+	if (!bus_priv) {
+		CDBG("Failed to alloc for vfe_bus_priv\n");
+		rc = -ENOMEM;
+		goto err_alloc_priv;
+	}
+	vfe_bus_local->bus_priv = bus_priv;
+
+	bus_priv->common_data.mem_base           = mem_base;
+	bus_priv->common_data.hw_intf            = hw_intf;
+	bus_priv->common_data.vfe_irq_controller = vfe_irq_controller;
+	bus_priv->common_data.common_reg         = &ver2_hw_info->common_reg;
+
+	INIT_LIST_HEAD(&bus_priv->free_comp_grp);
+	INIT_LIST_HEAD(&bus_priv->free_dual_comp_grp);
+	INIT_LIST_HEAD(&bus_priv->used_comp_grp);
+
+	for (i = 0; i < CAM_VFE_BUS_VER2_MAX_CLIENTS; i++) {
+		rc = cam_vfe_bus_init_wm_resource(i, bus_priv, bus_hw_info,
+			&bus_priv->bus_client[i]);
+		if (rc < 0) {
+			pr_err("Error! Init WM failed\n");
+			goto err_init_wm;
+		}
+	}
+
+	for (i = 0; i < CAM_VFE_BUS_VER2_COMP_GRP_MAX; i++) {
+		rc = cam_vfe_bus_init_comp_grp(i, bus_priv, bus_hw_info,
+			&bus_priv->comp_grp[i]);
+		if (rc < 0) {
+			pr_err("Error! Init Comp Grp failed\n");
+			goto err_init_comp_grp;
+		}
+	}
+
+	for (i = 0; i < CAM_VFE_BUS_VER2_VFE_OUT_MAX; i++) {
+		rc = cam_vfe_bus_init_vfe_out_resource(i, bus_priv, bus_hw_info,
+			&bus_priv->vfe_out[i]);
+		if (rc < 0) {
+			pr_err("Error! Init VFE Out failed\n");
+			goto err_init_vfe_out;
+		}
+	}
+
+	/* Seed the IRQ event payload pool.
+	 * NOTE(review): 128 is presumably the size of the evt_payload
+	 * array — replace with ARRAY_SIZE/named constant and confirm.
+	 */
+	INIT_LIST_HEAD(&bus_priv->free_payload_list);
+	for (i = 0; i < 128; i++) {
+		INIT_LIST_HEAD(&bus_priv->evt_payload[i].list);
+		list_add_tail(&bus_priv->evt_payload[i].list,
+			&bus_priv->free_payload_list);
+	}
+
+	vfe_bus_local->acquire_resource = cam_vfe_bus_acquire_vfe_out;
+	vfe_bus_local->release_resource = cam_vfe_bus_release_vfe_out;
+	vfe_bus_local->start_resource   = cam_vfe_bus_start_vfe_out;
+	vfe_bus_local->stop_resource    = cam_vfe_bus_stop_vfe_out;
+	vfe_bus_local->top_half_handler = cam_vfe_bus_ver2_handle_irq;
+	vfe_bus_local->bottom_half_handler = NULL;
+	vfe_bus_local->process_cmd      = cam_vfe_bus_process_cmd;
+
+	*vfe_bus = vfe_bus_local;
+
+	return rc;
+
+err_init_vfe_out:
+err_init_comp_grp:
+err_init_wm:
+	kfree(vfe_bus_local->bus_priv);
+err_alloc_priv:
+	kfree(vfe_bus_local);
+err_alloc_bus:
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h
new file mode 100644
index 0000000..e451174
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h
@@ -0,0 +1,188 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_VFE_BUS_VER2_H_
+#define _CAM_VFE_BUS_VER2_H_
+
+#include "cam_irq_controller.h"
+#include "cam_vfe_bus.h"
+
+#define CAM_VFE_BUS_VER2_MAX_CLIENTS 20
+
+/*
+ * enum cam_vfe_bus_ver2_vfe_core_id:
+ *
+ * @Brief:        Identifiers for the VFE cores; used to select the
+ *                intra-client mask of the partner core in dual-VFE mode.
+ */
+enum cam_vfe_bus_ver2_vfe_core_id {
+	CAM_VFE_BUS_VER2_VFE_CORE_0,
+	CAM_VFE_BUS_VER2_VFE_CORE_1,
+	CAM_VFE_BUS_VER2_VFE_CORE_2,
+	CAM_VFE_BUS_VER2_VFE_CORE_MAX,
+};
+
+/*
+ * enum cam_vfe_bus_ver2_comp_grp_type:
+ *
+ * @Brief:        Composite group identifiers. GRP_0..GRP_5 are regular
+ *                single-VFE groups; GRP_DUAL_0..GRP_DUAL_5 are groups
+ *                used for dual-VFE (master/slave) operation.
+ */
+enum cam_vfe_bus_ver2_comp_grp_type {
+	CAM_VFE_BUS_VER2_COMP_GRP_0,
+	CAM_VFE_BUS_VER2_COMP_GRP_1,
+	CAM_VFE_BUS_VER2_COMP_GRP_2,
+	CAM_VFE_BUS_VER2_COMP_GRP_3,
+	CAM_VFE_BUS_VER2_COMP_GRP_4,
+	CAM_VFE_BUS_VER2_COMP_GRP_5,
+	CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0,
+	CAM_VFE_BUS_VER2_COMP_GRP_DUAL_1,
+	CAM_VFE_BUS_VER2_COMP_GRP_DUAL_2,
+	CAM_VFE_BUS_VER2_COMP_GRP_DUAL_3,
+	CAM_VFE_BUS_VER2_COMP_GRP_DUAL_4,
+	CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5,
+	CAM_VFE_BUS_VER2_COMP_GRP_MAX,
+};
+
+/*
+ * enum cam_vfe_bus_ver2_vfe_out_type:
+ *
+ * @Brief:        Logical VFE bus output paths: image paths (FULL, DS4,
+ *                DS16, RAW_DUMP, FD, PDAF), raw data interfaces
+ *                (RDI0..2), and the stats outputs.
+ */
+enum cam_vfe_bus_ver2_vfe_out_type {
+	CAM_VFE_BUS_VER2_VFE_OUT_FULL,
+	CAM_VFE_BUS_VER2_VFE_OUT_DS4,
+	CAM_VFE_BUS_VER2_VFE_OUT_DS16,
+	CAM_VFE_BUS_VER2_VFE_OUT_RAW_DUMP,
+	CAM_VFE_BUS_VER2_VFE_OUT_FD,
+	CAM_VFE_BUS_VER2_VFE_OUT_PDAF,
+	CAM_VFE_BUS_VER2_VFE_OUT_RDI0,
+	CAM_VFE_BUS_VER2_VFE_OUT_RDI1,
+	CAM_VFE_BUS_VER2_VFE_OUT_RDI2,
+	CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE,
+	CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST,
+	CAM_VFE_BUS_VER2_VFE_OUT_STATS_TL_BG,
+	CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF,
+	CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG,
+	CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST,
+	CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS,
+	CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS,
+	CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST,
+	CAM_VFE_BUS_VER2_VFE_OUT_MAX,
+};
+
+/*
+ * struct cam_vfe_bus_ver2_reg_offset_common:
+ *
+ * @Brief:        Common registers across all BUS Clients. All fields are
+ *                register offsets relative to the bus mem_base, except
+ *                @irq_reg_info which embeds the IRQ controller register
+ *                description (mask/status/clear sets, global clear).
+ */
+struct cam_vfe_bus_ver2_reg_offset_common {
+	uint32_t hw_version;
+	uint32_t hw_capability;
+	uint32_t sw_reset;
+	uint32_t cgc_ovd;
+	uint32_t pwr_iso_cfg;
+	uint32_t dual_master_comp_cfg;
+	struct cam_irq_controller_reg_info irq_reg_info;
+	uint32_t comp_error_status;
+	uint32_t comp_ovrwr_status;
+	uint32_t dual_comp_error_status;
+	uint32_t dual_comp_ovrwr_status;
+	uint32_t addr_sync_cfg;
+	uint32_t addr_syn_frame_hdr;
+	uint32_t addr_syn_no_sync;
+};
+
+/*
+ * struct cam_vfe_bus_ver2_reg_offset_ubwc_client:
+ *
+ * @Brief:        UBWC (bandwidth compression) register offsets for BUS
+ *                Clients that support compressed output.
+ */
+struct cam_vfe_bus_ver2_reg_offset_ubwc_client {
+	uint32_t tile_cfg;
+	uint32_t h_init;
+	uint32_t v_init;
+	uint32_t meta_addr;
+	uint32_t meta_offset;
+	uint32_t meta_stride;
+	uint32_t mode_cfg;
+};
+
+/*
+ * struct cam_vfe_bus_ver2_reg_offset_bus_client:
+ *
+ * @Brief:        Register offsets for BUS Clients (write masters).
+ *                @ubwc_regs is NULL for clients without UBWC support.
+ */
+struct cam_vfe_bus_ver2_reg_offset_bus_client {
+	uint32_t status0;
+	uint32_t status1;
+	uint32_t cfg;
+	uint32_t header_addr;
+	uint32_t header_cfg;
+	uint32_t image_addr;
+	uint32_t image_addr_offset;
+	uint32_t buffer_width_cfg;
+	uint32_t buffer_height_cfg;
+	uint32_t packer_cfg;
+	uint32_t stride;
+	uint32_t irq_subsample_period;
+	uint32_t irq_subsample_pattern;
+	uint32_t framedrop_period;
+	uint32_t framedrop_pattern;
+	uint32_t frame_inc;
+	uint32_t burst_limit;
+	struct cam_vfe_bus_ver2_reg_offset_ubwc_client *ubwc_regs;
+};
+
+/*
+ * struct cam_vfe_bus_ver2_reg_offset_comp_grp:
+ *
+ * @Brief:        Register offsets for Composite Group registers
+ *
+ * @comp_mask:    Offset of the composite mask register selecting which
+ *                bus clients participate in this group's buf done.
+ */
+struct cam_vfe_bus_ver2_reg_offset_comp_grp {
+	uint32_t                            comp_mask;
+};
+
+/*
+ * struct cam_vfe_bus_ver2_vfe_out_hw_info:
+ *
+ * @Brief:        HW capability of VFE Bus Client
+ *
+ * @vfe_out_type: Which logical output path this capability describes
+ * @max_width:    Maximum supported output width in pixels
+ * @max_height:   Maximum supported output height in lines
+ */
+struct cam_vfe_bus_ver2_vfe_out_hw_info {
+	enum cam_vfe_bus_ver2_vfe_out_type  vfe_out_type;
+	uint32_t                            max_width;
+	uint32_t                            max_height;
+};
+
+/*
+ * struct cam_vfe_bus_ver2_hw_info:
+ *
+ * @Brief:            HW register info for entire Bus
+ *
+ * @common_reg:       Common register details
+ * @bus_client_reg:   Bus client register info, one entry per write master
+ * @comp_grp_reg:     Composite group register info, indexed by group type
+ * @vfe_out_hw_info:  VFE output capability, indexed by output type
+ */
+struct cam_vfe_bus_ver2_hw_info {
+	struct cam_vfe_bus_ver2_reg_offset_common  common_reg;
+	struct cam_vfe_bus_ver2_reg_offset_bus_client
+		bus_client_reg[CAM_VFE_BUS_VER2_MAX_CLIENTS];
+	struct cam_vfe_bus_ver2_reg_offset_comp_grp
+		comp_grp_reg[CAM_VFE_BUS_VER2_COMP_GRP_MAX];
+	struct cam_vfe_bus_ver2_vfe_out_hw_info
+		vfe_out_hw_info[CAM_VFE_BUS_VER2_VFE_OUT_MAX];
+};
+
+/*
+ * cam_vfe_bus_ver2_init()
+ *
+ * @Brief:                   Initialize Bus layer
+ *
+ * @mem_base:                Mapped base address of register space
+ * @hw_intf:                 HW Interface of HW to which this resource belongs
+ * @bus_hw_info:             BUS HW info that contains details of BUS registers
+ * @vfe_irq_controller:      VFE IRQ Controller to use for subscribing to Top
+ *                           level IRQs
+ * @vfe_bus:                 Pointer to vfe_bus structure which will be filled
+ *                           and returned on successful initialize
+ *
+ * @Return:                  0 on success, negative errno on failure
+ */
+int cam_vfe_bus_ver2_init(void __iomem   *mem_base,
+	struct cam_hw_intf                   *hw_intf,
+	void                                 *bus_hw_info,
+	void                                 *vfe_irq_controller,
+	struct cam_vfe_bus                  **vfe_bus);
+
+#endif /* _CAM_VFE_BUS_VER2_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include/cam_vfe_bus.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include/cam_vfe_bus.h
new file mode 100644
index 0000000..d202c13
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include/cam_vfe_bus.h
@@ -0,0 +1,78 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_VFE_BUS_H_
+#define _CAM_VFE_BUS_H_
+
+#include <uapi/media/cam_isp.h>
+#include "cam_isp_hw.h"
+
+#define CAM_VFE_BUS_VER_1_0 0x1000
+#define CAM_VFE_BUS_VER_2_0 0x2000
+
+/* Plane index for multi-plane (e.g. YUV) bus output formats */
+enum cam_vfe_bus_plane_type {
+	PLANE_Y,
+	PLANE_C,
+	PLANE_MAX,
+};
+
+/*
+ * struct cam_vfe_bus:
+ *
+ * @Brief:                   Bus interface structure
+ *
+ * @bus_priv:                Private data of BUS
+ * @acquire_resource:        Function pointer for acquiring BUS output resource
+ * @release_resource:        Function pointer for releasing BUS resource
+ * @start_resource:          Function for starting BUS Output resource
+ * @stop_resource:           Function for stopping BUS Output resource
+ * @process_cmd:             Function to process commands specific to BUS
+ *                           resources
+ * @top_half_handler:        Top Half handler function (IRQ context)
+ * @bottom_half_handler:     Bottom Half handler function (deferred context)
+ */
+struct cam_vfe_bus {
+	void               *bus_priv;
+
+	int (*acquire_resource)(void *bus_priv, void *acquire_args);
+	int (*release_resource)(void *bus_priv,
+		struct cam_isp_resource_node *vfe_out);
+	int (*start_resource)(struct cam_isp_resource_node *vfe_out);
+	int (*stop_resource)(struct cam_isp_resource_node *vfe_out);
+	int (*process_cmd)(void *priv, uint32_t cmd_type, void *cmd_args,
+		uint32_t arg_size);
+	CAM_IRQ_HANDLER_TOP_HALF       top_half_handler;
+	CAM_IRQ_HANDLER_BOTTOM_HALF    bottom_half_handler;
+};
+
+/*
+ * cam_vfe_bus_init()
+ *
+ * @Brief:                   Initialize Bus layer
+ *
+ * @bus_version:             Version of BUS to initialize
+ * @mem_base:                Mapped base address of register space
+ * @hw_intf:                 HW Interface of HW to which this resource belongs
+ * @bus_hw_info:             BUS HW info that contains details of BUS registers
+ * @vfe_irq_controller:      VFE IRQ Controller to use for subscribing to Top
+ *                           level IRQs
+ * @vfe_bus:                 Pointer to vfe_bus structure which will be filled
+ *                           and returned on successful initialize
+ */
+int cam_vfe_bus_init(uint32_t          bus_version,
+	void __iomem                  *mem_base,
+	struct cam_hw_intf            *hw_intf,
+	void                          *bus_hw_info,
+	void                          *vfe_irq_controller,
+	struct cam_vfe_bus            **vfe_bus);
+
+#endif /* _CAM_VFE_BUS_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile
new file mode 100644
index 0000000..0a94746
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile
@@ -0,0 +1,10 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_vfe_top.o cam_vfe_top_ver2.o cam_vfe_camif_ver2.o cam_vfe_rdi.o
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
new file mode 100644
index 0000000..3f3c2a3
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
@@ -0,0 +1,257 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/slab.h>
+#include <uapi/media/cam_isp.h>
+#include "cam_io_util.h"
+#include "cam_isp_hw_mgr_intf.h"
+#include "cam_vfe_hw_intf.h"
+#include "cam_vfe_top.h"
+#include "cam_vfe_top_ver2.h"
+#include "cam_vfe_camif_ver2.h"
+
+#undef  CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+struct cam_vfe_mux_camif_data {
+	void __iomem                                *mem_base;
+	struct cam_hw_intf                          *hw_intf;
+	struct cam_vfe_camif_ver2_reg               *camif_reg;
+	struct cam_vfe_top_ver2_reg_offset_common   *common_reg;
+	struct cam_vfe_camif_reg_data               *reg_data;
+
+	enum cam_isp_hw_sync_mode          sync_mode;
+	uint32_t                           pix_pattern;
+	uint32_t                           first_pixel;
+	uint32_t                           first_line;
+	uint32_t                           last_pixel;
+	uint32_t                           last_line;
+};
+
+/*
+ * Check that @pattern is one of the Bayer or YUV interleave orders the
+ * CAMIF hardware supports; returns 0 if valid, -EINVAL otherwise.
+ */
+static int cam_vfe_camif_validate_pix_pattern(uint32_t pattern)
+{
+	switch (pattern) {
+	case CAM_ISP_PATTERN_BAYER_RGRGRG:
+	case CAM_ISP_PATTERN_BAYER_GRGRGR:
+	case CAM_ISP_PATTERN_BAYER_BGBGBG:
+	case CAM_ISP_PATTERN_BAYER_GBGBGB:
+	case CAM_ISP_PATTERN_YUV_YCBYCR:
+	case CAM_ISP_PATTERN_YUV_YCRYCB:
+	case CAM_ISP_PATTERN_YUV_CBYCRY:
+	case CAM_ISP_PATTERN_YUV_CRYCBY:
+		return 0;
+	default:
+		pr_err("Error! Invalid pix pattern:%d\n", pattern);
+		return -EINVAL;
+	}
+}
+
+/*
+ * Validate the requested pixel pattern and latch the acquire-time CAMIF
+ * configuration (sync mode, pattern and crop window) into res_priv.
+ */
+int cam_vfe_camif_ver2_acquire_resource(
+	struct cam_isp_resource_node  *camif_res,
+	void                          *acquire_param)
+{
+	struct cam_vfe_mux_camif_data *res_data =
+		(struct cam_vfe_mux_camif_data *)camif_res->res_priv;
+	struct cam_vfe_acquire_args   *args =
+		(struct cam_vfe_acquire_args *)acquire_param;
+	int rc;
+
+	rc = cam_vfe_camif_validate_pix_pattern(
+			args->vfe_in.in_port->test_pattern);
+	if (rc)
+		return rc;
+
+	res_data->sync_mode   = args->vfe_in.sync_mode;
+	res_data->pix_pattern = args->vfe_in.in_port->test_pattern;
+	res_data->first_pixel = args->vfe_in.in_port->left_start;
+	res_data->last_pixel  = args->vfe_in.in_port->left_stop;
+	res_data->first_line  = args->vfe_in.in_port->line_start;
+	res_data->last_line   = args->vfe_in.in_port->line_stop;
+
+	return 0;
+}
+
+/*
+ * Program the CAMIF block and move the resource to STREAMING.
+ * Must be called on a resource in RESERVED state; writes core config,
+ * skip patterns, epoch config and finally triggers a reg-update.
+ */
+static int cam_vfe_camif_resource_start(
+	struct cam_isp_resource_node        *camif_res)
+{
+	struct cam_vfe_mux_camif_data       *rsrc_data;
+	uint32_t                             val = 0;
+
+	if (!camif_res) {
+		pr_err("Error! Invalid input arguments\n");
+		return -EINVAL;
+	}
+
+	if (camif_res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
+		pr_err("Error! Invalid camif res res_state:%d\n",
+			camif_res->res_state);
+		return -EINVAL;
+	}
+
+	rsrc_data = (struct cam_vfe_mux_camif_data  *)camif_res->res_priv;
+
+	/*config vfe core*/
+	val = (rsrc_data->pix_pattern <<
+			rsrc_data->reg_data->pixel_pattern_shift);
+	cam_io_w_mb(val, rsrc_data->mem_base + rsrc_data->common_reg->core_cfg);
+
+	/* Default CAMIF config; skip patterns of 0x1 mean "no skip" */
+	cam_io_w_mb(0x00400040, rsrc_data->mem_base +
+		rsrc_data->camif_reg->camif_config);
+	cam_io_w_mb(0x1, rsrc_data->mem_base +
+			rsrc_data->camif_reg->line_skip_pattern);
+	cam_io_w_mb(0x1, rsrc_data->mem_base +
+			rsrc_data->camif_reg->pixel_skip_pattern);
+	cam_io_w_mb(0x0, rsrc_data->mem_base +
+			rsrc_data->camif_reg->skip_period);
+	cam_io_w_mb(0x1, rsrc_data->mem_base +
+			rsrc_data->camif_reg->irq_subsample_pattern);
+
+	/* epoch config with 20 line */
+	cam_io_w_mb(0x00140014,
+		rsrc_data->mem_base + rsrc_data->camif_reg->epoch_irq);
+
+	camif_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
+	/* Reg Update */
+	/*
+	 * NOTE(review): 0x4AC is a hard-coded reg-update offset; it looks
+	 * like it should come from rsrc_data->camif_reg->reg_update_cmd.
+	 * Confirm against the VFE register header before changing.
+	 */
+	cam_io_w_mb(0x1, rsrc_data->mem_base + 0x4AC);
+
+	CDBG("Exit\n");
+	return 0;
+}
+
+
+/*
+ * Stop the CAMIF resource: a STREAMING resource is moved back to
+ * RESERVED; RESERVED/AVAILABLE resources are treated as already
+ * stopped. No hardware writes are performed here yet.
+ *
+ * Fix: removed locals camif_priv/camif_reg that were assigned but
+ * never used (dead code flagged by -Wunused-but-set-variable).
+ */
+static int cam_vfe_camif_resource_stop(
+	struct cam_isp_resource_node        *camif_res)
+{
+	if (!camif_res) {
+		pr_err("Error! Invalid input arguments\n");
+		return -EINVAL;
+	}
+
+	/* Nothing to do if the resource was never started */
+	if (camif_res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED ||
+		camif_res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE)
+		return 0;
+
+	if (camif_res->res_state == CAM_ISP_RESOURCE_STATE_STREAMING)
+		camif_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+
+	return 0;
+}
+
+/* No CAMIF-specific commands are supported yet; always -EPERM. */
+int cam_vfe_camif_process_cmd(void *priv,
+	uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
+{
+	return -EPERM;
+}
+
+/* Top-half IRQ stub: CAMIF IRQs are dispatched by the VFE top layer. */
+static int cam_vfe_camif_handle_irq_top_half(uint32_t evt_id,
+	struct cam_irq_th_payload *th_payload)
+{
+	return -EPERM;
+}
+
+/*
+ * Deferred (bottom-half) CAMIF IRQ handler. Matches the event id in the
+ * payload against the IRQ-status0 masks from reg_data and reports
+ * CAM_VFE_IRQ_STATUS_SUCCESS when the corresponding bit is set.
+ *
+ * NOTE(review): the event payload is returned to the free pool only on
+ * the EPOCH path (cam_vfe_put_evt_payload); SOF/REG_UPDATE appear to
+ * keep it — confirm the payload ownership with the dispatcher, this
+ * looks like a potential leak.
+ */
+static int cam_vfe_camif_handle_irq_bottom_half(void *handler_priv,
+	void *evt_payload_priv)
+{
+	int                                   ret = CAM_VFE_IRQ_STATUS_ERR;
+	struct cam_isp_resource_node         *camif_node;
+	struct cam_vfe_mux_camif_data        *camif_priv;
+	struct cam_vfe_top_irq_evt_payload   *payload;
+	uint32_t                              irq_status0;
+
+	if (!handler_priv || !evt_payload_priv)
+		return ret;
+
+	camif_node = handler_priv;
+	camif_priv = camif_node->res_priv;
+	payload = evt_payload_priv;
+	irq_status0 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
+
+	CDBG("event ID:%d\n", payload->evt_id);
+	CDBG("irq_status_0 = %x\n", irq_status0);
+
+	switch (payload->evt_id) {
+	case CAM_ISP_HW_EVENT_SOF:
+		if (irq_status0 & camif_priv->reg_data->sof_irq_mask) {
+			CDBG("Received SOF\n");
+			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+		}
+		break;
+	case CAM_ISP_HW_EVENT_EPOCH:
+		if (irq_status0 & camif_priv->reg_data->epoch0_irq_mask) {
+			CDBG("Received EPOCH\n");
+			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+		}
+		cam_vfe_put_evt_payload(payload->core_info, &payload);
+		break;
+	case CAM_ISP_HW_EVENT_REG_UPDATE:
+		if (irq_status0 & camif_priv->reg_data->reg_update_irq_mask) {
+			CDBG("Received REG_UPDATE_ACK\n");
+			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+		}
+		break;
+	default:
+		break;
+	}
+
+	CDBG("returing status = %d\n", ret);
+	return ret;
+}
+
+/*
+ * Allocate and wire up the CAMIF mux private data and install the
+ * resource callbacks on @camif_node. Returns 0 or -ENOMEM.
+ *
+ * Fix: allocation failure was logged with CDBG (pr_debug), which is
+ * compiled out / hidden by default — a real OOM must be pr_err.
+ */
+int cam_vfe_camif_ver2_init(
+	struct cam_hw_intf            *hw_intf,
+	struct cam_hw_soc_info        *soc_info,
+	void                          *camif_hw_info,
+	struct cam_isp_resource_node  *camif_node)
+{
+	struct cam_vfe_mux_camif_data     *camif_priv = NULL;
+	struct cam_vfe_camif_ver2_hw_info *camif_info = camif_hw_info;
+
+	camif_priv = kzalloc(sizeof(struct cam_vfe_mux_camif_data),
+			GFP_KERNEL);
+	if (!camif_priv) {
+		pr_err("Error! Failed to alloc for camif_priv\n");
+		return -ENOMEM;
+	}
+
+	camif_node->res_priv = camif_priv;
+
+	camif_priv->mem_base   = soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base;
+	camif_priv->camif_reg  = camif_info->camif_reg;
+	camif_priv->common_reg = camif_info->common_reg;
+	camif_priv->reg_data   = camif_info->reg_data;
+	camif_priv->hw_intf    = hw_intf;
+
+	camif_node->start = cam_vfe_camif_resource_start;
+	camif_node->stop  = cam_vfe_camif_resource_stop;
+	camif_node->top_half_handler = cam_vfe_camif_handle_irq_top_half;
+	camif_node->bottom_half_handler = cam_vfe_camif_handle_irq_bottom_half;
+
+	return 0;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h
new file mode 100644
index 0000000..cc6aab0
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h
@@ -0,0 +1,78 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_VFE_CAMIF_VER2_H_
+#define _CAM_VFE_CAMIF_VER2_H_
+
+#include "cam_isp_hw.h"
+#include "cam_vfe_top.h"
+
+struct cam_vfe_camif_ver2_reg {
+	uint32_t     camif_cmd;
+	uint32_t     camif_config;
+	uint32_t     line_skip_pattern;
+	uint32_t     pixel_skip_pattern;
+	uint32_t     skip_period;
+	uint32_t     irq_subsample_pattern;
+	uint32_t     epoch_irq;
+	uint32_t     raw_crop_width_cfg;
+	uint32_t     raw_crop_height_cfg;
+	uint32_t     reg_update_cmd;
+};
+
+struct cam_vfe_camif_reg_data {
+	uint32_t     raw_crop_first_pixel_shift;
+	uint32_t     raw_crop_first_pixel_mask;
+
+	uint32_t     raw_crop_last_pixel_shift;
+	uint32_t     raw_crop_last_pixel_mask;
+
+	uint32_t     raw_crop_first_line_shift;
+	uint32_t     raw_crop_first_line_mask;
+
+	uint32_t     raw_crop_last_line_shift;
+	uint32_t     raw_crop_last_line_mask;
+
+	uint32_t     input_mux_sel_shift;
+	uint32_t     input_mux_sel_mask;
+	uint32_t     extern_reg_update_shift;
+	uint32_t     extern_reg_update_mask;
+
+	uint32_t     pixel_pattern_shift;
+	uint32_t     pixel_pattern_mask;
+
+	uint32_t     epoch_line_cfg;
+	uint32_t     sof_irq_mask;
+	uint32_t     epoch0_irq_mask;
+	uint32_t     reg_update_irq_mask;
+};
+
+struct cam_vfe_camif_ver2_hw_info {
+	struct cam_vfe_top_ver2_reg_offset_common   *common_reg;
+	struct cam_vfe_camif_ver2_reg               *camif_reg;
+	struct cam_vfe_camif_reg_data               *reg_data;
+};
+
+int cam_vfe_camif_ver2_acquire_resource(
+	struct cam_isp_resource_node  *camif_res,
+	void                          *acquire_param);
+
+int cam_vfe_camif_process_cmd(void *priv,
+	uint32_t cmd_type, void *cmd_args, uint32_t arg_size);
+
+int cam_vfe_camif_ver2_init(
+	struct cam_hw_intf            *hw_intf,
+	struct cam_hw_soc_info        *soc_info,
+	void                          *camif_hw_info,
+	struct cam_isp_resource_node  *camif_node);
+
+#endif /* _CAM_VFE_CAMIF_VER2_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
new file mode 100644
index 0000000..5f77a7c
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
@@ -0,0 +1,189 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/slab.h>
+#include "cam_vfe_rdi.h"
+#include "cam_isp_hw_mgr_intf.h"
+#include "cam_vfe_hw_intf.h"
+#include "cam_io_util.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+struct cam_vfe_mux_rdi_data {
+	void __iomem                                *mem_base;
+	struct cam_hw_intf                          *hw_intf;
+	struct cam_vfe_top_ver2_reg_offset_common   *common_reg;
+
+	enum cam_isp_hw_sync_mode          sync_mode;
+};
+
+/* Latch the acquire-time sync mode into the RDI mux private data. */
+int cam_vfe_rdi_ver2_acquire_resource(
+	struct cam_isp_resource_node  *rdi_res,
+	void                          *acquire_param)
+{
+	struct cam_vfe_mux_rdi_data *res_data =
+		(struct cam_vfe_mux_rdi_data *)rdi_res->res_priv;
+	struct cam_vfe_acquire_args *args =
+		(struct cam_vfe_acquire_args *)acquire_param;
+
+	res_data->sync_mode = args->vfe_in.sync_mode;
+
+	return 0;
+}
+
+/*
+ * Move a RESERVED RDI resource to STREAMING and trigger a reg-update.
+ * Unlike CAMIF, RDI needs no extra register programming here.
+ */
+static int cam_vfe_rdi_resource_start(
+	struct cam_isp_resource_node  *rdi_res)
+{
+	struct cam_vfe_mux_rdi_data   *rsrc_data;
+	int                            rc = 0;
+
+	if (!rdi_res) {
+		pr_err("Error! Invalid input arguments\n");
+		return -EINVAL;
+	}
+
+	if (rdi_res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
+		pr_err("Error! Invalid rdi res res_state:%d\n",
+			rdi_res->res_state);
+		return -EINVAL;
+	}
+
+	rsrc_data = (struct cam_vfe_mux_rdi_data  *)rdi_res->res_priv;
+	rdi_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
+	/* Reg Update */
+	/*
+	 * NOTE(review): 0x2 and offset 0x4AC are hard-coded; 0x2 looks like
+	 * the RDI0 reg-update bit and 0x4AC the reg_update_cmd offset —
+	 * confirm against the VFE register header; other RDIs would need
+	 * different bits.
+	 */
+	cam_io_w_mb(0x2, rsrc_data->mem_base + 0x4AC);
+
+	CDBG("Exit\n");
+
+	return rc;
+}
+
+
+/*
+ * Stop the RDI resource: a STREAMING resource goes back to RESERVED;
+ * RESERVED/AVAILABLE resources are already stopped. No HW writes.
+ *
+ * Fix: removed local rdi_priv, which was assigned but never used.
+ */
+static int cam_vfe_rdi_resource_stop(
+	struct cam_isp_resource_node        *rdi_res)
+{
+	if (!rdi_res) {
+		pr_err("Error! Invalid input arguments\n");
+		return -EINVAL;
+	}
+
+	/* Nothing to do if the resource was never started */
+	if (rdi_res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED ||
+		rdi_res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE)
+		return 0;
+
+	if (rdi_res->res_state == CAM_ISP_RESOURCE_STATE_STREAMING)
+		rdi_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+
+	return 0;
+}
+
+/*
+ * Process an RDI-specific command. No commands are implemented yet,
+ * so every valid call is rejected with -EINVAL after logging.
+ */
+int cam_vfe_rdi_process_cmd(void *priv,
+	uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
+{
+	if (!priv || !cmd_args) {
+		pr_err("Error! Invalid input arguments\n");
+		return -EINVAL;
+	}
+
+	pr_err("Error! unsupported RDI process command:%d\n", cmd_type);
+
+	return -EINVAL;
+}
+
+/* Top-half IRQ stub: RDI IRQs are dispatched by the VFE top layer. */
+static int cam_vfe_rdi_handle_irq_top_half(uint32_t evt_id,
+	struct cam_irq_th_payload *th_payload)
+{
+	return -EPERM;
+}
+
+/*
+ * Deferred (bottom-half) RDI IRQ handler: reports success when the
+ * expected bit for the event is set in IRQ status0.
+ *
+ * NOTE(review): masks 0x8000000 (SOF) and 0x20 (reg-update) are
+ * hard-coded here, while the CAMIF handler reads them from reg_data;
+ * struct cam_vfe_mux_rdi_data has no reg_data member yet — consider
+ * adding one for consistency. Confirm the bit positions against the
+ * VFE IRQ register layout.
+ */
+static int cam_vfe_rdi_handle_irq_bottom_half(void *handler_priv,
+	void *evt_payload_priv)
+{
+	int                                  ret = CAM_VFE_IRQ_STATUS_ERR;
+	struct cam_isp_resource_node        *rdi_node;
+	struct cam_vfe_mux_rdi_data         *rdi_priv;
+	struct cam_vfe_top_irq_evt_payload  *payload;
+	uint32_t                             irq_status0;
+
+	if (!handler_priv || !evt_payload_priv)
+		return ret;
+
+	rdi_node = handler_priv;
+	rdi_priv = rdi_node->res_priv;
+	payload = evt_payload_priv;
+	irq_status0 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
+
+	CDBG("event ID:%d\n", payload->evt_id);
+	CDBG("irq_status_0 = %x\n", irq_status0);
+
+	switch (payload->evt_id) {
+	case CAM_ISP_HW_EVENT_SOF:
+		if (irq_status0 & 0x8000000)
+			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+		break;
+	case CAM_ISP_HW_EVENT_REG_UPDATE:
+		if (irq_status0 & 0x20)
+			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+		break;
+	default:
+		break;
+	}
+
+	CDBG("returing status = %d\n", ret);
+	return ret;
+}
+
+/*
+ * Allocate and wire up the RDI mux private data and install the
+ * resource callbacks on @rdi_node. Returns 0 or -ENOMEM.
+ * Note: rdi_hw_info is currently unused (no per-RDI register data yet).
+ *
+ * Fix: allocation failure was logged with CDBG (pr_debug), which is
+ * hidden by default — a real OOM must be pr_err.
+ */
+int cam_vfe_rdi_ver2_init(
+	struct cam_hw_intf            *hw_intf,
+	struct cam_hw_soc_info        *soc_info,
+	void                          *rdi_hw_info,
+	struct cam_isp_resource_node  *rdi_node)
+{
+	struct cam_vfe_mux_rdi_data     *rdi_priv = NULL;
+
+	rdi_priv = kzalloc(sizeof(struct cam_vfe_mux_rdi_data),
+			GFP_KERNEL);
+	if (!rdi_priv) {
+		pr_err("Error! Failed to alloc for rdi_priv\n");
+		return -ENOMEM;
+	}
+
+	rdi_node->res_priv = rdi_priv;
+
+	rdi_priv->mem_base   = soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base;
+	rdi_priv->hw_intf    = hw_intf;
+
+	rdi_node->start = cam_vfe_rdi_resource_start;
+	rdi_node->stop  = cam_vfe_rdi_resource_stop;
+	rdi_node->top_half_handler = cam_vfe_rdi_handle_irq_top_half;
+	rdi_node->bottom_half_handler = cam_vfe_rdi_handle_irq_bottom_half;
+
+	return 0;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.h
new file mode 100644
index 0000000..967cec3
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.h
@@ -0,0 +1,46 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_VFE_RDI_H_
+#define _CAM_VFE_RDI_H_
+
+#include "cam_isp_hw.h"
+#include "cam_vfe_top.h"
+
+/* Per-RDI register offsets (only the reg-update trigger for now) */
+struct cam_vfe_rdi_ver2_reg {
+	uint32_t     reg_update_cmd;
+};
+
+/* Per-RDI IRQ mask values */
+struct cam_vfe_rdi_reg_data {
+	uint32_t     reg_update_irq_mask;
+};
+
+/* Aggregate HW info handed to cam_vfe_rdi_ver2_init() */
+struct cam_vfe_rdi_ver2_hw_info {
+	struct cam_vfe_top_ver2_reg_offset_common   *common_reg;
+	struct cam_vfe_rdi_ver2_reg               *rdi_reg;
+	struct cam_vfe_rdi_reg_data               *reg_data;
+};
+
+int cam_vfe_rdi_ver2_acquire_resource(
+	struct cam_isp_resource_node  *rdi_res,
+	void                          *acquire_param);
+
+int cam_vfe_rdi_process_cmd(void *priv,
+	uint32_t cmd_type, void *cmd_args, uint32_t arg_size);
+
+int cam_vfe_rdi_ver2_init(
+	struct cam_hw_intf            *hw_intf,
+	struct cam_hw_soc_info        *soc_info,
+	void                          *rdi_hw_info,
+	struct cam_isp_resource_node  *rdi_node);
+
+#endif /* _CAM_VFE_RDI_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c
new file mode 100644
index 0000000..e2bceb8
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c
@@ -0,0 +1,38 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include "cam_vfe_top.h"
+#include "cam_vfe_top_ver2.h"
+
+/*
+ * Version-dispatching constructor for the VFE top layer.
+ * Currently only CAM_VFE_TOP_VER_2_0 is supported; any other version
+ * is rejected with -EINVAL.
+ */
+int cam_vfe_top_init(uint32_t          top_version,
+	struct cam_hw_soc_info        *soc_info,
+	struct cam_hw_intf            *hw_intf,
+	void                          *top_hw_info,
+	struct cam_vfe_top            **vfe_top)
+{
+	if (top_version == CAM_VFE_TOP_VER_2_0)
+		return cam_vfe_top_ver2_init(soc_info, hw_intf, top_hw_info,
+			vfe_top);
+
+	pr_err("Error! Unsupported Version %x\n", top_version);
+	return -EINVAL;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
new file mode 100644
index 0000000..3ef4f49
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
@@ -0,0 +1,425 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/slab.h>
+#include "cam_io_util.h"
+#include "cam_cdm_util.h"
+#include "cam_vfe_hw_intf.h"
+#include "cam_vfe_top.h"
+#include "cam_vfe_top_ver2.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+/* Shared references used by all top-ver2 operations */
+struct cam_vfe_top_ver2_common_data {
+	struct cam_hw_soc_info                     *soc_info;
+	struct cam_hw_intf                         *hw_intf;
+	struct cam_vfe_top_ver2_reg_offset_common  *common_reg;
+};
+
+/* Private state of the ver2 top layer: common data plus one
+ * resource node per input mux (CAMIF + RDIs).
+ */
+struct cam_vfe_top_ver2_priv {
+	struct cam_vfe_top_ver2_common_data common_data;
+	struct cam_vfe_camif               *camif;
+	struct cam_isp_resource_node        mux_rsrc[CAM_VFE_TOP_VER2_MUX_MAX];
+};
+
+/*
+ * Write a CDM "change base" command for the VFE core register block
+ * into the caller-supplied command buffer. cdm_args->size is in bytes
+ * while the CDM size helpers return dwords, hence the *4 conversions.
+ */
+static int cam_vfe_top_mux_get_base(struct cam_vfe_top_ver2_priv *top_priv,
+	void *cmd_args, uint32_t arg_size)
+{
+	uint32_t                          size = 0;
+	uint32_t                          mem_base = 0;
+	struct cam_isp_hw_get_cdm_args   *cdm_args  = cmd_args;
+	struct cam_cdm_utils_ops         *cdm_util_ops = NULL;
+
+	if (arg_size != sizeof(struct cam_isp_hw_get_cdm_args)) {
+		pr_err("Error! Invalid cmd size\n");
+		return -EINVAL;
+	}
+
+	if (!cdm_args || !cdm_args->res || !top_priv ||
+		!top_priv->common_data.soc_info) {
+		pr_err("Error! Invalid args\n");
+		return -EINVAL;
+	}
+
+	cdm_util_ops =
+		(struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops;
+
+	if (!cdm_util_ops) {
+		pr_err("Invalid CDM ops\n");
+		return -EINVAL;
+	}
+
+	size = cdm_util_ops->cdm_required_size_changebase();
+	/* since cdm returns dwords, we need to convert it into bytes */
+	if ((size * 4) > cdm_args->size) {
+		pr_err("buf size:%d is not sufficient, expected: %d\n",
+			cdm_args->size, size);
+		return -EINVAL;
+	}
+
+	mem_base = CAM_SOC_GET_REG_MAP_CAM_BASE(
+		top_priv->common_data.soc_info, VFE_CORE_BASE_IDX);
+	CDBG("core %d mem_base 0x%x\n", top_priv->common_data.soc_info->index,
+		mem_base);
+
+	cdm_util_ops->cdm_write_changebase(cdm_args->cmd_buf_addr, mem_base);
+	cdm_args->used_bytes = (size * 4);
+
+	return 0;
+}
+
+/*
+ * Write a CDM reg-random command that triggers a reg-update for the
+ * requested mux resource: CAMIF uses BIT(0), RDIn uses BIT(n + 1).
+ */
+static int cam_vfe_top_mux_get_reg_update(
+	struct cam_vfe_top_ver2_priv *top_priv,
+	void *cmd_args, uint32_t arg_size)
+{
+	uint32_t                          size = 0;
+	uint32_t                          reg_val_pair[2];
+	struct cam_isp_hw_get_cdm_args   *cdm_args = cmd_args;
+	struct cam_cdm_utils_ops         *cdm_util_ops = NULL;
+
+	if (arg_size != sizeof(struct cam_isp_hw_get_cdm_args)) {
+		pr_err("Error! Invalid cmd size\n");
+		return -EINVAL;
+	}
+
+	if (!cdm_args || !cdm_args->res) {
+		pr_err("Error! Invalid args\n");
+		return -EINVAL;
+	}
+
+	cdm_util_ops = (struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops;
+
+	if (!cdm_util_ops) {
+		pr_err("Error! Invalid CDM ops\n");
+		return -EINVAL;
+	}
+
+	size = cdm_util_ops->cdm_required_size_reg_random(1);
+	/* since cdm returns dwords, we need to convert it into bytes */
+	if ((size * 4) > cdm_args->size) {
+		pr_err("Error! buf size:%d is not sufficient, expected: %d\n",
+			cdm_args->size, size);
+		return -EINVAL;
+	}
+
+	reg_val_pair[0] = top_priv->common_data.common_reg->reg_update_cmd;
+
+	if (cdm_args->res->res_id == CAM_ISP_HW_VFE_IN_CAMIF)
+		reg_val_pair[1] = BIT(0);
+	else {
+		/*
+		 * NOTE(review): res_id is assumed to be an RDI here; a
+		 * non-CAMIF, non-RDI id would compute a bogus bit —
+		 * confirm callers only pass CAMIF/RDI resources.
+		 */
+		uint32_t rdi_num = cdm_args->res->res_id -
+			CAM_ISP_HW_VFE_IN_RDI0;
+		/* RDI reg_update starts at BIT 1, so add 1 */
+		reg_val_pair[1] = BIT(rdi_num + 1);
+	}
+
+	cdm_util_ops->cdm_write_regrandom(cdm_args->cmd_buf_addr,
+		1, reg_val_pair);
+
+	cdm_args->used_bytes = size * 4;
+
+	return 0;
+}
+
+/* Capability query is not implemented for the top layer yet. */
+int cam_vfe_top_get_hw_caps(void *device_priv,
+	void *get_hw_cap_args, uint32_t arg_size)
+{
+	return -EPERM;
+}
+
+/* HW init is handled elsewhere; this entry point is a stub. */
+int cam_vfe_top_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	return -EPERM;
+}
+
+/*
+ * Reset the VFE core: mask all IRQs except the reset-done bit, then
+ * issue the global reset command.
+ *
+ * Fix: (1 << 31) left-shifts into the sign bit of a signed int, which
+ * is undefined behavior in C — use an unsigned literal (1U << 31).
+ */
+int cam_vfe_top_reset(void *device_priv,
+	void *reset_core_args, uint32_t arg_size)
+{
+	struct cam_vfe_top_ver2_priv   *top_priv = device_priv;
+	struct cam_hw_soc_info         *soc_info = NULL;
+	struct cam_vfe_top_ver2_reg_offset_common *reg_common = NULL;
+
+	if (!top_priv) {
+		pr_err("Invalid arguments\n");
+		return -EINVAL;
+	}
+
+	soc_info = top_priv->common_data.soc_info;
+	reg_common = top_priv->common_data.common_reg;
+
+	/* Mask All the IRQs except RESET */
+	/*
+	 * NOTE(review): offset 0x5C is hard-coded; it looks like the
+	 * IRQ mask register — confirm and move into common_reg.
+	 */
+	cam_io_w_mb((1U << 31),
+		CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX) + 0x5C);
+
+	/* Reset HW */
+	cam_io_w_mb(0x00003F9F,
+		CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX) +
+		reg_common->global_reset_cmd);
+
+	CDBG("Reset HW exit\n");
+	return 0;
+}
+
+/*
+ * Reserve the mux resource matching the requested res_id. For CAMIF
+ * the per-mux acquire hook is run first (it can fail on a bad pixel
+ * pattern); on success the node is moved to RESERVED and returned via
+ * acquire_args->rsrc_node. Returns -EINVAL when no AVAILABLE node with
+ * the requested id exists.
+ */
+int cam_vfe_top_reserve(void *device_priv,
+	void *reserve_args, uint32_t arg_size)
+{
+	struct cam_vfe_top_ver2_priv            *top_priv;
+	struct cam_vfe_acquire_args             *args;
+	struct cam_vfe_hw_vfe_in_acquire_args   *acquire_args;
+	uint32_t i;
+	int rc = -EINVAL;
+
+	if (!device_priv || !reserve_args) {
+		pr_err("Error! Invalid input arguments\n");
+		return -EINVAL;
+	}
+
+	top_priv = (struct cam_vfe_top_ver2_priv   *)device_priv;
+	args = (struct cam_vfe_acquire_args *)reserve_args;
+	acquire_args = &args->vfe_in;
+
+
+	for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
+		if (top_priv->mux_rsrc[i].res_id ==  acquire_args->res_id &&
+			top_priv->mux_rsrc[i].res_state ==
+			CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+
+			/* CAMIF needs extra per-mux validation/config */
+			if (acquire_args->res_id == CAM_ISP_HW_VFE_IN_CAMIF) {
+				rc = cam_vfe_camif_ver2_acquire_resource(
+					&top_priv->mux_rsrc[i],
+					args);
+				if (rc)
+					break;
+			}
+
+			top_priv->mux_rsrc[i].cdm_ops = acquire_args->cdm_ops;
+			top_priv->mux_rsrc[i].tasklet_info = args->tasklet;
+			top_priv->mux_rsrc[i].res_state =
+				CAM_ISP_RESOURCE_STATE_RESERVED;
+			acquire_args->rsrc_node =
+				&top_priv->mux_rsrc[i];
+
+			rc = 0;
+			break;
+		}
+	}
+
+	return rc;
+
+}
+
+/*
+ * Release a mux resource back to AVAILABLE. Rejects resources that are
+ * not at least RESERVED.
+ *
+ * Fix: the CDBG printed "%s: " with __func__ although pr_fmt already
+ * prepends __func__, duplicating the function name in every log line.
+ */
+int cam_vfe_top_release(void *device_priv,
+	void *release_args, uint32_t arg_size)
+{
+	struct cam_vfe_top_ver2_priv            *top_priv;
+	struct cam_isp_resource_node            *mux_res;
+
+	if (!device_priv || !release_args) {
+		pr_err("Error! Invalid input arguments\n");
+		return -EINVAL;
+	}
+
+	top_priv = (struct cam_vfe_top_ver2_priv   *)device_priv;
+	mux_res = (struct cam_isp_resource_node *)release_args;
+
+	CDBG("Resource in state %d\n", mux_res->res_state);
+	if (mux_res->res_state < CAM_ISP_RESOURCE_STATE_RESERVED) {
+		pr_err("Error! Resource in Invalid res_state :%d\n",
+			mux_res->res_state);
+		return -EINVAL;
+	}
+	mux_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+
+	return 0;
+}
+
+/*
+ * Start a mux resource: CAMIF delegates to its own start hook, RDIs
+ * only need a state transition to STREAMING.
+ */
+int cam_vfe_top_start(void *device_priv,
+	void *start_args, uint32_t arg_size)
+{
+	struct cam_vfe_top_ver2_priv            *top_priv;
+	struct cam_isp_resource_node            *mux_res;
+
+	if (!device_priv || !start_args) {
+		pr_err("Error! Invalid input arguments\n");
+		return -EINVAL;
+	}
+
+	top_priv = (struct cam_vfe_top_ver2_priv   *)device_priv;
+	mux_res = (struct cam_isp_resource_node *)start_args;
+
+	if (mux_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF)
+		return mux_res->start(mux_res);
+
+	if (mux_res->res_id >= CAM_ISP_HW_VFE_IN_RDI0 &&
+		mux_res->res_id <= CAM_ISP_HW_VFE_IN_RDI3) {
+		mux_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+		return 0;
+	}
+
+	pr_err("Invalid res id:%d\n", mux_res->res_id);
+	return -EINVAL;
+}
+
+/*
+ * cam_vfe_top_stop - stop streaming on a VFE input mux resource.
+ * @device_priv: opaque pointer to struct cam_vfe_top_ver2_priv
+ * @stop_args:   struct cam_isp_resource_node to stop
+ * @arg_size:    size of stop_args (not checked here)
+ *
+ * Unlike start, both CAMIF and RDI0..RDI3 are stopped through the
+ * resource's own stop() op.  Returns 0 on success, -EINVAL for an
+ * unknown resource id, or the stop() op's error code.
+ */
+int cam_vfe_top_stop(void *device_priv,
+	void *stop_args, uint32_t arg_size)
+{
+	struct cam_vfe_top_ver2_priv            *top_priv;
+	struct cam_isp_resource_node            *mux_res;
+	int rc = 0;
+
+	if (!device_priv || !stop_args) {
+		pr_err("Error! Invalid input arguments\n");
+		return -EINVAL;
+	}
+
+	top_priv = (struct cam_vfe_top_ver2_priv   *)device_priv;
+	mux_res = (struct cam_isp_resource_node *)stop_args;
+
+	if (mux_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF ||
+		(mux_res->res_id >= CAM_ISP_HW_VFE_IN_RDI0 &&
+		mux_res->res_id <= CAM_ISP_HW_VFE_IN_RDI3)) {
+		rc = mux_res->stop(mux_res);
+	} else {
+		pr_err("Invalid res id:%d\n", mux_res->res_id);
+		rc = -EINVAL;
+	}
+
+	return rc;
+
+}
+
+/* cam_vfe_top_read - register read op; not supported on VFE top v2. */
+int cam_vfe_top_read(void *device_priv,
+	void *read_args, uint32_t arg_size)
+{
+	return -EPERM;
+}
+
+/* cam_vfe_top_write - register write op; not supported on VFE top v2. */
+int cam_vfe_top_write(void *device_priv,
+	void *write_args, uint32_t arg_size)
+{
+	return -EPERM;
+}
+
+/*
+ * cam_vfe_top_process_cmd - dispatch a VFE-top private command.
+ * @device_priv: opaque pointer to struct cam_vfe_top_ver2_priv
+ * @cmd_type:    CAM_VFE_HW_CMD_* identifier
+ * @cmd_args:    command-specific payload
+ * @arg_size:    size of @cmd_args
+ *
+ * Only GET_CHANGE_BASE and GET_REG_UPDATE are handled; any other command
+ * id returns -EINVAL.
+ */
+int cam_vfe_top_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_vfe_top_ver2_priv            *top_priv;
+
+	if (!device_priv || !cmd_args) {
+		pr_err("Error! Invalid arguments\n");
+		return -EINVAL;
+	}
+	top_priv = (struct cam_vfe_top_ver2_priv *)device_priv;
+
+	switch (cmd_type) {
+	case CAM_VFE_HW_CMD_GET_CHANGE_BASE:
+		rc = cam_vfe_top_mux_get_base(top_priv, cmd_args, arg_size);
+		break;
+	case CAM_VFE_HW_CMD_GET_REG_UPDATE:
+		rc = cam_vfe_top_mux_get_reg_update(top_priv, cmd_args,
+			arg_size);
+		break;
+	default:
+		rc = -EINVAL;
+		pr_err("Error! Invalid cmd:%d\n", cmd_type);
+		break;
+	}
+
+	return rc;
+}
+
+/*
+ * cam_vfe_top_ver2_init - allocate and initialise the VFE top v2 block.
+ * @soc_info:    SOC resources (clocks, regulators, reg base)
+ * @hw_intf:     parent hardware interface handle
+ * @top_hw_info: struct cam_vfe_top_ver2_hw_info describing mux layout
+ * @vfe_top_ptr: out-param receiving the allocated struct cam_vfe_top
+ *
+ * Builds the mux resource table (one CAMIF per CAM_VFE_CAMIF_VER_2_0
+ * entry, every other entry becomes the next RDI), then wires up the
+ * hw_ops vtable.  Returns 0 on success or a negative errno; on failure
+ * all allocations made here are freed.
+ */
+int cam_vfe_top_ver2_init(
+	struct cam_hw_soc_info                 *soc_info,
+	struct cam_hw_intf                     *hw_intf,
+	void                                   *top_hw_info,
+	struct cam_vfe_top                    **vfe_top_ptr)
+{
+	int i, j, rc = 0;
+	struct cam_vfe_top_ver2_priv           *top_priv = NULL;
+	struct cam_vfe_top_ver2_hw_info        *ver2_hw_info = top_hw_info;
+	struct cam_vfe_top                     *vfe_top;
+
+	vfe_top = kzalloc(sizeof(struct cam_vfe_top), GFP_KERNEL);
+	if (!vfe_top) {
+		CDBG("Error! Failed to alloc for vfe_top\n");
+		rc = -ENOMEM;
+		goto err_alloc_top;
+	}
+
+	top_priv = kzalloc(sizeof(struct cam_vfe_top_ver2_priv),
+		GFP_KERNEL);
+	if (!top_priv) {
+		CDBG("Error! Failed to alloc for vfe_top_priv\n");
+		rc = -ENOMEM;
+		goto err_alloc_priv;
+	}
+	vfe_top->top_priv = top_priv;
+
+	/* i walks all mux slots; j counts only RDI slots so RDI res ids
+	 * stay contiguous from CAM_ISP_HW_VFE_IN_RDI0.
+	 */
+	for (i = 0, j = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
+		top_priv->mux_rsrc[i].res_type = CAM_ISP_RESOURCE_VFE_IN;
+		top_priv->mux_rsrc[i].hw_intf = hw_intf;
+		top_priv->mux_rsrc[i].res_state =
+			CAM_ISP_RESOURCE_STATE_AVAILABLE;
+		if (ver2_hw_info->mux_type[i] == CAM_VFE_CAMIF_VER_2_0) {
+			top_priv->mux_rsrc[i].res_id =
+				CAM_ISP_HW_VFE_IN_CAMIF;
+
+			rc = cam_vfe_camif_ver2_init(hw_intf, soc_info,
+				&ver2_hw_info->camif_hw_info,
+				&top_priv->mux_rsrc[i]);
+			if (rc)
+				goto err_mux_init;
+		} else {
+			/* set the RDI resource id */
+			top_priv->mux_rsrc[i].res_id =
+				CAM_ISP_HW_VFE_IN_RDI0 + j;
+			rc = cam_vfe_rdi_ver2_init(hw_intf, soc_info,
+				NULL, &top_priv->mux_rsrc[i]);
+			if (rc)
+				goto deinit_resources;
+			j++;
+		}
+	}
+
+	vfe_top->hw_ops.get_hw_caps = cam_vfe_top_get_hw_caps;
+	vfe_top->hw_ops.init        = cam_vfe_top_init_hw;
+	vfe_top->hw_ops.reset       = cam_vfe_top_reset;
+	vfe_top->hw_ops.reserve     = cam_vfe_top_reserve;
+	vfe_top->hw_ops.release     = cam_vfe_top_release;
+	vfe_top->hw_ops.start       = cam_vfe_top_start;
+	vfe_top->hw_ops.stop        = cam_vfe_top_stop;
+	vfe_top->hw_ops.read        = cam_vfe_top_read;
+	vfe_top->hw_ops.write       = cam_vfe_top_write;
+	vfe_top->hw_ops.process_cmd = cam_vfe_top_process_cmd;
+	*vfe_top_ptr = vfe_top;
+
+	top_priv->common_data.soc_info     = soc_info;
+	top_priv->common_data.hw_intf      = hw_intf;
+	top_priv->common_data.common_reg   = ver2_hw_info->common_reg;
+
+	return rc;
+
+/* NOTE(review): despite its name, deinit_resources does not de-init mux
+ * resources initialised in earlier loop iterations — confirm whether
+ * cam_vfe_camif_ver2_init/cam_vfe_rdi_ver2_init allocate per-resource
+ * state that needs explicit teardown here.
+ */
+deinit_resources:
+err_mux_init:
+	kfree(vfe_top->top_priv);
+err_alloc_priv:
+	kfree(vfe_top);
+err_alloc_top:
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h
new file mode 100644
index 0000000..1038721
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h
@@ -0,0 +1,63 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_VFE_TOP_VER2_H_
+#define _CAM_VFE_TOP_VER2_H_
+
+#include "cam_vfe_camif_ver2.h"
+#include "cam_vfe_rdi.h"
+
+/* Number of input mux resources (CAMIF + RDIs) managed by VFE top v2 */
+#define CAM_VFE_TOP_VER2_MUX_MAX 4
+
+/* Sub-module groups whose reset/CGC-override/enable controls appear in
+ * the VFE top common register block.
+ */
+enum cam_vfe_top_ver2_module_type {
+	CAM_VFE_TOP_VER2_MODULE_LENS,
+	CAM_VFE_TOP_VER2_MODULE_STATS,
+	CAM_VFE_TOP_VER2_MODULE_COLOR,
+	CAM_VFE_TOP_VER2_MODULE_ZOOM,
+	CAM_VFE_TOP_VER2_MODULE_MAX,
+};
+
+/* Per-module control register offsets (relative to the VFE reg base). */
+struct cam_vfe_top_ver2_reg_offset_module_ctrl {
+	uint32_t reset;
+	uint32_t cgc_ovd;
+	uint32_t enable;
+};
+
+/* Register offsets for the VFE top common block shared by all muxes. */
+struct cam_vfe_top_ver2_reg_offset_common {
+	uint32_t hw_version;
+	uint32_t hw_capability;
+	uint32_t lens_feature;
+	uint32_t stats_feature;
+	uint32_t color_feature;
+	uint32_t zoom_feature;
+	uint32_t global_reset_cmd;
+	struct cam_vfe_top_ver2_reg_offset_module_ctrl
+		*module_ctrl[CAM_VFE_TOP_VER2_MODULE_MAX];
+	uint32_t bus_cgc_ovd;
+	uint32_t core_cfg;
+	uint32_t three_D_cfg;
+	uint32_t violation_status;
+	uint32_t reg_update_cmd;
+};
+
+/* Static hardware description consumed by cam_vfe_top_ver2_init():
+ * common register layout, CAMIF description and the per-slot mux type
+ * (CAM_VFE_CAMIF_VER_2_0 vs RDI).
+ */
+struct cam_vfe_top_ver2_hw_info {
+	struct cam_vfe_top_ver2_reg_offset_common  *common_reg;
+	struct cam_vfe_camif_ver2_hw_info  camif_hw_info;
+	uint32_t mux_type[CAM_VFE_TOP_VER2_MUX_MAX];
+};
+
+/* Allocate and initialise the VFE top v2 block; see the .c for details. */
+int cam_vfe_top_ver2_init(struct cam_hw_soc_info     *soc_info,
+	struct cam_hw_intf                           *hw_intf,
+	void                                         *top_hw_info,
+	struct cam_vfe_top                           **vfe_top);
+
+#endif /* _CAM_VFE_TOP_VER2_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h
new file mode 100644
index 0000000..44c046d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h
@@ -0,0 +1,53 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_VFE_TOP_H_
+#define _CAM_VFE_TOP_H_
+
+#include "cam_hw_intf.h"
+#include "cam_isp_hw.h"
+
+/* Version tags used to select the VFE-top/CAMIF/RDI implementation. */
+#define CAM_VFE_TOP_VER_1_0 0x100000
+#define CAM_VFE_TOP_VER_2_0 0x200000
+
+#define CAM_VFE_CAMIF_VER_1_0 0x10
+#define CAM_VFE_CAMIF_VER_2_0 0x20
+
+#define CAM_VFE_RDI_VER_1_0 0x1000
+
+/* Version-agnostic handle to a VFE top block: private data plus the
+ * hw_ops vtable filled in by the version-specific init.
+ */
+struct cam_vfe_top {
+	void                   *top_priv;
+	struct cam_hw_ops       hw_ops;
+};
+
+/* CAMIF resource ops table; each callback receives the CAMIF private
+ * data and the resource node it operates on.
+ */
+struct cam_vfe_camif {
+	void               *camif_priv;
+	int (*start_resource)(void *priv,
+		struct cam_isp_resource_node *camif_res);
+	int (*stop_resource)(void *priv,
+		struct cam_isp_resource_node *camif_res);
+	int (*acquire_resource)(void *priv,
+		struct cam_isp_resource_node *camif_res,
+		void *acquire_param);
+	int (*release_resource)(void *priv,
+		struct cam_isp_resource_node *camif_res);
+	int (*process_cmd)(void *priv, uint32_t cmd_type, void *cmd_args,
+				uint32_t arg_size);
+};
+
+/* Dispatch to the version-specific init based on @top_version. */
+int cam_vfe_top_init(uint32_t          top_version,
+	struct cam_hw_soc_info        *soc_info,
+	struct cam_hw_intf            *hw_intf,
+	void                          *top_hw_info,
+	struct cam_vfe_top            **vfe_top);
+
+#endif /* _CAM_VFE_TOP_H_*/
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.h
index c5f839b..06588c4 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.h
@@ -13,6 +13,7 @@
 #ifndef _CAM_MEM_MGR_H_
 #define _CAM_MEM_MGR_H_
 
+#include <linux/mutex.h>
 #include <media/cam_req_mgr.h>
 #include "cam_mem_mgr_api.h"
 
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
index 019a775..7bc26ec 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
@@ -196,6 +196,7 @@
 	hdl_tbl->hdl[idx].ops = hdl_data->ops;
 	spin_unlock_bh(&hdl_tbl_lock);
 
+	pr_debug("%s: handle = %x\n", __func__, handle);
 	return handle;
 }
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/Makefile
new file mode 100644
index 0000000..e515a40
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_utils/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_cci/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_io/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_csiphy/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_actuator/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor/
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/Makefile
new file mode 100644
index 0000000..8670d80
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/Makefile
@@ -0,0 +1,8 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_actuator_dev.o cam_actuator_core.o cam_actuator_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
new file mode 100644
index 0000000..648617e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
@@ -0,0 +1,628 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <cam_sensor_cmn_header.h>
+#include "cam_actuator_core.h"
+#include <cam_sensor_util.h>
+
+/*
+ * cam_actuator_slaveInfo_pkt_parser - apply I2C slave info from a command
+ * buffer to the actuator's CCI client.
+ * @a_ctrl:  actuator control structure
+ * @cmd_buf: command buffer starting with a struct cam_cmd_i2c_info
+ *
+ * The 8-bit slave address from userspace is shifted down to the 7-bit
+ * form the CCI client expects.  Returns 0 on success, -EINVAL on NULL
+ * arguments.
+ */
+int32_t cam_actuator_slaveInfo_pkt_parser(struct cam_actuator_ctrl_t *a_ctrl,
+	uint32_t *cmd_buf)
+{
+	int32_t rc = 0;
+	struct cam_cmd_i2c_info *i2c_info;
+
+	if (!a_ctrl || !cmd_buf) {
+		pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	i2c_info = (struct cam_cmd_i2c_info *)cmd_buf;
+	a_ctrl->io_master_info.cci_client->i2c_freq_mode =
+		i2c_info->i2c_freq_mode;
+	a_ctrl->io_master_info.cci_client->sid =
+		i2c_info->slave_addr >> 1;
+	CDBG("%s:%d Slave addr: 0x%x Freq Mode: %d\n", __func__,
+		__LINE__, i2c_info->slave_addr, i2c_info->i2c_freq_mode);
+
+	return rc;
+}
+
+/*
+ * cam_actuator_apply_settings - replay a parsed i2c settings list onto
+ * the actuator hardware.
+ * @a_ctrl:  actuator control structure
+ * @i2c_set: validated settings array (is_settings_valid must be 1)
+ *
+ * WRITE_RANDOM entries are written as a block; POLL entries poll each
+ * register until it matches (data & mask) or times out.  Any other
+ * op_code is silently skipped.  Returns 0 on success or the first I/O
+ * error encountered.
+ */
+int32_t cam_actuator_apply_settings(struct cam_actuator_ctrl_t *a_ctrl,
+	struct i2c_settings_array *i2c_set)
+{
+	struct i2c_settings_list *i2c_list;
+	int32_t rc = 0;
+	uint32_t i, size;
+
+	if (a_ctrl == NULL || i2c_set == NULL) {
+		pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	if (i2c_set->is_settings_valid != 1) {
+		pr_err("%s: %d :Error: Invalid settings\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+
+	list_for_each_entry(i2c_list,
+		&(i2c_set->list_head), list) {
+		if (i2c_list->op_code ==  CAM_SENSOR_I2C_WRITE_RANDOM) {
+			rc = camera_io_dev_write(&(a_ctrl->io_master_info),
+				&(i2c_list->i2c_settings));
+			if (rc < 0) {
+				pr_err("%s: %d :Error: Failed in Applying i2c write settings\n",
+					__func__, __LINE__);
+				return rc;
+			}
+		} else if (i2c_list->op_code == CAM_SENSOR_I2C_POLL) {
+			size = i2c_list->i2c_settings.size;
+			for (i = 0; i < size; i++) {
+				rc = camera_io_dev_poll(
+					&(a_ctrl->io_master_info),
+					i2c_list->i2c_settings.
+						reg_setting[i].reg_addr,
+					i2c_list->i2c_settings.
+						reg_setting[i].reg_data,
+					i2c_list->i2c_settings.
+						reg_setting[i].data_mask,
+					i2c_list->i2c_settings.addr_type,
+					i2c_list->i2c_settings.data_type,
+					i2c_list->i2c_settings.
+						reg_setting[i].delay);
+				if (rc < 0) {
+					pr_err("%s: %d :Error: Failed in Applying i2c poll settings\n",
+						__func__, __LINE__);
+					return rc;
+				}
+			}
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * cam_actuator_apply_request - request-manager callback that applies the
+ * per-frame i2c settings slot matching the given request id.
+ * @apply: request info from the CRM, including dev_hdl and request_id
+ *
+ * The slot index is request_id modulo MAX_PER_FRAME_ARRAY; the settings
+ * are applied only when the stored request_id in that slot matches
+ * exactly and the slot is marked valid.  Afterwards the slot that is
+ * MAX_SYSTEM_PIPELINE_DELAY behind is garbage-collected.
+ * Returns 0 on success or a negative errno.
+ */
+int32_t cam_actuator_apply_request(struct cam_req_mgr_apply_request *apply)
+{
+	int32_t rc = 0, request_id, del_req_id;
+	struct cam_actuator_ctrl_t *a_ctrl = NULL;
+
+	if (!apply) {
+		pr_err("%s:%d :Error: Invalid Input Args\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+
+	a_ctrl = (struct cam_actuator_ctrl_t *)
+		cam_get_device_priv(apply->dev_hdl);
+	if (!a_ctrl) {
+		pr_err("%s: %d :Error: Device data is NULL\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+	/* modulo result fits int32_t even though apply->request_id is 64-bit */
+	request_id = apply->request_id % MAX_PER_FRAME_ARRAY;
+	CDBG("%s:%d Request Id: %lld\n",
+		__func__, __LINE__, apply->request_id);
+
+	if ((apply->request_id ==
+		a_ctrl->i2c_data.per_frame[request_id].request_id) &&
+		(a_ctrl->i2c_data.per_frame[request_id].is_settings_valid)
+		== 1) {
+		rc = cam_actuator_apply_settings(a_ctrl,
+			&a_ctrl->i2c_data.per_frame[request_id]);
+		if (rc < 0) {
+			pr_err("%s:%d Failed in applying the request: %lld\n",
+				__func__, __LINE__, apply->request_id);
+			return rc;
+		}
+	}
+	/* slot that has aged out of the pipeline and can be freed */
+	del_req_id = (request_id +
+		MAX_PER_FRAME_ARRAY - MAX_SYSTEM_PIPELINE_DELAY) %
+		MAX_PER_FRAME_ARRAY;
+
+	if (apply->request_id >
+		a_ctrl->i2c_data.per_frame[del_req_id].request_id) {
+		a_ctrl->i2c_data.per_frame[del_req_id].request_id = 0;
+		rc = delete_request(&a_ctrl->i2c_data.per_frame[del_req_id]);
+		if (rc < 0) {
+			pr_err("%s: %d :Error: Fail deleting the req: %d err: %d\n",
+				__func__, __LINE__, del_req_id, rc);
+			return rc;
+		}
+	} else {
+		CDBG("%s:%d No Valid Req to clean Up\n", __func__, __LINE__);
+	}
+
+	return rc;
+}
+
+/*
+ * cam_actuator_establish_link - CRM callback to link/unlink the actuator
+ * subdevice with a request-manager session.
+ * @link: link setup info; link_enable selects link vs unlink
+ *
+ * On link, stores the link handle and CRM callback table; on unlink,
+ * clears both.  Returns 0 on success, -EINVAL on bad args or handle.
+ */
+int32_t cam_actuator_establish_link(
+	struct cam_req_mgr_core_dev_link_setup *link)
+{
+	struct cam_actuator_ctrl_t *a_ctrl = NULL;
+
+	if (!link) {
+		pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	a_ctrl = (struct cam_actuator_ctrl_t *)
+		cam_get_device_priv(link->dev_hdl);
+	if (!a_ctrl) {
+		pr_err("%s:%d :Error: Device data is NULL\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+	if (link->link_enable) {
+		a_ctrl->bridge_intf.link_hdl = link->link_hdl;
+		a_ctrl->bridge_intf.crm_cb = link->crm_cb;
+	} else {
+		a_ctrl->bridge_intf.link_hdl = -1;
+		a_ctrl->bridge_intf.crm_cb = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * cam_actuator_publish_dev_info - CRM callback that reports this
+ * subdevice's identity and pipeline delay (0 frames) to the request
+ * manager.  Returns 0 on success, -EINVAL on NULL info.
+ */
+int32_t cam_actuator_publish_dev_info(struct cam_req_mgr_device_info *info)
+{
+	if (!info) {
+		pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	info->dev_id = CAM_REQ_MGR_DEVICE_ACTUATOR;
+	strlcpy(info->name, CAM_ACTUATOR_NAME, sizeof(info->name));
+	info->p_delay = 0;
+
+	return 0;
+}
+
+/*
+ * cam_actuator_i2c_pkt_parse - parse a CSL packet from userspace into
+ * the actuator's i2c settings tables.
+ * @a_ctrl: actuator control structure
+ * @arg:    struct cam_control carrying a cam_config_dev_cmd
+ *
+ * Handles three opcodes:
+ *  - INIT: expects exactly two command buffers (slave info + init
+ *    register settings), parsed into init_settings.
+ *  - AUTO_MOVE_LENS: parsed into init_settings and flagged for
+ *    immediate apply (ACT_APPLY_SETTINGS_NOW).
+ *  - MANUAL_MOVE_LENS: parsed into the per_frame slot selected by
+ *    request_id modulo MAX_PER_FRAME_ARRAY, applied later via CRM.
+ * Non-INIT packets are also registered with the request manager bridge.
+ * Returns 0 on success or a negative errno.
+ */
+int32_t cam_actuator_i2c_pkt_parse(struct cam_actuator_ctrl_t *a_ctrl,
+	void *arg)
+{
+	int32_t rc = 0;
+	uint64_t generic_ptr;
+	struct cam_control *ioctl_ctrl = NULL;
+	struct cam_packet *csl_packet = NULL;
+	struct cam_config_dev_cmd config;
+	struct i2c_data_settings *i2c_data = NULL;
+	struct i2c_settings_array *i2c_reg_settings = NULL;
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	size_t len_of_buff = 0;
+	uint32_t *offset = NULL, *cmd_buf;
+	struct cam_req_mgr_add_request add_req;
+
+	if (!a_ctrl || !arg) {
+		pr_err("%s:%d :Error: Invalid Args\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+
+	ioctl_ctrl = (struct cam_control *)arg;
+	if (copy_from_user(&config, (void __user *) ioctl_ctrl->handle,
+		sizeof(config)))
+		return -EFAULT;
+	rc = cam_mem_get_cpu_buf(config.packet_handle,
+		(uint64_t *)&generic_ptr, &len_of_buff);
+	if (rc < 0) {
+		pr_err("%s:%d :Error: error in converting command Handle %d\n",
+			__func__, __LINE__, rc);
+		return rc;
+	}
+
+	/* NOTE(review): this only bounds the start offset; it does not
+	 * verify that a full cam_packet (header + payload) fits inside
+	 * the mapped buffer — confirm against the umd packet contract.
+	 */
+	if (config.offset > len_of_buff) {
+		pr_err("%s: %d offset is out of bounds: offset: %lld len: %zu\n",
+			__func__, __LINE__, config.offset, len_of_buff);
+		return -EINVAL;
+	}
+
+	csl_packet = (struct cam_packet *)(generic_ptr +
+		config.offset);
+	CDBG("%s:%d Pkt opcode: %d\n",
+		__func__, __LINE__, csl_packet->header.op_code);
+
+	if ((csl_packet->header.op_code & 0xFFFFFF) ==
+			CAM_ACTUATOR_PACKET_OPCODE_INIT) {
+		i2c_data = &(a_ctrl->i2c_data);
+		i2c_reg_settings = &i2c_data->init_settings;
+
+		offset = (uint32_t *)&csl_packet->payload;
+		offset += (csl_packet->cmd_buf_offset / sizeof(uint32_t));
+		cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+
+		if (csl_packet->num_cmd_buf != 2) {
+			pr_err("%s:: %d :Error: cmd Buffers in Init : %d\n",
+				__func__, __LINE__, csl_packet->num_cmd_buf);
+			return -EINVAL;
+		}
+
+		rc = cam_mem_get_cpu_buf(cmd_desc[0].mem_handle,
+			(uint64_t *)&generic_ptr, &len_of_buff);
+		if (rc < 0) {
+			pr_err("%s:%d Failed to get cpu buf\n",
+				__func__, __LINE__);
+			return rc;
+		}
+		cmd_buf = (uint32_t *)generic_ptr;
+		cmd_buf += cmd_desc->offset / sizeof(uint32_t);
+		rc = cam_actuator_slaveInfo_pkt_parser(a_ctrl, cmd_buf);
+		if (rc < 0) {
+			pr_err("%s:%d Failed in parsing the pkt\n",
+				__func__, __LINE__);
+			return rc;
+		}
+		cmd_buf += (sizeof(struct cam_cmd_i2c_info)/sizeof(uint32_t));
+		i2c_data->init_settings.request_id = 0;
+		i2c_reg_settings->is_settings_valid = 1;
+		rc = cam_sensor_i2c_pkt_parser(i2c_reg_settings,
+			&cmd_desc[1], 1);
+		if (rc < 0) {
+			pr_err("%s:%d :Error: actuator pkt parsing failed: %d\n",
+				__func__, __LINE__, rc);
+			return rc;
+		}
+	} else if ((csl_packet->header.op_code & 0xFFFFFF) ==
+		CAM_ACTUATOR_PACKET_AUTO_MOVE_LENS) {
+		a_ctrl->act_apply_state =
+			ACT_APPLY_SETTINGS_NOW;
+
+		i2c_data = &(a_ctrl->i2c_data);
+		i2c_reg_settings = &i2c_data->init_settings;
+
+		i2c_data->init_settings.request_id =
+			csl_packet->header.request_id;
+		i2c_reg_settings->is_settings_valid = 1;
+		offset = (uint32_t *)&csl_packet->payload;
+		offset += csl_packet->cmd_buf_offset / sizeof(uint32_t);
+		cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+		rc = cam_sensor_i2c_pkt_parser(i2c_reg_settings,
+			cmd_desc, 1);
+		if (rc < 0) {
+			pr_err("%s:%d :Error: actuator pkt parsing failed: %d\n",
+				__func__, __LINE__, rc);
+			return rc;
+		}
+	} else if ((csl_packet->header.op_code & 0xFFFFFF) ==
+		CAM_ACTUATOR_PACKET_MANUAL_MOVE_LENS) {
+		i2c_data = &(a_ctrl->i2c_data);
+		i2c_reg_settings =
+			&i2c_data->per_frame
+			[csl_packet->header.request_id % MAX_PER_FRAME_ARRAY];
+
+		/* Fix: tag the per-frame slot we just selected, not
+		 * init_settings; cam_actuator_apply_request() matches on
+		 * per_frame[idx].request_id, which was otherwise never set.
+		 */
+		i2c_reg_settings->request_id =
+			csl_packet->header.request_id;
+		i2c_reg_settings->is_settings_valid = 1;
+		offset = (uint32_t *)&csl_packet->payload;
+		offset += csl_packet->cmd_buf_offset / sizeof(uint32_t);
+		cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+		rc = cam_sensor_i2c_pkt_parser(i2c_reg_settings,
+			cmd_desc, 1);
+		if (rc < 0) {
+			pr_err("%s:%d :Error: actuator pkt parsing failed: %d\n",
+				__func__, __LINE__, rc);
+			return rc;
+		}
+	}
+
+	/* Register every non-INIT request with the CRM bridge so it gets
+	 * scheduled via cam_actuator_apply_request().
+	 */
+	if ((csl_packet->header.op_code & 0xFFFFFF) !=
+		CAM_ACTUATOR_PACKET_OPCODE_INIT) {
+		add_req.link_hdl = a_ctrl->bridge_intf.link_hdl;
+		add_req.req_id = csl_packet->header.request_id;
+		add_req.dev_hdl = a_ctrl->bridge_intf.device_hdl;
+		if (a_ctrl->bridge_intf.crm_cb &&
+			a_ctrl->bridge_intf.crm_cb->add_req)
+			a_ctrl->bridge_intf.crm_cb->add_req(&add_req);
+		CDBG("%s: %d Req Id: %lld added to Bridge\n",
+			__func__, __LINE__, add_req.req_id);
+	}
+
+	return rc;
+}
+
+/*
+ * cam_actuator_vreg_control - enable or disable all actuator regulators.
+ * @a_ctrl: actuator control structure
+ * @config: 1 to enable, 0 to disable
+ *
+ * Picks the owning device from the CCI platform device or the I2C
+ * client depending on the master type.  Returns 0 when there are no
+ * regulators, -EINVAL when the count is out of range, otherwise the
+ * last msm_camera_config_single_vreg() result.
+ * NOTE(review): the ">= MSM_ACTUATOR_MAX_VREGS" check also rejects a
+ * count exactly equal to the array size — confirm whether that bound
+ * is meant to be exclusive.
+ */
+static int32_t cam_actuator_vreg_control(struct cam_actuator_ctrl_t *a_ctrl,
+	int config)
+{
+	int rc = 0, i, cnt;
+	struct cam_actuator_vreg *vreg_cfg;
+
+	vreg_cfg = &a_ctrl->vreg_cfg;
+	cnt = vreg_cfg->num_vreg;
+	if (!cnt)
+		return 0;
+
+	if (cnt >= MSM_ACTUATOR_MAX_VREGS) {
+		pr_err("%s:%d Regulators more than supported %d\n",
+			__func__, __LINE__, cnt);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < cnt; i++) {
+		if (a_ctrl->io_master_info.master_type ==
+			CCI_MASTER) {
+			rc = msm_camera_config_single_vreg(
+				&(a_ctrl->v4l2_dev_str.pdev->dev),
+				&vreg_cfg->cam_vreg[i],
+				(struct regulator **)&vreg_cfg->data[i],
+				config);
+		} else if (a_ctrl->io_master_info.master_type ==
+			I2C_MASTER) {
+			rc = msm_camera_config_single_vreg(
+				&(a_ctrl->io_master_info.client->dev),
+				&vreg_cfg->cam_vreg[i],
+				(struct regulator **)&vreg_cfg->data[i],
+				config);
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * cam_actuator_power_up - power on the actuator: enable regulators,
+ * request and drive the VAF GPIO high, and select the active pinctrl
+ * state when one is configured.
+ *
+ * pinctrl failure is logged but deliberately not propagated (best
+ * effort).  Returns 0 on success or a negative errno.
+ */
+static int32_t cam_actuator_power_up(struct cam_actuator_ctrl_t *a_ctrl)
+{
+	int rc = 0;
+
+	rc = cam_actuator_vreg_control(a_ctrl, 1);
+	if (rc < 0) {
+		pr_err("%s:%d Actuator Reg Failed %d\n",
+			__func__, __LINE__, rc);
+		return rc;
+	}
+
+	if (a_ctrl->gconf &&
+		a_ctrl->gconf->gpio_num_info &&
+		a_ctrl->gconf->gpio_num_info->valid[SENSOR_VAF] == 1) {
+		rc = msm_camera_request_gpio_table(
+			a_ctrl->gconf->cam_gpio_req_tbl,
+			a_ctrl->gconf->cam_gpio_req_tbl_size, 1);
+		if (rc < 0) {
+			pr_err("%s:%d :Error: Failed in req gpio: %d\n",
+				__func__, __LINE__, rc);
+			return rc;
+		}
+		if (a_ctrl->cam_pinctrl_status) {
+			rc = pinctrl_select_state(
+				a_ctrl->pinctrl_info.pinctrl,
+				a_ctrl->pinctrl_info.gpio_state_active);
+			if (rc < 0)
+				pr_err("%s:%d :Error: cannot set pin to active state: %d",
+					__func__, __LINE__, rc);
+		}
+
+		gpio_set_value_cansleep(
+			a_ctrl->gconf->gpio_num_info->gpio_num[SENSOR_VAF],
+			1);
+	}
+
+	/* VREG needs some delay to power up */
+	usleep_range(2000, 2050);
+
+	return rc;
+}
+
+/*
+ * cam_actuator_power_down - reverse of cam_actuator_power_up: disable
+ * regulators, drive the VAF GPIO low, move pinctrl to the suspend state
+ * and release the GPIO table.  Returns 0 on success or a negative errno.
+ */
+static int32_t cam_actuator_power_down(struct cam_actuator_ctrl_t *a_ctrl)
+{
+	int32_t rc = 0;
+
+	rc = cam_actuator_vreg_control(a_ctrl, 0);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		return rc;
+	}
+
+	if (a_ctrl->gconf &&
+		a_ctrl->gconf->gpio_num_info &&
+		a_ctrl->gconf->gpio_num_info->
+			valid[SENSOR_VAF] == 1) {
+
+		/* NOTE(review): GPIOF_OUT_INIT_LOW is a gpio_request flag,
+		 * not a level; it happens to equal 0 so this drives the
+		 * line low, but a literal 0 would state the intent.
+		 */
+		gpio_set_value_cansleep(
+			a_ctrl->gconf->gpio_num_info->
+				gpio_num[SENSOR_VAF],
+			GPIOF_OUT_INIT_LOW);
+
+		if (a_ctrl->cam_pinctrl_status) {
+			rc = pinctrl_select_state(
+				a_ctrl->pinctrl_info.pinctrl,
+				a_ctrl->pinctrl_info.
+					gpio_state_suspend);
+			if (rc < 0)
+				pr_err("%s:%d cannot set pin to suspend state: %d",
+					__func__, __LINE__, rc);
+
+			devm_pinctrl_put(
+				a_ctrl->pinctrl_info.pinctrl);
+		}
+		a_ctrl->cam_pinctrl_status = 0;
+		rc = msm_camera_request_gpio_table(
+			a_ctrl->gconf->cam_gpio_req_tbl,
+			a_ctrl->gconf->cam_gpio_req_tbl_size,
+			0);
+		if (rc < 0)
+			pr_err("%s:%d Failed in selecting state: %d\n",
+				__func__, __LINE__, rc);
+	}
+
+	return rc;
+}
+
+/*
+ * cam_actuator_driver_cmd - handle a VIDIOC_CAM_CONTROL opcode for the
+ * actuator under the actuator mutex.
+ * @a_ctrl: actuator control structure
+ * @arg:    struct cam_control from the ioctl
+ *
+ * Supported opcodes: ACQUIRE/RELEASE device handle, QUERY_CAP,
+ * START/STOP (power + CCI session + init settings), CONFIG (packet
+ * parse, with immediate apply for AUTO_MOVE_LENS packets).
+ * Returns 0 on success or a negative errno.
+ */
+int32_t cam_actuator_driver_cmd(struct cam_actuator_ctrl_t *a_ctrl,
+	void *arg)
+{
+	int rc = 0;
+	struct cam_control *cmd = (struct cam_control *)arg;
+
+	if (!a_ctrl || !cmd) {
+		pr_err("%s: %d :Error: Invalid Args\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+
+	pr_debug("%s:%d Opcode to Actuator: %d\n",
+		__func__, __LINE__, cmd->op_code);
+
+	mutex_lock(&(a_ctrl->actuator_mutex));
+	switch (cmd->op_code) {
+	case CAM_ACQUIRE_DEV: {
+		struct cam_sensor_acquire_dev actuator_acq_dev;
+		struct cam_create_dev_hdl bridge_params;
+
+		if (a_ctrl->bridge_intf.device_hdl != -1) {
+			pr_err("%s:%d Device is already acquired\n",
+				__func__, __LINE__);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+		/* copy_from_user() returns the number of bytes NOT copied
+		 * (never negative), so the old "rc < 0" test could never
+		 * detect failure; treat any non-zero return as -EFAULT.
+		 */
+		if (copy_from_user(&actuator_acq_dev,
+			(void __user *) cmd->handle,
+			sizeof(actuator_acq_dev))) {
+			pr_err("%s:%d :Error: Failed Copying from user\n",
+				__func__, __LINE__);
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+
+		bridge_params.session_hdl = actuator_acq_dev.session_handle;
+		bridge_params.ops = &a_ctrl->bridge_intf.ops;
+		bridge_params.v4l2_sub_dev_flag = 0;
+		bridge_params.media_entity_flag = 0;
+		bridge_params.priv = a_ctrl;
+
+		actuator_acq_dev.device_handle =
+			cam_create_device_hdl(&bridge_params);
+		a_ctrl->bridge_intf.device_hdl = actuator_acq_dev.device_handle;
+		a_ctrl->bridge_intf.session_hdl =
+			actuator_acq_dev.session_handle;
+
+		CDBG("%s:%d Device Handle: %d\n",
+			__func__, __LINE__, actuator_acq_dev.device_handle);
+		if (copy_to_user((void __user *) cmd->handle, &actuator_acq_dev,
+			sizeof(struct cam_sensor_acquire_dev))) {
+			pr_err("%s:%d :Error: Failed Copy to User\n",
+				__func__, __LINE__);
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+
+	}
+		break;
+	case CAM_RELEASE_DEV: {
+		if (a_ctrl->bridge_intf.device_hdl == -1) {
+			pr_err("%s:%d :Error: link hdl: %d device hdl: %d\n",
+				__func__, __LINE__,
+				a_ctrl->bridge_intf.device_hdl,
+				a_ctrl->bridge_intf.link_hdl);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+		rc = cam_destroy_device_hdl(a_ctrl->bridge_intf.device_hdl);
+		if (rc < 0)
+			pr_err("%s:%d :Error: destroying the device hdl\n",
+				__func__, __LINE__);
+		a_ctrl->bridge_intf.device_hdl = -1;
+		a_ctrl->bridge_intf.link_hdl = -1;
+		a_ctrl->bridge_intf.session_hdl = -1;
+	}
+		break;
+	case CAM_QUERY_CAP: {
+		/* Zero-initialise so unset fields don't leak kernel stack
+		 * bytes to userspace via copy_to_user().
+		 */
+		struct cam_actuator_query_cap actuator_cap = {0};
+
+		actuator_cap.slot_info = a_ctrl->id;
+		if (copy_to_user((void __user *) cmd->handle, &actuator_cap,
+			sizeof(struct cam_actuator_query_cap))) {
+			pr_err("%s:%d :Error: Failed Copy to User\n",
+				__func__, __LINE__);
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+	}
+		break;
+	case CAM_START_DEV: {
+		rc = cam_actuator_power_up(a_ctrl);
+		if (rc < 0) {
+			pr_err("%s: %d :Error: Actuator Power up failed\n",
+				__func__, __LINE__);
+			goto release_mutex;
+		}
+		rc = camera_io_init(&a_ctrl->io_master_info);
+		if (rc < 0) {
+			pr_err("%s:%d :Error: cci_init failed\n",
+				__func__, __LINE__);
+			cam_actuator_power_down(a_ctrl);
+			/* Fix: don't fall through and apply init settings
+			 * on powered-down hardware.
+			 */
+			goto release_mutex;
+		}
+
+		rc = cam_actuator_apply_settings(a_ctrl,
+			&a_ctrl->i2c_data.init_settings);
+		if (rc < 0)
+			pr_err("%s: %d :Error: Cannot apply Init settings\n",
+				__func__, __LINE__);
+
+		/* Delete the request even if the apply is failed */
+		rc = delete_request(&a_ctrl->i2c_data.init_settings);
+		if (rc < 0) {
+			pr_err("%s:%d Fail in deleting the Init settings\n",
+				__func__, __LINE__);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+	}
+		break;
+	case CAM_STOP_DEV: {
+		rc = camera_io_release(&a_ctrl->io_master_info);
+		if (rc < 0)
+			pr_err("%s:%d :Error: Failed in releasing CCI\n",
+				__func__, __LINE__);
+		rc = cam_actuator_power_down(a_ctrl);
+		if (rc < 0) {
+			pr_err("%s:%d :Error: Actuator Power down failed\n",
+				__func__, __LINE__);
+			goto release_mutex;
+		}
+	}
+		break;
+	case CAM_CONFIG_DEV: {
+		a_ctrl->act_apply_state =
+			ACT_APPLY_SETTINGS_LATER;
+		rc = cam_actuator_i2c_pkt_parse(a_ctrl, arg);
+		if (rc < 0) {
+			pr_err("%s:%d :Error: Failed in actuator Parsing\n",
+				__func__, __LINE__);
+		}
+
+		/* AUTO_MOVE_LENS packets request immediate application */
+		if (a_ctrl->act_apply_state ==
+			ACT_APPLY_SETTINGS_NOW) {
+			rc = cam_actuator_apply_settings(a_ctrl,
+				&a_ctrl->i2c_data.init_settings);
+			if (rc < 0)
+				pr_err("%s:%d :Error: Cannot apply Update settings\n",
+					__func__, __LINE__);
+
+			/* Delete the request even if the apply is failed */
+			rc = delete_request(&a_ctrl->i2c_data.init_settings);
+			if (rc < 0) {
+				pr_err("%s: %d :Error: Failed in Deleting the Init Pkt: %d\n",
+					__func__, __LINE__, rc);
+				goto release_mutex;
+			}
+		}
+	}
+		break;
+	default:
+		pr_err("%s:%d Invalid Opcode %d\n",
+			__func__, __LINE__, cmd->op_code);
+		/* Fix: report unsupported opcodes instead of returning 0,
+		 * matching cam_actuator_subdev_ioctl()'s behavior.
+		 */
+		rc = -EINVAL;
+	}
+
+release_mutex:
+	mutex_unlock(&(a_ctrl->actuator_mutex));
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.h
new file mode 100644
index 0000000..d2cb96d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.h
@@ -0,0 +1,48 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_ACTUATOR_CORE_H_
+#define _CAM_ACTUATOR_CORE_H_
+
+#include "cam_actuator_dev.h"
+
+/**
+ * @apply: Req mgr structure for applying request
+ *
+ * This API applies the per-frame i2c settings matching the request id
+ * mentioned in @apply.
+ *
+ * @return 0 on success, negative errno on failure.
+ */
+int32_t cam_actuator_apply_request(struct cam_req_mgr_apply_request *apply);
+
+/**
+ * @info: Sub device info to req mgr
+ *
+ * This API publishes the subdevice info (dev id, name, pipeline delay)
+ * to the request manager.
+ *
+ * @return 0 on success, negative errno on failure.
+ */
+int32_t cam_actuator_publish_dev_info(struct cam_req_mgr_device_info *info);
+
+/**
+ * @link: Link setup info
+ *
+ * This API establishes (or tears down, per link->link_enable) the link
+ * between the actuator subdevice and the request manager.
+ *
+ * @return 0 on success, negative errno on failure.
+ */
+int32_t cam_actuator_establish_link(
+	struct cam_req_mgr_core_dev_link_setup *link);
+
+/**
+ * @a_ctrl: Actuator ctrl structure
+ * @arg:    Camera control command argument
+ *
+ * This API handles the camera control (ioctl opcode) argument reaching
+ * the actuator: acquire/release, query cap, start/stop, config.
+ *
+ * @return 0 on success, negative errno on failure.
+ */
+int32_t cam_actuator_driver_cmd(struct cam_actuator_ctrl_t *a_ctrl, void *arg);
+
+#endif /* _CAM_ACTUATOR_CORE_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
new file mode 100644
index 0000000..3835680
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
@@ -0,0 +1,334 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_actuator_dev.h"
+#include "cam_req_mgr_dev.h"
+#include "cam_actuator_soc.h"
+#include "cam_actuator_core.h"
+
+static long cam_actuator_subdev_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	int rc = 0;
+	struct cam_actuator_ctrl_t *a_ctrl =
+		v4l2_get_subdevdata(sd);
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_actuator_driver_cmd(a_ctrl, arg);
+		break;
+	default:
+		pr_err("%s:%d Invalid ioctl cmd\n",
+			__func__, __LINE__);
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+static int32_t cam_actuator_driver_i2c_probe(struct i2c_client *client,
+	const struct i2c_device_id *id)
+{
+	int32_t rc = 0, i = 0;
+	struct cam_actuator_ctrl_t *a_ctrl;
+
+	if (client == NULL || id == NULL) {
+		pr_err("%s:%d: :Error: Invalid Args client: %pK id: %pK\n",
+			__func__, __LINE__, client, id);
+		return -EINVAL;
+	}
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		pr_err("%s %s :Error: i2c_check_functionality failed\n",
+			__func__, client->name);
+		rc = -EFAULT;
+		return rc;
+	}
+
+	/* Create sensor control structure */
+	a_ctrl = kzalloc(sizeof(*a_ctrl), GFP_KERNEL);
+	if (!a_ctrl)
+		return -ENOMEM;
+
+	i2c_set_clientdata(client, a_ctrl);
+
+	a_ctrl->i2c_data.per_frame =
+		(struct i2c_settings_array *)
+		kzalloc(sizeof(struct i2c_settings_array) *
+		MAX_PER_FRAME_ARRAY, GFP_KERNEL);
+	if (a_ctrl->i2c_data.per_frame == NULL) {
+		rc = -ENOMEM;
+		goto free_ctrl;
+	}
+
+	INIT_LIST_HEAD(&(a_ctrl->i2c_data.init_settings.list_head));
+
+	for (i = 0; i < MAX_PER_FRAME_ARRAY; i++)
+		INIT_LIST_HEAD(&(a_ctrl->i2c_data.per_frame[i].list_head));
+
+	/* Initialize sensor device type */
+	a_ctrl->of_node = client->dev.of_node;
+	a_ctrl->io_master_info.master_type = I2C_MASTER;
+
+	rc = cam_actuator_parse_dt(a_ctrl, &client->dev);
+	if (rc < 0) {
+		pr_err("failed: cam_sensor_parse_dt rc %d", rc);
+		goto free_mem;
+	}
+
+	return rc;
+free_mem:
+	kfree(a_ctrl->i2c_data.per_frame);
+free_ctrl:
+	kfree(a_ctrl);
+	return rc;
+}
+
+static int32_t cam_actuator_platform_remove(struct platform_device *pdev)
+{
+	struct cam_actuator_ctrl_t  *a_ctrl;
+	int32_t rc = 0;
+
+	a_ctrl = platform_get_drvdata(pdev);
+	if (!a_ctrl) {
+		pr_err("%s: Actuator device is NULL\n", __func__);
+		return 0;
+	}
+
+	kfree(a_ctrl->io_master_info.cci_client);
+	a_ctrl->io_master_info.cci_client = NULL;
+	kfree(a_ctrl->i2c_data.per_frame);
+	a_ctrl->i2c_data.per_frame = NULL;
+	devm_kfree(&pdev->dev, a_ctrl);
+
+	return rc;
+}
+
+static int32_t cam_actuator_driver_i2c_remove(struct i2c_client *client)
+{
+	struct cam_actuator_ctrl_t  *a_ctrl = i2c_get_clientdata(client);
+	int32_t rc = 0;
+
+	/* Handle I2C Devices */
+	if (!a_ctrl) {
+		pr_err("%s: Actuator device is NULL\n", __func__);
+		return -EINVAL;
+	}
+	/*Free Allocated Mem */
+	kfree(a_ctrl->i2c_data.per_frame);
+	a_ctrl->i2c_data.per_frame = NULL;
+	kfree(a_ctrl);
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static long cam_actuator_init_subdev_do_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, unsigned long arg)
+{
+	struct cam_control cmd_data;
+	int32_t rc = 0;
+
+	if (copy_from_user(&cmd_data, (void __user *)arg,
+		sizeof(cmd_data))) {
+		pr_err("Failed to copy from user_ptr=%pK size=%zu\n",
+			(void __user *)arg, sizeof(cmd_data));
+		return -EFAULT;
+	}
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		cmd = VIDIOC_CAM_CONTROL;
+		rc = cam_actuator_subdev_ioctl(sd, cmd, &cmd_data);
+		if (rc < 0) {
+			pr_err("%s:%d Failed in actuator subdev handling",
+				__func__, __LINE__);
+			return rc;
+		}
+		break;
+	default:
+		pr_err("%s:%d Invalid compat ioctl: %d\n",
+			__func__, __LINE__, cmd);
+		rc = -EINVAL;
+	}
+
+	if (!rc) {
+		if (copy_to_user((void __user *)arg, &cmd_data,
+			sizeof(cmd_data))) {
+			pr_err("Failed to copy to user_ptr=%pK size=%zu\n",
+				(void __user *)arg, sizeof(cmd_data));
+			rc = -EFAULT;
+		}
+	}
+	return rc;
+}
+
+#endif
+
+static struct v4l2_subdev_core_ops cam_actuator_subdev_core_ops = {
+	.ioctl = cam_actuator_subdev_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl32 = cam_actuator_init_subdev_do_ioctl,
+#endif
+};
+
+static struct v4l2_subdev_ops cam_actuator_subdev_ops = {
+	.core = &cam_actuator_subdev_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops cam_actuator_internal_ops;
+
+static const struct of_device_id cam_actuator_driver_dt_match[] = {
+	{.compatible = "qcom,actuator"},
+	{}
+};
+
+static int32_t cam_actuator_driver_platform_probe(
+	struct platform_device *pdev)
+{
+	int32_t rc = 0, i = 0;
+	struct cam_actuator_ctrl_t *a_ctrl = NULL;
+
+	/* Create sensor control structure */
+	a_ctrl = devm_kzalloc(&pdev->dev,
+		sizeof(struct cam_actuator_ctrl_t), GFP_KERNEL);
+	if (!a_ctrl)
+		return -ENOMEM;
+
+	/* Initialize actuator device type */
+	a_ctrl->of_node = pdev->dev.of_node;
+
+	/*fill in platform device*/
+	a_ctrl->v4l2_dev_str.pdev = pdev;
+
+	a_ctrl->io_master_info.master_type = CCI_MASTER;
+
+	a_ctrl->io_master_info.cci_client = kzalloc(sizeof(
+		struct cam_sensor_cci_client), GFP_KERNEL);
+	if (!(a_ctrl->io_master_info.cci_client))
+		return -ENOMEM;
+
+	a_ctrl->i2c_data.per_frame =
+		(struct i2c_settings_array *)
+		kzalloc(sizeof(struct i2c_settings_array) *
+		MAX_PER_FRAME_ARRAY, GFP_KERNEL);
+	if (a_ctrl->i2c_data.per_frame == NULL) {
+		rc = -ENOMEM;
+		goto free_cci;
+	}
+
+	INIT_LIST_HEAD(&(a_ctrl->i2c_data.init_settings.list_head));
+
+	for (i = 0; i < MAX_PER_FRAME_ARRAY; i++)
+		INIT_LIST_HEAD(&(a_ctrl->i2c_data.per_frame[i].list_head));
+
+	rc = cam_actuator_parse_dt(a_ctrl, &(pdev->dev));
+	if (rc < 0) {
+		pr_err("%s:%d :Error: Parsing actuator dt failed rc %d",
+			__func__, __LINE__, rc);
+		goto free_mem;
+	}
+
+	/* Fill platform device id*/
+	pdev->id = a_ctrl->id;
+
+	a_ctrl->v4l2_dev_str.internal_ops = &cam_actuator_internal_ops;
+	a_ctrl->v4l2_dev_str.ops = &cam_actuator_subdev_ops;
+	strlcpy(a_ctrl->device_name, CAMX_ACTUATOR_DEV_NAME,
+		sizeof(a_ctrl->device_name));
+	a_ctrl->v4l2_dev_str.name = a_ctrl->device_name;
+	a_ctrl->v4l2_dev_str.sd_flags =
+		(V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS);
+	a_ctrl->v4l2_dev_str.ent_function =
+		CAM_ACTUATOR_DEVICE_TYPE;
+	a_ctrl->v4l2_dev_str.token = a_ctrl;
+
+	rc = cam_register_subdev(&(a_ctrl->v4l2_dev_str));
+	if (rc < 0) {
+		pr_err("%s:%d :ERROR: Fail with cam_register_subdev\n",
+			__func__, __LINE__);
+		goto free_mem;
+	}
+
+	a_ctrl->bridge_intf.device_hdl = -1;
+	a_ctrl->bridge_intf.ops.get_dev_info =
+		cam_actuator_publish_dev_info;
+	a_ctrl->bridge_intf.ops.link_setup =
+		cam_actuator_establish_link;
+	a_ctrl->bridge_intf.ops.apply_req =
+		cam_actuator_apply_request;
+
+	platform_set_drvdata(pdev, a_ctrl);
+	v4l2_set_subdevdata(&a_ctrl->v4l2_dev_str.sd, a_ctrl);
+
+	return rc;
+free_mem:
+	kfree(a_ctrl->i2c_data.per_frame);
+free_cci:
+	kfree(a_ctrl->io_master_info.cci_client);
+	devm_kfree(&pdev->dev, a_ctrl);
+	return rc;
+}
+
+MODULE_DEVICE_TABLE(of, cam_actuator_driver_dt_match);
+
+static struct platform_driver cam_actuator_platform_driver = {
+	.probe = cam_actuator_driver_platform_probe,
+	.driver = {
+		.name = "qcom,actuator",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_actuator_driver_dt_match,
+	},
+	.remove = cam_actuator_platform_remove,
+};
+
+static const struct i2c_device_id i2c_id[] = {
+	{ACTUATOR_DRIVER_I2C, (kernel_ulong_t)NULL},
+	{ }
+};
+
+static struct i2c_driver cam_actuator_driver_i2c = {
+	.id_table = i2c_id,
+	.probe  = cam_actuator_driver_i2c_probe,
+	.remove = cam_actuator_driver_i2c_remove,
+	.driver = {
+		.name = ACTUATOR_DRIVER_I2C,
+	},
+};
+
+static int __init cam_actuator_driver_init(void)
+{
+	int32_t rc = 0;
+
+	rc = platform_driver_register(&cam_actuator_platform_driver);
+	if (rc < 0) {
+		pr_err("%s platform_driver_register failed rc = %d",
+			__func__, rc);
+		return rc;
+	}
+	rc = i2c_add_driver(&cam_actuator_driver_i2c);
+	if (rc)
+		pr_err("%s:%d :Error: i2c_add_driver failed rc = %d",
+			__func__, __LINE__, rc);
+
+	return rc;
+}
+
+static void __exit cam_actuator_driver_exit(void)
+{
+	platform_driver_unregister(&cam_actuator_platform_driver);
+	i2c_del_driver(&cam_actuator_driver_i2c);
+}
+
+module_init(cam_actuator_driver_init);
+module_exit(cam_actuator_driver_exit);
+MODULE_DESCRIPTION("cam_actuator_driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
new file mode 100644
index 0000000..22ef29e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
@@ -0,0 +1,125 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_ACTUATOR_DEV_H_
+#define _CAM_ACTUATOR_DEV_H_
+
+#include <cam_sensor_io.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/irqreturn.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <cam_cci_dev.h>
+#include <cam_sensor_cmn_header.h>
+#include <cam_subdev.h>
+#include "cam_sensor_util.h"
+#include "cam_sensor_soc_api.h"
+
+#define NUM_MASTERS 2
+#define NUM_QUEUES 2
+
+#define TRUE  1
+#define FALSE 0
+
+#undef CDBG
+#ifdef CAM_SENSOR_DEBUG
+#define CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+#define ACTUATOR_DRIVER_I2C "i2c_actuator"
+#define CAMX_ACTUATOR_DEV_NAME "cam-actuator-driver"
+
+#define MSM_ACTUATOR_MAX_VREGS (10)
+#define ACTUATOR_MAX_POLL_COUNT 10
+
+
+enum msm_actuator_state_t {
+	ACT_APPLY_SETTINGS_NOW,
+	ACT_APPLY_SETTINGS_LATER,
+};
+
+/**
+ * struct cam_actuator_vreg
+ * @cam_vreg: Regulator structure
+ * @data: Regulator data
+ * @num_vreg: Number of regulators
+ */
+struct cam_actuator_vreg {
+	struct camera_vreg_t *cam_vreg;
+	void *data[MSM_ACTUATOR_MAX_VREGS];
+	int num_vreg;
+};
+
+/**
+ * struct intf_params
+ * @device_hdl: Device Handle
+ * @session_hdl: Session Handle
+ * @ops: KMD operations
+ * @crm_cb: Callback API pointers
+ */
+struct intf_params {
+	int32_t device_hdl;
+	int32_t session_hdl;
+	int32_t link_hdl;
+	struct cam_req_mgr_kmd_ops ops;
+	struct cam_req_mgr_crm_cb *crm_cb;
+};
+
+/**
+ * struct cam_actuator_ctrl_t
+ * @i2c_driver: I2C device info
+ * @pdev: Platform device
+ * @cci_i2c_master: I2C structure
+ * @io_master_info: Information about the communication master
+ * @actuator_mutex: Actuator mutex
+ * @id: Cell Index
+ * @act_apply_state: Actuator apply state; @vreg_cfg: Regulator config
+ * @gconf: GPIO config
+ * @pinctrl_info: Pinctrl information
+ * @v4l2_dev_str: V4L2 device structure
+ * @i2c_data: I2C register settings structure
+ * @act_info: Sensor query cap structure
+ * @of_node: Node ptr
+ * @device_name: Device name
+ */
+struct cam_actuator_ctrl_t {
+	struct i2c_driver *i2c_driver;
+	enum cci_i2c_master_t cci_i2c_master;
+	struct camera_io_master io_master_info;
+	struct mutex actuator_mutex;
+	uint32_t id;
+	enum msm_actuator_state_t act_apply_state;
+	struct cam_actuator_vreg vreg_cfg;
+	struct msm_camera_gpio_conf *gconf;
+	struct msm_pinctrl_info pinctrl_info;
+	uint8_t cam_pinctrl_status;
+	struct cam_subdev v4l2_dev_str;
+	struct i2c_data_settings i2c_data;
+	struct cam_actuator_query_cap act_info;
+	struct intf_params bridge_intf;
+	struct device_node *of_node;
+	char device_name[20];
+};
+
+#endif /* _CAM_ACTUATOR_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c
new file mode 100644
index 0000000..767f3b0
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c
@@ -0,0 +1,77 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_actuator_soc.h"
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <cam_sensor_cmn_header.h>
+#include <cam_sensor_util.h>
+#include <cam_sensor_io.h>
+#include <cam_req_mgr_util.h>
+
+int32_t cam_actuator_parse_dt(struct cam_actuator_ctrl_t *a_ctrl,
+	struct device *dev)
+{
+	int32_t                   rc = 0;
+	struct cam_actuator_vreg *vreg_cfg;
+
+	/* Initialize mutex */
+	mutex_init(&(a_ctrl->actuator_mutex));
+
+	rc = of_property_read_u32(a_ctrl->of_node, "cell-index",
+		&(a_ctrl->id));
+	CDBG("cell-index %d, rc %d\n", a_ctrl->id, rc);
+	if (rc < 0) {
+		pr_err("%s:%d :Error: parsing dt for cellindex rc %d\n",
+			__func__, __LINE__, rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(a_ctrl->of_node, "qcom,cci-master",
+		&(a_ctrl->cci_i2c_master));
+	CDBG("qcom,cci-master %d, rc %d\n", a_ctrl->cci_i2c_master, rc);
+	if (rc < 0 || a_ctrl->cci_i2c_master >= MASTER_MAX) {
+		pr_err("%s:%d :Error: Wrong info from dt CCI master as : %d\n",
+			__func__, __LINE__, a_ctrl->cci_i2c_master);
+		return rc ? rc : -EINVAL;
+	}
+
+	if (of_find_property(a_ctrl->of_node,
+			"qcom,cam-vreg-name", NULL)) {
+		vreg_cfg = &(a_ctrl->vreg_cfg);
+		rc = cam_sensor_get_dt_vreg_data(dev->of_node,
+			&vreg_cfg->cam_vreg, &vreg_cfg->num_vreg);
+		if (rc < 0) {
+			pr_err("%s:%d :Error: parsing regulator dt: %d\n",
+				__func__, __LINE__, rc);
+			return rc;
+		}
+	}
+	rc = msm_sensor_driver_get_gpio_data(&(a_ctrl->gconf),
+		a_ctrl->of_node);
+	if (rc < 0) {
+		pr_err("%s:%d No/Error Actuator GPIOs\n",
+			__func__, __LINE__);
+	} else {
+		a_ctrl->cam_pinctrl_status = 1;
+		rc = msm_camera_pinctrl_init(
+			&(a_ctrl->pinctrl_info), dev);
+		if (rc < 0) {
+			pr_err("ERR:%s: Error in reading actuator pinctrl\n",
+				__func__);
+			a_ctrl->cam_pinctrl_status = 0;
+			rc = 0;
+		}
+	}
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.h
new file mode 100644
index 0000000..05d51f4
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.h
@@ -0,0 +1,26 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_ACTUATOR_SOC_H_
+#define _CAM_ACTUATOR_SOC_H_
+
+#include "cam_actuator_dev.h"
+
+/**
+ * @a_ctrl: Actuator ctrl structure
+ *
+ * This API parses actuator device tree
+ */
+int cam_actuator_parse_dt(struct cam_actuator_ctrl_t *a_ctrl,
+	struct device *dev);
+
+#endif /* _CAM_ACTUATOR_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/Makefile
new file mode 100644
index 0000000..57dfed5
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/Makefile
@@ -0,0 +1,7 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_cci_dev.o cam_cci_core.o cam_cci_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
new file mode 100644
index 0000000..746b786
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
@@ -0,0 +1,1303 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include "cam_cci_core.h"
+#include "cam_cci_dev.h"
+
+static int32_t cam_cci_convert_type_to_num_bytes(
+	enum camera_sensor_i2c_type type)
+{
+	int32_t num_bytes;
+
+	switch (type) {
+	case CAMERA_SENSOR_I2C_TYPE_BYTE:
+		num_bytes = 1;
+		break;
+	case CAMERA_SENSOR_I2C_TYPE_WORD:
+		num_bytes = 2;
+		break;
+	case CAMERA_SENSOR_I2C_TYPE_3B:
+		num_bytes = 3;
+		break;
+	case CAMERA_SENSOR_I2C_TYPE_DWORD:
+		num_bytes = 4;
+		break;
+	default:
+		pr_err("%s: %d failed: %d\n", __func__, __LINE__, type);
+		num_bytes = 0;
+		break;
+	}
+	return num_bytes;
+}
+
+static void cam_cci_flush_queue(struct cci_device *cci_dev,
+	enum cci_i2c_master_t master)
+{
+	int32_t rc = 0;
+
+	cam_io_w_mb(1 << master, cci_dev->base + CCI_HALT_REQ_ADDR);
+	rc = wait_for_completion_timeout(
+		&cci_dev->cci_master_info[master].reset_complete, CCI_TIMEOUT);
+	if (rc < 0) {
+		pr_err("%s:%d wait failed\n", __func__, __LINE__);
+	} else if (rc == 0) {
+		pr_err("%s:%d wait timeout\n", __func__, __LINE__);
+
+		/* Set reset pending flag to TRUE */
+		cci_dev->cci_master_info[master].reset_pending = TRUE;
+
+		/* Set proper mask to RESET CMD address based on MASTER */
+		if (master == MASTER_0)
+			cam_io_w_mb(CCI_M0_RESET_RMSK,
+				cci_dev->base + CCI_RESET_CMD_ADDR);
+		else
+			cam_io_w_mb(CCI_M1_RESET_RMSK,
+				cci_dev->base + CCI_RESET_CMD_ADDR);
+
+		/* wait for reset done irq */
+		rc = wait_for_completion_timeout(
+			&cci_dev->cci_master_info[master].reset_complete,
+			CCI_TIMEOUT);
+		if (rc <= 0)
+			pr_err("%s:%d wait failed %d\n", __func__, __LINE__,
+				rc);
+	}
+}
+
+static int32_t cam_cci_validate_queue(struct cci_device *cci_dev,
+	uint32_t len,
+	enum cci_i2c_master_t master,
+	enum cci_i2c_queue_t queue)
+{
+	int32_t rc = 0;
+	uint32_t read_val = 0;
+	uint32_t reg_offset = master * 0x200 + queue * 0x100;
+
+	read_val = cam_io_r_mb(cci_dev->base +
+		CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
+	CDBG("%s line %d CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR %d len %d max %d\n",
+		__func__, __LINE__, read_val, len,
+		cci_dev->cci_i2c_queue_info[master][queue].max_queue_size);
+	if ((read_val + len + 1) > cci_dev->
+		cci_i2c_queue_info[master][queue].max_queue_size) {
+		uint32_t reg_val = 0;
+		uint32_t report_val = CCI_I2C_REPORT_CMD | (1 << 8);
+
+		CDBG("%s:%d CCI_I2C_REPORT_CMD\n", __func__, __LINE__);
+		cam_io_w_mb(report_val,
+			cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+			reg_offset);
+		read_val++;
+		CDBG("%s:%d CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR %d, queue: %d\n",
+			__func__, __LINE__, read_val, queue);
+		cam_io_w_mb(read_val, cci_dev->base +
+			CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
+		reg_val = 1 << ((master * 2) + queue);
+		CDBG("%s:%d CCI_QUEUE_START_ADDR\n", __func__, __LINE__);
+		atomic_set(&cci_dev->cci_master_info[master].
+						done_pending[queue], 1);
+		cam_io_w_mb(reg_val, cci_dev->base +
+			CCI_QUEUE_START_ADDR);
+		CDBG("%s line %d wait_for_completion_timeout\n",
+			__func__, __LINE__);
+		atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 1);
+		rc = wait_for_completion_timeout(&cci_dev->
+			cci_master_info[master].report_q[queue], CCI_TIMEOUT);
+		if (rc <= 0) {
+			pr_err("%s: wait_for_completion_timeout %d\n",
+				 __func__, __LINE__);
+			if (rc == 0)
+				rc = -ETIMEDOUT;
+			cam_cci_flush_queue(cci_dev, master);
+			return rc;
+		}
+		rc = cci_dev->cci_master_info[master].status;
+		if (rc < 0)
+			pr_err("%s failed rc %d\n", __func__, rc);
+	}
+
+	return rc;
+}
+
+static int32_t cam_cci_write_i2c_queue(struct cci_device *cci_dev,
+	uint32_t val,
+	enum cci_i2c_master_t master,
+	enum cci_i2c_queue_t queue)
+{
+	int32_t rc = 0;
+	uint32_t reg_offset = master * 0x200 + queue * 0x100;
+
+	if (!cci_dev) {
+		pr_err("%s: failed %d", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	rc = cam_cci_validate_queue(cci_dev, 1, master, queue);
+	if (rc < 0) {
+		pr_err("%s: failed %d", __func__, __LINE__);
+		return rc;
+	}
+	CDBG("%s CCI_I2C_M0_Q0_LOAD_DATA_ADDR:val 0x%x:0x%x\n",
+		__func__, CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+		reg_offset, val);
+	cam_io_w_mb(val, cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+		reg_offset);
+	return rc;
+}
+
+static int32_t cam_cci_lock_queue(struct cci_device *cci_dev,
+	enum cci_i2c_master_t master,
+	enum cci_i2c_queue_t queue, uint32_t en)
+{
+	uint32_t val;
+
+	if (queue != PRIORITY_QUEUE)
+		return 0;
+
+	val = en ? CCI_I2C_LOCK_CMD : CCI_I2C_UNLOCK_CMD;
+	return cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+}
+
+#ifdef DUMP_CCI_REGISTERS
+static void cam_cci_dump_registers(struct cci_device *cci_dev,
+	enum cci_i2c_master_t master, enum cci_i2c_queue_t queue)
+{
+	uint32_t read_val = 0;
+	uint32_t i = 0;
+	uint32_t reg_offset = 0;
+
+	/* CCI Top Registers */
+	CCI_DBG(" **** %s : %d CCI TOP Registers ****\n", __func__, __LINE__);
+	for (i = 0; i < DEBUG_TOP_REG_COUNT; i++) {
+		reg_offset = DEBUG_TOP_REG_START + i * 4;
+		read_val = cam_io_r_mb(cci_dev->base + reg_offset);
+		CCI_DBG("%s : %d offset = 0x%X value = 0x%X\n",
+			__func__, __LINE__, reg_offset, read_val);
+	}
+
+	/* CCI Master registers */
+	CCI_DBG(" **** %s : %d CCI MASTER%d Registers ****\n",
+		__func__, __LINE__, master);
+	for (i = 0; i < DEBUG_MASTER_REG_COUNT; i++) {
+		if (i == 6)
+			continue;
+		reg_offset = DEBUG_MASTER_REG_START + master*0x100 + i * 4;
+		read_val = cam_io_r_mb(cci_dev->base + reg_offset);
+		CCI_DBG("%s : %d offset = 0x%X value = 0x%X\n",
+			__func__, __LINE__, reg_offset, read_val);
+	}
+
+	/* CCI Master Queue registers */
+	CCI_DBG(" **** %s : %d CCI MASTER%d QUEUE%d Registers ****\n",
+		__func__, __LINE__, master, queue);
+	for (i = 0; i < DEBUG_MASTER_QUEUE_REG_COUNT; i++) {
+		reg_offset = DEBUG_MASTER_QUEUE_REG_START +  master*0x200 +
+			queue*0x100 + i * 4;
+		read_val = cam_io_r_mb(cci_dev->base + reg_offset);
+		CCI_DBG("%s : %d offset = 0x%X value = 0x%X\n",
+			__func__, __LINE__, reg_offset, read_val);
+	}
+
+	/* CCI Interrupt registers */
+	CCI_DBG(" **** %s : %d CCI Interrupt Registers ****\n",
+		__func__, __LINE__);
+	for (i = 0; i < DEBUG_INTR_REG_COUNT; i++) {
+		reg_offset = DEBUG_INTR_REG_START + i * 4;
+		read_val = cam_io_r_mb(cci_dev->base + reg_offset);
+		CCI_DBG("%s : %d offset = 0x%X value = 0x%X\n",
+			__func__, __LINE__, reg_offset, read_val);
+	}
+}
+#endif
+
+static uint32_t cam_cci_wait(struct cci_device *cci_dev,
+	enum cci_i2c_master_t master,
+	enum cci_i2c_queue_t queue)
+{
+	int32_t rc = 0;
+
+	if (!cci_dev) {
+		pr_err("%s: failed %d", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	rc = wait_for_completion_timeout(&cci_dev->
+		cci_master_info[master].report_q[queue], CCI_TIMEOUT);
+	CDBG("%s line %d wait DONE_for_completion_timeout\n",
+		__func__, __LINE__);
+
+	if (rc <= 0) {
+#ifdef DUMP_CCI_REGISTERS
+		cam_cci_dump_registers(cci_dev, master, queue);
+#endif
+		pr_err("%s: %d wait for queue: %d\n",
+			 __func__, __LINE__, queue);
+		if (rc == 0)
+			rc = -ETIMEDOUT;
+		cam_cci_flush_queue(cci_dev, master);
+		return rc;
+	}
+	rc = cci_dev->cci_master_info[master].status;
+	if (rc < 0) {
+		pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static void cam_cci_load_report_cmd(struct cci_device *cci_dev,
+	enum cci_i2c_master_t master,
+	enum cci_i2c_queue_t queue)
+{
+	uint32_t reg_offset = master * 0x200 + queue * 0x100;
+	uint32_t read_val = cam_io_r_mb(cci_dev->base +
+		CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
+	uint32_t report_val = CCI_I2C_REPORT_CMD | (1 << 8);
+
+	CDBG("%s:%d CCI_I2C_REPORT_CMD curr_w_cnt: %d\n",
+		__func__, __LINE__, read_val);
+	cam_io_w_mb(report_val,
+		cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+		reg_offset);
+	read_val++;
+
+	CDBG("%s:%d CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR %d\n",
+		__func__, __LINE__, read_val);
+	cam_io_w_mb(read_val, cci_dev->base +
+		CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
+}
+
+static int32_t cam_cci_wait_report_cmd(struct cci_device *cci_dev,
+	enum cci_i2c_master_t master,
+	enum cci_i2c_queue_t queue)
+{
+	uint32_t reg_val = 1 << ((master * 2) + queue);
+
+	cam_cci_load_report_cmd(cci_dev, master, queue);
+	atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 1);
+	atomic_set(&cci_dev->cci_master_info[master].done_pending[queue], 1);
+	cam_io_w_mb(reg_val, cci_dev->base +
+		CCI_QUEUE_START_ADDR);
+
+	return cam_cci_wait(cci_dev, master, queue);
+}
+
+static int32_t cam_cci_transfer_end(struct cci_device *cci_dev,
+	enum cci_i2c_master_t master,
+	enum cci_i2c_queue_t queue)
+{
+	int32_t rc = 0;
+
+	if (atomic_read(&cci_dev->cci_master_info[master].q_free[queue]) == 0) {
+		rc = cam_cci_lock_queue(cci_dev, master, queue, 0);
+		if (rc < 0) {
+			pr_err("%s failed line %d\n", __func__, __LINE__);
+			return rc;
+		}
+		rc = cam_cci_wait_report_cmd(cci_dev, master, queue);
+		if (rc < 0) {
+			pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+			return rc;
+		}
+	} else {
+		atomic_set(&cci_dev->cci_master_info[master].
+						done_pending[queue], 1);
+		rc = cam_cci_wait(cci_dev, master, queue);
+		if (rc < 0) {
+			pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+			return rc;
+		}
+		rc = cam_cci_lock_queue(cci_dev, master, queue, 0);
+		if (rc < 0) {
+			pr_err("%s failed line %d\n", __func__, __LINE__);
+			return rc;
+		}
+		rc = cam_cci_wait_report_cmd(cci_dev, master, queue);
+		if (rc < 0) {
+			pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int32_t cam_cci_get_queue_free_size(struct cci_device *cci_dev,
+	enum cci_i2c_master_t master,
+	enum cci_i2c_queue_t queue)
+{
+	uint32_t read_val = 0;
+	uint32_t reg_offset = master * 0x200 + queue * 0x100;
+
+	read_val = cam_io_r_mb(cci_dev->base +
+		CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
+	CDBG("%s line %d CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR %d max %d\n",
+		__func__, __LINE__, read_val,
+		cci_dev->cci_i2c_queue_info[master][queue].max_queue_size);
+	return (cci_dev->
+		cci_i2c_queue_info[master][queue].max_queue_size) -
+		read_val;
+}
+
+static void cam_cci_process_half_q(struct cci_device *cci_dev,
+	enum cci_i2c_master_t master,
+	enum cci_i2c_queue_t queue)
+{
+	uint32_t reg_val = 1 << ((master * 2) + queue);
+
+	if (atomic_read(&cci_dev->cci_master_info[master].q_free[queue]) == 0) {
+		cam_cci_load_report_cmd(cci_dev, master, queue);
+		atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 1);
+		cam_io_w_mb(reg_val, cci_dev->base +
+			CCI_QUEUE_START_ADDR);
+	}
+}
+
+static int32_t cam_cci_process_full_q(struct cci_device *cci_dev,
+	enum cci_i2c_master_t master,
+	enum cci_i2c_queue_t queue)
+{
+	int32_t rc = 0;
+
+	if (atomic_read(&cci_dev->cci_master_info[master].q_free[queue]) == 1) {
+		atomic_set(&cci_dev->cci_master_info[master].
+						done_pending[queue], 1);
+		rc = cam_cci_wait(cci_dev, master, queue);
+		if (rc < 0) {
+			pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+			return rc;
+		}
+	} else {
+		rc = cam_cci_wait_report_cmd(cci_dev, master, queue);
+		if (rc < 0) {
+			pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int32_t cam_cci_calc_cmd_len(struct cci_device *cci_dev,
+	struct cam_cci_ctrl *c_ctrl, uint32_t cmd_size,
+	 struct cam_sensor_i2c_reg_array *i2c_cmd, uint32_t *pack)
+{
+	uint8_t i;
+	uint32_t len = 0;
+	uint8_t data_len = 0, addr_len = 0;
+	uint8_t pack_max_len;
+	struct cam_sensor_i2c_reg_setting *msg;
+	struct cam_sensor_i2c_reg_array *cmd = i2c_cmd;
+	uint32_t size = cmd_size;
+
+	if (!cci_dev || !c_ctrl) {
+		pr_err("%s: failed %d", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	msg = &c_ctrl->cfg.cci_i2c_write_cfg;
+	*pack = 0;
+
+	if (c_ctrl->cmd == MSM_CCI_I2C_WRITE_SEQ) {
+		addr_len = cam_cci_convert_type_to_num_bytes(msg->addr_type);
+		len = (size + addr_len) <= (cci_dev->payload_size) ?
+			(size + addr_len):cci_dev->payload_size;
+	} else {
+		addr_len = cam_cci_convert_type_to_num_bytes(msg->addr_type);
+		data_len = cam_cci_convert_type_to_num_bytes(msg->data_type);
+		len = data_len + addr_len;
+		pack_max_len = size < (cci_dev->payload_size-len) ?
+			size : (cci_dev->payload_size-len);
+		for (i = 0; i < pack_max_len;) {
+			if (cmd->delay || ((cmd - i2c_cmd) >= (cmd_size - 1)))
+				break;
+			if (cmd->reg_addr + 1 ==
+				(cmd+1)->reg_addr) {
+				len += data_len;
+				*pack += data_len;
+			} else {
+				break;
+			}
+			i += data_len;
+			cmd++;
+		}
+	}
+
+	if (len > cci_dev->payload_size) {
+		pr_err("%s: %d Len error: %d",
+			__func__, __LINE__, len);
+		return -EINVAL;
+	}
+
+	len += 1; /*add i2c WR command*/
+	len = len/4 + 1;
+
+	return len;
+}
+
+static uint32_t cam_cci_cycles_per_ms(unsigned long clk)
+{
+	uint32_t cycles_per_us;
+
+	if (clk) {
+		cycles_per_us = ((clk/1000)*256)/1000;
+	} else {
+		pr_err("%s:%d, failed: Can use default: %d",
+			__func__, __LINE__, CYCLES_PER_MICRO_SEC_DEFAULT);
+		cycles_per_us = CYCLES_PER_MICRO_SEC_DEFAULT;
+	}
+
+	return cycles_per_us;
+}
+
+/*
+ * cam_cci_get_clk_rates() - pick the clock-rate table row for a request.
+ *
+ * Selects the row of cci_clk_rates[] whose src-clock entry matches the
+ * rate required by the requested I2C frequency mode, and caches the
+ * derived cycles_per_us (used later for WAIT command programming).
+ * Returns NULL for an invalid frequency mode or when no row matches.
+ */
+uint32_t *cam_cci_get_clk_rates(struct cci_device *cci_dev,
+	struct cam_cci_ctrl *c_ctrl)
+{
+	uint32_t j;
+	int32_t idx;
+	uint32_t cci_clk_src;
+	unsigned long clk;
+	struct cam_cci_clk_params_t *clk_params = NULL;
+	struct device_node *of_node = cci_dev->v4l2_dev_str.pdev->dev.of_node;
+	enum i2c_freq_mode i2c_freq_mode = c_ctrl->cci_info->i2c_freq_mode;
+
+	if (i2c_freq_mode >= I2C_MAX_MODES ||
+		i2c_freq_mode < I2C_STANDARD_MODE) {
+		pr_err("%s:%d Invalid frequency mode: %d\n",
+			__func__, __LINE__, (int32_t)i2c_freq_mode);
+		return NULL;
+	}
+
+	clk_params = &cci_dev->cci_clk_params[i2c_freq_mode];
+	cci_clk_src = clk_params->cci_clk_src;
+
+	/* No src clock named in DT: use the default table row and a
+	 * default cycle count.
+	 */
+	idx = of_property_match_string(of_node,
+		"clock-names", CCI_CLK_SRC_NAME);
+	if (idx < 0) {
+		cci_dev->cycles_per_us = CYCLES_PER_MICRO_SEC_DEFAULT;
+		return cci_dev->cci_clk_rates[0];
+	}
+
+	/* Frequency mode does not pin a specific src rate: row 0 applies. */
+	if (cci_clk_src == 0) {
+		clk = cci_dev->cci_clk_rates[0][idx];
+		cci_dev->cycles_per_us = cam_cci_cycles_per_ms(clk);
+		return cci_dev->cci_clk_rates[0];
+	}
+
+	/* Otherwise scan every clock case for a matching src rate. */
+	CDBG("%s:%d CCI: 3 cases:%d idx: %d\n", __func__,
+		__LINE__, (int32_t)cci_dev->num_clk_cases, idx);
+	for (j = 0; j < cci_dev->num_clk_cases; j++) {
+		clk = cci_dev->cci_clk_rates[j][idx];
+		if (clk == cci_clk_src) {
+			cci_dev->cycles_per_us = cam_cci_cycles_per_ms(clk);
+			cci_dev->cci_clk_src = cci_clk_src;
+			return cci_dev->cci_clk_rates[j];
+		}
+	}
+
+	return NULL;
+}
+
+/*
+ * cam_cci_set_clk_param() - program per-master I2C timing registers.
+ *
+ * Writes the SCL/SDA timing and MISC control registers for the given
+ * master from the cached clk params of the requested frequency mode.
+ * Skips the register writes entirely if the master is already configured
+ * for that mode.  Returns 0 on success, -EINVAL for a bad mode.
+ */
+static int32_t cam_cci_set_clk_param(struct cci_device *cci_dev,
+	struct cam_cci_ctrl *c_ctrl)
+{
+	struct cam_cci_clk_params_t *clk_params = NULL;
+	enum cci_i2c_master_t master = c_ctrl->cci_info->cci_i2c_master;
+	enum i2c_freq_mode i2c_freq_mode = c_ctrl->cci_info->i2c_freq_mode;
+
+	if ((i2c_freq_mode >= I2C_MAX_MODES) || (i2c_freq_mode < 0)) {
+		pr_err("%s:%d invalid i2c_freq_mode = %d",
+			__func__, __LINE__, i2c_freq_mode);
+		return -EINVAL;
+	}
+
+	clk_params = &cci_dev->cci_clk_params[i2c_freq_mode];
+
+	/* Already running at the requested mode: nothing to reprogram. */
+	if (cci_dev->i2c_freq_mode[master] == i2c_freq_mode)
+		return 0;
+	if (master == MASTER_0) {
+		cam_io_w_mb(clk_params->hw_thigh << 16 |
+			clk_params->hw_tlow,
+			cci_dev->base + CCI_I2C_M0_SCL_CTL_ADDR);
+		cam_io_w_mb(clk_params->hw_tsu_sto << 16 |
+			clk_params->hw_tsu_sta,
+			cci_dev->base + CCI_I2C_M0_SDA_CTL_0_ADDR);
+		cam_io_w_mb(clk_params->hw_thd_dat << 16 |
+			clk_params->hw_thd_sta,
+			cci_dev->base + CCI_I2C_M0_SDA_CTL_1_ADDR);
+		cam_io_w_mb(clk_params->hw_tbuf,
+			cci_dev->base + CCI_I2C_M0_SDA_CTL_2_ADDR);
+		cam_io_w_mb(clk_params->hw_scl_stretch_en << 8 |
+			clk_params->hw_trdhld << 4 | clk_params->hw_tsp,
+			cci_dev->base + CCI_I2C_M0_MISC_CTL_ADDR);
+	} else if (master == MASTER_1) {
+		/* Same sequence as MASTER_0, mirrored onto the M1 bank. */
+		cam_io_w_mb(clk_params->hw_thigh << 16 |
+			clk_params->hw_tlow,
+			cci_dev->base + CCI_I2C_M1_SCL_CTL_ADDR);
+		cam_io_w_mb(clk_params->hw_tsu_sto << 16 |
+			clk_params->hw_tsu_sta,
+			cci_dev->base + CCI_I2C_M1_SDA_CTL_0_ADDR);
+		cam_io_w_mb(clk_params->hw_thd_dat << 16 |
+			clk_params->hw_thd_sta,
+			cci_dev->base + CCI_I2C_M1_SDA_CTL_1_ADDR);
+		cam_io_w_mb(clk_params->hw_tbuf,
+			cci_dev->base + CCI_I2C_M1_SDA_CTL_2_ADDR);
+		cam_io_w_mb(clk_params->hw_scl_stretch_en << 8 |
+			clk_params->hw_trdhld << 4 | clk_params->hw_tsp,
+			cci_dev->base + CCI_I2C_M1_MISC_CTL_ADDR);
+	}
+	cci_dev->i2c_freq_mode[master] = i2c_freq_mode;
+
+	return 0;
+}
+
+/*
+ * cam_cci_data_queue() - pack an I2C write request into a CCI HW queue.
+ *
+ * Converts the reg_setting array of c_ctrl into CCI WRITE commands,
+ * packing consecutive-address writes into shared payloads, and loads
+ * them into the given master/queue via LOAD_DATA.  Handles half/full
+ * queue draining, optional WAIT_SYNC prefix, burst (seq) writes and
+ * per-command delays.  Returns 0 on success or a negative errno.
+ *
+ * NOTE(review): the exact LOAD_DATA / EXEC_WORD_CNT write ordering below
+ * is hardware-mandated; do not reorder.
+ */
+static int32_t cam_cci_data_queue(struct cci_device *cci_dev,
+	struct cam_cci_ctrl *c_ctrl, enum cci_i2c_queue_t queue,
+	enum cci_i2c_sync sync_en)
+{
+	uint16_t i = 0, j = 0, k = 0, h = 0, len = 0;
+	int32_t rc = 0, free_size = 0, en_seq_write = 0;
+	uint8_t data[12];
+	struct cam_sensor_i2c_reg_setting *i2c_msg =
+		&c_ctrl->cfg.cci_i2c_write_cfg;
+	struct cam_sensor_i2c_reg_array *i2c_cmd = i2c_msg->reg_setting;
+	enum cci_i2c_master_t master = c_ctrl->cci_info->cci_i2c_master;
+	uint16_t reg_addr = 0, cmd_size = i2c_msg->size;
+	uint32_t read_val = 0, reg_offset, val, delay = 0;
+	uint32_t max_queue_size, queue_size = 0, cmd = 0;
+
+	if (i2c_cmd == NULL) {
+		pr_err("%s:%d Failed line\n", __func__,
+			__LINE__);
+		return -EINVAL;
+	}
+
+	if ((!cmd_size) || (cmd_size > CCI_I2C_MAX_WRITE)) {
+		pr_err("%s:%d failed: invalid cmd_size %d\n",
+			__func__, __LINE__, cmd_size);
+		return -EINVAL;
+	}
+
+	CDBG("%s addr type %d data type %d cmd_size %d\n", __func__,
+		i2c_msg->addr_type, i2c_msg->data_type, cmd_size);
+
+	if (i2c_msg->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
+		pr_err("%s:%d failed: invalid addr_type 0x%X\n",
+			__func__, __LINE__, i2c_msg->addr_type);
+		return -EINVAL;
+	}
+	if (i2c_msg->data_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
+		pr_err("%s:%d failed: invalid data_type 0x%X\n",
+			__func__, __LINE__, i2c_msg->data_type);
+		return -EINVAL;
+	}
+	/* Each master owns a 0x200 register bank; each queue a 0x100 slice. */
+	reg_offset = master * 0x200 + queue * 0x100;
+
+	cam_io_w_mb(cci_dev->cci_wait_sync_cfg.cid,
+		cci_dev->base + CCI_SET_CID_SYNC_TIMER_ADDR +
+		cci_dev->cci_wait_sync_cfg.csid *
+		CCI_SET_CID_SYNC_TIMER_OFFSET);
+
+	/* SET_PARAM carries slave id, retry count and id-map selection. */
+	val = CCI_I2C_SET_PARAM_CMD | c_ctrl->cci_info->sid << 4 |
+		c_ctrl->cci_info->retries << 16 |
+		c_ctrl->cci_info->id_map << 18;
+
+	CDBG("%s CCI_I2C_M0_Q0_LOAD_DATA_ADDR:val 0x%x:0x%x\n",
+		__func__, CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+		reg_offset, val);
+	cam_io_w_mb(val, cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+		reg_offset);
+
+	atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 0);
+
+	max_queue_size = cci_dev->cci_i2c_queue_info[master][queue].
+			max_queue_size;
+
+	/* Seq writes may fill the whole queue; others drain at half. */
+	if (c_ctrl->cmd == MSM_CCI_I2C_WRITE_SEQ)
+		queue_size = max_queue_size;
+	else
+		queue_size = max_queue_size/2;
+	reg_addr = i2c_cmd->reg_addr;
+
+	if (sync_en == MSM_SYNC_ENABLE && cci_dev->valid_sync &&
+		cmd_size < max_queue_size) {
+		val = CCI_I2C_WAIT_SYNC_CMD |
+			((cci_dev->cci_wait_sync_cfg.line) << 4);
+		cam_io_w_mb(val,
+			cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+			reg_offset);
+	}
+
+	rc = cam_cci_lock_queue(cci_dev, master, queue, 1);
+	if (rc < 0) {
+		pr_err("%s failed line %d\n", __func__, __LINE__);
+		return rc;
+	}
+
+	while (cmd_size) {
+		uint32_t pack = 0;
+
+		len = cam_cci_calc_cmd_len(cci_dev, c_ctrl, cmd_size,
+			i2c_cmd, &pack);
+		if (len <= 0) {
+			pr_err("%s failed line %d\n", __func__, __LINE__);
+			return -EINVAL;
+		}
+
+		read_val = cam_io_r_mb(cci_dev->base +
+			CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
+		CDBG("%s line %d CUR_WORD_CNT_ADDR %d len %d max %d\n",
+			__func__, __LINE__, read_val, len, max_queue_size);
+		/* + 1 - space alocation for Report CMD */
+		if ((read_val + len + 1) > queue_size) {
+			if ((read_val + len + 1) > max_queue_size) {
+				rc = cam_cci_process_full_q(cci_dev,
+					master, queue);
+				if (rc < 0) {
+					pr_err("%s failed line %d\n",
+						__func__, __LINE__);
+					return rc;
+				}
+				continue;
+			}
+			cam_cci_process_half_q(cci_dev, master, queue);
+		}
+
+		CDBG("%s cmd_size %d addr 0x%x data 0x%x\n", __func__,
+			cmd_size, i2c_cmd->reg_addr, i2c_cmd->reg_data);
+		delay = i2c_cmd->delay;
+		i = 0;
+		data[i++] = CCI_I2C_WRITE_CMD;
+
+		/*
+		 * in case of multiple command
+		 * MSM_CCI_I2C_WRITE : address is not continuous, so update
+		 *	address for a new packet.
+		 * MSM_CCI_I2C_WRITE_SEQ : address is continuous, need to keep
+		 *	the incremented address for a
+		 *	new packet
+		 */
+		if (c_ctrl->cmd == MSM_CCI_I2C_WRITE ||
+			c_ctrl->cmd == MSM_CCI_I2C_WRITE_ASYNC ||
+			c_ctrl->cmd == MSM_CCI_I2C_WRITE_SYNC ||
+			c_ctrl->cmd == MSM_CCI_I2C_WRITE_SYNC_BLOCK)
+			reg_addr = i2c_cmd->reg_addr;
+
+		/* In burst (seq-write) continuation the address bytes are
+		 * omitted; hardware auto-increments.
+		 */
+		if (en_seq_write == 0) {
+			/* either byte or word addr */
+			if (i2c_msg->addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE)
+				data[i++] = reg_addr;
+			else {
+				data[i++] = (reg_addr & 0xFF00) >> 8;
+				data[i++] = reg_addr & 0x00FF;
+			}
+		}
+		/* max of 10 data bytes */
+		do {
+			if (i2c_msg->data_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+				data[i++] = i2c_cmd->reg_data;
+				reg_addr++;
+			} else {
+				if ((i + 1) <= cci_dev->payload_size) {
+					data[i++] = (i2c_cmd->reg_data &
+						0xFF00) >> 8; /* MSB */
+					data[i++] = i2c_cmd->reg_data &
+						0x00FF; /* LSB */
+					reg_addr++;
+				} else
+					break;
+			}
+			i2c_cmd++;
+			--cmd_size;
+		} while (((c_ctrl->cmd == MSM_CCI_I2C_WRITE_SEQ) || pack--) &&
+				(cmd_size > 0) && (i <= cci_dev->payload_size));
+		free_size = cam_cci_get_queue_free_size(cci_dev, master,
+				queue);
+		/* 0xF0 flags a full-size burst payload with continuation;
+		 * otherwise the payload length goes in bits [7:4].
+		 */
+		if ((c_ctrl->cmd == MSM_CCI_I2C_WRITE_SEQ) &&
+			((i-1) == MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_11) &&
+			cci_dev->support_seq_write && cmd_size > 0 &&
+			free_size > BURST_MIN_FREE_SIZE) {
+			data[0] |= 0xF0;
+			en_seq_write = 1;
+		} else {
+			data[0] |= ((i-1) << 4);
+			en_seq_write = 0;
+		}
+		/* Number of 32-bit words needed for i bytes. */
+		len = ((i-1)/4) + 1;
+
+		read_val = cam_io_r_mb(cci_dev->base +
+			CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
+		/* Load the packed bytes little-endian, one word at a time,
+		 * bumping EXEC_WORD_CNT after each word.
+		 */
+		for (h = 0, k = 0; h < len; h++) {
+			cmd = 0;
+			for (j = 0; (j < 4 && k < i); j++)
+				cmd |= (data[k++] << (j * 8));
+			CDBG("%s LOAD_DATA_ADDR 0x%x, q: %d, len:%d, cnt: %d\n",
+				__func__, cmd, queue, len, read_val);
+			cam_io_w_mb(cmd, cci_dev->base +
+				CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+				master * 0x200 + queue * 0x100);
+
+			read_val += 1;
+			cam_io_w_mb(read_val, cci_dev->base +
+				CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
+		}
+
+		/* Append a WAIT command when the register entry asked for a
+		 * post-write delay (converted to 256-cycle units).
+		 */
+		if ((delay > 0) && (delay < CCI_MAX_DELAY) &&
+			en_seq_write == 0) {
+			cmd = (uint32_t)((delay * cci_dev->cycles_per_us) /
+				0x100);
+			cmd <<= 4;
+			cmd |= CCI_I2C_WAIT_CMD;
+			CDBG("%s CCI_I2C_M0_Q0_LOAD_DATA_ADDR 0x%x\n",
+				__func__, cmd);
+			cam_io_w_mb(cmd, cci_dev->base +
+				CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+				master * 0x200 + queue * 0x100);
+			read_val += 1;
+			cam_io_w_mb(read_val, cci_dev->base +
+				CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
+		}
+	}
+
+	rc = cam_cci_transfer_end(cci_dev, master, queue);
+	if (rc < 0) {
+		pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+/*
+ * cam_cci_read() - perform one bounded I2C read through CCI QUEUE_1.
+ *
+ * Programs SET_PARAM / LOCK / address / READ / UNLOCK commands into the
+ * queue, kicks it, waits for the RD_DONE interrupt (the IRQ handler
+ * completes reset_complete for read-done as well), then drains the read
+ * FIFO into read_cfg->data.  The first byte returned by hardware is the
+ * slave id and is skipped.  Caller must limit num_byte to CCI_READ_MAX.
+ *
+ * Returns 0 on success, negative errno on failure.
+ */
+static int32_t cam_cci_read(struct v4l2_subdev *sd,
+	struct cam_cci_ctrl *c_ctrl)
+{
+	int32_t rc = 0;
+	uint32_t val = 0;
+	int32_t read_words = 0, exp_words = 0;
+	int32_t index = 0, first_byte = 0;
+	uint32_t i = 0;
+	enum cci_i2c_master_t master;
+	enum cci_i2c_queue_t queue = QUEUE_1;
+	struct cci_device *cci_dev = NULL;
+	struct cam_cci_read_cfg *read_cfg = NULL;
+
+	cci_dev = v4l2_get_subdevdata(sd);
+	master = c_ctrl->cci_info->cci_i2c_master;
+	read_cfg = &c_ctrl->cfg.cci_i2c_read_cfg;
+
+	if (c_ctrl->cci_info->cci_i2c_master >= MASTER_MAX
+		|| c_ctrl->cci_info->cci_i2c_master < 0) {
+		pr_err("%s:%d Invalid I2C master addr\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+	mutex_lock(&cci_dev->cci_master_info[master].mutex_q[queue]);
+
+	/*
+	 * Todo: If there is a change in frequency of operation
+	 * Wait for previos transaction to complete
+	 */
+
+	/* Set the I2C Frequency */
+	rc = cam_cci_set_clk_param(cci_dev, c_ctrl);
+	if (rc < 0) {
+		pr_err("%s:%d cam_cci_set_clk_param failed rc = %d\n",
+			__func__, __LINE__, rc);
+		goto rel_mutex;
+	}
+
+	/*
+	 * Call validate queue to make sure queue is empty before starting.
+	 * If this call fails, don't proceed with i2c_read call. This is to
+	 * avoid overflow / underflow of queue
+	 */
+	rc = cam_cci_validate_queue(cci_dev,
+		cci_dev->cci_i2c_queue_info[master][queue].max_queue_size - 1,
+		master, queue);
+	if (rc < 0) {
+		pr_err("%s:%d Initial validataion failed rc %d\n", __func__,
+			__LINE__, rc);
+		goto rel_mutex;
+	}
+
+	if (c_ctrl->cci_info->retries > CCI_I2C_READ_MAX_RETRIES) {
+		pr_err("%s:%d More than max retries\n", __func__,
+			__LINE__);
+		/* Fix: propagate failure instead of returning stale rc (0) */
+		rc = -EINVAL;
+		goto rel_mutex;
+	}
+
+	if (read_cfg->data == NULL) {
+		pr_err("%s:%d Data ptr is NULL\n", __func__,
+			__LINE__);
+		/* Fix: propagate failure instead of returning stale rc (0) */
+		rc = -EINVAL;
+		goto rel_mutex;
+	}
+
+	CDBG("%s master %d, queue %d\n", __func__, master, queue);
+	CDBG("%s set param sid 0x%x retries %d id_map %d\n", __func__,
+		c_ctrl->cci_info->sid, c_ctrl->cci_info->retries,
+		c_ctrl->cci_info->id_map);
+	val = CCI_I2C_SET_PARAM_CMD | c_ctrl->cci_info->sid << 4 |
+		c_ctrl->cci_info->retries << 16 |
+		c_ctrl->cci_info->id_map << 18;
+	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CDBG("%s failed line %d\n", __func__, __LINE__);
+		goto rel_mutex;
+	}
+
+	val = CCI_I2C_LOCK_CMD;
+	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CDBG("%s failed line %d\n", __func__, __LINE__);
+		goto rel_mutex;
+	}
+
+	if (read_cfg->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
+		pr_err("%s failed line %d\n", __func__, __LINE__);
+		rc = -EINVAL;
+		goto rel_mutex;
+	}
+
+	/* Pack the register address MSB-first after the command byte. */
+	val = CCI_I2C_WRITE_DISABLE_P_CMD | (read_cfg->addr_type << 4);
+	for (i = 0; i < read_cfg->addr_type; i++) {
+		val |= ((read_cfg->addr >> (i << 3)) & 0xFF)  <<
+		((read_cfg->addr_type - i) << 3);
+	}
+
+	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CDBG("%s failed line %d\n", __func__, __LINE__);
+		goto rel_mutex;
+	}
+
+	val = CCI_I2C_READ_CMD | (read_cfg->num_byte << 4);
+	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CDBG("%s failed line %d\n", __func__, __LINE__);
+		goto rel_mutex;
+	}
+
+	val = CCI_I2C_UNLOCK_CMD;
+	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CDBG("%s failed line %d\n", __func__, __LINE__);
+		goto rel_mutex;
+	}
+
+	/* Commit the queued words and start the queue. */
+	val = cam_io_r_mb(cci_dev->base + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR
+			+ master * 0x200 + queue * 0x100);
+	CDBG("%s cur word cnt 0x%x\n", __func__, val);
+	cam_io_w_mb(val, cci_dev->base + CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR
+			+ master * 0x200 + queue * 0x100);
+
+	val = 1 << ((master * 2) + queue);
+	cam_io_w_mb(val, cci_dev->base + CCI_QUEUE_START_ADDR);
+	CDBG("%s:%d E wait_for_completion_timeout\n", __func__,
+		__LINE__);
+
+	/* wait_for_completion_timeout returns 0 on timeout, >0 otherwise. */
+	rc = wait_for_completion_timeout(&cci_dev->
+		cci_master_info[master].reset_complete, CCI_TIMEOUT);
+	if (rc <= 0) {
+#ifdef DUMP_CCI_REGISTERS
+		cam_cci_dump_registers(cci_dev, master, queue);
+#endif
+		if (rc == 0)
+			rc = -ETIMEDOUT;
+		pr_err("%s: %d wait_for_completion_timeout rc = %d\n",
+			 __func__, __LINE__, rc);
+		cam_cci_flush_queue(cci_dev, master);
+		goto rel_mutex;
+	} else {
+		rc = 0;
+	}
+
+	read_words = cam_io_r_mb(cci_dev->base +
+		CCI_I2C_M0_READ_BUF_LEVEL_ADDR + master * 0x100);
+	exp_words = ((read_cfg->num_byte / 4) + 1);
+	if (read_words != exp_words) {
+		pr_err("%s:%d read_words = %d, exp words = %d\n", __func__,
+			__LINE__, read_words, exp_words);
+		memset(read_cfg->data, 0, read_cfg->num_byte);
+		rc = -EINVAL;
+		goto rel_mutex;
+	}
+	index = 0;
+	CDBG("%s index %d num_type %d\n", __func__, index,
+		read_cfg->num_byte);
+	first_byte = 0;
+	/* Drain the read FIFO; byte 0 of the stream is the slave id. */
+	do {
+		val = cam_io_r_mb(cci_dev->base +
+			CCI_I2C_M0_READ_DATA_ADDR + master * 0x100);
+		CDBG("%s read val 0x%x\n", __func__, val);
+		for (i = 0; (i < 4) && (index < read_cfg->num_byte); i++) {
+			CDBG("%s i %d index %d\n", __func__, i, index);
+			if (!first_byte) {
+				CDBG("%s sid 0x%x\n", __func__, val & 0xFF);
+				first_byte++;
+			} else {
+				read_cfg->data[index] =
+					(val  >> (i * 8)) & 0xFF;
+				CDBG("%s data[%d] 0x%x\n", __func__, index,
+					read_cfg->data[index]);
+				index++;
+			}
+		}
+	} while (--read_words > 0);
+rel_mutex:
+	mutex_unlock(&cci_dev->cci_master_info[master].mutex_q[queue]);
+
+	return rc;
+}
+
+/*
+ * cam_cci_i2c_write() - validate state and dispatch one write request.
+ *
+ * Sets I2C timing for the master, verifies the target queue is empty,
+ * bounds-checks the retry count and hands off to cam_cci_data_queue().
+ * Caller holds the queue mutex.  Returns 0 on success, negative errno
+ * on failure.
+ */
+static int32_t cam_cci_i2c_write(struct v4l2_subdev *sd,
+	struct cam_cci_ctrl *c_ctrl, enum cci_i2c_queue_t queue,
+	enum cci_i2c_sync sync_en)
+{
+	int32_t rc = 0;
+	struct cci_device *cci_dev;
+	enum cci_i2c_master_t master;
+
+	cci_dev = v4l2_get_subdevdata(sd);
+
+	if (cci_dev->cci_state != CCI_STATE_ENABLED) {
+		pr_err("%s invalid cci state %d\n",
+			__func__, cci_dev->cci_state);
+		return -EINVAL;
+	}
+	master = c_ctrl->cci_info->cci_i2c_master;
+	CDBG("%s set param sid 0x%x retries %d id_map %d\n", __func__,
+		c_ctrl->cci_info->sid, c_ctrl->cci_info->retries,
+		c_ctrl->cci_info->id_map);
+
+	/* Set the I2C Frequency */
+	rc = cam_cci_set_clk_param(cci_dev, c_ctrl);
+	if (rc < 0) {
+		pr_err("%s:%d cam_cci_set_clk_param failed rc = %d\n",
+			__func__, __LINE__, rc);
+		return rc;
+	}
+	/*
+	 * Call validate queue to make sure queue is empty before starting.
+	 * If this call fails, don't proceed with i2c_write call. This is to
+	 * avoid overflow / underflow of queue
+	 */
+	rc = cam_cci_validate_queue(cci_dev,
+		cci_dev->cci_i2c_queue_info[master][queue].max_queue_size-1,
+		master, queue);
+	if (rc < 0) {
+		pr_err("%s:%d Initial validataion failed rc %d\n",
+		__func__, __LINE__, rc);
+		return rc;
+	}
+	if (c_ctrl->cci_info->retries > CCI_I2C_READ_MAX_RETRIES) {
+		pr_err("%s:%d More than max retries\n", __func__,
+			__LINE__);
+		/* Fix: rc was still 0 here, so the error path silently
+		 * returned success; report the failure explicitly.
+		 */
+		return -EINVAL;
+	}
+	rc = cam_cci_data_queue(cci_dev, c_ctrl, queue, sync_en);
+	if (rc < 0) {
+		pr_err("%s failed line %d\n", __func__, __LINE__);
+		return rc;
+	}
+
+	return rc;
+}
+
+/*
+ * cam_cci_write_async_helper() - workqueue handler for async writes.
+ *
+ * Runs a queued cci_write_async job under the target queue's mutex,
+ * then frees the deep-copied reg_setting array and the job itself
+ * (both allocated by cam_cci_i2c_write_async()).
+ */
+static void cam_cci_write_async_helper(struct work_struct *work)
+{
+	int rc;
+	struct cci_device *cci_dev;
+	struct cci_write_async *write_async =
+		container_of(work, struct cci_write_async, work);
+	enum cci_i2c_master_t master;
+	struct cam_cci_master_info *cci_master_info;
+
+	cci_dev = write_async->cci_dev;
+	/* Fix: removed unused local i2c_msg (assigned, never read). */
+	master = write_async->c_ctrl.cci_info->cci_i2c_master;
+	cci_master_info = &cci_dev->cci_master_info[master];
+
+	mutex_lock(&cci_master_info->mutex_q[write_async->queue]);
+	rc = cam_cci_i2c_write(&(cci_dev->v4l2_dev_str.sd),
+		&write_async->c_ctrl, write_async->queue, write_async->sync_en);
+	mutex_unlock(&cci_master_info->mutex_q[write_async->queue]);
+	if (rc < 0)
+		pr_err("%s: %d failed\n", __func__, __LINE__);
+
+	kfree(write_async->c_ctrl.cfg.cci_i2c_write_cfg.reg_setting);
+	kfree(write_async);
+}
+
+/*
+ * cam_cci_i2c_write_async() - queue an I2C write to run in a workqueue.
+ *
+ * Deep-copies the control structure (including the reg_setting array,
+ * which the caller may free immediately after return) and schedules
+ * cam_cci_write_async_helper(), which owns and frees both allocations.
+ * Returns 0 on success, -EINVAL for an empty request, -ENOMEM on OOM.
+ */
+static int32_t cam_cci_i2c_write_async(struct v4l2_subdev *sd,
+	struct cam_cci_ctrl *c_ctrl, enum cci_i2c_queue_t queue,
+	enum cci_i2c_sync sync_en)
+{
+	int32_t rc = 0;
+	struct cci_write_async *write_async;
+	struct cci_device *cci_dev;
+	struct cam_sensor_i2c_reg_setting *cci_i2c_write_cfg;
+	struct cam_sensor_i2c_reg_setting *cci_i2c_write_cfg_w;
+
+	cci_dev = v4l2_get_subdevdata(sd);
+
+	write_async = kzalloc(sizeof(*write_async), GFP_KERNEL);
+	if (!write_async)
+		return -ENOMEM;
+
+	INIT_WORK(&write_async->work, cam_cci_write_async_helper);
+	write_async->cci_dev = cci_dev;
+	write_async->c_ctrl = *c_ctrl;
+	write_async->queue = queue;
+	write_async->sync_en = sync_en;
+
+	cci_i2c_write_cfg = &c_ctrl->cfg.cci_i2c_write_cfg;
+	cci_i2c_write_cfg_w = &write_async->c_ctrl.cfg.cci_i2c_write_cfg;
+
+	if (cci_i2c_write_cfg->size == 0) {
+		kfree(write_async);
+		return -EINVAL;
+	}
+
+	/* kcalloc checks the size * count multiplication for overflow. */
+	cci_i2c_write_cfg_w->reg_setting =
+		kcalloc(cci_i2c_write_cfg->size,
+		sizeof(struct cam_sensor_i2c_reg_array), GFP_KERNEL);
+	if (!cci_i2c_write_cfg_w->reg_setting) {
+		pr_err("%s: %d Couldn't allocate memory\n", __func__, __LINE__);
+		kfree(write_async);
+		return -ENOMEM;
+	}
+	memcpy(cci_i2c_write_cfg_w->reg_setting,
+		cci_i2c_write_cfg->reg_setting,
+		(sizeof(struct cam_sensor_i2c_reg_array)*
+						cci_i2c_write_cfg->size));
+
+	/* Fix: the original assigned addr_type twice; once is enough. */
+	cci_i2c_write_cfg_w->addr_type = cci_i2c_write_cfg->addr_type;
+	cci_i2c_write_cfg_w->data_type = cci_i2c_write_cfg->data_type;
+	cci_i2c_write_cfg_w->size = cci_i2c_write_cfg->size;
+	cci_i2c_write_cfg_w->delay = cci_i2c_write_cfg->delay;
+
+	queue_work(cci_dev->write_wq[write_async->queue], &write_async->work);
+
+	return rc;
+}
+
+/*
+ * cam_cci_read_bytes() - split a large read into CCI_READ_MAX chunks.
+ *
+ * Validates the request, then loops cam_cci_read() advancing
+ * read_cfg->addr and read_cfg->data per chunk.  NOTE(review): the
+ * caller's read_cfg (num_byte, addr, data pointer) is mutated and not
+ * restored — confirm callers do not reuse it afterwards.
+ * Returns 0 on success, negative errno on failure.
+ */
+static int32_t cam_cci_read_bytes(struct v4l2_subdev *sd,
+	struct cam_cci_ctrl *c_ctrl)
+{
+	int32_t rc = 0;
+	struct cci_device *cci_dev = NULL;
+	enum cci_i2c_master_t master;
+	struct cam_cci_read_cfg *read_cfg = NULL;
+	uint16_t read_bytes = 0;
+
+	if (!sd || !c_ctrl) {
+		pr_err("%s:%d sd %pK c_ctrl %pK\n", __func__,
+			__LINE__, sd, c_ctrl);
+		return -EINVAL;
+	}
+	if (!c_ctrl->cci_info) {
+		pr_err("%s:%d cci_info NULL\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+	cci_dev = v4l2_get_subdevdata(sd);
+	if (!cci_dev) {
+		pr_err("%s:%d cci_dev NULL\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+	if (cci_dev->cci_state != CCI_STATE_ENABLED) {
+		pr_err("%s invalid cci state %d\n",
+			__func__, cci_dev->cci_state);
+		return -EINVAL;
+	}
+
+	if (c_ctrl->cci_info->cci_i2c_master >= MASTER_MAX
+			|| c_ctrl->cci_info->cci_i2c_master < 0) {
+		pr_err("%s:%d Invalid I2C master addr\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	master = c_ctrl->cci_info->cci_i2c_master;
+	read_cfg = &c_ctrl->cfg.cci_i2c_read_cfg;
+	if ((!read_cfg->num_byte) || (read_cfg->num_byte > CCI_I2C_MAX_READ)) {
+		/* Fix: old message claimed "0 bytes" even for the
+		 * too-large case; report the offending value instead.
+		 */
+		pr_err("%s:%d invalid read num bytes %u\n", __func__,
+			__LINE__, read_cfg->num_byte);
+		rc = -EINVAL;
+		goto ERROR;
+	}
+
+	read_bytes = read_cfg->num_byte;
+	do {
+		/* Each cam_cci_read() call handles at most CCI_READ_MAX. */
+		if (read_bytes > CCI_READ_MAX)
+			read_cfg->num_byte = CCI_READ_MAX;
+		else
+			read_cfg->num_byte = read_bytes;
+		rc = cam_cci_read(sd, c_ctrl);
+		if (rc < 0) {
+			pr_err("%s:%d failed rc %d\n", __func__, __LINE__, rc);
+			goto ERROR;
+		}
+		if (read_bytes > CCI_READ_MAX) {
+			read_cfg->addr += CCI_READ_MAX;
+			read_cfg->data += CCI_READ_MAX;
+			read_bytes -= CCI_READ_MAX;
+		} else {
+			read_bytes = 0;
+		}
+	} while (read_bytes);
+
+ERROR:
+	return rc;
+}
+
+/*
+ * Cache the wait-sync configuration on the device.  A negative csid
+ * marks the sync configuration as invalid (valid_sync = 0).
+ */
+static int32_t cam_cci_i2c_set_sync_prms(struct v4l2_subdev *sd,
+	struct cam_cci_ctrl *c_ctrl)
+{
+	struct cci_device *cci_dev = v4l2_get_subdevdata(sd);
+
+	if (!cci_dev || !c_ctrl) {
+		pr_err("%s:%d failed: invalid params %pK %pK\n", __func__,
+			__LINE__, cci_dev, c_ctrl);
+		return -EINVAL;
+	}
+
+	cci_dev->cci_wait_sync_cfg = c_ctrl->cfg.cci_wait_sync_cfg;
+	if (cci_dev->cci_wait_sync_cfg.csid < 0)
+		cci_dev->valid_sync = 0;
+	else
+		cci_dev->valid_sync = 1;
+
+	return 0;
+}
+
+/*
+ * cam_cci_release() - release CCI soc resources and stop CPAS.
+ *
+ * CPAS is stopped on both the success and failure paths.
+ * Returns 0 on success or the negative errno from cam_cci_soc_release().
+ */
+static int32_t cam_cci_release(struct v4l2_subdev *sd)
+{
+	/* Fix: rc was uint8_t, so "rc < 0" could never be true and a
+	 * negative return from cam_cci_soc_release() was both missed
+	 * and truncated on return.
+	 */
+	int32_t rc = 0;
+	struct cci_device *cci_dev;
+
+	cci_dev = v4l2_get_subdevdata(sd);
+
+	rc = cam_cci_soc_release(cci_dev);
+	if (rc < 0) {
+		pr_err("%s:%d Failed in releasing the cci: %d\n",
+			__func__, __LINE__, rc);
+		cam_cpas_stop(cci_dev->cpas_handle);
+		return rc;
+	}
+	cam_cpas_stop(cci_dev->cpas_handle);
+
+	return rc;
+}
+
+/*
+ * cam_cci_write() - route a write request to the appropriate queue.
+ *
+ * SYNC_BLOCK writes run synchronously on SYNC_QUEUE under its mutex;
+ * SYNC writes go through the async worker on SYNC_QUEUE; plain and SEQ
+ * writes opportunistically trylock either queue (falling back to
+ * blocking on PRIORITY_QUEUE); ASYNC writes are queued to the worker on
+ * PRIORITY_QUEUE.  Returns 0 on success, negative errno otherwise.
+ */
+static int32_t cam_cci_write(struct v4l2_subdev *sd,
+	struct cam_cci_ctrl *c_ctrl)
+{
+	int32_t rc = 0;
+	struct cci_device *cci_dev;
+	enum cci_i2c_master_t master;
+	struct cam_cci_master_info *cci_master_info;
+	uint32_t i;
+
+	cci_dev = v4l2_get_subdevdata(sd);
+	if (!cci_dev || !c_ctrl) {
+		pr_err("%s:%d failed: invalid params %pK %pK\n", __func__,
+			__LINE__, cci_dev, c_ctrl);
+		rc = -EINVAL;
+		return rc;
+	}
+
+	master = c_ctrl->cci_info->cci_i2c_master;
+
+	if (c_ctrl->cci_info->cci_i2c_master >= MASTER_MAX
+		|| c_ctrl->cci_info->cci_i2c_master < 0) {
+		pr_err("%s:%d Invalid I2C master addr\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	cci_master_info = &cci_dev->cci_master_info[master];
+
+	switch (c_ctrl->cmd) {
+	case MSM_CCI_I2C_WRITE_SYNC_BLOCK:
+		mutex_lock(&cci_master_info->mutex_q[SYNC_QUEUE]);
+		rc = cam_cci_i2c_write(sd, c_ctrl,
+			SYNC_QUEUE, MSM_SYNC_ENABLE);
+		mutex_unlock(&cci_master_info->mutex_q[SYNC_QUEUE]);
+		break;
+	case MSM_CCI_I2C_WRITE_SYNC:
+		rc = cam_cci_i2c_write_async(sd, c_ctrl,
+			SYNC_QUEUE, MSM_SYNC_ENABLE);
+		break;
+	case MSM_CCI_I2C_WRITE:
+	case MSM_CCI_I2C_WRITE_SEQ:
+		/* Grab whichever queue is free right now; only block on
+		 * PRIORITY_QUEUE when both are busy.
+		 */
+		for (i = 0; i < NUM_QUEUES; i++) {
+			if (mutex_trylock(&cci_master_info->mutex_q[i])) {
+				rc = cam_cci_i2c_write(sd, c_ctrl, i,
+					MSM_SYNC_DISABLE);
+				mutex_unlock(&cci_master_info->mutex_q[i]);
+				return rc;
+			}
+		}
+		mutex_lock(&cci_master_info->mutex_q[PRIORITY_QUEUE]);
+		rc = cam_cci_i2c_write(sd, c_ctrl,
+			PRIORITY_QUEUE, MSM_SYNC_DISABLE);
+		mutex_unlock(&cci_master_info->mutex_q[PRIORITY_QUEUE]);
+		break;
+	case MSM_CCI_I2C_WRITE_ASYNC:
+		rc = cam_cci_i2c_write_async(sd, c_ctrl,
+			PRIORITY_QUEUE, MSM_SYNC_DISABLE);
+		break;
+	default:
+		rc = -ENOIOCTLCMD;
+	}
+
+	return rc;
+}
+
+/*
+ * cam_cci_core_cfg() - top-level dispatch for CCI control commands.
+ *
+ * Routes init/release/read/write/sync commands to their handlers,
+ * records the result in cci_ctrl->status and returns it.  Unknown
+ * commands yield -ENOIOCTLCMD; MSM_CCI_GPIO_WRITE is accepted as a
+ * no-op.
+ */
+int32_t cam_cci_core_cfg(struct v4l2_subdev *sd,
+	struct cam_cci_ctrl *cci_ctrl)
+{
+	int32_t rc;
+
+	CDBG("%s line %d cmd %d\n", __func__, __LINE__,
+		cci_ctrl->cmd);
+
+	switch (cci_ctrl->cmd) {
+	case MSM_CCI_INIT:
+		rc = cam_cci_init(sd, cci_ctrl);
+		break;
+	case MSM_CCI_RELEASE:
+		rc = cam_cci_release(sd);
+		break;
+	case MSM_CCI_I2C_READ:
+		rc = cam_cci_read_bytes(sd, cci_ctrl);
+		break;
+	case MSM_CCI_I2C_WRITE:
+	case MSM_CCI_I2C_WRITE_SEQ:
+	case MSM_CCI_I2C_WRITE_SYNC:
+	case MSM_CCI_I2C_WRITE_ASYNC:
+	case MSM_CCI_I2C_WRITE_SYNC_BLOCK:
+		rc = cam_cci_write(sd, cci_ctrl);
+		break;
+	case MSM_CCI_GPIO_WRITE:
+		/* Accepted but intentionally not implemented. */
+		rc = 0;
+		break;
+	case MSM_CCI_SET_SYNC_CID:
+		rc = cam_cci_i2c_set_sync_prms(sd, cci_ctrl);
+		break;
+	default:
+		rc = -ENOIOCTLCMD;
+		break;
+	}
+
+	cci_ctrl->status = rc;
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.h
new file mode 100644
index 0000000..f6e82dc
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.h
@@ -0,0 +1,46 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _CAM_CCI_CORE_H_
+#define _CAM_CCI_CORE_H_
+
+#include <linux/irqreturn.h>
+#include <media/cam_sensor.h>
+#include "cam_cci_dev.h"
+#include "cam_cci_soc.h"
+
+/**
+ * cam_cci_get_clk_rates()
+ * @cci_dev: CCI device structure
+ * @c_ctrl: CCI control structure
+ *
+ * This API gets CCI clk rates
+ *
+ * Return: pointer to the selected clock-rate table row, NULL on failure
+ */
+uint32_t *cam_cci_get_clk_rates(struct cci_device *cci_dev,
+	struct cam_cci_ctrl *c_ctrl);
+
+/**
+ * cam_cci_core_cfg()
+ * @sd: V4L2 sub device
+ * @c_ctrl: CCI control structure
+ *
+ * This API handles I2C operations for CCI
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+int32_t cam_cci_core_cfg(struct v4l2_subdev *sd,
+	struct cam_cci_ctrl *cci_ctrl);
+
+/**
+ * cam_cci_irq()
+ * @irq_num: IRQ number
+ * @data: CCI private structure
+ *
+ * This API handles CCI IRQs
+ *
+ * Return: IRQ_HANDLED
+ */
+irqreturn_t cam_cci_irq(int irq_num, void *data);
+
+#endif /* _CAM_CCI_CORE_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
new file mode 100644
index 0000000..789522d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
@@ -0,0 +1,276 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_cci_dev.h"
+#include "cam_req_mgr_dev.h"
+#include "cam_cci_soc.h"
+#include "cam_cci_core.h"
+
+/*
+ * NOTE(review): CCI_MAX_DELAY and CCI_TIMEOUT are also defined with
+ * identical values in cam_cci_dev.h, which this file includes; the
+ * duplicates are benign (token-identical) but should live in one place.
+ */
+#define CCI_MAX_DELAY 1000000
+#define CCI_TIMEOUT msecs_to_jiffies(500)
+
+/* Singleton CCI subdev, published at probe time. */
+static struct v4l2_subdev *g_cci_subdev;
+
+/* Accessor for sensor-side drivers that need the CCI subdev handle. */
+struct v4l2_subdev *cam_cci_get_subdev(void)
+{
+	return g_cci_subdev;
+}
+
+/*
+ * V4L2 subdev ioctl entry point.  Only VIDIOC_MSM_CCI_CFG is
+ * supported; everything else returns -ENOIOCTLCMD.
+ */
+static long cam_cci_subdev_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	if (cmd == VIDIOC_MSM_CCI_CFG)
+		return cam_cci_core_cfg(sd, arg);
+
+	pr_err("%s:%d Invalid ioctl cmd: %d\n",
+		__func__, __LINE__, cmd);
+	return -ENOIOCTLCMD;
+}
+
+/*
+ * cam_cci_irq() - CCI hard-IRQ handler.
+ *
+ * Reads and clears IRQ_STATUS_0, then services each asserted bit:
+ * reset-done acks, per-master read-done (signalled via reset_complete),
+ * per-queue report completions, halt acks (which trigger a master
+ * reset), and error bits (which request a halt).  Always returns
+ * IRQ_HANDLED.
+ */
+irqreturn_t cam_cci_irq(int irq_num, void *data)
+{
+	uint32_t irq;
+	struct cci_device *cci_dev = data;
+
+	/* Latch, clear, and globally ack the pending status bits. */
+	irq = cam_io_r_mb(cci_dev->base + CCI_IRQ_STATUS_0_ADDR);
+	cam_io_w_mb(irq, cci_dev->base + CCI_IRQ_CLEAR_0_ADDR);
+	cam_io_w_mb(0x1, cci_dev->base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+
+	if (irq & CCI_IRQ_STATUS_0_RST_DONE_ACK_BMSK) {
+		if (cci_dev->cci_master_info[MASTER_0].reset_pending == TRUE) {
+			cci_dev->cci_master_info[MASTER_0].reset_pending =
+				FALSE;
+			complete(&cci_dev->cci_master_info[MASTER_0].
+				reset_complete);
+		}
+		if (cci_dev->cci_master_info[MASTER_1].reset_pending == TRUE) {
+			cci_dev->cci_master_info[MASTER_1].reset_pending =
+				FALSE;
+			complete(&cci_dev->cci_master_info[MASTER_1].
+				reset_complete);
+		}
+	}
+	/* Read-done reuses reset_complete; cam_cci_read() waits on it. */
+	if (irq & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK) {
+		cci_dev->cci_master_info[MASTER_0].status = 0;
+		complete(&cci_dev->cci_master_info[MASTER_0].reset_complete);
+	}
+	if (irq & CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT_BMSK) {
+		struct cam_cci_master_info *cci_master_info;
+
+		cci_master_info = &cci_dev->cci_master_info[MASTER_0];
+		atomic_set(&cci_master_info->q_free[QUEUE_0], 0);
+		cci_master_info->status = 0;
+		if (atomic_read(&cci_master_info->done_pending[QUEUE_0]) == 1) {
+			complete(&cci_master_info->report_q[QUEUE_0]);
+			atomic_set(&cci_master_info->done_pending[QUEUE_0], 0);
+		}
+	}
+	if (irq & CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT_BMSK) {
+		struct cam_cci_master_info *cci_master_info;
+
+		cci_master_info = &cci_dev->cci_master_info[MASTER_0];
+		atomic_set(&cci_master_info->q_free[QUEUE_1], 0);
+		cci_master_info->status = 0;
+		if (atomic_read(&cci_master_info->done_pending[QUEUE_1]) == 1) {
+			complete(&cci_master_info->report_q[QUEUE_1]);
+			atomic_set(&cci_master_info->done_pending[QUEUE_1], 0);
+		}
+	}
+	if (irq & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) {
+		cci_dev->cci_master_info[MASTER_1].status = 0;
+		complete(&cci_dev->cci_master_info[MASTER_1].reset_complete);
+	}
+	if (irq & CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT_BMSK) {
+		struct cam_cci_master_info *cci_master_info;
+
+		cci_master_info = &cci_dev->cci_master_info[MASTER_1];
+		atomic_set(&cci_master_info->q_free[QUEUE_0], 0);
+		cci_master_info->status = 0;
+		if (atomic_read(&cci_master_info->done_pending[QUEUE_0]) == 1) {
+			complete(&cci_master_info->report_q[QUEUE_0]);
+			atomic_set(&cci_master_info->done_pending[QUEUE_0], 0);
+		}
+	}
+	if (irq & CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT_BMSK) {
+		struct cam_cci_master_info *cci_master_info;
+
+		cci_master_info = &cci_dev->cci_master_info[MASTER_1];
+		atomic_set(&cci_master_info->q_free[QUEUE_1], 0);
+		cci_master_info->status = 0;
+		if (atomic_read(&cci_master_info->done_pending[QUEUE_1]) == 1) {
+			complete(&cci_master_info->report_q[QUEUE_1]);
+			atomic_set(&cci_master_info->done_pending[QUEUE_1], 0);
+		}
+	}
+	/* Halt acks: the master is quiesced, now issue its reset. */
+	if (irq & CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK) {
+		cci_dev->cci_master_info[MASTER_0].reset_pending = TRUE;
+		cam_io_w_mb(CCI_M0_RESET_RMSK,
+			cci_dev->base + CCI_RESET_CMD_ADDR);
+	}
+	if (irq & CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK_BMSK) {
+		cci_dev->cci_master_info[MASTER_1].reset_pending = TRUE;
+		cam_io_w_mb(CCI_M1_RESET_RMSK,
+			cci_dev->base + CCI_RESET_CMD_ADDR);
+	}
+	/* Errors: mark the master failed and request a halt (the halt ack
+	 * above will then reset it).
+	 */
+	if (irq & CCI_IRQ_STATUS_0_I2C_M0_ERROR_BMSK) {
+		pr_err("%s:%d MASTER_0 error 0x%x\n", __func__, __LINE__, irq);
+		cci_dev->cci_master_info[MASTER_0].status = -EINVAL;
+		cam_io_w_mb(CCI_M0_HALT_REQ_RMSK,
+			cci_dev->base + CCI_HALT_REQ_ADDR);
+	}
+	if (irq & CCI_IRQ_STATUS_0_I2C_M1_ERROR_BMSK) {
+		pr_err("%s:%d MASTER_1 error 0x%x\n", __func__, __LINE__, irq);
+		cci_dev->cci_master_info[MASTER_1].status = -EINVAL;
+		cam_io_w_mb(CCI_M1_HALT_REQ_RMSK,
+			cci_dev->base + CCI_HALT_REQ_ADDR);
+	}
+	return IRQ_HANDLED;
+}
+}
+
+/*
+ * V4L2 interrupt_service_routine hook: delegate to the shared hard-IRQ
+ * handler and mark the interrupt handled.
+ */
+static int cam_cci_irq_routine(struct v4l2_subdev *sd, u32 status,
+	bool *handled)
+{
+	struct cci_device *cci_dev = v4l2_get_subdevdata(sd);
+
+	/* Fix: the return value was stored in an unused local; it is
+	 * always IRQ_HANDLED, so discard it explicitly.
+	 */
+	(void)cam_cci_irq(cci_dev->irq->start, cci_dev);
+	*handled = TRUE;
+	return 0;
+}
+
+/* Core ops: ioctl dispatch plus ISR hook for the v4l2 framework. */
+static struct v4l2_subdev_core_ops cci_subdev_core_ops = {
+	.ioctl = cam_cci_subdev_ioctl,
+	.interrupt_service_routine = cam_cci_irq_routine,
+};
+
+static const struct v4l2_subdev_ops cci_subdev_ops = {
+	.core = &cci_subdev_core_ops,
+};
+
+/* No internal ops are needed; an empty table is registered. */
+static const struct v4l2_subdev_internal_ops cci_subdev_intern_ops;
+
+/*
+ * cam_cci_platform_probe() - allocate the CCI device, parse DT, register
+ * the v4l2 subdev and the CPAS client.
+ *
+ * On failure all partially-acquired resources are rolled back in
+ * reverse order.  Returns 0 on success, negative errno otherwise.
+ */
+static int cam_cci_platform_probe(struct platform_device *pdev)
+{
+	struct cam_cpas_register_params cpas_parms;
+	struct cci_device *new_cci_dev;
+	int rc = 0;
+
+	new_cci_dev = kzalloc(sizeof(struct cci_device),
+		GFP_KERNEL);
+	if (!new_cci_dev)
+		return -ENOMEM;
+
+	new_cci_dev->v4l2_dev_str.pdev = pdev;
+
+	rc = cam_cci_parse_dt_info(pdev, new_cci_dev);
+	if (rc < 0) {
+		pr_err("%s: %d Resource get Failed: %d\n",
+			__func__, __LINE__, rc);
+		goto cci_no_resource;
+	}
+
+	new_cci_dev->v4l2_dev_str.internal_ops =
+		&cci_subdev_intern_ops;
+	new_cci_dev->v4l2_dev_str.ops =
+		&cci_subdev_ops;
+	strlcpy(new_cci_dev->device_name, CAMX_CCI_DEV_NAME,
+		sizeof(new_cci_dev->device_name));
+	new_cci_dev->v4l2_dev_str.name =
+		new_cci_dev->device_name;
+	new_cci_dev->v4l2_dev_str.sd_flags =
+		(V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS);
+	new_cci_dev->v4l2_dev_str.ent_function =
+		CAM_CCI_DEVICE_TYPE;
+	new_cci_dev->v4l2_dev_str.token =
+		new_cci_dev;
+
+	rc = cam_register_subdev(&(new_cci_dev->v4l2_dev_str));
+	if (rc < 0) {
+		pr_err("%s:%d :Error: Fail with cam_register_subdev\n",
+			__func__, __LINE__);
+		goto cci_no_resource;
+	}
+
+	platform_set_drvdata(pdev, &(new_cci_dev->v4l2_dev_str.sd));
+	v4l2_set_subdevdata(&new_cci_dev->v4l2_dev_str.sd, new_cci_dev);
+	g_cci_subdev = &new_cci_dev->v4l2_dev_str.sd;
+
+	cpas_parms.cam_cpas_client_cb = NULL;
+	cpas_parms.cell_index = 0;
+	cpas_parms.dev = &pdev->dev;
+	cpas_parms.userdata = new_cci_dev;
+	strlcpy(cpas_parms.identifier, "cci", CAM_HW_IDENTIFIER_LENGTH);
+	rc = cam_cpas_register_client(&cpas_parms);
+	if (rc) {
+		pr_err("%s:%d CPAS registration failed\n", __func__, __LINE__);
+		/* Fix: the subdev was already registered and g_cci_subdev
+		 * published; freeing new_cci_dev without undoing that left
+		 * dangling pointers behind.
+		 */
+		goto cci_unregister_subdev;
+	}
+	CDBG("CPAS registration successful handle=%d\n",
+		cpas_parms.client_handle);
+	new_cci_dev->cpas_handle = cpas_parms.client_handle;
+
+	return rc;
+cci_unregister_subdev:
+	g_cci_subdev = NULL;
+	cam_unregister_subdev(&(new_cci_dev->v4l2_dev_str));
+cci_no_resource:
+	kfree(new_cci_dev);
+	return rc;
+}
+
+/*
+ * cam_cci_device_remove() - tear down the CCI device on driver unbind.
+ */
+static int cam_cci_device_remove(struct platform_device *pdev)
+{
+	struct v4l2_subdev *subdev = platform_get_drvdata(pdev);
+	struct cci_device *cci_dev =
+		v4l2_get_subdevdata(subdev);
+
+	cam_cpas_unregister_client(cci_dev->cpas_handle);
+	cam_cci_soc_remove(pdev, cci_dev);
+	/* Fix: cci_dev is allocated with kzalloc() in probe, so it must
+	 * be freed with kfree(); devm_kfree() pairs only with devm_*
+	 * allocations.
+	 */
+	kfree(cci_dev);
+	return 0;
+}
+
+/* Device-tree match table: binds this driver to "qcom,cci" nodes. */
+static const struct of_device_id cam_cci_dt_match[] = {
+	{.compatible = "qcom,cci"},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, cam_cci_dt_match);
+
+static struct platform_driver cci_driver = {
+	.probe = cam_cci_platform_probe,
+	.remove = cam_cci_device_remove,
+	.driver = {
+		.name = CAMX_CCI_DEV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = cam_cci_dt_match,
+	},
+};
+
+/* Module entry: register the platform driver with the kernel. */
+static int __init cam_cci_init_module(void)
+{
+	return platform_driver_register(&cci_driver);
+}
+
+/* Module exit: unregister the platform driver. */
+static void __exit cam_cci_exit_module(void)
+{
+	platform_driver_unregister(&cci_driver);
+}
+
+module_init(cam_cci_init_module);
+module_exit(cam_cci_exit_module);
+MODULE_DESCRIPTION("MSM CCI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
new file mode 100644
index 0000000..996fc62
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
@@ -0,0 +1,322 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CCI_DEV_H_
+#define _CAM_CCI_DEV_H_
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/module.h>
+#include <linux/irqreturn.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <media/cam_sensor.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <cam_sensor_cmn_header.h>
+#include <cam_sensor_soc_api.h>
+#include <cam_io_util.h>
+#include <cam_sensor_util.h>
+#include <cam_subdev.h>
+#include <cam_cpas_api.h>
+#include "cam_cci_hwreg.h"
+
+#define V4L2_IDENT_CCI 50005
+#define CCI_I2C_QUEUE_0_SIZE 128
+#define CCI_I2C_QUEUE_1_SIZE 32
+#define CYCLES_PER_MICRO_SEC_DEFAULT 4915
+#define CCI_MAX_DELAY 1000000
+
+#define CCI_TIMEOUT msecs_to_jiffies(500)
+
+#define NUM_MASTERS 2
+#define NUM_QUEUES 2
+
+#define TRUE  1
+#define FALSE 0
+
+#define CCI_PINCTRL_STATE_DEFAULT "cci_default"
+#define CCI_PINCTRL_STATE_SLEEP "cci_suspend"
+
+#define CCI_NUM_CLK_MAX 16
+#define CCI_NUM_CLK_CASES 5
+#define CCI_CLK_SRC_NAME "cci_src_clk"
+#define MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_10 10
+#define MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_11 11
+#define BURST_MIN_FREE_SIZE 8
+#define MAX_LRME_V4l2_EVENTS 30
+
+/* Max bytes that can be read per CCI read transaction */
+#define CCI_READ_MAX 12
+#define CCI_I2C_READ_MAX_RETRIES 3
+#define CCI_I2C_MAX_READ 8192
+#define CCI_I2C_MAX_WRITE 8192
+
+#define CAMX_CCI_DEV_NAME "cam-cci-driver"
+
+/* Note: the CCI_READ_MAX/CCI_I2C_* group was previously defined twice
+ * verbatim; the redundant second copy has been removed.
+ */
+
+#define PRIORITY_QUEUE (QUEUE_0)
+#define SYNC_QUEUE (QUEUE_1)
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+#undef CCI_DBG
+#ifdef MSM_CCI_DEBUG
+#define CCI_DBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CCI_DBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+/* Whether a queued write is synchronized against the CSID line count. */
+enum cci_i2c_sync {
+	MSM_SYNC_DISABLE,
+	MSM_SYNC_ENABLE,
+};
+
+/* Client-visible CCI operations carried in struct cam_cci_ctrl. */
+enum cam_cci_cmd_type {
+	MSM_CCI_INIT,
+	MSM_CCI_RELEASE,
+	MSM_CCI_SET_SID,
+	MSM_CCI_SET_FREQ,
+	MSM_CCI_SET_SYNC_CID,
+	MSM_CCI_I2C_READ,
+	MSM_CCI_I2C_WRITE,
+	MSM_CCI_I2C_WRITE_SEQ,
+	MSM_CCI_I2C_WRITE_ASYNC,
+	MSM_CCI_GPIO_WRITE,
+	MSM_CCI_I2C_WRITE_SYNC,
+	MSM_CCI_I2C_WRITE_SYNC_BLOCK,
+};
+
+/* Hardware command queues available on each CCI master. */
+enum cci_i2c_queue_t {
+	QUEUE_0,
+	QUEUE_1,
+	QUEUE_INVALID,
+};
+
+/* CSID/line/delay parameters for line-synchronized writes. */
+struct cam_cci_wait_sync_cfg {
+	uint16_t cid;
+	int16_t csid;
+	uint16_t line;
+	uint16_t delay;
+};
+
+/* Queue selection for GPIO-triggered transactions. */
+struct cam_cci_gpio_cfg {
+	uint16_t gpio_queue;
+	uint16_t i2c_queue;
+};
+
+/* Parameters of a single CCI read transaction. */
+struct cam_cci_read_cfg {
+	uint32_t addr;
+	uint16_t addr_type;
+	uint8_t *data;
+	uint16_t num_byte;
+};
+
+/* Static configuration of one hardware command queue. */
+struct cam_cci_i2c_queue_info {
+	uint32_t max_queue_size;
+	uint32_t report_id;
+	uint32_t irq_en;
+	uint32_t capture_rep_data;
+};
+
+/* Per-master state: locks, completions and queue bookkeeping shared
+ * between the submission path and the IRQ handler.
+ */
+struct cam_cci_master_info {
+	uint32_t status;
+	atomic_t q_free[NUM_QUEUES];
+	uint8_t q_lock[NUM_QUEUES];
+	uint8_t reset_pending;
+	struct mutex mutex;
+	struct completion reset_complete;
+	struct mutex mutex_q[NUM_QUEUES];
+	struct completion report_q[NUM_QUEUES];
+	atomic_t done_pending[NUM_QUEUES];
+};
+
+/* I2C bus timing parameters for one frequency mode; populated from
+ * device tree, with 100 KHz defaults as fallback (see cam_cci_soc.c).
+ */
+struct cam_cci_clk_params_t {
+	uint16_t hw_thigh;
+	uint16_t hw_tlow;
+	uint16_t hw_tsu_sto;
+	uint16_t hw_tsu_sta;
+	uint16_t hw_thd_dat;
+	uint16_t hw_thd_sta;
+	uint16_t hw_tbuf;
+	uint8_t hw_scl_stretch_en;
+	uint8_t hw_trdhld;
+	uint8_t hw_tsp;
+	uint32_t cci_clk_src;
+};
+
+/* Power state of the CCI block. */
+enum cam_cci_state_t {
+	CCI_STATE_ENABLED,
+	CCI_STATE_DISABLED,
+};
+
+/**
+ * struct cci_device
+ * @subdev: V4L2 sub device
+ * @irq: IRQ resource of the CCI block
+ * @base: Base address of CCI device
+ * @hw_version: Hardware version
+ * @ref_count: Reference Count
+ * @cci_state: CCI state machine
+ * @num_clk: Number of CCI clocks
+ * @cci_clk: CCI clock structure
+ * @cci_clk_info: CCI clock information
+ * @cci_i2c_queue_info: CCI queue information
+ * @cci_master_info: Per-master locks, completions and status
+ * @i2c_freq_mode: I2C frequency of operations
+ * @cci_clk_params: CCI hw clk params
+ * @cci_gpio_tbl: CCI GPIO table
+ * @cci_gpio_tbl_size: GPIO table size
+ * @cci_pinctrl: Pinctrl structure
+ * @cci_pinctrl_status: CCI pinctrl status
+ * @cci_clk_src: CCI clk src rate
+ * @cci_vreg: CCI regulator structure
+ * @cci_reg_ptr: CCI individual regulator structure
+ * @regulator_count: Regulator count
+ * @support_seq_write:
+ *     Set this flag when sequential write is enabled
+ * @write_wq: Work queue structure
+ * @cci_wait_sync_cfg: CCI sync config
+ * @valid_sync: Is it a valid sync with CSID
+ * @v4l2_dev_str: V4L2 device structure
+ * @cycles_per_us: Cycles per micro sec
+ * @payload_size: CCI packet payload size
+ * @num_clk_cases: Number of supported clock-rate cases
+ * @cci_clk_rates: Clock-rate table, one row per case
+ * @device_name: Device name
+ * @cpas_handle: CPAS client handle
+ */
+struct cci_device {
+	struct v4l2_subdev subdev;
+	struct resource *irq;
+	void __iomem *base;
+	uint32_t hw_version;
+	uint8_t ref_count;
+	enum cam_cci_state_t cci_state;
+	size_t num_clk;
+	struct clk **cci_clk;
+	struct msm_cam_clk_info *cci_clk_info;
+	struct cam_cci_i2c_queue_info
+		cci_i2c_queue_info[NUM_MASTERS][NUM_QUEUES];
+	struct cam_cci_master_info cci_master_info[NUM_MASTERS];
+	enum i2c_freq_mode i2c_freq_mode[NUM_MASTERS];
+	struct cam_cci_clk_params_t cci_clk_params[I2C_MAX_MODES];
+	struct gpio *cci_gpio_tbl;
+	uint8_t cci_gpio_tbl_size;
+	struct msm_pinctrl_info cci_pinctrl;
+	uint8_t cci_pinctrl_status;
+	uint32_t cci_clk_src;
+	struct camera_vreg_t *cci_vreg;
+	struct regulator *cci_reg_ptr[MAX_REGULATOR];
+	int32_t regulator_count;
+	uint8_t support_seq_write;
+	struct workqueue_struct *write_wq[MASTER_MAX];
+	struct cam_cci_wait_sync_cfg cci_wait_sync_cfg;
+	uint8_t valid_sync;
+	struct cam_subdev v4l2_dev_str;
+	uint32_t cycles_per_us;
+	uint8_t payload_size;
+	size_t num_clk_cases;
+	uint32_t **cci_clk_rates;
+	char device_name[20];
+	uint32_t cpas_handle;
+};
+
+/* Low-level command opcodes written into the CCI I2C queues. */
+enum cam_cci_i2c_cmd_type {
+	CCI_I2C_SET_PARAM_CMD = 1,
+	CCI_I2C_WAIT_CMD,
+	CCI_I2C_WAIT_SYNC_CMD,
+	CCI_I2C_WAIT_GPIO_EVENT_CMD,
+	CCI_I2C_TRIG_I2C_EVENT_CMD,
+	CCI_I2C_LOCK_CMD,
+	CCI_I2C_UNLOCK_CMD,
+	CCI_I2C_REPORT_CMD,
+	CCI_I2C_WRITE_CMD,
+	CCI_I2C_READ_CMD,
+	CCI_I2C_WRITE_DISABLE_P_CMD,
+	CCI_I2C_READ_DISABLE_P_CMD,
+	CCI_I2C_WRITE_CMD2,
+	CCI_I2C_WRITE_CMD3,
+	CCI_I2C_REPEAT_CMD,
+	CCI_I2C_INVALID_CMD,
+};
+
+/* Low-level command opcodes for the CCI GPIO queues. */
+enum cam_cci_gpio_cmd_type {
+	CCI_GPIO_SET_PARAM_CMD = 1,
+	CCI_GPIO_WAIT_CMD,
+	CCI_GPIO_WAIT_SYNC_CMD,
+	CCI_GPIO_WAIT_GPIO_IN_EVENT_CMD,
+	CCI_GPIO_WAIT_I2C_Q_TRIG_EVENT_CMD,
+	CCI_GPIO_OUT_CMD,
+	CCI_GPIO_TRIG_EVENT_CMD,
+	CCI_GPIO_REPORT_CMD,
+	CCI_GPIO_REPEAT_CMD,
+	CCI_GPIO_CONTINUE_CMD,
+	CCI_GPIO_INVALID_CMD,
+};
+
+/* Per-client I2C addressing and timing context supplied with each
+ * CCI request.
+ */
+struct cam_sensor_cci_client {
+	struct v4l2_subdev *cci_subdev;
+	uint32_t freq;
+	enum i2c_freq_mode i2c_freq_mode;
+	enum cci_i2c_master_t cci_i2c_master;
+	uint16_t sid;
+	uint16_t cid;
+	uint32_t timeout;
+	uint16_t retries;
+	uint16_t id_map;
+};
+
+/* Control structure exchanged between clients and the CCI core; the
+ * union member used is selected by @cmd.
+ */
+struct cam_cci_ctrl {
+	int32_t status;
+	struct cam_sensor_cci_client *cci_info;
+	enum cam_cci_cmd_type cmd;
+	union {
+		struct cam_sensor_i2c_reg_setting cci_i2c_write_cfg;
+		struct cam_cci_read_cfg cci_i2c_read_cfg;
+		struct cam_cci_wait_sync_cfg cci_wait_sync_cfg;
+		struct cam_cci_gpio_cfg gpio_cfg;
+	} cfg;
+};
+
+/* Work item carrying a deferred (asynchronous) CCI write request. */
+struct cci_write_async {
+	struct cci_device *cci_dev;
+	struct cam_cci_ctrl c_ctrl;
+	enum cci_i2c_queue_t queue;
+	struct work_struct work;
+	enum cci_i2c_sync sync_en;
+};
+
+/* Top-half interrupt handler for the CCI block. */
+irqreturn_t cam_cci_irq(int irq_num, void *data);
+
+#ifdef CONFIG_SPECTRA_CAMERA
+/* Returns the CCI v4l2 subdev registered at probe time. */
+struct v4l2_subdev *cam_cci_get_subdev(void);
+#else
+/* Stub when the Spectra camera stack is not built in. */
+static inline struct v4l2_subdev *cam_cci_get_subdev(void)
+{
+	return NULL;
+}
+#endif
+
+/* Private ioctl carrying a struct cam_cci_ctrl request. */
+#define VIDIOC_MSM_CCI_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 23, struct cam_cci_ctrl *)
+
+#endif /* _CAM_CCI_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_hwreg.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_hwreg.h
new file mode 100644
index 0000000..c18593e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_hwreg.h
@@ -0,0 +1,69 @@
+/* Copyright (c) 2012-2015, 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CCI_HWREG_
+#define _CAM_CCI_HWREG_
+
+/* Top-level control registers and reset masks. */
+#define CCI_HW_VERSION_ADDR                                         0x00000000
+#define CCI_RESET_CMD_ADDR                                          0x00000004
+#define CCI_RESET_CMD_RMSK                                          0x0f73f3f7
+#define CCI_M0_RESET_RMSK                                                0x3F1
+#define CCI_M1_RESET_RMSK                                              0x3F001
+#define CCI_QUEUE_START_ADDR                                        0x00000008
+#define CCI_SET_CID_SYNC_TIMER_ADDR                                 0x00000010
+#define CCI_SET_CID_SYNC_TIMER_OFFSET                               0x00000004
+/* Master 0 bus timing and data registers. */
+#define CCI_I2C_M0_SCL_CTL_ADDR                                     0x00000100
+#define CCI_I2C_M0_SDA_CTL_0_ADDR                                   0x00000104
+#define CCI_I2C_M0_SDA_CTL_1_ADDR                                   0x00000108
+#define CCI_I2C_M0_SDA_CTL_2_ADDR                                   0x0000010c
+#define CCI_I2C_M0_READ_DATA_ADDR                                   0x00000118
+#define CCI_I2C_M0_MISC_CTL_ADDR                                    0x00000110
+#define CCI_I2C_M0_READ_BUF_LEVEL_ADDR                              0x0000011C
+/* Halt request register and per-master halt masks. */
+#define CCI_HALT_REQ_ADDR                                           0x00000034
+#define CCI_M0_HALT_REQ_RMSK                                               0x1
+#define CCI_M1_HALT_REQ_RMSK                                               0x2
+/* Master 1 bus timing registers. */
+#define CCI_I2C_M1_SCL_CTL_ADDR                                     0x00000200
+#define CCI_I2C_M1_SDA_CTL_0_ADDR                                   0x00000204
+#define CCI_I2C_M1_SDA_CTL_1_ADDR                                   0x00000208
+#define CCI_I2C_M1_SDA_CTL_2_ADDR                                   0x0000020c
+#define CCI_I2C_M1_MISC_CTL_ADDR                                    0x00000210
+/* Master 0, queue 0 command-queue registers. */
+#define CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR                             0x00000304
+#define CCI_I2C_M0_Q0_CUR_CMD_ADDR                                  0x00000308
+#define CCI_I2C_M0_Q0_REPORT_STATUS_ADDR                            0x0000030c
+#define CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR                            0x00000300
+#define CCI_I2C_M0_Q0_LOAD_DATA_ADDR                                0x00000310
+/* Interrupt mask/clear/status registers and status bit masks. */
+#define CCI_IRQ_MASK_0_ADDR                                         0x00000c04
+#define CCI_IRQ_MASK_0_RMSK                                         0x7fff7ff7
+#define CCI_IRQ_CLEAR_0_ADDR                                        0x00000c08
+#define CCI_IRQ_STATUS_0_ADDR                                       0x00000c0c
+#define CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK_BMSK                   0x4000000
+#define CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK                   0x2000000
+#define CCI_IRQ_STATUS_0_RST_DONE_ACK_BMSK                           0x1000000
+#define CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT_BMSK                        0x100000
+#define CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT_BMSK                         0x10000
+#define CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK                            0x1000
+#define CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT_BMSK                           0x100
+#define CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT_BMSK                            0x10
+#define CCI_IRQ_STATUS_0_I2C_M0_ERROR_BMSK                          0x18000EE6
+#define CCI_IRQ_STATUS_0_I2C_M1_ERROR_BMSK                          0x60EE6000
+#define CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK                               0x1
+#define CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR                               0x00000c00
+
+/* Register ranges dumped for debug. */
+#define DEBUG_TOP_REG_START                                                0x0
+#define DEBUG_TOP_REG_COUNT                                                 14
+#define DEBUG_MASTER_REG_START                                           0x100
+#define DEBUG_MASTER_REG_COUNT                                               8
+#define DEBUG_MASTER_QUEUE_REG_START                                     0x300
+#define DEBUG_MASTER_QUEUE_REG_COUNT                                         6
+#define DEBUG_INTR_REG_START                                             0xC00
+#define DEBUG_INTR_REG_COUNT                                                 7
+#endif /* _CAM_CCI_HWREG_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
new file mode 100644
index 0000000..59cdfaa
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
@@ -0,0 +1,624 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_cci_dev.h"
+#include "cam_cci_core.h"
+
+static int32_t cam_cci_pinctrl_init(struct cci_device *cci_dev)
+{
+	struct device *dev = &cci_dev->v4l2_dev_str.pdev->dev;
+	struct msm_pinctrl_info *info = &cci_dev->cci_pinctrl;
+
+	/* Acquire the pinctrl handle and resolve the two pin states
+	 * the driver uses: "cci_default" (active) and "cci_suspend".
+	 */
+	info->pinctrl = devm_pinctrl_get(dev);
+	if (IS_ERR_OR_NULL(info->pinctrl)) {
+		pr_err("%s:%d devm_pinctrl_get cci_pinctrl failed\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+
+	info->gpio_state_active = pinctrl_lookup_state(info->pinctrl,
+		CCI_PINCTRL_STATE_DEFAULT);
+	if (IS_ERR_OR_NULL(info->gpio_state_active)) {
+		pr_err("%s:%d look up state  for active state failed\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+
+	info->gpio_state_suspend = pinctrl_lookup_state(info->pinctrl,
+		CCI_PINCTRL_STATE_SLEEP);
+	if (IS_ERR_OR_NULL(info->gpio_state_suspend)) {
+		pr_err("%s:%d look up state for suspend state failed\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * cam_cci_init() - power up and initialize the CCI block
+ * @sd: CCI v4l2 sub device
+ * @c_ctrl: control structure of the requesting client
+ *
+ * The first caller votes CPAS bandwidth, configures pinctrl/GPIOs,
+ * enables regulators and clocks, resets the hardware and unmasks the
+ * CCI interrupts.  Later callers (ref_count already non-zero) only
+ * reset the master they are going to use.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int cam_cci_init(struct v4l2_subdev *sd,
+	struct cam_cci_ctrl *c_ctrl)
+{
+	uint8_t i = 0, j = 0;
+	int32_t rc = 0, ret = 0;
+	struct cci_device *cci_dev;
+	enum cci_i2c_master_t master = MASTER_0;
+	uint32_t *clk_rates = NULL;
+	struct cam_ahb_vote ahb_vote;
+	struct cam_axi_vote axi_vote;
+
+	cci_dev = v4l2_get_subdevdata(sd);
+	if (!cci_dev || !c_ctrl) {
+		pr_err("%s:%d failed: invalid params %pK %pK\n", __func__,
+			__LINE__, cci_dev, c_ctrl);
+		rc = -EINVAL;
+		return rc;
+	}
+
+	/* Block already powered by an earlier client: only reset the
+	 * master this client is going to use, then return.
+	 */
+	if (cci_dev->ref_count++) {
+		CDBG("%s ref_count %d\n", __func__, cci_dev->ref_count);
+		master = c_ctrl->cci_info->cci_i2c_master;
+		CDBG("%s:%d master %d\n", __func__, __LINE__, master);
+		if (master < MASTER_MAX && master >= 0) {
+			mutex_lock(&cci_dev->cci_master_info[master].mutex);
+			flush_workqueue(cci_dev->write_wq[master]);
+			/* Re-initialize the completion */
+			reinit_completion(&cci_dev->
+				cci_master_info[master].reset_complete);
+			for (i = 0; i < NUM_QUEUES; i++)
+				reinit_completion(&cci_dev->
+					cci_master_info[master].report_q[i]);
+			/* Set reset pending flag to TRUE */
+			cci_dev->cci_master_info[master].reset_pending = TRUE;
+			/* Set proper mask to RESET CMD address */
+			if (master == MASTER_0)
+				cam_io_w_mb(CCI_M0_RESET_RMSK,
+					cci_dev->base + CCI_RESET_CMD_ADDR);
+			else
+				cam_io_w_mb(CCI_M1_RESET_RMSK,
+					cci_dev->base + CCI_RESET_CMD_ADDR);
+			/* wait for reset done irq */
+			rc = wait_for_completion_timeout(
+				&cci_dev->cci_master_info[master].
+				reset_complete,
+				CCI_TIMEOUT);
+			if (rc <= 0)
+				pr_err("%s:%d wait failed %d\n", __func__,
+					__LINE__, rc);
+			mutex_unlock(&cci_dev->cci_master_info[master].mutex);
+		}
+		return 0;
+	}
+
+	/* First user: vote AHB/AXI bandwidth through CPAS. */
+	ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	ahb_vote.vote.level = CAM_SVS_VOTE;
+	axi_vote.compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+	axi_vote.uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+
+	rc = cam_cpas_start(cci_dev->cpas_handle,
+		&ahb_vote, &axi_vote);
+	if (rc != 0) {
+		pr_err("%s:%d CPAS start failed\n",
+			__func__, __LINE__);
+	}
+
+	/* Pinctrl is best effort: on failure we fall back to the raw
+	 * GPIO table below.
+	 */
+	ret = cam_cci_pinctrl_init(cci_dev);
+	if (ret < 0) {
+		pr_err("%s:%d Initialization of pinctrl failed\n",
+				__func__, __LINE__);
+		cci_dev->cci_pinctrl_status = 0;
+	} else {
+		cci_dev->cci_pinctrl_status = 1;
+	}
+	rc = msm_camera_request_gpio_table(cci_dev->cci_gpio_tbl,
+		cci_dev->cci_gpio_tbl_size, 1);
+	if (cci_dev->cci_pinctrl_status) {
+		ret = pinctrl_select_state(cci_dev->cci_pinctrl.pinctrl,
+				cci_dev->cci_pinctrl.gpio_state_active);
+		if (ret)
+			pr_err("%s:%d cannot set pin to active state\n",
+				__func__, __LINE__);
+	}
+	if (rc < 0) {
+		CDBG("%s: request gpio failed\n", __func__);
+		goto request_gpio_failed;
+	}
+
+	rc = msm_camera_config_vreg(&cci_dev->v4l2_dev_str.pdev->dev,
+		cci_dev->cci_vreg, cci_dev->regulator_count, NULL, 0,
+		&cci_dev->cci_reg_ptr[0], 1);
+	if (rc < 0) {
+		pr_err("%s:%d cci config_vreg failed\n", __func__, __LINE__);
+		goto clk_enable_failed;
+	}
+
+	rc = msm_camera_enable_vreg(&cci_dev->v4l2_dev_str.pdev->dev,
+		cci_dev->cci_vreg, cci_dev->regulator_count, NULL, 0,
+		&cci_dev->cci_reg_ptr[0], 1);
+	if (rc < 0) {
+		pr_err("%s:%d cci enable_vreg failed\n", __func__, __LINE__);
+		goto reg_enable_failed;
+	}
+
+	clk_rates = cam_cci_get_clk_rates(cci_dev, c_ctrl);
+	if (!clk_rates) {
+		pr_err("%s: clk enable failed\n", __func__);
+		goto reg_enable_failed;
+	}
+
+	for (i = 0; i < cci_dev->num_clk; i++) {
+		cci_dev->cci_clk_info[i].clk_rate =
+			clk_rates[i];
+	}
+	rc = msm_camera_clk_enable(&cci_dev->v4l2_dev_str.pdev->dev,
+		cci_dev->cci_clk_info, cci_dev->cci_clk,
+		cci_dev->num_clk, true);
+	if (rc < 0) {
+		pr_err("%s: clk enable failed\n", __func__);
+		goto reg_enable_failed;
+	}
+
+	/* Re-initialize the completion */
+	reinit_completion(&cci_dev->cci_master_info[master].reset_complete);
+	for (i = 0; i < NUM_QUEUES; i++)
+		reinit_completion(&cci_dev->cci_master_info[master].
+			report_q[i]);
+	rc = msm_camera_enable_irq(cci_dev->irq, true);
+	if (rc < 0) {
+		pr_err("%s: irq enable failed\n", __func__);
+		/* Unwind everything acquired above; the previous code
+		 * returned here directly and leaked the CPAS vote,
+		 * GPIOs, regulators and clocks.  Disabling the
+		 * never-enabled irq in the cleanup path is harmless.
+		 */
+		rc = -EINVAL;
+		goto reset_complete_failed;
+	}
+	cci_dev->hw_version = cam_io_r_mb(cci_dev->base +
+		CCI_HW_VERSION_ADDR);
+	CDBG("%s:%d: hw_version = 0x%x\n", __func__, __LINE__,
+		cci_dev->hw_version);
+
+	cci_dev->payload_size =
+		MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_11;
+	cci_dev->support_seq_write = 1;
+
+	/* Queue 0 is the large queue, queue 1 the small one. */
+	for (i = 0; i < NUM_MASTERS; i++) {
+		for (j = 0; j < NUM_QUEUES; j++) {
+			if (j == QUEUE_0)
+				cci_dev->cci_i2c_queue_info[i][j].
+					max_queue_size =
+						CCI_I2C_QUEUE_0_SIZE;
+			else
+				cci_dev->cci_i2c_queue_info[i][j].
+					max_queue_size =
+						CCI_I2C_QUEUE_1_SIZE;
+
+			CDBG("CCI Master[%d] :: Q0 size: %d Q1 size: %d\n", i,
+				cci_dev->cci_i2c_queue_info[i][j].
+				max_queue_size,
+				cci_dev->cci_i2c_queue_info[i][j].
+				max_queue_size);
+		}
+	}
+
+	/* Full-block reset; completion is signalled from the IRQ. */
+	cci_dev->cci_master_info[MASTER_0].reset_pending = TRUE;
+	cam_io_w_mb(CCI_RESET_CMD_RMSK, cci_dev->base +
+			CCI_RESET_CMD_ADDR);
+	cam_io_w_mb(0x1, cci_dev->base + CCI_RESET_CMD_ADDR);
+	rc = wait_for_completion_timeout(
+		&cci_dev->cci_master_info[MASTER_0].reset_complete,
+		CCI_TIMEOUT);
+	if (rc <= 0) {
+		pr_err("%s: wait_for_completion_timeout %d\n",
+			 __func__, __LINE__);
+		if (rc == 0)
+			rc = -ETIMEDOUT;
+		goto reset_complete_failed;
+	}
+	for (i = 0; i < MASTER_MAX; i++)
+		cci_dev->i2c_freq_mode[i] = I2C_MAX_MODES;
+	cam_io_w_mb(CCI_IRQ_MASK_0_RMSK,
+		cci_dev->base + CCI_IRQ_MASK_0_ADDR);
+	cam_io_w_mb(CCI_IRQ_MASK_0_RMSK,
+		cci_dev->base + CCI_IRQ_CLEAR_0_ADDR);
+	cam_io_w_mb(0x1, cci_dev->base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+
+	for (i = 0; i < MASTER_MAX; i++) {
+		if (!cci_dev->write_wq[i]) {
+			pr_err("Failed to flush write wq\n");
+			rc = -ENOMEM;
+			goto reset_complete_failed;
+		} else {
+			flush_workqueue(cci_dev->write_wq[i]);
+		}
+	}
+	cci_dev->cci_state = CCI_STATE_ENABLED;
+
+	return 0;
+
+reset_complete_failed:
+	msm_camera_enable_irq(cci_dev->irq, false);
+	msm_camera_clk_enable(&cci_dev->v4l2_dev_str.pdev->dev,
+		cci_dev->cci_clk_info, cci_dev->cci_clk,
+		cci_dev->num_clk, false);
+reg_enable_failed:
+	msm_camera_config_vreg(&cci_dev->v4l2_dev_str.pdev->dev,
+		cci_dev->cci_vreg, cci_dev->regulator_count, NULL, 0,
+		&cci_dev->cci_reg_ptr[0], 0);
+clk_enable_failed:
+	if (cci_dev->cci_pinctrl_status) {
+		ret = pinctrl_select_state(cci_dev->cci_pinctrl.pinctrl,
+				cci_dev->cci_pinctrl.gpio_state_suspend);
+		if (ret)
+			pr_err("%s:%d cannot set pin to suspend state\n",
+				__func__, __LINE__);
+	}
+	msm_camera_request_gpio_table(cci_dev->cci_gpio_tbl,
+		cci_dev->cci_gpio_tbl_size, 0);
+request_gpio_failed:
+	cci_dev->ref_count--;
+	cam_cpas_stop(cci_dev->cpas_handle);
+
+	return rc;
+}
+
+/* Release the clock info/rates tables and unmap the CCI register
+ * base; counterpart of the acquisitions in cam_cci_parse_dt_info().
+ */
+void cam_cci_soc_remove(struct platform_device *pdev,
+	struct cci_device *cci_dev)
+{
+	msm_camera_put_clk_info_and_rates(pdev,
+		&cci_dev->cci_clk_info, &cci_dev->cci_clk,
+		&cci_dev->cci_clk_rates, cci_dev->num_clk_cases,
+		cci_dev->num_clk);
+
+	msm_camera_put_reg_base(pdev, cci_dev->base, "cci", true);
+}
+
+static void cam_cci_init_cci_params(struct cci_device *new_cci_dev)
+{
+	uint8_t master = 0, queue = 0;
+
+	/* Reset per-master bookkeeping: status word, serializing locks
+	 * and the reset/report completions used by the IRQ handler.
+	 */
+	for (master = 0; master < NUM_MASTERS; master++) {
+		struct cam_cci_master_info *info =
+			&new_cci_dev->cci_master_info[master];
+
+		info->status = 0;
+		mutex_init(&info->mutex);
+		init_completion(&info->reset_complete);
+
+		for (queue = 0; queue < NUM_QUEUES; queue++) {
+			mutex_init(&info->mutex_q[queue]);
+			init_completion(&info->report_q[queue]);
+		}
+	}
+}
+
+/* Build the CCI GPIO table from device tree: GPIO numbers, flags
+ * ("qcom,gpio-tbl-flags") and labels ("qcom,gpio-tbl-label").
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int32_t cam_cci_init_gpio_params(struct cci_device *cci_dev)
+{
+	int32_t rc = 0, i = 0, tbl_size = 0;
+	uint32_t *val_array = NULL;
+	struct device_node *of_node = cci_dev->v4l2_dev_str.pdev->dev.of_node;
+	struct gpio *gpio_tbl = NULL;
+
+	/* of_gpio_count() may return a negative errno; keep the count
+	 * in a signed variable (the original uint8_t wrapped errors
+	 * into large positive counts) and reject anything that is not
+	 * strictly positive.
+	 */
+	tbl_size = of_gpio_count(of_node);
+	CDBG("%s gpio count %d\n", __func__, tbl_size);
+	if (tbl_size <= 0) {
+		pr_err("%s:%d gpio count 0\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+	cci_dev->cci_gpio_tbl_size = tbl_size;
+
+	gpio_tbl = cci_dev->cci_gpio_tbl =
+		kcalloc(tbl_size, sizeof(struct gpio), GFP_KERNEL);
+	if (!gpio_tbl) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < tbl_size; i++) {
+		gpio_tbl[i].gpio = of_get_gpio(of_node, i);
+		CDBG("%s gpio_tbl[%d].gpio = %d\n", __func__, i,
+			gpio_tbl[i].gpio);
+	}
+
+	val_array = kcalloc(tbl_size, sizeof(uint32_t),
+		GFP_KERNEL);
+	if (!val_array) {
+		rc = -ENOMEM;
+		goto free_gpio_tbl;
+	}
+
+	/* One flags word per GPIO entry. */
+	rc = of_property_read_u32_array(of_node, "qcom,gpio-tbl-flags",
+		val_array, tbl_size);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto free_val_array;
+	}
+	for (i = 0; i < tbl_size; i++) {
+		gpio_tbl[i].flags = val_array[i];
+		CDBG("%s gpio_tbl[%d].flags = %ld\n", __func__, i,
+			gpio_tbl[i].flags);
+	}
+
+	/* One label string per GPIO entry. */
+	for (i = 0; i < tbl_size; i++) {
+		rc = of_property_read_string_index(of_node,
+			"qcom,gpio-tbl-label", i, &gpio_tbl[i].label);
+		CDBG("%s gpio_tbl[%d].label = %s\n", __func__, i,
+			gpio_tbl[i].label);
+		if (rc < 0) {
+			pr_err("%s failed %d\n", __func__, __LINE__);
+			goto free_val_array;
+		}
+	}
+
+	kfree(val_array);
+	return rc;
+
+free_val_array:
+	kfree(val_array);
+free_gpio_tbl:
+	kfree(cci_dev->cci_gpio_tbl);
+	cci_dev->cci_gpio_tbl = NULL;
+	cci_dev->cci_gpio_tbl_size = 0;
+	return rc;
+}
+
+static void cam_cci_init_default_clk_params(struct cci_device *cci_dev,
+	uint8_t index)
+{
+	struct cam_cci_clk_params_t *params = &cci_dev->cci_clk_params[index];
+
+	/* default clock params are for 100Khz */
+	params->hw_thigh = 201;
+	params->hw_tlow = 174;
+	params->hw_tsu_sto = 204;
+	params->hw_tsu_sta = 231;
+	params->hw_thd_dat = 22;
+	params->hw_thd_sta = 162;
+	params->hw_tbuf = 227;
+	params->hw_scl_stretch_en = 0;
+	params->hw_trdhld = 6;
+	params->hw_tsp = 3;
+	params->cci_clk_src = 37500000;
+}
+
+/* Populate cci_dev->cci_clk_params[] for every I2C frequency mode.
+ * Each mode reads its timing values from a dedicated DT node; the
+ * chained "if (!rc)" pattern means that if any property read fails,
+ * the final else applies the 100 KHz defaults for the whole mode.
+ *
+ * NOTE(review): of_find_node_by_name() searches the flattened tree
+ * starting from @of_node, not only its children; presumably a direct
+ * child lookup (of_get_child_by_name()) was intended — confirm
+ * against the DT layout before changing.
+ */
+static void cam_cci_init_clk_params(struct cci_device *cci_dev)
+{
+	int32_t rc = 0;
+	uint32_t val = 0;
+	uint8_t count = 0;
+	struct device_node *of_node = cci_dev->v4l2_dev_str.pdev->dev.of_node;
+	struct device_node *src_node = NULL;
+
+	for (count = 0; count < I2C_MAX_MODES; count++) {
+
+		if (count == I2C_STANDARD_MODE)
+			src_node = of_find_node_by_name(of_node,
+				"qcom,i2c_standard_mode");
+		else if (count == I2C_FAST_MODE)
+			src_node = of_find_node_by_name(of_node,
+				"qcom,i2c_fast_mode");
+		else if (count == I2C_FAST_PLUS_MODE)
+			src_node = of_find_node_by_name(of_node,
+				"qcom,i2c_fast_plus_mode");
+		else
+			src_node = of_find_node_by_name(of_node,
+				"qcom,i2c_custom_mode");
+
+		rc = of_property_read_u32(src_node, "qcom,hw-thigh", &val);
+		CDBG("%s qcom,hw-thigh %d, rc %d\n", __func__, val, rc);
+		if (!rc) {
+			cci_dev->cci_clk_params[count].hw_thigh = val;
+			rc = of_property_read_u32(src_node, "qcom,hw-tlow",
+				&val);
+			CDBG("%s qcom,hw-tlow %d, rc %d\n", __func__, val, rc);
+		}
+		if (!rc) {
+			cci_dev->cci_clk_params[count].hw_tlow = val;
+			rc = of_property_read_u32(src_node, "qcom,hw-tsu-sto",
+				&val);
+			CDBG("%s qcom,hw-tsu-sto %d, rc %d\n",
+				__func__, val, rc);
+		}
+		if (!rc) {
+			cci_dev->cci_clk_params[count].hw_tsu_sto = val;
+			rc = of_property_read_u32(src_node, "qcom,hw-tsu-sta",
+				&val);
+			CDBG("%s qcom,hw-tsu-sta %d, rc %d\n",
+				__func__, val, rc);
+		}
+		if (!rc) {
+			cci_dev->cci_clk_params[count].hw_tsu_sta = val;
+			rc = of_property_read_u32(src_node, "qcom,hw-thd-dat",
+				&val);
+			CDBG("%s qcom,hw-thd-dat %d, rc %d\n",
+				__func__, val, rc);
+		}
+		if (!rc) {
+			cci_dev->cci_clk_params[count].hw_thd_dat = val;
+			rc = of_property_read_u32(src_node, "qcom,hw-thd-sta",
+				&val);
+			CDBG("%s qcom,hw-thd-sta %d, rc %d\n", __func__,
+				val, rc);
+		}
+		if (!rc) {
+			cci_dev->cci_clk_params[count].hw_thd_sta = val;
+			rc = of_property_read_u32(src_node, "qcom,hw-tbuf",
+				&val);
+			CDBG("%s qcom,hw-tbuf %d, rc %d\n", __func__, val, rc);
+		}
+		if (!rc) {
+			cci_dev->cci_clk_params[count].hw_tbuf = val;
+			rc = of_property_read_u32(src_node,
+				"qcom,hw-scl-stretch-en", &val);
+			CDBG("%s qcom,hw-scl-stretch-en %d, rc %d\n",
+				__func__, val, rc);
+		}
+		if (!rc) {
+			cci_dev->cci_clk_params[count].hw_scl_stretch_en = val;
+			rc = of_property_read_u32(src_node, "qcom,hw-trdhld",
+				&val);
+			CDBG("%s qcom,hw-trdhld %d, rc %d\n",
+				__func__, val, rc);
+		}
+		if (!rc) {
+			cci_dev->cci_clk_params[count].hw_trdhld = val;
+			rc = of_property_read_u32(src_node, "qcom,hw-tsp",
+				&val);
+			CDBG("%s qcom,hw-tsp %d, rc %d\n", __func__, val, rc);
+		}
+		if (!rc) {
+			cci_dev->cci_clk_params[count].hw_tsp = val;
+			val = 0;
+			/* cci-clk-src is optional: a failed read leaves
+			 * val at 0, which is stored regardless.
+			 */
+			rc = of_property_read_u32(src_node, "qcom,cci-clk-src",
+				&val);
+			CDBG("%s qcom,cci-clk-src %d, rc %d\n",
+				__func__, val, rc);
+			cci_dev->cci_clk_params[count].cci_clk_src = val;
+		} else
+			cam_cci_init_default_clk_params(cci_dev, count);
+
+		of_node_put(src_node);
+	}
+}
+
+/**
+ * cam_cci_parse_dt_info() - parse CCI device tree properties
+ * @pdev: platform device
+ * @new_cci_dev: CCI device structure, owned by the caller
+ *
+ * Acquires clocks, register base, IRQ, GPIO and regulator data from
+ * device tree.  On failure only resources acquired here are released;
+ * @new_cci_dev itself is never freed — the caller allocated it and
+ * frees it on error.  (The previous kfree(new_cci_dev) on the clock
+ * path caused a double free via the probe's cci_no_resource path and
+ * only nulled the local pointer copy.)
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int cam_cci_parse_dt_info(struct platform_device *pdev,
+	struct cci_device *new_cci_dev)
+{
+	int rc = 0, i = 0;
+
+	/* Get Clock Info*/
+	rc = msm_camera_get_clk_info_and_rates(pdev,
+		&new_cci_dev->cci_clk_info, &new_cci_dev->cci_clk,
+		&new_cci_dev->cci_clk_rates, &new_cci_dev->num_clk_cases,
+		&new_cci_dev->num_clk);
+	if (rc < 0) {
+		pr_err("%s: cam_cci_get_clk_info() failed", __func__);
+		return -EFAULT;
+	}
+
+	/* NOTE(review): the clock info acquired above is not released
+	 * on the failure paths below; cam_cci_soc_remove() only runs
+	 * on the normal remove path — confirm and unify if needed.
+	 */
+	new_cci_dev->ref_count = 0;
+	new_cci_dev->base = msm_camera_get_reg_base(pdev, "cci", true);
+	if (!new_cci_dev->base) {
+		pr_err("%s: no mem resource?\n", __func__);
+		return -ENODEV;
+	}
+	new_cci_dev->irq = msm_camera_get_irq(pdev, "cci");
+	if (!new_cci_dev->irq) {
+		pr_err("%s: no irq resource?\n", __func__);
+		return -ENODEV;
+	}
+	CDBG("%s line %d cci irq start %d end %d\n", __func__,
+		__LINE__,
+		(int) new_cci_dev->irq->start,
+		(int) new_cci_dev->irq->end);
+	rc = msm_camera_register_irq(pdev, new_cci_dev->irq,
+		cam_cci_irq, IRQF_TRIGGER_RISING, "cci", new_cci_dev);
+	if (rc < 0) {
+		pr_err("%s: irq request fail\n", __func__);
+		rc = -EBUSY;
+		goto cci_release_mem;
+	}
+
+	/* Keep the IRQ masked until cam_cci_init() powers the block. */
+	msm_camera_enable_irq(new_cci_dev->irq, false);
+	new_cci_dev->v4l2_dev_str.pdev = pdev;
+	cam_cci_init_cci_params(new_cci_dev);
+	cam_cci_init_clk_params(new_cci_dev);
+	rc = cam_cci_init_gpio_params(new_cci_dev);
+	if (rc < 0) {
+		pr_err("%s:%d :Error: In Initializing GPIO params:%d\n",
+			__func__, __LINE__, rc);
+		goto cci_release_mem;
+	}
+
+	rc = cam_sensor_get_dt_vreg_data(new_cci_dev->
+		v4l2_dev_str.pdev->dev.of_node,
+		&(new_cci_dev->cci_vreg), &(new_cci_dev->regulator_count));
+	if (rc < 0) {
+		pr_err("%s: cam_sensor_get_dt_vreg_data fail\n", __func__);
+		rc = -EFAULT;
+		goto cci_release_mem;
+	}
+
+	/* Parse VREG data */
+	if ((new_cci_dev->regulator_count < 0) ||
+		(new_cci_dev->regulator_count > MAX_REGULATOR)) {
+		pr_err("%s: invalid reg count = %d, max is %d\n", __func__,
+			new_cci_dev->regulator_count, MAX_REGULATOR);
+		rc = -EFAULT;
+		goto cci_invalid_vreg_data;
+	}
+
+	rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+	if (rc)
+		pr_err("%s: failed to add child nodes, rc=%d\n", __func__, rc);
+	/* Workqueue creation failure is tolerated here; cam_cci_init()
+	 * re-checks write_wq[] before enabling the block.
+	 */
+	for (i = 0; i < MASTER_MAX; i++) {
+		new_cci_dev->write_wq[i] = create_singlethread_workqueue(
+			"cam_cci_wq");
+		if (!new_cci_dev->write_wq[i])
+			pr_err("Failed to create write wq\n");
+	}
+	CDBG("%s line %d\n", __func__, __LINE__);
+	return 0;
+
+cci_invalid_vreg_data:
+	kfree(new_cci_dev->cci_vreg);
+	new_cci_dev->cci_vreg = NULL;
+cci_release_mem:
+	msm_camera_put_reg_base(pdev, new_cci_dev->base, "cci", true);
+
+	return rc;
+}
+
+/* Power down the CCI block when the last user releases it.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int cam_cci_soc_release(struct cci_device *cci_dev)
+{
+	uint8_t i = 0;
+	/* rc must be signed and at least int-sized: the helpers below
+	 * return negative errnos, which the original uint8_t silently
+	 * turned positive, defeating every "rc < 0" check and
+	 * corrupting the returned error code.
+	 */
+	int32_t rc = 0;
+
+	if (!cci_dev->ref_count || cci_dev->cci_state != CCI_STATE_ENABLED) {
+		pr_err("%s invalid ref count %d / cci state %d\n",
+			__func__, cci_dev->ref_count, cci_dev->cci_state);
+		return -EINVAL;
+	}
+	/* Only the last user actually powers the block down. */
+	if (--cci_dev->ref_count) {
+		CDBG("%s ref_count Exit %d\n", __func__, cci_dev->ref_count);
+		return 0;
+	}
+	for (i = 0; i < MASTER_MAX; i++)
+		if (cci_dev->write_wq[i])
+			flush_workqueue(cci_dev->write_wq[i]);
+
+	msm_camera_enable_irq(cci_dev->irq, false);
+	msm_camera_clk_enable(&cci_dev->v4l2_dev_str.pdev->dev,
+		cci_dev->cci_clk_info, cci_dev->cci_clk,
+		cci_dev->num_clk, false);
+
+	rc = msm_camera_enable_vreg(&cci_dev->v4l2_dev_str.pdev->dev,
+		cci_dev->cci_vreg, cci_dev->regulator_count, NULL, 0,
+		&cci_dev->cci_reg_ptr[0], 0);
+	if (rc < 0)
+		pr_err("%s:%d cci disable_vreg failed\n", __func__, __LINE__);
+
+	rc = msm_camera_config_vreg(&cci_dev->v4l2_dev_str.pdev->dev,
+		cci_dev->cci_vreg, cci_dev->regulator_count, NULL, 0,
+		&cci_dev->cci_reg_ptr[0], 0);
+	if (rc < 0)
+		pr_err("%s:%d cci unconfig_vreg failed\n", __func__, __LINE__);
+
+	if (cci_dev->cci_pinctrl_status) {
+		rc = pinctrl_select_state(cci_dev->cci_pinctrl.pinctrl,
+				cci_dev->cci_pinctrl.gpio_state_suspend);
+		if (rc)
+			pr_err("%s:%d cannot set pin to suspend state\n",
+				__func__, __LINE__);
+	}
+	cci_dev->cci_pinctrl_status = 0;
+	msm_camera_request_gpio_table(cci_dev->cci_gpio_tbl,
+		cci_dev->cci_gpio_tbl_size, 0);
+	for (i = 0; i < MASTER_MAX; i++)
+		cci_dev->i2c_freq_mode[i] = I2C_MAX_MODES;
+	cci_dev->cci_state = CCI_STATE_DISABLED;
+	cci_dev->cycles_per_us = 0;
+	cci_dev->cci_clk_src = 0;
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.h
new file mode 100644
index 0000000..ca4bbe0
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.h
@@ -0,0 +1,51 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CCI_SOC_H_
+#define _CAM_CCI_SOC_H_
+
+#include "cam_cci_core.h"
+
+/**
+ * @sd: V4L2 sub device
+ * @c_ctrl: CCI control structure
+ *
+ * This API initializes the CCI and acquires SOC resources
+ * Returns 0 on success; presumably a negative errno otherwise — confirm
+ * against the definition in cam_cci_soc.c.
+ */
+int cam_cci_init(struct v4l2_subdev *sd,
+	struct cam_cci_ctrl *c_ctrl);
+
+/**
+ * @cci_dev: CCI device structure
+ *
+ * This API releases the CCI and its SOC resources
+ * Returns 0 on success or a negative errno.
+ */
+int cam_cci_soc_release(struct cci_device *cci_dev);
+
+/**
+ * @pdev: Platform device
+ * @new_cci_dev: CCI device structure
+ *
+ * This API parses CCI device tree
+ * Returns 0 on success; presumably a negative errno on parse failure —
+ * confirm against the definition in cam_cci_soc.c.
+ */
+int cam_cci_parse_dt_info(struct platform_device *pdev,
+	struct cci_device *new_cci_dev);
+
+/**
+ * @pdev: Platform device
+ * @cci_dev: CCI device structure
+ *
+ * This API puts all SOC resources
+ */
+void cam_cci_soc_remove(struct platform_device *pdev,
+	struct cci_device *cci_dev);
+#endif /* _CAM_CCI_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/Makefile
new file mode 100644
index 0000000..0337b32
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/Makefile
@@ -0,0 +1,8 @@
+# Header search paths for the shared camera utility, CPAS, sensor I/O,
+# sensor utility, CCI and request-manager modules used by CSIPHY sources.
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+
+# CSIPHY driver objects, built when the Spectra camera stack is enabled.
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_csiphy_soc.o cam_csiphy_dev.o cam_csiphy_core.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
new file mode 100644
index 0000000..8dc65f5
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
@@ -0,0 +1,500 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include "cam_csiphy_core.h"
+#include "cam_csiphy_dev.h"
+#include "cam_csiphy_soc.h"
+#include <cam_mem_mgr.h>
+
+/* Fill @csiphy_cap with this PHY's slot (platform-device id), hardware
+ * version and clock-lane number for the QUERY_CAP ioctl. */
+void cam_csiphy_query_cap(struct csiphy_device *csiphy_dev,
+	struct cam_csiphy_query_cap *csiphy_cap)
+{
+	csiphy_cap->version = csiphy_dev->hw_version;
+	csiphy_cap->clk_lane = csiphy_dev->clk_lane;
+	csiphy_cap->slot_info = csiphy_dev->v4l2_dev_str.pdev->id;
+}
+
+/* Walk the reset register table, writing each value and honouring the
+ * per-entry delay (table delay is in units of 100 us here) so the PHY
+ * reset sequence is applied in the required order. */
+void cam_csiphy_reset(struct csiphy_device *csiphy_dev)
+{
+	int32_t  i;
+	uint32_t size =
+		csiphy_dev->ctrl_reg->csiphy_reg.csiphy_reset_array_size;
+
+	for (i = 0; i < size; i++) {
+		cam_io_w(
+			csiphy_dev->ctrl_reg->
+			csiphy_reset_reg[i].reg_data,
+			csiphy_dev->base +
+			csiphy_dev->ctrl_reg->
+			csiphy_reset_reg[i].reg_addr);
+
+		/* delay * 100 us, with a 1 ms slack for usleep_range */
+		usleep_range(csiphy_dev->ctrl_reg->
+			csiphy_reset_reg[i].delay * 100,
+			csiphy_dev->ctrl_reg->
+			csiphy_reset_reg[i].delay * 100 + 1000);
+	}
+}
+
+/**
+ * cam_cmd_buf_parser() - unpack a CSIPHY CONFIG_DEV command buffer
+ * @csiphy_dev: CSIPHY device that receives the parsed settings
+ * @cfg_dev:    user-supplied packet handle/offset descriptor
+ *
+ * Allocates csiphy_dev->csiphy_info and fills it from the cam_csiphy_info
+ * payload located through @cfg_dev.  On any failure the allocation is
+ * released again (single goto cleanup path) and a negative errno returned.
+ */
+int32_t cam_cmd_buf_parser(struct csiphy_device *csiphy_dev,
+	struct cam_config_dev_cmd *cfg_dev)
+{
+	int32_t                 rc = 0;
+	uint64_t                generic_ptr;
+	struct cam_packet       *csl_packet = NULL;
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	uint32_t                *cmd_buf = NULL;
+	struct cam_csiphy_info  *cam_cmd_csiphy_info = NULL;
+	size_t                  len;
+
+	if (!cfg_dev || !csiphy_dev) {
+		pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	csiphy_dev->csiphy_info =
+		kzalloc(sizeof(struct cam_csiphy_info), GFP_KERNEL);
+	if (!csiphy_dev->csiphy_info)
+		return -ENOMEM;
+
+	rc = cam_mem_get_cpu_buf((int32_t) cfg_dev->packet_handle,
+		(uint64_t *)&generic_ptr, &len);
+	if (rc < 0) {
+		pr_err("%s:%d :ERROR: Failed to get packet Mem address: %d\n",
+			__func__, __LINE__, rc);
+		goto rel_csiphy_info;
+	}
+
+	/*
+	 * ">=" rather than ">": an offset equal to the mapped length would
+	 * make csl_packet point one past the end of the buffer.
+	 */
+	if (cfg_dev->offset >= len) {
+		pr_err("%s: %d offset is out of bounds: offset: %lld len: %zu\n",
+			__func__, __LINE__, cfg_dev->offset, len);
+		rc = -EINVAL;
+		goto rel_csiphy_info;
+	}
+
+	csl_packet = (struct cam_packet *)(generic_ptr + cfg_dev->offset);
+
+	/* cmd_buf_offset is in bytes; payload is addressed as 32-bit words */
+	cmd_desc = (struct cam_cmd_buf_desc *)
+		((uint32_t *)&csl_packet->payload +
+		csl_packet->cmd_buf_offset / 4);
+
+	rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle,
+		(uint64_t *)&generic_ptr, &len);
+	if (rc < 0) {
+		pr_err("%s:%d :ERROR: Failed to get cmd buf Mem address : %d\n",
+			__func__, __LINE__, rc);
+		goto rel_csiphy_info;
+	}
+
+	cmd_buf = (uint32_t *)generic_ptr;
+	cmd_buf += cmd_desc->offset / 4;
+	cam_cmd_csiphy_info = (struct cam_csiphy_info *)cmd_buf;
+
+	/* Cache the lane/timing parameters for the later START_DEV config. */
+	csiphy_dev->csiphy_info->lane_cnt = cam_cmd_csiphy_info->lane_cnt;
+	csiphy_dev->csiphy_info->lane_mask = cam_cmd_csiphy_info->lane_mask;
+	csiphy_dev->csiphy_info->csiphy_3phase =
+		cam_cmd_csiphy_info->csiphy_3phase;
+	csiphy_dev->csiphy_info->combo_mode = cam_cmd_csiphy_info->combo_mode;
+	csiphy_dev->csiphy_info->settle_time = cam_cmd_csiphy_info->settle_time;
+	csiphy_dev->csiphy_info->data_rate = cam_cmd_csiphy_info->data_rate;
+
+	return rc;
+
+rel_csiphy_info:
+	kfree(csiphy_dev->csiphy_info);
+	csiphy_dev->csiphy_info = NULL;
+	return rc;
+}
+
+/* Program every IRQ register listed in the table with its table value. */
+void cam_csiphy_cphy_irq_config(struct csiphy_device *csiphy_dev)
+{
+	int32_t idx;
+	struct csiphy_reg_t *irq_reg = csiphy_dev->ctrl_reg->csiphy_irq_reg;
+
+	for (idx = 0; idx < csiphy_dev->num_irq_registers; idx++)
+		cam_io_w(irq_reg[idx].reg_data,
+			csiphy_dev->base + irq_reg[idx].reg_addr);
+}
+
+/* Write zero to every IRQ register in the table, masking all PHY IRQs. */
+void cam_csiphy_cphy_irq_disable(struct csiphy_device *csiphy_dev)
+{
+	int32_t idx;
+	struct csiphy_reg_t *irq_reg = csiphy_dev->ctrl_reg->csiphy_irq_reg;
+
+	for (idx = 0; idx < csiphy_dev->num_irq_registers; idx++)
+		cam_io_w(0x0, csiphy_dev->base + irq_reg[idx].reg_addr);
+}
+
+/**
+ * cam_csiphy_irq() - CSIPHY interrupt handler
+ * @irq_num: IRQ line number (unused)
+ * @data:    csiphy_device registered with the IRQ
+ *
+ * Reads, logs and clears every latched interrupt status register, then
+ * pulses the global IRQ clear command.
+ */
+irqreturn_t cam_csiphy_irq(int irq_num, void *data)
+{
+	uint32_t irq;
+	uint8_t i;
+	struct csiphy_device *csiphy_dev =
+		(struct csiphy_device *)data;
+
+	if (!csiphy_dev) {
+		pr_err("%s:%d Invalid Args\n",
+			__func__, __LINE__);
+		/*
+		 * irqreturn_t is not an errno; -EINVAL here would be
+		 * misinterpreted by the IRQ core.  Report "not ours".
+		 */
+		return IRQ_NONE;
+	}
+
+	for (i = 0; i < csiphy_dev->num_irq_registers; i++) {
+		/* Status registers are laid out at 4-byte stride. */
+		irq = cam_io_r(
+			csiphy_dev->base +
+			csiphy_dev->ctrl_reg->csiphy_reg.
+			mipi_csiphy_interrupt_status0_addr + 0x4*i);
+		cam_io_w(irq,
+			csiphy_dev->base +
+			csiphy_dev->ctrl_reg->csiphy_reg.
+			mipi_csiphy_interrupt_clear0_addr + 0x4*i);
+		pr_err_ratelimited(
+			"%s CSIPHY%d_IRQ_STATUS_ADDR%d = 0x%x\n",
+			__func__, csiphy_dev->v4l2_dev_str.pdev->id, i, irq);
+		cam_io_w(0x0,
+			csiphy_dev->base +
+			csiphy_dev->ctrl_reg->csiphy_reg.
+			mipi_csiphy_interrupt_clear0_addr + 0x4*i);
+	}
+	/* Pulse the global IRQ clear command (set then reset). */
+	cam_io_w(0x1, csiphy_dev->base +
+		csiphy_dev->ctrl_reg->
+		csiphy_reg.mipi_csiphy_glbl_irq_cmd_addr);
+	cam_io_w(0x0, csiphy_dev->base +
+		csiphy_dev->ctrl_reg->
+		csiphy_reg.mipi_csiphy_glbl_irq_cmd_addr);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Program the PHY for the lane configuration captured by the last
+ * CONFIG_DEV (csiphy_dev->csiphy_info): select the 2-phase/3-phase
+ * (optionally combo) register table, write the common registers, then the
+ * per-lane settings including the settle count.
+ */
+int32_t cam_csiphy_config_dev(struct csiphy_device *csiphy_dev)
+{
+	int32_t      rc = 0;
+	uint32_t     lane_enable = 0, mask = 1, size = 0;
+	uint16_t     lane_mask = 0, i = 0, cfg_size = 0;
+	uint8_t      settle_cnt, lane_cnt, lane_pos = 0;
+	void __iomem *csiphybase;
+	struct csiphy_reg_t (*reg_array)[MAX_SETTINGS_PER_LANE];
+
+	if (csiphy_dev->csiphy_info == NULL) {
+		pr_err("%s:%d csiphy_info is NULL, No/Fail CONFIG_DEV ?\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+
+	/* NOTE(review): lane_cnt is captured but not used below. */
+	lane_cnt = csiphy_dev->csiphy_info->lane_cnt;
+	lane_mask = csiphy_dev->csiphy_info->lane_mask & 0x1f;
+	/* settle_time presumably in ns, converted via a 200 MHz reference
+	 * clock to a cycle count — TODO confirm units with the HPG. */
+	settle_cnt = (csiphy_dev->csiphy_info->settle_time / 200000000);
+	csiphybase = csiphy_dev->base;
+
+	if (!csiphybase) {
+		pr_err("%s: csiphybase NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Build the lane-enable bitmap.  Bit 1 of lane_mask appears to be
+	 * the clock lane, mapped to enable bit 0x80 (i is decremented so
+	 * data-lane positions are not consumed) — confirm against HPG. */
+	for (i = 0; i < MAX_DPHY_DATA_LN; i++) {
+		if (mask == 0x2) {
+			if (lane_mask & mask)
+				lane_enable |= 0x80;
+			i--;
+		} else if (lane_mask & mask) {
+			lane_enable |= 0x1 << (i<<1);
+		}
+		mask <<= 1;
+	}
+
+	/* Pick the register table matching 2ph/3ph and combo mode; the IRQ
+	 * register count differs between the two PHY modes. */
+	if (!csiphy_dev->csiphy_info->csiphy_3phase) {
+		if (csiphy_dev->csiphy_info->combo_mode == 1)
+			reg_array =
+				csiphy_dev->ctrl_reg->csiphy_2ph_combo_mode_reg;
+		else
+			reg_array =
+				csiphy_dev->ctrl_reg->csiphy_2ph_reg;
+		csiphy_dev->num_irq_registers = 11;
+		cfg_size = csiphy_dev->ctrl_reg->csiphy_reg.
+			csiphy_2ph_config_array_size;
+	} else {
+		if (csiphy_dev->csiphy_info->combo_mode == 1)
+			reg_array =
+				csiphy_dev->ctrl_reg->csiphy_2ph_3ph_mode_reg;
+		else
+			reg_array =
+				csiphy_dev->ctrl_reg->csiphy_3ph_reg;
+		csiphy_dev->num_irq_registers = 20;
+		cfg_size = csiphy_dev->ctrl_reg->csiphy_reg.
+			csiphy_3ph_config_array_size;
+	}
+
+	size = csiphy_dev->ctrl_reg->csiphy_reg.csiphy_common_array_size;
+
+	/* Common (lane-independent) register programming. */
+	for (i = 0; i < size; i++) {
+		switch (csiphy_dev->ctrl_reg->
+			csiphy_common_reg[i].csiphy_param_type) {
+			case CSIPHY_LANE_ENABLE:
+				cam_io_w(lane_enable,
+					csiphy_dev->base +
+					csiphy_dev->ctrl_reg->
+					csiphy_common_reg[i].reg_addr);
+			break;
+			case CSIPHY_DEFAULT_PARAMS:
+				cam_io_w(csiphy_dev->ctrl_reg->
+					csiphy_common_reg[i].reg_data,
+					csiphy_dev->base +
+					csiphy_dev->ctrl_reg->
+					csiphy_common_reg[i].reg_addr);
+			break;
+			default:
+			break;
+		}
+	}
+
+	/* Per-lane programming: walk lane_mask LSB-first, applying the
+	 * selected table row for each enabled lane position. */
+	while (lane_mask & 0x1f) {
+		if (!(lane_mask & 0x1)) {
+			lane_pos++;
+			lane_mask >>= 1;
+			continue;
+		}
+
+		for (i = 0; i < cfg_size; i++) {
+			switch (reg_array[lane_pos][i].csiphy_param_type) {
+			case CSIPHY_LANE_ENABLE:
+				cam_io_w(lane_enable,
+					csiphy_dev->base +
+					reg_array[lane_pos][i].reg_addr);
+			break;
+			case CSIPHY_DEFAULT_PARAMS:
+				cam_io_w(reg_array[lane_pos][i].reg_data,
+					csiphy_dev->base +
+					reg_array[lane_pos][i].reg_addr);
+			break;
+			case CSIPHY_SETTLE_CNT_LOWER_BYTE:
+				cam_io_w(settle_cnt & 0xFF,
+					csiphy_dev->base +
+					reg_array[lane_pos][i].reg_addr);
+			break;
+			case CSIPHY_SETTLE_CNT_HIGHER_BYTE:
+				cam_io_w((settle_cnt >> 8) & 0xFF,
+					csiphy_dev->base +
+					reg_array[lane_pos][i].reg_addr);
+			break;
+			default:
+				CDBG("%s: %d Do Nothing\n", __func__, __LINE__);
+			break;
+			}
+			/* table delay is in ms here (delay * 1000 us) */
+			usleep_range(reg_array[lane_pos][i].delay*1000,
+				reg_array[lane_pos][i].delay*1000 + 1000);
+		}
+		lane_mask >>= 1;
+		lane_pos++;
+	}
+
+	cam_csiphy_cphy_irq_config(csiphy_dev);
+
+	return rc;
+}
+
+/**
+ * cam_csiphy_core_cfg() - CSIPHY ioctl opcode dispatcher
+ * @phy_dev: csiphy_device pointer (opaque to the caller)
+ * @arg:     cam_control command from user space
+ *
+ * Handles ACQUIRE/QUERY_CAP/CONFIG/START/STOP/RELEASE under the device
+ * mutex.  Returns 0 on success or a negative errno.
+ */
+int32_t cam_csiphy_core_cfg(void *phy_dev,
+			void *arg)
+{
+	struct csiphy_device *csiphy_dev =
+		(struct csiphy_device *)phy_dev;
+	struct cam_control   *cmd = (struct cam_control *)arg;
+	int32_t              rc = 0;
+
+	if (!csiphy_dev || !cmd) {
+		pr_err("%s:%d Invalid input args\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+
+	pr_debug("%s:%d Opcode received: %d\n", __func__, __LINE__,
+		cmd->op_code);
+	mutex_lock(&csiphy_dev->mutex);
+	switch (cmd->op_code) {
+	case CAM_ACQUIRE_DEV: {
+		struct cam_sensor_acquire_dev csiphy_acq_dev;
+		struct cam_csiphy_acquire_dev_info csiphy_acq_params;
+
+		struct cam_create_dev_hdl bridge_params;
+
+		/*
+		 * copy_from_user() returns the number of bytes NOT copied —
+		 * an unsigned count that is never negative — so "rc < 0"
+		 * could never fire; map any non-zero result to -EFAULT.
+		 */
+		if (copy_from_user(&csiphy_acq_dev,
+			(void __user *)cmd->handle,
+			sizeof(csiphy_acq_dev))) {
+			pr_err("%s:%d :ERROR: Failed copying from User\n",
+				__func__, __LINE__);
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+
+		/* Only index 0 is used here; combo slots share this path. */
+		csiphy_acq_params.combo_mode = 0;
+
+		if (csiphy_dev->acquire_count == 2) {
+			pr_err("%s:%d CSIPHY device do not allow more than 2 acquires\n",
+				__func__, __LINE__);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
+		bridge_params.ops = NULL;
+		bridge_params.session_hdl = csiphy_acq_dev.session_handle;
+		bridge_params.v4l2_sub_dev_flag = 0;
+		bridge_params.media_entity_flag = 0;
+		bridge_params.priv = csiphy_dev;
+
+		csiphy_acq_dev.device_handle =
+			cam_create_device_hdl(&bridge_params);
+		csiphy_dev->bridge_intf.
+			device_hdl[csiphy_acq_params.combo_mode] =
+				csiphy_acq_dev.device_handle;
+		csiphy_dev->bridge_intf.
+			session_hdl[csiphy_acq_params.combo_mode] =
+			csiphy_acq_dev.session_handle;
+
+		/* Return the freshly created device handle to user space. */
+		if (copy_to_user((void __user *)cmd->handle,
+				&csiphy_acq_dev,
+				sizeof(struct cam_sensor_acquire_dev))) {
+			pr_err("%s:%d :ERROR: Failed copying to User\n",
+				__func__, __LINE__);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+		if (csiphy_acq_params.combo_mode == 1)
+			csiphy_dev->is_acquired_dev_combo_mode = 1;
+		csiphy_dev->acquire_count++;
+	}
+		break;
+	case CAM_QUERY_CAP: {
+		struct cam_csiphy_query_cap csiphy_cap;
+
+		cam_csiphy_query_cap(csiphy_dev, &csiphy_cap);
+		if (copy_to_user((void __user *)cmd->handle,
+			&csiphy_cap, sizeof(struct cam_csiphy_query_cap))) {
+			pr_err("%s:%d :ERROR: Failed copying to User\n",
+				__func__, __LINE__);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+	}
+		break;
+	case CAM_STOP_DEV: {
+		/* Release SOC resources, then de-vote CPAS bandwidth. */
+		rc = cam_csiphy_soc_release(csiphy_dev);
+		if (rc < 0) {
+			pr_err("%s:%d Failed in csiphy release\n",
+				__func__, __LINE__);
+			cam_cpas_stop(csiphy_dev->cpas_handle);
+			goto release_mutex;
+		}
+		rc = cam_cpas_stop(csiphy_dev->cpas_handle);
+		if (rc < 0) {
+			pr_err("%s:%d :Error: de-voting CPAS: %d\n",
+				__func__, __LINE__, rc);
+			goto release_mutex;
+		}
+	}
+		break;
+	case CAM_RELEASE_DEV: {
+		struct cam_release_dev_cmd release;
+
+		if (!csiphy_dev->acquire_count) {
+			pr_err("%s:%d No valid devices to release\n",
+				__func__, __LINE__);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
+		if (copy_from_user(&release, (void __user *) cmd->handle,
+			sizeof(release))) {
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+
+		rc = cam_destroy_device_hdl(release.dev_handle);
+		if (rc < 0)
+			pr_err("%s:%d :ERROR: destroying the device hdl\n",
+				__func__, __LINE__);
+		/* Invalidate whichever bridge slot held this handle. */
+		if (release.dev_handle ==
+			csiphy_dev->bridge_intf.device_hdl[0]) {
+			csiphy_dev->bridge_intf.device_hdl[0] = -1;
+			csiphy_dev->bridge_intf.link_hdl[0] = -1;
+			csiphy_dev->bridge_intf.session_hdl[0] = -1;
+		} else {
+			csiphy_dev->bridge_intf.device_hdl[1] = -1;
+			csiphy_dev->bridge_intf.link_hdl[1] = -1;
+			csiphy_dev->bridge_intf.
+				session_hdl[1] = -1;
+		}
+		csiphy_dev->acquire_count--;
+	}
+		break;
+	case CAM_CONFIG_DEV: {
+		struct cam_config_dev_cmd config;
+
+		if (copy_from_user(&config, (void __user *)cmd->handle,
+					sizeof(config))) {
+			rc = -EFAULT;
+		} else {
+			rc = cam_cmd_buf_parser(csiphy_dev, &config);
+			if (rc < 0) {
+				pr_err("%s:%d Fail in cmd buf parser\n",
+					__func__, __LINE__);
+				goto release_mutex;
+			}
+		}
+		break;
+	}
+	case CAM_START_DEV: {
+		struct cam_ahb_vote ahb_vote;
+		struct cam_axi_vote axi_vote;
+
+		/* Vote CPAS bandwidth before touching the hardware. */
+		ahb_vote.type = CAM_VOTE_ABSOLUTE;
+		ahb_vote.vote.level = CAM_SVS_VOTE;
+		axi_vote.compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+		axi_vote.uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+
+		rc = cam_cpas_start(csiphy_dev->cpas_handle,
+			&ahb_vote, &axi_vote);
+		if (rc < 0) {
+			pr_err("%s:%d :Error: voting CPAS: %d\n",
+				__func__, __LINE__, rc);
+			goto release_mutex;
+		}
+
+		rc = cam_csiphy_enable_hw(csiphy_dev);
+		if (rc != 0) {
+			pr_err("%s: %d cam_csiphy_enable_hw failed\n",
+				__func__, __LINE__);
+			cam_cpas_stop(csiphy_dev->cpas_handle);
+			goto release_mutex;
+		}
+		rc = cam_csiphy_config_dev(csiphy_dev);
+		if (rc < 0) {
+			pr_err("%s: %d cam_csiphy_config_dev failed\n",
+				__func__, __LINE__);
+			cam_cpas_stop(csiphy_dev->cpas_handle);
+			goto release_mutex;
+		}
+	}
+		break;
+	default:
+		pr_err("%s:%d :Error: Invalid Opcode: %d\n",
+			__func__, __LINE__, cmd->op_code);
+		rc = -EINVAL;
+		goto release_mutex;
+	}
+
+release_mutex:
+	mutex_unlock(&csiphy_dev->mutex);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.h
new file mode 100644
index 0000000..6eeeea4
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.h
@@ -0,0 +1,52 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CSIPHY_CORE_H_
+#define _CAM_CSIPHY_CORE_H_
+
+#include <linux/irqreturn.h>
+#include "cam_csiphy_dev.h"
+#include <cam_mem_mgr.h>
+#include <cam_req_mgr_util.h>
+#include <cam_io_util.h>
+
+/**
+ * @csiphy_dev: CSIPhy device structure
+ *
+ * This API programs CSIPhy IRQ  registers
+ */
+void cam_csiphy_cphy_irq_config(struct csiphy_device *csiphy_dev);
+
+/**
+ * @csiphy_dev: CSIPhy device structure
+ *
+ * This API resets CSIPhy hardware
+ */
+void cam_csiphy_reset(struct csiphy_device *csiphy_dev);
+
+/**
+ * @csiphy_dev: CSIPhy device structure
+ * @arg:    Camera control command argument
+ *
+ * This API handles the camera control argument reached to CSIPhy
+ * Returns 0 on success or a negative errno.
+ */
+int cam_csiphy_core_cfg(void *csiphy_dev, void *arg);
+
+/**
+ * @irq_num: IRQ number
+ * @data: CSIPhy device structure
+ *
+ * This API handles CSIPhy IRQs
+ * Returns IRQ_HANDLED once statuses are cleared.
+ */
+irqreturn_t cam_csiphy_irq(int irq_num, void *data);
+
+#endif /* _CAM_CSIPHY_CORE_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c
new file mode 100644
index 0000000..f2ece9d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c
@@ -0,0 +1,239 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_csiphy_dev.h"
+#include "cam_req_mgr_dev.h"
+#include "cam_csiphy_soc.h"
+#include "cam_csiphy_core.h"
+#include <media/cam_sensor.h>
+
+#undef CDBG
+#ifdef CAM_CSIPHY_DEV_DEBUG
+#define CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+/* v4l2 subdev ioctl entry: only VIDIOC_CAM_CONTROL is supported and is
+ * forwarded to the core dispatcher; anything else is logged and ignored. */
+static long cam_csiphy_subdev_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	int rc = 0;
+	struct csiphy_device *csiphy_dev = v4l2_get_subdevdata(sd);
+
+	if (cmd == VIDIOC_CAM_CONTROL) {
+		rc = cam_csiphy_core_cfg(csiphy_dev, arg);
+		if (rc != 0)
+			pr_err("%s: %d :ERROR: in configuring the device\n",
+				__func__, __LINE__);
+	} else {
+		pr_err("%s:%d :ERROR: Wrong ioctl\n", __func__, __LINE__);
+	}
+
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+/* 32-bit compat path: bounce the cam_control struct through a kernel
+ * copy, dispatch, then copy results back only when the ioctl succeeded. */
+static long cam_csiphy_subdev_compat_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, unsigned long arg)
+{
+	int32_t rc = 0;
+	struct cam_control cmd_data;
+
+	if (copy_from_user(&cmd_data, (void __user *)arg,
+		sizeof(cmd_data))) {
+		pr_err("Failed to copy from user_ptr=%pK size=%zu\n",
+			(void __user *)arg, sizeof(cmd_data));
+		return -EFAULT;
+	}
+
+	/* All the arguments converted to 64 bit here
+	 * Passed to the api in core.c
+	 */
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_csiphy_subdev_ioctl(sd, cmd, &cmd_data);
+		break;
+	default:
+		pr_err("%s:%d Invalid compat ioctl cmd: %d\n",
+			__func__, __LINE__, cmd);
+		rc = -EINVAL;
+	}
+
+	/* Propagate in-place mutations of cmd_data back to user space. */
+	if (!rc) {
+		if (copy_to_user((void __user *)arg, &cmd_data,
+			sizeof(cmd_data))) {
+			pr_err("Failed to copy to user_ptr=%pK size=%zu\n",
+				(void __user *)arg, sizeof(cmd_data));
+			rc = -EFAULT;
+		}
+	}
+
+	return rc;
+}
+#endif
+
+/* V4L2 core ops: route VIDIOC_CAM_CONTROL (and its 32-bit compat form)
+ * to the CSIPHY ioctl handlers above. */
+static struct v4l2_subdev_core_ops csiphy_subdev_core_ops = {
+	.ioctl = cam_csiphy_subdev_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl32 = cam_csiphy_subdev_compat_ioctl,
+#endif
+};
+
+static const struct v4l2_subdev_ops csiphy_subdev_ops = {
+	.core = &csiphy_subdev_core_ops,
+};
+
+/* No internal ops are required; registered as an empty table. */
+static const struct v4l2_subdev_internal_ops csiphy_subdev_intern_ops;
+
+/*
+ * Probe: allocate the device and its register-table container, parse DT,
+ * register the v4l2 subdev and register with CPAS.  On failure every
+ * locally acquired resource is released via csiphy_no_resource.
+ */
+static int32_t cam_csiphy_platform_probe(struct platform_device *pdev)
+{
+	struct cam_cpas_register_params cpas_parms;
+	struct csiphy_device *new_csiphy_dev;
+	int32_t              rc = 0;
+
+	new_csiphy_dev = devm_kzalloc(&pdev->dev,
+		sizeof(struct csiphy_device), GFP_KERNEL);
+	if (!new_csiphy_dev)
+		return -ENOMEM;
+
+	/* ctrl_reg is plain kzalloc (not devm), so error paths kfree it. */
+	new_csiphy_dev->ctrl_reg = kzalloc(sizeof(struct csiphy_ctrl_t),
+		GFP_KERNEL);
+	if (!new_csiphy_dev->ctrl_reg) {
+		devm_kfree(&pdev->dev, new_csiphy_dev);
+		return -ENOMEM;
+	}
+
+	mutex_init(&new_csiphy_dev->mutex);
+	new_csiphy_dev->v4l2_dev_str.pdev = pdev;
+
+	new_csiphy_dev->ref_count = 0;
+
+	rc = cam_csiphy_parse_dt_info(pdev, new_csiphy_dev);
+	if (rc < 0) {
+		pr_err("%s:%d :ERROR: dt paring failed: %d\n",
+			__func__, __LINE__, rc);
+		goto csiphy_no_resource;
+	}
+
+	/* Populate the subdev descriptor before registration. */
+	new_csiphy_dev->v4l2_dev_str.internal_ops =
+		&csiphy_subdev_intern_ops;
+	new_csiphy_dev->v4l2_dev_str.ops =
+		&csiphy_subdev_ops;
+	strlcpy(new_csiphy_dev->device_name, CAMX_CSIPHY_DEV_NAME,
+		sizeof(new_csiphy_dev->device_name));
+	new_csiphy_dev->v4l2_dev_str.name =
+		new_csiphy_dev->device_name;
+	new_csiphy_dev->v4l2_dev_str.sd_flags =
+		(V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS);
+	new_csiphy_dev->v4l2_dev_str.ent_function =
+		CAM_CSIPHY_DEVICE_TYPE;
+	new_csiphy_dev->v4l2_dev_str.token =
+		new_csiphy_dev;
+
+	rc = cam_register_subdev(&(new_csiphy_dev->v4l2_dev_str));
+	if (rc < 0) {
+		pr_err("%s:%d :ERROR: In cam_register_subdev\n",
+			__func__, __LINE__);
+		goto csiphy_no_resource;
+	}
+
+	platform_set_drvdata(pdev, &(new_csiphy_dev->v4l2_dev_str.sd));
+	v4l2_set_subdevdata(&(new_csiphy_dev->v4l2_dev_str.sd), new_csiphy_dev);
+
+	/* -1 marks both bridge slots as unacquired; ops are unused. */
+	new_csiphy_dev->bridge_intf.device_hdl[0] = -1;
+	new_csiphy_dev->bridge_intf.device_hdl[1] = -1;
+	new_csiphy_dev->bridge_intf.ops.get_dev_info =
+		NULL;
+	new_csiphy_dev->bridge_intf.ops.link_setup =
+		NULL;
+	new_csiphy_dev->bridge_intf.ops.apply_req =
+		NULL;
+
+	new_csiphy_dev->acquire_count = 0;
+	new_csiphy_dev->is_acquired_dev_combo_mode = 0;
+
+	cpas_parms.cam_cpas_client_cb = NULL;
+	cpas_parms.cell_index = pdev->id;
+	cpas_parms.dev = &pdev->dev;
+	cpas_parms.userdata = new_csiphy_dev;
+
+	strlcpy(cpas_parms.identifier, "csiphy", CAM_HW_IDENTIFIER_LENGTH);
+	rc = cam_cpas_register_client(&cpas_parms);
+	if (rc) {
+		/* NOTE(review): the subdev registered above is not
+		 * unregistered on this path — confirm whether an
+		 * unregister call is needed before csiphy_no_resource. */
+		pr_err("%s:%d CPAS registration failed\n",
+			__func__, __LINE__);
+		goto csiphy_no_resource;
+	}
+	CDBG("CPAS registration successful handle=%d\n",
+		cpas_parms.client_handle);
+	new_csiphy_dev->cpas_handle = cpas_parms.client_handle;
+
+	return rc;
+csiphy_no_resource:
+	mutex_destroy(&new_csiphy_dev->mutex);
+	kfree(new_csiphy_dev->ctrl_reg);
+	devm_kfree(&pdev->dev, new_csiphy_dev);
+	return rc;
+}
+
+
+/* Remove: unregister from CPAS, release SOC resources and free the
+ * register-table container allocated in probe. */
+static int32_t cam_csiphy_device_remove(struct platform_device *pdev)
+{
+	struct v4l2_subdev *subdev =
+		platform_get_drvdata(pdev);
+	struct csiphy_device *csiphy_dev =
+		v4l2_get_subdevdata(subdev);
+
+	cam_cpas_unregister_client(csiphy_dev->cpas_handle);
+	cam_csiphy_soc_release(csiphy_dev);
+	kfree(csiphy_dev->ctrl_reg);
+	devm_kfree(&pdev->dev, csiphy_dev);
+
+	return 0;
+}
+
+/* Device-tree match table: binds to "qcom,csiphy" nodes. */
+static const struct of_device_id cam_csiphy_dt_match[] = {
+	{.compatible = "qcom,csiphy"},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, cam_csiphy_dt_match);
+
+static struct platform_driver csiphy_driver = {
+	.probe = cam_csiphy_platform_probe,
+	.remove = cam_csiphy_device_remove,
+	.driver = {
+		.name = CAMX_CSIPHY_DEV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = cam_csiphy_dt_match,
+	},
+};
+
+/* Module init/exit: plain platform-driver (un)registration. */
+static int32_t __init cam_csiphy_init_module(void)
+{
+	return platform_driver_register(&csiphy_driver);
+}
+
+static void __exit cam_csiphy_exit_module(void)
+{
+	platform_driver_unregister(&csiphy_driver);
+}
+
+module_init(cam_csiphy_init_module);
+module_exit(cam_csiphy_exit_module);
+MODULE_DESCRIPTION("CAM CSIPHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
new file mode 100644
index 0000000..9049e4e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
@@ -0,0 +1,209 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CSIPHY_DEV_H_
+#define _CAM_CSIPHY_DEV_H_
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/irqreturn.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <media/cam_defs.h>
+#include <cam_sensor_cmn_header.h>
+#include <cam_req_mgr_interface.h>
+#include <cam_subdev.h>
+#include <cam_sensor_soc_api.h>
+#include <cam_io_util.h>
+#include <cam_cpas_api.h>
+
+#define MAX_CSIPHY                  3
+#define MAX_DPHY_DATA_LN            4
+#define MAX_LRME_V4l2_EVENTS        30
+#define CSIPHY_NUM_CLK_MAX          16
+#define MAX_CSIPHY_REG_ARRAY        70
+#define MAX_CSIPHY_CMN_REG_ARRAY    5
+
+#define MAX_LANES             5
+#define MAX_SETTINGS_PER_LANE 20
+
+#define MAX_REGULATOR         5
+#define CAMX_CSIPHY_DEV_NAME "cam-csiphy-driver"
+
+#define CSIPHY_POWER_UP       0
+#define CSIPHY_POWER_DOWN     1
+
+#define CSIPHY_DEFAULT_PARAMS            0
+#define CSIPHY_LANE_ENABLE               1
+#define CSIPHY_SETTLE_CNT_LOWER_BYTE     2
+#define CSIPHY_SETTLE_CNT_HIGHER_BYTE    3
+#define CSIPHY_DNP_PARAMS                4
+
+#define ENABLE_IRQ false
+
+#undef CDBG
+#ifdef CAM_CSIPHY_CORE_DEBUG
+#define CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+/**
+ * struct csiphy_reg_parms_t
+ * @mipi_csiphy_glbl_irq_cmd_addr: CSIPhy irq addr
+ * @mipi_csiphy_interrupt_status0_addr:
+ *     CSIPhy interrupt status addr
+ * @mipi_csiphy_interrupt_mask0_addr:
+ *     CSIPhy interrupt mask addr
+ * @mipi_csiphy_interrupt_mask_val:
+ *      CSIPhy interrupt mask val
+ * @mipi_csiphy_interrupt_mask_addr:
+ *     CSIPhy interrupt mask addr (secondary)
+ * @mipi_csiphy_interrupt_clear0_addr:
+ *     CSIPhy interrupt clear addr
+ * @csiphy_version: CSIPhy Version
+ * @csiphy_common_array_size: CSIPhy common array size
+ * @csiphy_reset_array_size: CSIPhy reset array size
+ * @csiphy_2ph_config_array_size: 2-phase per-lane settings array size
+ * @csiphy_3ph_config_array_size: 3-phase per-lane settings array size
+ */
+struct csiphy_reg_parms_t {
+/*MIPI CSI PHY registers*/
+	uint32_t mipi_csiphy_glbl_irq_cmd_addr;
+	uint32_t mipi_csiphy_interrupt_status0_addr;
+	uint32_t mipi_csiphy_interrupt_mask0_addr;
+	uint32_t mipi_csiphy_interrupt_mask_val;
+	uint32_t mipi_csiphy_interrupt_mask_addr;
+	uint32_t mipi_csiphy_interrupt_clear0_addr;
+	uint32_t csiphy_version;
+	uint32_t csiphy_common_array_size;
+	uint32_t csiphy_reset_array_size;
+	uint32_t csiphy_2ph_config_array_size;
+	uint32_t csiphy_3ph_config_array_size;
+};
+
+/**
+ * struct intf_params
+ * @device_hdl: Device Handle
+ * @session_hdl: Session Handle
+ * @link_hdl: Link Handle
+ * @ops: KMD operations
+ * @crm_cb: Callback API pointers
+ */
+struct intf_params {
+	int32_t device_hdl[2];
+	int32_t session_hdl[2];
+	int32_t link_hdl[2];
+	struct cam_req_mgr_kmd_ops ops;
+	struct cam_req_mgr_crm_cb *crm_cb;
+};
+
+/**
+ * struct csiphy_reg_t
+ * @reg_addr: Register address
+ * @reg_data: Register data
+ * @delay: Delay (units depend on the consumer: reset sequences apply it
+ *         in 100 us steps, lane programming in ms — see cam_csiphy_core.c)
+ * @csiphy_param_type: CSIPhy parameter type (CSIPHY_* defines above)
+ */
+struct csiphy_reg_t {
+	int32_t  reg_addr;
+	int32_t  reg_data;
+	int32_t  delay;
+	uint32_t csiphy_param_type;
+};
+
+/**
+ * struct csiphy_ctrl_t
+ * @csiphy_reg: Register address
+ * @csiphy_common_reg: Common register set
+ * @csiphy_irq_reg: IRQ enable/config register set
+ * @csiphy_reset_reg: Reset register set
+ * @csiphy_2ph_reg: 2phase register set
+ * @csiphy_2ph_combo_mode_reg:
+ *     2phase combo register set
+ * @csiphy_3ph_reg: 3phase register set
+ * @csiphy_2ph_3ph_mode_reg:
+ *     2 phase 3phase combo register set
+ */
+struct csiphy_ctrl_t {
+	struct csiphy_reg_parms_t csiphy_reg;
+	struct csiphy_reg_t *csiphy_common_reg;
+	struct csiphy_reg_t *csiphy_irq_reg;
+	struct csiphy_reg_t *csiphy_reset_reg;
+	struct csiphy_reg_t (*csiphy_2ph_reg)[MAX_SETTINGS_PER_LANE];
+	struct csiphy_reg_t (*csiphy_2ph_combo_mode_reg)[MAX_SETTINGS_PER_LANE];
+	struct csiphy_reg_t (*csiphy_3ph_reg)[MAX_SETTINGS_PER_LANE];
+	struct csiphy_reg_t (*csiphy_2ph_3ph_mode_reg)[MAX_SETTINGS_PER_LANE];
+};
+
+/**
+ * struct csiphy_device
+ * @irq: Interrupt structure
+ * @base: Base address
+ * @mutex: Serializes ioctl/core configuration of this PHY
+ * @hw_version: Hardware Version
+ * @csiphy_state: CSIPhy state
+ * @ctrl_reg: CSIPhy control registers
+ * @num_clk: Number of clocks
+ * @csiphy_max_clk: Max timer clock rate
+ * @num_vreg: Number of regulators
+ * @csiphy_clk: Clock structure
+ * @csiphy_clk_info: Clock information structure
+ * @csiphy_vreg: Regulator structure
+ * @csiphy_reg_ptr: Regulator structure
+ * @csiphy_3p_clk_info: 3Phase clock information
+ * @csiphy_3p_clk: 3Phase clocks structure
+ * @csiphy_clk_index: Timer Src clk index
+ * @csi_3phase: Is it a 3Phase mode
+ * @ref_count: Reference count
+ * @lane_mask: Per-PHY lane mask
+ * @is_csiphy_3phase_hw: Whether this PHY instance supports 3-phase
+ * @num_irq_registers: Number of IRQ status/clear registers (mode-dependent)
+ * @v4l2_dev_str: cam_subdev wrapper (carries the platform device)
+ * @csiphy_info: Parsed CONFIG_DEV parameters (NULL until configured)
+ * @bridge_intf: CRM bridge handles for up to two acquires
+ * @clk_lane: Clock lane
+ * @acquire_count: Acquire device count
+ * @device_name: Subdev name ("cam-csiphy-driver")
+ * @is_acquired_dev_combo_mode:
+ *    Flag that mentions whether already acquired
+ *   device is for combo mode
+ * @cpas_handle: CPAS client handle used for bandwidth voting
+ */
+struct csiphy_device {
+	struct resource *irq;
+	void __iomem *base;
+	struct mutex mutex;
+	uint32_t hw_version;
+	uint32_t csiphy_state;
+	struct csiphy_ctrl_t *ctrl_reg;
+	size_t num_clk;
+	uint32_t csiphy_max_clk;
+	int32_t num_vreg;
+	struct clk **csiphy_clk;
+	struct msm_cam_clk_info *csiphy_clk_info;
+	struct camera_vreg_t *csiphy_vreg;
+	struct regulator *csiphy_reg_ptr[MAX_REGULATOR];
+	struct msm_cam_clk_info csiphy_3p_clk_info[2];
+	struct clk *csiphy_3p_clk[2];
+	uint32_t csiphy_clk_index;
+	unsigned char csi_3phase;
+	int32_t ref_count;
+	uint16_t lane_mask[MAX_CSIPHY];
+	uint8_t is_csiphy_3phase_hw;
+	uint8_t num_irq_registers;
+	struct cam_subdev v4l2_dev_str;
+	struct cam_csiphy_info *csiphy_info;
+	struct intf_params bridge_intf;
+	uint32_t clk_lane;
+	uint32_t acquire_count;
+	char device_name[20];
+	uint32_t is_acquired_dev_combo_mode;
+	uint32_t cpas_handle;
+};
+
+#endif /* _CAM_CSIPHY_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c
new file mode 100644
index 0000000..540ec76
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c
@@ -0,0 +1,274 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_csiphy_soc.h"
+#include "cam_csiphy_core.h"
+#include "include/cam_csiphy_1_0_hwreg.h"
+#include "cam_sensor_util.h"
+
+/*
+ * cam_csiphy_enable_hw - power up the CSIPHY hardware
+ * @csiphy_dev: CSIPHY device handle
+ *
+ * Takes a reference on the device; only the first reference performs the
+ * actual bring-up (regulators -> clocks -> timer src clk rate -> IRQ ->
+ * PHY reset).  On failure everything acquired so far is released in
+ * reverse order and the reference taken on entry is dropped again.
+ *
+ * Return: 0 on success (or when already powered), negative errno on
+ * failure.
+ */
+int32_t cam_csiphy_enable_hw(struct csiphy_device *csiphy_dev)
+{
+	int32_t rc = 0;
+	long clk_rate = 0;
+
+	if (csiphy_dev->ref_count++) {
+		pr_err("%s:%d csiphy refcount = %d\n", __func__,
+			__LINE__, csiphy_dev->ref_count);
+		return rc;
+	}
+
+	rc = msm_camera_config_vreg(&csiphy_dev->v4l2_dev_str.pdev->dev,
+		csiphy_dev->csiphy_vreg,
+		csiphy_dev->num_vreg, NULL, 0,
+		&csiphy_dev->csiphy_reg_ptr[0], 1);
+	if (rc < 0) {
+		pr_err("%s:%d failed regulator get\n", __func__, __LINE__);
+		goto csiphy_config_regulator_fail;
+	}
+
+	rc = msm_camera_enable_vreg(&csiphy_dev->v4l2_dev_str.pdev->dev,
+		csiphy_dev->csiphy_vreg,
+		csiphy_dev->num_vreg, NULL, 0,
+		&csiphy_dev->csiphy_reg_ptr[0], 1);
+	if (rc < 0) {
+		/* Original passed rc where the %d expected __LINE__ */
+		pr_err("%s:%d failed to enable regulators, rc = %d\n",
+			__func__, __LINE__, rc);
+		goto csiphy_regulator_fail;
+	}
+
+	/* Enable clocks */
+	rc = msm_camera_clk_enable(&csiphy_dev->v4l2_dev_str.pdev->dev,
+		csiphy_dev->csiphy_clk_info, csiphy_dev->csiphy_clk,
+		csiphy_dev->num_clk, true);
+	if (rc < 0) {
+		pr_err("%s: csiphy clk enable failed\n", __func__);
+		goto csiphy_regulator_fail;
+	}
+
+	/*
+	 * NOTE(review): clk_rate is 0 here; presumably the helper rounds a
+	 * zero request to a valid rate for the timer src clk - confirm.
+	 */
+	clk_rate = msm_camera_clk_set_rate(&csiphy_dev->v4l2_dev_str.pdev->dev,
+		csiphy_dev->csiphy_clk[csiphy_dev->csiphy_clk_index],
+		clk_rate);
+	if (clk_rate < 0) {
+		pr_err("csiphy_clk_set_rate failed\n");
+		/* Original left rc == 0 here and reported success */
+		rc = (int32_t)clk_rate;
+		goto csiphy_clk_enable_fail;
+	}
+
+	rc = msm_camera_enable_irq(csiphy_dev->irq, ENABLE_IRQ);
+	if (rc < 0) {
+		pr_err("%s:%d :ERROR: irq enable failed\n",
+			__func__, __LINE__);
+		goto csiphy_clk_enable_fail;
+	}
+
+	cam_csiphy_reset(csiphy_dev);
+
+	return rc;
+csiphy_clk_enable_fail:
+	msm_camera_clk_enable(&csiphy_dev->v4l2_dev_str.pdev->dev,
+		csiphy_dev->csiphy_clk_info, csiphy_dev->csiphy_clk,
+		csiphy_dev->num_clk, false);
+csiphy_regulator_fail:
+	msm_camera_enable_vreg(&csiphy_dev->v4l2_dev_str.pdev->dev,
+		csiphy_dev->csiphy_vreg,
+		csiphy_dev->num_vreg, NULL, 0,
+		&csiphy_dev->csiphy_reg_ptr[0], 0);
+csiphy_config_regulator_fail:
+	msm_camera_config_vreg(&csiphy_dev->v4l2_dev_str.pdev->dev,
+		csiphy_dev->csiphy_vreg,
+		csiphy_dev->num_vreg, NULL, 0,
+		&csiphy_dev->csiphy_reg_ptr[0], 0);
+	/* Drop the reference taken on entry so a retry can power up again
+	 * (originally only the clk-enable failure path did this).
+	 */
+	csiphy_dev->ref_count--;
+
+	return rc;
+}
+
+/*
+ * cam_csiphy_disable_hw - power down the CSIPHY hardware
+ * @pdev: platform device whose drvdata is the csiphy_device
+ *
+ * Turns off regulators, clocks and the IRQ.  Counterpart of
+ * cam_csiphy_enable_hw().
+ *
+ * Return: 0 on success, -EINVAL if no device is bound to @pdev.
+ */
+int32_t cam_csiphy_disable_hw(struct platform_device *pdev)
+{
+	struct csiphy_device *csiphy_dev =
+		platform_get_drvdata(pdev);
+
+	/* Guard against being called before probe set the drvdata */
+	if (!csiphy_dev) {
+		pr_err("%s:%d csiphy device is NULL\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	/* Disable regulators */
+	msm_camera_enable_vreg(&csiphy_dev->v4l2_dev_str.pdev->dev,
+		csiphy_dev->csiphy_vreg,
+		csiphy_dev->num_vreg, NULL, 0,
+		&csiphy_dev->csiphy_reg_ptr[0], 0);
+
+	/* Disable clocks */
+	msm_camera_clk_enable(&csiphy_dev->v4l2_dev_str.pdev->dev,
+		csiphy_dev->csiphy_clk_info, csiphy_dev->csiphy_clk,
+		csiphy_dev->num_clk, false);
+
+	/* Disable IRQ */
+	msm_camera_enable_irq(csiphy_dev->irq, false);
+
+	/*
+	 * NOTE(review): the ref_count taken in cam_csiphy_enable_hw() is
+	 * not dropped here, and the regulator config taken with
+	 * msm_camera_config_vreg() is not released - confirm callers use
+	 * cam_csiphy_soc_release() for the full teardown.
+	 */
+	return 0;
+}
+
+/*
+ * cam_csiphy_parse_dt_info - parse CSIPHY devicetree information
+ * @pdev:       platform device for this CSIPHY instance
+ * @csiphy_dev: CSIPHY device to populate
+ *
+ * Selects the register-settings tables for the detected HW version,
+ * looks up clocks, regulators, the register base and the IRQ, and
+ * registers (but leaves disabled) the IRQ handler.
+ *
+ * Return: 0 on success, negative errno on failure; on failure every
+ * resource acquired here is released again.
+ */
+int32_t cam_csiphy_parse_dt_info(struct platform_device *pdev,
+	struct csiphy_device *csiphy_dev)
+{
+	int32_t   rc = 0, i = 0;
+	char      *csi_3p_clk_name = "csi_phy_3p_clk";
+	char      *csi_3p_clk_src_name = "csiphy_3p_clk_src";
+
+	if (pdev->dev.of_node) {
+		of_property_read_u32((&pdev->dev)->of_node,
+			"cell-index", &pdev->id);
+		CDBG("%s: device id = %d\n", __func__, pdev->id);
+	}
+
+	csiphy_dev->is_csiphy_3phase_hw = 0;
+	if (of_device_is_compatible(csiphy_dev->v4l2_dev_str.pdev->dev.of_node,
+		"qcom,csiphy-v1.0")) {
+		csiphy_dev->ctrl_reg->csiphy_2ph_reg = csiphy_2ph_v1_0_reg;
+		csiphy_dev->ctrl_reg->csiphy_2ph_combo_mode_reg =
+			csiphy_2ph_v1_0_combo_mode_reg;
+		csiphy_dev->ctrl_reg->csiphy_3ph_reg = csiphy_3ph_v1_0_reg;
+		csiphy_dev->ctrl_reg->csiphy_2ph_3ph_mode_reg =
+			csiphy_3ph_v1_0_combo_mode_reg;
+		csiphy_dev->ctrl_reg->csiphy_irq_reg = csiphy_irq_reg_1_0;
+		csiphy_dev->ctrl_reg->csiphy_common_reg = csiphy_common_reg_1_0;
+		csiphy_dev->ctrl_reg->csiphy_reset_reg = csiphy_reset_reg_1_0;
+		csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v1_0;
+		csiphy_dev->hw_version = CSIPHY_VERSION_V10;
+		csiphy_dev->is_csiphy_3phase_hw = CSI_3PHASE_HW;
+		csiphy_dev->clk_lane = 0;
+	} else {
+		pr_err("%s:%d, invalid hw version : 0x%x\n", __func__, __LINE__,
+		csiphy_dev->hw_version);
+		return -EINVAL;
+	}
+
+	rc = msm_camera_get_clk_info(csiphy_dev->v4l2_dev_str.pdev,
+		&csiphy_dev->csiphy_clk_info,
+		&csiphy_dev->csiphy_clk,
+		&csiphy_dev->num_clk);
+	if (rc < 0) {
+		pr_err("%s:%d failed clock get\n", __func__, __LINE__);
+		return rc;
+	}
+
+	if (csiphy_dev->num_clk > CSIPHY_NUM_CLK_MAX) {
+		pr_err("%s: invalid clk count=%zu, max is %d\n", __func__,
+			csiphy_dev->num_clk, CSIPHY_NUM_CLK_MAX);
+		/* Original fell through with rc == 0 and reported success */
+		rc = -EINVAL;
+		goto clk_mem_ovf_err;
+	}
+
+	for (i = 0; i < csiphy_dev->num_clk; i++) {
+		if (!strcmp(csiphy_dev->csiphy_clk_info[i].clk_name,
+			csi_3p_clk_src_name)) {
+			csiphy_dev->csiphy_3p_clk_info[0].clk_name =
+				csiphy_dev->csiphy_clk_info[i].clk_name;
+			csiphy_dev->csiphy_3p_clk_info[0].clk_rate =
+				csiphy_dev->csiphy_clk_info[i].clk_rate;
+			csiphy_dev->csiphy_3p_clk[0] =
+				csiphy_dev->csiphy_clk[i];
+			continue;
+		} else if (!strcmp(csiphy_dev->csiphy_clk_info[i].clk_name,
+					csi_3p_clk_name)) {
+			csiphy_dev->csiphy_3p_clk_info[1].clk_name =
+				csiphy_dev->csiphy_clk_info[i].clk_name;
+			csiphy_dev->csiphy_3p_clk_info[1].clk_rate =
+				csiphy_dev->csiphy_clk_info[i].clk_rate;
+			csiphy_dev->csiphy_3p_clk[1] =
+				csiphy_dev->csiphy_clk[i];
+			continue;
+		}
+
+		/*
+		 * Index with i, not a separate counter: the original
+		 * clk_cnt lagged i once a 3-phase clock had been skipped
+		 * above, so csiphy_clk_info[clk_cnt] could name the wrong
+		 * entry and csiphy_clk_index would not match the
+		 * csiphy_clk[] slot it is used with in enable_hw.
+		 */
+		if (!strcmp(csiphy_dev->csiphy_clk_info[i].clk_name,
+			"csiphy_timer_src_clk")) {
+			csiphy_dev->csiphy_max_clk =
+				csiphy_dev->csiphy_clk_info[i].clk_rate;
+			csiphy_dev->csiphy_clk_index = i;
+		}
+		CDBG("%s: clk_rate[%d] = %ld\n", __func__, i,
+			csiphy_dev->csiphy_clk_info[i].clk_rate);
+	}
+
+	rc = cam_sensor_get_dt_vreg_data(pdev->dev.of_node,
+		&(csiphy_dev->csiphy_vreg), &(csiphy_dev->num_vreg));
+	if (rc < 0) {
+		/* Regulators are optional: log and continue without them */
+		pr_err("%s:%d Reg get failed\n", __func__, __LINE__);
+		csiphy_dev->num_vreg = 0;
+	}
+
+	csiphy_dev->base = msm_camera_get_reg_base(pdev, "csiphy", true);
+	if (!csiphy_dev->base) {
+		pr_err("%s: no mem resource?\n", __func__);
+		rc = -ENODEV;
+		/* base is NULL - skip the put_reg_base in the unwind path */
+		goto clk_mem_ovf_err;
+	}
+
+	csiphy_dev->irq = msm_camera_get_irq(pdev, "csiphy");
+	if (!csiphy_dev->irq) {
+		pr_err("%s: no irq resource?\n", __func__);
+		rc = -ENODEV;
+		goto csiphy_no_resource;
+	}
+
+	rc = msm_camera_register_irq(pdev, csiphy_dev->irq,
+		cam_csiphy_irq, IRQF_TRIGGER_RISING, "csiphy", csiphy_dev);
+	if (rc < 0) {
+		pr_err("%s: irq request fail\n", __func__);
+		rc = -EBUSY;
+		goto csiphy_no_resource;
+	}
+	/* Leave the IRQ disabled until the HW is powered up */
+	msm_camera_enable_irq(csiphy_dev->irq, false);
+	return rc;
+
+csiphy_no_resource:
+	msm_camera_put_reg_base(pdev, csiphy_dev->base, "csiphy", true);
+clk_mem_ovf_err:
+	msm_camera_put_clk_info(csiphy_dev->v4l2_dev_str.pdev,
+		&csiphy_dev->csiphy_clk_info,
+		&csiphy_dev->csiphy_clk,
+		csiphy_dev->num_clk);
+	return rc;
+}
+
+/*
+ * cam_csiphy_soc_release - drop a reference and tear down SOC resources
+ * @csiphy_dev: CSIPHY device handle (may be NULL)
+ *
+ * Only the final reference performs the teardown: PHY reset, IRQ off,
+ * clocks off, regulators off and released.
+ */
+int32_t cam_csiphy_soc_release(struct csiphy_device *csiphy_dev)
+{
+	struct device *dev;
+
+	if (!csiphy_dev || !csiphy_dev->ref_count) {
+		pr_err("%s csiphy dev NULL / ref_count ZERO\n", __func__);
+		return 0;
+	}
+
+	csiphy_dev->ref_count--;
+	if (csiphy_dev->ref_count) {
+		pr_err("%s:%d csiphy refcount = %d\n", __func__,
+			__LINE__, csiphy_dev->ref_count);
+		return 0;
+	}
+
+	dev = &csiphy_dev->v4l2_dev_str.pdev->dev;
+
+	/* Quiesce the PHY before tearing down its resources */
+	cam_csiphy_reset(csiphy_dev);
+	msm_camera_enable_irq(csiphy_dev->irq, false);
+
+	/* Release clocks, then regulators */
+	msm_camera_clk_enable(dev, csiphy_dev->csiphy_clk_info,
+		csiphy_dev->csiphy_clk, csiphy_dev->num_clk, false);
+	msm_camera_enable_vreg(dev, csiphy_dev->csiphy_vreg,
+		csiphy_dev->num_vreg, NULL, 0,
+		&csiphy_dev->csiphy_reg_ptr[0], 0);
+	msm_camera_config_vreg(dev, csiphy_dev->csiphy_vreg,
+		csiphy_dev->num_vreg, NULL, 0,
+		&csiphy_dev->csiphy_reg_ptr[0], 0);
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.h
new file mode 100644
index 0000000..27de3fc
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.h
@@ -0,0 +1,63 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CSIPHY_SOC_H_
+#define _CAM_CSIPHY_SOC_H_
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/irqreturn.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include "cam_csiphy_dev.h"
+#include "cam_csiphy_core.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+#define CSI_3PHASE_HW                               1
+#define CSIPHY_VERSION_V35                        0x35
+#define CSIPHY_VERSION_V10                        0x10
+
+/**
+ * cam_csiphy_soc_release() - release CSIPHY SOC resources
+ * @csiphy_dev: CSIPhy device structure
+ *
+ * This API releases SOC related parameters.
+ *
+ * Return type changed from int to int32_t to match the definition in
+ * cam_csiphy_soc.c (same applies to the declarations below).
+ */
+int32_t cam_csiphy_soc_release(struct csiphy_device *csiphy_dev);
+
+/**
+ * cam_csiphy_parse_dt_info() - parse CSIPHY devicetree information
+ * @pdev:       Platform device
+ * @csiphy_dev: CSIPhy device structure
+ *
+ * This API parses csiphy device tree information.
+ */
+int32_t cam_csiphy_parse_dt_info(struct platform_device *pdev,
+	struct csiphy_device *csiphy_dev);
+
+/**
+ * cam_csiphy_enable_hw() - power up the CSIPHY hardware
+ * @csiphy_dev: CSIPhy device structure
+ *
+ * This API enables SOC related parameters.
+ */
+int32_t cam_csiphy_enable_hw(struct csiphy_device *csiphy_dev);
+
+/**
+ * cam_csiphy_disable_hw() - power down the CSIPHY hardware
+ * @pdev: Platform device
+ *
+ * Declared here for symmetry with cam_csiphy_enable_hw(); it is defined
+ * in cam_csiphy_soc.c but was previously never declared in a header.
+ */
+int32_t cam_csiphy_disable_hw(struct platform_device *pdev);
+
+#endif /* _CAM_CSIPHY_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_0_hwreg.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_0_hwreg.h
new file mode 100644
index 0000000..7a4aede
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_0_hwreg.h
@@ -0,0 +1,351 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CSIPHY_1_0_HWREG_H_
+#define _CAM_CSIPHY_1_0_HWREG_H_
+
+#include "../cam_csiphy_dev.h"
+
+/* CSIPHY v1.0 register-layout parameters.  The *_array_size fields must
+ * match the element counts of the corresponding tables below (3 common,
+ * 3 reset, 14 per 2-phase lane, 19 per 3-phase lane).
+ */
+struct csiphy_reg_parms_t csiphy_v1_0 = {
+	.mipi_csiphy_interrupt_status0_addr = 0x8B0,
+	.mipi_csiphy_interrupt_clear0_addr = 0x858,
+	.mipi_csiphy_glbl_irq_cmd_addr = 0x828,
+	.csiphy_common_array_size = 3,
+	.csiphy_reset_array_size = 3,
+	.csiphy_2ph_config_array_size = 14,
+	.csiphy_3ph_config_array_size = 19,
+};
+
+/* Common (non-per-lane) init writes; count must equal
+ * csiphy_v1_0.csiphy_common_array_size.
+ */
+struct csiphy_reg_t csiphy_common_reg_1_0[] = {
+	{0x0814, 0x00, 0x00, CSIPHY_LANE_ENABLE},
+	{0x0818, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x081C, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+};
+
+/* Writes applied by cam_csiphy_reset(); count must equal
+ * csiphy_v1_0.csiphy_reset_array_size.
+ */
+struct csiphy_reg_t csiphy_reset_reg_1_0[] = {
+	{0x0814, 0x00, 0x50, CSIPHY_LANE_ENABLE},
+	{0x0818, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x081C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+};
+
+/* IRQ mask registers (0x082c..0x0854); value is the enable bitmask for
+ * each register.
+ */
+struct csiphy_reg_t csiphy_irq_reg_1_0[] = {
+	{0x082c, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0830, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0834, 0xFB, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0838, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x083c, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0840, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0844, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0848, 0xEF, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x084c, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0850, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0854, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+};
+
+/* Per-lane 2-phase (D-PHY) init settings; 14 entries per lane row,
+ * matching csiphy_v1_0.csiphy_2ph_config_array_size.  Rows use register
+ * banks 0x00xx, 0x07xx, 0x02xx, 0x04xx, 0x06xx - presumably data lanes
+ * and the clock lane; confirm the lane-to-bank mapping against the HW
+ * programming guide.
+ */
+struct csiphy_reg_t csiphy_2ph_v1_0_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
+	{
+		{0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0028, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0000, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0008, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x000c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0060, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0064, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0734, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x071C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0714, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0700, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0708, 0x14, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x070C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0710, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0760, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0764, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0234, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x021C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0228, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0200, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x020C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0210, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0260, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0264, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0428, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0400, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0408, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x040C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0460, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0464, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0634, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x061C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0628, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0600, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x060C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0610, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0638, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0660, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0664, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+};
+
+/* Per-lane 2-phase init settings for combo (dual-sensor) mode.
+ *
+ * NOTE(review): rows 0, 2 and 3 have only 13 initializers while
+ * csiphy_2ph_config_array_size is 14; the zero-filled 14th entry would
+ * be {0x0000, 0x00, ...} if the core iterates the full array size -
+ * confirm whether a CSIPHY_DNP_PARAMS entry (cf. the non-combo table)
+ * was dropped here.
+ */
+struct csiphy_reg_t
+	csiphy_2ph_v1_0_combo_mode_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
+	{
+		{0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0028, 0x0A, 0x00, CSIPHY_DNP_PARAMS},
+		{0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0000, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0008, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0060, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0064, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0734, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x071C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0714, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0700, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0708, 0x14, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x070C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0710, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0760, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0764, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0234, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x021C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0228, 0x0A, 0x00, CSIPHY_DNP_PARAMS},
+		{0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0200, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0210, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0260, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0264, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0428, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0400, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0408, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0460, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0464, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0634, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x061C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0628, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0600, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x060C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0610, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0638, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0660, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0664, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+};
+
+/* Per-lane 3-phase (C-PHY) init settings; 19 entries per lane row,
+ * matching csiphy_v1_0.csiphy_3ph_config_array_size.  Only 3 of the
+ * MAX_LANES rows are initialized (C-PHY has 3 trios); the rest are
+ * zero-filled.
+ */
+struct csiphy_reg_t csiphy_3ph_v1_0_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
+	{
+		{0x015C, 0x63, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0168, 0xAC, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x016C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0104, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x010C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0108, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+		{0x0114, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0150, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0118, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x011C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0120, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0124, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0128, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x012C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0144, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0160, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x01CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0164, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x01DC, 0x51, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x035C, 0x63, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0368, 0xAC, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x036C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0304, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x030C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0308, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+		{0x0314, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0350, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0318, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x031C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0320, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0324, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0328, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x032C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0344, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0360, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x03CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0364, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x03DC, 0x51, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x055C, 0x63, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0568, 0xAC, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x056C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0504, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x050C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0508, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+		{0x0514, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0550, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0518, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x051C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0520, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0524, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0528, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x052C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0544, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0560, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x05CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0564, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x05DC, 0x51, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+};
+
+/* Per-lane 3-phase init settings for combo mode.
+ *
+ * NOTE(review): this table is currently byte-identical to
+ * csiphy_3ph_v1_0_reg; if no combo-specific values are planned, the
+ * non-combo table could be referenced instead to avoid the duplicate
+ * data drifting out of sync.
+ */
+struct csiphy_reg_t
+	csiphy_3ph_v1_0_combo_mode_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
+	{
+		{0x015C, 0x63, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0168, 0xAC, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x016C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0104, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x010C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0108, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+		{0x0114, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0150, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0118, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x011C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0120, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0124, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0128, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x012C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0144, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0160, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x01CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0164, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x01DC, 0x51, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x035C, 0x63, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0368, 0xAC, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x036C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0304, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x030C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0308, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+		{0x0314, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0350, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0318, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x031C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0320, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0324, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0328, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x032C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0344, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0360, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x03CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0364, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x03DC, 0x51, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x055C, 0x63, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0568, 0xAC, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x056C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0504, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x050C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0508, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+		{0x0514, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0550, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0518, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x051C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0520, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0524, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0528, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x052C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0544, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0560, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x05CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0564, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x05DC, 0x51, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+};
+
+#endif /* _CAM_CSIPHY_1_0_HWREG_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/Makefile
new file mode 100644
index 0000000..d8c75fb
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/Makefile
@@ -0,0 +1,8 @@
+# Build rules for the cam_sensor sub-module of the SPECTRA camera driver.
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_dev.o cam_sensor_core.o cam_sensor_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
new file mode 100644
index 0000000..c837232
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
@@ -0,0 +1,1002 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <cam_sensor_cmn_header.h>
+#include "cam_sensor_core.h"
+#include <cam_sensor_util.h>
+
+static int32_t cam_sensor_i2c_pkt_parse(struct cam_sensor_ctrl_t *s_ctrl,
+	void *arg)
+{
+	int32_t rc = 0;
+	uint64_t generic_ptr;
+	struct cam_control *ioctl_ctrl = NULL;
+	struct cam_packet *csl_packet = NULL;
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	struct i2c_settings_array *i2c_reg_settings = NULL;
+	size_t len_of_buff = 0;
+	uint32_t *offset = NULL;
+	struct cam_config_dev_cmd config;
+	struct i2c_data_settings *i2c_data = NULL;
+	struct cam_req_mgr_add_request add_req;
+
+	ioctl_ctrl = (struct cam_control *)arg;
+
+	if (ioctl_ctrl->handle_type != CAM_HANDLE_USER_POINTER) {
+		pr_err("%s:%d :Error: Invalid Handle Type\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&config, (void __user *) ioctl_ctrl->handle,
+		sizeof(config)))
+		return -EFAULT;
+
+	rc = cam_mem_get_cpu_buf(
+		config.packet_handle,
+		(uint64_t *)&generic_ptr,
+		&len_of_buff);
+	if (rc < 0) {
+		pr_err("%s:%d :Error: Failed in getting the buffer: %d\n",
+			__func__, __LINE__, rc);
+		return rc;
+	}
+
+	if (config.offset > len_of_buff) {
+		pr_err("%s: %d offset is out of bounds: off: %lld len: %zu\n",
+			__func__, __LINE__, config.offset, len_of_buff);
+		return -EINVAL;
+	}
+	csl_packet = (struct cam_packet *)(generic_ptr +
+		config.offset);
+
+	i2c_data = &(s_ctrl->i2c_data);
+	CDBG("%s:%d Header OpCode: %d\n",
+		__func__, __LINE__, csl_packet->header.op_code);
+	if ((csl_packet->header.op_code & 0xFFFFFF) ==
+		CAM_SENSOR_PACKET_OPCODE_SENSOR_INITIAL_CONFIG) {
+		i2c_reg_settings = &i2c_data->init_settings;
+		i2c_reg_settings->request_id = 0;
+		i2c_reg_settings->is_settings_valid = 1;
+	} else if ((csl_packet->header.op_code & 0xFFFFFF) ==
+		CAM_SENSOR_PACKET_OPCODE_SENSOR_UPDATE) {
+		i2c_reg_settings =
+			&i2c_data->
+			per_frame[csl_packet->header.request_id %
+			MAX_PER_FRAME_ARRAY];
+		CDBG("%s:%d Received Packet: %lld\n", __func__, __LINE__,
+			csl_packet->header.request_id % MAX_PER_FRAME_ARRAY);
+		if (i2c_reg_settings->is_settings_valid == 1) {
+			pr_err("%s:%d :Error: Already some pkt in offset req : %lld\n",
+				__func__, __LINE__,
+				csl_packet->header.request_id);
+			rc = delete_request(i2c_reg_settings);
+			if (rc < 0) {
+				pr_err("%s: %d :Error: Failed in Deleting the err: %d\n",
+					__func__, __LINE__, rc);
+				return rc;
+			}
+		}
+
+		i2c_reg_settings->request_id =
+			csl_packet->header.request_id;
+		i2c_reg_settings->is_settings_valid = 1;
+	} else if ((csl_packet->header.op_code & 0xFFFFFF) ==
+		CAM_PKT_NOP_OPCODE) {
+		goto update_req_mgr;
+	} else {
+		pr_err("%s:%d Invalid Packet Header\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	offset = (uint32_t *)&csl_packet->payload;
+	offset += csl_packet->cmd_buf_offset / 4;
+	cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+
+	rc = cam_sensor_i2c_pkt_parser(i2c_reg_settings, cmd_desc, 1);
+	if (rc < 0) {
+		pr_err("%s:%d :Error: Fail parsing I2C Pkt: %d\n",
+			__func__, __LINE__, rc);
+		return rc;
+	}
+
+update_req_mgr:
+	if (((csl_packet->header.op_code & 0xFFFFFF) == CAM_PKT_NOP_OPCODE) ||
+		((csl_packet->header.op_code & 0xFFFFFF) ==
+		CAM_SENSOR_PACKET_OPCODE_SENSOR_UPDATE)) {
+		add_req.link_hdl = s_ctrl->bridge_intf.link_hdl;
+		add_req.req_id = csl_packet->header.request_id;
+		CDBG("%s:%d Rxed Req Id: %lld\n",
+			__func__, __LINE__, csl_packet->header.request_id);
+		add_req.dev_hdl = s_ctrl->bridge_intf.device_hdl;
+		if (s_ctrl->bridge_intf.crm_cb &&
+			s_ctrl->bridge_intf.crm_cb->add_req)
+			s_ctrl->bridge_intf.crm_cb->add_req(&add_req);
+		CDBG("%s:%d add req to req mgr: %lld\n",
+			__func__, __LINE__, add_req.req_id);
+	}
+	return rc;
+}
+
+int32_t cam_sensor_update_i2c_info(struct cam_cmd_i2c_info *i2c_info,
+	struct cam_sensor_ctrl_t *s_ctrl)
+{
+	int32_t rc = 0;
+	struct cam_sensor_cci_client   *cci_client = NULL;
+
+	if (s_ctrl->io_master_info.master_type == CCI_MASTER) {
+		cci_client = s_ctrl->io_master_info.cci_client;
+		if (!cci_client) {
+			pr_err("failed: cci_client %pK", cci_client);
+			return -EINVAL;
+		}
+		cci_client->cci_i2c_master = s_ctrl->cci_i2c_master;
+		cci_client->sid = i2c_info->slave_addr >> 1;
+		cci_client->retries = 3;
+		cci_client->id_map = 0;
+		cci_client->i2c_freq_mode = i2c_info->i2c_freq_mode;
+		CDBG("%s:%d Master: %d sid: %d freq_mode: %d\n",
+			__func__, __LINE__,
+			cci_client->cci_i2c_master, i2c_info->slave_addr,
+			i2c_info->i2c_freq_mode);
+	}
+
+	return rc;
+}
+
+int32_t cam_sensor_update_slave_info(struct cam_cmd_probe *probe_info,
+	struct cam_sensor_ctrl_t *s_ctrl)
+{
+	int32_t rc = 0;
+
+	s_ctrl->sensordata->slave_info.sensor_id_reg_addr =
+		probe_info->reg_addr;
+	s_ctrl->sensordata->slave_info.sensor_id =
+		probe_info->expected_data;
+	s_ctrl->sensordata->slave_info.sensor_id_mask =
+		probe_info->data_mask;
+
+	s_ctrl->sensor_probe_addr_type =  probe_info->addr_type;
+	s_ctrl->sensor_probe_data_type =  probe_info->data_type;
+	CDBG("%s:%d Sensor Addr: 0x%x sensor_id: 0x%x sensor_mask: 0x%x\n",
+		__func__, __LINE__,
+		s_ctrl->sensordata->slave_info.sensor_id_reg_addr,
+		s_ctrl->sensordata->slave_info.sensor_id,
+		s_ctrl->sensordata->slave_info.sensor_id_mask);
+	return rc;
+}
+
+int32_t cam_sensor_update_power_settings(void *cmd_buf,
+	int cmd_length, struct cam_sensor_ctrl_t *s_ctrl)
+{
+	int32_t rc = 0, tot_size = 0, last_cmd_type = 0;
+	int32_t i = 0, pwr_up = 0, pwr_down = 0;
+	void *ptr = cmd_buf, *scr;
+	struct cam_cmd_power *pwr_cmd = (struct cam_cmd_power *)cmd_buf;
+	struct common_header *cmm_hdr = (struct common_header *)cmd_buf;
+	struct cam_sensor_power_ctrl_t *power_info =
+		&s_ctrl->sensordata->power_info;
+
+	if (!pwr_cmd || !cmd_length) {
+		pr_err("%s:%d Invalid Args: pwr_cmd %pK, cmd_length: %d\n",
+			__func__, __LINE__, pwr_cmd, cmd_length);
+		return -EINVAL;
+	}
+
+	power_info->power_setting_size = 0;
+	power_info->power_setting =
+		(struct cam_sensor_power_setting *)
+		kzalloc(sizeof(struct cam_sensor_power_setting) *
+			MAX_POWER_CONFIG, GFP_KERNEL);
+	if (!power_info->power_setting)
+		return -ENOMEM;
+
+	power_info->power_down_setting =
+		(struct cam_sensor_power_setting *)
+		kzalloc(sizeof(struct cam_sensor_power_setting) *
+			MAX_POWER_CONFIG, GFP_KERNEL);
+	if (!power_info->power_down_setting) {
+		rc = -ENOMEM;
+		goto free_power_settings;
+	}
+
+	while (tot_size < cmd_length) {
+		if (cmm_hdr->cmd_type ==
+			CAMERA_SENSOR_CMD_TYPE_PWR_UP) {
+			struct cam_cmd_power *pwr_cmd =
+				(struct cam_cmd_power *)ptr;
+
+			power_info->
+				power_setting_size +=
+				pwr_cmd->count;
+			scr = ptr + sizeof(struct cam_cmd_power);
+			tot_size = tot_size + sizeof(struct cam_cmd_power);
+
+			if (pwr_cmd->count == 0)
+				CDBG("%s:%d Un expected Command\n",
+					__func__, __LINE__);
+
+			for (i = 0; i < pwr_cmd->count; i++, pwr_up++) {
+				power_info->
+					power_setting[pwr_up].seq_type =
+					pwr_cmd->power_settings[i].
+						power_seq_type;
+				power_info->
+					power_setting[pwr_up].config_val =
+					pwr_cmd->power_settings[i].
+						config_val_low;
+				power_info->power_setting[pwr_up].delay = 0;
+				if (i) {
+					scr = scr +
+						sizeof(
+						struct cam_power_settings);
+					tot_size = tot_size +
+						sizeof(
+						struct cam_power_settings);
+				}
+				if (tot_size > cmd_length) {
+					pr_err("%s:%d :Error: Command Buffer is wrong\n",
+						__func__, __LINE__);
+					rc = -EINVAL;
+					goto free_power_down_settings;
+				}
+				CDBG("Seq Type[%d]: %d Config_val: %ld\n",
+					pwr_up,
+					power_info->
+						power_setting[pwr_up].seq_type,
+					power_info->
+						power_setting[pwr_up].
+						config_val);
+			}
+			last_cmd_type = CAMERA_SENSOR_CMD_TYPE_PWR_UP;
+			ptr = (void *) scr;
+			cmm_hdr = (struct common_header *)ptr;
+		} else if (cmm_hdr->cmd_type == CAMERA_SENSOR_CMD_TYPE_WAIT) {
+			struct cam_cmd_unconditional_wait *wait_cmd =
+				(struct cam_cmd_unconditional_wait *)ptr;
+			if (wait_cmd->op_code ==
+				CAMERA_SENSOR_WAIT_OP_SW_UCND) {
+				if (last_cmd_type ==
+					CAMERA_SENSOR_CMD_TYPE_PWR_UP) {
+					if (pwr_up > 0)
+						power_info->
+							power_setting
+							[pwr_up - 1].delay +=
+							wait_cmd->delay;
+					else
+						pr_err("%s:%d Delay is expected only after valid power up setting\n",
+							__func__, __LINE__);
+				} else if (last_cmd_type ==
+					CAMERA_SENSOR_CMD_TYPE_PWR_DOWN) {
+					if (pwr_down > 0)
+						power_info->
+							power_down_setting
+							[pwr_down - 1].delay +=
+							wait_cmd->delay;
+					else
+						pr_err("%s:%d Delay is expected only after valid power down setting\n",
+							__func__, __LINE__);
+				}
+			} else
+				CDBG("%s:%d Invalid op code: %d\n",
+					__func__, __LINE__, wait_cmd->op_code);
+			tot_size = tot_size +
+				sizeof(struct cam_cmd_unconditional_wait);
+			if (tot_size > cmd_length) {
+				pr_err("Command Buffer is wrong\n");
+				return -EINVAL;
+			}
+			scr = (void *) (wait_cmd);
+			ptr = (void *)
+				(scr +
+				sizeof(struct cam_cmd_unconditional_wait));
+			CDBG("%s:%d ptr: %pK sizeof: %d Next: %pK\n",
+				__func__, __LINE__, scr,
+				(int32_t)sizeof(
+				struct cam_cmd_unconditional_wait), ptr);
+
+			cmm_hdr = (struct common_header *)ptr;
+		} else if (cmm_hdr->cmd_type ==
+			CAMERA_SENSOR_CMD_TYPE_PWR_DOWN) {
+			struct cam_cmd_power *pwr_cmd =
+				(struct cam_cmd_power *)ptr;
+
+			scr = ptr + sizeof(struct cam_cmd_power);
+			tot_size = tot_size + sizeof(struct cam_cmd_power);
+			power_info->power_down_setting_size += pwr_cmd->count;
+
+			if (pwr_cmd->count == 0)
+				pr_err("%s:%d Invalid Command\n",
+					__func__, __LINE__);
+
+			for (i = 0; i < pwr_cmd->count; i++, pwr_down++) {
+				power_info->
+					power_down_setting[pwr_down].
+					seq_type =
+					pwr_cmd->power_settings[i].
+					power_seq_type;
+				power_info->
+					power_down_setting[pwr_down].
+					config_val =
+					pwr_cmd->power_settings[i].
+					config_val_low;
+				power_info->
+					power_down_setting[pwr_down].delay = 0;
+				if (i) {
+					scr = scr +
+						sizeof(
+						struct cam_power_settings);
+					tot_size =
+						tot_size +
+						sizeof(
+						struct cam_power_settings);
+				}
+				if (tot_size > cmd_length) {
+					pr_err("Command Buffer is wrong\n");
+					rc = -EINVAL;
+					goto free_power_down_settings;
+				}
+				CDBG("%s:%d Seq Type[%d]: %d Config_val: %ld\n",
+					__func__, __LINE__,
+					pwr_down,
+					power_info->
+						power_down_setting[pwr_down].
+						seq_type,
+					power_info->
+						power_down_setting[pwr_down].
+						config_val);
+			}
+			last_cmd_type = CAMERA_SENSOR_CMD_TYPE_PWR_DOWN;
+			ptr = (void *) scr;
+			cmm_hdr = (struct common_header *)ptr;
+		} else {
+			pr_err("%s:%d: :Error: Un expected Header Type: %d\n",
+				__func__, __LINE__, cmm_hdr->cmd_type);
+		}
+	}
+
+	return rc;
+free_power_down_settings:
+	kfree(power_info->power_down_setting);
+free_power_settings:
+	kfree(power_info->power_setting);
+	return rc;
+}
+
+int32_t cam_handle_cmd_buffers_for_probe(void *cmd_buf,
+	struct cam_sensor_ctrl_t *s_ctrl,
+	int32_t cmd_buf_num, int cmd_buf_length)
+{
+	int32_t rc = 0;
+
+	switch (cmd_buf_num) {
+	case 0: {
+		struct cam_cmd_i2c_info *i2c_info = NULL;
+		struct cam_cmd_probe *probe_info;
+
+		i2c_info = (struct cam_cmd_i2c_info *)cmd_buf;
+		rc = cam_sensor_update_i2c_info(i2c_info, s_ctrl);
+		if (rc < 0) {
+			pr_err("%s:%d Failed in Updating the i2c Info\n",
+				__func__, __LINE__);
+			return rc;
+		}
+		probe_info = (struct cam_cmd_probe *)
+			(cmd_buf + sizeof(struct cam_cmd_i2c_info));
+		rc = cam_sensor_update_slave_info(probe_info, s_ctrl);
+		if (rc < 0) {
+			pr_err("%s:%d :Error: Updating the slave Info\n",
+				__func__, __LINE__);
+			return rc;
+		}
+		cmd_buf = probe_info;
+	}
+		break;
+	case 1: {
+		rc = cam_sensor_update_power_settings(cmd_buf,
+			cmd_buf_length, s_ctrl);
+		if (rc < 0) {
+			pr_err("Failed in updating power settings\n");
+			return rc;
+		}
+	}
+		break;
+	default:
+		pr_err("%s:%d Invalid command buffer\n",
+			__func__, __LINE__);
+		break;
+	}
+	return rc;
+}
+
+int32_t cam_handle_mem_ptr(uint64_t handle, struct cam_sensor_ctrl_t *s_ctrl)
+{
+	int rc = 0, i;
+	void *packet = NULL, *cmd_buf1 = NULL;
+	uint32_t *cmd_buf;
+	void *ptr;
+	size_t len;
+	struct cam_packet *pkt;
+	struct cam_cmd_buf_desc *cmd_desc;
+
+	rc = cam_mem_get_cpu_buf(handle,
+		(uint64_t *)&packet, &len);
+	if (rc < 0) {
+		pr_err("%s: %d Failed to get the command Buffer\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+	pkt = (struct cam_packet *)packet;
+	cmd_desc = (struct cam_cmd_buf_desc *)
+		((uint32_t *)&pkt->payload + pkt->cmd_buf_offset/4);
+	if (cmd_desc == NULL) {
+		pr_err("%s: %d command descriptor pos is invalid\n",
+		__func__, __LINE__);
+		return -EINVAL;
+	}
+	if (pkt->num_cmd_buf != 2) {
+		pr_err("%s: %d Expected More Command Buffers : %d\n",
+			__func__, __LINE__, pkt->num_cmd_buf);
+		return -EINVAL;
+	}
+	for (i = 0; i < pkt->num_cmd_buf; i++) {
+		if (!(cmd_desc[i].length))
+			continue;
+		rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
+			(uint64_t *)&cmd_buf1, &len);
+		if (rc < 0) {
+			pr_err("%s: %d Failed to parse the command Buffer Header\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+		cmd_buf = (uint32_t *)cmd_buf1;
+		cmd_buf += cmd_desc[i].offset/4;
+		ptr = (void *) cmd_buf;
+
+		rc = cam_handle_cmd_buffers_for_probe(ptr, s_ctrl,
+			i, cmd_desc[i].length);
+		if (rc < 0) {
+			pr_err("%s: %d Failed to parse the command Buffer Header\n",
+			__func__, __LINE__);
+			return -EINVAL;
+		}
+	}
+	return rc;
+}
+
+void cam_sensor_query_cap(struct cam_sensor_ctrl_t *s_ctrl,
+	struct  cam_sensor_query_cap *query_cap)
+{
+	query_cap->pos_roll = s_ctrl->sensordata->pos_roll;
+	query_cap->pos_pitch = s_ctrl->sensordata->pos_pitch;
+	query_cap->pos_yaw = s_ctrl->sensordata->pos_yaw;
+	query_cap->secure_camera = 0;
+	query_cap->actuator_slot_id =
+		s_ctrl->sensordata->subdev_id[SUB_MODULE_ACTUATOR];
+	query_cap->csiphy_slot_id =
+		s_ctrl->sensordata->subdev_id[SUB_MODULE_CSIPHY];
+	query_cap->eeprom_slot_id =
+		s_ctrl->sensordata->subdev_id[SUB_MODULE_EEPROM];
+	query_cap->flash_slot_id =
+		s_ctrl->sensordata->subdev_id[SUB_MODULE_LED_FLASH];
+	query_cap->ois_slot_id =
+		s_ctrl->sensordata->subdev_id[SUB_MODULE_OIS];
+	query_cap->slot_info =
+		s_ctrl->id;
+}
+
+static uint16_t cam_sensor_id_by_mask(struct cam_sensor_ctrl_t *s_ctrl,
+	uint32_t chipid)
+{
+	uint16_t sensor_id = (uint16_t)(chipid & 0xFFFF);
+	int16_t sensor_id_mask = s_ctrl->sensordata->slave_info.sensor_id_mask;
+
+	if (!sensor_id_mask)
+		sensor_id_mask = ~sensor_id_mask;
+
+	sensor_id &= sensor_id_mask;
+	sensor_id_mask &= -sensor_id_mask;
+	sensor_id_mask -= 1;
+	while (sensor_id_mask) {
+		sensor_id_mask >>= 1;
+		sensor_id >>= 1;
+	}
+	return sensor_id;
+}
+
+int cam_sensor_match_id(struct cam_sensor_ctrl_t *s_ctrl)
+{
+	int rc = 0;
+	uint32_t chipid = 0;
+	struct cam_camera_slave_info *slave_info;
+
+	slave_info = &(s_ctrl->sensordata->slave_info);
+
+	if (!slave_info) {
+		pr_err("%s:%d failed: %pK\n",
+			__func__, __LINE__, slave_info);
+		return -EINVAL;
+	}
+
+	rc = camera_io_dev_read(
+		&(s_ctrl->io_master_info),
+		slave_info->sensor_id_reg_addr,
+		&chipid, CAMERA_SENSOR_I2C_TYPE_WORD);
+
+	CDBG("%s:%d read id: 0x%x expected id 0x%x:\n",
+			__func__, __LINE__, chipid, slave_info->sensor_id);
+	if (cam_sensor_id_by_mask(s_ctrl, chipid) != slave_info->sensor_id) {
+		pr_err("%s: chip id %x does not match %x\n",
+				__func__, chipid, slave_info->sensor_id);
+		return -ENODEV;
+	}
+	return rc;
+}
+
+int32_t cam_sensor_driver_cmd(struct cam_sensor_ctrl_t *s_ctrl,
+	void *arg)
+{
+	int rc = 0;
+	struct cam_control *cmd = (struct cam_control *)arg;
+	struct cam_sensor_power_setting *pu = NULL;
+	struct cam_sensor_power_setting *pd = NULL;
+	struct cam_sensor_power_ctrl_t *power_info =
+		&s_ctrl->sensordata->power_info;
+
+	if (!s_ctrl || !arg) {
+		pr_err("%s: %d s_ctrl is NULL\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&(s_ctrl->cam_sensor_mutex));
+	switch (cmd->op_code) {
+	case CAM_SENSOR_PROBE_CMD: {
+		if (s_ctrl->is_probe_succeed == 1) {
+			pr_err("Already Sensor Probed in the slot\n");
+			break;
+		}
+		/* Allocate memory for power up setting */
+		pu = kzalloc(sizeof(struct cam_sensor_power_setting) *
+			MAX_POWER_CONFIG, GFP_KERNEL);
+		if (!pu) {
+			rc = -ENOMEM;
+			goto release_mutex;
+		}
+
+		pd = kzalloc(sizeof(struct cam_sensor_power_setting) *
+			MAX_POWER_CONFIG, GFP_KERNEL);
+		if (!pd) {
+			kfree(pu);
+			rc = -ENOMEM;
+			goto release_mutex;
+		}
+
+		power_info->power_setting = pu;
+		power_info->power_down_setting = pd;
+
+		if (cmd->handle_type ==
+			CAM_HANDLE_MEM_HANDLE) {
+			rc = cam_handle_mem_ptr(cmd->handle, s_ctrl);
+			if (rc < 0) {
+				pr_err("%s: %d Get Buffer Handle Failed\n",
+					__func__, __LINE__);
+				kfree(pu);
+				kfree(pd);
+				goto release_mutex;
+			}
+		} else {
+			pr_err("%s:%d :Error: Invalid Command Type: %d",
+				__func__, __LINE__, cmd->handle_type);
+		}
+
+		/* Parse and fill vreg params for powerup settings */
+		rc = msm_camera_fill_vreg_params(
+			s_ctrl->sensordata->power_info.cam_vreg,
+			s_ctrl->sensordata->power_info.num_vreg,
+			s_ctrl->sensordata->power_info.power_setting,
+			s_ctrl->sensordata->power_info.power_setting_size);
+		if (rc < 0) {
+			pr_err("%s:%d :Error: Fail in filling vreg params for PUP rc %d",
+				__func__, __LINE__, rc);
+			kfree(pu);
+			kfree(pd);
+			goto release_mutex;
+		}
+
+		/* Parse and fill vreg params for powerdown settings*/
+		rc = msm_camera_fill_vreg_params(
+			s_ctrl->sensordata->power_info.cam_vreg,
+			s_ctrl->sensordata->power_info.num_vreg,
+			s_ctrl->sensordata->power_info.power_down_setting,
+			s_ctrl->sensordata->power_info.power_down_setting_size);
+		if (rc < 0) {
+			pr_err("%s:%d :Error: Fail in filling vreg params for PDOWN rc %d",
+				__func__, __LINE__, rc);
+			kfree(pu);
+			kfree(pd);
+			goto release_mutex;
+		}
+
+		/* Power up and probe sensor */
+		rc = cam_sensor_power_up(s_ctrl);
+		if (rc < 0) {
+			pr_err("power up failed");
+			cam_sensor_power_down(s_ctrl);
+			kfree(pu);
+			kfree(pd);
+			goto release_mutex;
+		}
+
+		/* Match sensor ID */
+		rc = cam_sensor_match_id(s_ctrl);
+		if (rc < 0) {
+			cam_sensor_power_down(s_ctrl);
+			msleep(20);
+			kfree(pu);
+			kfree(pd);
+			goto release_mutex;
+		}
+
+		CDBG("%s:%d Probe Succeeded on the slot: %d\n",
+			__func__, __LINE__, s_ctrl->id);
+		rc = cam_sensor_power_down(s_ctrl);
+		if (rc < 0) {
+			pr_err("%s:%d :Error: fail in Sensor Power Down\n",
+				__func__, __LINE__);
+			kfree(pu);
+			kfree(pd);
+			goto release_mutex;
+		}
+		/*
+		 * Set probe succeeded flag to 1 so that no other camera shall
+		 * probed on this slot
+		 */
+		s_ctrl->is_probe_succeed = 1;
+	}
+		break;
+	case CAM_ACQUIRE_DEV: {
+		struct cam_sensor_acquire_dev sensor_acq_dev;
+		struct cam_create_dev_hdl bridge_params;
+
+		if (s_ctrl->bridge_intf.device_hdl != -1) {
+			pr_err("%s:%d Device is already acquired\n",
+				__func__, __LINE__);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+		if (copy_from_user(&sensor_acq_dev,
+			(void __user *) cmd->handle,
+			sizeof(sensor_acq_dev))) {
+			pr_err("Failed Copying from user\n");
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+
+		bridge_params.session_hdl = sensor_acq_dev.session_handle;
+		bridge_params.ops = &s_ctrl->bridge_intf.ops;
+		bridge_params.v4l2_sub_dev_flag = 0;
+		bridge_params.media_entity_flag = 0;
+		bridge_params.priv = s_ctrl;
+
+		sensor_acq_dev.device_handle =
+			cam_create_device_hdl(&bridge_params);
+		s_ctrl->bridge_intf.device_hdl = sensor_acq_dev.device_handle;
+		s_ctrl->bridge_intf.session_hdl = sensor_acq_dev.session_handle;
+
+		CDBG("%s:%d Device Handle: %d\n", __func__, __LINE__,
+			sensor_acq_dev.device_handle);
+		if (copy_to_user((void __user *) cmd->handle, &sensor_acq_dev,
+			sizeof(struct cam_sensor_acquire_dev))) {
+			pr_err("Failed Copy to User\n");
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+	}
+		break;
+	case CAM_RELEASE_DEV: {
+		if (s_ctrl->bridge_intf.device_hdl == -1) {
+			pr_err("%s:%d Invalid Handles: link hdl: %d device hdl: %d\n",
+				__func__, __LINE__,
+				s_ctrl->bridge_intf.device_hdl,
+				s_ctrl->bridge_intf.link_hdl);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+		rc = cam_destroy_device_hdl(s_ctrl->bridge_intf.device_hdl);
+		if (rc < 0)
+			pr_err("%s:%d Failed in destroying the device hdl\n",
+			__func__, __LINE__);
+		s_ctrl->bridge_intf.device_hdl = -1;
+		s_ctrl->bridge_intf.link_hdl = -1;
+		s_ctrl->bridge_intf.session_hdl = -1;
+	}
+		break;
+	case CAM_QUERY_CAP: {
+		struct  cam_sensor_query_cap sensor_cap;
+
+		cam_sensor_query_cap(s_ctrl, &sensor_cap);
+		if (copy_to_user((void __user *) cmd->handle, &sensor_cap,
+			sizeof(struct  cam_sensor_query_cap))) {
+			pr_err("Failed Copy to User\n");
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+		break;
+	}
+	case CAM_START_DEV: {
+		rc = cam_sensor_power_up(s_ctrl);
+		if (rc < 0) {
+			pr_err("%s:%d :Error: Sensor Power up failed\n",
+				__func__, __LINE__);
+			goto release_mutex;
+		}
+		rc = cam_sensor_apply_settings(s_ctrl, 0);
+		if (rc < 0) {
+			pr_err("cannot apply settings\n");
+			goto release_mutex;
+		}
+		rc = delete_request(&s_ctrl->i2c_data.init_settings);
+		if (rc < 0) {
+			pr_err("%s:%d Fail in deleting the Init settings\n",
+				__func__, __LINE__);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+	}
+		break;
+	case CAM_STOP_DEV: {
+		rc = cam_sensor_power_down(s_ctrl);
+		if (rc < 0) {
+			pr_err("%s:%d Sensor Power Down failed\n",
+				__func__, __LINE__);
+			goto release_mutex;
+		}
+	}
+		break;
+	case CAM_CONFIG_DEV: {
+		rc = cam_sensor_i2c_pkt_parse(s_ctrl, arg);
+		if (rc < 0) {
+			pr_err("%s:%d :Error: Failed CCI Config: %d\n",
+				__func__, __LINE__, rc);
+			goto release_mutex;
+		}
+	}
+		break;
+	default:
+		pr_err("%s:%d :Error: Invalid Opcode: %d\n",
+			__func__, __LINE__, cmd->op_code);
+		rc = -EINVAL;
+		goto release_mutex;
+	}
+
+release_mutex:
+	mutex_unlock(&(s_ctrl->cam_sensor_mutex));
+	return rc;
+}
+
+int cam_sensor_publish_dev_info(struct cam_req_mgr_device_info *info)
+{
+	int rc = 0;
+
+	if (!info)
+		return -EINVAL;
+
+	info->dev_id = CAM_REQ_MGR_DEVICE_SENSOR;
+	strlcpy(info->name, CAM_SENSOR_NAME, sizeof(info->name));
+	info->p_delay = 2;
+
+	return rc;
+}
+
+int cam_sensor_establish_link(struct cam_req_mgr_core_dev_link_setup *link)
+{
+	struct cam_sensor_ctrl_t *s_ctrl = NULL;
+
+	if (!link)
+		return -EINVAL;
+
+	s_ctrl = (struct cam_sensor_ctrl_t *)
+		cam_get_device_priv(link->dev_hdl);
+	if (!s_ctrl) {
+		pr_err("%s: Device data is NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (link->link_enable) {
+		s_ctrl->bridge_intf.link_hdl = link->link_hdl;
+		s_ctrl->bridge_intf.crm_cb = link->crm_cb;
+	} else {
+		s_ctrl->bridge_intf.link_hdl = -1;
+		s_ctrl->bridge_intf.crm_cb = NULL;
+	}
+
+	return 0;
+}
+
+int cam_sensor_power(struct v4l2_subdev *sd, int on)
+{
+	struct cam_sensor_ctrl_t *s_ctrl = v4l2_get_subdevdata(sd);
+
+	mutex_lock(&(s_ctrl->cam_sensor_mutex));
+	if (!on && s_ctrl->sensor_state == CAM_SENSOR_POWER_UP) {
+		cam_sensor_power_down(s_ctrl);
+		s_ctrl->sensor_state = CAM_SENSOR_POWER_DOWN;
+	}
+	mutex_unlock(&(s_ctrl->cam_sensor_mutex));
+
+	return 0;
+}
+
+int cam_sensor_power_up(struct cam_sensor_ctrl_t *s_ctrl)
+{
+	int rc;
+	struct cam_sensor_power_ctrl_t *power_info;
+	struct cam_camera_slave_info *slave_info;
+
+	if (!s_ctrl) {
+		pr_err("%s:%d failed: %pK\n",
+			__func__, __LINE__, s_ctrl);
+		return -EINVAL;
+	}
+
+	power_info = &s_ctrl->sensordata->power_info;
+	slave_info = &(s_ctrl->sensordata->slave_info);
+
+	if (!power_info || !slave_info) {
+		pr_err("%s:%d failed: %pK %pK\n",
+			__func__, __LINE__, power_info,
+			slave_info);
+		return -EINVAL;
+	}
+
+	rc = cam_sensor_core_power_up(power_info);
+	if (rc < 0) {
+		pr_err("%s:%d power up the core is failed:%d\n",
+			__func__, __LINE__, rc);
+		return rc;
+	}
+
+	if (s_ctrl->io_master_info.master_type == CCI_MASTER) {
+		rc = camera_io_init(&(s_ctrl->io_master_info));
+		if (rc < 0) {
+			pr_err("%s cci_init failed\n", __func__);
+			return -EINVAL;
+		}
+	}
+
+	s_ctrl->sensor_state = CAM_SENSOR_POWER_UP;
+
+	return rc;
+}
+
+int cam_sensor_power_down(struct cam_sensor_ctrl_t *s_ctrl)
+{
+	struct cam_sensor_power_ctrl_t *power_info;
+	int rc = 0;
+
+	if (!s_ctrl) {
+		pr_err("%s:%d failed: s_ctrl %pK\n",
+			__func__, __LINE__, s_ctrl);
+		return -EINVAL;
+	}
+
+	power_info = &s_ctrl->sensordata->power_info;
+
+	if (!power_info) {
+		pr_err("%s:%d failed: power_info %pK\n",
+			__func__, __LINE__, power_info);
+		return -EINVAL;
+	}
+	rc = msm_camera_power_down(power_info);
+	if (rc < 0) {
+		pr_err("%s:%d power down the core is failed:%d\n",
+			__func__, __LINE__, rc);
+		return rc;
+	}
+
+	if (s_ctrl->io_master_info.master_type == CCI_MASTER)
+		camera_io_release(&(s_ctrl->io_master_info));
+
+	s_ctrl->sensor_state = CAM_SENSOR_POWER_DOWN;
+
+	return rc;
+}
+
+int cam_sensor_apply_settings(struct cam_sensor_ctrl_t *s_ctrl,
+	int64_t req_id)
+{
+	int rc = 0, offset, del_req_id;
+	struct i2c_settings_array *i2c_set = NULL;
+	struct i2c_settings_list *i2c_list;
+
+	if (req_id == 0) {
+		i2c_set = &s_ctrl->i2c_data.init_settings;
+		if (i2c_set->is_settings_valid == 1) {
+			list_for_each_entry(i2c_list,
+				&(i2c_set->list_head), list) {
+				rc = camera_io_dev_write(
+					&(s_ctrl->io_master_info),
+					&(i2c_list->i2c_settings));
+				if (rc < 0) {
+					pr_err("Failed to write the I2C settings\n");
+					return rc;
+				}
+			}
+			rc = delete_request(&(s_ctrl->i2c_data.init_settings));
+			i2c_set->is_settings_valid = 0;
+			if (rc < 0) {
+				pr_err("%s:%d :Error: Failed in deleting the Init request: %d\n",
+					__func__, __LINE__, rc);
+			}
+		}
+	} else {
+		offset = req_id % MAX_PER_FRAME_ARRAY;
+		i2c_set = &(s_ctrl->i2c_data.per_frame[offset]);
+		if (i2c_set->is_settings_valid == 1 &&
+			i2c_set->request_id == req_id) {
+			list_for_each_entry(i2c_list,
+				&(i2c_set->list_head), list) {
+				rc = camera_io_dev_write(
+					&(s_ctrl->io_master_info),
+					&(i2c_list->i2c_settings));
+				if (rc < 0) {
+					pr_err("%s:%d :Error: Fail to write the I2C settings: %d\n",
+						__func__, __LINE__, rc);
+					return rc;
+				}
+			}
+			del_req_id = (req_id +
+				MAX_PER_FRAME_ARRAY -
+				MAX_SYSTEM_PIPELINE_DELAY) %
+				MAX_PER_FRAME_ARRAY;
+			CDBG("%s:%d Deleting the Request: %d\n",
+				__func__, __LINE__,	del_req_id);
+			if (req_id >
+				s_ctrl->i2c_data.per_frame[del_req_id].
+				request_id) {
+				s_ctrl->i2c_data.per_frame[del_req_id].
+					request_id = 0;
+				rc = delete_request(
+					&(s_ctrl->i2c_data.
+					per_frame[del_req_id]));
+				if (rc < 0)
+					pr_err("%s:%d :Error: Failed in deleting the request: %d rc: %d\n",
+						__func__, __LINE__,
+						del_req_id, rc);
+			}
+		} else {
+			CDBG("%s:%d Invalid/NOP request to apply: %lld\n",
+				__func__, __LINE__, req_id);
+		}
+	}
+	return rc;
+}
+
+int32_t cam_sensor_apply_request(struct cam_req_mgr_apply_request *apply)
+{
+	int32_t rc = 0;
+	struct cam_sensor_ctrl_t *s_ctrl = NULL;
+
+	if (!apply)
+		return -EINVAL;
+
+	s_ctrl = (struct cam_sensor_ctrl_t *)
+		cam_get_device_priv(apply->dev_hdl);
+	if (!s_ctrl) {
+		pr_err("%s: Device data is NULL\n", __func__);
+		return -EINVAL;
+	}
+	CDBG("%s:%d Req Id: %lld\n", __func__, __LINE__,
+		apply->request_id);
+	rc = cam_sensor_apply_settings(s_ctrl, apply->request_id);
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.h
new file mode 100644
index 0000000..b23edce
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.h
@@ -0,0 +1,77 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_SENSOR_CORE_H_
+#define _CAM_SENSOR_CORE_H_
+
+#include "cam_sensor_dev.h"
+
+/**
+ * @s_ctrl: Sensor ctrl structure
+ *
+ * This API powers up the camera sensor module
+ */
+int cam_sensor_power_up(struct cam_sensor_ctrl_t *s_ctrl);
+
+/**
+ * @s_ctrl: Sensor ctrl structure
+ *
+ * This API powers down the camera sensor module
+ */
+int cam_sensor_power_down(struct cam_sensor_ctrl_t *s_ctrl);
+
+/**
+ * @sd: V4L2 subdevice
+ * @on: Turn off/on flag
+ *
+ * This API powers down the sensor module
+ */
+int cam_sensor_power(struct v4l2_subdev *sd, int on);
+
+/**
+ * @s_ctrl: Sensor ctrl structure
+ * @req_id: Request id
+ *
+ * This API applies the req_id settings to sensor
+ */
+int cam_sensor_apply_settings(struct cam_sensor_ctrl_t *s_ctrl, int64_t req_id);
+
+/**
+ * @apply: Req mgr structure for applying request
+ *
+ * This API applies the request that is mentioned
+ */
+int cam_sensor_apply_request(struct cam_req_mgr_apply_request *apply);
+
+/**
+ * @info: Sub device info to req mgr
+ *
+ * Publish the subdevice info
+ */
+int cam_sensor_publish_dev_info(struct cam_req_mgr_device_info *info);
+
+/**
+ * @link: Link setup info
+ *
+ * This API establishes link with sensor subdevice with req mgr
+ */
+int cam_sensor_establish_link(struct cam_req_mgr_core_dev_link_setup *link);
+
+/**
+ * @s_ctrl: Sensor ctrl structure
+ * @arg:    Camera control command argument
+ *
+ * This API handles the camera control argument reached to sensor
+ */
+int32_t cam_sensor_driver_cmd(struct cam_sensor_ctrl_t *s_ctrl, void *arg);
+
+#endif /* _CAM_SENSOR_CORE_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.c
new file mode 100644
index 0000000..448ce51
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.c
@@ -0,0 +1,294 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_sensor_dev.h"
+#include "cam_req_mgr_dev.h"
+#include "cam_sensor_soc.h"
+#include "cam_sensor_core.h"
+
+static long cam_sensor_subdev_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)	/* v4l2 core ioctl entry: dispatches CAM_CONTROL to the sensor driver */
+{
+	int rc = 0;
+	struct cam_sensor_ctrl_t *s_ctrl =
+		v4l2_get_subdevdata(sd);	/* set by probe via v4l2_set_subdevdata() */
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_sensor_driver_cmd(s_ctrl, arg);	/* arg is an in-kernel struct cam_control here */
+		break;
+	default:
+		pr_err("%s:%d Invalid ioctl cmd: %d\n",
+			__func__, __LINE__, cmd);
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+static int32_t cam_sensor_driver_i2c_probe(struct i2c_client *client,
+	const struct i2c_device_id *id)	/* i2c bus probe: allocates s_ctrl and parses DT only */
+{
+	int32_t rc = 0;
+	struct cam_sensor_ctrl_t *s_ctrl;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		pr_err("%s %s :Error: i2c_check_functionality failed\n",
+			__func__, client->name);
+		return -EFAULT;	/* NOTE(review): -ENODEV is the conventional errno here -- confirm */
+	}
+
+	/* Create sensor control structure */
+	s_ctrl = kzalloc(sizeof(*s_ctrl), GFP_KERNEL);
+	if (!s_ctrl)
+		return -ENOMEM;
+
+	i2c_set_clientdata(client, s_ctrl);	/* retrieved again in cam_sensor_driver_i2c_remove() */
+
+	/* Initialize sensor device type */
+	s_ctrl->of_node = client->dev.of_node;
+	s_ctrl->io_master_info.master_type = I2C_MASTER;	/* NOTE(review): msm_sensor_init_default_params() only accepts CCI_MASTER, so parse_dt below appears to fail for I2C sensors -- confirm */
+
+	rc = cam_sensor_parse_dt(s_ctrl);
+	if (rc < 0) {
+		pr_err("%s:%d :Error: cam_sensor_parse_dt rc %d",
+			__func__, __LINE__, rc);
+		goto free_s_ctrl;
+	}
+
+	return rc;	/* NOTE(review): unlike the platform probe, no subdev registration or per_frame setup happens here */
+free_s_ctrl:
+	kfree(s_ctrl);
+	return rc;
+}
+
+static int cam_sensor_platform_remove(struct platform_device *pdev)
+{
+	struct cam_sensor_ctrl_t  *s_ctrl;
+
+	s_ctrl = platform_get_drvdata(pdev);	/* set by cam_sensor_driver_platform_probe() */
+	if (!s_ctrl) {
+		pr_err("%s: sensor device is NULL\n", __func__);
+		return 0;
+	}
+
+	kfree(s_ctrl->i2c_data.per_frame);	/* NOTE(review): the subdev registered in probe is not unregistered here -- confirm */
+	devm_kfree(&pdev->dev, s_ctrl);	/* devm allocation; explicit free mirrors probe's error path */
+
+	return 0;
+}
+
+static int cam_sensor_driver_i2c_remove(struct i2c_client *client)
+{
+	struct cam_sensor_ctrl_t  *s_ctrl = i2c_get_clientdata(client);	/* set in i2c probe */
+
+	if (!s_ctrl) {
+		pr_err("%s: sensor device is NULL\n", __func__);
+		return 0;
+	}
+
+	kfree(s_ctrl->i2c_data.per_frame);	/* NULL-safe: i2c probe never allocates per_frame */
+	kfree(s_ctrl);
+
+	return 0;
+}
+
+#ifdef CONFIG_COMPAT
+static long cam_sensor_init_subdev_do_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, unsigned long arg)	/* 32-bit compat ioctl: bounces cam_control through a kernel copy */
+{
+	struct cam_control cmd_data;
+	int32_t rc = 0;
+
+	if (copy_from_user(&cmd_data, (void __user *)arg,
+		sizeof(cmd_data))) {
+		pr_err("Failed to copy from user_ptr=%pK size=%zu\n",
+			(void __user *)arg, sizeof(cmd_data));
+		return -EFAULT;
+	}
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_sensor_subdev_ioctl(sd, cmd, &cmd_data);	/* reuse the native handler on the kernel copy */
+		if (rc < 0)
+			pr_err("%s:%d cam_sensor_subdev_ioctl failed\n",
+				__func__, __LINE__);
+		break;
+	default:
+		pr_err("%s:%d Invalid compat ioctl cmd_type: %d\n",
+			__func__, __LINE__, cmd);
+		rc = -EINVAL;
+	}
+
+	if (!rc) {	/* copy results back only on success so userspace state stays untouched on error */
+		if (copy_to_user((void __user *)arg, &cmd_data,
+			sizeof(cmd_data))) {
+			pr_err("Failed to copy to user_ptr=%pK size=%zu\n",
+				(void __user *)arg, sizeof(cmd_data));
+			rc = -EFAULT;
+		}
+	}
+	return rc;
+}
+
+#endif
+
+static struct v4l2_subdev_core_ops cam_sensor_subdev_core_ops = {
+	.ioctl = cam_sensor_subdev_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl32 = cam_sensor_init_subdev_do_ioctl,
+#endif
+	.s_power = cam_sensor_power,	/* declared in cam_sensor_core.h */
+};
+
+static struct v4l2_subdev_ops cam_sensor_subdev_ops = {
+	.core = &cam_sensor_subdev_core_ops,
+};
+
+/* No internal ops needed; registered as an empty table */
+static const struct v4l2_subdev_internal_ops cam_sensor_internal_ops;
+
+static const struct of_device_id cam_sensor_driver_dt_match[] = {
+	{.compatible = "qcom,cam-sensor"},
+	{}
+};
+
+static int32_t cam_sensor_driver_platform_probe(
+	struct platform_device *pdev)	/* CCI-attached sensor probe: DT parse, subdev registration, CRM bridge hookup */
+{
+	int32_t rc = 0, i = 0;
+	struct cam_sensor_ctrl_t *s_ctrl = NULL;
+
+	/* Create sensor control structure */
+	s_ctrl = devm_kzalloc(&pdev->dev,
+		sizeof(struct cam_sensor_ctrl_t), GFP_KERNEL);
+	if (!s_ctrl)
+		return -ENOMEM;
+
+	/* Initialize sensor device type */
+	s_ctrl->of_node = pdev->dev.of_node;
+	s_ctrl->is_probe_succeed = 0;
+
+	/*fill in platform device*/
+	s_ctrl->pdev = pdev;
+
+	s_ctrl->io_master_info.master_type = CCI_MASTER;
+
+	rc = cam_sensor_parse_dt(s_ctrl);	/* allocates s_ctrl->sensordata and friends on success */
+	if (rc < 0) {
+		pr_err("failed: cam_sensor_parse_dt rc %d", rc);
+		goto free_s_ctrl;
+	}
+
+	/* Fill platform device id*/
+	pdev->id = s_ctrl->id;	/* cell-index from DT, read in cam_sensor_driver_get_dt_data() */
+
+	s_ctrl->v4l2_dev_str.internal_ops =
+		&cam_sensor_internal_ops;
+	s_ctrl->v4l2_dev_str.ops =
+		&cam_sensor_subdev_ops;
+	strlcpy(s_ctrl->device_name, CAMX_SENSOR_DEV_NAME,
+		sizeof(s_ctrl->device_name));
+	s_ctrl->v4l2_dev_str.name =
+		s_ctrl->device_name;
+	s_ctrl->v4l2_dev_str.sd_flags =
+		(V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS);
+	s_ctrl->v4l2_dev_str.ent_function =
+		CAM_SENSOR_DEVICE_TYPE;
+	s_ctrl->v4l2_dev_str.token = s_ctrl;
+
+	rc = cam_register_subdev(&(s_ctrl->v4l2_dev_str));
+	if (rc < 0) {
+		pr_err("%s:%d :ERROR: Fail with cam_register_subdev\n",
+			__func__, __LINE__);
+		goto free_s_ctrl;	/* NOTE(review): DT allocations from parse_dt are not freed on this path */
+	}
+
+	s_ctrl->i2c_data.per_frame =
+		(struct i2c_settings_array *)
+		kzalloc(sizeof(struct i2c_settings_array) *
+		MAX_PER_FRAME_ARRAY, GFP_KERNEL);	/* NOTE(review): kcalloc() is preferred for array allocation (overflow-checked) */
+	if (s_ctrl->i2c_data.per_frame == NULL) {
+		rc = -ENOMEM;
+		goto free_s_ctrl;	/* NOTE(review): registered subdev is not unregistered on this failure -- confirm */
+	}
+
+	INIT_LIST_HEAD(&(s_ctrl->i2c_data.init_settings.list_head));
+
+	for (i = 0; i < MAX_PER_FRAME_ARRAY; i++)
+		INIT_LIST_HEAD(&(s_ctrl->i2c_data.per_frame[i].list_head));
+
+	/* CRM bridge callbacks used by the request manager */
+	s_ctrl->bridge_intf.device_hdl = -1;
+	s_ctrl->bridge_intf.ops.get_dev_info = cam_sensor_publish_dev_info;
+	s_ctrl->bridge_intf.ops.link_setup = cam_sensor_establish_link;
+	s_ctrl->bridge_intf.ops.apply_req = cam_sensor_apply_request;
+
+	s_ctrl->sensordata->power_info.dev = &pdev->dev;	/* sensordata guaranteed non-NULL after successful parse_dt */
+	platform_set_drvdata(pdev, s_ctrl);
+	v4l2_set_subdevdata(&(s_ctrl->v4l2_dev_str.sd), s_ctrl);
+
+	return rc;
+free_s_ctrl:
+	devm_kfree(&pdev->dev, s_ctrl);
+	return rc;
+}
+
+MODULE_DEVICE_TABLE(of, cam_sensor_driver_dt_match);
+
+static struct platform_driver cam_sensor_platform_driver = {
+	.probe = cam_sensor_driver_platform_probe,
+	.driver = {
+		.name = "qcom,camera",	/* driver name; DT binding match uses "qcom,cam-sensor" above */
+		.owner = THIS_MODULE,
+		.of_match_table = cam_sensor_driver_dt_match,
+	},
+	.remove = cam_sensor_platform_remove,
+};
+
+static const struct i2c_device_id i2c_id[] = {
+	{SENSOR_DRIVER_I2C, (kernel_ulong_t)NULL},	/* no per-device driver_data needed */
+	{ }
+};
+
+static struct i2c_driver cam_sensor_driver_i2c = {
+	.id_table = i2c_id,
+	.probe = cam_sensor_driver_i2c_probe,
+	.remove = cam_sensor_driver_i2c_remove,
+	.driver = {
+		.name = SENSOR_DRIVER_I2C,
+	},
+};
+
+static int __init cam_sensor_driver_init(void)
+{
+	int32_t rc = 0;
+
+	rc = platform_driver_register(&cam_sensor_platform_driver);
+	if (rc)
+		pr_err("%s platform_driver_register failed rc = %d",
+			__func__, rc);
+	rc = i2c_add_driver(&cam_sensor_driver_i2c);	/* NOTE(review): overwrites rc, so a platform register failure is masked if i2c_add_driver succeeds */
+	if (rc)
+		pr_err("%s i2c_add_driver failed rc = %d", __func__, rc);
+
+	return rc;
+}
+
+static void __exit cam_sensor_driver_exit(void)
+{
+	/* tear down both registrations in reverse order of init */
+	platform_driver_unregister(&cam_sensor_platform_driver);
+	i2c_del_driver(&cam_sensor_driver_i2c);
+}
+
+module_init(cam_sensor_driver_init);
+module_exit(cam_sensor_driver_exit);
+MODULE_DESCRIPTION("cam_sensor_driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h
new file mode 100644
index 0000000..f597c36
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h
@@ -0,0 +1,111 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_SENSOR_DEV_H_
+#define _CAM_SENSOR_DEV_H_
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/irqreturn.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <cam_cci_dev.h>
+#include <cam_sensor_cmn_header.h>
+#include <cam_subdev.h>
+#include <cam_sensor_soc_api.h>
+#include <cam_sensor_io.h>
+
+#define NUM_MASTERS 2
+#define NUM_QUEUES 2
+
+#define TRUE  1
+#define FALSE 0
+
+/* Debug logging: promoted to pr_err when CAM_SENSOR_DEBUG is defined */
+#undef CDBG
+#ifdef CAM_SENSOR_DEBUG
+#define CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+#define SENSOR_DRIVER_I2C "i2c_camera"
+#define CAMX_SENSOR_DEV_NAME "cam-sensor-driver"
+
+enum cam_sensor_state_t {
+	CAM_SENSOR_POWER_DOWN,
+	CAM_SENSOR_POWER_UP,
+};
+
+/**
+ * struct intf_params
+ * @device_hdl: Device Handle
+ * @session_hdl: Session Handle
+ * @link_hdl: Link Handle
+ * @ops: KMD operations
+ * @crm_cb: Callback API pointers
+ */
+struct intf_params {
+	int32_t device_hdl;
+	int32_t session_hdl;
+	int32_t link_hdl;
+	struct cam_req_mgr_kmd_ops ops;
+	struct cam_req_mgr_crm_cb *crm_cb;
+};
+
+/**
+ * struct cam_sensor_ctrl_t: Camera control structure
+ * @pdev: Platform device
+ * @cam_sensor_mutex: Sensor mutex
+ * @sensordata: Sensor board Information
+ * @cci_i2c_master: I2C structure
+ * @io_master_info: Information about the communication master
+ * @sensor_state: Sensor states
+ * @is_probe_succeed: Probe succeeded or not
+ * @id: Cell Index
+ * @of_node: Of node ptr
+ * @v4l2_dev_str: V4L2 device structure
+ * @sensor_probe_addr_type: Sensor probe address type
+ * @sensor_probe_data_type: Sensor probe data type
+ * @i2c_data: Sensor I2C register settings
+ * @sensor_info: Sensor query cap structure
+ * @bridge_intf: Bridge interface structure
+ * @device_name: Sensor device name
+ */
+struct cam_sensor_ctrl_t {
+	struct platform_device *pdev;
+	struct mutex cam_sensor_mutex;
+	struct cam_sensor_board_info *sensordata;
+	enum cci_i2c_master_t cci_i2c_master;
+	struct camera_io_master io_master_info;
+	enum cam_sensor_state_t sensor_state;
+	uint8_t is_probe_succeed;
+	uint32_t id;
+	struct device_node *of_node;
+	struct cam_subdev v4l2_dev_str;
+	uint8_t sensor_probe_addr_type;
+	uint8_t sensor_probe_data_type;
+	struct i2c_data_settings i2c_data;
+	struct  cam_sensor_query_cap sensor_info;
+	struct intf_params bridge_intf;
+	char device_name[20];
+};
+
+#endif /* _CAM_SENSOR_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_soc.c
new file mode 100644
index 0000000..8cb1078
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_soc.c
@@ -0,0 +1,267 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <cam_sensor_cmn_header.h>
+#include <cam_sensor_util.h>
+#include <cam_sensor_io.h>
+#include <cam_req_mgr_util.h>
+#include "cam_sensor_soc.h"
+
+int32_t cam_sensor_get_sub_module_index(struct device_node *of_node,
+	struct cam_sensor_board_info *s_info)	/* resolves optional sub-module phandles to cell-index values */
+{
+	int rc = 0, i = 0;
+	uint32_t val = 0;
+	struct device_node *src_node = NULL;
+	struct cam_sensor_board_info *sensor_info;
+
+	sensor_info = s_info;
+
+	for (i = 0; i < SUB_MODULE_MAX; i++)
+		sensor_info->subdev_id[i] = -1;	/* -1 marks "sub-module not present" */
+
+	src_node = of_parse_phandle(of_node, "qcom,actuator-src", 0);
+	if (!src_node) {
+		CDBG("%s:%d src_node NULL\n", __func__, __LINE__);	/* property optional; absence is not an error */
+	} else {
+		rc = of_property_read_u32(src_node, "cell-index", &val);
+		CDBG("%s qcom,actuator cell index %d, rc %d\n", __func__,
+			val, rc);
+		if (rc < 0) {
+			pr_err("%s failed %d\n", __func__, __LINE__);
+			of_node_put(src_node);
+			return rc;
+		}
+		sensor_info->subdev_id[SUB_MODULE_ACTUATOR] = val;
+		of_node_put(src_node);	/* drop the reference taken by of_parse_phandle() */
+	}
+
+	src_node = of_parse_phandle(of_node, "qcom,ois-src", 0);
+	if (!src_node) {
+		CDBG("%s:%d src_node NULL\n", __func__, __LINE__);
+	} else {
+		rc = of_property_read_u32(src_node, "cell-index", &val);
+		CDBG("%s qcom,ois cell index %d, rc %d\n", __func__,
+			val, rc);
+		if (rc < 0) {
+			pr_err("%s failed %d\n", __func__, __LINE__);
+			of_node_put(src_node);
+			return rc;
+		}
+		sensor_info->subdev_id[SUB_MODULE_OIS] = val;
+		of_node_put(src_node);
+	}
+
+	src_node = of_parse_phandle(of_node, "qcom,eeprom-src", 0);
+	if (!src_node) {
+		CDBG("%s:%d eeprom src_node NULL\n", __func__, __LINE__);
+	} else {
+		rc = of_property_read_u32(src_node, "cell-index", &val);
+		CDBG("%s qcom,eeprom cell index %d, rc %d\n", __func__,
+			val, rc);
+		if (rc < 0) {
+			pr_err("%s failed %d\n", __func__, __LINE__);
+			of_node_put(src_node);
+			return rc;
+		}
+		sensor_info->subdev_id[SUB_MODULE_EEPROM] = val;
+		of_node_put(src_node);
+	}
+
+	src_node = of_parse_phandle(of_node, "qcom,led-flash-src", 0);
+	if (!src_node) {
+		CDBG("%s:%d src_node NULL\n", __func__, __LINE__);
+	} else {
+		rc = of_property_read_u32(src_node, "cell-index", &val);
+		CDBG("%s qcom,led flash cell index %d, rc %d\n", __func__,
+			val, rc);
+		if (rc < 0) {
+			pr_err("%s:%d failed %d\n", __func__, __LINE__, rc);
+			of_node_put(src_node);
+			return rc;
+		}
+		sensor_info->subdev_id[SUB_MODULE_LED_FLASH] = val;
+		of_node_put(src_node);
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,csiphy-sd-index", &val);
+	if (rc < 0)
+		pr_err("%s:%d :Error: paring the dt node for csiphy rc %d\n",
+			__func__, __LINE__, rc);	/* csiphy index missing: error logged, rc propagated to caller */
+	else
+		sensor_info->subdev_id[SUB_MODULE_CSIPHY] = val;
+
+	return rc;
+}
+
+static int32_t cam_sensor_driver_get_dt_data(struct cam_sensor_ctrl_t *s_ctrl)	/* parses the sensor DT node into s_ctrl->sensordata */
+{
+	int32_t rc = 0;
+	struct cam_sensor_board_info *sensordata = NULL;
+	struct device_node *of_node = s_ctrl->of_node;
+	uint32_t cell_id;
+
+	s_ctrl->sensordata = kzalloc(sizeof(*sensordata), GFP_KERNEL);
+	if (!s_ctrl->sensordata)
+		return -ENOMEM;
+
+	sensordata = s_ctrl->sensordata;
+	/*
+	 * Read cell index - this cell index will be the camera slot where
+	 * this camera will be mounted
+	 */
+	rc = of_property_read_u32(of_node, "cell-index", &cell_id);
+	if (rc < 0) {
+		pr_err("failed: cell-index rc %d", rc);
+		goto FREE_SENSOR_DATA;
+	}
+	s_ctrl->id = cell_id;	/* assigned before range validation below; harmless since callers see rc < 0 */
+
+	/* Validate cell_id */
+	if (cell_id >= MAX_CAMERAS) {
+		pr_err("failed: invalid cell_id %d", cell_id);
+		rc = -EINVAL;
+		goto FREE_SENSOR_DATA;
+	}
+
+	/* Read subdev info */
+	rc = cam_sensor_get_sub_module_index(of_node, sensordata);
+	if (rc < 0) {
+		pr_err("failed");
+		goto FREE_SENSOR_DATA;
+	}
+
+	/* Read vreg information */
+	rc = cam_sensor_get_dt_vreg_data(of_node,
+		&sensordata->power_info.cam_vreg,
+		&sensordata->power_info.num_vreg);
+	if (rc < 0) {
+		pr_err("failed: cam_sensor_get_dt_vreg_data rc %d", rc);
+		goto FREE_SENSOR_DATA;
+	}
+
+	/* Read gpio information */
+	rc = msm_sensor_driver_get_gpio_data
+		(&(sensordata->power_info.gpio_conf), of_node);
+	if (rc < 0) {
+		pr_err("failed: msm_sensor_driver_get_gpio_data rc %d", rc);
+		goto FREE_VREG_DATA;
+	}
+
+	/* Get CCI master */
+	rc = of_property_read_u32(of_node, "qcom,cci-master",
+		&s_ctrl->cci_i2c_master);	/* NOTE(review): passes enum cci_i2c_master_t * where u32 * is expected -- confirm it builds without warning */
+	CDBG("qcom,cci-master %d, rc %d", s_ctrl->cci_i2c_master, rc);
+	if (rc < 0) {
+		/* Set default master 0 */
+		s_ctrl->cci_i2c_master = MASTER_0;
+		rc = 0;	/* missing cci-master is non-fatal */
+	}
+
+	/* 360 is an out-of-range sentinel meaning "position unknown" */
+	if (of_property_read_u32(of_node, "qcom,sensor-position-pitch",
+		&sensordata->pos_pitch) < 0) {
+		CDBG("%s:%d Invalid sensor position\n", __func__, __LINE__);
+		sensordata->pos_pitch = 360;
+	}
+	if (of_property_read_u32(of_node, "qcom,sensor-position-roll",
+		&sensordata->pos_roll) < 0) {
+		CDBG("%s:%d Invalid sensor position\n", __func__, __LINE__);
+		sensordata->pos_roll = 360;
+	}
+	if (of_property_read_u32(of_node, "qcom,sensor-position-yaw",
+		&sensordata->pos_yaw) < 0) {
+		CDBG("%s:%d Invalid sensor position\n", __func__, __LINE__);
+		sensordata->pos_yaw = 360;
+	}
+
+	return rc;
+
+FREE_VREG_DATA:
+	kfree(sensordata->power_info.cam_vreg);
+FREE_SENSOR_DATA:
+	kfree(sensordata);	/* NOTE(review): s_ctrl->sensordata is left dangling here (not NULLed) -- confirm no later use */
+	return rc;
+}
+
+int32_t msm_sensor_init_default_params(struct cam_sensor_ctrl_t *s_ctrl)	/* allocates the CCI client; only CCI_MASTER is supported */
+{
+	/* Validate input parameters */
+	if (!s_ctrl) {
+		pr_err("%s:%d failed: invalid params s_ctrl %pK\n", __func__,
+			__LINE__, s_ctrl);
+		return -EINVAL;
+	}
+
+	CDBG("%s: %d master_type: %d\n", __func__, __LINE__,
+		s_ctrl->io_master_info.master_type);
+	/* Initialize cci_client */
+	if (s_ctrl->io_master_info.master_type == CCI_MASTER) {
+		s_ctrl->io_master_info.cci_client = kzalloc(sizeof(
+			struct cam_sensor_cci_client), GFP_KERNEL);
+		if (!(s_ctrl->io_master_info.cci_client))
+			return -ENOMEM;
+
+	} else {
+		/* I2C_MASTER/SPI_MASTER not handled; i2c-probed sensors hit this branch */
+		pr_err("%s:%d Invalid master / Master type Not supported\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int32_t cam_sensor_parse_dt(struct cam_sensor_ctrl_t *s_ctrl)	/* top-level DT parse: board data, mutex, CCI client, clocks */
+{
+	int32_t rc = 0;
+
+	/* Parse dt information and store in sensor control structure */
+	rc = cam_sensor_driver_get_dt_data(s_ctrl);
+	if (rc < 0) {
+		pr_err("failed: rc %d", rc);
+		return rc;	/* get_dt_data frees its own allocations on failure */
+	}
+
+	/* Initialize mutex */
+	mutex_init(&(s_ctrl->cam_sensor_mutex));
+
+	pr_err("%s: %d\n", __func__, __LINE__);	/* NOTE(review): leftover debug print; should likely be CDBG */
+	/* Initialize default parameters */
+	rc = msm_sensor_init_default_params(s_ctrl);
+	if (rc < 0) {
+		pr_err("failed: msm_sensor_init_default_params rc %d", rc);
+		goto FREE_DT_DATA;
+	}
+
+	/* Get clocks information */
+	rc = msm_camera_get_clk_info(s_ctrl->pdev,	/* NOTE(review): pdev is NULL on the i2c probe path -- confirm helper tolerates it */
+		&s_ctrl->sensordata->power_info.clk_info,
+		&s_ctrl->sensordata->power_info.clk_ptr,
+		&s_ctrl->sensordata->power_info.clk_info_size);
+	if (rc < 0) {
+		pr_err("failed: msm_camera_get_clk_info rc %d", rc);
+		goto FREE_DT_DATA;
+	}
+
+	return rc;
+
+FREE_DT_DATA:
+	/* unwind everything cam_sensor_driver_get_dt_data() allocated */
+	kfree(s_ctrl->sensordata->power_info.gpio_conf->gpio_num_info);
+	kfree(s_ctrl->sensordata->power_info.gpio_conf->cam_gpio_req_tbl);
+	kfree(s_ctrl->sensordata->power_info.gpio_conf);
+	kfree(s_ctrl->sensordata->power_info.cam_vreg);
+	kfree(s_ctrl->sensordata);
+	s_ctrl->sensordata = NULL;
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_soc.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_soc.h
new file mode 100644
index 0000000..7f4a551
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_soc.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_SENSOR_SOC_H_
+#define _CAM_SENSOR_SOC_H_
+
+#include "cam_sensor_dev.h"	/* for struct cam_sensor_ctrl_t */
+
+/**
+ * @s_ctrl: Sensor ctrl structure
+ *
+ * Parses sensor dt
+ */
+int cam_sensor_parse_dt(struct cam_sensor_ctrl_t *s_ctrl);
+
+#endif /* _CAM_SENSOR_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/Makefile
new file mode 100644
index 0000000..bdae1d1
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/Makefile
@@ -0,0 +1,8 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_io.o cam_sensor_cci_i2c.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c
new file mode 100644
index 0000000..40a69ef
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c
@@ -0,0 +1,182 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_sensor_cmn_header.h"
+#include "cam_sensor_i2c.h"
+#include "cam_cci_dev.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+int32_t cam_cci_i2c_read(struct cam_sensor_cci_client *cci_client,
+	uint32_t addr, uint32_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type)	/* reads data_type bytes at addr via the CCI subdev, big-endian assembly */
+{
+	int32_t rc = -EINVAL;
+	unsigned char buf[data_type];	/* NOTE(review): VLA sized before data_type is validated below; UB if data_type is out of range */
+	struct cam_cci_ctrl cci_ctrl;
+
+	if (addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
+		|| data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| data_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
+		return rc;
+
+	cci_ctrl.cmd = MSM_CCI_I2C_READ;
+	cci_ctrl.cci_info = cci_client;
+	cci_ctrl.cfg.cci_i2c_read_cfg.addr = addr;
+	cci_ctrl.cfg.cci_i2c_read_cfg.addr_type = addr_type;
+	cci_ctrl.cfg.cci_i2c_read_cfg.data = buf;
+	cci_ctrl.cfg.cci_i2c_read_cfg.num_byte = data_type;	/* data_type enum value doubles as byte count */
+	rc = v4l2_subdev_call(cci_client->cci_subdev,
+		core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+	if (rc < 0) {
+		pr_err("%s: line %d rc = %d\n", __func__, __LINE__, rc);
+		return rc;
+	}
+	rc = cci_ctrl.status;	/* transfer status is reported separately from the ioctl rc */
+	if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE)
+		*data = buf[0];
+	else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD)
+		*data = buf[0] << 8 | buf[1];
+	else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B)
+		*data = buf[0] << 16 | buf[1] << 8 | buf[2];
+	else
+		*data = buf[0] << 24 | buf[1] << 16 |
+			buf[2] << 8 | buf[3];
+
+	return rc;
+}
+
+static int32_t cam_cci_i2c_write_table_cmd(
+	struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_setting *write_setting,
+	enum cam_cci_cmd_type cmd)	/* common helper: submits a register table to CCI with the given write command */
+{
+	int32_t rc = -EINVAL;
+	struct cam_cci_ctrl cci_ctrl;
+
+	if (!client || !write_setting)
+		return rc;
+
+	if (write_setting->addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| write_setting->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
+		|| write_setting->data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| write_setting->data_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
+		return rc;
+
+	cci_ctrl.cmd = cmd;
+	cci_ctrl.cci_info = client->cci_client;
+	cci_ctrl.cfg.cci_i2c_write_cfg.reg_setting =
+		write_setting->reg_setting;
+	cci_ctrl.cfg.cci_i2c_write_cfg.data_type = write_setting->data_type;
+	cci_ctrl.cfg.cci_i2c_write_cfg.addr_type = write_setting->addr_type;
+	cci_ctrl.cfg.cci_i2c_write_cfg.size = write_setting->size;
+	rc = v4l2_subdev_call(client->cci_client->cci_subdev,
+		core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+	if (rc < 0) {
+		pr_err("%s: line %d rc = %d\n", __func__, __LINE__, rc);
+		return rc;
+	}
+	rc = cci_ctrl.status;	/* transfer status reported separately from ioctl rc */
+	if (write_setting->delay > 20)	/* msleep is only accurate for longer delays; use usleep_range below 20ms */
+		msleep(write_setting->delay);
+	else if (write_setting->delay)
+		usleep_range(write_setting->delay * 1000, (write_setting->delay
+			* 1000) + 1000);
+
+	return rc;
+}
+
+
+int32_t cam_cci_i2c_write_table(
+	struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_setting *write_setting)	/* public wrapper: plain (non-burst) CCI register-table write */
+{
+	return cam_cci_i2c_write_table_cmd(client, write_setting,
+		MSM_CCI_I2C_WRITE);
+}
+
+static int32_t cam_cci_i2c_compare(struct cam_sensor_cci_client *client,
+	uint32_t addr, uint16_t data, uint16_t data_mask,
+	enum camera_sensor_i2c_type data_type,
+	enum camera_sensor_i2c_type addr_type)	/* single poll step: read addr and compare against data, ignoring bits set in data_mask */
+{
+	int32_t rc;
+	uint32_t reg_data = 0;
+
+	rc = cam_cci_i2c_read(client, addr, &reg_data,
+		addr_type, data_type);
+	if (rc < 0)
+		return rc;	/* read error propagated as-is (negative) */
+
+	reg_data = reg_data & 0xFFFF;	/* NOTE(review): truncates 3B/4B reads to 16 bits -- confirm intended for poll use */
+	if (data == (reg_data & ~data_mask))
+		return I2C_COMPARE_MATCH;
+	return I2C_COMPARE_MISMATCH;
+}
+
+int32_t cam_cci_i2c_poll(struct cam_sensor_cci_client *client,
+	uint32_t addr, uint16_t data, uint16_t data_mask,
+	enum camera_sensor_i2c_type data_type,
+	enum camera_sensor_i2c_type addr_type,
+	uint32_t delay_ms)	/* polls addr once per ms until match; delay_ms doubles as the attempt count */
+{
+	int32_t rc = -EINVAL;
+	int32_t i = 0;
+
+	CDBG("%s: addr: 0x%x data: 0x%x dt: %d\n",
+		__func__, addr, data, data_type);
+
+	if (delay_ms > MAX_POLL_DELAY_MS) {
+		pr_err("%s:%d invalid delay = %d max_delay = %d\n",
+			__func__, __LINE__, delay_ms, MAX_POLL_DELAY_MS);
+		return -EINVAL;
+	}
+	for (i = 0; i < delay_ms; i++) {	/* note: delay_ms == 0 returns -EINVAL without any read */
+		rc = cam_cci_i2c_compare(client,
+			addr, data, data_mask, data_type, addr_type);
+		if (!rc)
+			return rc;	/* I2C_COMPARE_MATCH (0): success */
+
+		usleep_range(1000, 1010);
+	}
+
+	/* If rc is 1 then read is successful but poll is failure */
+	if (rc == 1)
+		pr_err("%s:%d poll failed rc=%d(non-fatal)\n",
+			__func__, __LINE__, rc);
+
+	if (rc < 0)
+		pr_err("%s:%d poll failed rc=%d\n", __func__, __LINE__, rc);
+
+	return rc;
+}
+
+int32_t cam_sensor_cci_i2c_util(struct cam_sensor_cci_client *cci_client,
+	uint16_t cci_cmd)	/* forwards a bare CCI command (e.g. MSM_CCI_INIT/RELEASE) to the CCI subdev */
+{
+	int32_t rc = 0;
+	struct cam_cci_ctrl cci_ctrl;	/* only cmd/cci_info are set; cfg union is unused for these commands */
+
+	CDBG("%s line %d\n", __func__, __LINE__);
+	cci_ctrl.cmd = cci_cmd;
+	cci_ctrl.cci_info = cci_client;
+	rc = v4l2_subdev_call(cci_client->cci_subdev,
+		core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+	if (rc < 0) {
+		pr_err("%s line %d rc = %d\n", __func__, __LINE__, rc);
+		return rc;
+	}
+	return cci_ctrl.status;	/* command status is reported separately from the ioctl rc */
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
new file mode 100644
index 0000000..1261c4b
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
@@ -0,0 +1,78 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_SENSOR_I2C_H_
+#define _CAM_SENSOR_I2C_H_
+
+#include <linux/delay.h>
+#include <media/v4l2-subdev.h>
+#include <media/cam_sensor.h>
+#include <media/cam_sensor.h>	/* NOTE(review): duplicate include of media/cam_sensor.h */
+#include "cam_cci_dev.h"
+#include "cam_sensor_io.h"
+
+#define I2C_POLL_TIME_MS 5
+#define MAX_POLL_DELAY_MS 100
+
+#define I2C_COMPARE_MATCH 0
+#define I2C_COMPARE_MISMATCH 1
+
+/**
+ * @client: CCI client structure
+ * @addr: I2C register address
+ * @data: I2C data
+ * @addr_type: I2c address type
+ * @data_type: I2C data type
+ *
+ * This API handles CCI read
+ */
+int32_t cam_cci_i2c_read(struct cam_sensor_cci_client *client,
+	uint32_t addr, uint32_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type);
+
+/**
+ * @client: IO master information
+ * @write_setting: register table to write
+ *
+ * This API handles CCI random write
+ */
+int32_t cam_cci_i2c_write_table(
+	struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_setting *write_setting);
+
+/**
+ * @cci_client: CCI client structure
+ * @cci_cmd: CCI command type
+ *
+ * Does I2C call to I2C functionalities
+ */
+int32_t cam_sensor_cci_i2c_util(struct cam_sensor_cci_client *cci_client,
+	uint16_t cci_cmd);
+
+/**
+ * @client: CCI client structure
+ * @addr: I2C address
+ * @data: I2C data
+ * @data_mask: I2C data mask
+ * @data_type: I2C data type
+ * @addr_type: I2C addr type
+ * @delay_ms: Delay in milli seconds
+ *
+ * This API implements CCI based I2C poll
+ */
+int32_t cam_cci_i2c_poll(struct cam_sensor_cci_client *client,
+	uint32_t addr, uint16_t data, uint16_t data_mask,
+	enum camera_sensor_i2c_type data_type,
+	enum camera_sensor_i2c_type addr_type,
+	uint32_t delay_ms);
+
+#endif /* _CAM_SENSOR_I2C_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
new file mode 100644
index 0000000..f889abc
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
@@ -0,0 +1,111 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_sensor_io.h"
+#include "cam_sensor_i2c.h"
+
+int32_t camera_io_dev_poll(struct camera_io_master *io_master_info,
+	uint32_t addr, uint16_t data, uint32_t data_mask,
+	enum camera_sensor_i2c_type data_type,
+	enum camera_sensor_i2c_type addr_type,
+	uint32_t delay_ms)	/* master-type dispatch for register polling; only CCI is implemented */
+{
+	int16_t mask = data_mask & 0xFF;	/* NOTE(review): truncates the 32-bit mask to its low byte -- 0xFFFF looks intended; confirm */
+
+	if (!io_master_info) {
+		pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	if (io_master_info->master_type == CCI_MASTER) {
+		return cam_cci_i2c_poll(io_master_info->cci_client,
+			addr, data, mask, data_type, addr_type, delay_ms);
+	} else {
+		pr_err("%s:%d Invalid Comm. Master:%d\n", __func__,
+			__LINE__, io_master_info->master_type);
+		return -EINVAL;
+	}
+}
+
+int32_t camera_io_dev_read(struct camera_io_master *io_master_info,
+	uint32_t addr, uint32_t *data,
+	enum camera_sensor_i2c_type data_type)	/* master-type dispatch for register read; only CCI is implemented */
+{
+	if (!io_master_info) {
+		pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	if (io_master_info->master_type == CCI_MASTER) {
+		return cam_cci_i2c_read(io_master_info->cci_client,
+			addr, data, data_type, data_type);	/* NOTE(review): data_type is also passed as addr_type -- assumes address width equals data width; confirm */
+	} else {
+		pr_err("%s:%d Invalid Comm. Master:%d\n", __func__,
+			__LINE__, io_master_info->master_type);
+		return -EINVAL;
+	}
+}
+
+int32_t camera_io_dev_write(struct camera_io_master *io_master_info,
+	struct cam_sensor_i2c_reg_setting *write_setting)	/* master-type dispatch for table write; only CCI is implemented */
+{
+	if (!write_setting || !io_master_info) {
+		pr_err("Input parameters not valid ws: %pK ioinfo: %pK",
+			write_setting, io_master_info);
+		return -EINVAL;
+	}
+
+	if (io_master_info->master_type == CCI_MASTER) {
+		return cam_cci_i2c_write_table(io_master_info,
+			write_setting);
+	} else {
+		pr_err("%s:%d Invalid Comm. Master:%d\n", __func__,
+			__LINE__, io_master_info->master_type);
+		return -EINVAL;
+	}
+}
+
+int32_t camera_io_init(struct camera_io_master *io_master_info)	/* binds the CCI subdev and issues MSM_CCI_INIT; only CCI is implemented */
+{
+	if (!io_master_info) {
+		pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	if (io_master_info->master_type == CCI_MASTER) {
+		io_master_info->cci_client->cci_subdev =
+			cam_cci_get_subdev();	/* late-bind the CCI subdev at init time */
+		return cam_sensor_cci_i2c_util(io_master_info->cci_client,
+			MSM_CCI_INIT);
+	} else {
+		pr_err("%s:%d Invalid Comm. Master:%d\n", __func__,
+			__LINE__, io_master_info->master_type);
+		return -EINVAL;
+	}
+}
+
+int32_t camera_io_release(struct camera_io_master *io_master_info)	/* counterpart to camera_io_init(): issues MSM_CCI_RELEASE */
+{
+	if (!io_master_info) {
+		pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	if (io_master_info->master_type == CCI_MASTER) {
+		return cam_sensor_cci_i2c_util(io_master_info->cci_client,
+			MSM_CCI_RELEASE);
+	} else {
+		pr_err("%s:%d Invalid Comm. Master:%d\n", __func__,
+			__LINE__, io_master_info->master_type);
+		return -EINVAL;
+	}
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
new file mode 100644
index 0000000..757ac17
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
@@ -0,0 +1,89 @@
/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef _CAM_SENSOR_IO_H_
#define _CAM_SENSOR_IO_H_

#include <media/cam_sensor.h>

#include "cam_sensor_cmn_header.h"

/* Supported communication-master transport types. */
#define CCI_MASTER 1
#define I2C_MASTER 2
#define SPI_MASTER 3

/**
 * struct camera_io_master - per-device communication-master handle
 * @master_type: transport in use (CCI_MASTER / I2C_MASTER / SPI_MASTER)
 * @client: I2C client information structure
 * @cci_client: CCI client information structure
 * @spi_client: SPI client information structure
 */
struct camera_io_master {
	int master_type;
	struct i2c_client *client;
	struct cam_sensor_cci_client *cci_client;
	struct cam_sensor_spi_client *spi_client;
};

/**
 * camera_io_dev_read() - read one register value from the device
 * @io_master_info: I2C/SPI master information
 * @addr: I2C address
 * @data: I2C data
 * @data_type: I2C data type
 *
 * This API abstracts read functionality based on master type
 */
int32_t camera_io_dev_read(struct camera_io_master *io_master_info,
	uint32_t addr, uint32_t *data,
	enum camera_sensor_i2c_type data_type);

/**
 * camera_io_init() - bring up the communication master
 * @io_master_info: I2C/SPI master information
 *
 * This API initializes the I2C/SPI master based on master type
 */
int32_t camera_io_init(struct camera_io_master *io_master_info);

/**
 * camera_io_release() - tear down the communication master
 * @io_master_info: I2C/SPI master information
 *
 * This API releases the I2C/SPI master based on master type
 */
int32_t camera_io_release(struct camera_io_master *io_master_info);

/**
 * camera_io_dev_write() - write a register table to the device
 * @io_master_info: I2C/SPI master information
 * @write_setting: write settings information
 *
 * This API abstracts write functionality based on master type
 */
int32_t camera_io_dev_write(struct camera_io_master *io_master_info,
	struct cam_sensor_i2c_reg_setting *write_setting);

/**
 * camera_io_dev_poll() - poll a register until (value & mask) matches
 * @io_master_info: I2C/SPI master information
 * @addr: I2C address
 * @data: I2C data
 * @data_mask: I2C data mask
 * @data_type: I2C data type
 * @addr_type: I2C address type
 * @delay_ms: delay in milli seconds
 *
 * This API abstracts poll functionality based on master type
 */
int32_t camera_io_dev_poll(struct camera_io_master *io_master_info,
	uint32_t addr, uint16_t data, uint32_t data_mask,
	enum camera_sensor_i2c_type data_type,
	enum camera_sensor_i2c_type addr_type,
	uint32_t delay_ms);

#endif /* _CAM_SENSOR_IO_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/Makefile
new file mode 100644
index 0000000..766828e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/Makefile
@@ -0,0 +1,6 @@
# Header search paths shared by the sensor-utils objects.
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci

# Sensor utility helpers, built when the SPECTRA camera driver is enabled.
obj-$(CONFIG_SPECTRA_CAMERA) +=  cam_sensor_util.o cam_sensor_soc_api.o
\ No newline at end of file
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h
new file mode 100644
index 0000000..e5e4872
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h
@@ -0,0 +1,375 @@
/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _CAM_SENSOR_CMN_HEADER_
#define _CAM_SENSOR_CMN_HEADER_

#include <linux/i2c.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <media/cam_sensor.h>
#include <media/cam_req_mgr.h>

/* Upper bounds for per-sensor regulator and power-sequence tables. */
#define MAX_REGULATOR 5
#define MAX_POWER_CONFIG 12

/* Depth of the per-frame i2c settings ring (see struct i2c_data_settings). */
#define MAX_PER_FRAME_ARRAY 8

/* Device names used when registering the sensor-module subdevices. */
#define CAM_SENSOR_NAME    "cam-sensor"
#define CAM_ACTUATOR_NAME  "cam-actuator"
#define CAM_CSIPHY_NAME    "cam-csiphy"

#define MAX_SYSTEM_PIPELINE_DELAY 2

/* Packet opcode used for no-op/NOP command packets. */
#define CAM_PKT_NOP_OPCODE 127

/* Command types carried in sensor command buffers. */
enum camera_sensor_cmd_type {
	CAMERA_SENSOR_CMD_TYPE_INVALID,
	CAMERA_SENSOR_CMD_TYPE_PROBE,
	CAMERA_SENSOR_CMD_TYPE_PWR_UP,
	CAMERA_SENSOR_CMD_TYPE_PWR_DOWN,
	CAMERA_SENSOR_CMD_TYPE_I2C_INFO,
	CAMERA_SENSOR_CMD_TYPE_I2C_RNDM_WR,
	CAMERA_SENSOR_CMD_TYPE_I2C_RNDM_RD,
	CAMERA_SENSOR_CMD_TYPE_I2C_CONT_WR,
	CAMERA_SENSOR_CMD_TYPE_I2C_CONT_RD,
	CAMERA_SENSOR_CMD_TYPE_WAIT,
	CAMERA_SENSOR_CMD_TYPE_MAX,
};

/* I2C operation variants (random/burst/sequential, with/without verify). */
enum camera_sensor_i2c_op_code {
	CAMERA_SENSOR_I2C_OP_INVALID,
	CAMERA_SENSOR_I2C_OP_RNDM_WR,
	CAMERA_SENSOR_I2C_OP_RNDM_WR_VERF,
	CAMERA_SENSOR_I2C_OP_CONT_WR_BRST,
	CAMERA_SENSOR_I2C_OP_CONT_WR_BRST_VERF,
	CAMERA_SENSOR_I2C_OP_CONT_WR_SEQN,
	CAMERA_SENSOR_I2C_OP_CONT_WR_SEQN_VERF,
	CAMERA_SENSOR_I2C_OP_MAX,
};

/* Wait-command variants (conditional vs. unconditional hw/sw waits). */
enum camera_sensor_wait_op_code {
	CAMERA_SENSOR_WAIT_OP_INVALID,
	CAMERA_SENSOR_WAIT_OP_COND,
	CAMERA_SENSOR_WAIT_OP_HW_UCND,
	CAMERA_SENSOR_WAIT_OP_SW_UCND,
	CAMERA_SENSOR_WAIT_OP_MAX,
};

/* Width of an I2C address or data word, in bytes (BYTE=1 ... DWORD=4). */
enum camera_sensor_i2c_type {
	CAMERA_SENSOR_I2C_TYPE_INVALID,
	CAMERA_SENSOR_I2C_TYPE_BYTE,
	CAMERA_SENSOR_I2C_TYPE_WORD,
	CAMERA_SENSOR_I2C_TYPE_3B,
	CAMERA_SENSOR_I2C_TYPE_DWORD,
	CAMERA_SENSOR_I2C_TYPE_MAX,
};

/* I2C bus speed classes. */
enum i2c_freq_mode {
	I2C_STANDARD_MODE,
	I2C_FAST_MODE,
	I2C_CUSTOM_MODE,
	I2C_FAST_PLUS_MODE,
	I2C_MAX_MODES,
};

/* Sensor mounting roll, in degrees. */
enum position_roll {
	ROLL_0       = 0,
	ROLL_90      = 90,
	ROLL_180     = 180,
	ROLL_270     = 270,
	ROLL_INVALID = 360,
};

/* Sensor mounting yaw, in degrees (front vs. rear facing). */
enum position_yaw {
	FRONT_CAMERA_YAW = 0,
	REAR_CAMERA_YAW  = 180,
	INVALID_YAW      = 360,
};

/* Sensor mounting pitch, in degrees. */
enum position_pitch {
	LEVEL_PITCH    = 0,
	INVALID_PITCH  = 360,
};

/* Sub-modules a sensor may reference via subdev_id/subdev_intf tables. */
enum sensor_sub_module {
	SUB_MODULE_SENSOR,
	SUB_MODULE_ACTUATOR,
	SUB_MODULE_EEPROM,
	SUB_MODULE_LED_FLASH,
	SUB_MODULE_CSID,
	SUB_MODULE_CSIPHY,
	SUB_MODULE_OIS,
	SUB_MODULE_EXT,
	SUB_MODULE_MAX,
};

/* Resources toggled by a power-up/power-down sequence entry. */
enum msm_camera_power_seq_type {
	SENSOR_MCLK,
	SENSOR_VANA,
	SENSOR_VDIG,
	SENSOR_VIO,
	SENSOR_VAF,
	SENSOR_VAF_PWDM,
	SENSOR_CUSTOM_REG1,
	SENSOR_CUSTOM_REG2,
	SENSOR_RESET,
	SENSOR_STANDBY,
	SENSOR_CUSTOM_GPIO1,
	SENSOR_CUSTOM_GPIO2,
	SENSOR_SEQ_TYPE_MAX,
};

/* Opcodes carried in sensor command packets from userspace. */
enum cam_sensor_packet_opcodes {
	CAM_SENSOR_PACKET_OPCODE_SENSOR_STREAMON,
	CAM_SENSOR_PACKET_OPCODE_SENSOR_UPDATE,
	CAM_SENSOR_PACKET_OPCODE_SENSOR_INITIAL_CONFIG,
	CAM_SENSOR_PACKET_OPCODE_SENSOR_PROBE
};

/* Opcodes carried in actuator command packets from userspace. */
enum cam_actuator_packet_opcodes {
	CAM_ACTUATOR_PACKET_OPCODE_INIT,
	CAM_ACTUATOR_PACKET_AUTO_MOVE_LENS,
	CAM_ACTUATOR_PACKET_MANUAL_MOVE_LENS
};

/* Bus-bandwidth voting use cases. */
enum msm_bus_perf_setting {
	S_INIT,
	S_PREVIEW,
	S_VIDEO,
	S_CAPTURE,
	S_ZSL,
	S_STEREO_VIDEO,
	S_STEREO_CAPTURE,
	S_DEFAULT,
	S_LIVESHOT,
	S_DUAL,
	S_EXIT
};

/* Underlying Linux device model a camera component is attached through. */
enum msm_camera_device_type_t {
	MSM_CAMERA_I2C_DEVICE,
	MSM_CAMERA_PLATFORM_DEVICE,
	MSM_CAMERA_SPI_DEVICE,
};

/* Hardware CCI master instances. */
enum cci_i2c_master_t {
	MASTER_0,
	MASTER_1,
	MASTER_MAX,
};

/* Whether a regulator is looked up by its standard or custom DT name. */
enum camera_vreg_type {
	VREG_TYPE_DEFAULT,
	VREG_TYPE_CUSTOM,
};

/* Kind of operation stored in an i2c_settings_list entry. */
enum cam_sensor_i2c_cmd_type {
	CAM_SENSOR_I2C_WRITE_RANDOM,
	CAM_SENSOR_I2C_READ,
	CAM_SENSOR_I2C_POLL
};

/* Generic header shared by command-buffer payload structures. */
struct common_header {
	uint16_t    first_word;
	uint8_t     third_byte;
	uint8_t     cmd_type;
};

/* One voltage regulator requirement (name, range, load, settle delay). */
struct camera_vreg_t {
	const char *reg_name;
	int min_voltage;
	int max_voltage;
	int op_mode;
	uint32_t delay;
	const char *custom_vreg_name;
	enum camera_vreg_type type;
};

/* One entry of a module power-up/down sequence as parsed from DT. */
struct cam_sensor_module_power_setting {
	enum msm_camera_power_seq_type seq_type;
	unsigned short seq_val;
	uint32_t config_val_low;
	uint32_t config_val_high;
	unsigned short delay;
};

/* GPIO numbers indexed by power-sequence type, with per-slot validity. */
struct msm_camera_gpio_num_info {
	uint16_t gpio_num[SENSOR_SEQ_TYPE_MAX];
	uint8_t valid[SENSOR_SEQ_TYPE_MAX];
};

/* One clock requirement: name, target rate and post-enable delay (ms). */
struct msm_cam_clk_info {
	const char *clk_name;
	long clk_rate;
	uint32_t delay;
};

/* pinctrl handle plus its active/suspend states. */
struct msm_pinctrl_info {
	struct pinctrl *pinctrl;
	struct pinctrl_state *gpio_state_active;
	struct pinctrl_state *gpio_state_suspend;
	bool use_pinctrl;
};

/* Single register write: address, value, optional mask and delay. */
struct cam_sensor_i2c_reg_array {
	uint32_t reg_addr;
	uint32_t reg_data;
	uint32_t delay;
	uint32_t data_mask;
};

/* A table of register writes sharing one addr/data width and delay. */
struct cam_sensor_i2c_reg_setting {
	struct cam_sensor_i2c_reg_array *reg_setting;
	unsigned short size;
	enum camera_sensor_i2c_type addr_type;
	enum camera_sensor_i2c_type data_type;
	unsigned short delay;
};

/* List node wrapping one reg-setting table with its operation type. */
struct i2c_settings_list {
	struct cam_sensor_i2c_reg_setting i2c_settings;
	enum cam_sensor_i2c_cmd_type op_code;
	struct list_head list;
};

/* A request's worth of i2c settings, tagged with its request id. */
struct i2c_settings_array {
	struct list_head list_head;
	int32_t is_settings_valid;
	int64_t request_id;
};

/* Init-time settings plus the per-frame settings ring. */
struct i2c_data_settings {
	struct i2c_settings_array init_settings;
	struct i2c_settings_array *per_frame;
};

/* Aggregated power resources (regulators, clocks, gpios, pinctrl)
 * needed to power a sensor module up and down. */
struct cam_sensor_power_ctrl_t {
	struct device *dev;
	struct cam_sensor_power_setting *power_setting;
	uint16_t power_setting_size;
	struct cam_sensor_power_setting *power_down_setting;
	uint16_t power_down_setting_size;
	struct msm_camera_gpio_conf *gpio_conf;
	struct camera_vreg_t *cam_vreg;
	int num_vreg;
	struct clk **clk_ptr;
	struct msm_cam_clk_info *clk_info;
	struct msm_pinctrl_info pinctrl_info;
	uint8_t cam_pinctrl_status;
	size_t clk_info_size;
};

/* Slave address and chip-id register used to probe the sensor. */
struct cam_camera_slave_info {
	uint16_t sensor_slave_addr;
	uint16_t sensor_id_reg_addr;
	uint16_t sensor_id;
	uint16_t sensor_id_mask;
};

/* Basic init parameters (supported modes, mount angle). */
struct msm_sensor_init_params {
	int modes_supported;
	unsigned int sensor_mount_angle;
};

/* Logical camera slot indices. */
enum msm_sensor_camera_id_t {
	CAMERA_0,
	CAMERA_1,
	CAMERA_2,
	CAMERA_3,
	MAX_CAMERAS,
};

/* Chip-id register/value/mask triple used to match a sensor. */
struct msm_sensor_id_info_t {
	unsigned short sensor_id_reg_addr;
	unsigned short sensor_id;
	unsigned short sensor_id_mask;
};

/* Pixel output format of the sensor. */
enum msm_sensor_output_format_t {
	MSM_SENSOR_BAYER,
	MSM_SENSOR_YCBCR,
	MSM_SENSOR_META,
};

/* One runtime power-sequence step with optional opaque payload. */
struct cam_sensor_power_setting {
	enum msm_camera_power_seq_type seq_type;
	unsigned short seq_val;
	long config_val;
	unsigned short delay;
	void *data[10];
};

/* Fixed-capacity up/down sequences; the pointers select the active one. */
struct cam_sensor_power_setting_array {
	struct cam_sensor_power_setting  power_setting_a[MAX_POWER_CONFIG];
	struct cam_sensor_power_setting *power_setting;
	unsigned short size;
	struct cam_sensor_power_setting  power_down_setting_a[MAX_POWER_CONFIG];
	struct cam_sensor_power_setting *power_down_setting;
	unsigned short size_down;
};

/* Everything required to probe one sensor slave. */
struct msm_camera_sensor_slave_info {
	enum msm_sensor_camera_id_t camera_id;
	unsigned short slave_addr;
	enum i2c_freq_mode i2c_freq_mode;
	enum camera_sensor_i2c_type addr_type;
	struct msm_sensor_id_info_t sensor_id_info;
	struct cam_sensor_power_setting_array power_setting_array;
	unsigned char  is_init_params_valid;
	enum msm_sensor_output_format_t output_format;
};

/* Board-level sensor description assembled from DT. */
struct cam_sensor_board_info {
	struct cam_camera_slave_info slave_info;
	int32_t sensor_mount_angle;
	int32_t secure_mode;
	int modes_supported;
	int32_t pos_roll;
	int32_t pos_yaw;
	int32_t pos_pitch;
	int32_t  subdev_id[SUB_MODULE_MAX];
	int32_t  subdev_intf[SUB_MODULE_MAX];
	const char *misc_regulator;
	struct cam_sensor_power_ctrl_t power_info;
};

/* Canonical regulator slots for a camera module. */
enum msm_camera_vreg_name_t {
	CAM_VDIG,
	CAM_VIO,
	CAM_VANA,
	CAM_VAF,
	CAM_V_CUSTOM1,
	CAM_V_CUSTOM2,
	CAM_VREG_MAX,
};

/* GPIO/gpiomux tables describing a module's pin configuration. */
struct msm_camera_gpio_conf {
	void *cam_gpiomux_conf_tbl;
	uint8_t cam_gpiomux_conf_tbl_size;
	struct gpio *cam_gpio_common_tbl;
	uint8_t cam_gpio_common_tbl_size;
	struct gpio *cam_gpio_req_tbl;
	uint8_t cam_gpio_req_tbl_size;
	uint32_t gpio_no_mux;
	uint32_t *camera_off_table;
	uint8_t camera_off_table_size;
	uint32_t *camera_on_table;
	uint8_t camera_on_table_size;
	struct msm_camera_gpio_num_info *gpio_num_info;
};

#endif /* _CAM_SENSOR_CMN_HEADER_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_soc_api.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_soc_api.c
new file mode 100644
index 0000000..2eed9ce
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_soc_api.c
@@ -0,0 +1,1331 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/msm-bus.h>
+#include "cam_sensor_soc_api.h"
+
+#define NO_SET_RATE -1
+#define INIT_RATE -2
+
+#ifdef CONFIG_CAM_SOC_API_DBG
+#define CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+int msm_cam_clk_sel_src(struct device *dev, struct msm_cam_clk_info *clk_info,
+		struct msm_cam_clk_info *clk_src_info, int num_clk)
+{
+	int i;
+	int rc = 0;
+	struct clk *mux_clk = NULL;
+	struct clk *src_clk = NULL;
+
+	for (i = 0; i < num_clk; i++) {
+		if (clk_src_info[i].clk_name) {
+			mux_clk = clk_get(dev, clk_info[i].clk_name);
+			if (IS_ERR(mux_clk)) {
+				pr_err("%s get failed\n",
+					 clk_info[i].clk_name);
+				continue;
+			}
+			src_clk = clk_get(dev, clk_src_info[i].clk_name);
+			if (IS_ERR(src_clk)) {
+				pr_err("%s get failed\n",
+					clk_src_info[i].clk_name);
+				continue;
+			}
+			clk_set_parent(mux_clk, src_clk);
+		}
+	}
+	return rc;
+}
+
/*
 * msm_cam_clk_enable - acquire+enable or disable+release a set of clocks.
 * @dev:      device used to look up the clocks by name
 * @clk_info: table of clock names/rates/delays, num_clk entries
 * @clk_ptr:  caller array that receives (or holds) the clk handles
 * @num_clk:  number of entries in @clk_info / @clk_ptr
 * @enable:   non-zero: get/rate/prepare/enable each clock in table order;
 *            zero: disable/unprepare/put each clock in reverse order
 *
 * A clk_rate > 0 is rounded and set; INIT_RATE (-2) means "set a default
 * rate only if the clock currently reports 0".  On any failure during
 * enable, all previously acquired clocks are unwound.  Returns 0 on
 * success or a negative errno.
 */
int msm_cam_clk_enable(struct device *dev, struct msm_cam_clk_info *clk_info,
		struct clk **clk_ptr, int num_clk, int enable)
{
	int i;
	int rc = 0;
	long clk_rate;

	if (enable) {
		for (i = 0; i < num_clk; i++) {
			CDBG("%s enable %s\n", __func__, clk_info[i].clk_name);
			clk_ptr[i] = clk_get(dev, clk_info[i].clk_name);
			if (IS_ERR(clk_ptr[i])) {
				pr_err("%s get failed\n", clk_info[i].clk_name);
				rc = PTR_ERR(clk_ptr[i]);
				goto cam_clk_get_err;
			}
			if (clk_info[i].clk_rate > 0) {
				/* Explicit rate requested: round then set. */
				clk_rate = clk_round_rate(clk_ptr[i],
					clk_info[i].clk_rate);
				if (clk_rate < 0) {
					pr_err("%s round failed\n",
						   clk_info[i].clk_name);
					goto cam_clk_set_err;
				}
				rc = clk_set_rate(clk_ptr[i],
					clk_rate);
				if (rc < 0) {
					pr_err("%s set failed\n",
						clk_info[i].clk_name);
					goto cam_clk_set_err;
				}

			} else if (clk_info[i].clk_rate == INIT_RATE) {
				/* Only program a rate if none is set yet. */
				clk_rate = clk_get_rate(clk_ptr[i]);
				if (clk_rate == 0) {
					clk_rate =
						  clk_round_rate(clk_ptr[i], 0);
					if (clk_rate < 0) {
						pr_err("%s round rate failed\n",
							  clk_info[i].clk_name);
						goto cam_clk_set_err;
					}
					rc = clk_set_rate(clk_ptr[i],
								clk_rate);
					if (rc < 0) {
						pr_err("%s set rate failed\n",
							  clk_info[i].clk_name);
						goto cam_clk_set_err;
					}
				}
			}
			rc = clk_prepare(clk_ptr[i]);
			if (rc < 0) {
				pr_err("%s prepare failed\n",
					   clk_info[i].clk_name);
				goto cam_clk_prepare_err;
			}

			rc = clk_enable(clk_ptr[i]);
			if (rc < 0) {
				pr_err("%s enable failed\n",
					   clk_info[i].clk_name);
				goto cam_clk_enable_err;
			}
			/* Settle delay; msleep for long waits, usleep_range
			 * for short ones (<= 20 ms). */
			if (clk_info[i].delay > 20)
				msleep(clk_info[i].delay);
			else if (clk_info[i].delay)
				usleep_range(clk_info[i].delay * 1000,
					(clk_info[i].delay * 1000) + 1000);
		}
	} else {
		/* Disable in reverse order of enabling. */
		for (i = num_clk - 1; i >= 0; i--) {
			if (clk_ptr[i] != NULL) {
				CDBG("%s disable %s\n", __func__,
					clk_info[i].clk_name);
				clk_disable(clk_ptr[i]);
				clk_unprepare(clk_ptr[i]);
				clk_put(clk_ptr[i]);
			}
		}
	}

	return rc;

	/* Error unwind: each label cleans up the stage that succeeded for
	 * clock i, then the trailing loop tears down clocks 0..i-1. */
cam_clk_enable_err:
	clk_unprepare(clk_ptr[i]);
cam_clk_prepare_err:
cam_clk_set_err:
	clk_put(clk_ptr[i]);
cam_clk_get_err:
	for (i--; i >= 0; i--) {
		if (clk_ptr[i] != NULL) {
			clk_disable(clk_ptr[i]);
			clk_unprepare(clk_ptr[i]);
			clk_put(clk_ptr[i]);
		}
	}

	return rc;
}
+
/*
 * msm_camera_config_vreg - get/configure or release a set of regulators.
 * @dev:          device used for regulator_get()
 * @cam_vreg:     table of regulator descriptors, num_vreg entries
 * @num_vreg:     number of entries in @cam_vreg / @reg_ptr
 * @vreg_seq:     optional ordering of indices into @cam_vreg (NULL = table
 *                order); out-of-range indices are silently skipped
 * @num_vreg_seq: length of @vreg_seq (0 = use num_vreg)
 * @reg_ptr:      caller array that receives (or holds) regulator handles
 * @config:       non-zero: get + set voltage/load; zero: undo in reverse
 *
 * Returns 0 on success, -EINVAL on a bad sequence, -ENODEV after a
 * failure (with previously configured regulators unwound).
 */
int msm_camera_config_vreg(struct device *dev, struct camera_vreg_t *cam_vreg,
		int num_vreg, enum msm_camera_vreg_name_t *vreg_seq,
		int num_vreg_seq, struct regulator **reg_ptr, int config)
{
	int i = 0, j = 0;
	int rc = 0;
	struct camera_vreg_t *curr_vreg;

	if (num_vreg_seq > num_vreg) {
		pr_err("%s:%d vreg sequence invalid\n", __func__, __LINE__);
		return -EINVAL;
	}
	if (!num_vreg_seq)
		num_vreg_seq = num_vreg;

	if (config) {
		for (i = 0; i < num_vreg_seq; i++) {
			/* j is the effective table index: either the next
			 * entry of vreg_seq or simply i. */
			if (vreg_seq) {
				j = vreg_seq[i];
				if (j >= num_vreg)
					continue;
			} else {
				j = i;
			}
			curr_vreg = &cam_vreg[j];
			reg_ptr[j] = regulator_get(dev,
				curr_vreg->reg_name);
			if (IS_ERR(reg_ptr[j])) {
				pr_err("%s: %s get failed\n",
					 __func__,
					 curr_vreg->reg_name);
				reg_ptr[j] = NULL;
				goto vreg_get_fail;
			}
			if (regulator_count_voltages(reg_ptr[j]) > 0) {
				rc = regulator_set_voltage(
					reg_ptr[j],
					curr_vreg->min_voltage,
					curr_vreg->max_voltage);
				if (rc < 0) {
					pr_err("%s: %s set voltage failed\n",
						__func__,
						curr_vreg->reg_name);
					goto vreg_set_voltage_fail;
				}
				if (curr_vreg->op_mode >= 0) {
					rc = regulator_set_load(
						reg_ptr[j],
						curr_vreg->op_mode);
					if (rc < 0) {
						pr_err(
						"%s:%s set optimum mode fail\n",
						__func__,
						curr_vreg->reg_name);
						goto vreg_set_opt_mode_fail;
					}
				}
			}
		}
	} else {
		/* Unconfigure in reverse sequence order. */
		for (i = num_vreg_seq-1; i >= 0; i--) {
			if (vreg_seq) {
				j = vreg_seq[i];
				if (j >= num_vreg)
					continue;
			} else {
				j = i;
			}
			curr_vreg = &cam_vreg[j];
			if (reg_ptr[j]) {
				if (regulator_count_voltages(reg_ptr[j]) > 0) {
					if (curr_vreg->op_mode >= 0) {
						regulator_set_load(
							reg_ptr[j], 0);
					}
					regulator_set_voltage(
						reg_ptr[j], 0, curr_vreg->
						max_voltage);
				}
				regulator_put(reg_ptr[j]);
				reg_ptr[j] = NULL;
			}
		}
	}

	return 0;

	/* Error unwind.  The labels below undo the failed entry j, then
	 * fall through into vreg_get_fail, whose loop walks backwards over
	 * the already-configured entries, jumping back up to vreg_unconfig
	 * for each one (a loop implemented via goto). */
vreg_unconfig:
	if (regulator_count_voltages(reg_ptr[j]) > 0)
		regulator_set_load(reg_ptr[j], 0);

vreg_set_opt_mode_fail:
	if (regulator_count_voltages(reg_ptr[j]) > 0)
		regulator_set_voltage(reg_ptr[j], 0,
			curr_vreg->max_voltage);

vreg_set_voltage_fail:
	regulator_put(reg_ptr[j]);
	reg_ptr[j] = NULL;

vreg_get_fail:
	for (i--; i >= 0; i--) {
		if (vreg_seq) {
			j = vreg_seq[i];
			if (j >= num_vreg)
				continue;
		} else {
			j = i;
		}
		curr_vreg = &cam_vreg[j];
		goto vreg_unconfig;
	}

	return -ENODEV;
}
+
/*
 * msm_camera_enable_vreg - enable or disable already-configured regulators.
 * @dev:          unused here; kept for API symmetry with config_vreg
 * @cam_vreg:     table of regulator descriptors (for names and delays)
 * @num_vreg:     number of entries in @cam_vreg / @reg_ptr
 * @vreg_seq:     optional ordering of indices into @cam_vreg (NULL = table
 *                order); out-of-range indices are silently skipped
 * @num_vreg_seq: length of @vreg_seq (0 = use num_vreg)
 * @reg_ptr:      regulator handles previously filled by config_vreg
 * @enable:       non-zero to enable in order, zero to disable in reverse
 *
 * On an enable failure, previously enabled regulators are disabled again.
 * Returns 0 on success or the failing regulator_enable() errno.
 */
int msm_camera_enable_vreg(struct device *dev, struct camera_vreg_t *cam_vreg,
		int num_vreg, enum msm_camera_vreg_name_t *vreg_seq,
		int num_vreg_seq, struct regulator **reg_ptr, int enable)
{
	int i = 0, j = 0, rc = 0;

	if (num_vreg_seq > num_vreg) {
		pr_err("%s:%d vreg sequence invalid\n", __func__, __LINE__);
		return -EINVAL;
	}
	if (!num_vreg_seq)
		num_vreg_seq = num_vreg;

	if (enable) {
		for (i = 0; i < num_vreg_seq; i++) {
			if (vreg_seq) {
				j = vreg_seq[i];
				if (j >= num_vreg)
					continue;
			} else
				j = i;
			if (IS_ERR(reg_ptr[j])) {
				pr_err("%s: %s null regulator\n",
					__func__, cam_vreg[j].reg_name);
				goto disable_vreg;
			}
			rc = regulator_enable(reg_ptr[j]);
			if (rc < 0) {
				pr_err("%s: %s enable failed\n",
					__func__, cam_vreg[j].reg_name);
				goto disable_vreg;
			}
			/* Post-enable settle delay from the vreg table. */
			if (cam_vreg[j].delay > 20)
				msleep(cam_vreg[j].delay);
			else if (cam_vreg[j].delay)
				usleep_range(cam_vreg[j].delay * 1000,
					(cam_vreg[j].delay * 1000) + 1000);
		}
	} else {
		/* Disable in reverse sequence order. */
		for (i = num_vreg_seq-1; i >= 0; i--) {
			if (vreg_seq) {
				j = vreg_seq[i];
				if (j >= num_vreg)
					continue;
			} else
				j = i;
			regulator_disable(reg_ptr[j]);
			if (cam_vreg[j].delay > 20)
				msleep(cam_vreg[j].delay);
			else if (cam_vreg[j].delay)
				usleep_range(cam_vreg[j].delay * 1000,
					(cam_vreg[j].delay * 1000) + 1000);
		}
	}

	return rc;
disable_vreg:
	/* Unwind: disable everything enabled before the failure, in
	 * reverse order, honoring the per-vreg settle delays. */
	for (i--; i >= 0; i--) {
		if (vreg_seq) {
			j = vreg_seq[i];
			if (j >= num_vreg)
				continue;
		} else
			j = i;
		regulator_disable(reg_ptr[j]);
		if (cam_vreg[j].delay > 20)
			msleep(cam_vreg[j].delay);
		else if (cam_vreg[j].delay)
			usleep_range(cam_vreg[j].delay * 1000,
				(cam_vreg[j].delay * 1000) + 1000);
	}

	return rc;
}
+
+int msm_camera_set_gpio_table(struct msm_gpio_set_tbl *gpio_tbl,
+	uint8_t gpio_tbl_size, int gpio_en)
+{
+	int rc = 0, i;
+
+	if (gpio_en) {
+		for (i = 0; i < gpio_tbl_size; i++) {
+			gpio_set_value_cansleep(gpio_tbl[i].gpio,
+				gpio_tbl[i].flags);
+			usleep_range(gpio_tbl[i].delay,
+				gpio_tbl[i].delay + 1000);
+		}
+	} else {
+		for (i = gpio_tbl_size - 1; i >= 0; i--) {
+			if (gpio_tbl[i].flags)
+				gpio_set_value_cansleep(gpio_tbl[i].gpio,
+					GPIOF_OUT_INIT_LOW);
+		}
+	}
+
+	return rc;
+}
+
/*
 * msm_camera_config_single_vreg - fully configure or release one regulator.
 * @dev:      device used for regulator_get()
 * @cam_vreg: descriptor (name or custom name, voltage range, load, type)
 * @reg_ptr:  in/out regulator handle
 * @config:   non-zero: get + set voltage/load + enable;
 *            zero: disable + reset voltage/load + put
 *
 * Returns 0 on success, -EINVAL on any failure (with partial state
 * unwound via the fall-through label chain at the bottom).
 */
int msm_camera_config_single_vreg(struct device *dev,
	struct camera_vreg_t *cam_vreg, struct regulator **reg_ptr, int config)
{
	int rc = 0;
	const char *vreg_name = NULL;

	if (!dev || !cam_vreg || !reg_ptr) {
		pr_err("%s: get failed NULL parameter\n", __func__);
		goto vreg_get_fail;
	}
	/* Custom vregs are looked up by their custom DT name. */
	if (cam_vreg->type == VREG_TYPE_CUSTOM) {
		if (cam_vreg->custom_vreg_name == NULL) {
			pr_err("%s : can't find sub reg name",
				__func__);
			goto vreg_get_fail;
		}
		vreg_name = cam_vreg->custom_vreg_name;
	} else {
		if (cam_vreg->reg_name == NULL) {
			pr_err("%s : can't find reg name", __func__);
			goto vreg_get_fail;
		}
		vreg_name = cam_vreg->reg_name;
	}

	if (config) {
		CDBG("%s enable %s\n", __func__, vreg_name);
		*reg_ptr = regulator_get(dev, vreg_name);
		if (IS_ERR(*reg_ptr)) {
			pr_err("%s: %s get failed\n", __func__, vreg_name);
			*reg_ptr = NULL;
			goto vreg_get_fail;
		}
		/* Voltage/load only apply to regulators that expose them. */
		if (regulator_count_voltages(*reg_ptr) > 0) {
			CDBG("%s: voltage min=%d, max=%d\n",
				__func__, cam_vreg->min_voltage,
				cam_vreg->max_voltage);
			rc = regulator_set_voltage(
				*reg_ptr, cam_vreg->min_voltage,
				cam_vreg->max_voltage);
			if (rc < 0) {
				pr_err("%s: %s set voltage failed\n",
					__func__, vreg_name);
				goto vreg_set_voltage_fail;
			}
			if (cam_vreg->op_mode >= 0) {
				rc = regulator_set_load(*reg_ptr,
					cam_vreg->op_mode);
				if (rc < 0) {
					pr_err(
					"%s: %s set optimum mode failed\n",
					__func__, vreg_name);
					goto vreg_set_opt_mode_fail;
				}
			}
		}
		rc = regulator_enable(*reg_ptr);
		if (rc < 0) {
			pr_err("%s: %s regulator_enable failed\n", __func__,
				vreg_name);
			goto vreg_unconfig;
		}
	} else {
		CDBG("%s disable %s\n", __func__, vreg_name);
		if (*reg_ptr) {
			CDBG("%s disable %s\n", __func__, vreg_name);
			regulator_disable(*reg_ptr);
			if (regulator_count_voltages(*reg_ptr) > 0) {
				if (cam_vreg->op_mode >= 0)
					regulator_set_load(*reg_ptr, 0);
				regulator_set_voltage(
					*reg_ptr, 0, cam_vreg->max_voltage);
			}
			regulator_put(*reg_ptr);
			*reg_ptr = NULL;
		} else {
			pr_err("%s can't disable %s\n", __func__, vreg_name);
		}
	}

	return 0;

	/* Error unwind: labels fall through so that a later-stage failure
	 * undoes every earlier stage (load -> voltage -> put). */
vreg_unconfig:
	if (regulator_count_voltages(*reg_ptr) > 0)
		regulator_set_load(*reg_ptr, 0);

vreg_set_opt_mode_fail:
	if (regulator_count_voltages(*reg_ptr) > 0)
		regulator_set_voltage(*reg_ptr, 0,
			cam_vreg->max_voltage);

vreg_set_voltage_fail:
	regulator_put(*reg_ptr);
	*reg_ptr = NULL;

vreg_get_fail:
	return -EINVAL;
}
+
/*
 * msm_camera_request_gpio_table - request or free a table of GPIOs.
 * @gpio_tbl: table of gpio/flags/label triples
 * @size:     number of entries
 * @gpio_en:  non-zero to gpio_request_one() each entry, zero to free all
 *
 * Individual request failures are logged but deliberately not fatal
 * (best-effort, to aid driver bring-up).  Returns 0 unless the table
 * itself is invalid (-EINVAL).
 */
int msm_camera_request_gpio_table(struct gpio *gpio_tbl, uint8_t size,
	int gpio_en)
{
	int rc = 0, i = 0, err = 0;

	if (!gpio_tbl || !size) {
		pr_err("%s:%d invalid gpio_tbl %pK / size %d\n", __func__,
			__LINE__, gpio_tbl, size);
		return -EINVAL;
	}
	for (i = 0; i < size; i++) {
		CDBG("%s:%d i %d, gpio %d dir %ld\n", __func__, __LINE__, i,
			gpio_tbl[i].gpio, gpio_tbl[i].flags);
	}
	if (gpio_en) {
		for (i = 0; i < size; i++) {
			err = gpio_request_one(gpio_tbl[i].gpio,
				gpio_tbl[i].flags, gpio_tbl[i].label);
			if (err) {
				/*
				 * After GPIO request fails, continue to
				 * apply new gpios; output an error message
				 * for driver bringup debug.
				 */
				pr_err("%s:%d gpio %d:%s request fails\n",
					__func__, __LINE__,
					gpio_tbl[i].gpio, gpio_tbl[i].label);
			}
		}
	} else {
		gpio_free_array(gpio_tbl, size);
	}

	return rc;
}
+
+/* Get all clocks from DT */
+static int msm_camera_get_clk_info_internal(struct device *dev,
+			struct msm_cam_clk_info **clk_info,
+			struct clk ***clk_ptr,
+			size_t *num_clk)
+{
+	int rc = 0;
+	size_t cnt, tmp;
+	uint32_t *rates, i = 0;
+	const char *clk_ctl = NULL;
+	bool clock_cntl_support = false;
+	struct device_node *of_node;
+
+	of_node = dev->of_node;
+
+	cnt = of_property_count_strings(of_node, "clock-names");
+	if (cnt <= 0) {
+		pr_err("err: No clocks found in DT=%zu\n", cnt);
+		return -EINVAL;
+	}
+
+	tmp = of_property_count_u32_elems(of_node, "qcom,clock-rates");
+	if (tmp <= 0) {
+		pr_err("err: No clk rates device tree, count=%zu", tmp);
+		return -EINVAL;
+	}
+
+	if (cnt != tmp) {
+		pr_err("err: clk name/rates mismatch, strings=%zu, rates=%zu\n",
+			cnt, tmp);
+		return -EINVAL;
+	}
+
+	if (of_property_read_bool(of_node, "qcom,clock-cntl-support")) {
+		tmp = of_property_count_strings(of_node,
+				"qcom,clock-control");
+		if (tmp <= 0) {
+			pr_err("err: control strings not found in DT count=%zu",
+				tmp);
+			return -EINVAL;
+		}
+		if (cnt != tmp) {
+			pr_err("err: controls mismatch, strings=%zu, ctl=%zu\n",
+				cnt, tmp);
+			return -EINVAL;
+		}
+		clock_cntl_support = true;
+	}
+
+	*num_clk = cnt;
+
+	*clk_info = devm_kcalloc(dev, cnt,
+				sizeof(struct msm_cam_clk_info), GFP_KERNEL);
+	if (!*clk_info)
+		return -ENOMEM;
+
+	*clk_ptr = devm_kcalloc(dev, cnt, sizeof(struct clk *),
+				GFP_KERNEL);
+	if (!*clk_ptr) {
+		rc = -ENOMEM;
+		goto free_clk_info;
+	}
+
+	rates = devm_kcalloc(dev, cnt, sizeof(long), GFP_KERNEL);
+	if (!rates) {
+		rc = -ENOMEM;
+		goto free_clk_ptr;
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,clock-rates",
+		rates, cnt);
+	if (rc < 0) {
+		pr_err("err: failed reading clock rates\n");
+		rc = -EINVAL;
+		goto free_rates;
+	}
+
+	for (i = 0; i < cnt; i++) {
+		rc = of_property_read_string_index(of_node, "clock-names",
+				i, &((*clk_info)[i].clk_name));
+		if (rc < 0) {
+			pr_err("%s reading clock-name failed index %d\n",
+				__func__, i);
+			rc = -EINVAL;
+			goto free_rates;
+		}
+
+		CDBG("dbg: clk-name[%d] = %s\n", i, (*clk_info)[i].clk_name);
+		if (clock_cntl_support) {
+			rc = of_property_read_string_index(of_node,
+				"qcom,clock-control", i, &clk_ctl);
+			if (rc < 0) {
+				pr_err("%s reading clock-control failed index %d\n",
+					__func__, i);
+				rc = -EINVAL;
+				goto free_rates;
+			}
+
+			if (!strcmp(clk_ctl, "NO_SET_RATE")) {
+				(*clk_info)[i].clk_rate = NO_SET_RATE;
+			} else if (!strcmp(clk_ctl, "INIT_RATE")) {
+				(*clk_info)[i].clk_rate = INIT_RATE;
+			} else if (!strcmp(clk_ctl, "SET_RATE")) {
+				(*clk_info)[i].clk_rate = rates[i];
+			} else {
+				pr_err("%s: error: clock control has invalid value\n",
+					 __func__);
+				rc = -EINVAL;
+				goto free_rates;
+			}
+		} else {
+			(*clk_info)[i].clk_rate =
+				(rates[i] == 0) ? (long)-1 : rates[i];
+		}
+
+		CDBG("dbg: clk-rate[%d] = rate: %ld\n",
+			i, (*clk_info)[i].clk_rate);
+
+		(*clk_ptr)[i] =
+			devm_clk_get(dev, (*clk_info)[i].clk_name);
+		if (IS_ERR((*clk_ptr)[i])) {
+			rc = PTR_ERR((*clk_ptr)[i]);
+			goto release_clk;
+		}
+		CDBG("clk ptr[%d] :%pK\n", i, (*clk_ptr)[i]);
+	}
+
+	devm_kfree(dev, rates);
+
+	return rc;
+
+release_clk:
+	for (--i; i >= 0; i--)
+		devm_clk_put(dev, (*clk_ptr)[i]);
+free_rates:
+	devm_kfree(dev, rates);
+free_clk_ptr:
+	devm_kfree(dev, *clk_ptr);
+free_clk_info:
+	devm_kfree(dev, *clk_info);
+	return rc;
+}
+
+/* Get all clocks from DT  for I2C devices */
+int msm_camera_i2c_dev_get_clk_info(struct device *dev,
+			struct msm_cam_clk_info **clk_info,
+			struct clk ***clk_ptr,
+			size_t *num_clk)
+{
+	int rc = 0;
+
+	if (!dev || !clk_info || !clk_ptr || !num_clk)
+		return -EINVAL;
+
+	rc = msm_camera_get_clk_info_internal(dev, clk_info, clk_ptr, num_clk);
+	return rc;
+}
+EXPORT_SYMBOL(msm_camera_i2c_dev_get_clk_info);
+
+/* Get all clocks from DT  for platform devices */
+int msm_camera_get_clk_info(struct platform_device *pdev,
+			struct msm_cam_clk_info **clk_info,
+			struct clk ***clk_ptr,
+			size_t *num_clk)
+{
+	int rc = 0;
+
+	if (!pdev || !&pdev->dev || !clk_info || !clk_ptr || !num_clk)
+		return -EINVAL;
+
+	rc = msm_camera_get_clk_info_internal(&pdev->dev,
+			clk_info, clk_ptr, num_clk);
+	return rc;
+}
+EXPORT_SYMBOL(msm_camera_get_clk_info);
+
+/* Get all clocks and multiple rates from DT */
+int msm_camera_get_clk_info_and_rates(
+			struct platform_device *pdev,
+			struct msm_cam_clk_info **pclk_info,
+			struct clk ***pclks,
+			uint32_t ***pclk_rates,
+			size_t *num_set,
+			size_t *num_clk)
+{
+	int rc = 0, tmp_var, cnt, tmp;
+	uint32_t i = 0, j = 0;
+	struct device_node *of_node;
+	uint32_t **rates;
+	struct clk **clks;
+	struct msm_cam_clk_info *clk_info;
+
+	if (!pdev || !pclk_info || !num_clk
+		|| !pclk_rates || !pclks || !num_set)
+		return -EINVAL;
+
+	of_node = pdev->dev.of_node;
+
+	cnt = of_property_count_strings(of_node, "clock-names");
+	if (cnt <= 0) {
+		pr_err("err: No clocks found in DT=%d\n", cnt);
+		return -EINVAL;
+	}
+
+	tmp = of_property_count_u32_elems(of_node, "qcom,clock-rates");
+	if (tmp <= 0) {
+		pr_err("err: No clk rates device tree, count=%d\n", tmp);
+		return -EINVAL;
+	}
+
+	if ((tmp % cnt) != 0) {
+		pr_err("err: clk name/rates mismatch, strings=%d, rates=%d\n",
+			cnt, tmp);
+		return -EINVAL;
+	}
+
+	*num_clk = cnt;
+	*num_set = (tmp / cnt);
+
+	clk_info = devm_kcalloc(&pdev->dev, cnt,
+				sizeof(struct msm_cam_clk_info), GFP_KERNEL);
+	if (!clk_info)
+		return -ENOMEM;
+
+	clks = devm_kcalloc(&pdev->dev, cnt, sizeof(struct clk *),
+				GFP_KERNEL);
+	if (!clks) {
+		rc = -ENOMEM;
+		goto free_clk_info;
+	}
+
+	rates = devm_kcalloc(&pdev->dev, *num_set,
+		sizeof(uint32_t *), GFP_KERNEL);
+	if (!rates) {
+		rc = -ENOMEM;
+		goto free_clk;
+	}
+
+	for (i = 0; i < *num_set; i++) {
+		rates[i] = devm_kcalloc(&pdev->dev, *num_clk,
+			sizeof(uint32_t), GFP_KERNEL);
+		if (!rates[i]) {
+			rc = -ENOMEM;
+			for (--i; i >= 0; i--)
+				devm_kfree(&pdev->dev, rates[i]);
+			goto free_rate;
+		}
+	}
+
+	tmp_var = 0;
+	for (i = 0; i < *num_set; i++) {
+		for (j = 0; j < *num_clk; j++) {
+			rc = of_property_read_u32_index(of_node,
+				"qcom,clock-rates", tmp_var++, &rates[i][j]);
+			if (rc < 0) {
+				pr_err("err: failed reading clock rates\n");
+				rc = -EINVAL;
+				goto free_rate_array;
+			}
+			CDBG("Clock rate idx %d idx %d value %d\n",
+				i, j, rates[i][j]);
+		}
+	}
+	for (i = 0; i < *num_clk; i++) {
+		rc = of_property_read_string_index(of_node, "clock-names",
+				i, &clk_info[i].clk_name);
+		if (rc < 0) {
+			pr_err("%s reading clock-name failed index %d\n",
+				__func__, i);
+			rc = -EINVAL;
+			goto free_rate_array;
+		}
+
+		CDBG("dbg: clk-name[%d] = %s\n", i, clk_info[i].clk_name);
+
+		clks[i] =
+			devm_clk_get(&pdev->dev, clk_info[i].clk_name);
+		if (IS_ERR(clks[i])) {
+			rc = PTR_ERR(clks[i]);
+			goto release_clk;
+		}
+		CDBG("clk ptr[%d] :%pK\n", i, clks[i]);
+	}
+	*pclk_info = clk_info;
+	*pclks = clks;
+	*pclk_rates = rates;
+
+	return rc;
+
+release_clk:
+	for (--i; i >= 0; i--)
+		devm_clk_put(&pdev->dev, clks[i]);
+free_rate_array:
+	for (i = 0; i < *num_set; i++)
+		devm_kfree(&pdev->dev, rates[i]);
+free_rate:
+	devm_kfree(&pdev->dev, rates);
+free_clk:
+	devm_kfree(&pdev->dev, clks);
+free_clk_info:
+	devm_kfree(&pdev->dev, clk_info);
+	return rc;
+}
+EXPORT_SYMBOL(msm_camera_get_clk_info_and_rates);
+
+/* Enable/Disable all clocks */
+int msm_camera_clk_enable(struct device *dev,
+		struct msm_cam_clk_info *clk_info,
+		struct clk **clk_ptr, int num_clk, int enable)
+{
+	int i;
+	int rc = 0;
+	long clk_rate;
+
+	if (enable) {
+		for (i = 0; i < num_clk; i++) {
+			/* NOTE(review): pr_err on the normal enable path is
+			 * noisy; left unchanged to preserve log behavior.
+			 */
+			pr_err("enable %s\n", clk_info[i].clk_name);
+			if (clk_info[i].clk_rate > 0) {
+				clk_rate = clk_round_rate(clk_ptr[i],
+					clk_info[i].clk_rate);
+				if (clk_rate < 0) {
+					pr_err("%s round failed\n",
+						   clk_info[i].clk_name);
+					/* Propagate the error; previously rc
+					 * could still be 0 here, making the
+					 * function return success on failure.
+					 */
+					rc = (int)clk_rate;
+					goto cam_clk_set_err;
+				}
+				rc = clk_set_rate(clk_ptr[i],
+					clk_rate);
+				if (rc < 0) {
+					pr_err("%s set failed\n",
+						clk_info[i].clk_name);
+					goto cam_clk_set_err;
+				}
+
+			} else if (clk_info[i].clk_rate == INIT_RATE) {
+				/* INIT_RATE: keep the current rate; only set
+				 * a rate if the clock reports 0.
+				 */
+				clk_rate = clk_get_rate(clk_ptr[i]);
+				if (clk_rate == 0) {
+					clk_rate =
+						  clk_round_rate(clk_ptr[i], 0);
+					if (clk_rate < 0) {
+						pr_err("%s round rate failed\n",
+							  clk_info[i].clk_name);
+						/* Same fix as above: do not
+						 * leave rc == 0 on failure.
+						 */
+						rc = (int)clk_rate;
+						goto cam_clk_set_err;
+					}
+					rc = clk_set_rate(clk_ptr[i],
+								clk_rate);
+					if (rc < 0) {
+						pr_err("%s set rate failed\n",
+							  clk_info[i].clk_name);
+						goto cam_clk_set_err;
+					}
+				}
+			}
+			rc = clk_prepare_enable(clk_ptr[i]);
+			if (rc < 0) {
+				pr_err("%s enable failed\n",
+					   clk_info[i].clk_name);
+				goto cam_clk_enable_err;
+			}
+			/* msleep for long delays, usleep_range otherwise */
+			if (clk_info[i].delay > 20) {
+				msleep(clk_info[i].delay);
+			} else if (clk_info[i].delay) {
+				usleep_range(clk_info[i].delay * 1000,
+					(clk_info[i].delay * 1000) + 1000);
+			}
+		}
+	} else {
+		/* Disable in reverse order of enabling */
+		for (i = num_clk - 1; i >= 0; i--) {
+			if (clk_ptr[i] != NULL) {
+				pr_err("%s disable %s\n", __func__,
+					clk_info[i].clk_name);
+				clk_disable_unprepare(clk_ptr[i]);
+			}
+		}
+	}
+	return rc;
+
+cam_clk_enable_err:
+cam_clk_set_err:
+	/* Unwind every clock enabled so far */
+	for (i--; i >= 0; i--) {
+		if (clk_ptr[i] != NULL)
+			clk_disable_unprepare(clk_ptr[i]);
+	}
+	return rc;
+}
+EXPORT_SYMBOL(msm_camera_clk_enable);
+
+/* Set rate on a specific clock.
+ *
+ * Rounds clk_rate to the nearest supported rate, applies it, and
+ * returns the rounded rate. clk_rate == 0 is a no-op and returns 0.
+ * Returns -EINVAL on bad arguments or clk API failure.
+ */
+long msm_camera_clk_set_rate(struct device *dev,
+			struct clk *clk,
+			long clk_rate)
+{
+	int rc = 0;
+	long rate = 0;
+
+	if (!dev || !clk || (clk_rate < 0))
+		return -EINVAL;
+
+	/* NOTE(review): format string says "enable" but prints the
+	 * requested rate; message text left untouched.
+	 */
+	CDBG("clk : %pK, enable : %ld\n", clk, clk_rate);
+
+	if (clk_rate > 0) {
+		rate = clk_round_rate(clk, clk_rate);
+		if (rate < 0) {
+			pr_err("round rate failed\n");
+			return -EINVAL;
+		}
+
+		rc = clk_set_rate(clk, rate);
+		if (rc < 0) {
+			pr_err("set rate failed\n");
+			return -EINVAL;
+		}
+	}
+
+	return rate;
+}
+EXPORT_SYMBOL(msm_camera_clk_set_rate);
+
+/* release memory allocated for clocks */
+static int msm_camera_put_clk_info_internal(struct device *dev,
+				struct msm_cam_clk_info **clk_info,
+				struct clk ***clk_ptr, int cnt)
+{
+	int i;
+
+	for (i = cnt - 1; i >= 0; i--) {
+		CDBG("clk ptr[%d] :%pK\n", i, (*clk_ptr)[i]);
+		/* Test the clk handle itself; the original checked
+		 * clk_ptr[i], which indexes past the single array
+		 * pointer for i > 0.
+		 */
+		if ((*clk_ptr)[i] != NULL)
+			devm_clk_put(dev, (*clk_ptr)[i]);
+	}
+	devm_kfree(dev, *clk_info);
+	devm_kfree(dev, *clk_ptr);
+	*clk_info = NULL;
+	*clk_ptr = NULL;
+	return 0;
+}
+
+/* release memory allocated for clocks for i2c devices.
+ *
+ * Thin wrapper validating the arguments before delegating to
+ * msm_camera_put_clk_info_internal(). Returns -EINVAL on NULL input.
+ */
+int msm_camera_i2c_dev_put_clk_info(struct device *dev,
+				struct msm_cam_clk_info **clk_info,
+				struct clk ***clk_ptr, int cnt)
+{
+	int rc = 0;
+
+	if (!dev || !clk_info || !clk_ptr)
+		return -EINVAL;
+
+	rc = msm_camera_put_clk_info_internal(dev, clk_info, clk_ptr, cnt);
+	return rc;
+}
+EXPORT_SYMBOL(msm_camera_i2c_dev_put_clk_info);
+
+/* release memory allocated for clocks for platform devices.
+ *
+ * Validates arguments and delegates to
+ * msm_camera_put_clk_info_internal(). Returns -EINVAL on NULL input.
+ */
+int msm_camera_put_clk_info(struct platform_device *pdev,
+				struct msm_cam_clk_info **clk_info,
+				struct clk ***clk_ptr, int cnt)
+{
+	int rc = 0;
+
+	/* The original also tested !&pdev->dev, which is always false
+	 * (address-of a member is never NULL); dropped.
+	 */
+	if (!pdev || !clk_info || !clk_ptr)
+		return -EINVAL;
+
+	rc = msm_camera_put_clk_info_internal(&pdev->dev,
+			clk_info, clk_ptr, cnt);
+	return rc;
+}
+EXPORT_SYMBOL(msm_camera_put_clk_info);
+
+/* Release clock info, clock handles and the per-set rate arrays. */
+int msm_camera_put_clk_info_and_rates(struct platform_device *pdev,
+		struct msm_cam_clk_info **clk_info,
+		struct clk ***clk_ptr, uint32_t ***clk_rates,
+		size_t set, size_t cnt)
+{
+	int i;
+
+	/* Guard against NULL inputs; the original dereferenced them
+	 * unconditionally.
+	 */
+	if (!pdev || !clk_info || !clk_ptr || !clk_rates)
+		return -EINVAL;
+
+	for (i = set - 1; i >= 0; i--)
+		devm_kfree(&pdev->dev, (*clk_rates)[i]);
+
+	devm_kfree(&pdev->dev, *clk_rates);
+	for (i = cnt - 1; i >= 0; i--) {
+		CDBG("clk ptr[%d] :%pK\n", i, (*clk_ptr)[i]);
+		/* Test the clk handle itself; the original checked
+		 * clk_ptr[i], indexing past the single array pointer.
+		 */
+		if ((*clk_ptr)[i] != NULL)
+			devm_clk_put(&pdev->dev, (*clk_ptr)[i]);
+	}
+	devm_kfree(&pdev->dev, *clk_info);
+	devm_kfree(&pdev->dev, *clk_ptr);
+	*clk_info = NULL;
+	*clk_ptr = NULL;
+	*clk_rates = NULL;
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_camera_put_clk_info_and_rates);
+
+/* Get regulators from DT.
+ *
+ * Parses "qcom,vdd-names" and resolves each "<name>-supply" phandle
+ * into a regulator handle. On success *vdd_info/*num_reg are
+ * populated; on failure all acquired regulators are released.
+ */
+int msm_camera_get_regulator_info(struct platform_device *pdev,
+				struct msm_cam_regulator **vdd_info,
+				int *num_reg)
+{
+	int cnt;
+	int i, rc;
+	struct device_node *of_node;
+	char prop_name[32];
+	struct msm_cam_regulator *tmp_reg;
+
+	if (!pdev || !vdd_info || !num_reg)
+		return -EINVAL;
+
+	of_node = pdev->dev.of_node;
+
+	if (!of_get_property(of_node, "qcom,vdd-names", NULL)) {
+		pr_err("err: Regulators property not found\n");
+		return -EINVAL;
+	}
+
+	/* of_property_count_strings() returns a negative errno on
+	 * failure; cnt must be signed — the original uint32_t turned a
+	 * negative error into a huge positive count and passed the
+	 * "<= 0" check.
+	 */
+	cnt = of_property_count_strings(of_node, "qcom,vdd-names");
+	if (cnt <= 0) {
+		pr_err("err: no regulators found in device tree, count=%d",
+			cnt);
+		return -EINVAL;
+	}
+
+	tmp_reg = devm_kcalloc(&pdev->dev, cnt,
+				sizeof(struct msm_cam_regulator), GFP_KERNEL);
+	if (!tmp_reg)
+		return -ENOMEM;
+
+	for (i = 0; i < cnt; i++) {
+		rc = of_property_read_string_index(of_node,
+			"qcom,vdd-names", i, &tmp_reg[i].name);
+		if (rc < 0) {
+			pr_err("Fail to fetch regulators: %d\n", i);
+			rc = -EINVAL;
+			goto err1;
+		}
+
+		CDBG("regulator-names[%d] = %s\n", i, tmp_reg[i].name);
+
+		snprintf(prop_name, 32, "%s-supply", tmp_reg[i].name);
+
+		if (of_get_property(of_node, prop_name, NULL)) {
+			tmp_reg[i].vdd =
+				devm_regulator_get(&pdev->dev, tmp_reg[i].name);
+			if (IS_ERR(tmp_reg[i].vdd)) {
+				rc = -EINVAL;
+				pr_err("Fail to get regulator :%d\n", i);
+				goto err1;
+			}
+		} else {
+			pr_err("Regulator phandle not found :%s\n",
+				tmp_reg[i].name);
+			rc = -EINVAL;
+			goto err1;
+		}
+		CDBG("vdd ptr[%d] :%pK\n", i, tmp_reg[i].vdd);
+	}
+
+	*num_reg = cnt;
+	*vdd_info = tmp_reg;
+
+	return 0;
+
+err1:
+	/* Release regulators acquired before the failing index */
+	for (--i; i >= 0; i--)
+		devm_regulator_put(tmp_reg[i].vdd);
+	devm_kfree(&pdev->dev, tmp_reg);
+	return rc;
+}
+EXPORT_SYMBOL(msm_camera_get_regulator_info);
+
+
+/* Enable/Disable regulators.
+ *
+ * On an enable failure, every regulator enabled so far is disabled
+ * again before returning the error.
+ */
+int msm_camera_regulator_enable(struct msm_cam_regulator *vdd_info,
+				int cnt, int enable)
+{
+	int i;
+	int rc;
+	struct msm_cam_regulator *tmp = vdd_info;
+
+	if (!tmp) {
+		pr_err("Invalid params");
+		return -EINVAL;
+	}
+	CDBG("cnt : %d\n", cnt);
+
+	for (i = 0; i < cnt; i++) {
+		if (tmp && !IS_ERR_OR_NULL(tmp->vdd)) {
+			CDBG("name : %s, enable : %d\n", tmp->name, enable);
+			if (enable) {
+				rc = regulator_enable(tmp->vdd);
+				if (rc < 0) {
+					pr_err("regulator enable failed %d\n",
+						i);
+					goto disable_reg;
+				}
+			} else {
+				rc = regulator_disable(tmp->vdd);
+				if (rc < 0)
+					pr_err("regulator disable failed %d\n",
+						i);
+			}
+		}
+		tmp++;
+	}
+
+	return 0;
+disable_reg:
+	/* Unwind all enabled regulators, including index 0 — the
+	 * original "i > 0" condition left the first one enabled.
+	 */
+	for (--i; i >= 0; i--) {
+		--tmp;
+		if (!IS_ERR_OR_NULL(tmp->vdd))
+			regulator_disable(tmp->vdd);
+	}
+	return rc;
+}
+EXPORT_SYMBOL(msm_camera_regulator_enable);
+
+/* Put regulators */
+void msm_camera_put_regulators(struct platform_device *pdev,
+	struct msm_cam_regulator **vdd_info, int cnt)
+{
+	int i;
+
+	if (!pdev || !vdd_info || !*vdd_info) {
+		pr_err("Invalid params\n");
+		return;
+	}
+
+	for (i = cnt - 1; i >= 0; i--) {
+		/* Index into the regulator array (*vdd_info); the
+		 * original vdd_info[i] indexed the double pointer
+		 * itself, reading past it for i > 0. The CDBG is also
+		 * braced now — it was indented as if guarded but was
+		 * not.
+		 */
+		if (!IS_ERR_OR_NULL((*vdd_info)[i].vdd)) {
+			devm_regulator_put((*vdd_info)[i].vdd);
+			CDBG("vdd ptr[%d] :%pK\n", i, (*vdd_info)[i].vdd);
+		}
+	}
+
+	devm_kfree(&pdev->dev, *vdd_info);
+	*vdd_info = NULL;
+}
+EXPORT_SYMBOL(msm_camera_put_regulators);
+
+/* Look up the named IRQ resource of a platform device.
+ * Returns NULL on bad arguments or if the resource is not found.
+ */
+struct resource *msm_camera_get_irq(struct platform_device *pdev,
+							char *irq_name)
+{
+	if (!pdev || !irq_name) {
+		pr_err("Invalid params\n");
+		return NULL;
+	}
+
+	CDBG("Get irq for %s\n", irq_name);
+	return platform_get_resource_byname(pdev, IORESOURCE_IRQ, irq_name);
+}
+EXPORT_SYMBOL(msm_camera_get_irq);
+
+/* Register a device-managed IRQ handler for the given resource.
+ * Returns 0 on success, -EINVAL on bad arguments or request failure.
+ */
+int msm_camera_register_irq(struct platform_device *pdev,
+			struct resource *irq, irq_handler_t handler,
+			unsigned long irqflags, char *irq_name, void *dev_id)
+{
+	int rc = 0;
+
+	if (!pdev || !irq || !handler || !irq_name || !dev_id) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	/* NOTE(review): the real devm_request_irq() error code is
+	 * collapsed to -EINVAL here; callers only see a generic
+	 * failure.
+	 */
+	rc = devm_request_irq(&pdev->dev, irq->start, handler,
+		irqflags, irq_name, dev_id);
+	if (rc < 0) {
+		pr_err("irq request fail\n");
+		rc = -EINVAL;
+	}
+
+	CDBG("Registered irq for %s[resource - %pK]\n", irq_name, irq);
+
+	return rc;
+}
+EXPORT_SYMBOL(msm_camera_register_irq);
+
+/* Register a device-managed threaded IRQ for the given resource.
+ * handler_fn/thread_fn may be NULL per devm_request_threaded_irq()
+ * semantics, so only the other arguments are validated.
+ * Returns 0 on success, -EINVAL on bad arguments or request failure.
+ */
+int msm_camera_register_threaded_irq(struct platform_device *pdev,
+			struct resource *irq, irq_handler_t handler_fn,
+			irq_handler_t thread_fn, unsigned long irqflags,
+			const char *irq_name, void *dev_id)
+{
+	int rc = 0;
+
+	if (!pdev || !irq || !irq_name || !dev_id) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	/* NOTE(review): actual errno from the request is collapsed to
+	 * -EINVAL, matching msm_camera_register_irq() above.
+	 */
+	rc = devm_request_threaded_irq(&pdev->dev, irq->start, handler_fn,
+			thread_fn, irqflags, irq_name, dev_id);
+	if (rc < 0) {
+		pr_err("irq request fail\n");
+		rc = -EINVAL;
+	}
+
+	CDBG("Registered irq for %s[resource - %pK]\n", irq_name, irq);
+
+	return rc;
+}
+EXPORT_SYMBOL(msm_camera_register_threaded_irq);
+
+/* Enable or disable the IRQ line described by the resource.
+ * enable != 0 enables, 0 disables. Returns -EINVAL if irq is NULL.
+ */
+int msm_camera_enable_irq(struct resource *irq, int enable)
+{
+	if (!irq) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	CDBG("irq Enable %d\n", enable);
+	if (enable)
+		enable_irq(irq->start);
+	else
+		disable_irq(irq->start);
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_camera_enable_irq);
+
+/* Free a device-managed IRQ previously requested with the same
+ * dev_id. Returns -EINVAL on bad arguments.
+ */
+int msm_camera_unregister_irq(struct platform_device *pdev,
+	struct resource *irq, void *dev_id)
+{
+
+	if (!pdev || !irq || !dev_id) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	CDBG("Un Registering irq for [resource - %pK]\n", irq);
+	devm_free_irq(&pdev->dev, irq->start, dev_id);
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_camera_unregister_irq);
+
+/* Map the named MEM resource of a platform device.
+ *
+ * When reserve_mem is set the region is requested first. Returns the
+ * mapped base or NULL on failure.
+ */
+void __iomem *msm_camera_get_reg_base(struct platform_device *pdev,
+		char *device_name, int reserve_mem)
+{
+	struct resource *mem;
+	void __iomem *base;
+
+	if (!pdev || !device_name) {
+		pr_err("Invalid params\n");
+		return NULL;
+	}
+
+	CDBG("device name :%s\n", device_name);
+	mem = platform_get_resource_byname(pdev,
+			IORESOURCE_MEM, device_name);
+	if (!mem) {
+		pr_err("err: mem resource %s not found\n", device_name);
+		return NULL;
+	}
+
+	if (reserve_mem) {
+		CDBG("device:%pK, mem : %pK, size : %d\n",
+			&pdev->dev, mem, (int)resource_size(mem));
+		if (!devm_request_mem_region(&pdev->dev, mem->start,
+			resource_size(mem),
+			device_name)) {
+			pr_err("err: no valid mem region for device:%s\n",
+				device_name);
+			return NULL;
+		}
+	}
+
+	base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
+	if (!base) {
+		/* Release the region only if we actually requested it;
+		 * the original released it unconditionally even when
+		 * reserve_mem was false.
+		 */
+		if (reserve_mem)
+			devm_release_mem_region(&pdev->dev, mem->start,
+					resource_size(mem));
+		pr_err("err: ioremap failed: %s\n", device_name);
+		return NULL;
+	}
+
+	CDBG("base : %pK\n", base);
+	return base;
+}
+EXPORT_SYMBOL(msm_camera_get_reg_base);
+
+/* Return the size of the named MEM resource of a platform device,
+ * or 0 on bad arguments / resource not found (0 doubles as the
+ * error value).
+ */
+uint32_t msm_camera_get_res_size(struct platform_device *pdev,
+	char *device_name)
+{
+	struct resource *mem;
+
+	if (!pdev || !device_name) {
+		pr_err("Invalid params\n");
+		return 0;
+	}
+
+	CDBG("device name :%s\n", device_name);
+	mem = platform_get_resource_byname(pdev,
+		IORESOURCE_MEM, device_name);
+	if (!mem) {
+		pr_err("err: mem resource %s not found\n", device_name);
+		return 0;
+	}
+	return resource_size(mem);
+}
+EXPORT_SYMBOL(msm_camera_get_res_size);
+
+
+/* Unmap a register base obtained from msm_camera_get_reg_base() and,
+ * if reserve_mem is set, release the mem region as well.
+ * Returns -EINVAL on bad arguments or if the resource is not found.
+ */
+int msm_camera_put_reg_base(struct platform_device *pdev,
+	void __iomem *base, char *device_name, int reserve_mem)
+{
+	struct resource *mem;
+
+	if (!pdev || !base || !device_name) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	CDBG("device name :%s\n", device_name);
+	mem = platform_get_resource_byname(pdev,
+			IORESOURCE_MEM, device_name);
+	if (!mem) {
+		pr_err("err: mem resource %s not found\n", device_name);
+		return -EINVAL;
+	}
+	CDBG("mem : %pK, size : %d\n", mem, (int)resource_size(mem));
+
+	devm_iounmap(&pdev->dev, base);
+	if (reserve_mem)
+		devm_release_mem_region(&pdev->dev,
+			mem->start, resource_size(mem));
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_camera_put_reg_base);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_soc_api.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_soc_api.h
new file mode 100644
index 0000000..c316090
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_soc_api.h
@@ -0,0 +1,473 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_SENSOR_SOC_API_H_
+#define _CAM_SENSOR_SOC_API_H_
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock_types.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include "cam_sensor_cmn_header.h"
+
+/* Regulator name parsed from DT ("qcom,vdd-names") paired with the
+ * handle obtained via devm_regulator_get().
+ */
+struct msm_cam_regulator {
+	const char *name;
+	struct regulator *vdd;
+};
+
+/* One entry of a GPIO set table: gpio number, the flags/value to
+ * apply, and a post-set delay.
+ */
+struct msm_gpio_set_tbl {
+	unsigned int gpio;
+	unsigned long flags;
+	uint32_t delay;
+};
+
+/**
+ * @brief      : Gets clock information from dtsi
+ *
+ * This function extracts the clocks information for a specific
+ * platform device
+ *
+ * @param pdev   : Platform device to get clocks information
+ * @param clk_info   : Pointer to populate clock information array
+ * @param clk_ptr   : Pointer to populate clock resource pointers
+ * @param num_clk: Pointer to populate the number of clocks
+ *                 extracted from dtsi
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int msm_camera_get_clk_info(struct platform_device *pdev,
+			struct msm_cam_clk_info **clk_info,
+			struct clk ***clk_ptr,
+			size_t *num_clk);
+
+/**
+ * @brief      : Gets clock information from dtsi
+ *
+ * This function extracts the clocks information for a specific
+ * i2c device
+ *
+ * @param dev   : i2c device to get clocks information
+ * @param clk_info   : Pointer to populate clock information array
+ * @param clk_ptr   : Pointer to populate clock resource pointers
+ * @param num_clk: Pointer to populate the number of clocks
+ *                 extracted from dtsi
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int msm_camera_i2c_dev_get_clk_info(struct device *dev,
+			struct msm_cam_clk_info **clk_info,
+			struct clk ***clk_ptr,
+			size_t *num_clk);
+
+/**
+ * @brief      : Gets clock information and rates from dtsi
+ *
+ * This function extracts the clocks information for a specific
+ * platform device
+ *
+ * @param pdev   : Platform device to get clocks information
+ * @param clk_info   : Pointer to populate clock information array
+ * @param clk_ptr   : Pointer to populate clock resource pointers
+ * @param clk_rates   : Pointer to populate clock rates
+ * @param num_set: Pointer to populate the number of sets of rates
+ * @param num_clk: Pointer to populate the number of clocks
+ *                 extracted from dtsi
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int msm_camera_get_clk_info_and_rates(
+			struct platform_device *pdev,
+			struct msm_cam_clk_info **clk_info,
+			struct clk ***clk_ptr,
+			uint32_t ***clk_rates,
+			size_t *num_set,
+			size_t *num_clk);
+
+/**
+ * @brief      : Puts clock information
+ *
+ * This function releases the memory allocated for the clocks
+ *
+ * @param pdev   : Pointer to platform device
+ * @param clk_info   : Pointer to release the allocated memory
+ * @param clk_ptr   : Pointer to release the clock resources
+ * @param cnt   : Number of clk resources
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int msm_camera_put_clk_info(struct platform_device *pdev,
+				struct msm_cam_clk_info **clk_info,
+				struct clk ***clk_ptr, int cnt);
+
+/**
+ * @brief      : Puts clock information
+ *
+ * This function releases the memory allocated for the clocks
+ *
+ * @param dev   : Pointer to i2c device
+ * @param clk_info   : Pointer to release the allocated memory
+ * @param clk_ptr   : Pointer to release the clock resources
+ * @param cnt   : Number of clk resources
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int msm_camera_i2c_dev_put_clk_info(struct device *dev,
+			struct msm_cam_clk_info **clk_info,
+			struct clk ***clk_ptr, int cnt);
+
+/**
+ * @brief      : Puts clock information
+ *
+ * This function releases the memory allocated for the clocks
+ *
+ * @param pdev   : Pointer to platform device
+ * @param clk_info   : Pointer to release the allocated memory
+ * @param clk_ptr   : Pointer to release the clock resources
+ * @param clk_rates   : Pointer to release the clock rates
+ * @param set   : Number of sets of clock rates
+ * @param cnt   : Number of clk resources
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int msm_camera_put_clk_info_and_rates(struct platform_device *pdev,
+		struct msm_cam_clk_info **clk_info,
+		struct clk ***clk_ptr, uint32_t ***clk_rates,
+		size_t set, size_t cnt);
+/**
+ * @brief      : Enable clocks
+ *
+ * This function enables the clocks for a specified device
+ *
+ * @param dev   : Device to get clocks information
+ * @param clk_info   : Pointer to populate clock information
+ * @param clk_ptr   : Pointer to populate clock information
+ * @param num_clk: Pointer to populate the number of clocks
+ *                 extracted from dtsi
+ * @param enable   : Flag to specify enable/disable
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+int msm_camera_clk_enable(struct device *dev,
+					struct msm_cam_clk_info *clk_info,
+					struct clk **clk_ptr,
+					int num_clk,
+					int enable);
+/**
+ * @brief      : Set clock rate
+ *
+ * This function sets the rate for a specified clock and
+ * returns the rounded value
+ *
+ * @param dev   : Device to get clocks information
+ * @param clk   : Pointer to clock to set rate
+ * @param clk_rate   : Rate to be set
+ *
+ * @return Status of operation. Negative in case of error. clk rate otherwise.
+ */
+
+long msm_camera_clk_set_rate(struct device *dev,
+				struct clk *clk,
+				long clk_rate);
+/**
+ * @brief      : Gets regulator info
+ *
+ * This function extracts the regulator information for a specific
+ * platform device
+ *
+ * @param pdev   : platform device to get regulator information
+ * @param vdd_info: Pointer to populate the regulator names
+ * @param num_reg: Pointer to populate the number of regulators
+ *                 extracted from dtsi
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int msm_camera_get_regulator_info(struct platform_device *pdev,
+		struct msm_cam_regulator **vdd_info, int *num_reg);
+/**
+ * @brief      : Enable/Disable the regulators
+ *
+ * This function enables/disables the regulators for a specific
+ * platform device
+ *
+ * @param vdd_info: Pointer to list of regulators
+ * @param cnt: Number of regulators to enable/disable
+ * @param enable: Flags specifies either enable/disable
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+int msm_camera_regulator_enable(struct msm_cam_regulator *vdd_info,
+				int cnt, int enable);
+
+/**
+ * @brief      : Release the regulators
+ *
+ * This function releases the regulator resources.
+ *
+ * @param pdev: Pointer to platform device
+ * @param vdd_info: Pointer to list of regulators
+ * @param cnt: Number of regulators to release
+ */
+
+void msm_camera_put_regulators(struct platform_device *pdev,
+	struct msm_cam_regulator **vdd_info, int cnt);
+/**
+ * @brief      : Get the IRQ resource
+ *
+ * This function gets the irq resource from dtsi for a specific
+ * platform device
+ *
+ * @param pdev   : Platform device to get IRQ
+ * @param irq_name: Name of the IRQ resource to get from DTSI
+ *
+ * @return Pointer to resource if success else null
+ */
+
+struct resource *msm_camera_get_irq(struct platform_device *pdev,
+							char *irq_name);
+/**
+ * @brief      : Register the IRQ
+ *
+ * This function registers the irq resource for specified hardware
+ *
+ * @param pdev    : Platform device to register IRQ resource
+ * @param irq	  : IRQ resource
+ * @param handler : IRQ handler
+ * @param irqflags : IRQ flags
+ * @param irq_name: Name of the IRQ
+ * @param dev	 : Token of the device
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+int msm_camera_register_irq(struct platform_device *pdev,
+						struct resource *irq,
+						irq_handler_t handler,
+						unsigned long irqflags,
+						char *irq_name,
+						void *dev);
+
+/**
+ * @brief      : Register the threaded IRQ
+ *
+ * This function registers the irq resource for specified hardware
+ *
+ * @param pdev    : Platform device to register IRQ resource
+ * @param irq	  : IRQ resource
+ * @param handler_fn : IRQ handler function
+ * @param thread_fn : thread handler function
+ * @param irqflags : IRQ flags
+ * @param irq_name: Name of the IRQ
+ * @param dev	 : Token of the device
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+int msm_camera_register_threaded_irq(struct platform_device *pdev,
+						struct resource *irq,
+						irq_handler_t handler_fn,
+						irq_handler_t thread_fn,
+						unsigned long irqflags,
+						const char *irq_name,
+						void *dev);
+
+/**
+ * @brief      : Enable/Disable the IRQ
+ *
+ * This function enables or disables a specific IRQ
+ *
+ * @param irq    : IRQ resource
+ * @param flag   : flag to enable/disable
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+int msm_camera_enable_irq(struct resource *irq, int flag);
+
+/**
+ * @brief      : UnRegister the IRQ
+ *
+ * This function Unregisters/Frees the irq resource
+ *
+ * @param pdev   : Pointer to platform device
+ * @param irq    : IRQ resource
+ * @param dev    : Token of the device
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+int msm_camera_unregister_irq(struct platform_device *pdev,
+	struct resource *irq, void *dev_id);
+
+/**
+ * @brief      : Gets device register base
+ *
+ * This function extracts the device's register base from the dtsi
+ * for the specified platform device
+ *
+ * @param pdev   : Platform device to get the register base from
+ * @param device_name   : Name of the device to fetch the register base
+ * @param reserve_mem   : Flag to decide whether to reserve memory
+ * region or not.
+ *
+ * @return Pointer to resource if success else null
+ */
+
+void __iomem *msm_camera_get_reg_base(struct platform_device *pdev,
+		char *device_name, int reserve_mem);
+
+/**
+ * @brief      :  Puts device register base
+ *
+ * This function releases the memory region for the specified
+ * resource
+ *
+ * @param pdev   : Pointer to platform device
+ * @param base   : Pointer to base to unmap
+ * @param device_name : Device name
+ * @param reserve_mem   : Flag to decide whether to release memory
+ * region or not.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+int msm_camera_put_reg_base(struct platform_device *pdev, void __iomem *base,
+		char *device_name, int reserve_mem);
+
+/**
+ * @brief      : Gets resource size
+ *
+ * This function returns the size of the resource for the
+ * specified platform device
+ *
+ * @param pdev   : Platform device to get the resource size from
+ * @param device_name   : Name of the device to fetch the register base
+ *
+ * @return size of the resource
+ */
+
+uint32_t msm_camera_get_res_size(struct platform_device *pdev,
+	char *device_name);
+
+/**
+ * @brief      : Selects clock source
+ *
+ *
+ * @param dev : Token of the device
+ * @param clk_info : Clock Info structure
+ * @param clk_src_info : Clock Info structure
+ * @param num_clk : Number of clocks
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int msm_cam_clk_sel_src(struct device *dev, struct msm_cam_clk_info *clk_info,
+		struct msm_cam_clk_info *clk_src_info, int num_clk);
+
+/**
+ * @brief      : Enables the clock
+ *
+ *
+ * @param dev : Token of the device
+ * @param clk_info : Clock Info structure
+ * @param clk_ptr : Pointer to clock structure
+ * @param num_clk : Number of clocks
+ * @param enable : Enable/disable the clock
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int msm_cam_clk_enable(struct device *dev, struct msm_cam_clk_info *clk_info,
+		struct clk **clk_ptr, int num_clk, int enable);
+
+/**
+ * @brief      : Configures voltage regulator
+ *
+ *
+ * @param dev : Token of the device
+ * @param cam_vreg : Regulator dt structure
+ * @param num_vreg : Number of regulators
+ * @param vreg_seq : Regulator sequence type
+ * @param num_clk : Number of clocks
+ * @param reg_ptr : Regulator pointer
+ * @param config : Enable/disable configuring the regulator
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int msm_camera_config_vreg(struct device *dev, struct camera_vreg_t *cam_vreg,
+		int num_vreg, enum msm_camera_vreg_name_t *vreg_seq,
+		int num_vreg_seq, struct regulator **reg_ptr, int config);
+
+/**
+ * @brief      : Enables voltage regulator
+ *
+ *
+ * @param dev : Token of the device
+ * @param cam_vreg : Regulator dt structure
+ * @param num_vreg : Number of regulators
+ * @param vreg_seq : Regulator sequence type
+ * @param num_clk : Number of clocks
+ * @param reg_ptr : Regulator pointer
+ * @param config : Enable/disable configuring the regulator
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int msm_camera_enable_vreg(struct device *dev, struct camera_vreg_t *cam_vreg,
+		int num_vreg, enum msm_camera_vreg_name_t *vreg_seq,
+		int num_vreg_seq, struct regulator **reg_ptr, int enable);
+
+/**
+ * @brief      : Sets table of GPIOs
+ *
+ * @param gpio_tbl : GPIO table parsed from dt
+ * @param gpio_tbl_size : Size of GPIO table
+ * @param gpio_en : Enable/disable the GPIO
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int msm_camera_set_gpio_table(struct msm_gpio_set_tbl *gpio_tbl,
+	uint8_t gpio_tbl_size, int gpio_en);
+
+/**
+ * @brief      : Configures single voltage regulator
+ *
+ *
+ * @param dev : Token of the device
+ * @param cam_vreg : Regulator dt structure
+ * @param num_vreg : Number of regulators
+ * @param reg_ptr : Regulator pointer
+ * @param config : Enable/disable configuring the regulator
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int msm_camera_config_single_vreg(struct device *dev,
+	struct camera_vreg_t *cam_vreg, struct regulator **reg_ptr, int config);
+
+/**
+ * @brief      : Request table of gpios
+ *
+ *
+ * @param gpio_tbl : Table of GPIOs
+ * @param size : Size of table
+ * @param gpio_en : Enable/disable the gpio
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int msm_camera_request_gpio_table(struct gpio *gpio_tbl, uint8_t size,
+	int gpio_en);
+
+#endif /* _CAM_SENSOR_SOC_API_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
new file mode 100644
index 0000000..44294e8
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
@@ -0,0 +1,1399 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include "cam_sensor_util.h"
+#include "cam_sensor_soc_api.h"
+
+#define CAM_SENSOR_PINCTRL_STATE_SLEEP "cam_suspend"
+#define CAM_SENSOR_PINCTRL_STATE_DEFAULT "cam_default"
+
+/* True when config_val is a non-zero voltage within [min, max].
+ * All arguments parenthesized so expression arguments expand safely.
+ */
+#define VALIDATE_VOLTAGE(min, max, config_val) ((config_val) && \
+	((config_val) >= (min)) && ((config_val) <= (max)))
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+/**
+ * cam_sensor_get_i2c_ptr - Allocate a settings node and append it to the list
+ * @i2c_reg_settings: settings array whose list the new node joins
+ * @size: number of reg_setting entries to allocate for the node
+ *
+ * Returns the new node with @size zeroed entries, or NULL on allocation
+ * failure.  The node is linked into the list only after both allocations
+ * succeed, so the failure path never has to unlink a half-built node
+ * (the original linked first and had to list_del() on failure).
+ */
+static struct i2c_settings_list*
+	cam_sensor_get_i2c_ptr(struct i2c_settings_array *i2c_reg_settings,
+		uint32_t size)
+{
+	struct i2c_settings_list *tmp;
+
+	/* kzalloc needs no cast in C; sizeof *tmp tracks the pointee type */
+	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+	if (tmp == NULL)
+		return NULL;
+
+	/* kcalloc checks the count * size multiplication for overflow */
+	tmp->i2c_settings.reg_setting =
+		kcalloc(size, sizeof(struct cam_sensor_i2c_reg_array),
+			GFP_KERNEL);
+	if (tmp->i2c_settings.reg_setting == NULL) {
+		kfree(tmp);
+		return NULL;
+	}
+	tmp->i2c_settings.size = size;
+
+	list_add_tail(&(tmp->list), &(i2c_reg_settings->list_head));
+
+	return tmp;
+}
+
+/*
+ * Free every i2c settings node queued on @i2c_array, re-initialize the
+ * list head, and mark the settings invalid.  Returns 0, or -EINVAL for
+ * a NULL argument.
+ */
+int32_t delete_request(struct i2c_settings_array *i2c_array)
+{
+	struct i2c_settings_list *entry = NULL, *next = NULL;
+
+	if (i2c_array == NULL) {
+		pr_err("%s:%d ::FATAL:: Invalid argument\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+
+	/* _safe variant: nodes are unlinked and freed while iterating */
+	list_for_each_entry_safe(entry, next,
+		&(i2c_array->list_head), list) {
+		list_del(&(entry->list));
+		kfree(entry->i2c_settings.reg_setting);
+		kfree(entry);
+	}
+
+	INIT_LIST_HEAD(&(i2c_array->list_head));
+	i2c_array->is_settings_valid = 0;
+
+	return 0;
+}
+
+/**
+ * cam_sensor_handle_delay - Attach an unconditional-wait delay to the
+ * most recently parsed settings node.
+ * @cmd_buf: cursor into the command buffer; advanced past the wait cmd
+ * @generic_op_code: HW vs SW unconditional wait discriminator
+ * @i2c_reg_settings: settings array being built (unused here directly)
+ * @offset: number of entries already written; must be > 0
+ * @byte_cnt: running byte count; advanced past the wait cmd
+ * @list_ptr: list node of the settings entry the delay applies to
+ *
+ * Fix: the original NULL guard tested the local i2c_list, which had
+ * just been initialized to NULL, so the function unconditionally
+ * returned -EINVAL.  The guard now validates the caller's list_ptr.
+ *
+ * Returns 0 on success, -EINVAL on bad input.
+ */
+int32_t cam_sensor_handle_delay(
+	uint32_t **cmd_buf,
+	uint16_t generic_op_code,
+	struct i2c_settings_array *i2c_reg_settings,
+	uint32_t offset, uint32_t *byte_cnt,
+	struct list_head *list_ptr)
+{
+	int32_t rc = 0;
+	struct cam_cmd_unconditional_wait *cmd_uncond_wait =
+		(struct cam_cmd_unconditional_wait *) *cmd_buf;
+	struct i2c_settings_list *i2c_list = NULL;
+
+	if (list_ptr == NULL) {
+		pr_err("%s:%d Invalid list ptr\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+
+	if (offset > 0) {
+		i2c_list =
+			list_entry(list_ptr, struct i2c_settings_list, list);
+		/* HW wait attaches to the last register written; SW wait
+		 * applies to the settings block as a whole.
+		 */
+		if (generic_op_code ==
+			CAMERA_SENSOR_WAIT_OP_HW_UCND)
+			i2c_list->i2c_settings.
+				reg_setting[offset - 1].delay =
+				cmd_uncond_wait->delay;
+		else
+			i2c_list->i2c_settings.delay =
+				cmd_uncond_wait->delay;
+		(*cmd_buf) +=
+			sizeof(
+			struct cam_cmd_unconditional_wait) / sizeof(uint32_t);
+		(*byte_cnt) +=
+			sizeof(
+			struct cam_cmd_unconditional_wait);
+	} else {
+		pr_err("%s: %d Error: Delay Rxed Before any buffer: %d\n",
+			__func__, __LINE__, offset);
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+/*
+ * Queue a conditional-wait (poll) instruction: allocate a single-entry
+ * settings node, copy the poll address/data/timeout into it, and advance
+ * the command-buffer cursor past the parsed cam_cmd_conditional_wait.
+ * Returns 0 on success, -ENOMEM if the node cannot be allocated.
+ */
+int32_t cam_sensor_handle_poll(
+	uint32_t **cmd_buf,
+	struct i2c_settings_array *i2c_reg_settings,
+	uint32_t *byte_cnt, int32_t *offset,
+	struct list_head **list_ptr)
+{
+	struct cam_cmd_conditional_wait *poll_cmd =
+		(struct cam_cmd_conditional_wait *) *cmd_buf;
+	struct i2c_settings_list *node;
+
+	node = cam_sensor_get_i2c_ptr(i2c_reg_settings, 1);
+	if (node == NULL || node->i2c_settings.reg_setting == NULL) {
+		pr_err("%s: %d Failed in allocating mem for list\n",
+			__func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	node->op_code = CAM_SENSOR_I2C_POLL;
+	node->i2c_settings.data_type = poll_cmd->data_type;
+	node->i2c_settings.addr_type = poll_cmd->addr_type;
+	node->i2c_settings.reg_setting->reg_addr = poll_cmd->reg_addr;
+	node->i2c_settings.reg_setting->reg_data = poll_cmd->reg_data;
+	/* poll timeout travels in the entry's delay field */
+	node->i2c_settings.reg_setting->delay = poll_cmd->timeout;
+
+	*cmd_buf += sizeof(struct cam_cmd_conditional_wait) /
+		sizeof(uint32_t);
+	*byte_cnt += sizeof(struct cam_cmd_conditional_wait);
+	*offset += 1;
+	*list_ptr = &(node->list);
+
+	return 0;
+}
+
+/*
+ * Translate one CSL random-write command into an i2c settings node:
+ * one reg_setting entry per (addr, data) payload pair.  Reports the
+ * bytes consumed through cmd_length_in_bytes and bumps *offset by the
+ * number of entries written.  Returns 0 or -ENOMEM.
+ */
+int32_t cam_sensor_handle_random_write(
+	struct cam_cmd_i2c_random_wr *cam_cmd_i2c_random_wr,
+	struct i2c_settings_array *i2c_reg_settings,
+	uint16_t *cmd_length_in_bytes, int32_t *offset,
+	struct list_head **list)
+{
+	struct i2c_settings_list *node;
+	uint32_t count = cam_cmd_i2c_random_wr->header.count;
+	uint32_t idx;
+
+	node = cam_sensor_get_i2c_ptr(i2c_reg_settings, count);
+	if (!node || !node->i2c_settings.reg_setting) {
+		pr_err("%s: %d Failed in allocating i2c_list\n",
+			__func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	*cmd_length_in_bytes = sizeof(struct i2c_rdwr_header) +
+		count * sizeof(struct i2c_random_wr_payload);
+	node->op_code = CAM_SENSOR_I2C_WRITE_RANDOM;
+	node->i2c_settings.addr_type =
+		cam_cmd_i2c_random_wr->header.addr_type;
+	node->i2c_settings.data_type =
+		cam_cmd_i2c_random_wr->header.data_type;
+
+	for (idx = 0; idx < count; idx++) {
+		struct i2c_random_wr_payload *payload =
+			&cam_cmd_i2c_random_wr->random_wr_payload[idx];
+
+		node->i2c_settings.reg_setting[idx].reg_addr =
+			payload->reg_addr;
+		node->i2c_settings.reg_setting[idx].reg_data =
+			payload->reg_data;
+		node->i2c_settings.reg_setting[idx].data_mask = 0;
+	}
+
+	*offset += idx;
+	*list = &(node->list);
+
+	return 0;
+}
+
+/**
+ * cam_sensor_i2c_pkt_parser - Parse CSL CCI command buffers into an
+ * i2c settings list.
+ * @i2c_reg_settings: settings array the parsed nodes are appended to
+ * @cmd_desc: command buffer descriptors to walk
+ * @num_cmd_buffers: number of descriptors in @cmd_desc
+ *
+ * Handle multiple I2C RD/WR and WAIT cmd formats in one command
+ * buffer, for example, a command buffer of m x RND_WR + 1 x HW_
+ * WAIT + n x RND_WR with num_cmd_buf = 1. Do not expect RD/WR
+ * with different cmd_type and op_code in one command buffer.
+ *
+ * Returns 0 on success, negative error code on parse or allocation
+ * failure.  (rc/i widened from int16_t so int32_t error codes from the
+ * helpers are never truncated; the wrong "random read" log text in the
+ * write and poll paths is also corrected.)
+ */
+int cam_sensor_i2c_pkt_parser(struct i2c_settings_array *i2c_reg_settings,
+	struct cam_cmd_buf_desc   *cmd_desc, int32_t num_cmd_buffers)
+{
+	int32_t                   rc = 0, i = 0;
+	size_t                    len_of_buff = 0;
+	uint64_t                  generic_ptr;
+
+	for (i = 0; i < num_cmd_buffers; i++) {
+		uint32_t                  *cmd_buf = NULL;
+		struct common_header      *cmm_hdr;
+		uint16_t                  generic_op_code;
+		uint32_t                  byte_cnt = 0;
+		uint32_t                  j = 0;
+		struct list_head          *list = NULL;
+
+		/*
+		 * It is not expected the same settings to
+		 * be spread across multiple cmd buffers
+		 */
+
+		CDBG("%s:%d Total cmd Buf in Bytes: %d\n", __func__,
+			__LINE__, cmd_desc[i].length);
+
+		if (!cmd_desc[i].length)
+			continue;
+
+		rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
+			(uint64_t *)&generic_ptr, &len_of_buff);
+		cmd_buf = (uint32_t *)generic_ptr;
+		if (rc < 0) {
+			pr_err("%s:%d Failed in getting cmd hdl: %d Err: %d Buffer Len: %ld\n",
+				__func__, __LINE__,
+				cmd_desc[i].mem_handle, rc,
+				len_of_buff);
+			return rc;
+		}
+		cmd_buf += cmd_desc[i].offset / sizeof(uint32_t);
+
+		while (byte_cnt < cmd_desc[i].length) {
+			cmm_hdr = (struct common_header *)cmd_buf;
+			generic_op_code = cmm_hdr->third_byte;
+			switch (cmm_hdr->cmd_type) {
+			case CAMERA_SENSOR_CMD_TYPE_I2C_RNDM_WR: {
+				uint16_t cmd_length_in_bytes   = 0;
+				struct cam_cmd_i2c_random_wr
+					*cam_cmd_i2c_random_wr =
+					(struct cam_cmd_i2c_random_wr *)cmd_buf;
+
+				rc = cam_sensor_handle_random_write(
+					cam_cmd_i2c_random_wr,
+					i2c_reg_settings,
+					&cmd_length_in_bytes, &j, &list);
+				if (rc < 0) {
+					/* was mislabelled "random read" */
+					pr_err("%s:%d :Error: Failed in random write %d\n",
+						__func__, __LINE__, rc);
+					return rc;
+				}
+
+				cmd_buf += cmd_length_in_bytes /
+					sizeof(uint32_t);
+				byte_cnt += cmd_length_in_bytes;
+				break;
+			}
+			case CAMERA_SENSOR_CMD_TYPE_WAIT: {
+				if (generic_op_code ==
+					CAMERA_SENSOR_WAIT_OP_HW_UCND ||
+					generic_op_code ==
+						CAMERA_SENSOR_WAIT_OP_SW_UCND) {
+
+					rc = cam_sensor_handle_delay(
+						&cmd_buf, generic_op_code,
+						i2c_reg_settings, j, &byte_cnt,
+						list);
+					if (rc < 0) {
+						pr_err("%s:%d :Error: Failed in handling delay %d\n",
+							__func__, __LINE__, rc);
+						return rc;
+					}
+
+				} else if (generic_op_code ==
+					CAMERA_SENSOR_WAIT_OP_COND) {
+					rc = cam_sensor_handle_poll(
+						&cmd_buf, i2c_reg_settings,
+						&byte_cnt, &j, &list);
+					if (rc < 0) {
+						/* was mislabelled "random read" */
+						pr_err("%s:%d :Error: Failed in handling poll %d\n",
+							__func__, __LINE__, rc);
+						return rc;
+					}
+				} else {
+					pr_err("%s: %d Wrong Wait Command: %d\n",
+						__func__, __LINE__,
+						generic_op_code);
+					return -EINVAL;
+				}
+				break;
+			}
+			default:
+				pr_err("%s:%d Invalid Command Type:%d\n",
+					__func__, __LINE__, cmm_hdr->cmd_type);
+				return -EINVAL;
+			}
+		}
+		i2c_reg_settings->is_settings_valid = 1;
+	}
+
+	return rc;
+}
+
+/* Map a regulator power-sequence type to its dt regulator name, or
+ * NULL for sequence types that have no regulator.
+ */
+static const char *cam_sensor_vreg_name(int seq_type)
+{
+	switch (seq_type) {
+	case SENSOR_VDIG:        return "cam_vdig";
+	case SENSOR_VIO:         return "cam_vio";
+	case SENSOR_VANA:        return "cam_vana";
+	case SENSOR_VAF:         return "cam_vaf";
+	case SENSOR_CUSTOM_REG1: return "cam_v_custom1";
+	case SENSOR_CUSTOM_REG2: return "cam_v_custom2";
+	default:                 return NULL;
+	}
+}
+
+/**
+ * msm_camera_fill_vreg_params - Resolve each power setting's regulator
+ * index from the dt regulator table.
+ * @cam_vreg: regulator table parsed from dt
+ * @num_vreg: number of entries in @cam_vreg
+ * @power_setting: power sequence entries to fill in
+ * @power_setting_size: number of entries in @power_setting
+ *
+ * For every regulator-type entry, seq_val is set to the matching
+ * cam_vreg index (INVALID_VREG if no name matches), and a valid
+ * config_val overrides the dt min/max voltage.  The original open-coded
+ * six identical switch arms; they are collapsed via the name lookup
+ * above.  The unknown-type log now reports seq_type, the value actually
+ * switched on, instead of seq_val.
+ *
+ * Returns 0 on success, -EINVAL on bad input.
+ */
+int32_t msm_camera_fill_vreg_params(struct camera_vreg_t *cam_vreg,
+	int num_vreg, struct cam_sensor_power_setting *power_setting,
+	uint16_t power_setting_size)
+{
+	int32_t j = 0, i = 0;
+	const char *reg_name;
+
+	/* Validate input parameters */
+	if (!cam_vreg || !power_setting) {
+		pr_err("%s:%d failed: cam_vreg %pK power_setting %pK", __func__,
+			__LINE__,  cam_vreg, power_setting);
+		return -EINVAL;
+	}
+
+	/* Validate size of num_vreg */
+	if (num_vreg <= 0) {
+		pr_err("failed: num_vreg %d", num_vreg);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < power_setting_size; i++) {
+		reg_name = cam_sensor_vreg_name(power_setting[i].seq_type);
+		if (reg_name == NULL) {
+			pr_err("%s:%d invalid seq_type %d\n", __func__,
+				__LINE__, power_setting[i].seq_type);
+			continue;
+		}
+
+		for (j = 0; j < num_vreg; j++) {
+			if (strcmp(cam_vreg[j].reg_name, reg_name))
+				continue;
+			CDBG("%s:%d i %d j %d %s\n",
+				__func__, __LINE__, i, j, reg_name);
+			power_setting[i].seq_val = j;
+			/* A valid requested voltage pins the dt range */
+			if (VALIDATE_VOLTAGE(
+				cam_vreg[j].min_voltage,
+				cam_vreg[j].max_voltage,
+				power_setting[i].config_val)) {
+				cam_vreg[j].min_voltage =
+				cam_vreg[j].max_voltage =
+				power_setting[i].config_val;
+			}
+			break;
+		}
+		if (j == num_vreg)
+			power_setting[i].seq_val = INVALID_VREG;
+	}
+
+	return 0;
+}
+
+/**
+ * msm_camera_get_dt_gpio_req_tbl - Build the gpio request table from dt
+ * @of_node: device node to read qcom,gpio-req-tbl-* properties from
+ * @gconf: destination; cam_gpio_req_tbl/_size are populated
+ * @gpio_array: gpio numbers previously read via of_get_gpio
+ * @gpio_array_size: number of entries in @gpio_array
+ *
+ * Returns 0 (also when the property is absent) or a negative error.
+ * Fix: the invalid-index path used to `return -EINVAL` directly,
+ * leaking both val_array and the freshly allocated request table; it
+ * now unwinds through the shared cleanup labels.
+ */
+int32_t msm_camera_get_dt_gpio_req_tbl(struct device_node *of_node,
+	struct msm_camera_gpio_conf *gconf, uint16_t *gpio_array,
+	uint16_t gpio_array_size)
+{
+	int32_t rc = 0, i = 0;
+	uint32_t count = 0, *val_array = NULL;
+
+	if (!of_get_property(of_node, "qcom,gpio-req-tbl-num", &count))
+		return 0;
+
+	count /= sizeof(uint32_t);
+	if (!count) {
+		pr_err("%s qcom,gpio-req-tbl-num 0\n", __func__);
+		return 0;
+	}
+
+	val_array = kcalloc(count, sizeof(uint32_t), GFP_KERNEL);
+	if (!val_array)
+		return -ENOMEM;
+
+	gconf->cam_gpio_req_tbl = kcalloc(count, sizeof(struct gpio),
+		GFP_KERNEL);
+	if (!gconf->cam_gpio_req_tbl) {
+		rc = -ENOMEM;
+		goto free_val_array;
+	}
+	gconf->cam_gpio_req_tbl_size = count;
+
+	/* tbl-num entries index into gpio_array */
+	rc = of_property_read_u32_array(of_node, "qcom,gpio-req-tbl-num",
+		val_array, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto free_gpio_req_tbl;
+	}
+
+	for (i = 0; i < count; i++) {
+		if (val_array[i] >= gpio_array_size) {
+			pr_err("%s gpio req tbl index %d invalid\n",
+				__func__, val_array[i]);
+			rc = -EINVAL;
+			goto free_gpio_req_tbl;
+		}
+		gconf->cam_gpio_req_tbl[i].gpio = gpio_array[val_array[i]];
+		CDBG("%s cam_gpio_req_tbl[%d].gpio = %d\n", __func__, i,
+			gconf->cam_gpio_req_tbl[i].gpio);
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,gpio-req-tbl-flags",
+		val_array, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto free_gpio_req_tbl;
+	}
+
+	for (i = 0; i < count; i++) {
+		gconf->cam_gpio_req_tbl[i].flags = val_array[i];
+		CDBG("%s cam_gpio_req_tbl[%d].flags = %ld\n", __func__, i,
+			gconf->cam_gpio_req_tbl[i].flags);
+	}
+
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(of_node,
+			"qcom,gpio-req-tbl-label", i,
+			&gconf->cam_gpio_req_tbl[i].label);
+		CDBG("%s cam_gpio_req_tbl[%d].label = %s\n", __func__, i,
+			gconf->cam_gpio_req_tbl[i].label);
+		if (rc < 0) {
+			pr_err("%s failed %d\n", __func__, __LINE__);
+			goto free_gpio_req_tbl;
+		}
+	}
+
+	kfree(val_array);
+
+	return rc;
+
+free_gpio_req_tbl:
+	kfree(gconf->cam_gpio_req_tbl);
+free_val_array:
+	kfree(val_array);
+	gconf->cam_gpio_req_tbl_size = 0;
+
+	return rc;
+}
+
+/* Read one optional qcom,gpio-* property and record the mapped gpio
+ * number under @seq_type.  Returns 0 when the property is absent or
+ * valid, a negative error on read failure or an out-of-range index.
+ */
+static int cam_sensor_init_one_gpio(struct device_node *of_node,
+	struct msm_camera_gpio_conf *gconf, uint16_t *gpio_array,
+	uint16_t gpio_array_size, const char *prop, int seq_type)
+{
+	int rc, val = 0;
+
+	rc = of_property_read_u32(of_node, prop, &val);
+	if (rc == -EINVAL)
+		return 0;	/* property not present: not an error */
+	if (rc < 0) {
+		pr_err("%s:%d read %s failed rc %d\n",
+			__func__, __LINE__, prop, rc);
+		return rc;
+	}
+	if (val >= gpio_array_size) {
+		pr_err("%s:%d %s invalid %d\n",
+			__func__, __LINE__, prop, val);
+		return -EINVAL;
+	}
+	gconf->gpio_num_info->gpio_num[seq_type] = gpio_array[val];
+	gconf->gpio_num_info->valid[seq_type] = 1;
+	CDBG("%s %s %d\n", __func__, prop,
+		gconf->gpio_num_info->gpio_num[seq_type]);
+	return 0;
+}
+
+/**
+ * msm_camera_init_gpio_pin_tbl - Populate gpio_num_info from dt
+ * @of_node: device node carrying the qcom,gpio-* index properties
+ * @gconf: destination; gpio_num_info is allocated and filled here
+ * @gpio_array: gpio numbers previously read via of_get_gpio
+ * @gpio_array_size: number of entries in @gpio_array
+ *
+ * The original repeated the same ~18-line stanza nine times and, in
+ * the qcom,gpio-vio stanza, forgot to set rc = -EINVAL before jumping
+ * to cleanup — so an invalid vio index freed gpio_num_info yet
+ * returned success.  The table-driven helper fixes that and keeps all
+ * stanzas consistent.  Returns 0 on success, negative error otherwise
+ * (gpio_num_info is freed and NULLed on failure).
+ */
+int msm_camera_init_gpio_pin_tbl(struct device_node *of_node,
+	struct msm_camera_gpio_conf *gconf, uint16_t *gpio_array,
+	uint16_t gpio_array_size)
+{
+	static const struct {
+		const char *prop;
+		int seq_type;
+	} gpio_map[] = {
+		{ "qcom,gpio-vana",    SENSOR_VANA },
+		{ "qcom,gpio-vio",     SENSOR_VIO },
+		{ "qcom,gpio-vaf",     SENSOR_VAF },
+		{ "qcom,gpio-vdig",    SENSOR_VDIG },
+		{ "qcom,gpio-reset",   SENSOR_RESET },
+		{ "qcom,gpio-standby", SENSOR_STANDBY },
+		{ "qcom,gpio-af-pwdm", SENSOR_VAF_PWDM },
+		{ "qcom,gpio-custom1", SENSOR_CUSTOM_GPIO1 },
+		{ "qcom,gpio-custom2", SENSOR_CUSTOM_GPIO2 },
+	};
+	int rc = 0;
+	size_t i;
+
+	gconf->gpio_num_info = kzalloc(sizeof(struct msm_camera_gpio_num_info),
+		GFP_KERNEL);
+	if (!gconf->gpio_num_info)
+		return -ENOMEM;
+
+	for (i = 0; i < ARRAY_SIZE(gpio_map); i++) {
+		rc = cam_sensor_init_one_gpio(of_node, gconf, gpio_array,
+			gpio_array_size, gpio_map[i].prop,
+			gpio_map[i].seq_type);
+		if (rc < 0)
+			goto free_gpio_info;
+	}
+
+	return 0;
+
+free_gpio_info:
+	kfree(gconf->gpio_num_info);
+	gconf->gpio_num_info = NULL;
+	return rc;
+}
+
+/**
+ * cam_sensor_get_dt_vreg_data - Parse the qcom,cam-vreg-* dt properties
+ * @of_node: device node to read from
+ * @cam_vreg: out: allocated regulator table (caller owns and frees)
+ * @num_vreg: out: number of entries in *cam_vreg (0 when absent)
+ *
+ * Returns 0 on success or when no regulators are declared, negative
+ * error otherwise.  Fix: the original only special-cased -EINVAL from
+ * of_property_count_strings; any other negative count (e.g. -ENODATA)
+ * fell through into kcalloc() with a negative size.  All non-positive
+ * counts are now treated as "no regulators".
+ */
+int cam_sensor_get_dt_vreg_data(struct device_node *of_node,
+	struct camera_vreg_t **cam_vreg, int *num_vreg)
+{
+	int rc = 0, i = 0;
+	int32_t count = 0;
+	uint32_t *vreg_array = NULL;
+	struct camera_vreg_t *vreg = NULL;
+
+	count = of_property_count_strings(of_node, "qcom,cam-vreg-name");
+	CDBG("%s qcom,cam-vreg-name count %d\n", __func__, count);
+
+	if (count <= 0) {
+		pr_err("%s:%d number of entries is 0 or not present in dts\n",
+			__func__, __LINE__);
+		*num_vreg = 0;
+		return 0;
+	}
+
+	vreg = kcalloc(count, sizeof(*vreg), GFP_KERNEL);
+	if (!vreg)
+		return -ENOMEM;
+
+	*cam_vreg = vreg;
+	*num_vreg = count;
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(of_node,
+			"qcom,cam-vreg-name", i,
+			&vreg[i].reg_name);
+		CDBG("%s reg_name[%d] = %s\n", __func__, i,
+			vreg[i].reg_name);
+		if (rc < 0) {
+			pr_err("%s failed %d\n", __func__, __LINE__);
+			goto free_vreg;
+		}
+	}
+
+	vreg_array = kcalloc(count, sizeof(uint32_t), GFP_KERNEL);
+	if (!vreg_array) {
+		rc = -ENOMEM;
+		goto free_vreg;
+	}
+
+	for (i = 0; i < count; i++)
+		vreg[i].type = VREG_TYPE_DEFAULT;
+
+	/* Each of the following properties is optional; absence
+	 * (-EINVAL) leaves the defaults in place.
+	 */
+	rc = of_property_read_u32_array(of_node, "qcom,cam-vreg-type",
+		vreg_array, count);
+	if (rc != -EINVAL) {
+		if (rc < 0) {
+			pr_err("%s failed %d\n", __func__, __LINE__);
+			goto free_vreg_array;
+		} else {
+			for (i = 0; i < count; i++) {
+				vreg[i].type = vreg_array[i];
+				CDBG("%s cam_vreg[%d].type = %d\n",
+					__func__, i, vreg[i].type);
+			}
+		}
+	} else {
+		CDBG("%s:%d no qcom,cam-vreg-type entries in dts\n",
+			__func__, __LINE__);
+		rc = 0;
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,cam-vreg-min-voltage",
+		vreg_array, count);
+	if (rc != -EINVAL) {
+		if (rc < 0) {
+			pr_err("%s failed %d\n", __func__, __LINE__);
+			goto free_vreg_array;
+		} else {
+			for (i = 0; i < count; i++) {
+				vreg[i].min_voltage = vreg_array[i];
+				CDBG("%s cam_vreg[%d].min_voltage = %d\n",
+					__func__, i, vreg[i].min_voltage);
+			}
+		}
+	} else {
+		CDBG("%s:%d no qcom,cam-vreg-min-voltage entries in dts\n",
+			__func__, __LINE__);
+		rc = 0;
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,cam-vreg-max-voltage",
+		vreg_array, count);
+	if (rc != -EINVAL) {
+		if (rc < 0) {
+			pr_err("%s failed %d\n", __func__, __LINE__);
+			goto free_vreg_array;
+		} else {
+			for (i = 0; i < count; i++) {
+				vreg[i].max_voltage = vreg_array[i];
+				CDBG("%s cam_vreg[%d].max_voltage = %d\n",
+					__func__, i, vreg[i].max_voltage);
+			}
+		}
+	} else {
+		CDBG("%s:%d no qcom,cam-vreg-max-voltage entries in dts\n",
+			__func__, __LINE__);
+		rc = 0;
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,cam-vreg-op-mode",
+		vreg_array, count);
+	if (rc != -EINVAL) {
+		if (rc < 0) {
+			pr_err("%s failed %d\n", __func__, __LINE__);
+			goto free_vreg_array;
+		} else {
+			for (i = 0; i < count; i++) {
+				vreg[i].op_mode = vreg_array[i];
+				CDBG("%s cam_vreg[%d].op_mode = %d\n",
+					__func__, i, vreg[i].op_mode);
+			}
+		}
+	} else {
+		CDBG("%s:%d no qcom,cam-vreg-op-mode entries in dts\n",
+			__func__, __LINE__);
+		rc = 0;
+	}
+
+	kfree(vreg_array);
+
+	return rc;
+
+free_vreg_array:
+	kfree(vreg_array);
+free_vreg:
+	kfree(vreg);
+	*num_vreg = 0;
+
+	return rc;
+}
+
+/*
+ * Acquire the sensor pinctrl handle and resolve its active
+ * ("cam_default") and suspend ("cam_suspend") states into
+ * @sensor_pctrl.  Each lookup failure is logged and mapped to -EINVAL;
+ * fields resolved before a failure remain assigned, as before.
+ */
+int msm_camera_pinctrl_init(
+	struct msm_pinctrl_info *sensor_pctrl, struct device *dev) {
+
+	sensor_pctrl->pinctrl = devm_pinctrl_get(dev);
+	if (IS_ERR_OR_NULL(sensor_pctrl->pinctrl)) {
+		pr_err("%s:%d Getting pinctrl handle failed\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+
+	sensor_pctrl->gpio_state_active = pinctrl_lookup_state(
+		sensor_pctrl->pinctrl, CAM_SENSOR_PINCTRL_STATE_DEFAULT);
+	if (IS_ERR_OR_NULL(sensor_pctrl->gpio_state_active)) {
+		pr_err("%s:%d Failed to get the active state pinctrl handle\n",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+
+	sensor_pctrl->gpio_state_suspend = pinctrl_lookup_state(
+		sensor_pctrl->pinctrl, CAM_SENSOR_PINCTRL_STATE_SLEEP);
+	if (IS_ERR_OR_NULL(sensor_pctrl->gpio_state_suspend)) {
+		pr_err("%s:%d Failed to get the suspend state pinctrl handle\n",
+				__func__, __LINE__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * msm_cam_sensor_handle_reg_gpio - Drive the companion gpio, if any,
+ * for a regulator power-sequence entry.
+ * @seq_type: power sequence type, used directly as the gpio table index
+ * @gconf: gpio configuration parsed from dt
+ * @val: gpio level to set
+ *
+ * Returns 0 (including when no gpio is mapped), -EINVAL on bad input.
+ */
+int msm_cam_sensor_handle_reg_gpio(int seq_type,
+	struct msm_camera_gpio_conf *gconf, int val)
+{
+
+	int gpio_offset = -1;
+
+	if (!gconf) {
+		pr_err("ERR:%s: Input Parameters are not proper\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Callers in the regulator path do not pre-check gpio_num_info,
+	 * which is NULL when the dt declares no camera gpios; the
+	 * original dereferenced it unconditionally.  Treat "no gpio
+	 * table" as nothing to toggle.
+	 */
+	if (!gconf->gpio_num_info)
+		return 0;
+
+	CDBG("%s: %d Seq type: %d, config: %d", __func__, __LINE__,
+		seq_type, val);
+
+	gpio_offset = seq_type;
+
+	if (gconf->gpio_num_info->valid[gpio_offset] == 1) {
+		CDBG("%s: %d VALID GPIO offset: %d, seqtype: %d\n",
+			__func__, __LINE__,	gpio_offset, seq_type);
+		gpio_set_value_cansleep(
+			gconf->gpio_num_info->gpio_num
+			[gpio_offset], val);
+	}
+
+	return 0;
+}
+
+/**
+ * msm_sensor_driver_get_gpio_data - Parse all sensor gpio data from dt
+ * @gpio_conf: out: allocated gpio configuration (NULL on failure)
+ * @of_node: device node to parse
+ *
+ * Returns 0 on success or when the node declares no gpios, negative
+ * error otherwise.  Fix: when the gpio_array allocation failed, the
+ * original jumped to cleanup with rc still 0 and so reported success
+ * while leaving *gpio_conf NULL; rc is now set to -ENOMEM.
+ */
+int32_t msm_sensor_driver_get_gpio_data(
+	struct msm_camera_gpio_conf **gpio_conf,
+	struct device_node *of_node)
+{
+	int32_t                      rc = 0, i = 0;
+	uint16_t                    *gpio_array = NULL;
+	int16_t                     gpio_array_size = 0;
+	struct msm_camera_gpio_conf *gconf = NULL;
+
+	/* Validate input parameters */
+	if (!of_node) {
+		pr_err("failed: invalid param of_node %pK", of_node);
+		return -EINVAL;
+	}
+
+	gpio_array_size = of_gpio_count(of_node);
+	CDBG("gpio count %d\n", gpio_array_size);
+	if (gpio_array_size <= 0)
+		return 0;
+
+	gconf = kzalloc(sizeof(*gconf), GFP_KERNEL);
+	if (!gconf)
+		return -ENOMEM;
+
+	*gpio_conf = gconf;
+
+	gpio_array = kcalloc(gpio_array_size, sizeof(uint16_t), GFP_KERNEL);
+	if (!gpio_array) {
+		rc = -ENOMEM;
+		goto free_gpio_conf;
+	}
+
+	for (i = 0; i < gpio_array_size; i++) {
+		/* NOTE(review): of_get_gpio can return a negative errno,
+		 * which would be truncated into this uint16_t — the dt is
+		 * presumably trusted to be valid here; confirm upstream. */
+		gpio_array[i] = of_get_gpio(of_node, i);
+		CDBG("gpio_array[%d] = %d", i, gpio_array[i]);
+	}
+	rc = msm_camera_get_dt_gpio_req_tbl(of_node, gconf, gpio_array,
+		gpio_array_size);
+	if (rc < 0) {
+		pr_err("failed in msm_camera_get_dt_gpio_req_tbl\n");
+		goto free_gpio_array;
+	}
+
+	rc = msm_camera_init_gpio_pin_tbl(of_node, gconf, gpio_array,
+		gpio_array_size);
+	if (rc < 0) {
+		pr_err("failed in msm_camera_init_gpio_pin_tbl\n");
+		goto free_gpio_req_tbl;
+	}
+	kfree(gpio_array);
+
+	return rc;
+
+free_gpio_req_tbl:
+	kfree(gconf->cam_gpio_req_tbl);
+free_gpio_array:
+	kfree(gpio_array);
+free_gpio_conf:
+	kfree(gconf);
+	*gpio_conf = NULL;
+
+	return rc;
+}
+
+int cam_sensor_core_power_up(struct cam_sensor_power_ctrl_t *ctrl)
+{
+	int rc = 0, index = 0, no_gpio = 0, ret = 0, num_vreg, j = 0;
+	struct cam_sensor_power_setting *power_setting = NULL;
+	struct camera_vreg_t *cam_vreg;
+
+	CDBG("%s:%d\n", __func__, __LINE__);
+	if (!ctrl) {
+		pr_err("failed ctrl %pK\n", ctrl);
+		return -EINVAL;
+	}
+
+	cam_vreg = ctrl->cam_vreg;
+	num_vreg = ctrl->num_vreg;
+
+	if (ctrl->gpio_conf->cam_gpiomux_conf_tbl != NULL)
+		CDBG("%s:%d mux install\n", __func__, __LINE__);
+
+	ret = msm_camera_pinctrl_init(&(ctrl->pinctrl_info), ctrl->dev);
+	if (ret < 0) {
+		pr_err("%s:%d Initialization of pinctrl failed\n",
+				__func__, __LINE__);
+		ctrl->cam_pinctrl_status = 0;
+	} else {
+		ctrl->cam_pinctrl_status = 1;
+	}
+	rc = msm_camera_request_gpio_table(
+		ctrl->gpio_conf->cam_gpio_req_tbl,
+		ctrl->gpio_conf->cam_gpio_req_tbl_size, 1);
+	if (rc < 0)
+		no_gpio = rc;
+	if (ctrl->cam_pinctrl_status) {
+		ret = pinctrl_select_state(ctrl->pinctrl_info.pinctrl,
+			ctrl->pinctrl_info.gpio_state_active);
+		if (ret)
+			pr_err("%s:%d cannot set pin to active state",
+				__func__, __LINE__);
+	}
+
+	for (index = 0; index < ctrl->power_setting_size; index++) {
+		CDBG("%s index %d\n", __func__, index);
+		power_setting = &ctrl->power_setting[index];
+
+		switch (power_setting->seq_type) {
+		case SENSOR_MCLK:
+			if (power_setting->seq_val >= ctrl->clk_info_size) {
+				pr_err("%s:%d :Error: clk index %d >= max %zu\n",
+					__func__, __LINE__,
+					power_setting->seq_val,
+					ctrl->clk_info_size);
+				goto power_up_failed;
+			}
+			for (j = 0; j < num_vreg; j++) {
+				if (!strcmp(cam_vreg[j].reg_name,
+					"cam_clk")) {
+					CDBG("%s:%d Enable cam_clk: %d\n",
+						__func__, __LINE__, j);
+					msm_camera_config_single_vreg(ctrl->dev,
+						&cam_vreg[j],
+						(struct regulator **)
+						&power_setting->data[0],
+						1);
+				}
+			}
+			if (power_setting->config_val)
+				ctrl->clk_info[power_setting->seq_val].
+					clk_rate = power_setting->config_val;
+			rc = msm_camera_clk_enable(ctrl->dev,
+				ctrl->clk_info, ctrl->clk_ptr,
+				ctrl->clk_info_size, true);
+			if (rc < 0) {
+				pr_err("%s: clk enable failed\n", __func__);
+				goto power_up_failed;
+			}
+			break;
+		case SENSOR_RESET:
+		case SENSOR_STANDBY:
+		case SENSOR_CUSTOM_GPIO1:
+		case SENSOR_CUSTOM_GPIO2:
+			if (no_gpio) {
+				pr_err("%s: request gpio failed\n", __func__);
+				return no_gpio;
+			}
+			if (power_setting->seq_val >= CAM_VREG_MAX ||
+				!ctrl->gpio_conf->gpio_num_info) {
+				pr_err("%s gpio index %d >= max %d\n", __func__,
+					power_setting->seq_val,
+					CAM_VREG_MAX);
+				goto power_up_failed;
+			}
+			CDBG("%s:%d gpio set val %d\n",
+				__func__, __LINE__,
+				ctrl->gpio_conf->gpio_num_info->gpio_num
+				[power_setting->seq_val]);
+
+			rc = msm_cam_sensor_handle_reg_gpio(
+				power_setting->seq_type,
+				ctrl->gpio_conf, 1);
+			if (rc < 0) {
+				pr_err("ERR:%s Error in handling VREG GPIO\n",
+					__func__);
+				goto power_up_failed;
+			}
+			break;
+		case SENSOR_VANA:
+		case SENSOR_VDIG:
+		case SENSOR_VIO:
+		case SENSOR_VAF:
+		case SENSOR_VAF_PWDM:
+		case SENSOR_CUSTOM_REG1:
+		case SENSOR_CUSTOM_REG2:
+			if (power_setting->seq_val == INVALID_VREG)
+				break;
+
+			if (power_setting->seq_val >= CAM_VREG_MAX) {
+				pr_err("%s vreg index %d >= max %d\n", __func__,
+					power_setting->seq_val,
+					CAM_VREG_MAX);
+				goto power_up_failed;
+			}
+			if (power_setting->seq_val < ctrl->num_vreg)
+				msm_camera_config_single_vreg(ctrl->dev,
+					&ctrl->cam_vreg
+					[power_setting->seq_val],
+					(struct regulator **)
+					&power_setting->data[0],
+					1);
+			else
+				pr_err("%s: %d usr_idx:%d dts_idx:%d\n",
+					__func__, __LINE__,
+					power_setting->seq_val, ctrl->num_vreg);
+
+			rc = msm_cam_sensor_handle_reg_gpio(
+				power_setting->seq_type,
+				ctrl->gpio_conf, 1);
+			if (rc < 0) {
+				pr_err("ERR:%s Error in handling VREG GPIO\n",
+					__func__);
+				goto power_up_failed;
+			}
+			break;
+		default:
+			pr_err("%s error power seq type %d\n", __func__,
+				power_setting->seq_type);
+			break;
+		}
+		if (power_setting->delay > 20)
+			msleep(power_setting->delay);
+		else if (power_setting->delay)
+			usleep_range(power_setting->delay * 1000,
+				(power_setting->delay * 1000) + 1000);
+	}
+
+	return 0;
+power_up_failed:
+	pr_err("%s:%d failed\n", __func__, __LINE__);
+	for (index--; index >= 0; index--) {
+		CDBG("%s index %d\n", __func__, index);
+		power_setting = &ctrl->power_setting[index];
+		CDBG("%s type %d\n", __func__, power_setting->seq_type);
+		switch (power_setting->seq_type) {
+		case SENSOR_RESET:
+		case SENSOR_STANDBY:
+		case SENSOR_CUSTOM_GPIO1:
+		case SENSOR_CUSTOM_GPIO2:
+			if (!ctrl->gpio_conf->gpio_num_info)
+				continue;
+			if (!ctrl->gpio_conf->gpio_num_info->valid
+				[power_setting->seq_val])
+				continue;
+			gpio_set_value_cansleep(
+				ctrl->gpio_conf->gpio_num_info->gpio_num
+				[power_setting->seq_val], GPIOF_OUT_INIT_LOW);
+			break;
+		case SENSOR_VANA:
+		case SENSOR_VDIG:
+		case SENSOR_VIO:
+		case SENSOR_VAF:
+		case SENSOR_VAF_PWDM:
+		case SENSOR_CUSTOM_REG1:
+		case SENSOR_CUSTOM_REG2:
+			if (power_setting->seq_val < ctrl->num_vreg)
+				msm_camera_config_single_vreg(ctrl->dev,
+					&ctrl->cam_vreg
+					[power_setting->seq_val],
+					(struct regulator **)
+					&power_setting->data[0],
+					0);
+			else
+				pr_err("%s:%d:seq_val: %d > num_vreg: %d\n",
+					__func__, __LINE__,
+					power_setting->seq_val, ctrl->num_vreg);
+
+			msm_cam_sensor_handle_reg_gpio(power_setting->seq_type,
+				ctrl->gpio_conf, GPIOF_OUT_INIT_LOW);
+			break;
+		default:
+			pr_err("%s error power seq type %d\n", __func__,
+				power_setting->seq_type);
+			break;
+		}
+		if (power_setting->delay > 20) {
+			msleep(power_setting->delay);
+		} else if (power_setting->delay) {
+			usleep_range(power_setting->delay * 1000,
+				(power_setting->delay * 1000) + 1000);
+		}
+	}
+	if (ctrl->cam_pinctrl_status) {
+		ret = pinctrl_select_state(ctrl->pinctrl_info.pinctrl,
+				ctrl->pinctrl_info.gpio_state_suspend);
+		if (ret)
+			pr_err("%s:%d cannot set pin to suspend state\n",
+				__func__, __LINE__);
+		devm_pinctrl_put(ctrl->pinctrl_info.pinctrl);
+	}
+	ctrl->cam_pinctrl_status = 0;
+	msm_camera_request_gpio_table(
+		ctrl->gpio_conf->cam_gpio_req_tbl,
+		ctrl->gpio_conf->cam_gpio_req_tbl_size, 0);
+
+	return rc;
+}
+
+/*
+ * msm_camera_get_power_settings - look up a power-up table entry
+ * @ctrl:     power control block holding the power-up table
+ * @seq_type: sequence type to match (e.g. SENSOR_VANA)
+ * @seq_val:  sequence value to match
+ *
+ * Returns the first entry whose type and value both match, or NULL.
+ */
+static struct cam_sensor_power_setting*
+msm_camera_get_power_settings(struct cam_sensor_power_ctrl_t *ctrl,
+				enum msm_camera_power_seq_type seq_type,
+				uint16_t seq_val)
+{
+	struct cam_sensor_power_setting *entry;
+	int i;
+
+	/* Linear scan of the power-up table for a type/value pair match */
+	for (i = 0; i < ctrl->power_setting_size; i++) {
+		entry = &ctrl->power_setting[i];
+		if (entry->seq_type != seq_type)
+			continue;
+		if (entry->seq_val != seq_val)
+			continue;
+		return entry;
+	}
+
+	/* No matching entry found */
+	return NULL;
+}
+
+/*
+ * cam_config_mclk_reg - disable the "cam_clk" regulator(s) during
+ * power down
+ * @ctrl:  power control block
+ * @index: index of the current entry in the power-down table
+ *
+ * For every regulator named "cam_clk", find the power-up entry with
+ * the same sequence type and, if found, disable the regulator using
+ * the handle stashed in that entry's data[0].  Always returns 0.
+ */
+static int cam_config_mclk_reg(struct cam_sensor_power_ctrl_t *ctrl,
+	int32_t index)
+{
+	int32_t i, k;
+	struct camera_vreg_t *vregs = ctrl->cam_vreg;
+	int32_t vreg_count = ctrl->num_vreg;
+	struct cam_sensor_power_setting *match;
+	struct cam_sensor_power_setting *pd =
+		&ctrl->power_down_setting[index];
+
+	for (i = 0; i < vreg_count; i++) {
+		if (strcmp(vregs[i].reg_name, "cam_clk") != 0)
+			continue;
+
+		match = NULL;
+		for (k = 0; k < ctrl->power_setting_size; k++) {
+			if (ctrl->power_setting[k].seq_type ==
+				pd->seq_type) {
+				match = &ctrl->power_setting[k];
+				break;
+			}
+		}
+
+		if (match)
+			msm_camera_config_single_vreg(ctrl->dev,
+				&vregs[i],
+				(struct regulator **)&match->data[0], 0);
+	}
+
+	return 0;
+}
+
+/*
+ * msm_camera_power_down - execute the sensor power-down sequence
+ * @ctrl: power control block holding the power-down table and the
+ *        regulator, clock and GPIO configuration
+ *
+ * Walks ctrl->power_down_setting in order: disables the camera clocks,
+ * drives the sequence GPIOs, and turns off the regulators using the
+ * handles recorded during power up.  Finally moves pinctrl to the
+ * suspend state and releases the GPIO table.
+ *
+ * Returns 0 on success, -EINVAL on bad input, or a negative error from
+ * the clock-regulator teardown.
+ */
+int msm_camera_power_down(struct cam_sensor_power_ctrl_t *ctrl)
+{
+	int index = 0, ret = 0, num_vreg = 0;
+	struct cam_sensor_power_setting *pd = NULL;
+	struct cam_sensor_power_setting *ps;
+	struct camera_vreg_t *cam_vreg;
+
+	CDBG("%s:%d\n", __func__, __LINE__);
+	if (!ctrl) {
+		pr_err("failed ctrl %pK\n", ctrl);
+		return -EINVAL;
+	}
+
+	cam_vreg = ctrl->cam_vreg;
+	num_vreg = ctrl->num_vreg;
+
+	for (index = 0; index < ctrl->power_down_setting_size; index++) {
+		CDBG("%s index %d\n", __func__, index);
+		pd = &ctrl->power_down_setting[index];
+		ps = NULL;
+		CDBG("%s type %d\n", __func__, pd->seq_type);
+		switch (pd->seq_type) {
+		case SENSOR_MCLK:
+			ret = cam_config_mclk_reg(ctrl, index);
+			if (ret < 0) {
+				pr_err("%s:%d :Error: in config clk reg\n",
+					__func__, __LINE__);
+				return ret;
+			}
+			msm_camera_clk_enable(ctrl->dev,
+				ctrl->clk_info, ctrl->clk_ptr,
+				ctrl->clk_info_size, false);
+			break;
+		case SENSOR_RESET:
+		case SENSOR_STANDBY:
+		case SENSOR_CUSTOM_GPIO1:
+		case SENSOR_CUSTOM_GPIO2:
+			/*
+			 * Guard against a missing GPIO table before
+			 * dereferencing it; mirrors the check done in the
+			 * power-up failure path.
+			 */
+			if (!ctrl->gpio_conf->gpio_num_info)
+				continue;
+			if (!ctrl->gpio_conf->gpio_num_info->valid
+				[pd->seq_val])
+				continue;
+			gpio_set_value_cansleep(
+				ctrl->gpio_conf->gpio_num_info->gpio_num
+				[pd->seq_val],
+				(int) pd->config_val);
+			break;
+		case SENSOR_VANA:
+		case SENSOR_VDIG:
+		case SENSOR_VIO:
+		case SENSOR_VAF:
+		case SENSOR_VAF_PWDM:
+		case SENSOR_CUSTOM_REG1:
+		case SENSOR_CUSTOM_REG2:
+			if (pd->seq_val == INVALID_VREG)
+				break;
+			/*
+			 * The regulator handle lives in the matching
+			 * power-up entry's data[0]; look it up first.
+			 */
+			ps = msm_camera_get_power_settings(
+				ctrl, pd->seq_type,
+				pd->seq_val);
+			if (ps) {
+				if (pd->seq_val < ctrl->num_vreg)
+					msm_camera_config_single_vreg(ctrl->dev,
+						&ctrl->cam_vreg
+						[pd->seq_val],
+						(struct regulator **)
+						&ps->data[0],
+						0);
+				else
+					pr_err("%s:%d:seq_val:%d > num_vreg: %d\n",
+						__func__, __LINE__, pd->seq_val,
+						ctrl->num_vreg);
+			} else
+				pr_err("%s error in power up/down seq data\n",
+								__func__);
+			ret = msm_cam_sensor_handle_reg_gpio(pd->seq_type,
+				ctrl->gpio_conf, GPIOF_OUT_INIT_LOW);
+			if (ret < 0)
+				pr_err("ERR:%s Error while disabling VREG GPIO\n",
+					__func__);
+			break;
+		default:
+			pr_err("%s error power seq type %d\n", __func__,
+				pd->seq_type);
+			break;
+		}
+		/* Honour per-entry settle delay (ms); msleep for long waits */
+		if (pd->delay > 20)
+			msleep(pd->delay);
+		else if (pd->delay)
+			usleep_range(pd->delay * 1000,
+				(pd->delay * 1000) + 1000);
+	}
+
+	if (ctrl->cam_pinctrl_status) {
+		ret = pinctrl_select_state(ctrl->pinctrl_info.pinctrl,
+				ctrl->pinctrl_info.gpio_state_suspend);
+		if (ret)
+			pr_err("%s:%d cannot set pin to suspend state",
+				__func__, __LINE__);
+		devm_pinctrl_put(ctrl->pinctrl_info.pinctrl);
+	}
+
+	ctrl->cam_pinctrl_status = 0;
+	msm_camera_request_gpio_table(
+		ctrl->gpio_conf->cam_gpio_req_tbl,
+		ctrl->gpio_conf->cam_gpio_req_tbl_size, 0);
+
+	return 0;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h
new file mode 100644
index 0000000..7e7fc35
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h
@@ -0,0 +1,62 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_SENSOR_UTIL_H_
+#define _CAM_SENSOR_UTIL_H_
+
+#include <linux/kernel.h>
+#include <linux/regulator/consumer.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/of.h>
+#include <cam_sensor_cmn_header.h>
+#include <cam_req_mgr_util.h>
+#include <cam_req_mgr_interface.h>
+#include <cam_mem_mgr.h>
+
+#define INVALID_VREG 100
+
+int msm_camera_get_dt_power_setting_data(struct device_node *of_node,
+	struct camera_vreg_t *cam_vreg, int num_vreg,
+	struct cam_sensor_power_ctrl_t *power_info);
+
+int msm_camera_get_dt_gpio_req_tbl(struct device_node *of_node,
+	struct msm_camera_gpio_conf *gconf, uint16_t *gpio_array,
+	uint16_t gpio_array_size);
+
+int msm_camera_init_gpio_pin_tbl(struct device_node *of_node,
+	struct msm_camera_gpio_conf *gconf, uint16_t *gpio_array,
+	uint16_t gpio_array_size);
+
+int cam_sensor_get_dt_vreg_data(struct device_node *of_node,
+	struct camera_vreg_t **cam_vreg, int *num_vreg);
+
+int cam_sensor_core_power_up(struct cam_sensor_power_ctrl_t *ctrl);
+
+int msm_camera_power_down(struct cam_sensor_power_ctrl_t *ctrl);
+
+int msm_camera_fill_vreg_params(struct camera_vreg_t *cam_vreg,
+	int num_vreg, struct cam_sensor_power_setting *power_setting,
+	uint16_t power_setting_size);
+
+int msm_camera_pinctrl_init
+	(struct msm_pinctrl_info *sensor_pctrl, struct device *dev);
+
+int32_t msm_sensor_driver_get_gpio_data(
+	struct msm_camera_gpio_conf **gpio_conf,
+	struct device_node *of_node);
+
+int cam_sensor_i2c_pkt_parser(struct i2c_settings_array *i2c_reg_settings,
+	struct cam_cmd_buf_desc *cmd_desc, int32_t num_cmd_buffers);
+
+int32_t delete_request(struct i2c_settings_array *i2c_array);
+#endif /* _CAM_SENSOR_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
index 236e7f1..ca0dfac 100644
--- a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
@@ -94,11 +94,6 @@
 	dma_addr_t base;
 };
 
-struct cam_smmu_region_info {
-	dma_addr_t iova_start;
-	size_t iova_len;
-};
-
 struct cam_context_bank_info {
 	struct device *dev;
 	struct dma_iommu_mapping *mapping;
@@ -994,6 +989,87 @@
 }
 EXPORT_SYMBOL(cam_smmu_dealloc_firmware);
 
+/*
+ * cam_smmu_get_region_info - report the IOVA range of an SMMU region
+ * @smmu_hdl:    handle identifying the context bank
+ * @region_id:   which region (firmware/shared/scratch/io) to query
+ * @region_info: out-param filled with the region's start and length
+ *
+ * Returns 0 on success, -EINVAL on bad arguments, -ENODEV when the
+ * context bank does not support the requested region.
+ */
+int cam_smmu_get_region_info(int32_t smmu_hdl,
+	enum cam_smmu_region_id region_id,
+	struct cam_smmu_region_info *region_info)
+{
+	int32_t idx;
+	struct cam_context_bank_info *cb = NULL;
+
+	if (!region_info) {
+		pr_err("Invalid region_info pointer\n");
+		return -EINVAL;
+	}
+
+	if (smmu_hdl == HANDLE_INIT) {
+		pr_err("Invalid handle\n");
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		pr_err("Handle or index invalid. idx = %d hdl = %x\n",
+			idx, smmu_hdl);
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	cb = &iommu_cb_set.cb_info[idx];
+	if (!cb) {
+		pr_err("SMMU context bank pointer invalid\n");
+		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+		return -EINVAL;
+	}
+
+	switch (region_id) {
+	case CAM_SMMU_REGION_FIRMWARE:
+		if (!cb->firmware_support) {
+			pr_err("Firmware not supported\n");
+			mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+			return -ENODEV;
+		}
+		region_info->iova_start = cb->firmware_info.iova_start;
+		region_info->iova_len = cb->firmware_info.iova_len;
+		break;
+	case CAM_SMMU_REGION_SHARED:
+		if (!cb->shared_support) {
+			pr_err("Shared mem not supported\n");
+			mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+			return -ENODEV;
+		}
+		region_info->iova_start = cb->shared_info.iova_start;
+		region_info->iova_len = cb->shared_info.iova_len;
+		break;
+	case CAM_SMMU_REGION_SCRATCH:
+		if (!cb->scratch_buf_support) {
+			pr_err("Scratch memory not supported\n");
+			mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+			return -ENODEV;
+		}
+		region_info->iova_start = cb->scratch_info.iova_start;
+		region_info->iova_len = cb->scratch_info.iova_len;
+		break;
+	case CAM_SMMU_REGION_IO:
+		if (!cb->io_support) {
+			pr_err("IO memory not supported\n");
+			mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+			return -ENODEV;
+		}
+		region_info->iova_start = cb->io_info.iova_start;
+		region_info->iova_len = cb->io_info.iova_len;
+		break;
+	default:
+		/* Arguments must match the format: region id first, then
+		 * handle (they were previously swapped).
+		 */
+		pr_err("Invalid region id: %d for smmu hdl: %X\n",
+			region_id, smmu_hdl);
+		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+		return -EINVAL;
+	}
+
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return 0;
+}
+EXPORT_SYMBOL(cam_smmu_get_region_info);
+
 static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
 	 enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
 	 size_t *len_ptr,
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
index 76e9135..20445f3 100644
--- a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
@@ -49,6 +49,17 @@
 };
 
 /**
+ * @brief            : Structure to store region information
+ *
+ * @param iova_start : Start address of region
+ * @param iova_len   : length of region
+ */
+struct cam_smmu_region_info {
+	dma_addr_t iova_start;
+	size_t iova_len;
+};
+
+/**
  * @brief           : Gets an smmu handle
  *
  * @param identifier: Unique identifier to be used by clients which they
@@ -252,4 +263,17 @@
  * @return Status of operation. Negative in case of error. Zero otherwise.
  */
 int cam_smmu_dealloc_firmware(int32_t smmu_hdl);
+
+/**
+ * @brief Gets region information specified by smmu handle and region id
+ *
+ * @param smmu_hdl: SMMU handle identifying the context bank
+ * @param region_id: Region id for which information is desired
+ * @param region_info: Struct populated with region information
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_get_region_info(int32_t smmu_hdl,
+	enum cam_smmu_region_id region_id,
+	struct cam_smmu_region_info *region_info);
 #endif /* _CAM_SMMU_API_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
index a736148..901632a 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
@@ -246,7 +246,6 @@
 			sync_cb->status = list_info->status;
 			queue_work(sync_dev->work_queue,
 				&sync_cb->cb_dispatch_work);
-			list_del_init(&sync_cb->list);
 		}
 
 		/* Dispatch user payloads if any were registered earlier */
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
index ecc62c8..3b3cbff 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
@@ -223,6 +223,7 @@
 		cb_info->status,
 		cb_info->cb_data);
 
+	list_del_init(&cb_info->list);
 	kfree(cb_info);
 }
 
diff --git a/drivers/media/platform/msm/camera/cam_utils/Makefile b/drivers/media/platform/msm/camera/cam_utils/Makefile
index 6f9525e..f22115c 100644
--- a/drivers/media/platform/msm/camera/cam_utils/Makefile
+++ b/drivers/media/platform/msm/camera/cam_utils/Makefile
@@ -1 +1,3 @@
-obj-$(CONFIG_SPECTRA_CAMERA) += cam_soc_util.o cam_io_util.o
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_soc_util.o cam_io_util.o cam_packet_util.o
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
new file mode 100644
index 0000000..6d90c1e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
@@ -0,0 +1,78 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include "cam_mem_mgr.h"
+#include "cam_packet_util.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+/*
+ * cam_packet_util_process_patches - resolve patch descriptors in a packet
+ * @packet:    packet containing command buffers and patch descriptors
+ * @iommu_hdl: IOMMU handle used to translate source buffer handles
+ *
+ * For each patch descriptor, translates the source buffer handle to an
+ * IOVA, adds the source offset, and writes the resulting 32-bit address
+ * into the destination command buffer at the given offset.
+ *
+ * Returns 0 on success or a negative error from the mem-mgr lookups.
+ */
+int cam_packet_util_process_patches(struct cam_packet *packet,
+	int32_t iommu_hdl)
+{
+	struct cam_patch_desc *patch_desc = NULL;
+	uint64_t   iova_addr;
+	uint64_t   cpu_addr;
+	uint32_t   temp;
+	uint32_t  *dst_cpu_addr;
+	uint32_t  *src_buf_iova_addr;
+	size_t     dst_buf_len;
+	size_t     src_buf_size;
+	int        i;
+	int        rc = 0;
+
+	/* Patch descriptors start at patch_offset bytes into the payload */
+	patch_desc = (struct cam_patch_desc *)
+			((uint32_t *) &packet->payload +
+			packet->patch_offset/4);
+	CDBG("packet = %pK patch_desc = %pK size = %lu\n",
+			(void *)packet, (void *)patch_desc,
+			sizeof(struct cam_patch_desc));
+
+	for (i = 0; i < packet->num_patches; i++) {
+		rc = cam_mem_get_io_buf(patch_desc[i].src_buf_hdl,
+			iommu_hdl, &iova_addr, &src_buf_size);
+		if (rc < 0) {
+			pr_err("unable to get src buf address\n");
+			return rc;
+		}
+		src_buf_iova_addr = (uint32_t *)iova_addr;
+		temp = iova_addr;
+
+		rc = cam_mem_get_cpu_buf(patch_desc[i].dst_buf_hdl,
+			&cpu_addr, &dst_buf_len);
+		if (rc < 0) {
+			pr_err("unable to get dst buf address\n");
+			return rc;
+		}
+		dst_cpu_addr = (uint32_t *)cpu_addr;
+
+		CDBG("i = %d patch info = %x %x %x %x\n", i,
+			patch_desc[i].dst_buf_hdl, patch_desc[i].dst_offset,
+			patch_desc[i].src_buf_hdl, patch_desc[i].src_offset);
+
+		dst_cpu_addr = (uint32_t *)((uint8_t *)dst_cpu_addr +
+			patch_desc[i].dst_offset);
+		temp += patch_desc[i].src_offset;
+
+		/* Only 32 bits are patched into the command buffer */
+		*dst_cpu_addr = temp;
+
+		/*
+		 * Log only the 32-bit value actually written; the previous
+		 * 64-bit read pulled in adjacent, possibly uninitialized,
+		 * bytes past the patched word.
+		 */
+		CDBG("patch is done for dst %pK with src %pK value %x\n",
+			dst_cpu_addr, src_buf_iova_addr, *dst_cpu_addr);
+	}
+
+	return rc;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.h
new file mode 100644
index 0000000..614e868
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_PACKET_UTIL_H_
+#define _CAM_PACKET_UTIL_H_
+
+#include <uapi/media/cam_defs.h>
+
+/**
+ * cam_packet_util_process_patches()
+ *
+ * @brief:              Replace the handle in Packet to Address using the
+ *                      information from patches.
+ *
+ * @packet:             Input packet containing Command Buffers and Patches
+ * @iommu_hdl:          IOMMU handle of the HW Device that received the packet
+ *
+ * @return:             0: Success
+ *                      Negative: Failure
+ */
+int cam_packet_util_process_patches(struct cam_packet *packet,
+	int32_t iommu_hdl);
+
+#endif /* _CAM_PACKET_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
index b16e37e..2dfb90a 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
@@ -267,7 +267,7 @@
 
 	of_node = pdev->dev.of_node;
 
-	rc = of_property_read_u32(of_node, "cell-index", &pdev->id);
+	rc = of_property_read_u32(of_node, "cell-index", &soc_info->index);
 	if (rc) {
 		pr_err("device %s failed to read cell-index\n", pdev->name);
 		return rc;
@@ -317,11 +317,13 @@
 		}
 	}
 
-	rc = of_property_read_u32_array(of_node, "reg-cam-base",
-		soc_info->mem_block_cam_base, soc_info->num_mem_block);
-	if (rc) {
-		pr_err("Error reading register offsets\n");
-		return rc;
+	if (soc_info->num_mem_block > 0) {
+		rc = of_property_read_u32_array(of_node, "reg-cam-base",
+			soc_info->mem_block_cam_base, soc_info->num_mem_block);
+		if (rc) {
+			pr_err("Error reading register offsets\n");
+			return rc;
+		}
 	}
 
 	rc = of_property_read_string_index(of_node, "interrupt-names", 0,
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
index 3e8226f..e556bba 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
@@ -18,6 +18,7 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
+
 #include "cam_io_util.h"
 
 #define NO_SET_RATE  -1
@@ -54,7 +55,7 @@
  *                          Camera hardware driver module
  *
  * @pdev:                   Platform device pointer
- * @hw_version;             Camera device version
+ * @hw_version:             Camera device version
  * @index:                  Instance id for the camera device
  * @irq_name:               Name of the irq associated with the device
  * @irq_line:               Irq resource
@@ -76,7 +77,7 @@
  * @clk:                    Array of associated clock resources
  * @clk_rate:               Array of default clock rates
  * @src_clk_idx:            Source clock index that is rate-controllable
- * @soc_private;            Soc private data
+ * @soc_private:            Soc private data
  *
  */
 struct cam_hw_soc_info {
@@ -172,7 +173,6 @@
  */
 int cam_soc_util_get_dt_properties(struct cam_hw_soc_info *soc_info);
 
-
 /**
  * cam_soc_util_request_platform_resource()
  *
@@ -208,7 +208,7 @@
  *                          TRUE: Enable all clocks in soc_info Now.
  *                          False: Don't enable clocks Now. Driver will
  *                                 enable independently.
- @enable_irq:           Boolean flag:
+ * @enable_irq:         Boolean flag:
  *                          TRUE: Enable IRQ in soc_info Now.
  *                          False: Don't enable IRQ Now. Driver will
  *                                 enable independently.
diff --git a/drivers/media/platform/msm/camera/icp/Makefile b/drivers/media/platform/msm/camera/icp/Makefile
new file mode 100644
index 0000000..c42b162
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/Makefile
@@ -0,0 +1,14 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/icp
+ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/icp/fw_inc
+ccflags-y += -Idrivers/media/platform/msm/camera
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+
+obj-$(CONFIG_SPECTRA_CAMERA) += icp_hw/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_icp_subdev.o cam_icp_context.o hfi.o
diff --git a/drivers/media/platform/msm/camera/icp/cam_icp_context.c b/drivers/media/platform/msm/camera/icp/cam_icp_context.c
new file mode 100644
index 0000000..41290f4
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/cam_icp_context.c
@@ -0,0 +1,195 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-ICP-CTXT %s:%d " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <media/cam_sync.h>
+#include <media/cam_defs.h>
+#include "cam_sync_api.h"
+#include "cam_node.h"
+#include "cam_context.h"
+#include "cam_context_utils.h"
+#include "cam_icp_context.h"
+#include "cam_req_mgr_util.h"
+#include "cam_mem_mgr.h"
+
+/* Acquire HW resources; on success the context moves to ACQUIRED. */
+static int __cam_icp_acquire_dev_in_available(struct cam_context *ctx,
+	struct cam_acquire_dev_cmd *cmd)
+{
+	int rc = cam_context_acquire_dev_to_hw(ctx, cmd);
+
+	if (rc == 0)
+		ctx->state = CAM_CTX_ACQUIRED;
+
+	return rc;
+}
+
+/* Release HW resources; the context always drops back to AVAILABLE. */
+static int __cam_icp_release_dev_in_acquired(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc = cam_context_release_dev_to_hw(ctx, cmd);
+
+	if (rc != 0)
+		pr_err("Unable to release device\n");
+
+	ctx->state = CAM_CTX_AVAILABLE;
+	return rc;
+}
+
+/* Start the device via the HW layer; on success move to READY. */
+static int __cam_icp_start_dev_in_acquired(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_start_dev_to_hw(ctx, cmd);
+	if (!rc)
+		ctx->state = CAM_CTX_READY;
+
+	return rc;
+}
+
+/* Forward a config request to the HW layer; context state unchanged. */
+static int __cam_icp_config_dev_in_ready(struct cam_context *ctx,
+	struct cam_config_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_prepare_dev_to_hw(ctx, cmd);
+	if (rc)
+		pr_err("Unable to prepare device\n");
+
+	return rc;
+}
+
+/* Stop the device; the context drops back to ACQUIRED even on error. */
+static int __cam_icp_stop_dev_in_ready(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_stop_dev_to_hw(ctx);
+	if (rc)
+		pr_err("Unable to stop device\n");
+
+	ctx->state = CAM_CTX_ACQUIRED;
+	return rc;
+}
+
+/*
+ * Release from READY: stop the device first, then release it.
+ * Returns the result of the release step (stop errors are logged only).
+ */
+static int __cam_icp_release_dev_in_ready(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = __cam_icp_stop_dev_in_ready(ctx, NULL);
+	if (rc)
+		pr_err("Unable to stop device\n");
+
+	rc = __cam_icp_release_dev_in_acquired(ctx, cmd);
+	if (rc)
+		/* This path releases, not stops; fix copy-pasted message */
+		pr_err("Unable to release device\n");
+
+	return rc;
+}
+
+/* Buf-done IRQ entry point: relay completion to the generic context. */
+static int __cam_icp_handle_buf_done_in_ready(void *ctx,
+	uint32_t evt_id, void *done)
+{
+	return cam_context_buf_done_from_hw(ctx, done, 0);
+}
+
+/*
+ * Per-state handler table for the ICP context, indexed by CAM_CTX_*
+ * state.  Only states that accept user ioctls install ioctl handlers;
+ * buf-done IRQs are handled in the Acquired and Ready states.
+ */
+static struct cam_ctx_ops
+	cam_icp_ctx_state_machine[CAM_CTX_STATE_MAX] = {
+	/* Uninit */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Available */
+	{
+		.ioctl_ops = {
+			.acquire_dev = __cam_icp_acquire_dev_in_available,
+		},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Acquired */
+	{
+		.ioctl_ops = {
+			.release_dev = __cam_icp_release_dev_in_acquired,
+			.start_dev = __cam_icp_start_dev_in_acquired,
+			.config_dev = __cam_icp_config_dev_in_ready,
+		},
+		.crm_ops = {},
+		.irq_ops = __cam_icp_handle_buf_done_in_ready,
+	},
+	/* Ready */
+	{
+		.ioctl_ops = {
+			.stop_dev = __cam_icp_stop_dev_in_ready,
+			.release_dev = __cam_icp_release_dev_in_ready,
+			.config_dev = __cam_icp_config_dev_in_ready,
+		},
+		.crm_ops = {},
+		.irq_ops = __cam_icp_handle_buf_done_in_ready,
+	},
+	/* Activated */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+};
+
+/*
+ * cam_icp_context_init - initialise an ICP context on top of the
+ * generic camera context and install the ICP state machine.
+ * Returns 0 on success or a negative error code.
+ */
+int cam_icp_context_init(struct cam_icp_context *ctx,
+	struct cam_hw_mgr_intf *hw_intf)
+{
+	int rc;
+
+	if (!ctx || !ctx->base || !hw_intf) {
+		pr_err("Invalid params: %pK %pK\n", ctx, hw_intf);
+		return -EINVAL;
+	}
+
+	rc = cam_context_init(ctx->base, NULL, hw_intf, ctx->req_base,
+		CAM_CTX_REQ_MAX);
+	if (rc) {
+		pr_err("Camera Context Base init failed!\n");
+		return rc;
+	}
+
+	ctx->base->state_machine = cam_icp_ctx_state_machine;
+	ctx->base->ctx_priv = ctx;
+	ctx->ctxt_to_hw_map = NULL;
+
+	return rc;
+}
+
+/* cam_icp_context_deinit - tear down an ICP context and scrub it. */
+int cam_icp_context_deinit(struct cam_icp_context *ctx)
+{
+	if (!ctx || !ctx->base) {
+		pr_err("Invalid params: %pK\n", ctx);
+		return -EINVAL;
+	}
+
+	cam_context_deinit(ctx->base);
+	memset(ctx, 0, sizeof(*ctx));
+	return 0;
+}
+
diff --git a/drivers/media/platform/msm/camera/icp/cam_icp_context.h b/drivers/media/platform/msm/camera/icp/cam_icp_context.h
new file mode 100644
index 0000000..709fc56
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/cam_icp_context.h
@@ -0,0 +1,48 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_ICP_CONTEXT_H_
+#define _CAM_ICP_CONTEXT_H_
+
+#include "cam_context.h"
+
+/**
+ * struct cam_icp_context - icp context
+ * @base: icp context object
+ * @state_machine: state machine for ICP context
+ * @req_base: common request structure
+ * @state: icp context state
+ * @ctxt_to_hw_map: context to FW handle mapping
+ */
+struct cam_icp_context {
+	struct cam_context *base;
+	struct cam_ctx_ops *state_machine;
+	struct cam_ctx_request req_base[CAM_CTX_REQ_MAX];
+	uint32_t state;
+	void *ctxt_to_hw_map;
+};
+
+/**
+ * cam_icp_context_init() - ICP context init
+ * @ctx: Pointer to context
+ * @hw_intf: Pointer to ICP hardware interface
+ */
+int cam_icp_context_init(struct cam_icp_context *ctx,
+	struct cam_hw_mgr_intf *hw_intf);
+
+/**
+ * cam_icp_context_deinit() - ICP context deinit
+ * @ctx: Pointer to context
+ */
+int cam_icp_context_deinit(struct cam_icp_context *ctx);
+
+#endif /* _CAM_ICP_CONTEXT_H_ */
diff --git a/drivers/media/platform/msm/camera/icp/cam_icp_subdev.c b/drivers/media/platform/msm/camera/icp/cam_icp_subdev.c
new file mode 100644
index 0000000..703561d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/cam_icp_subdev.c
@@ -0,0 +1,259 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-ICP %s:%d " fmt, __func__, __LINE__
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <media/cam_req_mgr.h>
+#include <media/cam_defs.h>
+#include <media/cam_icp.h>
+#include "cam_req_mgr_dev.h"
+#include "cam_subdev.h"
+#include "cam_node.h"
+#include "cam_context.h"
+#include "cam_icp_context.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_icp_hw_mgr_intf.h"
+
+#define CAM_ICP_DEV_NAME        "cam-icp"
+
+struct cam_icp_subdev {
+	struct cam_subdev sd;
+	struct cam_node *node;
+	struct cam_context ctx[CAM_CTX_MAX];
+	struct cam_icp_context ctx_icp[CAM_CTX_MAX];
+	struct mutex icp_lock;
+	int32_t open_cnt;
+	int32_t reserved;
+};
+
+static struct cam_icp_subdev g_icp_dev;
+
+static const struct of_device_id cam_icp_dt_match[] = {
+	{.compatible = "qcom,cam-icp"},
+	{}
+};
+
+static int cam_icp_subdev_open(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct cam_hw_mgr_intf *hw_mgr_intf = NULL;
+	struct cam_node *node = v4l2_get_subdevdata(sd);
+	int rc = 0;
+
+	mutex_lock(&g_icp_dev.icp_lock);
+	if (g_icp_dev.open_cnt >= 1) {	/* only one concurrent open allowed */
+		pr_err("ICP subdev is already opened\n");
+		rc = -EALREADY;
+		goto end;
+	}
+
+	if (!node) {
+		pr_err("Invalid args\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	hw_mgr_intf = &node->hw_mgr_intf;
+	rc = hw_mgr_intf->download_fw(hw_mgr_intf->hw_mgr_priv, NULL);
+	if (rc < 0) {
+		pr_err("FW download failed\n");
+		goto end;
+	}
+	g_icp_dev.open_cnt++;	/* counted only after FW download succeeds */
+end:
+	mutex_unlock(&g_icp_dev.icp_lock);
+	return rc;
+}
+
+static int cam_icp_subdev_close(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	int rc = 0;
+	struct cam_hw_mgr_intf *hw_mgr_intf = NULL;
+	struct cam_node *node = v4l2_get_subdevdata(sd);
+
+	mutex_lock(&g_icp_dev.icp_lock);
+	if (g_icp_dev.open_cnt <= 0) {
+		pr_err("ICP subdev is already closed\n");
+		rc = -EINVAL;
+		goto end;
+	}
+	g_icp_dev.open_cnt--;	/* fd closes regardless, so always drop count */
+	if (!node) {
+		pr_err("Invalid args\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	hw_mgr_intf = &node->hw_mgr_intf;
+	if (!hw_mgr_intf) {	/* defensive; address of member cannot be NULL */
+		pr_err("hw_mgr_intf is not initialized\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	rc = hw_mgr_intf->hw_close(hw_mgr_intf->hw_mgr_priv, NULL);
+	if (rc < 0) {
+		pr_err("HW close failed\n");
+		goto end;
+	}
+
+end:
+	mutex_unlock(&g_icp_dev.icp_lock);
+	return rc;	/* was hard-coded 0, silently discarding teardown errors */
+}
+
+const struct v4l2_subdev_internal_ops cam_icp_subdev_internal_ops = {
+	.open = cam_icp_subdev_open,
+	.close = cam_icp_subdev_close,
+};
+
+static int cam_icp_probe(struct platform_device *pdev)
+{
+	int rc = 0, i = 0;
+	struct cam_node *node;
+	struct cam_hw_mgr_intf *hw_mgr_intf;
+
+	if (!pdev) {
+		pr_err("pdev is NULL\n");
+		return -EINVAL;
+	}
+
+	memset(&g_icp_dev, 0, sizeof(g_icp_dev));
+
+	g_icp_dev.sd.pdev = pdev;
+	g_icp_dev.sd.internal_ops = &cam_icp_subdev_internal_ops;
+	rc = cam_subdev_probe(&g_icp_dev.sd, pdev, CAM_ICP_DEV_NAME,
+		CAM_ICP_DEVICE_TYPE);
+	if (rc) {
+		pr_err("ICP cam_subdev_probe failed!\n");
+		goto probe_fail;
+	}
+
+	node = (struct cam_node *) g_icp_dev.sd.token;
+	g_icp_dev.node = node;	/* stash node so cam_icp_remove() can deinit it */
+	hw_mgr_intf = kzalloc(sizeof(*hw_mgr_intf), GFP_KERNEL);
+	if (!hw_mgr_intf) {
+		rc = -ENOMEM;	/* allocation failure, not an invalid argument */
+		goto hw_alloc_fail;
+	}
+
+	rc = cam_icp_hw_mgr_init(pdev->dev.of_node, (uint64_t *)hw_mgr_intf);
+	if (rc) {
+		pr_err("ICP HW manager init failed: %d\n", rc);
+		goto hw_init_fail;
+	}
+
+	pr_debug("Initializing the ICP contexts\n");
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		g_icp_dev.ctx_icp[i].base = &g_icp_dev.ctx[i];
+		rc = cam_icp_context_init(&g_icp_dev.ctx_icp[i],
+					hw_mgr_intf);
+		if (rc) {
+			pr_err("ICP context init failed!\n");
+			goto ctx_fail;
+		}
+	}
+
+	pr_debug("Initializing the ICP Node\n");
+	rc = cam_node_init(node, hw_mgr_intf, g_icp_dev.ctx,
+				CAM_CTX_MAX, CAM_ICP_DEV_NAME);
+	if (rc) {
+		pr_err("ICP node init failed!\n");
+		goto ctx_fail;
+	}
+
+	g_icp_dev.open_cnt = 0;
+	mutex_init(&g_icp_dev.icp_lock);
+
+	return rc;
+
+ctx_fail:	/* unwind only the contexts that were initialized */
+	for (--i; i >= 0; i--)
+		cam_icp_context_deinit(&g_icp_dev.ctx_icp[i]);
+hw_init_fail:
+	kfree(hw_mgr_intf);
+hw_alloc_fail:
+	cam_subdev_remove(&g_icp_dev.sd);
+probe_fail:
+	return rc;
+}
+
+static int cam_icp_remove(struct platform_device *pdev)
+{
+	int i;
+	struct v4l2_subdev *sd;
+	struct cam_subdev *subdev;
+
+	if (!pdev) {
+		pr_err("pdev is NULL\n");
+		return -EINVAL;
+	}
+
+	sd = platform_get_drvdata(pdev);
+	if (!sd) {
+		pr_err("V4l2 subdev is NULL\n");
+		return -EINVAL;
+	}
+
+	subdev = v4l2_get_subdevdata(sd);
+	if (!subdev) {
+		pr_err("cam subdev is NULL\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < CAM_CTX_MAX; i++)
+		cam_icp_context_deinit(&g_icp_dev.ctx_icp[i]);
+	cam_node_deinit(g_icp_dev.node);	/* NOTE(review): confirm g_icp_dev.node is assigned before remove */
+	cam_subdev_remove(&g_icp_dev.sd);
+	mutex_destroy(&g_icp_dev.icp_lock);
+
+	return 0;
+}
+
+static struct platform_driver cam_icp_driver = {
+	.probe = cam_icp_probe,
+	.remove = cam_icp_remove,
+	.driver = {
+		.name = "cam_icp",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_icp_dt_match,
+	},
+};
+
+static int __init cam_icp_init_module(void)
+{
+	return platform_driver_register(&cam_icp_driver);
+}
+
+static void __exit cam_icp_exit_module(void)
+{
+	platform_driver_unregister(&cam_icp_driver);
+}
+module_init(cam_icp_init_module);
+module_exit(cam_icp_exit_module);
+MODULE_DESCRIPTION("MSM ICP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/icp/fw_inc/hfi_intf.h b/drivers/media/platform/msm/camera/icp/fw_inc/hfi_intf.h
new file mode 100644
index 0000000..1e42f75
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/fw_inc/hfi_intf.h
@@ -0,0 +1,105 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _HFI_INTF_H_
+#define _HFI_INTF_H_
+
+#include <linux/types.h>
+
+/**
+ * struct hfi_mem
+ * @len: length of memory
+ * @kva: kernel virtual address
+ * @iova: IO virtual address
+ * @reserved: reserved field
+ */
+struct hfi_mem {
+	uint64_t len;
+	uint64_t kva;
+	uint32_t iova;
+	uint32_t reserved;
+};
+
+/**
+ * struct hfi_mem_info
+ * @qtbl: qtable hfi memory
+ * @cmd_q: command queue hfi memory for host to firmware communication
+ * @msg_q: message queue hfi memory for firmware to host communication
+ * @dbg_q: debug queue hfi memory for firmware debug information
+ * @sec_heap: secondary heap hfi memory for firmware
+ * @icp_base: icp base address
+ */
+struct hfi_mem_info {
+	struct hfi_mem qtbl;
+	struct hfi_mem cmd_q;
+	struct hfi_mem msg_q;
+	struct hfi_mem dbg_q;
+	struct hfi_mem sec_heap;
+	void __iomem *icp_base;
+};
+
+/**
+ * hfi_write_cmd() - function for hfi write
+ * @cmd_ptr: pointer to command data for hfi write
+ *
+ * Returns success(zero)/failure(non zero)
+ */
+int hfi_write_cmd(void *cmd_ptr);
+
+/**
+ * hfi_read_message() - function for hfi read
+ * @pmsg: buffer to place read message for hfi queue
+ * @q_id: queue id
+ *
+ * Returns success(zero)/failure(non zero)
+ */
+int hfi_read_message(uint32_t *pmsg, uint8_t q_id);
+
+/**
+ * cam_hfi_init() - initialize hfi after firmware download
+ * @event_driven_mode: event mode
+ * @hfi_mem: hfi memory info
+ * @icp_base: icp base address
+ * @debug: debug flag
+ *
+ * Returns success(zero)/failure(non zero)
+ */
+int cam_hfi_init(uint8_t event_driven_mode, struct hfi_mem_info *hfi_mem,
+	void __iomem *icp_base, bool debug);
+
+/**
+ * hfi_get_hw_caps() - hardware capabilities from firmware
+ * @query_caps: holds query information from hfi
+ *
+ * Returns success(zero)/failure(non zero)
+ */
+int hfi_get_hw_caps(void *query_caps);
+
+/**
+ * hfi_send_system_cmd() - send hfi system command to firmware
+ * @type: type of system command
+ * @data: command data
+ * @size: size of command data
+ */
+void hfi_send_system_cmd(uint32_t type, uint64_t data, uint32_t size);
+
+/**
+ * cam_hfi_enable_cpu() - enable A5 CPU
+ * @icp_base: icp base address
+ */
+void cam_hfi_enable_cpu(void __iomem *icp_base);
+/**
+ * cam_hfi_deinit() - cleanup HFI
+ */
+void cam_hfi_deinit(void);
+
+#endif /* _HFI_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/icp/fw_inc/hfi_reg.h b/drivers/media/platform/msm/camera/icp/fw_inc/hfi_reg.h
new file mode 100644
index 0000000..d1bbe01
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/fw_inc/hfi_reg.h
@@ -0,0 +1,308 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_HFI_REG_H_
+#define _CAM_HFI_REG_H_
+
+#include <linux/types.h>
+#include "hfi_intf.h"
+
+
+/* start of ICP CSR registers */
+#define HFI_REG_A5_HW_VERSION                   0x0
+#define HFI_REG_A5_CSR_NSEC_RESET               0x4
+#define HFI_REG_A5_CSR_A5_CONTROL               0x8
+#define HFI_REG_A5_CSR_ETM                      0xC
+#define HFI_REG_A5_CSR_A2HOSTINTEN              0x10
+#define HFI_REG_A5_CSR_A2HOSTINT                0x14
+#define HFI_REG_A5_CSR_A2HOSTINTCLR             0x18
+#define HFI_REG_A5_CSR_A2HOSTINTSTATUS          0x1C
+#define HFI_REG_A5_CSR_A2HOSTINTSET             0x20
+#define HFI_REG_A5_CSR_HOST2ICPINT              0x30
+#define HFI_REG_A5_CSR_A5_STATUS                0x200
+#define HFI_REG_A5_QGIC2_LM_ID                  0x204
+#define HFI_REG_A5_SPARE                        0x400
+
+/* general purpose registers from */
+#define HFI_REG_FW_VERSION                      0x44
+#define HFI_REG_HOST_ICP_INIT_REQUEST           0x48
+#define HFI_REG_ICP_HOST_INIT_RESPONSE          0x4C
+#define HFI_REG_SHARED_MEM_PTR                  0x50
+#define HFI_REG_SHARED_MEM_SIZE                 0x54
+#define HFI_REG_QTBL_PTR                        0x58
+#define HFI_REG_UNCACHED_HEAP_PTR               0x5C
+#define HFI_REG_UNCACHED_HEAP_SIZE              0x60
+/* end of ICP CSR registers */
+
+/* flags for ICP CSR registers */
+#define ICP_FLAG_CSR_WAKE_UP_EN                 (1 << 4)
+#define ICP_FLAG_CSR_A5_EN                      (1 << 9)
+#define ICP_CSR_EN_CLKGATE_WFI                  (1 << 12)
+#define ICP_CSR_EDBGRQ                          (1 << 14)
+#define ICP_CSR_DBGSWENABLE                     (1 << 22)
+
+/* start of Queue table and queues */
+#define MAX_ICP_HFI_QUEUES                      4
+#define ICP_QHDR_TX_TYPE_MASK                   0xFF000000
+#define ICP_QHDR_RX_TYPE_MASK                   0x00FF0000
+#define ICP_QHDR_PRI_TYPE_MASK                  0x0000FF00
+#define ICP_QHDR_Q_ID_MASK                      0x000000FF
+
+#define ICP_CMD_Q_SIZE_IN_BYTES                 4096
+#define ICP_MSG_Q_SIZE_IN_BYTES                 4096
+#define ICP_DBG_Q_SIZE_IN_BYTES                 8192
+
+#define ICP_SHARED_MEM_IN_BYTES                 (1024 * 1024)
+#define ICP_UNCACHED_HEAP_SIZE_IN_BYTES         (2 * 1024 * 1024)
+#define ICP_HFI_MAX_MSG_SIZE_IN_WORDS           128
+
+#define ICP_HFI_QTBL_HOSTID1                    0x01000000
+#define ICP_HFI_QTBL_STATUS_ENABLED             0x00000001
+#define ICP_HFI_NUMBER_OF_QS                    3
+#define ICP_HFI_NUMBER_OF_ACTIVE_QS             3
+#define ICP_HFI_QTBL_OFFSET                     0
+#define ICP_HFI_VAR_SIZE_PKT                    0
+/* ICP_HFI_MAX_MSG_SIZE_IN_WORDS (128) is defined once above */
+
+
+/* Queue Header type masks. Use these to access bitfields in qhdr_type */
+#define HFI_MASK_QHDR_TX_TYPE                   0xFF000000
+#define HFI_MASK_QHDR_RX_TYPE                   0x00FF0000
+#define HFI_MASK_QHDR_PRI_TYPE                  0x0000FF00
+#define HFI_MASK_QHDR_Q_ID_TYPE                 0x000000FF
+
+
+#define TX_EVENT_DRIVEN_MODE_1                  0
+#define RX_EVENT_DRIVEN_MODE_1                  0
+#define TX_EVENT_DRIVEN_MODE_2                  0x01000000
+#define RX_EVENT_DRIVEN_MODE_2                  0x00010000
+#define TX_EVENT_POLL_MODE_2                    0x02000000
+#define RX_EVENT_POLL_MODE_2                    0x00020000
+#define U32_OFFSET                              0x1
+#define BYTE_WORD_SHIFT                         2
+
+/**
+ * @INVALID: Invalid state
+ * @FW_LOAD_DONE: Firmware load is completed
+ * @FW_RESP_DONE: Firmware response is received
+ * @FW_START_SENT: firmware start command has been sent
+ * @FW_READY: firmware is ready to accept commands
+ */
+enum hfi_state {
+	INVALID,
+	FW_LOAD_DONE,
+	FW_RESP_DONE,
+	FW_START_SENT,
+	FW_READY
+};
+
+/**
+ * @RESET: register bit/field cleared
+ * @SET: register bit/field set
+ */
+enum reg_settings {
+	RESET,
+	SET
+};
+
+/**
+ * @INTR_DISABLE: Disable interrupt
+ * @INTR_ENABLE: Enable interrupt
+ */
+enum intr_status {
+	INTR_DISABLE,
+	INTR_ENABLE
+};
+
+/**
+ * @ICP_INIT_RESP_RESET: reset init state
+ * @ICP_INIT_RESP_SUCCESS: init success
+ * @ICP_INIT_RESP_FAILED: init failed
+ */
+enum host_init_resp {
+	ICP_INIT_RESP_RESET,
+	ICP_INIT_RESP_SUCCESS,
+	ICP_INIT_RESP_FAILED
+};
+
+/**
+ * @ICP_INIT_REQUEST_RESET: reset init request
+ * @ICP_INIT_REQUEST_SET: set init request
+ */
+enum host_init_request {
+	ICP_INIT_REQUEST_RESET,
+	ICP_INIT_REQUEST_SET
+};
+
+/**
+ * @QHDR_INACTIVE: Queue is inactive
+ * @QHDR_ACTIVE: Queue is active
+ */
+enum qhdr_status {
+	QHDR_INACTIVE,
+	QHDR_ACTIVE
+};
+
+/**
+ * @INTR_MODE: event driven mode 1, each send and receive generates interrupt
+ * @WM_MODE: event driven mode 2, interrupts based on watermark mechanism
+ * @POLL_MODE: poll method
+ */
+enum qhdr_event_drv_type {
+	INTR_MODE,
+	WM_MODE,
+	POLL_MODE
+};
+
+/**
+ * @TX_INT: event driven mode 1, each send and receive generates interrupt
+ * @TX_INT_WM: event driven mode 2, interrupts based on watermark mechanism
+ * @TX_POLL: poll method
+ * @ICP_QHDR_TX_TYPE_MASK defines position in qhdr_type
+ */
+enum qhdr_tx_type {
+	TX_INT,
+	TX_INT_WM,
+	TX_POLL
+};
+
+/**
+ * @RX_INT: event driven mode 1, each send and receive generates interrupt
+ * @RX_INT_WM: event driven mode 2, interrupts based on watermark mechanism
+ * @RX_POLL: poll method
+ * @ICP_QHDR_RX_TYPE_MASK defines position in qhdr_type
+ */
+enum qhdr_rx_type {
+	RX_INT,
+	RX_INT_WM,
+	RX_POLL
+};
+
+/**
+ * @Q_CMD: Host to FW command queue
+ * @Q_MSG: FW to Host message queue
+ * @Q_DBG: FW to Host debug queue
+ * @ICP_QHDR_Q_ID_MASK defines position in qhdr_type
+ */
+enum qhdr_q_id {
+	Q_CMD,
+	Q_MSG,
+	Q_DBG
+};
+
+/**
+ * struct hfi_qtbl_hdr
+ * @qtbl_version: Queue table version number
+ *                Higher 16 bits: Major version
+ *                Lower 16 bits: Minor version
+ * @qtbl_size: Queue table size from version to last parameter in qhdr entry
+ * @qtbl_qhdr0_offset: Offset to the start of first qhdr
+ * @qtbl_qhdr_size: Queue header size in bytes
+ * @qtbl_num_q: Total number of queues in Queue table
+ * @qtbl_num_active_q: Total number of active queues
+ */
+struct hfi_qtbl_hdr {
+	uint32_t qtbl_version;
+	uint32_t qtbl_size;
+	uint32_t qtbl_qhdr0_offset;
+	uint32_t qtbl_qhdr_size;
+	uint32_t qtbl_num_q;
+	uint32_t qtbl_num_active_q;
+} __packed;
+
+/**
+ * struct hfi_q_hdr
+ * @qhdr_status: Queue status, qhdr_state define possible status
+ * @qhdr_start_addr: Queue start address in non cached memory
+ * @qhdr_type: qhdr_tx, qhdr_rx, qhdr_q_id and priority defines qhdr type
+ * @qhdr_q_size: Queue size
+ *		Number of queue packets if qhdr_pkt_size is non-zero
+ *		Queue size in bytes if qhdr_pkt_size is zero
+ * @qhdr_pkt_size: Size of queue packet entries
+ *		0x0: variable queue packet size
+ *		non zero: size of queue packet entry, fixed
+ * @qhdr_pkt_drop_cnt: Number of packets dropped by sender
+ * @qhdr_rx_wm: Receiver watermark, applicable in event driven mode
+ * @qhdr_tx_wm: Sender watermark, applicable in event driven mode
+ * @qhdr_rx_req: Receiver sets this bit if queue is empty
+ * @qhdr_tx_req: Sender sets this bit if queue is full
+ * @qhdr_rx_irq_status: Receiver sets this bit and triggers an interrupt to
+ *		the sender after packets are dequeued. Sender clears this bit
+ * @qhdr_tx_irq_status: Sender sets this bit and triggers an interrupt to
+ *		the receiver after packets are queued. Receiver clears this bit
+ * @qhdr_read_idx: Read index
+ * @qhdr_write_idx: Write index
+ */
+struct hfi_q_hdr {
+	uint32_t dummy[15];
+	uint32_t qhdr_status;
+	uint32_t dummy1[15];
+	uint32_t qhdr_start_addr;
+	uint32_t dummy2[15];
+	uint32_t qhdr_type;
+	uint32_t dummy3[15];
+	uint32_t qhdr_q_size;
+	uint32_t dummy4[15];
+	uint32_t qhdr_pkt_size;
+	uint32_t dummy5[15];
+	uint32_t qhdr_pkt_drop_cnt;
+	uint32_t dummy6[15];
+	uint32_t qhdr_rx_wm;
+	uint32_t dummy7[15];
+	uint32_t qhdr_tx_wm;
+	uint32_t dummy8[15];
+	uint32_t qhdr_rx_req;
+	uint32_t dummy9[15];
+	uint32_t qhdr_tx_req;
+	uint32_t dummy10[15];
+	uint32_t qhdr_rx_irq_status;
+	uint32_t dummy11[15];
+	uint32_t qhdr_tx_irq_status;
+	uint32_t dummy12[15];
+	uint32_t qhdr_read_idx;
+	uint32_t dummy13[15];
+	uint32_t qhdr_write_idx;
+	uint32_t dummy14[15];
+};
+
+/**
+ * struct hfi_qtbl
+ * @q_tbl_hdr: Queue table header
+ * @q_hdr: Queue header info, it holds info of cmd, msg and debug queues
+ */
+struct hfi_qtbl {
+	struct hfi_qtbl_hdr q_tbl_hdr;
+	struct hfi_q_hdr q_hdr[MAX_ICP_HFI_QUEUES];
+};
+
+/**
+ * struct hfi_info
+ * @map: Hfi shared memory info
+ * @smem_size: Shared memory size
+ * @uncachedheap_size: uncached heap size
+ * @msgpacket_buf: message buffer
+ * @hfi_state: State machine for hfi
+ * @cmd_q_lock: Lock for command queue
+ * @csr_base: CSR base address
+ */
+struct hfi_info {
+	struct hfi_mem_info map;
+	uint32_t smem_size;
+	uint32_t uncachedheap_size;
+	uint32_t msgpacket_buf[ICP_HFI_MAX_MSG_SIZE_IN_WORDS];
+	uint8_t hfi_state;
+	struct mutex cmd_q_lock;
+	struct mutex msg_q_lock;
+	void __iomem *csr_base;
+};
+
+#endif /* _CAM_HFI_REG_H_ */
diff --git a/drivers/media/platform/msm/camera/icp/fw_inc/hfi_session_defs.h b/drivers/media/platform/msm/camera/icp/fw_inc/hfi_session_defs.h
new file mode 100644
index 0000000..837efec
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/fw_inc/hfi_session_defs.h
@@ -0,0 +1,428 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_HFI_SESSION_DEFS_H
+#define _CAM_HFI_SESSION_DEFS_H
+
+#include <linux/types.h>
+
+#define HFI_IPEBPS_CMD_OPCODE_BPS_CONFIG_IO             0x1
+#define HFI_IPEBPS_CMD_OPCODE_BPS_FRAME_PROCESS         0x2
+#define HFI_IPEBPS_CMD_OPCODE_BPS_ABORT                 0x3
+#define HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY               0x4
+
+#define HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO             0x5
+#define HFI_IPEBPS_CMD_OPCODE_IPE_FRAME_PROCESS         0x6
+#define HFI_IPEBPS_CMD_OPCODE_IPE_ABORT                 0x7
+#define HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY               0x8
+
+#define HFI_IPEBPS_CMD_OPCODE_BPS_WAIT_FOR_IPE          0x9
+#define HFI_IPEBPS_CMD_OPCODE_BPS_WAIT_FOR_BPS          0xa
+#define HFI_IPEBPS_CMD_OPCODE_IPE_WAIT_FOR_BPS          0xb
+#define HFI_IPEBPS_CMD_OPCODE_IPE_WAIT_FOR_IPE          0xc
+
+#define HFI_IPEBPS_HANDLE_TYPE_BPS                      0x1
+#define HFI_IPEBPS_HANDLE_TYPE_IPE_RT                   0x2
+#define HFI_IPEBPS_HANDLE_TYPE_IPE_NON_RT               0x3
+
+/**
+ * struct hfi_cmd_abort_destroy
+ * @user_data: user supplied data
+ *
+ * IPE/BPS destroy/abort command
+ * @HFI_IPEBPS_CMD_OPCODE_IPE_ABORT
+ * @HFI_IPEBPS_CMD_OPCODE_BPS_ABORT
+ * @HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY
+ * @HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY
+ */
+struct hfi_cmd_abort_destroy {
+	uint64_t user_data;
+} __packed;
+
+/**
+ * struct hfi_cmd_chaining_ops
+ * @wait_hdl: current session handle waits on wait_hdl to complete operation
+ * @user_data: user supplied argument
+ *
+ * this structure for chaining opcodes
+ * BPS_WAITS_FOR_IPE
+ * BPS_WAITS_FOR_BPS
+ * IPE_WAITS_FOR_BPS
+ * IPE_WAITS_FOR_IPE
+ */
+struct hfi_cmd_chaining_ops {
+	uint32_t wait_hdl;
+	uint64_t user_data;
+} __packed;
+
+/**
+ * struct hfi_cmd_create_handle
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @handle_type: IPE/BPS firmware session handle type
+ * @user_data1: caller provided data1
+ * @user_data2: caller provided data2
+ *
+ * create firmware session handle
+ */
+struct hfi_cmd_create_handle {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint32_t handle_type;
+	uint64_t user_data1;
+	uint64_t user_data2;
+} __packed;
+
+/**
+ * struct hfi_cmd_ipebps_async
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @opcode: opcode for IPE/BPS async operation
+ *          CONFIG_IO: configures I/O for IPE/BPS handle
+ *          FRAME_PROCESS: image frame to be processed by IPE/BPS
+ *          ABORT: abort all processing frames of IPE/BPS handle
+ *          DESTROY: destroy earlier created IPE/BPS handle
+ *          BPS_WAITS_FOR_IPE: sync for BPS to wait for IPE
+ *          BPS_WAITS_FOR_BPS: sync for BPS to wait for BPS
+ *          IPE_WAITS_FOR_IPE: sync for IPE to wait for IPE
+ *          IPE_WAITS_FOR_BPS: sync for IPE to wait for BPS
+ * @num_fw_handles: number of IPE/BPS firmware handles in fw_handles array
+ * @fw_handles: IPE/BPS handles array
+ * @payload: command payload for IPE/BPS opcodes
+ * @direct: points to actual payload
+ * @indirect: points to address of payload
+ *
+ * sends async command to the earlier created IPE or BPS handle
+ * for asynchronous operation.
+ */
+struct hfi_cmd_ipebps_async {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint32_t opcode;
+	uint64_t user_data1;
+	uint64_t user_data2;
+	uint32_t num_fw_handles;
+	uint32_t fw_handles[1];
+	union {
+		uint32_t direct[1];
+		uint32_t indirect;
+	} payload;
+} __packed;
+
+/**
+ * struct hfi_msg_create_handle_ack
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @err_type: error code
+ * @fw_handle: output param for IPE/BPS handle
+ * @user_data1: user provided data1
+ * @user_data2: user provided data2
+ *
+ * ack for create handle command of IPE/BPS
+ * @HFI_MSG_IPEBPS_CREATE_HANDLE_ACK
+ */
+struct hfi_msg_create_handle_ack {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint32_t err_type;
+	uint32_t fw_handle;
+	uint64_t user_data1;
+	uint64_t user_data2;
+} __packed;
+
+/**
+ * struct hfi_msg_ipebps_async_ack
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @opcode: opcode of IPE/BPS async operation
+ * @user_data1: user provided data1
+ * @user_data2: user provided data2
+ * @err_type: error code
+ * @msg_data: IPE/BPS async done message data
+ *
+ * result of IPE/BPS async command
+ * @HFI_MSG_IPEBPS_ASYNC_COMMAND_ACK
+ */
+struct hfi_msg_ipebps_async_ack {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint32_t opcode;
+	uint64_t user_data1;
+	uint64_t user_data2;
+	uint32_t err_type;
+	uint32_t msg_data[1];
+} __packed;
+
+/**
+ * struct hfi_msg_frame_process_done
+ * @result: result of frame process command
+ * @scratch_buffer_address: address of scratch buffer
+ */
+struct hfi_msg_frame_process_done {
+	uint32_t result;
+	uint32_t scratch_buffer_address;
+};
+
+/**
+ * struct hfi_msg_chaining_op
+ * @status: return status
+ * @user_data: user data provided as part of chaining ops
+ *
+ * IPE/BPS wait response
+ */
+struct hfi_msg_chaining_op {
+	uint32_t status;
+	uint64_t user_data;
+} __packed;
+
+/**
+ * struct hfi_msg_abort_destroy
+ * @status: return status
+ * @user_data: user data provided as part of abort/destroy ops
+ *
+ * IPE/BPS abort/destroy response
+ */
+struct hfi_msg_abort_destroy {
+	uint32_t status;
+	uint64_t user_data;
+} __packed;
+
+#define MAX_NUM_OF_IMAGE_PLANES	2
+
+enum hfi_ipe_io_images {
+	IPE_INPUT_IMAGE_FULL,
+	IPE_INPUT_IMAGE_DS4,
+	IPE_INPUT_IMAGE_DS16,
+	IPE_INPUT_IMAGE_DS64,
+	IPE_INPUT_IMAGE_FULL_REF,
+	IPE_INPUT_IMAGE_DS4_REF,
+	IPE_INPUT_IMAGE_DS16_REF,
+	IPE_INPUT_IMAGE_DS64_REF,
+	IPE_OUTPUT_IMAGE_DISPLAY,
+	IPE_OUTPUT_IMAGE_VIDEO,
+	IPE_OUTPUT_IMAGE_FULL_REF,
+	IPE_OUTPUT_IMAGE_DS4_REF,
+	IPE_OUTPUT_IMAGE_DS16_REF,
+	IPE_OUTPUT_IMAGE_DS64_REF,
+	IPE_INPUT_IMAGE_FIRST = IPE_INPUT_IMAGE_FULL,
+	IPE_INPUT_IMAGE_LAST = IPE_INPUT_IMAGE_DS64_REF,
+	IPE_OUTPUT_IMAGE_FIRST = IPE_OUTPUT_IMAGE_DISPLAY,
+	IPE_OUTPUT_IMAGE_LAST = IPE_OUTPUT_IMAGE_DS64_REF,
+	IPE_IO_IMAGES_MAX
+};
+
+enum hfi_ipe_image_format {
+	IMAGE_FORMAT_INVALID,
+	IMAGE_FORMAT_MIPI_8,
+	IMAGE_FORMAT_MIPI_10,
+	IMAGE_FORMAT_MIPI_12,
+	IMAGE_FORMAT_MIPI_14,
+	IMAGE_FORMAT_BAYER_8,
+	IMAGE_FORMAT_BAYER_10,
+	IMAGE_FORMAT_BAYER_12,
+	IMAGE_FORMAT_BAYER_14,
+	IMAGE_FORMAT_PDI_10,
+	IMAGE_FORMAT_PD_10,
+	IMAGE_FORMAT_PD_8,
+	IMAGE_FORMAT_INDICATIONS,
+	IMAGE_FORMAT_REFINEMENT,
+	IMAGE_FORMAT_UBWC_TP_10,
+	IMAGE_FORMAT_UBWC_NV_12,
+	IMAGE_FORMAT_UBWC_NV12_4R,
+	IMAGE_FORMAT_UBWC_P010,
+	IMAGE_FORMAT_LINEAR_TP_10,
+	IMAGE_FORMAT_LINEAR_P010,
+	IMAGE_FORMAT_LINEAR_NV12,
+	IMAGE_FORMAT_LINEAR_PLAIN_16,
+	IMAGE_FORMAT_YUV422_8,
+	IMAGE_FORMAT_YUV422_10,
+	IMAGE_FORMAT_STATISTICS_BAYER_GRID,
+	IMAGE_FORMAT_STATISTICS_BAYER_HISTOGRAM,
+	IMAGE_FORMAT_MAX
+};
+
+enum hfi_ipe_plane_format {
+	PLANE_FORMAT_INVALID = 0,
+	PLANE_FORMAT_MIPI_8,
+	PLANE_FORMAT_MIPI_10,
+	PLANE_FORMAT_MIPI_12,
+	PLANE_FORMAT_MIPI_14,
+	PLANE_FORMAT_BAYER_8,
+	PLANE_FORMAT_BAYER_10,
+	PLANE_FORMAT_BAYER_12,
+	PLANE_FORMAT_BAYER_14,
+	PLANE_FORMAT_PDI_10,
+	PLANE_FORMAT_PD_10,
+	PLANE_FORMAT_PD_8,
+	PLANE_FORMAT_INDICATIONS,
+	PLANE_FORMAT_REFINEMENT,
+	PLANE_FORMAT_UBWC_TP_10_Y,
+	PLANE_FORMAT_UBWC_TP_10_C,
+	PLANE_FORMAT_UBWC_NV_12_Y,
+	PLANE_FORMAT_UBWC_NV_12_C,
+	PLANE_FORMAT_UBWC_NV_12_4R_Y,
+	PLANE_FORMAT_UBWC_NV_12_4R_C,
+	PLANE_FORMAT_UBWC_P010_Y,
+	PLANE_FORMAT_UBWC_P010_C,
+	PLANE_FORMAT_LINEAR_TP_10_Y,
+	PLANE_FORMAT_LINEAR_TP_10_C,
+	PLANE_FORMAT_LINEAR_P010_Y,
+	PLANE_FORMAT_LINEAR_P010_C,
+	PLANE_FORMAT_LINEAR_NV12_Y,
+	PLANE_FORMAT_LINEAR_NV12_C,
+	PLANE_FORMAT_LINEAR_PLAIN_16_Y,
+	PLANE_FORMAT_LINEAR_PLAIN_16_C,
+	PLANE_FORMAT_YUV422_8,
+	PLANE_FORMAT_YUV422_10,
+	PLANE_FORMAT_STATISTICS_BAYER_GRID,
+	PLANE_FORMAT_STATISTICS_BAYER_HISTOGRAM,
+	PLANE_FORMAT_MAX
+};
+
+enum hfi_ipe_bayer_pixel_order {
+	FIRST_PIXEL_R,
+	FIRST_PIXEL_GR,
+	FIRST_PIXEL_B,
+	FIRST_PIXEL_GB,
+	FIRST_PIXEL_MAX
+};
+
+enum hfi_ipe_pixel_pack_alignment {
+	PIXEL_LSB_ALIGNED,
+	PIXEL_MSB_ALIGNED,
+};
+
+enum hfi_ipe_yuv_422_order {
+	PIXEL_ORDER_Y_U_Y_V,
+	PIXEL_ORDER_Y_V_Y_U,
+	PIXEL_ORDER_U_Y_V_Y,
+	PIXEL_ORDER_V_Y_U_Y,
+	PIXEL_ORDER_YUV422_MAX
+};
+
+enum ubwc_write_client {
+	IPE_WR_CLIENT_0 = 0,
+	IPE_WR_CLIENT_1,
+	IPE_WR_CLIENT_5,
+	IPE_WR_CLIENT_6,
+	IPE_WR_CLIENT_7,
+	IPE_WR_CLIENT_8,
+	IPE_WR_CLIENT_MAX
+};
+
+/**
+ * struct image_info
+ * @format: image format
+ * @img_width: image width
+ * @img_height: image height
+ * @bayer_order: pixel order
+ * @pix_align: alignment
+ * @yuv422_order: YUV order
+ * @byte_swap: byte swap
+ */
+struct image_info {
+	enum hfi_ipe_image_format format;
+	uint32_t img_width;
+	uint32_t img_height;
+	enum hfi_ipe_bayer_pixel_order bayer_order;
+	enum hfi_ipe_pixel_pack_alignment pix_align;
+	enum hfi_ipe_yuv_422_order yuv422_order;
+	uint32_t byte_swap;
+} __packed;
+
+/**
+ * struct buffer_layout
+ * @buf_stride: buffer stride
+ * @buf_height: buffer height
+ */
+struct buffer_layout {
+	uint32_t buf_stride;
+	uint32_t buf_height;
+} __packed;
+
+/**
+ * struct image_desc
+ * @info: image info
+ * @buf_layout: buffer layout
+ * @meta_buf_layout: meta buffer layout
+ */
+struct image_desc {
+	struct image_info info;
+	struct buffer_layout buf_layout[MAX_NUM_OF_IMAGE_PLANES];
+	struct buffer_layout meta_buf_layout[MAX_NUM_OF_IMAGE_PLANES];
+} __packed;
+
+/**
+ * struct hfi_cmd_ipe_config
+ * @images: image descriptions
+ * @user_data: user supplied data
+ *
+ * payload for IPE async command
+ */
+struct hfi_cmd_ipe_config {
+	struct image_desc images[IPE_IO_IMAGES_MAX];
+	uint64_t user_data;
+} __packed;
+
+/**
+ * struct frame_buffers
+ * @buf_ptr: buffer pointers for all planes
+ * @meta_buf_ptr: meta buffer pointers for all planes
+ */
+struct frame_buffers {
+	uint32_t buf_ptr[MAX_NUM_OF_IMAGE_PLANES];
+	uint32_t meta_buf_ptr[MAX_NUM_OF_IMAGE_PLANES];
+} __packed;
+
+/**
+ * struct hfi_msg_ipe_config
+ * @rc: result of ipe config command
+ * @scratch_mem_size: scratch mem size for a config
+ * @user_data: user data
+ */
+struct hfi_msg_ipe_config {
+	uint32_t rc;
+	uint32_t scratch_mem_size;
+	uint64_t user_data;
+} __packed;
+
+/**
+ * struct hfi_msg_bps_common
+ * @rc: result of ipe config command
+ * @user_data: user data
+ */
+struct hfi_msg_bps_common {
+	uint32_t rc;
+	uint64_t user_data;
+} __packed;
+
+/**
+ * struct ipe_bps_destroy - payload for IPE/BPS destroy ops
+ * @userdata: user data (NOTE(review): not __packed unlike siblings -- confirm FW ABI)
+ */
+struct ipe_bps_destroy {
+	uint64_t userdata;
+};
+
+/**
+ * struct hfi_msg_ipe_frame_process
+ * @status: result of ipe frame process command
+ * @scratch_buf_addr: address of scratch buffer
+ * @user_data: user data
+ */
+struct hfi_msg_ipe_frame_process {
+	uint32_t status;
+	uint32_t scratch_buf_addr;
+	uint64_t user_data;
+} __packed;
+
+#endif /* _CAM_HFI_SESSION_DEFS_H */
diff --git a/drivers/media/platform/msm/camera/icp/fw_inc/hfi_sys_defs.h b/drivers/media/platform/msm/camera/icp/fw_inc/hfi_sys_defs.h
new file mode 100644
index 0000000..e7163ac
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/fw_inc/hfi_sys_defs.h
@@ -0,0 +1,483 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _HFI_DEFS_H_
+#define _HFI_DEFS_H_
+
+#include <linux/types.h>
+
+/*
+ * Following base acts as common starting points
+ * for all enumerations.
+ */
+#define HFI_COMMON_BASE                 0x0
+
+/* HFI Domain base offset for commands and messages */
+#define HFI_DOMAIN_SHFT                 (24)
+#define HFI_DOMAIN_BMSK                 (0x7 << HFI_DOMAIN_SHFT)
+#define HFI_DOMAIN_BASE_ICP             (0x0 << HFI_DOMAIN_SHFT)
+#define HFI_DOMAIN_BASE_IPE_BPS         (0x1 << HFI_DOMAIN_SHFT)
+#define HFI_DOMAIN_BASE_CDM             (0x2 << HFI_DOMAIN_SHFT)
+#define HFI_DOMAIN_BASE_DBG             (0x3 << HFI_DOMAIN_SHFT)
+
+/* Command base offset for commands */
+#define HFI_CMD_START_OFFSET            0x10000
+
+/* Command base offset for messages */
+#define HFI_MSG_START_OFFSET            0x20000
+
+/* System Level Error types */
+#define HFI_ERR_SYS_NONE                (HFI_COMMON_BASE)
+#define HFI_ERR_SYS_FATAL               (HFI_COMMON_BASE + 0x1)
+#define HFI_ERR_SYS_VERSION_MISMATCH    (HFI_COMMON_BASE + 0x2)
+#define HFI_ERR_SYS_UNSUPPORTED_DOMAIN  (HFI_COMMON_BASE + 0x3)
+#define HFI_ERR_SYS_UNSUPPORT_CMD       (HFI_COMMON_BASE + 0x4)
+#define HFI_ERR_SYS_CMDFAILED           (HFI_COMMON_BASE + 0x5)
+#define HFI_ERR_SYS_CMDSIZE             (HFI_COMMON_BASE + 0x6)
+
+/* System Level Event types */
+#define HFI_EVENT_SYS_ERROR             (HFI_COMMON_BASE + 0x1)
+#define HFI_EVENT_ICP_ERROR             (HFI_COMMON_BASE + 0x2)
+#define HFI_EVENT_IPE_BPS_ERROR         (HFI_COMMON_BASE + 0x3)
+#define HFI_EVENT_CDM_ERROR             (HFI_COMMON_BASE + 0x4)
+#define HFI_EVENT_DBG_ERROR             (HFI_COMMON_BASE + 0x5)
+
+/* Core level start Ranges for errors */
+#define HFI_ERR_ICP_START               (HFI_COMMON_BASE + 0x64)
+#define HFI_ERR_IPE_BPS_START           (HFI_ERR_ICP_START + 0x64)
+#define HFI_ERR_CDM_START               (HFI_ERR_IPE_BPS_START + 0x64)
+#define HFI_ERR_DBG_START               (HFI_ERR_CDM_START + 0x64)
+
+/*ICP Core level  error messages */
+#define HFI_ERR_NO_RES                  (HFI_ERR_ICP_START + 0x1)
+#define HFI_ERR_UNSUPPORTED_RES         (HFI_ERR_ICP_START + 0x2)
+#define HFI_ERR_UNSUPPORTED_PROP        (HFI_ERR_ICP_START + 0x3)
+#define HFI_ERR_INIT_EXPECTED           (HFI_ERR_ICP_START + 0x4)
+#define HFI_ERR_INIT_IGNORED            (HFI_ERR_ICP_START + 0x5)
+
+/* System level commands */
+#define HFI_CMD_COMMON_START \
+		(HFI_DOMAIN_BASE_ICP + HFI_CMD_START_OFFSET + 0x0)
+#define HFI_CMD_SYS_INIT               (HFI_CMD_COMMON_START + 0x1)
+#define HFI_CMD_SYS_PC_PREP            (HFI_CMD_COMMON_START + 0x2)
+#define HFI_CMD_SYS_SET_PROPERTY       (HFI_CMD_COMMON_START + 0x3)
+#define HFI_CMD_SYS_GET_PROPERTY       (HFI_CMD_COMMON_START + 0x4)
+#define HFI_CMD_SYS_PING               (HFI_CMD_COMMON_START + 0x5)
+#define HFI_CMD_SYS_RESET              (HFI_CMD_COMMON_START + 0x6)
+
+/* Core level commands */
+/* IPE/BPS core Commands */
+#define HFI_CMD_IPE_BPS_COMMON_START \
+		(HFI_DOMAIN_BASE_IPE_BPS + HFI_CMD_START_OFFSET + 0x0)
+#define HFI_CMD_IPEBPS_CREATE_HANDLE \
+		(HFI_CMD_IPE_BPS_COMMON_START + 0x8)
+#define HFI_CMD_IPEBPS_ASYNC_COMMAND_DIRECT \
+		(HFI_CMD_IPE_BPS_COMMON_START + 0xa)
+#define HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT \
+		(HFI_CMD_IPE_BPS_COMMON_START + 0xe)
+
+/* CDM core Commands */
+#define HFI_CMD_CDM_COMMON_START \
+		(HFI_DOMAIN_BASE_CDM + HFI_CMD_START_OFFSET + 0x0)
+#define HFI_CMD_CDM_TEST_START (HFI_CMD_CDM_COMMON_START + 0x800)
+#define HFI_CMD_CDM_END        (HFI_CMD_CDM_COMMON_START + 0xFFF)
+
+/* Debug/Test Commands */
+#define HFI_CMD_DBG_COMMON_START \
+		(HFI_DOMAIN_BASE_DBG + HFI_CMD_START_OFFSET + 0x0)
+#define HFI_CMD_DBG_TEST_START  (HFI_CMD_DBG_COMMON_START + 0x800)
+#define HFI_CMD_DBG_END         (HFI_CMD_DBG_COMMON_START + 0xFFF)
+
+/* System level messages */
+#define HFI_MSG_ICP_COMMON_START \
+		(HFI_DOMAIN_BASE_ICP + HFI_MSG_START_OFFSET + 0x0)
+#define HFI_MSG_SYS_INIT_DONE           (HFI_MSG_ICP_COMMON_START + 0x1)
+#define HFI_MSG_SYS_PC_PREP_DONE        (HFI_MSG_ICP_COMMON_START + 0x2)
+#define HFI_MSG_SYS_DEBUG               (HFI_MSG_ICP_COMMON_START + 0x3)
+#define HFI_MSG_SYS_IDLE                (HFI_MSG_ICP_COMMON_START + 0x4)
+#define HFI_MSG_SYS_PROPERTY_INFO       (HFI_MSG_ICP_COMMON_START + 0x5)
+#define HFI_MSG_SYS_PING_ACK            (HFI_MSG_ICP_COMMON_START + 0x6)
+#define HFI_MSG_SYS_RESET_ACK           (HFI_MSG_ICP_COMMON_START + 0x7)
+#define HFI_MSG_EVENT_NOTIFY            (HFI_MSG_ICP_COMMON_START + 0x8)
+
+/* Core level Messages */
+/* IPE/BPS core Messages */
+#define HFI_MSG_IPE_BPS_COMMON_START \
+		(HFI_DOMAIN_BASE_IPE_BPS + HFI_MSG_START_OFFSET + 0x0)
+#define HFI_MSG_IPEBPS_CREATE_HANDLE_ACK \
+		(HFI_MSG_IPE_BPS_COMMON_START + 0x08)
+#define HFI_MSG_IPEBPS_ASYNC_COMMAND_DIRECT_ACK \
+		(HFI_MSG_IPE_BPS_COMMON_START + 0x0a)
+#define HFI_MSG_IPEBPS_ASYNC_COMMAND_INDIRECT_ACK \
+		(HFI_MSG_IPE_BPS_COMMON_START + 0x0e)
+#define HFI_MSG_IPE_BPS_TEST_START	\
+		(HFI_MSG_IPE_BPS_COMMON_START + 0x800)
+#define HFI_MSG_IPE_BPS_END \
+		(HFI_MSG_IPE_BPS_COMMON_START + 0xFFF)
+
+/* CDM core Messages */
+#define HFI_MSG_CDM_COMMON_START \
+		(HFI_DOMAIN_BASE_CDM + HFI_MSG_START_OFFSET + 0x0)
+#define  HFI_MSG_PRI_CDM_PAYLOAD_ACK    (HFI_MSG_CDM_COMMON_START + 0xa)
+#define  HFI_MSG_PRI_LLD_PAYLOAD_ACK    (HFI_MSG_CDM_COMMON_START + 0xb)
+#define HFI_MSG_CDM_TEST_START          (HFI_MSG_CDM_COMMON_START + 0x800)
+#define HFI_MSG_CDM_END                 (HFI_MSG_CDM_COMMON_START + 0xFFF)
+
+/* core level test command ranges */
+/* ICP core level test command range */
+#define HFI_CMD_ICP_TEST_START          (HFI_CMD_ICP_COMMON_START + 0x800)
+#define HFI_CMD_ICP_END                 (HFI_CMD_ICP_COMMON_START + 0xFFF)
+
+/* IPE/BPS core level test command range */
+#define HFI_CMD_IPE_BPS_TEST_START \
+		(HFI_CMD_IPE_BPS_COMMON_START + 0x800)
+#define HFI_CMD_IPE_BPS_END (HFI_CMD_IPE_BPS_COMMON_START + 0xFFF)
+
+/* ICP core level test message range */
+#define HFI_MSG_ICP_TEST_START  (HFI_MSG_ICP_COMMON_START + 0x800)
+#define HFI_MSG_ICP_END         (HFI_MSG_ICP_COMMON_START + 0xFFF)
+
+/* ICP core level Debug test message range */
+#define HFI_MSG_DBG_COMMON_START \
+		(HFI_DOMAIN_BASE_DBG + 0x0)
+#define HFI_MSG_DBG_TEST_START  (HFI_MSG_DBG_COMMON_START + 0x800)
+#define HFI_MSG_DBG_END         (HFI_MSG_DBG_COMMON_START + 0xFFF)
+
+/* System  level property base offset */
+#define HFI_PROPERTY_ICP_COMMON_START  (HFI_DOMAIN_BASE_ICP + 0x0)
+
+#define HFI_PROP_SYS_DEBUG_CFG         (HFI_PROPERTY_ICP_COMMON_START + 0x1)
+#define HFI_PROP_SYS_IMAGE_VER         (HFI_PROPERTY_ICP_COMMON_START + 0x3)
+#define HFI_PROP_SYS_SUPPORTED         (HFI_PROPERTY_ICP_COMMON_START + 0x4)
+
+/* Capabilities reported at sys init */
+#define HFI_CAPS_PLACEHOLDER_1         (HFI_COMMON_BASE + 0x1)
+#define HFI_CAPS_PLACEHOLDER_2         (HFI_COMMON_BASE + 0x2)
+
+/* Section describes different debug levels (HFI_DEBUG_MSG_X)
+ * available for debug messages from FW
+ */
+#define  HFI_DEBUG_MSG_LOW      0x00000001
+#define  HFI_DEBUG_MSG_MEDIUM   0x00000002
+#define  HFI_DEBUG_MSG_HIGH     0x00000004
+#define  HFI_DEBUG_MSG_ERROR    0x00000008
+#define  HFI_DEBUG_MSG_FATAL    0x00000010
+/* Messages containing performance data */
+#define  HFI_DEBUG_MSG_PERF     0x00000020
+/* Disable ARM9 WFI in low power mode. */
+#define  HFI_DEBUG_CFG_WFI      0x01000000
+/* Disable ARM9 watchdog. */
+#define  HFI_DEBUG_CFG_ARM9WD   0x10000000
+
+/* Debug Msg Communication types:
+ * Section describes different modes (HFI_DEBUG_MODE_X)
+ * available to communicate the debug messages
+ */
+ /* Debug message output through   the interface debug queue. */
+#define HFI_DEBUG_MODE_QUEUE     0x00000001
+ /* Debug message output through QDSS. */
+#define HFI_DEBUG_MODE_QDSS      0x00000002
+
+
+#define HFI_DEBUG_MSG_LOW        0x00000001
+#define HFI_DEBUG_MSG_MEDIUM     0x00000002
+#define HFI_DEBUG_MSG_HIGH       0x00000004
+#define HFI_DEBUG_MSG_ERROR      0x00000008
+#define HFI_DEBUG_MSG_FATAL      0x00000010
+#define HFI_DEBUG_MSG_PERF       0x00000020
+#define HFI_DEBUG_CFG_WFI        0x01000000
+#define HFI_DEBUG_CFG_ARM9WD     0x10000000
+
+#define HFI_DEBUG_MODE_QUEUE     0x00000001
+#define HFI_DEBUG_MODE_QDSS      0x00000002
+
+/**
+ * start of sys command packet types
+ * These commands are used to get system level information
+ * from firmware
+ */
+
+/**
+ * struct hfi_caps_support
+ * payload to report caps through HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED
+ * @type: capability type
+ * @min: minimum supported value for the capability
+ * @max: maximum supported value for the capability
+ * @step_size: supported steps between min-max
+ */
+struct hfi_caps_support {
+	uint32_t type;
+	uint32_t min;
+	uint32_t max;
+	uint32_t step_size;
+} __packed;
+
+/**
+ * struct hfi_caps_support_info
+ * capability report through HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED
+ * @num_caps: number of capabilities listed
+ * @caps_data: capabilities info array
+ */
+struct hfi_caps_support_info {
+	uint32_t num_caps;
+	struct hfi_caps_support caps_data[1];
+} __packed;
+
+/**
+ * struct hfi_debug
+ * payload structure to configure HFI_PROPERTY_SYS_DEBUG_CONFIG
+ * @debug_config: it is a result of HFI_DEBUG_MSG_X values that
+ *                are OR-ed together to specify the debug message types
+ *                to output
+ * @debug_mode: debug message output through debug queue/qdss
+ * @HFI_PROPERTY_SYS_DEBUG_CONFIG
+ */
+struct hfi_debug {
+	uint32_t debug_config;
+	uint32_t debug_mode;
+} __packed;
+
+
+/**
+ * struct hfi_cmd_sys_init
+ * command to initialization of system session
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @HFI_CMD_SYS_INIT
+ */
+struct hfi_cmd_sys_init {
+	uint32_t size;
+	uint32_t pkt_type;
+} __packed;
+
+/**
+ * struct hfi_cmd_pc_prep
+ * command to firmware to prepare for power collapse
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @HFI_CMD_SYS_PC_PREP
+ */
+struct hfi_cmd_pc_prep {
+	uint32_t size;
+	uint32_t pkt_type;
+} __packed;
+
+/**
+ * struct hfi_cmd_prop
+ * command to get/set properties of firmware
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @num_prop: number of properties queried/set
+ * @prop_data: array of property IDs being queried. size depends on num_prop
+ *             array of property IDs and associated structure pairs in set
+ * @HFI_CMD_SYS_GET_PROPERTY
+ * @HFI_CMD_SYS_SET_PROPERTY
+ */
+struct hfi_cmd_prop {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint32_t num_prop;
+	uint32_t prop_data[1];
+} __packed;
+
+/**
+ * struct hfi_cmd_ping_pkt
+ * ping command pings the firmware to confirm whether
+ * it is alive.
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @user_data: client data, firmware returns this data
+ *             as part of HFI_MSG_SYS_PING_ACK
+ * @HFI_CMD_SYS_PING
+ */
+struct hfi_cmd_ping_pkt {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint64_t user_data;
+} __packed;
+
+/**
+ * struct hfi_cmd_sys_reset_pkt
+ * sends the reset command to FW. FW responds in the same type
+ * of packet. so can be used for reset_ack_pkt type also
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @user_data: client data, firmware returns this data
+ *             as part of HFI_MSG_SYS_RESET_ACK
+ * @HFI_CMD_SYS_RESET
+ */
+
+struct hfi_cmd_sys_reset_pkt {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint64_t user_data;
+} __packed;
+
+/* end of sys command packet types */
+
+/* start of sys message packet types */
+
+/**
+ * struct hfi_prop
+ * structure to report maximum supported features of firmware.
+ */
+struct hfi_sys_support {
+	uint32_t place_holder;
+} __packed;
+
+/**
+ * struct hfi_supported_prop
+ * structure to report HFI_PROPERTY_PARAM_PROPERTIES_SUPPORTED
+ * for a session
+ * @num_prop: number of properties supported
+ * @prop_data: array of supported property IDs
+ */
+struct hfi_supported_prop {
+	uint32_t num_prop;
+	uint32_t prop_data[1];
+} __packed;
+
+/**
+ * struct hfi_image_version
+ * system image version
+ * @major: major version number
+ * @minor: minor version number
+ * @ver_name_size: size of version name
+ * @ver_name: image version name
+ */
+struct hfi_image_version {
+	uint32_t major;
+	uint32_t minor;
+	uint32_t ver_name_size;
+	uint8_t  ver_name[1];
+} __packed;
+
+/**
+ * struct hfi_msg_init_done
+ * system init done message from firmware. Many system level properties
+ * are returned with the packet
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @err_type: error code associated with response
+ * @num_prop: number of default capability info
+ * @prop_data: array of property ids and corresponding structure pairs
+ */
+struct hfi_msg_init_done {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint32_t err_type;
+	uint32_t num_prop;
+	uint32_t prop_data[1];
+} __packed;
+
+/**
+ * struct hfi_msg_pc_prep_done
+ * system power collapse preparation done message
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @err_type: error code associated with the response
+ */
+struct hfi_msg_pc_prep_done {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint32_t err_type;
+} __packed;
+
+/**
+ * struct hfi_msg_prop
+ * system property info from firmware
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @num_prop: number of property info structures
+ * @prop_data: array of property IDs and associated structure pairs
+ */
+struct hfi_msg_prop {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint32_t num_prop;
+	uint32_t prop_data[1];
+} __packed;
+
+/**
+ * struct hfi_msg_idle
+ * system idle message from firmware
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ */
+struct hfi_msg_idle {
+	uint32_t size;
+	uint32_t pkt_type;
+} __packed;
+
+/**
+ * struct hfi_msg_ping_ack
+ * system ping ack message
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @user_data: this data is sent as part of ping command from host
+ */
+struct hfi_msg_ping_ack {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint64_t user_data;
+} __packed;
+
+/**
+ * struct hfi_msg_debug
+ * system debug message definition
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @msg_type: debug message type
+ * @msg_size: size of debug message in bytes
+ * @timestamp_hi: most significant 32 bits of the 64 bit timestamp field.
+ *                timestamp shall be interpreted as a signed 64-bit value
+ *                representing microseconds.
+ * @timestamp_lo: least significant 32 bits of the 64 bit timestamp field.
+ *                timestamp shall be interpreted as a signed 64-bit value
+ *                representing microseconds.
+ * @msg_data: message data in string form
+ */
+struct hfi_msg_debug {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint32_t msg_type;
+	uint32_t msg_size;
+	uint32_t timestamp_hi;
+	uint32_t timestamp_lo;
+	uint8_t  msg_data[1];
+} __packed;
+/**
+ * struct hfi_msg_event_notify
+ * event notify message
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @fw_handle: firmware session handle
+ * @event_id: session event id
+ * @event_data1: event data corresponding to event ID
+ * @event_data2: event data corresponding to event ID
+ * @ext_event_data: info array, interpreted based on event_data1
+ * and event_data2
+ */
+struct hfi_msg_event_notify {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint32_t fw_handle;
+	uint32_t event_id;
+	uint32_t event_data1;
+	uint32_t event_data2;
+	uint32_t ext_event_data[1];
+} __packed;
+/**
+ * end of sys message packet types
+ */
+
+#endif /* _HFI_DEFS_H_ */
diff --git a/drivers/media/platform/msm/camera/icp/hfi.c b/drivers/media/platform/msm/camera/icp/hfi.c
new file mode 100644
index 0000000..4315865
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/hfi.c
@@ -0,0 +1,522 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "HFI-FW %s:%d " fmt, __func__, __LINE__
+
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <asm/errno.h>
+#include <linux/timer.h>
+#include <media/cam_icp.h>
+#include "cam_io_util.h"
+#include "hfi_reg.h"
+#include "hfi_sys_defs.h"
+#include "hfi_session_defs.h"
+#include "hfi_intf.h"
+#include "cam_icp_hw_mgr_intf.h"
+
+#define HFI_VERSION_INFO_MAJOR_VAL  1
+#define HFI_VERSION_INFO_MINOR_VAL  1
+#define HFI_VERSION_INFO_STEP_VAL   0
+#define HFI_VERSION_INFO_STEP_VAL   0
+#define HFI_VERSION_INFO_MAJOR_BMSK  0xFF000000
+#define HFI_VERSION_INFO_MAJOR_SHFT  24
+#define HFI_VERSION_INFO_MINOR_BMSK  0xFFFF00
+#define HFI_VERSION_INFO_MINOR_SHFT  8
+#define HFI_VERSION_INFO_STEP_BMSK   0xFF
+#define HFI_VERSION_INFO_STEP_SHFT  0
+
+#undef  HFI_DBG
+#define HFI_DBG(fmt, args...) pr_debug(fmt, ##args)
+
+struct hfi_info *g_hfi;
+unsigned int g_icp_mmu_hdl;
+
+int hfi_write_cmd(void *cmd_ptr)
+{
+	uint32_t size_in_words, empty_space, new_write_idx, read_idx, temp;
+	uint32_t *write_q, *write_ptr;
+	struct hfi_qtbl *q_tbl;
+	struct hfi_q_hdr *q;
+	int rc = 0;
+	int i = 0;
+
+	if (!cmd_ptr) {
+		pr_err("Invalid args\n");
+		return -EINVAL;
+	}
+
+	if (!g_hfi || g_hfi->hfi_state < FW_START_SENT) {
+		pr_err("FW not ready yet\n");
+		return -EIO;
+	}
+
+	mutex_lock(&g_hfi->cmd_q_lock);
+
+	q_tbl = (struct hfi_qtbl *)g_hfi->map.qtbl.kva;
+	q = &q_tbl->q_hdr[Q_CMD];
+
+	write_q = (uint32_t *)g_hfi->map.cmd_q.kva;
+
+	size_in_words = (*(uint32_t *)cmd_ptr) >> BYTE_WORD_SHIFT;
+	if (!size_in_words) {
+		pr_debug("failed");
+		rc = -EINVAL;
+		goto err;
+	}
+
+	HFI_DBG("size_in_words : %u\n", size_in_words);
+	HFI_DBG("q->qhdr_write_idx %x\n", q->qhdr_write_idx);
+
+	read_idx = q->qhdr_read_idx;
+
+	empty_space = (q->qhdr_write_idx >= read_idx) ?
+		(q->qhdr_q_size - (q->qhdr_write_idx - read_idx)) :
+		(read_idx - q->qhdr_write_idx);
+	if (empty_space <= size_in_words) {
+		pr_err("failed");
+		rc = -EIO;
+		goto err;
+	}
+	HFI_DBG("empty_space : %u\n", empty_space);
+
+	new_write_idx = q->qhdr_write_idx + size_in_words;
+	write_ptr = (uint32_t *)(write_q + q->qhdr_write_idx);
+
+	if (new_write_idx < q->qhdr_q_size) {
+		memcpy(write_ptr, (uint8_t *)cmd_ptr,
+			size_in_words << BYTE_WORD_SHIFT);
+	} else {
+		new_write_idx -= q->qhdr_q_size;
+		temp = (size_in_words - new_write_idx) << BYTE_WORD_SHIFT;
+		memcpy(write_ptr, (uint8_t *)cmd_ptr, temp);
+		memcpy(write_q, (uint8_t *)cmd_ptr + temp,
+			new_write_idx << BYTE_WORD_SHIFT);
+	}
+	for (i = 0; i < size_in_words; i++)
+		pr_debug("%x\n", write_ptr[i]);
+
+	q->qhdr_write_idx = new_write_idx;
+	HFI_DBG("q->qhdr_write_idx %x\n", q->qhdr_write_idx);
+	cam_io_w((uint32_t)INTR_ENABLE,
+		g_hfi->csr_base + HFI_REG_A5_CSR_HOST2ICPINT);
+err:
+	/* propagate rc: error paths above set -EINVAL/-EIO */
+	mutex_unlock(&g_hfi->cmd_q_lock);
+	return rc;
+}
+
+int hfi_read_message(uint32_t *pmsg, uint8_t q_id)
+{
+	struct hfi_qtbl *q_tbl_ptr;
+	struct hfi_q_hdr *q;
+	uint32_t new_read_idx, size_in_words, temp;
+	uint32_t *read_q, *read_ptr;
+	int rc = 0;
+	int i = 0;
+
+	if (!pmsg || q_id > Q_DBG) {
+		pr_err("Invalid args\n");
+		return -EINVAL;
+	}
+
+	q_tbl_ptr = (struct hfi_qtbl *)g_hfi->map.qtbl.kva;
+	q = &q_tbl_ptr->q_hdr[q_id];
+
+	if ((g_hfi->hfi_state < FW_START_SENT) ||
+		(q->qhdr_read_idx == q->qhdr_write_idx)) {
+		pr_debug("FW or Q not ready, hfi state : %u, r idx : %u, w idx : %u\n",
+			g_hfi->hfi_state, q->qhdr_read_idx, q->qhdr_write_idx);
+		return -EIO;
+	}
+
+	mutex_lock(&g_hfi->msg_q_lock);
+
+	if (q_id == Q_CMD)
+		read_q = (uint32_t *)g_hfi->map.cmd_q.kva;
+	else if (q_id == Q_MSG)
+		read_q = (uint32_t *)g_hfi->map.msg_q.kva;
+	else
+		read_q = (uint32_t *)g_hfi->map.dbg_q.kva;
+
+	read_ptr = (uint32_t *)(read_q + q->qhdr_read_idx);
+	size_in_words = (*read_ptr) >> BYTE_WORD_SHIFT;
+
+	HFI_DBG("size_in_words : %u\n", size_in_words);
+	HFI_DBG("read_ptr : %pK\n", (void *)read_ptr);
+
+	if ((size_in_words == 0) ||
+		(size_in_words > ICP_HFI_MAX_MSG_SIZE_IN_WORDS)) {
+		pr_err("invalid HFI message packet size - 0x%08x\n",
+			size_in_words << BYTE_WORD_SHIFT);
+		q->qhdr_read_idx = q->qhdr_write_idx;
+		rc = -EIO;
+		goto err;
+	}
+
+	new_read_idx = q->qhdr_read_idx + size_in_words;
+	HFI_DBG("new_read_idx : %u\n", new_read_idx);
+
+	if (new_read_idx < q->qhdr_q_size) {
+		memcpy(pmsg, read_ptr, size_in_words << BYTE_WORD_SHIFT);
+	} else {
+		new_read_idx -= q->qhdr_q_size;
+		temp = (size_in_words - new_read_idx) << BYTE_WORD_SHIFT;
+		memcpy(pmsg, read_ptr, temp);
+		memcpy((uint8_t *)pmsg + temp, read_q,
+			new_read_idx << BYTE_WORD_SHIFT);
+	}
+
+	for (i = 0; i < size_in_words; i++)
+		pr_debug("%x\n", read_ptr[i]);
+
+	q->qhdr_read_idx = new_read_idx;
+err:
+	mutex_unlock(&g_hfi->msg_q_lock);
+	HFI_DBG("Exit\n");
+	return rc;
+}
+
+void hfi_send_system_cmd(uint32_t type, uint64_t data, uint32_t size)
+{
+	switch (type) {
+	case HFI_CMD_SYS_INIT: {
+		struct hfi_cmd_sys_init init;
+
+		memset(&init, 0, sizeof(init));
+
+		init.size = sizeof(struct hfi_cmd_sys_init);
+		init.pkt_type = type;
+		hfi_write_cmd(&init);
+	}
+		break;
+	case HFI_CMD_SYS_PC_PREP: {
+		struct hfi_cmd_pc_prep prep;
+
+		prep.size = sizeof(struct hfi_cmd_pc_prep);
+		prep.pkt_type = type;
+		hfi_write_cmd(&prep);
+	}
+		break;
+	case HFI_CMD_SYS_SET_PROPERTY: {
+		struct hfi_cmd_prop prop;
+
+		if ((uint32_t)data == (uint32_t)HFI_PROP_SYS_DEBUG_CFG) {
+			prop.size = sizeof(struct hfi_cmd_prop);
+			prop.pkt_type = type;
+			prop.num_prop = 1;
+			prop.prop_data[0] = HFI_PROP_SYS_DEBUG_CFG;
+			hfi_write_cmd(&prop);
+		}
+	}
+		break;
+	case HFI_CMD_SYS_GET_PROPERTY:
+		break;
+	case HFI_CMD_SYS_PING: {
+		struct hfi_cmd_ping_pkt ping;
+
+		ping.size = sizeof(struct hfi_cmd_ping_pkt);
+		ping.pkt_type = type;
+		ping.user_data = (uint64_t)data;
+		hfi_write_cmd(&ping);
+	}
+		break;
+	case HFI_CMD_SYS_RESET: {
+		struct hfi_cmd_sys_reset_pkt reset;
+
+		reset.size = sizeof(struct hfi_cmd_sys_reset_pkt);
+		reset.pkt_type = type;
+		reset.user_data = (uint64_t)data;
+		hfi_write_cmd(&reset);
+	}
+		break;
+	case HFI_CMD_IPEBPS_CREATE_HANDLE: {
+		struct hfi_cmd_create_handle handle;
+
+		handle.size = sizeof(struct hfi_cmd_create_handle);
+		handle.pkt_type = type;
+		handle.handle_type = (uint32_t)data;
+		handle.user_data1 = 0;
+		hfi_write_cmd(&handle);
+	}
+		break;
+	case HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT:
+		break;
+	default:
+		pr_err("command not supported :%d\n", type);
+		break;
+	}
+}
+
+
+int hfi_get_hw_caps(void *query_buf)
+{
+	int i = 0;
+	struct cam_icp_query_cap_cmd *query_cmd = NULL;
+
+	if (!query_buf) {
+		pr_err("%s: query buf is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	query_cmd = (struct cam_icp_query_cap_cmd *)query_buf;
+	query_cmd->fw_version.major = 0x12;
+	query_cmd->fw_version.minor = 0x12;
+	query_cmd->fw_version.revision = 0x12;
+
+	query_cmd->api_version.major = 0x13;
+	query_cmd->api_version.minor = 0x13;
+	query_cmd->api_version.revision = 0x13;
+
+	query_cmd->num_ipe = 2;
+	query_cmd->num_bps = 1;
+
+	for (i = 0; i < CAM_ICP_DEV_TYPE_MAX; i++) {
+		query_cmd->dev_ver[i].dev_type = i;
+		query_cmd->dev_ver[i].hw_ver.major = 0x34 + i;
+		query_cmd->dev_ver[i].hw_ver.minor = 0x34 + i;
+		query_cmd->dev_ver[i].hw_ver.incr = 0x34 + i;
+	}
+	return 0;
+}
+
+
+void cam_hfi_enable_cpu(void __iomem *icp_base)
+{
+	cam_io_w((uint32_t)ICP_FLAG_CSR_A5_EN,
+			icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+	cam_io_w((uint32_t)0x10, icp_base + HFI_REG_A5_CSR_NSEC_RESET);
+}
+
+int cam_hfi_init(uint8_t event_driven_mode, struct hfi_mem_info *hfi_mem,
+		void __iomem *icp_base, bool debug)
+{
+	int rc = 0;
+	struct hfi_qtbl *qtbl;
+	struct hfi_qtbl_hdr *qtbl_hdr;
+	struct hfi_q_hdr *cmd_q_hdr, *msg_q_hdr, *dbg_q_hdr;
+	uint32_t hw_version, fw_version;
+	uint32_t status, retries = 20;
+
+	if (!g_hfi) {
+		g_hfi = kzalloc(sizeof(struct hfi_info), GFP_KERNEL);
+		if (!g_hfi) {
+			rc = -ENOMEM;
+			goto alloc_fail;
+		}
+	}
+
+	pr_debug("g_hfi: %pK\n", (void *)g_hfi);
+	if (g_hfi->hfi_state != INVALID) {
+		pr_err("hfi_init: invalid state\n");
+		return -EINVAL;
+	}
+
+	g_hfi->hfi_state = FW_LOAD_DONE;
+	memcpy(&g_hfi->map, hfi_mem, sizeof(g_hfi->map));
+
+	if (debug) {
+		cam_io_w_mb(
+		(uint32_t)(ICP_FLAG_CSR_A5_EN | ICP_FLAG_CSR_WAKE_UP_EN |
+		ICP_CSR_EDBGRQ | ICP_CSR_DBGSWENABLE),
+		icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+		msleep(100);
+		cam_io_w_mb((uint32_t)(ICP_FLAG_CSR_A5_EN |
+		ICP_FLAG_CSR_WAKE_UP_EN | ICP_CSR_EN_CLKGATE_WFI),
+		icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+	} else {
+		cam_io_w((uint32_t)ICP_FLAG_CSR_A5_EN |
+			ICP_FLAG_CSR_WAKE_UP_EN,
+			icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+	}
+
+	mutex_init(&g_hfi->cmd_q_lock);
+	mutex_init(&g_hfi->msg_q_lock);
+
+	g_hfi->csr_base = icp_base;
+
+	qtbl = (struct hfi_qtbl *)hfi_mem->qtbl.kva;
+	qtbl_hdr = &qtbl->q_tbl_hdr;
+	qtbl_hdr->qtbl_version = 0xFFFFFFFF;
+	qtbl_hdr->qtbl_size = sizeof(struct hfi_qtbl);
+	qtbl_hdr->qtbl_qhdr0_offset = sizeof(struct hfi_qtbl_hdr);
+	qtbl_hdr->qtbl_qhdr_size = sizeof(struct hfi_q_hdr);
+	qtbl_hdr->qtbl_num_q = ICP_HFI_NUMBER_OF_QS;
+	qtbl_hdr->qtbl_num_active_q = ICP_HFI_NUMBER_OF_QS;
+
+	/* setup host-to-firmware command queue */
+	pr_debug("updating the command queue info\n");
+	cmd_q_hdr = &qtbl->q_hdr[Q_CMD];
+	cmd_q_hdr->qhdr_status = QHDR_ACTIVE;
+	cmd_q_hdr->qhdr_start_addr = hfi_mem->cmd_q.iova;
+	cmd_q_hdr->qhdr_q_size =  ICP_CMD_Q_SIZE_IN_BYTES >> BYTE_WORD_SHIFT;
+	cmd_q_hdr->qhdr_pkt_size = ICP_HFI_VAR_SIZE_PKT;
+	cmd_q_hdr->qhdr_pkt_drop_cnt = RESET;
+	cmd_q_hdr->qhdr_read_idx = RESET;
+	cmd_q_hdr->qhdr_write_idx = RESET;
+
+	/* setup firmware-to-Host message queue */
+	pr_debug("updating the message queue info\n");
+	msg_q_hdr = &qtbl->q_hdr[Q_MSG];
+	msg_q_hdr->qhdr_status = QHDR_ACTIVE;
+	msg_q_hdr->qhdr_start_addr = hfi_mem->msg_q.iova;
+	msg_q_hdr->qhdr_q_size = ICP_MSG_Q_SIZE_IN_BYTES >> BYTE_WORD_SHIFT;
+	msg_q_hdr->qhdr_pkt_size = ICP_HFI_VAR_SIZE_PKT;
+	msg_q_hdr->qhdr_pkt_drop_cnt = RESET;
+	msg_q_hdr->qhdr_read_idx = RESET;
+	msg_q_hdr->qhdr_write_idx = RESET;
+
+	/* setup firmware-to-host debug queue */
+	pr_debug("updating the debug queue info\n");
+	dbg_q_hdr = &qtbl->q_hdr[Q_DBG];
+	dbg_q_hdr->qhdr_status = QHDR_ACTIVE;
+	dbg_q_hdr->qhdr_start_addr = hfi_mem->dbg_q.iova;
+	dbg_q_hdr->qhdr_q_size = ICP_DBG_Q_SIZE_IN_BYTES >> BYTE_WORD_SHIFT;
+	dbg_q_hdr->qhdr_pkt_size = ICP_HFI_VAR_SIZE_PKT;
+	dbg_q_hdr->qhdr_pkt_drop_cnt = RESET;
+	dbg_q_hdr->qhdr_read_idx = RESET;
+	dbg_q_hdr->qhdr_write_idx = RESET;
+	pr_debug("Done updating the debug queue info\n");
+
+	switch (event_driven_mode) {
+	case INTR_MODE:
+		cmd_q_hdr->qhdr_type = Q_CMD;
+		cmd_q_hdr->qhdr_rx_wm = SET;
+		cmd_q_hdr->qhdr_tx_wm = SET;
+		cmd_q_hdr->qhdr_rx_req = SET;
+		cmd_q_hdr->qhdr_tx_req = RESET;
+		cmd_q_hdr->qhdr_rx_irq_status = RESET;
+		cmd_q_hdr->qhdr_tx_irq_status = RESET;
+
+		msg_q_hdr->qhdr_type = Q_MSG;
+		msg_q_hdr->qhdr_rx_wm = SET;
+		msg_q_hdr->qhdr_tx_wm = SET;
+		msg_q_hdr->qhdr_rx_req = SET;
+		msg_q_hdr->qhdr_tx_req = RESET;
+		msg_q_hdr->qhdr_rx_irq_status = RESET;
+		msg_q_hdr->qhdr_tx_irq_status = RESET;
+
+		dbg_q_hdr->qhdr_type = Q_DBG;
+		dbg_q_hdr->qhdr_rx_wm = SET;
+		dbg_q_hdr->qhdr_tx_wm = SET;
+		dbg_q_hdr->qhdr_rx_req = SET;
+		dbg_q_hdr->qhdr_tx_req = RESET;
+		dbg_q_hdr->qhdr_rx_irq_status = RESET;
+		dbg_q_hdr->qhdr_tx_irq_status = RESET;
+
+		break;
+
+	case POLL_MODE:
+		cmd_q_hdr->qhdr_type = Q_CMD | TX_EVENT_POLL_MODE_2 |
+			RX_EVENT_POLL_MODE_2;
+		msg_q_hdr->qhdr_type = Q_MSG | TX_EVENT_POLL_MODE_2 |
+			RX_EVENT_POLL_MODE_2;
+		dbg_q_hdr->qhdr_type = Q_DBG | TX_EVENT_POLL_MODE_2 |
+			RX_EVENT_POLL_MODE_2;
+		break;
+
+	case WM_MODE:
+		cmd_q_hdr->qhdr_type = Q_CMD | TX_EVENT_DRIVEN_MODE_2 |
+			RX_EVENT_DRIVEN_MODE_2;
+		cmd_q_hdr->qhdr_rx_wm = SET;
+		cmd_q_hdr->qhdr_tx_wm = SET;
+		cmd_q_hdr->qhdr_rx_req = RESET;
+		cmd_q_hdr->qhdr_tx_req = SET;
+		cmd_q_hdr->qhdr_rx_irq_status = RESET;
+		cmd_q_hdr->qhdr_tx_irq_status = RESET;
+
+		msg_q_hdr->qhdr_type = Q_MSG | TX_EVENT_DRIVEN_MODE_2 |
+			RX_EVENT_DRIVEN_MODE_2;
+		msg_q_hdr->qhdr_rx_wm = SET;
+		msg_q_hdr->qhdr_tx_wm = SET;
+		msg_q_hdr->qhdr_rx_req = SET;
+		msg_q_hdr->qhdr_tx_req = RESET;
+		msg_q_hdr->qhdr_rx_irq_status = RESET;
+		msg_q_hdr->qhdr_tx_irq_status = RESET;
+
+		dbg_q_hdr->qhdr_type = Q_DBG | TX_EVENT_DRIVEN_MODE_2 |
+			RX_EVENT_DRIVEN_MODE_2;
+		dbg_q_hdr->qhdr_rx_wm = SET;
+		dbg_q_hdr->qhdr_tx_wm = SET;
+		dbg_q_hdr->qhdr_rx_req = SET;
+		dbg_q_hdr->qhdr_tx_req = RESET;
+		dbg_q_hdr->qhdr_rx_irq_status = RESET;
+		dbg_q_hdr->qhdr_tx_irq_status = RESET;
+		break;
+
+	default:
+		pr_err("Invalid event driven mode :%u\n", event_driven_mode);
+		break;
+	}
+
+	cam_io_w((uint32_t)hfi_mem->qtbl.iova, icp_base + HFI_REG_QTBL_PTR);
+	cam_io_w((uint32_t)0x7400000, icp_base + HFI_REG_SHARED_MEM_PTR);
+	cam_io_w((uint32_t)0x6400000, icp_base + HFI_REG_SHARED_MEM_SIZE);
+	cam_io_w((uint32_t)hfi_mem->sec_heap.iova,
+		icp_base + HFI_REG_UNCACHED_HEAP_PTR);
+	cam_io_w((uint32_t)hfi_mem->sec_heap.len,
+		icp_base + HFI_REG_UNCACHED_HEAP_SIZE);
+	cam_io_w((uint32_t)ICP_INIT_REQUEST_SET,
+		icp_base + HFI_REG_HOST_ICP_INIT_REQUEST);
+
+	hw_version = cam_io_r(icp_base + HFI_REG_A5_HW_VERSION);
+	pr_debug("hw version : %u[%x]\n", hw_version, hw_version);
+
+	do {
+		msleep(500);
+		status = cam_io_r(icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE);
+	} while (status != ICP_INIT_RESP_SUCCESS && --retries);
+
+	if (status == ICP_INIT_RESP_SUCCESS) {
+		g_hfi->hfi_state = FW_RESP_DONE;
+		rc = 0;
+	} else {
+		rc = -ENODEV;
+		pr_err("FW initialization failed\n");
+		goto regions_fail;
+	}
+
+	fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
+	g_hfi->hfi_state = FW_START_SENT;
+
+	pr_debug("fw version : %u[%x]\n", fw_version, fw_version);
+	pr_debug("hfi init is successful\n");
+	cam_io_w((uint32_t)INTR_ENABLE, icp_base + HFI_REG_A5_CSR_A2HOSTINTEN);
+	return rc;
+regions_fail:
+	kzfree(g_hfi);
+	g_hfi = NULL;
+alloc_fail:
+	return rc;
+}
+
+
+void cam_hfi_deinit(void)
+{
+	kfree(g_hfi);
+	g_hfi = NULL;
+}
+
+void icp_enable_fw_debug(void)
+{
+	hfi_send_system_cmd(HFI_CMD_SYS_SET_PROPERTY,
+		(uint64_t)HFI_PROP_SYS_DEBUG_CFG, 0);
+}
+
+int icp_ping_fw(void)
+{
+	hfi_send_system_cmd(HFI_CMD_SYS_PING,
+		(uint64_t)0x12123434, 0);
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/Makefile b/drivers/media/platform/msm/camera/icp/icp_hw/Makefile
new file mode 100644
index 0000000..8e95286
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/Makefile
@@ -0,0 +1,9 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/icp
+ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += icp_hw_mgr/ a5_hw/ ipe_hw/ bps_hw/
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/Makefile b/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/Makefile
new file mode 100644
index 0000000..a4df0b8
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/Makefile
@@ -0,0 +1,11 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/icp
+ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/a5_hw
+ccflags-y += -Idrivers/media/platform/msm/camera/icp/fw_inc
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += a5_dev.o a5_core.o a5_soc.o
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_core.c b/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_core.c
new file mode 100644
index 0000000..f562bb9
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_core.c
@@ -0,0 +1,459 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "A5-CORE %s:%d " fmt, __func__, __LINE__
+
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/elf.h>
+#include <media/cam_icp.h>
+#include "cam_io_util.h"
+#include "cam_a5_hw_intf.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "a5_core.h"
+#include "a5_soc.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+#include "hfi_intf.h"
+#include "hfi_sys_defs.h"
+#include "cam_icp_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+
+/*
+ * Update the AHB and/or AXI bandwidth votes for the A5 CPAS client.
+ *
+ * Fix: the original overwrote the AHB vote's return code with the AXI
+ * vote's, so an AHB failure followed by a successful AXI update was
+ * reported as success. Each result is now checked independently and the
+ * first failure is returned.
+ */
+static int cam_a5_cpas_vote(struct cam_a5_device_core_info *core_info,
+	struct cam_icp_cpas_vote *cpas_vote)
+{
+	int rc = 0;
+
+	if (cpas_vote->ahb_vote_valid) {
+		rc = cam_cpas_update_ahb_vote(core_info->cpas_handle,
+			&cpas_vote->ahb_vote);
+		if (rc) {
+			pr_err("cpas ahb vote is failed: %d\n", rc);
+			return rc;
+		}
+	}
+
+	if (cpas_vote->axi_vote_valid) {
+		rc = cam_cpas_update_axi_vote(core_info->cpas_handle,
+			&cpas_vote->axi_vote);
+		if (rc)
+			pr_err("cpas axi vote is failed: %d\n", rc);
+	}
+
+	return rc;
+}
+
+/*
+ * Sanity-check the ICP firmware blob: ELF magic, ARM machine type and
+ * 32-bit class. Returns 0 on a valid image, -EINVAL otherwise.
+ *
+ * Fix: the 32-bit class error message was inverted — it is the image,
+ * not the driver, that fails to be 32-bit.
+ */
+static int32_t cam_icp_validate_fw(const uint8_t *elf)
+{
+	struct elf32_hdr *elf_hdr;
+
+	if (!elf) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	elf_hdr = (struct elf32_hdr *)elf;
+
+	if (memcmp(elf_hdr->e_ident, ELFMAG, SELFMAG)) {
+		pr_err("ICP elf identifier is failed\n");
+		return -EINVAL;
+	}
+
+	/* check architecture */
+	if (elf_hdr->e_machine != EM_ARM) {
+		pr_err("unsupported arch\n");
+		return -EINVAL;
+	}
+
+	/* check elf bit format */
+	if (elf_hdr->e_ident[EI_CLASS] != ELFCLASS32) {
+		pr_err("fw elf is not in 32 bit format\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Compute the memory footprint needed to load the firmware: for every
+ * loadable segment (p_flags != 0), round p_memsz up to p_align and add
+ * p_vaddr; *fw_size becomes the maximum over all segments, i.e. the
+ * highest address the image touches. Returns 0 on success, -EINVAL on
+ * bad arguments or when no loadable segment exists.
+ *
+ * NOTE(review): e_phoff/e_phnum are taken from the blob without checking
+ * they lie inside it, and p_memsz + p_align may wrap a u32 — assumes the
+ * image already passed cam_icp_validate_fw and comes from a trusted
+ * firmware partition; confirm.
+ */
+static int32_t cam_icp_get_fw_size(const uint8_t *elf, uint32_t *fw_size)
+{
+	int32_t rc = 0;
+	int32_t i = 0;
+	uint32_t num_prg_hdrs;
+	unsigned char *icp_prg_hdr_tbl;
+	uint32_t seg_mem_size = 0;
+	struct elf32_hdr *elf_hdr;
+	struct elf32_phdr *prg_hdr;
+
+	if (!elf || !fw_size) {
+		pr_err("invalid args\n");
+		return -EINVAL;
+	}
+
+	*fw_size = 0;
+
+	elf_hdr = (struct elf32_hdr *)elf;
+	num_prg_hdrs = elf_hdr->e_phnum;
+	icp_prg_hdr_tbl = (unsigned char *)elf + elf_hdr->e_phoff;
+	prg_hdr = (struct elf32_phdr *)&icp_prg_hdr_tbl[0];
+
+	/* NOTE(review): prg_hdr = elf + e_phoff can never be NULL here,
+	 * so this check is ineffective; kept for byte-identical behavior.
+	 */
+	if (!prg_hdr) {
+		pr_err("failed to get elf program header attr\n");
+		return -EINVAL;
+	}
+
+	pr_debug("num_prg_hdrs = %d\n", num_prg_hdrs);
+	for (i = 0; i < num_prg_hdrs; i++, prg_hdr++) {
+		/* skip non-loadable segments */
+		if (prg_hdr->p_flags == 0)
+			continue;
+
+		/* round the in-memory size up to the segment alignment */
+		seg_mem_size = (prg_hdr->p_memsz + prg_hdr->p_align - 1) &
+					~(prg_hdr->p_align - 1);
+		seg_mem_size += prg_hdr->p_vaddr;
+		pr_debug("p_memsz = %x p_align = %x p_vaddr = %x seg_mem_size = %x\n",
+			(int)prg_hdr->p_memsz, (int)prg_hdr->p_align,
+			(int)prg_hdr->p_vaddr, (int)seg_mem_size);
+		if (*fw_size < seg_mem_size)
+			*fw_size = seg_mem_size;
+
+	}
+
+	if (*fw_size == 0) {
+		pr_err("invalid elf fw file\n");
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+/*
+ * Copy every loadable ELF segment of the firmware image into the mapped
+ * firmware buffer at fw_kva_addr + p_vaddr.
+ *
+ * Fixes over the original:
+ *  - each segment is bound-checked against core_info->fw_buf_len so a
+ *    malformed ELF cannot write past the firmware buffer;
+ *  - the dead "!prg_hdr" check (elf + e_phoff is never NULL) is replaced
+ *    by a real argument check.
+ */
+static int32_t cam_icp_program_fw(const uint8_t *elf,
+		struct cam_a5_device_core_info *core_info)
+{
+	int32_t rc = 0;
+	uint32_t num_prg_hdrs;
+	unsigned char *icp_prg_hdr_tbl;
+	int32_t i = 0;
+	u8 *dest;
+	u8 *src;
+	struct elf32_hdr *elf_hdr;
+	struct elf32_phdr *prg_hdr;
+
+	if (!elf || !core_info) {
+		pr_err("invalid args\n");
+		return -EINVAL;
+	}
+
+	elf_hdr = (struct elf32_hdr *)elf;
+	num_prg_hdrs = elf_hdr->e_phnum;
+	icp_prg_hdr_tbl = (unsigned char *)elf + elf_hdr->e_phoff;
+	prg_hdr = (struct elf32_phdr *)&icp_prg_hdr_tbl[0];
+
+	for (i = 0; i < num_prg_hdrs; i++, prg_hdr++) {
+		if (prg_hdr->p_flags == 0)
+			continue;
+
+		pr_debug("Loading FW header size: %u\n", prg_hdr->p_filesz);
+		if (prg_hdr->p_filesz != 0) {
+			/* reject segments that would overrun the fw buffer */
+			if ((prg_hdr->p_vaddr > core_info->fw_buf_len) ||
+				(prg_hdr->p_filesz >
+				core_info->fw_buf_len - prg_hdr->p_vaddr)) {
+				pr_err("invalid segment: vaddr = %x size = %x\n",
+					prg_hdr->p_vaddr, prg_hdr->p_filesz);
+				return -EINVAL;
+			}
+
+			src = (u8 *)((u8 *)elf + prg_hdr->p_offset);
+			dest = (u8 *)(((u8 *)core_info->fw_kva_addr) +
+						prg_hdr->p_vaddr);
+
+			memcpy_toio(dest, src, prg_hdr->p_filesz);
+			pr_debug("fw kva: %pK, p_vaddr: 0x%x\n",
+					dest, prg_hdr->p_vaddr);
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * Load "CAMERA_ICP.elf" via request_firmware(), validate it, and program
+ * it into the firmware buffer previously set by CAM_ICP_A5_CMD_SET_FW_BUF.
+ *
+ * Fixes over the original:
+ *  - release_firmware() is now called on every error path; the firmware
+ *    was previously leaked whenever validation, sizing or programming
+ *    failed;
+ *  - a too-small firmware buffer now returns -EINVAL instead of falling
+ *    through with rc == 0 (i.e. reporting success on failure).
+ * On success core_info->fw_elf stays held for the device's lifetime.
+ */
+static int32_t cam_a5_download_fw(void *device_priv)
+{
+	int32_t rc = 0;
+	uint32_t fw_size;
+	const uint8_t *fw_start = NULL;
+	struct cam_hw_info *a5_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_a5_device_core_info *core_info = NULL;
+	struct cam_a5_device_hw_info *hw_info = NULL;
+	struct platform_device         *pdev = NULL;
+	struct a5_soc_info *cam_a5_soc_info = NULL;
+
+	if (!device_priv) {
+		pr_err("Invalid cam_dev_info\n");
+		return -EINVAL;
+	}
+
+	soc_info = &a5_dev->soc_info;
+	core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
+	hw_info = core_info->a5_hw_info;
+	pdev = soc_info->pdev;
+	cam_a5_soc_info = soc_info->soc_private;
+
+	rc = request_firmware(&core_info->fw_elf, "CAMERA_ICP.elf", &pdev->dev);
+	pr_debug("request_firmware: %d\n", rc);
+	if (rc < 0) {
+		pr_err("Failed to locate fw\n");
+		return rc;
+	}
+
+	if (!core_info->fw_elf) {
+		pr_err("request_firmware is failed\n");
+		return -EINVAL;
+	}
+
+	fw_start = core_info->fw_elf->data;
+	rc = cam_icp_validate_fw(fw_start);
+	if (rc < 0) {
+		pr_err("fw elf validation failed\n");
+		goto fw_fail;
+	}
+
+	rc = cam_icp_get_fw_size(fw_start, &fw_size);
+	if (rc < 0) {
+		pr_err("unable to get fw file size\n");
+		goto fw_fail;
+	}
+	pr_debug("cam_icp_get_fw_size: %u\n", fw_size);
+
+	/* Ensure the pre-allocated firmware buffer can hold the image */
+	pr_debug("cam_icp_get_fw_size: %u %llu\n",
+		fw_size, core_info->fw_buf_len);
+
+	if (core_info->fw_buf_len < fw_size) {
+		pr_err("fw allocation failed\n");
+		rc = -EINVAL;
+		goto fw_fail;
+	}
+
+	/* download fw */
+	rc = cam_icp_program_fw(fw_start, core_info);
+	if (rc < 0) {
+		pr_err("fw program is failed\n");
+		goto fw_fail;
+	}
+
+	return 0;
+fw_fail:
+	release_firmware(core_info->fw_elf);
+	core_info->fw_elf = NULL;
+	return rc;
+}
+
+/*
+ * Power up the A5: start CPAS with turbo AHB/AXI votes, then enable the
+ * SoC resources (clocks/regulators). On soc-enable failure CPAS is
+ * stopped again before returning.
+ *
+ * Fix: the original returned cam_cpas_stop()'s result from the failure
+ * path, so a successful stop (0) masked the soc-enable error and the
+ * caller saw success. The original enable error is now preserved.
+ */
+int cam_a5_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *a5_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_a5_device_core_info *core_info = NULL;
+	struct cam_icp_cpas_vote cpas_vote;
+	int rc = 0;
+
+	if (!device_priv) {
+		pr_err("Invalid cam_dev_info\n");
+		return -EINVAL;
+	}
+
+	soc_info = &a5_dev->soc_info;
+	core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
+
+	if ((!soc_info) || (!core_info)) {
+		pr_err("soc_info = %pK core_info = %pK\n", soc_info, core_info);
+		return -EINVAL;
+	}
+
+	cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	cpas_vote.ahb_vote.vote.level = CAM_TURBO_VOTE;
+	cpas_vote.axi_vote.compressed_bw = ICP_TURBO_VOTE;
+	cpas_vote.axi_vote.uncompressed_bw = ICP_TURBO_VOTE;
+
+	rc = cam_cpas_start(core_info->cpas_handle,
+		&cpas_vote.ahb_vote, &cpas_vote.axi_vote);
+	if (rc < 0) {
+		pr_err("cpass start failed: %d\n", rc);
+		return rc;
+	}
+
+	rc = cam_a5_enable_soc_resources(soc_info);
+	if (rc < 0) {
+		pr_err("soc enable is failed\n");
+		/* best-effort unwind; keep the enable error for the caller */
+		if (cam_cpas_stop(core_info->cpas_handle))
+			pr_err("cpas stop is failed\n");
+		return rc;
+	}
+
+	return 0;
+}
+
+/*
+ * Power down the A5: disable SoC resources then stop the CPAS client.
+ * Failures are logged but not propagated (always returns 0), matching
+ * the original best-effort teardown semantics.
+ *
+ * Fix: the error message wrongly said "soc enable" on the disable path.
+ */
+int cam_a5_deinit_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *a5_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_a5_device_core_info *core_info = NULL;
+	int rc = 0;
+
+	if (!device_priv) {
+		pr_err("Invalid cam_dev_info\n");
+		return -EINVAL;
+	}
+
+	soc_info = &a5_dev->soc_info;
+	core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
+	if ((!soc_info) || (!core_info)) {
+		pr_err("soc_info = %pK core_info = %pK\n", soc_info, core_info);
+		return -EINVAL;
+	}
+
+	rc = cam_a5_disable_soc_resources(soc_info);
+	if (rc < 0)
+		pr_err("soc disable is failed\n");
+
+	rc = cam_cpas_stop(core_info->cpas_handle);
+	if (rc < 0)
+		pr_err("cpas stop is failed: %d\n", rc);
+
+	return 0;
+}
+
+/*
+ * A5 interrupt handler: read the host interrupt status from the SIERRA
+ * register block, ack it by writing the same bits to the clear register,
+ * log watchdog expirations, and forward the status to the ICP hw-mgr
+ * callback if one is registered. Always returns IRQ_HANDLED, even for
+ * a NULL cookie (only logged).
+ */
+irqreturn_t cam_a5_irq(int irq_num, void *data)
+{
+	struct cam_hw_info *a5_dev = data;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_a5_device_core_info *core_info = NULL;
+	struct cam_a5_device_hw_info *hw_info = NULL;
+	uint32_t irq_status = 0;
+
+	if (!data) {
+		pr_err("Invalid cam_dev_info or query_cap args\n");
+		return IRQ_HANDLED;
+	}
+
+	soc_info = &a5_dev->soc_info;
+	core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
+	hw_info = core_info->a5_hw_info;
+
+	/* read-and-clear: ack exactly the bits we observed */
+	irq_status = cam_io_r_mb(soc_info->reg_map[A5_SIERRA_BASE].mem_base +
+				core_info->a5_hw_info->a5_host_int_status);
+
+	cam_io_w_mb(irq_status,
+			soc_info->reg_map[A5_SIERRA_BASE].mem_base +
+			core_info->a5_hw_info->a5_host_int_clr);
+
+	pr_debug("irq_status = %x\n", irq_status);
+	if (irq_status & A5_HOST_INT)
+		pr_debug("A5 to Host interrupt, read msg Q\n");
+
+	/* rate-limited: a wedged A5 can fire watchdog irqs continuously */
+	if ((irq_status & A5_WDT_0) ||
+		(irq_status & A5_WDT_1)) {
+		pr_err_ratelimited("watch dog interrupt from A5\n");
+	}
+
+	if (core_info->irq_cb.icp_hw_mgr_cb)
+		core_info->irq_cb.icp_hw_mgr_cb(irq_status,
+					core_info->irq_cb.data);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Dispatch an A5 hw-layer command from the ICP hw manager.
+ *
+ * Commands handled: FW download, setting the fw buffer mapping, IRQ
+ * callback registration, sending HFI SYS_INIT, and CPAS vote/start/stop.
+ * Unknown (but in-range) commands fall through and return 0; the result
+ * of CPAS vote/stop sub-calls is intentionally not propagated.
+ */
+int cam_a5_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_hw_info *a5_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_a5_device_core_info *core_info = NULL;
+	struct cam_a5_device_hw_info *hw_info = NULL;
+	int rc = 0;
+
+	if (!device_priv) {
+		pr_err("Invalid arguments\n");
+		return -EINVAL;
+	}
+
+	if (cmd_type >= CAM_ICP_A5_CMD_MAX) {
+		pr_err("Invalid command : %x\n", cmd_type);
+		return -EINVAL;
+	}
+
+	soc_info = &a5_dev->soc_info;
+	core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
+	hw_info = core_info->a5_hw_info;
+
+	switch (cmd_type) {
+	case CAM_ICP_A5_CMD_FW_DOWNLOAD:
+		rc = cam_a5_download_fw(device_priv);
+
+		break;
+	case CAM_ICP_A5_CMD_SET_FW_BUF: {
+		/* record the iova/kva/len of the pre-mapped fw buffer */
+		struct cam_icp_a5_set_fw_buf_info *fw_buf_info = cmd_args;
+
+		if (!cmd_args) {
+			pr_err("cmd args NULL\n");
+			return -EINVAL;
+		}
+
+		core_info->fw_buf = fw_buf_info->iova;
+		core_info->fw_kva_addr = fw_buf_info->kva;
+		core_info->fw_buf_len = fw_buf_info->len;
+
+		pr_debug("fw buf info = %x %llx %lld\n", core_info->fw_buf,
+			core_info->fw_kva_addr, core_info->fw_buf_len);
+		break;
+	}
+	case CAM_ICP_A5_SET_IRQ_CB: {
+		/* register the hw-mgr callback invoked from cam_a5_irq */
+		struct cam_icp_a5_set_irq_cb *irq_cb = cmd_args;
+
+		if (!cmd_args) {
+			pr_err("cmd args NULL\n");
+			return -EINVAL;
+		}
+
+		core_info->irq_cb.icp_hw_mgr_cb = irq_cb->icp_hw_mgr_cb;
+		core_info->irq_cb.data = irq_cb->data;
+		break;
+	}
+
+	case CAM_ICP_A5_SEND_INIT:
+		hfi_send_system_cmd(HFI_CMD_SYS_INIT, 0, 0);
+		break;
+	case CAM_ICP_A5_CMD_VOTE_CPAS: {
+		struct cam_icp_cpas_vote *cpas_vote = cmd_args;
+
+		if (!cmd_args) {
+			pr_err("cmd args NULL\n");
+			return -EINVAL;
+		}
+
+		cam_a5_cpas_vote(core_info, cpas_vote);
+		break;
+	}
+
+	case CAM_ICP_A5_CMD_CPAS_START: {
+		struct cam_icp_cpas_vote *cpas_vote = cmd_args;
+
+		if (!cmd_args) {
+			pr_err("cmd args NULL\n");
+			return -EINVAL;
+		}
+
+		rc = cam_cpas_start(core_info->cpas_handle,
+				&cpas_vote->ahb_vote, &cpas_vote->axi_vote);
+		break;
+	}
+
+	case CAM_ICP_A5_CMD_CPAS_STOP:
+		cam_cpas_stop(core_info->cpas_handle);
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_core.h b/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_core.h
new file mode 100644
index 0000000..8b84270
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_core.h
@@ -0,0 +1,87 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_A5_CORE_H
+#define CAM_A5_CORE_H
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+#include "cam_a5_hw_intf.h"
+
+/* indices into cam_hw_soc_info.reg_map for the A5 register blocks */
+#define A5_QGIC_BASE            0
+#define A5_SIERRA_BASE          1
+#define A5_CSR_BASE             2
+
+/* a5_host_int_status bits (see cam_a5_irq) */
+#define A5_HOST_INT             0x1
+#define A5_WDT_0                0x10
+#define A5_WDT_1                0x100
+
+#define ELF_GUARD_PAGE          (2 * 1024 * 1024)
+
+/**
+ * struct cam_a5_device_hw_info - register offsets of the A5 CSR block,
+ * filled per-SoC from the of_device_id match data (see cam_a5_hw_info
+ * in a5_dev.c).
+ */
+struct cam_a5_device_hw_info {
+	uint32_t hw_ver;
+	uint32_t nsec_reset;
+	uint32_t a5_control;
+	uint32_t a5_host_int_en;
+	uint32_t a5_host_int;
+	uint32_t a5_host_int_clr;
+	uint32_t a5_host_int_status;
+	uint32_t a5_host_int_set;
+	uint32_t host_a5_int;
+	uint32_t fw_version;
+	uint32_t init_req;
+	uint32_t init_response;
+	uint32_t shared_mem_ptr;
+	uint32_t shared_mem_size;
+	uint32_t qtbl_ptr;
+	uint32_t uncached_heap_ptr;
+	uint32_t uncached_heap_size;
+	uint32_t a5_status;
+};
+
+/**
+ * struct cam_a5_device_core_info
+ * @a5_hw_info: A5 hardware info
+ * @fw_elf: start address of fw start with elf header
+ * @fw: start address of fw blob
+ * @fw_buf: smmu alloc/mapped fw buffer (iova)
+ * @fw_kva_addr: kernel virtual address of the fw buffer
+ * @fw_buf_len: fw buffer length
+ * @query_cap: A5 query info from firmware
+ * @a5_acquire: Acquire information of A5
+ * @irq_cb: IRQ callback
+ * @cpas_handle: CPAS handle for A5
+ */
+struct cam_a5_device_core_info {
+	struct cam_a5_device_hw_info *a5_hw_info;
+	const struct firmware *fw_elf;
+	void *fw;
+	uint32_t fw_buf;
+	uint64_t fw_kva_addr;
+	uint64_t fw_buf_len;
+	struct cam_icp_a5_query_cap query_cap;
+	struct cam_icp_a5_acquire_dev a5_acquire[8];
+	struct cam_icp_a5_set_irq_cb irq_cb;
+	uint32_t cpas_handle;
+};
+
+int cam_a5_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size);
+int cam_a5_deinit_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size);
+int cam_a5_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size);
+
+irqreturn_t cam_a5_irq(int irq_num, void *data);
+#endif /* CAM_A5_CORE_H */
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_dev.c b/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_dev.c
new file mode 100644
index 0000000..f649c3b
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_dev.c
@@ -0,0 +1,197 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include <linux/timer.h>
+#include "a5_core.h"
+#include "a5_soc.h"
+#include "cam_io_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_a5_hw_intf.h"
+#include "cam_icp_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+
+/* Per-driver SoC private data (fw name); attached to soc_info in probe */
+struct a5_soc_info cam_a5_soc_info;
+EXPORT_SYMBOL(cam_a5_soc_info);
+
+/* A5 CSR register offsets for this SoC; bound to the "qcom,cam_a5"
+ * compatible via of_device_id match data. Confirm against HW spec
+ * before reuse on other silicon.
+ */
+struct cam_a5_device_hw_info cam_a5_hw_info = {
+	.hw_ver = 0x0,
+	.nsec_reset = 0x4,
+	.a5_control = 0x8,
+	.a5_host_int_en = 0x10,
+	.a5_host_int = 0x14,
+	.a5_host_int_clr = 0x18,
+	.a5_host_int_status = 0x1c,
+	.a5_host_int_set = 0x20,
+	.host_a5_int = 0x30,
+	.fw_version = 0x44,
+	.init_req = 0x48,
+	.init_response = 0x4c,
+	.shared_mem_ptr = 0x50,
+	.shared_mem_size = 0x54,
+	.qtbl_ptr = 0x58,
+	.uncached_heap_ptr = 0x5c,
+	.uncached_heap_size = 0x60,
+	.a5_status = 0x200,
+};
+EXPORT_SYMBOL(cam_a5_hw_info);
+
+/*
+ * Register the A5 as a CPAS client ("icp") and stash the returned client
+ * handle in core_info->cpas_handle for later start/stop/vote calls.
+ * NOTE(review): cpas_register_params is stack-allocated and only partly
+ * initialized — confirm cam_cpas_register_client() ignores the remaining
+ * fields, or zero the struct.
+ */
+int cam_a5_register_cpas(struct cam_hw_soc_info *soc_info,
+			struct cam_a5_device_core_info *core_info,
+			uint32_t hw_idx)
+{
+	struct cam_cpas_register_params cpas_register_params;
+	int rc;
+
+	cpas_register_params.dev = &soc_info->pdev->dev;
+	memcpy(cpas_register_params.identifier, "icp", sizeof("icp"));
+	cpas_register_params.cam_cpas_client_cb = NULL;
+	cpas_register_params.cell_index = hw_idx;
+	cpas_register_params.userdata = NULL;
+
+	rc = cam_cpas_register_client(&cpas_register_params);
+	if (rc < 0) {
+		pr_err("cam_cpas_register_client is failed: %d\n", rc);
+		return rc;
+	}
+
+	core_info->cpas_handle = cpas_register_params.client_handle;
+	return rc;
+}
+
+/*
+ * Platform probe for the A5: allocate the hw interface and hw info,
+ * wire up the init/deinit/process_cmd ops, resolve the per-SoC register
+ * offsets from the of_device_id match data, init SoC resources (clocks,
+ * regulators, irq) and register with CPAS.
+ *
+ * NOTE(review): the cpas_reg_failed path does not release the platform
+ * resources requested by cam_a5_init_soc_resources — confirm whether a
+ * cam_soc_util_release_platform_resource() call is needed there.
+ */
+int cam_a5_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct cam_hw_info *a5_dev = NULL;
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	const struct of_device_id *match_dev = NULL;
+	struct cam_a5_device_core_info *core_info = NULL;
+	struct cam_a5_device_hw_info *hw_info = NULL;
+
+	a5_dev_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!a5_dev_intf)
+		return -ENOMEM;
+
+	of_property_read_u32(pdev->dev.of_node,
+		"cell-index", &a5_dev_intf->hw_idx);
+
+	a5_dev = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!a5_dev) {
+		rc = -ENOMEM;
+		goto a5_dev_alloc_failure;
+	}
+
+	a5_dev->soc_info.pdev = pdev;
+	a5_dev_intf->hw_priv = a5_dev;
+	a5_dev_intf->hw_ops.init = cam_a5_init_hw;
+	a5_dev_intf->hw_ops.deinit = cam_a5_deinit_hw;
+	a5_dev_intf->hw_ops.process_cmd = cam_a5_process_cmd;
+	a5_dev_intf->hw_type = CAM_ICP_DEV_A5;
+
+	pr_debug("%s: type %d index %d\n", __func__,
+		a5_dev_intf->hw_type,
+		a5_dev_intf->hw_idx);
+
+	platform_set_drvdata(pdev, a5_dev_intf);
+
+	a5_dev->core_info = kzalloc(sizeof(struct cam_a5_device_core_info),
+					GFP_KERNEL);
+	if (!a5_dev->core_info) {
+		rc = -ENOMEM;
+		goto core_info_alloc_failure;
+	}
+	core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
+
+	match_dev = of_match_device(pdev->dev.driver->of_match_table,
+		&pdev->dev);
+	if (!match_dev) {
+		pr_err("%s: No a5 hardware info\n", __func__);
+		rc = -EINVAL;
+		goto pr_err;
+	}
+	/* per-SoC register offsets come from the DT match data */
+	hw_info = (struct cam_a5_device_hw_info *)match_dev->data;
+	core_info->a5_hw_info = hw_info;
+
+	a5_dev->soc_info.soc_private = &cam_a5_soc_info;
+
+	rc = cam_a5_init_soc_resources(&a5_dev->soc_info, cam_a5_irq,
+		a5_dev);
+	if (rc < 0) {
+		pr_err("%s: failed to init_soc\n", __func__);
+		goto init_soc_failure;
+	}
+
+	pr_debug("cam_a5_init_soc_resources : %pK\n",
+				(void *)&a5_dev->soc_info);
+	rc = cam_a5_register_cpas(&a5_dev->soc_info,
+			core_info, a5_dev_intf->hw_idx);
+	if (rc < 0) {
+		pr_err("a5 cpas registration failed\n");
+		goto cpas_reg_failed;
+	}
+	a5_dev->hw_state = CAM_HW_STATE_POWER_DOWN;
+	mutex_init(&a5_dev->hw_mutex);
+	spin_lock_init(&a5_dev->hw_lock);
+	init_completion(&a5_dev->hw_complete);
+
+	pr_debug("%s: A5%d probe successful\n", __func__,
+		a5_dev_intf->hw_idx);
+	return 0;
+
+cpas_reg_failed:
+init_soc_failure:
+pr_err:
+	kfree(a5_dev->core_info);
+core_info_alloc_failure:
+	kfree(a5_dev);
+a5_dev_alloc_failure:
+	kfree(a5_dev_intf);
+
+	return rc;
+}
+
+/* DT match table: binds "qcom,cam_a5" nodes to this driver and carries
+ * the per-SoC register-offset table as match data.
+ */
+static const struct of_device_id cam_a5_dt_match[] = {
+	{
+		.compatible = "qcom,cam_a5",
+		.data = &cam_a5_hw_info,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, cam_a5_dt_match);
+
+/* NOTE(review): no .remove callback — probe allocations are never freed
+ * on unbind; acceptable only if the driver is built-in.
+ */
+static struct platform_driver cam_a5_driver = {
+	.probe = cam_a5_probe,
+	.driver = {
+		.name = "cam_a5",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_a5_dt_match,
+	},
+};
+
+static int __init cam_a5_init_module(void)
+{
+	return platform_driver_register(&cam_a5_driver);
+}
+
+static void __exit cam_a5_exit_module(void)
+{
+	platform_driver_unregister(&cam_a5_driver);
+}
+
+module_init(cam_a5_init_module);
+module_exit(cam_a5_exit_module);
+MODULE_DESCRIPTION("CAM A5 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_soc.c b/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_soc.c
new file mode 100644
index 0000000..641c154
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_soc.c
@@ -0,0 +1,101 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+#include <media/cam_defs.h>
+#include <media/cam_icp.h>
+#include "a5_soc.h"
+#include "cam_soc_util.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+/*
+ * Read the common camera DT properties plus the A5-specific "fw_name"
+ * string and store the latter in the per-device a5_soc_info.
+ *
+ * Fix: the original read "fw_name" into a local variable and never
+ * wrote it back to soc_private, so the parsed name was silently
+ * discarded; it is now persisted, and a read failure is propagated.
+ */
+static int cam_a5_get_dt_properties(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+	const char *fw_name = NULL;
+	struct a5_soc_info *camp_a5_soc_info;
+	struct device_node *of_node = NULL;
+	struct platform_device *pdev = NULL;
+
+	pdev = soc_info->pdev;
+	of_node = pdev->dev.of_node;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc < 0) {
+		pr_err("%s: get a5 dt prop is failed\n", __func__);
+		return rc;
+	}
+
+	camp_a5_soc_info = soc_info->soc_private;
+
+	rc = of_property_read_string(of_node, "fw_name", &fw_name);
+	if (rc < 0) {
+		pr_err("%s: fw_name read failed\n", __func__);
+		return rc;
+	}
+
+	/* persist the name; DT string data outlives the device */
+	camp_a5_soc_info->fw_name = (char *)fw_name;
+
+	return rc;
+}
+
+/* Thin wrapper: request clocks/regulators/irq via the common soc util,
+ * installing a5_irq_handler with irq_data as its cookie.
+ */
+static int cam_a5_request_platform_resource(
+	struct cam_hw_soc_info *soc_info,
+	irq_handler_t a5_irq_handler, void *irq_data)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_request_platform_resource(soc_info, a5_irq_handler,
+		irq_data);
+
+	return rc;
+}
+
+/* Parse DT properties then request platform resources for the A5.
+ * Returns the first failure; on success resources are held until the
+ * driver releases them.
+ */
+int cam_a5_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t a5_irq_handler, void *irq_data)
+{
+	int rc = 0;
+
+	rc = cam_a5_get_dt_properties(soc_info);
+	if (rc < 0)
+		return rc;
+
+	rc = cam_a5_request_platform_resource(soc_info, a5_irq_handler,
+		irq_data);
+	if (rc < 0)
+		return rc;
+
+	return rc;
+}
+
+/* Enable A5 clocks and regulators (both flags true) via the common
+ * soc util; failures are logged and propagated.
+ */
+int cam_a5_enable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true, true);
+	if (rc)
+		pr_err("%s: enable platform failed\n", __func__);
+
+	return rc;
+}
+
+/* Disable A5 clocks and regulators via the common soc util.
+ * Fix: the error message wrongly said "enable platform failed" — a
+ * copy-paste from the enable path that would mislead log triage.
+ */
+int cam_a5_disable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+	if (rc)
+		pr_err("%s: disable platform failed\n", __func__);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_soc.h b/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_soc.h
new file mode 100644
index 0000000..916143d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_soc.h
@@ -0,0 +1,29 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_A5_SOC_H
+#define CAM_A5_SOC_H
+
+#include "cam_soc_util.h"
+
+/* A5-specific SoC private data, hung off cam_hw_soc_info.soc_private.
+ * @fw_name: firmware image name parsed from the DT "fw_name" property.
+ */
+struct a5_soc_info {
+	char *fw_name;
+};
+
+int cam_a5_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t a5_irq_handler, void *irq_data);
+
+int cam_a5_enable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+int cam_a5_disable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+#endif
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/Makefile b/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/Makefile
new file mode 100644
index 0000000..6aeb5f1
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/Makefile
@@ -0,0 +1,11 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/icp
+ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/bps_hw
+ccflags-y += -Idrivers/media/platform/msm/camera/icp/fw_inc
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += bps_dev.o bps_core.o bps_soc.o
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_core.c b/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_core.c
new file mode 100644
index 0000000..50863a5
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_core.c
@@ -0,0 +1,189 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "BPS-CORE %s:%d " fmt, __func__, __LINE__
+
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include "cam_io_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "bps_core.h"
+#include "bps_soc.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+#include "cam_bps_hw_intf.h"
+#include "cam_icp_hw_intf.h"
+#include "cam_icp_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+
+/*
+ * Update the AHB and/or AXI bandwidth votes for the BPS CPAS client.
+ *
+ * Fix: as in the A5 variant, the AHB vote's return code was clobbered
+ * by the AXI vote's, hiding AHB failures. Each result is now checked
+ * and the first failure is returned.
+ */
+static int cam_bps_cpas_vote(struct cam_bps_device_core_info *core_info,
+			struct cam_icp_cpas_vote *cpas_vote)
+{
+	int rc = 0;
+
+	if (cpas_vote->ahb_vote_valid) {
+		rc = cam_cpas_update_ahb_vote(core_info->cpas_handle,
+				&cpas_vote->ahb_vote);
+		if (rc < 0) {
+			pr_err("cpas ahb vote is failed: %d\n", rc);
+			return rc;
+		}
+	}
+
+	if (cpas_vote->axi_vote_valid) {
+		rc = cam_cpas_update_axi_vote(core_info->cpas_handle,
+				&cpas_vote->axi_vote);
+		if (rc < 0)
+			pr_err("cpas axi vote is failed: %d\n", rc);
+	}
+
+	return rc;
+}
+
+
+/*
+ * Power up the BPS: start CPAS with turbo AHB/AXI votes, then enable
+ * the SoC resources. On soc-enable failure CPAS is stopped again.
+ *
+ * Fix: the original assigned cam_cpas_stop()'s result to rc in the
+ * failure path, so a successful stop (0) masked the soc-enable error
+ * and the caller saw success. The enable error is now preserved.
+ */
+int cam_bps_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *bps_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_bps_device_core_info *core_info = NULL;
+	struct cam_icp_cpas_vote cpas_vote;
+	int rc = 0;
+
+	if (!device_priv) {
+		pr_err("Invalid cam_dev_info\n");
+		return -EINVAL;
+	}
+
+	soc_info = &bps_dev->soc_info;
+	core_info = (struct cam_bps_device_core_info *)bps_dev->core_info;
+
+	if ((!soc_info) || (!core_info)) {
+		pr_err("soc_info = %pK core_info = %pK\n", soc_info, core_info);
+		return -EINVAL;
+	}
+
+	cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	cpas_vote.ahb_vote.vote.level = CAM_TURBO_VOTE;
+	cpas_vote.axi_vote.compressed_bw = ICP_TURBO_VOTE;
+	cpas_vote.axi_vote.uncompressed_bw = ICP_TURBO_VOTE;
+
+	rc = cam_cpas_start(core_info->cpas_handle,
+			&cpas_vote.ahb_vote, &cpas_vote.axi_vote);
+	if (rc < 0) {
+		pr_err("cpass start failed: %d\n", rc);
+		return rc;
+	}
+
+	rc = cam_bps_enable_soc_resources(soc_info);
+	if (rc < 0) {
+		pr_err("soc enable is failed\n");
+		/* best-effort unwind; keep the enable error for the caller */
+		if (cam_cpas_stop(core_info->cpas_handle))
+			pr_err("cpas stop is failed\n");
+	}
+
+	return rc;
+}
+
+/*
+ * Power down the BPS: disable SoC resources then stop the CPAS client.
+ * Returns the CPAS stop result; a disable failure is logged but not
+ * propagated (matching the original behavior).
+ *
+ * Fix: the error message wrongly said "soc enable" on the disable path.
+ */
+int cam_bps_deinit_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *bps_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_bps_device_core_info *core_info = NULL;
+	int rc = 0;
+
+	if (!device_priv) {
+		pr_err("Invalid cam_dev_info\n");
+		return -EINVAL;
+	}
+
+	soc_info = &bps_dev->soc_info;
+	core_info = (struct cam_bps_device_core_info *)bps_dev->core_info;
+	if ((!soc_info) || (!core_info)) {
+		pr_err("soc_info = %pK core_info = %pK\n", soc_info, core_info);
+		return -EINVAL;
+	}
+
+	rc = cam_bps_disable_soc_resources(soc_info);
+	if (rc < 0)
+		pr_err("soc disable is failed\n");
+
+	rc = cam_cpas_stop(core_info->cpas_handle);
+	if (rc < 0)
+		pr_err("cpas stop is failed: %d\n", rc);
+
+	return rc;
+}
+
+/*
+ * Dispatch a BPS hw-layer command from the ICP hw manager: CPAS vote,
+ * start and stop. Unknown (but in-range) commands fall through and
+ * return 0; the result of vote/stop sub-calls is intentionally not
+ * propagated.
+ */
+int cam_bps_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_hw_info *bps_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_bps_device_core_info *core_info = NULL;
+	struct cam_bps_device_hw_info *hw_info = NULL;
+	int rc = 0;
+
+	if (!device_priv) {
+		pr_err("Invalid arguments\n");
+		return -EINVAL;
+	}
+
+	if (cmd_type >= CAM_ICP_BPS_CMD_MAX) {
+		pr_err("Invalid command : %x\n", cmd_type);
+		return -EINVAL;
+	}
+
+	soc_info = &bps_dev->soc_info;
+	core_info = (struct cam_bps_device_core_info *)bps_dev->core_info;
+	hw_info = core_info->bps_hw_info;
+
+	switch (cmd_type) {
+	case CAM_ICP_BPS_CMD_VOTE_CPAS: {
+		struct cam_icp_cpas_vote *cpas_vote = cmd_args;
+
+		if (!cmd_args) {
+			pr_err("cmd args NULL\n");
+			return -EINVAL;
+		}
+
+		cam_bps_cpas_vote(core_info, cpas_vote);
+		break;
+	}
+
+	case CAM_ICP_BPS_CMD_CPAS_START: {
+		struct cam_icp_cpas_vote *cpas_vote = cmd_args;
+
+		if (!cmd_args) {
+			pr_err("cmd args NULL\n");
+			return -EINVAL;
+		}
+
+		rc = cam_cpas_start(core_info->cpas_handle,
+				&cpas_vote->ahb_vote, &cpas_vote->axi_vote);
+		break;
+	}
+
+	case CAM_ICP_BPS_CMD_CPAS_STOP:
+		cam_cpas_stop(core_info->cpas_handle);
+		break;
+	default:
+		break;
+	}
+	return rc;
+}
+
+/* Stub IRQ handler for the BPS: acknowledges the interrupt without any
+ * status read or callback (no BPS interrupt handling implemented yet).
+ */
+irqreturn_t cam_bps_irq(int irq_num, void *data)
+{
+	return IRQ_HANDLED;
+}
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_core.h b/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_core.h
new file mode 100644
index 0000000..67e1c03
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_core.h
@@ -0,0 +1,39 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_BPS_CORE_H
+#define CAM_BPS_CORE_H
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+
+/* Per-SoC BPS hardware description bound via the OF match table. */
+struct cam_bps_device_hw_info {
+	uint32_t reserved;	/* no SoC-specific data needed yet */
+};
+
+/* Runtime state kept per BPS device instance. */
+struct cam_bps_device_core_info {
+	struct cam_bps_device_hw_info *bps_hw_info;	/* matched DT data */
+	uint32_t cpas_handle;	/* handle from cam_cpas_register_client() */
+};
+
+/* cam_hw_intf init/deinit/process_cmd ops implemented in bps_core.c. */
+int cam_bps_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size);
+int cam_bps_deinit_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size);
+int cam_bps_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size);
+
+/* Stub IRQ handler registered through cam_bps_init_soc_resources(). */
+irqreturn_t cam_bps_irq(int irq_num, void *data);
+#endif /* CAM_BPS_CORE_H */
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_dev.c b/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_dev.c
new file mode 100644
index 0000000..c3477ee
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_dev.c
@@ -0,0 +1,169 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include <linux/timer.h>
+#include "bps_core.h"
+#include "bps_soc.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_io_util.h"
+#include "cam_icp_hw_intf.h"
+#include "cam_icp_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+/* Default (empty) hw info attached to the "qcom,cam_bps" compatible. */
+struct cam_bps_device_hw_info cam_bps_hw_info = {
+	.reserved = 0,
+};
+EXPORT_SYMBOL(cam_bps_hw_info);
+
+/*
+ * cam_bps_register_cpas() - register this BPS instance as a CPAS client.
+ * @soc_info:  provides the struct device used for registration.
+ * @core_info: on success, receives the CPAS client handle.
+ * @hw_idx:    cell index of this BPS instance.
+ *
+ * Return: 0 on success, negative errno from cam_cpas_register_client().
+ */
+int cam_bps_register_cpas(struct cam_hw_soc_info *soc_info,
+			struct cam_bps_device_core_info *core_info,
+			uint32_t hw_idx)
+{
+	struct cam_cpas_register_params cpas_register_params;
+	int rc;
+
+	cpas_register_params.dev = &soc_info->pdev->dev;
+	memcpy(cpas_register_params.identifier, "bps", sizeof("bps"));
+	cpas_register_params.cam_cpas_client_cb = NULL;
+	cpas_register_params.cell_index = hw_idx;
+	cpas_register_params.userdata = NULL;
+
+	rc = cam_cpas_register_client(&cpas_register_params);
+	if (rc < 0) {
+		pr_err("cam_cpas_register_client is failed: %d\n", rc);
+		return rc;
+	}
+	core_info->cpas_handle = cpas_register_params.client_handle;
+
+	return rc;
+}
+
+/*
+ * cam_bps_probe() - platform probe for a BPS device.
+ *
+ * Allocates the hw interface and hw info structures, wires the
+ * init/deinit/process_cmd ops, initializes SoC resources and registers
+ * with CPAS. On success the cam_hw_intf is stored as drvdata.
+ *
+ * Return: 0 on success, negative errno on any allocation/init failure
+ * (all allocations made so far are freed on each error path).
+ */
+int cam_bps_probe(struct platform_device *pdev)
+{
+	struct cam_hw_info            *bps_dev = NULL;
+	struct cam_hw_intf            *bps_dev_intf = NULL;
+	const struct of_device_id         *match_dev = NULL;
+	struct cam_bps_device_core_info   *core_info = NULL;
+	struct cam_bps_device_hw_info     *hw_info = NULL;
+	int                                rc = 0;
+
+	bps_dev_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!bps_dev_intf)
+		return -ENOMEM;
+
+	/* On read failure hw_idx stays 0 (bps_dev_intf is zeroed above). */
+	of_property_read_u32(pdev->dev.of_node,
+		"cell-index", &bps_dev_intf->hw_idx);
+
+	bps_dev = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!bps_dev) {
+		kfree(bps_dev_intf);
+		return -ENOMEM;
+	}
+	bps_dev->soc_info.pdev = pdev;
+	bps_dev_intf->hw_priv = bps_dev;
+	bps_dev_intf->hw_ops.init = cam_bps_init_hw;
+	bps_dev_intf->hw_ops.deinit = cam_bps_deinit_hw;
+	bps_dev_intf->hw_ops.process_cmd = cam_bps_process_cmd;
+	bps_dev_intf->hw_type = CAM_ICP_DEV_BPS;
+	platform_set_drvdata(pdev, bps_dev_intf);
+	bps_dev->core_info = kzalloc(sizeof(struct cam_bps_device_core_info),
+					GFP_KERNEL);
+	if (!bps_dev->core_info) {
+		kfree(bps_dev);
+		kfree(bps_dev_intf);
+		return -ENOMEM;
+	}
+	core_info = (struct cam_bps_device_core_info *)bps_dev->core_info;
+
+	match_dev = of_match_device(pdev->dev.driver->of_match_table,
+		&pdev->dev);
+	if (!match_dev) {
+		pr_err("%s: No bps hardware info\n", __func__);
+		kfree(bps_dev->core_info);
+		kfree(bps_dev);
+		kfree(bps_dev_intf);
+		rc = -EINVAL;
+		return rc;
+	}
+	hw_info = (struct cam_bps_device_hw_info *)match_dev->data;
+	core_info->bps_hw_info = hw_info;
+
+	rc = cam_bps_init_soc_resources(&bps_dev->soc_info, cam_bps_irq,
+		bps_dev);
+	if (rc < 0) {
+		pr_err("%s: failed to init_soc\n", __func__);
+		kfree(bps_dev->core_info);
+		kfree(bps_dev);
+		kfree(bps_dev_intf);
+		return rc;
+	}
+	pr_debug("cam_bps_init_soc_resources : %pK\n",
+		(void *)&bps_dev->soc_info);
+
+	/*
+	 * NOTE(review): if CPAS registration fails, the platform resources
+	 * acquired by cam_bps_init_soc_resources() above are not released
+	 * here - confirm whether a release call is needed on this path.
+	 */
+	rc = cam_bps_register_cpas(&bps_dev->soc_info,
+			core_info, bps_dev_intf->hw_idx);
+	if (rc < 0) {
+		kfree(bps_dev->core_info);
+		kfree(bps_dev);
+		kfree(bps_dev_intf);
+		return rc;
+	}
+	bps_dev->hw_state = CAM_HW_STATE_POWER_DOWN;
+	mutex_init(&bps_dev->hw_mutex);
+	spin_lock_init(&bps_dev->hw_lock);
+	init_completion(&bps_dev->hw_complete);
+	pr_debug("%s: BPS%d probe successful\n", __func__,
+		bps_dev_intf->hw_idx);
+
+	return rc;
+}
+
+/* DT match table; .data supplies the per-SoC hw info to probe(). */
+static const struct of_device_id cam_bps_dt_match[] = {
+	{
+		.compatible = "qcom,cam_bps",
+		.data = &cam_bps_hw_info,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, cam_bps_dt_match);
+
+/* NOTE(review): no .remove callback - probe allocations are never freed. */
+static struct platform_driver cam_bps_driver = {
+	.probe = cam_bps_probe,
+	.driver = {
+		.name = "cam_bps",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_bps_dt_match,
+	},
+};
+
+/* Standard module entry: register the BPS platform driver. */
+static int __init cam_bps_init_module(void)
+{
+	return platform_driver_register(&cam_bps_driver);
+}
+
+/* Standard module exit: unregister the BPS platform driver. */
+static void __exit cam_bps_exit_module(void)
+{
+	platform_driver_unregister(&cam_bps_driver);
+}
+
+module_init(cam_bps_init_module);
+module_exit(cam_bps_exit_module);
+MODULE_DESCRIPTION("CAM BPS driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_soc.c b/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_soc.c
new file mode 100644
index 0000000..76884bf
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_soc.c
@@ -0,0 +1,85 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+#include <media/cam_defs.h>
+#include <media/cam_icp.h>
+#include "bps_soc.h"
+#include "cam_soc_util.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+/*
+ * cam_bps_get_dt_properties() - parse common camera DT properties into
+ * @soc_info. Thin logging wrapper over cam_soc_util_get_dt_properties().
+ */
+static int cam_bps_get_dt_properties(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc < 0)
+		pr_err("get bps dt prop is failed\n");
+
+	return rc;
+}
+
+/*
+ * cam_bps_request_platform_resource() - claim clocks/regulators/irq etc.
+ * described in @soc_info, registering @bps_irq_handler with @irq_data.
+ * Thin wrapper over cam_soc_util_request_platform_resource().
+ */
+static int cam_bps_request_platform_resource(
+	struct cam_hw_soc_info *soc_info,
+	irq_handler_t bps_irq_handler, void *irq_data)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_request_platform_resource(soc_info, bps_irq_handler,
+		irq_data);
+
+	return rc;
+}
+
+/*
+ * cam_bps_init_soc_resources() - parse DT properties and request all
+ * platform resources for the BPS, wiring @bps_irq_handler/@irq_data.
+ * Return: 0 on success, negative errno from the first failing step.
+ */
+int cam_bps_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t bps_irq_handler, void *irq_data)
+{
+	int rc = 0;
+
+	rc = cam_bps_get_dt_properties(soc_info);
+	if (rc < 0)
+		return rc;
+
+	rc = cam_bps_request_platform_resource(soc_info, bps_irq_handler,
+		irq_data);
+	if (rc < 0)
+		return rc;
+
+	return rc;
+}
+
+/*
+ * cam_bps_enable_soc_resources() - power up the BPS: enable platform
+ * resources with clocks on (second arg) and the irq left disabled
+ * (third arg false).
+ */
+int cam_bps_enable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true, false);
+	if (rc)
+		pr_err("%s: enable platform failed\n", __func__);
+
+	return rc;
+}
+
+/*
+ * cam_bps_disable_soc_resources() - power down the BPS; mirror of
+ * cam_bps_enable_soc_resources().
+ */
+int cam_bps_disable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, false);
+	if (rc)
+		pr_err("%s: disable platform failed\n", __func__);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_soc.h b/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_soc.h
new file mode 100644
index 0000000..b16db01
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_soc.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_BPS_SOC_H_
+#define _CAM_BPS_SOC_H_
+
+#include "cam_soc_util.h"
+
+/* Parse DT and request platform resources; registers the BPS irq handler. */
+int cam_bps_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t bps_irq_handler, void *irq_data);
+
+/* Enable clocks/regulators for the BPS (irq stays disabled). */
+int cam_bps_enable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+/* Disable clocks/regulators for the BPS. */
+int cam_bps_disable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+#endif /* _CAM_BPS_SOC_H_*/
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/Makefile b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/Makefile
new file mode 100644
index 0000000..4a6c3c0
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/Makefile
@@ -0,0 +1,16 @@
+# Header search paths and object list for the camera ICP hardware manager.
+ccflags-y += -Idrivers/media/platform/msm/camera/isp/isp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/isp/isp_hw/hw_utils/include
+ccflags-y += -Idrivers/media/platform/msm/camera/isp/isp_hw/isp_hw_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera/icp/fw_inc/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/
+ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/a5_hw/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_icp_hw_mgr.o
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
new file mode 100644
index 0000000..2fa39c8
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -0,0 +1,1968 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "ICP-HW-MGR %s:%d " fmt, __func__, __LINE__
+
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/timer.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <media/cam_defs.h>
+#include <media/cam_icp.h>
+#include "cam_sync_api.h"
+#include "cam_packet_util.h"
+#include "cam_hw.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_icp_hw_mgr_intf.h"
+#include "cam_icp_hw_mgr.h"
+#include "cam_a5_hw_intf.h"
+#include "cam_bps_hw_intf.h"
+#include "cam_ipe_hw_intf.h"
+#include "cam_smmu_api.h"
+#include "cam_mem_mgr.h"
+#include "hfi_intf.h"
+#include "hfi_reg.h"
+#include "hfi_session_defs.h"
+#include "hfi_sys_defs.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_mem_mgr.h"
+#include "a5_core.h"
+#include "hfi_sys_defs.h"
+
+#undef  ICP_DBG
+#define ICP_DBG(fmt, args...) pr_debug(fmt, ##args)
+
+#define ICP_WORKQ_NUM_TASK 30
+#define ICP_WORKQ_TASK_CMD_TYPE 1
+#define ICP_WORKQ_TASK_MSG_TYPE 2
+
+static struct cam_icp_hw_mgr icp_hw_mgr;
+
+/*
+ * cam_icp_stop_cpas() - best-effort CPAS stop for A5, BPS and IPE0
+ * (plus IPE1 when present).
+ *
+ * Each stop is attempted regardless of earlier failures; rc is
+ * overwritten by each call, so only the last device's status is
+ * returned and earlier failures are only logged.
+ */
+static int cam_icp_stop_cpas(struct cam_icp_hw_mgr *hw_mgr_priv)
+{
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	struct cam_hw_intf *ipe0_dev_intf = NULL;
+	struct cam_hw_intf *ipe1_dev_intf = NULL;
+	struct cam_hw_intf *bps_dev_intf = NULL;
+	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_icp_cpas_vote cpas_vote;
+	int rc = 0;
+
+	if (!hw_mgr) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+	bps_dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
+	ipe0_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
+
+	if ((!a5_dev_intf) || (!bps_dev_intf) || (!ipe0_dev_intf)) {
+		pr_err("dev intfs are NULL\n");
+		return -EINVAL;
+	}
+
+	rc = a5_dev_intf->hw_ops.process_cmd(
+		a5_dev_intf->hw_priv,
+		CAM_ICP_A5_CMD_CPAS_STOP,
+		&cpas_vote,
+		sizeof(struct cam_icp_cpas_vote));
+	if (rc < 0)
+		pr_err("CAM_ICP_A5_CMD_CPAS_STOP is failed: %d\n", rc);
+
+	rc = bps_dev_intf->hw_ops.process_cmd(
+		bps_dev_intf->hw_priv,
+		CAM_ICP_BPS_CMD_CPAS_STOP,
+		&cpas_vote,
+		sizeof(struct cam_icp_cpas_vote));
+	if (rc < 0)
+		pr_err("CAM_ICP_BPS_CMD_CPAS_STOP is failed: %d\n", rc);
+
+	rc = ipe0_dev_intf->hw_ops.process_cmd(
+		ipe0_dev_intf->hw_priv,
+		CAM_ICP_IPE_CMD_CPAS_STOP,
+		&cpas_vote,
+		sizeof(struct cam_icp_cpas_vote));
+	if (rc < 0)
+		pr_err("CAM_ICP_IPE_CMD_CPAS_STOP is failed: %d\n", rc);
+
+	/* Second IPE core is optional; absent on some SoCs. */
+	ipe1_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][1];
+	if (!ipe1_dev_intf)
+		return rc;
+
+	rc = ipe1_dev_intf->hw_ops.process_cmd(
+		ipe1_dev_intf->hw_priv,
+		CAM_ICP_IPE_CMD_CPAS_STOP,
+		&cpas_vote,
+		sizeof(struct cam_icp_cpas_vote));
+	if (rc < 0)
+		pr_err("CAM_ICP_IPE_CMD_CPAS_STOP is failed: %d\n", rc);
+
+	return rc;
+}
+
+/*
+ * cam_icp_start_cpas() - vote bandwidth and start CPAS for A5, BPS and
+ * IPE0 (plus IPE1 when present).
+ *
+ * On any failure, devices already started are stopped again. The unwind
+ * path no longer overwrites @rc with the cleanup-call results (which
+ * previously could make a failed start return 0); cleanup failures are
+ * only logged and the original error code is returned.
+ *
+ * Return: 0 on success, negative errno from the first failing start.
+ */
+static int cam_icp_start_cpas(struct cam_icp_hw_mgr *hw_mgr_priv)
+{
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	struct cam_hw_intf *ipe0_dev_intf = NULL;
+	struct cam_hw_intf *ipe1_dev_intf = NULL;
+	struct cam_hw_intf *bps_dev_intf = NULL;
+	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_icp_cpas_vote cpas_vote;
+	int rc = 0;
+
+	if (!hw_mgr) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+	bps_dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
+	ipe0_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
+
+	if ((!a5_dev_intf) || (!bps_dev_intf) || (!ipe0_dev_intf)) {
+		pr_err("dev intfs are null\n");
+		return -EINVAL;
+	}
+
+	cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	cpas_vote.ahb_vote.vote.level = CAM_TURBO_VOTE;
+	cpas_vote.axi_vote.compressed_bw = 640000000;
+	cpas_vote.axi_vote.uncompressed_bw = 640000000;
+
+	rc = a5_dev_intf->hw_ops.process_cmd(
+		a5_dev_intf->hw_priv,
+		CAM_ICP_A5_CMD_CPAS_START,
+		&cpas_vote,
+		sizeof(struct cam_icp_cpas_vote));
+	if (rc) {
+		pr_err("CAM_ICP_A5_CMD_CPAS_START is failed: %d\n", rc);
+		goto a5_cpas_start_failed;
+	}
+
+	rc = bps_dev_intf->hw_ops.process_cmd(
+		bps_dev_intf->hw_priv,
+		CAM_ICP_BPS_CMD_CPAS_START,
+		&cpas_vote,
+		sizeof(struct cam_icp_cpas_vote));
+	if (rc < 0) {
+		pr_err("CAM_ICP_BPS_CMD_CPAS_START is failed: %d\n", rc);
+		goto bps_cpas_start_failed;
+	}
+
+	rc = ipe0_dev_intf->hw_ops.process_cmd(
+		ipe0_dev_intf->hw_priv,
+		CAM_ICP_IPE_CMD_CPAS_START,
+		&cpas_vote,
+		sizeof(struct cam_icp_cpas_vote));
+	if (rc < 0) {
+		pr_err("CAM_ICP_IPE_CMD_CPAS_START is failed: %d\n", rc);
+		goto ipe0_cpas_start_failed;
+	}
+
+	/* Second IPE core is optional; absent on some SoCs. */
+	ipe1_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][1];
+	if (!ipe1_dev_intf)
+		return rc;
+
+	rc = ipe1_dev_intf->hw_ops.process_cmd(
+		ipe1_dev_intf->hw_priv,
+		CAM_ICP_IPE_CMD_CPAS_START,
+		&cpas_vote,
+		sizeof(struct cam_icp_cpas_vote));
+	if (rc < 0) {
+		pr_err("CAM_ICP_IPE_CMD_CPAS_START is failed: %d\n", rc);
+		goto ipe1_cpas_start_failed;
+	}
+
+	return rc;
+
+	/* Unwind: stop in reverse start order; do not clobber rc. */
+ipe1_cpas_start_failed:
+	if (ipe0_dev_intf->hw_ops.process_cmd(
+		ipe0_dev_intf->hw_priv,
+		CAM_ICP_IPE_CMD_CPAS_STOP,
+		&cpas_vote,
+		sizeof(struct cam_icp_cpas_vote)) < 0)
+		pr_err("IPE0 CPAS stop failed in error path\n");
+ipe0_cpas_start_failed:
+	if (bps_dev_intf->hw_ops.process_cmd(
+		bps_dev_intf->hw_priv,
+		CAM_ICP_BPS_CMD_CPAS_STOP,
+		&cpas_vote,
+		sizeof(struct cam_icp_cpas_vote)) < 0)
+		pr_err("BPS CPAS stop failed in error path\n");
+bps_cpas_start_failed:
+	if (a5_dev_intf->hw_ops.process_cmd(
+		a5_dev_intf->hw_priv,
+		CAM_ICP_A5_CMD_CPAS_STOP,
+		&cpas_vote,
+		sizeof(struct cam_icp_cpas_vote)) < 0)
+		pr_err("A5 CPAS stop failed in error path\n");
+a5_cpas_start_failed:
+	return rc;
+}
+
+/*
+ * cam_icp_mgr_process_cmd() - workq callback that writes one queued HFI
+ * command packet (task_data->data) to the firmware command queue.
+ * @priv: the hw manager (unused beyond validation).
+ * @data: struct hfi_cmd_work_data describing the packet.
+ *
+ * Return: 0 on success, negative errno from hfi_write_cmd().
+ */
+static int cam_icp_mgr_process_cmd(void *priv, void *data)
+{
+	int rc;
+	struct hfi_cmd_work_data *task_data = NULL;
+	struct cam_icp_hw_mgr *hw_mgr;
+
+	if (!data || !priv) {
+		pr_err("Invalid params%pK %pK\n", data, priv);
+		return -EINVAL;
+	}
+
+	hw_mgr = priv;
+	task_data = (struct hfi_cmd_work_data *)data;
+
+	rc = hfi_write_cmd(task_data->data);
+	if (rc < 0)
+		pr_err("unable to write\n");
+
+	ICP_DBG("task type : %u, rc : %d\n", task_data->type, rc);
+	return rc;
+}
+
+/*
+ * cam_icp_mgr_process_msg_frame_process() - handle a frame-process-done
+ * ack from firmware: match it to the pending request in the context's
+ * hfi_frame_process table, signal the context callback with the output
+ * resource handles, then release the table slot.
+ *
+ * Return: 0 on success, -EIO on firmware-reported error, -EINVAL when
+ * the request id is not found in the context.
+ */
+static int cam_icp_mgr_process_msg_frame_process(uint32_t *msg_ptr)
+{
+	int i;
+	uint32_t idx;
+	uint32_t request_id;
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+	struct hfi_msg_ipebps_async_ack *ioconfig_ack = NULL;
+	struct hfi_msg_frame_process_done *frame_done;
+	struct hfi_frame_process_info *hfi_frame_process;
+	struct cam_hw_done_event_data   buf_data;
+
+	ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
+	if (ioconfig_ack->err_type != HFI_ERR_SYS_NONE) {
+		pr_err("failed with error : %u\n", ioconfig_ack->err_type);
+		return -EIO;
+	}
+
+	frame_done =
+		(struct hfi_msg_frame_process_done *)ioconfig_ack->msg_data;
+	if (frame_done->result) {
+		pr_err("result : %u\n", frame_done->result);
+		return -EIO;
+	}
+	ICP_DBG("result : %u\n", frame_done->result);
+
+	/*
+	 * NOTE(review): user_data1/2 are round-tripped through firmware and
+	 * dereferenced without validation - confirm fw cannot corrupt them.
+	 */
+	ctx_data = (struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
+	request_id = ioconfig_ack->user_data2;
+	ICP_DBG("ctx : %pK, request_id :%d\n",
+		(void *)ctx_data->context_priv, request_id);
+
+	hfi_frame_process = &ctx_data->hfi_frame_process;
+	for (i = 0; i < CAM_FRAME_CMD_MAX; i++)
+		if (hfi_frame_process->request_id[i] == request_id)
+			break;
+
+	if (i >= CAM_FRAME_CMD_MAX) {
+		pr_err("unable to find pkt in ctx data for req_id =%d\n",
+			request_id);
+		return -EINVAL;
+	}
+	idx = i;
+
+	/* send event to ctx this needs to be done in msg handler */
+	buf_data.num_handles = hfi_frame_process->num_out_resources[idx];
+	for (i = 0; i < buf_data.num_handles; i++)
+		buf_data.resource_handle[i] =
+			hfi_frame_process->out_resource[idx][i];
+
+	ctx_data->ctxt_event_cb(ctx_data->context_priv, 0, &buf_data);
+
+	/* now release memory for hfi frame process command */
+	ICP_DBG("matching request id: %d\n",
+			hfi_frame_process->request_id[idx]);
+	mutex_lock(&ctx_data->hfi_frame_process.lock);
+	hfi_frame_process->request_id[idx] = 0;
+	clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
+	mutex_unlock(&ctx_data->hfi_frame_process.lock);
+	return 0;
+}
+
+/*
+ * cam_icp_mgr_process_msg_config_io() - handle an IPE/BPS CONFIG_IO ack.
+ * For IPE acks, records the firmware-reported scratch_mem_size in the
+ * context; for BPS acks only the result code is checked. On success the
+ * waiter blocked in the config-io path is completed.
+ *
+ * Return: 0 on success, -EIO when firmware reports an error (in which
+ * case the waiter is NOT completed and relies on its timeout).
+ */
+static int cam_icp_mgr_process_msg_config_io(uint32_t *msg_ptr)
+{
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+	struct hfi_msg_ipebps_async_ack *ioconfig_ack = NULL;
+	struct hfi_msg_ipe_config *ipe_config_ack = NULL;
+	struct hfi_msg_bps_common *bps_config_ack = NULL;
+
+	ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
+	ICP_DBG("opcode : %u\n", ioconfig_ack->opcode);
+
+	if (ioconfig_ack->opcode == HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO) {
+		ipe_config_ack =
+			(struct hfi_msg_ipe_config *)(ioconfig_ack->msg_data);
+		if (ipe_config_ack->rc) {
+			pr_err("rc = %d err = %u\n",
+				ipe_config_ack->rc, ioconfig_ack->err_type);
+			return -EIO;
+		}
+		ctx_data =
+			(struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
+		mutex_lock(&ctx_data->ctx_mutex);
+		ctx_data->scratch_mem_size = ipe_config_ack->scratch_mem_size;
+		mutex_unlock(&ctx_data->ctx_mutex);
+		ICP_DBG("scratch_mem_size = %u\n",
+			ipe_config_ack->scratch_mem_size);
+	} else {
+		/* Any non-IPE opcode is treated as a BPS config ack. */
+		bps_config_ack =
+			(struct hfi_msg_bps_common *)(ioconfig_ack->msg_data);
+		if (bps_config_ack->rc) {
+			pr_err("rc : %u, opcode :%u\n",
+				bps_config_ack->rc, ioconfig_ack->opcode);
+			return -EIO;
+		}
+		ctx_data =
+			(struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
+	}
+	complete(&ctx_data->wait_complete);
+
+	return 0;
+}
+
+/*
+ * cam_icp_mgr_process_msg_create_handle() - handle a CREATE_HANDLE ack:
+ * store the firmware handle in the originating context and wake the
+ * waiter blocked on wait_complete.
+ *
+ * Return: 0 on success, -EINVAL on a null message or context pointer.
+ */
+static int cam_icp_mgr_process_msg_create_handle(uint32_t *msg_ptr)
+{
+	struct hfi_msg_create_handle_ack *create_handle_ack = NULL;
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+
+	create_handle_ack = (struct hfi_msg_create_handle_ack *)msg_ptr;
+	if (!create_handle_ack) {
+		pr_err("Invalid create_handle_ack\n");
+		return -EINVAL;
+	}
+
+	ICP_DBG("err type : %u\n", create_handle_ack->err_type);
+
+	ctx_data = (struct cam_icp_hw_ctx_data *)create_handle_ack->user_data1;
+	if (!ctx_data) {
+		pr_err("Invalid ctx_data\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx_data->ctx_mutex);
+	ctx_data->fw_handle = create_handle_ack->fw_handle;
+	mutex_unlock(&ctx_data->ctx_mutex);
+	ICP_DBG("fw_handle = %x\n", ctx_data->fw_handle);
+	complete(&ctx_data->wait_complete);
+
+	return 0;
+}
+
+/*
+ * cam_icp_mgr_process_msg_ping_ack() - handle a SYS_PING ack: wake the
+ * context waiter identified by the ack's user_data.
+ *
+ * Return: 0 on success, -EINVAL on a null message or context pointer.
+ */
+static int cam_icp_mgr_process_msg_ping_ack(uint32_t *msg_ptr)
+{
+	struct hfi_msg_ping_ack *ping_ack = NULL;
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+
+	ping_ack = (struct hfi_msg_ping_ack *)msg_ptr;
+	if (!ping_ack) {
+		pr_err("Empty ping ack message\n");
+		return -EINVAL;
+	}
+
+	ctx_data = (struct cam_icp_hw_ctx_data *)ping_ack->user_data;
+	if (!ctx_data) {
+		pr_err("Invalid ctx_data\n");
+		return -EINVAL;
+	}
+
+	ICP_DBG("%x %x %pK\n", ping_ack->size, ping_ack->pkt_type,
+		(void *)ping_ack->user_data);
+	complete(&ctx_data->wait_complete);
+
+	return 0;
+}
+
+/*
+ * cam_icp_mgr_process_indirect_ack_msg() - dispatch an indirect async
+ * ack (CONFIG_IO or FRAME_PROCESS, IPE or BPS) to its handler based on
+ * the opcode word at msg_ptr[ICP_PACKET_IPCODE].
+ *
+ * Return: 0 on success or unknown opcode (logged only), negative errno
+ * propagated from the specific handler.
+ */
+static int cam_icp_mgr_process_indirect_ack_msg(uint32_t *msg_ptr)
+{
+	int rc;
+
+	switch (msg_ptr[ICP_PACKET_IPCODE]) {
+	case HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO:
+	case HFI_IPEBPS_CMD_OPCODE_BPS_CONFIG_IO:
+		ICP_DBG("received HFI_IPEBPS_CMD_OPCODE_IPE/BPS_CONFIG_IO:\n");
+		rc = cam_icp_mgr_process_msg_config_io(msg_ptr);
+		if (rc < 0) {
+			pr_err("error in process_msg_config_io\n");
+			return rc;
+		}
+		break;
+
+	case HFI_IPEBPS_CMD_OPCODE_IPE_FRAME_PROCESS:
+	case HFI_IPEBPS_CMD_OPCODE_BPS_FRAME_PROCESS:
+		ICP_DBG("received OPCODE_IPE/BPS_FRAME_PROCESS:\n");
+		rc = cam_icp_mgr_process_msg_frame_process(msg_ptr);
+		if (rc < 0) {
+			pr_err("error in msg_frame_process\n");
+			return rc;
+		}
+		break;
+	default:
+		pr_err("Invalid opcode : %u\n",
+			msg_ptr[ICP_PACKET_IPCODE]);
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * cam_icp_mgr_process_direct_ack_msg() - handle a direct async ack;
+ * only IPE/BPS DESTROY acks are expected, which wake the waiter in
+ * cam_icp_mgr_destroy_handle().
+ *
+ * Return: 0 on a destroy ack, -EINVAL for any other opcode.
+ */
+static int cam_icp_mgr_process_direct_ack_msg(uint32_t *msg_ptr)
+{
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+	struct hfi_msg_ipebps_async_ack *ioconfig_ack = NULL;
+
+	if (msg_ptr[ICP_PACKET_IPCODE] ==
+		HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY ||
+		msg_ptr[ICP_PACKET_IPCODE] ==
+		HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY) {
+		ICP_DBG("received HFI_IPEBPS_CMD_OPCODE_IPE/BPS_DESTROY:\n");
+		ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
+		ctx_data =
+			(struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
+		complete(&ctx_data->wait_complete);
+
+	} else {
+		pr_err("Invalid opcode : %u\n", msg_ptr[ICP_PACKET_IPCODE]);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * cam_icp_mgr_process_msg() - workq callback that drains one message
+ * from the firmware message queue and dispatches it by packet type.
+ * @priv: the hw manager (currently always &icp_hw_mgr).
+ * @data: struct hfi_msg_work_data queued by cam_icp_hw_mgr_cb().
+ *
+ * Fix: the function locked hw_mgr->hw_mgr_mutex but unlocked
+ * icp_hw_mgr.hw_mgr_mutex. These are the same object for the current
+ * caller, but the pair is now symmetric so the code stays correct if a
+ * different manager instance is ever passed.
+ *
+ * Return: 0 on success, negative errno on read or dispatch failure.
+ */
+static int32_t cam_icp_mgr_process_msg(void *priv, void *data)
+{
+	int rc = 0;
+	uint32_t *msg_ptr = NULL;
+	struct hfi_msg_work_data *task_data;
+	struct cam_icp_hw_mgr *hw_mgr;
+	int read_len;
+
+	if (!data || !priv) {
+		pr_err("Invalid data\n");
+		return -EINVAL;
+	}
+
+	task_data = data;
+	hw_mgr = priv;
+	ICP_DBG("irq status : %u\n", task_data->irq_status);
+
+	read_len = hfi_read_message(icp_hw_mgr.msg_buf, Q_MSG);
+	if (read_len < 0) {
+		ICP_DBG("Unable to read msg q\n");
+		return read_len;
+	}
+
+	msg_ptr = (uint32_t *)icp_hw_mgr.msg_buf;
+	ICP_DBG("packet type: %x\n", msg_ptr[ICP_PACKET_TYPE]);
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	switch (msg_ptr[ICP_PACKET_TYPE]) {
+	case HFI_MSG_SYS_INIT_DONE:
+		ICP_DBG("received HFI_MSG_SYS_INIT_DONE\n");
+		complete(&hw_mgr->a5_complete);
+		break;
+
+	case HFI_MSG_SYS_PING_ACK:
+		ICP_DBG("received HFI_MSG_SYS_PING_ACK\n");
+		rc = cam_icp_mgr_process_msg_ping_ack(msg_ptr);
+		if (rc)
+			pr_err("fail process PING_ACK\n");
+		break;
+
+	case HFI_MSG_IPEBPS_CREATE_HANDLE_ACK:
+		ICP_DBG("received HFI_MSG_IPEBPS_CREATE_HANDLE_ACK\n");
+		rc = cam_icp_mgr_process_msg_create_handle(msg_ptr);
+		if (rc)
+			pr_err("fail process CREATE_HANDLE_ACK\n");
+		break;
+
+	case HFI_MSG_IPEBPS_ASYNC_COMMAND_INDIRECT_ACK:
+		rc = cam_icp_mgr_process_indirect_ack_msg(msg_ptr);
+		if (rc)
+			pr_err("fail process INDIRECT_ACK\n");
+		break;
+
+	case HFI_MSG_IPEBPS_ASYNC_COMMAND_DIRECT_ACK:
+		rc = cam_icp_mgr_process_direct_ack_msg(msg_ptr);
+		if (rc)
+			pr_err("fail process DIRECT_ACK\n");
+		break;
+
+	case HFI_MSG_EVENT_NOTIFY:
+		ICP_DBG("received HFI_MSG_EVENT_NOTIFY\n");
+		break;
+
+	default:
+		pr_err("invalid msg : %u\n", msg_ptr[ICP_PACKET_TYPE]);
+		break;
+	}
+
+	/* Unlock the same mutex that was locked above (was icp_hw_mgr's). */
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	return rc;
+}
+
+/*
+ * cam_icp_hw_mgr_cb() - firmware interrupt callback: queue a message-
+ * processing task on the hw manager's message workq.
+ * @irq_status: raw status forwarded to the worker for logging.
+ * @data:       the hw manager instance.
+ *
+ * Uses spin_lock_irqsave, so it is safe to call from interrupt context
+ * (NOTE(review): caller context not visible here - presumed IRQ).
+ *
+ * Return: 0 on success, -ENOMEM when no workq task is available.
+ */
+int32_t cam_icp_hw_mgr_cb(uint32_t irq_status, void *data)
+{
+	int32_t rc = 0;
+	unsigned long flags;
+	struct cam_icp_hw_mgr *hw_mgr = data;
+	struct crm_workq_task *task;
+	struct hfi_msg_work_data *task_data;
+
+	spin_lock_irqsave(&hw_mgr->hw_mgr_lock, flags);
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.msg_work);
+	if (!task) {
+		pr_err("no empty task\n");
+		spin_unlock_irqrestore(&hw_mgr->hw_mgr_lock, flags);
+		return -ENOMEM;
+	}
+
+	task_data = (struct hfi_msg_work_data *)task->payload;
+	task_data->data = hw_mgr;
+	task_data->irq_status = irq_status;
+	task_data->type = ICP_WORKQ_TASK_MSG_TYPE;
+	task->process_cb = cam_icp_mgr_process_msg;
+	rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	spin_unlock_irqrestore(&hw_mgr->hw_mgr_lock, flags);
+
+	return rc;
+}
+
+/*
+ * cam_icp_free_hfi_mem() - release firmware memory and all five HFI
+ * shared buffers allocated by cam_icp_allocate_hfi_mem().
+ * Always returns 0; individual release results are not checked.
+ */
+static int cam_icp_free_hfi_mem(void)
+{
+	cam_smmu_dealloc_firmware(icp_hw_mgr.iommu_hdl);
+	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.qtbl);
+	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.cmd_q);
+	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.msg_q);
+	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.dbg_q);
+	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.sec_heap);
+
+	return 0;
+}
+
+/*
+ * cam_icp_alloc_shared_mem() - helper: allocate one 1 MB buffer from the
+ * SMMU shared region and describe it in @out.
+ *
+ * Factored out of cam_icp_allocate_hfi_mem(), which previously repeated
+ * this request sequence five times verbatim.
+ *
+ * Return: 0 on success, negative errno from cam_mem_mgr_request_mem().
+ */
+static int cam_icp_alloc_shared_mem(struct cam_mem_mgr_memory_desc *out)
+{
+	int rc;
+	struct cam_mem_mgr_request_desc alloc;
+
+	memset(&alloc, 0, sizeof(alloc));
+	memset(out, 0, sizeof(*out));
+	alloc.size = SZ_1M;
+	alloc.align = 0;
+	alloc.region = CAM_MEM_MGR_REGION_SHARED;
+	alloc.smmu_hdl = icp_hw_mgr.iommu_hdl;
+	rc = cam_mem_mgr_request_mem(&alloc, out);
+	if (rc < 0)
+		return rc;
+
+	ICP_DBG("kva = %llX\n", out->kva);
+	ICP_DBG("IOVA = %X\n", out->iova);
+	ICP_DBG("SMMU HDL = %X\n", out->smmu_hdl);
+	ICP_DBG("MEM HDL = %X\n", out->mem_handle);
+	ICP_DBG("length = %lld\n", out->len);
+	ICP_DBG("region = %d\n", out->region);
+
+	return 0;
+}
+
+/*
+ * cam_icp_allocate_hfi_mem() - allocate firmware memory plus the five
+ * HFI shared buffers (queue table, cmd/msg/dbg queues, secondary heap).
+ * On any failure, everything allocated so far is released in reverse
+ * order.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int cam_icp_allocate_hfi_mem(void)
+{
+	int rc;
+	dma_addr_t iova;
+	uint64_t kvaddr;
+	size_t len;
+
+	/* Fix: this is a debug trace, not an error - was pr_err. */
+	ICP_DBG("Allocating FW for iommu handle: %x\n", icp_hw_mgr.iommu_hdl);
+	rc = cam_smmu_alloc_firmware(icp_hw_mgr.iommu_hdl,
+		&iova, &kvaddr, &len);
+	if (rc < 0) {
+		pr_err("Unable to allocate FW memory\n");
+		return -ENOMEM;
+	}
+
+	icp_hw_mgr.hfi_mem.fw_buf.len = len;
+	icp_hw_mgr.hfi_mem.fw_buf.kva = kvaddr;
+	icp_hw_mgr.hfi_mem.fw_buf.iova = iova;
+	icp_hw_mgr.hfi_mem.fw_buf.smmu_hdl = icp_hw_mgr.iommu_hdl;
+
+	ICP_DBG("kva = %llX\n", kvaddr);
+	ICP_DBG("IOVA = %llX\n", iova);
+	ICP_DBG("length = %zu\n", len);
+
+	rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.qtbl);
+	if (rc < 0) {
+		pr_err("Unable to allocate qtbl memory\n");
+		goto qtbl_alloc_failed;
+	}
+
+	rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.cmd_q);
+	if (rc < 0) {
+		pr_err("Unable to allocate cmd q memory\n");
+		goto cmd_q_alloc_failed;
+	}
+
+	rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.msg_q);
+	if (rc < 0) {
+		pr_err("Unable to allocate msg q memory\n");
+		goto msg_q_alloc_failed;
+	}
+
+	rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.dbg_q);
+	if (rc < 0) {
+		pr_err("Unable to allocate dbg q memory\n");
+		goto dbg_q_alloc_failed;
+	}
+
+	rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.sec_heap);
+	if (rc < 0) {
+		pr_err("Unable to allocate sec heap q memory\n");
+		goto sec_heap_alloc_failed;
+	}
+
+	return rc;
+
+sec_heap_alloc_failed:
+	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.dbg_q);
+dbg_q_alloc_failed:
+	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.msg_q);
+msg_q_alloc_failed:
+	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.cmd_q);
+cmd_q_alloc_failed:
+	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.qtbl);
+qtbl_alloc_failed:
+	cam_smmu_dealloc_firmware(icp_hw_mgr.iommu_hdl);
+	pr_err("returned with error : %d\n", rc);
+
+	return rc;
+}
+
+/*
+ * cam_icp_mgr_get_free_ctx() - claim the first unused context slot,
+ * marking it in_use under its per-context mutex.
+ *
+ * Return: the claimed slot index, or CAM_ICP_CTX_MAX when every slot is
+ * in use - callers must check for the full case.
+ */
+static int cam_icp_mgr_get_free_ctx(struct cam_icp_hw_mgr *hw_mgr)
+{
+	int i = 0;
+	int num_ctx = CAM_ICP_CTX_MAX;
+
+	for (i = 0; i < num_ctx; i++) {
+		mutex_lock(&hw_mgr->ctx_data[i].ctx_mutex);
+		if (hw_mgr->ctx_data[i].in_use == 0) {
+			hw_mgr->ctx_data[i].in_use = 1;
+			mutex_unlock(&hw_mgr->ctx_data[i].ctx_mutex);
+			break;
+		}
+		mutex_unlock(&hw_mgr->ctx_data[i].ctx_mutex);
+	}
+
+	return i;
+}
+
+/*
+ * cam_icp_mgr_destroy_handle() - queue an async DESTROY command for this
+ * context's firmware handle and wait up to 5 seconds for the FW ack.
+ *
+ * @ctx_data: context whose fw_handle should be destroyed
+ * @task:     pre-allocated workq task that carries the command
+ *
+ * Returns 0 on success, -ETIMEDOUT when no ack arrives in time.
+ */
+static int cam_icp_mgr_destroy_handle(
+		struct cam_icp_hw_ctx_data *ctx_data,
+		struct crm_workq_task *task)
+{
+	int rc = 0;
+	int timeout = 5000;	/* ms to wait for the firmware response */
+	struct hfi_cmd_work_data *task_data;
+	struct hfi_cmd_ipebps_async destroy_cmd;
+	unsigned long rem_jiffies;
+
+	/* Direct-payload packet size: header + destroy payload, minus the
+	 * placeholder direct[] field already counted in the header size.
+	 */
+	destroy_cmd.size =
+		sizeof(struct hfi_cmd_ipebps_async) +
+		sizeof(struct ipe_bps_destroy) -
+		sizeof(destroy_cmd.payload.direct);
+	destroy_cmd.pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_DIRECT;
+	if (ctx_data->icp_dev_acquire_info.dev_type == CAM_ICP_RES_TYPE_BPS)
+		destroy_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY;
+	else
+		destroy_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY;
+
+	reinit_completion(&ctx_data->wait_complete);
+	destroy_cmd.num_fw_handles = 1;
+	destroy_cmd.fw_handles[0] = ctx_data->fw_handle;
+	/* FW echoes user_data1 back so the response path can find this ctx */
+	destroy_cmd.user_data1 = (uint64_t)ctx_data;
+	destroy_cmd.user_data2 = (uint64_t)0x0;
+	memcpy(destroy_cmd.payload.direct, &ctx_data->temp_payload,
+						sizeof(uint32_t));
+
+	/* NOTE(review): destroy_cmd lives on this stack frame; this relies
+	 * on the workq consuming it before the timeout below expires.
+	 */
+	task_data = (struct hfi_cmd_work_data *)task->payload;
+	task_data->data = (void *)&destroy_cmd;
+	task_data->request_id = 0;
+	task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
+	task->process_cb = cam_icp_mgr_process_cmd;
+	cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr, CRM_TASK_PRIORITY_0);
+	ICP_DBG("fw_handle = %x ctx_data = %pK\n",
+		ctx_data->fw_handle, ctx_data);
+	rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
+			msecs_to_jiffies((timeout)));
+	if (!rem_jiffies) {
+		rc = -ETIMEDOUT;
+		/* NOTE(review): message says "iconfig" but this is the
+		 * destroy path -- looks like a copy/paste; confirm intent.
+		 */
+		pr_err("timeout/err in iconfig command: %d\n", rc);
+	}
+
+	return rc;
+}
+
+/*
+ * cam_icp_mgr_release_ctx() - destroy the FW handle for a context slot
+ * and return the slot to the free pool.
+ *
+ * @hw_mgr: ICP hw manager
+ * @ctx_id: slot index previously returned by cam_icp_mgr_get_free_ctx()
+ *
+ * Fix: the not-in-use error path printed "ctx is already in use", which
+ * is the opposite of the condition being reported.
+ *
+ * Returns 0 on success, -EINVAL on a bad or unused slot.
+ */
+static int cam_icp_mgr_release_ctx(struct cam_icp_hw_mgr *hw_mgr, int ctx_id)
+{
+	struct crm_workq_task *task;
+	int i = 0;
+
+	if (ctx_id >= CAM_ICP_CTX_MAX) {
+		pr_err("ctx_id is wrong: %d\n", ctx_id);
+		return -EINVAL;
+	}
+
+	/* NOTE(review): in_use is re-checked nowhere after this unlock, so
+	 * two racing releases of the same slot are not fully excluded.
+	 */
+	mutex_lock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
+	if (!hw_mgr->ctx_data[ctx_id].in_use) {
+		pr_err("ctx is not in use: %d\n", ctx_id);
+		mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
+		return -EINVAL;
+	}
+	mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
+
+	/* Best effort: ask FW to destroy the handle if a task is available */
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	if (task)
+		cam_icp_mgr_destroy_handle(&hw_mgr->ctx_data[ctx_id], task);
+
+	/* Reset the slot and clear any outstanding frame-process bits */
+	mutex_lock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
+	hw_mgr->ctx_data[ctx_id].in_use = 0;
+	hw_mgr->ctx_data[ctx_id].fw_handle = 0;
+	hw_mgr->ctx_data[ctx_id].scratch_mem_size = 0;
+	mutex_lock(&hw_mgr->ctx_data[ctx_id].hfi_frame_process.lock);
+	for (i = 0; i < CAM_FRAME_CMD_MAX; i++)
+		clear_bit(i, hw_mgr->ctx_data[ctx_id].hfi_frame_process.bitmap);
+	mutex_unlock(&hw_mgr->ctx_data[ctx_id].hfi_frame_process.lock);
+	mutex_destroy(&hw_mgr->ctx_data[ctx_id].hfi_frame_process.lock);
+	mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
+	kfree(hw_mgr->ctx_data[ctx_id].hfi_frame_process.bitmap);
+
+	return 0;
+}
+
+/*
+ * cam_icp_mgr_get_ctx_from_fw_handle() - map a firmware handle back to
+ * its context slot.
+ *
+ * Only slots marked in_use are eligible; each slot is inspected under
+ * its own ctx_mutex.
+ *
+ * Returns the matching slot index, or -EINVAL when no in-use slot owns
+ * the handle.
+ */
+static int cam_icp_mgr_get_ctx_from_fw_handle(struct cam_icp_hw_mgr *hw_mgr,
+							uint32_t fw_handle)
+{
+	int idx;
+
+	for (idx = 0; idx < CAM_ICP_CTX_MAX; idx++) {
+		struct cam_icp_hw_ctx_data *ctx = &hw_mgr->ctx_data[idx];
+		bool match;
+
+		mutex_lock(&ctx->ctx_mutex);
+		match = ctx->in_use && (ctx->fw_handle == fw_handle);
+		mutex_unlock(&ctx->ctx_mutex);
+
+		if (match)
+			return idx;
+	}
+	ICP_DBG("Invalid fw handle to get ctx\n");
+
+	return -EINVAL;
+}
+
+/*
+ * cam_icp_mgr_hw_close() - power down A5, BPS and both IPE cores, free
+ * the HFI memory and mark firmware as not downloaded.
+ *
+ * The second IPE core (ipe1) is optional; A5, IPE0 and BPS are mandatory.
+ *
+ * Fix: the missing-interface path returned rc, which was still 0 at that
+ * point, so the caller saw success after an error was logged.  It now
+ * returns -EINVAL.
+ */
+static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args)
+{
+	struct cam_icp_hw_mgr *hw_mgr = hw_priv;
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	struct cam_hw_intf *ipe0_dev_intf = NULL;
+	struct cam_hw_intf *ipe1_dev_intf = NULL;
+	struct cam_hw_intf *bps_dev_intf = NULL;
+	int rc = 0;
+
+	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+	ipe0_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
+	bps_dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
+
+	if ((!a5_dev_intf) || (!ipe0_dev_intf) || (!bps_dev_intf)) {
+		pr_err("dev intfs are wrong\n");
+		return -EINVAL;
+	}
+
+	/* De-init everything that was brought up; log but keep going on
+	 * individual failures so all devices get a shutdown attempt.
+	 */
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	rc = a5_dev_intf->hw_ops.deinit(a5_dev_intf->hw_priv, NULL, 0);
+	if (rc < 0)
+		pr_err("a5 dev de-init failed\n");
+
+	rc = bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);
+	if (rc < 0)
+		pr_err("bps dev de-init failed\n");
+
+	rc = ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
+	if (rc < 0)
+		pr_err("ipe0 dev de-init failed\n");
+
+	ipe1_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][1];
+	if (ipe1_dev_intf) {
+		rc = ipe1_dev_intf->hw_ops.deinit(ipe1_dev_intf->hw_priv,
+						NULL, 0);
+		if (rc < 0)
+			pr_err("ipe1 dev de-init failed\n");
+	}
+
+	cam_icp_free_hfi_mem();
+	hw_mgr->fw_download = false;
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	return 0;
+}
+
+/*
+ * cam_icp_mgr_download_fw() - power up A5/IPE/BPS, download the ICP
+ * firmware and bring up the HFI queue interface.
+ *
+ * Returns 0 on success (or if FW is already downloaded), negative errno
+ * on failure; on failure all initialized devices are de-initialized and
+ * the HFI memory is freed.
+ *
+ * Fixes:
+ *  - the missing-interface path jumped to cleanup with rc still 0 and
+ *    therefore reported success;
+ *  - the unwind path stored each deinit result into rc, masking the
+ *    original failure code;
+ *  - the cpas-stop failure jumped to the shared unwind path after the
+ *    mutex had already been released, double-unlocking hw_mgr_mutex.
+ */
+static int cam_icp_mgr_download_fw(void *hw_mgr_priv, void *download_fw_args)
+{
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	struct cam_hw_intf *ipe0_dev_intf = NULL;
+	struct cam_hw_intf *ipe1_dev_intf = NULL;
+	struct cam_hw_intf *bps_dev_intf = NULL;
+	struct cam_hw_info *a5_dev = NULL;
+	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_icp_a5_set_irq_cb irq_cb;
+	struct cam_icp_a5_set_fw_buf_info fw_buf_info;
+	struct hfi_mem_info hfi_mem;
+	int rc = 0;
+
+	if (!hw_mgr) {
+		pr_err("hw_mgr is NULL\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	if (hw_mgr->fw_download) {
+		ICP_DBG("FW already downloaded\n");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return rc;
+	}
+
+	/* Allocate memory for FW and shared memory */
+	rc = cam_icp_allocate_hfi_mem();
+	if (rc < 0) {
+		pr_err("hfi mem alloc failed\n");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return rc;
+	}
+
+	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+	ipe0_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
+	ipe1_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][1];
+	bps_dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
+
+	/* ipe1 is optional; A5, IPE0 and BPS are mandatory */
+	if ((!a5_dev_intf) || (!ipe0_dev_intf) || (!bps_dev_intf)) {
+		pr_err("dev intfs are wrong\n");
+		rc = -EINVAL;
+		goto dev_intf_fail;
+	}
+
+	a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
+
+	rc = a5_dev_intf->hw_ops.init(a5_dev_intf->hw_priv, NULL, 0);
+	if (rc < 0) {
+		pr_err("a5 dev init failed\n");
+		goto a5_dev_init_failed;
+	}
+	rc = bps_dev_intf->hw_ops.init(bps_dev_intf->hw_priv, NULL, 0);
+	if (rc < 0) {
+		pr_err("bps dev init failed\n");
+		goto bps_dev_init_failed;
+	}
+	rc = ipe0_dev_intf->hw_ops.init(ipe0_dev_intf->hw_priv, NULL, 0);
+	if (rc < 0) {
+		pr_err("ipe0 dev init failed\n");
+		goto ipe0_dev_init_failed;
+	}
+
+	if (ipe1_dev_intf) {
+		rc = ipe1_dev_intf->hw_ops.init(ipe1_dev_intf->hw_priv,
+						NULL, 0);
+		if (rc < 0) {
+			pr_err("ipe1 dev init failed\n");
+			goto ipe1_dev_init_failed;
+		}
+	}
+
+	/* Set IRQ callback so A5 interrupts are routed to this manager */
+	irq_cb.icp_hw_mgr_cb = cam_icp_hw_mgr_cb;
+	irq_cb.data = hw_mgr_priv;
+	rc = a5_dev_intf->hw_ops.process_cmd(
+				a5_dev_intf->hw_priv,
+				CAM_ICP_A5_SET_IRQ_CB,
+				&irq_cb, sizeof(irq_cb));
+	if (rc < 0) {
+		pr_err("CAM_ICP_A5_SET_IRQ_CB failed\n");
+		rc = -EINVAL;
+		goto set_irq_failed;
+	}
+
+	/* Tell the A5 device where the firmware image lives */
+	fw_buf_info.kva = icp_hw_mgr.hfi_mem.fw_buf.kva;
+	fw_buf_info.iova = icp_hw_mgr.hfi_mem.fw_buf.iova;
+	fw_buf_info.len = icp_hw_mgr.hfi_mem.fw_buf.len;
+
+	rc = a5_dev_intf->hw_ops.process_cmd(
+			a5_dev_intf->hw_priv,
+			CAM_ICP_A5_CMD_SET_FW_BUF,
+			&fw_buf_info,
+			sizeof(fw_buf_info));
+	if (rc < 0) {
+		pr_err("CAM_ICP_A5_CMD_SET_FW_BUF failed\n");
+		goto set_irq_failed;
+	}
+
+	cam_hfi_enable_cpu(a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
+
+	rc = a5_dev_intf->hw_ops.process_cmd(
+			a5_dev_intf->hw_priv,
+			CAM_ICP_A5_CMD_FW_DOWNLOAD,
+			NULL, 0);
+	if (rc < 0) {
+		pr_err("FW download is failed\n");
+		goto set_irq_failed;
+	}
+
+	/* Hand the pre-allocated queue memory over to the HFI layer */
+	hfi_mem.qtbl.kva = icp_hw_mgr.hfi_mem.qtbl.kva;
+	hfi_mem.qtbl.iova = icp_hw_mgr.hfi_mem.qtbl.iova;
+	hfi_mem.qtbl.len = icp_hw_mgr.hfi_mem.qtbl.len;
+	ICP_DBG("kva = %llX\n", hfi_mem.qtbl.kva);
+	ICP_DBG("IOVA = %X\n", hfi_mem.qtbl.iova);
+	ICP_DBG("length = %lld\n", hfi_mem.qtbl.len);
+
+	hfi_mem.cmd_q.kva = icp_hw_mgr.hfi_mem.cmd_q.kva;
+	hfi_mem.cmd_q.iova = icp_hw_mgr.hfi_mem.cmd_q.iova;
+	hfi_mem.cmd_q.len = icp_hw_mgr.hfi_mem.cmd_q.len;
+	ICP_DBG("kva = %llX\n", hfi_mem.cmd_q.kva);
+	ICP_DBG("IOVA = %X\n", hfi_mem.cmd_q.iova);
+	ICP_DBG("length = %lld\n", hfi_mem.cmd_q.len);
+
+	hfi_mem.msg_q.kva = icp_hw_mgr.hfi_mem.msg_q.kva;
+	hfi_mem.msg_q.iova = icp_hw_mgr.hfi_mem.msg_q.iova;
+	hfi_mem.msg_q.len = icp_hw_mgr.hfi_mem.msg_q.len;
+	ICP_DBG("kva = %llX\n", hfi_mem.msg_q.kva);
+	ICP_DBG("IOVA = %X\n", hfi_mem.msg_q.iova);
+	ICP_DBG("length = %lld\n", hfi_mem.msg_q.len);
+
+	hfi_mem.dbg_q.kva = icp_hw_mgr.hfi_mem.dbg_q.kva;
+	hfi_mem.dbg_q.iova = icp_hw_mgr.hfi_mem.dbg_q.iova;
+	hfi_mem.dbg_q.len = icp_hw_mgr.hfi_mem.dbg_q.len;
+	ICP_DBG("kva = %llX\n", hfi_mem.dbg_q.kva);
+	ICP_DBG("IOVA = %X\n", hfi_mem.dbg_q.iova);
+	ICP_DBG("length = %lld\n", hfi_mem.dbg_q.len);
+
+	hfi_mem.sec_heap.kva = icp_hw_mgr.hfi_mem.sec_heap.kva;
+	hfi_mem.sec_heap.iova = icp_hw_mgr.hfi_mem.sec_heap.iova;
+	hfi_mem.sec_heap.len = icp_hw_mgr.hfi_mem.sec_heap.len;
+
+	rc = cam_hfi_init(0, &hfi_mem,
+		a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base,
+		false);
+	if (rc < 0) {
+		pr_err("hfi_init is failed\n");
+		goto set_irq_failed;
+	}
+
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	ICP_DBG("Sending HFI init command\n");
+	reinit_completion(&hw_mgr->a5_complete);
+
+	rc = a5_dev_intf->hw_ops.process_cmd(
+		a5_dev_intf->hw_priv,
+		CAM_ICP_A5_SEND_INIT,
+		NULL, 0);
+
+	ICP_DBG("Wait for INIT DONE Message\n");
+	wait_for_completion(&hw_mgr->a5_complete);
+
+	ICP_DBG("Done Waiting for INIT DONE Message\n");
+
+	rc = a5_dev_intf->hw_ops.process_cmd(
+		a5_dev_intf->hw_priv,
+		CAM_ICP_A5_CMD_POWER_COLLAPSE,
+		NULL, 0);
+
+	hw_mgr->fw_download = true;
+
+	rc = cam_icp_stop_cpas(hw_mgr);
+	if (rc) {
+		pr_err("cpas stop failed\n");
+		/* Re-take the lock: the shared unwind path unlocks it */
+		mutex_lock(&hw_mgr->hw_mgr_mutex);
+		goto set_irq_failed;
+	}
+
+	hw_mgr->ctxt_cnt = 0;
+
+	return rc;
+
+set_irq_failed:
+	/* Do not clobber rc here: preserve the original failure code */
+	if (ipe1_dev_intf)
+		ipe1_dev_intf->hw_ops.deinit(ipe1_dev_intf->hw_priv,
+			NULL, 0);
+ipe1_dev_init_failed:
+	ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
+ipe0_dev_init_failed:
+	bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);
+bps_dev_init_failed:
+	a5_dev_intf->hw_ops.deinit(a5_dev_intf->hw_priv, NULL, 0);
+a5_dev_init_failed:
+dev_intf_fail:
+	cam_icp_free_hfi_mem();
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	return rc;
+}
+
+/*
+ * cam_icp_mgr_config_hw() - enqueue a previously prepared frame-process
+ * command (built by cam_icp_mgr_prepare_hw_update) to the FW workq.
+ *
+ * @hw_mgr_priv:    cam_icp_hw_mgr instance
+ * @config_hw_args: cam_hw_config_args with the staged hw_update_entries
+ *
+ * Returns 0 on successful enqueue, negative errno otherwise.
+ */
+static int cam_icp_mgr_config_hw(void *hw_mgr_priv, void *config_hw_args)
+{
+	int rc = 0;
+	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_config_args *config_args = config_hw_args;
+	uint32_t fw_handle;
+	int ctx_id = 0;
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+	int32_t request_id = 0;
+	struct cam_hw_update_entry *hw_update_entries;
+	struct crm_workq_task *task;
+	struct hfi_cmd_work_data *task_data;
+	struct hfi_cmd_ipebps_async *hfi_cmd;
+
+	if (!hw_mgr || !config_args) {
+		pr_err("Invalid arguments %pK %pK\n",
+			hw_mgr, config_args);
+		return -EINVAL;
+	}
+
+	if (!config_args->num_hw_update_entries) {
+		pr_err("No hw update enteries are available\n");
+		return -EINVAL;
+	}
+
+	/* ctxt_to_hw_map was pointed at the ctx's fw_handle at acquire */
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	fw_handle = *(uint32_t *)config_args->ctxt_to_hw_map;
+	ctx_id = cam_icp_mgr_get_ctx_from_fw_handle(hw_mgr, fw_handle);
+	if (ctx_id < 0) {
+		pr_err("Fw handle to ctx mapping is failed\n");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EINVAL;
+	}
+
+	ctx_data = &hw_mgr->ctx_data[ctx_id];
+	if (!ctx_data->in_use) {
+		pr_err("ctx is not in use\n");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EINVAL;
+	}
+
+	/* priv points at the request_id slot staged during prepare */
+	request_id = *(uint32_t *)config_args->priv;
+	hw_update_entries = config_args->hw_update_entries;
+	ICP_DBG("req_id = %d\n", request_id);
+	ICP_DBG("fw_handle = %x req_id = %d %pK\n",
+		fw_handle, request_id, config_args->priv);
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+	if (!task) {
+		pr_err("no empty task\n");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -ENOMEM;
+	}
+
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	task_data = (struct hfi_cmd_work_data *)task->payload;
+	if (!task_data) {
+		pr_err("task_data is NULL\n");
+		return -EINVAL;
+	}
+
+	/* addr holds the kernel address of the staged hfi_frame_cmd slot */
+	task_data->data = (void *)hw_update_entries->addr;
+	hfi_cmd = (struct hfi_cmd_ipebps_async *)hw_update_entries->addr;
+	ICP_DBG("request from hfi_cmd :%llu, hfi_cmd: %pK\n",
+		hfi_cmd->user_data2, hfi_cmd);
+	task_data->request_id = request_id;
+	task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
+	task->process_cb = cam_icp_mgr_process_cmd;
+	rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+			CRM_TASK_PRIORITY_0);
+	return rc;
+}
+
+/*
+ * cam_icp_mgr_prepare_frame_process_cmd() - fill an indirect async
+ * FRAME_PROCESS command addressed to this context's firmware handle.
+ *
+ * The firmware echoes user_data1/user_data2 back in its response, which
+ * is how the response path recovers the ctx and request id.
+ *
+ * Always returns 0.
+ */
+static int cam_icp_mgr_prepare_frame_process_cmd(
+			struct cam_icp_hw_ctx_data *ctx_data,
+			struct hfi_cmd_ipebps_async *hfi_cmd,
+			uint32_t request_id,
+			uint32_t fw_cmd_buf_iova_addr)
+{
+	bool is_bps =
+		(ctx_data->icp_dev_acquire_info.dev_type ==
+			CAM_ICP_RES_TYPE_BPS);
+
+	hfi_cmd->size = sizeof(struct hfi_cmd_ipebps_async);
+	hfi_cmd->pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT;
+	hfi_cmd->opcode = is_bps ?
+		HFI_IPEBPS_CMD_OPCODE_BPS_FRAME_PROCESS :
+		HFI_IPEBPS_CMD_OPCODE_IPE_FRAME_PROCESS;
+	hfi_cmd->num_fw_handles = 1;
+	hfi_cmd->fw_handles[0] = ctx_data->fw_handle;
+	hfi_cmd->payload.indirect = fw_cmd_buf_iova_addr;
+	hfi_cmd->user_data1 = (uint64_t)ctx_data;
+	hfi_cmd->user_data2 = request_id;
+
+	ICP_DBG("ctx_data : %pK, request_id :%d cmd_buf %x\n",
+		(void *)ctx_data->context_priv,
+		request_id, fw_cmd_buf_iova_addr);
+
+	return 0;
+}
+
+/*
+ * cam_icp_mgr_prepare_hw_update() - validate a user-space packet, patch
+ * buffer handles, build in/out fence map entries and stage an HFI
+ * frame-process command in a free frame slot for the config stage.
+ *
+ * Fixes:
+ *  - the input-fence loop wrote into the fixed-size stack array
+ *    sync_in_obj[CAM_ICP_IPE_IMAGE_MAX] without bounding the index, so a
+ *    packet with more input io-configs than that smashed the stack
+ *    (num_io_configs is user-controlled);
+ *  - the debug message after the input stage said "out buf" (copy/paste).
+ */
+static int cam_icp_mgr_prepare_hw_update(void *hw_mgr_priv,
+				void *prepare_hw_update_args)
+{
+	int        rc = 0, i, j;
+	int        ctx_id = 0;
+	uint32_t   fw_handle;
+	int32_t    idx;
+	uint64_t   iova_addr;
+	uint32_t   fw_cmd_buf_iova_addr;
+	size_t     fw_cmd_buf_len;
+	int32_t    sync_in_obj[CAM_ICP_IPE_IMAGE_MAX];
+	int32_t    merged_sync_in_obj;
+
+	struct cam_hw_prepare_update_args *prepare_args =
+		prepare_hw_update_args;
+	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+	struct cam_packet *packet = NULL;
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	struct cam_buf_io_cfg *io_cfg_ptr = NULL;
+	struct hfi_cmd_ipebps_async *hfi_cmd = NULL;
+
+	if ((!prepare_args) || (!hw_mgr)) {
+		pr_err("Invalid args\n");
+		return -EINVAL;
+	}
+
+	/* Resolve the ctx slot from the fw handle stored at acquire time */
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	fw_handle = *(uint32_t *)prepare_args->ctxt_to_hw_map;
+	ctx_id = cam_icp_mgr_get_ctx_from_fw_handle(hw_mgr, fw_handle);
+	if (ctx_id < 0) {
+		pr_err("Fw handle to ctx mapping is failed\n");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EINVAL;
+	}
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	ctx_data = &hw_mgr->ctx_data[ctx_id];
+	if (!ctx_data->in_use) {
+		pr_err("ctx is not in use\n");
+		return -EINVAL;
+	}
+
+	packet = prepare_args->packet;
+	if (!packet) {
+		pr_err("received packet is NULL\n");
+		return -EINVAL;
+	}
+
+	ICP_DBG("packet header : opcode = %x size = %x",
+			packet->header.op_code,
+			packet->header.size);
+
+	ICP_DBG(" req_id = %x flags = %x\n",
+			(uint32_t)packet->header.request_id,
+			packet->header.flags);
+
+	ICP_DBG("packet data : c_off = %x c_num = %x\n",
+			packet->cmd_buf_offset,
+			packet->num_cmd_buf);
+
+	ICP_DBG("io_off = %x io_num = %x p_off = %x p_num = %x %x %x\n",
+			packet->io_configs_offset,
+			packet->num_io_configs, packet->patch_offset,
+			packet->num_patches, packet->kmd_cmd_buf_index,
+			packet->kmd_cmd_buf_offset);
+
+	/* Only IPE/BPS update opcodes are handled by this manager */
+	if (((packet->header.op_code & 0xff) !=
+		CAM_ICP_OPCODE_IPE_UPDATE) &&
+		((packet->header.op_code & 0xff) !=
+		CAM_ICP_OPCODE_BPS_UPDATE)) {
+		pr_err("Invalid Opcode in pkt: %d\n",
+			packet->header.op_code & 0xff);
+		return -EINVAL;
+	}
+
+	/* Exactly one cmd buf; patches and io configs are mandatory */
+	if ((packet->num_cmd_buf > 1) || (!packet->num_patches) ||
+					(!packet->num_io_configs)) {
+		pr_err("wrong number of cmd/patch info: %u %u\n",
+				packet->num_cmd_buf,
+				packet->num_patches);
+		return -EINVAL;
+	}
+
+	/* process command buffer descriptors */
+	cmd_desc = (struct cam_cmd_buf_desc *)
+			((uint32_t *) &packet->payload +
+				packet->cmd_buf_offset/4);
+	ICP_DBG("packet = %pK cmd_desc = %pK size = %lu\n",
+			(void *)packet, (void *)cmd_desc,
+			sizeof(struct cam_cmd_buf_desc));
+
+	/* Translate the cmd buffer handle into a FW-visible iova */
+	rc = cam_mem_get_io_buf(cmd_desc->mem_handle,
+		hw_mgr->iommu_hdl, &iova_addr, &fw_cmd_buf_len);
+	if (rc < 0) {
+		pr_err("unable to get src buf info for cmd buf: %x\n",
+						hw_mgr->iommu_hdl);
+		return rc;
+	}
+	ICP_DBG("cmd_buf desc cpu and iova address: %pK %zu\n",
+				(void *)iova_addr, fw_cmd_buf_len);
+	fw_cmd_buf_iova_addr = iova_addr;
+	fw_cmd_buf_iova_addr = (fw_cmd_buf_iova_addr + cmd_desc->offset);
+
+	/* Update Buffer Address from handles and patch information */
+	rc = cam_packet_util_process_patches(packet, hw_mgr->iommu_hdl);
+	if (rc) {
+		pr_err("Patch processing failed\n");
+		return rc;
+	}
+
+	/* process io config out descriptors */
+	io_cfg_ptr = (struct cam_buf_io_cfg *) ((uint32_t *) &packet->payload +
+				packet->io_configs_offset/4);
+	ICP_DBG("packet = %pK io_cfg_ptr = %pK size = %lu\n",
+			(void *)packet, (void *)io_cfg_ptr,
+			sizeof(struct cam_buf_io_cfg));
+
+	/* NOTE(review): j below is bounded only by the (unknown here)
+	 * capacity of out_map_entries -- confirm against
+	 * cam_hw_prepare_update_args.
+	 */
+	prepare_args->num_out_map_entries = 0;
+	for (i = 0, j = 0; i < packet->num_io_configs; i++) {
+		if (io_cfg_ptr[i].direction == CAM_BUF_INPUT) {
+			ICP_DBG("direction is i : %d :%u\n",
+					i, io_cfg_ptr[i].direction);
+			ICP_DBG("fence is i : %d :%d\n",
+					i, io_cfg_ptr[i].fence);
+			continue;
+		}
+
+		prepare_args->out_map_entries[j].sync_id = io_cfg_ptr[i].fence;
+		prepare_args->out_map_entries[j++].resource_handle =
+							io_cfg_ptr[i].fence;
+		prepare_args->num_out_map_entries++;
+		ICP_DBG(" out fence = %x index = %d\n", io_cfg_ptr[i].fence, i);
+	}
+	ICP_DBG("out buf entries processing is done\n");
+
+	/* process io config in descriptors */
+	for (i = 0, j = 0; i < packet->num_io_configs; i++) {
+		if (io_cfg_ptr[i].direction == CAM_BUF_INPUT) {
+			/* Bound the write into the fixed-size stack array */
+			if (j >= CAM_ICP_IPE_IMAGE_MAX) {
+				pr_err("too many input fences: %d\n", j + 1);
+				return -EINVAL;
+			}
+			sync_in_obj[j++] = io_cfg_ptr[i].fence;
+			ICP_DBG(" in fence = %x index = %d\n",
+					io_cfg_ptr[i].fence, i);
+		}
+	}
+
+	if (j == 1)
+		merged_sync_in_obj = sync_in_obj[j - 1];
+	else if (j > 1) {
+		/* Collapse all input fences into one merged sync object */
+		rc = cam_sync_merge(&sync_in_obj[0], j, &merged_sync_in_obj);
+		if (rc < 0) {
+			pr_err("unable to create in merged object: %d\n",
+								rc);
+			return rc;
+		}
+	} else {
+		pr_err("no input fence provided %u\n", j);
+		return -EINVAL;
+	}
+
+	prepare_args->in_map_entries[0].sync_id = merged_sync_in_obj;
+	prepare_args->in_map_entries[0].resource_handle =
+			ctx_data->icp_dev_acquire_info.dev_type;
+	prepare_args->num_in_map_entries = 1;
+	ICP_DBG("in buf entries processing is done\n");
+
+	/* Claim a free frame slot in this ctx's frame-process table.
+	 * NOTE(review): on failure here a merged sync object created above
+	 * is not released -- confirm who owns it.
+	 */
+	mutex_lock(&ctx_data->hfi_frame_process.lock);
+	idx = find_first_zero_bit(ctx_data->hfi_frame_process.bitmap,
+			ctx_data->hfi_frame_process.bits);
+	if (idx < 0 || idx >= CAM_FRAME_CMD_MAX) {
+		pr_err("request idx is wrong: %d\n", idx);
+		mutex_unlock(&ctx_data->hfi_frame_process.lock);
+		return -EINVAL;
+	}
+	set_bit(idx, ctx_data->hfi_frame_process.bitmap);
+	mutex_unlock(&ctx_data->hfi_frame_process.lock);
+
+	ctx_data->hfi_frame_process.request_id[idx] = packet->header.request_id;
+	ICP_DBG("slot[%d]: %d\n", idx,
+		ctx_data->hfi_frame_process.request_id[idx]);
+	ctx_data->hfi_frame_process.num_out_resources[idx] =
+				prepare_args->num_out_map_entries;
+	for (i = 0; i < prepare_args->num_out_map_entries; i++)
+		ctx_data->hfi_frame_process.out_resource[idx][i] =
+			prepare_args->out_map_entries[i].resource_handle;
+
+	/* Stage the FW command in the slot; config_hw enqueues it later */
+	hfi_cmd = (struct hfi_cmd_ipebps_async *)
+			&ctx_data->hfi_frame_process.hfi_frame_cmd[idx];
+
+	cam_icp_mgr_prepare_frame_process_cmd(
+			ctx_data, hfi_cmd, packet->header.request_id,
+			fw_cmd_buf_iova_addr);
+
+	prepare_args->num_hw_update_entries = 1;
+	prepare_args->hw_update_entries[0].addr = (uint64_t)hfi_cmd;
+
+	prepare_args->priv = &ctx_data->hfi_frame_process.request_id[idx];
+
+	ICP_DBG("slot : %d, hfi_cmd : %pK, request : %d\n", idx,
+		(void *)hfi_cmd,
+		ctx_data->hfi_frame_process.request_id[idx]);
+
+	return rc;
+}
+
+/*
+ * cam_icp_mgr_release_hw() - release the context mapped to the caller's
+ * fw handle; powers down CPAS when the last context goes away.
+ *
+ * @hw_mgr_priv:     cam_icp_hw_mgr instance
+ * @release_hw_args: cam_hw_release_args with ctxt_to_hw_map from acquire
+ *
+ * Returns 0 on success, -EINVAL on an unknown handle or release failure.
+ */
+static int cam_icp_mgr_release_hw(void *hw_mgr_priv, void *release_hw_args)
+{
+	int rc = 0;
+	int ctx_id = 0;
+	int i;
+	uint32_t fw_handle;
+	struct cam_hw_release_args *release_hw = release_hw_args;
+	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+
+	if (!release_hw || !hw_mgr) {
+		pr_err("Invalid args\n");
+		return -EINVAL;
+	}
+
+	/* Debug-only dump of every slot's state */
+	for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
+		ctx_data = &hw_mgr->ctx_data[i];
+		ICP_DBG("i = %d in_use = %u fw_handle = %u\n", i,
+				ctx_data->in_use, ctx_data->fw_handle);
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	fw_handle = *(uint32_t *)release_hw->ctxt_to_hw_map;
+	ctx_id = cam_icp_mgr_get_ctx_from_fw_handle(hw_mgr, fw_handle);
+	if (ctx_id < 0) {
+		pr_err("Invalid ctx id\n");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EINVAL;
+	}
+
+	rc = cam_icp_mgr_release_ctx(hw_mgr, ctx_id);
+	if (rc) {
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EINVAL;
+	}
+
+	/* Last context out turns CPAS (bus voting) off.
+	 * NOTE(review): ctxt_cnt is decremented unconditionally; if it were
+	 * already 0 this would underflow -- confirm callers can't get here
+	 * without a successful acquire.
+	 */
+	--hw_mgr->ctxt_cnt;
+	if (!hw_mgr->ctxt_cnt) {
+		ICP_DBG("stop cpas for last context\n");
+		cam_icp_stop_cpas(hw_mgr);
+	}
+	ICP_DBG("context count : %u\n", hw_mgr->ctxt_cnt);
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	ICP_DBG("fw handle %d\n", fw_handle);
+	return rc;
+}
+
+/*
+ * cam_icp_mgr_send_config_io() - send an async CONFIG_IO command for this
+ * context and block until the firmware acknowledges it.
+ *
+ * @ctx_data:    context owning the firmware handle
+ * @task:        pre-allocated workq task used to carry the command
+ * @io_buf_addr: iova of the client's IO-config command buffer
+ *
+ * Always returns 0 (the FW result is delivered via the completion path).
+ */
+static int cam_icp_mgr_send_config_io(struct cam_icp_hw_ctx_data *ctx_data,
+			struct crm_workq_task *task, uint32_t io_buf_addr)
+{
+	int rc = 0;
+	struct hfi_cmd_work_data *task_data;
+	struct hfi_cmd_ipebps_async ioconfig_cmd;
+
+	ioconfig_cmd.size = sizeof(struct hfi_cmd_ipebps_async);
+	ioconfig_cmd.pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT;
+	if (ctx_data->icp_dev_acquire_info.dev_type == CAM_ICP_RES_TYPE_BPS)
+		ioconfig_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_BPS_CONFIG_IO;
+	else
+		ioconfig_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO;
+
+	reinit_completion(&ctx_data->wait_complete);
+	ICP_DBG("Sending HFI_CMD_IPEBPS_ASYNC_COMMAND: opcode :%u\n",
+						ioconfig_cmd.opcode);
+	ioconfig_cmd.num_fw_handles = 1;
+	ioconfig_cmd.fw_handles[0] = ctx_data->fw_handle;
+	ioconfig_cmd.payload.indirect = io_buf_addr;
+	/* FW echoes user_data1 back so the response path can find this ctx */
+	ioconfig_cmd.user_data1 = (uint64_t)ctx_data;
+	ioconfig_cmd.user_data2 = (uint64_t)0x0;
+	/* NOTE(review): ioconfig_cmd is on this stack frame and the wait
+	 * below is unbounded; a lost FW response blocks this thread forever.
+	 */
+	task_data = (struct hfi_cmd_work_data *)task->payload;
+	task_data->data = (void *)&ioconfig_cmd;
+	task_data->request_id = 0;
+	task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
+	task->process_cb = cam_icp_mgr_process_cmd;
+	cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr, CRM_TASK_PRIORITY_0);
+	ICP_DBG("fw_hdl = %x ctx_data = %pK\n", ctx_data->fw_handle, ctx_data);
+	wait_for_completion(&ctx_data->wait_complete);
+
+	return rc;
+}
+
+/*
+ * cam_icp_mgr_create_handle() - ask the firmware to create a device
+ * handle (IPE or BPS) for this context and block until the response.
+ *
+ * @dev_type: CAM_ICP_RES_TYPE_* requested by the client
+ * @ctx_data: context that will own the new fw handle
+ * @task:     pre-allocated workq task used to carry the command
+ *
+ * Always returns 0; the created handle is stored into ctx_data by the
+ * response path before the completion fires.
+ */
+static int cam_icp_mgr_create_handle(uint32_t dev_type,
+	struct cam_icp_hw_ctx_data *ctx_data,
+	struct crm_workq_task *task)
+{
+	struct hfi_cmd_create_handle create_handle;
+	struct hfi_cmd_work_data *task_data;
+	int rc = 0;
+
+	create_handle.size = sizeof(struct hfi_cmd_create_handle);
+	create_handle.pkt_type = HFI_CMD_IPEBPS_CREATE_HANDLE;
+	create_handle.handle_type = dev_type;
+	/* FW echoes user_data1 back so the response path can find this ctx */
+	create_handle.user_data1 = (uint64_t)ctx_data;
+	ICP_DBG("%x %x %x %pK\n", create_handle.size,	create_handle.pkt_type,
+		create_handle.handle_type, (void *)create_handle.user_data1);
+	ICP_DBG("Sending HFI_CMD_IPEBPS_CREATE_HANDLE\n");
+
+	/* NOTE(review): create_handle is stack-local and the wait below is
+	 * unbounded; a lost FW response blocks this thread forever.
+	 */
+	reinit_completion(&ctx_data->wait_complete);
+	task_data = (struct hfi_cmd_work_data *)task->payload;
+	task_data->data = (void *)&create_handle;
+	task_data->request_id = 0;
+	task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
+	task->process_cb = cam_icp_mgr_process_cmd;
+	cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr, CRM_TASK_PRIORITY_0);
+	wait_for_completion(&ctx_data->wait_complete);
+
+	return rc;
+}
+
+/*
+ * cam_icp_mgr_send_ping() - send HFI_CMD_SYS_PING to the firmware and
+ * block until the ping ack completes wait_complete.
+ *
+ * @ctx_data: context whose completion is used for the handshake
+ * @task:     pre-allocated workq task used to carry the command
+ *
+ * Always returns 0 once the ack arrives.
+ */
+static int cam_icp_mgr_send_ping(struct cam_icp_hw_ctx_data *ctx_data,
+	struct crm_workq_task *task)
+{
+	struct hfi_cmd_ping_pkt ping_pkt;
+	struct hfi_cmd_work_data *task_data;
+	int rc = 0;
+
+	ping_pkt.size = sizeof(struct hfi_cmd_ping_pkt);
+	ping_pkt.pkt_type = HFI_CMD_SYS_PING;
+	/* FW echoes user_data back so the response path can find this ctx */
+	ping_pkt.user_data = (uint64_t)ctx_data;
+	ICP_DBG("Sending HFI_CMD_SYS_PING\n");
+	ICP_DBG("%x %x %pK\n", ping_pkt.size,	ping_pkt.pkt_type,
+		(void *)ping_pkt.user_data);
+
+	/* NOTE(review): siblings (create_handle/config_io/destroy) use
+	 * reinit_completion() here; init_completion() may be intentional
+	 * because ping is the first command on a fresh ctx -- confirm where
+	 * wait_complete is first initialized before changing this.
+	 */
+	init_completion(&ctx_data->wait_complete);
+	task_data = (struct hfi_cmd_work_data *)task->payload;
+	task_data->data = (void *)&ping_pkt;
+	task_data->request_id = 0;
+	task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
+	task->process_cb = cam_icp_mgr_process_cmd;
+	cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr, CRM_TASK_PRIORITY_0);
+	wait_for_completion(&ctx_data->wait_complete);
+
+	return rc;
+}
+
+/*
+ * cam_icp_mgr_acquire_hw() - acquire an ICP (IPE/BPS) context for a
+ * client: copy the acquire info from user space, claim a free ctx slot,
+ * ping the firmware, create a FW handle, send the IO-config command and
+ * return the updated acquire info (scratch_mem_size) to user space.
+ *
+ * Fixes:
+ *  - the frame-process bitmap was allocated with
+ *    kzalloc(sizeof(bitmap_size)) -- only sizeof(int) bytes -- instead of
+ *    kzalloc(bitmap_size); bit operations beyond the first word wrote
+ *    past the allocation.  The result is now also NULL-checked.
+ *  - several error paths (task get, copy_to_user) jumped to cleanup with
+ *    rc still 0, reporting success to the caller.
+ *  - the out[] debug prints unconditionally read p_icp_out[1], which is
+ *    past the allocation when num_out_res < 2; print only valid entries.
+ */
+static int cam_icp_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
+{
+	int rc = 0, i, bitmap_size = 0, tmp_size;
+	uint32_t ctx_id = 0;
+	uint64_t io_buf_addr;
+	size_t io_buf_size;
+	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+	struct cam_hw_acquire_args *args = acquire_hw_args;
+	struct cam_icp_acquire_dev_info icp_dev_acquire_info;
+	struct cam_icp_res_info *p_icp_out = NULL;
+	struct crm_workq_task *task;
+	uint8_t *tmp_acquire;
+
+	if ((!hw_mgr_priv) || (!acquire_hw_args)) {
+		pr_err("Invalid params: %pK %pK\n", hw_mgr_priv,
+			acquire_hw_args);
+		return -EINVAL;
+	}
+
+	if (args->num_acq > 1) {
+		pr_err("number of resources are wrong: %u\n", args->num_acq);
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&icp_dev_acquire_info,
+			(void __user *)args->acquire_info,
+			sizeof(icp_dev_acquire_info)))
+		return -EFAULT;
+
+	/* num_out_res is user-controlled; bound it before sizing the copy */
+	if (icp_dev_acquire_info.num_out_res > ICP_IPE_MAX_OUTPUT_SUPPORTED) {
+		pr_err("num of out resources exceeding : %u\n",
+			icp_dev_acquire_info.num_out_res);
+		return -EINVAL;
+	}
+
+	ICP_DBG("%x %x %x %x %x %x %x\n",
+		icp_dev_acquire_info.dev_type,
+		icp_dev_acquire_info.in_res.format,
+		icp_dev_acquire_info.in_res.width,
+		icp_dev_acquire_info.in_res.height,
+		icp_dev_acquire_info.in_res.fps,
+		icp_dev_acquire_info.num_out_res,
+		icp_dev_acquire_info.scratch_mem_size);
+
+	tmp_size = sizeof(icp_dev_acquire_info) +
+			icp_dev_acquire_info.num_out_res *
+			sizeof(struct cam_icp_res_info);
+
+	tmp_acquire = kzalloc(tmp_size, GFP_KERNEL);
+	if (!tmp_acquire)
+		return -ENOMEM;
+
+	if (copy_from_user(tmp_acquire,
+			(void __user *)args->acquire_info,
+			tmp_size)) {
+		kfree(tmp_acquire);
+		return -EFAULT;
+	}
+
+	/* NOTE(review): assumes cam_icp_acquire_dev_info embeds one
+	 * cam_icp_res_info at its tail, so out_res[0] starts one res_info
+	 * before the end of the header -- confirm against the UAPI struct.
+	 */
+	p_icp_out =
+		(struct cam_icp_res_info *)(tmp_acquire +
+		sizeof(icp_dev_acquire_info)-
+		sizeof(struct cam_icp_res_info));
+	for (i = 0; i < icp_dev_acquire_info.num_out_res; i++)
+		ICP_DBG("out[%d] %x %x %x %x\n", i,
+			p_icp_out[i].format,
+			p_icp_out[i].width,
+			p_icp_out[i].height,
+			p_icp_out[i].fps);
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	ctx_id = cam_icp_mgr_get_free_ctx(hw_mgr);
+	if (ctx_id >= CAM_ICP_CTX_MAX) {
+		pr_err("No free ctx space in hw_mgr\n");
+		kfree(tmp_acquire);
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EFAULT;
+	}
+
+	ctx_data = &hw_mgr->ctx_data[ctx_id];
+
+	/* First context powers up CPAS (bus voting) */
+	if (!hw_mgr->ctxt_cnt++) {
+		ICP_DBG("starting cpas\n");
+		cam_icp_start_cpas(hw_mgr);
+	}
+	ICP_DBG("context count : %u\n", hw_mgr->ctxt_cnt);
+
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	/* Fill ctx with acquire info */
+	mutex_lock(&ctx_data->ctx_mutex);
+	ctx_data->icp_dev_acquire_info = icp_dev_acquire_info;
+	for (i = 0; i < icp_dev_acquire_info.num_out_res; i++)
+		ctx_data->icp_out_acquire_info[i] = p_icp_out[i];
+	mutex_unlock(&ctx_data->ctx_mutex);
+
+	/* Get IOCONFIG command info */
+	if (ctx_data->icp_dev_acquire_info.secure_mode)
+		rc = cam_mem_get_io_buf(
+			icp_dev_acquire_info.io_config_cmd_handle,
+			hw_mgr->iommu_sec_hdl,
+			&io_buf_addr, &io_buf_size);
+	else
+		rc = cam_mem_get_io_buf(
+			icp_dev_acquire_info.io_config_cmd_handle,
+			hw_mgr->iommu_hdl,
+			&io_buf_addr, &io_buf_size);
+
+	ICP_DBG("io_config_cmd_handle : %d\n",
+		icp_dev_acquire_info.io_config_cmd_handle);
+	ICP_DBG("io_buf_addr : %pK\n", (void *)io_buf_addr);
+	ICP_DBG("io_buf_size : %zu\n", io_buf_size);
+	if (rc < 0) {
+		pr_err("unable to get src buf info from io desc\n");
+		goto cmd_cpu_buf_failed;
+	}
+
+	mutex_lock(&icp_hw_mgr.hw_mgr_mutex);
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+	if (!task) {
+		pr_err("no free task\n");
+		mutex_unlock(&icp_hw_mgr.hw_mgr_mutex);
+		rc = -ENOMEM;
+		goto get_create_task_failed;
+	}
+	mutex_unlock(&icp_hw_mgr.hw_mgr_mutex);
+
+	rc = cam_icp_mgr_send_ping(ctx_data, task);
+	if (rc) {
+		pr_err("ping ack not received\n");
+		goto create_handle_failed;
+	}
+
+	mutex_lock(&icp_hw_mgr.hw_mgr_mutex);
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+	if (!task) {
+		pr_err("no free task\n");
+		mutex_unlock(&icp_hw_mgr.hw_mgr_mutex);
+		rc = -ENOMEM;
+		goto get_create_task_failed;
+	}
+	mutex_unlock(&icp_hw_mgr.hw_mgr_mutex);
+
+	/* Send create fw handle command */
+	rc = cam_icp_mgr_create_handle(icp_dev_acquire_info.dev_type,
+			ctx_data, task);
+	if (rc) {
+		pr_err("create handle failed\n");
+		goto create_handle_failed;
+	}
+
+	/* Send IOCONFIG command */
+	mutex_lock(&icp_hw_mgr.hw_mgr_mutex);
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+	if (!task) {
+		pr_err("no empty task\n");
+		mutex_unlock(&icp_hw_mgr.hw_mgr_mutex);
+		rc = -ENOMEM;
+		goto get_ioconfig_task_failed;
+	}
+	mutex_unlock(&icp_hw_mgr.hw_mgr_mutex);
+
+	rc = cam_icp_mgr_send_config_io(ctx_data, task, io_buf_addr);
+	if (rc) {
+		pr_err("IO Config command failed\n");
+		goto ioconfig_failed;
+	}
+
+	mutex_lock(&ctx_data->ctx_mutex);
+	ctx_data->context_priv = args->context_data;
+	args->ctxt_to_hw_map = &ctx_data->fw_handle;
+
+	/* Fix: allocate bitmap_size bytes, not sizeof(bitmap_size) */
+	bitmap_size = BITS_TO_LONGS(CAM_FRAME_CMD_MAX) * sizeof(long);
+	ctx_data->hfi_frame_process.bitmap =
+			kzalloc(bitmap_size, GFP_KERNEL);
+	if (!ctx_data->hfi_frame_process.bitmap) {
+		mutex_unlock(&ctx_data->ctx_mutex);
+		rc = -ENOMEM;
+		goto ioconfig_failed;
+	}
+	ctx_data->hfi_frame_process.bits = bitmap_size * BITS_PER_BYTE;
+	mutex_init(&ctx_data->hfi_frame_process.lock);
+	mutex_unlock(&ctx_data->ctx_mutex);
+
+	hw_mgr->ctx_data[ctx_id].ctxt_event_cb = args->event_cb;
+
+	icp_dev_acquire_info.scratch_mem_size = ctx_data->scratch_mem_size;
+	if (copy_to_user((void __user *)args->acquire_info,
+				&icp_dev_acquire_info,
+			sizeof(icp_dev_acquire_info))) {
+		rc = -EFAULT;
+		goto copy_to_user_failed;
+	}
+
+	ICP_DBG("scratch mem size = %x fw_handle = %x\n",
+			(unsigned int)icp_dev_acquire_info.scratch_mem_size,
+			(unsigned int)ctx_data->fw_handle);
+	kfree(tmp_acquire);
+	return 0;
+
+copy_to_user_failed:
+ioconfig_failed:
+get_ioconfig_task_failed:
+	/* A FW handle exists by now; best-effort destroy it */
+	mutex_lock(&icp_hw_mgr.hw_mgr_mutex);
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+	mutex_unlock(&icp_hw_mgr.hw_mgr_mutex);
+	if (task)
+		cam_icp_mgr_destroy_handle(ctx_data, task);
+create_handle_failed:
+get_create_task_failed:
+cmd_cpu_buf_failed:
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	--hw_mgr->ctxt_cnt;
+	if (!hw_mgr->ctxt_cnt)
+		cam_icp_stop_cpas(hw_mgr);
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	cam_icp_mgr_release_ctx(hw_mgr, ctx_id);
+	kfree(tmp_acquire);
+	return rc;
+}
+
+/*
+ * cam_icp_mgr_get_hw_caps() - query firmware capabilities over HFI and
+ * return them (plus the IOMMU handles) to user space.
+ *
+ * @hw_mgr_priv:  cam_icp_hw_mgr instance
+ * @hw_caps_args: cam_query_cap_cmd holding the user buffer handle
+ *
+ * Returns 0 on success, -EFAULT on user copy failure, or the HFI error.
+ */
+static int cam_icp_mgr_get_hw_caps(void *hw_mgr_priv, void *hw_caps_args)
+{
+	int rc = 0;
+	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_query_cap_cmd *query_cap = hw_caps_args;
+
+	if ((!hw_mgr_priv) || (!hw_caps_args)) {
+		pr_err("Invalid params: %pK %pK\n", hw_mgr_priv, hw_caps_args);
+		return -EINVAL;
+	}
+
+	/* Seed icp_caps with the user's request before HFI fills it in */
+	if (copy_from_user(&icp_hw_mgr.icp_caps,
+			(void __user *)query_cap->caps_handle,
+			sizeof(struct cam_icp_query_cap_cmd))) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	rc = hfi_get_hw_caps(&icp_hw_mgr.icp_caps);
+	if (rc < 0) {
+		pr_err("Unable to get caps from HFI: %d\n", rc);
+		goto hfi_get_caps_fail;
+	}
+
+	icp_hw_mgr.icp_caps.dev_iommu_handle.non_secure = hw_mgr->iommu_hdl;
+	icp_hw_mgr.icp_caps.dev_iommu_handle.secure = hw_mgr->iommu_sec_hdl;
+
+	if (copy_to_user((void __user *)query_cap->caps_handle,
+			&icp_hw_mgr.icp_caps,
+			sizeof(struct cam_icp_query_cap_cmd))) {
+		pr_err("copy_to_user failed\n");
+		rc = -EFAULT;
+		goto hfi_get_caps_fail;
+	}
+
+	/* Success path falls through: rc is 0 here */
+hfi_get_caps_fail:
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	return rc;
+}
+
+int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl)
+{
+	int count, i, rc = 0;
+	uint32_t num_dev;
+	uint32_t num_ipe_dev;
+	const char *name = NULL;
+	struct device_node *child_node = NULL;
+	struct platform_device *child_pdev = NULL;
+	struct cam_hw_intf *child_dev_intf = NULL;
+	struct cam_hw_mgr_intf *hw_mgr_intf;
+
+
+	hw_mgr_intf = (struct cam_hw_mgr_intf *)hw_mgr_hdl;
+	if (!of_node || !hw_mgr_intf) {
+		pr_err("Invalid args of_node %pK hw_mgr %pK\n",
+			of_node, hw_mgr_intf);
+		return -EINVAL;
+	}
+
+	hw_mgr_intf->hw_mgr_priv = &icp_hw_mgr;
+	hw_mgr_intf->hw_get_caps = cam_icp_mgr_get_hw_caps;
+	hw_mgr_intf->hw_acquire = cam_icp_mgr_acquire_hw;
+	hw_mgr_intf->hw_release = cam_icp_mgr_release_hw;
+	hw_mgr_intf->hw_prepare_update = cam_icp_mgr_prepare_hw_update;
+	hw_mgr_intf->hw_config = cam_icp_mgr_config_hw;
+	hw_mgr_intf->download_fw = cam_icp_mgr_download_fw;
+	hw_mgr_intf->hw_close = cam_icp_mgr_hw_close;
+
+	mutex_init(&icp_hw_mgr.hw_mgr_mutex);
+	spin_lock_init(&icp_hw_mgr.hw_mgr_lock);
+
+	for (i = 0; i < CAM_ICP_CTX_MAX; i++)
+		mutex_init(&icp_hw_mgr.ctx_data[i].ctx_mutex);
+
+	/* Get number of device objects */
+	count = of_property_count_strings(of_node, "compat-hw-name");
+	if (count <= 0) {
+		pr_err("no compat hw found in dev tree, count = %d\n", count);
+		rc = -EINVAL;
+		goto num_dev_failed;
+	}
+
+	/* Get number of a5 device nodes and a5 mem allocation */
+	rc = of_property_read_u32(of_node, "num-a5", &num_dev);
+	if (rc < 0) {
+		pr_err("getting num of a5 failed\n");
+		goto num_dev_failed;
+	}
+
+	icp_hw_mgr.devices[CAM_ICP_DEV_A5] = kzalloc(
+		sizeof(struct cam_hw_intf *) * num_dev, GFP_KERNEL);
+	if (!icp_hw_mgr.devices[CAM_ICP_DEV_A5]) {
+		rc = -ENOMEM;
+		goto num_dev_failed;
+	}
+
+	/* Get number of ipe device nodes and ipe mem allocation */
+	rc = of_property_read_u32(of_node, "num-ipe", &num_ipe_dev);
+	if (rc < 0) {
+		pr_err("getting number of ipe dev nodes failed\n");
+		goto num_ipe_failed;
+	}
+
+	icp_hw_mgr.devices[CAM_ICP_DEV_IPE] = kzalloc(
+		sizeof(struct cam_hw_intf *) * num_ipe_dev, GFP_KERNEL);
+	if (!icp_hw_mgr.devices[CAM_ICP_DEV_IPE]) {
+		rc = -ENOMEM;
+		goto num_ipe_failed;
+	}
+
+	/* Get number of bps device nodes and bps mem allocation */
+	rc = of_property_read_u32(of_node, "num-bps", &num_dev);
+	if (rc < 0) {
+		pr_err("read num bps devices failed\n");
+		goto num_bps_failed;
+	}
+	icp_hw_mgr.devices[CAM_ICP_DEV_BPS] = kzalloc(
+		sizeof(struct cam_hw_intf *) * num_dev, GFP_KERNEL);
+	if (!icp_hw_mgr.devices[CAM_ICP_DEV_BPS]) {
+		rc = -ENOMEM;
+		goto num_bps_failed;
+	}
+
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(of_node, "compat-hw-name",
+								i, &name);
+		if (rc < 0) {
+			pr_err("getting dev object name failed\n");
+			goto compat_hw_name_failed;
+		}
+
+		child_node = of_find_node_by_name(NULL, name);
+		if (!child_node) {
+			pr_err("error! Cannot find node in dtsi %s\n", name);
+			rc = -ENODEV;
+			goto compat_hw_name_failed;
+		}
+
+		child_pdev = of_find_device_by_node(child_node);
+		if (!child_pdev) {
+			pr_err("failed to find device on bus %s\n",
+				child_node->name);
+			rc = -ENODEV;
+			of_node_put(child_node);
+			goto compat_hw_name_failed;
+		}
+
+		child_dev_intf = (struct cam_hw_intf *)platform_get_drvdata(
+								child_pdev);
+		if (!child_dev_intf) {
+			pr_err("no child device\n");
+			rc = -ENODEV; of_node_put(child_node);
+			goto compat_hw_name_failed;
+		}
+		ICP_DBG("child_intf %pK\n", child_dev_intf);
+		ICP_DBG("child type %d index %d\n",	child_dev_intf->hw_type,
+				child_dev_intf->hw_idx);
+
+		icp_hw_mgr.devices[child_dev_intf->hw_type]
+			[child_dev_intf->hw_idx] = child_dev_intf;
+
+		of_node_put(child_node);
+	}
+
+	rc = cam_smmu_get_handle("icp", &icp_hw_mgr.iommu_hdl);
+	if (rc < 0) {
+		pr_err("icp get iommu handle failed\n");
+		goto compat_hw_name_failed;
+	}
+
+	pr_debug("mmu handle :%d\n", icp_hw_mgr.iommu_hdl);
+	rc = cam_smmu_ops(icp_hw_mgr.iommu_hdl, CAM_SMMU_ATTACH);
+	if (rc < 0) {
+		pr_err("icp attach failed: %d\n", rc);
+		goto icp_attach_failed;
+	}
+
+	rc = cam_req_mgr_workq_create("icp_command_queue", ICP_WORKQ_NUM_TASK,
+					&icp_hw_mgr.cmd_work);
+	if (rc < 0) {
+		pr_err("unable to create a worker\n");
+		goto cmd_work_failed;
+	}
+
+	rc = cam_req_mgr_workq_create("icp_message_queue", ICP_WORKQ_NUM_TASK,
+					&icp_hw_mgr.msg_work);
+	if (rc < 0) {
+		pr_err("unable to create a worker\n");
+		goto msg_work_failed;
+	}
+
+	icp_hw_mgr.cmd_work_data = (struct hfi_cmd_work_data *)
+		kzalloc(sizeof(struct hfi_cmd_work_data) * ICP_WORKQ_NUM_TASK,
+		GFP_KERNEL);
+	if (!icp_hw_mgr.cmd_work_data)
+		{ rc = -ENOMEM; goto cmd_work_data_failed; }
+
+	icp_hw_mgr.msg_work_data = (struct hfi_msg_work_data *)
+		kzalloc(sizeof(struct hfi_msg_work_data) * ICP_WORKQ_NUM_TASK,
+		GFP_KERNEL);
+	if (!icp_hw_mgr.msg_work_data)
+		{ rc = -ENOMEM; goto msg_work_data_failed; }
+
+
+	for (i = 0; i < ICP_WORKQ_NUM_TASK; i++)
+		icp_hw_mgr.msg_work->task.pool[i].payload =
+				&icp_hw_mgr.msg_work_data[i];
+
+	for (i = 0; i < ICP_WORKQ_NUM_TASK; i++)
+		icp_hw_mgr.cmd_work->task.pool[i].payload =
+				&icp_hw_mgr.cmd_work_data[i];
+
+	init_completion(&icp_hw_mgr.a5_complete);
+
+	pr_debug("Exit\n");
+	return rc;
+
+msg_work_data_failed:
+	kfree(icp_hw_mgr.cmd_work_data);
+cmd_work_data_failed:
+	cam_req_mgr_workq_destroy(&icp_hw_mgr.msg_work);
+msg_work_failed:
+	cam_req_mgr_workq_destroy(&icp_hw_mgr.cmd_work);
+cmd_work_failed:
+	cam_smmu_ops(icp_hw_mgr.iommu_hdl, CAM_SMMU_DETACH);
+icp_attach_failed:
+	icp_hw_mgr.iommu_hdl = 0;
+compat_hw_name_failed:
+	kfree(icp_hw_mgr.devices[CAM_ICP_DEV_BPS]);
+num_bps_failed:
+	kfree(icp_hw_mgr.devices[CAM_ICP_DEV_IPE]);
+num_ipe_failed:
+	kfree(icp_hw_mgr.devices[CAM_ICP_DEV_A5]);
+num_dev_failed:
+	mutex_destroy(&icp_hw_mgr.hw_mgr_mutex);
+	for (i = 0; i < CAM_ICP_CTX_MAX; i++)
+		mutex_destroy(&icp_hw_mgr.ctx_data[i].ctx_mutex);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
new file mode 100644
index 0000000..e5ffa7a
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
@@ -0,0 +1,181 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_ICP_HW_MGR_H
+#define CAM_ICP_HW_MGR_H
+
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <media/cam_icp.h>
+#include "cam_icp_hw_intf.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_hw_intf.h"
+#include "cam_a5_hw_intf.h"
+#include "hfi_session_defs.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_mem_mgr.h"
+
+#define CAM_ICP_ROLE_PARENT     1
+#define CAM_ICP_ROLE_CHILD      2
+
+#define CAM_FRAME_CMD_MAX       20
+
+#define CAM_MAX_OUT_RES         6
+
+#define ICP_WORKQ_NUM_TASK      30
+#define ICP_WORKQ_TASK_CMD_TYPE 1
+#define ICP_WORKQ_TASK_MSG_TYPE 2
+
+#define ICP_PACKET_SIZE         0
+#define ICP_PACKET_TYPE         1
+#define ICP_PACKET_IPCODE       2
+#define ICP_IPE_MAX_OUTPUT_SUPPORTED 6
+
+/**
+ * struct icp_hfi_mem_info
+ * @qtbl: Memory info of queue table
+ * @cmd_q: Memory info of command queue
+ * @msg_q: Memory info of message queue
+ * @dbg_q: Memory info of debug queue
+ * @sec_heap: Memory info of secondary heap
+ * @fw_buf: Memory info of firmware
+ */
+struct icp_hfi_mem_info {
+	struct cam_mem_mgr_memory_desc qtbl;
+	struct cam_mem_mgr_memory_desc cmd_q;
+	struct cam_mem_mgr_memory_desc msg_q;
+	struct cam_mem_mgr_memory_desc dbg_q;
+	struct cam_mem_mgr_memory_desc sec_heap;
+	struct cam_mem_mgr_memory_desc fw_buf;
+};
+
+/**
+ * struct hfi_cmd_work_data
+ * @type: Task type
+ * @data: Pointer to command data
+ * @request_id: Request id
+ */
+struct hfi_cmd_work_data {
+	uint32_t type;
+	void *data;
+	int32_t request_id;
+};
+
+/**
+ * struct hfi_msg_work_data
+ * @type: Task type
+ * @data: Pointer to message data
+ * @irq_status: IRQ status
+ */
+struct hfi_msg_work_data {
+	uint32_t type;
+	void *data;
+	uint32_t irq_status;
+};
+
+/**
+ * struct hfi_frame_process_info
+ * @hfi_frame_cmd: Frame process command info
+ * @bitmap: Bitmap for hfi_frame_cmd
+ * @bits: Used in hfi_frame_cmd bitmap
+ * @lock: Lock for hfi_frame_cmd
+ * @request_id: Request id list
+ * @num_out_resources: Number of out syncs
+ * @out_resource: Out sync info
+ */
+struct hfi_frame_process_info {
+	struct hfi_cmd_ipebps_async hfi_frame_cmd[CAM_FRAME_CMD_MAX];
+	void *bitmap;
+	size_t bits;
+	struct mutex lock;
+	int32_t request_id[CAM_FRAME_CMD_MAX];
+	uint32_t num_out_resources[CAM_FRAME_CMD_MAX];
+	uint32_t out_resource[CAM_FRAME_CMD_MAX][CAM_MAX_OUT_RES];
+};
+
+/**
+ * struct cam_icp_hw_ctx_data
+ * @context_priv: Context private data
+ * @ctx_mutex: Mutex for context
+ * @fw_handle: Firmware handle
+ * @scratch_mem_size: Scratch memory size
+ * @acquire_dev_cmd: Acquire command
+ * @icp_dev_acquire_info: Acquire device info
+ * @icp_out_acquire_info: Acquire out resource info
+ * @ctxt_event_cb: Context callback function
+ * @in_use: Flag for context usage
+ * @role: Role of a context in case of chaining
+ * @chain_ctx: Peer context
+ * @hfi_frame_process: Frame process command
+ * @wait_complete: Completion info
+ * @temp_payload: Payload for destroy handle data
+ */
+struct cam_icp_hw_ctx_data {
+	void *context_priv;
+	struct mutex ctx_mutex;
+	uint32_t fw_handle;
+	uint32_t scratch_mem_size;
+	struct cam_acquire_dev_cmd acquire_dev_cmd;
+	struct cam_icp_acquire_dev_info icp_dev_acquire_info;
+	struct cam_icp_res_info icp_out_acquire_info[CAM_MAX_OUT_RES];
+	cam_hw_event_cb_func ctxt_event_cb;
+	uint32_t in_use;
+	uint32_t role;
+	struct cam_icp_hw_ctx_data *chain_ctx;
+	struct hfi_frame_process_info hfi_frame_process;
+	struct completion wait_complete;
+	struct ipe_bps_destroy temp_payload;
+};
+
+/**
+ * struct cam_icp_hw_mgr
+ * @hw_mgr_mutex: Mutex for ICP hardware manager
+ * @hw_mgr_lock: Spinlock for ICP hardware manager
+ * @devices: Devices of ICP hardware manager
+ * @ctx_data: Context data
+ * @icp_caps: ICP capabilities
+ * @fw_download: Firmware download state
+ * @iommu_hdl: Non secure IOMMU handle
+ * @iommu_sec_hdl: Secure IOMMU handle
+ * @hfi_mem: Memory for hfi
+ * @cmd_work: Work queue for hfi commands
+ * @msg_work: Work queue for hfi messages
+ * @msg_buf: Buffer for message data from firmware
+ * @dbg_buf: Buffer for debug data from firmware
+ * @a5_complete: Completion info
+ * @cmd_work_data: Pointer to command work queue task
+ * @msg_work_data: Pointer to message work queue task
+ * @ctxt_cnt: Active context count
+ */
+struct cam_icp_hw_mgr {
+	struct mutex hw_mgr_mutex;
+	spinlock_t hw_mgr_lock;
+
+	struct cam_hw_intf **devices[CAM_ICP_DEV_MAX];
+	struct cam_icp_hw_ctx_data ctx_data[CAM_ICP_CTX_MAX];
+	struct cam_icp_query_cap_cmd icp_caps;
+
+	bool fw_download;
+	int32_t iommu_hdl;
+	int32_t iommu_sec_hdl;
+	struct icp_hfi_mem_info hfi_mem;
+	struct cam_req_mgr_core_workq *cmd_work;
+	struct cam_req_mgr_core_workq *msg_work;
+	uint32_t msg_buf[256];
+	uint32_t dbg_buf[256];
+	struct completion a5_complete;
+	struct hfi_cmd_work_data *cmd_work_data;
+	struct hfi_msg_work_data *msg_work_data;
+	uint32_t ctxt_cnt;
+};
+
+#endif /* CAM_ICP_HW_MGR_H */
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h
new file mode 100644
index 0000000..2686877
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h
@@ -0,0 +1,79 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_A5_HW_INTF_H
+#define CAM_A5_HW_INTF_H
+
+#include <linux/timer.h>
+#include <uapi/media/cam_defs.h>
+#include <media/cam_icp.h>
+#include "cam_hw_mgr_intf.h"
+#include "cam_icp_hw_intf.h"
+
+enum cam_icp_a5_cmd_type {
+	CAM_ICP_A5_CMD_FW_DOWNLOAD,
+	CAM_ICP_A5_CMD_POWER_COLLAPSE,
+	CAM_ICP_A5_CMD_POWER_RESUME,
+	CAM_ICP_A5_CMD_SET_FW_BUF,
+	CAM_ICP_A5_CMD_ACQUIRE,
+	CAM_ICP_A5_SET_IRQ_CB,
+	CAM_ICP_A5_TEST_IRQ,
+	CAM_ICP_A5_SEND_INIT,
+	CAM_ICP_A5_CMD_VOTE_CPAS,
+	CAM_ICP_A5_CMD_CPAS_START,
+	CAM_ICP_A5_CMD_CPAS_STOP,
+	CAM_ICP_A5_CMD_MAX,
+};
+
+struct cam_icp_a5_set_fw_buf_info {
+	uint32_t iova;
+	uint64_t kva;
+	uint64_t len;
+};
+
+/**
+ * struct cam_icp_a5_query_cap - ICP query device capability payload
+ * @fw_version: firmware version info
+ * @api_version: api version info
+ * @num_ipe: number of ipes
+ * @num_bps: number of bps
+ * @num_dev: number of device capabilities in dev_caps
+ * @reserved: reserved
+ * @dev_ver: returned device capability array
+ * Used as the payload of the CAM_QUERY_CAP IOCTL.
+ */
+struct cam_icp_a5_query_cap {
+	struct cam_icp_ver fw_version;
+	struct cam_icp_ver api_version;
+	uint32_t num_ipe;
+	uint32_t num_bps;
+	uint32_t num_dev;
+	uint32_t reserved;
+	struct cam_icp_dev_ver dev_ver[CAM_ICP_DEV_TYPE_MAX];
+};
+
+struct cam_icp_a5_acquire_dev {
+	uint32_t ctx_id;
+	struct cam_icp_acquire_dev_info icp_acquire_info;
+	struct cam_icp_res_info icp_out_acquire_info[2];
+	uint32_t fw_handle;
+};
+
+struct cam_icp_a5_set_irq_cb {
+	int32_t (*icp_hw_mgr_cb)(uint32_t irq_status, void *data);
+	void *data;
+};
+
+struct cam_icp_a5_test_irq {
+	uint32_t test_irq;
+};
+#endif /* CAM_A5_HW_INTF_H */
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_bps_hw_intf.h b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_bps_hw_intf.h
new file mode 100644
index 0000000..4427a30
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_bps_hw_intf.h
@@ -0,0 +1,32 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_BPS_HW_INTF_H
+#define CAM_BPS_HW_INTF_H
+
+#include <uapi/media/cam_defs.h>
+#include <media/cam_icp.h>
+#include "cam_hw_mgr_intf.h"
+#include "cam_icp_hw_intf.h"
+
+enum cam_icp_bps_cmd_type {
+	CAM_ICP_BPS_CMD_FW_DOWNLOAD,
+	CAM_ICP_BPS_CMD_POWER_COLLAPSE,
+	CAM_ICP_BPS_CMD_POWER_RESUME,
+	CAM_ICP_BPS_CMD_SET_FW_BUF,
+	CAM_ICP_BPS_CMD_VOTE_CPAS,
+	CAM_ICP_BPS_CMD_CPAS_START,
+	CAM_ICP_BPS_CMD_CPAS_STOP,
+	CAM_ICP_BPS_CMD_MAX,
+};
+
+#endif /* CAM_BPS_HW_INTF_H */
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h
new file mode 100644
index 0000000..9300ea8
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h
@@ -0,0 +1,27 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_ICP_HW_INTF_H
+#define CAM_ICP_HW_INTF_H
+
+#define CAM_ICP_CTX_MAX                 8
+
+#define CAM_ICP_CMD_BUF_MAX_SIZE     128
+#define CAM_ICP_MSG_BUF_MAX_SIZE     CAM_ICP_CMD_BUF_MAX_SIZE
+
+enum cam_a5_hw_type {
+	CAM_ICP_DEV_A5,
+	CAM_ICP_DEV_IPE,
+	CAM_ICP_DEV_BPS,
+	CAM_ICP_DEV_MAX,
+};
+#endif
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_ipe_hw_intf.h b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_ipe_hw_intf.h
new file mode 100644
index 0000000..0db66c0
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_ipe_hw_intf.h
@@ -0,0 +1,32 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_IPE_HW_INTF_H
+#define CAM_IPE_HW_INTF_H
+
+#include <uapi/media/cam_defs.h>
+#include <media/cam_icp.h>
+#include "cam_hw_mgr_intf.h"
+#include "cam_icp_hw_intf.h"
+
+enum cam_icp_ipe_cmd_type {
+	CAM_ICP_IPE_CMD_FW_DOWNLOAD,
+	CAM_ICP_IPE_CMD_POWER_COLLAPSE,
+	CAM_ICP_IPE_CMD_POWER_RESUME,
+	CAM_ICP_IPE_CMD_SET_FW_BUF,
+	CAM_ICP_IPE_CMD_VOTE_CPAS,
+	CAM_ICP_IPE_CMD_CPAS_START,
+	CAM_ICP_IPE_CMD_CPAS_STOP,
+	CAM_ICP_IPE_CMD_MAX,
+};
+
+#endif /* CAM_IPE_HW_INTF_H */
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/include/cam_icp_hw_mgr_intf.h b/drivers/media/platform/msm/camera/icp/icp_hw/include/cam_icp_hw_mgr_intf.h
new file mode 100644
index 0000000..2f100ca
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/include/cam_icp_hw_mgr_intf.h
@@ -0,0 +1,40 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_ICP_HW_MGR_INTF_H
+#define CAM_ICP_HW_MGR_INTF_H
+
+#include <uapi/media/cam_icp.h>
+#include <uapi/media/cam_defs.h>
+#include <linux/of.h>
+#include "cam_cpas_api.h"
+
+#define ICP_TURBO_VOTE           640000000
+
+int cam_icp_hw_mgr_init(struct device_node *of_node,
+	uint64_t *hw_mgr_hdl);
+
+/**
+ * struct cam_icp_cpas_vote
+ * @ahb_vote: AHB vote info
+ * @axi_vote: AXI vote info
+ * @ahb_vote_valid: Flag for ahb vote data
+ * @axi_vote_valid: flag for axi vote data
+ */
+struct cam_icp_cpas_vote {
+	struct cam_ahb_vote ahb_vote;
+	struct cam_axi_vote axi_vote;
+	uint32_t ahb_vote_valid;
+	uint32_t axi_vote_valid;
+};
+
+#endif /* CAM_ICP_HW_MGR_INTF_H */
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/Makefile b/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/Makefile
new file mode 100644
index 0000000..8af20ae
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/Makefile
@@ -0,0 +1,11 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/icp
+ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/ipe_hw
+ccflags-y += -Idrivers/media/platform/msm/camera/icp/fw_inc
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += ipe_dev.o ipe_core.o ipe_soc.o
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_core.c b/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_core.c
new file mode 100644
index 0000000..15cb943
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_core.c
@@ -0,0 +1,183 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "IPE-CORE %s:%d " fmt, __func__, __LINE__
+
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include "cam_io_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "ipe_core.h"
+#include "ipe_soc.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+#include "cam_ipe_hw_intf.h"
+#include "cam_icp_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+
+static int cam_ipe_caps_vote(struct cam_ipe_device_core_info *core_info,
+	struct cam_icp_cpas_vote *cpas_vote)
+{
+	int rc = 0;
+
+	if (cpas_vote->ahb_vote_valid)
+		rc = cam_cpas_update_ahb_vote(core_info->cpas_handle,
+			&cpas_vote->ahb_vote);
+	if (cpas_vote->axi_vote_valid)
+		rc = cam_cpas_update_axi_vote(core_info->cpas_handle,
+			&cpas_vote->axi_vote);
+
+	if (rc < 0)
+		pr_err("cpas vote is failed: %d\n", rc);
+
+	return rc;
+}
+
+int cam_ipe_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *ipe_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_ipe_device_core_info *core_info = NULL;
+	struct cam_icp_cpas_vote cpas_vote;
+	int rc = 0;
+
+	if (!device_priv) {
+		pr_err("Invalid cam_dev_info\n");
+		return -EINVAL;
+	}
+
+	soc_info = &ipe_dev->soc_info;
+	core_info = (struct cam_ipe_device_core_info *)ipe_dev->core_info;
+
+	if ((!soc_info) || (!core_info)) {
+		pr_err("soc_info = %pK core_info = %pK\n", soc_info, core_info);
+		return -EINVAL;
+	}
+
+	cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	cpas_vote.ahb_vote.vote.level = CAM_TURBO_VOTE;
+	cpas_vote.axi_vote.compressed_bw = ICP_TURBO_VOTE;
+	cpas_vote.axi_vote.uncompressed_bw = ICP_TURBO_VOTE;
+
+	rc = cam_cpas_start(core_info->cpas_handle,
+		&cpas_vote.ahb_vote, &cpas_vote.axi_vote);
+	if (rc < 0) {
+		pr_err("cpas start failed: %d\n", rc);
+		return rc;
+	}
+
+	rc = cam_ipe_enable_soc_resources(soc_info);
+	if (rc < 0) {
+		pr_err("soc enable is failed\n");
+		rc = cam_cpas_stop(core_info->cpas_handle);
+	}
+
+	return rc;
+}
+
+int cam_ipe_deinit_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *ipe_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_ipe_device_core_info *core_info = NULL;
+	int rc = 0;
+
+	if (!device_priv) {
+		pr_err("Invalid cam_dev_info\n");
+		return -EINVAL;
+	}
+
+	soc_info = &ipe_dev->soc_info;
+	core_info = (struct cam_ipe_device_core_info *)ipe_dev->core_info;
+	if ((!soc_info) || (!core_info)) {
+		pr_err("soc_info = %pK core_info = %pK\n", soc_info, core_info);
+		return -EINVAL;
+	}
+
+	rc = cam_ipe_disable_soc_resources(soc_info);
+	if (rc < 0)
+		pr_err("soc disable is failed\n");
+
+	rc = cam_cpas_stop(core_info->cpas_handle);
+	if (rc < 0)
+		pr_err("cpas stop is failed: %d\n", rc);
+
+	return rc;
+}
+
+int cam_ipe_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_hw_info *ipe_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_ipe_device_core_info *core_info = NULL;
+	struct cam_ipe_device_hw_info *hw_info = NULL;
+	int rc = 0;
+
+	if (!device_priv) {
+		pr_err("Invalid arguments\n");
+		return -EINVAL;
+	}
+
+	if (cmd_type >= CAM_ICP_IPE_CMD_MAX) {
+		pr_err("Invalid command : %x\n", cmd_type);
+		return -EINVAL;
+	}
+
+	soc_info = &ipe_dev->soc_info;
+	core_info = (struct cam_ipe_device_core_info *)ipe_dev->core_info;
+	hw_info = core_info->ipe_hw_info;
+
+	switch (cmd_type) {
+	case CAM_ICP_IPE_CMD_VOTE_CPAS: {
+		struct cam_icp_cpas_vote *cpas_vote = cmd_args;
+
+		if (!cmd_args)
+			return -EINVAL;
+
+		cam_ipe_caps_vote(core_info, cpas_vote);
+		break;
+	}
+
+	case CAM_ICP_IPE_CMD_CPAS_START: {
+		struct cam_icp_cpas_vote *cpas_vote = cmd_args;
+
+		if (!cmd_args)
+			return -EINVAL;
+
+		rc = cam_cpas_start(core_info->cpas_handle,
+			&cpas_vote->ahb_vote, &cpas_vote->axi_vote);
+		break;
+	}
+
+	case CAM_ICP_IPE_CMD_CPAS_STOP:
+		cam_cpas_stop(core_info->cpas_handle);
+		break;
+	default:
+		break;
+	}
+	return rc;
+}
+
+irqreturn_t cam_ipe_irq(int irq_num, void *data)
+{
+	return IRQ_HANDLED;
+}
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_core.h b/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_core.h
new file mode 100644
index 0000000..4818846
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_core.h
@@ -0,0 +1,39 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_IPE_CORE_H
+#define CAM_IPE_CORE_H
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+
+struct cam_ipe_device_hw_info {
+	uint32_t reserved;
+};
+
+struct cam_ipe_device_core_info {
+	struct cam_ipe_device_hw_info *ipe_hw_info;
+	uint32_t cpas_handle;
+};
+
+int cam_ipe_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size);
+int cam_ipe_deinit_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size);
+int cam_ipe_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size);
+irqreturn_t cam_ipe_irq(int irq_num, void *data);
+
+#endif /* CAM_IPE_CORE_H */
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_dev.c b/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_dev.c
new file mode 100644
index 0000000..0efb1de
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_dev.c
@@ -0,0 +1,176 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include <linux/timer.h>
+#include "ipe_core.h"
+#include "ipe_soc.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_io_util.h"
+#include "cam_icp_hw_intf.h"
+#include "cam_icp_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+struct cam_ipe_device_hw_info cam_ipe_hw_info = {
+	.reserved = 0,
+};
+EXPORT_SYMBOL(cam_ipe_hw_info);
+
+int cam_ipe_register_cpas(struct cam_hw_soc_info *soc_info,
+	struct cam_ipe_device_core_info *core_info,
+	uint32_t hw_idx)
+{
+	struct cam_cpas_register_params cpas_register_params;
+	int rc;
+
+	cpas_register_params.dev = &soc_info->pdev->dev;
+	memcpy(cpas_register_params.identifier, "ipe", sizeof("ipe"));
+	cpas_register_params.cam_cpas_client_cb = NULL;
+	cpas_register_params.cell_index = hw_idx;
+	cpas_register_params.userdata = NULL;
+
+	rc = cam_cpas_register_client(&cpas_register_params);
+	if (rc < 0) {
+		pr_err("cam_cpas_register_client is failed: %d\n", rc);
+		return rc;
+	}
+	core_info->cpas_handle = cpas_register_params.client_handle;
+
+	return rc;
+}
+
+int cam_ipe_probe(struct platform_device *pdev)
+{
+	struct cam_hw_info            *ipe_dev = NULL;
+	struct cam_hw_intf            *ipe_dev_intf = NULL;
+	const struct of_device_id         *match_dev = NULL;
+	struct cam_ipe_device_core_info   *core_info = NULL;
+	struct cam_ipe_device_hw_info     *hw_info = NULL;
+	int                                rc = 0;
+
+	ipe_dev_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!ipe_dev_intf)
+		return -ENOMEM;
+
+	of_property_read_u32(pdev->dev.of_node,
+		"cell-index", &ipe_dev_intf->hw_idx);
+
+	ipe_dev = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!ipe_dev) {
+		kfree(ipe_dev_intf);
+		return -ENOMEM;
+	}
+	ipe_dev->soc_info.pdev = pdev;
+	ipe_dev_intf->hw_priv = ipe_dev;
+	ipe_dev_intf->hw_ops.init = cam_ipe_init_hw;
+	ipe_dev_intf->hw_ops.deinit = cam_ipe_deinit_hw;
+	ipe_dev_intf->hw_ops.process_cmd = cam_ipe_process_cmd;
+	ipe_dev_intf->hw_type = CAM_ICP_DEV_IPE;
+
+	pr_debug("%s: type %d index %d\n", __func__,
+		ipe_dev_intf->hw_type,
+		ipe_dev_intf->hw_idx);
+
+	platform_set_drvdata(pdev, ipe_dev_intf);
+
+	ipe_dev->core_info = kzalloc(sizeof(struct cam_ipe_device_core_info),
+		GFP_KERNEL);
+	if (!ipe_dev->core_info) {
+		kfree(ipe_dev);
+		kfree(ipe_dev_intf);
+		return -ENOMEM;
+	}
+	core_info = (struct cam_ipe_device_core_info *)ipe_dev->core_info;
+
+	match_dev = of_match_device(pdev->dev.driver->of_match_table,
+		&pdev->dev);
+	if (!match_dev) {
+		pr_debug("%s: No ipe hardware info\n", __func__);
+		kfree(ipe_dev->core_info);
+		kfree(ipe_dev);
+		kfree(ipe_dev_intf);
+		rc = -EINVAL;
+		return rc;
+	}
+	hw_info = (struct cam_ipe_device_hw_info *)match_dev->data;
+	core_info->ipe_hw_info = hw_info;
+
+	rc = cam_ipe_init_soc_resources(&ipe_dev->soc_info, cam_ipe_irq,
+		ipe_dev);
+	if (rc < 0) {
+		pr_err("%s: failed to init_soc\n", __func__);
+		kfree(ipe_dev->core_info);
+		kfree(ipe_dev);
+		kfree(ipe_dev_intf);
+		return rc;
+	}
+
+	pr_debug("cam_ipe_init_soc_resources : %pK\n",
+		(void *)&ipe_dev->soc_info);
+	rc = cam_ipe_register_cpas(&ipe_dev->soc_info,
+		core_info, ipe_dev_intf->hw_idx);
+	if (rc < 0) {
+		kfree(ipe_dev->core_info);
+		kfree(ipe_dev);
+		kfree(ipe_dev_intf);
+		return rc;
+	}
+	ipe_dev->hw_state = CAM_HW_STATE_POWER_DOWN;
+	mutex_init(&ipe_dev->hw_mutex);
+	spin_lock_init(&ipe_dev->hw_lock);
+	init_completion(&ipe_dev->hw_complete);
+
+	pr_debug("%s: IPE%d probe successful\n", __func__,
+		ipe_dev_intf->hw_idx);
+
+	return rc;
+}
+
+static const struct of_device_id cam_ipe_dt_match[] = {
+	{
+		.compatible = "qcom,cam_ipe",
+		.data = &cam_ipe_hw_info,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, cam_ipe_dt_match);
+
+static struct platform_driver cam_ipe_driver = {
+	.probe = cam_ipe_probe,
+	.driver = {
+		.name = "cam_ipe",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_ipe_dt_match,
+	},
+};
+
+static int __init cam_ipe_init_module(void)
+{
+	return platform_driver_register(&cam_ipe_driver);
+}
+
+static void __exit cam_ipe_exit_module(void)
+{
+	platform_driver_unregister(&cam_ipe_driver);
+}
+
+module_init(cam_ipe_init_module);
+module_exit(cam_ipe_exit_module);
+MODULE_DESCRIPTION("CAM IPE driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_soc.c b/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_soc.c
new file mode 100644
index 0000000..527e716
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_soc.c
@@ -0,0 +1,87 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+#include <media/cam_defs.h>
+#include <media/cam_icp.h>
+#include "ipe_soc.h"
+#include "cam_soc_util.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+static int cam_ipe_get_dt_properties(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc < 0)
+		pr_err("get ipe dt prop failed\n");
+
+	return rc;
+}
+
+static int cam_ipe_request_platform_resource(
+	struct cam_hw_soc_info *soc_info,
+	irq_handler_t ipe_irq_handler, void *irq_data)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_request_platform_resource(soc_info, ipe_irq_handler,
+		irq_data);
+
+	return rc;
+}
+
+int cam_ipe_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t ipe_irq_handler, void *irq_data)
+{
+	int rc = 0;
+
+	rc = cam_ipe_get_dt_properties(soc_info);
+	if (rc < 0)
+		return rc;
+
+	rc = cam_ipe_request_platform_resource(soc_info, ipe_irq_handler,
+		irq_data);
+	if (rc < 0)
+		return rc;
+
+	return rc;
+}
+
+int cam_ipe_enable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true, false);
+	if (rc) {
+		pr_err("%s: enable platform failed\n", __func__);
+		return rc;
+	}
+
+	return rc;
+}
+
+int cam_ipe_disable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, false);
+	if (rc)
+		pr_err("%s: disable platform failed\n", __func__);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_soc.h b/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_soc.h
new file mode 100644
index 0000000..12ab444
--- /dev/null
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_soc.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_IPE_SOC_H
+#define CAM_IPE_SOC_H
+
+#include "cam_soc_util.h"
+
+int cam_ipe_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t ipe_irq_handler, void *irq_data);
+
+int cam_ipe_enable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+int cam_ipe_disable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+#endif /* CAM_IPE_SOC_H */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index c147b0b..980df9f 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -48,7 +48,7 @@
 #define XIN_WRITEBACK		1
 
 /* wait for at most 2 vsync for lowest refresh rate (24hz) */
-#define KOFF_TIMEOUT		(84)
+#define KOFF_TIMEOUT		(42 * 32)
 
 /* default stream buffer headroom in lines */
 #define DEFAULT_SBUF_HEADROOM	20
diff --git a/drivers/media/platform/msm/vidc/Kconfig b/drivers/media/platform/msm/vidc/Kconfig
index db12cae..d6297d4 100644
--- a/drivers/media/platform/msm/vidc/Kconfig
+++ b/drivers/media/platform/msm/vidc/Kconfig
@@ -7,5 +7,4 @@
 		depends on ARCH_QCOM && VIDEO_V4L2
 		select VIDEOBUF2_CORE
 
-source "drivers/media/platform/msm/vidc/vmem/Kconfig"
 source "drivers/media/platform/msm/vidc/governors/Kconfig"
diff --git a/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c b/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c
index 12b5b04..f7ce757 100644
--- a/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c
+++ b/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c
@@ -22,8 +22,7 @@
 
 enum governor_mode {
 	GOVERNOR_DDR,
-	GOVERNOR_VMEM,
-	GOVERNOR_VMEM_PLUS,
+	GOVERNOR_LLCC,
 };
 
 struct governor {
@@ -275,38 +274,6 @@
 	}
 }
 
-static unsigned long __calculate_vmem_plus_ab(struct vidc_bus_vote_data *d)
-{
-	unsigned long i = 0, vmem_plus = 0;
-
-	if (!d->imem_ab_tbl || !d->imem_ab_tbl_size) {
-		vmem_plus = 1; /* Vote for the min ab value */
-		goto exit;
-	}
-
-	/* Pick up vmem frequency based on venus core frequency */
-	for (i = 0; i < d->imem_ab_tbl_size; i++) {
-		if (d->imem_ab_tbl[i].core_freq == d->core_freq) {
-			vmem_plus = d->imem_ab_tbl[i].imem_ab;
-			break;
-		}
-	}
-
-	/*
-	 * Incase we get an unsupported freq throw a warning
-	 * and set ab to the minimum value.
-	 */
-	if (!vmem_plus) {
-		vmem_plus = 1;
-		dprintk(VIDC_WARN,
-			"could not calculate vmem ab value due to core freq mismatch\n");
-		WARN_ON(1);
-	}
-
-exit:
-	return vmem_plus;
-}
-
 static unsigned long __calculate_decoder(struct vidc_bus_vote_data *d,
 		enum governor_mode gm) {
 	/*
@@ -611,12 +578,9 @@
 	case GOVERNOR_DDR:
 		ret = kbps(fp_round(ddr.total));
 		break;
-	case GOVERNOR_VMEM:
+	case GOVERNOR_LLCC:
 		ret = kbps(fp_round(vmem.total));
 		break;
-	case GOVERNOR_VMEM_PLUS:
-		ret = __calculate_vmem_plus_ab(d);
-		break;
 	default:
 		dprintk(VIDC_ERR, "%s - Unknown governor\n", __func__);
 	}
@@ -1016,12 +980,9 @@
 	case GOVERNOR_DDR:
 		ret = kbps(fp_round(ddr.total));
 		break;
-	case GOVERNOR_VMEM:
+	case GOVERNOR_LLCC:
 		ret = kbps(fp_round(vmem.total));
 		break;
-	case GOVERNOR_VMEM_PLUS:
-		ret = __calculate_vmem_plus_ab(d);
-		break;
 	default:
 		dprintk(VIDC_ERR, "%s - Unknown governor\n", __func__);
 	}
@@ -1107,17 +1068,9 @@
 		},
 	},
 	{
-		.mode = GOVERNOR_VMEM,
+		.mode = GOVERNOR_LLCC,
 		.devfreq_gov = {
-			.name = "msm-vidc-vmem",
-			.get_target_freq = __get_target_freq,
-			.event_handler = __event_handler,
-		},
-	},
-	{
-		.mode = GOVERNOR_VMEM_PLUS,
-		.devfreq_gov = {
-			.name = "msm-vidc-vmem+",
+			.name = "msm-vidc-llcc",
 			.get_target_freq = __get_target_freq,
 			.event_handler = __event_handler,
 		},
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index 9331c94..a477340 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -332,37 +332,56 @@
 
 int create_pkt_cmd_sys_set_resource(
 		struct hfi_cmd_sys_set_resource_packet *pkt,
-		struct vidc_resource_hdr *resource_hdr,
-		void *resource_value)
+		struct vidc_resource_hdr *res_hdr,
+		void *res_value)
 {
 	int rc = 0;
+	u32 i = 0;
 
-	if (!pkt || !resource_hdr || !resource_value)
+	if (!pkt || !res_hdr || !res_value) {
+		dprintk(VIDC_ERR,
+			"Invalid params pkt %pK res_hdr %pK res_value %pK\n",
+				pkt, res_hdr, res_value);
 		return -EINVAL;
+	}
 
 	pkt->packet_type = HFI_CMD_SYS_SET_RESOURCE;
 	pkt->size = sizeof(struct hfi_cmd_sys_set_resource_packet);
-	pkt->resource_handle = hash32_ptr(resource_hdr->resource_handle);
+	pkt->resource_handle = hash32_ptr(res_hdr->resource_handle);
 
-	switch (resource_hdr->resource_id) {
-	case VIDC_RESOURCE_OCMEM:
-	case VIDC_RESOURCE_VMEM:
+	switch (res_hdr->resource_id) {
+	case VIDC_RESOURCE_SYSCACHE:
 	{
-		struct hfi_resource_ocmem *hfioc_mem =
-			(struct hfi_resource_ocmem *)
-			&pkt->rg_resource_data[0];
+		struct hfi_resource_syscache_info_type *res_sc_info =
+			(struct hfi_resource_syscache_info_type *) res_value;
+		struct hfi_resource_subcache_type *res_sc =
+			(struct hfi_resource_subcache_type *)
+				&(res_sc_info->rg_subcache_entries[0]);
 
-		phys_addr_t imem_addr = (phys_addr_t)resource_value;
+		struct hfi_resource_syscache_info_type *hfi_sc_info =
+			(struct hfi_resource_syscache_info_type *)
+				&pkt->rg_resource_data[0];
 
-		pkt->resource_type = HFI_RESOURCE_OCMEM;
-		pkt->size += sizeof(struct hfi_resource_ocmem) - sizeof(u32);
-		hfioc_mem->size = (u32)resource_hdr->size;
-		hfioc_mem->mem = imem_addr;
+		struct hfi_resource_subcache_type *hfi_sc =
+			(struct hfi_resource_subcache_type *)
+			&(hfi_sc_info->rg_subcache_entries[0]);
+
+		pkt->resource_type = HFI_RESOURCE_SYSCACHE;
+		hfi_sc_info->num_entries = res_sc_info->num_entries;
+
+		pkt->size += (sizeof(struct hfi_resource_subcache_type))
+				 * hfi_sc_info->num_entries;
+
+		for (i = 0; i < hfi_sc_info->num_entries; i++) {
+			hfi_sc[i] = res_sc[i];
+			dprintk(VIDC_DBG, "entry hfi#%d, sc_id %d, size %d\n",
+				i, hfi_sc[i].sc_id, hfi_sc[i].size);
+		}
 		break;
 	}
 	default:
-		dprintk(VIDC_ERR, "Invalid resource_id %d\n",
-					resource_hdr->resource_id);
+		dprintk(VIDC_ERR,
+			"Invalid resource_id %d\n", res_hdr->resource_id);
 		rc = -ENOTSUPP;
 	}
 
@@ -371,28 +390,35 @@
 
 int create_pkt_cmd_sys_release_resource(
 		struct hfi_cmd_sys_release_resource_packet *pkt,
-		struct vidc_resource_hdr *resource_hdr)
+		struct vidc_resource_hdr *res_hdr)
 {
 	int rc = 0;
 
-	if (!pkt)
+	if (!pkt || !res_hdr) {
+		dprintk(VIDC_ERR,
+			"Invalid params pkt %pK res_hdr %pK\n",
+				pkt, res_hdr);
 		return -EINVAL;
+	}
 
 	pkt->size = sizeof(struct hfi_cmd_sys_release_resource_packet);
 	pkt->packet_type = HFI_CMD_SYS_RELEASE_RESOURCE;
-	pkt->resource_handle = hash32_ptr(resource_hdr->resource_handle);
+	pkt->resource_handle = hash32_ptr(res_hdr->resource_handle);
 
-	switch (resource_hdr->resource_id) {
-	case VIDC_RESOURCE_OCMEM:
-	case VIDC_RESOURCE_VMEM:
-		pkt->resource_type = HFI_RESOURCE_OCMEM;
+	switch (res_hdr->resource_id) {
+	case VIDC_RESOURCE_SYSCACHE:
+		pkt->resource_type = HFI_RESOURCE_SYSCACHE;
 		break;
 	default:
-		dprintk(VIDC_ERR, "Invalid resource_id %d\n",
-					resource_hdr->resource_id);
+		dprintk(VIDC_ERR,
+			 "Invalid resource_id %d\n", res_hdr->resource_id);
 		rc = -ENOTSUPP;
 	}
 
+	dprintk(VIDC_DBG,
+		"rel_res: pkt_type 0x%x res_type 0x%x prepared\n",
+		pkt->packet_type, pkt->resource_type);
+
 	return rc;
 }
 
@@ -1837,6 +1863,14 @@
 		pkt->size += sizeof(u32) + sizeof(*work_mode);
 		break;
 	}
+	case HAL_PARAM_USE_SYS_CACHE:
+	{
+		create_pkt_enable(pkt->rg_property_data,
+			HFI_PROPERTY_PARAM_USE_SYS_CACHE,
+			(((struct hal_enable *) pdata)->enable));
+		pkt->size += sizeof(u32) * 2;
+		break;
+	}
 	/* FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET */
 	case HAL_CONFIG_BUFFER_REQUIREMENTS:
 	case HAL_CONFIG_PRIORITY:
diff --git a/drivers/media/platform/msm/vidc/msm_smem.c b/drivers/media/platform/msm/vidc/msm_smem.c
index 19a1e3f..3d3d567 100644
--- a/drivers/media/platform/msm/vidc/msm_smem.c
+++ b/drivers/media/platform/msm/vidc/msm_smem.c
@@ -94,9 +94,11 @@
 		trace_msm_smem_buffer_iommu_op_start("MAP", 0, 0,
 			align, *iova, *buffer_size);
 
-		/* Map a scatterlist into an SMMU */
-		rc = msm_dma_map_sg_lazy(cb->dev, table->sgl, table->nents,
-				DMA_BIDIRECTIONAL, buf);
+		/* Map a scatterlist into an SMMU with system cacheability */
+		rc = msm_dma_map_sg_attrs(cb->dev, table->sgl,
+			table->nents, DMA_BIDIRECTIONAL,
+			buf, DMA_ATTR_IOMMU_USE_UPSTREAM_HINT);
+
 		if (rc != table->nents) {
 			dprintk(VIDC_ERR,
 				"Mapping failed with rc(%d), expected rc(%d)\n",
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index c42d7aa..053d748 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -639,6 +639,7 @@
 int msm_vdec_inst_init(struct msm_vidc_inst *inst)
 {
 	int rc = 0;
+	struct msm_vidc_format *fmt = NULL;
 
 	if (!inst) {
 		dprintk(VIDC_ERR, "Invalid input = %pK\n", inst);
@@ -661,10 +662,31 @@
 	inst->bufq[CAPTURE_PORT].num_planes = 1;
 	inst->prop.fps = DEFAULT_FPS;
 	inst->clk_data.operating_rate = 0;
-	memcpy(&inst->fmts[OUTPUT_PORT], &vdec_formats[2],
+
+	/* By default, initialize CAPTURE port to UBWC YUV format */
+	fmt = msm_comm_get_pixel_fmt_fourcc(vdec_formats,
+		ARRAY_SIZE(vdec_formats), V4L2_PIX_FMT_NV12_UBWC,
+			CAPTURE_PORT);
+	if (!fmt || fmt->type != CAPTURE_PORT) {
+		dprintk(VIDC_ERR,
+			"vdec_formats corrupted\n");
+		return -EINVAL;
+	}
+	memcpy(&inst->fmts[fmt->type], fmt,
 			sizeof(struct msm_vidc_format));
-	memcpy(&inst->fmts[CAPTURE_PORT], &vdec_formats[0],
+
+	/* By default, initialize OUTPUT port to H264 decoder */
+	fmt = msm_comm_get_pixel_fmt_fourcc(vdec_formats,
+		ARRAY_SIZE(vdec_formats), V4L2_PIX_FMT_H264,
+			OUTPUT_PORT);
+	if (!fmt || fmt->type != OUTPUT_PORT) {
+		dprintk(VIDC_ERR,
+			"vdec_formats corrupted\n");
+		return -EINVAL;
+	}
+	memcpy(&inst->fmts[fmt->type], fmt,
 			sizeof(struct msm_vidc_format));
+
 	return rc;
 }
 
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index e3d52bf..8906027 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -2096,6 +2096,7 @@
 int msm_venc_inst_init(struct msm_vidc_inst *inst)
 {
 	int rc = 0;
+	struct msm_vidc_format *fmt = NULL;
 
 	if (!inst) {
 		dprintk(VIDC_ERR, "Invalid input = %pK\n", inst);
@@ -2120,10 +2121,30 @@
 	inst->bufq[CAPTURE_PORT].num_planes = 1;
 	inst->clk_data.operating_rate = 0;
 
-	memcpy(&inst->fmts[CAPTURE_PORT], &venc_formats[4],
+	/* By default, initialize OUTPUT port to UBWC YUV format */
+	fmt = msm_comm_get_pixel_fmt_fourcc(venc_formats,
+		ARRAY_SIZE(venc_formats), V4L2_PIX_FMT_NV12_UBWC,
+			OUTPUT_PORT);
+	if (!fmt || fmt->type != OUTPUT_PORT) {
+		dprintk(VIDC_ERR,
+			"venc_formats corrupted\n");
+		return -EINVAL;
+	}
+	memcpy(&inst->fmts[fmt->type], fmt,
 			sizeof(struct msm_vidc_format));
-	memcpy(&inst->fmts[OUTPUT_PORT], &venc_formats[0],
+
+	/* By default, initialize CAPTURE port to H264 encoder */
+	fmt = msm_comm_get_pixel_fmt_fourcc(venc_formats,
+		ARRAY_SIZE(venc_formats), V4L2_PIX_FMT_H264,
+			CAPTURE_PORT);
+	if (!fmt || fmt->type != CAPTURE_PORT) {
+		dprintk(VIDC_ERR,
+			"venc_formats corrupted\n");
+		return -EINVAL;
+	}
+	memcpy(&inst->fmts[fmt->type], fmt,
 			sizeof(struct msm_vidc_format));
+
 	return rc;
 }
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 1cab039..2289b23 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -909,7 +909,7 @@
 
 	switch (found_buf) {
 	case 0:
-		dprintk(VIDC_WARN,
+		dprintk(VIDC_DBG,
 			"%s: No buffer(type: %d) found for index %d\n",
 			__func__, buffer_type, buffer_index);
 		break;
@@ -1449,6 +1449,8 @@
 		}
 	}
 
+	msm_comm_set_use_sys_cache(inst);
+
 	/*
 	 * For seq_changed_insufficient, driver should set session_continue
 	 * to firmware after the following sequence
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index b80aa08..a52fe05 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -309,10 +309,10 @@
 		return freq;
 	}
 
-	dprintk(VIDC_PROF, "%s Inst %pK : Freq = %lu\n", __func__, inst, freq);
-
 	freq = max(vpp_cycles, vsp_cycles);
 
+	dprintk(VIDC_PROF, "%s Inst %pK : Freq = %lu\n", __func__, inst, freq);
+
 	return freq;
 }
 
@@ -968,6 +968,8 @@
 
 decision_done:
 	core_info.video_core_enable_mask = inst->clk_data.core_id;
+	dprintk(VIDC_DBG,
+		"Core Enable Mask %d\n", core_info.video_core_enable_mask);
 
 	rc = call_hfi_op(hdev, session_set_property,
 			(void *)inst->session,
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 4f53850..c0bbfbb 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -476,7 +476,7 @@
 	for (; idx < num_ctrls; idx++) {
 		struct v4l2_ctrl *ctrl = NULL;
 
-		if (1) {
+		if (IS_PRIV_CTRL(drv_ctrls[idx].id)) {
 			/*add private control*/
 			ctrl_cfg.def = drv_ctrls[idx].default_value;
 			ctrl_cfg.flags = 0;
@@ -5497,6 +5497,14 @@
 		return -EINVAL;
 	hdev = inst->core->device;
 	mutex_lock(&inst->lock);
+	if (inst->state >= MSM_VIDC_RELEASE_RESOURCES_DONE ||
+			inst->state < MSM_VIDC_START_DONE ||
+			inst->core->state == VIDC_CORE_INVALID) {
+		dprintk(VIDC_DBG,
+			"Inst %pK : Not in valid state to call %s\n",
+				inst, __func__);
+		goto sess_continue_fail;
+	}
 	if (inst->session_type == MSM_VIDC_DECODER && inst->in_reconfig) {
 		dprintk(VIDC_DBG, "send session_continue\n");
 		rc = call_hfi_op(hdev, session_continue,
@@ -5515,6 +5523,7 @@
 		dprintk(VIDC_ERR,
 				"session_continue called in wrong state for decoder");
 	}
+
 sess_continue_fail:
 	mutex_unlock(&inst->lock);
 	return rc;
@@ -5544,3 +5553,41 @@
 {
 	return VENUS_BUFFER_SIZE(COLOR_FMT_NV12_BPP10_UBWC, width, height);
 }
+
+void msm_comm_set_use_sys_cache(struct msm_vidc_inst *inst)
+{
+	struct hal_enable syscache_use;
+	int rc = 0;
+
+
+	if (!inst->core->resources.sys_cache_enabled)
+		goto exit;
+
+	syscache_use.enable = false;
+	inst->clk_data.use_sys_cache = false;
+
+	if (inst->flags & VIDC_REALTIME)
+		syscache_use.enable = true;
+
+	if (inst->flags & VIDC_THUMBNAIL)
+		syscache_use.enable = false;
+
+	dprintk(VIDC_DBG,
+		"set_use_sys_cache: enable = %d inst = %pK flags =%d\n",
+		syscache_use.enable, inst, inst->flags);
+	rc = msm_comm_try_set_prop(inst, HAL_PARAM_USE_SYS_CACHE,
+		&syscache_use);
+	if (rc) {
+		dprintk(VIDC_ERR, "set_use_sys_cache: failed!!\n");
+		inst->clk_data.use_sys_cache = false;
+		goto exit;
+	}
+
+	inst->clk_data.use_sys_cache = syscache_use.enable;
+
+	return;
+
+exit:
+	return;
+}
+
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.h b/drivers/media/platform/msm/vidc/msm_vidc_common.h
index 098063d..7534593 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.h
@@ -61,7 +61,7 @@
 struct hal_buffer_requirements *get_buff_req_buffer(
 			struct msm_vidc_inst *inst, u32 buffer_type);
 #define IS_PRIV_CTRL(idx) (\
-		(V4L2_CTRL_ID2CLASS(idx) == V4L2_CTRL_CLASS_MPEG) && \
+		(V4L2_CTRL_ID2WHICH(idx) == V4L2_CTRL_CLASS_MPEG) && \
 		V4L2_CTRL_DRIVER_PRIV(idx))
 void msm_comm_session_clean(struct msm_vidc_inst *inst);
 int msm_comm_kill_session(struct msm_vidc_inst *inst);
@@ -103,4 +103,5 @@
 u32 get_frame_size_rgba(int plane, u32 height, u32 width);
 u32 get_frame_size_nv21(int plane, u32 height, u32 width);
 u32 get_frame_size_tp10_ubwc(int plane, u32 height, u32 width);
+void msm_comm_set_use_sys_cache(struct msm_vidc_inst *inst);
 #endif
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index 37bccbd..17c3045 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -224,6 +224,7 @@
 	u32 core_id;
 	enum hal_work_mode work_mode;
 	bool low_latency_mode;
+	bool use_sys_cache;
 };
 
 struct profile_data {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
index 0a6de41..5cf4628 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
@@ -59,19 +59,6 @@
 	return 0;
 }
 
-static inline enum imem_type read_imem_type(struct platform_device *pdev)
-{
-	bool is_compatible(char *compat)
-	{
-		return !!of_find_compatible_node(NULL, NULL, compat);
-	}
-
-	return is_compatible("qcom,msm-ocmem") ? IMEM_OCMEM :
-		is_compatible("qcom,msm-vmem") ? IMEM_VMEM :
-						IMEM_NONE;
-
-}
-
 static inline void msm_vidc_free_allowed_clocks_table(
 		struct msm_vidc_platform_resources *res)
 {
@@ -90,12 +77,6 @@
 	res->pf_ver_tbl = NULL;
 }
 
-static inline void msm_vidc_free_imem_ab_table(
-		struct msm_vidc_platform_resources *res)
-{
-	res->imem_ab_tbl = NULL;
-}
-
 static inline void msm_vidc_free_reg_table(
 			struct msm_vidc_platform_resources *res)
 {
@@ -262,43 +243,48 @@
 	return rc;
 }
 
-static int msm_vidc_load_imem_ab_table(struct msm_vidc_platform_resources *res)
+static int msm_vidc_load_subcache_info(struct msm_vidc_platform_resources *res)
 {
-	int num_elements = 0;
+	int rc = 0, num_subcaches = 0, c;
 	struct platform_device *pdev = res->pdev;
+	struct subcache_set *subcaches = &res->subcache_set;
 
-	if (!of_find_property(pdev->dev.of_node, "qcom,imem-ab-tbl", NULL)) {
-		/* optional property */
-		dprintk(VIDC_DBG, "qcom,imem-freq-tbl not found\n");
-		return 0;
+	num_subcaches = of_property_count_strings(pdev->dev.of_node,
+		"cache-slice-names");
+	if (num_subcaches <= 0) {
+		dprintk(VIDC_DBG, "No subcaches found\n");
+		goto err_load_subcache_table_fail;
 	}
 
-	num_elements = get_u32_array_num_elements(pdev->dev.of_node,
-			"qcom,imem-ab-tbl");
-	num_elements /= (sizeof(*res->imem_ab_tbl) / sizeof(u32));
-	if (!num_elements) {
-		dprintk(VIDC_ERR, "no elements in imem ab table\n");
-		return -EINVAL;
+	subcaches->subcache_tbl = devm_kzalloc(&pdev->dev,
+		sizeof(*subcaches->subcache_tbl) * num_subcaches, GFP_KERNEL);
+	if (!subcaches->subcache_tbl) {
+		dprintk(VIDC_ERR,
+			"Failed to allocate memory for subcache tbl\n");
+		rc = -ENOMEM;
+		goto err_load_subcache_table_fail;
 	}
 
-	res->imem_ab_tbl = devm_kzalloc(&pdev->dev, num_elements *
-			sizeof(*res->imem_ab_tbl), GFP_KERNEL);
-	if (!res->imem_ab_tbl) {
-		dprintk(VIDC_ERR, "Failed to alloc imem_ab_tbl\n");
-		return -ENOMEM;
+	subcaches->count = num_subcaches;
+	dprintk(VIDC_DBG, "Found %d subcaches\n", num_subcaches);
+
+	for (c = 0; c < num_subcaches; ++c) {
+		struct subcache_info *vsc = &res->subcache_set.subcache_tbl[c];
+
+		of_property_read_string_index(pdev->dev.of_node,
+			"cache-slice-names", c, &vsc->name);
 	}
 
-	if (of_property_read_u32_array(pdev->dev.of_node,
-		"qcom,imem-ab-tbl", (u32 *)res->imem_ab_tbl,
-		num_elements * sizeof(*res->imem_ab_tbl) / sizeof(u32))) {
-		dprintk(VIDC_ERR, "Failed to read imem_ab_tbl\n");
-		msm_vidc_free_imem_ab_table(res);
-		return -EINVAL;
-	}
-
-	res->imem_ab_tbl_size = num_elements;
+	res->sys_cache_enabled = true;
 
 	return 0;
+
+err_load_subcache_table_fail:
+	res->sys_cache_enabled = false;
+	subcaches->count = 0;
+	subcaches->subcache_tbl = NULL;
+
+	return rc;
 }
 
 /**
@@ -856,10 +842,6 @@
 	kres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	res->irq = kres ? kres->start : -1;
 
-	of_property_read_u32(pdev->dev.of_node,
-			"qcom,imem-size", &res->imem_size);
-	res->imem_type = read_imem_type(pdev);
-
 	res->sys_idle_indicator = of_property_read_bool(pdev->dev.of_node,
 			"qcom,enable-idle-indicator");
 
@@ -884,9 +866,9 @@
 	if (rc)
 		dprintk(VIDC_ERR, "Failed to load pf version table: %d\n", rc);
 
-	rc = msm_vidc_load_imem_ab_table(res);
+	rc = msm_vidc_load_subcache_info(res);
 	if (rc)
-		dprintk(VIDC_WARN, "Failed to load freq table: %d\n", rc);
+		dprintk(VIDC_WARN, "Failed to load subcache info: %d\n", rc);
 
 	rc = msm_vidc_load_qdss_table(res);
 	if (rc)
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_resources.h b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
index 20b0ffc..d76985e 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_resources.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
@@ -17,6 +17,8 @@
 #include <linux/devfreq.h>
 #include <linux/platform_device.h>
 #include <media/msm_vidc.h>
+#include <linux/soc/qcom/llcc-qcom.h>
+
 #define MAX_BUFFER_TYPES 32
 
 struct platform_version_table {
@@ -36,11 +38,6 @@
 	u32 fps;
 };
 
-struct imem_ab_table {
-	u32 core_freq;
-	u32 imem_ab;
-};
-
 struct reg_value_pair {
 	u32 reg;
 	u32 value;
@@ -122,13 +119,6 @@
 	u32 count;
 };
 
-enum imem_type {
-	IMEM_NONE,
-	IMEM_OCMEM,
-	IMEM_VMEM,
-	IMEM_MAX,
-};
-
 struct allowed_clock_rates_table {
 	u32 clock_rate;
 };
@@ -145,6 +135,18 @@
 	u32 count;
 };
 
+struct subcache_info {
+	const char *name;
+	bool isactive;
+	bool isset;
+	struct llcc_slice_desc *subcache;
+};
+
+struct subcache_set {
+	struct subcache_info *subcache_tbl;
+	u32 count;
+};
+
 struct msm_vidc_platform_resources {
 	phys_addr_t firmware_base;
 	phys_addr_t register_base;
@@ -157,13 +159,11 @@
 	struct dcvs_table *dcvs_tbl;
 	uint32_t dcvs_tbl_size;
 	struct dcvs_limit *dcvs_limit;
-	struct imem_ab_table *imem_ab_tbl;
-	u32 imem_ab_tbl_size;
+	bool sys_cache_enabled;
+	struct subcache_set subcache_set;
 	struct reg_set reg_set;
 	struct addr_set qdss_addr_set;
 	struct buffer_usage_set buffer_usage_set;
-	uint32_t imem_size;
-	enum imem_type imem_type;
 	uint32_t max_load;
 	uint32_t max_hq_mbs_per_frame;
 	uint32_t max_hq_fps;
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 1a1078d..5a8dd26 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -27,6 +27,8 @@
 #include <linux/regulator/consumer.h>
 #include <linux/slab.h>
 #include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/soc/qcom/llcc-qcom.h>
 #include <soc/qcom/scm.h>
 #include <soc/qcom/smem.h>
 #include <soc/qcom/subsystem_restart.h>
@@ -72,7 +74,6 @@
 const struct msm_vidc_gov_data DEFAULT_BUS_VOTE = {
 	.data = NULL,
 	.data_count = 0,
-	.imem_size = 0,
 };
 
 const int max_packets = 1000;
@@ -95,6 +96,8 @@
 static int __load_fw(struct venus_hfi_device *device);
 static void __unload_fw(struct venus_hfi_device *device);
 static int __tzbsp_set_video_state(enum tzbsp_video_state state);
+static int __enable_subcaches(struct venus_hfi_device *device);
+static int __disable_subcaches(struct venus_hfi_device *device);
 
 
 /**
@@ -875,7 +878,6 @@
 	kfree(device->bus_vote.data);
 	device->bus_vote.data = new_data;
 	device->bus_vote.data_count = num_data;
-	device->bus_vote.imem_size = device->res->imem_size;
 
 	venus_hfi_for_each_bus(device, bus) {
 		if (bus && bus->devfreq) {
@@ -939,140 +941,33 @@
 	return rc;
 }
 
-static int __alloc_imem(struct venus_hfi_device *device, unsigned long size)
+static int __core_release_resource(struct venus_hfi_device *device,
+		struct vidc_resource_hdr *resource_hdr)
 {
-	struct imem *imem = NULL;
+	struct hfi_cmd_sys_release_resource_packet *pkt;
+	u8 packet[VIDC_IFACEQ_VAR_SMALL_PKT_SIZE];
 	int rc = 0;
 
-	if (!device)
-		return -EINVAL;
-
-	imem = &device->resources.imem;
-	if (imem->type) {
-		dprintk(VIDC_ERR, "IMEM of type %d already allocated\n",
-				imem->type);
-		return -ENOMEM;
-	}
-
-	switch (device->res->imem_type) {
-	case IMEM_VMEM:
-	{
-		phys_addr_t vmem_buffer = 0;
-
-		rc = vmem_allocate(size, &vmem_buffer);
-		if (rc) {
-			if (rc == -ENOTSUPP) {
-				dprintk(VIDC_DBG,
-					"Target does not support vmem\n");
-				rc = 0;
-			}
-			goto imem_alloc_failed;
-		} else if (!vmem_buffer) {
-			rc = -ENOMEM;
-			goto imem_alloc_failed;
-		}
-
-		imem->vmem = vmem_buffer;
-		break;
-	}
-	case IMEM_NONE:
-		rc = 0;
-		break;
-
-	default:
-		rc = -ENOTSUPP;
-		goto imem_alloc_failed;
-	}
-
-	imem->type = device->res->imem_type;
-	dprintk(VIDC_DBG, "Allocated %ld bytes of IMEM of type %d\n", size,
-			imem->type);
-	return 0;
-imem_alloc_failed:
-	imem->type = IMEM_NONE;
-	return rc;
-}
-
-static int __free_imem(struct venus_hfi_device *device)
-{
-	struct imem *imem = NULL;
-	int rc = 0;
-
-	if (!device)
-		return -EINVAL;
-
-	imem = &device->resources.imem;
-	switch (imem->type) {
-	case IMEM_NONE:
-		/* Follow the semantics of free(NULL), which is a no-op. */
-		break;
-	case IMEM_VMEM:
-		vmem_free(imem->vmem);
-		break;
-	default:
-		rc = -ENOTSUPP;
-		goto imem_free_failed;
-	}
-
-	imem->type = IMEM_NONE;
-	return 0;
-
-imem_free_failed:
-	return rc;
-}
-
-static int __set_imem(struct venus_hfi_device *device, struct imem *imem)
-{
-	struct vidc_resource_hdr rhdr;
-	phys_addr_t addr = 0;
-	int rc = 0;
-
-	if (!device || !device->res || !imem) {
-		dprintk(VIDC_ERR, "Invalid params, core: %pK, imem: %pK\n",
-			device, imem);
+	if (!device || !resource_hdr) {
+		dprintk(VIDC_ERR, "release_res: Invalid Params\n");
 		return -EINVAL;
 	}
 
-	rhdr.resource_handle = imem; /* cookie */
-	rhdr.size = device->res->imem_size;
-	rhdr.resource_id = VIDC_RESOURCE_NONE;
+	pkt = (struct hfi_cmd_sys_release_resource_packet *) packet;
 
-	switch (imem->type) {
-	case IMEM_VMEM:
-		rhdr.resource_id = VIDC_RESOURCE_VMEM;
-		addr = imem->vmem;
-		break;
-	case IMEM_NONE:
-		dprintk(VIDC_DBG, "%s Target does not support IMEM", __func__);
-		rc = 0;
-		goto imem_set_failed;
-	default:
-		dprintk(VIDC_ERR, "IMEM of type %d unsupported\n", imem->type);
-		rc = -ENOTSUPP;
-		goto imem_set_failed;
-	}
+	rc = call_hfi_pkt_op(device, sys_release_resource,
+			pkt, resource_hdr);
 
-	MSM_VIDC_ERROR(!addr);
-
-	rc = __core_set_resource(device, &rhdr, (void *)addr);
 	if (rc) {
-		dprintk(VIDC_ERR, "Failed to set IMEM on driver\n");
-		goto imem_set_failed;
+		dprintk(VIDC_ERR, "release_res: failed to create packet\n");
+		goto err_create_pkt;
 	}
 
-	dprintk(VIDC_DBG,
-			"Managed to set IMEM buffer of type %d sized %d bytes at %pa\n",
-			rhdr.resource_id, rhdr.size, &addr);
+	rc = __iface_cmdq_write(device, pkt);
+	if (rc)
+		rc = -ENOTEMPTY;
 
-	rc = __vote_buses(device, device->bus_vote.data,
-			device->bus_vote.data_count);
-	if (rc) {
-		dprintk(VIDC_ERR,
-				"Failed to vote for buses after setting imem: %d\n",
-				rc);
-	}
-
-imem_set_failed:
+err_create_pkt:
 	return rc;
 }
 
@@ -1931,6 +1826,12 @@
 	if (rc || __iface_cmdq_write(dev, &version_pkt))
 		dprintk(VIDC_WARN, "Failed to send image version pkt to f/w\n");
 
+	rc = __enable_subcaches(device);
+	if (rc) {
+		dprintk(VIDC_WARN,
+			"Failed to enable subcaches, err = %d\n", rc);
+	}
+
 	if (dev->res->pm_qos_latency_us) {
 #ifdef CONFIG_SMP
 		dev->qos.type = PM_QOS_REQ_AFFINE_IRQ;
@@ -2848,6 +2749,8 @@
 		return;
 	}
 
+	dprintk(VIDC_PROF,
+		"Entering venus_hfi_pm_handler\n");
 	/*
 	 * It is ok to check this variable outside the lock since
 	 * it is being updated in this context only
@@ -3094,12 +2997,7 @@
 			break;
 		case HAL_SYS_INIT_DONE:
 			dprintk(VIDC_DBG, "Received SYS_INIT_DONE\n");
-			/* Video driver intentionally does not unset
-			 * IMEM on venus to simplify power collapse.
-			 */
-			if (__set_imem(device, &device->resources.imem))
-				dprintk(VIDC_WARN,
-				"Failed to set IMEM. Performance will be impacted\n");
+
 			sys_init_done.capabilities =
 				device->sys_init_capabilities;
 			hfi_process_sys_init_done_prop_read(
@@ -3584,6 +3482,68 @@
 	return rc;
 }
 
+static void __deinit_subcaches(struct venus_hfi_device *device)
+{
+	struct subcache_info *sinfo = NULL;
+
+	if (!device) {
+		dprintk(VIDC_ERR, "deinit_subcaches: invalid device %pK\n",
+			device);
+		goto exit;
+	}
+
+	if (!device->res->sys_cache_enabled)
+		goto exit;
+
+	venus_hfi_for_each_subcache_reverse(device, sinfo) {
+		if (sinfo->subcache) {
+			dprintk(VIDC_DBG, "deinit_subcaches: %s\n",
+				sinfo->name);
+			llcc_slice_putd(sinfo->subcache);
+			sinfo->subcache = NULL;
+		}
+	}
+
+exit:
+	return;
+}
+
+static int __init_subcaches(struct venus_hfi_device *device)
+{
+	int rc = 0;
+	struct subcache_info *sinfo = NULL;
+
+	if (!device) {
+		dprintk(VIDC_ERR, "init_subcaches: invalid device %pK\n",
+			device);
+		return -EINVAL;
+	}
+
+	if (!device->res->sys_cache_enabled)
+		return 0;
+
+	venus_hfi_for_each_subcache(device, sinfo) {
+		sinfo->subcache = llcc_slice_getd(&device->res->pdev->dev,
+			sinfo->name);
+		if (IS_ERR_OR_NULL(sinfo->subcache)) {
+			rc = PTR_ERR(sinfo->subcache) ? : -EBADHANDLE;
+			dprintk(VIDC_ERR,
+				 "init_subcaches: invalid subcache: %s rc %d\n",
+				sinfo->name, rc);
+			sinfo->subcache = NULL;
+			goto err_subcache_get;
+		}
+		dprintk(VIDC_DBG, "init_subcaches: %s\n",
+			sinfo->name);
+	}
+
+	return 0;
+
+err_subcache_get:
+	__deinit_subcaches(device);
+	return rc;
+}
+
 static int __init_resources(struct venus_hfi_device *device,
 				struct msm_vidc_platform_resources *res)
 {
@@ -3608,6 +3568,10 @@
 		goto err_init_bus;
 	}
 
+	rc = __init_subcaches(device);
+	if (rc)
+		dprintk(VIDC_WARN, "Failed to init subcaches: %d\n", rc);
+
 	device->sys_init_capabilities =
 		kzalloc(sizeof(struct msm_vidc_capability)
 		* VIDC_MAX_SESSIONS, GFP_TEMPORARY);
@@ -3623,6 +3587,7 @@
 
 static void __deinit_resources(struct venus_hfi_device *device)
 {
+	__deinit_subcaches(device);
 	__deinit_bus(device);
 	__deinit_clocks(device);
 	__deinit_regulators(device);
@@ -3789,6 +3754,132 @@
 	return rc;
 }
 
+static int __enable_subcaches(struct venus_hfi_device *device)
+{
+	int rc = 0;
+	u32 c = 0;
+	struct subcache_info *sinfo;
+	u32 resource[VIDC_MAX_SUBCACHE_SIZE];
+	struct hfi_resource_syscache_info_type *sc_res_info;
+	struct hfi_resource_subcache_type *sc_res;
+	struct vidc_resource_hdr rhdr;
+
+	if (!device->res->sys_cache_enabled)
+		return 0;
+
+	memset((void *)resource, 0x0, (sizeof(u32) * VIDC_MAX_SUBCACHE_SIZE));
+
+	sc_res_info = (struct hfi_resource_syscache_info_type *)resource;
+	sc_res = &(sc_res_info->rg_subcache_entries[0]);
+
+	/* Activate subcaches */
+	venus_hfi_for_each_subcache(device, sinfo) {
+		rc = llcc_slice_activate(sinfo->subcache);
+		if (rc) {
+			dprintk(VIDC_ERR, "Failed to activate %s: %d\n",
+				sinfo->name, rc);
+			continue;
+		}
+		sinfo->isactive = true;
+
+		/* Update the entry */
+		sc_res[c].size = sinfo->subcache->llcc_slice_size;
+		sc_res[c].sc_id = sinfo->subcache->llcc_slice_id;
+		dprintk(VIDC_DBG, "Activate subcache %s\n", sinfo->name);
+		c++;
+	}
+
+	/* Set resource to Venus for activated subcaches */
+	if (c) {
+		dprintk(VIDC_DBG, "Setting Subcaches\n");
+
+		rhdr.resource_handle = sc_res_info; /* cookie */
+		rhdr.resource_id = VIDC_RESOURCE_SYSCACHE;
+
+		sc_res_info->num_entries = c;
+
+		rc = __core_set_resource(device, &rhdr, (void *)sc_res_info);
+		if (rc) {
+			dprintk(VIDC_ERR, "Failed to set subcaches %d\n", rc);
+			goto err_fail_set_subcaches;
+		}
+	}
+
+	venus_hfi_for_each_subcache(device, sinfo) {
+		if (sinfo->isactive == true)
+			sinfo->isset = true;
+	}
+
+	dprintk(VIDC_DBG, "Activated & Set Subcaches to Venus\n");
+
+	return 0;
+
+err_fail_set_subcaches:
+	__disable_subcaches(device);
+
+	return rc;
+}
+
+static int __disable_subcaches(struct venus_hfi_device *device)
+{
+	struct subcache_info *sinfo;
+	int rc = 0;
+	u32 c = 0;
+	u32 resource[VIDC_MAX_SUBCACHE_SIZE];
+	struct hfi_resource_syscache_info_type *sc_res_info;
+	struct hfi_resource_subcache_type *sc_res;
+	struct vidc_resource_hdr rhdr;
+
+	if (!device->res->sys_cache_enabled)
+		return 0;
+
+	dprintk(VIDC_DBG, "Disabling Subcaches\n");
+
+	memset((void *)resource, 0x0, (sizeof(u32) * VIDC_MAX_SUBCACHE_SIZE));
+
+	sc_res_info = (struct hfi_resource_syscache_info_type *)resource;
+	sc_res = &(sc_res_info->rg_subcache_entries[0]);
+
+	/* Release resource command to Venus */
+	venus_hfi_for_each_subcache_reverse(device, sinfo) {
+		if (sinfo->isset == true) {
+			/* Update the entry */
+			sc_res[c].size = sinfo->subcache->llcc_slice_size;
+			sc_res[c].sc_id = sinfo->subcache->llcc_slice_id;
+			c++;
+			sinfo->isset = false;
+		}
+	}
+
+	if (c > 0) {
+		rhdr.resource_handle = sc_res_info; /* cookie */
+		rhdr.resource_id = VIDC_RESOURCE_SYSCACHE;
+
+		rc = __core_release_resource(device, &rhdr);
+		if (rc)
+			dprintk(VIDC_ERR, "Failed to release subcaches\n");
+
+		dprintk(VIDC_DBG, "Release %d subcaches\n", c);
+	}
+
+	/* De-activate subcaches */
+	venus_hfi_for_each_subcache_reverse(device, sinfo) {
+		if (sinfo->isactive == true) {
+			dprintk(VIDC_DBG, "De-activate subcache %s\n",
+				sinfo->name);
+			rc = llcc_slice_deactivate(sinfo->subcache);
+			if (rc) {
+				dprintk(VIDC_ERR,
+					"Failed to de-activate %s: %d\n",
+					sinfo->name, rc);
+			}
+			sinfo->isactive = false;
+		}
+	}
+
+	return rc;
+}
+
 static int __venus_power_on(struct venus_hfi_device *device)
 {
 	int rc = 0;
@@ -3805,12 +3896,6 @@
 		goto fail_vote_buses;
 	}
 
-	rc = __alloc_imem(device, device->res->imem_size);
-	if (rc) {
-		dprintk(VIDC_ERR, "Failed to allocate IMEM\n");
-		goto fail_alloc_imem;
-	}
-
 	rc = __enable_regulators(device);
 	if (rc) {
 		dprintk(VIDC_ERR, "Failed to enable GDSC, err = %d\n", rc);
@@ -3855,8 +3940,6 @@
 fail_enable_clks:
 	__disable_regulators(device);
 fail_enable_gdsc:
-	__free_imem(device);
-fail_alloc_imem:
 	__unvote_buses(device);
 fail_vote_buses:
 	device->power_enabled = false;
@@ -3876,8 +3959,6 @@
 	if (__disable_regulators(device))
 		dprintk(VIDC_WARN, "Failed to disable regulators\n");
 
-	__free_imem(device);
-
 	if (__unvote_buses(device))
 		dprintk(VIDC_WARN, "Failed to unvote for buses\n");
 	device->power_enabled = false;
@@ -3897,6 +3978,9 @@
 
 	dprintk(VIDC_PROF, "Entering power collapse\n");
 
+	if (__disable_subcaches(device))
+		dprintk(VIDC_ERR, "Failed to disable subcaches\n");
+
 	if (device->res->pm_qos_latency_us &&
 		pm_qos_request_active(&device->qos))
 		pm_qos_remove_request(&device->qos);
@@ -3966,6 +4050,15 @@
 		pm_qos_add_request(&device->qos, PM_QOS_CPU_DMA_LATENCY,
 				device->res->pm_qos_latency_us);
 	}
+
+	__sys_set_debug(device, msm_vidc_fw_debug);
+
+	rc = __enable_subcaches(device);
+	if (rc) {
+		dprintk(VIDC_WARN,
+			"Failed to enable subcaches, err = %d\n", rc);
+	}
+
 	dprintk(VIDC_PROF, "Resumed from power collapse\n");
 exit:
 	device->skip_pc_count = 0;
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.h b/drivers/media/platform/msm/vidc/venus_hfi.h
index 76ede70..925918c 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.h
+++ b/drivers/media/platform/msm/vidc/venus_hfi.h
@@ -19,7 +19,6 @@
 #include <linux/platform_device.h>
 #include <linux/pm_qos.h>
 #include <linux/spinlock.h>
-#include "vmem/vmem.h"
 #include "vidc_hfi_api.h"
 #include "vidc_hfi_helper.h"
 #include "vidc_hfi_api.h"
@@ -48,6 +47,9 @@
 
 #define VIDC_MAX_NAME_LENGTH 64
 #define VIDC_MAX_PC_SKIP_COUNT 10
+#define VIDC_MAX_SUBCACHES 4
+#define VIDC_MAX_SUBCACHE_SIZE 52
+
 struct hfi_queue_table_header {
 	u32 qtbl_version;
 	u32 qtbl_size;
@@ -198,6 +200,11 @@
 #define venus_hfi_for_each_bus_reverse(__device, __binfo) \
 	venus_hfi_for_each_thing_reverse(__device, __binfo, bus)
 
+/* Subcache set helpers */
+#define venus_hfi_for_each_subcache(__device, __sinfo) \
+	venus_hfi_for_each_thing(__device, __sinfo, subcache)
+#define venus_hfi_for_each_subcache_reverse(__device, __sinfo) \
+	venus_hfi_for_each_thing_reverse(__device, __sinfo, subcache)
 
 /* Internal data used in vidc_hal not exposed to msm_vidc*/
 struct hal_data {
@@ -207,16 +214,8 @@
 	u32 register_size;
 };
 
-struct imem {
-	enum imem_type type;
-	union {
-		phys_addr_t vmem;
-	};
-};
-
 struct venus_resources {
 	struct msm_vidc_fw fw;
-	struct imem imem;
 };
 
 enum venus_hfi_state {
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index 537a1c6..bcc29c0 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -224,6 +224,7 @@
 	HAL_PARAM_VIDEO_CORES_USAGE,
 	HAL_PARAM_VIDEO_WORK_MODE,
 	HAL_PARAM_SECURE,
+	HAL_PARAM_USE_SYS_CACHE,
 };
 
 enum hal_domain {
@@ -870,15 +871,13 @@
 
 enum vidc_resource_id {
 	VIDC_RESOURCE_NONE,
-	VIDC_RESOURCE_OCMEM,
-	VIDC_RESOURCE_VMEM,
+	VIDC_RESOURCE_SYSCACHE,
 	VIDC_UNUSED_RESOURCE = 0x10000000,
 };
 
 struct vidc_resource_hdr {
 	enum vidc_resource_id resource_id;
 	void *resource_handle;
-	u32 size;
 };
 
 struct vidc_buffer_addr_info {
@@ -1293,7 +1292,6 @@
 struct msm_vidc_gov_data {
 	struct vidc_bus_vote_data *data;
 	u32 data_count;
-	int imem_size;
 };
 
 enum msm_vidc_power_mode {
@@ -1309,9 +1307,6 @@
 	int num_formats; /* 1 = DPB-OPB unified; 2 = split */
 	int height, width, fps;
 	enum msm_vidc_power_mode power_mode;
-	struct imem_ab_table *imem_ab_tbl;
-	u32 imem_ab_tbl_size;
-	unsigned long core_freq;
 };
 
 struct vidc_clk_scale_data {
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index 2dd25f3..fc638f0 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -220,6 +220,8 @@
 	(HFI_PROPERTY_PARAM_COMMON_START + 0x010)
 #define  HFI_PROPERTY_PARAM_SECURE_SESSION		\
 	(HFI_PROPERTY_PARAM_COMMON_START + 0x011)
+#define  HFI_PROPERTY_PARAM_USE_SYS_CACHE				\
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x012)
 #define  HFI_PROPERTY_PARAM_WORK_MODE                       \
 	(HFI_PROPERTY_PARAM_COMMON_START + 0x015)
 
@@ -456,7 +458,7 @@
 
 #define HFI_INTRA_REFRESH_NONE				(HFI_COMMON_BASE + 0x1)
 #define HFI_INTRA_REFRESH_CYCLIC			(HFI_COMMON_BASE + 0x2)
-#define HFI_INTRA_REFRESH_RANDOM			(HFI_COMMON_BASE + 0x3)
+#define HFI_INTRA_REFRESH_RANDOM			(HFI_COMMON_BASE + 0x5)
 
 struct hfi_intra_refresh {
 	u32 mode;
@@ -718,23 +720,16 @@
 	u32 flip;
 };
 
-#define HFI_RESOURCE_OCMEM 0x00000001
+#define HFI_RESOURCE_SYSCACHE 0x00000002
 
-struct hfi_resource_ocmem {
+struct hfi_resource_subcache_type {
 	u32 size;
-	u32 mem;
+	u32 sc_id;
 };
 
-struct hfi_resource_ocmem_requirement {
-	u32 session_domain;
-	u32 width;
-	u32 height;
-	u32 size;
-};
-
-struct hfi_resource_ocmem_requirement_info {
+struct hfi_resource_syscache_info_type {
 	u32 num_entries;
-	struct hfi_resource_ocmem_requirement rg_requirements[1];
+	struct hfi_resource_subcache_type rg_subcache_entries[1];
 };
 
 struct hfi_property_sys_image_version_info_type {
diff --git a/drivers/media/platform/msm/vidc/vmem/Kconfig b/drivers/media/platform/msm/vidc/vmem/Kconfig
deleted file mode 100644
index 99260a9..0000000
--- a/drivers/media/platform/msm/vidc/vmem/Kconfig
+++ /dev/null
@@ -1,3 +0,0 @@
-menuconfig MSM_VIDC_VMEM
-	tristate "Qualcomm Technologies, Inc. MSM VMEM driver"
-	depends on ARCH_QCOM && MSM_VIDC_V4L2
diff --git a/drivers/media/platform/msm/vidc/vmem/Makefile b/drivers/media/platform/msm/vidc/vmem/Makefile
deleted file mode 100644
index a56ad95..0000000
--- a/drivers/media/platform/msm/vidc/vmem/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-ccflags-y += -I$(srctree)/drivers/media/platform/msm/vidc/
-ccflags-y += -I$(srctree)/drivers/media/platform/msm/vidc/vmem/
-
-msm-vidc-vmem-objs := vmem.o \
-                      vmem_debugfs.o
-
-obj-$(CONFIG_MSM_VIDC_VMEM) := msm-vidc-vmem.o
diff --git a/drivers/media/platform/msm/vidc/vmem/vmem.c b/drivers/media/platform/msm/vidc/vmem/vmem.c
deleted file mode 100644
index c75f02d..0000000
--- a/drivers/media/platform/msm/vidc/vmem/vmem.c
+++ /dev/null
@@ -1,738 +0,0 @@
-/*
- * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/bitops.h>
-#include <linux/clk.h>
-#include <linux/clk/qcom.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/msm-bus.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/regulator/consumer.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
-#include "vmem.h"
-#include "vmem_debugfs.h"
-
-/* Registers */
-#define OCIMEM_BASE(v)               ((uint8_t *)(v)->reg.base)
-#define OCIMEM_HW_VERSION(v)         (OCIMEM_BASE(v) + 0x00)
-#define OCIMEM_HW_PROFILE(v)         (OCIMEM_BASE(v) + 0x04)
-#define OCIMEM_GEN_CTL(v)            (OCIMEM_BASE(v) + 0x08)
-#define OCIMEM_GEN_STAT(v)           (OCIMEM_BASE(v) + 0x0C)
-#define OCIMEM_INTC_CLR(v)           (OCIMEM_BASE(v) + 0x10)
-#define OCIMEM_INTC_MASK(v)          (OCIMEM_BASE(v) + 0x14)
-#define OCIMEM_INTC_STAT(v)          (OCIMEM_BASE(v) + 0x18)
-#define OCIMEM_OSW_STATUS(v)         (OCIMEM_BASE(v) + 0x1C)
-#define OCIMEM_PSCGC_TIMERS(v)       (OCIMEM_BASE(v) + 0x34)
-#define OCIMEM_PSCGC_STAT(v)         (OCIMEM_BASE(v) + 0x38)
-#define OCIMEM_PSCGC_M0_M7_CTL(v)    (OCIMEM_BASE(v) + 0x3C)
-#define OCIMEM_ERR_ADDRESS(v)        (OCIMEM_BASE(v) + 0x60)
-#define OCIMEM_AXI_ERR_SYNDROME(v)   (OCIMEM_BASE(v) + 0x64)
-#define OCIMEM_DEBUG_CTL(v)          (OCIMEM_BASE(v) + 0x68)
-
-/*
- * Helper macro to help out with masks and shifts for values packed into
- * registers.
- */
-#define DECLARE_TYPE(__type, __end, __start)                                   \
-	static const unsigned int __type##_BITS = (__end) - (__start) + 1;     \
-	static const unsigned int __type##_SHIFT = (__start);                  \
-	static const unsigned int __type##_MASK = GENMASK((__end), (__start)); \
-	static inline unsigned int __type(uint32_t val)                        \
-	{                                                                      \
-		return (val & __type##_MASK) >> __type##_SHIFT;                \
-	}                                                                      \
-	static inline uint32_t __type##_UPDATE(unsigned int val)               \
-	{                                                                      \
-		return (val << __type##_SHIFT) & __type##_MASK;                \
-	}
-
-/* Register masks */
-/* OCIMEM_PSCGC_M0_M7_CTL */
-DECLARE_TYPE(BANK0_STATE, 3, 0);
-DECLARE_TYPE(BANK1_STATE, 7, 4);
-DECLARE_TYPE(BANK2_STATE, 11, 8);
-DECLARE_TYPE(BANK3_STATE, 15, 12);
-/* OCIMEM_PSCGC_TIMERS */
-DECLARE_TYPE(TIMERS_WAKEUP, 3, 0);
-DECLARE_TYPE(TIMERS_SLEEP, 11, 8);
-/* OCIMEM_HW_VERSION */
-DECLARE_TYPE(VERSION_STEP, 15, 0);
-DECLARE_TYPE(VERSION_MINOR, 27, 16);
-DECLARE_TYPE(VERSION_MAJOR, 31, 28);
-/* OCIMEM_HW_PROFILE */
-DECLARE_TYPE(PROFILE_BANKS, 16, 12);
-/* OCIMEM_AXI_ERR_SYNDROME */
-DECLARE_TYPE(ERR_SYN_ATID, 14, 8);
-DECLARE_TYPE(ERR_SYN_AMID, 23, 16);
-DECLARE_TYPE(ERR_SYN_APID, 28, 24);
-DECLARE_TYPE(ERR_SYN_ABID, 31, 29);
-/* OCIMEM_INTC_MASK */
-DECLARE_TYPE(AXI_ERR_INT, 0, 0);
-
-/* Internal stuff */
-#define MAX_BANKS 4
-
-enum bank_state {
-	BANK_STATE_NORM_PASSTHRU = 0x000,
-	BANK_STATE_NORM_FORCE_CORE_ON = 0x002,
-	BANK_STATE_NORM_FORCE_PERIPH_ON = 0x001,
-	BANK_STATE_NORM_FORCE_ALL_ON = 0x03,
-	BANK_STATE_SLEEP_RET = 0x6,
-	BANK_STATE_SLEEP_RET_PERIPH_ON = 0x7,
-	BANK_STATE_SLEEP_NO_RET = 0x4,
-};
-
-struct vmem {
-	int irq;
-	int num_banks;
-	int bank_size;
-	struct {
-		struct resource *resource;
-		void __iomem *base;
-	} reg, mem;
-	struct regulator *vdd;
-	struct {
-		const char *name;
-		struct clk *clk;
-		bool has_mem_retention;
-	} *clocks;
-	int num_clocks;
-	struct {
-		struct msm_bus_scale_pdata *pdata;
-		uint32_t priv;
-	} bus;
-	atomic_t alloc_count;
-	struct dentry *debugfs_root;
-};
-
-static struct vmem *vmem;
-
-static inline u32 __readl(void * __iomem addr)
-{
-	u32 value = 0;
-
-	pr_debug("read %pK ", addr);
-	value = readl_relaxed(addr);
-	pr_debug("-> %08x\n", value);
-
-	return value;
-}
-
-static inline void __writel(u32 val, void * __iomem addr)
-{
-	pr_debug("write %08x -> %pK\n", val, addr);
-	writel_relaxed(val, addr);
-	/*
-	 * Commit all writes via a mem barrier, as subsequent __readl()
-	 * will depend on the state that's set via __writel().
-	 */
-	mb();
-}
-
-static inline void __wait_timer(struct vmem *v, bool wakeup)
-{
-	uint32_t ticks = 0;
-	unsigned int (*timer)(uint32_t) = wakeup ?
-		TIMERS_WAKEUP : TIMERS_SLEEP;
-
-	ticks = timer(__readl(OCIMEM_PSCGC_TIMERS(v)));
-
-	/* Sleep for `ticks` nanoseconds as per h/w spec */
-	ndelay(ticks);
-}
-
-static inline void __wait_wakeup(struct vmem *v)
-{
-	return __wait_timer(v, true);
-}
-
-static inline void __wait_sleep(struct vmem *v)
-{
-	return __wait_timer(v, false);
-}
-
-static inline int __power_on(struct vmem *v)
-{
-	int rc = 0, c = 0;
-
-	rc = msm_bus_scale_client_update_request(v->bus.priv, 1);
-	if (rc) {
-		pr_err("Failed to vote for buses (%d)\n", rc);
-		goto exit;
-	}
-	pr_debug("Voted for buses\n");
-
-	rc = regulator_enable(v->vdd);
-	if (rc) {
-		pr_err("Failed to power on gdsc (%d)", rc);
-		goto unvote_bus;
-	}
-	pr_debug("Enabled regulator vdd\n");
-
-	for (c = 0; c < v->num_clocks; ++c) {
-		if (v->clocks[c].has_mem_retention) {
-			rc = clk_set_flags(v->clocks[c].clk,
-				       CLKFLAG_NORETAIN_PERIPH);
-			if (rc) {
-				pr_warn("Failed set flag NORETAIN_PERIPH %s\n",
-					v->clocks[c].name);
-			}
-			rc = clk_set_flags(v->clocks[c].clk,
-				       CLKFLAG_NORETAIN_MEM);
-			if (rc) {
-				pr_warn("Failed set flag NORETAIN_MEM %s\n",
-					v->clocks[c].name);
-			}
-		}
-
-		rc = clk_prepare_enable(v->clocks[c].clk);
-		if (rc) {
-			pr_err("Failed to enable %s clock (%d)\n",
-					v->clocks[c].name, rc);
-			goto disable_clocks;
-		}
-
-		pr_debug("Enabled clock %s\n", v->clocks[c].name);
-	}
-
-	return 0;
-disable_clocks:
-	for (--c; c >= 0; c--)
-		clk_disable_unprepare(v->clocks[c].clk);
-	regulator_disable(v->vdd);
-unvote_bus:
-	msm_bus_scale_client_update_request(v->bus.priv, 0);
-exit:
-	return rc;
-}
-
-static inline int __power_off(struct vmem *v)
-{
-	int c = v->num_clocks;
-
-	for (c--; c >= 0; --c) {
-		clk_disable_unprepare(v->clocks[c].clk);
-		pr_debug("Disabled clock %s\n", v->clocks[c].name);
-	}
-
-	regulator_disable(v->vdd);
-	pr_debug("Disabled regulator vdd\n");
-
-	msm_bus_scale_client_update_request(v->bus.priv, 0);
-	pr_debug("Unvoted for buses\n");
-
-	return 0;
-}
-
-static inline enum bank_state __bank_get_state(struct vmem *v,
-		unsigned int bank)
-{
-	unsigned int (*func[MAX_BANKS])(uint32_t) = {
-		BANK0_STATE, BANK1_STATE, BANK2_STATE, BANK3_STATE
-	};
-
-	VMEM_ERROR(bank >= ARRAY_SIZE(func));
-	return func[bank](__readl(OCIMEM_PSCGC_M0_M7_CTL(v)));
-}
-
-static inline void __bank_set_state(struct vmem *v, unsigned int bank,
-		enum bank_state state)
-{
-	uint32_t bank_state = 0;
-	struct {
-		uint32_t (*update)(unsigned int);
-		uint32_t mask;
-	} banks[MAX_BANKS] = {
-		{BANK0_STATE_UPDATE, BANK0_STATE_MASK},
-		{BANK1_STATE_UPDATE, BANK1_STATE_MASK},
-		{BANK2_STATE_UPDATE, BANK2_STATE_MASK},
-		{BANK3_STATE_UPDATE, BANK3_STATE_MASK},
-	};
-
-	VMEM_ERROR(bank >= ARRAY_SIZE(banks));
-
-	bank_state = __readl(OCIMEM_PSCGC_M0_M7_CTL(v));
-	bank_state &= ~banks[bank].mask;
-	bank_state |= banks[bank].update(state);
-
-	__writel(bank_state, OCIMEM_PSCGC_M0_M7_CTL(v));
-}
-
-static inline void __toggle_interrupts(struct vmem *v, bool enable)
-{
-	uint32_t ints = __readl(OCIMEM_INTC_MASK(v)),
-		mask = AXI_ERR_INT_MASK,
-		update = AXI_ERR_INT_UPDATE(!enable);
-
-	ints &= ~mask;
-	ints |= update;
-
-	__writel(ints, OCIMEM_INTC_MASK(v));
-}
-
-static void __enable_interrupts(struct vmem *v)
-{
-	pr_debug("Enabling interrupts\n");
-	enable_irq(v->irq);
-	__toggle_interrupts(v, true);
-}
-
-static void __disable_interrupts(struct vmem *v)
-{
-	pr_debug("Disabling interrupts\n");
-	__toggle_interrupts(v, false);
-	disable_irq_nosync(v->irq);
-}
-
-/**
- * vmem_allocate: - Allocates memory from VMEM.  Allocations have a few
- * restrictions: only allocations of the entire VMEM memory are allowed, and
- * , as a result, only single outstanding allocations are allowed.
- *
- * @size: amount of bytes to allocate
- * @addr: A pointer to phys_addr_t where the physical address of the memory
- * allocated is stored.
- *
- * Return: 0 in case of successful allocation (i.e. *addr != NULL). -ENOTSUPP,
- * if platform doesn't support VMEM. -EEXIST, if there are outstanding VMEM
- * allocations.  -ENOMEM, if platform can't support allocation of `size` bytes.
- * -EAGAIN, if `size` does not allocate the entire VMEM region.  -EIO in case of
- * internal errors.
- */
-int vmem_allocate(size_t size, phys_addr_t *addr)
-{
-	int rc = 0, c = 0;
-	resource_size_t max_size = 0;
-
-	if (!vmem) {
-		pr_err("No vmem, try rebooting your device\n");
-		rc = -ENOTSUPP;
-		goto exit;
-	}
-	if (!size) {
-		pr_err("%s Invalid size %zu\n", __func__, size);
-		rc = -EINVAL;
-		goto exit;
-	}
-
-	max_size = resource_size(vmem->mem.resource);
-
-	if (atomic_read(&vmem->alloc_count)) {
-		pr_err("Only single allocations allowed for vmem\n");
-		rc = -EEXIST;
-		goto exit;
-	} else if (size > max_size) {
-		pr_err("Out of memory, have max %pa\n", &max_size);
-		rc = -ENOMEM;
-		goto exit;
-	} else if (size != max_size) {
-		pr_err("Only support allocations of size %pa\n", &max_size);
-		rc = -EAGAIN;
-		goto exit;
-	}
-
-	rc = __power_on(vmem);
-	if (rc) {
-		pr_err("Failed power on (%d)\n", rc);
-		goto exit;
-	}
-
-	VMEM_ERROR(vmem->num_banks != DIV_ROUND_UP(size, vmem->bank_size));
-
-	/* Turn on the necessary banks */
-	for (c = 0; c < vmem->num_banks; ++c) {
-		__bank_set_state(vmem, c, BANK_STATE_NORM_FORCE_CORE_ON);
-		__wait_wakeup(vmem);
-	}
-
-	/* Enable interrupts to detect faults */
-	__enable_interrupts(vmem);
-
-	atomic_inc(&vmem->alloc_count);
-	*addr = (phys_addr_t)vmem->mem.resource->start;
-	return 0;
-exit:
-	return rc;
-}
-EXPORT_SYMBOL(vmem_allocate);
-
-/**
- * vmem_free: - Frees the memory allocated via vmem_allocate.  Undefined
- * behaviour if to_free is a not a pointer returned via vmem_allocate
- */
-void vmem_free(phys_addr_t to_free)
-{
-	int c = 0;
-
-	if (!to_free || !vmem)
-		return;
-
-	VMEM_ERROR(atomic_read(&vmem->alloc_count) == 0);
-
-	for (c = 0; c < vmem->num_banks; ++c) {
-		enum bank_state curr_state = __bank_get_state(vmem, c);
-
-		if (curr_state != BANK_STATE_NORM_FORCE_CORE_ON) {
-			pr_warn("When freeing, expected bank state to be %d, was instead %d\n",
-					BANK_STATE_NORM_FORCE_CORE_ON,
-					curr_state);
-		}
-
-		__bank_set_state(vmem, c, BANK_STATE_SLEEP_NO_RET);
-	}
-
-	__disable_interrupts(vmem);
-	__power_off(vmem);
-	atomic_dec(&vmem->alloc_count);
-}
-EXPORT_SYMBOL(vmem_free);
-
-struct vmem_interrupt_cookie {
-	struct vmem *vmem;
-	struct work_struct work;
-};
-
-static void __irq_helper(struct work_struct *work)
-{
-	struct vmem_interrupt_cookie *cookie = container_of(work,
-			struct vmem_interrupt_cookie, work);
-	struct vmem *v = cookie->vmem;
-	unsigned int stat, gen_stat, pscgc_stat, err_addr_abs,
-		err_addr_rel, err_syn;
-
-	stat = __readl(OCIMEM_INTC_STAT(v));
-	gen_stat = __readl(OCIMEM_GEN_CTL(v));
-	pscgc_stat = __readl(OCIMEM_PSCGC_STAT(v));
-
-	err_addr_abs = __readl(OCIMEM_ERR_ADDRESS(v));
-	err_addr_rel = v->mem.resource->start - err_addr_abs;
-
-	err_syn = __readl(OCIMEM_AXI_ERR_SYNDROME(v));
-
-	pr_crit("Detected a fault on VMEM:\n");
-	pr_cont("\tinterrupt status: %x\n", stat);
-	pr_cont("\tgeneral status: %x\n", gen_stat);
-	pr_cont("\tmemory status: %x\n", pscgc_stat);
-	pr_cont("\tfault address: %x (absolute), %x (relative)\n",
-			err_addr_abs, err_addr_rel);
-	pr_cont("\tfault bank: %x\n", err_addr_rel / v->bank_size);
-	pr_cont("\tfault core: %u (mid), %u (pid), %u (bid)\n",
-			ERR_SYN_AMID(err_syn), ERR_SYN_APID(err_syn),
-			ERR_SYN_ABID(err_syn));
-
-	/* Clear the interrupt */
-	__writel(0, OCIMEM_INTC_CLR(v));
-
-	__enable_interrupts(v);
-}
-
-static struct vmem_interrupt_cookie interrupt_cookie;
-
-static irqreturn_t __irq_handler(int irq, void *cookie)
-{
-	struct vmem *v = cookie;
-	irqreturn_t status = __readl(OCIMEM_INTC_STAT(vmem)) ?
-		IRQ_HANDLED : IRQ_NONE;
-
-	if (status != IRQ_NONE) {
-		/* Mask further interrupts while handling this one */
-		__disable_interrupts(v);
-
-		interrupt_cookie.vmem = v;
-		INIT_WORK(&interrupt_cookie.work, __irq_helper);
-		schedule_work(&interrupt_cookie.work);
-	}
-
-	return status;
-}
-
-static inline int __init_resources(struct vmem *v,
-		struct platform_device *pdev)
-{
-	int rc = 0, c = 0;
-	int *clock_props = NULL;
-
-	v->irq = platform_get_irq(pdev, 0);
-	if (v->irq < 0) {
-		rc = v->irq;
-		pr_err("Failed to get irq (%d)\n", rc);
-		v->irq = 0;
-		goto exit;
-	}
-
-	/* Registers and memory */
-	v->reg.resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-			"reg-base");
-	if (!v->reg.resource) {
-		pr_err("Failed to find register base\n");
-		rc = -ENOENT;
-		goto exit;
-	}
-
-	v->reg.base = devm_ioremap_resource(&pdev->dev, v->reg.resource);
-	if (IS_ERR_OR_NULL(v->reg.base)) {
-		rc = PTR_ERR(v->reg.base) ?: -EIO;
-		pr_err("Failed to map register base into kernel (%d)\n", rc);
-		v->reg.base = NULL;
-		goto exit;
-	}
-
-	pr_debug("Register range: %pa -> %pa\n", &v->reg.resource->start,
-			&v->reg.resource->end);
-
-	v->mem.resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-			"mem-base");
-	if (!v->mem.resource) {
-		pr_err("Failed to find memory base\n");
-		rc = -ENOENT;
-		goto exit;
-	}
-
-	v->mem.base = NULL;
-	pr_debug("Memory range: %pa -> %pa\n", &v->mem.resource->start,
-			&v->mem.resource->end);
-
-	/* Buses, Clocks & Regulators*/
-	v->num_clocks = of_property_count_strings(pdev->dev.of_node,
-			"clock-names");
-	if (v->num_clocks <= 0) {
-		pr_err("Can't find any clocks\n");
-		goto exit;
-	}
-
-	v->clocks = devm_kzalloc(&pdev->dev, sizeof(*v->clocks) * v->num_clocks,
-			GFP_KERNEL);
-	if (!v->clocks) {
-		rc = -ENOMEM;
-		goto exit;
-	}
-
-	clock_props = devm_kzalloc(&pdev->dev,
-					v->num_clocks * sizeof(*clock_props),
-					GFP_KERNEL);
-	if (!clock_props) {
-		pr_err("Failed to allocate clock config table\n");
-		goto exit;
-	}
-
-	rc = of_property_read_u32_array(pdev->dev.of_node, "clock-config",
-			clock_props, v->num_clocks);
-	if (rc) {
-		pr_err("Failed to read clock config\n");
-		goto exit;
-	}
-
-	for (c = 0; c < v->num_clocks; ++c) {
-		const char *name = NULL;
-		struct clk *temp = NULL;
-
-		of_property_read_string_index(pdev->dev.of_node, "clock-names",
-				c, &name);
-		temp = devm_clk_get(&pdev->dev, name);
-		if (IS_ERR_OR_NULL(temp)) {
-			rc = PTR_ERR(temp) ?: -ENOENT;
-			pr_err("Failed to find %s (%d)\n", name, rc);
-			goto exit;
-		}
-
-		v->clocks[c].clk = temp;
-		v->clocks[c].name = name;
-		v->clocks[c].has_mem_retention = clock_props[c];
-	}
-
-	v->vdd = devm_regulator_get(&pdev->dev, "vdd");
-	if (IS_ERR_OR_NULL(v->vdd)) {
-		rc = PTR_ERR(v->vdd) ?: -ENOENT;
-		pr_err("Failed to find regulator (vdd) (%d)\n", rc);
-		goto exit;
-	}
-
-	v->bus.pdata = msm_bus_cl_get_pdata(pdev);
-	if (IS_ERR_OR_NULL(v->bus.pdata)) {
-		rc = PTR_ERR(v->bus.pdata) ?: -ENOENT;
-		pr_err("Failed to find bus vectors (%d)\n", rc);
-		goto exit;
-	}
-
-	v->bus.priv = msm_bus_scale_register_client(v->bus.pdata);
-	if (!v->bus.priv) {
-		rc = -EBADHANDLE;
-		pr_err("Failed to register bus client\n");
-		goto free_pdata;
-	}
-
-	/* Misc. */
-	rc = of_property_read_u32(pdev->dev.of_node, "qcom,bank-size",
-			&v->bank_size);
-	if (rc || !v->bank_size) {
-		pr_err("Failed reading (or found invalid) qcom,bank-size in %s (%d)\n",
-				of_node_full_name(pdev->dev.of_node), rc);
-		rc = -ENOENT;
-		goto free_pdata;
-	}
-
-	v->num_banks = resource_size(v->mem.resource) / v->bank_size;
-
-	pr_debug("Found configuration with %d banks with size %d\n",
-			v->num_banks, v->bank_size);
-
-	return 0;
-free_pdata:
-	msm_bus_cl_clear_pdata(v->bus.pdata);
-exit:
-	return rc;
-}
-
-static inline void __uninit_resources(struct vmem *v,
-		struct platform_device *pdev)
-{
-	int c = 0;
-
-	msm_bus_cl_clear_pdata(v->bus.pdata);
-	v->bus.pdata = NULL;
-	v->bus.priv = 0;
-
-	for (c = 0; c < v->num_clocks; ++c) {
-		v->clocks[c].clk = NULL;
-		v->clocks[c].name = NULL;
-	}
-
-	v->vdd = NULL;
-}
-
-static int vmem_probe(struct platform_device *pdev)
-{
-	uint32_t version = 0, num_banks = 0, rc = 0;
-	struct vmem *v = NULL;
-
-	if (vmem) {
-		pr_err("Only one instance of %s allowed", pdev->name);
-		return -EEXIST;
-	}
-
-	v = devm_kzalloc(&pdev->dev, sizeof(*v), GFP_KERNEL);
-	if (!v)
-		return -ENOMEM;
-
-
-	rc = __init_resources(v, pdev);
-	if (rc) {
-		pr_err("Failed to read resources\n");
-		goto exit;
-	}
-
-	/*
-	 * For now, only support up to 4 banks. It's unrealistic that VMEM has
-	 * more banks than that (even in the future).
-	 */
-	if (v->num_banks > MAX_BANKS) {
-		pr_err("Number of banks (%d) exceeds what's supported (%d)\n",
-			v->num_banks, MAX_BANKS);
-		rc = -ENOTSUPP;
-		goto exit;
-	}
-
-	/* Cross check the platform resources with what's available on chip */
-	rc = __power_on(v);
-	if (rc) {
-		pr_err("Failed to power on (%d)\n", rc);
-		goto exit;
-	}
-
-	version = __readl(OCIMEM_HW_VERSION(v));
-	pr_debug("v%d.%d.%d\n", VERSION_MAJOR(version), VERSION_MINOR(version),
-			VERSION_STEP(version));
-
-	num_banks = PROFILE_BANKS(__readl(OCIMEM_HW_PROFILE(v)));
-	pr_debug("Found %d banks on chip\n", num_banks);
-	if (v->num_banks != num_banks) {
-		pr_err("Platform configuration of %d banks differs from what's available on chip (%d)\n",
-				v->num_banks, num_banks);
-		rc = -EINVAL;
-		goto disable_clocks;
-	}
-
-	rc = devm_request_irq(&pdev->dev, v->irq, __irq_handler,
-			IRQF_TRIGGER_HIGH, "vmem", v);
-	if (rc) {
-		pr_err("Failed to setup irq (%d)\n", rc);
-		goto disable_clocks;
-	}
-
-	__disable_interrupts(v);
-
-	/* Everything good so far, set up the global context and debug hooks */
-	pr_info("Up and running with %d banks of memory from %pR\n",
-			v->num_banks, &v->mem.resource);
-	v->debugfs_root = vmem_debugfs_init(pdev);
-	platform_set_drvdata(pdev, v);
-	vmem = v;
-
-disable_clocks:
-	__power_off(v);
-exit:
-	return rc;
-}
-
-static int vmem_remove(struct platform_device *pdev)
-{
-	struct vmem *v = platform_get_drvdata(pdev);
-
-	VMEM_ERROR(v != vmem);
-
-	__uninit_resources(v, pdev);
-	vmem_debugfs_deinit(v->debugfs_root);
-	vmem = NULL;
-
-	return 0;
-}
-
-static const struct of_device_id vmem_of_match[] = {
-	{.compatible = "qcom,msm-vmem"},
-	{}
-};
-
-MODULE_DEVICE_TABLE(of, vmem_of_match);
-
-static struct platform_driver vmem_driver = {
-	.probe = vmem_probe,
-	.remove = vmem_remove,
-	.driver = {
-		.name = "msm_vidc_vmem",
-		.owner = THIS_MODULE,
-		.of_match_table = vmem_of_match,
-	},
-};
-
-static int __init vmem_init(void)
-{
-	return platform_driver_register(&vmem_driver);
-}
-
-static void __exit vmem_exit(void)
-{
-	platform_driver_unregister(&vmem_driver);
-}
-
-module_init(vmem_init);
-module_exit(vmem_exit);
-
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/vidc/vmem/vmem.h b/drivers/media/platform/msm/vidc/vmem/vmem.h
deleted file mode 100644
index 0376427..0000000
--- a/drivers/media/platform/msm/vidc/vmem/vmem.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2014, 2016-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef __VMEM_H__
-#define __VMEM_H__
-
-#define VMEM_ERROR(value)	\
-	do {			\
-		pr_info("%s : Fatal Level = %d\n", KBUILD_MODNAME, value);\
-		BUG_ON(value);  \
-	} while (0)
-
-#if (defined CONFIG_MSM_VIDC_VMEM) || (defined CONFIG_MSM_VIDC_VMEM_MODULE)
-
-int vmem_allocate(size_t size, phys_addr_t *addr);
-void vmem_free(phys_addr_t to_free);
-
-#else
-
-static inline int vmem_allocate(size_t size, phys_addr_t *addr)
-{
-	return -ENODEV;
-}
-
-static inline void vmem_free(phys_addr_t to_free)
-{
-}
-
-#endif
-
-#endif /* __VMEM_H__ */
diff --git a/drivers/media/platform/msm/vidc/vmem/vmem_debugfs.c b/drivers/media/platform/msm/vidc/vmem/vmem_debugfs.c
deleted file mode 100644
index 7d2d524..0000000
--- a/drivers/media/platform/msm/vidc/vmem/vmem_debugfs.c
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (c) 2014, 2017 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/debugfs.h>
-#include <linux/fs.h>
-#include <linux/platform_device.h>
-#include "vmem.h"
-
-struct vmem_debugfs_cookie {
-	phys_addr_t addr;
-	size_t size;
-};
-
-static int __vmem_alloc_get(void *priv, u64 *val)
-{
-	struct vmem_debugfs_cookie *cookie = priv;
-
-	*val = cookie->size;
-	return 0;
-}
-
-static int __vmem_alloc_set(void *priv, u64 val)
-{
-	struct vmem_debugfs_cookie *cookie = priv;
-	int rc = 0;
-
-	switch (val) {
-	case 0: /* free */
-		vmem_free(cookie->addr);
-		cookie->size = 0;
-		break;
-	default:
-		rc = vmem_allocate(val, &cookie->addr);
-		cookie->size = val;
-		break;
-	}
-
-	return rc;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(fops_vmem_alloc, __vmem_alloc_get,
-		__vmem_alloc_set, "%llu");
-
-struct dentry *vmem_debugfs_init(struct platform_device *pdev)
-{
-	struct vmem_debugfs_cookie *alloc_cookie = NULL;
-	struct dentry *debugfs_root = NULL;
-
-	alloc_cookie = devm_kzalloc(&pdev->dev, sizeof(*alloc_cookie),
-			GFP_KERNEL);
-	if (!alloc_cookie)
-		goto exit;
-
-	debugfs_root = debugfs_create_dir("vmem", NULL);
-	if (IS_ERR_OR_NULL(debugfs_root)) {
-		pr_warn("Failed to create '<debugfs>/vmem'\n");
-		debugfs_root = NULL;
-		goto exit;
-	}
-
-	debugfs_create_file("alloc", 0600, debugfs_root,
-			alloc_cookie, &fops_vmem_alloc);
-
-exit:
-	return debugfs_root;
-}
-
-void vmem_debugfs_deinit(struct dentry *debugfs_root)
-{
-	debugfs_remove_recursive(debugfs_root);
-}
-
diff --git a/drivers/media/platform/msm/vidc/vmem/vmem_debugfs.h b/drivers/media/platform/msm/vidc/vmem/vmem_debugfs.h
deleted file mode 100644
index 8b716cc..0000000
--- a/drivers/media/platform/msm/vidc/vmem/vmem_debugfs.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (c) 2014, 2017 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-#ifndef __VMEM_DEBUGFS_H__
-#define __VMEM_DEBUGFS_H__
-
-#include <linux/debugfs.h>
-
-struct dentry *vmem_debugfs_init(struct platform_device *pdev);
-void vmem_debugfs_deinit(struct dentry *debugfs_root);
-
-#endif /* __VMEM_DEBUGFS_H__ */
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 0898414..6739fb0 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -338,6 +338,7 @@
 		"5",
 		"5.1",
 		"5.2",
+		"Unknown",
 		NULL,
 	};
 	static const char * const h264_loop_filter[] = {
diff --git a/drivers/misc/memory_state_time.c b/drivers/misc/memory_state_time.c
index 34c797a..ba94dcf 100644
--- a/drivers/misc/memory_state_time.c
+++ b/drivers/misc/memory_state_time.c
@@ -296,27 +296,31 @@
 	struct device_node *node = dev->of_node;
 
 	of_property_read_u32(node, NUM_SOURCES, &num_sources);
-	if (of_find_property(node, BW_TBL, &lenb)) {
-		bandwidths = devm_kzalloc(dev,
-				sizeof(*bandwidths) * num_sources, GFP_KERNEL);
-		if (!bandwidths)
-			return -ENOMEM;
-		lenb /= sizeof(*bw_buckets);
-		bw_buckets = devm_kzalloc(dev, lenb * sizeof(*bw_buckets),
-				GFP_KERNEL);
-		if (!bw_buckets) {
-			devm_kfree(dev, bandwidths);
-			return -ENOMEM;
-		}
-		ret = of_property_read_u32_array(node, BW_TBL, bw_buckets,
-				lenb);
-		if (ret < 0) {
-			devm_kfree(dev, bandwidths);
-			devm_kfree(dev, bw_buckets);
-			pr_err("Unable to read bandwidth table from device tree.\n");
-			return ret;
-		}
+	if (!of_find_property(node, BW_TBL, &lenb)) {
+		pr_err("Missing %s property\n", BW_TBL);
+		return -ENODATA;
 	}
+
+	bandwidths = devm_kzalloc(dev,
+			sizeof(*bandwidths) * num_sources, GFP_KERNEL);
+	if (!bandwidths)
+		return -ENOMEM;
+	lenb /= sizeof(*bw_buckets);
+	bw_buckets = devm_kzalloc(dev, lenb * sizeof(*bw_buckets),
+			GFP_KERNEL);
+	if (!bw_buckets) {
+		devm_kfree(dev, bandwidths);
+		return -ENOMEM;
+	}
+	ret = of_property_read_u32_array(node, BW_TBL, bw_buckets,
+			lenb);
+	if (ret < 0) {
+		devm_kfree(dev, bandwidths);
+		devm_kfree(dev, bw_buckets);
+		pr_err("Unable to read bandwidth table from device tree.\n");
+		return ret;
+	}
+
 	curr_bw = 0;
 	num_buckets = lenb;
 	return 0;
@@ -332,22 +336,26 @@
 	int ret, lenf;
 	struct device_node *node = dev->of_node;
 
-	if (of_find_property(node, FREQ_TBL, &lenf)) {
-		lenf /= sizeof(*freq_buckets);
-		freq_buckets = devm_kzalloc(dev, lenf * sizeof(*freq_buckets),
-				GFP_KERNEL);
-		if (!freq_buckets)
-			return -ENOMEM;
-		pr_debug("freqs found len %d\n", lenf);
-		ret = of_property_read_u32_array(node, FREQ_TBL, freq_buckets,
-				lenf);
-		if (ret < 0) {
-			devm_kfree(dev, freq_buckets);
-			pr_err("Unable to read frequency table from device tree.\n");
-			return ret;
-		}
-		pr_debug("ret freq %d\n", ret);
+	if (!of_find_property(node, FREQ_TBL, &lenf)) {
+		pr_err("Missing %s property\n", FREQ_TBL);
+		return -ENODATA;
 	}
+
+	lenf /= sizeof(*freq_buckets);
+	freq_buckets = devm_kzalloc(dev, lenf * sizeof(*freq_buckets),
+			GFP_KERNEL);
+	if (!freq_buckets)
+		return -ENOMEM;
+	pr_debug("freqs found len %d\n", lenf);
+	ret = of_property_read_u32_array(node, FREQ_TBL, freq_buckets,
+			lenf);
+	if (ret < 0) {
+		devm_kfree(dev, freq_buckets);
+		pr_err("Unable to read frequency table from device tree.\n");
+		return ret;
+	}
+	pr_debug("ret freq %d\n", ret);
+
 	num_freqs = lenf;
 	curr_freq = freq_buckets[LOWEST_FREQ];
 
diff --git a/drivers/misc/qcom/Kconfig b/drivers/misc/qcom/Kconfig
index 9c73960..e8a7960 100644
--- a/drivers/misc/qcom/Kconfig
+++ b/drivers/misc/qcom/Kconfig
@@ -1,6 +1,5 @@
 config MSM_QDSP6V2_CODECS
 	bool "Audio QDSP6V2 APR support"
-	depends on MSM_SMD
 	select SND_SOC_QDSP6V2
 	help
 	  Enable Audio codecs with APR IPC protocol support between
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 1397d03..790f191 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -4321,6 +4321,7 @@
 		mmc_power_up(host, host->ocr_avail);
 
 	mmc_gpiod_request_cd_irq(host);
+	mmc_register_extcon(host);
 	mmc_release_host(host);
 	_mmc_detect_change(host, 0, false);
 }
@@ -4357,6 +4358,8 @@
 
 	BUG_ON(host->card);
 
+	mmc_register_extcon(host);
+
 	mmc_claim_host(host);
 	mmc_power_off(host);
 	mmc_release_host(host);
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 27117ba..b5c81e4 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -17,6 +17,7 @@
 #include <linux/mmc/slot-gpio.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/extcon.h>
 
 #include "slot-gpio.h"
 
@@ -154,6 +155,53 @@
 }
 EXPORT_SYMBOL(mmc_gpiod_request_cd_irq);
 
+static int mmc_card_detect_notifier(struct notifier_block *nb,
+				       unsigned long event, void *ptr)
+{
+	struct mmc_host *host = container_of(nb, struct mmc_host,
+					     card_detect_nb);
+
+	host->trigger_card_event = true;
+	mmc_detect_change(host, 0);
+
+	return NOTIFY_DONE;
+}
+
+void mmc_register_extcon(struct mmc_host *host)
+{
+	struct extcon_dev *extcon = host->extcon;
+	int err;
+
+	if (!extcon)
+		return;
+
+	host->card_detect_nb.notifier_call = mmc_card_detect_notifier;
+	err = extcon_register_notifier(extcon, EXTCON_MECHANICAL,
+				       &host->card_detect_nb);
+	if (err) {
+		dev_err(mmc_dev(host), "%s: extcon_register_notifier() failed ret=%d\n",
+			__func__, err);
+		host->caps |= MMC_CAP_NEEDS_POLL;
+	}
+}
+EXPORT_SYMBOL(mmc_register_extcon);
+
+void mmc_unregister_extcon(struct mmc_host *host)
+{
+	struct extcon_dev *extcon = host->extcon;
+	int err;
+
+	if (!extcon)
+		return;
+
+	err = extcon_unregister_notifier(extcon, EXTCON_MECHANICAL,
+					 &host->card_detect_nb);
+	if (err)
+		dev_err(mmc_dev(host), "%s: extcon_unregister_notifier() failed ret=%d\n",
+			__func__, err);
+}
+EXPORT_SYMBOL(mmc_unregister_extcon);
+
 /* Register an alternate interrupt service routine for
  * the card-detect GPIO.
  */
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 515abb2..dff6631 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -401,6 +401,8 @@
 	depends on MMC_SDHCI_PLTFM
 	select PM_DEVFREQ
 	select DEVFREQ_GOV_SIMPLE_ONDEMAND
+	select EXTCON
+	select EXTCON_GPIO
 	help
 	  This selects the Secure Digital Host Controller Interface (SDHCI)
 	  support present in Qualcomm Technologies, Inc. SOCs. The controller
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index ad49bfa..b6122e9 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -121,6 +121,7 @@
 	struct resource *iomem;
 	void __iomem *ioaddr;
 	int irq, ret;
+	struct extcon_dev *extcon;
 
 	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	ioaddr = devm_ioremap_resource(&pdev->dev, iomem);
@@ -156,6 +157,15 @@
 		host->quirks2 = pdata->quirks2;
 	}
 
+	extcon = extcon_get_edev_by_phandle(&pdev->dev, 0);
+	if (IS_ERR(extcon) && PTR_ERR(extcon) != -ENODEV) {
+		ret = PTR_ERR(extcon);
+		goto err;
+	}
+	if (!IS_ERR(extcon))
+		host->mmc->extcon = extcon;
+
+
 	platform_set_drvdata(pdev, host);
 
 	return host;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 53a6ae8..83be863 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -4095,7 +4095,7 @@
 	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
 	    mmc_card_is_removable(mmc) &&
 	    mmc_gpio_get_cd(host->mmc) < 0 &&
-	    !(mmc->caps2 & MMC_CAP2_NONHOTPLUG))
+	    !(mmc->caps2 & MMC_CAP2_NONHOTPLUG) && !host->mmc->extcon)
 		mmc->caps |= MMC_CAP_NEEDS_POLL;
 
 	/* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index f08a20b..48ee411 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2867,7 +2867,8 @@
 		INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
 
 	bp->ntp_fltr_count = 0;
-	bp->ntp_fltr_bmap = kzalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
+	bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
+				    sizeof(long),
 				    GFP_KERNEL);
 
 	if (!bp->ntp_fltr_bmap)
diff --git a/drivers/net/ethernet/msm/ecm_ipa.c b/drivers/net/ethernet/msm/ecm_ipa.c
index ce8f7ac..105294a 100644
--- a/drivers/net/ethernet/msm/ecm_ipa.c
+++ b/drivers/net/ethernet/msm/ecm_ipa.c
@@ -97,8 +97,8 @@
 };
 
 #define ECM_IPA_STATE_DEBUG(ecm_ipa_ctx) \
-	(ECM_IPA_DEBUG("Driver state - %s\n",\
-	ecm_ipa_state_string((ecm_ipa_ctx)->state)))
+	ECM_IPA_DEBUG("Driver state - %s\n",\
+	ecm_ipa_state_string((ecm_ipa_ctx)->state))
 
 /**
  * struct ecm_ipa_dev - main driver context parameters
@@ -163,8 +163,6 @@
 static netdev_tx_t ecm_ipa_start_xmit
 	(struct sk_buff *skb, struct net_device *net);
 static int ecm_ipa_debugfs_atomic_open(struct inode *inode, struct file *file);
-static ssize_t ecm_ipa_debugfs_enable_read
-	(struct file *file, char __user *ubuf, size_t count, loff_t *ppos);
 static ssize_t ecm_ipa_debugfs_atomic_read
 	(struct file *file, char __user *ubuf, size_t count, loff_t *ppos);
 static void ecm_ipa_debugfs_init(struct ecm_ipa_dev *ecm_ipa_ctx);
@@ -558,7 +556,7 @@
 	netdev_tx_t status = NETDEV_TX_BUSY;
 	struct ecm_ipa_dev *ecm_ipa_ctx = netdev_priv(net);
 
-	net->trans_start = jiffies;
+	netif_trans_update(net);
 
 	ECM_IPA_DEBUG
 		("Tx, len=%d, skb->protocol=%d, outstanding=%d\n",
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index f7c6a40..a5d66e2 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -617,7 +617,8 @@
 
 static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
 					     unsigned char **iv,
-					     struct scatterlist **sg)
+					     struct scatterlist **sg,
+					     int num_frags)
 {
 	size_t size, iv_offset, sg_offset;
 	struct aead_request *req;
@@ -629,7 +630,7 @@
 
 	size = ALIGN(size, __alignof__(struct scatterlist));
 	sg_offset = size;
-	size += sizeof(struct scatterlist) * (MAX_SKB_FRAGS + 1);
+	size += sizeof(struct scatterlist) * num_frags;
 
 	tmp = kmalloc(size, GFP_ATOMIC);
 	if (!tmp)
@@ -649,6 +650,7 @@
 {
 	int ret;
 	struct scatterlist *sg;
+	struct sk_buff *trailer;
 	unsigned char *iv;
 	struct ethhdr *eth;
 	struct macsec_eth_header *hh;
@@ -723,7 +725,14 @@
 		return ERR_PTR(-EINVAL);
 	}
 
-	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg);
+	ret = skb_cow_data(skb, 0, &trailer);
+	if (unlikely(ret < 0)) {
+		macsec_txsa_put(tx_sa);
+		kfree_skb(skb);
+		return ERR_PTR(ret);
+	}
+
+	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
 	if (!req) {
 		macsec_txsa_put(tx_sa);
 		kfree_skb(skb);
@@ -732,7 +741,7 @@
 
 	macsec_fill_iv(iv, secy->sci, pn);
 
-	sg_init_table(sg, MAX_SKB_FRAGS + 1);
+	sg_init_table(sg, ret);
 	skb_to_sgvec(skb, sg, 0, skb->len);
 
 	if (tx_sc->encrypt) {
@@ -914,6 +923,7 @@
 {
 	int ret;
 	struct scatterlist *sg;
+	struct sk_buff *trailer;
 	unsigned char *iv;
 	struct aead_request *req;
 	struct macsec_eth_header *hdr;
@@ -924,7 +934,12 @@
 	if (!skb)
 		return ERR_PTR(-ENOMEM);
 
-	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg);
+	ret = skb_cow_data(skb, 0, &trailer);
+	if (unlikely(ret < 0)) {
+		kfree_skb(skb);
+		return ERR_PTR(ret);
+	}
+	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
 	if (!req) {
 		kfree_skb(skb);
 		return ERR_PTR(-ENOMEM);
@@ -933,7 +948,7 @@
 	hdr = (struct macsec_eth_header *)skb->data;
 	macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));
 
-	sg_init_table(sg, MAX_SKB_FRAGS + 1);
+	sg_init_table(sg, ret);
 	skb_to_sgvec(skb, sg, 0, skb->len);
 
 	if (hdr->tci_an & MACSEC_TCI_E) {
@@ -2709,7 +2724,7 @@
 }
 
 #define MACSEC_FEATURES \
-	(NETIF_F_SG | NETIF_F_HIGHDMA)
+	(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
 static struct lock_class_key macsec_netdev_addr_lock_key;
 
 static int macsec_dev_init(struct net_device *dev)
diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c
index 0a04125..0a5f62e 100644
--- a/drivers/net/phy/mdio-mux-bcm-iproc.c
+++ b/drivers/net/phy/mdio-mux-bcm-iproc.c
@@ -203,11 +203,14 @@
 			   &md->mux_handle, md, md->mii_bus);
 	if (rc) {
 		dev_info(md->dev, "mdiomux initialization failed\n");
-		goto out;
+		goto out_register;
 	}
 
 	dev_info(md->dev, "iProc mdiomux registered\n");
 	return 0;
+
+out_register:
+	mdiobus_unregister(bus);
 out:
 	mdiobus_free(bus);
 	return rc;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 0d519a9..34d997c 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -902,6 +902,7 @@
 	{QMI_FIXED_INTF(0x2357, 0x0201, 4)},	/* TP-LINK HSUPA Modem MA180 */
 	{QMI_FIXED_INTF(0x2357, 0x9000, 4)},	/* TP-LINK MA260 */
 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)},	/* Telit LE922A */
+	{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)},	/* Telit ME910 */
 	{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},	/* Telit LE920 */
 	{QMI_FIXED_INTF(0x1bc7, 0x1201, 2)},	/* Telit LE920 */
 	{QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)},	/* XS Stick W100-2 from 4G Systems */
diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig
index 8f0bde5..0e66348 100644
--- a/drivers/net/wireless/ath/wil6210/Kconfig
+++ b/drivers/net/wireless/ath/wil6210/Kconfig
@@ -44,7 +44,7 @@
 config WIL6210_WRITE_IOCTL
 	bool "wil6210 write ioctl to the device"
 	depends on WIL6210
-	default n
+	default y
 	---help---
 	  Say Y here to allow write-access from user-space to
 	  the device memory through ioctl. This is useful for
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 4e111cb..a83f8f6 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -25,6 +25,10 @@
 module_param(disable_ap_sme, bool, 0444);
 MODULE_PARM_DESC(disable_ap_sme, " let user space handle AP mode SME");
 
+static bool ignore_reg_hints = true;
+module_param(ignore_reg_hints, bool, 0444);
+MODULE_PARM_DESC(ignore_reg_hints, " Ignore OTA regulatory hints (Default: true)");
+
 #define CHAN60G(_channel, _flags) {				\
 	.band			= NL80211_BAND_60GHZ,		\
 	.center_freq		= 56160 + (2160 * (_channel)),	\
@@ -1413,6 +1417,8 @@
 	wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
 	wil_set_recovery_state(wil, fw_recovery_idle);
 
+	set_bit(wil_status_resetting, wil->status);
+
 	mutex_lock(&wil->mutex);
 
 	wmi_pcp_stop(wil);
@@ -1647,12 +1653,6 @@
 {
 	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
 	enum wmi_ps_profile_type ps_profile;
-	int rc;
-
-	if (!test_bit(WMI_FW_CAPABILITY_PS_CONFIG, wil->fw_capabilities)) {
-		wil_err(wil, "set_power_mgmt not supported\n");
-		return -EOPNOTSUPP;
-	}
 
 	wil_dbg_misc(wil, "enabled=%d, timeout=%d\n",
 		     enabled, timeout);
@@ -1662,11 +1662,7 @@
 	else
 		ps_profile = WMI_PS_PROFILE_TYPE_PS_DISABLED;
 
-	rc  = wmi_ps_dev_profile_cfg(wil, ps_profile);
-	if (rc)
-		wil_err(wil, "wmi_ps_dev_profile_cfg failed (%d)\n", rc);
-
-	return rc;
+	return wil_ps_update(wil, ps_profile);
 }
 
 static struct cfg80211_ops wil_cfg80211_ops = {
@@ -1740,6 +1736,11 @@
 	wiphy->vendor_commands = wil_nl80211_vendor_commands;
 	wiphy->vendor_events = wil_nl80211_vendor_events;
 	wiphy->n_vendor_events = ARRAY_SIZE(wil_nl80211_vendor_events);
+
+	if (ignore_reg_hints) {
+		wiphy->regulatory_flags |= REGULATORY_DISABLE_BEACON_HINTS;
+		wiphy->regulatory_flags |= REGULATORY_COUNTRY_IE_IGNORE;
+	}
 }
 
 struct wireless_dev *wil_cfg80211_init(struct device *dev)
diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c
index f490158..e01acac 100644
--- a/drivers/net/wireless/ath/wil6210/fw_inc.c
+++ b/drivers/net/wireless/ath/wil6210/fw_inc.c
@@ -554,5 +554,7 @@
 	rc = request_firmware(&fw, name, wil_to_dev(wil));
 	if (!rc)
 		release_firmware(fw);
-	return rc != -ENOENT;
+	else
+		wil_dbg_fw(wil, "<%s> not available: %d\n", name, rc);
+	return !rc;
 }
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 36959a3..1fc4580 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -130,9 +130,15 @@
 	u32 *d = dst;
 	const volatile u32 __iomem *s = src;
 
-	/* size_t is unsigned, if (count%4 != 0) it will wrap */
-	for (count += 4; count > 4; count -= 4)
+	for (; count >= 4; count -= 4)
 		*d++ = __raw_readl(s++);
+
+	if (unlikely(count)) {
+		/* count can be 1..3 */
+		u32 tmp = __raw_readl(s);
+
+		memcpy(d, &tmp, count);
+	}
 }
 
 void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
@@ -141,8 +147,16 @@
 	volatile u32 __iomem *d = dst;
 	const u32 *s = src;
 
-	for (count += 4; count > 4; count -= 4)
+	for (; count >= 4; count -= 4)
 		__raw_writel(*s++, d++);
+
+	if (unlikely(count)) {
+		/* count can be 1..3 */
+		u32 tmp = 0;
+
+		memcpy(&tmp, s, count);
+		__raw_writel(tmp, d);
+	}
 }
 
 static void wil_disconnect_cid(struct wil6210_priv *wil, int cid,
@@ -176,6 +190,7 @@
 			break;
 		}
 		sta->status = wil_sta_unused;
+		sta->fst_link_loss = false;
 	}
 	/* reorder buffers */
 	for (i = 0; i < WIL_STA_TID_NUM; i++) {
@@ -561,6 +576,9 @@
 
 	if (rx_ring_overflow_thrsh == WIL6210_RX_HIGH_TRSH_INIT)
 		rx_ring_overflow_thrsh = WIL6210_RX_HIGH_TRSH_DEFAULT;
+
+	wil->ps_profile =  WMI_PS_PROFILE_TYPE_DEFAULT;
+
 	return 0;
 
 out_wmi_wq:
@@ -889,6 +907,24 @@
 	}
 }
 
+int wil_ps_update(struct wil6210_priv *wil, enum wmi_ps_profile_type ps_profile)
+{
+	int rc;
+
+	if (!test_bit(WMI_FW_CAPABILITY_PS_CONFIG, wil->fw_capabilities)) {
+		wil_err(wil, "set_power_mgmt not supported\n");
+		return -EOPNOTSUPP;
+	}
+
+	rc  = wmi_ps_dev_profile_cfg(wil, ps_profile);
+	if (rc)
+		wil_err(wil, "wmi_ps_dev_profile_cfg failed (%d)\n", rc);
+	else
+		wil->ps_profile = ps_profile;
+
+	return rc;
+}
+
 /*
  * We reset all the structures, and we reset the UMAC.
  * After calling this routine, you're expected to reload
@@ -938,15 +974,15 @@
 	/* Disable device led before reset*/
 	wmi_led_cfg(wil, false);
 
+	mutex_lock(&wil->p2p_wdev_mutex);
+	wil_abort_scan(wil, false);
+	mutex_unlock(&wil->p2p_wdev_mutex);
+
 	/* prevent NAPI from being scheduled and prevent wmi commands */
 	mutex_lock(&wil->wmi_mutex);
 	bitmap_zero(wil->status, wil_status_last);
 	mutex_unlock(&wil->wmi_mutex);
 
-	mutex_lock(&wil->p2p_wdev_mutex);
-	wil_abort_scan(wil, false);
-	mutex_unlock(&wil->p2p_wdev_mutex);
-
 	wil_mask_irq(wil);
 
 	wmi_event_flush(wil);
@@ -1023,6 +1059,12 @@
 			return rc;
 		}
 
+		if (wil->ps_profile != WMI_PS_PROFILE_TYPE_DEFAULT)
+			wil_ps_update(wil, wil->ps_profile);
+
+		if (wil->tt_data_set)
+			wmi_set_tt_cfg(wil, &wil->tt_data);
+
 		wil_collect_fw_info(wil);
 
 		if (wil->platform_ops.notify) {
diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c
index b067fdf..2e301b6 100644
--- a/drivers/net/wireless/ath/wil6210/pmc.c
+++ b/drivers/net/wireless/ath/wil6210/pmc.c
@@ -200,7 +200,7 @@
 
 release_pmc_skbs:
 	wil_err(wil, "exit on error: Releasing skbs...\n");
-	for (i = 0; pmc->descriptors[i].va && i < num_descriptors; i++) {
+	for (i = 0; i < num_descriptors && pmc->descriptors[i].va; i++) {
 		dma_free_coherent(dev,
 				  descriptor_size,
 				  pmc->descriptors[i].va,
@@ -283,7 +283,7 @@
 		int i;
 
 		for (i = 0;
-		     pmc->descriptors[i].va && i < pmc->num_descriptors; i++) {
+		     i < pmc->num_descriptors && pmc->descriptors[i].va; i++) {
 			dma_free_coherent(dev,
 					  pmc->descriptor_size,
 					  pmc->descriptors[i].va,
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
index 7404b6f..a43cffc 100644
--- a/drivers/net/wireless/ath/wil6210/rx_reorder.c
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -343,8 +343,16 @@
 		wil_err(wil, "BACK requested unsupported ba_policy == 1\n");
 		status = WLAN_STATUS_INVALID_QOS_PARAM;
 	}
-	if (status == WLAN_STATUS_SUCCESS)
-		agg_wsize = wil_agg_size(wil, req_agg_wsize);
+	if (status == WLAN_STATUS_SUCCESS) {
+		if (req_agg_wsize == 0) {
+			wil_dbg_misc(wil, "Suggest BACK wsize %d\n",
+				     WIL_MAX_AGG_WSIZE);
+			agg_wsize = WIL_MAX_AGG_WSIZE;
+		} else {
+			agg_wsize = min_t(u16,
+					  WIL_MAX_AGG_WSIZE, req_agg_wsize);
+		}
+	}
 
 	rc = wmi_addba_rx_resp(wil, cid, tid, dialog_token, status,
 			       agg_amsdu, agg_wsize, agg_timeout);
diff --git a/drivers/net/wireless/ath/wil6210/sysfs.c b/drivers/net/wireless/ath/wil6210/sysfs.c
index 0faa26c..b4c4d09 100644
--- a/drivers/net/wireless/ath/wil6210/sysfs.c
+++ b/drivers/net/wireless/ath/wil6210/sysfs.c
@@ -94,8 +94,184 @@
 		   wil_ftm_txrx_offset_sysfs_show,
 		   wil_ftm_txrx_offset_sysfs_store);
 
+static ssize_t
+wil_tt_sysfs_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct wil6210_priv *wil = dev_get_drvdata(dev);
+	ssize_t len;
+	struct wmi_tt_data tt_data;
+	int i, rc;
+
+	rc = wmi_get_tt_cfg(wil, &tt_data);
+	if (rc)
+		return rc;
+
+	len = snprintf(buf, PAGE_SIZE, "    high      max       critical\n");
+
+	len += snprintf(buf + len, PAGE_SIZE - len, "bb: ");
+	if (tt_data.bb_enabled)
+		for (i = 0; i < WMI_NUM_OF_TT_ZONES; ++i)
+			len += snprintf(buf + len, PAGE_SIZE - len,
+					"%03d-%03d   ",
+					tt_data.bb_zones[i].temperature_high,
+					tt_data.bb_zones[i].temperature_low);
+	else
+		len += snprintf(buf + len, PAGE_SIZE - len, "* disabled *");
+	len += snprintf(buf + len, PAGE_SIZE - len, "\nrf: ");
+	if (tt_data.rf_enabled)
+		for (i = 0; i < WMI_NUM_OF_TT_ZONES; ++i)
+			len += snprintf(buf + len, PAGE_SIZE - len,
+					"%03d-%03d   ",
+					tt_data.rf_zones[i].temperature_high,
+					tt_data.rf_zones[i].temperature_low);
+	else
+		len += snprintf(buf + len, PAGE_SIZE - len, "* disabled *");
+	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+
+	return len;
+}
+
+static ssize_t
+wil_tt_sysfs_store(struct device *dev, struct device_attribute *attr,
+		   const char *buf, size_t count)
+{
+	struct wil6210_priv *wil = dev_get_drvdata(dev);
+	int i, rc = -EINVAL;
+	char *token, *dupbuf, *tmp;
+	struct wmi_tt_data tt_data = {
+		.bb_enabled = 0,
+		.rf_enabled = 0,
+	};
+
+	tmp = kmemdup(buf, count + 1, GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+	tmp[count] = '\0';
+	dupbuf = tmp;
+
+	/* Format for writing is 12 unsigned bytes separated by spaces:
+	 * <bb_z1_h> <bb_z1_l> <bb_z2_h> <bb_z2_l> <bb_z3_h> <bb_z3_l> \
+	 * <rf_z1_h> <rf_z1_l> <rf_z2_h> <rf_z2_l> <rf_z3_h> <rf_z3_l>
+	 * To disable thermal throttling for bb or for rf, use 0 for all
+	 * its six set points.
+	 */
+
+	/* bb */
+	for (i = 0; i < WMI_NUM_OF_TT_ZONES; ++i) {
+		token = strsep(&dupbuf, " ");
+		if (!token)
+			goto out;
+		if (kstrtou8(token, 0, &tt_data.bb_zones[i].temperature_high))
+			goto out;
+		token = strsep(&dupbuf, " ");
+		if (!token)
+			goto out;
+		if (kstrtou8(token, 0, &tt_data.bb_zones[i].temperature_low))
+			goto out;
+
+		if (tt_data.bb_zones[i].temperature_high > 0 ||
+		    tt_data.bb_zones[i].temperature_low > 0)
+			tt_data.bb_enabled = 1;
+	}
+	/* rf */
+	for (i = 0; i < WMI_NUM_OF_TT_ZONES; ++i) {
+		token = strsep(&dupbuf, " ");
+		if (!token)
+			goto out;
+		if (kstrtou8(token, 0, &tt_data.rf_zones[i].temperature_high))
+			goto out;
+		token = strsep(&dupbuf, " ");
+		if (!token)
+			goto out;
+		if (kstrtou8(token, 0, &tt_data.rf_zones[i].temperature_low))
+			goto out;
+
+		if (tt_data.rf_zones[i].temperature_high > 0 ||
+		    tt_data.rf_zones[i].temperature_low > 0)
+			tt_data.rf_enabled = 1;
+	}
+
+	rc = wmi_set_tt_cfg(wil, &tt_data);
+	if (rc)
+		goto out;
+
+	rc = count;
+out:
+	kfree(tmp);
+	return rc;
+}
+
+static DEVICE_ATTR(thermal_throttling, 0644,
+		   wil_tt_sysfs_show, wil_tt_sysfs_store);
+
+static ssize_t
+wil_fst_link_loss_sysfs_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct wil6210_priv *wil = dev_get_drvdata(dev);
+	ssize_t len = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(wil->sta); i++)
+		if (wil->sta[i].status == wil_sta_connected)
+			len += snprintf(buf + len, PAGE_SIZE - len,
+					"[%d] %pM %s\n", i, wil->sta[i].addr,
+					wil->sta[i].fst_link_loss ?
+					"On" : "Off");
+
+	return len;
+}
+
+static ssize_t
+wil_fst_link_loss_sysfs_store(struct device *dev, struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct wil6210_priv *wil = dev_get_drvdata(dev);
+	u8 addr[ETH_ALEN];
+	char *token, *dupbuf, *tmp;
+	int rc = -EINVAL;
+	bool fst_link_loss;
+
+	tmp = kmemdup(buf, count + 1, GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+
+	tmp[count] = '\0';
+	dupbuf = tmp;
+
+	token = strsep(&dupbuf, " ");
+	if (!token)
+		goto out;
+
+	/* mac address */
+	if (sscanf(token, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
+		   &addr[0], &addr[1], &addr[2],
+		   &addr[3], &addr[4], &addr[5]) != 6)
+		goto out;
+
+	/* On/Off */
+	if (strtobool(dupbuf, &fst_link_loss))
+		goto out;
+
+	wil_dbg_misc(wil, "set [%pM] with %d\n", addr, fst_link_loss);
+
+	rc = wmi_link_maintain_cfg_write(wil, addr, fst_link_loss);
+	if (!rc)
+		rc = count;
+
+out:
+	kfree(tmp);
+	return rc;
+}
+
+static DEVICE_ATTR(fst_link_loss, 0644,
+		   wil_fst_link_loss_sysfs_show,
+		   wil_fst_link_loss_sysfs_store);
+
 static struct attribute *wil6210_sysfs_entries[] = {
 	&dev_attr_ftm_txrx_offset.attr,
+	&dev_attr_thermal_throttling.attr,
+	&dev_attr_fst_link_loss.attr,
 	NULL
 };
 
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 8b5411e..35bbf3a 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -37,6 +37,10 @@
 module_param(rx_align_2, bool, 0444);
 MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");
 
+bool rx_large_buf;
+module_param(rx_large_buf, bool, 0444);
+MODULE_PARM_DESC(rx_large_buf, " allocate 8KB RX buffers, default - no");
+
 static inline uint wil_rx_snaplen(void)
 {
 	return rx_align_2 ? 6 : 0;
@@ -255,7 +259,7 @@
 			       u32 i, int headroom)
 {
 	struct device *dev = wil_to_dev(wil);
-	unsigned int sz = mtu_max + ETH_HLEN + wil_rx_snaplen();
+	unsigned int sz = wil->rx_buf_len + ETH_HLEN + wil_rx_snaplen();
 	struct vring_rx_desc dd, *d = &dd;
 	volatile struct vring_rx_desc *_d = &vring->va[i].rx;
 	dma_addr_t pa;
@@ -419,7 +423,7 @@
 	struct sk_buff *skb;
 	dma_addr_t pa;
 	unsigned int snaplen = wil_rx_snaplen();
-	unsigned int sz = mtu_max + ETH_HLEN + snaplen;
+	unsigned int sz = wil->rx_buf_len + ETH_HLEN + snaplen;
 	u16 dmalen;
 	u8 ftype;
 	int cid;
@@ -780,6 +784,20 @@
 	wil_rx_refill(wil, v->size);
 }
 
+static void wil_rx_buf_len_init(struct wil6210_priv *wil)
+{
+	wil->rx_buf_len = rx_large_buf ?
+		WIL_MAX_ETH_MTU : TXRX_BUF_LEN_DEFAULT - WIL_MAX_MPDU_OVERHEAD;
+	if (mtu_max > wil->rx_buf_len) {
+		/* do not allow RX buffers to be smaller than mtu_max, for
+		 * backward compatibility (mtu_max parameter was also used
+		 * to support receiving large packets)
+		 */
+		wil_info(wil, "Override RX buffer to mtu_max(%d)\n", mtu_max);
+		wil->rx_buf_len = mtu_max;
+	}
+}
+
 int wil_rx_init(struct wil6210_priv *wil, u16 size)
 {
 	struct vring *vring = &wil->vring_rx;
@@ -792,6 +810,8 @@
 		return -EINVAL;
 	}
 
+	wil_rx_buf_len_init(wil);
+
 	vring->size = size;
 	rc = wil_vring_alloc(wil, vring);
 	if (rc)
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 734449d..d05bb36 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -33,6 +33,7 @@
 extern int agg_wsize;
 extern u32 vring_idle_trsh;
 extern bool rx_align_2;
+extern bool rx_large_buf;
 extern bool debug_fw;
 extern bool disable_ap_sme;
 
@@ -531,6 +532,7 @@
 	struct wil_tid_crypto_rx tid_crypto_rx[WIL_STA_TID_NUM];
 	struct wil_tid_crypto_rx group_crypto_rx;
 	u8 aid; /* 1-254; 0 if unknown/not reported */
+	bool fst_link_loss;
 };
 
 enum {
@@ -661,6 +663,7 @@
 	struct work_struct probe_client_worker;
 	/* DMA related */
 	struct vring vring_rx;
+	unsigned int rx_buf_len;
 	struct vring vring_tx[WIL6210_MAX_TX_RINGS];
 	struct vring_tx_data vring_tx_data[WIL6210_MAX_TX_RINGS];
 	u8 vring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */
@@ -696,7 +699,11 @@
 	/* High Access Latency Policy voting */
 	struct wil_halp halp;
 
+	enum wmi_ps_profile_type ps_profile;
+
 	struct wil_ftm_priv ftm;
+	bool tt_data_set;
+	struct wmi_tt_data tt_data;
 
 #ifdef CONFIG_PM
 #ifdef CONFIG_PM_SLEEP
@@ -813,6 +820,8 @@
 void wil_if_remove(struct wil6210_priv *wil);
 int wil_priv_init(struct wil6210_priv *wil);
 void wil_priv_deinit(struct wil6210_priv *wil);
+int wil_ps_update(struct wil6210_priv *wil,
+		  enum wmi_ps_profile_type ps_profile);
 int wil_reset(struct wil6210_priv *wil, bool no_fw);
 void wil_fw_error_recovery(struct wil6210_priv *wil);
 void wil_set_recovery_state(struct wil6210_priv *wil, int state);
@@ -861,6 +870,8 @@
 int wmi_set_mgmt_retry(struct wil6210_priv *wil, u8 retry_short);
 int wmi_get_mgmt_retry(struct wil6210_priv *wil, u8 *retry_short);
 int wmi_new_sta(struct wil6210_priv *wil, const u8 *mac, u8 aid);
+int wmi_set_tt_cfg(struct wil6210_priv *wil, struct wmi_tt_data *tt_data);
+int wmi_get_tt_cfg(struct wil6210_priv *wil, struct wmi_tt_data *tt_data);
 int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid,
 			 u8 dialog_token, __le16 ba_param_set,
 			 __le16 ba_timeout, __le16 ba_seq_ctrl);
@@ -983,5 +994,9 @@
 void wil_aoa_evt_meas(struct wil6210_priv *wil,
 		      struct wmi_aoa_meas_event *evt,
 		      int len);
+/* link loss */
+int wmi_link_maintain_cfg_write(struct wil6210_priv *wil,
+				const u8 *addr,
+				bool fst_link_loss);
 
 #endif /* __WIL6210_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 31d6ab9..8e1825f 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -519,16 +519,16 @@
 		assoc_resp_ielen = 0;
 	}
 
-	mutex_lock(&wil->mutex);
 	if (test_bit(wil_status_resetting, wil->status) ||
 	    !test_bit(wil_status_fwready, wil->status)) {
 		wil_err(wil, "status_resetting, cancel connect event, CID %d\n",
 			evt->cid);
-		mutex_unlock(&wil->mutex);
 		/* no need for cleanup, wil_reset will do that */
 		return;
 	}
 
+	mutex_lock(&wil->mutex);
+
 	if ((wdev->iftype == NL80211_IFTYPE_STATION) ||
 	    (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) {
 		if (!test_bit(wil_status_fwconnecting, wil->status)) {
@@ -580,8 +580,7 @@
 			cfg80211_connect_bss(ndev, evt->bssid, wil->bss,
 					     assoc_req_ie, assoc_req_ielen,
 					     assoc_resp_ie, assoc_resp_ielen,
-					     WLAN_STATUS_SUCCESS, GFP_KERNEL,
-					     NL80211_TIMEOUT_UNSPECIFIED);
+					     WLAN_STATUS_SUCCESS, GFP_KERNEL);
 		}
 		wil->bss = NULL;
 	} else if ((wdev->iftype == NL80211_IFTYPE_AP) ||
@@ -632,6 +631,13 @@
 
 	wil->sinfo_gen++;
 
+	if (test_bit(wil_status_resetting, wil->status) ||
+	    !test_bit(wil_status_fwready, wil->status)) {
+		wil_err(wil, "status_resetting, cancel disconnect event\n");
+		/* no need for cleanup, wil_reset will do that */
+		return;
+	}
+
 	mutex_lock(&wil->mutex);
 	wil6210_disconnect(wil, evt->bssid, reason_code, true);
 	mutex_unlock(&wil->mutex);
@@ -1430,7 +1436,8 @@
 	struct wmi_cfg_rx_chain_cmd cmd = {
 		.action = WMI_RX_CHAIN_ADD,
 		.rx_sw_ring = {
-			.max_mpdu_size = cpu_to_le16(wil_mtu2macbuf(mtu_max)),
+			.max_mpdu_size = cpu_to_le16(
+				wil_mtu2macbuf(wil->rx_buf_len)),
 			.ring_mem_base = cpu_to_le64(vring->pa),
 			.ring_size = cpu_to_le16(vring->size),
 		},
@@ -1769,6 +1776,67 @@
 	return rc;
 }
 
+int wmi_set_tt_cfg(struct wil6210_priv *wil, struct wmi_tt_data *tt_data)
+{
+	int rc;
+	struct wmi_set_thermal_throttling_cfg_cmd cmd = {
+		.tt_data = *tt_data,
+	};
+	struct {
+		struct wmi_cmd_hdr wmi;
+		struct wmi_set_thermal_throttling_cfg_event evt;
+	} __packed reply;
+
+	if (!test_bit(WMI_FW_CAPABILITY_THERMAL_THROTTLING,
+		      wil->fw_capabilities))
+		return -EOPNOTSUPP;
+
+	memset(&reply, 0, sizeof(reply));
+	rc = wmi_call(wil, WMI_SET_THERMAL_THROTTLING_CFG_CMDID, &cmd,
+		      sizeof(cmd), WMI_SET_THERMAL_THROTTLING_CFG_EVENTID,
+		      &reply, sizeof(reply), 100);
+	if (rc) {
+		wil_err(wil, "failed to set thermal throttling\n");
+		return rc;
+	}
+	if (reply.evt.status) {
+		wil_err(wil, "set thermal throttling failed, error %d\n",
+			reply.evt.status);
+		return -EIO;
+	}
+
+	wil->tt_data = *tt_data;
+	wil->tt_data_set = true;
+
+	return 0;
+}
+
+int wmi_get_tt_cfg(struct wil6210_priv *wil, struct wmi_tt_data *tt_data)
+{
+	int rc;
+	struct {
+		struct wmi_cmd_hdr wmi;
+		struct wmi_get_thermal_throttling_cfg_event evt;
+	} __packed reply;
+
+	if (!test_bit(WMI_FW_CAPABILITY_THERMAL_THROTTLING,
+		      wil->fw_capabilities))
+		return -EOPNOTSUPP;
+
+	rc = wmi_call(wil, WMI_GET_THERMAL_THROTTLING_CFG_CMDID, NULL, 0,
+		      WMI_GET_THERMAL_THROTTLING_CFG_EVENTID, &reply,
+		      sizeof(reply), 100);
+	if (rc) {
+		wil_err(wil, "failed to get thermal throttling\n");
+		return rc;
+	}
+
+	if (tt_data)
+		*tt_data = reply.evt.tt_data;
+
+	return 0;
+}
+
 void wmi_event_flush(struct wil6210_priv *wil)
 {
 	ulong flags;
@@ -1786,6 +1854,61 @@
 	spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
 }
 
+int wmi_link_maintain_cfg_write(struct wil6210_priv *wil,
+				const u8 *addr,
+				bool fst_link_loss)
+{
+	int rc;
+	int cid = wil_find_cid(wil, addr);
+	u32 cfg_type;
+	struct wmi_link_maintain_cfg_write_cmd cmd;
+	struct {
+		struct wmi_cmd_hdr wmi;
+		struct wmi_link_maintain_cfg_write_done_event evt;
+	} __packed reply;
+
+	if (cid < 0)
+		return cid;
+
+	switch (wil->wdev->iftype) {
+	case NL80211_IFTYPE_STATION:
+		cfg_type = fst_link_loss ?
+			   WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_FST_STA :
+			   WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_NORMAL_STA;
+		break;
+	case NL80211_IFTYPE_AP:
+		cfg_type = fst_link_loss ?
+			   WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_FST_AP :
+			   WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_NORMAL_AP;
+		break;
+	default:
+		wil_err(wil, "Unsupported for iftype %d", wil->wdev->iftype);
+		return -EINVAL;
+	}
+
+	wil_dbg_misc(wil, "Setting cid:%d with cfg_type:%d\n", cid, cfg_type);
+
+	cmd.cfg_type = cpu_to_le32(cfg_type);
+	cmd.cid = cpu_to_le32(cid);
+
+	reply.evt.status = cpu_to_le32(WMI_FW_STATUS_FAILURE);
+
+	rc = wmi_call(wil, WMI_LINK_MAINTAIN_CFG_WRITE_CMDID, &cmd, sizeof(cmd),
+		      WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID, &reply,
+		      sizeof(reply), 250);
+	if (rc) {
+		wil_err(wil, "Failed to %s FST link loss",
+			fst_link_loss ? "enable" : "disable");
+	} else if (reply.evt.status == WMI_FW_STATUS_SUCCESS) {
+		wil->sta[cid].fst_link_loss = fst_link_loss;
+	} else {
+		wil_err(wil, "WMI_LINK_MAINTAIN_CFG_WRITE_CMDID returned status %d",
+			reply.evt.status);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
 static bool wmi_evt_call_handler(struct wil6210_priv *wil, int id,
 				 void *d, int len)
 {
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 7c9fee5..f7f5f4f 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -58,6 +58,7 @@
 	WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT	= 3,
 	WMI_FW_CAPABILITY_DISABLE_AP_SME	= 4,
 	WMI_FW_CAPABILITY_WMI_ONLY		= 5,
+	WMI_FW_CAPABILITY_THERMAL_THROTTLING	= 7,
 	WMI_FW_CAPABILITY_MAX,
 };
 
@@ -142,8 +143,6 @@
 	WMI_MAINTAIN_RESUME_CMDID			= 0x851,
 	WMI_RS_MGMT_CMDID				= 0x852,
 	WMI_RF_MGMT_CMDID				= 0x853,
-	WMI_THERMAL_THROTTLING_CTRL_CMDID		= 0x854,
-	WMI_THERMAL_THROTTLING_GET_STATUS_CMDID		= 0x855,
 	WMI_OTP_READ_CMDID				= 0x856,
 	WMI_OTP_WRITE_CMDID				= 0x857,
 	WMI_LED_CFG_CMDID				= 0x858,
@@ -192,6 +191,8 @@
 	WMI_GET_MGMT_RETRY_LIMIT_CMDID			= 0x931,
 	WMI_NEW_STA_CMDID				= 0x935,
 	WMI_DEL_STA_CMDID				= 0x936,
+	WMI_SET_THERMAL_THROTTLING_CFG_CMDID		= 0x940,
+	WMI_GET_THERMAL_THROTTLING_CFG_CMDID		= 0x941,
 	WMI_TOF_SESSION_START_CMDID			= 0x991,
 	WMI_TOF_GET_CAPABILITIES_CMDID			= 0x992,
 	WMI_TOF_SET_LCR_CMDID				= 0x993,
@@ -438,16 +439,6 @@
 	__le32 rf_mgmt_type;
 } __packed;
 
-/* WMI_THERMAL_THROTTLING_CTRL_CMDID */
-#define THERMAL_THROTTLING_USE_DEFAULT_MAX_TXOP_LENGTH	(0xFFFFFFFF)
-
-/* WMI_THERMAL_THROTTLING_CTRL_CMDID */
-struct wmi_thermal_throttling_ctrl_cmd {
-	__le32 time_on_usec;
-	__le32 time_off_usec;
-	__le32 max_txop_length_usec;
-} __packed;
-
 /* WMI_RF_RX_TEST_CMDID */
 struct wmi_rf_rx_test_cmd {
 	__le32 sector;
@@ -549,7 +540,7 @@
 	u8 hidden_ssid;
 	u8 is_go;
 	u8 reserved0[5];
-	/* abft_len override if non-0 */
+	/* A-BFT length override if non-0 */
 	u8 abft_len;
 	u8 disable_ap_sme;
 	u8 network_type;
@@ -910,6 +901,39 @@
 	u8 reserved[3];
 } __packed;
 
+/* Zones: HIGH, MAX, CRITICAL */
+#define WMI_NUM_OF_TT_ZONES	(3)
+
+struct wmi_tt_zone_limits {
+	/* Above this temperature this zone is active */
+	u8 temperature_high;
+	/* Below this temperature the adjacent lower zone is active */
+	u8 temperature_low;
+	u8 reserved[2];
+} __packed;
+
+/* Struct used for both configuration and status commands of thermal
+ * throttling
+ */
+struct wmi_tt_data {
+	/* Enable/Disable TT algorithm for baseband */
+	u8 bb_enabled;
+	u8 reserved0[3];
+	/* Define zones for baseband */
+	struct wmi_tt_zone_limits bb_zones[WMI_NUM_OF_TT_ZONES];
+	/* Enable/Disable TT algorithm for radio */
+	u8 rf_enabled;
+	u8 reserved1[3];
+	/* Define zones for all radio chips */
+	struct wmi_tt_zone_limits rf_zones[WMI_NUM_OF_TT_ZONES];
+} __packed;
+
+/* WMI_SET_THERMAL_THROTTLING_CFG_CMDID */
+struct wmi_set_thermal_throttling_cfg_cmd {
+	/* Command data */
+	struct wmi_tt_data tt_data;
+} __packed;
+
 /* WMI_NEW_STA_CMDID */
 struct wmi_new_sta_cmd {
 	u8 dst_mac[WMI_MAC_LEN];
@@ -1040,7 +1064,6 @@
 	WMI_BF_RXSS_MGMT_DONE_EVENTID			= 0x1839,
 	WMI_RS_MGMT_DONE_EVENTID			= 0x1852,
 	WMI_RF_MGMT_STATUS_EVENTID			= 0x1853,
-	WMI_THERMAL_THROTTLING_STATUS_EVENTID		= 0x1855,
 	WMI_BF_SM_MGMT_DONE_EVENTID			= 0x1838,
 	WMI_RX_MGMT_PACKET_EVENTID			= 0x1840,
 	WMI_TX_MGMT_PACKET_EVENTID			= 0x1841,
@@ -1090,6 +1113,8 @@
 	WMI_BRP_SET_ANT_LIMIT_EVENTID			= 0x1924,
 	WMI_SET_MGMT_RETRY_LIMIT_EVENTID		= 0x1930,
 	WMI_GET_MGMT_RETRY_LIMIT_EVENTID		= 0x1931,
+	WMI_SET_THERMAL_THROTTLING_CFG_EVENTID		= 0x1940,
+	WMI_GET_THERMAL_THROTTLING_CFG_EVENTID		= 0x1941,
 	WMI_TOF_SESSION_END_EVENTID			= 0x1991,
 	WMI_TOF_GET_CAPABILITIES_EVENTID		= 0x1992,
 	WMI_TOF_SET_LCR_EVENTID				= 0x1993,
@@ -1133,13 +1158,6 @@
 	__le32 rf_status;
 } __packed;
 
-/* WMI_THERMAL_THROTTLING_STATUS_EVENTID */
-struct wmi_thermal_throttling_status_event {
-	__le32 time_on_usec;
-	__le32 time_off_usec;
-	__le32 max_txop_length_usec;
-} __packed;
-
 /* WMI_GET_STATUS_DONE_EVENTID */
 struct wmi_get_status_done_event {
 	__le32 is_associated;
@@ -2206,6 +2224,19 @@
 	__le32 aoa_supported_types;
 } __packed;
 
+/* WMI_SET_THERMAL_THROTTLING_CFG_EVENTID */
+struct wmi_set_thermal_throttling_cfg_event {
+	/* wmi_fw_status */
+	u8 status;
+	u8 reserved[3];
+} __packed;
+
+/* WMI_GET_THERMAL_THROTTLING_CFG_EVENTID */
+struct wmi_get_thermal_throttling_cfg_event {
+	/* Status data */
+	struct wmi_tt_data tt_data;
+} __packed;
+
 enum wmi_tof_session_end_status {
 	WMI_TOF_SESSION_END_NO_ERROR		= 0x00,
 	WMI_TOF_SESSION_END_FAIL		= 0x01,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 5eaac13..f877301 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -198,7 +198,7 @@
 	int ret;
 	struct brcmf_if *ifp = netdev_priv(ndev);
 	struct brcmf_pub *drvr = ifp->drvr;
-	struct ethhdr *eh = (struct ethhdr *)(skb->data);
+	struct ethhdr *eh;
 
 	brcmf_dbg(DATA, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx);
 
@@ -211,22 +211,13 @@
 		goto done;
 	}
 
-	/* Make sure there's enough room for any header */
-	if (skb_headroom(skb) < drvr->hdrlen) {
-		struct sk_buff *skb2;
-
-		brcmf_dbg(INFO, "%s: insufficient headroom\n",
+	/* Make sure there's enough writable headroom */
+	ret = skb_cow_head(skb, drvr->hdrlen);
+	if (ret < 0) {
+		brcmf_err("%s: skb_cow_head failed\n",
 			  brcmf_ifname(ifp));
-		drvr->bus_if->tx_realloc++;
-		skb2 = skb_realloc_headroom(skb, drvr->hdrlen);
 		dev_kfree_skb(skb);
-		skb = skb2;
-		if (skb == NULL) {
-			brcmf_err("%s: skb_realloc_headroom failed\n",
-				  brcmf_ifname(ifp));
-			ret = -ENOMEM;
-			goto done;
-		}
+		goto done;
 	}
 
 	/* validate length for ether packet */
@@ -236,6 +227,8 @@
 		goto done;
 	}
 
+	eh = (struct ethhdr *)(skb->data);
+
 	if (eh->h_proto == htons(ETH_P_PAE))
 		atomic_inc(&ifp->pend_8021x_cnt);
 
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-6000.c b/drivers/net/wireless/intel/iwlwifi/iwl-6000.c
index 0b9f6a7..39335b7 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-6000.c
@@ -371,4 +371,4 @@
 MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index b88e204..207d8ae 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1262,12 +1262,15 @@
 	iwl_trans_d3_suspend(mvm->trans, test, !unified_image);
  out:
 	if (ret < 0) {
-		iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
-		if (mvm->restart_fw > 0) {
-			mvm->restart_fw--;
-			ieee80211_restart_hw(mvm->hw);
-		}
 		iwl_mvm_free_nd(mvm);
+
+		if (!unified_image) {
+			iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
+			if (mvm->restart_fw > 0) {
+				mvm->restart_fw--;
+				ieee80211_restart_hw(mvm->hw);
+			}
+		}
 	}
  out_noreset:
 	mutex_unlock(&mvm->mutex);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 7b7d2a1..0bda91f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1056,6 +1056,8 @@
 
 	if (ret)
 		return ret;
+	if (count == 0)
+		return 0;
 
 	iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, buf,
 			       (count - 1), NULL);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
index d89d0a1..700d244 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
@@ -784,12 +784,16 @@
 			struct iwl_fw_error_dump_paging *paging;
 			struct page *pages =
 				mvm->fw_paging_db[i].fw_paging_block;
+			dma_addr_t addr = mvm->fw_paging_db[i].fw_paging_phys;
 
 			dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
 			dump_data->len = cpu_to_le32(sizeof(*paging) +
 						     PAGING_BLOCK_SIZE);
 			paging = (void *)dump_data->data;
 			paging->index = cpu_to_le32(i);
+			dma_sync_single_for_cpu(mvm->trans->dev, addr,
+						PAGING_BLOCK_SIZE,
+						DMA_BIDIRECTIONAL);
 			memcpy(paging->data, page_address(pages),
 			       PAGING_BLOCK_SIZE);
 			dump_data = iwl_fw_error_next_data(dump_data);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 8720663..2ec3a91 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -214,6 +214,10 @@
 	memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
 	       image->sec[sec_idx].data,
 	       mvm->fw_paging_db[0].fw_paging_size);
+	dma_sync_single_for_device(mvm->trans->dev,
+				   mvm->fw_paging_db[0].fw_paging_phys,
+				   mvm->fw_paging_db[0].fw_paging_size,
+				   DMA_BIDIRECTIONAL);
 
 	IWL_DEBUG_FW(mvm,
 		     "Paging: copied %d CSS bytes to first block\n",
@@ -228,9 +232,16 @@
 	 * loop stop at num_of_paging_blk since that last block is not full.
 	 */
 	for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
-		memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
+		struct iwl_fw_paging *block = &mvm->fw_paging_db[idx];
+
+		memcpy(page_address(block->fw_paging_block),
 		       image->sec[sec_idx].data + offset,
-		       mvm->fw_paging_db[idx].fw_paging_size);
+		       block->fw_paging_size);
+		dma_sync_single_for_device(mvm->trans->dev,
+					   block->fw_paging_phys,
+					   block->fw_paging_size,
+					   DMA_BIDIRECTIONAL);
+
 
 		IWL_DEBUG_FW(mvm,
 			     "Paging: copied %d paging bytes to block %d\n",
@@ -242,9 +253,15 @@
 
 	/* copy the last paging block */
 	if (mvm->num_of_pages_in_last_blk > 0) {
-		memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
+		struct iwl_fw_paging *block = &mvm->fw_paging_db[idx];
+
+		memcpy(page_address(block->fw_paging_block),
 		       image->sec[sec_idx].data + offset,
 		       FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);
+		dma_sync_single_for_device(mvm->trans->dev,
+					   block->fw_paging_phys,
+					   block->fw_paging_size,
+					   DMA_BIDIRECTIONAL);
 
 		IWL_DEBUG_FW(mvm,
 			     "Paging: copied %d pages in the last block %d\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 6c802ce..a481eb4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -409,7 +409,7 @@
 
 	/* ignore nssn smaller than head sn - this can happen due to timeout */
 	if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
-		return;
+		goto set_timer;
 
 	while (iwl_mvm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
 		int index = ssn % reorder_buf->buf_size;
@@ -432,6 +432,7 @@
 	}
 	reorder_buf->head_sn = nssn;
 
+set_timer:
 	if (reorder_buf->num_stored && !reorder_buf->removed) {
 		u16 index = reorder_buf->head_sn % reorder_buf->buf_size;
 
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 52de3c6..e64aeb4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1466,6 +1466,7 @@
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+	u8 sta_id = mvm_sta->sta_id;
 	int ret;
 
 	lockdep_assert_held(&mvm->mutex);
@@ -1474,7 +1475,7 @@
 		kfree(mvm_sta->dup_data);
 
 	if ((vif->type == NL80211_IFTYPE_STATION &&
-	     mvmvif->ap_sta_id == mvm_sta->sta_id) ||
+	     mvmvif->ap_sta_id == sta_id) ||
 	    iwl_mvm_is_dqa_supported(mvm)){
 		ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
 		if (ret)
@@ -1497,6 +1498,15 @@
 			iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
 
 			/*
+			 * If pending_frames is set at this point - it must be
+			 * a driver internal logic error, since the queues are
+			 * empty and were removed successfully.
+			 * Warn on it but set it to 0 anyway to avoid the station
+			 * not being removed later in the function.
+			 */
+			WARN_ON(atomic_xchg(&mvm->pending_frames[sta_id], 0));
+
+			/*
 			 * If no traffic has gone through the reserved TXQ - it
 			 * is still marked as IWL_MVM_QUEUE_RESERVED, and
 			 * should be manually marked as free again
@@ -1506,7 +1516,7 @@
 			if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
 				 (*status != IWL_MVM_QUEUE_FREE),
 				 "sta_id %d reserved txq %d status %d",
-				 mvm_sta->sta_id, reserved_txq, *status)) {
+				 sta_id, reserved_txq, *status)) {
 				spin_unlock_bh(&mvm->queue_info_lock);
 				return -EINVAL;
 			}
@@ -1516,7 +1526,7 @@
 		}
 
 		if (vif->type == NL80211_IFTYPE_STATION &&
-		    mvmvif->ap_sta_id == mvm_sta->sta_id) {
+		    mvmvif->ap_sta_id == sta_id) {
 			/* if associated - we can't remove the AP STA now */
 			if (vif->bss_conf.assoc)
 				return ret;
@@ -1525,7 +1535,7 @@
 			mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
 
 			/* clear d0i3_ap_sta_id if no longer relevant */
-			if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
+			if (mvm->d0i3_ap_sta_id == sta_id)
 				mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
 		}
 	}
@@ -1534,7 +1544,7 @@
 	 * This shouldn't happen - the TDLS channel switch should be canceled
 	 * before the STA is removed.
 	 */
-	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == mvm_sta->sta_id)) {
+	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
 		mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
 		cancel_delayed_work(&mvm->tdls_cs.dwork);
 	}
@@ -1544,21 +1554,20 @@
 	 * calls the drain worker.
 	 */
 	spin_lock_bh(&mvm_sta->lock);
+
 	/*
 	 * There are frames pending on the AC queues for this station.
 	 * We need to wait until all the frames are drained...
 	 */
-	if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) {
-		rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
+	if (atomic_read(&mvm->pending_frames[sta_id])) {
+		rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id],
 				   ERR_PTR(-EBUSY));
 		spin_unlock_bh(&mvm_sta->lock);
 
 		/* disable TDLS sta queues on drain complete */
 		if (sta->tdls) {
-			mvm->tfd_drained[mvm_sta->sta_id] =
-							mvm_sta->tfd_queue_msk;
-			IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n",
-				       mvm_sta->sta_id);
+			mvm->tfd_drained[sta_id] = mvm_sta->tfd_queue_msk;
+			IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n", sta_id);
 		}
 
 		ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 66957ac..0556d13 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -202,7 +202,6 @@
 			struct iwl_tx_cmd *tx_cmd,
 			struct ieee80211_tx_info *info, u8 sta_id)
 {
-	struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_hdr *hdr = (void *)skb->data;
 	__le16 fc = hdr->frame_control;
 	u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
@@ -284,9 +283,8 @@
 		tx_flags |= TX_CMD_FLG_WRITE_TX_POWER;
 
 	tx_cmd->tx_flags = cpu_to_le32(tx_flags);
-	/* Total # bytes to be transmitted */
-	tx_cmd->len = cpu_to_le16((u16)skb->len +
-		(uintptr_t)skb_info->driver_data[0]);
+	/* Total # bytes to be transmitted - PCIe code will adjust for A-MSDU */
+	tx_cmd->len = cpu_to_le16((u16)skb->len);
 	tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
 	tx_cmd->sta_id = sta_id;
 
@@ -459,7 +457,6 @@
 		      struct ieee80211_sta *sta, u8 sta_id)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
 	struct iwl_device_cmd *dev_cmd;
 	struct iwl_tx_cmd *tx_cmd;
 
@@ -479,12 +476,18 @@
 
 	iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);
 
+	return dev_cmd;
+}
+
+static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
+				       struct iwl_device_cmd *cmd)
+{
+	struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
+
 	memset(&skb_info->status, 0, sizeof(skb_info->status));
 	memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data));
 
-	skb_info->driver_data[1] = dev_cmd;
-
-	return dev_cmd;
+	skb_info->driver_data[1] = cmd;
 }
 
 static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
@@ -550,9 +553,6 @@
 			  info.hw_queue != info.control.vif->cab_queue)))
 		return -1;
 
-	/* This holds the amsdu headers length */
-	skb_info->driver_data[0] = (void *)(uintptr_t)0;
-
 	queue = info.hw_queue;
 
 	/*
@@ -563,9 +563,10 @@
 	 * (this is not possible for unicast packets as a TLDS discovery
 	 * response are sent without a station entry); otherwise use the
 	 * AUX station.
-	 * In DQA mode, if vif is of type STATION and frames are not multicast,
-	 * they should be sent from the BSS queue. For example, TDLS setup
-	 * frames should be sent on this queue, as they go through the AP.
+	 * In DQA mode, if vif is of type STATION and frames are not multicast
+	 * or offchannel, they should be sent from the BSS queue.
+	 * For example, TDLS setup frames should be sent on this queue,
+	 * as they go through the AP.
 	 */
 	sta_id = mvm->aux_sta.sta_id;
 	if (info.control.vif) {
@@ -587,7 +588,8 @@
 			if (ap_sta_id != IWL_MVM_STATION_COUNT)
 				sta_id = ap_sta_id;
 		} else if (iwl_mvm_is_dqa_supported(mvm) &&
-			   info.control.vif->type == NL80211_IFTYPE_STATION) {
+			   info.control.vif->type == NL80211_IFTYPE_STATION &&
+			   queue != mvm->aux_queue) {
 			queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
 		}
 	}
@@ -598,6 +600,9 @@
 	if (!dev_cmd)
 		return -1;
 
+	/* From now on, we cannot access info->control */
+	iwl_mvm_skb_prepare_status(skb, dev_cmd);
+
 	tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
 
 	/* Copy MAC header from skb into command buffer */
@@ -634,7 +639,7 @@
 	unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len;
 	bool ipv4 = (skb->protocol == htons(ETH_P_IP));
 	u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
-	u16 amsdu_add, snap_ip_tcp, pad, i = 0;
+	u16 snap_ip_tcp, pad, i = 0;
 	unsigned int dbg_max_amsdu_len;
 	netdev_features_t netdev_features = NETIF_F_CSUM_MASK | NETIF_F_SG;
 	u8 *qc, tid, txf;
@@ -736,21 +741,6 @@
 
 	/* This skb fits in one single A-MSDU */
 	if (num_subframes * mss >= tcp_payload_len) {
-		struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
-
-		/*
-		 * Compute the length of all the data added for the A-MSDU.
-		 * This will be used to compute the length to write in the TX
-		 * command. We have: SNAP + IP + TCP for n -1 subframes and
-		 * ETH header for n subframes. Note that the original skb
-		 * already had one set of SNAP / IP / TCP headers.
-		 */
-		num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
-		amsdu_add = num_subframes * sizeof(struct ethhdr) +
-			(num_subframes - 1) * (snap_ip_tcp + pad);
-		/* This holds the amsdu headers length */
-		skb_info->driver_data[0] = (void *)(uintptr_t)amsdu_add;
-
 		__skb_queue_tail(mpdus_skb, skb);
 		return 0;
 	}
@@ -789,14 +779,6 @@
 			ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);
 
 		if (tcp_payload_len > mss) {
-			struct ieee80211_tx_info *skb_info =
-				IEEE80211_SKB_CB(tmp);
-
-			num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
-			amsdu_add = num_subframes * sizeof(struct ethhdr) +
-				(num_subframes - 1) * (snap_ip_tcp + pad);
-			skb_info->driver_data[0] =
-				(void *)(uintptr_t)amsdu_add;
 			skb_shinfo(tmp)->gso_size = mss;
 		} else {
 			qc = ieee80211_get_qos_ctl((void *)tmp->data);
@@ -908,7 +890,6 @@
 		goto drop;
 
 	tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
-	/* From now on, we cannot access info->control */
 
 	/*
 	 * we handle that entirely ourselves -- for uAPSD the firmware
@@ -1015,6 +996,9 @@
 	IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
 		     tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));
 
+	/* From now on, we cannot access info->control */
+	iwl_mvm_skb_prepare_status(skb, dev_cmd);
+
 	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
 		goto drop_unlock_sta;
 
@@ -1024,7 +1008,10 @@
 	spin_unlock(&mvmsta->lock);
 
 	/* Increase pending frames count if this isn't AMPDU */
-	if (!is_ampdu)
+	if ((iwl_mvm_is_dqa_supported(mvm) &&
+	     mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_ON &&
+	     mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_STARTING) ||
+	    (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu))
 		atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
 
 	return 0;
@@ -1040,7 +1027,6 @@
 		   struct ieee80211_sta *sta)
 {
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-	struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_tx_info info;
 	struct sk_buff_head mpdus_skbs;
 	unsigned int payload_len;
@@ -1054,9 +1040,6 @@
 
 	memcpy(&info, skb->cb, sizeof(info));
 
-	/* This holds the amsdu headers length */
-	skb_info->driver_data[0] = (void *)(uintptr_t)0;
-
 	if (!skb_is_gso(skb))
 		return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
 
@@ -1295,8 +1278,6 @@
 
 		memset(&info->status, 0, sizeof(info->status));
 
-		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
-
 		/* inform mac80211 about what happened with the frame */
 		switch (status & TX_STATUS_MSK) {
 		case TX_STATUS_SUCCESS:
@@ -1319,10 +1300,11 @@
 			(void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate);
 
 		/* Single frame failure in an AMPDU queue => send BAR */
-		if (txq_id >= mvm->first_agg_queue &&
+		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
 		    !(info->flags & IEEE80211_TX_STAT_ACK) &&
 		    !(info->flags & IEEE80211_TX_STAT_TX_FILTERED))
 			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
+		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
 
 		/* W/A FW bug: seq_ctl is wrong when the status isn't success */
 		if (status != TX_STATUS_SUCCESS) {
@@ -1357,7 +1339,7 @@
 		ieee80211_tx_status(mvm->hw, skb);
 	}
 
-	if (txq_id >= mvm->first_agg_queue) {
+	if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue) {
 		/* If this is an aggregation queue, we use the ssn since:
 		 * ssn = wifi seq_num % 256.
 		 * The seq_ctl is the sequence control of the packet to which
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index cac6d99..e3cede9 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -279,7 +279,7 @@
 	bool frozen;
 	u8 active;
 	bool ampdu;
-	bool block;
+	int block;
 	unsigned long wd_timeout;
 	struct sk_buff_head overflow_q;
 
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index ae95533..10ef44e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -868,17 +868,13 @@
 				      int cpu,
 				      int *first_ucode_section)
 {
-	int shift_param;
 	int i, ret = 0;
 	u32 last_read_idx = 0;
 
-	if (cpu == 1) {
-		shift_param = 0;
+	if (cpu == 1)
 		*first_ucode_section = 0;
-	} else {
-		shift_param = 16;
+	else
 		(*first_ucode_section)++;
-	}
 
 	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
 		last_read_idx = i;
@@ -2933,16 +2929,12 @@
 				       PCIE_LINK_STATE_CLKPM);
 	}
 
-	if (cfg->mq_rx_supported)
-		addr_size = 64;
-	else
-		addr_size = 36;
-
 	if (cfg->use_tfh) {
+		addr_size = 64;
 		trans_pcie->max_tbs = IWL_TFH_NUM_TBS;
 		trans_pcie->tfd_size = sizeof(struct iwl_tfh_tfd);
-
 	} else {
+		addr_size = 36;
 		trans_pcie->max_tbs = IWL_NUM_OF_TBS;
 		trans_pcie->tfd_size = sizeof(struct iwl_tfd);
 	}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 5f840f1..e1bfc95 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -2096,6 +2096,7 @@
 				   struct iwl_cmd_meta *out_meta,
 				   struct iwl_device_cmd *dev_cmd, u16 tb1_len)
 {
+	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
 	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
 	struct ieee80211_hdr *hdr = (void *)skb->data;
 	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
@@ -2145,6 +2146,13 @@
 	 */
 	skb_pull(skb, hdr_len + iv_len);
 
+	/*
+	 * Remove the length of all the headers that we don't actually
+	 * have in the MPDU by themselves, but that we duplicate into
+	 * all the different MSDUs inside the A-MSDU.
+	 */
+	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
+
 	tso_start(skb, &tso);
 
 	while (total_len) {
@@ -2155,7 +2163,7 @@
 		unsigned int hdr_tb_len;
 		dma_addr_t hdr_tb_phys;
 		struct tcphdr *tcph;
-		u8 *iph;
+		u8 *iph, *subf_hdrs_start = hdr_page->pos;
 
 		total_len -= data_left;
 
@@ -2216,6 +2224,8 @@
 				       hdr_tb_len, false);
 		trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr,
 					       hdr_tb_len);
+		/* add this subframe's headers' length to the tx_cmd */
+		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
 
 		/* prepare the start_hdr for the next subframe */
 		start_hdr = hdr_page->pos;
@@ -2408,9 +2418,10 @@
 		tb1_len = len;
 	}
 
-	/* The first TB points to bi-directional DMA data */
-	memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
-	       IWL_FIRST_TB_SIZE);
+	/*
+	 * The first TB points to bi-directional DMA data, we'll
+	 * memcpy the data into it later.
+	 */
 	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
 			       IWL_FIRST_TB_SIZE, true);
 
@@ -2434,6 +2445,10 @@
 		goto out_err;
 	}
 
+	/* building the A-MSDU might have changed this data, so memcpy it now */
+	memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
+	       IWL_FIRST_TB_SIZE);
+
 	tfd = iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
 	/* Set up entry for this TFD in Tx byte-count array */
 	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
index c47d636..a75013a 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
@@ -101,13 +101,6 @@
 {
 	struct txpd *local_tx_pd;
 	struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
-	unsigned int pad;
-	int headroom = (priv->adapter->iface_type ==
-			MWIFIEX_USB) ? 0 : INTF_HEADER_LEN;
-
-	pad = ((void *)skb->data - sizeof(*local_tx_pd) -
-		headroom - NULL) & (MWIFIEX_DMA_ALIGN_SZ - 1);
-	skb_push(skb, pad);
 
 	skb_push(skb, sizeof(*local_tx_pd));
 
@@ -121,12 +114,10 @@
 	local_tx_pd->bss_num = priv->bss_num;
 	local_tx_pd->bss_type = priv->bss_type;
 	/* Always zero as the data is followed by struct txpd */
-	local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd) +
-						 pad);
+	local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd));
 	local_tx_pd->tx_pkt_type = cpu_to_le16(PKT_TYPE_AMSDU);
 	local_tx_pd->tx_pkt_length = cpu_to_le16(skb->len -
-						 sizeof(*local_tx_pd) -
-						 pad);
+						 sizeof(*local_tx_pd));
 
 	if (tx_info->flags & MWIFIEX_BUF_FLAG_TDLS_PKT)
 		local_tx_pd->flags |= MWIFIEX_TXPD_FLAGS_TDLS_PACKET;
@@ -190,7 +181,11 @@
 				       ra_list_flags);
 		return -1;
 	}
-	skb_reserve(skb_aggr, MWIFIEX_MIN_DATA_HEADER_LEN);
+
+	/* skb_aggr->data is already 64-byte aligned, so only reserve the
+	 * bus interface header and txpd.
+	 */
+	skb_reserve(skb_aggr, headroom + sizeof(struct txpd));
 	tx_info_aggr =  MWIFIEX_SKB_TXCB(skb_aggr);
 
 	memset(tx_info_aggr, 0, sizeof(*tx_info_aggr));
diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
index b9284b5..ae2b69d 100644
--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
+++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
@@ -114,7 +114,8 @@
 	if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) {
 		p += sprintf(p, "multicast_count=\"%d\"\n",
 			     netdev_mc_count(netdev));
-		p += sprintf(p, "essid=\"%s\"\n", info.ssid.ssid);
+		p += sprintf(p, "essid=\"%.*s\"\n", info.ssid.ssid_len,
+			     info.ssid.ssid);
 		p += sprintf(p, "bssid=\"%pM\"\n", info.bssid);
 		p += sprintf(p, "channel=\"%d\"\n", (int) info.bss_chan);
 		p += sprintf(p, "country_code = \"%s\"\n", info.country_code);
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index 644f3a2..1532ac9 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -1159,8 +1159,6 @@
 			encrypt_key.is_rx_seq_valid = true;
 		}
 	} else {
-		if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP)
-			return 0;
 		encrypt_key.key_disable = true;
 		if (mac_addr)
 			memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN);
diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c
index b36ce18..86fa0fc 100644
--- a/drivers/net/wireless/ti/wl18xx/event.c
+++ b/drivers/net/wireless/ti/wl18xx/event.c
@@ -218,5 +218,33 @@
 	if (vector & FW_LOGGER_INDICATION)
 		wlcore_event_fw_logger(wl);
 
+	if (vector & RX_BA_WIN_SIZE_CHANGE_EVENT_ID) {
+		struct wl12xx_vif *wlvif;
+		struct ieee80211_vif *vif;
+		struct ieee80211_sta *sta;
+		u8 link_id = mbox->rx_ba_link_id;
+		u8 win_size = mbox->rx_ba_win_size;
+		const u8 *addr;
+
+		wlvif = wl->links[link_id].wlvif;
+		vif = wl12xx_wlvif_to_vif(wlvif);
+
+		/* Update RX aggregation window size and call
+		 * MAC routine to stop active RX aggregations for this link
+		 */
+		if (wlvif->bss_type != BSS_TYPE_AP_BSS)
+			addr = vif->bss_conf.bssid;
+		else
+			addr = wl->links[link_id].addr;
+
+		sta = ieee80211_find_sta(vif, addr);
+		if (sta) {
+			sta->max_rx_aggregation_subframes = win_size;
+			ieee80211_stop_rx_ba_session(vif,
+						wl->links[link_id].ba_bitmap,
+						addr);
+		}
+	}
+
 	return 0;
 }
diff --git a/drivers/net/wireless/ti/wl18xx/event.h b/drivers/net/wireless/ti/wl18xx/event.h
index ce8ea9c0..4af297f 100644
--- a/drivers/net/wireless/ti/wl18xx/event.h
+++ b/drivers/net/wireless/ti/wl18xx/event.h
@@ -38,6 +38,7 @@
 	REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID      = BIT(18),
 	DFS_CHANNELS_CONFIG_COMPLETE_EVENT       = BIT(19),
 	PERIODIC_SCAN_REPORT_EVENT_ID            = BIT(20),
+	RX_BA_WIN_SIZE_CHANGE_EVENT_ID           = BIT(21),
 	SMART_CONFIG_SYNC_EVENT_ID               = BIT(22),
 	SMART_CONFIG_DECODE_EVENT_ID             = BIT(23),
 	TIME_SYNC_EVENT_ID                       = BIT(24),
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index 06d6943..5bdf7a0 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -1041,7 +1041,8 @@
 		SMART_CONFIG_SYNC_EVENT_ID |
 		SMART_CONFIG_DECODE_EVENT_ID |
 		TIME_SYNC_EVENT_ID |
-		FW_LOGGER_INDICATION;
+		FW_LOGGER_INDICATION |
+		RX_BA_WIN_SIZE_CHANGE_EVENT_ID;
 
 	wl->ap_event_mask = MAX_TX_FAILURE_EVENT_ID;
 
diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
index 26cc23f..a485999 100644
--- a/drivers/net/wireless/ti/wlcore/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -1419,7 +1419,8 @@
 
 /* setup BA session receiver setting in the FW. */
 int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index,
-				       u16 ssn, bool enable, u8 peer_hlid)
+				       u16 ssn, bool enable, u8 peer_hlid,
+				       u8 win_size)
 {
 	struct wl1271_acx_ba_receiver_setup *acx;
 	int ret;
@@ -1435,7 +1436,7 @@
 	acx->hlid = peer_hlid;
 	acx->tid = tid_index;
 	acx->enable = enable;
-	acx->win_size = wl->conf.ht.rx_ba_win_size;
+	acx->win_size =	win_size;
 	acx->ssn = ssn;
 
 	ret = wlcore_cmd_configure_failsafe(wl, ACX_BA_SESSION_RX_SETUP, acx,
diff --git a/drivers/net/wireless/ti/wlcore/acx.h b/drivers/net/wireless/ti/wlcore/acx.h
index 6321ed4..f46d7fd 100644
--- a/drivers/net/wireless/ti/wlcore/acx.h
+++ b/drivers/net/wireless/ti/wlcore/acx.h
@@ -1113,7 +1113,8 @@
 int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl,
 				       struct wl12xx_vif *wlvif);
 int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index,
-				       u16 ssn, bool enable, u8 peer_hlid);
+				       u16 ssn, bool enable, u8 peer_hlid,
+				       u8 win_size);
 int wl12xx_acx_tsf_info(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 			u64 *mactime);
 int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 471521a..5438975 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -5285,7 +5285,9 @@
 		}
 
 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
-							 hlid);
+				hlid,
+				params->buf_size);
+
 		if (!ret) {
 			*ba_bitmap |= BIT(tid);
 			wl->ba_rx_session_count++;
@@ -5306,7 +5308,7 @@
 		}
 
 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
-							 hlid);
+							 hlid, 0);
 		if (!ret) {
 			*ba_bitmap &= ~BIT(tid);
 			wl->ba_rx_session_count--;
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index fe00f91..7dc726d 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -456,6 +456,7 @@
 config PHY_TUSB1210
 	tristate "TI TUSB1210 ULPI PHY module"
 	depends on USB_ULPI_BUS
+	depends on EXTCON || !EXTCON # if EXTCON=m, this cannot be built-in
 	select GENERIC_PHY
 	help
 	  Support for TI TUSB1210 USB ULPI PHY.
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index 3a6e214..1946204 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -152,4 +152,12 @@
 	  a log and rates the actions according to whether a typical user would
 	  use the tools.
 
+config QCOM_GENI_SE
+	tristate "QCOM GENI Serial Engine Driver"
+	help
+	  This module is used to interact with GENI based Serial Engines on
+	  Qualcomm Technologies, Inc. Universal Peripheral (QUPv3). This
+	  module is used to configure and read the configuration from the
+	  Serial Engines.
+
 endmenu
diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile
index cf24d7a..ff1d0e2 100644
--- a/drivers/platform/msm/Makefile
+++ b/drivers/platform/msm/Makefile
@@ -11,3 +11,4 @@
 obj-$(CONFIG_USB_BAM) += usb_bam.o
 obj-$(CONFIG_MSM_11AD) += msm_11ad/
 obj-$(CONFIG_SEEMP_CORE) += seemp_core/
+obj-$(CONFIG_QCOM_GENI_SE) += qcom-geni-se.o
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index 5fdb4e9..7fca7aa 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -649,6 +649,13 @@
 			GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
 			GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
 		break;
+	case GSI_VER_2_0:
+		reg = gsi_readl(gsi_ctx->base +
+			GSI_V2_0_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
+		reg = (reg &
+			GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
+			GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
+		break;
 	default:
 		GSIERR("bad gsi version %d\n", ver);
 		WARN_ON(1);
@@ -684,6 +691,13 @@
 			GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
 			GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
 		break;
+	case GSI_VER_2_0:
+		reg = gsi_readl(gsi_ctx->base +
+			GSI_V2_0_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
+		reg = (reg &
+			GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
+			GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
+		break;
 	default:
 		GSIERR("bad gsi version %d\n", ver);
 		WARN_ON(1);
diff --git a/drivers/platform/msm/gsi/gsi_dbg.c b/drivers/platform/msm/gsi/gsi_dbg.c
index f5e23c68..154ac26 100644
--- a/drivers/platform/msm/gsi/gsi_dbg.c
+++ b/drivers/platform/msm/gsi/gsi_dbg.c
@@ -293,6 +293,16 @@
 		val = gsi_readl(gsi_ctx->base +
 			GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
 		TERR("EE%2d HW_PARAM_2 0x%x\n", gsi_ctx->per.ee, val);
+	} else if (gsi_ctx->per.ver == GSI_VER_2_0) {
+		val = gsi_readl(gsi_ctx->base +
+			GSI_V1_3_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
+		TERR("EE%2d HW_PARAM_0 0x%x\n", gsi_ctx->per.ee, val);
+		val = gsi_readl(gsi_ctx->base +
+			GSI_V1_3_EE_n_GSI_HW_PARAM_1_OFFS(gsi_ctx->per.ee));
+		TERR("EE%2d HW_PARAM_1 0x%x\n", gsi_ctx->per.ee, val);
+		val = gsi_readl(gsi_ctx->base +
+			GSI_V2_0_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
+		TERR("EE%2d HW_PARAM_2 0x%x\n", gsi_ctx->per.ee, val);
 	} else {
 		WARN_ON(1);
 	}
diff --git a/drivers/platform/msm/gsi/gsi_reg.h b/drivers/platform/msm/gsi/gsi_reg.h
index 653cdd4..7817613 100644
--- a/drivers/platform/msm/gsi/gsi_reg.h
+++ b/drivers/platform/msm/gsi/gsi_reg.h
@@ -1518,6 +1518,34 @@
 #define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_ONE_KB_FVAL 0x0
 #define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_KB_FVAL 0x1
 
+/* v2.0 */
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n))
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_RMSK 0x7fff
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_MAXn 2
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_BMSK 0x38000000
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_SHFT 0x1b
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_BMSK 0x7F80000
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_SHFT 0x13
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_BMSK 0x70000
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_SHFT 0x10
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_BMSK 0x8000
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_SHFT 0xf
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_BMSK 0x4000
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_SHFT 0xe
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_BMSK 0x2000
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_SHFT 0xd
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK 0x1f00
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT 0x8
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK 0xf8
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT 0x3
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_BMSK 0x7
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_SHFT 0x0
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_ONE_KB_FVAL 0x0
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_KB_FVAL 0x1
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_N_HALF_KB_FVAL 0x2
+#define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_THREE_KB_FVAL 0x3
+
 #define GSI_EE_n_GSI_SW_VERSION_OFFS(n) \
 	(GSI_GSI_REG_BASE_OFFS + 0x0001f044 + 0x4000 * (n))
 #define GSI_EE_n_GSI_SW_VERSION_RMSK 0xffffffff
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index a37947b..6c597f0 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -2600,6 +2600,9 @@
 	case IPA_HW_v3_5_1:
 		str = "3.5.1";
 		break;
+	case IPA_HW_v4_0:
+		str = "4.0";
+		break;
 	default:
 		str = "Invalid version";
 		break;
@@ -2660,6 +2663,7 @@
 	case IPA_HW_v3_1:
 	case IPA_HW_v3_5:
 	case IPA_HW_v3_5_1:
+	case IPA_HW_v4_0:
 		result = ipa3_plat_drv_probe(pdev_p, ipa_api_ctrl,
 			ipa_plat_drv_match);
 		break;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index e7b16b3..31e530e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -1735,7 +1735,7 @@
 		IPAERR("failed to construct dma_shared_mem imm cmd\n");
 		return -ENOMEM;
 	}
-	desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+	desc.opcode = cmd_pyld->opcode;
 	desc.pyld = cmd_pyld->data;
 	desc.len = cmd_pyld->len;
 	desc.type = IPA_IMM_CMD_DESC;
@@ -2000,8 +2000,7 @@
 				retval = -ENOMEM;
 				goto free_empty_img;
 			}
-			desc[num_cmds].opcode = ipahal_imm_cmd_get_opcode(
-				IPA_IMM_CMD_DMA_SHARED_MEM);
+			desc[num_cmds].opcode = cmd_pyld[num_cmds]->opcode;
 			desc[num_cmds].pyld = cmd_pyld[num_cmds]->data;
 			desc[num_cmds].len = cmd_pyld[num_cmds]->len;
 			desc[num_cmds].type = IPA_IMM_CMD_DESC;
@@ -2100,8 +2099,7 @@
 		retval = -ENOMEM;
 		goto free_desc;
 	}
-	desc->opcode =
-		ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+	desc->opcode = cmd_pyld->opcode;
 	desc->pyld = cmd_pyld->data;
 	desc->len = cmd_pyld->len;
 	desc->type = IPA_IMM_CMD_DESC;
@@ -2191,8 +2189,7 @@
 		retval = -EFAULT;
 		goto bail_desc;
 	}
-	desc->opcode =
-		ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+	desc->opcode = cmd_pyld->opcode;
 	desc->pyld = cmd_pyld->data;
 	desc->len = cmd_pyld->len;
 	desc->type = IPA_IMM_CMD_DESC;
@@ -2259,8 +2256,7 @@
 				BUG();
 			}
 
-			desc[num_descs].opcode = ipahal_imm_cmd_get_opcode(
-				IPA_IMM_CMD_REGISTER_WRITE);
+			desc[num_descs].opcode = cmd_pyld->opcode;
 			desc[num_descs].type = IPA_IMM_CMD_DESC;
 			desc[num_descs].callback = ipa3_destroy_imm;
 			desc[num_descs].user1 = cmd_pyld;
@@ -2289,8 +2285,7 @@
 				return -EFAULT;
 			}
 
-			desc[num_descs].opcode = ipahal_imm_cmd_get_opcode(
-				IPA_IMM_CMD_REGISTER_WRITE);
+			desc[num_descs].opcode = cmd_pyld->opcode;
 			desc[num_descs].type = IPA_IMM_CMD_DESC;
 			desc[num_descs].callback = ipa3_destroy_imm;
 			desc[num_descs].user1 = cmd_pyld;
@@ -2494,7 +2489,7 @@
 			mem.phys_base);
 		return -EFAULT;
 	}
-	desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_HDR_INIT_LOCAL);
+	desc.opcode = cmd_pyld->opcode;
 	desc.type = IPA_IMM_CMD_DESC;
 	desc.pyld = cmd_pyld->data;
 	desc.len = cmd_pyld->len;
@@ -2539,7 +2534,7 @@
 			mem.phys_base);
 		return -EFAULT;
 	}
-	desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+	desc.opcode = cmd_pyld->opcode;
 	desc.pyld = cmd_pyld->data;
 	desc.len = cmd_pyld->len;
 	desc.type = IPA_IMM_CMD_DESC;
@@ -2611,8 +2606,7 @@
 		goto free_mem;
 	}
 
-	desc.opcode =
-		ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V4_ROUTING_INIT);
+	desc.opcode = cmd_pyld->opcode;
 	desc.type = IPA_IMM_CMD_DESC;
 	desc.pyld = cmd_pyld->data;
 	desc.len = cmd_pyld->len;
@@ -2678,8 +2672,7 @@
 		goto free_mem;
 	}
 
-	desc.opcode =
-		ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V6_ROUTING_INIT);
+	desc.opcode = cmd_pyld->opcode;
 	desc.type = IPA_IMM_CMD_DESC;
 	desc.pyld = cmd_pyld->data;
 	desc.len = cmd_pyld->len;
@@ -2739,7 +2732,7 @@
 		goto free_mem;
 	}
 
-	desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V4_FILTER_INIT);
+	desc.opcode = cmd_pyld->opcode;
 	desc.type = IPA_IMM_CMD_DESC;
 	desc.pyld = cmd_pyld->data;
 	desc.len = cmd_pyld->len;
@@ -2800,7 +2793,7 @@
 		goto free_mem;
 	}
 
-	desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V6_FILTER_INIT);
+	desc.opcode = cmd_pyld->opcode;
 	desc.type = IPA_IMM_CMD_DESC;
 	desc.pyld = cmd_pyld->data;
 	desc.len = cmd_pyld->len;
@@ -3939,6 +3932,9 @@
 	case IPA_HW_v3_5_1:
 		gsi_ver = GSI_VER_1_3;
 		break;
+	case IPA_HW_v4_0:
+		gsi_ver = GSI_VER_2_0;
+		break;
 	default:
 		IPAERR("No GSI version for ipa type %d\n", ipa_hw_type);
 		WARN_ON(1);
@@ -4319,6 +4315,7 @@
 		IPAERR("failed to construct IMM cmd\n");
 		return -ENOMEM;
 	}
+	ipa3_ctx->pkt_init_imm_opcode = cmd_pyld->opcode;
 
 	mem.size = cmd_pyld->len * ipa3_ctx->ipa_num_pipes;
 	mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index ca77be9..1ee8ec8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -244,6 +244,38 @@
 		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_CFG_n, pipe));
 }
 
+/**
+ * _ipa_read_ep_reg_v4_0() - Reads and prints endpoint configuration registers
+ *
+ * Returns the number of characters printed
+ * Note: IPA_ENDP_INIT_ROUTE_n (present in v3) was removed in v4.0
+ */
+int _ipa_read_ep_reg_v4_0(char *buf, int max_len, int pipe)
+{
+	return scnprintf(
+		dbg_buff, IPA_MAX_MSG_LEN,
+		"IPA_ENDP_INIT_NAT_%u=0x%x\n"
+		"IPA_ENDP_INIT_HDR_%u=0x%x\n"
+		"IPA_ENDP_INIT_HDR_EXT_%u=0x%x\n"
+		"IPA_ENDP_INIT_MODE_%u=0x%x\n"
+		"IPA_ENDP_INIT_AGGR_%u=0x%x\n"
+		"IPA_ENDP_INIT_CTRL_%u=0x%x\n"
+		"IPA_ENDP_INIT_HOL_EN_%u=0x%x\n"
+		"IPA_ENDP_INIT_HOL_TIMER_%u=0x%x\n"
+		"IPA_ENDP_INIT_DEAGGR_%u=0x%x\n"
+		"IPA_ENDP_INIT_CFG_%u=0x%x\n",
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_NAT_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HDR_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HDR_EXT_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_MODE_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_AGGR_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_CTRL_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HOL_BLOCK_EN_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_DEAGGR_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_CFG_n, pipe));
+}
+
 static ssize_t ipa3_read_ep_reg(struct file *file, char __user *ubuf,
 		size_t count, loff_t *ppos)
 {
@@ -1381,6 +1413,11 @@
 	u32 option = 0;
 	struct ipahal_reg_debug_cnt_ctrl dbg_cnt_ctrl;
 
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+		IPAERR("IPA_DEBUG_CNT_CTRL is not supported in IPA 4.0\n");
+		return -EPERM;
+	}
+
 	if (sizeof(dbg_buff) < count + 1)
 		return -EFAULT;
 
@@ -1416,6 +1453,11 @@
 	int nbytes;
 	u32 regval;
 
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+		IPAERR("IPA_DEBUG_CNT_REG is not supported in IPA 4.0\n");
+		return -EPERM;
+	}
+
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
 	regval =
 		ipahal_read_reg_n(IPA_DEBUG_CNT_REG_n, 0);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index faa47d8..bf13ac5 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -315,9 +315,7 @@
 		}
 
 		/* populate tag field */
-		if (desc[i].opcode ==
-			ipahal_imm_cmd_get_opcode(
-				IPA_IMM_CMD_IP_PACKET_TAG_STATUS)) {
+		if (desc[i].is_tag_status) {
 			if (ipa_populate_tag_field(&desc[i], tx_pkt,
 				&tag_pyld_ret)) {
 				IPAERR("Failed to populate tag field\n");
@@ -1279,15 +1277,10 @@
 			 * notification. IPA will generate a status packet with
 			 * tag info as a result of the TAG STATUS command.
 			 */
-			desc[data_idx].opcode =
-				ipahal_imm_cmd_get_opcode(
-				IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
-			desc[data_idx].type = IPA_IMM_CMD_DESC;
-			desc[data_idx].callback = ipa3_tag_destroy_imm;
+			desc[data_idx].is_tag_status = true;
 			data_idx++;
 		}
-		desc[data_idx].opcode =
-			ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_PACKET_INIT);
+		desc[data_idx].opcode = ipa3_ctx->pkt_init_imm_opcode;
 		desc[data_idx].dma_address_valid = true;
 		desc[data_idx].dma_address = ipa3_ctx->pkt_init_imm[dst_ep_idx];
 		desc[data_idx].type = IPA_IMM_CMD_DESC;
@@ -1338,11 +1331,7 @@
 			 * notification. IPA will generate a status packet with
 			 * tag info as a result of the TAG STATUS command.
 			 */
-			desc[data_idx].opcode =
-				ipahal_imm_cmd_get_opcode(
-					IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
-			desc[data_idx].type = IPA_IMM_CMD_DESC;
-			desc[data_idx].callback = ipa3_tag_destroy_imm;
+			desc[data_idx].is_tag_status = true;
 			data_idx++;
 		}
 		desc[data_idx].pyld = skb->data;
@@ -2979,11 +2968,7 @@
 			(u8)sys->ep->cfg.meta.qmap_id;
 
 		/* the tag field will be populated in ipa3_send() function */
-		desc[0].opcode =
-			ipahal_imm_cmd_get_opcode(
-				IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
-		desc[0].type = IPA_IMM_CMD_DESC;
-		desc[0].callback = ipa3_tag_destroy_imm;
+		desc[0].is_tag_status = true;
 		desc[1].pyld = entry->pyld_buffer;
 		desc[1].len = entry->pyld_len;
 		desc[1].type = IPA_DATA_DESC_SKB;
@@ -3615,8 +3600,11 @@
 		 */
 		IPADBG_LOW("tx_pkt sent in tag: 0x%p\n", tx_pkt);
 		desc->pyld = tag_pyld->data;
+		desc->opcode = tag_pyld->opcode;
 		desc->len = tag_pyld->len;
 		desc->user1 = tag_pyld;
+		desc->type = IPA_IMM_CMD_DESC;
+		desc->callback = ipa3_tag_destroy_imm;
 
 		*tag_pyld_ret = tag_pyld;
 	}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
index ff763c4..d0ed782 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -573,7 +573,7 @@
 		rc = -EFAULT;
 		goto fail_reg_write_construct;
 	}
-	desc[0].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+	desc[0].opcode = cmd_pyld[0]->opcode;
 	desc[0].pyld = cmd_pyld[0]->data;
 	desc[0].len = cmd_pyld[0]->len;
 	desc[0].type = IPA_IMM_CMD_DESC;
@@ -609,8 +609,7 @@
 				ip);
 			goto fail_imm_cmd_construct;
 		}
-		desc[num_cmd].opcode =
-			ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+		desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
 		desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
 		desc[num_cmd].len = cmd_pyld[num_cmd]->len;
 		desc[num_cmd++].type = IPA_IMM_CMD_DESC;
@@ -630,8 +629,7 @@
 				ip);
 			goto fail_imm_cmd_construct;
 		}
-		desc[num_cmd].opcode =
-			ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+		desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
 		desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
 		desc[num_cmd].len = cmd_pyld[num_cmd]->len;
 		desc[num_cmd++].type = IPA_IMM_CMD_DESC;
@@ -653,8 +651,7 @@
 				ip);
 			goto fail_imm_cmd_construct;
 		}
-		desc[num_cmd].opcode =
-			ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+		desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
 		desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
 		desc[num_cmd].len = cmd_pyld[num_cmd]->len;
 		desc[num_cmd++].type = IPA_IMM_CMD_DESC;
@@ -673,8 +670,7 @@
 				ip);
 			goto fail_imm_cmd_construct;
 		}
-		desc[num_cmd].opcode =
-			ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+		desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
 		desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
 		desc[num_cmd].len = cmd_pyld[num_cmd]->len;
 		desc[num_cmd++].type = IPA_IMM_CMD_DESC;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index 69db99a..410b96a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -181,8 +181,7 @@
 				IPAERR("fail construct dma_shared_mem cmd\n");
 				goto end;
 			}
-			desc[0].opcode = ipahal_imm_cmd_get_opcode(
-				IPA_IMM_CMD_DMA_SHARED_MEM);
+			desc[0].opcode = hdr_cmd_pyld->opcode;
 			desc[0].pyld = hdr_cmd_pyld->data;
 			desc[0].len = hdr_cmd_pyld->len;
 		}
@@ -200,8 +199,7 @@
 				IPAERR("fail construct hdr_init_system cmd\n");
 				goto end;
 			}
-			desc[0].opcode = ipahal_imm_cmd_get_opcode(
-				IPA_IMM_CMD_HDR_INIT_SYSTEM);
+			desc[0].opcode = hdr_cmd_pyld->opcode;
 			desc[0].pyld = hdr_cmd_pyld->data;
 			desc[0].len = hdr_cmd_pyld->len;
 		}
@@ -233,8 +231,7 @@
 				IPAERR("fail construct dma_shared_mem cmd\n");
 				goto end;
 			}
-			desc[1].opcode = ipahal_imm_cmd_get_opcode(
-				IPA_IMM_CMD_DMA_SHARED_MEM);
+			desc[1].opcode = ctx_cmd_pyld->opcode;
 			desc[1].pyld = ctx_cmd_pyld->data;
 			desc[1].len = ctx_cmd_pyld->len;
 		}
@@ -262,8 +259,7 @@
 				IPAERR("fail construct register_write cmd\n");
 				goto end;
 			}
-			desc[1].opcode = ipahal_imm_cmd_get_opcode(
-				IPA_IMM_CMD_REGISTER_WRITE);
+			desc[1].opcode = ctx_cmd_pyld->opcode;
 			desc[1].pyld = ctx_cmd_pyld->data;
 			desc[1].len = ctx_cmd_pyld->len;
 		}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 3af4486..73a405f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -701,6 +701,7 @@
  * or kmalloc'ed immediate command parameters/plain old data
  * @dma_address: dma mapped address of pyld
  * @dma_address_valid: valid field for dma_address
+ * @is_tag_status: flag for the IP_PACKET_TAG_STATUS immediate command
  * @len: length of the pyld
  * @opcode: for immediate commands
  * @callback: IPA client provided completion callback
@@ -715,6 +716,7 @@
 	skb_frag_t *frag;
 	dma_addr_t dma_address;
 	bool dma_address_valid;
+	bool is_tag_status;
 	u16 len;
 	u16 opcode;
 	void (*callback)(void *user1, int user2);
@@ -1069,6 +1071,7 @@
  * @ipa_bus_hdl: msm driver handle for the data path bus
  * @ctrl: holds the core specific operations based on
  *  core version (vtable like)
+ * @pkt_init_imm_opcode: opcode for IP_PACKET_INIT imm cmd
  * @enable_clock_scaling: clock scaling is enabled ?
  * @curr_ipa_clk_rate: IPA current clock rate
  * @wcstats: wlan common buffer stats
@@ -1180,6 +1183,7 @@
 	bool q6_proxy_clk_vote_valid;
 	u32 ipa_num_pipes;
 	dma_addr_t pkt_init_imm[IPA3_MAX_NUM_PIPES];
+	u32 pkt_init_imm_opcode;
 
 	struct ipa3_wlan_comm_memb wc_memb;
 
@@ -1318,6 +1322,12 @@
  * +-------------------------+
  * |    CANARY               |
  * +-------------------------+
+ * | PDN CONFIG              |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
  * |  MODEM MEM              |
  * +-------------------------+
  * |    CANARY               |
@@ -1398,6 +1408,8 @@
 	u32 apps_v6_rt_nhash_size;
 	u32 uc_event_ring_ofst;
 	u32 uc_event_ring_size;
+	u32 pdn_config_ofst;
+	u32 pdn_config_size;
 };
 
 struct ipa3_controller {
@@ -1827,6 +1839,7 @@
 int __ipa3_release_hdr(u32 hdr_hdl);
 int __ipa3_release_hdr_proc_ctx(u32 proc_ctx_hdl);
 int _ipa_read_ep_reg_v3_0(char *buf, int max_len, int pipe);
+int _ipa_read_ep_reg_v4_0(char *buf, int max_len, int pipe);
 void _ipa_enable_clks_v3_0(void);
 void _ipa_disable_clks_v3_0(void);
 struct device *ipa3_get_dma_dev(void);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
index d98e6b4..e1177ca 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -420,7 +420,7 @@
 		goto bail;
 	}
 
-	desc[0].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+	desc[0].opcode = nop_cmd_pyld->opcode;
 	desc[0].type = IPA_IMM_CMD_DESC;
 	desc[0].callback = NULL;
 	desc[0].user1 = NULL;
@@ -505,7 +505,7 @@
 		goto free_nop;
 	}
 
-	desc[1].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V4_NAT_INIT);
+	desc[1].opcode = cmd_pyld->opcode;
 	desc[1].type = IPA_IMM_CMD_DESC;
 	desc[1].callback = NULL;
 	desc[1].user1 = NULL;
@@ -668,7 +668,7 @@
 		goto bail;
 	}
 	desc[0].type = IPA_IMM_CMD_DESC;
-	desc[0].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+	desc[0].opcode = nop_cmd_pyld->opcode;
 	desc[0].callback = NULL;
 	desc[0].user1 = NULL;
 	desc[0].user2 = 0;
@@ -687,7 +687,7 @@
 			continue;
 		}
 		desc[1].type = IPA_IMM_CMD_DESC;
-		desc[1].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_NAT_DMA);
+		desc[1].opcode = cmd_pyld->opcode;
 		desc[1].callback = NULL;
 		desc[1].user1 = NULL;
 		desc[1].user2 = 0;
@@ -777,7 +777,7 @@
 		result = -ENOMEM;
 		goto bail;
 	}
-	desc[0].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+	desc[0].opcode = nop_cmd_pyld->opcode;
 	desc[0].type = IPA_IMM_CMD_DESC;
 	desc[0].callback = NULL;
 	desc[0].user1 = NULL;
@@ -804,7 +804,7 @@
 		result = -EPERM;
 		goto destroy_regwrt_imm_cmd;
 	}
-	desc[1].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V4_NAT_INIT);
+	desc[1].opcode = cmd_pyld->opcode;
 	desc[1].type = IPA_IMM_CMD_DESC;
 	desc[1].callback = NULL;
 	desc[1].user1 = NULL;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index 273877c..cf28986 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -550,8 +550,7 @@
 		IPAERR("fail construct register_write imm cmd. IP %d\n", ip);
 		goto fail_size_valid;
 	}
-	desc[num_cmd].opcode =
-		ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+	desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
 	desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
 	desc[num_cmd].len = cmd_pyld[num_cmd]->len;
 	desc[num_cmd].type = IPA_IMM_CMD_DESC;
@@ -569,8 +568,7 @@
 		IPAERR("fail construct dma_shared_mem imm cmd. IP %d\n", ip);
 		goto fail_imm_cmd_construct;
 	}
-	desc[num_cmd].opcode =
-		ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+	desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
 	desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
 	desc[num_cmd].len = cmd_pyld[num_cmd]->len;
 	desc[num_cmd].type = IPA_IMM_CMD_DESC;
@@ -588,8 +586,7 @@
 		IPAERR("fail construct dma_shared_mem imm cmd. IP %d\n", ip);
 		goto fail_imm_cmd_construct;
 	}
-	desc[num_cmd].opcode =
-		ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+	desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
 	desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
 	desc[num_cmd].len = cmd_pyld[num_cmd]->len;
 	desc[num_cmd].type = IPA_IMM_CMD_DESC;
@@ -609,8 +606,7 @@
 				ip);
 			goto fail_imm_cmd_construct;
 		}
-		desc[num_cmd].opcode =
-			ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+		desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
 		desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
 		desc[num_cmd].len = cmd_pyld[num_cmd]->len;
 		desc[num_cmd].type = IPA_IMM_CMD_DESC;
@@ -630,8 +626,7 @@
 				ip);
 			goto fail_imm_cmd_construct;
 		}
-		desc[num_cmd].opcode =
-			ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+		desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
 		desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
 		desc[num_cmd].len = cmd_pyld[num_cmd]->len;
 		desc[num_cmd].type = IPA_IMM_CMD_DESC;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 6321ca9..f8b4d7d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -42,6 +42,7 @@
 #define IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT (10 * HZ)
 #define IPA_BCR_REG_VAL_v3_0 (0x00000001)
 #define IPA_BCR_REG_VAL_v3_5 (0x0000003B)
+#define IPA_BCR_REG_VAL_v4_0 (0x00000039)
 #define IPA_AGGR_GRAN_MIN (1)
 #define IPA_AGGR_GRAN_MAX (32)
 #define IPA_EOT_COAL_GRAN_MIN (1)
@@ -62,8 +63,6 @@
 /* configure IPA spare register 1 in order to have correct IPA version
  * set bits 0,2,3 and 4. see SpareBits documentation.xlsx
  */
-#define IPA_SPARE_REG_1_VAL (0x0000081D)
-
 
 /* HPS, DPS sequencers Types*/
 #define IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY  0x00000000
@@ -121,6 +120,16 @@
 #define IPA_v3_5_SRC_GROUP_MAX		(4)
 #define IPA_v3_5_DST_GROUP_MAX		(3)
 
+#define IPA_v4_0_GROUP_LWA_DL		(0)
+#define IPA_v4_0_MHI_GROUP_PCIE		(0)
+#define IPA_v4_0_ETHERNET		(0)
+#define IPA_v4_0_GROUP_UL_DL		(1)
+#define IPA_v4_0_MHI_GROUP_DDR		(1)
+#define IPA_v4_0_MHI_GROUP_DMA		(2)
+#define IPA_v4_0_GROUP_UC_RX_Q		(3)
+#define IPA_v4_0_SRC_GROUP_MAX		(4)
+#define IPA_v4_0_DST_GROUP_MAX		(4)
+
 #define IPA_GROUP_MAX IPA_v3_0_GROUP_MAX
 
 enum ipa_rsrc_grp_type_src {
@@ -139,7 +148,14 @@
 	IPA_v3_5_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF,
 	IPA_v3_5_RSRC_GRP_TYPE_SRC_HPS_DMARS,
 	IPA_v3_5_RSRC_GRP_TYPE_SRC_ACK_ENTRIES,
-	IPA_v3_5_RSRC_GRP_TYPE_SRC_MAX
+	IPA_v3_5_RSRC_GRP_TYPE_SRC_MAX,
+
+	IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS = 0,
+	IPA_v4_0_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS,
+	IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF,
+	IPA_v4_0_RSRC_GRP_TYPE_SRC_HPS_DMARS,
+	IPA_v4_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES,
+	IPA_v4_0_RSRC_GRP_TYPE_SRC_MAX
 };
 
 #define IPA_RSRC_GRP_TYPE_SRC_MAX IPA_v3_0_RSRC_GRP_TYPE_SRC_MAX
@@ -153,6 +169,10 @@
 	IPA_v3_5_RSRC_GRP_TYPE_DST_DATA_SECTORS = 0,
 	IPA_v3_5_RSRC_GRP_TYPE_DST_DPS_DMARS,
 	IPA_v3_5_RSRC_GRP_TYPE_DST_MAX,
+
+	IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS = 0,
+	IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS,
+	IPA_v4_0_RSRC_GRP_TYPE_DST_MAX,
 };
 #define IPA_RSRC_GRP_TYPE_DST_MAX IPA_v3_0_RSRC_GRP_TYPE_DST_MAX
 
@@ -160,6 +180,12 @@
 	IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ,
 	IPA_RSRC_GRP_TYPE_RX_MAX
 };
+
+enum ipa_rsrc_grp_rx_hps_weight_config {
+	IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG,
+	IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_MAX
+};
+
 struct rsrc_min_max {
 	u32 min;
 	u32 max;
@@ -170,6 +196,8 @@
 	IPA_3_5,
 	IPA_3_5_MHI,
 	IPA_3_5_1,
+	IPA_4_0,
+	IPA_4_0_MHI,
 	IPA_VER_MAX,
 };
 
@@ -233,6 +261,32 @@
 		[IPA_v3_5_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = {
 		{14, 14}, {20, 20}, {0, 0}, {14, 14}, {0, 0}, {0, 0} },
 	},
+	[IPA_4_0] = {
+		/* LWA_DL  UL_DL    not used  UC_RX_Q, other are invalid */
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
+		{1, 255}, {1, 255}, {0, 0}, {1, 255}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = {
+		{10, 10}, {10, 10}, {0, 0}, {8, 8}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
+		{12, 12}, {14, 14}, {0, 0}, {8, 8}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_HPS_DMARS] = {
+		{0, 255}, {0, 255}, {0, 255}, {0, 255},  {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = {
+		{14, 14}, {20, 20}, {0, 0}, {14, 14}, {0, 0}, {0, 0} },
+	},
+	[IPA_4_0_MHI] = {
+		/* PCIE  DDR     DMA  not used, other are invalid */
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
+		{4, 4}, {5, 5}, {1, 1}, {0, 0}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = {
+		{10, 10}, {10, 10}, {8, 8}, {0, 0}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
+		{12, 12}, {12, 12}, {8, 8}, {0, 0}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_HPS_DMARS] = {
+		{0, 255}, {0, 255}, {0, 255}, {0, 255},  {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = {
+		{14, 14}, {14, 14}, {14, 14}, {0, 0}, {0, 0}, {0, 0} },
+	},
 };
 
 static const struct rsrc_min_max ipa3_rsrc_dst_grp_config
@@ -267,6 +321,20 @@
 		[IPA_v3_5_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
 		{2, 255}, {1, 255}, {1, 2}, {0, 0}, {0, 0}, {0, 0} },
 	},
+	[IPA_4_0] = {
+		/*LWA_DL UL/DL/DPL uC, other are invalid */
+		[IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
+		{4, 4}, {4, 4}, {3, 3}, {2, 2}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
+		{2, 255}, {1, 255}, {1, 2}, {0, 2}, {0, 0}, {0, 0} },
+	},
+	[IPA_4_0_MHI] = {
+		/*LWA_DL UL/DL/DPL uC, other are invalid */
+		[IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
+		{4, 4}, {4, 4}, {3, 3}, {2, 2}, {0, 0}, {0, 0} },
+		[IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
+		{2, 255}, {1, 255}, {1, 2}, {0, 2}, {0, 0}, {0, 0} },
+	},
 };
 
 static const struct rsrc_min_max ipa3_rsrc_rx_grp_config
@@ -285,12 +353,50 @@
 		/* PCIE   DDR	     DMA       unused   N/A        N/A */
 		[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
 		{ 3, 3 }, { 7, 7 }, { 2, 2 }, { 0, 0 }, { 0, 0 }, { 0, 0 } },
-},
+	},
 	[IPA_3_5_1] = {
 		/* LWA_DL UL_DL	unused   UC_RX_Q N/A     N/A */
 		[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
 		{3, 3}, {7, 7}, {0, 0}, {2, 2}, {0, 0}, {0, 0} },
 	},
+	[IPA_4_0] = {
+		/* LWA_DL UL_DL	not used UC_RX_Q, other are invalid */
+		[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
+		{3, 3}, {7, 7}, {0, 0}, {2, 2}, {0, 0}, {0, 0} },
+	},
+	[IPA_4_0_MHI] = {
+		/* PCIE   DDR	     DMA       unused   N/A        N/A */
+		[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
+		{ 3, 3 }, { 7, 7 }, { 2, 2 }, { 0, 0 }, { 0, 0 }, { 0, 0 } },
+	},
+};
+
+static const u32 ipa3_rsrc_rx_grp_hps_weight_config
+	[IPA_VER_MAX][IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_MAX][IPA_GROUP_MAX] = {
+	[IPA_3_0] = {
+		/* UL	DL	DIAG	DMA	Unused	uC Rx */
+		[IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 0, 0, 0, 0, 0, 0 },
+	},
+	[IPA_3_5] = {
+		/* unused UL_DL	unused UC_RX_Q   N/A     N/A */
+		[IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 1, 1, 1, 1, 0, 0 },
+	},
+	[IPA_3_5_MHI] = {
+		/* PCIE   DDR	     DMA       unused   N/A        N/A */
+		[IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 3, 5, 1, 1, 0, 0 },
+	},
+	[IPA_3_5_1] = {
+		/* LWA_DL UL_DL	unused   UC_RX_Q N/A     N/A */
+		[IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 1, 1, 1, 1, 0, 0 },
+	},
+	[IPA_4_0] = {
+		/* LWA_DL UL_DL	not used UC_RX_Q, other are invalid */
+		[IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 1, 1, 1, 1, 0, 0 },
+	},
+	[IPA_4_0_MHI] = {
+		/* PCIE   DDR	     DMA       unused   N/A        N/A */
+		[IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 3, 5, 1, 1, 0, 0 },
+	},
 };
 
 enum ipa_ees {
@@ -1115,6 +1221,399 @@
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 11, 2, 4, 6, IPA_EE_AP } },
+
+
+	/* IPA_4_0 */
+	[IPA_4_0][IPA_CLIENT_HSIC1_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_4_0][IPA_CLIENT_WLAN1_PROD]          = {
+			7, IPA_v4_0_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 7, 9, 8, 16, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_HSIC2_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_4_0][IPA_CLIENT_USB2_PROD]           = IPA_CLIENT_NOT_USED,
+	[IPA_4_0][IPA_CLIENT_HSIC3_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_4_0][IPA_CLIENT_USB3_PROD]           = IPA_CLIENT_NOT_USED,
+	[IPA_4_0][IPA_CLIENT_HSIC4_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_4_0][IPA_CLIENT_USB4_PROD]           = IPA_CLIENT_NOT_USED,
+	[IPA_4_0][IPA_CLIENT_HSIC5_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_4_0][IPA_CLIENT_USB_PROD]            = {
+			0, IPA_v4_0_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 0, 8, 8, 16, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_UC_USB_PROD]         = IPA_CLIENT_NOT_USED,
+	[IPA_4_0][IPA_CLIENT_A5_WLAN_AMPDU_PROD]  = IPA_CLIENT_NOT_USED,
+	[IPA_4_0][IPA_CLIENT_A2_EMBEDDED_PROD]    = IPA_CLIENT_NOT_USED,
+	[IPA_4_0][IPA_CLIENT_A2_TETHERED_PROD]    = IPA_CLIENT_NOT_USED,
+	[IPA_4_0][IPA_CLIENT_APPS_LAN_PROD]   = {
+			8, IPA_v4_0_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 8, 10, 8, 16, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_APPS_WAN_PROD] = {
+			2, IPA_v4_0_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 2, 3, 16, 32, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_APPS_CMD_PROD]	  = {
+			5, IPA_v4_0_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 5, 4, 20, 24, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_ODU_PROD]            = {
+			0, IPA_v4_0_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 0, 1, 8, 16, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_ETHERNET_PROD]	  = {
+			9, IPA_v4_0_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 9, 0, 8, 16, IPA_EE_UC } },
+	[IPA_4_0][IPA_CLIENT_MHI_PROD]            = IPA_CLIENT_NOT_USED,
+	[IPA_4_0][IPA_CLIENT_Q6_LAN_PROD]         = {
+			6, IPA_v4_0_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 6, 2, 12, 24, IPA_EE_Q6 } },
+	[IPA_4_0][IPA_CLIENT_Q6_WAN_PROD]         = {
+			3, IPA_v4_0_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 3, 0, 16, 32, IPA_EE_Q6 } },
+	[IPA_4_0][IPA_CLIENT_Q6_CMD_PROD]	  = {
+			4, IPA_v4_0_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 4, 1, 20, 24, IPA_EE_Q6 } },
+	[IPA_4_0][IPA_CLIENT_Q6_DECOMP_PROD]      = IPA_CLIENT_NOT_USED,
+	[IPA_4_0][IPA_CLIENT_Q6_DECOMP2_PROD]     = IPA_CLIENT_NOT_USED,
+	[IPA_4_0][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = IPA_CLIENT_NOT_USED,
+	[IPA_4_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = IPA_CLIENT_NOT_USED,
+	/* Only for test purpose */
+	[IPA_4_0][IPA_CLIENT_TEST_PROD]           = {
+			0, IPA_v4_0_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{0, 8, 8, 16, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_TEST1_PROD]          = {
+			0, IPA_v4_0_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{0, 8, 8, 16, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_TEST2_PROD]          = {
+			1, IPA_v4_0_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 1, 0, 8, 16, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_TEST3_PROD]          = {
+			7, IPA_v4_0_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{7, 9, 8, 16, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_TEST4_PROD]          = {
+			8, IPA_v4_0_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 8, 10, 8, 16, IPA_EE_AP } },
+
+
+	[IPA_4_0][IPA_CLIENT_HSIC1_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_4_0][IPA_CLIENT_WLAN1_CONS]          = {
+			18, IPA_v4_0_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 18, 12, 6, 9, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_HSIC2_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_4_0][IPA_CLIENT_USB2_CONS]           = IPA_CLIENT_NOT_USED,
+	[IPA_4_0][IPA_CLIENT_WLAN2_CONS]          = {
+			20, IPA_v4_0_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 20, 14, 9, 9, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_HSIC3_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_4_0][IPA_CLIENT_USB3_CONS]           = IPA_CLIENT_NOT_USED,
+	[IPA_4_0][IPA_CLIENT_WLAN3_CONS]          = {
+			21, IPA_v4_0_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 21, 15, 9, 9, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_HSIC4_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_4_0][IPA_CLIENT_USB4_CONS]           = IPA_CLIENT_NOT_USED,
+	[IPA_4_0][IPA_CLIENT_WLAN4_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_4_0][IPA_CLIENT_HSIC5_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_4_0][IPA_CLIENT_USB_CONS]            = {
+			19, IPA_v4_0_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 19, 13, 9, 9, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_USB_DPL_CONS]        = {
+			15, IPA_v4_0_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 15, 7, 5, 5, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_A2_EMBEDDED_CONS]    = IPA_CLIENT_NOT_USED,
+	[IPA_4_0][IPA_CLIENT_A2_TETHERED_CONS]    = IPA_CLIENT_NOT_USED,
+	[IPA_4_0][IPA_CLIENT_A5_LAN_WAN_CONS]     = IPA_CLIENT_NOT_USED,
+	[IPA_4_0][IPA_CLIENT_APPS_LAN_CONS]       = {
+			10, IPA_v4_0_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 10, 5, 9, 9, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_APPS_WAN_CONS]       = {
+			11, IPA_v4_0_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 11, 6, 9, 9, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_ODU_EMB_CONS]        = {
+			17, IPA_v4_0_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 17, 1, 17, 17, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_ETHERNET_CONS]	  = {
+			22, IPA_v4_0_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 22, 1, 17, 17, IPA_EE_UC } },
+	[IPA_4_0][IPA_CLIENT_ODU_TETH_CONS]       = IPA_CLIENT_NOT_USED,
+	[IPA_4_0][IPA_CLIENT_MHI_CONS]            = IPA_CLIENT_NOT_USED,
+	[IPA_4_0][IPA_CLIENT_Q6_LAN_CONS]         = {
+			14, IPA_v4_0_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 14, 4, 9, 9, IPA_EE_Q6 } },
+	[IPA_4_0][IPA_CLIENT_Q6_WAN_CONS]         = {
+			13, IPA_v4_0_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 13, 3, 9, 9, IPA_EE_Q6 } },
+	[IPA_4_0][IPA_CLIENT_Q6_DUN_CONS]         = IPA_CLIENT_NOT_USED,
+	[IPA_4_0][IPA_CLIENT_Q6_DECOMP_CONS]	  = IPA_CLIENT_NOT_USED,
+	[IPA_4_0][IPA_CLIENT_Q6_DECOMP2_CONS]	  = IPA_CLIENT_NOT_USED,
+	[IPA_4_0][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = IPA_CLIENT_NOT_USED,
+	[IPA_4_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = IPA_CLIENT_NOT_USED,
+	[IPA_4_0][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = {
+			16, IPA_v4_0_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 16, 5, 9, 9, IPA_EE_Q6 } },
+	/* Only for test purpose */
+	/* MBIM aggregation test pipes should have the same QMB as USB_CONS */
+	[IPA_4_0][IPA_CLIENT_TEST_CONS]           = {
+			12, IPA_v4_0_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 12, 2, 5, 5, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_TEST1_CONS]           = {
+			12, IPA_v4_0_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 12, 2, 5, 5, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_TEST2_CONS]          = {
+			18, IPA_v4_0_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 18, 12, 6, 9, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_TEST3_CONS]          = {
+			20, IPA_v4_0_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 20, 14, 9, 9, IPA_EE_AP } },
+	[IPA_4_0][IPA_CLIENT_TEST4_CONS]          = {
+			21, IPA_v4_0_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 21, 15, 9, 9, IPA_EE_AP } },
+
+	/* IPA_4_0_MHI */
+	[IPA_4_0_MHI][IPA_CLIENT_HSIC1_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_WLAN1_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_HSIC2_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_USB2_PROD]           = IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_HSIC3_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_USB3_PROD]           = IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_HSIC4_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_USB4_PROD]           = IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_HSIC5_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_USB_PROD]            = {
+			0, IPA_v4_0_MHI_GROUP_DDR, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 0, 8, 8, 16, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_UC_USB_PROD]         = IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_A5_WLAN_AMPDU_PROD]  = IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_A2_EMBEDDED_PROD]    = IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_A2_TETHERED_PROD]    = IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_APPS_LAN_PROD] = IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_APPS_WAN_PROD]   = {
+			2, IPA_v4_0_MHI_GROUP_DDR, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 2, 3, 16, 32, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_APPS_CMD_PROD]	  = {
+			5, IPA_v4_0_MHI_GROUP_DDR, false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 5, 4, 20, 24, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_ODU_PROD]            = IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_MHI_PROD]            = {
+			1, IPA_v4_0_MHI_GROUP_PCIE, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_PCIE,
+			{ 1, 0, 8, 16, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_Q6_LAN_PROD]         = {
+			3, IPA_v4_0_MHI_GROUP_DDR, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 3, 0, 16, 32, IPA_EE_Q6 } },
+	[IPA_4_0_MHI][IPA_CLIENT_Q6_WAN_PROD]         = {
+			6, IPA_v4_0_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 6, 2, 12, 24, IPA_EE_Q6 } },
+	[IPA_4_0_MHI][IPA_CLIENT_Q6_CMD_PROD]	  = {
+			4, IPA_v4_0_MHI_GROUP_PCIE, false,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 4, 1, 20, 24, IPA_EE_Q6 } },
+	[IPA_4_0_MHI][IPA_CLIENT_Q6_DECOMP_PROD]      = IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_Q6_DECOMP2_PROD]     = IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = {
+			7, IPA_v4_0_MHI_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 7, 9, 8, 16, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = {
+			8, IPA_v4_0_MHI_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 8, 10, 8, 16, IPA_EE_AP } },
+	/* Only for test purpose */
+	[IPA_4_0_MHI][IPA_CLIENT_TEST_PROD]           = {
+			0, IPA_v4_0_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{0, 8, 8, 16, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_TEST1_PROD]          = {
+			0, IPA_v4_0_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{0, 8, 8, 16, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_TEST2_PROD]          = {
+			1, IPA_v4_0_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 1, 0, 8, 16, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_TEST3_PROD]          = {
+			7, IPA_v4_0_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{7, 9, 8, 16, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_TEST4_PROD]          = {
+			8, IPA_v4_0_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 8, 10, 8, 16, IPA_EE_AP } },
+
+	[IPA_4_0_MHI][IPA_CLIENT_HSIC1_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_WLAN1_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_HSIC2_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_USB2_CONS]           = IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_WLAN2_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_HSIC3_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_USB3_CONS]           = IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_WLAN3_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_HSIC4_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_USB4_CONS]           = IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_WLAN4_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_HSIC5_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_USB_CONS]            = {
+			19, IPA_v4_0_MHI_GROUP_DDR, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 19, 13, 9, 9, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_USB_DPL_CONS]        = {
+			15, IPA_v4_0_MHI_GROUP_DDR, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 15, 7, 5, 5, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_A2_EMBEDDED_CONS]    = IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_A2_TETHERED_CONS]    = IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_A5_LAN_WAN_CONS]     = IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_APPS_LAN_CONS]       = {
+			10, IPA_v4_0_MHI_GROUP_DDR, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 10, 5, 9, 9, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_APPS_WAN_CONS]       = {
+			11, IPA_v4_0_MHI_GROUP_DDR, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 11, 6, 9, 9, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_ODU_EMB_CONS]        = IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_ODU_TETH_CONS]       = IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_MHI_CONS]            = {
+			17, IPA_v4_0_MHI_GROUP_PCIE, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 17, 1, 17, 17, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_Q6_LAN_CONS]         = {
+			14, IPA_v4_0_MHI_GROUP_DDR, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 14, 4, 9, 9, IPA_EE_Q6 } },
+	[IPA_4_0_MHI][IPA_CLIENT_Q6_WAN_CONS]         = {
+			13, IPA_v4_0_MHI_GROUP_DDR, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 13, 3, 9, 9, IPA_EE_Q6 } },
+	[IPA_4_0_MHI][IPA_CLIENT_Q6_DUN_CONS]		= IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_Q6_DECOMP_CONS]	= IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_Q6_DECOMP2_CONS]	= IPA_CLIENT_NOT_USED,
+	[IPA_4_0_MHI][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = {
+			20, IPA_v4_0_MHI_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 20, 14, 9, 9, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = {
+			21, IPA_v4_0_MHI_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 21, 15, 9, 9, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = {
+			16, IPA_v4_0_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 16, 5, 9, 9, IPA_EE_Q6 } },
+	/* Only for test purpose */
+	[IPA_4_0_MHI][IPA_CLIENT_TEST_CONS]           = {
+			12, IPA_v4_0_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 12, 2, 5, 5, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_TEST1_CONS]           = {
+			12, IPA_v4_0_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 12, 2, 5, 5, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_TEST2_CONS]          = {
+			18, IPA_v4_0_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 18, 12, 6, 9, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_TEST3_CONS]          = {
+			20, IPA_v4_0_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 20, 14, 9, 9, IPA_EE_AP } },
+	[IPA_4_0_MHI][IPA_CLIENT_TEST4_CONS]          = {
+			21, IPA_v4_0_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 21, 15, 9, 9, IPA_EE_AP } },
+
+
 };
 
 static struct msm_bus_vectors ipa_init_vectors_v3_0[]  = {
@@ -1587,16 +2086,22 @@
  */
 void ipa3_cfg_qsb(void)
 {
-	int qsb_max_writes[2] = { 8, 2 };
-	int qsb_max_reads[2] = { 8, 8 };
+	struct ipahal_reg_qsb_max_reads max_reads = { 0 };
+	struct ipahal_reg_qsb_max_writes max_writes = { 0 };
+
+	max_reads.qmb_0_max_reads = 8;
+	max_reads.qmb_1_max_reads = 8;
+
+	max_writes.qmb_0_max_writes = 8;
+	max_writes.qmb_1_max_writes = 2;
 
 	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5) {
-		qsb_max_writes[1] = 4;
-		qsb_max_reads[1] = 12;
+		max_writes.qmb_1_max_writes = 4;
+		max_reads.qmb_1_max_reads = 12;
 	}
 
-	ipahal_write_reg_fields(IPA_QSB_MAX_WRITES, qsb_max_writes);
-	ipahal_write_reg_fields(IPA_QSB_MAX_READS, qsb_max_reads);
+	ipahal_write_reg_fields(IPA_QSB_MAX_WRITES, &max_writes);
+	ipahal_write_reg_fields(IPA_QSB_MAX_READS, &max_reads);
 }
 
 /**
@@ -1624,6 +2129,9 @@
 	case IPA_HW_v3_5_1:
 		val = IPA_BCR_REG_VAL_v3_5;
 		break;
+	case IPA_HW_v4_0:
+		val = IPA_BCR_REG_VAL_v4_0;
+		break;
 	default:
 		IPAERR("unknown HW type in dts\n");
 		return -EFAULT;
@@ -1663,6 +2171,15 @@
 	case IPA_HW_v3_5_1:
 		hw_type_index = IPA_3_5_1;
 		break;
+	case IPA_HW_v4_0:
+		hw_type_index = IPA_4_0;
+		/*
+		 *this flag is initialized only after fw load trigger from
+		 * user space (ipa3_write)
+		 */
+		if (ipa3_ctx->ipa_config_is_mhi)
+			hw_type_index = IPA_4_0_MHI;
+		break;
 	default:
 		IPAERR("Incorrect IPA version %d\n", ipa3_ctx->ipa_hw_type);
 		hw_type_index = IPA_3_0;
@@ -2573,12 +3090,15 @@
 	ipa3_ctx->ep[clnt_hdl].rt_tbl_idx =
 		IPA_MEM_PART(v4_apps_rt_index_lo);
 
-	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
 
-	init_rt.route_table_index = ipa3_ctx->ep[clnt_hdl].rt_tbl_idx;
-	ipahal_write_reg_n_fields(IPA_ENDP_INIT_ROUTE_n, clnt_hdl, &init_rt);
+		init_rt.route_table_index = ipa3_ctx->ep[clnt_hdl].rt_tbl_idx;
+		ipahal_write_reg_n_fields(IPA_ENDP_INIT_ROUTE_n,
+			clnt_hdl, &init_rt);
 
-	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	}
 
 	return 0;
 }
@@ -2815,11 +3335,18 @@
 {
 	struct ipahal_reg_qcncm qcncm;
 
-	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
-	ipahal_read_reg_fields(IPA_QCNCM, &qcncm);
-	qcncm.mode_en = mode;
-	ipahal_write_reg_fields(IPA_QCNCM, &qcncm);
-	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+		if (mode != IPA_MBIM_AGGR) {
+			IPAERR("Only MBIM mode is supported starting 4.0\n");
+			return -EPERM;
+		}
+	} else {
+		IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+		ipahal_read_reg_fields(IPA_QCNCM, &qcncm);
+		qcncm.mode_en = mode;
+		ipahal_write_reg_fields(IPA_QCNCM, &qcncm);
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	}
 
 	return 0;
 }
@@ -2839,6 +3366,11 @@
 {
 	struct ipahal_reg_qcncm qcncm;
 
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+		IPAERR("QCNCM mode is not supported starting 4.0\n");
+		return -EPERM;
+	}
+
 	if (sig == NULL) {
 		IPAERR("bad argument for ipa3_set_qcncm_ndp_sig/n");
 		return -EINVAL;
@@ -2863,6 +3395,11 @@
 {
 	struct ipahal_reg_single_ndp_mode mode;
 
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+		IPAERR("QCNCM mode is not supported starting 4.0\n");
+		return -EPERM;
+	}
+
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
 	ipahal_read_reg_fields(IPA_SINGLE_NDP_MODE, &mode);
 	mode.single_ndp_en = enable;
@@ -2910,13 +3447,13 @@
  */
 int ipa3_init_mem_partition(struct device_node *node)
 {
-	const size_t ram_mmap_v3_0_size = 70;
-	const size_t ram_mmap_v3_5_size = 72;
 	const size_t ram_mmap_current_version_size =
 		sizeof(ipa3_ctx->ctrl->mem_partition) / sizeof(u32);
-	const size_t version = ipa_get_hw_type();
 	int result;
 
+	memset(&ipa3_ctx->ctrl->mem_partition, 0,
+		sizeof(ipa3_ctx->ctrl->mem_partition));
+
 	IPADBG("Reading from DTS as u32 array\n");
 
 	/*
@@ -2925,39 +3462,21 @@
 	 * mismatch. The size of the array monotonically increasing because the
 	 * obsolete entries are set to zero rather than deleted, so the
 	 * possible sizes are in range
-	 *	[ram_mmap_v3_0_size, ram_mmap_current_version_size]
+	 *	[1, ram_mmap_current_version_size]
 	 */
 	result = of_property_read_variable_u32_array(node, "qcom,ipa-ram-mmap",
 		(u32 *)&ipa3_ctx->ctrl->mem_partition,
-		ram_mmap_v3_0_size, ram_mmap_current_version_size);
+		1, ram_mmap_current_version_size);
 
-	if (result <= 0) {
-		IPAERR("Read operation failed\n");
+	if (IPA_MEM_PART(uc_event_ring_ofst) & 1023) {
+		IPAERR("UC EVENT RING OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(uc_event_ring_ofst));
 		return -ENODEV;
 	}
-	if (version < IPA_HW_v3_0)
-		ipa_assert();
-	if (version < IPA_HW_v3_5) {
-		if (result != ram_mmap_v3_0_size) {
-			IPAERR("Mismatch at IPA RAM MMAP DTS entry\n");
-			return -ENODEV;
-		}
-	} else {
-		if (result != ram_mmap_v3_5_size) {
-			IPAERR("Mismatch at IPA RAM MMAP DTS entry\n");
-			return -ENODEV;
-		}
 
-		if (IPA_MEM_PART(uc_event_ring_ofst) & 1023) {
-			IPAERR("UC EVENT RING OFST 0x%x is unaligned\n",
-				IPA_MEM_PART(uc_event_ring_ofst));
-			return -ENODEV;
-		}
-
-		IPADBG("UC EVENT RING OFST 0x%x SIZE 0x%x\n",
-			IPA_MEM_PART(uc_event_ring_ofst),
-			IPA_MEM_PART(uc_event_ring_size));
-	}
+	IPADBG("UC EVENT RING OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(uc_event_ring_ofst),
+		IPA_MEM_PART(uc_event_ring_size));
 
 	IPADBG("NAT OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(nat_ofst),
 		IPA_MEM_PART(nat_size));
@@ -3126,6 +3645,16 @@
 		IPA_MEM_PART(apps_hdr_proc_ctx_size),
 		IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr));
 
+	if (IPA_MEM_PART(pdn_config_ofst) & 7) {
+		IPAERR("PDN CONFIG OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(pdn_config_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("PDN CONFIG OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(pdn_config_ofst),
+		IPA_MEM_PART(pdn_config_size));
+
 	if (IPA_MEM_PART(modem_ofst) & 7) {
 		IPAERR("MODEM OFST 0x%x is unaligned\n",
 			IPA_MEM_PART(modem_ofst));
@@ -3207,9 +3736,11 @@
 	ctrl->ipa_reg_base_ofst = ipahal_get_reg_base();
 	ctrl->ipa_init_sram = _ipa_init_sram_v3;
 	ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v3_0;
-
 	ctrl->ipa_init_hdr = _ipa_init_hdr_v3_0;
 
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+		ctrl->ipa3_read_ep_reg = _ipa_read_ep_reg_v4_0;
+
 	return 0;
 }
 
@@ -3343,8 +3874,7 @@
 		res = -ENOMEM;
 		goto fail_free_tag_desc;
 	}
-	tag_desc[desc_idx].opcode =
-		ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+	tag_desc[desc_idx].opcode = cmd_pyld->opcode;
 	tag_desc[desc_idx].pyld = cmd_pyld->data;
 	tag_desc[desc_idx].len = cmd_pyld->len;
 	tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
@@ -3362,8 +3892,7 @@
 		res = -ENOMEM;
 		goto fail_free_desc;
 	}
-	tag_desc[desc_idx].opcode =
-		ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_PACKET_INIT);
+	tag_desc[desc_idx].opcode = cmd_pyld->opcode;
 	tag_desc[desc_idx].pyld = cmd_pyld->data;
 	tag_desc[desc_idx].len = cmd_pyld->len;
 	tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
@@ -3380,8 +3909,7 @@
 		res = -ENOMEM;
 		goto fail_free_desc;
 	}
-	tag_desc[desc_idx].opcode =
-		ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
+	tag_desc[desc_idx].opcode = cmd_pyld->opcode;
 	tag_desc[desc_idx].pyld = cmd_pyld->data;
 	tag_desc[desc_idx].len = cmd_pyld->len;
 	tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
@@ -3520,8 +4048,7 @@
 			goto fail_alloc_reg_write_agg_close;
 		}
 
-		desc[desc_idx].opcode =
-			ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+		desc[desc_idx].opcode = cmd_pyld->opcode;
 		desc[desc_idx].pyld = cmd_pyld->data;
 		desc[desc_idx].len = cmd_pyld->len;
 		desc[desc_idx].type = IPA_IMM_CMD_DESC;
@@ -4059,6 +4586,49 @@
 			}
 		}
 		break;
+	case IPA_4_0:
+	case IPA_4_0_MHI:
+		if (src) {
+			switch (group_index) {
+			case IPA_v4_0_GROUP_LWA_DL:
+			case IPA_v4_0_GROUP_UL_DL:
+				ipahal_write_reg_n_fields(
+					IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n,
+					n, val);
+				break;
+			case IPA_v4_0_MHI_GROUP_DMA:
+			case IPA_v4_0_GROUP_UC_RX_Q:
+				ipahal_write_reg_n_fields(
+					IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n,
+					n, val);
+				break;
+			default:
+				IPAERR(
+				" Invalid source resource group,index #%d\n",
+				group_index);
+				break;
+			}
+		} else {
+			switch (group_index) {
+			case IPA_v4_0_GROUP_LWA_DL:
+			case IPA_v4_0_GROUP_UL_DL:
+				ipahal_write_reg_n_fields(
+					IPA_DST_RSRC_GRP_01_RSRC_TYPE_n,
+					n, val);
+				break;
+			case IPA_v4_0_MHI_GROUP_DMA:
+				ipahal_write_reg_n_fields(
+					IPA_DST_RSRC_GRP_23_RSRC_TYPE_n,
+					n, val);
+				break;
+			default:
+				IPAERR(
+				" Invalid destination resource group,index #%d\n",
+				group_index);
+				break;
+			}
+		}
+		break;
 	default:
 		IPAERR("invalid hw type\n");
 		WARN_ON(1);
@@ -4103,6 +4673,33 @@
 	}
 }
 
+static void ipa3_configure_rx_hps_weight(void)
+{
+	struct ipahal_reg_rx_hps_weights val;
+	u8 hw_type_idx;
+
+	hw_type_idx = ipa3_get_hw_type_index();
+
+	val.hps_queue_weight_0 =
+			ipa3_rsrc_rx_grp_hps_weight_config
+			[hw_type_idx][IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG]
+			[0];
+	val.hps_queue_weight_1 =
+			ipa3_rsrc_rx_grp_hps_weight_config
+			[hw_type_idx][IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG]
+			[1];
+	val.hps_queue_weight_2 =
+			ipa3_rsrc_rx_grp_hps_weight_config
+			[hw_type_idx][IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG]
+			[2];
+	val.hps_queue_weight_3 =
+			ipa3_rsrc_rx_grp_hps_weight_config
+			[hw_type_idx][IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG]
+			[3];
+
+	ipahal_write_reg_fields(IPA_HPS_FTCH_ARB_QUEUE_WEIGHT, &val);
+}
+
 void ipa3_set_resorce_groups_min_max_limits(void)
 {
 	int i;
@@ -4133,6 +4730,13 @@
 		src_grp_idx_max = IPA_v3_5_SRC_GROUP_MAX;
 		dst_grp_idx_max = IPA_v3_5_DST_GROUP_MAX;
 		break;
+	case IPA_4_0:
+	case IPA_4_0_MHI:
+		src_rsrc_type_max = IPA_v4_0_RSRC_GRP_TYPE_SRC_MAX;
+		dst_rsrc_type_max = IPA_v4_0_RSRC_GRP_TYPE_DST_MAX;
+		src_grp_idx_max = IPA_v4_0_SRC_GROUP_MAX;
+		dst_grp_idx_max = IPA_v4_0_DST_GROUP_MAX;
+		break;
 	default:
 		IPAERR("invalid hw type index\n");
 		WARN_ON(1);
@@ -4186,6 +4790,9 @@
 		ipa3_configure_rx_hps_clients(1, false);
 	}
 
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5)
+		ipa3_configure_rx_hps_weight();
+
 	IPADBG("EXIT\n");
 }
 
@@ -4309,8 +4916,7 @@
 {
 	struct ipa3_desc desc = {0};
 
-	desc.opcode = ipahal_imm_cmd_get_opcode_param(
-		IPA_IMM_CMD_DMA_TASK_32B_ADDR, 1);
+	desc.opcode = ipa3_ctx->dma_task_info.cmd_pyld->opcode;
 	desc.pyld = ipa3_ctx->dma_task_info.cmd_pyld->data;
 	desc.len = ipa3_ctx->dma_task_info.cmd_pyld->len;
 	desc.type = IPA_IMM_CMD_DESC;
@@ -4565,6 +5171,7 @@
 	switch (ipa3_ctx->ipa_hw_type) {
 	case IPA_HW_v3_0:
 	case IPA_HW_v3_5:
+	case IPA_HW_v4_0:
 		return false;
 	case IPA_HW_v3_1:
 	case IPA_HW_v3_5_1:
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
index fa9c6c8..d35b8a7 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
@@ -49,6 +49,8 @@
 #define IPAHAL_MEM_ALLOC(__size, __is_atomic_ctx) \
 		(kzalloc((__size), ((__is_atomic_ctx)?GFP_ATOMIC:GFP_KERNEL)))
 
+static u16 ipahal_imm_cmd_get_opcode(enum ipahal_imm_cmd_name cmd);
+
 
 static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dma_task_32b_addr(
 	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
@@ -63,6 +65,8 @@
 		IPAHAL_ERR("kzalloc err\n");
 		return pyld;
 	}
+	/* Currently supports only one packet */
+	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd) + (1 << 8);
 	pyld->len = sizeof(*data);
 	data = (struct ipa_imm_cmd_hw_dma_task_32b_addr *)pyld->data;
 
@@ -101,6 +105,7 @@
 		IPAHAL_ERR("kzalloc err\n");
 		return pyld;
 	}
+	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
 	pyld->len = sizeof(*data);
 	data = (struct ipa_imm_cmd_hw_ip_packet_tag_status *)pyld->data;
 
@@ -127,6 +132,7 @@
 		IPAHAL_ERR("kzalloc err\n");
 		return pyld;
 	}
+	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
 	pyld->len = sizeof(*data);
 	data = (struct ipa_imm_cmd_hw_dma_shared_mem *)pyld->data;
 
@@ -164,6 +170,61 @@
 	return pyld;
 }
 
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dma_shared_mem_v_4_0(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_dma_shared_mem_v_4_0 *data;
+	struct ipahal_imm_cmd_dma_shared_mem *mem_params =
+		(struct ipahal_imm_cmd_dma_shared_mem *)params;
+
+	if (unlikely(mem_params->size & ~0xFFFF)) {
+		IPAHAL_ERR("Size is bigger than 16bit width 0x%x\n",
+			mem_params->size);
+		WARN_ON(1);
+		return NULL;
+	}
+	if (unlikely(mem_params->local_addr & ~0xFFFF)) {
+		IPAHAL_ERR("Local addr is bigger than 16bit width 0x%x\n",
+			mem_params->local_addr);
+		WARN_ON(1);
+		return NULL;
+	}
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		WARN_ON(1);
+		return pyld;
+	}
+
+	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_dma_shared_mem_v_4_0 *)pyld->data;
+
+	data->direction = mem_params->is_read ? 1 : 0;
+	data->clear_after_read = mem_params->clear_after_read;
+	data->size = mem_params->size;
+	data->local_addr = mem_params->local_addr;
+	data->system_addr = mem_params->system_addr;
+	pyld->opcode |= (mem_params->skip_pipeline_clear ? 1 : 0) << 8;
+	switch (mem_params->pipeline_clear_options) {
+	case IPAHAL_HPS_CLEAR:
+		break;
+	case IPAHAL_SRC_GRP_CLEAR:
+		pyld->opcode |= (1 << 9);
+		break;
+	case IPAHAL_FULL_PIPELINE_CLEAR:
+		pyld->opcode |= (2 << 9);
+		break;
+	default:
+		IPAHAL_ERR("unsupported pipline clear option %d\n",
+			mem_params->pipeline_clear_options);
+		WARN_ON(1);
+	};
+
+	return pyld;
+}
+
 static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_register_write(
 	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
 {
@@ -177,6 +238,7 @@
 		IPAHAL_ERR("kzalloc err\n");
 		return pyld;
 	}
+	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
 	pyld->len = sizeof(*data);
 	data = (struct ipa_imm_cmd_hw_register_write *)pyld->data;
 
@@ -209,6 +271,54 @@
 	return pyld;
 }
 
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_register_write_v_4_0(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_register_write_v_4_0 *data;
+	struct ipahal_imm_cmd_register_write *regwrt_params =
+		(struct ipahal_imm_cmd_register_write *)params;
+
+	if (unlikely(regwrt_params->offset & ~0xFFFF)) {
+		IPAHAL_ERR("Offset is bigger than 16bit width 0x%x\n",
+			regwrt_params->offset);
+		WARN_ON(1);
+		return NULL;
+	}
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		WARN_ON(1);
+		return pyld;
+	}
+	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_register_write_v_4_0 *)pyld->data;
+
+	data->offset = regwrt_params->offset;
+	data->offset_high = regwrt_params->offset >> 16;
+	data->value = regwrt_params->value;
+	data->value_mask = regwrt_params->value_mask;
+
+	pyld->opcode |= (regwrt_params->skip_pipeline_clear ? 1 : 0) << 8;
+	switch (regwrt_params->pipeline_clear_options) {
+	case IPAHAL_HPS_CLEAR:
+		break;
+	case IPAHAL_SRC_GRP_CLEAR:
+		pyld->opcode |= (1 << 9);
+		break;
+	case IPAHAL_FULL_PIPELINE_CLEAR:
+		pyld->opcode |= (2 << 9);
+		break;
+	default:
+		IPAHAL_ERR("unsupported pipline clear option %d\n",
+			regwrt_params->pipeline_clear_options);
+		WARN_ON(1);
+	};
+
+	return pyld;
+}
+
 static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_packet_init(
 	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
 {
@@ -222,6 +332,7 @@
 		IPAHAL_ERR("kzalloc err\n");
 		return pyld;
 	}
+	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
 	pyld->len = sizeof(*data);
 	data = (struct ipa_imm_cmd_hw_ip_packet_init *)pyld->data;
 
@@ -248,6 +359,7 @@
 		IPAHAL_ERR("kzalloc err\n");
 		return pyld;
 	}
+	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
 	pyld->len = sizeof(*data);
 	data = (struct ipa_imm_cmd_hw_nat_dma *)pyld->data;
 
@@ -272,6 +384,7 @@
 		IPAHAL_ERR("kzalloc err\n");
 		return pyld;
 	}
+	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
 	pyld->len = sizeof(*data);
 	data = (struct ipa_imm_cmd_hw_hdr_init_system *)pyld->data;
 
@@ -293,6 +406,7 @@
 		IPAHAL_ERR("kzalloc err\n");
 		return pyld;
 	}
+	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
 	pyld->len = sizeof(*data);
 	data = (struct ipa_imm_cmd_hw_hdr_init_local *)pyld->data;
 
@@ -321,6 +435,7 @@
 		IPAHAL_ERR("kzalloc err\n");
 		return pyld;
 	}
+	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
 	pyld->len = sizeof(*data);
 	data = (struct ipa_imm_cmd_hw_ip_v6_routing_init *)pyld->data;
 
@@ -347,6 +462,7 @@
 		IPAHAL_ERR("kzalloc err\n");
 		return pyld;
 	}
+	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
 	pyld->len = sizeof(*data);
 	data = (struct ipa_imm_cmd_hw_ip_v4_routing_init *)pyld->data;
 
@@ -373,6 +489,7 @@
 		IPAHAL_ERR("kzalloc err\n");
 		return pyld;
 	}
+	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
 	pyld->len = sizeof(*data);
 	data = (struct ipa_imm_cmd_hw_ip_v4_nat_init *)pyld->data;
 
@@ -411,6 +528,7 @@
 		IPAHAL_ERR("kzalloc err\n");
 		return pyld;
 	}
+	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
 	pyld->len = sizeof(*data);
 	data = (struct ipa_imm_cmd_hw_ip_v6_filter_init *)pyld->data;
 
@@ -437,6 +555,7 @@
 		IPAHAL_ERR("kzalloc err\n");
 		return pyld;
 	}
+	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
 	pyld->len = sizeof(*data);
 	data = (struct ipa_imm_cmd_hw_ip_v4_filter_init *)pyld->data;
 
@@ -455,16 +574,11 @@
  *  specific IPA version
  * @construct - CB to construct imm command payload from abstracted structure
  * @opcode - Immediate command OpCode
- * @dyn_op - Does this command supports Dynamic opcode?
- *  Some commands opcode are dynamic where the part of the opcode is
- *  supplied as param. This flag indicates if the specific command supports it
- *  or not.
  */
 struct ipahal_imm_cmd_obj {
 	struct ipahal_imm_cmd_pyld *(*construct)(enum ipahal_imm_cmd_name cmd,
 		const void *params, bool is_atomic_ctx);
 	u16 opcode;
-	bool dyn_op;
 };
 
 /*
@@ -484,43 +598,51 @@
 	/* IPAv3 */
 	[IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_FILTER_INIT] = {
 		ipa_imm_cmd_construct_ip_v4_filter_init,
-		3, false},
+		3},
 	[IPA_HW_v3_0][IPA_IMM_CMD_IP_V6_FILTER_INIT] = {
 		ipa_imm_cmd_construct_ip_v6_filter_init,
-		4, false},
+		4},
 	[IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_NAT_INIT] = {
 		ipa_imm_cmd_construct_ip_v4_nat_init,
-		5, false},
+		5},
 	[IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_ROUTING_INIT] = {
 		ipa_imm_cmd_construct_ip_v4_routing_init,
-		7, false},
+		7},
 	[IPA_HW_v3_0][IPA_IMM_CMD_IP_V6_ROUTING_INIT] = {
 		ipa_imm_cmd_construct_ip_v6_routing_init,
-		8, false},
+		8},
 	[IPA_HW_v3_0][IPA_IMM_CMD_HDR_INIT_LOCAL] = {
 		ipa_imm_cmd_construct_hdr_init_local,
-		9, false},
+		9},
 	[IPA_HW_v3_0][IPA_IMM_CMD_HDR_INIT_SYSTEM] = {
 		ipa_imm_cmd_construct_hdr_init_system,
-		10, false},
+		10},
 	[IPA_HW_v3_0][IPA_IMM_CMD_REGISTER_WRITE] = {
 		ipa_imm_cmd_construct_register_write,
-		12, false},
+		12},
 	[IPA_HW_v3_0][IPA_IMM_CMD_NAT_DMA] = {
 		ipa_imm_cmd_construct_nat_dma,
-		14, false},
+		14},
 	[IPA_HW_v3_0][IPA_IMM_CMD_IP_PACKET_INIT] = {
 		ipa_imm_cmd_construct_ip_packet_init,
-		16, false},
+		16},
 	[IPA_HW_v3_0][IPA_IMM_CMD_DMA_TASK_32B_ADDR] = {
 		ipa_imm_cmd_construct_dma_task_32b_addr,
-		17, true},
+		17},
 	[IPA_HW_v3_0][IPA_IMM_CMD_DMA_SHARED_MEM] = {
 		ipa_imm_cmd_construct_dma_shared_mem,
-		19, false},
+		19},
 	[IPA_HW_v3_0][IPA_IMM_CMD_IP_PACKET_TAG_STATUS] = {
 		ipa_imm_cmd_construct_ip_packet_tag_status,
-		20, false},
+		20},
+
+	/* IPAv4 */
+	[IPA_HW_v4_0][IPA_IMM_CMD_REGISTER_WRITE] = {
+		ipa_imm_cmd_construct_register_write_v_4_0,
+		12},
+	[IPA_HW_v4_0][IPA_IMM_CMD_DMA_SHARED_MEM] = {
+		ipa_imm_cmd_construct_dma_shared_mem_v_4_0,
+		19},
 };
 
 /*
@@ -589,7 +711,7 @@
 /*
  * ipahal_imm_cmd_get_opcode() - Get the fixed opcode of the immediate command
  */
-u16 ipahal_imm_cmd_get_opcode(enum ipahal_imm_cmd_name cmd)
+static u16 ipahal_imm_cmd_get_opcode(enum ipahal_imm_cmd_name cmd)
 {
 	u32 opcode;
 
@@ -613,63 +735,6 @@
 }
 
 /*
- * ipahal_imm_cmd_get_opcode_param() - Get the opcode of an immediate command
- *  that supports dynamic opcode
- * Some commands opcode are not totaly fixed, but part of it is
- *  a supplied parameter. E.g. Low-Byte is fixed and Hi-Byte
- *  is a given parameter.
- * This API will return the composed opcode of the command given
- *  the parameter
- * Note: Use this API only for immediate comamnds that support Dynamic Opcode
- */
-u16 ipahal_imm_cmd_get_opcode_param(enum ipahal_imm_cmd_name cmd, int param)
-{
-	u32 opcode;
-
-	if (cmd >= IPA_IMM_CMD_MAX) {
-		IPAHAL_ERR("Invalid immediate command IMM_CMD=%u\n", cmd);
-		ipa_assert();
-		return -EFAULT;
-	}
-
-	IPAHAL_DBG_LOW("Get opcode of IMM_CMD=%s\n",
-		ipahal_imm_cmd_name_str(cmd));
-
-	if (!ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].dyn_op) {
-		IPAHAL_ERR("IMM_CMD=%s does not support dynamic opcode\n",
-			ipahal_imm_cmd_name_str(cmd));
-		ipa_assert();
-		return -EFAULT;
-	}
-
-	/* Currently, dynamic opcode commands uses params to be set
-	 *  on the Opcode hi-byte (lo-byte is fixed).
-	 * If this to be changed in the future, make the opcode calculation
-	 *  a CB per command
-	 */
-	if (param & ~0xFFFF) {
-		IPAHAL_ERR("IMM_CMD=%s opcode param is invalid\n",
-			ipahal_imm_cmd_name_str(cmd));
-		ipa_assert();
-		return -EFAULT;
-	}
-	opcode = ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].opcode;
-	if (opcode == -1) {
-		IPAHAL_ERR("Try to get opcode of obsolete IMM_CMD=%s\n",
-			ipahal_imm_cmd_name_str(cmd));
-		ipa_assert();
-		return -EFAULT;
-	}
-	if (opcode & ~0xFFFF) {
-		IPAHAL_ERR("IMM_CMD=%s opcode will be overridden\n",
-			ipahal_imm_cmd_name_str(cmd));
-		ipa_assert();
-		return -EFAULT;
-	}
-	return (opcode + (param<<8));
-}
-
-/*
  * ipahal_construct_imm_cmd() - Construct immdiate command
  * This function builds imm cmd bulk that can be be sent to IPA
  * The command will be allocated dynamically.
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
index 8f85d4e..e71a48b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
@@ -259,6 +259,8 @@
  * Perform mem copy into or out of the SW area of IPA local mem
  * @size: Size in bytes of data to copy. Expected size is up to 2K bytes
  * @local_addr: Address in IPA local memory
+ * @clear_after_read: Clear local memory at the end of a read operation allows
+ *  atomic read and clear if HPS is clear. Ignore for writes.
  * @is_read: Read operation from local memory? If not, then write.
  * @skip_pipeline_clear: if to skip pipeline clear waiting (don't wait)
  * @pipeline_clear_option: options for pipeline clear waiting
@@ -267,6 +269,7 @@
 struct ipahal_imm_cmd_dma_shared_mem {
 	u32 size;
 	u32 local_addr;
+	bool clear_after_read;
 	bool is_read;
 	bool skip_pipeline_clear;
 	enum ipahal_pipeline_clear_option pipeline_clear_options;
@@ -322,13 +325,13 @@
 /*
  * struct ipahal_imm_cmd_pyld - Immediate cmd payload information
  * @len: length of the buffer
- * @reserved: padding bytes to make data buffer aligned
+ * @opcode: opcode of the immediate command
  * @data: buffer contains the immediate command payload. Buffer goes
  *  back to back with this structure
  */
 struct ipahal_imm_cmd_pyld {
 	u16 len;
-	u16 reserved;
+	u16 opcode;
 	u8 data[0];
 };
 
@@ -342,23 +345,6 @@
 const char *ipahal_imm_cmd_name_str(enum ipahal_imm_cmd_name cmd_name);
 
 /*
- * ipahal_imm_cmd_get_opcode() - Get the fixed opcode of the immediate command
- */
-u16 ipahal_imm_cmd_get_opcode(enum ipahal_imm_cmd_name cmd);
-
-/*
- * ipahal_imm_cmd_get_opcode_param() - Get the opcode of an immediate command
- *  that supports dynamic opcode
- * Some commands opcode are not totaly fixed, but part of it is
- *  a supplied parameter. E.g. Low-Byte is fixed and Hi-Byte
- *  is a given parameter.
- * This API will return the composed opcode of the command given
- *  the parameter
- * Note: Use this API only for immediate comamnds that support Dynamic Opcode
- */
-u16 ipahal_imm_cmd_get_opcode_param(enum ipahal_imm_cmd_name cmd, int param);
-
-/*
  * ipahal_construct_imm_cmd() - Construct immdiate command
  * This function builds imm cmd bulk that can be be sent to IPA
  * The command will be allocated dynamically.
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
index d6a496e..804c554 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
@@ -278,7 +278,7 @@
  *  in H/W format.
  * Write value to register. Allows reg changes to be synced with data packet
  *  and other immediate command. Can be used to access the sram
- * @sw_rsvd: Ignored by H/W. My be used by S/W
+ * @sw_rsvd: Ignored by H/W. May be used by S/W
  * @skip_pipeline_clear: 0 to wait until IPA pipeline is clear. 1 don't wait
  * @offset: offset from IPA base address - Lower 16bit of the IPA reg addr
  * @value: value to write to register
@@ -301,6 +301,29 @@
 };
 
 /*
+ * struct ipa_imm_cmd_hw_register_write - REGISTER_WRITE command payload
+ *  in H/W format.
+ * Write value to register. Allows reg changes to be synced with data packet
+ *  and other immediate command. Can be used to access the sram
+ * @sw_rsvd: Ignored by H/W. May be used by S/W
+ * @offset_high: high bits of the Offset field - bits 17-20
+ * @rsvd: reserved - should be set to zero
+ * @offset: offset from IPA base address - Lower 16bit of the IPA reg addr
+ * @value: value to write to register
+ * @value_mask: mask specifying which value bits to write to the register
+ * @rsvd2: reserved - should be set to zero
+ */
+struct ipa_imm_cmd_hw_register_write_v_4_0 {
+	u64 sw_rsvd:11;
+	u64 offset_high:4;
+	u64 rsvd:1;
+	u64 offset:16;
+	u64 value:32;
+	u64 value_mask:32;
+	u64 rsvd2:32;
+};
+
+/*
  * struct ipa_imm_cmd_hw_dma_shared_mem - DMA_SHARED_MEM command payload
  *  in H/W format.
  * Perform mem copy into or out of the SW area of IPA local mem
@@ -331,6 +354,31 @@
 };
 
 /*
+ * struct ipa_imm_cmd_hw_dma_shared_mem - DMA_SHARED_MEM command payload
+ *  in H/W format.
+ * Perform mem copy into or out of the SW area of IPA local mem
+ * @sw_rsvd: Ignored by H/W. My be used by S/W
+ * @size: Size in bytes of data to copy. Expected size is up to 2K bytes
+ * @clear_after_read: Clear local memory at the end of a read operation allows
+ *  atomic read and clear if HPS is clear. Ignore for writes.
+ * @local_addr: Address in IPA local memory
+ * @direction: Read or write?
+ *	0: IPA write, Write to local address from system address
+ *	1: IPA read, Read from local address to system address
+ * @rsvd: reserved - should be set to zero
+ * @system_addr: Address in system memory
+ */
+struct ipa_imm_cmd_hw_dma_shared_mem_v_4_0 {
+	u64 sw_rsvd:15;
+	u64 clear_after_read:1;
+	u64 size:16;
+	u64 local_addr:16;
+	u64 direction:1;
+	u64 rsvd:15;
+	u64 system_addr:64;
+};
+
+/*
  * struct ipa_imm_cmd_hw_ip_packet_tag_status -
  *  IP_PACKET_TAG_STATUS command payload in H/W format.
  * This cmd is used for to allow SW to track HW processing by setting a TAG
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index d369e82..1a119b9 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -78,6 +78,7 @@
 	__stringify(IPA_RX_HPS_CLIENTS_MIN_DEPTH_1),
 	__stringify(IPA_RX_HPS_CLIENTS_MAX_DEPTH_0),
 	__stringify(IPA_RX_HPS_CLIENTS_MAX_DEPTH_1),
+	__stringify(IPA_HPS_FTCH_ARB_QUEUE_WEIGHT),
 	__stringify(IPA_QSB_MAX_WRITES),
 	__stringify(IPA_QSB_MAX_READS),
 	__stringify(IPA_TX_CFG),
@@ -355,6 +356,29 @@
 			IPA_ENDP_STATUS_n_STATUS_LOCATION_BMSK);
 }
 
+static void ipareg_construct_endp_status_n_v4_0(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_ep_cfg_status *ep_status =
+		(struct ipahal_reg_ep_cfg_status *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, ep_status->status_en,
+			IPA_ENDP_STATUS_n_STATUS_EN_SHFT,
+			IPA_ENDP_STATUS_n_STATUS_EN_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_status->status_ep,
+			IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT,
+			IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_status->status_location,
+			IPA_ENDP_STATUS_n_STATUS_LOCATION_SHFT,
+			IPA_ENDP_STATUS_n_STATUS_LOCATION_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_status->status_pkt_suppress,
+			IPA_ENDP_STATUS_n_STATUS_PKT_SUPPRESS_SHFT,
+			IPA_ENDP_STATUS_n_STATUS_PKT_SUPPRESS_BMSK);
+}
+
 static void ipareg_construct_qcncm(
 	enum ipahal_reg_name reg, const void *fields, u32 *val)
 {
@@ -896,12 +920,14 @@
 static void ipareg_construct_qsb_max_writes(enum ipahal_reg_name reg,
 	const void *fields, u32 *val)
 {
-	int *qsb_max_writes = (int *)fields;
+	struct ipahal_reg_qsb_max_writes *max_writes;
 
-	IPA_SETFIELD_IN_REG(*val, qsb_max_writes[0],
+	max_writes = (struct ipahal_reg_qsb_max_writes *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, max_writes->qmb_0_max_writes,
 			    IPA_QSB_MAX_WRITES_GEN_QMB_0_MAX_WRITES_SHFT,
 			    IPA_QSB_MAX_WRITES_GEN_QMB_0_MAX_WRITES_BMSK);
-	IPA_SETFIELD_IN_REG(*val, qsb_max_writes[1],
+	IPA_SETFIELD_IN_REG(*val, max_writes->qmb_1_max_writes,
 			    IPA_QSB_MAX_WRITES_GEN_QMB_1_MAX_WRITES_SHFT,
 			    IPA_QSB_MAX_WRITES_GEN_QMB_1_MAX_WRITES_BMSK);
 }
@@ -909,16 +935,39 @@
 static void ipareg_construct_qsb_max_reads(enum ipahal_reg_name reg,
 	const void *fields, u32 *val)
 {
-	int *qsb_max_reads = (int *)fields;
+	struct ipahal_reg_qsb_max_reads *max_reads;
 
-	IPA_SETFIELD_IN_REG(*val, qsb_max_reads[0],
+	max_reads = (struct ipahal_reg_qsb_max_reads *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, max_reads->qmb_0_max_reads,
 			    IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_SHFT,
 			    IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BMSK);
-	IPA_SETFIELD_IN_REG(*val, qsb_max_reads[1],
+	IPA_SETFIELD_IN_REG(*val, max_reads->qmb_1_max_reads,
 			    IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_SHFT,
 			    IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BMSK);
 }
 
+static void ipareg_construct_qsb_max_reads_v4_0(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipahal_reg_qsb_max_reads *max_reads;
+
+	max_reads = (struct ipahal_reg_qsb_max_reads *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, max_reads->qmb_0_max_reads,
+			    IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_SHFT,
+			    IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BMSK);
+	IPA_SETFIELD_IN_REG(*val, max_reads->qmb_1_max_reads,
+			    IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_SHFT,
+			    IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BMSK);
+	IPA_SETFIELD_IN_REG(*val, max_reads->qmb_0_max_read_beats,
+		    IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BEATS_SHFT_V4_0,
+		    IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BEATS_BMSK_V4_0);
+	IPA_SETFIELD_IN_REG(*val, max_reads->qmb_1_max_read_beats,
+		    IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BEATS_SHFT_V4_0,
+		    IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BEATS_BMSK_V4_0);
+}
+
 static void ipareg_parse_tx_cfg(enum ipahal_reg_name reg,
 	void *fields, u32 val)
 {
@@ -934,9 +983,44 @@
 		IPA_TX_CFG_TX1_PREFETCH_DISABLE_SHFT_V3_5,
 		IPA_TX_CFG_TX1_PREFETCH_DISABLE_BMSK_V3_5);
 
-	tx_cfg->prefetch_almost_empty_size = IPA_GETFIELD_FROM_REG(val,
+	tx_cfg->tx0_prefetch_almost_empty_size = IPA_GETFIELD_FROM_REG(val,
 		IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_SHFT_V3_5,
 		IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_BMSK_V3_5);
+
+	tx_cfg->tx1_prefetch_almost_empty_size =
+		tx_cfg->tx0_prefetch_almost_empty_size;
+}
+
+static void ipareg_parse_tx_cfg_v4_0(enum ipahal_reg_name reg,
+	void *fields, u32 val)
+{
+	struct ipahal_reg_tx_cfg *tx_cfg;
+
+	tx_cfg = (struct ipahal_reg_tx_cfg *)fields;
+
+	tx_cfg->tx0_prefetch_almost_empty_size = IPA_GETFIELD_FROM_REG(val,
+		IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX0_SHFT_V4_0,
+		IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX0_BMSK_V4_0);
+
+	tx_cfg->tx1_prefetch_almost_empty_size = IPA_GETFIELD_FROM_REG(val,
+		IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX1_SHFT_V4_0,
+		IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX1_BMSK_V4_0);
+
+	tx_cfg->dmaw_scnd_outsd_pred_en = IPA_GETFIELD_FROM_REG(val,
+		IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_EN_SHFT_V4_0,
+		IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_EN_BMSK_V4_0);
+
+	tx_cfg->dmaw_scnd_outsd_pred_threshold = IPA_GETFIELD_FROM_REG(val,
+		IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_THRESHOLD_SHFT_V4_0,
+		IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_THRESHOLD_BMSK_V4_0);
+
+	tx_cfg->dmaw_max_beats_256_dis = IPA_GETFIELD_FROM_REG(val,
+		IPA_TX_CFG_DMAW_MAX_BEATS_256_DIS_SHFT_V4_0,
+		IPA_TX_CFG_DMAW_MAX_BEATS_256_DIS_BMSK_V4_0);
+
+	tx_cfg->pa_mask_en = IPA_GETFIELD_FROM_REG(val,
+		IPA_TX_CFG_PA_MASK_EN_SHFT_V4_0,
+		IPA_TX_CFG_PA_MASK_EN_BMSK_V4_0);
 }
 
 static void ipareg_construct_tx_cfg(enum ipahal_reg_name reg,
@@ -946,6 +1030,10 @@
 
 	tx_cfg = (struct ipahal_reg_tx_cfg *)fields;
 
+	if (tx_cfg->tx0_prefetch_almost_empty_size !=
+			tx_cfg->tx1_prefetch_almost_empty_size)
+		ipa_assert();
+
 	IPA_SETFIELD_IN_REG(*val, tx_cfg->tx0_prefetch_disable,
 		IPA_TX_CFG_TX0_PREFETCH_DISABLE_SHFT_V3_5,
 		IPA_TX_CFG_TX0_PREFETCH_DISABLE_BMSK_V3_5);
@@ -954,11 +1042,43 @@
 		IPA_TX_CFG_TX1_PREFETCH_DISABLE_SHFT_V3_5,
 		IPA_TX_CFG_TX1_PREFETCH_DISABLE_BMSK_V3_5);
 
-	IPA_SETFIELD_IN_REG(*val, tx_cfg->prefetch_almost_empty_size,
+	IPA_SETFIELD_IN_REG(*val, tx_cfg->tx0_prefetch_almost_empty_size,
 		IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_SHFT_V3_5,
 		IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_BMSK_V3_5);
 }
 
+static void ipareg_construct_tx_cfg_v4_0(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipahal_reg_tx_cfg *tx_cfg;
+
+	tx_cfg = (struct ipahal_reg_tx_cfg *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, tx_cfg->tx0_prefetch_almost_empty_size,
+		IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX0_SHFT_V4_0,
+		IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX0_BMSK_V4_0);
+
+	IPA_SETFIELD_IN_REG(*val, tx_cfg->tx1_prefetch_almost_empty_size,
+		IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX1_SHFT_V4_0,
+		IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX1_BMSK_V4_0);
+
+	IPA_SETFIELD_IN_REG(*val, tx_cfg->dmaw_scnd_outsd_pred_threshold,
+		IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_THRESHOLD_SHFT_V4_0,
+		IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_THRESHOLD_BMSK_V4_0);
+
+	IPA_SETFIELD_IN_REG(*val, tx_cfg->dmaw_max_beats_256_dis,
+		IPA_TX_CFG_DMAW_MAX_BEATS_256_DIS_SHFT_V4_0,
+		IPA_TX_CFG_DMAW_MAX_BEATS_256_DIS_BMSK_V4_0);
+
+	IPA_SETFIELD_IN_REG(*val, tx_cfg->dmaw_scnd_outsd_pred_en,
+		IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_EN_SHFT_V4_0,
+		IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_EN_BMSK_V4_0);
+
+	IPA_SETFIELD_IN_REG(*val, tx_cfg->pa_mask_en,
+		IPA_TX_CFG_PA_MASK_EN_SHFT_V4_0,
+		IPA_TX_CFG_PA_MASK_EN_BMSK_V4_0);
+}
+
 static void ipareg_construct_idle_indication_cfg(enum ipahal_reg_name reg,
 	const void *fields, u32 *val)
 {
@@ -977,6 +1097,59 @@
 		IPA_IDLE_INDICATION_CFG_CONST_NON_IDLE_ENABLE_BMSK_V3_5);
 }
 
+static void ipareg_construct_hps_queue_weights(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipahal_reg_rx_hps_weights *hps_weights;
+
+	hps_weights = (struct ipahal_reg_rx_hps_weights *)fields;
+
+	IPA_SETFIELD_IN_REG(*val,
+		hps_weights->hps_queue_weight_0,
+		IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_0_SHFT,
+		IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_0_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val,
+		hps_weights->hps_queue_weight_1,
+		IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_1_SHFT,
+		IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_1_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val,
+		hps_weights->hps_queue_weight_2,
+		IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_2_SHFT,
+		IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_2_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val,
+		hps_weights->hps_queue_weight_3,
+		IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_3_SHFT,
+		IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_3_BMSK);
+}
+
+static void ipareg_parse_hps_queue_weights(
+	enum ipahal_reg_name reg, void *fields, u32 val)
+{
+	struct ipahal_reg_rx_hps_weights *hps_weights =
+		(struct ipahal_reg_rx_hps_weights *)fields;
+
+	memset(hps_weights, 0, sizeof(struct ipahal_reg_rx_hps_weights));
+
+	hps_weights->hps_queue_weight_0 = IPA_GETFIELD_FROM_REG(val,
+		IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_0_SHFT,
+		IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_0_BMSK);
+
+	hps_weights->hps_queue_weight_1 = IPA_GETFIELD_FROM_REG(val,
+		IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_1_SHFT,
+		IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_1_BMSK);
+
+	hps_weights->hps_queue_weight_2 = IPA_GETFIELD_FROM_REG(val,
+		IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_2_SHFT,
+		IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_2_BMSK);
+
+	hps_weights->hps_queue_weight_3 = IPA_GETFIELD_FROM_REG(val,
+		IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_3_SHFT,
+		IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_3_BMSK);
+}
+
 /*
  * struct ipahal_reg_obj - Register H/W information for specific IPA version
  * @construct - CB to construct register value from abstracted structure
@@ -1266,6 +1439,41 @@
 	[IPA_HW_v3_5][IPA_IDLE_INDICATION_CFG] = {
 		ipareg_construct_idle_indication_cfg, ipareg_parse_dummy,
 		0x00000220, 0},
+	[IPA_HW_v3_5][IPA_HPS_FTCH_ARB_QUEUE_WEIGHT] = {
+		ipareg_construct_hps_queue_weights,
+		ipareg_parse_hps_queue_weights, 0x000005a4, 0},
+
+	/* IPAv4.0 */
+	[IPA_HW_v4_0][IPA_TX_CFG] = {
+		ipareg_construct_tx_cfg_v4_0, ipareg_parse_tx_cfg_v4_0,
+		0x000001FC, 0},
+	[IPA_HW_v4_0][IPA_DEBUG_CNT_REG_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		-1, 0},
+	[IPA_HW_v4_0][IPA_DEBUG_CNT_CTRL_n] = {
+		ipareg_construct_debug_cnt_ctrl_n, ipareg_parse_dummy,
+		-1, 0},
+	[IPA_HW_v4_0][IPA_QCNCM] = {
+		ipareg_construct_qcncm, ipareg_parse_qcncm,
+		-1, 0},
+	[IPA_HW_v4_0][IPA_SINGLE_NDP_MODE] = {
+		ipareg_construct_single_ndp_mode, ipareg_parse_single_ndp_mode,
+		-1, 0},
+	[IPA_HW_v4_0][IPA_QSB_MAX_READS] = {
+		ipareg_construct_qsb_max_reads_v4_0, ipareg_parse_dummy,
+		0x00000078, 0},
+	[IPA_HW_v4_0][IPA_FILT_ROUT_HASH_FLUSH] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000014c, 0},
+	[IPA_HW_v4_0][IPA_STATE_AGGR_ACTIVE] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x000000b4, 0},
+	[IPA_HW_v4_0][IPA_ENDP_INIT_ROUTE_n] = {
+		ipareg_construct_endp_init_route_n, ipareg_parse_dummy,
+		-1, 0},
+	[IPA_HW_v4_0][IPA_ENDP_STATUS_n] = {
+		ipareg_construct_endp_status_n_v4_0, ipareg_parse_dummy,
+		0x00000840, 0x70},
 };
 
 /*
@@ -1597,11 +1805,16 @@
 	if (ipahal_ctx->hw_type <= IPA_HW_v3_1) {
 		shft = IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT;
 		bmsk = IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK;
-	} else {
+	} else if (ipahal_ctx->hw_type <= IPA_HW_v3_5_1) {
 		shft =
 		IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V3_5;
 		bmsk =
 		IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V3_5;
+	} else {
+		shft =
+		IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V4_0;
+		bmsk =
+		IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V4_0;
 	}
 
 	IPA_SETFIELD_IN_REG(valmask->val, 1 << ep_idx, shft, bmsk);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
index 4490103..c9293b8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
@@ -81,6 +81,7 @@
 	IPA_RX_HPS_CLIENTS_MIN_DEPTH_1,
 	IPA_RX_HPS_CLIENTS_MAX_DEPTH_0,
 	IPA_RX_HPS_CLIENTS_MAX_DEPTH_1,
+	IPA_HPS_FTCH_ARB_QUEUE_WEIGHT,
 	IPA_QSB_MAX_WRITES,
 	IPA_QSB_MAX_READS,
 	IPA_TX_CFG,
@@ -168,11 +169,13 @@
  *	If set to 0 (default), PKT-STATUS will be appended before the packet
  *	for this endpoint. If set to 1, PKT-STATUS will be appended after the
  *	packet for this endpoint. Valid only for Output Pipes (IPA Producer)
+ * @status_pkt_suppress:
  */
 struct ipahal_reg_ep_cfg_status {
 	bool status_en;
 	u8 status_ep;
 	bool status_location;
+	u8 status_pkt_suppress;
 };
 
 /*
@@ -272,6 +275,20 @@
 };
 
 /*
+* struct ipahal_reg_rx_hps_weights - weight values for RX HPS clients
+* @hps_queue_weight_0 - 4 bit Weight for RX_HPS_CMDQ #0 (3:0)
+* @hps_queue_weight_1 - 4 bit Weight for RX_HPS_CMDQ #1 (7:4)
+* @hps_queue_weight_2 - 4 bit Weight for RX_HPS_CMDQ #2 (11:8)
+* @hps_queue_weight_3 - 4 bit Weight for RX_HPS_CMDQ #3 (15:12)
+*/
+struct ipahal_reg_rx_hps_weights {
+	u32 hps_queue_weight_0;
+	u32 hps_queue_weight_1;
+	u32 hps_queue_weight_2;
+	u32 hps_queue_weight_3;
+};
+
+/*
  * struct ipahal_reg_valmask - holding values and masking for registers
  *	HAL application may require only value and mask of it for some
  *	register fields.
@@ -322,15 +339,50 @@
 };
 
 /*
+ * struct ipahal_reg_qsb_max_writes - IPA QSB Max Writes register
+ * @qmb_0_max_writes: Max number of outstanding writes for GEN_QMB_0
+ * @qmb_1_max_writes: Max number of outstanding writes for GEN_QMB_1
+ */
+struct ipahal_reg_qsb_max_writes {
+	u32 qmb_0_max_writes;
+	u32 qmb_1_max_writes;
+};
+
+/*
+ * struct ipahal_reg_qsb_max_reads - IPA QSB Max Reads register
+ * @qmb_0_max_reads: Max number of outstanding reads for GEN_QMB_0
+ * @qmb_1_max_reads: Max number of outstanding reads for GEN_QMB_1
+ * @qmb_0_max_read_beats: Max number of outstanding read beats for GEN_QMB_0
+ * @qmb_1_max_read_beats: Max number of outstanding read beats for GEN_QMB_1
+ */
+struct ipahal_reg_qsb_max_reads {
+	u32 qmb_0_max_reads;
+	u32 qmb_1_max_reads;
+	u32 qmb_0_max_read_beats;
+	u32 qmb_1_max_read_beats;
+};
+
+/*
  * struct ipahal_reg_tx_cfg - IPA TX_CFG register
  * @tx0_prefetch_disable: Disable prefetch on TX0
  * @tx1_prefetch_disable: Disable prefetch on TX1
- * @prefetch_almost_empty_size: Prefetch almost empty size
+ * @tx0_prefetch_almost_empty_size: Prefetch almost empty size on TX0
+ * @tx1_prefetch_almost_empty_size: Prefetch almost empty size on TX1
+ * @dmaw_scnd_outsd_pred_threshold:
+ * @dmaw_max_beats_256_dis:
+ * @dmaw_scnd_outsd_pred_en:
+ * @pa_mask_en:
  */
 struct ipahal_reg_tx_cfg {
 	bool tx0_prefetch_disable;
 	bool tx1_prefetch_disable;
-	u16 prefetch_almost_empty_size;
+	u32 tx0_prefetch_almost_empty_size;
+	u32 tx1_prefetch_almost_empty_size;
+	u32 dmaw_scnd_outsd_pred_threshold;
+	u32 dmaw_max_beats_256_dis;
+	u32 dmaw_scnd_outsd_pred_en;
+	u32 pa_mask_en;
+
 };
 
 /*
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
index 6d69b15..17bad03 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
@@ -93,6 +93,8 @@
 #define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT 0
 #define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V3_5 0xfffff
 #define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V3_5 0
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V4_0 0x7fffff
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V4_0 0
 
 /* IPA_ENDP_INIT_ROUTE_n register */
 #define IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_BMSK 0x1f
@@ -129,6 +131,7 @@
 /* IPA_ENDP_INIT_HOL_BLOCK_EN_n register */
 #define IPA_ENDP_INIT_HOL_BLOCK_EN_n_RMSK 0x1
 #define IPA_ENDP_INIT_HOL_BLOCK_EN_n_MAX 19
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_MAX_V_4_0 22
 #define IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_BMSK 0x1
 #define IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_SHFT 0x0
 
@@ -230,6 +233,8 @@
 #define IPA_QCNCM_MODE_EN_SHFT 0
 
 /* IPA_ENDP_STATUS_n register */
+#define IPA_ENDP_STATUS_n_STATUS_PKT_SUPPRESS_BMSK 0x200
+#define IPA_ENDP_STATUS_n_STATUS_PKT_SUPPRESS_SHFT 0x9
 #define IPA_ENDP_STATUS_n_STATUS_LOCATION_BMSK 0x100
 #define IPA_ENDP_STATUS_n_STATUS_LOCATION_SHFT 0x8
 #define IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK 0x3e
@@ -289,7 +294,6 @@
 #define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK_V3_5 0x3F
 #define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT_V3_5 0
 
-
 /* IPA_IPA_IPA_RX_HPS_CLIENTS_MIN/MAX_DEPTH_0/1 registers */
 #define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(n) (0x7F << (8 * (n)))
 #define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(n) \
@@ -308,6 +312,12 @@
 #define IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BMSK (0xf0)
 #define IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_SHFT (4)
 
+/* IPA_QSB_MAX_READS_BEATS register */
+#define IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BEATS_BMSK_V4_0 (0xff0000)
+#define IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BEATS_SHFT_V4_0 (0x10)
+#define IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BEATS_BMSK_V4_0 (0xff000000)
+#define IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BEATS_SHFT_V4_0 (0x18)
+
 /* IPA_TX_CFG register */
 #define IPA_TX_CFG_TX0_PREFETCH_DISABLE_BMSK_V3_5 (0x1)
 #define IPA_TX_CFG_TX0_PREFETCH_DISABLE_SHFT_V3_5 (0)
@@ -316,10 +326,34 @@
 #define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_BMSK_V3_5 (0x1C)
 #define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_SHFT_V3_5 (2)
 
+/* IPA_TX_CFG register v4.0 */
+#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX1_BMSK_V4_0 (0x1e000)
+#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX1_SHFT_V4_0 (0xd)
+#define IPA_TX_CFG_PA_MASK_EN_BMSK_V4_0 (0x1000)
+#define IPA_TX_CFG_PA_MASK_EN_SHFT_V4_0 (0xc)
+#define IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_EN_BMSK_V4_0 (0x800)
+#define IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_EN_SHFT_V4_0 (0xb)
+#define IPA_TX_CFG_DMAW_MAX_BEATS_256_DIS_BMSK_V4_0 (0x400)
+#define IPA_TX_CFG_DMAW_MAX_BEATS_256_DIS_SHFT_V4_0 (0xa)
+#define IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_THRESHOLD_BMSK_V4_0 (0x3c0)
+#define IPA_TX_CFG_DMAW_SCND_OUTSD_PRED_THRESHOLD_SHFT_V4_0 (0x6)
+#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX0_BMSK_V4_0 (0x3c)
+#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_TX0_SHFT_V4_0 (0x2)
+
 /* IPA_IDLE_INDICATION_CFG regiser */
 #define IPA_IDLE_INDICATION_CFG_ENTER_IDLE_DEBOUNCE_THRESH_BMSK_V3_5 (0xffff)
 #define IPA_IDLE_INDICATION_CFG_ENTER_IDLE_DEBOUNCE_THRESH_SHFT_V3_5 (0)
 #define IPA_IDLE_INDICATION_CFG_CONST_NON_IDLE_ENABLE_BMSK_V3_5 (0x10000)
 #define IPA_IDLE_INDICATION_CFG_CONST_NON_IDLE_ENABLE_SHFT_V3_5 (16)
 
+/* IPA_HPS_FTCH_QUEUE_WEIGHT register */
+#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_0_BMSK (0xf)
+#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_0_SHFT (0x0)
+#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_1_BMSK (0xf0)
+#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_1_SHFT (0x4)
+#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_2_BMSK (0xf00)
+#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_2_SHFT (0x8)
+#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_3_BMSK (0xf000)
+#define IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_3_SHFT (0xc)
+
 #endif /* _IPAHAL_REG_I_H_ */
diff --git a/drivers/platform/msm/qcom-geni-se.c b/drivers/platform/msm/qcom-geni-se.c
new file mode 100644
index 0000000..1fffa7c
--- /dev/null
+++ b/drivers/platform/msm/qcom-geni-se.c
@@ -0,0 +1,1269 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/dma-iommu.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/ipc_logging.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/qcom-geni-se.h>
+#include <linux/spinlock.h>
+
+#define GENI_SE_IOMMU_VA_START	(0x40000000)
+#define GENI_SE_IOMMU_VA_SIZE	(0xC0000000)
+
+#define NUM_LOG_PAGES 2
+
+static unsigned long default_bus_bw_set[] = {0, 19200000, 50000000, 100000000};
+
+/**
+ * struct geni_se_device - Data structure to represent the QUPv3 Core
+ * @dev:		Device pointer of the QUPv3 core.
+ * @cb_dev:		Device pointer of the context bank in the IOMMU.
+ * @iommu_lock:		Lock to protect IOMMU Mapping & attachment.
+ * @iommu_map:		IOMMU map of the memory space supported by this core.
+ * @iommu_s1_bypass:	Bypass IOMMU stage 1 translation.
+ * @base:		Base address of this instance of QUPv3 core.
+ * @bus_bw:		Client handle to the bus bandwidth request.
+ * @bus_mas_id:		Master Endpoint ID for bus BW request.
+ * @bus_slv_id:		Slave Endpoint ID for bus BW request.
+ * @ab_ib_lock:		Lock to protect the bus ab & ib values, list.
+ * @ab_list_head:	Sorted resource list based on average bus BW.
+ * @ib_list_head:	Sorted resource list based on instantaneous bus BW.
+ * @cur_ab:		Current Bus Average BW request value.
+ * @cur_ib:		Current Bus Instantaneous BW request value.
+ * @bus_bw_set:		Clock plan for the bus driver.
+ * @cur_bus_bw_idx:	Current index within the bus clock plan.
+ * @log_ctx:		Logging context to hold the debug information
+ */
+struct geni_se_device {
+	struct device *dev;
+	struct device *cb_dev;
+	struct mutex iommu_lock;
+	struct dma_iommu_mapping *iommu_map;
+	bool iommu_s1_bypass;
+	void __iomem *base;
+	struct msm_bus_client_handle *bus_bw;
+	u32 bus_mas_id;
+	u32 bus_slv_id;
+	spinlock_t ab_ib_lock;
+	struct list_head ab_list_head;
+	struct list_head ib_list_head;
+	unsigned long cur_ab;
+	unsigned long cur_ib;
+	int bus_bw_set_size;
+	unsigned long *bus_bw_set;
+	int cur_bus_bw_idx;
+	void *log_ctx;
+};
+
+/* Offset of QUPV3 Hardware Version Register */
+#define QUPV3_HW_VER (0x4)
+
+#define HW_VER_MAJOR_MASK GENMASK(31, 28)
+#define HW_VER_MAJOR_SHFT 28
+#define HW_VER_MINOR_MASK GENMASK(27, 16)
+#define HW_VER_MINOR_SHFT 16
+#define HW_VER_STEP_MASK GENMASK(15, 0)
+
+static int geni_se_iommu_map_and_attach(struct geni_se_device *geni_se_dev);
+
+/**
+ * geni_read_reg_nolog() - Helper function to read from a GENI register
+ * @base:	Base address of the serial engine's register block.
+ * @offset:	Offset within the serial engine's register block.
+ *
+ * Return:	Return the contents of the register.
+ */
+unsigned int geni_read_reg_nolog(void __iomem *base, int offset)
+{
+	return readl_relaxed_no_log(base + offset);
+}
+EXPORT_SYMBOL(geni_read_reg_nolog);
+
+/**
+ * geni_write_reg_nolog() - Helper function to write into a GENI register
+ * @value:	Value to be written into the register.
+ * @base:	Base address of the serial engine's register block.
+ * @offset:	Offset within the serial engine's register block.
+ */
+void geni_write_reg_nolog(unsigned int value, void __iomem *base, int offset)
+{
+	return writel_relaxed_no_log(value, (base + offset));
+}
+EXPORT_SYMBOL(geni_write_reg_nolog);
+
+/**
+ * geni_read_reg() - Helper function to read from a GENI register
+ * @base:	Base address of the serial engine's register block.
+ * @offset:	Offset within the serial engine's register block.
+ *
+ * Return:	Return the contents of the register.
+ */
+unsigned int geni_read_reg(void __iomem *base, int offset)
+{
+	return readl_relaxed(base + offset);
+}
+EXPORT_SYMBOL(geni_read_reg);
+
+/**
+ * geni_write_reg() - Helper function to write into a GENI register
+ * @value:	Value to be written into the register.
+ * @base:	Base address of the serial engine's register block.
+ * @offset:	Offset within the serial engine's register block.
+ */
+void geni_write_reg(unsigned int value, void __iomem *base, int offset)
+{
+	return writel_relaxed(value, (base + offset));
+}
+EXPORT_SYMBOL(geni_write_reg);
+
+/**
+ * get_se_proto() - Read the protocol configured for a serial engine
+ * @base:	Base address of the serial engine's register block.
+ *
+ * Return:	Protocol value as configured in the serial engine.
+ */
+int get_se_proto(void __iomem *base)
+{
+	int proto;
+
+	proto = ((geni_read_reg(base, GENI_FW_REVISION_RO)
+			& FW_REV_PROTOCOL_MSK) >> FW_REV_PROTOCOL_SHFT);
+	return proto;
+}
+EXPORT_SYMBOL(get_se_proto);
+
+static int se_geni_irq_en(void __iomem *base)
+{
+	unsigned int common_geni_m_irq_en;
+	unsigned int common_geni_s_irq_en;
+
+	common_geni_m_irq_en = geni_read_reg(base, SE_GENI_M_IRQ_EN);
+	common_geni_s_irq_en = geni_read_reg(base, SE_GENI_S_IRQ_EN);
+	/* Common to all modes */
+	common_geni_m_irq_en |= M_COMMON_GENI_M_IRQ_EN;
+	common_geni_s_irq_en |= S_COMMON_GENI_S_IRQ_EN;
+
+	geni_write_reg(common_geni_m_irq_en, base, SE_GENI_M_IRQ_EN);
+	geni_write_reg(common_geni_s_irq_en, base, SE_GENI_S_IRQ_EN);
+	return 0;
+}
+
+
+static void se_set_rx_rfr_wm(void __iomem *base, unsigned int rx_wm,
+						unsigned int rx_rfr)
+{
+	geni_write_reg(rx_wm, base, SE_GENI_RX_WATERMARK_REG);
+	geni_write_reg(rx_rfr, base, SE_GENI_RX_RFR_WATERMARK_REG);
+}
+
+static int se_io_set_mode(void __iomem *base)
+{
+	unsigned int io_mode;
+	unsigned int geni_dma_mode;
+
+	io_mode = geni_read_reg(base, SE_IRQ_EN);
+	geni_dma_mode = geni_read_reg(base, SE_GENI_DMA_MODE_EN);
+
+	io_mode |= (GENI_M_IRQ_EN | GENI_S_IRQ_EN);
+	io_mode |= (DMA_TX_IRQ_EN | DMA_RX_IRQ_EN);
+	geni_dma_mode &= ~GENI_DMA_MODE_EN;
+
+	geni_write_reg(io_mode, base, SE_IRQ_EN);
+	geni_write_reg(geni_dma_mode, base, SE_GENI_DMA_MODE_EN);
+	geni_write_reg(0, base, SE_GSI_EVENT_EN);
+	return 0;
+}
+
+static void se_io_init(void __iomem *base)
+{
+	unsigned int io_op_ctrl;
+	unsigned int geni_cgc_ctrl;
+	unsigned int dma_general_cfg;
+
+	geni_cgc_ctrl = geni_read_reg(base, GENI_CGC_CTRL);
+	dma_general_cfg = geni_read_reg(base, SE_DMA_GENERAL_CFG);
+	geni_cgc_ctrl |= DEFAULT_CGC_EN;
+	dma_general_cfg |= (AHB_SEC_SLV_CLK_CGC_ON | DMA_AHB_SLV_CFG_ON |
+			DMA_TX_CLK_CGC_ON | DMA_RX_CLK_CGC_ON);
+	io_op_ctrl = DEFAULT_IO_OUTPUT_CTRL_MSK;
+	geni_write_reg(geni_cgc_ctrl, base, GENI_CGC_CTRL);
+	geni_write_reg(dma_general_cfg, base, SE_DMA_GENERAL_CFG);
+
+	geni_write_reg(io_op_ctrl, base, GENI_OUTPUT_CTRL);
+	geni_write_reg(FORCE_DEFAULT, base, GENI_FORCE_DEFAULT_REG);
+}
+
+/**
+ * geni_se_init() - Initialize the GENI Serial Engine
+ * @base:	Base address of the serial engine's register block.
+ * @rx_wm:	Receive watermark to be configured.
+ * @rx_rfr:	Ready-for-receive watermark to be configured.
+ *
+ * This function is used to initialize the GENI serial engine, configure
+ * receive watermark and ready-for-receive watermarks.
+ *
+ * Return:	0 on success, standard Linux error codes on failure/error.
+ */
+int geni_se_init(void __iomem *base, unsigned int rx_wm, unsigned int rx_rfr)
+{
+	int ret;
+
+	se_io_init(base);
+	ret = se_io_set_mode(base);
+	if (ret)
+		return ret;
+
+	se_set_rx_rfr_wm(base, rx_wm, rx_rfr);
+	ret = se_geni_irq_en(base);
+	return ret;
+}
+EXPORT_SYMBOL(geni_se_init);
+
+static int geni_se_select_fifo_mode(void __iomem *base)
+{
+	int proto = get_se_proto(base);
+	unsigned int common_geni_m_irq_en;
+	unsigned int common_geni_s_irq_en;
+	unsigned int geni_dma_mode;
+
+	geni_write_reg(0, base, SE_GSI_EVENT_EN);
+	geni_write_reg(0xFFFFFFFF, base, SE_GENI_M_IRQ_CLEAR);
+	geni_write_reg(0xFFFFFFFF, base, SE_GENI_S_IRQ_CLEAR);
+	geni_write_reg(0xFFFFFFFF, base, SE_DMA_TX_IRQ_CLR);
+	geni_write_reg(0xFFFFFFFF, base, SE_DMA_RX_IRQ_CLR);
+	geni_write_reg(0xFFFFFFFF, base, SE_IRQ_EN);
+
+	common_geni_m_irq_en = geni_read_reg(base, SE_GENI_M_IRQ_EN);
+	common_geni_s_irq_en = geni_read_reg(base, SE_GENI_S_IRQ_EN);
+	geni_dma_mode = geni_read_reg(base, SE_GENI_DMA_MODE_EN);
+	if (proto != UART) {
+		common_geni_m_irq_en |=
+			(M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN |
+			M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
+		common_geni_s_irq_en |= S_CMD_DONE_EN;
+	}
+	geni_dma_mode &= ~GENI_DMA_MODE_EN;
+
+	geni_write_reg(common_geni_m_irq_en, base, SE_GENI_M_IRQ_EN);
+	geni_write_reg(common_geni_s_irq_en, base, SE_GENI_S_IRQ_EN);
+	geni_write_reg(geni_dma_mode, base, SE_GENI_DMA_MODE_EN);
+	return 0;
+}
+
+static int geni_se_select_dma_mode(void __iomem *base)
+{
+	unsigned int geni_dma_mode = 0;
+
+	geni_write_reg(0, base, SE_GSI_EVENT_EN);
+	geni_write_reg(0xFFFFFFFF, base, SE_GENI_M_IRQ_CLEAR);
+	geni_write_reg(0xFFFFFFFF, base, SE_GENI_S_IRQ_CLEAR);
+	geni_write_reg(0xFFFFFFFF, base, SE_DMA_TX_IRQ_CLR);
+	geni_write_reg(0xFFFFFFFF, base, SE_DMA_RX_IRQ_CLR);
+	geni_write_reg(0xFFFFFFFF, base, SE_IRQ_EN);
+
+	geni_dma_mode = geni_read_reg(base, SE_GENI_DMA_MODE_EN);
+	geni_dma_mode |= GENI_DMA_MODE_EN;
+	geni_write_reg(geni_dma_mode, base, SE_GENI_DMA_MODE_EN);
+	return 0;
+}
+
+static int geni_se_select_gsi_mode(void __iomem *base)
+{
+	unsigned int io_mode = 0;
+	unsigned int geni_dma_mode = 0;
+	unsigned int gsi_event_en = 0;
+
+	geni_dma_mode = geni_read_reg(base, SE_GENI_DMA_MODE_EN);
+	gsi_event_en = geni_read_reg(base, SE_GSI_EVENT_EN);
+	io_mode = geni_read_reg(base, SE_IRQ_EN);
+
+	geni_dma_mode |= GENI_DMA_MODE_EN;
+	io_mode &= ~(DMA_TX_IRQ_EN | DMA_RX_IRQ_EN);
+	gsi_event_en |= (DMA_RX_EVENT_EN | DMA_TX_EVENT_EN |
+				GENI_M_EVENT_EN | GENI_S_EVENT_EN);
+
+	geni_write_reg(io_mode, base, SE_IRQ_EN);
+	geni_write_reg(geni_dma_mode, base, SE_GENI_DMA_MODE_EN);
+	geni_write_reg(gsi_event_en, base, SE_GSI_EVENT_EN);
+	return 0;
+
+}
+
+/**
+ * geni_se_select_mode() - Select the serial engine transfer mode
+ * @base:	Base address of the serial engine's register block.
+ * @mode:	Transfer mode to be selected.
+ *
+ * Return:	0 on success, standard Linux error codes on failure.
+ */
+int geni_se_select_mode(void __iomem *base, int mode)
+{
+	int ret = 0;
+
+	switch (mode) {
+	case FIFO_MODE:
+		geni_se_select_fifo_mode(base);
+		break;
+	case SE_DMA:
+		geni_se_select_dma_mode(base);
+		break;
+	case GSI_DMA:
+		geni_se_select_gsi_mode(base);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(geni_se_select_mode);
+
+/**
+ * geni_setup_m_cmd() - Setup the primary sequencer
+ * @base:	Base address of the serial engine's register block.
+ * @cmd:	Command/Operation to setup in the primary sequencer.
+ * @params:	Parameter for the sequencer command.
+ *
+ * This function is used to configure the primary sequencer with the
+ * command and its associated parameters.
+ */
+void geni_setup_m_cmd(void __iomem *base, u32 cmd, u32 params)
+{
+	u32 m_cmd = (cmd << M_OPCODE_SHFT);
+
+	m_cmd |= (params & M_PARAMS_MSK);
+	geni_write_reg(m_cmd, base, SE_GENI_M_CMD0);
+}
+EXPORT_SYMBOL(geni_setup_m_cmd);
+
+/**
+ * geni_setup_s_cmd() - Setup the secondary sequencer
+ * @base:	Base address of the serial engine's register block.
+ * @cmd:	Command/Operation to setup in the secondary sequencer.
+ * @params:	Parameter for the sequencer command.
+ *
+ * This function is used to configure the secondary sequencer with the
+ * command and its associated parameters.
+ */
+void geni_setup_s_cmd(void __iomem *base, u32 cmd, u32 params)
+{
+	u32 s_cmd = geni_read_reg(base, SE_GENI_S_CMD0);
+
+	s_cmd &= ~(S_OPCODE_MSK | S_PARAMS_MSK);
+	s_cmd |= (cmd << S_OPCODE_SHFT);
+	s_cmd |= (params & S_PARAMS_MSK);
+	geni_write_reg(s_cmd, base, SE_GENI_S_CMD0);
+}
+EXPORT_SYMBOL(geni_setup_s_cmd);
+
+/**
+ * geni_cancel_m_cmd() - Cancel the command configured in the primary sequencer
+ * @base:	Base address of the serial engine's register block.
+ *
+ * This function is used to cancel the currently configured command in the
+ * primary sequencer.
+ */
+void geni_cancel_m_cmd(void __iomem *base)
+{
+	geni_write_reg(M_GENI_CMD_CANCEL, base, SE_GENI_M_CMD_CTRL_REG);
+}
+EXPORT_SYMBOL(geni_cancel_m_cmd);
+
+/**
+ * geni_cancel_s_cmd() - Cancel the command configured in the secondary
+ *                       sequencer
+ * @base:	Base address of the serial engine's register block.
+ *
+ * This function is used to cancel the currently configured command in the
+ * secondary sequencer.
+ */
+void geni_cancel_s_cmd(void __iomem *base)
+{
+	geni_write_reg(S_GENI_CMD_CANCEL, base, SE_GENI_S_CMD_CTRL_REG);
+}
+EXPORT_SYMBOL(geni_cancel_s_cmd);
+
+/**
+ * geni_abort_m_cmd() - Abort the command configured in the primary sequencer
+ * @base:	Base address of the serial engine's register block.
+ *
+ * This function is used to force abort the currently configured command in the
+ * primary sequencer.
+ */
+void geni_abort_m_cmd(void __iomem *base)
+{
+	geni_write_reg(M_GENI_CMD_ABORT, base, SE_GENI_M_CMD_CTRL_REG);
+}
+EXPORT_SYMBOL(geni_abort_m_cmd);
+
+/**
+ * geni_abort_s_cmd() - Abort the command configured in the secondary
+ *                       sequencer
+ * @base:	Base address of the serial engine's register block.
+ *
+ * This function is used to force abort the currently configured command in the
+ * secondary sequencer.
+ */
+void geni_abort_s_cmd(void __iomem *base)
+{
+	geni_write_reg(S_GENI_CMD_ABORT, base, SE_GENI_S_CMD_CTRL_REG);
+}
+EXPORT_SYMBOL(geni_abort_s_cmd);
+
+/**
+ * get_tx_fifo_depth() - Get the TX fifo depth of the serial engine
+ * @base:	Base address of the serial engine's register block.
+ *
+ * This function is used to get the depth i.e. number of elements in the
+ * TX fifo of the serial engine.
+ *
+ * Return:	TX fifo depth in units of FIFO words.
+ */
+int get_tx_fifo_depth(void __iomem *base)
+{
+	int tx_fifo_depth;
+
+	tx_fifo_depth = ((geni_read_reg(base, SE_HW_PARAM_0)
+			& TX_FIFO_DEPTH_MSK) >> TX_FIFO_DEPTH_SHFT);
+	return tx_fifo_depth;
+}
+EXPORT_SYMBOL(get_tx_fifo_depth);
+
+/**
+ * get_tx_fifo_width() - Get the TX fifo width of the serial engine
+ * @base:	Base address of the serial engine's register block.
+ *
+ * This function is used to get the width i.e. word size per element in the
+ * TX fifo of the serial engine.
+ *
+ * Return:	TX fifo width in bits
+ */
+int get_tx_fifo_width(void __iomem *base)
+{
+	int tx_fifo_width;
+
+	tx_fifo_width = ((geni_read_reg(base, SE_HW_PARAM_0)
+			& TX_FIFO_WIDTH_MSK) >> TX_FIFO_WIDTH_SHFT);
+	return tx_fifo_width;
+}
+EXPORT_SYMBOL(get_tx_fifo_width);
+
+/**
+ * get_rx_fifo_depth() - Get the RX fifo depth of the serial engine
+ * @base:	Base address of the serial engine's register block.
+ *
+ * This function is used to get the depth i.e. number of elements in the
+ * RX fifo of the serial engine.
+ *
+ * Return:	RX fifo depth in units of FIFO words
+ */
+int get_rx_fifo_depth(void __iomem *base)
+{
+	int rx_fifo_depth;
+
+	rx_fifo_depth = ((geni_read_reg(base, SE_HW_PARAM_1)
+			& RX_FIFO_DEPTH_MSK) >> RX_FIFO_DEPTH_SHFT);
+	return rx_fifo_depth;
+}
+EXPORT_SYMBOL(get_rx_fifo_depth);
+
+/**
+ * se_get_packing_config() - Get the packing configuration based on input
+ * @bpw:	Bits of data per transfer word.
+ * @pack_words:	Number of words per fifo element.
+ * @msb_to_lsb:	Transfer from MSB to LSB or vice-versa.
+ * @cfg0:	Output buffer to hold the first half of configuration.
+ * @cfg1:	Output buffer to hold the second half of configuration.
+ *
+ * This function is used to calculate the packing configuration based on
+ * the input packing requirement and the configuration logic.
+ */
+void se_get_packing_config(int bpw, int pack_words, bool msb_to_lsb,
+			   unsigned long *cfg0, unsigned long *cfg1)
+{
+	u32 cfg[4] = {0};
+	int len;
+	int temp_bpw = bpw;
+	int idx_start = (msb_to_lsb ? (bpw - 1) : 0);
+	int idx = idx_start;
+	int idx_delta = (msb_to_lsb ? -BITS_PER_BYTE : BITS_PER_BYTE);
+	int ceil_bpw = ((bpw & (BITS_PER_BYTE - 1)) ?
+			((bpw & ~(BITS_PER_BYTE - 1)) + BITS_PER_BYTE) : bpw);
+	int iter = (ceil_bpw * pack_words) >> 3;
+	int i;
+
+	if (unlikely(iter <= 0 || iter > 4)) {
+		*cfg0 = 0;
+		*cfg1 = 0;
+		return;
+	}
+
+	for (i = 0; i < iter; i++) {
+		len = (temp_bpw < BITS_PER_BYTE) ?
+				(temp_bpw - 1) : BITS_PER_BYTE - 1;
+		cfg[i] = ((idx << 5) | (msb_to_lsb << 4) | (len << 1));
+		idx = ((temp_bpw - BITS_PER_BYTE) <= 0) ?
+				((i + 1) * BITS_PER_BYTE) + idx_start :
+				idx + idx_delta;
+		temp_bpw = ((temp_bpw - BITS_PER_BYTE) <= 0) ?
+				bpw : (temp_bpw - BITS_PER_BYTE);
+	}
+	cfg[iter - 1] |= 1;
+	*cfg0 = cfg[0] | (cfg[1] << 10);
+	*cfg1 = cfg[2] | (cfg[3] << 10);
+}
+EXPORT_SYMBOL(se_get_packing_config);
+
+/**
+ * se_config_packing() - Packing configuration of the serial engine
+ * @base:	Base address of the serial engine's register block.
+ * @bpw:	Bits of data per transfer word.
+ * @pack_words:	Number of words per fifo element.
+ * @msb_to_lsb:	Transfer from MSB to LSB or vice-versa.
+ *
+ * This function is used to configure the packing rules for the current
+ * transfer.
+ */
+void se_config_packing(void __iomem *base, int bpw,
+			int pack_words, bool msb_to_lsb)
+{
+	unsigned long cfg0, cfg1;
+
+	se_get_packing_config(bpw, pack_words, msb_to_lsb, &cfg0, &cfg1);
+	geni_write_reg(cfg0, base, SE_GENI_TX_PACKING_CFG0);
+	geni_write_reg(cfg1, base, SE_GENI_TX_PACKING_CFG1);
+	geni_write_reg(cfg0, base, SE_GENI_RX_PACKING_CFG0);
+	geni_write_reg(cfg1, base, SE_GENI_RX_PACKING_CFG1);
+	if (pack_words || bpw == 32)
+		geni_write_reg((bpw >> 4), base, SE_GENI_BYTE_GRAN);
+}
+EXPORT_SYMBOL(se_config_packing);
+
+static void se_geni_clks_off(struct se_geni_rsc *rsc)
+{
+	clk_disable_unprepare(rsc->se_clk);
+	clk_disable_unprepare(rsc->s_ahb_clk);
+	clk_disable_unprepare(rsc->m_ahb_clk);
+}
+
+static bool geni_se_check_bus_bw(struct geni_se_device *geni_se_dev)
+{
+	int i;
+	int new_bus_bw_idx = geni_se_dev->bus_bw_set_size - 1;
+	unsigned long new_bus_bw;
+	bool bus_bw_update = false;
+
+	new_bus_bw = max(geni_se_dev->cur_ib, geni_se_dev->cur_ab) /
+							DEFAULT_BUS_WIDTH;
+	for (i = 0; i < geni_se_dev->bus_bw_set_size; i++) {
+		if (geni_se_dev->bus_bw_set[i] >= new_bus_bw) {
+			new_bus_bw_idx = i;
+			break;
+		}
+	}
+
+	if (geni_se_dev->cur_bus_bw_idx != new_bus_bw_idx) {
+		geni_se_dev->cur_bus_bw_idx = new_bus_bw_idx;
+		bus_bw_update = true;
+	}
+	return bus_bw_update;
+}
+
+static int geni_se_rmv_ab_ib(struct geni_se_device *geni_se_dev,
+			     struct se_geni_rsc *rsc)
+{
+	unsigned long flags;
+	struct se_geni_rsc *tmp;
+	bool bus_bw_update = false;
+	int ret = 0;
+
+	if (unlikely(list_empty(&rsc->ab_list) || list_empty(&rsc->ib_list)))
+		return -EINVAL;
+
+	spin_lock_irqsave(&geni_se_dev->ab_ib_lock, flags);
+	list_del_init(&rsc->ab_list);
+	geni_se_dev->cur_ab -= rsc->ab;
+
+	list_del_init(&rsc->ib_list);
+	tmp = list_first_entry_or_null(&geni_se_dev->ib_list_head,
+					   struct se_geni_rsc, ib_list);
+	if (tmp && tmp->ib != geni_se_dev->cur_ib)
+		geni_se_dev->cur_ib = tmp->ib;
+	else if (!tmp && geni_se_dev->cur_ib)
+		geni_se_dev->cur_ib = 0;
+
+	bus_bw_update = geni_se_check_bus_bw(geni_se_dev);
+	spin_unlock_irqrestore(&geni_se_dev->ab_ib_lock, flags);
+
+	if (bus_bw_update)
+		ret = msm_bus_scale_update_bw(geni_se_dev->bus_bw,
+						geni_se_dev->cur_ab,
+						geni_se_dev->cur_ib);
+	GENI_SE_DBG(geni_se_dev->log_ctx, false, NULL,
+		    "%s: %lu:%lu (%lu:%lu) %d\n", __func__,
+		    geni_se_dev->cur_ab, geni_se_dev->cur_ib,
+		    rsc->ab, rsc->ib, bus_bw_update);
+	return ret;
+}
+
+/**
+ * se_geni_resources_off() - Turn off resources associated with the serial
+ *                           engine
+ * @rsc:	Handle to resources associated with the serial engine.
+ *
+ * Return:	0 on success, standard Linux error codes on failure/error.
+ */
+int se_geni_resources_off(struct se_geni_rsc *rsc)
+{
+	int ret = 0;
+	struct geni_se_device *geni_se_dev;
+
+	if (unlikely(!rsc || !rsc->wrapper_dev))
+		return -EINVAL;
+
+	geni_se_dev = dev_get_drvdata(rsc->wrapper_dev);
+	if (unlikely(!geni_se_dev || !geni_se_dev->bus_bw))
+		return -ENODEV;
+
+	ret = pinctrl_select_state(rsc->geni_pinctrl, rsc->geni_gpio_sleep);
+	if (ret) {
+		GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
+			"%s: Error %d pinctrl_select_state\n", __func__, ret);
+		return ret;
+	}
+	se_geni_clks_off(rsc);
+	ret = geni_se_rmv_ab_ib(geni_se_dev, rsc);
+	if (ret)
+		GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
+			"%s: Error %d during bus_bw_update\n", __func__, ret);
+	return ret;
+}
+EXPORT_SYMBOL(se_geni_resources_off);
+
+static int se_geni_clks_on(struct se_geni_rsc *rsc)
+{
+	int ret;
+
+	ret = clk_prepare_enable(rsc->m_ahb_clk);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(rsc->s_ahb_clk);
+	if (ret) {
+		clk_disable_unprepare(rsc->m_ahb_clk);
+		return ret;
+	}
+
+	ret = clk_prepare_enable(rsc->se_clk);
+	if (ret) {
+		clk_disable_unprepare(rsc->s_ahb_clk);
+		clk_disable_unprepare(rsc->m_ahb_clk);
+	}
+	return ret;
+}
+
+static int geni_se_add_ab_ib(struct geni_se_device *geni_se_dev,
+			     struct se_geni_rsc *rsc)
+{
+	unsigned long flags;
+	struct se_geni_rsc *tmp;
+	struct list_head *ins_list_head;
+	bool bus_bw_update = false;
+	int ret = 0;
+
+	spin_lock_irqsave(&geni_se_dev->ab_ib_lock, flags);
+	list_add(&rsc->ab_list, &geni_se_dev->ab_list_head);
+	geni_se_dev->cur_ab += rsc->ab;
+
+	ins_list_head = &geni_se_dev->ib_list_head;
+	list_for_each_entry(tmp, &geni_se_dev->ib_list_head, ib_list) {
+		if (tmp->ib < rsc->ib)
+			break;
+		ins_list_head = &tmp->ib_list;
+	}
+	list_add(&rsc->ib_list, ins_list_head);
+	/* Currently inserted node has greater average BW value */
+	if (ins_list_head == &geni_se_dev->ib_list_head)
+		geni_se_dev->cur_ib = tmp->ib;
+
+	bus_bw_update = geni_se_check_bus_bw(geni_se_dev);
+	spin_unlock_irqrestore(&geni_se_dev->ab_ib_lock, flags);
+
+	if (bus_bw_update)
+		ret = msm_bus_scale_update_bw(geni_se_dev->bus_bw,
+						geni_se_dev->cur_ab,
+						geni_se_dev->cur_ib);
+	GENI_SE_DBG(geni_se_dev->log_ctx, false, NULL,
+		    "%s: %lu:%lu (%lu:%lu) %d\n", __func__,
+		    geni_se_dev->cur_ab, geni_se_dev->cur_ib,
+		    rsc->ab, rsc->ib, bus_bw_update);
+	return ret;
+}
+
+/**
+ * se_geni_resources_on() - Turn on resources associated with the serial
+ *                          engine
+ * @rsc:	Handle to resources associated with the serial engine.
+ *
+ * Return:	0 on success, standard Linux error codes on failure/error.
+ */
+int se_geni_resources_on(struct se_geni_rsc *rsc)
+{
+	int ret = 0;
+	struct geni_se_device *geni_se_dev;
+
+	if (unlikely(!rsc || !rsc->wrapper_dev))
+		return -EINVAL;
+
+	geni_se_dev = dev_get_drvdata(rsc->wrapper_dev);
+	if (unlikely(!geni_se_dev))
+		return -EPROBE_DEFER;
+
+	ret = geni_se_add_ab_ib(geni_se_dev, rsc);
+	if (ret) {
+		GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
+			"%s: Error %d during bus_bw_update\n", __func__, ret);
+		return ret;
+	}
+
+	ret = se_geni_clks_on(rsc);
+	if (ret) {
+		GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
+			"%s: Error %d during clks_on\n", __func__, ret);
+		geni_se_rmv_ab_ib(geni_se_dev, rsc);
+		return ret;
+	}
+
+	ret = pinctrl_select_state(rsc->geni_pinctrl, rsc->geni_gpio_active);
+	if (ret) {
+		GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
+			"%s: Error %d pinctrl_select_state\n", __func__, ret);
+		se_geni_clks_off(rsc);
+		geni_se_rmv_ab_ib(geni_se_dev, rsc);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(se_geni_resources_on);
+
+/**
+ * geni_se_resources_init() - Init the SE resource structure
+ * @rsc:	SE resource structure to be initialized.
+ * @ab:		Initial Average bus bandwidth request value.
+ * @ib:		Initial Instantaneous bus bandwidth request value.
+ *
+ * Return:	0 on success, standard Linux error codes on failure.
+ */
+int geni_se_resources_init(struct se_geni_rsc *rsc,
+			   unsigned long ab, unsigned long ib)
+{
+	struct geni_se_device *geni_se_dev;
+
+	if (unlikely(!rsc || !rsc->wrapper_dev))
+		return -EINVAL;
+
+	geni_se_dev = dev_get_drvdata(rsc->wrapper_dev);
+	if (unlikely(!geni_se_dev))
+		return -EPROBE_DEFER;
+
+	if (unlikely(IS_ERR_OR_NULL(geni_se_dev->bus_bw))) {
+		geni_se_dev->bus_bw = msm_bus_scale_register(
+					geni_se_dev->bus_mas_id,
+					geni_se_dev->bus_slv_id,
+					(char *)dev_name(geni_se_dev->dev),
+					false);
+		if (IS_ERR_OR_NULL(geni_se_dev->bus_bw)) {
+			GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
+				"%s: Error creating bus client\n", __func__);
+			return (int)PTR_ERR(geni_se_dev->bus_bw);
+		}
+	}
+
+	rsc->ab = ab;
+	rsc->ib = ib;
+	INIT_LIST_HEAD(&rsc->ab_list);
+	INIT_LIST_HEAD(&rsc->ib_list);
+	geni_se_iommu_map_and_attach(geni_se_dev);
+	return 0;
+}
+EXPORT_SYMBOL(geni_se_resources_init);
+
+/**
+ * geni_se_tx_dma_prep() - Prepare the Serial Engine for TX DMA transfer
+ * @wrapper_dev:	QUPv3 Wrapper Device to which the TX buffer is mapped.
+ * @base:		Base address of the SE register block.
+ * @tx_buf:		Pointer to the TX buffer.
+ * @tx_len:		Length of the TX buffer.
+ * @tx_dma:		Pointer to store the mapped DMA address.
+ *
+ * This function is used to prepare the buffers for DMA TX.
+ *
+ * Return:	0 on success, standard Linux error codes on error/failure.
+ */
+int geni_se_tx_dma_prep(struct device *wrapper_dev, void __iomem *base,
+			void *tx_buf, int tx_len, dma_addr_t *tx_dma)
+{
+	int ret;
+
+	if (unlikely(!wrapper_dev || !base || !tx_buf || !tx_len || !tx_dma))
+		return -EINVAL;
+
+	/* Map the buffer into the QUPv3 context bank; on success *tx_dma
+	 * holds the device-visible DMA address.
+	 */
+	ret = geni_se_iommu_map_buf(wrapper_dev, tx_dma, tx_buf, tx_len,
+				    DMA_TO_DEVICE);
+	if (ret)
+		return ret;
+
+	/* 0x7 enables the three low TX DMA interrupt bits (magic value;
+	 * presumably done/error events - TODO confirm against the
+	 * SE_DMA_TX_IRQ bit definitions).
+	 */
+	geni_write_reg(7, base, SE_DMA_TX_IRQ_EN_SET);
+	/* Program the 64-bit DMA address as low/high 32-bit halves */
+	geni_write_reg((u32)(*tx_dma), base, SE_DMA_TX_PTR_L);
+	geni_write_reg((u32)((*tx_dma) >> 32), base, SE_DMA_TX_PTR_H);
+	/* TX attribute 1: EOT (inferred from the RX counterpart's comment
+	 * that RX "does not have EOT bit") - confirm against HW spec.
+	 */
+	geni_write_reg(1, base, SE_DMA_TX_ATTR);
+	/* NOTE(review): writing the length appears to arm the transfer, so
+	 * it is written last - confirm ordering requirement in HW spec.
+	 */
+	geni_write_reg(tx_len, base, SE_DMA_TX_LEN);
+	return 0;
+}
+EXPORT_SYMBOL(geni_se_tx_dma_prep);
+
+/**
+ * geni_se_rx_dma_prep() - Prepare the Serial Engine for RX DMA transfer
+ * @wrapper_dev:	QUPv3 Wrapper Device to which the RX buffer is mapped.
+ * @base:		Base address of the SE register block.
+ * @rx_buf:		Pointer to the RX buffer.
+ * @rx_len:		Length of the RX buffer.
+ * @rx_dma:		Pointer to store the mapped DMA address.
+ *
+ * This function is used to prepare the buffers for DMA RX.
+ *
+ * Return:	0 on success, standard Linux error codes on error/failure.
+ */
+int geni_se_rx_dma_prep(struct device *wrapper_dev, void __iomem *base,
+			void *rx_buf, int rx_len, dma_addr_t *rx_dma)
+{
+	int ret;
+
+	if (unlikely(!wrapper_dev || !base || !rx_buf || !rx_len || !rx_dma))
+		return -EINVAL;
+
+	/* Map the buffer into the QUPv3 context bank; on success *rx_dma
+	 * holds the device-visible DMA address.
+	 */
+	ret = geni_se_iommu_map_buf(wrapper_dev, rx_dma, rx_buf, rx_len,
+				    DMA_FROM_DEVICE);
+	if (ret)
+		return ret;
+
+	/* 0x7 enables the three low RX DMA interrupt bits (magic value;
+	 * presumably done/error events - TODO confirm against the
+	 * SE_DMA_RX_IRQ bit definitions).
+	 */
+	geni_write_reg(7, base, SE_DMA_RX_IRQ_EN_SET);
+	/* Program the 64-bit DMA address as low/high 32-bit halves */
+	geni_write_reg((u32)(*rx_dma), base, SE_DMA_RX_PTR_L);
+	geni_write_reg((u32)((*rx_dma) >> 32), base, SE_DMA_RX_PTR_H);
+	/* RX does not have EOT bit */
+	geni_write_reg(0, base, SE_DMA_RX_ATTR);
+	/* NOTE(review): writing the length appears to arm the transfer, so
+	 * it is written last - confirm ordering requirement in HW spec.
+	 */
+	geni_write_reg(rx_len, base, SE_DMA_RX_LEN);
+	return 0;
+}
+EXPORT_SYMBOL(geni_se_rx_dma_prep);
+
+/**
+ * geni_se_tx_dma_unprep() - Unprepare the Serial Engine after TX DMA transfer
+ * @wrapper_dev:	QUPv3 Wrapper Device to which the TX buffer is mapped.
+ * @tx_dma:		DMA address of the TX buffer.
+ * @tx_len:		Length of the TX buffer.
+ *
+ * Undo the IOMMU mapping established by geni_se_tx_dma_prep(). A zero
+ * (unmapped) DMA address is silently ignored.
+ */
+void geni_se_tx_dma_unprep(struct device *wrapper_dev,
+			   dma_addr_t tx_dma, int tx_len)
+{
+	if (!tx_dma)
+		return;
+
+	geni_se_iommu_unmap_buf(wrapper_dev, &tx_dma, tx_len, DMA_TO_DEVICE);
+}
+EXPORT_SYMBOL(geni_se_tx_dma_unprep);
+
+/**
+ * geni_se_rx_dma_unprep() - Unprepare the Serial Engine after RX DMA transfer
+ * @wrapper_dev:	QUPv3 Wrapper Device to which the RX buffer is mapped.
+ * @rx_dma:		DMA address of the RX buffer.
+ * @rx_len:		Length of the RX buffer.
+ *
+ * Undo the IOMMU mapping established by geni_se_rx_dma_prep(). A zero
+ * (unmapped) DMA address is silently ignored.
+ */
+void geni_se_rx_dma_unprep(struct device *wrapper_dev,
+			   dma_addr_t rx_dma, int rx_len)
+{
+	if (!rx_dma)
+		return;
+
+	geni_se_iommu_unmap_buf(wrapper_dev, &rx_dma, rx_len, DMA_FROM_DEVICE);
+}
+EXPORT_SYMBOL(geni_se_rx_dma_unprep);
+
+/**
+ * geni_se_qupv3_hw_version() - Read the QUPv3 Hardware version
+ * @wrapper_dev:	Pointer to the corresponding QUPv3 wrapper core.
+ * @major:		Buffer for Major Version field.
+ * @minor:		Buffer for Minor Version field.
+ * @step:		Buffer for Step Version field.
+ *
+ * Decode the QUPV3_HW_VER register into its major, minor and step fields.
+ *
+ * Return:	0 on success, standard Linux error codes on failure/error.
+ */
+int geni_se_qupv3_hw_version(struct device *wrapper_dev, unsigned int *major,
+			     unsigned int *minor, unsigned int *step)
+{
+	struct geni_se_device *geni_se_dev;
+	unsigned int ver;
+
+	if (!wrapper_dev || !major || !minor || !step)
+		return -EINVAL;
+
+	geni_se_dev = dev_get_drvdata(wrapper_dev);
+	if (unlikely(!geni_se_dev))
+		return -ENODEV;
+
+	ver = geni_read_reg(geni_se_dev->base, QUPV3_HW_VER);
+	*major = (ver & HW_VER_MAJOR_MASK) >> HW_VER_MAJOR_SHFT;
+	*minor = (ver & HW_VER_MINOR_MASK) >> HW_VER_MINOR_SHFT;
+	*step = ver & HW_VER_STEP_MASK;
+	return 0;
+}
+EXPORT_SYMBOL(geni_se_qupv3_hw_version);
+
+/*
+ * Create the IOMMU mapping for the wrapper's context-bank device and attach
+ * it, exactly once; subsequent calls are no-ops. Serialized by iommu_lock.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int geni_se_iommu_map_and_attach(struct geni_se_device *geni_se_dev)
+{
+	dma_addr_t va_start = GENI_SE_IOMMU_VA_START;
+	size_t va_size = GENI_SE_IOMMU_VA_SIZE;
+	int bypass = 1;
+	int ret = 0;
+	struct device *cb_dev = geni_se_dev->cb_dev;
+
+	mutex_lock(&geni_se_dev->iommu_lock);
+	/* Fast path: mapping already created and attached */
+	if (likely(geni_se_dev->iommu_map))
+		goto out_unlock;
+
+	geni_se_dev->iommu_map = arm_iommu_create_mapping(&platform_bus_type,
+							  va_start, va_size);
+	if (IS_ERR(geni_se_dev->iommu_map)) {
+		GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
+			"%s:%s iommu_create_mapping failure\n",
+			__func__, dev_name(cb_dev));
+		ret = PTR_ERR(geni_se_dev->iommu_map);
+		/*
+		 * Fix: reset to NULL so a later call retries; the original
+		 * left the ERR_PTR in place, which the non-NULL fast path
+		 * above would then treat as a valid mapping and return 0.
+		 */
+		geni_se_dev->iommu_map = NULL;
+		goto out_unlock;
+	}
+
+	if (geni_se_dev->iommu_s1_bypass &&
+	    iommu_domain_set_attr(geni_se_dev->iommu_map->domain,
+				  DOMAIN_ATTR_S1_BYPASS, &bypass)) {
+		GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
+			"%s:%s Couldn't bypass s1 translation\n",
+			__func__, dev_name(cb_dev));
+		ret = -EIO;
+		goto out_release;
+	}
+
+	if (arm_iommu_attach_device(cb_dev, geni_se_dev->iommu_map)) {
+		GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
+			"%s:%s couldn't arm_iommu_attach_device\n",
+			__func__, dev_name(cb_dev));
+		ret = -EIO;
+		goto out_release;
+	}
+	mutex_unlock(&geni_se_dev->iommu_lock);
+	GENI_SE_DBG(geni_se_dev->log_ctx, false, NULL, "%s:%s successful\n",
+		    __func__, dev_name(cb_dev));
+	return 0;
+
+out_release:
+	/* Centralized error cleanup (kernel goto idiom) */
+	arm_iommu_release_mapping(geni_se_dev->iommu_map);
+	geni_se_dev->iommu_map = NULL;
+out_unlock:
+	mutex_unlock(&geni_se_dev->iommu_lock);
+	return ret;
+}
+
+/**
+ * geni_se_iommu_map_buf() - Map a single buffer into QUPv3 context bank
+ * @wrapper_dev:	Pointer to the corresponding QUPv3 wrapper core.
+ * @iova:		Pointer in which the mapped virtual address is stored.
+ * @buf:		Address of the buffer that needs to be mapped.
+ * @size:		Size of the buffer.
+ * @dir:		Direction of the DMA transfer.
+ *
+ * Map an already allocated buffer into the QUPv3 context bank device space.
+ * On any failure *iova is left as DMA_ERROR_CODE.
+ *
+ * Return:	0 on success, standard Linux error codes on failure/error.
+ */
+int geni_se_iommu_map_buf(struct device *wrapper_dev, dma_addr_t *iova,
+			  void *buf, size_t size, enum dma_data_direction dir)
+{
+	struct geni_se_device *geni_se_dev;
+
+	if (!wrapper_dev || !iova || !buf || !size)
+		return -EINVAL;
+
+	*iova = DMA_ERROR_CODE;
+	geni_se_dev = dev_get_drvdata(wrapper_dev);
+	if (!geni_se_dev || !geni_se_dev->cb_dev)
+		return -ENODEV;
+
+	/* Mapping is done against the context-bank device, not the wrapper */
+	*iova = dma_map_single(geni_se_dev->cb_dev, buf, size, dir);
+	return dma_mapping_error(geni_se_dev->cb_dev, *iova) ? -EIO : 0;
+}
+EXPORT_SYMBOL(geni_se_iommu_map_buf);
+
+/**
+ * geni_se_iommu_alloc_buf() - Allocate & map a single buffer into QUPv3
+ *			       context bank
+ * @wrapper_dev:	Pointer to the corresponding QUPv3 wrapper core.
+ * @iova:		Pointer in which the mapped virtual address is stored.
+ * @size:		Size of the buffer.
+ *
+ * Allocate a coherent buffer and map it into the QUPv3 context bank device
+ * space.
+ *
+ * Return:	address of the buffer on success, NULL or ERR_PTR on
+ *		failure/error.
+ */
+void *geni_se_iommu_alloc_buf(struct device *wrapper_dev, dma_addr_t *iova,
+			      size_t size)
+{
+	struct geni_se_device *geni_se_dev;
+	void *vaddr;
+
+	if (!wrapper_dev || !iova || !size)
+		return ERR_PTR(-EINVAL);
+
+	*iova = DMA_ERROR_CODE;
+	geni_se_dev = dev_get_drvdata(wrapper_dev);
+	if (!geni_se_dev || !geni_se_dev->cb_dev)
+		return ERR_PTR(-ENODEV);
+
+	/* Allocation is done against the context-bank device */
+	vaddr = dma_alloc_coherent(geni_se_dev->cb_dev, size, iova,
+				   GFP_KERNEL);
+	if (!vaddr)
+		GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
+			    "%s: Failed dma_alloc_coherent\n", __func__);
+	return vaddr;
+}
+EXPORT_SYMBOL(geni_se_iommu_alloc_buf);
+
+/**
+ * geni_se_iommu_unmap_buf() - Unmap a single buffer from QUPv3 context bank
+ * @wrapper_dev:	Pointer to the corresponding QUPv3 wrapper core.
+ * @iova:		Pointer in which the mapped virtual address is stored.
+ * @size:		Size of the buffer.
+ * @dir:		Direction of the DMA transfer.
+ *
+ * Unmap a buffer previously mapped with geni_se_iommu_map_buf() from the
+ * QUPv3 context bank device space.
+ *
+ * Return:	0 on success, standard Linux error codes on failure/error.
+ */
+int geni_se_iommu_unmap_buf(struct device *wrapper_dev, dma_addr_t *iova,
+			    size_t size, enum dma_data_direction dir)
+{
+	struct geni_se_device *geni_se_dev;
+
+	if (!wrapper_dev || !iova || !size)
+		return -EINVAL;
+
+	geni_se_dev = dev_get_drvdata(wrapper_dev);
+	if (!geni_se_dev || !geni_se_dev->cb_dev)
+		return -ENODEV;
+
+	dma_unmap_single(geni_se_dev->cb_dev, *iova, size, dir);
+	return 0;
+}
+EXPORT_SYMBOL(geni_se_iommu_unmap_buf);
+
+/**
+ * geni_se_iommu_free_buf() - Unmap & free a single buffer from QUPv3
+ *			      context bank
+ * @wrapper_dev:	Pointer to the corresponding QUPv3 wrapper core.
+ * @iova:		Pointer in which the mapped virtual address is stored.
+ * @buf:		Address of the buffer.
+ * @size:		Size of the buffer.
+ *
+ * Release a coherent buffer previously obtained from
+ * geni_se_iommu_alloc_buf().
+ *
+ * Return:	0 on success, standard Linux error codes on failure/error.
+ */
+int geni_se_iommu_free_buf(struct device *wrapper_dev, dma_addr_t *iova,
+			   void *buf, size_t size)
+{
+	struct geni_se_device *geni_se_dev;
+
+	if (!wrapper_dev || !iova || !buf || !size)
+		return -EINVAL;
+
+	geni_se_dev = dev_get_drvdata(wrapper_dev);
+	if (!geni_se_dev || !geni_se_dev->cb_dev)
+		return -ENODEV;
+
+	dma_free_coherent(geni_se_dev->cb_dev, size, buf, *iova);
+	return 0;
+}
+EXPORT_SYMBOL(geni_se_iommu_free_buf);
+
+/* Matched by both the wrapper core and its IOMMU context-bank child node */
+static const struct of_device_id geni_se_dt_match[] = {
+	{ .compatible = "qcom,qupv3-geni-se", },
+	{ .compatible = "qcom,qupv3-geni-se-cb", },
+	{}
+};
+
+/*
+ * Probe handler for the "qcom,qupv3-geni-se-cb" context-bank child device.
+ * Records the child as the wrapper's IOMMU context-bank device; the actual
+ * mapping and attach are deferred to geni_se_iommu_map_and_attach().
+ * Note: no drvdata is set on the child device itself.
+ */
+static int geni_se_iommu_probe(struct device *dev)
+{
+	struct geni_se_device *geni_se_dev;
+
+	if (unlikely(!dev->parent)) {
+		dev_err(dev, "%s no parent for this device\n", __func__);
+		return -EINVAL;
+	}
+
+	/* The parent is the wrapper core; its drvdata was set in probe */
+	geni_se_dev = dev_get_drvdata(dev->parent);
+	if (unlikely(!geni_se_dev)) {
+		dev_err(dev, "%s geni_se_dev not found\n", __func__);
+		return -EINVAL;
+	}
+	geni_se_dev->cb_dev = dev;
+
+	GENI_SE_DBG(geni_se_dev->log_ctx, false, NULL,
+		    "%s: Probe successful\n", __func__);
+	return 0;
+}
+
+/*
+ * Probe the QUPv3 GENI SE wrapper: map registers, read mandatory bus IDs,
+ * set up bookkeeping and populate child devices (including the context-bank
+ * child, which binds to this same driver via geni_se_dt_match).
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int geni_se_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	struct geni_se_device *geni_se_dev;
+
+	if (of_device_is_compatible(dev->of_node, "qcom,qupv3-geni-se-cb"))
+		return geni_se_iommu_probe(dev);
+
+	geni_se_dev = devm_kzalloc(dev, sizeof(*geni_se_dev), GFP_KERNEL);
+	if (!geni_se_dev)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "%s: Mandatory resource info not found\n",
+			__func__);
+		devm_kfree(dev, geni_se_dev);
+		return -EINVAL;
+	}
+
+	geni_se_dev->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR_OR_NULL(geni_se_dev->base)) {
+		dev_err(dev, "%s: Error mapping the resource\n", __func__);
+		devm_kfree(dev, geni_se_dev);
+		return -EFAULT;
+	}
+
+	geni_se_dev->dev = dev;
+	ret = of_property_read_u32(dev->of_node, "qcom,bus-mas-id",
+				   &geni_se_dev->bus_mas_id);
+	if (ret) {
+		dev_err(dev, "%s: Error missing bus master id\n", __func__);
+		devm_iounmap(dev, geni_se_dev->base);
+		devm_kfree(dev, geni_se_dev);
+		/* Fix: bail out; original fell through using freed memory */
+		return ret;
+	}
+	ret = of_property_read_u32(dev->of_node, "qcom,bus-slv-id",
+				   &geni_se_dev->bus_slv_id);
+	if (ret) {
+		dev_err(dev, "%s: Error missing bus slave id\n", __func__);
+		devm_iounmap(dev, geni_se_dev->base);
+		devm_kfree(dev, geni_se_dev);
+		/* Fix: bail out; original fell through using freed memory */
+		return ret;
+	}
+
+	geni_se_dev->iommu_s1_bypass = of_property_read_bool(dev->of_node,
+							"qcom,iommu-s1-bypass");
+	geni_se_dev->bus_bw_set = default_bus_bw_set;
+	geni_se_dev->bus_bw_set_size = ARRAY_SIZE(default_bus_bw_set);
+	mutex_init(&geni_se_dev->iommu_lock);
+	INIT_LIST_HEAD(&geni_se_dev->ab_list_head);
+	INIT_LIST_HEAD(&geni_se_dev->ib_list_head);
+	spin_lock_init(&geni_se_dev->ab_ib_lock);
+	/* Logging is best-effort; probe continues without a log context */
+	geni_se_dev->log_ctx = ipc_log_context_create(NUM_LOG_PAGES,
+						dev_name(geni_se_dev->dev), 0);
+	if (!geni_se_dev->log_ctx)
+		dev_err(dev, "%s Failed to allocate log context\n", __func__);
+	dev_set_drvdata(dev, geni_se_dev);
+
+	ret = of_platform_populate(dev->of_node, geni_se_dt_match, NULL, dev);
+	if (ret) {
+		dev_err(dev, "%s: Error populating children\n", __func__);
+		/*
+		 * Fix: clear drvdata and return before the debug print below
+		 * dereferences the freed geni_se_dev (use-after-free in the
+		 * original), and release the log context to avoid a leak.
+		 */
+		dev_set_drvdata(dev, NULL);
+		if (geni_se_dev->log_ctx)
+			ipc_log_context_destroy(geni_se_dev->log_ctx);
+		devm_iounmap(dev, geni_se_dev->base);
+		devm_kfree(dev, geni_se_dev);
+		return ret;
+	}
+
+	GENI_SE_DBG(geni_se_dev->log_ctx, false, NULL,
+		    "%s: Probe successful\n", __func__);
+	return 0;
+}
+
+/*
+ * Tear down the wrapper core: detach/release the IOMMU mapping, destroy the
+ * log context and release the register mapping and device structure.
+ *
+ * Return: 0 always.
+ */
+static int geni_se_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct geni_se_device *geni_se_dev = dev_get_drvdata(dev);
+
+	/*
+	 * Fix: the context-bank child binds to this driver too (see
+	 * geni_se_dt_match) but never has drvdata set, so its removal would
+	 * dereference NULL here; there is nothing to tear down for it.
+	 */
+	if (!geni_se_dev)
+		return 0;
+
+	if (likely(!IS_ERR_OR_NULL(geni_se_dev->iommu_map))) {
+		arm_iommu_detach_device(geni_se_dev->cb_dev);
+		arm_iommu_release_mapping(geni_se_dev->iommu_map);
+	}
+	ipc_log_context_destroy(geni_se_dev->log_ctx);
+	devm_iounmap(dev, geni_se_dev->base);
+	devm_kfree(dev, geni_se_dev);
+	return 0;
+}
+
+/* Platform driver shared by the wrapper core and its context-bank child */
+static struct platform_driver geni_se_driver = {
+	.driver = {
+		.name = "qupv3_geni_se",
+		.of_match_table = geni_se_dt_match,
+	},
+	.probe = geni_se_probe,
+	.remove = geni_se_remove,
+};
+
+static int __init geni_se_driver_init(void)
+{
+	return platform_driver_register(&geni_se_driver);
+}
+/* arch_initcall: registered early (before device_initcall) - presumably so
+ * SE client drivers find the wrapper already probed; confirm ordering need.
+ */
+arch_initcall(geni_se_driver_init);
+
+static void __exit geni_se_driver_exit(void)
+{
+	platform_driver_unregister(&geni_se_driver);
+}
+module_exit(geni_se_driver_exit);
+
+MODULE_DESCRIPTION("GENI Serial Engine Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index e958433..50171fd 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -144,10 +144,7 @@
  * so the first read after a fault returns the latched value and subsequent
  * reads return the current value.  In order to return the fault status
  * to the user, have the interrupt handler save the reg's value and retrieve
- * it in the appropriate health/status routine.  Each routine has its own
- * flag indicating whether it should use the value stored by the last run
- * of the interrupt handler or do an actual reg read.  That way each routine
- * can report back whatever fault may have occured.
+ * it in the appropriate health/status routine.
  */
 struct bq24190_dev_info {
 	struct i2c_client		*client;
@@ -159,10 +156,6 @@
 	unsigned int			gpio_int;
 	unsigned int			irq;
 	struct mutex			f_reg_lock;
-	bool				first_time;
-	bool				charger_health_valid;
-	bool				battery_health_valid;
-	bool				battery_status_valid;
 	u8				f_reg;
 	u8				ss_reg;
 	u8				watchdog;
@@ -636,21 +629,11 @@
 		union power_supply_propval *val)
 {
 	u8 v;
-	int health, ret;
+	int health;
 
 	mutex_lock(&bdi->f_reg_lock);
-
-	if (bdi->charger_health_valid) {
-		v = bdi->f_reg;
-		bdi->charger_health_valid = false;
-		mutex_unlock(&bdi->f_reg_lock);
-	} else {
-		mutex_unlock(&bdi->f_reg_lock);
-
-		ret = bq24190_read(bdi, BQ24190_REG_F, &v);
-		if (ret < 0)
-			return ret;
-	}
+	v = bdi->f_reg;
+	mutex_unlock(&bdi->f_reg_lock);
 
 	if (v & BQ24190_REG_F_BOOST_FAULT_MASK) {
 		/*
@@ -937,18 +920,8 @@
 	int status, ret;
 
 	mutex_lock(&bdi->f_reg_lock);
-
-	if (bdi->battery_status_valid) {
-		chrg_fault = bdi->f_reg;
-		bdi->battery_status_valid = false;
-		mutex_unlock(&bdi->f_reg_lock);
-	} else {
-		mutex_unlock(&bdi->f_reg_lock);
-
-		ret = bq24190_read(bdi, BQ24190_REG_F, &chrg_fault);
-		if (ret < 0)
-			return ret;
-	}
+	chrg_fault = bdi->f_reg;
+	mutex_unlock(&bdi->f_reg_lock);
 
 	chrg_fault &= BQ24190_REG_F_CHRG_FAULT_MASK;
 	chrg_fault >>= BQ24190_REG_F_CHRG_FAULT_SHIFT;
@@ -996,21 +969,11 @@
 		union power_supply_propval *val)
 {
 	u8 v;
-	int health, ret;
+	int health;
 
 	mutex_lock(&bdi->f_reg_lock);
-
-	if (bdi->battery_health_valid) {
-		v = bdi->f_reg;
-		bdi->battery_health_valid = false;
-		mutex_unlock(&bdi->f_reg_lock);
-	} else {
-		mutex_unlock(&bdi->f_reg_lock);
-
-		ret = bq24190_read(bdi, BQ24190_REG_F, &v);
-		if (ret < 0)
-			return ret;
-	}
+	v = bdi->f_reg;
+	mutex_unlock(&bdi->f_reg_lock);
 
 	if (v & BQ24190_REG_F_BAT_FAULT_MASK) {
 		health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
@@ -1197,9 +1160,12 @@
 static irqreturn_t bq24190_irq_handler_thread(int irq, void *data)
 {
 	struct bq24190_dev_info *bdi = data;
-	bool alert_userspace = false;
+	const u8 battery_mask_ss = BQ24190_REG_SS_CHRG_STAT_MASK;
+	const u8 battery_mask_f = BQ24190_REG_F_BAT_FAULT_MASK
+				| BQ24190_REG_F_NTC_FAULT_MASK;
+	bool alert_charger = false, alert_battery = false;
 	u8 ss_reg = 0, f_reg = 0;
-	int ret;
+	int i, ret;
 
 	pm_runtime_get_sync(bdi->dev);
 
@@ -1209,6 +1175,32 @@
 		goto out;
 	}
 
+	i = 0;
+	do {
+		ret = bq24190_read(bdi, BQ24190_REG_F, &f_reg);
+		if (ret < 0) {
+			dev_err(bdi->dev, "Can't read F reg: %d\n", ret);
+			goto out;
+		}
+	} while (f_reg && ++i < 2);
+
+	if (f_reg != bdi->f_reg) {
+		dev_info(bdi->dev,
+			"Fault: boost %d, charge %d, battery %d, ntc %d\n",
+			!!(f_reg & BQ24190_REG_F_BOOST_FAULT_MASK),
+			!!(f_reg & BQ24190_REG_F_CHRG_FAULT_MASK),
+			!!(f_reg & BQ24190_REG_F_BAT_FAULT_MASK),
+			!!(f_reg & BQ24190_REG_F_NTC_FAULT_MASK));
+
+		mutex_lock(&bdi->f_reg_lock);
+		if ((bdi->f_reg & battery_mask_f) != (f_reg & battery_mask_f))
+			alert_battery = true;
+		if ((bdi->f_reg & ~battery_mask_f) != (f_reg & ~battery_mask_f))
+			alert_charger = true;
+		bdi->f_reg = f_reg;
+		mutex_unlock(&bdi->f_reg_lock);
+	}
+
 	if (ss_reg != bdi->ss_reg) {
 		/*
 		 * The device is in host mode so when PG_STAT goes from 1->0
@@ -1225,47 +1217,17 @@
 					ret);
 		}
 
+		if ((bdi->ss_reg & battery_mask_ss) != (ss_reg & battery_mask_ss))
+			alert_battery = true;
+		if ((bdi->ss_reg & ~battery_mask_ss) != (ss_reg & ~battery_mask_ss))
+			alert_charger = true;
 		bdi->ss_reg = ss_reg;
-		alert_userspace = true;
 	}
 
-	mutex_lock(&bdi->f_reg_lock);
-
-	ret = bq24190_read(bdi, BQ24190_REG_F, &f_reg);
-	if (ret < 0) {
-		mutex_unlock(&bdi->f_reg_lock);
-		dev_err(bdi->dev, "Can't read F reg: %d\n", ret);
-		goto out;
-	}
-
-	if (f_reg != bdi->f_reg) {
-		bdi->f_reg = f_reg;
-		bdi->charger_health_valid = true;
-		bdi->battery_health_valid = true;
-		bdi->battery_status_valid = true;
-
-		alert_userspace = true;
-	}
-
-	mutex_unlock(&bdi->f_reg_lock);
-
-	/*
-	 * Sometimes bq24190 gives a steady trickle of interrupts even
-	 * though the watchdog timer is turned off and neither the STATUS
-	 * nor FAULT registers have changed.  Weed out these sprurious
-	 * interrupts so userspace isn't alerted for no reason.
-	 * In addition, the chip always generates an interrupt after
-	 * register reset so we should ignore that one (the very first
-	 * interrupt received).
-	 */
-	if (alert_userspace) {
-		if (!bdi->first_time) {
-			power_supply_changed(bdi->charger);
-			power_supply_changed(bdi->battery);
-		} else {
-			bdi->first_time = false;
-		}
-	}
+	if (alert_charger)
+		power_supply_changed(bdi->charger);
+	if (alert_battery)
+		power_supply_changed(bdi->battery);
 
 out:
 	pm_runtime_put_sync(bdi->dev);
@@ -1300,6 +1262,10 @@
 		goto out;
 
 	ret = bq24190_set_mode_host(bdi);
+	if (ret < 0)
+		goto out;
+
+	ret = bq24190_read(bdi, BQ24190_REG_SS, &bdi->ss_reg);
 out:
 	pm_runtime_put_sync(bdi->dev);
 	return ret;
@@ -1375,10 +1341,8 @@
 	bdi->model = id->driver_data;
 	strncpy(bdi->model_name, id->name, I2C_NAME_SIZE);
 	mutex_init(&bdi->f_reg_lock);
-	bdi->first_time = true;
-	bdi->charger_health_valid = false;
-	bdi->battery_health_valid = false;
-	bdi->battery_status_valid = false;
+	bdi->f_reg = 0;
+	bdi->ss_reg = BQ24190_REG_SS_VBUS_STAT_MASK; /* impossible state */
 
 	i2c_set_clientdata(client, bdi);
 
@@ -1392,22 +1356,13 @@
 		return -EINVAL;
 	}
 
-	ret = devm_request_threaded_irq(dev, bdi->irq, NULL,
-			bq24190_irq_handler_thread,
-			IRQF_TRIGGER_RISING | IRQF_ONESHOT,
-			"bq24190-charger", bdi);
-	if (ret < 0) {
-		dev_err(dev, "Can't set up irq handler\n");
-		goto out1;
-	}
-
 	pm_runtime_enable(dev);
 	pm_runtime_resume(dev);
 
 	ret = bq24190_hw_init(bdi);
 	if (ret < 0) {
 		dev_err(dev, "Hardware init failed\n");
-		goto out2;
+		goto out1;
 	}
 
 	charger_cfg.drv_data = bdi;
@@ -1418,7 +1373,7 @@
 	if (IS_ERR(bdi->charger)) {
 		dev_err(dev, "Can't register charger\n");
 		ret = PTR_ERR(bdi->charger);
-		goto out2;
+		goto out1;
 	}
 
 	battery_cfg.drv_data = bdi;
@@ -1427,24 +1382,34 @@
 	if (IS_ERR(bdi->battery)) {
 		dev_err(dev, "Can't register battery\n");
 		ret = PTR_ERR(bdi->battery);
-		goto out3;
+		goto out2;
 	}
 
 	ret = bq24190_sysfs_create_group(bdi);
 	if (ret) {
 		dev_err(dev, "Can't create sysfs entries\n");
+		goto out3;
+	}
+
+	ret = devm_request_threaded_irq(dev, bdi->irq, NULL,
+			bq24190_irq_handler_thread,
+			IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+			"bq24190-charger", bdi);
+	if (ret < 0) {
+		dev_err(dev, "Can't set up irq handler\n");
 		goto out4;
 	}
 
 	return 0;
 
 out4:
-	power_supply_unregister(bdi->battery);
+	bq24190_sysfs_remove_group(bdi);
 out3:
-	power_supply_unregister(bdi->charger);
+	power_supply_unregister(bdi->battery);
 out2:
-	pm_runtime_disable(dev);
+	power_supply_unregister(bdi->charger);
 out1:
+	pm_runtime_disable(dev);
 	if (bdi->gpio_int)
 		gpio_free(bdi->gpio_int);
 
@@ -1488,12 +1453,13 @@
 	struct i2c_client *client = to_i2c_client(dev);
 	struct bq24190_dev_info *bdi = i2c_get_clientdata(client);
 
-	bdi->charger_health_valid = false;
-	bdi->battery_health_valid = false;
-	bdi->battery_status_valid = false;
+	bdi->f_reg = 0;
+	bdi->ss_reg = BQ24190_REG_SS_VBUS_STAT_MASK; /* impossible state */
 
 	pm_runtime_get_sync(bdi->dev);
 	bq24190_register_reset(bdi);
+	bq24190_set_mode_host(bdi);
+	bq24190_read(bdi, BQ24190_REG_SS, &bdi->ss_reg);
 	pm_runtime_put_sync(bdi->dev);
 
 	/* Things may have changed while suspended so alert upper layer */
diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c
index 7321b72..cd614fe 100644
--- a/drivers/power/supply/lp8788-charger.c
+++ b/drivers/power/supply/lp8788-charger.c
@@ -654,7 +654,7 @@
 {
 	struct lp8788_charger *pchg = dev_get_drvdata(dev);
 	char *stime[] = { "400ms", "5min", "10min", "15min",
-			"20min", "25min", "30min" "No timeout" };
+			"20min", "25min", "30min", "No timeout" };
 	u8 val;
 
 	lp8788_read_byte(pchg->lp, LP8788_CHG_EOC, &val);
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index d16e3e8..077d237 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -123,6 +123,7 @@
 }
 EXPORT_SYMBOL_GPL(power_supply_changed);
 
+static int psy_register_cooler(struct device *dev, struct power_supply *psy);
 /*
  * Notify that power supply was registered after parent finished the probing.
  *
@@ -130,6 +131,8 @@
  * calling power_supply_changed() directly from power_supply_register()
  * would lead to execution of get_property() function provided by the driver
  * too early - before the probe ends.
+ * Also, registering cooling device from the probe will execute the
+ * get_property() function. So register the cooling device after the probe.
  *
  * Avoid that by waiting on parent's mutex.
  */
@@ -141,6 +144,7 @@
 	if (psy->dev.parent)
 		mutex_lock(&psy->dev.parent->mutex);
 
+	psy_register_cooler(psy->dev.parent, psy);
 	power_supply_changed(psy);
 
 	if (psy->dev.parent)
@@ -776,10 +780,6 @@
 	if (rc)
 		goto register_thermal_failed;
 
-	rc = psy_register_cooler(parent, psy);
-	if (rc)
-		goto register_cooler_failed;
-
 	rc = power_supply_create_triggers(psy);
 	if (rc)
 		goto create_triggers_failed;
@@ -803,8 +803,6 @@
 	return psy;
 
 create_triggers_failed:
-	psy_unregister_cooler(psy);
-register_cooler_failed:
 	psy_unregister_thermal(psy);
 register_thermal_failed:
 	device_del(dev);
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index e802fbd..f8a7555 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -858,7 +858,6 @@
 	POWER_SUPPLY_PROP_PRESENT,
 	POWER_SUPPLY_PROP_CHARGE_TYPE,
 	POWER_SUPPLY_PROP_CAPACITY,
-	POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL,
 	POWER_SUPPLY_PROP_CHARGER_TEMP,
 	POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
 	POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
@@ -878,6 +877,8 @@
 	POWER_SUPPLY_PROP_DIE_HEALTH,
 	POWER_SUPPLY_PROP_RERUN_AICL,
 	POWER_SUPPLY_PROP_DP_DM,
+	POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX,
+	POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT,
 };
 
 static int smb2_batt_get_prop(struct power_supply *psy,
@@ -907,9 +908,12 @@
 	case POWER_SUPPLY_PROP_CAPACITY:
 		rc = smblib_get_prop_batt_capacity(chg, val);
 		break;
-	case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+	case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
 		rc = smblib_get_prop_system_temp_level(chg, val);
 		break;
+	case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX:
+		rc = smblib_get_prop_system_temp_level_max(chg, val);
+		break;
 	case POWER_SUPPLY_PROP_CHARGER_TEMP:
 		/* do not query RRADC if charger is not present */
 		rc = smblib_get_prop_usb_present(chg, &pval);
@@ -1004,7 +1008,7 @@
 	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
 		rc = smblib_set_prop_input_suspend(chg, val);
 		break;
-	case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+	case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
 		rc = smblib_set_prop_system_temp_level(chg, val);
 		break;
 	case POWER_SUPPLY_PROP_CAPACITY:
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index 7d5a8bd..f4ae415 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -1635,6 +1635,13 @@
 	return 0;
 }
 
+int smblib_get_prop_system_temp_level_max(struct smb_charger *chg,
+				union power_supply_propval *val)
+{
+	val->intval = chg->thermal_levels;
+	return 0;
+}
+
 int smblib_get_prop_input_current_limited(struct smb_charger *chg,
 				union power_supply_propval *val)
 {
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index b0d84f0..5409166 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -398,6 +398,8 @@
 				union power_supply_propval *val);
 int smblib_get_prop_system_temp_level(struct smb_charger *chg,
 				union power_supply_propval *val);
+int smblib_get_prop_system_temp_level_max(struct smb_charger *chg,
+				union power_supply_propval *val);
 int smblib_get_prop_input_current_limited(struct smb_charger *chg,
 				union power_supply_propval *val);
 int smblib_get_prop_batt_voltage_now(struct smb_charger *chg,
diff --git a/drivers/regulator/cpr3-regulator.c b/drivers/regulator/cpr3-regulator.c
index c45fb0d..b1e6a3b 100644
--- a/drivers/regulator/cpr3-regulator.c
+++ b/drivers/regulator/cpr3-regulator.c
@@ -290,6 +290,10 @@
 #define CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_UP_SHIFT	22
 #define CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_DOWN_MASK	GENMASK(21, 20)
 #define CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_DOWN_SHIFT	20
+#define CPRH_MISC_REG2_ACD_NOTWAIT_4_CL_SETTLE_MASK	BIT(16)
+#define CPRH_MISC_REG2_ACD_NOTWAIT_4_CL_SETTLE_EN	BIT(16)
+#define CPRH_MISC_REG2_ACD_AVG_FAST_UPDATE_EN_MASK	BIT(13)
+#define CPRH_MISC_REG2_ACD_AVG_FAST_UPDATE_EN	BIT(13)
 #define CPRH_MISC_REG2_ACD_AVG_EN_MASK	BIT(12)
 #define CPRH_MISC_REG2_ACD_AVG_ENABLE	BIT(12)
 
@@ -1449,6 +1453,16 @@
 				  ctrl->acd_adj_down_step_size <<
 				  CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_DOWN_SHIFT);
 		cpr3_masked_write(ctrl, CPRH_REG_MISC_REG2,
+				  CPRH_MISC_REG2_ACD_NOTWAIT_4_CL_SETTLE_MASK,
+				  (ctrl->acd_notwait_for_cl_settled
+				   ? CPRH_MISC_REG2_ACD_NOTWAIT_4_CL_SETTLE_EN
+				   : 0));
+		cpr3_masked_write(ctrl, CPRH_REG_MISC_REG2,
+				  CPRH_MISC_REG2_ACD_AVG_FAST_UPDATE_EN_MASK,
+				  (ctrl->acd_adj_avg_fast_update
+				   ? CPRH_MISC_REG2_ACD_AVG_FAST_UPDATE_EN
+				   : 0));
+		cpr3_masked_write(ctrl, CPRH_REG_MISC_REG2,
 				  CPRH_MISC_REG2_ACD_AVG_EN_MASK,
 				  CPRH_MISC_REG2_ACD_AVG_ENABLE);
 	}
diff --git a/drivers/regulator/cpr3-regulator.h b/drivers/regulator/cpr3-regulator.h
index 8535020..a315e46 100644
--- a/drivers/regulator/cpr3-regulator.h
+++ b/drivers/regulator/cpr3-regulator.h
@@ -661,6 +661,10 @@
  * @acd_adj_up_step_size: ACD step size in units of PMIC steps used for
  *			target quotient adjustment due to an ACD up
  *			recommendation.
+ * @acd_notwait_for_cl_settled: Boolean which indicates ACD down recommendations
+ *			do not need to wait for CPR closed-loop to settle.
+ * @acd_adj_avg_fast_update: Boolean which indicates if CPR should issue
+ *			immediate voltage updates on ACD requests.
  * @acd_avg_enabled:	Boolean defining the enable state of the ACD AVG
  *			feature.
  * @count_mode:		CPR controller count mode
@@ -828,6 +832,8 @@
 	u32			acd_adj_up_step_limit;
 	u32			acd_adj_down_step_size;
 	u32			acd_adj_up_step_size;
+	bool			acd_notwait_for_cl_settled;
+	bool			acd_adj_avg_fast_update;
 	bool			acd_avg_enabled;
 	enum cpr3_count_mode	count_mode;
 	u32			count_repeat;
diff --git a/drivers/regulator/cprh-kbss-regulator.c b/drivers/regulator/cprh-kbss-regulator.c
index f7f0299..cf7c35d 100644
--- a/drivers/regulator/cprh-kbss-regulator.c
+++ b/drivers/regulator/cprh-kbss-regulator.c
@@ -2282,6 +2282,13 @@
 				 rc);
 			return rc;
 		}
+
+		ctrl->acd_notwait_for_cl_settled =
+			of_property_read_bool(ctrl->dev->of_node,
+					      "qcom,cpr-acd-notwait-for-cl-settled");
+		ctrl->acd_adj_avg_fast_update =
+			of_property_read_bool(ctrl->dev->of_node,
+					      "qcom,cpr-acd-avg-fast-update");
 	}
 
 	rc = of_property_read_u32(ctrl->dev->of_node,
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 3e2bdb9..17b1574 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1497,7 +1497,7 @@
 
 config MAC_SCSI
 	tristate "Macintosh NCR5380 SCSI"
-	depends on MAC && SCSI=y
+	depends on MAC && SCSI
 	select SCSI_SPI_ATTRS
 	help
 	  This is the NCR 5380 SCSI controller included on most of the 68030
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 734e592..f9b52a4 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1464,7 +1464,8 @@
 				/* Don't abort commands in adapter during EEH
 				 * recovery as it's not accessible/responding.
 				 */
-				if (GET_CMD_SP(sp) && !ha->flags.eeh_busy) {
+				if (GET_CMD_SP(sp) && !ha->flags.eeh_busy &&
+				    (sp->type == SRB_SCSI_CMD)) {
 					/* Get a reference to the sp and drop the lock.
 					 * The reference ensures this sp->done() call
 					 * - and not the call in qla2xxx_eh_abort() -
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index af17066..243b2d1 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -154,16 +154,6 @@
 	else
 		fn = NULL;
 
-	/*
-	 * Forcibly set runtime PM status of request queue to "active" to
-	 * make sure we can again get requests from the queue (see also
-	 * blk_pm_peek_request()).
-	 *
-	 * The resume hook will correct runtime PM status of the disk.
-	 */
-	if (scsi_is_sdev_device(dev) && pm_runtime_suspended(dev))
-		blk_set_runtime_active(to_scsi_device(dev)->request_queue);
-
 	if (fn) {
 		async_schedule_domain(fn, dev, &scsi_sd_pm_domain);
 
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index a535b26..96a343e 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -533,8 +533,7 @@
 	size_t buffer_length;
 	time64_t local_time;
 	unsigned int year;
-	struct timeval time;
-	struct rtc_time tm;
+	struct tm tm;
 
 	buffer_length = sizeof(*buffer);
 
@@ -551,9 +550,8 @@
 	put_unaligned_le16(sizeof(buffer->time),
 		&buffer->time_length);
 
-	do_gettimeofday(&time);
-	local_time = time.tv_sec - (sys_tz.tz_minuteswest * 60);
-	rtc_time64_to_tm(local_time, &tm);
+	local_time = ktime_get_real_seconds();
+	time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
 	year = tm.tm_year + 1900;
 
 	buffer->time[0] = bin2bcd(tm.tm_hour);
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 6be274f..1b283b2 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -114,3 +114,15 @@
 	The UFS unit-tests register as a block device test utility to
 	the test-iosched and will be initiated when the test-iosched will
 	be chosen to be the active I/O scheduler.
+
+config SCSI_UFSHCD_CMD_LOGGING
+	bool "Universal Flash Storage host controller driver layer command logging support"
+	depends on SCSI_UFSHCD
+	help
+	  This selects the UFS host controller driver layer command logging.
+	  UFS host controller driver layer command logging records all the
+	  command information sent from UFS host controller for debugging
+	  purpose.
+
+	  Select this if you want above mentioned debug information captured.
+	  If unsure, say N.
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 602c359..dc74484 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -532,14 +532,145 @@
 		*val = ' ';
 }
 
+#define UFSHCD_MAX_CMD_LOGGING	100
+
 #ifdef CONFIG_TRACEPOINTS
-static void ufshcd_add_command_trace(struct ufs_hba *hba,
-		unsigned int tag, const char *str)
+static inline void ufshcd_add_command_trace(struct ufs_hba *hba,
+			struct ufshcd_cmd_log_entry *entry, u8 opcode)
 {
-	sector_t lba = -1;
-	u8 opcode = 0;
-	u32 intr, doorbell;
+	if (trace_ufshcd_command_enabled()) {
+		u32 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+
+		trace_ufshcd_command(dev_name(hba->dev), entry->str, entry->tag,
+				     entry->doorbell, entry->transfer_len, intr,
+				     entry->lba, opcode);
+	}
+}
+#else
+static inline void ufshcd_add_command_trace(struct ufs_hba *hba,
+			struct ufshcd_cmd_log_entry *entry, u8 opcode)
+{
+}
+#endif
+
+#ifdef CONFIG_SCSI_UFSHCD_CMD_LOGGING
+static void ufshcd_cmd_log_init(struct ufs_hba *hba)
+{
+	/* Allocate log entries */
+	if (!hba->cmd_log.entries) {
+		hba->cmd_log.entries = kzalloc(UFSHCD_MAX_CMD_LOGGING *
+			sizeof(struct ufshcd_cmd_log_entry), GFP_KERNEL);
+		if (!hba->cmd_log.entries)
+			return;
+		dev_dbg(hba->dev, "%s: cmd_log.entries initialized\n",
+				__func__);
+	}
+}
+
+static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
+			     unsigned int tag, u8 cmd_id, u8 idn, u8 lun,
+			     sector_t lba, int transfer_len, u8 opcode)
+{
+	struct ufshcd_cmd_log_entry *entry;
+
+	if (!hba->cmd_log.entries)
+		return;
+
+	entry = &hba->cmd_log.entries[hba->cmd_log.pos];
+	entry->lun = lun;
+	entry->str = str;
+	entry->cmd_type = cmd_type;
+	entry->cmd_id = cmd_id;
+	entry->lba = lba;
+	entry->transfer_len = transfer_len;
+	entry->idn = idn;
+	entry->doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+	entry->tag = tag;
+	entry->tstamp = ktime_get();
+	entry->outstanding_reqs = hba->outstanding_reqs;
+	entry->seq_num = hba->cmd_log.seq_num;
+	hba->cmd_log.seq_num++;
+	hba->cmd_log.pos =
+			(hba->cmd_log.pos + 1) % UFSHCD_MAX_CMD_LOGGING;
+
+	ufshcd_add_command_trace(hba, entry, opcode);
+}
+
+static void ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
+	unsigned int tag, u8 cmd_id, u8 idn)
+{
+	__ufshcd_cmd_log(hba, str, cmd_type, tag, cmd_id, idn,
+			 0xff, (sector_t)-1, -1, -1);
+}
+
+static void ufshcd_dme_cmd_log(struct ufs_hba *hba, char *str, u8 cmd_id)
+{
+	ufshcd_cmd_log(hba, str, "dme", 0xff, cmd_id, 0xff);
+}
+
+static void ufshcd_cmd_log_print(struct ufs_hba *hba)
+{
+	int i;
+	int pos;
+	struct ufshcd_cmd_log_entry *p;
+
+	if (!hba->cmd_log.entries)
+		return;
+
+	pos = hba->cmd_log.pos;
+	for (i = 0; i < UFSHCD_MAX_CMD_LOGGING; i++) {
+		p = &hba->cmd_log.entries[pos];
+		pos = (pos + 1) % UFSHCD_MAX_CMD_LOGGING;
+
+		if (ktime_to_us(p->tstamp)) {
+			pr_err("%s: %s: seq_no=%u lun=0x%x cmd_id=0x%02x lba=0x%llx txfer_len=%d tag=%u, doorbell=0x%x outstanding=0x%x idn=%d time=%lld us\n",
+				p->cmd_type, p->str, p->seq_num,
+				p->lun, p->cmd_id, (unsigned long long)p->lba,
+				p->transfer_len, p->tag, p->doorbell,
+				p->outstanding_reqs, p->idn,
+				ktime_to_us(p->tstamp));
+				usleep_range(1000, 1100);
+		}
+	}
+}
+#else
+static void ufshcd_cmd_log_init(struct ufs_hba *hba)
+{
+}
+
+static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
+			     unsigned int tag, u8 cmd_id, u8 idn, u8 lun,
+			     sector_t lba, int transfer_len, u8 opcode)
+{
+	struct ufshcd_cmd_log_entry entry;
+
+	entry.str = str;
+	entry.lba = lba;
+	entry.transfer_len = transfer_len;
+	entry.doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+	entry.tag = tag;
+
+	ufshcd_add_command_trace(hba, &entry, opcode);
+}
+
+static void ufshcd_dme_cmd_log(struct ufs_hba *hba, char *str, u8 cmd_id)
+{
+}
+
+static void ufshcd_cmd_log_print(struct ufs_hba *hba)
+{
+}
+#endif
+
+#ifdef CONFIG_TRACEPOINTS
+static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
+					unsigned int tag, const char *str)
+{
 	struct ufshcd_lrb *lrbp;
+	char *cmd_type = NULL;
+	u8 opcode = 0;
+	u8 cmd_id = 0, idn = 0;
+	sector_t lba = -1;
 	int transfer_len = -1;
 
 	lrbp = &hba->lrb[tag];
@@ -553,23 +684,28 @@
 			 */
 			if (lrbp->cmd->request && lrbp->cmd->request->bio)
 				lba =
-				  lrbp->cmd->request->bio->bi_iter.bi_sector;
+				lrbp->cmd->request->bio->bi_iter.bi_sector;
 			transfer_len = be32_to_cpu(
 				lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
 		}
 	}
 
-	intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
-	doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
-	trace_ufshcd_command(dev_name(hba->dev), str, tag,
-				doorbell, transfer_len, intr, lba, opcode);
-}
+	if (lrbp->cmd && (lrbp->command_type == UTP_CMD_TYPE_SCSI)) {
+		cmd_type = "scsi";
+		cmd_id = (u8)(*lrbp->cmd->cmnd);
+	} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
+		if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP) {
+			cmd_type = "nop";
+			cmd_id = 0;
+		} else if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY) {
+			cmd_type = "query";
+			cmd_id = hba->dev_cmd.query.request.upiu_req.opcode;
+			idn = hba->dev_cmd.query.request.upiu_req.idn;
+		}
+	}
 
-static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
-					unsigned int tag, const char *str)
-{
-	if (trace_ufshcd_command_enabled())
-		ufshcd_add_command_trace(hba, tag, str);
+	__ufshcd_cmd_log(hba, (char *) str, cmd_type, tag, cmd_id, idn,
+			 lrbp->lun, lba, transfer_len, opcode);
 }
 #else
 static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
@@ -2280,6 +2416,7 @@
 
 	hba->active_uic_cmd = uic_cmd;
 
+	ufshcd_dme_cmd_log(hba, "send", hba->active_uic_cmd->command);
 	/* Write Args */
 	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
 	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
@@ -2313,6 +2450,8 @@
 	if (ret)
 		ufsdbg_set_err_state(hba);
 
+	ufshcd_dme_cmd_log(hba, "cmp1", hba->active_uic_cmd->command);
+
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	hba->active_uic_cmd = NULL;
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -4148,6 +4287,8 @@
 			cmd->command, status);
 		ret = (status != PWR_OK) ? status : -1;
 	}
+	ufshcd_dme_cmd_log(hba, "cmp2", hba->active_uic_cmd->command);
+
 out:
 	if (ret) {
 		ufsdbg_set_err_state(hba);
@@ -5474,7 +5615,7 @@
 		} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
 			if (hba->dev_cmd.complete) {
 				ufshcd_cond_add_cmd_trace(hba, index,
-						"dev_complete");
+						"dcmp");
 				complete(hba->dev_cmd.complete);
 			}
 		}
@@ -5997,6 +6138,7 @@
 			ufshcd_print_host_state(hba);
 			ufshcd_print_pwr_info(hba);
 			ufshcd_print_tmrs(hba, hba->outstanding_tasks);
+			ufshcd_cmd_log_print(hba);
 			spin_lock_irqsave(hba->host->host_lock, flags);
 		}
 	}
@@ -6503,6 +6645,7 @@
 	hba = shost_priv(host);
 	tag = cmd->request->tag;
 
+	ufshcd_cmd_log_print(hba);
 	lrbp = &hba->lrb[tag];
 	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
 	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
@@ -9986,6 +10129,8 @@
 	 */
 	ufshcd_set_ufs_dev_active(hba);
 
+	ufshcd_cmd_log_init(hba);
+
 	async_schedule(ufshcd_async_scan, hba);
 
 	ufsdbg_add_debugfs(hba);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 11916ac..6966aac 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -515,7 +515,7 @@
 	bool is_scaled_up;
 };
 
-#define UIC_ERR_REG_HIST_LENGTH 8
+#define UIC_ERR_REG_HIST_LENGTH 20
 /**
  * struct ufs_uic_err_reg_hist - keeps history of uic errors
  * @pos: index to indicate cyclic buffer position
@@ -637,6 +637,27 @@
 		 UFSHCD_DBG_PRINT_TMRS_EN | UFSHCD_DBG_PRINT_PWR_EN |	   \
 		 UFSHCD_DBG_PRINT_HOST_STATE_EN)
 
+struct ufshcd_cmd_log_entry {
+	char *str;	/* context like "send", "complete" */
+	char *cmd_type;	/* "scsi", "query", "nop", "dme" */
+	u8 lun;
+	u8 cmd_id;
+	sector_t lba;
+	int transfer_len;
+	u8 idn;		/* used only for query idn */
+	u32 doorbell;
+	u32 outstanding_reqs;
+	u32 seq_num;
+	unsigned int tag;
+	ktime_t tstamp;
+};
+
+struct ufshcd_cmd_log {
+	struct ufshcd_cmd_log_entry *entries;
+	int pos;
+	u32 seq_num;
+};
+
 /**
  * struct ufs_hba - per adapter private structure
  * @mmio_base: UFSHCI base register address
@@ -860,6 +881,7 @@
 
 	struct ufs_clk_gating clk_gating;
 	struct ufs_hibern8_on_idle hibern8_on_idle;
+	struct ufshcd_cmd_log cmd_log;
 
 	/* Control to enable/disable host capabilities */
 	u32 caps;
diff --git a/drivers/sensors/Kconfig b/drivers/sensors/Kconfig
new file mode 100644
index 0000000..0e2da79
--- /dev/null
+++ b/drivers/sensors/Kconfig
@@ -0,0 +1,6 @@
+config SENSORS_SSC
+	bool "Enable Sensors Driver Support for SSC"
+	help
+	  Add support for sensors SSC driver.
+	  This driver is used for exercising sensors use case,
+	  time syncing with ADSP clock.
diff --git a/drivers/sensors/Makefile b/drivers/sensors/Makefile
new file mode 100644
index 0000000..08d8a63
--- /dev/null
+++ b/drivers/sensors/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_SENSORS_SSC)	+= sensors_ssc.o
diff --git a/drivers/sensors/sensors_ssc.c b/drivers/sensors/sensors_ssc.c
new file mode 100644
index 0000000..d738767
--- /dev/null
+++ b/drivers/sensors/sensors_ssc.c
@@ -0,0 +1,419 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/msm_dsps.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/of_device.h>
+#include <asm/arch_timer.h>
+#include <linux/uaccess.h>
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/workqueue.h>
+
+#include <soc/qcom/subsystem_restart.h>
+
+#define IMAGE_LOAD_CMD 1
+#define IMAGE_UNLOAD_CMD 0
+#define CLASS_NAME	"ssc"
+#define DRV_NAME	"sensors"
+#define DRV_VERSION	"2.00"
+#ifdef CONFIG_COMPAT
+#define DSPS_IOCTL_READ_SLOW_TIMER32	_IOR(DSPS_IOCTL_MAGIC, 3, compat_uint_t)
+#endif
+
+#define QTICK_DIV_FACTOR	0x249F
+
+struct sns_ssc_control_s {
+	struct class *dev_class;
+	dev_t dev_num;
+	struct device *dev;
+	struct cdev *cdev;
+};
+static struct sns_ssc_control_s sns_ctl;
+
+static ssize_t slpi_boot_store(struct kobject *kobj,
+	struct kobj_attribute *attr,
+	const char *buf, size_t count);
+
+struct slpi_loader_private {
+	void *pil_h;
+	struct kobject *boot_slpi_obj;
+	struct attribute_group *attr_group;
+};
+
+static struct kobj_attribute slpi_boot_attribute =
+	__ATTR(boot, 0220, NULL, slpi_boot_store);
+
+static struct attribute *attrs[] = {
+	&slpi_boot_attribute.attr,
+	NULL,
+};
+
+static struct platform_device *slpi_private;
+static struct work_struct slpi_ldr_work;
+
+static void slpi_load_fw(struct work_struct *slpi_ldr_work)
+{
+	struct platform_device *pdev = slpi_private;
+	struct slpi_loader_private *priv = NULL;
+	int ret;
+	const char *firmware_name = NULL;
+
+	if (!pdev) {
+		dev_err(&pdev->dev, "%s: Platform device null\n", __func__);
+		goto fail;
+	}
+
+	if (!pdev->dev.of_node) {
+		dev_err(&pdev->dev,
+			"%s: Device tree information missing\n", __func__);
+		goto fail;
+	}
+
+	ret = of_property_read_string(pdev->dev.of_node,
+		"qcom,firmware-name", &firmware_name);
+	if (ret < 0) {
+		pr_err("can't get fw name.\n");
+		goto fail;
+	}
+
+	priv = platform_get_drvdata(pdev);
+	if (!priv) {
+		dev_err(&pdev->dev,
+		" %s: Private data get failed\n", __func__);
+		goto fail;
+	}
+
+	priv->pil_h = subsystem_get_with_fwname("slpi", firmware_name);
+	if (IS_ERR(priv->pil_h)) {
+		dev_err(&pdev->dev, "%s: pil get failed,\n",
+			__func__);
+		goto fail;
+	}
+
+	dev_dbg(&pdev->dev, "%s: SLPI image is loaded\n", __func__);
+	return;
+
+fail:
+	dev_err(&pdev->dev, "%s: SLPI image loading failed\n", __func__);
+}
+
+static void slpi_loader_do(struct platform_device *pdev)
+{
+	dev_dbg(&pdev->dev, "%s: scheduling work to load SLPI fw\n", __func__);
+	schedule_work(&slpi_ldr_work);
+}
+
+static void slpi_loader_unload(struct platform_device *pdev)
+{
+	struct slpi_loader_private *priv = NULL;
+
+	priv = platform_get_drvdata(pdev);
+
+	if (!priv)
+		return;
+
+	if (priv->pil_h) {
+		dev_dbg(&pdev->dev, "%s: calling subsystem put\n", __func__);
+		subsystem_put(priv->pil_h);
+		priv->pil_h = NULL;
+	}
+}
+
+static ssize_t slpi_boot_store(struct kobject *kobj,
+	struct kobj_attribute *attr,
+	const char *buf,
+	size_t count)
+{
+	int boot = 0;
+
+	if (sscanf(buf, "%du", &boot) != 1)
+		return -EINVAL;
+
+	if (boot == IMAGE_LOAD_CMD) {
+		pr_debug("%s: going to call slpi_loader_do\n", __func__);
+		slpi_loader_do(slpi_private);
+	} else if (boot == IMAGE_UNLOAD_CMD) {
+		pr_debug("%s: going to call slpi_unloader\n", __func__);
+		slpi_loader_unload(slpi_private);
+	}
+	return count;
+}
+
+static int slpi_loader_init_sysfs(struct platform_device *pdev)
+{
+	int ret = -EINVAL;
+	struct slpi_loader_private *priv = NULL;
+
+	slpi_private = NULL;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		ret = -ENOMEM;
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, priv);
+
+	priv->pil_h = NULL;
+	priv->boot_slpi_obj = NULL;
+	priv->attr_group = devm_kzalloc(&pdev->dev,
+				sizeof(*(priv->attr_group)),
+				GFP_KERNEL);
+	if (!priv->attr_group) {
+		dev_err(&pdev->dev, "%s: malloc attr_group failed\n",
+						__func__);
+		ret = -ENOMEM;
+		goto error_return;
+	}
+
+	priv->attr_group->attrs = attrs;
+
+	priv->boot_slpi_obj = kobject_create_and_add("boot_slpi", kernel_kobj);
+	if (!priv->boot_slpi_obj) {
+		dev_err(&pdev->dev, "%s: sysfs create and add failed\n",
+						__func__);
+		ret = -ENOMEM;
+		goto error_return;
+	}
+
+	ret = sysfs_create_group(priv->boot_slpi_obj, priv->attr_group);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: sysfs create group failed %d\n",
+							__func__, ret);
+		goto error_return;
+	}
+
+	slpi_private = pdev;
+
+	return 0;
+
+error_return:
+
+	if (priv->boot_slpi_obj) {
+		kobject_del(priv->boot_slpi_obj);
+		priv->boot_slpi_obj = NULL;
+	}
+
+	return ret;
+}
+
+static int slpi_loader_remove(struct platform_device *pdev)
+{
+	struct slpi_loader_private *priv = NULL;
+
+	priv = platform_get_drvdata(pdev);
+
+	if (!priv)
+		return 0;
+
+	if (priv->pil_h) {
+		subsystem_put(priv->pil_h);
+		priv->pil_h = NULL;
+	}
+
+	if (priv->boot_slpi_obj) {
+		sysfs_remove_group(priv->boot_slpi_obj, priv->attr_group);
+		kobject_del(priv->boot_slpi_obj);
+		priv->boot_slpi_obj = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * Read virtual QTimer clock ticks and scale down to 32KHz clock as used
+ * in DSPS
+ */
+static u32 sns_read_qtimer(void)
+{
+	u64 val;
+
+	val = arch_counter_get_cntvct();
+	/*
+	 * To convert ticks from 19.2 MHz clock to 32768 Hz clock:
+	 * x = (value * 32768) / 19200000
+	 * This is same as first left shift the value by 4 bits, i.e. multiply
+	 * by 16, and then divide by 0x249F. The latter is preferable since
+	 * QTimer tick (value) is 56-bit, so (value * 32768) could overflow,
+	 * while (value * 16) will never do
+	 */
+	val <<= 4;
+	do_div(val, QTICK_DIV_FACTOR);
+
+	return (u32)val;
+}
+
+static int sensors_ssc_open(struct inode *ip, struct file *fp)
+{
+	return 0;
+}
+
+static int sensors_ssc_release(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static long sensors_ssc_ioctl(struct file *file,
+			unsigned int cmd, unsigned long arg)
+{
+	int ret = 0;
+	u32 val = 0;
+
+	switch (cmd) {
+	case DSPS_IOCTL_READ_SLOW_TIMER:
+#ifdef CONFIG_COMPAT
+	case DSPS_IOCTL_READ_SLOW_TIMER32:
+#endif
+		val = sns_read_qtimer();
+		ret = put_user(val, (u32 __user *) arg);
+		break;
+
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+const struct file_operations sensors_ssc_fops = {
+	.owner = THIS_MODULE,
+	.open = sensors_ssc_open,
+	.release = sensors_ssc_release,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = sensors_ssc_ioctl,
+#endif
+	.unlocked_ioctl = sensors_ssc_ioctl
+};
+
+static int sensors_ssc_probe(struct platform_device *pdev)
+{
+	int ret = slpi_loader_init_sysfs(pdev);
+
+	if (ret != 0) {
+		dev_err(&pdev->dev, "%s: Error in initing sysfs\n", __func__);
+		return ret;
+	}
+
+	sns_ctl.dev_class = class_create(THIS_MODULE, CLASS_NAME);
+	if (sns_ctl.dev_class == NULL) {
+		pr_err("%s: class_create fail.\n", __func__);
+		goto res_err;
+	}
+
+	ret = alloc_chrdev_region(&sns_ctl.dev_num, 0, 1, DRV_NAME);
+	if (ret) {
+		pr_err("%s: alloc_chrdev_region fail.\n", __func__);
+		goto alloc_chrdev_region_err;
+	}
+
+	sns_ctl.dev = device_create(sns_ctl.dev_class, NULL,
+				     sns_ctl.dev_num,
+				     &sns_ctl, DRV_NAME);
+	if (IS_ERR(sns_ctl.dev)) {
+		pr_err("%s: device_create fail.\n", __func__);
+		goto device_create_err;
+	}
+
+	sns_ctl.cdev = cdev_alloc();
+	if (sns_ctl.cdev == NULL) {
+		pr_err("%s: cdev_alloc fail.\n", __func__);
+		goto cdev_alloc_err;
+	}
+	cdev_init(sns_ctl.cdev, &sensors_ssc_fops);
+	sns_ctl.cdev->owner = THIS_MODULE;
+
+	ret = cdev_add(sns_ctl.cdev, sns_ctl.dev_num, 1);
+	if (ret) {
+		pr_err("%s: cdev_add fail.\n", __func__);
+		goto cdev_add_err;
+	}
+
+	INIT_WORK(&slpi_ldr_work, slpi_load_fw);
+
+	return 0;
+
+cdev_add_err:
+	kfree(sns_ctl.cdev);
+cdev_alloc_err:
+	device_destroy(sns_ctl.dev_class, sns_ctl.dev_num);
+device_create_err:
+	unregister_chrdev_region(sns_ctl.dev_num, 1);
+alloc_chrdev_region_err:
+	class_destroy(sns_ctl.dev_class);
+res_err:
+	return -ENODEV;
+}
+
+static int sensors_ssc_remove(struct platform_device *pdev)
+{
+	slpi_loader_remove(pdev);
+	cdev_del(sns_ctl.cdev);
+	kfree(sns_ctl.cdev);
+	sns_ctl.cdev = NULL;
+	device_destroy(sns_ctl.dev_class, sns_ctl.dev_num);
+	unregister_chrdev_region(sns_ctl.dev_num, 1);
+	class_destroy(sns_ctl.dev_class);
+
+	return 0;
+}
+
+static const struct of_device_id msm_ssc_sensors_dt_match[] = {
+	{.compatible = "qcom,msm-ssc-sensors"},
+	{},
+};
+MODULE_DEVICE_TABLE(of, msm_ssc_sensors_dt_match);
+
+static struct platform_driver sensors_ssc_driver = {
+	.driver = {
+		.name = "sensors-ssc",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_ssc_sensors_dt_match,
+	},
+	.probe = sensors_ssc_probe,
+	.remove = sensors_ssc_remove,
+};
+
+static int __init sensors_ssc_init(void)
+{
+	int rc;
+
+	pr_debug("%s driver version %s.\n", DRV_NAME, DRV_VERSION);
+	rc = platform_driver_register(&sensors_ssc_driver);
+	if (rc) {
+		pr_err("%s: Failed to register sensors ssc driver\n",
+			__func__);
+		return rc;
+	}
+
+	return 0;
+}
+
+static void __exit sensors_ssc_exit(void)
+{
+	platform_driver_unregister(&sensors_ssc_driver);
+}
+
+module_init(sensors_ssc_init);
+module_exit(sensors_ssc_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Sensors SSC driver");
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 3311380..0bdcc99 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -545,6 +545,25 @@
 	  for the platforms that use APRv2.
 	  Say M if you want to enable this module.
 
+config MSM_PERFORMANCE
+	tristate "msm performance driver to support userspace hotplug requests"
+	default n
+	help
+	  This driver is used to provide CPU hotplug support to userspace.
+	  It ensures that no more than a user-specified number of CPUs stay
+	  online at any given point in time. This module can also restrict
+	  the max or min frequency of a CPU cluster.
+
+config MSM_PERFORMANCE_HOTPLUG_ON
+	bool "Hotplug functionality through msm_performance turned on"
+	depends on MSM_PERFORMANCE
+	default y
+	help
+	  If some other core-control driver is present turn off the core-control
+	  capability of msm_performance driver. Setting this flag to false will
+	  compile out the nodes needed for core-control functionality through
+	  msm_performance.
+
 config MSM_CDSP_LOADER
 	tristate "CDSP loader support"
 	depends on MSM_GLINK
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index ba00ef10..9d175cd 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -57,6 +57,8 @@
 obj-$(CONFIG_MSM_PIL)   +=      peripheral-loader.o
 obj-$(CONFIG_MSM_AVTIMER) += avtimer.o
 
+obj-$(CONFIG_MSM_PERFORMANCE) += msm_performance.o
+
 ifdef CONFIG_MSM_SUBSYSTEM_RESTART
        obj-y += subsystem_notif.o
        obj-y += subsystem_restart.o
diff --git a/drivers/soc/qcom/glink_loopback_server.c b/drivers/soc/qcom/glink_loopback_server.c
index 3b540f3..4e9b118 100644
--- a/drivers/soc/qcom/glink_loopback_server.c
+++ b/drivers/soc/qcom/glink_loopback_server.c
@@ -140,6 +140,7 @@
 	{"LOOPBACK_CTL_APSS", "mpss", "smem"},
 	{"LOOPBACK_CTL_APSS", "lpass", "smem"},
 	{"LOOPBACK_CTL_APSS", "dsps", "smem"},
+	{"LOOPBACK_CTL_APPS", "cdsp", "smem"},
 	{"LOOPBACK_CTL_APSS", "spss", "mailbox"},
 	{"LOOPBACK_CTL_APSS", "wdsp", "spi"},
 };
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index b759776..69e0ebc 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -197,6 +197,7 @@
 	ICNSS_MSA0_ASSIGNED,
 	ICNSS_WLFW_EXISTS,
 	ICNSS_WDOG_BITE,
+	ICNSS_SHUTDOWN_DONE,
 };
 
 struct ce_irq_list {
@@ -1983,9 +1984,13 @@
 	if (!priv->ops || !priv->ops->shutdown)
 		goto out;
 
+	if (test_bit(ICNSS_SHUTDOWN_DONE, &penv->state))
+		goto out;
+
 	icnss_pr_dbg("Calling driver shutdown state: 0x%lx\n", priv->state);
 
 	priv->ops->shutdown(&priv->pdev->dev);
+	set_bit(ICNSS_SHUTDOWN_DONE, &penv->state);
 
 out:
 	return 0;
@@ -2023,6 +2028,7 @@
 	}
 
 out:
+	clear_bit(ICNSS_SHUTDOWN_DONE, &penv->state);
 	return 0;
 
 call_probe:
@@ -2101,7 +2107,6 @@
 
 power_off:
 	icnss_hw_power_off(penv);
-	penv->ops = NULL;
 out:
 	return ret;
 }
@@ -2633,7 +2638,7 @@
 	}
 
 	ret = icnss_driver_event_post(ICNSS_DRIVER_EVENT_REGISTER_DRIVER,
-				      ICNSS_EVENT_SYNC, ops);
+				      0, ops);
 
 	if (ret == -EINTR)
 		ret = 0;
@@ -3655,6 +3660,9 @@
 		case ICNSS_WDOG_BITE:
 			seq_puts(s, "MODEM WDOG BITE");
 			continue;
+		case ICNSS_SHUTDOWN_DONE:
+			seq_puts(s, "SHUTDOWN DONE");
+			continue;
 		}
 
 		seq_printf(s, "UNKNOWN-%d", i);
diff --git a/drivers/soc/qcom/icnss_utils.c b/drivers/soc/qcom/icnss_utils.c
index 5e187d5..a7a0ffa 100644
--- a/drivers/soc/qcom/icnss_utils.c
+++ b/drivers/soc/qcom/icnss_utils.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -16,7 +16,7 @@
 #define ICNSS_MAX_CH_NUM 45
 
 static DEFINE_MUTEX(unsafe_channel_list_lock);
-static DEFINE_MUTEX(dfs_nol_info_lock);
+static DEFINE_SPINLOCK(dfs_nol_info_lock);
 
 static struct icnss_unsafe_channel_list {
 	u16 unsafe_ch_count;
@@ -77,27 +77,24 @@
 int icnss_wlan_set_dfs_nol(const void *info, u16 info_len)
 {
 	void *temp;
+	void *old_nol_info;
 	struct icnss_dfs_nol_info *dfs_info;
 
-	mutex_lock(&dfs_nol_info_lock);
-	if (!info || !info_len) {
-		mutex_unlock(&dfs_nol_info_lock);
+	if (!info || !info_len)
 		return -EINVAL;
-	}
 
-	temp = kmalloc(info_len, GFP_KERNEL);
-	if (!temp) {
-		mutex_unlock(&dfs_nol_info_lock);
+	temp = kmalloc(info_len, GFP_ATOMIC);
+	if (!temp)
 		return -ENOMEM;
-	}
 
 	memcpy(temp, info, info_len);
+	spin_lock_bh(&dfs_nol_info_lock);
 	dfs_info = &dfs_nol_info;
-	kfree(dfs_info->dfs_nol_info);
-
+	old_nol_info = dfs_info->dfs_nol_info;
 	dfs_info->dfs_nol_info = temp;
 	dfs_info->dfs_nol_info_len = info_len;
-	mutex_unlock(&dfs_nol_info_lock);
+	spin_unlock_bh(&dfs_nol_info_lock);
+	kfree(old_nol_info);
 
 	return 0;
 }
@@ -108,24 +105,21 @@
 	int len;
 	struct icnss_dfs_nol_info *dfs_info;
 
-	mutex_lock(&dfs_nol_info_lock);
-	if (!info || !info_len) {
-		mutex_unlock(&dfs_nol_info_lock);
+	if (!info || !info_len)
 		return -EINVAL;
-	}
+
+	spin_lock_bh(&dfs_nol_info_lock);
 
 	dfs_info = &dfs_nol_info;
-
 	if (dfs_info->dfs_nol_info == NULL ||
 	    dfs_info->dfs_nol_info_len == 0) {
-		mutex_unlock(&dfs_nol_info_lock);
+		spin_unlock_bh(&dfs_nol_info_lock);
 		return -ENOENT;
 	}
 
 	len = min(info_len, dfs_info->dfs_nol_info_len);
-
 	memcpy(info, dfs_info->dfs_nol_info, len);
-	mutex_unlock(&dfs_nol_info_lock);
+	spin_unlock_bh(&dfs_nol_info_lock);
 
 	return len;
 }
diff --git a/drivers/soc/qcom/msm_performance.c b/drivers/soc/qcom/msm_performance.c
new file mode 100644
index 0000000..25e6a9d
--- /dev/null
+++ b/drivers/soc/qcom/msm_performance.c
@@ -0,0 +1,2771 @@
+/*
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/moduleparam.h>
+#include <linux/cpumask.h>
+#include <linux/cpufreq.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
+#include <trace/events/power.h>
+#include <linux/sysfs.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/kthread.h>
+
+static struct mutex managed_cpus_lock;
+
+/* Maximum number to clusters that this module will manage */
+static unsigned int num_clusters;
+struct cluster {
+	cpumask_var_t cpus;
+	/* Number of CPUs to maintain online */
+	int max_cpu_request;
+	/* To track CPUs that the module decides to offline */
+	cpumask_var_t offlined_cpus;
+	/* stats for load detection */
+	/* IO */
+	u64 last_io_check_ts;
+	unsigned int iowait_enter_cycle_cnt;
+	unsigned int iowait_exit_cycle_cnt;
+	spinlock_t iowait_lock;
+	unsigned int cur_io_busy;
+	bool io_change;
+	/* CPU */
+	unsigned int mode;
+	bool mode_change;
+	u64 last_mode_check_ts;
+	unsigned int single_enter_cycle_cnt;
+	unsigned int single_exit_cycle_cnt;
+	unsigned int multi_enter_cycle_cnt;
+	unsigned int multi_exit_cycle_cnt;
+	spinlock_t mode_lock;
+	/* Perf Cluster Peak Loads */
+	unsigned int perf_cl_peak;
+	u64 last_perf_cl_check_ts;
+	bool perf_cl_detect_state_change;
+	unsigned int perf_cl_peak_enter_cycle_cnt;
+	unsigned int perf_cl_peak_exit_cycle_cnt;
+	spinlock_t perf_cl_peak_lock;
+	/* Tunables */
+	unsigned int single_enter_load;
+	unsigned int pcpu_multi_enter_load;
+	unsigned int perf_cl_peak_enter_load;
+	unsigned int single_exit_load;
+	unsigned int pcpu_multi_exit_load;
+	unsigned int perf_cl_peak_exit_load;
+	unsigned int single_enter_cycles;
+	unsigned int single_exit_cycles;
+	unsigned int multi_enter_cycles;
+	unsigned int multi_exit_cycles;
+	unsigned int perf_cl_peak_enter_cycles;
+	unsigned int perf_cl_peak_exit_cycles;
+	unsigned int current_freq;
+	spinlock_t timer_lock;
+	unsigned int timer_rate;
+	struct timer_list mode_exit_timer;
+	struct timer_list perf_cl_peak_mode_exit_timer;
+};
+
+static struct cluster **managed_clusters;
+static bool clusters_inited;
+
+/* Work to evaluate the onlining/offlining CPUs */
+static struct delayed_work evaluate_hotplug_work;
+
+/* To handle cpufreq min/max request */
+struct cpu_status {
+	unsigned int min;
+	unsigned int max;
+};
+static DEFINE_PER_CPU(struct cpu_status, cpu_stats);
+
+static unsigned int num_online_managed(struct cpumask *mask);
+static int init_cluster_control(void);
+static int rm_high_pwr_cost_cpus(struct cluster *cl);
+static int init_events_group(void);
+static DEFINE_PER_CPU(unsigned int, cpu_power_cost);
+struct events {
+	spinlock_t cpu_hotplug_lock;
+	bool cpu_hotplug;
+	bool init_success;
+};
+static struct events events_group;
+static struct task_struct *events_notify_thread;
+
+#define LAST_UPDATE_TOL		USEC_PER_MSEC
+
+struct input_events {
+	unsigned int evt_x_cnt;
+	unsigned int evt_y_cnt;
+	unsigned int evt_pres_cnt;
+	unsigned int evt_dist_cnt;
+};
+struct trig_thr {
+	unsigned int pwr_cl_trigger_threshold;
+	unsigned int perf_cl_trigger_threshold;
+	unsigned int ip_evt_threshold;
+};
+struct load_stats {
+	u64 last_wallclock;
+	/* IO wait related */
+	u64 last_iowait;
+	unsigned int last_iopercent;
+	/* CPU load related */
+	unsigned int cpu_load;
+	/* CPU Freq */
+	unsigned int freq;
+};
+static bool input_events_handler_registered;
+static struct input_events *ip_evts;
+static struct trig_thr thr;
+static unsigned int use_input_evts_with_hi_slvt_detect;
+static int register_input_handler(void);
+static void unregister_input_handler(void);
+static DEFINE_PER_CPU(struct load_stats, cpu_load_stats);
+
+/* Bitmask to keep track of the workloads being detected */
+static unsigned int workload_detect;
+#define IO_DETECT	1
+#define MODE_DETECT	2
+#define PERF_CL_PEAK_DETECT	4
+
+/* IOwait related tunables */
+static unsigned int io_enter_cycles = 4;
+static unsigned int io_exit_cycles = 4;
+static u64 iowait_ceiling_pct = 25;
+static u64 iowait_floor_pct = 8;
+#define LAST_IO_CHECK_TOL	(3 * USEC_PER_MSEC)
+
+static unsigned int aggr_iobusy;
+static unsigned int aggr_mode;
+
+static struct task_struct *notify_thread;
+
+static struct input_handler *handler;
+
+/* CPU workload detection related */
+#define NO_MODE		(0)
+#define SINGLE		(1)
+#define MULTI		(2)
+#define MIXED		(3)
+#define PERF_CL_PEAK		(4)
+#define DEF_SINGLE_ENT		90
+#define DEF_PCPU_MULTI_ENT	85
+#define DEF_PERF_CL_PEAK_ENT	80
+#define DEF_SINGLE_EX		60
+#define DEF_PCPU_MULTI_EX	50
+#define DEF_PERF_CL_PEAK_EX		70
+#define DEF_SINGLE_ENTER_CYCLE	4
+#define DEF_SINGLE_EXIT_CYCLE	4
+#define DEF_MULTI_ENTER_CYCLE	4
+#define DEF_MULTI_EXIT_CYCLE	4
+#define DEF_PERF_CL_PEAK_ENTER_CYCLE	100
+#define DEF_PERF_CL_PEAK_EXIT_CYCLE	20
+#define LAST_LD_CHECK_TOL	(2 * USEC_PER_MSEC)
+#define CLUSTER_0_THRESHOLD_FREQ	147000
+#define CLUSTER_1_THRESHOLD_FREQ	190000
+#define INPUT_EVENT_CNT_THRESHOLD	15
+#define MAX_LENGTH_CPU_STRING	256
+
+/**************************sysfs start********************************/
+
+static int set_num_clusters(const char *buf, const struct kernel_param *kp)
+{
+	unsigned int val;
+
+	if (sscanf(buf, "%u\n", &val) != 1)
+		return -EINVAL;
+	if (num_clusters)
+		return -EINVAL;
+
+	num_clusters = val;
+
+	if (init_cluster_control()) {
+		num_clusters = 0;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int get_num_clusters(char *buf, const struct kernel_param *kp)
+{
+	return snprintf(buf, PAGE_SIZE, "%u", num_clusters);
+}
+
+static const struct kernel_param_ops param_ops_num_clusters = {
+	.set = set_num_clusters,
+	.get = get_num_clusters,
+};
+device_param_cb(num_clusters, &param_ops_num_clusters, NULL, 0644);
+
+/*
+ * Set the per-cluster max-online-CPU requests from a ':'-separated list
+ * (one value per cluster) and kick the hotplug evaluation work.
+ */
+static int set_max_cpus(const char *buf, const struct kernel_param *kp)
+{
+	unsigned int i, ntokens = 0;
+	const char *cp = buf;
+	int val;
+
+	if (!clusters_inited)
+		return -EINVAL;
+
+	/* Expect exactly num_clusters values, i.e. num_clusters-1 colons */
+	while ((cp = strpbrk(cp + 1, ":")))
+		ntokens++;
+
+	if (ntokens != (num_clusters - 1))
+		return -EINVAL;
+
+	cp = buf;
+	for (i = 0; i < num_clusters; i++) {
+
+		if (sscanf(cp, "%d\n", &val) != 1)
+			return -EINVAL;
+		/*
+		 * Only an upper bound is enforced; negative values pass —
+		 * presumably meaning "no limit". TODO confirm with callers.
+		 */
+		if (val > (int)cpumask_weight(managed_clusters[i]->cpus))
+			return -EINVAL;
+
+		managed_clusters[i]->max_cpu_request = val;
+
+		cp = strnchr(cp, strlen(cp), ':');
+		cp++;
+		trace_set_max_cpus(cpumask_bits(managed_clusters[i]->cpus)[0],
+								val);
+	}
+
+	schedule_delayed_work(&evaluate_hotplug_work, 0);
+
+	return 0;
+}
+
+/* Report the per-cluster max CPU requests as "v0:v1 ... " */
+static int get_max_cpus(char *buf, const struct kernel_param *kp)
+{
+	int i, cnt = 0;
+
+	if (!clusters_inited)
+		return cnt;
+
+	for (i = 0; i < num_clusters; i++)
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+				"%d:", managed_clusters[i]->max_cpu_request);
+	cnt--;
+	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+	return cnt;
+}
+
+static const struct kernel_param_ops param_ops_max_cpus = {
+	.set = set_max_cpus,
+	.get = get_max_cpus,
+};
+
+#ifdef CONFIG_MSM_PERFORMANCE_HOTPLUG_ON
+device_param_cb(max_cpus, &param_ops_max_cpus, NULL, 0644);
+#endif
+
+/*
+ * Assign a cpulist (e.g. "0-3") to the first cluster whose cpumask is
+ * still empty; clusters are therefore populated in write order.
+ */
+static int set_managed_cpus(const char *buf, const struct kernel_param *kp)
+{
+	int i, ret;
+	struct cpumask tmp_mask;
+
+	if (!clusters_inited)
+		return -EINVAL;
+
+	ret = cpulist_parse(buf, &tmp_mask);
+
+	if (ret)
+		return ret;
+
+	for (i = 0; i < num_clusters; i++) {
+		if (cpumask_empty(managed_clusters[i]->cpus)) {
+			mutex_lock(&managed_cpus_lock);
+			cpumask_copy(managed_clusters[i]->cpus, &tmp_mask);
+			cpumask_clear(managed_clusters[i]->offlined_cpus);
+			mutex_unlock(&managed_cpus_lock);
+			break;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Report the CPUs of every cluster as a ':'-separated list of cpulist
+ * strings. Each cluster is printed into buf, accumulated into tmp, and
+ * the result is copied back into buf at the end.
+ */
+static int get_managed_cpus(char *buf, const struct kernel_param *kp)
+{
+	int i, cnt = 0, total_cnt = 0;
+	char tmp[MAX_LENGTH_CPU_STRING] = "";
+
+	if (!clusters_inited)
+		return cnt;
+
+	for (i = 0; i < num_clusters; i++) {
+		cnt = cpumap_print_to_pagebuf(true, buf,
+						managed_clusters[i]->cpus);
+		if ((i + 1) < num_clusters &&
+		    (total_cnt + cnt + 1) <= MAX_LENGTH_CPU_STRING) {
+			snprintf(tmp + total_cnt, cnt, "%s", buf);
+			/*
+			 * Index relative to this cluster's segment
+			 * (total_cnt + ...); indexing with cnt alone would
+			 * overwrite the previous cluster's text.
+			 */
+			tmp[total_cnt + cnt - 1] = ':';
+			tmp[total_cnt + cnt] = '\0';
+			total_cnt += cnt;
+		} else if ((i + 1) == num_clusters &&
+			(total_cnt + cnt) <= MAX_LENGTH_CPU_STRING) {
+			snprintf(tmp + total_cnt, cnt, "%s", buf);
+			total_cnt += cnt;
+		} else {
+			pr_err("invalid string for managed_cpu:%s%s\n", tmp,
+				buf);
+			break;
+		}
+	}
+	snprintf(buf, PAGE_SIZE, "%s", tmp);
+	return total_cnt;
+}
+
+/* sysfs: /sys/module/msm_performance/parameters/managed_cpus */
+static const struct kernel_param_ops param_ops_managed_cpus = {
+	.set = set_managed_cpus,
+	.get = get_managed_cpus,
+};
+device_param_cb(managed_cpus, &param_ops_managed_cpus, NULL, 0644);
+
+/* Read-only node: To display all the online managed CPUs */
+static int get_managed_online_cpus(char *buf, const struct kernel_param *kp)
+{
+	int i, cnt = 0, total_cnt = 0;
+	char tmp[MAX_LENGTH_CPU_STRING] = "";
+	struct cpumask tmp_mask;
+	struct cluster *i_cl;
+
+	if (!clusters_inited)
+		return cnt;
+
+	for (i = 0; i < num_clusters; i++) {
+		i_cl = managed_clusters[i];
+
+		/* online = cluster CPUs minus those tracked as offlined */
+		cpumask_clear(&tmp_mask);
+		cpumask_complement(&tmp_mask, i_cl->offlined_cpus);
+		cpumask_and(&tmp_mask, i_cl->cpus, &tmp_mask);
+
+		cnt = cpumap_print_to_pagebuf(true, buf, &tmp_mask);
+		if ((i + 1) < num_clusters &&
+		    (total_cnt + cnt + 1) <= MAX_LENGTH_CPU_STRING) {
+			snprintf(tmp + total_cnt, cnt, "%s", buf);
+			/*
+			 * Index relative to this cluster's segment
+			 * (total_cnt + ...); indexing with cnt alone would
+			 * overwrite the previous cluster's text.
+			 */
+			tmp[total_cnt + cnt - 1] = ':';
+			tmp[total_cnt + cnt] = '\0';
+			total_cnt += cnt;
+		} else if ((i + 1) == num_clusters &&
+			   (total_cnt + cnt) <= MAX_LENGTH_CPU_STRING) {
+			snprintf(tmp + total_cnt, cnt, "%s", buf);
+			total_cnt += cnt;
+		} else {
+			pr_err("invalid string for managed_cpu:%s%s\n", tmp,
+				buf);
+			break;
+		}
+	}
+	snprintf(buf, PAGE_SIZE, "%s", tmp);
+	return total_cnt;
+}
+
+/* Read-only sysfs node; only present when hotplug management is built in */
+static const struct kernel_param_ops param_ops_managed_online_cpus = {
+	.get = get_managed_online_cpus,
+};
+
+#ifdef CONFIG_MSM_PERFORMANCE_HOTPLUG_ON
+device_param_cb(managed_online_cpus, &param_ops_managed_online_cpus,
+							NULL, 0444);
+#endif
+/*
+ * Userspace sends cpu#:min_freq_value to vote for min_freq_value as the new
+ * scaling_min. To withdraw its vote it needs to enter cpu#:0
+ */
+static int set_cpu_min_freq(const char *buf, const struct kernel_param *kp)
+{
+	int i, j, ntokens = 0;
+	unsigned int val, cpu;
+	const char *cp = buf;
+	struct cpu_status *i_cpu_stats;
+	struct cpufreq_policy policy;
+	cpumask_var_t limit_mask;
+	int ret = 0;
+
+	while ((cp = strpbrk(cp + 1, " :")))
+		ntokens++;
+
+	/* CPU:value pair */
+	if (!(ntokens % 2))
+		return -EINVAL;
+
+	/*
+	 * cpumask_var_t is a bare pointer under CONFIG_CPUMASK_OFFSTACK, so
+	 * it must be allocated before cpumask_clear() touches it, and freed
+	 * on every exit path.
+	 */
+	if (!alloc_cpumask_var(&limit_mask, GFP_KERNEL))
+		return -ENOMEM;
+
+	cp = buf;
+	cpumask_clear(limit_mask);
+	for (i = 0; i < ntokens; i += 2) {
+		if (sscanf(cp, "%u:%u", &cpu, &val) != 2) {
+			ret = -EINVAL;
+			goto release_mask;
+		}
+		if (cpu > (num_present_cpus() - 1)) {
+			ret = -EINVAL;
+			goto release_mask;
+		}
+
+		i_cpu_stats = &per_cpu(cpu_stats, cpu);
+
+		i_cpu_stats->min = val;
+		cpumask_set_cpu(cpu, limit_mask);
+
+		cp = strnchr(cp, strlen(cp), ' ');
+		cp++;
+	}
+
+	/*
+	 * Since on synchronous systems policy is shared amongst multiple
+	 * CPUs only one CPU needs to be updated for the limit to be
+	 * reflected for the entire cluster. We can avoid updating the policy
+	 * of other CPUs in the cluster once it is done for at least one CPU
+	 * in the cluster
+	 */
+	get_online_cpus();
+	for_each_cpu(i, limit_mask) {
+		i_cpu_stats = &per_cpu(cpu_stats, i);
+
+		if (cpufreq_get_policy(&policy, i))
+			continue;
+
+		if (cpu_online(i) && (policy.min != i_cpu_stats->min)) {
+			if (cpufreq_update_policy(i))
+				continue;
+		}
+		/* One successful update covers every CPU sharing the policy */
+		for_each_cpu(j, policy.related_cpus)
+			cpumask_clear_cpu(j, limit_mask);
+	}
+	put_online_cpus();
+
+release_mask:
+	free_cpumask_var(limit_mask);
+	return ret;
+}
+
+/* List each present CPU's current min-frequency vote as "cpu:min " pairs. */
+static int get_cpu_min_freq(char *buf, const struct kernel_param *kp)
+{
+	int written = 0;
+	int c;
+
+	for_each_present_cpu(c)
+		written += snprintf(buf + written, PAGE_SIZE - written,
+				"%d:%u ", c, per_cpu(cpu_stats, c).min);
+	written += snprintf(buf + written, PAGE_SIZE - written, "\n");
+	return written;
+}
+
+/* sysfs: /sys/module/msm_performance/parameters/cpu_min_freq */
+static const struct kernel_param_ops param_ops_cpu_min_freq = {
+	.set = set_cpu_min_freq,
+	.get = get_cpu_min_freq,
+};
+module_param_cb(cpu_min_freq, &param_ops_cpu_min_freq, NULL, 0644);
+
+/*
+ * Userspace sends cpu#:max_freq_value to vote for max_freq_value as the new
+ * scaling_max. To withdraw its vote it needs to enter cpu#:UINT_MAX
+ */
+static int set_cpu_max_freq(const char *buf, const struct kernel_param *kp)
+{
+	int i, j, ntokens = 0;
+	unsigned int val, cpu;
+	const char *cp = buf;
+	struct cpu_status *i_cpu_stats;
+	struct cpufreq_policy policy;
+	cpumask_var_t limit_mask;
+	int ret = 0;
+
+	while ((cp = strpbrk(cp + 1, " :")))
+		ntokens++;
+
+	/* CPU:value pair */
+	if (!(ntokens % 2))
+		return -EINVAL;
+
+	/*
+	 * cpumask_var_t is a bare pointer under CONFIG_CPUMASK_OFFSTACK, so
+	 * it must be allocated before cpumask_clear() touches it, and freed
+	 * on every exit path.
+	 */
+	if (!alloc_cpumask_var(&limit_mask, GFP_KERNEL))
+		return -ENOMEM;
+
+	cp = buf;
+	cpumask_clear(limit_mask);
+	for (i = 0; i < ntokens; i += 2) {
+		if (sscanf(cp, "%u:%u", &cpu, &val) != 2) {
+			ret = -EINVAL;
+			goto release_mask;
+		}
+		if (cpu > (num_present_cpus() - 1)) {
+			ret = -EINVAL;
+			goto release_mask;
+		}
+
+		i_cpu_stats = &per_cpu(cpu_stats, cpu);
+
+		i_cpu_stats->max = val;
+		cpumask_set_cpu(cpu, limit_mask);
+
+		cp = strnchr(cp, strlen(cp), ' ');
+		cp++;
+	}
+
+	/* As for min freq: one policy update per shared-policy cluster */
+	get_online_cpus();
+	for_each_cpu(i, limit_mask) {
+		i_cpu_stats = &per_cpu(cpu_stats, i);
+		if (cpufreq_get_policy(&policy, i))
+			continue;
+
+		if (cpu_online(i) && (policy.max != i_cpu_stats->max)) {
+			if (cpufreq_update_policy(i))
+				continue;
+		}
+		for_each_cpu(j, policy.related_cpus)
+			cpumask_clear_cpu(j, limit_mask);
+	}
+	put_online_cpus();
+
+release_mask:
+	free_cpumask_var(limit_mask);
+	return ret;
+}
+
+/* List each present CPU's current max-frequency vote as "cpu:max " pairs. */
+static int get_cpu_max_freq(char *buf, const struct kernel_param *kp)
+{
+	int written = 0;
+	int c;
+
+	for_each_present_cpu(c)
+		written += snprintf(buf + written, PAGE_SIZE - written,
+				"%d:%u ", c, per_cpu(cpu_stats, c).max);
+	written += snprintf(buf + written, PAGE_SIZE - written, "\n");
+	return written;
+}
+
+/* sysfs: /sys/module/msm_performance/parameters/cpu_max_freq */
+static const struct kernel_param_ops param_ops_cpu_max_freq = {
+	.set = set_cpu_max_freq,
+	.get = get_cpu_max_freq,
+};
+module_param_cb(cpu_max_freq, &param_ops_cpu_max_freq, NULL, 0644);
+
+/* Set the input-event count threshold used by perf-cl-peak detection */
+static int set_ip_evt_trigger_threshold(const char *buf,
+		const struct kernel_param *kp)
+{
+	unsigned int val;
+
+	if (sscanf(buf, "%u\n", &val) != 1)
+		return -EINVAL;
+
+	thr.ip_evt_threshold = val;
+	return 0;
+}
+
+/* Report the input-event count threshold */
+static int get_ip_evt_trigger_threshold(char *buf,
+		const struct kernel_param *kp)
+{
+	return snprintf(buf, PAGE_SIZE, "%u", thr.ip_evt_threshold);
+}
+
+static const struct kernel_param_ops param_ops_ip_evt_trig_thr = {
+	.set = set_ip_evt_trigger_threshold,
+	.get = get_ip_evt_trigger_threshold,
+};
+device_param_cb(ip_evt_trig_thr, &param_ops_ip_evt_trig_thr, NULL, 0644);
+
+
+/* Set the perf-cluster frequency trigger threshold (kHz) */
+static int set_perf_cl_trigger_threshold(const char *buf,
+		 const struct kernel_param *kp)
+{
+	unsigned int val;
+
+	if (sscanf(buf, "%u\n", &val) != 1)
+		return -EINVAL;
+
+	thr.perf_cl_trigger_threshold = val;
+	return 0;
+}
+
+/* Report the perf-cluster frequency trigger threshold */
+static int get_perf_cl_trigger_threshold(char *buf,
+		const struct kernel_param *kp)
+{
+	return snprintf(buf, PAGE_SIZE, "%u", thr.perf_cl_trigger_threshold);
+}
+
+static const struct kernel_param_ops param_ops_perf_trig_thr = {
+	.set = set_perf_cl_trigger_threshold,
+	.get = get_perf_cl_trigger_threshold,
+};
+device_param_cb(perf_cl_trig_thr, &param_ops_perf_trig_thr, NULL, 0644);
+
+
+/* Set the power-cluster frequency trigger threshold (kHz) */
+static int set_pwr_cl_trigger_threshold(const char *buf,
+		const struct kernel_param *kp)
+{
+	unsigned int val;
+
+	if (sscanf(buf, "%u\n", &val) != 1)
+		return -EINVAL;
+
+	thr.pwr_cl_trigger_threshold = val;
+	return 0;
+}
+
+/* Report the power-cluster frequency trigger threshold */
+static int get_pwr_cl_trigger_threshold(char *buf,
+		const struct kernel_param *kp)
+{
+	return snprintf(buf, PAGE_SIZE, "%u", thr.pwr_cl_trigger_threshold);
+}
+
+static const struct kernel_param_ops param_ops_pwr_trig_thr = {
+	.set = set_pwr_cl_trigger_threshold,
+	.get = get_pwr_cl_trigger_threshold,
+};
+device_param_cb(pwr_cl_trig_thr, &param_ops_pwr_trig_thr, NULL, 0644);
+
+/*
+ * Return 1 when the cluster's current frequency has reached its trigger
+ * threshold: the power-cluster threshold for idx 0, the perf-cluster
+ * threshold for any other index.
+ */
+static int freq_greater_than_threshold(struct cluster *cl, int idx)
+{
+	unsigned int trigger;
+
+	trigger = idx ? thr.perf_cl_trigger_threshold :
+			thr.pwr_cl_trigger_threshold;
+
+	return (cl->current_freq >= trigger) ? 1 : 0;
+}
+
+/*
+ * True when recent input activity exceeds the event-count threshold on
+ * either axis, or when input gating is disabled entirely (in which case
+ * the check always passes).
+ */
+static bool input_events_greater_than_threshold(void)
+{
+	if (!use_input_evts_with_hi_slvt_detect)
+		return true;
+
+	return (ip_evts->evt_x_cnt >= thr.ip_evt_threshold) ||
+	       (ip_evts->evt_y_cnt >= thr.ip_evt_threshold);
+}
+
+/*
+ * Set per-cluster single-mode enter-load thresholds from a ':'-separated
+ * list (one value per cluster); each must be >= that cluster's exit load.
+ */
+static int set_single_enter_load(const char *buf, const struct kernel_param *kp)
+{
+	unsigned int val, i, ntokens = 0;
+	const char *cp = buf;
+	unsigned int bytes_left;
+
+	if (!clusters_inited)
+		return -EINVAL;
+
+	while ((cp = strpbrk(cp + 1, ":")))
+		ntokens++;
+
+	if (ntokens != (num_clusters - 1))
+		return -EINVAL;
+
+	cp = buf;
+	for (i = 0; i < num_clusters; i++) {
+
+		if (sscanf(cp, "%u\n", &val) != 1)
+			return -EINVAL;
+
+		/* Keep enter >= exit so the hysteresis band stays valid */
+		if (val < managed_clusters[i]->single_exit_load)
+			return -EINVAL;
+
+		managed_clusters[i]->single_enter_load = val;
+
+		bytes_left = PAGE_SIZE - (cp - buf);
+		cp = strnchr(cp, bytes_left, ':');
+		cp++;
+	}
+
+	return 0;
+}
+
+/* Report per-cluster single-mode enter loads as "v0:v1 ... " */
+static int get_single_enter_load(char *buf, const struct kernel_param *kp)
+{
+	int i, cnt = 0;
+
+	if (!clusters_inited)
+		return cnt;
+
+	for (i = 0; i < num_clusters; i++)
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+				"%u:", managed_clusters[i]->single_enter_load);
+	cnt--;
+	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+	return cnt;
+}
+
+static const struct kernel_param_ops param_ops_single_enter_load = {
+	.set = set_single_enter_load,
+	.get = get_single_enter_load,
+};
+device_param_cb(single_enter_load, &param_ops_single_enter_load, NULL, 0644);
+
+/*
+ * Set per-cluster single-mode exit-load thresholds from a ':'-separated
+ * list; each must be <= that cluster's enter load.
+ */
+static int set_single_exit_load(const char *buf, const struct kernel_param *kp)
+{
+	unsigned int val, i, ntokens = 0;
+	const char *cp = buf;
+	unsigned int bytes_left;
+
+	if (!clusters_inited)
+		return -EINVAL;
+
+	while ((cp = strpbrk(cp + 1, ":")))
+		ntokens++;
+
+	if (ntokens != (num_clusters - 1))
+		return -EINVAL;
+
+	cp = buf;
+	for (i = 0; i < num_clusters; i++) {
+
+		if (sscanf(cp, "%u\n", &val) != 1)
+			return -EINVAL;
+
+		/* Keep exit <= enter so the hysteresis band stays valid */
+		if (val > managed_clusters[i]->single_enter_load)
+			return -EINVAL;
+
+		managed_clusters[i]->single_exit_load = val;
+
+		bytes_left = PAGE_SIZE - (cp - buf);
+		cp = strnchr(cp, bytes_left, ':');
+		cp++;
+	}
+
+	return 0;
+}
+
+/* Report per-cluster single-mode exit loads as "v0:v1 ... " */
+static int get_single_exit_load(char *buf, const struct kernel_param *kp)
+{
+	int i, cnt = 0;
+
+	if (!clusters_inited)
+		return cnt;
+
+	for (i = 0; i < num_clusters; i++)
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+				"%u:", managed_clusters[i]->single_exit_load);
+	cnt--;
+	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+	return cnt;
+}
+
+static const struct kernel_param_ops param_ops_single_exit_load = {
+	.set = set_single_exit_load,
+	.get = get_single_exit_load,
+};
+device_param_cb(single_exit_load, &param_ops_single_exit_load, NULL, 0644);
+
+/*
+ * Set per-cluster per-CPU multi-mode enter-load thresholds from a
+ * ':'-separated list; each must be >= that cluster's multi exit load.
+ */
+static int set_pcpu_multi_enter_load(const char *buf,
+					const struct kernel_param *kp)
+{
+	unsigned int val, i, ntokens = 0;
+	const char *cp = buf;
+	unsigned int bytes_left;
+
+	if (!clusters_inited)
+		return -EINVAL;
+
+	while ((cp = strpbrk(cp + 1, ":")))
+		ntokens++;
+
+	if (ntokens != (num_clusters - 1))
+		return -EINVAL;
+
+	cp = buf;
+	for (i = 0; i < num_clusters; i++) {
+
+		if (sscanf(cp, "%u\n", &val) != 1)
+			return -EINVAL;
+
+		/* Keep enter >= exit so the hysteresis band stays valid */
+		if (val < managed_clusters[i]->pcpu_multi_exit_load)
+			return -EINVAL;
+
+		managed_clusters[i]->pcpu_multi_enter_load = val;
+
+		bytes_left = PAGE_SIZE - (cp - buf);
+		cp = strnchr(cp, bytes_left, ':');
+		cp++;
+	}
+
+	return 0;
+}
+
+/* Report per-cluster multi-mode enter loads as "v0:v1 ... " */
+static int get_pcpu_multi_enter_load(char *buf, const struct kernel_param *kp)
+{
+	int i, cnt = 0;
+
+	if (!clusters_inited)
+		return cnt;
+
+	for (i = 0; i < num_clusters; i++)
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+			"%u:", managed_clusters[i]->pcpu_multi_enter_load);
+	cnt--;
+	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+	return cnt;
+}
+
+static const struct kernel_param_ops param_ops_pcpu_multi_enter_load = {
+	.set = set_pcpu_multi_enter_load,
+	.get = get_pcpu_multi_enter_load,
+};
+device_param_cb(pcpu_multi_enter_load, &param_ops_pcpu_multi_enter_load,
+								NULL, 0644);
+
+/*
+ * Set per-cluster per-CPU multi-mode exit-load thresholds from a
+ * ':'-separated list; each must be <= that cluster's multi enter load.
+ */
+static int set_pcpu_multi_exit_load(const char *buf,
+						const struct kernel_param *kp)
+{
+	unsigned int val, i, ntokens = 0;
+	const char *cp = buf;
+	unsigned int bytes_left;
+
+	if (!clusters_inited)
+		return -EINVAL;
+
+	while ((cp = strpbrk(cp + 1, ":")))
+		ntokens++;
+
+	if (ntokens != (num_clusters - 1))
+		return -EINVAL;
+
+	cp = buf;
+	for (i = 0; i < num_clusters; i++) {
+
+		if (sscanf(cp, "%u\n", &val) != 1)
+			return -EINVAL;
+
+		/* Keep exit <= enter so the hysteresis band stays valid */
+		if (val > managed_clusters[i]->pcpu_multi_enter_load)
+			return -EINVAL;
+
+		managed_clusters[i]->pcpu_multi_exit_load = val;
+
+		bytes_left = PAGE_SIZE - (cp - buf);
+		cp = strnchr(cp, bytes_left, ':');
+		cp++;
+	}
+
+	return 0;
+}
+
+/* Report per-cluster multi-mode exit loads as "v0:v1 ... " */
+static int get_pcpu_multi_exit_load(char *buf, const struct kernel_param *kp)
+{
+	int i, cnt = 0;
+
+	if (!clusters_inited)
+		return cnt;
+
+	for (i = 0; i < num_clusters; i++)
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+			"%u:", managed_clusters[i]->pcpu_multi_exit_load);
+	cnt--;
+	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+	return cnt;
+}
+
+static const struct kernel_param_ops param_ops_pcpu_multi_exit_load = {
+	.set = set_pcpu_multi_exit_load,
+	.get = get_pcpu_multi_exit_load,
+};
+device_param_cb(pcpu_multi_exit_load, &param_ops_pcpu_multi_exit_load,
+		NULL, 0644);
+/*
+ * Set per-cluster perf-cl-peak enter-load thresholds from a ':'-separated
+ * list; each must be >= that cluster's perf-cl-peak exit load.
+ */
+static int set_perf_cl_peak_enter_load(const char *buf,
+				const struct kernel_param *kp)
+{
+	unsigned int val, i, ntokens = 0;
+	const char *cp = buf;
+	unsigned int bytes_left;
+
+	if (!clusters_inited)
+		return -EINVAL;
+
+	while ((cp = strpbrk(cp + 1, ":")))
+		ntokens++;
+
+	if (ntokens != (num_clusters - 1))
+		return -EINVAL;
+
+	cp = buf;
+	for (i = 0; i < num_clusters; i++) {
+
+		if (sscanf(cp, "%u\n", &val) != 1)
+			return -EINVAL;
+
+		/* Keep enter >= exit so the hysteresis band stays valid */
+		if (val < managed_clusters[i]->perf_cl_peak_exit_load)
+			return -EINVAL;
+
+		managed_clusters[i]->perf_cl_peak_enter_load = val;
+
+		bytes_left = PAGE_SIZE - (cp - buf);
+		cp = strnchr(cp, bytes_left, ':');
+		cp++;
+	}
+
+	return 0;
+}
+
+/* Report per-cluster perf-cl-peak enter loads as "v0:v1 ... " */
+static int get_perf_cl_peak_enter_load(char *buf,
+				const struct kernel_param *kp)
+{
+	int i, cnt = 0;
+
+	if (!clusters_inited)
+		return cnt;
+
+	for (i = 0; i < num_clusters; i++)
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+			"%u:", managed_clusters[i]->perf_cl_peak_enter_load);
+	cnt--;
+	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+	return cnt;
+}
+
+static const struct kernel_param_ops param_ops_perf_cl_peak_enter_load = {
+	.set = set_perf_cl_peak_enter_load,
+	.get = get_perf_cl_peak_enter_load,
+};
+device_param_cb(perf_cl_peak_enter_load, &param_ops_perf_cl_peak_enter_load,
+		 NULL, 0644);
+
+/*
+ * Set per-cluster perf-cl-peak exit-load thresholds from a ':'-separated
+ * list; each must be <= that cluster's perf-cl-peak enter load.
+ */
+static int set_perf_cl_peak_exit_load(const char *buf,
+				const struct kernel_param *kp)
+{
+	unsigned int val, i, ntokens = 0;
+	const char *cp = buf;
+	unsigned int bytes_left;
+
+	if (!clusters_inited)
+		return -EINVAL;
+
+	while ((cp = strpbrk(cp + 1, ":")))
+		ntokens++;
+
+	if (ntokens != (num_clusters - 1))
+		return -EINVAL;
+
+	cp = buf;
+	for (i = 0; i < num_clusters; i++) {
+
+		if (sscanf(cp, "%u\n", &val) != 1)
+			return -EINVAL;
+
+		/* Keep exit <= enter so the hysteresis band stays valid */
+		if (val > managed_clusters[i]->perf_cl_peak_enter_load)
+			return -EINVAL;
+
+		managed_clusters[i]->perf_cl_peak_exit_load = val;
+
+		bytes_left = PAGE_SIZE - (cp - buf);
+		cp = strnchr(cp, bytes_left, ':');
+		cp++;
+	}
+
+	return 0;
+}
+
+/* Report per-cluster perf-cl-peak exit loads as "v0:v1 ... " */
+static int get_perf_cl_peak_exit_load(char *buf,
+				const struct kernel_param *kp)
+{
+	int i, cnt = 0;
+
+	if (!clusters_inited)
+		return cnt;
+
+	for (i = 0; i < num_clusters; i++)
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+			"%u:", managed_clusters[i]->perf_cl_peak_exit_load);
+	cnt--;
+	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+	return cnt;
+}
+
+static const struct kernel_param_ops param_ops_perf_cl_peak_exit_load = {
+	.set = set_perf_cl_peak_exit_load,
+	.get = get_perf_cl_peak_exit_load,
+};
+device_param_cb(perf_cl_peak_exit_load, &param_ops_perf_cl_peak_exit_load,
+		 NULL, 0644);
+
+/*
+ * Set per-cluster perf-cl-peak enter hysteresis cycle counts from a
+ * ':'-separated list (no cross-validation against the exit count).
+ */
+static int set_perf_cl_peak_enter_cycles(const char *buf,
+				const struct kernel_param *kp)
+{
+	unsigned int val, i, ntokens = 0;
+	const char *cp = buf;
+	unsigned int bytes_left;
+
+	if (!clusters_inited)
+		return -EINVAL;
+
+	while ((cp = strpbrk(cp + 1, ":")))
+		ntokens++;
+
+	if (ntokens != (num_clusters - 1))
+		return -EINVAL;
+
+	cp = buf;
+	for (i = 0; i < num_clusters; i++) {
+
+		if (sscanf(cp, "%u\n", &val) != 1)
+			return -EINVAL;
+
+		managed_clusters[i]->perf_cl_peak_enter_cycles = val;
+
+		bytes_left = PAGE_SIZE - (cp - buf);
+		cp = strnchr(cp, bytes_left, ':');
+		cp++;
+	}
+
+	return 0;
+}
+
+/* Report per-cluster perf-cl-peak enter cycle counts as "v0:v1 ... " */
+static int get_perf_cl_peak_enter_cycles(char *buf,
+				const struct kernel_param *kp)
+{
+	int i, cnt = 0;
+
+	if (!clusters_inited)
+		return cnt;
+
+	for (i = 0; i < num_clusters; i++)
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "%u:",
+				managed_clusters[i]->perf_cl_peak_enter_cycles);
+	cnt--;
+	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+	return cnt;
+}
+
+static const struct kernel_param_ops param_ops_perf_cl_peak_enter_cycles = {
+	.set = set_perf_cl_peak_enter_cycles,
+	.get = get_perf_cl_peak_enter_cycles,
+};
+device_param_cb(perf_cl_peak_enter_cycles, &param_ops_perf_cl_peak_enter_cycles,
+		NULL, 0644);
+
+
+/*
+ * Set per-cluster perf-cl-peak exit hysteresis cycle counts from a
+ * ':'-separated list.
+ */
+static int set_perf_cl_peak_exit_cycles(const char *buf,
+				const struct kernel_param *kp)
+{
+	unsigned int val, i, ntokens = 0;
+	const char *cp = buf;
+	unsigned int bytes_left;
+
+	if (!clusters_inited)
+		return -EINVAL;
+
+	while ((cp = strpbrk(cp + 1, ":")))
+		ntokens++;
+
+	if (ntokens != (num_clusters - 1))
+		return -EINVAL;
+
+	cp = buf;
+	for (i = 0; i < num_clusters; i++) {
+
+		if (sscanf(cp, "%u\n", &val) != 1)
+			return -EINVAL;
+
+		managed_clusters[i]->perf_cl_peak_exit_cycles = val;
+
+		bytes_left = PAGE_SIZE - (cp - buf);
+		cp = strnchr(cp, bytes_left, ':');
+		cp++;
+	}
+
+	return 0;
+}
+
+/* Report per-cluster perf-cl-peak exit cycle counts as "v0:v1 ... " */
+static int get_perf_cl_peak_exit_cycles(char *buf,
+			const struct kernel_param *kp)
+{
+	int i, cnt = 0;
+
+	if (!clusters_inited)
+		return cnt;
+
+	for (i = 0; i < num_clusters; i++)
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+			"%u:", managed_clusters[i]->perf_cl_peak_exit_cycles);
+	cnt--;
+	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+	return cnt;
+}
+
+static const struct kernel_param_ops param_ops_perf_cl_peak_exit_cycles = {
+	.set = set_perf_cl_peak_exit_cycles,
+	.get = get_perf_cl_peak_exit_cycles,
+};
+device_param_cb(perf_cl_peak_exit_cycles, &param_ops_perf_cl_peak_exit_cycles,
+		 NULL, 0644);
+
+
+/*
+ * Set per-cluster single-mode enter hysteresis cycle counts from a
+ * ':'-separated list.
+ */
+static int set_single_enter_cycles(const char *buf,
+				const struct kernel_param *kp)
+{
+	unsigned int val, i, ntokens = 0;
+	const char *cp = buf;
+	unsigned int bytes_left;
+
+	if (!clusters_inited)
+		return -EINVAL;
+
+	while ((cp = strpbrk(cp + 1, ":")))
+		ntokens++;
+
+	if (ntokens != (num_clusters - 1))
+		return -EINVAL;
+
+	cp = buf;
+	for (i = 0; i < num_clusters; i++) {
+
+		if (sscanf(cp, "%u\n", &val) != 1)
+			return -EINVAL;
+
+		managed_clusters[i]->single_enter_cycles = val;
+
+		bytes_left = PAGE_SIZE - (cp - buf);
+		cp = strnchr(cp, bytes_left, ':');
+		cp++;
+	}
+
+	return 0;
+}
+
+/* Report per-cluster single-mode enter cycle counts as "v0:v1 ... " */
+static int get_single_enter_cycles(char *buf, const struct kernel_param *kp)
+{
+	int i, cnt = 0;
+
+	if (!clusters_inited)
+		return cnt;
+
+	for (i = 0; i < num_clusters; i++)
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "%u:",
+				managed_clusters[i]->single_enter_cycles);
+	cnt--;
+	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+	return cnt;
+}
+
+static const struct kernel_param_ops param_ops_single_enter_cycles = {
+	.set = set_single_enter_cycles,
+	.get = get_single_enter_cycles,
+};
+device_param_cb(single_enter_cycles, &param_ops_single_enter_cycles,
+		NULL, 0644);
+
+
+/*
+ * Set per-cluster single-mode exit hysteresis cycle counts from a
+ * ':'-separated list.
+ */
+static int set_single_exit_cycles(const char *buf,
+				const struct kernel_param *kp)
+{
+	unsigned int val, i, ntokens = 0;
+	const char *cp = buf;
+	unsigned int bytes_left;
+
+	if (!clusters_inited)
+		return -EINVAL;
+
+	while ((cp = strpbrk(cp + 1, ":")))
+		ntokens++;
+
+	if (ntokens != (num_clusters - 1))
+		return -EINVAL;
+
+	cp = buf;
+	for (i = 0; i < num_clusters; i++) {
+
+		if (sscanf(cp, "%u\n", &val) != 1)
+			return -EINVAL;
+
+		managed_clusters[i]->single_exit_cycles = val;
+
+		bytes_left = PAGE_SIZE - (cp - buf);
+		cp = strnchr(cp, bytes_left, ':');
+		cp++;
+	}
+
+	return 0;
+}
+
+/* Report per-cluster single-mode exit cycle counts as "v0:v1 ... " */
+static int get_single_exit_cycles(char *buf, const struct kernel_param *kp)
+{
+	int i, cnt = 0;
+
+	if (!clusters_inited)
+		return cnt;
+
+	for (i = 0; i < num_clusters; i++)
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+				"%u:", managed_clusters[i]->single_exit_cycles);
+	cnt--;
+	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+	return cnt;
+}
+
+static const struct kernel_param_ops param_ops_single_exit_cycles = {
+	.set = set_single_exit_cycles,
+	.get = get_single_exit_cycles,
+};
+device_param_cb(single_exit_cycles, &param_ops_single_exit_cycles, NULL, 0644);
+
+/*
+ * Set per-cluster multi-mode enter hysteresis cycle counts from a
+ * ':'-separated list.
+ */
+static int set_multi_enter_cycles(const char *buf,
+				const struct kernel_param *kp)
+{
+	unsigned int val, i, ntokens = 0;
+	const char *cp = buf;
+	unsigned int bytes_left;
+
+	if (!clusters_inited)
+		return -EINVAL;
+
+	while ((cp = strpbrk(cp + 1, ":")))
+		ntokens++;
+
+	if (ntokens != (num_clusters - 1))
+		return -EINVAL;
+
+	cp = buf;
+	for (i = 0; i < num_clusters; i++) {
+
+		if (sscanf(cp, "%u\n", &val) != 1)
+			return -EINVAL;
+
+		managed_clusters[i]->multi_enter_cycles = val;
+
+		bytes_left = PAGE_SIZE - (cp - buf);
+		cp = strnchr(cp, bytes_left, ':');
+		cp++;
+	}
+
+	return 0;
+}
+
+/* Report per-cluster multi-mode enter cycle counts as "v0:v1 ... " */
+static int get_multi_enter_cycles(char *buf, const struct kernel_param *kp)
+{
+	int i, cnt = 0;
+
+	if (!clusters_inited)
+		return cnt;
+
+	for (i = 0; i < num_clusters; i++)
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+				"%u:", managed_clusters[i]->multi_enter_cycles);
+	cnt--;
+	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+	return cnt;
+}
+
+static const struct kernel_param_ops param_ops_multi_enter_cycles = {
+	.set = set_multi_enter_cycles,
+	.get = get_multi_enter_cycles,
+};
+device_param_cb(multi_enter_cycles, &param_ops_multi_enter_cycles, NULL, 0644);
+
+/*
+ * Set per-cluster multi-mode exit hysteresis cycle counts from a
+ * ':'-separated list.
+ */
+static int set_multi_exit_cycles(const char *buf, const struct kernel_param *kp)
+{
+	unsigned int val, i, ntokens = 0;
+	const char *cp = buf;
+	unsigned int bytes_left;
+
+	if (!clusters_inited)
+		return -EINVAL;
+
+	while ((cp = strpbrk(cp + 1, ":")))
+		ntokens++;
+
+	if (ntokens != (num_clusters - 1))
+		return -EINVAL;
+
+	cp = buf;
+	for (i = 0; i < num_clusters; i++) {
+
+		if (sscanf(cp, "%u\n", &val) != 1)
+			return -EINVAL;
+
+		managed_clusters[i]->multi_exit_cycles = val;
+
+		bytes_left = PAGE_SIZE - (cp - buf);
+		cp = strnchr(cp, bytes_left, ':');
+		cp++;
+	}
+
+	return 0;
+}
+
+/* Report per-cluster multi-mode exit cycle counts as "v0:v1 ... " */
+static int get_multi_exit_cycles(char *buf, const struct kernel_param *kp)
+{
+	int i, cnt = 0;
+
+	if (!clusters_inited)
+		return cnt;
+
+	for (i = 0; i < num_clusters; i++)
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+				"%u:", managed_clusters[i]->multi_exit_cycles);
+	cnt--;
+	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
+	return cnt;
+}
+
+static const struct kernel_param_ops param_ops_multi_exit_cycles = {
+	.set = set_multi_exit_cycles,
+	.get = get_multi_exit_cycles,
+};
+device_param_cb(multi_exit_cycles, &param_ops_multi_exit_cycles, NULL, 0644);
+
+/* Set the global IO-busy enter hysteresis cycle count */
+static int set_io_enter_cycles(const char *buf, const struct kernel_param *kp)
+{
+	unsigned int val;
+
+	if (sscanf(buf, "%u\n", &val) != 1)
+		return -EINVAL;
+
+	io_enter_cycles = val;
+
+	return 0;
+}
+
+/* Report the IO-busy enter hysteresis cycle count */
+static int get_io_enter_cycles(char *buf, const struct kernel_param *kp)
+{
+	return snprintf(buf, PAGE_SIZE, "%u", io_enter_cycles);
+}
+
+static const struct kernel_param_ops param_ops_io_enter_cycles = {
+	.set = set_io_enter_cycles,
+	.get = get_io_enter_cycles,
+};
+device_param_cb(io_enter_cycles, &param_ops_io_enter_cycles, NULL, 0644);
+
+/* Set the global IO-busy exit hysteresis cycle count */
+static int set_io_exit_cycles(const char *buf, const struct kernel_param *kp)
+{
+	unsigned int val;
+
+	if (sscanf(buf, "%u\n", &val) != 1)
+		return -EINVAL;
+
+	io_exit_cycles = val;
+
+	return 0;
+}
+
+/* Report the IO-busy exit hysteresis cycle count */
+static int get_io_exit_cycles(char *buf, const struct kernel_param *kp)
+{
+	return snprintf(buf, PAGE_SIZE, "%u", io_exit_cycles);
+}
+
+static const struct kernel_param_ops param_ops_io_exit_cycles = {
+	.set = set_io_exit_cycles,
+	.get = get_io_exit_cycles,
+};
+device_param_cb(io_exit_cycles, &param_ops_io_exit_cycles, NULL, 0644);
+
+/* Set the iowait floor percentage; must not exceed the ceiling */
+static int set_iowait_floor_pct(const char *buf, const struct kernel_param *kp)
+{
+	u64 val;
+
+	if (sscanf(buf, "%llu\n", &val) != 1)
+		return -EINVAL;
+	/* Keep floor <= ceiling so the band stays well-ordered */
+	if (val > iowait_ceiling_pct)
+		return -EINVAL;
+
+	iowait_floor_pct = val;
+
+	return 0;
+}
+
+/* Report the iowait floor percentage */
+static int get_iowait_floor_pct(char *buf, const struct kernel_param *kp)
+{
+	return snprintf(buf, PAGE_SIZE, "%llu", iowait_floor_pct);
+}
+
+static const struct kernel_param_ops param_ops_iowait_floor_pct = {
+	.set = set_iowait_floor_pct,
+	.get = get_iowait_floor_pct,
+};
+device_param_cb(iowait_floor_pct, &param_ops_iowait_floor_pct, NULL, 0644);
+
+/* Set the iowait ceiling percentage; must not fall below the floor */
+static int set_iowait_ceiling_pct(const char *buf,
+						const struct kernel_param *kp)
+{
+	u64 val;
+
+	if (sscanf(buf, "%llu\n", &val) != 1)
+		return -EINVAL;
+	/* Keep ceiling >= floor so the band stays well-ordered */
+	if (val < iowait_floor_pct)
+		return -EINVAL;
+
+	iowait_ceiling_pct = val;
+
+	return 0;
+}
+
+/* Report the iowait ceiling percentage */
+static int get_iowait_ceiling_pct(char *buf, const struct kernel_param *kp)
+{
+	return snprintf(buf, PAGE_SIZE, "%llu", iowait_ceiling_pct);
+}
+
+static const struct kernel_param_ops param_ops_iowait_ceiling_pct = {
+	.set = set_iowait_ceiling_pct,
+	.get = get_iowait_ceiling_pct,
+};
+device_param_cb(iowait_ceiling_pct, &param_ops_iowait_ceiling_pct, NULL, 0644);
+
+/*
+ * Enable/disable the workload detectors via a bitmask (IO_DETECT,
+ * MODE_DETECT, PERF_CL_PEAK_DETECT). When a detector bit is cleared, its
+ * per-cluster state is reset under that detector's spinlock and the
+ * change flag set so the notifier thread publishes the cleared state.
+ */
+static int set_workload_detect(const char *buf, const struct kernel_param *kp)
+{
+	unsigned int val, i;
+	struct cluster *i_cl;
+	unsigned long flags;
+
+	if (!clusters_inited)
+		return -EINVAL;
+
+	if (sscanf(buf, "%u\n", &val) != 1)
+		return -EINVAL;
+
+	if (val == workload_detect)
+		return 0;
+
+	workload_detect = val;
+	/* IO detection disabled: clear iowait hysteresis and busy state */
+	if (!(workload_detect & IO_DETECT)) {
+		for (i = 0; i < num_clusters; i++) {
+			i_cl = managed_clusters[i];
+			spin_lock_irqsave(&i_cl->iowait_lock, flags);
+			i_cl->iowait_enter_cycle_cnt = 0;
+			i_cl->iowait_exit_cycle_cnt = 0;
+			i_cl->cur_io_busy = 0;
+			i_cl->io_change = true;
+			spin_unlock_irqrestore(&i_cl->iowait_lock, flags);
+		}
+	}
+	/* Mode detection disabled: clear single/multi hysteresis and mode */
+	if (!(workload_detect & MODE_DETECT)) {
+		for (i = 0; i < num_clusters; i++) {
+			i_cl = managed_clusters[i];
+			spin_lock_irqsave(&i_cl->mode_lock, flags);
+			i_cl->single_enter_cycle_cnt = 0;
+			i_cl->single_exit_cycle_cnt = 0;
+			i_cl->multi_enter_cycle_cnt = 0;
+			i_cl->multi_exit_cycle_cnt = 0;
+			i_cl->mode = 0;
+			i_cl->mode_change = true;
+			spin_unlock_irqrestore(&i_cl->mode_lock, flags);
+		}
+	}
+
+	/* Perf-cl-peak detection disabled: clear its hysteresis and state */
+	if (!(workload_detect & PERF_CL_PEAK_DETECT)) {
+		for (i = 0; i < num_clusters; i++) {
+			i_cl = managed_clusters[i];
+			spin_lock_irqsave(&i_cl->perf_cl_peak_lock, flags);
+			i_cl->perf_cl_peak_enter_cycle_cnt = 0;
+			i_cl->perf_cl_peak_exit_cycle_cnt = 0;
+			i_cl->perf_cl_peak = 0;
+			spin_unlock_irqrestore(&i_cl->perf_cl_peak_lock, flags);
+		}
+	}
+
+	/* Let the notifier thread publish the reset state to userspace */
+	wake_up_process(notify_thread);
+	return 0;
+}
+
+/* Report the workload-detection bitmask */
+static int get_workload_detect(char *buf, const struct kernel_param *kp)
+{
+	return snprintf(buf, PAGE_SIZE, "%u", workload_detect);
+}
+
+static const struct kernel_param_ops param_ops_workload_detect = {
+	.set = set_workload_detect,
+	.get = get_workload_detect,
+};
+device_param_cb(workload_detect, &param_ops_workload_detect, NULL, 0644);
+
+
+/*
+ * Toggle gating of perf-cl-peak detection on recent input events. When
+ * turned on while peak detection is active, register the input handler;
+ * when turned off, unregister it. Registration failure rolls the flag
+ * back and returns -ENOMEM.
+ */
+static int set_input_evts_with_hi_slvt_detect(const char *buf,
+					const struct kernel_param *kp)
+{
+
+	unsigned int val;
+
+	if (sscanf(buf, "%u\n", &val) != 1)
+		return -EINVAL;
+
+	if (val == use_input_evts_with_hi_slvt_detect)
+		return 0;
+
+	use_input_evts_with_hi_slvt_detect = val;
+
+	if ((workload_detect & PERF_CL_PEAK_DETECT) &&
+		!input_events_handler_registered &&
+		use_input_evts_with_hi_slvt_detect) {
+		if (register_input_handler() == -ENOMEM) {
+			/* Roll back so state matches reality */
+			use_input_evts_with_hi_slvt_detect = 0;
+			return -ENOMEM;
+		}
+	} else if ((workload_detect & PERF_CL_PEAK_DETECT) &&
+				input_events_handler_registered &&
+				!use_input_evts_with_hi_slvt_detect) {
+		unregister_input_handler();
+	}
+	return 0;
+}
+
+/* Report whether input-event gating is enabled */
+static int get_input_evts_with_hi_slvt_detect(char *buf,
+					const struct kernel_param *kp)
+{
+	return snprintf(buf, PAGE_SIZE, "%u",
+			use_input_evts_with_hi_slvt_detect);
+}
+
+static const struct kernel_param_ops param_ops_ip_evts_with_hi_slvt_detect = {
+	.set = set_input_evts_with_hi_slvt_detect,
+	.get = get_input_evts_with_hi_slvt_detect,
+};
+device_param_cb(input_evts_with_hi_slvt_detect,
+	&param_ops_ip_evts_with_hi_slvt_detect, NULL, 0644);
+
+/* kobject hosting the read-only aggr_mode / aggr_iobusy sysfs nodes */
+static struct kobject *mode_kobj;
+
+/* Show the aggregated workload mode bitmask */
+static ssize_t show_aggr_mode(struct kobject *kobj,
+					struct kobj_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", aggr_mode);
+}
+static struct kobj_attribute aggr_mode_attr =
+__ATTR(aggr_mode, 0444, show_aggr_mode, NULL);
+
+/* Show the aggregated IO-busy flag */
+static ssize_t show_aggr_iobusy(struct kobject *kobj,
+					struct kobj_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", aggr_iobusy);
+}
+static struct kobj_attribute aggr_iobusy_attr =
+__ATTR(aggr_iobusy, 0444, show_aggr_iobusy, NULL);
+
+static struct attribute *attrs[] = {
+	&aggr_mode_attr.attr,
+	&aggr_iobusy_attr.attr,
+	NULL,
+};
+
+static struct attribute_group attr_group = {
+	.attrs = attrs,
+};
+
+/*
+ * Sweep every cluster's three change flags (io, mode, perf-cl-peak),
+ * clearing each under its own spinlock, and return true if any cluster
+ * had a pending change.
+ */
+static bool check_notify_status(void)
+{
+	unsigned long flags;
+	struct cluster *cl;
+	bool changed = false;
+	int idx;
+
+	for (idx = 0; idx < num_clusters; idx++) {
+		cl = managed_clusters[idx];
+
+		spin_lock_irqsave(&cl->iowait_lock, flags);
+		changed |= cl->io_change;
+		cl->io_change = false;
+		spin_unlock_irqrestore(&cl->iowait_lock, flags);
+
+		spin_lock_irqsave(&cl->mode_lock, flags);
+		changed |= cl->mode_change;
+		cl->mode_change = false;
+		spin_unlock_irqrestore(&cl->mode_lock, flags);
+
+		spin_lock_irqsave(&cl->perf_cl_peak_lock, flags);
+		changed |= cl->perf_cl_detect_state_change;
+		cl->perf_cl_detect_state_change = false;
+		spin_unlock_irqrestore(&cl->perf_cl_peak_lock, flags);
+	}
+
+	return changed;
+}
+
+/*
+ * Main loop of the "wrkld_notify" kthread. Sleeps until a detector flags a
+ * state change, then aggregates per-cluster IO-busy/mode/peak state and emits
+ * sysfs_notify() on aggr_iobusy / aggr_mode so userspace can poll() them.
+ */
+static int notify_userspace(void *data)
+{
+	unsigned int i, io, cpu_mode, perf_cl_peak_mode;
+
+	while (1) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (!check_notify_status()) {
+			schedule();
+
+			if (kthread_should_stop())
+				break;
+		}
+		set_current_state(TASK_RUNNING);
+
+		/* OR together the state of every managed cluster */
+		io = 0;
+		cpu_mode = 0;
+		perf_cl_peak_mode = 0;
+		for (i = 0; i < num_clusters; i++) {
+			io |= managed_clusters[i]->cur_io_busy;
+			cpu_mode |= managed_clusters[i]->mode;
+			perf_cl_peak_mode |= managed_clusters[i]->perf_cl_peak;
+		}
+		if (io != aggr_iobusy) {
+			aggr_iobusy = io;
+			sysfs_notify(mode_kobj, NULL, "aggr_iobusy");
+			pr_debug("msm_perf: Notifying IO: %u\n", aggr_iobusy);
+		}
+		/* SINGLE/MULTI bits and PERF_CL_PEAK bit are updated separately */
+		if ((aggr_mode & (SINGLE | MULTI)) != cpu_mode) {
+			aggr_mode &= ~(SINGLE | MULTI);
+			aggr_mode |= cpu_mode;
+			sysfs_notify(mode_kobj, NULL, "aggr_mode");
+			pr_debug("msm_perf: Notifying CPU mode:%u\n",
+								aggr_mode);
+		}
+		if ((aggr_mode & PERF_CL_PEAK) != perf_cl_peak_mode) {
+			aggr_mode &= ~(PERF_CL_PEAK);
+			aggr_mode |= perf_cl_peak_mode;
+			sysfs_notify(mode_kobj, NULL, "aggr_mode");
+			pr_debug("msm_perf: Notifying Gaming mode:%u\n",
+								aggr_mode);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Hysteresis-based iowait detection for one cluster: take the max recent
+ * iowait percentage across the cluster's CPUs and flip cur_io_busy only
+ * after io_enter_cycles/io_exit_cycles consecutive samples past the
+ * ceiling/floor thresholds. Rate-limited to roughly one evaluation per
+ * governor timer period.
+ */
+static void check_cluster_iowait(struct cluster *cl, u64 now)
+{
+	struct load_stats *pcpu_st;
+	unsigned int i;
+	unsigned long flags;
+	unsigned int temp_iobusy;
+	u64 max_iowait = 0;
+
+	spin_lock_irqsave(&cl->iowait_lock, flags);
+
+	/* Skip if sampled too recently or IO detection is disabled */
+	if (((now - cl->last_io_check_ts)
+		< (cl->timer_rate - LAST_IO_CHECK_TOL)) ||
+		!(workload_detect & IO_DETECT)) {
+		spin_unlock_irqrestore(&cl->iowait_lock, flags);
+		return;
+	}
+
+	temp_iobusy = cl->cur_io_busy;
+	for_each_cpu(i, cl->cpus) {
+		pcpu_st = &per_cpu(cpu_load_stats, i);
+		/* Ignore CPUs whose stats are stale (e.g. idle/offline) */
+		if ((now - pcpu_st->last_wallclock)
+			> (cl->timer_rate + LAST_UPDATE_TOL))
+			continue;
+		if (max_iowait < pcpu_st->last_iopercent)
+			max_iowait = pcpu_st->last_iopercent;
+	}
+
+	if (!cl->cur_io_busy) {
+		if (max_iowait > iowait_ceiling_pct) {
+			cl->iowait_enter_cycle_cnt++;
+			if (cl->iowait_enter_cycle_cnt >= io_enter_cycles) {
+				cl->cur_io_busy = 1;
+				cl->iowait_enter_cycle_cnt = 0;
+			}
+		} else {
+			cl->iowait_enter_cycle_cnt = 0;
+		}
+	} else {
+		if (max_iowait < iowait_floor_pct) {
+			cl->iowait_exit_cycle_cnt++;
+			if (cl->iowait_exit_cycle_cnt >= io_exit_cycles) {
+				cl->cur_io_busy = 0;
+				cl->iowait_exit_cycle_cnt = 0;
+			}
+		} else {
+			cl->iowait_exit_cycle_cnt = 0;
+		}
+	}
+
+	cl->last_io_check_ts = now;
+	trace_track_iowait(cpumask_first(cl->cpus), cl->iowait_enter_cycle_cnt,
+			cl->iowait_exit_cycle_cnt, cl->cur_io_busy, max_iowait);
+
+	if (temp_iobusy != cl->cur_io_busy) {
+		cl->io_change = true;
+		pr_debug("msm_perf: IO changed to %u\n", cl->cur_io_busy);
+	}
+
+	spin_unlock_irqrestore(&cl->iowait_lock, flags);
+	/*
+	 * NOTE(review): io_change is read here after the lock is dropped;
+	 * the notifier thread may have already consumed/cleared it, so a
+	 * wakeup can be missed in a narrow race — confirm if intentional.
+	 */
+	if (cl->io_change)
+		wake_up_process(notify_thread);
+}
+
+/* Cancel a cluster's pending SINGLE-mode exit timer (traces if it was armed). */
+static void disable_timer(struct cluster *cl)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cl->timer_lock, flags);
+
+	if (del_timer(&cl->mode_exit_timer)) {
+		trace_single_cycle_exit_timer_stop(cpumask_first(cl->cpus),
+			cl->single_enter_cycles, cl->single_enter_cycle_cnt,
+			cl->single_exit_cycles, cl->single_exit_cycle_cnt,
+			cl->multi_enter_cycles, cl->multi_enter_cycle_cnt,
+			cl->multi_exit_cycles, cl->multi_exit_cycle_cnt,
+			cl->timer_rate, cl->mode);
+	}
+
+	spin_unlock_irqrestore(&cl->timer_lock, flags);
+}
+
+/*
+ * Arm the SINGLE-mode exit timer if the cluster is in SINGLE mode and no
+ * timer is pending; expiry is single_exit_cycles worth of timer periods.
+ */
+static void start_timer(struct cluster *cl)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cl->timer_lock, flags);
+	if ((cl->mode & SINGLE) && !timer_pending(&cl->mode_exit_timer)) {
+		/* Set timer for the Cluster since there is none pending */
+		cl->mode_exit_timer.expires = get_jiffies_64() +
+		usecs_to_jiffies(cl->single_exit_cycles * cl->timer_rate);
+		cl->mode_exit_timer.data = cpumask_first(cl->cpus);
+		add_timer(&cl->mode_exit_timer);
+		trace_single_cycle_exit_timer_start(cpumask_first(cl->cpus),
+			cl->single_enter_cycles, cl->single_enter_cycle_cnt,
+			cl->single_exit_cycles, cl->single_exit_cycle_cnt,
+			cl->multi_enter_cycles, cl->multi_enter_cycle_cnt,
+			cl->multi_exit_cycles, cl->multi_exit_cycle_cnt,
+			cl->timer_rate, cl->mode);
+	}
+	spin_unlock_irqrestore(&cl->timer_lock, flags);
+}
+
+/*
+ * Cancel a cluster's perf-cluster-peak exit timer. Unlike disable_timer()
+ * this takes no lock; callers hold perf_cl_peak_lock — TODO confirm.
+ */
+static void disable_perf_cl_peak_timer(struct cluster *cl)
+{
+
+	if (del_timer(&cl->perf_cl_peak_mode_exit_timer)) {
+		trace_perf_cl_peak_exit_timer_stop(cpumask_first(cl->cpus),
+			cl->perf_cl_peak_enter_cycles,
+			cl->perf_cl_peak_enter_cycle_cnt,
+			cl->perf_cl_peak_exit_cycles,
+			cl->perf_cl_peak_exit_cycle_cnt,
+			cl->timer_rate, cl->mode);
+	}
+
+}
+
+/* Arm the perf-cluster-peak exit timer if in PERF_CL_PEAK and none pending. */
+static void start_perf_cl_peak_timer(struct cluster *cl)
+{
+	if ((cl->mode & PERF_CL_PEAK) &&
+		!timer_pending(&cl->perf_cl_peak_mode_exit_timer)) {
+		/* Set timer for the Cluster since there is none pending */
+		cl->perf_cl_peak_mode_exit_timer.expires = get_jiffies_64() +
+		usecs_to_jiffies(cl->perf_cl_peak_exit_cycles * cl->timer_rate);
+		cl->perf_cl_peak_mode_exit_timer.data = cpumask_first(cl->cpus);
+		add_timer(&cl->perf_cl_peak_mode_exit_timer);
+		trace_perf_cl_peak_exit_timer_start(cpumask_first(cl->cpus),
+			cl->perf_cl_peak_enter_cycles,
+			cl->perf_cl_peak_enter_cycle_cnt,
+			cl->perf_cl_peak_exit_cycles,
+			cl->perf_cl_peak_exit_cycle_cnt,
+			cl->timer_rate, cl->mode);
+	}
+}
+
+/* Match touch devices that report absolute multi-touch X/Y positions. */
+static const struct input_device_id msm_perf_input_ids[] = {
+
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+		.evbit = {BIT_MASK(EV_ABS)},
+		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
+			BIT_MASK(ABS_MT_POSITION_X) |
+			BIT_MASK(ABS_MT_POSITION_Y)},
+	},
+
+	{},
+};
+
+/*
+ * Input event callback: count MT X/Y position events into ip_evts so the
+ * peak detector can require recent touch activity before entering
+ * PERF_CL_PEAK. Counters are updated without locking.
+ */
+static void msm_perf_input_event_handler(struct input_handle *handle,
+					unsigned int type,
+					unsigned int code,
+					int value)
+{
+	if (type != EV_ABS)
+		return;
+
+	switch (code) {
+
+	case ABS_MT_POSITION_X:
+		ip_evts->evt_x_cnt++;
+		break;
+	case ABS_MT_POSITION_Y:
+		ip_evts->evt_y_cnt++;
+		break;
+
+	case ABS_MT_DISTANCE:
+		break;
+
+	case ABS_MT_PRESSURE:
+		break;
+
+	default:
+		break;
+
+	}
+}
+/*
+ * Standard input-handler connect: allocate a handle, register it and open
+ * the device. Returns 0 or a negative errno, releasing the handle on error.
+ */
+static int msm_perf_input_connect(struct input_handler *handler,
+				struct input_dev *dev,
+				const struct input_device_id *id)
+{
+	int rc;
+	struct input_handle *handle;
+
+	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+	if (!handle)
+		return -ENOMEM;
+
+	handle->dev = dev;
+	handle->handler = handler;
+	handle->name = handler->name;
+
+	rc = input_register_handle(handle);
+	if (rc) {
+		pr_err("Failed to register handle\n");
+		goto error;
+	}
+
+	rc = input_open_device(handle);
+	if (rc) {
+		pr_err("Failed to open device\n");
+		goto error_unregister;
+	}
+	return 0;
+
+error_unregister:
+	input_unregister_handle(handle);
+error:
+	kfree(handle);
+	return rc;
+}
+
+/* Disconnect counterpart: close, unregister and free the handle. */
+static void  msm_perf_input_disconnect(struct input_handle *handle)
+{
+	input_close_device(handle);
+	input_unregister_handle(handle);
+	kfree(handle);
+}
+
+/*
+ * Unregister the msm_perf input handler if registered. The handler struct
+ * is intentionally kept allocated so a later register_input_handler() call
+ * can reuse it without reallocating.
+ */
+static void unregister_input_handler(void)
+{
+	if (handler != NULL) {
+		input_unregister_handler(handler);
+		input_events_handler_registered = false;
+	}
+}
+
+/*
+ * Allocate (on first use) and register the msm_perf input handler.
+ * Returns 0 on success or a negative errno from allocation/registration.
+ */
+static int register_input_handler(void)
+{
+	int rc;
+
+	if (handler == NULL) {
+		handler = kzalloc(sizeof(*handler), GFP_KERNEL);
+		if (!handler)
+			return -ENOMEM;
+		handler->event = msm_perf_input_event_handler;
+		handler->connect = msm_perf_input_connect;
+		handler->disconnect = msm_perf_input_disconnect;
+		handler->name = "msm_perf";
+		handler->id_table = msm_perf_input_ids;
+		handler->private = NULL;
+	}
+	rc = input_register_handler(handler);
+	if (rc) {
+		pr_err("Unable to register the input handler for msm_perf\n");
+		kfree(handler);
+		/*
+		 * Reset the cached pointer so a retry reallocates instead of
+		 * registering this freed handler (use-after-free otherwise).
+		 */
+		handler = NULL;
+	} else {
+		input_events_handler_registered = true;
+	}
+	return rc;
+}
+
+/*
+ * Perf-cluster "peak" (gaming) detection for one cluster: enter PERF_CL_PEAK
+ * after perf_cl_peak_enter_cycles of high load above the frequency threshold
+ * with recent touch input; exit after perf_cl_peak_exit_cycles of low load,
+ * backed by the peak exit timer. Clusters containing CPU0 are skipped.
+ */
+static void check_perf_cl_peak_load(struct cluster *cl, u64 now)
+{
+	struct load_stats *pcpu_st;
+	unsigned int i, ret_mode, max_load = 0;
+	unsigned int total_load = 0, cpu_cnt = 0;
+	unsigned long flags;
+	bool cpu_of_cluster_zero = true;
+
+	spin_lock_irqsave(&cl->perf_cl_peak_lock, flags);
+
+	cpu_of_cluster_zero = cpumask_first(cl->cpus) ? false:true;
+	/*
+	 * If delta of last load to now < than timer_rate - ld check tolerance
+	 * which is 18ms OR if perf_cl_peak detection not set
+	 * OR the first CPU of Cluster is CPU 0 (LVT)
+	 * then return do nothing. We are interested only in SLVT
+	 */
+	if (((now - cl->last_perf_cl_check_ts)
+		< (cl->timer_rate - LAST_LD_CHECK_TOL)) ||
+		!(workload_detect & PERF_CL_PEAK_DETECT) ||
+		cpu_of_cluster_zero) {
+		spin_unlock_irqrestore(&cl->perf_cl_peak_lock, flags);
+		return;
+	}
+	for_each_cpu(i, cl->cpus) {
+		pcpu_st = &per_cpu(cpu_load_stats, i);
+		/* Skip CPUs with stale samples */
+		if ((now - pcpu_st->last_wallclock)
+			> (cl->timer_rate + LAST_UPDATE_TOL))
+			continue;
+		if (pcpu_st->cpu_load > max_load)
+			max_load = pcpu_st->cpu_load;
+		 /*
+		  * Save the frequency for the cpu of the cluster
+		  * This frequency is the most recent/current
+		  * as obtained due to a transition
+		  * notifier callback.
+		  */
+		cl->current_freq = pcpu_st->freq;
+	}
+	ret_mode = cl->perf_cl_peak;
+
+	if (!(cl->perf_cl_peak & PERF_CL_PEAK)) {
+		if (max_load >= cl->perf_cl_peak_enter_load &&
+			freq_greater_than_threshold(cl,
+				cpumask_first(cl->cpus))) {
+			/*
+			 * Reset the event count  for the first cycle
+			 * of perf_cl_peak we detect
+			 */
+			if (!cl->perf_cl_peak_enter_cycle_cnt)
+				ip_evts->evt_x_cnt = ip_evts->evt_y_cnt = 0;
+			cl->perf_cl_peak_enter_cycle_cnt++;
+			if (cl->perf_cl_peak_enter_cycle_cnt >=
+				cl->perf_cl_peak_enter_cycles) {
+				if (input_events_greater_than_threshold())
+					ret_mode |= PERF_CL_PEAK;
+				cl->perf_cl_peak_enter_cycle_cnt = 0;
+			}
+		} else {
+			cl->perf_cl_peak_enter_cycle_cnt = 0;
+			/* Reset the event count */
+			ip_evts->evt_x_cnt = ip_evts->evt_y_cnt = 0;
+		}
+	} else {
+		if (max_load >= cl->perf_cl_peak_exit_load &&
+			freq_greater_than_threshold(cl,
+				cpumask_first(cl->cpus))) {
+			cl->perf_cl_peak_exit_cycle_cnt = 0;
+			disable_perf_cl_peak_timer(cl);
+		} else {
+			start_perf_cl_peak_timer(cl);
+			cl->perf_cl_peak_exit_cycle_cnt++;
+			if (cl->perf_cl_peak_exit_cycle_cnt
+				>= cl->perf_cl_peak_exit_cycles) {
+				ret_mode &= ~PERF_CL_PEAK;
+				cl->perf_cl_peak_exit_cycle_cnt = 0;
+				disable_perf_cl_peak_timer(cl);
+			}
+		}
+	}
+
+	cl->last_perf_cl_check_ts = now;
+	if (ret_mode != cl->perf_cl_peak) {
+		pr_debug("msm_perf: Mode changed to %u\n", ret_mode);
+		cl->perf_cl_peak = ret_mode;
+		cl->perf_cl_detect_state_change = true;
+	}
+
+	trace_cpu_mode_detect(cpumask_first(cl->cpus), max_load,
+		cl->single_enter_cycle_cnt, cl->single_exit_cycle_cnt,
+		total_load, cl->multi_enter_cycle_cnt,
+		cl->multi_exit_cycle_cnt, cl->perf_cl_peak_enter_cycle_cnt,
+		cl->perf_cl_peak_exit_cycle_cnt, cl->mode, cpu_cnt);
+
+	spin_unlock_irqrestore(&cl->perf_cl_peak_lock, flags);
+
+	/*
+	 * NOTE(review): flag is read after unlock; the notifier thread may
+	 * already have cleared it, potentially skipping this wakeup — verify.
+	 */
+	if (cl->perf_cl_detect_state_change)
+		wake_up_process(notify_thread);
+
+}
+
+/*
+ * SINGLE/MULTI mode detection for one cluster. SINGLE tracks the max
+ * per-CPU load against single_enter/exit_load; MULTI tracks the summed
+ * load against per-CPU thresholds scaled by the number of fresh CPUs.
+ * Both use consecutive-cycle hysteresis; SINGLE exit is additionally
+ * bounded by the mode-exit timer.
+ */
+static void check_cpu_load(struct cluster *cl, u64 now)
+{
+	struct load_stats *pcpu_st;
+	unsigned int i, max_load = 0, total_load = 0, ret_mode, cpu_cnt = 0;
+	unsigned int total_load_ceil, total_load_floor;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cl->mode_lock, flags);
+
+	/* Rate-limit to one evaluation per timer period; honor detect mask */
+	if (((now - cl->last_mode_check_ts)
+		< (cl->timer_rate - LAST_LD_CHECK_TOL)) ||
+		!(workload_detect & MODE_DETECT)) {
+		spin_unlock_irqrestore(&cl->mode_lock, flags);
+		return;
+	}
+
+	for_each_cpu(i, cl->cpus) {
+		pcpu_st = &per_cpu(cpu_load_stats, i);
+		if ((now - pcpu_st->last_wallclock)
+			> (cl->timer_rate + LAST_UPDATE_TOL))
+			continue;
+		if (pcpu_st->cpu_load > max_load)
+			max_load = pcpu_st->cpu_load;
+		total_load += pcpu_st->cpu_load;
+		cpu_cnt++;
+	}
+
+	/* MULTI only meaningful with >1 active CPU; else disable thresholds */
+	if (cpu_cnt > 1) {
+		total_load_ceil = cl->pcpu_multi_enter_load * cpu_cnt;
+		total_load_floor = cl->pcpu_multi_exit_load * cpu_cnt;
+	} else {
+		total_load_ceil = UINT_MAX;
+		total_load_floor = UINT_MAX;
+	}
+
+	ret_mode = cl->mode;
+	if (!(cl->mode & SINGLE)) {
+		if (max_load >= cl->single_enter_load) {
+			cl->single_enter_cycle_cnt++;
+			if (cl->single_enter_cycle_cnt
+				>= cl->single_enter_cycles) {
+				ret_mode |= SINGLE;
+				cl->single_enter_cycle_cnt = 0;
+			}
+		} else {
+			cl->single_enter_cycle_cnt = 0;
+		}
+	} else {
+		if (max_load < cl->single_exit_load) {
+			start_timer(cl);
+			cl->single_exit_cycle_cnt++;
+			if (cl->single_exit_cycle_cnt
+				>= cl->single_exit_cycles) {
+				ret_mode &= ~SINGLE;
+				cl->single_exit_cycle_cnt = 0;
+				disable_timer(cl);
+			}
+		} else {
+			cl->single_exit_cycle_cnt = 0;
+			disable_timer(cl);
+		}
+	}
+
+	if (!(cl->mode & MULTI)) {
+		if (total_load >= total_load_ceil) {
+			cl->multi_enter_cycle_cnt++;
+			if (cl->multi_enter_cycle_cnt
+				>= cl->multi_enter_cycles) {
+				ret_mode |= MULTI;
+				cl->multi_enter_cycle_cnt = 0;
+			}
+		} else {
+			cl->multi_enter_cycle_cnt = 0;
+		}
+	} else {
+		if (total_load < total_load_floor) {
+			cl->multi_exit_cycle_cnt++;
+			if (cl->multi_exit_cycle_cnt
+				>= cl->multi_exit_cycles) {
+				ret_mode &= ~MULTI;
+				cl->multi_exit_cycle_cnt = 0;
+			}
+		} else {
+			cl->multi_exit_cycle_cnt = 0;
+		}
+	}
+
+	cl->last_mode_check_ts = now;
+
+	if (ret_mode != cl->mode) {
+		cl->mode = ret_mode;
+		cl->mode_change = true;
+		pr_debug("msm_perf: Mode changed to %u\n", ret_mode);
+	}
+
+	trace_cpu_mode_detect(cpumask_first(cl->cpus), max_load,
+		cl->single_enter_cycle_cnt, cl->single_exit_cycle_cnt,
+		total_load, cl->multi_enter_cycle_cnt,
+		cl->multi_exit_cycle_cnt, cl->perf_cl_peak_enter_cycle_cnt,
+		cl->perf_cl_peak_exit_cycle_cnt, cl->mode, cpu_cnt);
+
+	spin_unlock_irqrestore(&cl->mode_lock, flags);
+
+	/* NOTE(review): mode_change read after unlock — same race as iowait */
+	if (cl->mode_change)
+		wake_up_process(notify_thread);
+}
+
+/*
+ * Run all three detectors (iowait, SINGLE/MULTI, perf-cluster peak) for the
+ * cluster that owns @cpu, refreshing the cluster's timer_rate first.
+ */
+static void check_workload_stats(unsigned int cpu, unsigned int rate, u64 now)
+{
+	struct cluster *cl = NULL;
+	unsigned int i;
+
+	for (i = 0; i < num_clusters; i++) {
+		if (cpumask_test_cpu(cpu, managed_clusters[i]->cpus)) {
+			cl = managed_clusters[i];
+			break;
+		}
+	}
+	if (cl == NULL)
+		return;
+
+	cl->timer_rate = rate;
+	check_cluster_iowait(cl, now);
+	check_cpu_load(cl, now);
+	check_perf_cl_peak_load(cl, now);
+}
+
+/*
+ * CPUFREQ_GOVINFO notifier: record per-CPU iowait percentage, wallclock and
+ * governor-reported load, then kick the workload detectors (unless called
+ * from the notifier thread itself, to avoid deadlock).
+ */
+static int perf_govinfo_notify(struct notifier_block *nb, unsigned long val,
+								void *data)
+{
+	struct cpufreq_govinfo *gov_info = data;
+	unsigned int cpu = gov_info->cpu;
+	struct load_stats *cpu_st = &per_cpu(cpu_load_stats, cpu);
+	u64 now, cur_iowait, time_diff, iowait_diff;
+
+	if (!clusters_inited || !workload_detect)
+		return NOTIFY_OK;
+
+	cur_iowait = get_cpu_iowait_time_us(cpu, &now);
+	/* Guard against iowait counter going backwards */
+	if (cur_iowait >= cpu_st->last_iowait)
+		iowait_diff = cur_iowait - cpu_st->last_iowait;
+	else
+		iowait_diff = 0;
+
+	if (now > cpu_st->last_wallclock)
+		time_diff = now - cpu_st->last_wallclock;
+	else
+		return NOTIFY_OK;
+
+	/* Percentage of the sample window spent in iowait, clamped to 100 */
+	if (iowait_diff <= time_diff) {
+		iowait_diff *= 100;
+		cpu_st->last_iopercent = div64_u64(iowait_diff, time_diff);
+	} else {
+		cpu_st->last_iopercent = 100;
+	}
+
+	cpu_st->last_wallclock = now;
+	cpu_st->last_iowait = cur_iowait;
+	cpu_st->cpu_load = gov_info->load;
+
+	 /*
+	  * Avoid deadlock in case governor notifier ran in the context
+	  * of notify_work thread
+	  */
+	if (current == notify_thread)
+		return NOTIFY_OK;
+
+	check_workload_stats(cpu, gov_info->sampling_rate_us, now);
+
+	return NOTIFY_OK;
+}
+
+/*
+ * CPUFREQ transition notifier: cache the new frequency for the CPU on
+ * POSTCHANGE so the peak detector can compare against its threshold.
+ */
+static int perf_cputrans_notify(struct notifier_block *nb, unsigned long val,
+								void *data)
+{
+	struct cpufreq_freqs *freq = data;
+	unsigned int cpu = freq->cpu;
+	unsigned long flags;
+	unsigned int i;
+	struct cluster *cl = NULL;
+	struct load_stats *cpu_st = &per_cpu(cpu_load_stats, cpu);
+
+	if (!clusters_inited || !workload_detect)
+		return NOTIFY_OK;
+	for (i = 0; i < num_clusters; i++) {
+		if (cpumask_test_cpu(cpu, managed_clusters[i]->cpus)) {
+			cl = managed_clusters[i];
+			break;
+		}
+	}
+	if (cl == NULL)
+		return NOTIFY_OK;
+	if (val == CPUFREQ_POSTCHANGE) {
+		spin_lock_irqsave(&cl->perf_cl_peak_lock, flags);
+		cpu_st->freq = freq->new;
+		spin_unlock_irqrestore(&cl->perf_cl_peak_lock, flags);
+	}
+
+	/*
+	 * Avoid deadlock in case governor notifier ran in the context
+	 * of notify_work thread
+	 */
+	/* NOTE(review): both branches below return NOTIFY_OK; the check is
+	 * currently dead code — possibly a remnant of removed logic.
+	 */
+	if (current == notify_thread)
+		return NOTIFY_OK;
+	return NOTIFY_OK;
+}
+
+static struct notifier_block perf_govinfo_nb = {
+	.notifier_call = perf_govinfo_notify,
+};
+
+static struct notifier_block perf_cputransitions_nb = {
+	.notifier_call = perf_cputrans_notify,
+};
+
+/*
+ * SINGLE-mode exit timer callback (@data = first CPU of the cluster):
+ * force the owning cluster out of SINGLE mode and wake the notifier.
+ */
+static void single_mod_exit_timer(unsigned long data)
+{
+	int i;
+	struct cluster *i_cl = NULL;
+	unsigned long flags;
+
+	if (!clusters_inited)
+		return;
+
+	for (i = 0; i < num_clusters; i++) {
+		if (cpumask_test_cpu(data,
+			managed_clusters[i]->cpus)) {
+			i_cl = managed_clusters[i];
+			break;
+		}
+	}
+
+	if (i_cl == NULL)
+		return;
+
+	spin_lock_irqsave(&i_cl->mode_lock, flags);
+	if (i_cl->mode & SINGLE) {
+		/* Disable SINGLE mode and exit since the timer expired */
+		i_cl->mode = i_cl->mode & ~SINGLE;
+		i_cl->single_enter_cycle_cnt = 0;
+		i_cl->single_exit_cycle_cnt = 0;
+		trace_single_mode_timeout(cpumask_first(i_cl->cpus),
+			i_cl->single_enter_cycles, i_cl->single_enter_cycle_cnt,
+			i_cl->single_exit_cycles, i_cl->single_exit_cycle_cnt,
+			i_cl->multi_enter_cycles, i_cl->multi_enter_cycle_cnt,
+			i_cl->multi_exit_cycles, i_cl->multi_exit_cycle_cnt,
+			i_cl->timer_rate, i_cl->mode)
+	}
+	spin_unlock_irqrestore(&i_cl->mode_lock, flags);
+	wake_up_process(notify_thread);
+}
+
+/*
+ * PERF_CL_PEAK exit timer callback: force the owning cluster out of
+ * peak mode and wake the notifier thread.
+ */
+static void perf_cl_peak_mod_exit_timer(unsigned long data)
+{
+	int i;
+	struct cluster *i_cl = NULL;
+	unsigned long flags;
+
+	if (!clusters_inited)
+		return;
+
+	for (i = 0; i < num_clusters; i++) {
+		if (cpumask_test_cpu(data,
+			managed_clusters[i]->cpus)) {
+			i_cl = managed_clusters[i];
+			break;
+		}
+	}
+
+	if (i_cl == NULL)
+		return;
+
+	spin_lock_irqsave(&i_cl->perf_cl_peak_lock, flags);
+	if (i_cl->perf_cl_peak & PERF_CL_PEAK) {
+		/* Disable PERF_CL_PEAK mode and exit since the timer expired */
+		i_cl->perf_cl_peak = i_cl->perf_cl_peak & ~PERF_CL_PEAK;
+		i_cl->perf_cl_peak_enter_cycle_cnt = 0;
+		i_cl->perf_cl_peak_exit_cycle_cnt = 0;
+	}
+	spin_unlock_irqrestore(&i_cl->perf_cl_peak_lock, flags);
+	wake_up_process(notify_thread);
+}
+
+/* CPU Hotplug */
+static struct kobject *events_kobj;
+
+/* Poll-only sysfs node: content is empty, userspace waits via sysfs_notify. */
+static ssize_t show_cpu_hotplug(struct kobject *kobj,
+					struct kobj_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "\n");
+}
+static struct kobj_attribute cpu_hotplug_attr =
+__ATTR(cpu_hotplug, 0444, show_cpu_hotplug, NULL);
+
+static struct attribute *events_attrs[] = {
+	&cpu_hotplug_attr.attr,
+	NULL,
+};
+
+static struct attribute_group events_attr_group = {
+	.attrs = events_attrs,
+};
+/*******************************sysfs ends************************************/
+
+/* Number of CPUs in @mask that are currently online. */
+static unsigned int num_online_managed(struct cpumask *mask)
+{
+	struct cpumask tmp_mask;
+
+	cpumask_clear(&tmp_mask);
+	cpumask_and(&tmp_mask, mask, cpu_online_mask);
+
+	return cpumask_weight(&tmp_mask);
+}
+
+/*
+ * CPUFREQ_POLICY (ADJUST) notifier: clamp the policy's min/max to the
+ * per-CPU limits requested through this module's parameters.
+ */
+static int perf_adjust_notify(struct notifier_block *nb, unsigned long val,
+							void *data)
+{
+	struct cpufreq_policy *policy = data;
+	unsigned int cpu = policy->cpu;
+	struct cpu_status *cpu_st = &per_cpu(cpu_stats, cpu);
+	unsigned int min = cpu_st->min, max = cpu_st->max;
+
+
+	if (val != CPUFREQ_ADJUST)
+		return NOTIFY_OK;
+
+	pr_debug("msm_perf: CPU%u policy before: %u:%u kHz\n", cpu,
+						policy->min, policy->max);
+	pr_debug("msm_perf: CPU%u seting min:max %u:%u kHz\n", cpu, min, max);
+
+	cpufreq_verify_within_limits(policy, min, max);
+
+	pr_debug("msm_perf: CPU%u policy after: %u:%u kHz\n", cpu,
+						policy->min, policy->max);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block perf_cpufreq_nb = {
+	.notifier_call = perf_adjust_notify,
+};
+
+/* Record a CPU online/dead event and wake the events notifier thread. */
+static void hotplug_notify(int action)
+{
+	unsigned long flags;
+
+	if (!events_group.init_success)
+		return;
+
+	if ((action == CPU_ONLINE) || (action == CPU_DEAD)) {
+		spin_lock_irqsave(&(events_group.cpu_hotplug_lock), flags);
+		events_group.cpu_hotplug = true;
+		spin_unlock_irqrestore(&(events_group.cpu_hotplug_lock), flags);
+		wake_up_process(events_notify_thread);
+	}
+}
+
+/*
+ * Events kthread: sleeps until hotplug_notify() flags a change, then
+ * notifies userspace pollers of the "cpu_hotplug" sysfs node.
+ */
+static int events_notify_userspace(void *data)
+{
+	unsigned long flags;
+	bool notify_change;
+
+	while (1) {
+
+		set_current_state(TASK_INTERRUPTIBLE);
+		spin_lock_irqsave(&(events_group.cpu_hotplug_lock), flags);
+
+		if (!events_group.cpu_hotplug) {
+			spin_unlock_irqrestore(&(events_group.cpu_hotplug_lock),
+									flags);
+
+			schedule();
+			if (kthread_should_stop())
+				break;
+			spin_lock_irqsave(&(events_group.cpu_hotplug_lock),
+									flags);
+		}
+
+		set_current_state(TASK_RUNNING);
+		notify_change = events_group.cpu_hotplug;
+		events_group.cpu_hotplug = false;
+		spin_unlock_irqrestore(&(events_group.cpu_hotplug_lock), flags);
+
+		if (notify_change)
+			sysfs_notify(events_kobj, NULL, "cpu_hotplug");
+	}
+
+	return 0;
+}
+
+/*
+ * Attempt to offline CPUs based on their power cost.
+ * CPUs with higher power costs are offlined first.
+ */
+static int __ref rm_high_pwr_cost_cpus(struct cluster *cl)
+{
+	unsigned int cpu, i;
+	struct cpu_pwr_stats *per_cpu_info = get_cpu_pwr_stats();
+	struct cpu_pstate_pwr *costs;
+	unsigned int *pcpu_pwr;
+	unsigned int max_cost_cpu, max_cost;
+	int any_cpu = -1;
+
+	/* -EAGAIN tells the caller to fall back to index-order offlining */
+	if (!per_cpu_info)
+		return -EAGAIN;
+
+	/* Cache each CPU's power at its highest pstate (last table entry) */
+	for_each_cpu(cpu, cl->cpus) {
+		costs = per_cpu_info[cpu].ptable;
+		if (!costs || !costs[0].freq)
+			continue;
+
+		i = 1;
+		while (costs[i].freq)
+			i++;
+
+		pcpu_pwr = &per_cpu(cpu_power_cost, cpu);
+		*pcpu_pwr = costs[i - 1].power;
+		any_cpu = (int)cpu;
+		pr_debug("msm_perf: CPU:%d Power:%u\n", cpu, *pcpu_pwr);
+	}
+
+	if (any_cpu < 0)
+		return -EAGAIN;
+
+	/* Repeatedly offline the most expensive still-online CPU */
+	for (i = 0; i < cpumask_weight(cl->cpus); i++) {
+		max_cost = 0;
+		max_cost_cpu = cpumask_first(cl->cpus);
+
+		for_each_cpu(cpu, cl->cpus) {
+			pcpu_pwr = &per_cpu(cpu_power_cost, cpu);
+			if (max_cost < *pcpu_pwr) {
+				max_cost = *pcpu_pwr;
+				max_cost_cpu = cpu;
+			}
+		}
+
+		if (!cpu_online(max_cost_cpu))
+			goto end;
+
+		pr_debug("msm_perf: Offlining CPU%d Power:%d\n", max_cost_cpu,
+								max_cost);
+		cpumask_set_cpu(max_cost_cpu, cl->offlined_cpus);
+		lock_device_hotplug();
+		if (device_offline(get_cpu_device(max_cost_cpu))) {
+			cpumask_clear_cpu(max_cost_cpu, cl->offlined_cpus);
+			pr_debug("msm_perf: Offlining CPU%d failed\n",
+								max_cost_cpu);
+		}
+		unlock_device_hotplug();
+
+end:
+		/* Zero the cost so this CPU is not picked again */
+		pcpu_pwr = &per_cpu(cpu_power_cost, max_cost_cpu);
+		*pcpu_pwr = 0;
+		if (num_online_managed(cl->cpus) <= cl->max_cpu_request)
+			break;
+	}
+
+	if (num_online_managed(cl->cpus) > cl->max_cpu_request)
+		return -EAGAIN;
+	else
+		return 0;
+}
+
+/*
+ * try_hotplug tries to online/offline cores based on the current requirement.
+ * It loops through the currently managed CPUs and tries to online/offline
+ * them until the max_cpu_request criteria is met.
+ */
+static void __ref try_hotplug(struct cluster *data)
+{
+	unsigned int i;
+
+	if (!clusters_inited)
+		return;
+
+	pr_debug("msm_perf: Trying hotplug...%d:%d\n",
+			num_online_managed(data->cpus),	num_online_cpus());
+
+	mutex_lock(&managed_cpus_lock);
+	if (num_online_managed(data->cpus) > data->max_cpu_request) {
+		if (!rm_high_pwr_cost_cpus(data)) {
+			mutex_unlock(&managed_cpus_lock);
+			return;
+		}
+
+		/*
+		 * If power aware offlining fails due to power cost info
+		 * being unavaiable fall back to original implementation
+		 */
+		/*
+		 * i is unsigned: "i >= 0" is always true, the loop relies on
+		 * the wrap past 0 failing "i < num_present_cpus()" to stop.
+		 */
+		for (i = num_present_cpus() - 1; i >= 0 &&
+						i < num_present_cpus(); i--) {
+			if (!cpumask_test_cpu(i, data->cpus) ||	!cpu_online(i))
+				continue;
+
+			pr_debug("msm_perf: Offlining CPU%d\n", i);
+			cpumask_set_cpu(i, data->offlined_cpus);
+			lock_device_hotplug();
+			if (device_offline(get_cpu_device(i))) {
+				cpumask_clear_cpu(i, data->offlined_cpus);
+				pr_debug("msm_perf: Offlining CPU%d failed\n",
+									i);
+				unlock_device_hotplug();
+				continue;
+			}
+			unlock_device_hotplug();
+			if (num_online_managed(data->cpus) <=
+							data->max_cpu_request)
+				break;
+		}
+	} else {
+		/* Too few online: bring managed CPUs back up in index order */
+		for_each_cpu(i, data->cpus) {
+			if (cpu_online(i))
+				continue;
+			pr_debug("msm_perf: Onlining CPU%d\n", i);
+			lock_device_hotplug();
+			if (device_online(get_cpu_device(i))) {
+				pr_debug("msm_perf: Onlining CPU%d failed\n",
+									i);
+				unlock_device_hotplug();
+				continue;
+			}
+			unlock_device_hotplug();
+			cpumask_clear_cpu(i, data->offlined_cpus);
+			if (num_online_managed(data->cpus) >=
+							data->max_cpu_request)
+				break;
+		}
+	}
+	mutex_unlock(&managed_cpus_lock);
+}
+
+/* Re-online every CPU this driver had offlined and drop it from the mask. */
+static void __ref release_cluster_control(struct cpumask *off_cpus)
+{
+	int cpu;
+
+	for_each_cpu(cpu, off_cpus) {
+		pr_debug("msm_perf: Release CPU %d\n", cpu);
+		lock_device_hotplug();
+		if (!device_online(get_cpu_device(cpu)))
+			cpumask_clear_cpu(cpu, off_cpus);
+		unlock_device_hotplug();
+	}
+}
+
+/* Work to evaluate current online CPU status and hotplug CPUs as per need */
+static void check_cluster_status(struct work_struct *work)
+{
+	int i;
+	struct cluster *i_cl;
+
+	for (i = 0; i < num_clusters; i++) {
+		i_cl = managed_clusters[i];
+
+		if (cpumask_empty(i_cl->cpus))
+			continue;
+
+		/* max_cpu_request < 0 means "unmanaged": release everything */
+		if (i_cl->max_cpu_request < 0) {
+			if (!cpumask_empty(i_cl->offlined_cpus))
+				release_cluster_control(i_cl->offlined_cpus);
+			continue;
+		}
+
+		if (num_online_managed(i_cl->cpus) !=
+					i_cl->max_cpu_request)
+			try_hotplug(i_cl);
+	}
+}
+
+/*
+ * CPU hotplug notifier: vetoes onlining of a managed CPU once the cluster's
+ * max_cpu_request is met (NOTIFY_BAD), and schedules a re-evaluation when a
+ * managed CPU dies outside this driver's control.
+ */
+static int __ref msm_performance_cpu_callback(struct notifier_block *nfb,
+		unsigned long action, void *hcpu)
+{
+	uint32_t cpu = (uintptr_t)hcpu;
+	unsigned int i;
+	struct cluster *i_cl = NULL;
+
+	/* Always forward the event to the sysfs events notifier first */
+	hotplug_notify(action);
+
+	if (!clusters_inited)
+		return NOTIFY_OK;
+
+	for (i = 0; i < num_clusters; i++) {
+		if (managed_clusters[i]->cpus == NULL)
+			return NOTIFY_OK;
+		if (cpumask_test_cpu(cpu, managed_clusters[i]->cpus)) {
+			i_cl = managed_clusters[i];
+			break;
+		}
+	}
+
+	if (i_cl == NULL)
+		return NOTIFY_OK;
+
+	if (action == CPU_UP_PREPARE || action == CPU_UP_PREPARE_FROZEN) {
+		/*
+		 * Prevent onlining of a managed CPU if max_cpu criteria is
+		 * already satisfied
+		 */
+		if (i_cl->offlined_cpus == NULL)
+			return NOTIFY_OK;
+		if (i_cl->max_cpu_request <=
+					num_online_managed(i_cl->cpus)) {
+			pr_debug("msm_perf: Prevent CPU%d onlining\n", cpu);
+			cpumask_set_cpu(cpu, i_cl->offlined_cpus);
+			return NOTIFY_BAD;
+		}
+		cpumask_clear_cpu(cpu, i_cl->offlined_cpus);
+
+	} else if (action == CPU_DEAD) {
+		if (i_cl->offlined_cpus == NULL)
+			return NOTIFY_OK;
+		/* CPUs we offlined ourselves don't need re-evaluation */
+		if (cpumask_test_cpu(cpu, i_cl->offlined_cpus))
+			return NOTIFY_OK;
+		/*
+		 * Schedule a re-evaluation to check if any more CPUs can be
+		 * brought online to meet the max_cpu_request requirement. This
+		 * work is delayed to account for CPU hotplug latencies
+		 */
+		if (schedule_delayed_work(&evaluate_hotplug_work, 0)) {
+			trace_reevaluate_hotplug(cpumask_bits(i_cl->cpus)[0],
+							i_cl->max_cpu_request);
+			pr_debug("msm_perf: Re-evaluation scheduled %d\n", cpu);
+		} else {
+			pr_debug("msm_perf: Work scheduling failed %d\n", cpu);
+		}
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __refdata msm_performance_cpu_notifier = {
+	.notifier_call = msm_performance_cpu_callback,
+};
+
+/*
+ * Allocate and initialize all per-cluster state (cpumasks, thresholds,
+ * locks, timers), the input-event counters, the "workload_modes" sysfs
+ * group and the notifier kthread. Returns 0 or a negative errno, freeing
+ * all per-cluster allocations on failure.
+ */
+static int init_cluster_control(void)
+{
+	unsigned int i;
+	int ret = 0;
+
+	struct kobject *module_kobj;
+
+	managed_clusters = kcalloc(num_clusters, sizeof(struct cluster *),
+								GFP_KERNEL);
+	if (!managed_clusters)
+		return -ENOMEM;
+	for (i = 0; i < num_clusters; i++) {
+		managed_clusters[i] = kcalloc(1, sizeof(struct cluster),
+								GFP_KERNEL);
+		if (!managed_clusters[i]) {
+			ret = -ENOMEM;
+			goto error;
+		}
+		if (!alloc_cpumask_var(&managed_clusters[i]->cpus,
+		     GFP_KERNEL)) {
+			ret = -ENOMEM;
+			goto error;
+		}
+		if (!alloc_cpumask_var(&managed_clusters[i]->offlined_cpus,
+		     GFP_KERNEL)) {
+			ret = -ENOMEM;
+			goto error;
+		}
+
+		/* -1 == hotplug management disabled for this cluster */
+		managed_clusters[i]->max_cpu_request = -1;
+		managed_clusters[i]->single_enter_load = DEF_SINGLE_ENT;
+		managed_clusters[i]->single_exit_load = DEF_SINGLE_EX;
+		managed_clusters[i]->single_enter_cycles
+						= DEF_SINGLE_ENTER_CYCLE;
+		managed_clusters[i]->single_exit_cycles
+						= DEF_SINGLE_EXIT_CYCLE;
+		managed_clusters[i]->pcpu_multi_enter_load
+						= DEF_PCPU_MULTI_ENT;
+		managed_clusters[i]->pcpu_multi_exit_load = DEF_PCPU_MULTI_EX;
+		managed_clusters[i]->multi_enter_cycles = DEF_MULTI_ENTER_CYCLE;
+		managed_clusters[i]->multi_exit_cycles = DEF_MULTI_EXIT_CYCLE;
+		managed_clusters[i]->perf_cl_peak_enter_load =
+						DEF_PERF_CL_PEAK_ENT;
+		managed_clusters[i]->perf_cl_peak_exit_load =
+						DEF_PERF_CL_PEAK_EX;
+		managed_clusters[i]->perf_cl_peak_enter_cycles =
+						DEF_PERF_CL_PEAK_ENTER_CYCLE;
+		managed_clusters[i]->perf_cl_peak_exit_cycles =
+						DEF_PERF_CL_PEAK_EXIT_CYCLE;
+
+		/* Initialize trigger threshold */
+		/* NOTE(review): thr is global; re-initialized each iteration */
+		thr.perf_cl_trigger_threshold = CLUSTER_1_THRESHOLD_FREQ;
+		thr.pwr_cl_trigger_threshold = CLUSTER_0_THRESHOLD_FREQ;
+		thr.ip_evt_threshold = INPUT_EVENT_CNT_THRESHOLD;
+		spin_lock_init(&(managed_clusters[i]->iowait_lock));
+		spin_lock_init(&(managed_clusters[i]->mode_lock));
+		spin_lock_init(&(managed_clusters[i]->timer_lock));
+		spin_lock_init(&(managed_clusters[i]->perf_cl_peak_lock));
+		init_timer(&managed_clusters[i]->mode_exit_timer);
+		managed_clusters[i]->mode_exit_timer.function =
+			single_mod_exit_timer;
+		init_timer(&managed_clusters[i]->perf_cl_peak_mode_exit_timer);
+		managed_clusters[i]->perf_cl_peak_mode_exit_timer.function =
+			perf_cl_peak_mod_exit_timer;
+	}
+
+	INIT_DELAYED_WORK(&evaluate_hotplug_work, check_cluster_status);
+	mutex_init(&managed_cpus_lock);
+
+	ip_evts = kcalloc(1, sizeof(struct input_events), GFP_KERNEL);
+	if (!ip_evts) {
+		ret = -ENOMEM;
+		goto error;
+	}
+	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+	if (!module_kobj) {
+		pr_err("msm_perf: Couldn't find module kobject\n");
+		ret = -ENOENT;
+		goto error;
+	}
+	mode_kobj = kobject_create_and_add("workload_modes", module_kobj);
+	if (!mode_kobj) {
+		pr_err("msm_perf: Failed to add mode_kobj\n");
+		ret = -ENOMEM;
+		kobject_put(module_kobj);
+		goto error;
+	}
+	ret = sysfs_create_group(mode_kobj, &attr_group);
+	if (ret) {
+		pr_err("msm_perf: Failed to create sysfs\n");
+		kobject_put(module_kobj);
+		kobject_put(mode_kobj);
+		goto error;
+	}
+	notify_thread = kthread_run(notify_userspace, NULL, "wrkld_notify");
+
+	clusters_inited = true;
+
+	return 0;
+
+error:
+	for (i = 0; i < num_clusters; i++) {
+		if (!managed_clusters[i])
+			break;
+		if (managed_clusters[i]->offlined_cpus)
+			free_cpumask_var(managed_clusters[i]->offlined_cpus);
+		if (managed_clusters[i]->cpus)
+			free_cpumask_var(managed_clusters[i]->cpus);
+		kfree(managed_clusters[i]);
+	}
+	kfree(managed_clusters);
+	return ret;
+}
+
+/*
+ * Create the "events" sysfs group (cpu_hotplug node) and start the
+ * events notifier kthread. Returns 0 or a negative errno.
+ */
+static int init_events_group(void)
+{
+	int ret;
+	struct kobject *module_kobj;
+
+	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+	if (!module_kobj) {
+		pr_err("msm_perf: Couldn't find module kobject\n");
+		return -ENOENT;
+	}
+
+	events_kobj = kobject_create_and_add("events", module_kobj);
+	if (!events_kobj) {
+		pr_err("msm_perf: Failed to add events_kobj\n");
+		return -ENOMEM;
+	}
+
+	ret = sysfs_create_group(events_kobj, &events_attr_group);
+	if (ret) {
+		pr_err("msm_perf: Failed to create sysfs\n");
+		return ret;
+	}
+
+	spin_lock_init(&(events_group.cpu_hotplug_lock));
+	events_notify_thread = kthread_run(events_notify_userspace,
+					NULL, "msm_perf:events_notify");
+	if (IS_ERR(events_notify_thread))
+		return PTR_ERR(events_notify_thread);
+
+	events_group.init_success = true;
+
+	return 0;
+}
+
+/*
+ * Module init: register the cpufreq policy/govinfo/transition notifiers,
+ * lift per-CPU max-frequency caps, hook CPU hotplug and create the events
+ * sysfs group. Cluster state is set up later via module parameters.
+ */
+static int __init msm_performance_init(void)
+{
+	unsigned int cpu;
+
+	cpufreq_register_notifier(&perf_cpufreq_nb, CPUFREQ_POLICY_NOTIFIER);
+	cpufreq_register_notifier(&perf_govinfo_nb, CPUFREQ_GOVINFO_NOTIFIER);
+	cpufreq_register_notifier(&perf_cputransitions_nb,
+					CPUFREQ_TRANSITION_NOTIFIER);
+
+	/* Default: no max-frequency restriction on any CPU */
+	for_each_present_cpu(cpu)
+		per_cpu(cpu_stats, cpu).max = UINT_MAX;
+
+	register_cpu_notifier(&msm_performance_cpu_notifier);
+
+	init_events_group();
+
+	return 0;
+}
+late_initcall(msm_performance_init);
diff --git a/drivers/soc/qcom/pil-msa.c b/drivers/soc/qcom/pil-msa.c
index c5ba279..b71ce6b 100644
--- a/drivers/soc/qcom/pil-msa.c
+++ b/drivers/soc/qcom/pil-msa.c
@@ -649,6 +649,7 @@
 		}
 		drv->dp_size = dp_fw->size;
 		drv->mba_dp_size += drv->dp_size;
+		drv->mba_dp_size = ALIGN(drv->mba_dp_size, SZ_4K);
 	}
 
 	mba_dp_virt = dma_alloc_attrs(dma_dev, drv->mba_dp_size, &mba_dp_phys,
diff --git a/drivers/soc/qcom/qdsp6v2/Makefile b/drivers/soc/qcom/qdsp6v2/Makefile
index b2cf03c..9fdd63a 100644
--- a/drivers/soc/qcom/qdsp6v2/Makefile
+++ b/drivers/soc/qcom/qdsp6v2/Makefile
@@ -1,5 +1,3 @@
-obj-$(CONFIG_MSM_QDSP6_APRV2) += apr.o apr_v2.o apr_tal.o
-obj-$(CONFIG_MSM_QDSP6_APRV3) += apr.o apr_v3.o apr_tal.o
 obj-$(CONFIG_MSM_QDSP6_APRV2_GLINK) += apr.o apr_v2.o apr_tal_glink.o
 obj-$(CONFIG_MSM_QDSP6_APRV3_GLINK) += apr.o apr_v3.o apr_tal_glink.o
 obj-$(CONFIG_SND_SOC_MSM_QDSP6V2_INTF) += msm_audio_ion.o
diff --git a/drivers/soc/qcom/qdsp6v2/apr_tal.c b/drivers/soc/qcom/qdsp6v2/apr_tal.c
deleted file mode 100644
index 5c296f66..0000000
--- a/drivers/soc/qcom/qdsp6v2/apr_tal.c
+++ /dev/null
@@ -1,298 +0,0 @@
-/* Copyright (c) 2010-2011, 2013-2014, 2016-2017 The Linux Foundation.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/uaccess.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
-#include <linux/list.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/debugfs.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <soc/qcom/smd.h>
-#include <linux/qdsp6v2/apr_tal.h>
-
-static char *svc_names[APR_DEST_MAX][APR_CLIENT_MAX] = {
-	{
-		"apr_audio_svc",
-		"apr_voice_svc",
-	},
-	{
-		"apr_audio_svc",
-		"apr_voice_svc",
-	},
-};
-
-struct apr_svc_ch_dev apr_svc_ch[APR_DL_MAX][APR_DEST_MAX][APR_CLIENT_MAX];
-
-int __apr_tal_write(struct apr_svc_ch_dev *apr_ch, void *data,
-			struct apr_pkt_priv *pkt_priv, int len)
-{
-	int w_len;
-	unsigned long flags;
-
-	spin_lock_irqsave(&apr_ch->w_lock, flags);
-	if (smd_write_avail(apr_ch->ch) < len) {
-		spin_unlock_irqrestore(&apr_ch->w_lock, flags);
-		return -EAGAIN;
-	}
-
-	w_len = smd_write(apr_ch->ch, data, len);
-	spin_unlock_irqrestore(&apr_ch->w_lock, flags);
-
-	pr_debug("apr_tal:w_len = %d\n", w_len);
-
-	if (w_len != len) {
-		pr_err("apr_tal: Error in write\n");
-		return -ENETRESET;
-	}
-	return w_len;
-}
-
-int apr_tal_write(struct apr_svc_ch_dev *apr_ch, void *data,
-			struct apr_pkt_priv *pkt_priv, int len)
-{
-	int rc = 0, retries = 0;
-
-	if (!apr_ch->ch)
-		return -EINVAL;
-
-	do {
-		if (rc == -EAGAIN)
-			udelay(50);
-
-		rc = __apr_tal_write(apr_ch, data, pkt_priv, len);
-	} while (rc == -EAGAIN && retries++ < 300);
-
-	if (rc == -EAGAIN)
-		pr_err("apr_tal: TIMEOUT for write\n");
-
-	return rc;
-}
-
-static void apr_tal_notify(void *priv, unsigned int event)
-{
-	struct apr_svc_ch_dev *apr_ch = priv;
-	int len, r_len, sz;
-	int pkt_cnt = 0;
-	unsigned long flags;
-
-	pr_debug("event = %d\n", event);
-	switch (event) {
-	case SMD_EVENT_DATA:
-		pkt_cnt = 0;
-		spin_lock_irqsave(&apr_ch->lock, flags);
-check_pending:
-		len = smd_read_avail(apr_ch->ch);
-		if (len < 0) {
-			pr_err("apr_tal: Invalid Read Event :%d\n", len);
-			spin_unlock_irqrestore(&apr_ch->lock, flags);
-			return;
-		}
-		sz = smd_cur_packet_size(apr_ch->ch);
-		if (sz < 0) {
-			pr_debug("pkt size is zero\n");
-			spin_unlock_irqrestore(&apr_ch->lock, flags);
-			return;
-		}
-		if (!len && !sz && !pkt_cnt)
-			goto check_write_avail;
-		if (!len) {
-			pr_debug("len = %d pkt_cnt = %d\n", len, pkt_cnt);
-			spin_unlock_irqrestore(&apr_ch->lock, flags);
-			return;
-		}
-		r_len = smd_read_from_cb(apr_ch->ch, apr_ch->data, len);
-		if (len != r_len) {
-			pr_err("apr_tal: Invalid Read\n");
-			spin_unlock_irqrestore(&apr_ch->lock, flags);
-			return;
-		}
-		pkt_cnt++;
-		pr_debug("%d %d %d\n", len, sz, pkt_cnt);
-		if (apr_ch->func)
-			apr_ch->func(apr_ch->data, r_len, apr_ch->priv);
-		goto check_pending;
-check_write_avail:
-		if (smd_write_avail(apr_ch->ch))
-			wake_up(&apr_ch->wait);
-		spin_unlock_irqrestore(&apr_ch->lock, flags);
-		break;
-	case SMD_EVENT_OPEN:
-		pr_debug("apr_tal: SMD_EVENT_OPEN\n");
-		apr_ch->smd_state = 1;
-		wake_up(&apr_ch->wait);
-		break;
-	case SMD_EVENT_CLOSE:
-		pr_debug("apr_tal: SMD_EVENT_CLOSE\n");
-		break;
-	}
-}
-
-int apr_tal_rx_intents_config(struct apr_svc_ch_dev *apr_ch,
-			int num_of_intents, uint32_t size)
-{
-	/* Rx intents configuration is required for Glink
-	 * but not for SMD. No-op for this function.
-	 */
-	return 0;
-}
-
-struct apr_svc_ch_dev *apr_tal_open(uint32_t clnt, uint32_t dest,
-				uint32_t dl, apr_svc_cb_fn func, void *priv)
-{
-	int rc;
-
-	if ((clnt >= APR_CLIENT_MAX) || (dest >= APR_DEST_MAX) ||
-						(dl >= APR_DL_MAX)) {
-		pr_err("apr_tal: Invalid params\n");
-		return NULL;
-	}
-
-	if (apr_svc_ch[dl][dest][clnt].ch) {
-		pr_err("apr_tal: This channel alreday openend\n");
-		return NULL;
-	}
-
-	mutex_lock(&apr_svc_ch[dl][dest][clnt].m_lock);
-	if (!apr_svc_ch[dl][dest][clnt].dest_state) {
-		rc = wait_event_timeout(apr_svc_ch[dl][dest][clnt].dest,
-			apr_svc_ch[dl][dest][clnt].dest_state,
-				msecs_to_jiffies(APR_OPEN_TIMEOUT_MS));
-		if (rc == 0) {
-			pr_err("apr_tal:open timeout\n");
-			mutex_unlock(&apr_svc_ch[dl][dest][clnt].m_lock);
-			return NULL;
-		}
-		pr_debug("apr_tal:Wakeup done\n");
-		apr_svc_ch[dl][dest][clnt].dest_state = 0;
-	}
-	rc = smd_named_open_on_edge(svc_names[dest][clnt], dest,
-			&apr_svc_ch[dl][dest][clnt].ch,
-			&apr_svc_ch[dl][dest][clnt],
-			apr_tal_notify);
-	if (rc < 0) {
-		pr_err("apr_tal: smd_open failed %s\n",
-					svc_names[dest][clnt]);
-		mutex_unlock(&apr_svc_ch[dl][dest][clnt].m_lock);
-		return NULL;
-	}
-	rc = wait_event_timeout(apr_svc_ch[dl][dest][clnt].wait,
-		(apr_svc_ch[dl][dest][clnt].smd_state == 1), 5 * HZ);
-	if (rc == 0) {
-		pr_err("apr_tal:TIMEOUT for OPEN event\n");
-		mutex_unlock(&apr_svc_ch[dl][dest][clnt].m_lock);
-		apr_tal_close(&apr_svc_ch[dl][dest][clnt]);
-		return NULL;
-	}
-
-	smd_disable_read_intr(apr_svc_ch[dl][dest][clnt].ch);
-
-	if (!apr_svc_ch[dl][dest][clnt].dest_state) {
-		apr_svc_ch[dl][dest][clnt].dest_state = 1;
-		pr_debug("apr_tal:Waiting for apr svc init\n");
-		msleep(200);
-		pr_debug("apr_tal:apr svc init done\n");
-	}
-	apr_svc_ch[dl][dest][clnt].smd_state = 0;
-
-	apr_svc_ch[dl][dest][clnt].func = func;
-	apr_svc_ch[dl][dest][clnt].priv = priv;
-	mutex_unlock(&apr_svc_ch[dl][dest][clnt].m_lock);
-
-	return &apr_svc_ch[dl][dest][clnt];
-}
-
-int apr_tal_close(struct apr_svc_ch_dev *apr_ch)
-{
-	int r;
-
-	if (!apr_ch->ch)
-		return -EINVAL;
-
-	mutex_lock(&apr_ch->m_lock);
-	r = smd_close(apr_ch->ch);
-	apr_ch->ch = NULL;
-	apr_ch->func = NULL;
-	apr_ch->priv = NULL;
-	mutex_unlock(&apr_ch->m_lock);
-	return r;
-}
-
-static int apr_smd_probe(struct platform_device *pdev)
-{
-	int dest;
-	int clnt;
-
-	if (pdev->id == APR_DEST_MODEM) {
-		pr_info("apr_tal:Modem Is Up\n");
-		dest = APR_DEST_MODEM;
-		if (!strcmp(pdev->name, "apr_audio_svc"))
-			clnt = APR_CLIENT_AUDIO;
-		else
-			clnt = APR_CLIENT_VOICE;
-		apr_svc_ch[APR_DL_SMD][dest][clnt].dest_state = 1;
-		wake_up(&apr_svc_ch[APR_DL_SMD][dest][clnt].dest);
-	} else if (pdev->id == APR_DEST_QDSP6) {
-		pr_info("apr_tal:Q6 Is Up\n");
-		dest = APR_DEST_QDSP6;
-		clnt = APR_CLIENT_AUDIO;
-		apr_svc_ch[APR_DL_SMD][dest][clnt].dest_state = 1;
-		wake_up(&apr_svc_ch[APR_DL_SMD][dest][clnt].dest);
-	} else
-		pr_err("apr_tal:Invalid Dest Id: %d\n", pdev->id);
-
-	return 0;
-}
-
-static struct platform_driver apr_q6_driver = {
-	.probe = apr_smd_probe,
-	.driver = {
-		.name = "apr_audio_svc",
-		.owner = THIS_MODULE,
-	},
-};
-
-static struct platform_driver apr_modem_driver = {
-	.probe = apr_smd_probe,
-	.driver = {
-		.name = "apr_voice_svc",
-		.owner = THIS_MODULE,
-	},
-};
-
-static int __init apr_tal_init(void)
-{
-	int i, j, k;
-
-	for (i = 0; i < APR_DL_MAX; i++)
-		for (j = 0; j < APR_DEST_MAX; j++)
-			for (k = 0; k < APR_CLIENT_MAX; k++) {
-				init_waitqueue_head(&apr_svc_ch[i][j][k].wait);
-				init_waitqueue_head(&apr_svc_ch[i][j][k].dest);
-				spin_lock_init(&apr_svc_ch[i][j][k].lock);
-				spin_lock_init(&apr_svc_ch[i][j][k].w_lock);
-				mutex_init(&apr_svc_ch[i][j][k].m_lock);
-			}
-	platform_driver_register(&apr_q6_driver);
-	platform_driver_register(&apr_modem_driver);
-	return 0;
-}
-device_initcall(apr_tal_init);
diff --git a/drivers/soc/qcom/rpm_stats.c b/drivers/soc/qcom/rpm_stats.c
index 15d8b1b..a3fe7b3 100644
--- a/drivers/soc/qcom/rpm_stats.c
+++ b/drivers/soc/qcom/rpm_stats.c
@@ -11,7 +11,6 @@
  *
  */
 
-#include <linux/debugfs.h>
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
@@ -150,131 +149,40 @@
 	return length;
 }
 
-static inline unsigned long  msm_rpmstats_read_register(void __iomem *regbase,
-		int index, int offset)
-{
-	return  readl_relaxed(regbase + index * 12 + (offset + 1) * 4);
-}
-
-static ssize_t msm_rpmstats_file_read(struct file *file, char __user *bufu,
-				  size_t count, loff_t *ppos)
-{
-	struct msm_rpmstats_private_data *prvdata;
-
-	prvdata = file->private_data;
-	if (!prvdata)
-		return -EINVAL;
-
-	if (!bufu || count == 0)
-		return -EINVAL;
-
-	if ((*ppos >= prvdata->len) &&
-		(prvdata->read_idx < prvdata->num_records)) {
-		prvdata->len = msm_rpmstats_copy_stats(prvdata);
-		*ppos = 0;
-	}
-
-	return simple_read_from_buffer(bufu, count, ppos,
-			prvdata->buf, prvdata->len);
-}
-
-static int msm_rpmstats_file_open(struct inode *inode, struct file *file)
-{
-	struct msm_rpmstats_private_data *prvdata;
-	struct msm_rpmstats_platform_data *pdata;
-
-	pdata = inode->i_private;
-
-	file->private_data = kzalloc(sizeof(*prvdata), GFP_KERNEL);
-	if (!file->private_data)
-		return -ENOMEM;
-
-	prvdata = file->private_data;
-
-	prvdata->reg_base = ioremap_nocache(pdata->phys_addr_base,
-					pdata->phys_size);
-	if (!prvdata->reg_base) {
-		kfree(file->private_data);
-		prvdata = NULL;
-		pr_err("%s: ERROR could not ioremap start=%pa, len=%u\n",
-			__func__, &pdata->phys_addr_base,
-			pdata->phys_size);
-		return -EBUSY;
-	}
-
-	prvdata->read_idx = prvdata->len = 0;
-	prvdata->platform_data = pdata;
-	prvdata->num_records = RPM_STATS_NUM_REC;
-
-	return 0;
-}
-
-static int msm_rpmstats_file_close(struct inode *inode, struct file *file)
-{
-	struct msm_rpmstats_private_data *private = file->private_data;
-
-	if (private->reg_base)
-		iounmap(private->reg_base);
-	kfree(file->private_data);
-
-	return 0;
-}
-
-static const struct file_operations msm_rpmstats_fops = {
-	.owner    = THIS_MODULE,
-	.open     = msm_rpmstats_file_open,
-	.read     = msm_rpmstats_file_read,
-	.release  = msm_rpmstats_file_close,
-	.llseek   = no_llseek,
-};
-
 static ssize_t rpmstats_show(struct kobject *kobj,
 			struct kobj_attribute *attr, char *buf)
 {
-	struct msm_rpmstats_private_data *prvdata = NULL;
+	struct msm_rpmstats_private_data prvdata;
 	struct msm_rpmstats_platform_data *pdata = NULL;
 
 	pdata = GET_PDATA_OF_ATTR(attr);
 
-	prvdata =
-		kmalloc(sizeof(*prvdata), GFP_KERNEL);
-	if (!prvdata)
-		return -ENOMEM;
-
-	prvdata->reg_base = ioremap_nocache(pdata->phys_addr_base,
+	prvdata.reg_base = ioremap_nocache(pdata->phys_addr_base,
 					pdata->phys_size);
-	if (!prvdata->reg_base) {
-		kfree(prvdata);
+	if (!prvdata.reg_base) {
 		pr_err("%s: ERROR could not ioremap start=%pa, len=%u\n",
 			__func__, &pdata->phys_addr_base,
 			pdata->phys_size);
 		return -EBUSY;
 	}
 
-	prvdata->read_idx = prvdata->len = 0;
-	prvdata->platform_data = pdata;
-	prvdata->num_records = RPM_STATS_NUM_REC;
+	prvdata.read_idx = prvdata.len = 0;
+	prvdata.platform_data = pdata;
+	prvdata.num_records = RPM_STATS_NUM_REC;
 
-	if (prvdata->read_idx < prvdata->num_records)
-		prvdata->len = msm_rpmstats_copy_stats(prvdata);
+	if (prvdata.read_idx < prvdata.num_records)
+		prvdata.len = msm_rpmstats_copy_stats(&prvdata);
 
-	return snprintf(buf, prvdata->len, prvdata->buf);
+	return snprintf(buf, prvdata.len, prvdata.buf);
 }
 
 static int msm_rpmstats_create_sysfs(struct msm_rpmstats_platform_data *pd)
 {
-	struct kobject *module_kobj = NULL;
 	struct kobject *rpmstats_kobj = NULL;
 	struct msm_rpmstats_kobj_attr *rpms_ka = NULL;
 	int ret = 0;
 
-	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
-	if (!module_kobj) {
-		pr_err("%s: Cannot find module_kset\n", __func__);
-		return -ENODEV;
-	}
-
-	rpmstats_kobj = kobject_create_and_add("rpmstats", module_kobj);
+	rpmstats_kobj = kobject_create_and_add("system_sleep", power_kobj);
 	if (!rpmstats_kobj) {
 		pr_err("%s: Cannot create rpmstats kobject\n", __func__);
 		ret = -ENOMEM;
@@ -303,7 +211,6 @@
 
 static int msm_rpmstats_probe(struct platform_device *pdev)
 {
-	struct dentry *dent = NULL;
 	struct msm_rpmstats_platform_data *pdata;
 	struct msm_rpmstats_platform_data *pd;
 	struct resource *res = NULL, *offset = NULL;
@@ -339,28 +246,8 @@
 	if (pdev->dev.platform_data)
 		pd = pdev->dev.platform_data;
 
-	dent = debugfs_create_file("rpm_stats", 0444, NULL,
-					pdata, &msm_rpmstats_fops);
-	if (!dent) {
-		pr_err("%s: ERROR rpm_stats debugfs_create_file	fail\n",
-				__func__);
-		return -ENOMEM;
-	}
-
 	msm_rpmstats_create_sysfs(pdata);
 
-	platform_set_drvdata(pdev, dent);
-	return 0;
-}
-
-static int msm_rpmstats_remove(struct platform_device *pdev)
-{
-	struct dentry *dent;
-
-	dent = platform_get_drvdata(pdev);
-	debugfs_remove(dent);
-	platform_set_drvdata(pdev, NULL);
-
 	return 0;
 }
 
@@ -371,7 +258,6 @@
 
 static struct platform_driver msm_rpmstats_driver = {
 	.probe = msm_rpmstats_probe,
-	.remove = msm_rpmstats_remove,
 	.driver = {
 		.name = "msm_rpm_stat",
 		.owner = THIS_MODULE,
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index 81d0bb0..5ca0fe5 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -153,10 +153,9 @@
 	}
 
 	/* Signal the blocking thread we are done */
-	if (waitq) {
-		atomic_dec(wc);
-		wake_up(waitq);
-	}
+	if (wc && atomic_dec_and_test(wc))
+		if (waitq)
+			wake_up(waitq);
 }
 
 static struct rpmh_req *__find_req(struct rpmh_client *rc, u32 addr)
@@ -463,6 +462,9 @@
 	unsigned long flags;
 	struct rpmh_mbox *rpm;
 
+	if (IS_ERR_OR_NULL(rc) || !cmd || !n)
+		return -EINVAL;
+
 	if (rpmh_standalone)
 		return 0;
 
@@ -478,7 +480,7 @@
 	while (n[count++])
 		;
 	count--;
-	if (count >= RPMH_MAX_REQ_IN_BATCH)
+	if (!count || count > RPMH_MAX_REQ_IN_BATCH)
 		return -EINVAL;
 
 	if (state == RPMH_ACTIVE_ONLY_STATE || state == RPMH_AWAKE_STATE) {
diff --git a/drivers/soc/qcom/service-locator.c b/drivers/soc/qcom/service-locator.c
index 6a54048..82718c8 100644
--- a/drivers/soc/qcom/service-locator.c
+++ b/drivers/soc/qcom/service-locator.c
@@ -31,7 +31,6 @@
 
 #define SERVREG_LOC_SERVICE_INSTANCE_ID			1
 
-#define QMI_RESP_BIT_SHIFT(x)				(x << 16)
 #define QMI_SERVREG_LOC_SERVER_INITIAL_TIMEOUT		2000
 #define QMI_SERVREG_LOC_SERVER_TIMEOUT			2000
 #define INITIAL_TIMEOUT					100000
@@ -199,9 +198,9 @@
 	}
 
 	/* Check the response */
-	if (QMI_RESP_BIT_SHIFT(resp->resp.result) != QMI_RESULT_SUCCESS_V01) {
+	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
 		pr_err("QMI request for client %s failed 0x%x\n",
-			pd->client_name, QMI_RESP_BIT_SHIFT(resp->resp.error));
+			pd->client_name, resp->resp.error);
 		return -EREMOTEIO;
 	}
 	return rc;
@@ -220,7 +219,7 @@
 		return -EAGAIN;
 	}
 
-	req = kmalloc(sizeof(
+	req = kzalloc(sizeof(
 		struct qmi_servreg_loc_get_domain_list_req_msg_v01),
 		GFP_KERNEL);
 	if (!req) {
@@ -228,7 +227,7 @@
 		rc = -ENOMEM;
 		goto out;
 	}
-	resp = kmalloc(sizeof(
+	resp = kzalloc(sizeof(
 		struct qmi_servreg_loc_get_domain_list_resp_msg_v01),
 		GFP_KERNEL);
 	if (!resp) {
diff --git a/drivers/soc/qcom/service-notifier.c b/drivers/soc/qcom/service-notifier.c
index 68592fe..62e2384 100644
--- a/drivers/soc/qcom/service-notifier.c
+++ b/drivers/soc/qcom/service-notifier.c
@@ -30,7 +30,6 @@
 #include <soc/qcom/service-notifier.h>
 #include "service-notifier-private.h"
 
-#define QMI_RESP_BIT_SHIFT(x)			(x << 16)
 #define SERVREG_NOTIF_NAME_LENGTH	QMI_SERVREG_NOTIF_NAME_LENGTH_V01
 #define SERVREG_NOTIF_SERVICE_ID	SERVREG_NOTIF_SERVICE_ID_V01
 #define SERVREG_NOTIF_SERVICE_VERS	SERVREG_NOTIF_SERVICE_VERS_V01
@@ -225,9 +224,8 @@
 	}
 
 	/* Check the response */
-	if (QMI_RESP_BIT_SHIFT(resp.resp.result) != QMI_RESULT_SUCCESS_V01)
-		pr_err("QMI request failed 0x%x\n",
-			QMI_RESP_BIT_SHIFT(resp.resp.error));
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01)
+		pr_err("QMI request failed 0x%x\n", resp.resp.error);
 	pr_info("Indication ACKed for transid %d, service %s, instance %d!\n",
 		data->ind_msg.transaction_id, data->ind_msg.service_path,
 		data->instance_id);
@@ -318,9 +316,8 @@
 	}
 
 	/* Check the response */
-	if (QMI_RESP_BIT_SHIFT(resp.resp.result) != QMI_RESULT_SUCCESS_V01) {
-		pr_err("QMI request failed 0x%x\n",
-					QMI_RESP_BIT_SHIFT(resp.resp.error));
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		pr_err("QMI request failed 0x%x\n", resp.resp.error);
 		return -EREMOTEIO;
 	}
 
@@ -646,15 +643,15 @@
 	}
 
 	/* Check response if PDR is disabled */
-	if (QMI_RESP_BIT_SHIFT(resp.resp.result) == QMI_ERR_DISABLED_V01) {
-		pr_err("PD restart is disabled 0x%x\n",
-					QMI_RESP_BIT_SHIFT(resp.resp.error));
+	if (resp.resp.result == QMI_RESULT_FAILURE_V01 &&
+				resp.resp.error == QMI_ERR_DISABLED_V01) {
+		pr_err("PD restart is disabled 0x%x\n", resp.resp.error);
 		return -EOPNOTSUPP;
 	}
 	/* Check the response for other error case*/
-	if (QMI_RESP_BIT_SHIFT(resp.resp.result) != QMI_RESULT_SUCCESS_V01) {
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
 		pr_err("QMI request for PD restart failed 0x%x\n",
-					QMI_RESP_BIT_SHIFT(resp.resp.error));
+						resp.resp.error);
 		return -EREMOTEIO;
 	}
 
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index db12900..8cc77c1 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -16,6 +16,7 @@
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_platform.h>
 #include <linux/pm_runtime.h>
 #include <linux/qcom-geni-se.h>
 #include <linux/spi/spi.h>
@@ -78,6 +79,8 @@
 #define TIMESTAMP_AFTER		(3)
 #define POST_CMD_DELAY		(4)
 
+#define SPI_CORE2X_VOTE		(10000)
+
 struct spi_geni_master {
 	struct se_geni_rsc spi_rsc;
 	resource_size_t phys_addr;
@@ -96,6 +99,7 @@
 	unsigned int rx_rem_bytes;
 	struct spi_transfer *cur_xfer;
 	struct completion xfer_done;
+	struct device *wrapper_dev;
 };
 
 static struct spi_master *get_spi_master(struct device *dev)
@@ -243,8 +247,8 @@
 			dev_err(mas->dev, "Invalid proto %d\n", proto);
 			return -ENXIO;
 		}
-		geni_se_init(mas->base, FIFO_MODE, 0x0,
-						(mas->tx_fifo_depth - 2));
+		geni_se_init(mas->base, 0x0, (mas->tx_fifo_depth - 2));
+		geni_se_select_mode(mas->base, FIFO_MODE);
 		mas->tx_fifo_depth = get_tx_fifo_depth(mas->base);
 		mas->rx_fifo_depth = get_rx_fifo_depth(mas->base);
 		mas->tx_fifo_width = get_tx_fifo_width(mas->base);
@@ -476,6 +480,8 @@
 	struct spi_geni_master *geni_mas;
 	struct se_geni_rsc *rsc;
 	struct resource *res;
+	struct platform_device *wrapper_pdev;
+	struct device_node *wrapper_ph_node;
 
 	spi = spi_alloc_master(&pdev->dev, sizeof(struct spi_geni_master));
 	if (!spi) {
@@ -489,6 +495,29 @@
 	rsc = &geni_mas->spi_rsc;
 	geni_mas->dev = &pdev->dev;
 	spi->dev.of_node = pdev->dev.of_node;
+	wrapper_ph_node = of_parse_phandle(pdev->dev.of_node,
+					"qcom,wrapper-core", 0);
+	if (IS_ERR_OR_NULL(wrapper_ph_node)) {
+		ret = PTR_ERR(wrapper_ph_node);
+		dev_err(&pdev->dev, "No wrapper core defined\n");
+		goto spi_geni_probe_err;
+	}
+	wrapper_pdev = of_find_device_by_node(wrapper_ph_node);
+	of_node_put(wrapper_ph_node);
+	if (IS_ERR_OR_NULL(wrapper_pdev)) {
+		ret = PTR_ERR(wrapper_pdev);
+		dev_err(&pdev->dev, "Cannot retrieve wrapper device\n");
+		goto spi_geni_probe_err;
+	}
+	geni_mas->wrapper_dev = &wrapper_pdev->dev;
+	geni_mas->spi_rsc.wrapper_dev = &wrapper_pdev->dev;
+	ret = geni_se_resources_init(rsc, SPI_CORE2X_VOTE,
+				     (DEFAULT_SE_CLK * DEFAULT_BUS_WIDTH));
+	if (ret) {
+		dev_err(&pdev->dev, "Error geni_se_resources_init\n");
+		goto spi_geni_probe_err;
+	}
+
 	rsc->geni_pinctrl = devm_pinctrl_get(&pdev->dev);
 	if (IS_ERR_OR_NULL(rsc->geni_pinctrl)) {
 		dev_err(&pdev->dev, "No pinctrl config specified!\n");
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index c3e2988..1055649 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -3160,7 +3160,7 @@
 };
 
 /*-------------------------------------------------------------------------*/
-static void __init nbu2ss_drv_ep_init(struct nbu2ss_udc *udc)
+static void nbu2ss_drv_ep_init(struct nbu2ss_udc *udc)
 {
 	int	i;
 
@@ -3191,7 +3191,7 @@
 
 /*-------------------------------------------------------------------------*/
 /* platform_driver */
-static int __init nbu2ss_drv_contest_init(
+static int nbu2ss_drv_contest_init(
 	struct platform_device *pdev,
 	struct nbu2ss_udc *udc)
 {
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index 23fda9d..13ec24d 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -924,27 +924,29 @@
 }
 LPROC_SEQ_FOPS(ll_unstable_stats);
 
-static ssize_t root_squash_show(struct kobject *kobj, struct attribute *attr,
-				char *buf)
+static int ll_root_squash_seq_show(struct seq_file *m, void *v)
 {
-	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
-					      ll_kobj);
+	struct super_block *sb = m->private;
+	struct ll_sb_info *sbi = ll_s2sbi(sb);
 	struct root_squash_info *squash = &sbi->ll_squash;
 
-	return sprintf(buf, "%u:%u\n", squash->rsi_uid, squash->rsi_gid);
+	seq_printf(m, "%u:%u\n", squash->rsi_uid, squash->rsi_gid);
+	return 0;
 }
 
-static ssize_t root_squash_store(struct kobject *kobj, struct attribute *attr,
-				 const char *buffer, size_t count)
+static ssize_t ll_root_squash_seq_write(struct file *file,
+					const char __user *buffer,
+					size_t count, loff_t *off)
 {
-	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
-					      ll_kobj);
+	struct seq_file *m = file->private_data;
+	struct super_block *sb = m->private;
+	struct ll_sb_info *sbi = ll_s2sbi(sb);
 	struct root_squash_info *squash = &sbi->ll_squash;
 
 	return lprocfs_wr_root_squash(buffer, count, squash,
-				      ll_get_fsname(sbi->ll_sb, NULL, 0));
+				      ll_get_fsname(sb, NULL, 0));
 }
-LUSTRE_RW_ATTR(root_squash);
+LPROC_SEQ_FOPS(ll_root_squash);
 
 static int ll_nosquash_nids_seq_show(struct seq_file *m, void *v)
 {
@@ -997,6 +999,8 @@
 	{ "statahead_stats",  &ll_statahead_stats_fops, NULL, 0 },
 	{ "unstable_stats",   &ll_unstable_stats_fops, NULL },
 	{ "sbi_flags",	      &ll_sbi_flags_fops, NULL, 0 },
+	{ .name =       "root_squash",
+	  .fops =       &ll_root_squash_fops			},
 	{ .name =		"nosquash_nids",
 	  .fops =		&ll_nosquash_nids_fops		},
 	{ NULL }
@@ -1027,7 +1031,6 @@
 	&lustre_attr_max_easize.attr,
 	&lustre_attr_default_easize.attr,
 	&lustre_attr_xattr_cache.attr,
-	&lustre_attr_root_squash.attr,
 	NULL,
 };
 
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index 825a63a..2e075a6 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -232,7 +232,7 @@
 	struct p80211_hdr_a3 *hdr;
 
 	hdr = (struct p80211_hdr_a3 *)skb->data;
-	if (p80211_rx_typedrop(wlandev, hdr->fc))
+	if (p80211_rx_typedrop(wlandev, le16_to_cpu(hdr->fc)))
 		return CONV_TO_ETHER_SKIPPED;
 
 	/* perform mcast filtering: allow my local address through but reject
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index f0be6e9..984241f9 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -35,11 +35,6 @@
 
 #include "thermal_core.h"
 
-#define for_each_tz_sibling(pos, head)                                         \
-	for (pos = list_first_entry((head), struct __thermal_zone, list);\
-		&(pos->list) != (head);                                  \
-		pos = list_next_entry(pos, list))                        \
-
 /***   Private data structures to represent thermal device tree data ***/
 /**
  * struct __thermal_bind_param - a match between trip and cooling device
@@ -436,7 +431,7 @@
 	enum thermal_trip_type type = 0;
 
 	head = &data->senps->first_tz;
-	for_each_tz_sibling(data, head) {
+	list_for_each_entry(data, head, list) {
 		zone = data->tzd;
 		for (trip = 0; trip < data->ntrips; trip++) {
 			of_thermal_get_trip_type(zone, trip, &type);
@@ -499,7 +494,7 @@
 	struct list_head *head;
 
 	head = &data->senps->first_tz;
-	for_each_tz_sibling(data, head) {
+	list_for_each_entry(data, head, list) {
 		zone = data->tzd;
 		thermal_zone_device_update(zone, THERMAL_EVENT_UNSPECIFIED);
 	}
@@ -684,7 +679,7 @@
 void thermal_zone_of_sensor_unregister(struct device *dev,
 				       struct thermal_zone_device *tzd)
 {
-	struct __thermal_zone *tz;
+	struct __thermal_zone *tz, *next;
 	struct thermal_zone_device *pos;
 	struct list_head *head;
 
@@ -698,7 +693,7 @@
 		return;
 
 	head = &tz->senps->first_tz;
-	for_each_tz_sibling(tz, head) {
+	list_for_each_entry_safe(tz, next, head, list) {
 		pos = tz->tzd;
 		mutex_lock(&pos->lock);
 		pos->ops->get_temp = NULL;
diff --git a/drivers/thermal/qcom/msm_lmh_dcvs.c b/drivers/thermal/qcom/msm_lmh_dcvs.c
index f8a7945..65dc2df 100644
--- a/drivers/thermal/qcom/msm_lmh_dcvs.c
+++ b/drivers/thermal/qcom/msm_lmh_dcvs.c
@@ -108,6 +108,7 @@
 };
 
 LIST_HEAD(lmh_dcvs_hw_list);
+DEFINE_MUTEX(lmh_dcvs_list_access);
 
 static int limits_dcvs_get_freq_limits(uint32_t cpu, unsigned long *max_freq,
 					 unsigned long *min_freq)
@@ -308,10 +309,14 @@
 {
 	struct limits_dcvs_hw *hw;
 
+	mutex_lock(&lmh_dcvs_list_access);
 	list_for_each_entry(hw, &lmh_dcvs_hw_list, list) {
-		if (cpumask_test_cpu(cpu, &hw->core_map))
+		if (cpumask_test_cpu(cpu, &hw->core_map)) {
+			mutex_unlock(&lmh_dcvs_list_access);
 			return hw;
+		}
 	}
+	mutex_unlock(&lmh_dcvs_list_access);
 
 	return NULL;
 }
@@ -388,6 +393,42 @@
 	.floor_limit = lmh_set_min_limit,
 };
 
+static int limits_cpu_online(unsigned int online_cpu)
+{
+	struct limits_dcvs_hw *hw = get_dcvsh_hw_from_cpu(online_cpu);
+	unsigned int idx = 0, cpu = 0;
+
+	if (!hw)
+		return 0;
+
+	for_each_cpu(cpu, &hw->core_map) {
+		cpumask_t cpu_mask  = { CPU_BITS_NONE };
+
+		if (cpu != online_cpu) {
+			idx++;
+			continue;
+		} else if (hw->cdev_data[idx].cdev) {
+			return 0;
+		}
+		cpumask_set_cpu(cpu, &cpu_mask);
+		hw->cdev_data[idx].max_freq = U32_MAX;
+		hw->cdev_data[idx].min_freq = 0;
+		hw->cdev_data[idx].cdev = cpufreq_platform_cooling_register(
+						&cpu_mask, &cd_ops);
+		if (IS_ERR_OR_NULL(hw->cdev_data[idx].cdev)) {
+			pr_err("CPU:%u cooling device register error:%ld\n",
+				cpu, PTR_ERR(hw->cdev_data[idx].cdev));
+			hw->cdev_data[idx].cdev = NULL;
+		} else {
+			pr_debug("CPU:%u cooling device registered\n", cpu);
+		}
+		break;
+
+	}
+
+	return 0;
+}
+
 static int limits_dcvs_probe(struct platform_device *pdev)
 {
 	int ret;
@@ -398,7 +439,7 @@
 	struct device_node *cpu_node, *lmh_node;
 	uint32_t request_reg, clear_reg, min_reg;
 	unsigned long max_freq, min_freq;
-	int cpu, idx;
+	int cpu;
 	cpumask_t mask = { CPU_BITS_NONE };
 
 	for_each_possible_cpu(cpu) {
@@ -491,22 +532,6 @@
 	if (IS_ERR_OR_NULL(tzdev))
 		return PTR_ERR(tzdev);
 
-	/* Setup cooling devices to request mitigation states */
-	mutex_init(&hw->access_lock);
-	idx = 0;
-	for_each_cpu(cpu, &hw->core_map) {
-		cpumask_t cpu_mask  = { CPU_BITS_NONE };
-
-		cpumask_set_cpu(cpu, &cpu_mask);
-		hw->cdev_data[idx].cdev = cpufreq_platform_cooling_register(
-						&cpu_mask, &cd_ops);
-		if (IS_ERR_OR_NULL(hw->cdev_data[idx].cdev))
-			return PTR_ERR(hw->cdev_data[idx].cdev);
-		hw->cdev_data[idx].max_freq = U32_MAX;
-		hw->cdev_data[idx].min_freq = 0;
-		idx++;
-	}
-
 	switch (affinity) {
 	case 0:
 		request_reg = LIMITS_CLUSTER_0_REQ;
@@ -519,33 +544,36 @@
 		min_reg = LIMITS_CLUSTER_1_MIN_FREQ;
 		break;
 	default:
-		return -EINVAL;
+		ret = -EINVAL;
+		goto unregister_sensor;
 	};
 
+	hw->min_freq_reg = devm_ioremap(&pdev->dev, min_reg, 0x4);
+	if (!hw->min_freq_reg) {
+		pr_err("min frequency enable register remap failed\n");
+		ret = -ENOMEM;
+		goto unregister_sensor;
+	}
+
+	mutex_init(&hw->access_lock);
+	init_timer_deferrable(&hw->poll_timer);
+	hw->poll_timer.data = (unsigned long)hw;
+	hw->poll_timer.function = limits_dcvs_poll;
 	hw->osm_hw_reg = devm_ioremap(&pdev->dev, request_reg, 0x4);
 	if (!hw->osm_hw_reg) {
 		pr_err("register remap failed\n");
-		return -ENOMEM;
+		goto probe_exit;
 	}
 	hw->int_clr_reg = devm_ioremap(&pdev->dev, clear_reg, 0x4);
 	if (!hw->int_clr_reg) {
 		pr_err("interrupt clear reg remap failed\n");
-		return -ENOMEM;
+		goto probe_exit;
 	}
-	hw->min_freq_reg = devm_ioremap(&pdev->dev, min_reg, 0x4);
-	if (!hw->min_freq_reg) {
-		pr_err("min frequency enable register remap failed\n");
-		return -ENOMEM;
-	}
-	init_timer_deferrable(&hw->poll_timer);
-	hw->poll_timer.data = (unsigned long)hw;
-	hw->poll_timer.function = limits_dcvs_poll;
 
 	hw->irq_num = of_irq_get(pdev->dev.of_node, 0);
 	if (hw->irq_num < 0) {
-		ret = hw->irq_num;
-		pr_err("Error getting IRQ number. err:%d\n", ret);
-		return ret;
+		pr_err("Error getting IRQ number. err:%d\n", hw->irq_num);
+		goto probe_exit;
 	}
 	atomic_set(&hw->is_irq_enabled, 1);
 	ret = devm_request_threaded_irq(&pdev->dev, hw->irq_num, NULL,
@@ -553,11 +581,26 @@
 		| IRQF_NO_SUSPEND, hw->sensor_name, hw);
 	if (ret) {
 		pr_err("Error registering for irq. err:%d\n", ret);
-		return ret;
+		ret = 0;
+		goto probe_exit;
 	}
 
+probe_exit:
+	mutex_lock(&lmh_dcvs_list_access);
 	INIT_LIST_HEAD(&hw->list);
 	list_add(&hw->list, &lmh_dcvs_hw_list);
+	mutex_unlock(&lmh_dcvs_list_access);
+
+	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "lmh-dcvs/cdev:online",
+				limits_cpu_online, NULL);
+	if (ret < 0)
+		goto unregister_sensor;
+	ret = 0;
+
+	return ret;
+
+unregister_sensor:
+	thermal_zone_of_sensor_unregister(&pdev->dev, tzdev);
 
 	return ret;
 }
diff --git a/drivers/thermal/tsens-dbg.c b/drivers/thermal/tsens-dbg.c
index 7cd8c86..9b10a1b 100644
--- a/drivers/thermal/tsens-dbg.c
+++ b/drivers/thermal/tsens-dbg.c
@@ -35,8 +35,8 @@
 #define	TSENS_DEBUG_ID_MASK_1_4			0xffffffe1
 #define	DEBUG_SIZE				10
 
-#define TSENS_DEBUG_CONTROL(n)			((n) + 0x1130)
-#define TSENS_DEBUG_DATA(n)			((n) + 0x1134)
+#define TSENS_DEBUG_CONTROL(n)			((n) + 0x130)
+#define TSENS_DEBUG_DATA(n)			((n) + 0x134)
 
 struct tsens_dbg_func {
 	int (*dbg_func)(struct tsens_device *, u32, u32, int *);
@@ -86,10 +86,127 @@
 	return 0;
 }
 
+static int tsens_dbg_log_bus_id_data(struct tsens_device *data,
+					u32 id, u32 dbg_type, int *val)
+{
+	struct tsens_device *tmdev = NULL;
+	u32 loop = 0, i = 0;
+	uint32_t r1, r2, r3, r4, offset = 0;
+	unsigned int debug_dump;
+	unsigned int debug_id = 0, cntrl_id = 0;
+	void __iomem *srot_addr;
+	void __iomem *controller_id_addr;
+	void __iomem *debug_id_addr;
+	void __iomem *debug_data_addr;
+
+	if (!data)
+		return -EINVAL;
+
+	pr_debug("%d %d\n", id, dbg_type);
+	tmdev = data;
+	controller_id_addr = TSENS_CONTROLLER_ID(tmdev->tsens_tm_addr);
+	debug_id_addr = TSENS_DEBUG_CONTROL(tmdev->tsens_tm_addr);
+	debug_data_addr = TSENS_DEBUG_DATA(tmdev->tsens_tm_addr);
+	srot_addr = TSENS_CTRL_ADDR(tmdev->tsens_srot_addr);
+
+	cntrl_id = readl_relaxed(controller_id_addr);
+	pr_err("Controller_id: 0x%x\n", cntrl_id);
+
+	loop = 0;
+	i = 0;
+	debug_id = readl_relaxed(debug_id_addr);
+	writel_relaxed((debug_id | (i << 1) | 1),
+			TSENS_DEBUG_CONTROL(tmdev->tsens_tm_addr));
+	while (loop < TSENS_DEBUG_LOOP_COUNT_ID_0) {
+		debug_dump = readl_relaxed(debug_data_addr);
+		r1 = readl_relaxed(debug_data_addr);
+		r2 = readl_relaxed(debug_data_addr);
+		r3 = readl_relaxed(debug_data_addr);
+		r4 = readl_relaxed(debug_data_addr);
+		pr_err("cntrl:%d, bus-id:%d value:0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
+			cntrl_id, i, debug_dump, r1, r2, r3, r4);
+		loop++;
+	}
+
+	for (i = TSENS_DBG_BUS_ID_1; i <= TSENS_DBG_BUS_ID_15; i++) {
+		loop = 0;
+		debug_id = readl_relaxed(debug_id_addr);
+		debug_id = debug_id & TSENS_DEBUG_ID_MASK_1_4;
+		writel_relaxed((debug_id | (i << 1) | 1),
+				TSENS_DEBUG_CONTROL(tmdev->tsens_tm_addr));
+		while (loop < TSENS_DEBUG_LOOP_COUNT) {
+			debug_dump = readl_relaxed(debug_data_addr);
+			pr_err("cntrl:%d, bus-id:%d with value: 0x%x\n",
+				cntrl_id, i, debug_dump);
+			if (i == TSENS_DBG_BUS_ID_2)
+				usleep_range(
+					TSENS_DEBUG_BUS_ID2_MIN_CYCLE,
+					TSENS_DEBUG_BUS_ID2_MAX_CYCLE);
+			loop++;
+		}
+	}
+
+	pr_err("Start of TSENS TM dump\n");
+	for (i = 0; i < TSENS_DEBUG_OFFSET_RANGE; i++) {
+		r1 = readl_relaxed(controller_id_addr + offset);
+		r2 = readl_relaxed(controller_id_addr + (offset +
+					TSENS_DEBUG_OFFSET_WORD1));
+		r3 = readl_relaxed(controller_id_addr +	(offset +
+					TSENS_DEBUG_OFFSET_WORD2));
+		r4 = readl_relaxed(controller_id_addr + (offset +
+					TSENS_DEBUG_OFFSET_WORD3));
+
+		pr_err("ctrl:%d:0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+			cntrl_id, offset, r1, r2, r3, r4);
+		offset += TSENS_DEBUG_OFFSET_ROW;
+	}
+
+	offset = 0;
+	pr_err("Start of TSENS SROT dump\n");
+	for (i = 0; i < TSENS_DEBUG_OFFSET_RANGE; i++) {
+		r1 = readl_relaxed(srot_addr + offset);
+		r2 = readl_relaxed(srot_addr + (offset +
+					TSENS_DEBUG_OFFSET_WORD1));
+		r3 = readl_relaxed(srot_addr + (offset +
+					TSENS_DEBUG_OFFSET_WORD2));
+		r4 = readl_relaxed(srot_addr + (offset +
+					TSENS_DEBUG_OFFSET_WORD3));
+
+		pr_err("ctrl:%d:0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+			cntrl_id, offset, r1, r2, r3, r4);
+		offset += TSENS_DEBUG_OFFSET_ROW;
+	}
+
+	loop = 0;
+	while (loop < TSENS_DEBUG_LOOP_COUNT) {
+		offset = TSENS_DEBUG_OFFSET_ROW *
+				TSENS_DEBUG_STATUS_REG_START;
+		pr_err("Start of TSENS TM dump %d\n", loop);
+		/* Limited dump of the registers for the temperature */
+		for (i = 0; i < TSENS_DEBUG_LOOP_COUNT; i++) {
+			r1 = readl_relaxed(controller_id_addr + offset);
+			r2 = readl_relaxed(controller_id_addr +
+				(offset + TSENS_DEBUG_OFFSET_WORD1));
+			r3 = readl_relaxed(controller_id_addr +
+				(offset + TSENS_DEBUG_OFFSET_WORD2));
+			r4 = readl_relaxed(controller_id_addr +
+				(offset + TSENS_DEBUG_OFFSET_WORD3));
+
+		pr_err("ctrl:%d:0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+			cntrl_id, offset, r1, r2, r3, r4);
+			offset += TSENS_DEBUG_OFFSET_ROW;
+		}
+		loop++;
+	}
+
+	return 0;
+}
+
 static struct tsens_dbg_func dbg_arr[] = {
 	[TSENS_DBG_LOG_TEMP_READS] = {tsens_dbg_log_temp_reads},
 	[TSENS_DBG_LOG_INTERRUPT_TIMESTAMP] = {
 			tsens_dbg_log_interrupt_timestamp},
+	[TSENS_DBG_LOG_BUS_ID_DATA] = {tsens_dbg_log_bus_id_data},
 };
 
 int tsens2xxx_dbg(struct tsens_device *data, u32 id, u32 dbg_type, int *val)
diff --git a/drivers/thermal/tsens.h b/drivers/thermal/tsens.h
index 3b9b01a..770b982 100644
--- a/drivers/thermal/tsens.h
+++ b/drivers/thermal/tsens.h
@@ -31,6 +31,7 @@
 	TSENS_DBG_POLL,
 	TSENS_DBG_LOG_TEMP_READS,
 	TSENS_DBG_LOG_INTERRUPT_TIMESTAMP,
+	TSENS_DBG_LOG_BUS_ID_DATA,
 	TSENS_DBG_LOG_MAX
 };
 
@@ -109,9 +110,9 @@
 	unsigned int			*hw_ids;
 	u32				temp_factor;
 	bool				cycle_monitor;
-	u32				cycle_compltn_monitor_val;
+	u32				cycle_compltn_monitor_mask;
 	bool				wd_bark;
-	u32				wd_bark_val;
+	u32				wd_bark_mask;
 };
 
 struct tsens_device {
diff --git a/drivers/thermal/tsens2xxx.c b/drivers/thermal/tsens2xxx.c
index 13b183d..55be2f9 100644
--- a/drivers/thermal/tsens2xxx.c
+++ b/drivers/thermal/tsens2xxx.c
@@ -57,6 +57,8 @@
 #define TSENS_TM_CODE_BIT_MASK			0xfff
 #define TSENS_TM_CODE_SIGN_BIT			0x800
 #define TSENS_TM_SCALE_DECI_MILLIDEG		100
+#define TSENS_DEBUG_WDOG_TRIGGER_COUNT		5
+#define TSENS_TM_WATCHDOG_LOG(n)		((n) + 0x13c)
 
 #define TSENS_EN				BIT(0)
 
@@ -296,13 +298,11 @@
 static irqreturn_t tsens_tm_critical_irq_thread(int irq, void *data)
 {
 	struct tsens_device *tm = data;
-	unsigned int i, status;
+	unsigned int i, status, wd_log, wd_mask;
 	unsigned long flags;
-	void __iomem *sensor_status_addr;
-	void __iomem *sensor_int_mask_addr;
+	void __iomem *sensor_status_addr, *sensor_int_mask_addr;
 	void __iomem *sensor_critical_addr;
-	void __iomem *wd_critical_addr;
-	int wd_mask;
+	void __iomem *wd_critical_addr, *wd_log_addr;
 
 	sensor_status_addr = TSENS_TM_SN_STATUS(tm->tsens_tm_addr);
 	sensor_int_mask_addr =
@@ -311,6 +311,7 @@
 		TSENS_TM_SN_CRITICAL_THRESHOLD(tm->tsens_tm_addr);
 	wd_critical_addr =
 		TSENS_TM_CRITICAL_INT_STATUS(tm->tsens_tm_addr);
+	wd_log_addr = TSENS_TM_WATCHDOG_LOG(tm->tsens_tm_addr);
 
 	if (tm->ctrl_data->wd_bark) {
 		wd_mask = readl_relaxed(wd_critical_addr);
@@ -325,7 +326,15 @@
 			writel_relaxed(wd_mask & ~(TSENS_TM_CRITICAL_WD_BARK),
 				(TSENS_TM_CRITICAL_INT_CLEAR
 				(tm->tsens_tm_addr)));
-			tm->tsens_dbg.tsens_critical_wd_cnt++;
+			wd_log = readl_relaxed(wd_log_addr);
+			if (wd_log >= TSENS_DEBUG_WDOG_TRIGGER_COUNT) {
+				pr_err("Watchdog count:%d\n", wd_log);
+				if (tm->ops->dbg)
+					tm->ops->dbg(tm, 0,
+					TSENS_DBG_LOG_BUS_ID_DATA, NULL);
+				BUG();
+			}
+
 			return IRQ_HANDLED;
 		}
 	}
@@ -494,8 +503,7 @@
 {
 	void __iomem *srot_addr;
 	void __iomem *sensor_int_mask_addr;
-	unsigned int srot_val;
-	int crit_mask;
+	unsigned int srot_val, crit_mask, crit_val;
 
 	srot_addr = TSENS_CTRL_ADDR(tmdev->tsens_srot_addr + 0x4);
 	srot_val = readl_relaxed(srot_addr);
@@ -508,13 +516,36 @@
 		sensor_int_mask_addr =
 			TSENS_TM_CRITICAL_INT_MASK(tmdev->tsens_tm_addr);
 		crit_mask = readl_relaxed(sensor_int_mask_addr);
-		writel_relaxed(
-			crit_mask | tmdev->ctrl_data->cycle_compltn_monitor_val,
-			(TSENS_TM_CRITICAL_INT_MASK
-			(tmdev->tsens_tm_addr)));
+		crit_val = TSENS_TM_CRITICAL_CYCLE_MONITOR;
+		if (tmdev->ctrl_data->cycle_compltn_monitor_mask)
+			writel_relaxed((crit_mask | crit_val),
+				(TSENS_TM_CRITICAL_INT_MASK
+				(tmdev->tsens_tm_addr)));
+		else
+			writel_relaxed((crit_mask & ~crit_val),
+				(TSENS_TM_CRITICAL_INT_MASK
+				(tmdev->tsens_tm_addr)));
 		/*Update critical cycle monitoring*/
 		mb();
 	}
+
+	if (tmdev->ctrl_data->wd_bark) {
+		sensor_int_mask_addr =
+			TSENS_TM_CRITICAL_INT_MASK(tmdev->tsens_tm_addr);
+		crit_mask = readl_relaxed(sensor_int_mask_addr);
+		crit_val = TSENS_TM_CRITICAL_WD_BARK;
+		if (tmdev->ctrl_data->wd_bark_mask)
+			writel_relaxed((crit_mask | crit_val),
+			(TSENS_TM_CRITICAL_INT_MASK
+			(tmdev->tsens_tm_addr)));
+		else
+			writel_relaxed((crit_mask & ~crit_val),
+			(TSENS_TM_CRITICAL_INT_MASK
+			(tmdev->tsens_tm_addr)));
+		/*Update watchdog monitoring*/
+		mb();
+	}
+
 	writel_relaxed(TSENS_TM_CRITICAL_INT_EN |
 		TSENS_TM_UPPER_INT_EN | TSENS_TM_LOWER_INT_EN,
 		TSENS_TM_INT_EN(tmdev->tsens_tm_addr));
@@ -575,24 +606,25 @@
 
 const struct tsens_data data_tsens2xxx = {
 	.cycle_monitor			= false,
-	.cycle_compltn_monitor_val	= 0,
+	.cycle_compltn_monitor_mask	= 1,
 	.wd_bark			= false,
-	.wd_bark_val			= 0,
+	.wd_bark_mask			= 1,
 	.ops				= &ops_tsens2xxx,
 };
 
 const struct tsens_data data_tsens23xx = {
 	.cycle_monitor			= true,
-	.cycle_compltn_monitor_val	= 0,
+	.cycle_compltn_monitor_mask	= 1,
 	.wd_bark			= true,
-	.wd_bark_val			= 0,
+	.wd_bark_mask			= 1,
 	.ops				= &ops_tsens2xxx,
 };
 
 const struct tsens_data data_tsens24xx = {
 	.cycle_monitor			= true,
-	.cycle_compltn_monitor_val	= 0,
+	.cycle_compltn_monitor_mask	= 1,
 	.wd_bark			= true,
-	.wd_bark_val			= 1,
+	/* Enable Watchdog monitoring by unmasking */
+	.wd_bark_mask			= 0,
 	.ops				= &ops_tsens2xxx,
 };
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index f4eb807..da31159 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -1237,7 +1237,8 @@
 	pm_runtime_put_autosuspend(&pdev->dev);
 	return 0;
 err:
-	pm_runtime_put(&pdev->dev);
+	pm_runtime_dont_use_autosuspend(&pdev->dev);
+	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 	return ret;
 }
@@ -1246,6 +1247,7 @@
 {
 	struct omap8250_priv *priv = platform_get_drvdata(pdev);
 
+	pm_runtime_dont_use_autosuspend(&pdev->dev);
 	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 	serial8250_unregister_port(priv->line);
@@ -1345,6 +1347,10 @@
 	struct omap8250_priv *priv = dev_get_drvdata(dev);
 	struct uart_8250_port *up;
 
+	/* In case runtime-pm tries this before we are setup */
+	if (!priv)
+		return 0;
+
 	up = serial8250_get_port(priv->line);
 	/*
 	 * When using 'no_console_suspend', the console UART must not be
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index 6a3f2ac..8108da8 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -28,8 +28,6 @@
 #include <linux/serial_core.h>
 #include <linux/tty.h>
 #include <linux/tty_flip.h>
-#include <linux/msm-bus.h>
-#include <linux/msm-bus-board.h>
 
 /* UART specific GENI registers */
 #define SE_UART_LOOPBACK_CFG		(0x22C)
@@ -107,8 +105,6 @@
 #define DEF_TX_WM		(2)
 #define DEF_FIFO_WIDTH_BITS	(32)
 #define UART_CORE2X_VOTE	(10000)
-#define DEFAULT_SE_CLK		(19200000)
-#define DEFAULT_BUS_WIDTH	(4)
 
 #define WAKEBYTE_TIMEOUT_MSEC	(2000)
 #define IPC_LOG_PWR_PAGES	(2)
@@ -138,6 +134,7 @@
 			unsigned int rx_fifo_wc,
 			unsigned int rx_last_byte_valid,
 			unsigned int rx_last);
+	struct device *wrapper_dev;
 	struct se_geni_rsc serial_rsc;
 	int loopback;
 	int wakeup_irq;
@@ -1035,18 +1032,24 @@
 	if (!uart_console(uport)) {
 		/* For now only assume FIFO mode. */
 		msm_port->xfer_mode = FIFO_MODE;
-		ret = geni_se_init(uport->membase, msm_port->xfer_mode,
+		ret = geni_se_init(uport->membase,
 					msm_port->rx_wm, msm_port->rx_rfr);
 		if (ret) {
 			dev_err(uport->dev, "%s: Fail\n", __func__);
 			goto exit_portsetup;
 		}
+
+		ret = geni_se_select_mode(uport->membase, msm_port->xfer_mode);
+		if (ret)
+			goto exit_portsetup;
+
 		se_get_packing_config(8, 4, false, &cfg0, &cfg1);
 		geni_write_reg_nolog(cfg0, uport->membase,
 						SE_GENI_TX_PACKING_CFG0);
 		geni_write_reg_nolog(cfg1, uport->membase,
 						SE_GENI_TX_PACKING_CFG1);
 	}
+
 	msm_port->port_setup = true;
 	/*
 	 * Ensure Port setup related IO completes before returning to
@@ -1106,27 +1109,6 @@
 	scnprintf(msm_port->name, sizeof(msm_port->name), "msm_serial_geni%d",
 				uport->line);
 
-	ret = request_irq(uport->irq, msm_geni_serial_isr, IRQF_TRIGGER_HIGH,
-			msm_port->name, msm_port);
-	if (unlikely(ret)) {
-		dev_err(uport->dev, "%s: Failed to get IRQ ret %d\n",
-							__func__, ret);
-		goto exit_startup;
-	}
-
-	if (msm_port->wakeup_irq > 0) {
-		ret = request_threaded_irq(msm_port->wakeup_irq, NULL,
-				msm_geni_wakeup_isr,
-				IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
-				"hs_uart_wakeup", uport);
-		if (unlikely(ret)) {
-			dev_err(uport->dev, "%s:Failed to get WakeIRQ ret%d\n",
-								__func__, ret);
-			goto exit_startup;
-		}
-		disable_irq(msm_port->wakeup_irq);
-	}
-
 	if (likely(!uart_console(uport))) {
 		ret = msm_geni_serial_power_on(&msm_port->uport);
 		if (ret)
@@ -1156,6 +1138,26 @@
 	 * before returning to the framework.
 	 */
 	mb();
+	ret = request_irq(uport->irq, msm_geni_serial_isr, IRQF_TRIGGER_HIGH,
+			msm_port->name, msm_port);
+	if (unlikely(ret)) {
+		dev_err(uport->dev, "%s: Failed to get IRQ ret %d\n",
+							__func__, ret);
+		goto exit_startup;
+	}
+
+	if (msm_port->wakeup_irq > 0) {
+		ret = request_threaded_irq(msm_port->wakeup_irq, NULL,
+				msm_geni_wakeup_isr,
+				IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+				"hs_uart_wakeup", uport);
+		if (unlikely(ret)) {
+			dev_err(uport->dev, "%s:Failed to get WakeIRQ ret%d\n",
+								__func__, ret);
+			goto exit_startup;
+		}
+		disable_irq(msm_port->wakeup_irq);
+	}
 	IPC_LOG_MSG(msm_port->ipc_log_misc, "%s\n", __func__);
 exit_startup:
 	return ret;
@@ -1436,8 +1438,9 @@
 		goto exit_geni_serial_earlyconsetup;
 	}
 
-	geni_se_init(uport->membase, FIFO_MODE, (DEF_FIFO_DEPTH_WORDS >> 1),
-						(DEF_FIFO_DEPTH_WORDS - 2));
+	geni_se_init(uport->membase, (DEF_FIFO_DEPTH_WORDS >> 1),
+					(DEF_FIFO_DEPTH_WORDS - 2));
+	geni_se_select_mode(uport->membase, FIFO_MODE);
 	/*
 	 * Ignore Flow control.
 	 * Disable Tx Parity.
@@ -1600,6 +1603,8 @@
 	struct uart_driver *drv;
 	const struct of_device_id *id;
 	bool is_console = false;
+	struct platform_device *wrapper_pdev;
+	struct device_node *wrapper_ph_node;
 
 	id = of_match_device(msm_geni_device_tbl, &pdev->dev);
 	if (id) {
@@ -1643,23 +1648,24 @@
 
 	uport->dev = &pdev->dev;
 
-	if (!(of_property_read_u32(pdev->dev.of_node, "qcom,bus-mas",
-					&dev_port->serial_rsc.bus_mas))) {
-		dev_port->serial_rsc.bus_bw =
-				msm_bus_scale_register(
-					dev_port->serial_rsc.bus_mas,
-					MSM_BUS_SLAVE_EBI_CH0,
-					(char *)dev_name(&pdev->dev),
-					false);
-		if (IS_ERR_OR_NULL(dev_port->serial_rsc.bus_bw)) {
-			ret = PTR_ERR(dev_port->serial_rsc.bus_bw);
-			goto exit_geni_serial_probe;
-		}
-		dev_port->serial_rsc.ab = UART_CORE2X_VOTE;
-		dev_port->serial_rsc.ib = DEFAULT_SE_CLK * DEFAULT_BUS_WIDTH;
-	} else {
-		dev_info(&pdev->dev, "No bus master specified\n");
+	wrapper_ph_node = of_parse_phandle(pdev->dev.of_node,
+					"qcom,wrapper-core", 0);
+	if (IS_ERR_OR_NULL(wrapper_ph_node)) {
+		ret = PTR_ERR(wrapper_ph_node);
+		goto exit_geni_serial_probe;
 	}
+	wrapper_pdev = of_find_device_by_node(wrapper_ph_node);
+	of_node_put(wrapper_ph_node);
+	if (IS_ERR_OR_NULL(wrapper_pdev)) {
+		ret = PTR_ERR(wrapper_pdev);
+		goto exit_geni_serial_probe;
+	}
+	dev_port->wrapper_dev = &wrapper_pdev->dev;
+	dev_port->serial_rsc.wrapper_dev = &wrapper_pdev->dev;
+	ret = geni_se_resources_init(&dev_port->serial_rsc, UART_CORE2X_VOTE,
+					(DEFAULT_SE_CLK * DEFAULT_BUS_WIDTH));
+	if (ret)
+		goto exit_geni_serial_probe;
 
 	if (of_property_read_u8(pdev->dev.of_node, "qcom,wakeup-byte",
 					&dev_port->wakeup_byte))
@@ -1755,6 +1761,7 @@
 		dev_port->rx_fifo = devm_kzalloc(uport->dev,
 				sizeof(dev_port->rx_fifo_depth * sizeof(u32)),
 								GFP_KERNEL);
+		pm_runtime_set_suspended(&pdev->dev);
 		pm_runtime_enable(&pdev->dev);
 	}
 
@@ -1777,7 +1784,6 @@
 
 	wakeup_source_trash(&port->geni_wake);
 	uart_remove_one_port(drv, &port->uport);
-	msm_bus_scale_unregister(port->serial_rsc.bus_bw);
 	return 0;
 }
 
@@ -1830,6 +1836,7 @@
 	if (uart_console(uport)) {
 		uart_suspend_port((struct uart_driver *)uport->private_data,
 					uport);
+		se_geni_resources_off(&port->serial_rsc);
 	} else {
 		if (!pm_runtime_status_suspended(dev)) {
 			dev_info(dev, "%s: Is still active\n", __func__);
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index cd41455..05bc4d6 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -428,9 +428,6 @@
 
 u8 hw_port_test_get(struct ci_hdrc *ci);
 
-int hw_wait_reg(struct ci_hdrc *ci, enum ci_hw_regs reg, u32 mask,
-				u32 value, unsigned int timeout_ms);
-
 void ci_platform_configure(struct ci_hdrc *ci);
 
 int dbg_create_files(struct ci_hdrc *ci);
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 3dbb4a2..6e0d614 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -516,38 +516,6 @@
 	return 0;
 }
 
-/**
- * hw_wait_reg: wait the register value
- *
- * Sometimes, it needs to wait register value before going on.
- * Eg, when switch to device mode, the vbus value should be lower
- * than OTGSC_BSV before connects to host.
- *
- * @ci: the controller
- * @reg: register index
- * @mask: mast bit
- * @value: the bit value to wait
- * @timeout_ms: timeout in millisecond
- *
- * This function returns an error code if timeout
- */
-int hw_wait_reg(struct ci_hdrc *ci, enum ci_hw_regs reg, u32 mask,
-				u32 value, unsigned int timeout_ms)
-{
-	unsigned long elapse = jiffies + msecs_to_jiffies(timeout_ms);
-
-	while (hw_read(ci, reg, mask) != value) {
-		if (time_after(jiffies, elapse)) {
-			dev_err(ci->dev, "timeout waiting for %08x in %d\n",
-					mask, reg);
-			return -ETIMEDOUT;
-		}
-		msleep(20);
-	}
-
-	return 0;
-}
-
 static irqreturn_t ci_irq(int irq, void *data)
 {
 	struct ci_hdrc *ci = data;
diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
index 03b6743..0cf149e 100644
--- a/drivers/usb/chipidea/otg.c
+++ b/drivers/usb/chipidea/otg.c
@@ -44,12 +44,15 @@
 		else
 			val &= ~OTGSC_BSVIS;
 
-		cable->changed = false;
-
 		if (cable->state)
 			val |= OTGSC_BSV;
 		else
 			val &= ~OTGSC_BSV;
+
+		if (cable->enabled)
+			val |= OTGSC_BSVIE;
+		else
+			val &= ~OTGSC_BSVIE;
 	}
 
 	cable = &ci->platdata->id_extcon;
@@ -59,15 +62,18 @@
 		else
 			val &= ~OTGSC_IDIS;
 
-		cable->changed = false;
-
 		if (cable->state)
 			val |= OTGSC_ID;
 		else
 			val &= ~OTGSC_ID;
+
+		if (cable->enabled)
+			val |= OTGSC_IDIE;
+		else
+			val &= ~OTGSC_IDIE;
 	}
 
-	return val;
+	return val & mask;
 }
 
 /**
@@ -77,6 +83,36 @@
  */
 void hw_write_otgsc(struct ci_hdrc *ci, u32 mask, u32 data)
 {
+	struct ci_hdrc_cable *cable;
+
+	cable = &ci->platdata->vbus_extcon;
+	if (!IS_ERR(cable->edev)) {
+		if (data & mask & OTGSC_BSVIS)
+			cable->changed = false;
+
+		/* Don't enable vbus interrupt if using external notifier */
+		if (data & mask & OTGSC_BSVIE) {
+			cable->enabled = true;
+			data &= ~OTGSC_BSVIE;
+		} else if (mask & OTGSC_BSVIE) {
+			cable->enabled = false;
+		}
+	}
+
+	cable = &ci->platdata->id_extcon;
+	if (!IS_ERR(cable->edev)) {
+		if (data & mask & OTGSC_IDIS)
+			cable->changed = false;
+
+		/* Don't enable id interrupt if using external notifier */
+		if (data & mask & OTGSC_IDIE) {
+			cable->enabled = true;
+			data &= ~OTGSC_IDIE;
+		} else if (mask & OTGSC_IDIE) {
+			cable->enabled = false;
+		}
+	}
+
 	hw_write(ci, OP_OTGSC, mask | OTGSC_INT_STATUS_BITS, data);
 }
 
@@ -104,7 +140,31 @@
 		usb_gadget_vbus_disconnect(&ci->gadget);
 }
 
-#define CI_VBUS_STABLE_TIMEOUT_MS 5000
+/**
+ * When we switch to device mode, the vbus value should be lower
+ * than OTGSC_BSV before connecting to host.
+ *
+ * @ci: the controller
+ *
+ * This function returns an error code if timeout
+ */
+static int hw_wait_vbus_lower_bsv(struct ci_hdrc *ci)
+{
+	unsigned long elapse = jiffies + msecs_to_jiffies(5000);
+	u32 mask = OTGSC_BSV;
+
+	while (hw_read_otgsc(ci, mask)) {
+		if (time_after(jiffies, elapse)) {
+			dev_err(ci->dev, "timeout waiting for %08x in OTGSC\n",
+					mask);
+			return -ETIMEDOUT;
+		}
+		msleep(20);
+	}
+
+	return 0;
+}
+
 static void ci_handle_id_switch(struct ci_hdrc *ci)
 {
 	enum ci_role role = ci_otg_role(ci);
@@ -116,9 +176,11 @@
 		ci_role_stop(ci);
 
 		if (role == CI_ROLE_GADGET)
-			/* wait vbus lower than OTGSC_BSV */
-			hw_wait_reg(ci, OP_OTGSC, OTGSC_BSV, 0,
-					CI_VBUS_STABLE_TIMEOUT_MS);
+			/*
+			 * wait vbus lower than OTGSC_BSV before connecting
+			 * to host
+			 */
+			hw_wait_vbus_lower_bsv(ci);
 
 		ci_role_start(ci, role);
 	}
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index 4c0fa0b..f6759c6 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -455,7 +455,7 @@
 	dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG);
 
 	if (dwc2_iddig_filter_enabled(hsotg))
-		usleep_range(100000, 110000);
+		msleep(100);
 }
 
 /*
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 217b7ca..25b2cdd 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -180,6 +180,15 @@
 	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
 };
 
+static struct usb_ss_ep_comp_descriptor ss_bulk_comp_desc = {
+	.bLength =		sizeof(ss_bulk_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+};
+
 /* B.6.2  Class-specific MS Bulk IN Endpoint Descriptor */
 static struct usb_ms_endpoint_descriptor_16 ms_in_desc = {
 	/* .bLength =		DYNAMIC */
@@ -846,6 +855,7 @@
 static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
 {
 	struct usb_descriptor_header **midi_function;
+	struct usb_descriptor_header **midi_ss_function;
 	struct usb_midi_in_jack_descriptor jack_in_ext_desc[MAX_PORTS];
 	struct usb_midi_in_jack_descriptor jack_in_emb_desc[MAX_PORTS];
 	struct usb_midi_out_jack_descriptor_1 jack_out_ext_desc[MAX_PORTS];
@@ -853,7 +863,7 @@
 	struct usb_composite_dev *cdev = c->cdev;
 	struct f_midi *midi = func_to_midi(f);
 	struct usb_string *us;
-	int status, n, jack = 1, i = 0;
+	int status, n, jack = 1, i = 0, j = 0;
 
 	midi->gadget = cdev->gadget;
 	tasklet_init(&midi->tasklet, f_midi_in_tasklet, (unsigned long) midi);
@@ -894,11 +904,20 @@
 	if (!midi->out_ep)
 		goto fail;
 
+	/* allocate temporary function list for ss */
+	midi_ss_function = kcalloc((MAX_PORTS * 4) + 11,
+				sizeof(*midi_ss_function), GFP_KERNEL);
+	if (!midi_ss_function) {
+		status = -ENOMEM;
+		goto fail;
+	}
+
 	/* allocate temporary function list */
 	midi_function = kcalloc((MAX_PORTS * 4) + 9, sizeof(*midi_function),
 				GFP_KERNEL);
 	if (!midi_function) {
 		status = -ENOMEM;
+		kfree(midi_ss_function);
 		goto fail;
 	}
 
@@ -912,6 +931,12 @@
 	midi_function[i++] = (struct usb_descriptor_header *) &ac_interface_desc;
 	midi_function[i++] = (struct usb_descriptor_header *) &ac_header_desc;
 	midi_function[i++] = (struct usb_descriptor_header *) &ms_interface_desc;
+	midi_ss_function[j++] =
+			(struct usb_descriptor_header *) &ac_interface_desc;
+	midi_ss_function[j++] =
+			(struct usb_descriptor_header *) &ac_header_desc;
+	midi_ss_function[j++] =
+			(struct usb_descriptor_header *) &ms_interface_desc;
 
 	/* calculate the header's wTotalLength */
 	n = USB_DT_MS_HEADER_SIZE
@@ -920,6 +945,8 @@
 	ms_header_desc.wTotalLength = cpu_to_le16(n);
 
 	midi_function[i++] = (struct usb_descriptor_header *) &ms_header_desc;
+	midi_ss_function[j++] =
+			(struct usb_descriptor_header *) &ms_header_desc;
 
 	/* configure the external IN jacks, each linked to an embedded OUT jack */
 	for (n = 0; n < midi->in_ports; n++) {
@@ -933,6 +960,7 @@
 		in_ext->bJackID			= jack++;
 		in_ext->iJack			= 0;
 		midi_function[i++] = (struct usb_descriptor_header *) in_ext;
+		midi_ss_function[j++] = (struct usb_descriptor_header *) in_ext;
 
 		out_emb->bLength		= USB_DT_MIDI_OUT_SIZE(1);
 		out_emb->bDescriptorType	= USB_DT_CS_INTERFACE;
@@ -944,6 +972,8 @@
 		out_emb->pins[0].baSourceID	= in_ext->bJackID;
 		out_emb->iJack			= 0;
 		midi_function[i++] = (struct usb_descriptor_header *) out_emb;
+		midi_ss_function[j++] =
+				(struct usb_descriptor_header *) out_emb;
 
 		/* link it to the endpoint */
 		ms_in_desc.baAssocJackID[n] = out_emb->bJackID;
@@ -961,6 +991,7 @@
 		in_emb->bJackID			= jack++;
 		in_emb->iJack			= 0;
 		midi_function[i++] = (struct usb_descriptor_header *) in_emb;
+		midi_ss_function[j++] = (struct usb_descriptor_header *) in_emb;
 
 		out_ext->bLength =		USB_DT_MIDI_OUT_SIZE(1);
 		out_ext->bDescriptorType =	USB_DT_CS_INTERFACE;
@@ -972,6 +1003,8 @@
 		out_ext->pins[0].baSourceID =	in_emb->bJackID;
 		out_ext->pins[0].baSourcePin =	1;
 		midi_function[i++] = (struct usb_descriptor_header *) out_ext;
+		midi_ss_function[j++] =
+				(struct usb_descriptor_header *) out_ext;
 
 		/* link it to the endpoint */
 		ms_out_desc.baAssocJackID[n] = in_emb->bJackID;
@@ -991,6 +1024,16 @@
 	midi_function[i++] = (struct usb_descriptor_header *) &ms_in_desc;
 	midi_function[i++] = NULL;
 
+	midi_ss_function[j++] = (struct usb_descriptor_header *) &bulk_out_desc;
+	midi_ss_function[j++] =
+			(struct usb_descriptor_header *) &ss_bulk_comp_desc;
+	midi_ss_function[j++] = (struct usb_descriptor_header *) &ms_out_desc;
+	midi_ss_function[j++] = (struct usb_descriptor_header *) &bulk_in_desc;
+	midi_ss_function[j++] =
+			(struct usb_descriptor_header *) &ss_bulk_comp_desc;
+	midi_ss_function[j++] = (struct usb_descriptor_header *) &ms_in_desc;
+	midi_ss_function[j++] = NULL;
+
 	/*
 	 * support all relevant hardware speeds... we expect that when
 	 * hardware is dual speed, all bulk-capable endpoints work at
@@ -1009,13 +1052,23 @@
 			goto fail_f_midi;
 	}
 
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		bulk_in_desc.wMaxPacketSize = cpu_to_le16(1024);
+		bulk_out_desc.wMaxPacketSize = cpu_to_le16(1024);
+		f->ss_descriptors = usb_copy_descriptors(midi_ss_function);
+		if (!f->ss_descriptors)
+			goto fail_f_midi;
+	}
+
 	kfree(midi_function);
+	kfree(midi_ss_function);
 
 	return 0;
 
 fail_f_midi:
 	kfree(midi_function);
 	usb_free_descriptors(f->hs_descriptors);
+	kfree(midi_ss_function);
 fail:
 	f_midi_unregister_card(midi);
 fail_register:
diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
index 42e5b66..7a603f6 100644
--- a/drivers/usb/host/ehci-exynos.c
+++ b/drivers/usb/host/ehci-exynos.c
@@ -77,10 +77,12 @@
 		if (IS_ERR(phy)) {
 			ret = PTR_ERR(phy);
 			if (ret == -EPROBE_DEFER) {
+				of_node_put(child);
 				return ret;
 			} else if (ret != -ENOSYS && ret != -ENODEV) {
 				dev_err(dev,
 					"Error retrieving usb2 phy: %d\n", ret);
+				of_node_put(child);
 				return ret;
 			}
 		}
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
index 2cd105b..6865b91 100644
--- a/drivers/usb/host/ohci-exynos.c
+++ b/drivers/usb/host/ohci-exynos.c
@@ -66,10 +66,12 @@
 		if (IS_ERR(phy)) {
 			ret = PTR_ERR(phy);
 			if (ret == -EPROBE_DEFER) {
+				of_node_put(child);
 				return ret;
 			} else if (ret != -ENOSYS && ret != -ENODEV) {
 				dev_err(dev,
 					"Error retrieving usb2 phy: %d\n", ret);
+				of_node_put(child);
 				return ret;
 			}
 		}
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 6012da3..918f659 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -435,8 +435,8 @@
 #define PORT_L1_TIMEOUT(p)(((p) & 0xff) << 2)
 #define PORT_BESLD(p)(((p) & 0xf) << 10)
 
-/* use 512 microseconds as USB2 LPM L1 default timeout. */
-#define XHCI_L1_TIMEOUT		512
+/* use 128 microseconds as USB2 LPM L1 default timeout. */
+#define XHCI_L1_TIMEOUT		128
 
 /* Set default HIRD/BESL value to 4 (350/400us) for USB2 L1 LPM resume latency.
  * Safe to use with mixed HIRD and BESL systems (host and device) and is used
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index 7812052..754fc3e 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -373,23 +373,29 @@
 		dev_dbg(&port->dev,
 			"%s - usb_serial_generic_open failed: %d\n",
 			__func__, result);
-		goto err_out;
+		goto err_free;
 	}
 
 	/* remove any data still left: also clears error state */
 	ark3116_read_reg(serial, UART_RX, buf);
 
 	/* read modem status */
-	priv->msr = ark3116_read_reg(serial, UART_MSR, buf);
+	result = ark3116_read_reg(serial, UART_MSR, buf);
+	if (result < 0)
+		goto err_close;
+	priv->msr = *buf;
+
 	/* read line status */
-	priv->lsr = ark3116_read_reg(serial, UART_LSR, buf);
+	result = ark3116_read_reg(serial, UART_LSR, buf);
+	if (result < 0)
+		goto err_close;
+	priv->lsr = *buf;
 
 	result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
 	if (result) {
 		dev_err(&port->dev, "submit irq_in urb failed %d\n",
 			result);
-		ark3116_close(port);
-		goto err_out;
+		goto err_close;
 	}
 
 	/* activate interrupts */
@@ -402,8 +408,15 @@
 	if (tty)
 		ark3116_set_termios(tty, port, NULL);
 
-err_out:
 	kfree(buf);
+
+	return 0;
+
+err_close:
+	usb_serial_generic_close(port);
+err_free:
+	kfree(buf);
+
 	return result;
 }
 
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 30bf0f5..7ab3235 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -1398,25 +1398,30 @@
 {
 	struct usb_serial_port *port = urb->context;
 	struct digi_port *priv = usb_get_serial_port_data(port);
-	int opcode = ((unsigned char *)urb->transfer_buffer)[0];
-	int len = ((unsigned char *)urb->transfer_buffer)[1];
-	int port_status = ((unsigned char *)urb->transfer_buffer)[2];
-	unsigned char *data = ((unsigned char *)urb->transfer_buffer) + 3;
+	unsigned char *buf = urb->transfer_buffer;
+	int opcode;
+	int len;
+	int port_status;
+	unsigned char *data;
 	int flag, throttled;
-	int status = urb->status;
-
-	/* do not process callbacks on closed ports */
-	/* but do continue the read chain */
-	if (urb->status == -ENOENT)
-		return 0;
 
 	/* short/multiple packet check */
+	if (urb->actual_length < 2) {
+		dev_warn(&port->dev, "short packet received\n");
+		return -1;
+	}
+
+	opcode = buf[0];
+	len = buf[1];
+
 	if (urb->actual_length != len + 2) {
-		dev_err(&port->dev, "%s: INCOMPLETE OR MULTIPLE PACKET, "
-			"status=%d, port=%d, opcode=%d, len=%d, "
-			"actual_length=%d, status=%d\n", __func__, status,
-			priv->dp_port_num, opcode, len, urb->actual_length,
-			port_status);
+		dev_err(&port->dev, "malformed packet received: port=%d, opcode=%d, len=%d, actual_length=%u\n",
+			priv->dp_port_num, opcode, len, urb->actual_length);
+		return -1;
+	}
+
+	if (opcode == DIGI_CMD_RECEIVE_DATA && len < 1) {
+		dev_err(&port->dev, "malformed data packet received\n");
 		return -1;
 	}
 
@@ -1430,6 +1435,9 @@
 
 	/* receive data */
 	if (opcode == DIGI_CMD_RECEIVE_DATA) {
+		port_status = buf[2];
+		data = &buf[3];
+
 		/* get flag from port_status */
 		flag = 0;
 
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 99a0a5f..d8d13ee 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1439,10 +1439,13 @@
 			     FTDI_SIO_GET_LATENCY_TIMER_REQUEST_TYPE,
 			     0, priv->interface,
 			     buf, 1, WDR_TIMEOUT);
-	if (rv < 0)
+	if (rv < 1) {
 		dev_err(&port->dev, "Unable to read latency timer: %i\n", rv);
-	else
+		if (rv >= 0)
+			rv = -EIO;
+	} else {
 		priv->latency = buf[0];
+	}
 
 	kfree(buf);
 
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 36dfe99..464db17 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -492,20 +492,24 @@
 	int result;
 	struct usb_serial *serial = ep->serial;
 	struct edgeport_product_info *product_info = &ep->product_info;
-	struct edge_compatibility_descriptor *epic = &ep->epic_descriptor;
+	struct edge_compatibility_descriptor *epic;
 	struct edge_compatibility_bits *bits;
 	struct device *dev = &serial->dev->dev;
 
 	ep->is_epic = 0;
+
+	epic = kmalloc(sizeof(*epic), GFP_KERNEL);
+	if (!epic)
+		return -ENOMEM;
+
 	result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
 				 USB_REQUEST_ION_GET_EPIC_DESC,
 				 0xC0, 0x00, 0x00,
-				 &ep->epic_descriptor,
-				 sizeof(struct edge_compatibility_descriptor),
+				 epic, sizeof(*epic),
 				 300);
-
-	if (result > 0) {
+	if (result == sizeof(*epic)) {
 		ep->is_epic = 1;
+		memcpy(&ep->epic_descriptor, epic, sizeof(*epic));
 		memset(product_info, 0, sizeof(struct edgeport_product_info));
 
 		product_info->NumPorts = epic->NumPorts;
@@ -534,8 +538,16 @@
 		dev_dbg(dev, "  IOSPWriteLCR     : %s\n", bits->IOSPWriteLCR	? "TRUE": "FALSE");
 		dev_dbg(dev, "  IOSPSetBaudRate  : %s\n", bits->IOSPSetBaudRate	? "TRUE": "FALSE");
 		dev_dbg(dev, "  TrueEdgeport     : %s\n", bits->TrueEdgeport	? "TRUE": "FALSE");
+
+		result = 0;
+	} else if (result >= 0) {
+		dev_warn(&serial->interface->dev, "short epic descriptor received: %d\n",
+			 result);
+		result = -EIO;
 	}
 
+	kfree(epic);
+
 	return result;
 }
 
@@ -2093,8 +2105,7 @@
  * rom_read
  *	reads a number of bytes from the Edgeport device starting at the given
  *	address.
- *	If successful returns the number of bytes read, otherwise it returns
- *	a negative error number of the problem.
+ *	Returns zero on success or a negative error number.
  ****************************************************************************/
 static int rom_read(struct usb_serial *serial, __u16 extAddr,
 					__u16 addr, __u16 length, __u8 *data)
@@ -2119,12 +2130,17 @@
 					USB_REQUEST_ION_READ_ROM,
 					0xC0, addr, extAddr, transfer_buffer,
 					current_length, 300);
-		if (result < 0)
+		if (result < current_length) {
+			if (result >= 0)
+				result = -EIO;
 			break;
+		}
 		memcpy(data, transfer_buffer, current_length);
 		length -= current_length;
 		addr += current_length;
 		data += current_length;
+
+		result = 0;
 	}
 
 	kfree(transfer_buffer);
@@ -2578,9 +2594,10 @@
 				EDGE_MANUF_DESC_LEN,
 				(__u8 *)(&edge_serial->manuf_descriptor));
 
-	if (response < 1)
-		dev_err(dev, "error in getting manufacturer descriptor\n");
-	else {
+	if (response < 0) {
+		dev_err(dev, "error in getting manufacturer descriptor: %d\n",
+				response);
+	} else {
 		char string[30];
 		dev_dbg(dev, "**Manufacturer Descriptor\n");
 		dev_dbg(dev, "  RomSize:        %dK\n",
@@ -2637,9 +2654,10 @@
 				EDGE_BOOT_DESC_LEN,
 				(__u8 *)(&edge_serial->boot_descriptor));
 
-	if (response < 1)
-		dev_err(dev, "error in getting boot descriptor\n");
-	else {
+	if (response < 0) {
+		dev_err(dev, "error in getting boot descriptor: %d\n",
+				response);
+	} else {
 		dev_dbg(dev, "**Boot Descriptor:\n");
 		dev_dbg(dev, "  BootCodeLength: %d\n",
 			le16_to_cpu(edge_serial->boot_descriptor.BootCodeLength));
@@ -2782,7 +2800,7 @@
 	dev_info(&serial->dev->dev, "%s detected\n", edge_serial->name);
 
 	/* Read the epic descriptor */
-	if (get_epic_descriptor(edge_serial) <= 0) {
+	if (get_epic_descriptor(edge_serial) < 0) {
 		/* memcpy descriptor to Supports structures */
 		memcpy(&edge_serial->epic_descriptor.Supports, descriptor,
 		       sizeof(struct edge_compatibility_bits));
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 83523fc..d2dab2a 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -139,6 +139,7 @@
 {
 	struct usb_serial_port *port = urb->context;
 	unsigned char *data = urb->transfer_buffer;
+	unsigned int len = urb->actual_length;
 	int retval;
 	int status = urb->status;
 	struct keyspan_pda_private *priv;
@@ -159,18 +160,26 @@
 		goto exit;
 	}
 
+	if (len < 1) {
+		dev_warn(&port->dev, "short message received\n");
+		goto exit;
+	}
+
 	/* see if the message is data or a status interrupt */
 	switch (data[0]) {
 	case 0:
 		 /* rest of message is rx data */
-		if (urb->actual_length) {
-			tty_insert_flip_string(&port->port, data + 1,
-						urb->actual_length - 1);
-			tty_flip_buffer_push(&port->port);
-		}
+		if (len < 2)
+			break;
+		tty_insert_flip_string(&port->port, data + 1, len - 1);
+		tty_flip_buffer_push(&port->port);
 		break;
 	case 1:
 		/* status interrupt */
+		if (len < 3) {
+			dev_warn(&port->dev, "short interrupt message received\n");
+			break;
+		}
 		dev_dbg(&port->dev, "rx int, d1=%d, d2=%d\n", data[1], data[2]);
 		switch (data[1]) {
 		case 1: /* modemline change */
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index 8856553..edbc81f 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -322,8 +322,12 @@
 			MCT_U232_GET_REQUEST_TYPE,
 			0, 0, buf, MCT_U232_GET_MODEM_STAT_SIZE,
 			WDR_TIMEOUT);
-	if (rc < 0) {
+	if (rc < MCT_U232_GET_MODEM_STAT_SIZE) {
 		dev_err(&port->dev, "Get MODEM STATus failed (error = %d)\n", rc);
+
+		if (rc >= 0)
+			rc = -EIO;
+
 		*msr = 0;
 	} else {
 		*msr = buf[0];
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index bd1a130..1d17779 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -188,22 +188,22 @@
 }
 
 
-static inline int qt2_getdevice(struct usb_device *dev, u8 *data)
-{
-	return usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
-			       QT_SET_GET_DEVICE, 0xc0, 0, 0,
-			       data, 3, QT2_USB_TIMEOUT);
-}
-
 static inline int qt2_getregister(struct usb_device *dev,
 				  u8 uart,
 				  u8 reg,
 				  u8 *data)
 {
-	return usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
-			       QT_SET_GET_REGISTER, 0xc0, reg,
-			       uart, data, sizeof(*data), QT2_USB_TIMEOUT);
+	int ret;
 
+	ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+			      QT_SET_GET_REGISTER, 0xc0, reg,
+			      uart, data, sizeof(*data), QT2_USB_TIMEOUT);
+	if (ret < sizeof(*data)) {
+		if (ret >= 0)
+			ret = -EIO;
+	}
+
+	return ret;
 }
 
 static inline int qt2_setregister(struct usb_device *dev,
@@ -372,9 +372,11 @@
 				 0xc0, 0,
 				 device_port, data, 2, QT2_USB_TIMEOUT);
 
-	if (status < 0) {
+	if (status < 2) {
 		dev_err(&port->dev, "%s - open port failed %i\n", __func__,
 			status);
+		if (status >= 0)
+			status = -EIO;
 		kfree(data);
 		return status;
 	}
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
index 70a098d..886e129 100644
--- a/drivers/usb/serial/ssu100.c
+++ b/drivers/usb/serial/ssu100.c
@@ -80,9 +80,17 @@
 
 static inline int ssu100_getdevice(struct usb_device *dev, u8 *data)
 {
-	return usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
-			       QT_SET_GET_DEVICE, 0xc0, 0, 0,
-			       data, 3, 300);
+	int ret;
+
+	ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+			      QT_SET_GET_DEVICE, 0xc0, 0, 0,
+			      data, 3, 300);
+	if (ret < 3) {
+		if (ret >= 0)
+			ret = -EIO;
+	}
+
+	return ret;
 }
 
 static inline int ssu100_getregister(struct usb_device *dev,
@@ -90,10 +98,17 @@
 				     unsigned short reg,
 				     u8 *data)
 {
-	return usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
-			       QT_SET_GET_REGISTER, 0xc0, reg,
-			       uart, data, sizeof(*data), 300);
+	int ret;
 
+	ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+			      QT_SET_GET_REGISTER, 0xc0, reg,
+			      uart, data, sizeof(*data), 300);
+	if (ret < sizeof(*data)) {
+		if (ret >= 0)
+			ret = -EIO;
+	}
+
+	return ret;
 }
 
 
@@ -289,8 +304,10 @@
 				 QT_OPEN_CLOSE_CHANNEL,
 				 QT_TRANSFER_IN, 0x01,
 				 0, data, 2, 300);
-	if (result < 0) {
+	if (result < 2) {
 		dev_dbg(&port->dev, "%s - open failed %i\n", __func__, result);
+		if (result >= 0)
+			result = -EIO;
 		kfree(data);
 		return result;
 	}
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index bdbddbc..6bcb874b 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1556,13 +1556,10 @@
 		(USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT),
 		value, moduleid, data, size, 1000);
 
-	if (status == size)
-		status = 0;
+	if (status < 0)
+		return status;
 
-	if (status > 0)
-		status = -ECOMM;
-
-	return status;
+	return 0;
 }
 
 
@@ -1578,8 +1575,7 @@
 
 	if (status == size)
 		status = 0;
-
-	if (status > 0)
+	else if (status >= 0)
 		status = -ECOMM;
 
 	return status;
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 9ecfcdc..d5dbdb9 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1314,6 +1314,9 @@
 	if (!VALID_EVTCHN(evtchn))
 		return -1;
 
+	if (!xen_support_evtchn_rebind())
+		return -1;
+
 	/* Send future instances of this interrupt to other vcpu. */
 	bind_vcpu.port = evtchn;
 	bind_vcpu.vcpu = xen_vcpu_nr(tcpu);
@@ -1647,15 +1650,20 @@
 {
 	int rc;
 	uint64_t callback_via;
-
-	callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
-	rc = xen_set_callback_via(callback_via);
-	BUG_ON(rc);
-	pr_info("Xen HVM callback vector for event delivery is enabled\n");
-	/* in the restore case the vector has already been allocated */
-	if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors))
-		alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
-				xen_hvm_callback_vector);
+	if (xen_have_vector_callback) {
+		callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
+		rc = xen_set_callback_via(callback_via);
+		if (rc) {
+			pr_err("Request for Xen HVM callback vector failed\n");
+			xen_have_vector_callback = 0;
+			return;
+		}
+		pr_info("Xen HVM callback vector for event delivery is enabled\n");
+		/* in the restore case the vector has already been allocated */
+		if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors))
+			alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
+					xen_hvm_callback_vector);
+	}
 }
 #else
 void xen_callback_vector(void) {}
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index b59c9455..cf96666 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -42,6 +42,7 @@
 static unsigned long platform_mmio;
 static unsigned long platform_mmio_alloc;
 static unsigned long platform_mmiolen;
+static uint64_t callback_via;
 
 static unsigned long alloc_xen_mmio(unsigned long len)
 {
@@ -54,6 +55,51 @@
 	return addr;
 }
 
+static uint64_t get_callback_via(struct pci_dev *pdev)
+{
+	u8 pin;
+	int irq;
+
+	irq = pdev->irq;
+	if (irq < 16)
+		return irq; /* ISA IRQ */
+
+	pin = pdev->pin;
+
+	/* We don't know the GSI. Specify the PCI INTx line instead. */
+	return ((uint64_t)0x01 << 56) | /* PCI INTx identifier */
+		((uint64_t)pci_domain_nr(pdev->bus) << 32) |
+		((uint64_t)pdev->bus->number << 16) |
+		((uint64_t)(pdev->devfn & 0xff) << 8) |
+		((uint64_t)(pin - 1) & 3);
+}
+
+static irqreturn_t do_hvm_evtchn_intr(int irq, void *dev_id)
+{
+	xen_hvm_evtchn_do_upcall();
+	return IRQ_HANDLED;
+}
+
+static int xen_allocate_irq(struct pci_dev *pdev)
+{
+	return request_irq(pdev->irq, do_hvm_evtchn_intr,
+			IRQF_NOBALANCING | IRQF_TRIGGER_RISING,
+			"xen-platform-pci", pdev);
+}
+
+static int platform_pci_resume(struct pci_dev *pdev)
+{
+	int err;
+	if (xen_have_vector_callback)
+		return 0;
+	err = xen_set_callback_via(callback_via);
+	if (err) {
+		dev_err(&pdev->dev, "platform_pci_resume failure!\n");
+		return err;
+	}
+	return 0;
+}
+
 static int platform_pci_probe(struct pci_dev *pdev,
 			      const struct pci_device_id *ent)
 {
@@ -92,6 +138,21 @@
 	platform_mmio = mmio_addr;
 	platform_mmiolen = mmio_len;
 
+	if (!xen_have_vector_callback) {
+		ret = xen_allocate_irq(pdev);
+		if (ret) {
+			dev_warn(&pdev->dev, "request_irq failed err=%d\n", ret);
+			goto out;
+		}
+		callback_via = get_callback_via(pdev);
+		ret = xen_set_callback_via(callback_via);
+		if (ret) {
+			dev_warn(&pdev->dev, "Unable to set the evtchn callback "
+					 "err=%d\n", ret);
+			goto out;
+		}
+	}
+
 	max_nr_gframes = gnttab_max_grant_frames();
 	grant_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
 	ret = gnttab_setup_auto_xlat_frames(grant_frames);
@@ -123,6 +184,9 @@
 	.name =           DRV_NAME,
 	.probe =          platform_pci_probe,
 	.id_table =       platform_pci_tbl,
+#ifdef CONFIG_PM
+	.resume_early =   platform_pci_resume,
+#endif
 };
 
 static int __init platform_pci_init(void)
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 092a2eed..9ad527f 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1165,7 +1165,6 @@
 
 	if (disk->fops->revalidate_disk)
 		ret = disk->fops->revalidate_disk(disk);
-	blk_integrity_revalidate(disk);
 	bdev = bdget_disk(disk, 0);
 	if (!bdev)
 		return ret;
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 013c6a5..7e0c002 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1405,6 +1405,13 @@
 		return 1;
 	}
 
+	if (le32_to_cpu(raw_super->segment_count) > F2FS_MAX_SEGMENT) {
+		f2fs_msg(sb, KERN_INFO,
+			"Invalid segment count (%u)",
+			le32_to_cpu(raw_super->segment_count));
+		return 1;
+	}
+
 	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
 	if (sanity_check_area_boundary(sbi, bh))
 		return 1;
diff --git a/fs/sdcardfs/dentry.c b/fs/sdcardfs/dentry.c
index afd9771..ae2b4ba 100644
--- a/fs/sdcardfs/dentry.c
+++ b/fs/sdcardfs/dentry.c
@@ -60,6 +60,14 @@
 	lower_dentry = lower_path.dentry;
 	lower_cur_parent_dentry = dget_parent(lower_dentry);
 
+	if ((lower_dentry->d_flags & DCACHE_OP_REVALIDATE)) {
+		err = lower_dentry->d_op->d_revalidate(lower_dentry, flags);
+		if (err == 0) {
+			d_drop(dentry);
+			goto out;
+		}
+	}
+
 	spin_lock(&lower_dentry->d_lock);
 	if (d_unhashed(lower_dentry)) {
 		spin_unlock(&lower_dentry->d_lock);
diff --git a/fs/sdcardfs/derived_perm.c b/fs/sdcardfs/derived_perm.c
index 2964527..5a0ef38 100644
--- a/fs/sdcardfs/derived_perm.c
+++ b/fs/sdcardfs/derived_perm.c
@@ -215,16 +215,16 @@
 		gid = AID_MEDIA_OBB;
 		break;
 	case PERM_ANDROID_PACKAGE:
-		if (info->d_uid != 0)
+		if (uid_is_app(info->d_uid))
 			gid = multiuser_get_ext_gid(info->d_uid);
 		else
-			gid = multiuser_get_uid(info->userid, uid);
+			gid = multiuser_get_uid(info->userid, AID_MEDIA_RW);
 		break;
 	case PERM_ANDROID_PACKAGE_CACHE:
-		if (info->d_uid != 0)
+		if (uid_is_app(info->d_uid))
 			gid = multiuser_get_ext_cache_gid(info->d_uid);
 		else
-			gid = multiuser_get_uid(info->userid, uid);
+			gid = multiuser_get_uid(info->userid, AID_MEDIA_RW);
 		break;
 	case PERM_PRE_ROOT:
 	default:
diff --git a/fs/sdcardfs/file.c b/fs/sdcardfs/file.c
index 1f6921e..6076c34 100644
--- a/fs/sdcardfs/file.c
+++ b/fs/sdcardfs/file.c
@@ -358,9 +358,12 @@
 	get_file(lower_file); /* prevent lower_file from being released */
 	iocb->ki_filp = lower_file;
 	err = lower_file->f_op->read_iter(iocb, iter);
-	/* ? wait IO finish to update atime as ecryptfs ? */
 	iocb->ki_filp = file;
 	fput(lower_file);
+	/* update upper inode atime as needed */
+	if (err >= 0 || err == -EIOCBQUEUED)
+		fsstack_copy_attr_atime(file->f_path.dentry->d_inode,
+					file_inode(lower_file));
 out:
 	return err;
 }
@@ -384,6 +387,13 @@
 	err = lower_file->f_op->write_iter(iocb, iter);
 	iocb->ki_filp = file;
 	fput(lower_file);
+	/* update upper inode times/sizes as needed */
+	if (err >= 0 || err == -EIOCBQUEUED) {
+		fsstack_copy_inode_size(file->f_path.dentry->d_inode,
+					file_inode(lower_file));
+		fsstack_copy_attr_times(file->f_path.dentry->d_inode,
+					file_inode(lower_file));
+	}
 out:
 	return err;
 }
diff --git a/fs/sdcardfs/inode.c b/fs/sdcardfs/inode.c
index 92afceb..4d558b8 100644
--- a/fs/sdcardfs/inode.c
+++ b/fs/sdcardfs/inode.c
@@ -622,11 +622,8 @@
 	struct inode tmp;
 	struct inode *top = grab_top(SDCARDFS_I(inode));
 
-	if (!top) {
-		release_top(SDCARDFS_I(inode));
-		WARN(1, "Top value was null!\n");
+	if (!top)
 		return -EINVAL;
-	}
 
 	/*
 	 * Permission check on sdcardfs inode.
@@ -701,10 +698,8 @@
 	inode = d_inode(dentry);
 	top = grab_top(SDCARDFS_I(inode));
 
-	if (!top) {
-		release_top(SDCARDFS_I(inode));
+	if (!top)
 		return -EINVAL;
-	}
 
 	/*
 	 * Permission check on sdcardfs inode.
diff --git a/fs/sdcardfs/lookup.c b/fs/sdcardfs/lookup.c
index 19154b7..706329d 100644
--- a/fs/sdcardfs/lookup.c
+++ b/fs/sdcardfs/lookup.c
@@ -366,19 +366,22 @@
 	/* instatiate a new negative dentry */
 	dname.name = name->name;
 	dname.len = name->len;
-	dname.hash = full_name_hash(lower_dir_dentry, dname.name, dname.len);
-	lower_dentry = d_lookup(lower_dir_dentry, &dname);
-	if (lower_dentry)
-		goto setup_lower;
 
-	lower_dentry = d_alloc(lower_dir_dentry, &dname);
+	/* See if the low-level filesystem might want
+	 * to use its own hash
+	 */
+	lower_dentry = d_hash_and_lookup(lower_dir_dentry, &dname);
+	if (IS_ERR(lower_dentry))
+		return lower_dentry;
 	if (!lower_dentry) {
-		err = -ENOMEM;
+		/* We called vfs_path_lookup earlier, and did not get a negative
+		 * dentry then. Don't confuse the lower filesystem by forcing
+		 * one on it now...
+		 */
+		err = -ENOENT;
 		goto out;
 	}
-	d_add(lower_dentry, NULL); /* instantiate and hash */
 
-setup_lower:
 	lower_path.dentry = lower_dentry;
 	lower_path.mnt = mntget(lower_dir_mnt);
 	sdcardfs_set_lower_path(dentry, &lower_path);
diff --git a/fs/sdcardfs/multiuser.h b/fs/sdcardfs/multiuser.h
index d0c925c..85341e7 100644
--- a/fs/sdcardfs/multiuser.h
+++ b/fs/sdcardfs/multiuser.h
@@ -35,6 +35,13 @@
 	return (user_id * AID_USER_OFFSET) + (app_id % AID_USER_OFFSET);
 }
 
+static inline bool uid_is_app(uid_t uid)
+{
+	appid_t appid = uid % AID_USER_OFFSET;
+
+	return appid >= AID_APP_START && appid <= AID_APP_END;
+}
+
 static inline gid_t multiuser_get_ext_cache_gid(uid_t uid)
 {
 	return uid - AID_APP_START + AID_EXT_CACHE_GID_START;
diff --git a/fs/sdcardfs/super.c b/fs/sdcardfs/super.c
index a3393e9..8a9c9c7 100644
--- a/fs/sdcardfs/super.c
+++ b/fs/sdcardfs/super.c
@@ -192,9 +192,16 @@
 	return &i->vfs_inode;
 }
 
+static void i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+
+	kmem_cache_free(sdcardfs_inode_cachep, SDCARDFS_I(inode));
+}
+
 static void sdcardfs_destroy_inode(struct inode *inode)
 {
-	kmem_cache_free(sdcardfs_inode_cachep, SDCARDFS_I(inode));
+	call_rcu(&inode->i_rcu, i_callback);
 }
 
 /* sdcardfs inode cache constructor */
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
index e411e8e..a95d494 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -197,6 +197,7 @@
 #define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK			179
 #define GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK				180
 #define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK				181
+#define GCC_GPU_IREF_CLK					182
 
 /* GCC reset clocks */
 #define GCC_GPU_BCR						0
diff --git a/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h b/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h
index 6243588..915ac08 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h
@@ -63,37 +63,39 @@
 #define GCC_PCIE_AUX_CLK					45
 #define GCC_PCIE_AUX_PHY_CLK_SRC				46
 #define GCC_PCIE_CFG_AHB_CLK					47
-#define GCC_PCIE_MSTR_AXI_CLK					48
-#define GCC_PCIE_PHY_REFGEN_CLK					49
-#define GCC_PCIE_PHY_REFGEN_CLK_SRC				50
-#define GCC_PCIE_PIPE_CLK					51
-#define GCC_PCIE_SLEEP_CLK					52
-#define GCC_PCIE_SLV_AXI_CLK					53
-#define GCC_PCIE_SLV_Q2A_AXI_CLK				54
-#define GCC_PDM2_CLK						55
-#define GCC_PDM2_CLK_SRC					56
-#define GCC_PDM_AHB_CLK						57
-#define GCC_PDM_XO4_CLK						58
-#define GCC_PRNG_AHB_CLK					59
-#define GCC_SDCC1_AHB_CLK					60
-#define GCC_SDCC1_APPS_CLK					61
-#define GCC_SDCC1_APPS_CLK_SRC					62
-#define GCC_SPMI_FETCHER_AHB_CLK				63
-#define GCC_SPMI_FETCHER_CLK					64
-#define GCC_SPMI_FETCHER_CLK_SRC				65
-#define GCC_SYS_NOC_CPUSS_AHB_CLK				66
-#define GCC_USB30_MASTER_CLK					67
-#define GCC_USB30_MASTER_CLK_SRC				68
-#define GCC_USB30_MOCK_UTMI_CLK					69
-#define GCC_USB30_MOCK_UTMI_CLK_SRC				70
-#define GCC_USB30_SLEEP_CLK					71
-#define GCC_USB3_PHY_AUX_CLK					72
-#define GCC_USB3_PHY_AUX_CLK_SRC				73
-#define GCC_USB3_PHY_PIPE_CLK					74
-#define GCC_USB_PHY_CFG_AHB2PHY_CLK				75
-#define GCC_XO_DIV4_CLK						76
-#define GPLL0							77
-#define GPLL0_OUT_EVEN						78
+#define GCC_PCIE_0_CLKREF_EN					48
+#define GCC_PCIE_MSTR_AXI_CLK					49
+#define GCC_PCIE_PHY_REFGEN_CLK					50
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC				51
+#define GCC_PCIE_PIPE_CLK					52
+#define GCC_PCIE_SLEEP_CLK					53
+#define GCC_PCIE_SLV_AXI_CLK					54
+#define GCC_PCIE_SLV_Q2A_AXI_CLK				55
+#define GCC_PDM2_CLK						56
+#define GCC_PDM2_CLK_SRC					57
+#define GCC_PDM_AHB_CLK						58
+#define GCC_PDM_XO4_CLK						59
+#define GCC_PRNG_AHB_CLK					60
+#define GCC_SDCC1_AHB_CLK					61
+#define GCC_SDCC1_APPS_CLK					62
+#define GCC_SDCC1_APPS_CLK_SRC					63
+#define GCC_SPMI_FETCHER_AHB_CLK				64
+#define GCC_SPMI_FETCHER_CLK					65
+#define GCC_SPMI_FETCHER_CLK_SRC				66
+#define GCC_SYS_NOC_CPUSS_AHB_CLK				67
+#define GCC_USB30_MASTER_CLK					68
+#define GCC_USB30_MASTER_CLK_SRC				69
+#define GCC_USB30_MOCK_UTMI_CLK					70
+#define GCC_USB30_MOCK_UTMI_CLK_SRC				71
+#define GCC_USB30_SLEEP_CLK					72
+#define GCC_USB3_PRIM_CLKREF_CLK				73
+#define GCC_USB3_PHY_AUX_CLK					74
+#define GCC_USB3_PHY_AUX_CLK_SRC				75
+#define GCC_USB3_PHY_PIPE_CLK					76
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK				77
+#define GCC_XO_DIV4_CLK						78
+#define GPLL0							79
+#define GPLL0_OUT_EVEN						80
 
 /* GDSCs */
 #define PCIE_GDSC						0
@@ -118,6 +120,9 @@
 #define GCC_SDCC1_BCR						12
 #define GCC_SPMI_FETCHER_BCR					13
 #define GCC_USB30_BCR						14
-#define GCC_USB_PHY_CFG_AHB2PHY_BCR				15
+#define GCC_USB3_PHY_BCR					15
+#define GCC_USB3PHY_PHY_BCR					16
+#define GCC_QUSB2PHY_BCR					17
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR				18
 
 #endif
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 1325b23..094b152 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -430,6 +430,16 @@
  */
 int clk_set_flags(struct clk *clk, unsigned long flags);
 
+/**
+ * clk_list_frequency - enumerate supported frequencies
+ * @clk: clock source
+ * @index: identify frequency to list
+ *
+ * Returns a non-negative integer frequency for success
+ * or negative errno in case of failure.
+ */
+unsigned long clk_list_frequency(struct clk *clk, unsigned int index);
+
 #else /* !CONFIG_HAVE_CLK */
 
 static inline struct clk *clk_get(struct device *dev, const char *id)
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 32c5890..7f7e9a7 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -61,6 +61,10 @@
 		enum dma_data_direction dir, unsigned long attrs);
 void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 		enum dma_data_direction dir, unsigned long attrs);
+dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
+		size_t size, enum dma_data_direction dir, unsigned long attrs);
+void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir, unsigned long attrs);
 int iommu_dma_supported(struct device *dev, u64 mask);
 int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
 
diff --git a/include/linux/extcon/extcon-gpio.h b/include/linux/extcon/extcon-gpio.h
index 7cacafb..8c463a8 100644
--- a/include/linux/extcon/extcon-gpio.h
+++ b/include/linux/extcon/extcon-gpio.h
@@ -33,6 +33,8 @@
  * @irq_flags:		IRQ Flags (e.g., IRQF_TRIGGER_LOW).
  * @check_on_resume:	Boolean describing whether to check the state of gpio
  *			while resuming from sleep.
+ * @pctrl:		GPIO pinctrl handle
+ * @pctrl_default:	GPIO pinctrl default state handle
  */
 struct gpio_extcon_pdata {
 	unsigned int extcon_id;
@@ -42,6 +44,9 @@
 	unsigned long irq_flags;
 
 	bool check_on_resume;
+
+	struct pinctrl          *pctrl;
+	struct pinctrl_state    *pins_default;
 };
 
 #endif /* __EXTCON_GPIO_H__ */
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 422630b..e46e7d1 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -286,6 +286,12 @@
 #define SIT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_sit_entry))
 
 /*
+ * F2FS uses 4 bytes to represent block address. As a result, supported size of
+ * disk is 16 TB, which equals to 16 * 1024 * 1024 / 2 segments.
+ */
+#define F2FS_MAX_SEGMENT       ((16 * 1024 * 1024) / 2)
+
+/*
  * Note that f2fs_sit_entry->vblocks has the following bit-field information.
  * [15:10] : allocation type such as CURSEG_XXXX_TYPE
  * [9:0] : valid block count
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index e0341af..3c99fb6 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -731,11 +731,9 @@
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 extern void blk_integrity_add(struct gendisk *);
 extern void blk_integrity_del(struct gendisk *);
-extern void blk_integrity_revalidate(struct gendisk *);
 #else	/* CONFIG_BLK_DEV_INTEGRITY */
 static inline void blk_integrity_add(struct gendisk *disk) { }
 static inline void blk_integrity_del(struct gendisk *disk) { }
-static inline void blk_integrity_revalidate(struct gendisk *disk) { }
 #endif	/* CONFIG_BLK_DEV_INTEGRITY */
 
 #else /* CONFIG_BLOCK */
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index d3b4cf4..9523c60 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -91,12 +91,6 @@
  * @base:	pointer to the timer base (per cpu and per clock)
  * @state:	state information (See bit values above)
  * @is_rel:	Set if the timer was armed relative
- * @start_pid:  timer statistics field to store the pid of the task which
- *		started the timer
- * @start_site:	timer statistics field to store the site where the timer
- *		was started
- * @start_comm: timer statistics field to store the name of the process which
- *		started the timer
  *
  * The hrtimer structure must be initialized by hrtimer_init()
  */
@@ -107,11 +101,6 @@
 	struct hrtimer_clock_base	*base;
 	u8				state;
 	u8				is_rel;
-#ifdef CONFIG_TIMER_STATS
-	int				start_pid;
-	void				*start_site;
-	char				start_comm[16];
-#endif
 };
 
 /**
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index ecfc173..0e6a54c 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -18,6 +18,7 @@
 #include <linux/devfreq.h>
 #include <linux/fault-inject.h>
 #include <linux/blkdev.h>
+#include <linux/extcon.h>
 
 #include <linux/mmc/core.h>
 #include <linux/mmc/card.h>
@@ -594,6 +595,8 @@
 	 * actually disabling the clock from it's source.
 	 */
 	bool			card_clock_off;
+	struct extcon_dev	*extcon;
+	struct notifier_block card_detect_nb;
 
 #ifdef CONFIG_MMC_PERF_PROFILING
 	struct {
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
index 3945a8c..f2c8d13 100644
--- a/include/linux/mmc/slot-gpio.h
+++ b/include/linux/mmc/slot-gpio.h
@@ -29,5 +29,7 @@
 void mmc_gpio_set_cd_isr(struct mmc_host *host,
 			 irqreturn_t (*isr)(int irq, void *dev_id));
 void mmc_gpiod_request_cd_irq(struct mmc_host *host);
+void mmc_register_extcon(struct mmc_host *host);
+void mmc_unregister_extcon(struct mmc_host *host);
 
 #endif
diff --git a/include/linux/msm_gpi.h b/include/linux/msm_gpi.h
new file mode 100644
index 0000000..31eaf13
--- /dev/null
+++ b/include/linux/msm_gpi.h
@@ -0,0 +1,214 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_GPI_H_
+#define __MSM_GPI_H_
+
+struct __packed msm_gpi_tre {
+	u32 dword[4];
+};
+
+enum msm_gpi_tre_type {
+	MSM_GPI_TRE_INVALID = 0x00,
+	MSM_GPI_TRE_NOP = 0x01,
+	MSM_GPI_TRE_DMA_W_BUF = 0x10,
+	MSM_GPI_TRE_DMA_IMMEDIATE = 0x11,
+	MSM_GPI_TRE_DMA_W_SG_LIST = 0x12,
+	MSM_GPI_TRE_GO = 0x20,
+	MSM_GPI_TRE_CONFIG0 = 0x22,
+	MSM_GPI_TRE_CONFIG1 = 0x23,
+	MSM_GPI_TRE_CONFIG2 = 0x24,
+	MSM_GPI_TRE_CONFIG3 = 0x25,
+	MSM_GPI_TRE_LOCK = 0x30,
+	MSM_GPI_TRE_UNLOCK = 0x31,
+};
+
+#define MSM_GPI_TRE_TYPE(tre) ((tre->dword[3] >> 16) & 0xFF)
+
+/* DMA w. Buffer TRE */
+#define MSM_GPI_DMA_W_BUFFER_TRE_DWORD0(ptr) ((u32)ptr)
+#define MSM_GPI_DMA_W_BUFFER_TRE_DWORD1(ptr) ((u32)(ptr >> 32))
+#define MSM_GPI_DMA_W_BUFFER_TRE_DWORD2(length) (length & 0xFFFFFF)
+#define MSM_GPI_DMA_W_BUFFER_TRE_DWORD3(bei, ieot, ieob, ch) ((0x1 << 20) | \
+	(0x0 << 16) | (bei << 10) | (ieot << 9) | (ieob << 8) | ch)
+#define MSM_GPI_DMA_W_BUFFER_TRE_GET_LEN(tre) (tre->dword[2] & 0xFFFFFF)
+#define MSM_GPI_DMA_W_BUFFER_TRE_SET_LEN(tre, length) (tre->dword[2] = \
+	MSM_GPI_DMA_W_BUFFER_TRE_DWORD2(length))
+
+/* DMA Immediate TRE */
+#define MSM_GPI_DMA_IMMEDIATE_TRE_DWORD0(d3, d2, d1, d0) ((d3 << 24) | \
+	(d2 << 16) | (d1 << 8) | (d0))
+#define MSM_GPI_DMA_IMMEDIATE_TRE_DWORD1(d4, d5, d6, d7) ((d7 << 24) | \
+	(d6 << 16) | (d5 << 8) | (d4))
+#define MSM_GPI_DMA_IMMEDIATE_TRE_DWORD2(length) (length & 0xF)
+#define MSM_GPI_DMA_IMMEDIATE_TRE_DWORD3(bei, ieot, ieob, ch) ((0x1 << 20) | \
+	(0x1 << 16) | (bei << 10) | (ieot << 9) | (ieob << 8) | ch)
+#define MSM_GPI_DMA_IMMEDIATE_TRE_GET_LEN(tre) (tre->dword[2] & 0xF)
+
+/* DMA w. Scatter/Gather List TRE */
+#define MSM_GPI_SG_LIST_TRE_DWORD0(ptr) ((u32)ptr)
+#define MSM_GPI_SG_LIST_TRE_DWORD1(ptr) ((u32)(ptr >> 32))
+#define MSM_GPI_SG_LIST_TRE_DWORD2(length) (length & 0xFFFF)
+#define MSM_GPI_SG_LIST_TRE_DWORD3(bei, ieot, ieob, ch) ((0x1 << 20) | \
+	(0x2 << 16) | (bei << 10) | (ieot << 9) | (ieob << 8) | ch)
+
+/* SG Element */
+#define MSM_GPI_SG_ELEMENT_DWORD0(ptr) ((u32)ptr)
+#define MSM_GPI_SG_ELEMENT_DWORD1(ptr) ((u32)(ptr >> 32))
+#define MSM_GSI_SG_ELEMENT_DWORD2(length) (length & 0xFFFFF)
+#define MSM_GSI_SG_ELEMENT_DWORD3 (0)
+
+/* Config2 TRE  */
+#define GPI_CONFIG2_TRE_DWORD0(gr, txp) ((gr << 20) | (txp))
+#define GPI_CONFIG2_TRE_DWORD1(txp) (txp)
+#define GPI_CONFIG2_TRE_DWORD2 (0)
+#define GPI_CONFIG2_TRE_DWORD3(bei, ieot, ieob, ch) ((0x2 << 20) | \
+	(0x4 << 16) | (bei << 10) | (ieot << 9) | (ieob << 8) | ch)
+
+/* Config3 TRE */
+#define GPI_CONFIG3_TRE_DWORD0(rxp) (rxp)
+#define GPI_CONFIG3_TRE_DWORD1(rxp) (rxp)
+#define GPI_CONFIG3_TRE_DWORD2 (0)
+#define GPI_CONFIG3_TRE_DWORD3(bei, ieot, ieob, ch) ((0x2 << 20) | \
+	(0x5 << 16) | (bei << 10) | (ieot << 9) | (ieob << 8) | ch)
+
+/* SPI Go TRE */
+#define MSM_GPI_SPI_GO_TRE_DWORD0(flags, cs, command) ((flags << 24) | \
+	(cs << 8) | command)
+#define MSM_GPI_SPI_GO_TRE_DWORD1 (0)
+#define MSM_GPI_SPI_GO_TRE_DWORD2(rx_len) (rx_len)
+#define MSM_GPI_SPI_GO_TRE_DWORD3(bei, ieot, ieob, ch) ((0x2 << 20) | \
+	(0x0 << 16) | (bei << 10) | (ieot << 9) | (ieob << 8) | ch)
+
+/* SPI Config0 TRE */
+#define MSM_GPI_SPI_CONFIG0_TRE_DWORD0(pack, flags, word_size) ((pack << 24) | \
+	(flags << 8) | word_size)
+#define MSM_GPI_SPI_CONFIG0_TRE_DWORD1(it_del, cs_clk_del, iw_del) \
+	((it_del << 16) | (cs_clk_del << 8) | iw_del)
+#define MSM_GPI_SPI_CONFIG0_TRE_DWORD2(clk_src, clk_div) ((clk_src << 16) | \
+	clk_div)
+#define MSM_GPI_SPI_CONFIG0_TRE_DWORD3(bei, ieot, ieob, ch) ((0x2 << 20) | \
+	(0x2 << 16) | (bei << 10) | (ieot << 9) | (ieob << 8) | ch)
+
+/* UART Go TRE */
+#define MSM_GPI_UART_GO_TRE_DWORD0(en_hunt, command) ((en_hunt << 8) | command)
+#define MSM_GPI_UART_GO_TRE_DWORD1 (0)
+#define MSM_GPI_UART_GO_TRE_DWORD2 (0)
+#define MSM_GPI_UART_GO_TRE_DWORD3(bei, ieot, ieob, ch) ((0x2 << 20) | \
+	(0x0 << 16) | (bei << 10) | (ieot << 9) | (ieob << 8) | ch)
+
+/* UART Config0 TRE */
+#define MSM_GPI_UART_CONFIG0_TRE_DWORD0(pack, hunt, flags, parity, sbl, size) \
+	((pack << 24) | (hunt << 16) | (flags << 8) | (parity << 5) | \
+	 (sbl << 3) | size)
+#define MSM_GPI_UART_CONFIG0_TRE_DWORD1(rfr_level, rx_stale) \
+	((rfr_level << 24) | rx_stale)
+#define MSM_GPI_UART_CONFIG0_TRE_DWORD2(clk_source, clk_div) \
+	((clk_source << 16) | clk_div)
+#define MSM_GPI_UART_CONFIG0_TRE_DWORD3(bei, ieot, ieob, ch) ((0x2 << 20) | \
+	(0x2 << 16) | (bei << 10) | (ieot << 9) | (ieob << 8) | ch)
+
+/* I2C GO TRE */
+#define MSM_GPI_I2C_GO_TRE_DWORD0(flags, slave, opcode) \
+	((flags << 24) | (slave << 8) | opcode)
+#define MSM_GPI_I2C_GO_TRE_DWORD1 (0)
+#define MSM_GPI_I2C_GO_TRE_DWORD2(rx_len) (rx_len)
+#define MSM_GPI_I2C_GO_TRE_DWORD3(bei, ieot, ieob, ch) ((0x2 << 20) | \
+	(0x0 << 16) | (bei << 10) | (ieot << 9) | (ieob << 8) | ch)
+
+/* I2C Config0 TRE */
+#define MSM_GPI_I2C_CONFIG0_TRE_DWORD0(pack, t_cycle, t_high, t_low) \
+	((pack << 24) | (t_cycle << 16) | (t_high << 8) | t_low)
+#define MSM_GPI_I2C_CONFIG0_TRE_DWORD1(inter_delay, noise_rej) \
+	((inter_delay << 16) | noise_rej)
+#define MSM_GPI_I2C_CONFIG0_TRE_DWORD2(clk_src, clk_div) \
+	((clk_src << 16) | clk_div)
+#define MSM_GPI_I2C_CONFIG0_TRE_DWORD3(bei, ieot, ieob, ch) ((0x2 << 20) | \
+	(0x2 << 16) | (bei << 10) | (ieot << 9) | (ieob << 8) | ch)
+
+/* cmds to perform by using dmaengine_slave_config() */
+enum msm_gpi_ctrl_cmd {
+	MSM_GPI_INIT,
+	MSM_GPI_CMD_UART_SW_STALE,
+	MSM_GPI_CMD_UART_RFR_READY,
+	MSM_GPI_CMD_UART_RFR_NOT_READY,
+};
+
+enum msm_gpi_cb_event {
+	/* These events are hardware generated events */
+	MSM_GPI_QUP_NOTIFY,
+	MSM_GPI_QUP_ERROR, /* global error */
+	MSM_GPI_QUP_CH_ERROR, /* channel specific error */
+	MSM_GPI_QUP_FW_ERROR, /* unhandled error */
+	/* These events indicate a software bug */
+	MSM_GPI_QUP_PENDING_EVENT,
+	MSM_GPI_QUP_EOT_DESC_MISMATCH,
+	MSM_GPI_QUP_SW_ERROR,
+	MSM_GPI_QUP_MAX_EVENT,
+};
+
+struct msm_gpi_error_log {
+	u32 routine;
+	u32 type;
+	u32 error_code;
+};
+
+struct msm_gpi_cb {
+	enum msm_gpi_cb_event cb_event;
+	u64 status;
+	u64 timestamp;
+	u64 count;
+	struct msm_gpi_error_log error_log;
+};
+
+struct gpi_client_info {
+	/*
+	 * memory for msm_gpi_cb is released after callback, clients shall
+	 * save any required data for post processing after returning
+	 * from callback
+	 */
+	void (*callback)(struct dma_chan *chan,
+			 struct msm_gpi_cb const *msm_gpi_cb,
+			 void *cb_param);
+	void *cb_param;
+};
+
+/*
+ * control structure to config gpi dma engine via dmaengine_slave_config()
+ * dma_chan.private should point to msm_gpi_ctrl structure
+ */
+struct msm_gpi_ctrl {
+	enum msm_gpi_ctrl_cmd cmd;
+	union {
+		struct gpi_client_info init;
+	};
+};
+
+enum msm_gpi_tce_code {
+	MSM_GPI_TCE_SUCCESS = 1,
+	MSM_GPI_TCE_EOT = 2,
+	MSM_GPI_TCE_EOB = 4,
+	MSM_GPI_TCE_UNEXP_ERR = 16,
+};
+
+/*
+ * gpi specific callback parameters to pass between gpi client and gpi engine.
+ * client shall set async_desc.callback_parm to msm_gpi_dma_async_tx_cb_param
+ */
+struct msm_gpi_dma_async_tx_cb_param {
+	u32 length;
+	enum msm_gpi_tce_code completion_code; /* TCE event code */
+	u32 status;
+	void *userdata;
+};
+
+#endif
diff --git a/include/linux/msm_gsi.h b/include/linux/msm_gsi.h
index f5d2f72..0c460a0 100644
--- a/include/linux/msm_gsi.h
+++ b/include/linux/msm_gsi.h
@@ -18,6 +18,7 @@
 	GSI_VER_1_0 = 1,
 	GSI_VER_1_2 = 2,
 	GSI_VER_1_3 = 3,
+	GSI_VER_2_0 = 4,
 	GSI_VER_MAX,
 };
 
diff --git a/include/linux/netfilter/xt_qtaguid.h b/include/linux/netfilter/xt_qtaguid.h
index ca60fbd..1c67155 100644
--- a/include/linux/netfilter/xt_qtaguid.h
+++ b/include/linux/netfilter/xt_qtaguid.h
@@ -10,4 +10,5 @@
 #define XT_QTAGUID_SOCKET XT_OWNER_SOCKET
 #define xt_qtaguid_match_info xt_owner_match_info
 
+int qtaguid_untag(struct socket *sock, bool kernel);
 #endif /* _XT_QTAGUID_MATCH_H */
diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
index 12b3d51e8..657ac07 100644
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/qcom-geni-se.h
@@ -14,18 +14,22 @@
 
 #ifndef _LINUX_QCOM_GENI_SE
 #define _LINUX_QCOM_GENI_SE
-#include <linux/io.h>
 #include <linux/clk.h>
+#include <linux/dma-direction.h>
+#include <linux/io.h>
+#include <linux/list.h>
 #include <linux/msm-bus.h>
 #include <linux/msm-bus-board.h>
-#include <linux/pm_runtime.h>
 
+/* Transfer mode supported by GENI Serial Engines */
 enum se_xfer_mode {
 	INVALID,
 	FIFO_MODE,
 	GSI_DMA,
+	SE_DMA,
 };
 
+/* Protocols supported by GENI Serial Engines */
 enum se_protocol_types {
 	NONE,
 	SPI,
@@ -34,13 +38,28 @@
 	I3C
 };
 
+/**
+ * struct se_geni_rsc - GENI Serial Engine Resource
+ * @wrapper_dev:	Pointer to the parent QUPv3 core.
+ * @se_clk:		Handle to the core serial engine clock.
+ * @m_ahb_clk:		Handle to the primary AHB clock.
+ * @s_ahb_clk:		Handle to the secondary AHB clock.
+ * @ab_list:		List Head of Average bus bandwidth list.
+ * @ab:			Average bus bandwidth request value.
+ * @ib_list:		List Head of Instantaneous bus bandwidth list.
+ * @ib:			Instantaneous bus bandwidth request value.
+ * @geni_pinctrl:	Handle to the pinctrl configuration.
+ * @geni_gpio_active:	Handle to the default/active pinctrl state.
+ * @geni_gpio_sleep:	Handle to the sleep pinctrl state.
+ */
 struct se_geni_rsc {
+	struct device *wrapper_dev;
 	struct clk *se_clk;
 	struct clk *m_ahb_clk;
 	struct clk *s_ahb_clk;
-	struct msm_bus_client_handle *bus_bw;
-	unsigned int bus_mas;
+	struct list_head ab_list;
 	unsigned long ab;
+	struct list_head ib_list;
 	unsigned long ib;
 	struct pinctrl *geni_pinctrl;
 	struct pinctrl_state *geni_gpio_active;
@@ -64,6 +83,7 @@
 #define GENI_FW_REVISION_RO		(0x68)
 #define GENI_FW_S_REVISION_RO		(0x6C)
 #define SE_GENI_CLK_SEL			(0x7C)
+#define SE_GENI_BYTE_GRAN		(0x254)
 #define SE_GENI_DMA_MODE_EN		(0x258)
 #define SE_GENI_TX_PACKING_CFG0		(0x260)
 #define SE_GENI_TX_PACKING_CFG1		(0x264)
@@ -182,11 +202,11 @@
 #define M_TX_FIFO_WR_ERR_EN	(BIT(29))
 #define M_TX_FIFO_WATERMARK_EN	(BIT(30))
 #define M_SEC_IRQ_EN		(BIT(31))
-#define M_COMMON_GENI_M_IRQ_EN	(GENMASK(3, 0) |  M_TIMESTAMP_EN | \
-				GENMASK(14, 8) | M_IO_DATA_DEASSERT_EN | \
+#define M_COMMON_GENI_M_IRQ_EN	(GENMASK(6, 1) | \
+				M_IO_DATA_DEASSERT_EN | \
 				M_IO_DATA_ASSERT_EN | M_RX_FIFO_RD_ERR_EN | \
 				M_RX_FIFO_WR_ERR_EN | M_TX_FIFO_RD_ERR_EN | \
-				M_TX_FIFO_WR_ERR_EN | M_SEC_IRQ_EN)
+				M_TX_FIFO_WR_ERR_EN)
 
 /* GENI_S_IRQ_EN fields */
 #define S_CMD_DONE_EN		(BIT(0))
@@ -208,7 +228,7 @@
 #define S_RX_FIFO_WR_ERR_EN	(BIT(25))
 #define S_RX_FIFO_WATERMARK_EN	(BIT(26))
 #define S_RX_FIFO_LAST_EN	(BIT(27))
-#define S_COMMON_GENI_S_IRQ_EN	(GENMASK(3, 0) | GENMASK(14, 8) | \
+#define S_COMMON_GENI_S_IRQ_EN	(GENMASK(5, 1) | GENMASK(13, 9) | \
 				 S_RX_FIFO_RD_ERR_EN | S_RX_FIFO_WR_ERR_EN)
 
 /*  GENI_/TX/RX/RX_RFR/_WATERMARK_REG fields */
@@ -261,304 +281,557 @@
 #define RX_DMA_IRQ_DELAY_MSK	(GENMASK(8, 6))
 #define RX_DMA_IRQ_DELAY_SHFT	(6)
 
+#define SE_DMA_TX_PTR_L		(0xC30)
+#define SE_DMA_TX_PTR_H		(0xC34)
+#define SE_DMA_TX_ATTR		(0xC38)
+#define SE_DMA_TX_LEN		(0xC3C)
+#define SE_DMA_TX_IRQ_STAT	(0xC40)
+#define SE_DMA_TX_IRQ_CLR	(0xC44)
+#define SE_DMA_TX_IRQ_EN	(0xC48)
+#define SE_DMA_TX_IRQ_EN_SET	(0xC4C)
+#define SE_DMA_TX_IRQ_EN_CLR	(0xC50)
+#define SE_DMA_TX_LEN_IN	(0xC54)
+#define SE_DMA_TX_FSM_RST	(0xC58)
+#define SE_DMA_TX_MAX_BURST	(0xC5C)
+
+#define SE_DMA_RX_PTR_L		(0xD30)
+#define SE_DMA_RX_PTR_H		(0xD34)
+#define SE_DMA_RX_ATTR		(0xD38)
+#define SE_DMA_RX_LEN		(0xD3C)
+#define SE_DMA_RX_IRQ_STAT	(0xD40)
+#define SE_DMA_RX_IRQ_CLR	(0xD44)
+#define SE_DMA_RX_IRQ_EN	(0xD48)
+#define SE_DMA_RX_IRQ_EN_SET	(0xD4C)
+#define SE_DMA_RX_IRQ_EN_CLR	(0xD50)
+#define SE_DMA_RX_LEN_IN	(0xD54)
+#define SE_DMA_RX_FSM_RST	(0xD58)
+#define SE_DMA_RX_MAX_BURST	(0xD5C)
+#define SE_DMA_RX_FLUSH		(0xD60)
+
+#define DEFAULT_BUS_WIDTH	(4)
+#define DEFAULT_SE_CLK		(19200000)
+
+#define GENI_SE_ERR(log_ctx, print, dev, x...) do { \
+if (log_ctx) \
+	ipc_log_string(log_ctx, x); \
+if (print) { \
+	if (dev) \
+		dev_err((dev), x); \
+	else \
+		pr_err(x); \
+} \
+} while (0)
+
+#define GENI_SE_DBG(log_ctx, print, dev, x...) do { \
+if (log_ctx) \
+	ipc_log_string(log_ctx, x); \
+if (print) { \
+	if (dev) \
+		dev_dbg((dev), x); \
+	else \
+		pr_debug(x); \
+} \
+} while (0)
+
+
+#ifdef CONFIG_QCOM_GENI_SE
+/**
+ * geni_read_reg_nolog() - Helper function to read from a GENI register
+ * @base:	Base address of the serial engine's register block.
+ * @offset:	Offset within the serial engine's register block.
+ *
+ * Return:	Return the contents of the register.
+ */
+unsigned int geni_read_reg_nolog(void __iomem *base, int offset);
+
+/**
+ * geni_write_reg_nolog() - Helper function to write into a GENI register
+ * @value:	Value to be written into the register.
+ * @base:	Base address of the serial engine's register block.
+ * @offset:	Offset within the serial engine's register block.
+ */
+void geni_write_reg_nolog(unsigned int value, void __iomem *base, int offset);
+
+/**
+ * geni_read_reg() - Helper function to read from a GENI register
+ * @base:	Base address of the serial engine's register block.
+ * @offset:	Offset within the serial engine's register block.
+ *
+ * Return:	Return the contents of the register.
+ */
+unsigned int geni_read_reg(void __iomem *base, int offset);
+
+/**
+ * geni_write_reg() - Helper function to write into a GENI register
+ * @value:	Value to be written into the register.
+ * @base:	Base address of the serial engine's register block.
+ * @offset:	Offset within the serial engine's register block.
+ */
+void geni_write_reg(unsigned int value, void __iomem *base, int offset);
+
+/**
+ * get_se_proto() - Read the protocol configured for a serial engine
+ * @base:	Base address of the serial engine's register block.
+ *
+ * Return:	Protocol value as configured in the serial engine.
+ */
+int get_se_proto(void __iomem *base);
+
+/**
+ * geni_se_init() - Initialize the GENI Serial Engine
+ * @base:	Base address of the serial engine's register block.
+ * @rx_wm:	Receive watermark to be configured.
+ * @rx_rfr:	Ready-for-receive watermark to be configured.
+ *
+ * This function is used to initialize the GENI serial engine, configure
+ * the transfer mode, receive watermark and ready-for-receive watermarks.
+ *
+ * Return:	0 on success, standard Linux error codes on failure/error.
+ */
+int geni_se_init(void __iomem *base, unsigned int rx_wm, unsigned int rx_rfr);
+
+/**
+ * geni_se_select_mode() - Select the serial engine transfer mode
+ * @base:	Base address of the serial engine's register block.
+ * @mode:	Transfer mode to be selected.
+ *
+ * Return:	0 on success, standard Linux error codes on failure.
+ */
+int geni_se_select_mode(void __iomem *base, int mode);
+
+/**
+ * geni_setup_m_cmd() - Setup the primary sequencer
+ * @base:	Base address of the serial engine's register block.
+ * @cmd:	Command/Operation to setup in the primary sequencer.
+ * @params:	Parameter for the sequencer command.
+ *
+ * This function is used to configure the primary sequencer with the
+ * command and its associated parameters.
+ */
+void geni_setup_m_cmd(void __iomem *base, u32 cmd, u32 params);
+
+/**
+ * geni_setup_s_cmd() - Setup the secondary sequencer
+ * @base:	Base address of the serial engine's register block.
+ * @cmd:	Command/Operation to setup in the secondary sequencer.
+ * @params:	Parameter for the sequencer command.
+ *
+ * This function is used to configure the secondary sequencer with the
+ * command and its associated parameters.
+ */
+void geni_setup_s_cmd(void __iomem *base, u32 cmd, u32 params);
+
+/**
+ * geni_cancel_m_cmd() - Cancel the command configured in the primary sequencer
+ * @base:	Base address of the serial engine's register block.
+ *
+ * This function is used to cancel the currently configured command in the
+ * primary sequencer.
+ */
+void geni_cancel_m_cmd(void __iomem *base);
+
+/**
+ * geni_cancel_s_cmd() - Cancel the command configured in the secondary
+ *			 sequencer
+ * @base:	Base address of the serial engine's register block.
+ *
+ * This function is used to cancel the currently configured command in the
+ * secondary sequencer.
+ */
+void geni_cancel_s_cmd(void __iomem *base);
+
+/**
+ * geni_abort_m_cmd() - Abort the command configured in the primary sequencer
+ * @base:	Base address of the serial engine's register block.
+ *
+ * This function is used to force abort the currently configured command in the
+ * primary sequencer.
+ */
+void geni_abort_m_cmd(void __iomem *base);
+
+/**
+ * geni_abort_s_cmd() - Abort the command configured in the secondary
+ *			 sequencer
+ * @base:	Base address of the serial engine's register block.
+ *
+ * This function is used to force abort the currently configured command in the
+ * secondary sequencer.
+ */
+void geni_abort_s_cmd(void __iomem *base);
+
+/**
+ * get_tx_fifo_depth() - Get the TX fifo depth of the serial engine
+ * @base:	Base address of the serial engine's register block.
+ *
+ * This function is used to get the depth i.e. number of elements in the
+ * TX fifo of the serial engine.
+ *
+ * Return:	TX fifo depth in units of FIFO words.
+ */
+int get_tx_fifo_depth(void __iomem *base);
+
+/**
+ * get_tx_fifo_width() - Get the TX fifo width of the serial engine
+ * @base:	Base address of the serial engine's register block.
+ *
+ * This function is used to get the width i.e. word size per element in the
+ * TX fifo of the serial engine.
+ *
+ * Return:	TX fifo width in bits.
+ */
+int get_tx_fifo_width(void __iomem *base);
+
+/**
+ * get_rx_fifo_depth() - Get the RX fifo depth of the serial engine
+ * @base:	Base address of the serial engine's register block.
+ *
+ * This function is used to get the depth i.e. number of elements in the
+ * RX fifo of the serial engine.
+ *
+ * Return:	RX fifo depth in units of FIFO words.
+ */
+int get_rx_fifo_depth(void __iomem *base);
+
+/**
+ * se_get_packing_config() - Get the packing configuration based on input
+ * @bpw:	Bits of data per transfer word.
+ * @pack_words:	Number of words per fifo element.
+ * @msb_to_lsb:	Transfer from MSB to LSB or vice-versa.
+ * @cfg0:	Output buffer to hold the first half of configuration.
+ * @cfg1:	Output buffer to hold the second half of configuration.
+ *
+ * This function is used to calculate the packing configuration based on
+ * the input packing requirement and the configuration logic.
+ */
+void se_get_packing_config(int bpw, int pack_words, bool msb_to_lsb,
+			   unsigned long *cfg0, unsigned long *cfg1);
+
+/**
+ * se_config_packing() - Packing configuration of the serial engine
+ * @base:	Base address of the serial engine's register block.
+ * @bpw:	Bits of data per transfer word.
+ * @pack_words:	Number of words per fifo element.
+ * @msb_to_lsb:	Transfer from MSB to LSB or vice-versa.
+ *
+ * This function is used to configure the packing rules for the current
+ * transfer.
+ */
+void se_config_packing(void __iomem *base, int bpw, int pack_words,
+		       bool msb_to_lsb);
+
+/**
+ * se_geni_resources_off() - Turn off resources associated with the serial
+ *                           engine
+ * @rsc:	Handle to resources associated with the serial engine.
+ *
+ * Return:	0 on success, standard Linux error codes on failure/error.
+ */
+int se_geni_resources_off(struct se_geni_rsc *rsc);
+
+/**
+ * se_geni_resources_on() - Turn on resources associated with the serial
+ *                           engine
+ * @rsc:	Handle to resources associated with the serial engine.
+ *
+ * Return:	0 on success, standard Linux error codes on failure/error.
+ */
+int se_geni_resources_on(struct se_geni_rsc *rsc);
+
+/**
+ * geni_se_resources_init() - Init the SE resource structure
+ * @rsc:	SE resource structure to be initialized.
+ * @ab:		Initial Average bus bandwidth request value.
+ * @ib:		Initial Instantaneous bus bandwidth request value.
+ *
+ * Return:	0 on success, standard Linux error codes on failure.
+ */
+int geni_se_resources_init(struct se_geni_rsc *rsc,
+			   unsigned long ab, unsigned long ib);
+
+/**
+ * geni_se_tx_dma_prep() - Prepare the Serial Engine for TX DMA transfer
+ * @wrapper_dev:	QUPv3 Wrapper Device to which the TX buffer is mapped.
+ * @base:		Base address of the SE register block.
+ * @tx_buf:		Pointer to the TX buffer.
+ * @tx_len:		Length of the TX buffer.
+ * @tx_dma:		Pointer to store the mapped DMA address.
+ *
+ * This function is used to prepare the buffers for DMA TX.
+ *
+ * Return:	0 on success, standard Linux error codes on error/failure.
+ */
+int geni_se_tx_dma_prep(struct device *wrapper_dev, void __iomem *base,
+			void *tx_buf, int tx_len, dma_addr_t *tx_dma);
+
+/**
+ * geni_se_rx_dma_prep() - Prepare the Serial Engine for RX DMA transfer
+ * @wrapper_dev:	QUPv3 Wrapper Device to which the RX buffer is mapped.
+ * @base:		Base address of the SE register block.
+ * @rx_buf:		Pointer to the RX buffer.
+ * @rx_len:		Length of the RX buffer.
+ * @rx_dma:		Pointer to store the mapped DMA address.
+ *
+ * This function is used to prepare the buffers for DMA RX.
+ *
+ * Return:	0 on success, standard Linux error codes on error/failure.
+ */
+int geni_se_rx_dma_prep(struct device *wrapper_dev, void __iomem *base,
+			void *rx_buf, int rx_len, dma_addr_t *rx_dma);
+
+/**
+ * geni_se_tx_dma_unprep() - Unprepare the Serial Engine after TX DMA transfer
+ * @wrapper_dev:	QUPv3 Wrapper Device to which the TX buffer is mapped.
+ * @tx_dma:		DMA address of the TX buffer.
+ * @tx_len:		Length of the TX buffer.
+ *
+ * This function is used to unprepare the DMA buffers after DMA TX.
+ */
+void geni_se_tx_dma_unprep(struct device *wrapper_dev,
+			   dma_addr_t tx_dma, int tx_len);
+
+/**
+ * geni_se_rx_dma_unprep() - Unprepare the Serial Engine after RX DMA transfer
+ * @wrapper_dev:	QUPv3 Wrapper Device to which the RX buffer is mapped.
+ * @rx_dma:		DMA address of the RX buffer.
+ * @rx_len:		Length of the RX buffer.
+ *
+ * This function is used to unprepare the DMA buffers after DMA RX.
+ */
+void geni_se_rx_dma_unprep(struct device *wrapper_dev,
+			   dma_addr_t rx_dma, int rx_len);
+
+/**
+ * geni_se_qupv3_hw_version() - Read the QUPv3 Hardware version
+ * @wrapper_dev:	Pointer to the corresponding QUPv3 wrapper core.
+ * @major:		Buffer for Major Version field.
+ * @minor:		Buffer for Minor Version field.
+ * @step:		Buffer for Step Version field.
+ *
+ * Return:	0 on success, standard Linux error codes on failure/error.
+ */
+int geni_se_qupv3_hw_version(struct device *wrapper_dev, unsigned int *major,
+			     unsigned int *minor, unsigned int *step);
+
+/**
+ * geni_se_iommu_map_buf() - Map a single buffer into QUPv3 context bank
+ * @wrapper_dev:	Pointer to the corresponding QUPv3 wrapper core.
+ * @iova:		Pointer in which the mapped virtual address is stored.
+ * @buf:		Address of the buffer that needs to be mapped.
+ * @size:		Size of the buffer.
+ * @dir:		Direction of the DMA transfer.
+ *
+ * This function is used to map an already allocated buffer into the
+ * QUPv3 context bank device space.
+ *
+ * Return:	0 on success, standard Linux error codes on failure/error.
+ */
+int geni_se_iommu_map_buf(struct device *wrapper_dev, dma_addr_t *iova,
+			  void *buf, size_t size, enum dma_data_direction dir);
+
+/**
+ * geni_se_iommu_alloc_buf() - Allocate & map a single buffer into QUPv3
+ *			       context bank
+ * @wrapper_dev:	Pointer to the corresponding QUPv3 wrapper core.
+ * @iova:		Pointer in which the mapped virtual address is stored.
+ * @size:		Size of the buffer.
+ *
+ * This function is used to allocate a buffer and map it into the
+ * QUPv3 context bank device space.
+ *
+ * Return:	address of the buffer on success, NULL or ERR_PTR on
+ *		failure/error.
+ */
+void *geni_se_iommu_alloc_buf(struct device *wrapper_dev, dma_addr_t *iova,
+			      size_t size);
+
+/**
+ * geni_se_iommu_unmap_buf() - Unmap a single buffer from QUPv3 context bank
+ * @wrapper_dev:	Pointer to the corresponding QUPv3 wrapper core.
+ * @iova:		Pointer in which the mapped virtual address is stored.
+ * @size:		Size of the buffer.
+ * @dir:		Direction of the DMA transfer.
+ *
+ * This function is used to unmap an already mapped buffer from the
+ * QUPv3 context bank device space.
+ *
+ * Return:	0 on success, standard Linux error codes on failure/error.
+ */
+int geni_se_iommu_unmap_buf(struct device *wrapper_dev, dma_addr_t *iova,
+			    size_t size, enum dma_data_direction dir);
+
+/**
+ * geni_se_iommu_free_buf() - Unmap & free a single buffer from QUPv3
+ *			      context bank
+ * @wrapper_dev:	Pointer to the corresponding QUPv3 wrapper core.
+ * @iova:	Pointer in which the mapped virtual address is stored.
+ * @buf:	Address of the buffer.
+ * @size:	Size of the buffer.
+ *
+ * This function is used to unmap and free a buffer from the
+ * QUPv3 context bank device space.
+ *
+ * Return:	0 on success, standard Linux error codes on failure/error.
+ */
+int geni_se_iommu_free_buf(struct device *wrapper_dev, dma_addr_t *iova,
+			   void *buf, size_t size);
+
+#else
 static inline unsigned int geni_read_reg_nolog(void __iomem *base, int offset)
 {
-	return readl_relaxed_no_log(base + offset);
+	return 0;
 }
 
-static inline void geni_write_reg_nolog(unsigned int value, void __iomem *base,
-				int offset)
+static inline void geni_write_reg_nolog(unsigned int value,
+					void __iomem *base, int offset)
 {
-	return writel_relaxed_no_log(value, (base + offset));
 }
 
 static inline unsigned int geni_read_reg(void __iomem *base, int offset)
 {
-	return readl_relaxed(base + offset);
+	return 0;
 }
 
 static inline void geni_write_reg(unsigned int value, void __iomem *base,
 				int offset)
 {
-	writel_relaxed(value, (base + offset));
 }
 
 static inline int get_se_proto(void __iomem *base)
 {
-	int proto = 0;
-
-	proto = ((geni_read_reg(base, GENI_FW_REVISION_RO)
-			& FW_REV_PROTOCOL_MSK) >> FW_REV_PROTOCOL_SHFT);
-	return proto;
+	return -ENXIO;
 }
 
-static inline int se_geni_irq_en(void __iomem *base, int mode)
-{
-	int ret = 0;
-	unsigned int common_geni_m_irq_en;
-	unsigned int common_geni_s_irq_en;
-	int proto = get_se_proto(base);
-
-	common_geni_m_irq_en = geni_read_reg(base, SE_GENI_M_IRQ_EN);
-	common_geni_s_irq_en = geni_read_reg(base, SE_GENI_S_IRQ_EN);
-	/* Common to all modes */
-	common_geni_m_irq_en |= M_COMMON_GENI_M_IRQ_EN;
-	common_geni_s_irq_en |= S_COMMON_GENI_S_IRQ_EN;
-
-	switch (mode) {
-	case FIFO_MODE:
-	{
-		if (proto != UART) {
-			common_geni_m_irq_en |=
-				(M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN |
-				M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
-			common_geni_s_irq_en |= S_CMD_DONE_EN;
-		}
-		break;
-	}
-	case GSI_DMA:
-		break;
-	default:
-		pr_err("%s: Invalid mode %d\n", __func__, mode);
-		ret = -ENXIO;
-		goto exit_irq_en;
-	}
-
-
-	geni_write_reg(common_geni_m_irq_en, base, SE_GENI_M_IRQ_EN);
-	geni_write_reg(common_geni_s_irq_en, base, SE_GENI_S_IRQ_EN);
-exit_irq_en:
-	return ret;
-}
-
-
-static inline void se_set_rx_rfr_wm(void __iomem *base, unsigned int rx_wm,
-						unsigned int rx_rfr)
-{
-	geni_write_reg(rx_wm, base, SE_GENI_RX_WATERMARK_REG);
-	geni_write_reg(rx_rfr, base, SE_GENI_RX_RFR_WATERMARK_REG);
-}
-
-static inline int se_io_set_mode(void __iomem *base, int mode)
-{
-	int ret = 0;
-	unsigned int io_mode = 0;
-	unsigned int geni_dma_mode = 0;
-	unsigned int gsi_event_en = 0;
-
-	io_mode = geni_read_reg(base, SE_IRQ_EN);
-	geni_dma_mode = geni_read_reg(base, SE_GENI_DMA_MODE_EN);
-	gsi_event_en = geni_read_reg(base, SE_GSI_EVENT_EN);
-
-	switch (mode) {
-	case FIFO_MODE:
-	{
-		io_mode |= (GENI_M_IRQ_EN | GENI_S_IRQ_EN);
-		io_mode |= (DMA_TX_IRQ_EN | DMA_RX_IRQ_EN);
-		geni_dma_mode &= ~GENI_DMA_MODE_EN;
-		gsi_event_en = 0;
-		break;
-
-	}
-	case GSI_DMA:
-		geni_dma_mode |= GENI_DMA_MODE_EN;
-		io_mode &= ~(DMA_TX_IRQ_EN | DMA_RX_IRQ_EN);
-		gsi_event_en |= (DMA_RX_EVENT_EN | DMA_TX_EVENT_EN |
-					GENI_M_EVENT_EN | GENI_S_EVENT_EN);
-		break;
-	default:
-		ret = -ENXIO;
-		goto exit_set_mode;
-	}
-	geni_write_reg(io_mode, base, SE_IRQ_EN);
-	geni_write_reg(geni_dma_mode, base, SE_GENI_DMA_MODE_EN);
-	geni_write_reg(gsi_event_en, base, SE_GSI_EVENT_EN);
-exit_set_mode:
-	return ret;
-}
-
-static inline void se_io_init(void __iomem *base)
-{
-	unsigned int io_op_ctrl = 0;
-	unsigned int geni_cgc_ctrl;
-	unsigned int dma_general_cfg;
-
-	geni_cgc_ctrl = geni_read_reg(base, GENI_CGC_CTRL);
-	dma_general_cfg = geni_read_reg(base, SE_DMA_GENERAL_CFG);
-	geni_cgc_ctrl |= DEFAULT_CGC_EN;
-	dma_general_cfg |= (AHB_SEC_SLV_CLK_CGC_ON | DMA_AHB_SLV_CFG_ON |
-			DMA_TX_CLK_CGC_ON | DMA_RX_CLK_CGC_ON);
-	io_op_ctrl |= DEFAULT_IO_OUTPUT_CTRL_MSK;
-	geni_write_reg(geni_cgc_ctrl, base, GENI_CGC_CTRL);
-	geni_write_reg(dma_general_cfg, base, SE_DMA_GENERAL_CFG);
-
-	geni_write_reg(io_op_ctrl, base, GENI_OUTPUT_CTRL);
-	geni_write_reg(FORCE_DEFAULT, base, GENI_FORCE_DEFAULT_REG);
-}
-
-static inline int geni_se_init(void __iomem *base, int mode,
+static inline int geni_se_init(void __iomem *base,
 		unsigned int rx_wm, unsigned int rx_rfr)
 {
-	int ret = 0;
+	return -ENXIO;
+}
 
-	se_io_init(base);
-	ret = se_io_set_mode(base, mode);
-	if (ret)
-		goto exit_geni_se_init;
-
-	se_set_rx_rfr_wm(base, rx_wm, rx_rfr);
-	ret = se_geni_irq_en(base, mode);
-	if (ret)
-		goto exit_geni_se_init;
-
-exit_geni_se_init:
-	return ret;
+static inline int geni_se_select_mode(void __iomem *base, int mode)
+{
+	return -ENXIO;
 }
 
 static inline void geni_setup_m_cmd(void __iomem *base, u32 cmd,
 								u32 params)
 {
-	u32 m_cmd = geni_read_reg(base, SE_GENI_M_CMD0);
-
-	m_cmd &= ~(M_OPCODE_MSK | M_PARAMS_MSK);
-	m_cmd |= (cmd << M_OPCODE_SHFT);
-	m_cmd |= (params & M_PARAMS_MSK);
-	geni_write_reg(m_cmd, base, SE_GENI_M_CMD0);
 }
 
 static inline void geni_setup_s_cmd(void __iomem *base, u32 cmd,
 								u32 params)
 {
-	u32 s_cmd = geni_read_reg(base, SE_GENI_S_CMD0);
-
-	s_cmd &= ~(S_OPCODE_MSK | S_PARAMS_MSK);
-	s_cmd |= (cmd << S_OPCODE_SHFT);
-	s_cmd |= (params & S_PARAMS_MSK);
-	geni_write_reg(s_cmd, base, SE_GENI_S_CMD0);
 }
 
 static inline void geni_cancel_m_cmd(void __iomem *base)
 {
-	geni_write_reg(M_GENI_CMD_CANCEL, base, SE_GENI_S_CMD_CTRL_REG);
 }
 
 static inline void geni_cancel_s_cmd(void __iomem *base)
 {
-	geni_write_reg(S_GENI_CMD_CANCEL, base, SE_GENI_S_CMD_CTRL_REG);
 }
 
 static inline void geni_abort_m_cmd(void __iomem *base)
 {
-	geni_write_reg(M_GENI_CMD_ABORT, base, SE_GENI_M_CMD_CTRL_REG);
 }
 
 static inline void geni_abort_s_cmd(void __iomem *base)
 {
-	geni_write_reg(S_GENI_CMD_ABORT, base, SE_GENI_S_CMD_CTRL_REG);
 }
 
 static inline int get_tx_fifo_depth(void __iomem *base)
 {
-	int tx_fifo_depth;
-
-	tx_fifo_depth = ((geni_read_reg(base, SE_HW_PARAM_0)
-			& TX_FIFO_DEPTH_MSK) >> TX_FIFO_DEPTH_SHFT);
-	return tx_fifo_depth;
+	return -ENXIO;
 }
 
 static inline int get_tx_fifo_width(void __iomem *base)
 {
-	int tx_fifo_width;
-
-	tx_fifo_width = ((geni_read_reg(base, SE_HW_PARAM_0)
-			& TX_FIFO_WIDTH_MSK) >> TX_FIFO_WIDTH_SHFT);
-	return tx_fifo_width;
+	return -ENXIO;
 }
 
 static inline int get_rx_fifo_depth(void __iomem *base)
 {
-	int rx_fifo_depth;
-
-	rx_fifo_depth = ((geni_read_reg(base, SE_HW_PARAM_1)
-			& RX_FIFO_DEPTH_MSK) >> RX_FIFO_DEPTH_SHFT);
-	return rx_fifo_depth;
+	return -ENXIO;
 }
 
 static inline void se_get_packing_config(int bpw, int pack_words,
 					bool msb_to_lsb, unsigned long *cfg0,
 					unsigned long *cfg1)
 {
-	u32 cfg[4] = {0};
-	int len = ((bpw < 8) ? (bpw - 1) : 7);
-	int idx = ((msb_to_lsb == 1) ? len : 0);
-	int iter = (bpw * pack_words) >> 3;
-	int i;
-
-	for (i = 0; i < iter; i++) {
-		cfg[i] = ((idx << 5) | (msb_to_lsb << 4) | (len << 1));
-		idx += (len + 1);
-		if (i == iter - 1)
-			cfg[i] |= 1;
-	}
-	*cfg0 = cfg[0] | (cfg[1] << 10);
-	*cfg1 = cfg[2] | (cfg[3] << 10);
 }
 
 static inline void se_config_packing(void __iomem *base, int bpw,
 				int pack_words, bool msb_to_lsb)
 {
-	unsigned long cfg0, cfg1;
-
-	se_get_packing_config(bpw, pack_words, msb_to_lsb, &cfg0, &cfg1);
-	geni_write_reg(cfg0, base, SE_GENI_TX_PACKING_CFG0);
-	geni_write_reg(cfg1, base, SE_GENI_TX_PACKING_CFG1);
-	geni_write_reg(cfg0, base, SE_GENI_RX_PACKING_CFG0);
-	geni_write_reg(cfg1, base, SE_GENI_RX_PACKING_CFG1);
-}
-
-/*
- * Power/Resource Management functions
- */
-
-static inline int se_geni_clks_off(struct se_geni_rsc *rsc)
-{
-	int ret = 0;
-
-	clk_disable_unprepare(rsc->se_clk);
-	clk_disable_unprepare(rsc->m_ahb_clk);
-	clk_disable_unprepare(rsc->s_ahb_clk);
-	return ret;
-}
-
-static inline int se_geni_resources_off(struct se_geni_rsc *rsc)
-{
-	int ret = 0;
-
-	ret = pinctrl_select_state(rsc->geni_pinctrl, rsc->geni_gpio_sleep);
-	se_geni_clks_off(rsc);
-	if (rsc->bus_bw)
-		msm_bus_scale_update_bw(rsc->bus_bw, 0, 0);
-	return ret;
-}
-
-static inline int se_geni_clks_on(struct se_geni_rsc *rsc)
-{
-	int ret = 0;
-
-	clk_prepare_enable(rsc->se_clk);
-	clk_prepare_enable(rsc->m_ahb_clk);
-	clk_prepare_enable(rsc->s_ahb_clk);
-	return ret;
 }
 
 static inline int se_geni_resources_on(struct se_geni_rsc *rsc)
 {
-	int ret = 0;
-
-	if (rsc->bus_bw)
-		msm_bus_scale_update_bw(rsc->bus_bw, rsc->ab, rsc->ib);
-	se_geni_clks_on(rsc);
-	ret = pinctrl_select_state(rsc->geni_pinctrl, rsc->geni_gpio_active);
-	return ret;
+	return -ENXIO;
 }
+
+static inline int se_geni_resources_off(struct se_geni_rsc *rsc)
+{
+	return -ENXIO;
+}
+
+static inline int geni_se_resources_init(struct se_geni_rsc *rsc,
+					 unsigned long ab, unsigned long ib)
+{
+	return -ENXIO;
+}
+
+static inline int geni_se_tx_dma_prep(struct device *wrapper_dev,
+	void __iomem *base, void *tx_buf, int tx_len, dma_addr_t *tx_dma)
+{
+	return -ENXIO;
+}
+
+static inline int geni_se_rx_dma_prep(struct device *wrapper_dev,
+	void __iomem *base, void *rx_buf, int rx_len, dma_addr_t *rx_dma)
+{
+	return -ENXIO;
+}
+
+static inline void geni_se_tx_dma_unprep(struct device *wrapper_dev,
+					dma_addr_t tx_dma, int tx_len)
+{
+}
+
+static inline void geni_se_rx_dma_unprep(struct device *wrapper_dev,
+					dma_addr_t rx_dma, int rx_len)
+{
+}
+
+static inline int geni_se_qupv3_hw_version(struct device *wrapper_dev,
+		unsigned int *major, unsigned int *minor, unsigned int *step)
+{
+	return -ENXIO;
+}
+
+static inline int geni_se_iommu_map_buf(struct device *wrapper_dev,
+	dma_addr_t *iova, void *buf, size_t size, enum dma_data_direction dir)
+{
+	return -ENXIO;
+}
+
+static inline void *geni_se_iommu_alloc_buf(struct device *wrapper_dev,
+					dma_addr_t *iova, size_t size)
+{
+	return NULL;
+}
+
+static inline int geni_se_iommu_unmap_buf(struct device *wrapper_dev,
+		dma_addr_t *iova, size_t size, enum dma_data_direction dir)
+{
+	return -ENXIO;
+
+}
+
+static inline int geni_se_iommu_free_buf(struct device *wrapper_dev,
+				dma_addr_t *iova, void *buf, size_t size)
+{
+	return -ENXIO;
+}
+
+#endif
 #endif
diff --git a/include/linux/qdsp6v2/apr_tal.h b/include/linux/qdsp6v2/apr_tal.h
index bac5e90..26d1a4c 100644
--- a/include/linux/qdsp6v2/apr_tal.h
+++ b/include/linux/qdsp6v2/apr_tal.h
@@ -75,8 +75,6 @@
 		int num_of_intents, uint32_t size);
 
 
-#if defined(CONFIG_MSM_QDSP6_APRV2_GLINK) || \
-	 defined(CONFIG_MSM_QDSP6_APRV3_GLINK)
 struct apr_svc_ch_dev {
 	void               *handle;
 	spinlock_t         w_lock;
@@ -88,20 +86,5 @@
 	unsigned int       channel_state;
 	bool               if_remote_intent_ready;
 };
-#else
-struct apr_svc_ch_dev {
-	struct smd_channel *ch;
-	spinlock_t         lock;
-	spinlock_t         w_lock;
-	struct mutex       m_lock;
-	apr_svc_cb_fn      func;
-	char               data[APR_MAX_BUF];
-	wait_queue_head_t  wait;
-	void               *priv;
-	uint32_t           smd_state;
-	wait_queue_head_t  dest;
-	uint32_t           dest_state;
-};
-#endif
 
 #endif
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 3e354fd..56dde53 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -20,11 +20,6 @@
 	unsigned long		data;
 	u32			flags;
 
-#ifdef CONFIG_TIMER_STATS
-	int			start_pid;
-	void			*start_site;
-	char			start_comm[16];
-#endif
 #ifdef CONFIG_LOCKDEP
 	struct lockdep_map	lockdep_map;
 #endif
@@ -200,46 +195,6 @@
 /* To be used from cpusets, only */
 extern void timer_quiesce_cpu(void *cpup);
 
-/*
- * Timer-statistics info:
- */
-#ifdef CONFIG_TIMER_STATS
-
-extern int timer_stats_active;
-
-extern void init_timer_stats(void);
-
-extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
-				     void *timerf, char *comm, u32 flags);
-
-extern void __timer_stats_timer_set_start_info(struct timer_list *timer,
-					       void *addr);
-
-static inline void timer_stats_timer_set_start_info(struct timer_list *timer)
-{
-	if (likely(!timer_stats_active))
-		return;
-	__timer_stats_timer_set_start_info(timer, __builtin_return_address(0));
-}
-
-static inline void timer_stats_timer_clear_start_info(struct timer_list *timer)
-{
-	timer->start_site = NULL;
-}
-#else
-static inline void init_timer_stats(void)
-{
-}
-
-static inline void timer_stats_timer_set_start_info(struct timer_list *timer)
-{
-}
-
-static inline void timer_stats_timer_clear_start_info(struct timer_list *timer)
-{
-}
-#endif
-
 extern void add_timer(struct timer_list *timer);
 
 extern int try_to_del_timer_sync(struct timer_list *timer);
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index 5dd75fa..f9be467 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -14,6 +14,7 @@
  * struct ci_hdrc_cable - structure for external connector cable state tracking
  * @state: current state of the line
  * @changed: set to true when extcon event happen
+ * @enabled: set to true if we've enabled the vbus or id interrupt
  * @edev: device which generate events
  * @ci: driver state of the chipidea device
  * @nb: hold event notification callback
@@ -22,6 +23,7 @@
 struct ci_hdrc_cable {
 	bool				state;
 	bool				changed;
+	bool				enabled;
 	struct extcon_dev		*edev;
 	struct ci_hdrc			*ci;
 	struct notifier_block		nb;
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index e1bd2bc..858f308 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -20,6 +20,8 @@
 #define ADDRCONF_TIMER_FUZZ		(HZ / 4)
 #define ADDRCONF_TIMER_FUZZ_MAX		(HZ)
 
+#define ADDRCONF_NOTIFY_PRIORITY	0
+
 #include <linux/in.h>
 #include <linux/in6.h>
 
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 9dc2c18..f5e625f 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -84,6 +84,7 @@
 struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
 			       int ifindex, struct flowi6 *fl6, int flags);
 
+void ip6_route_init_special_entries(void);
 int ip6_route_init(void);
 void ip6_route_cleanup(void);
 
diff --git a/include/soc/qcom/msm_qmi_interface.h b/include/soc/qcom/msm_qmi_interface.h
index c421209..fcddc48 100644
--- a/include/soc/qcom/msm_qmi_interface.h
+++ b/include/soc/qcom/msm_qmi_interface.h
@@ -92,7 +92,6 @@
 	QMI_RESULT_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
 	QMI_RESULT_SUCCESS_V01 = 0,
 	QMI_RESULT_FAILURE_V01 = 1,
-	QMI_ERR_DISABLED_V01 = 0x45,
 	QMI_RESULT_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
 };
 
@@ -106,6 +105,7 @@
 	QMI_ERR_CLIENT_IDS_EXHAUSTED_V01 = 0x0005,
 	QMI_ERR_INVALID_ID_V01 = 0x0029,
 	QMI_ERR_ENCODING_V01 = 0x003A,
+	QMI_ERR_DISABLED_V01 = 0x0045,
 	QMI_ERR_INCOMPATIBLE_STATE_V01 = 0x005A,
 	QMI_ERR_NOT_SUPPORTED_V01 = 0x005E,
 	QMI_ERR_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index d55175e..57693e7 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -794,6 +794,323 @@
 		      __entry->nl, __entry->pl, __entry->flags)
 );
 
+DECLARE_EVENT_CLASS(kpm_module,
+
+	TP_PROTO(unsigned int managed_cpus, unsigned int max_cpus),
+
+	TP_ARGS(managed_cpus, max_cpus),
+
+	TP_STRUCT__entry(
+		__field(u32, managed_cpus)
+		__field(u32, max_cpus)
+	),
+
+	TP_fast_assign(
+		__entry->managed_cpus = managed_cpus;
+		__entry->max_cpus = max_cpus;
+	),
+
+	TP_printk("managed:%x max_cpus=%u", (unsigned int)__entry->managed_cpus,
+					(unsigned int)__entry->max_cpus)
+);
+
+DEFINE_EVENT(kpm_module, set_max_cpus,
+	TP_PROTO(unsigned int managed_cpus, unsigned int max_cpus),
+	TP_ARGS(managed_cpus, max_cpus)
+);
+
+DEFINE_EVENT(kpm_module, reevaluate_hotplug,
+	TP_PROTO(unsigned int managed_cpus, unsigned int max_cpus),
+	TP_ARGS(managed_cpus, max_cpus)
+);
+
+DECLARE_EVENT_CLASS(kpm_module2,
+
+	TP_PROTO(unsigned int cpu, unsigned int enter_cycle_cnt,
+		unsigned int exit_cycle_cnt,
+		unsigned int io_busy, u64 iowait),
+
+	TP_ARGS(cpu, enter_cycle_cnt, exit_cycle_cnt, io_busy, iowait),
+
+	TP_STRUCT__entry(
+		__field(u32, cpu)
+		__field(u32, enter_cycle_cnt)
+		__field(u32, exit_cycle_cnt)
+		__field(u32, io_busy)
+		__field(u64, iowait)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->enter_cycle_cnt = enter_cycle_cnt;
+		__entry->exit_cycle_cnt = exit_cycle_cnt;
+		__entry->io_busy = io_busy;
+		__entry->iowait = iowait;
+	),
+
+	TP_printk("CPU:%u enter_cycles=%u exit_cycles=%u io_busy=%u iowait=%lu",
+		(unsigned int)__entry->cpu,
+		(unsigned int)__entry->enter_cycle_cnt,
+		(unsigned int)__entry->exit_cycle_cnt,
+		(unsigned int)__entry->io_busy,
+		(unsigned long)__entry->iowait)
+);
+
+DEFINE_EVENT(kpm_module2, track_iowait,
+	TP_PROTO(unsigned int cpu, unsigned int enter_cycle_cnt,
+		unsigned int exit_cycle_cnt, unsigned int io_busy, u64 iowait),
+	TP_ARGS(cpu, enter_cycle_cnt, exit_cycle_cnt, io_busy, iowait)
+);
+
+DECLARE_EVENT_CLASS(cpu_modes,
+
+	TP_PROTO(unsigned int cpu, unsigned int max_load,
+		unsigned int single_enter_cycle_cnt,
+		unsigned int single_exit_cycle_cnt,
+		unsigned int total_load, unsigned int multi_enter_cycle_cnt,
+		unsigned int multi_exit_cycle_cnt,
+		unsigned int perf_cl_peak_enter_cycle_cnt,
+		unsigned int perf_cl_peak_exit_cycle_cnt,
+		unsigned int mode,
+		unsigned int cpu_cnt),
+
+	TP_ARGS(cpu, max_load, single_enter_cycle_cnt, single_exit_cycle_cnt,
+		total_load, multi_enter_cycle_cnt, multi_exit_cycle_cnt,
+		perf_cl_peak_enter_cycle_cnt, perf_cl_peak_exit_cycle_cnt, mode,
+		cpu_cnt),
+
+	TP_STRUCT__entry(
+		__field(u32, cpu)
+		__field(u32, max_load)
+		__field(u32, single_enter_cycle_cnt)
+		__field(u32, single_exit_cycle_cnt)
+		__field(u32, total_load)
+		__field(u32, multi_enter_cycle_cnt)
+		__field(u32, multi_exit_cycle_cnt)
+		__field(u32, perf_cl_peak_enter_cycle_cnt)
+		__field(u32, perf_cl_peak_exit_cycle_cnt)
+		__field(u32, mode)
+		__field(u32, cpu_cnt)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->max_load = max_load;
+		__entry->single_enter_cycle_cnt = single_enter_cycle_cnt;
+		__entry->single_exit_cycle_cnt = single_exit_cycle_cnt;
+		__entry->total_load = total_load;
+		__entry->multi_enter_cycle_cnt = multi_enter_cycle_cnt;
+		__entry->multi_exit_cycle_cnt = multi_exit_cycle_cnt;
+		__entry->perf_cl_peak_enter_cycle_cnt =
+				perf_cl_peak_enter_cycle_cnt;
+		__entry->perf_cl_peak_exit_cycle_cnt =
+				perf_cl_peak_exit_cycle_cnt;
+		__entry->mode = mode;
+		__entry->cpu_cnt = cpu_cnt;
+	),
+
+	TP_printk("%u:%4u:%4u:%4u:%4u:%4u:%4u:%4u:%4u:%4u:%u",
+		(unsigned int)__entry->cpu, (unsigned int)__entry->max_load,
+		(unsigned int)__entry->single_enter_cycle_cnt,
+		(unsigned int)__entry->single_exit_cycle_cnt,
+		(unsigned int)__entry->total_load,
+		(unsigned int)__entry->multi_enter_cycle_cnt,
+		(unsigned int)__entry->multi_exit_cycle_cnt,
+		(unsigned int)__entry->perf_cl_peak_enter_cycle_cnt,
+		(unsigned int)__entry->perf_cl_peak_exit_cycle_cnt,
+		(unsigned int)__entry->mode,
+		(unsigned int)__entry->cpu_cnt)
+);
+
+DEFINE_EVENT(cpu_modes, cpu_mode_detect,
+	TP_PROTO(unsigned int cpu, unsigned int max_load,
+		unsigned int single_enter_cycle_cnt,
+		unsigned int single_exit_cycle_cnt,
+		unsigned int total_load, unsigned int multi_enter_cycle_cnt,
+		unsigned int multi_exit_cycle_cnt,
+		unsigned int perf_cl_peak_enter_cycle_cnt,
+		unsigned int perf_cl_peak_exit_cycle_cnt,
+		unsigned int mode,
+		unsigned int cpu_cnt),
+	TP_ARGS(cpu, max_load, single_enter_cycle_cnt, single_exit_cycle_cnt,
+		total_load, multi_enter_cycle_cnt, multi_exit_cycle_cnt,
+		perf_cl_peak_enter_cycle_cnt, perf_cl_peak_exit_cycle_cnt,
+		mode, cpu_cnt)
+);
+
+DECLARE_EVENT_CLASS(timer_status,
+	TP_PROTO(unsigned int cpu, unsigned int single_enter_cycles,
+		unsigned int single_enter_cycle_cnt,
+		unsigned int single_exit_cycles,
+		unsigned int single_exit_cycle_cnt,
+		unsigned int multi_enter_cycles,
+		unsigned int multi_enter_cycle_cnt,
+		unsigned int multi_exit_cycles,
+		unsigned int multi_exit_cycle_cnt, unsigned int timer_rate,
+		unsigned int mode),
+	TP_ARGS(cpu, single_enter_cycles, single_enter_cycle_cnt,
+		single_exit_cycles, single_exit_cycle_cnt, multi_enter_cycles,
+		multi_enter_cycle_cnt, multi_exit_cycles,
+		multi_exit_cycle_cnt, timer_rate, mode),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(unsigned int, single_enter_cycles)
+		__field(unsigned int, single_enter_cycle_cnt)
+		__field(unsigned int, single_exit_cycles)
+		__field(unsigned int, single_exit_cycle_cnt)
+		__field(unsigned int, multi_enter_cycles)
+		__field(unsigned int, multi_enter_cycle_cnt)
+		__field(unsigned int, multi_exit_cycles)
+		__field(unsigned int, multi_exit_cycle_cnt)
+		__field(unsigned int, timer_rate)
+		__field(unsigned int, mode)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->single_enter_cycles = single_enter_cycles;
+		__entry->single_enter_cycle_cnt = single_enter_cycle_cnt;
+		__entry->single_exit_cycles = single_exit_cycles;
+		__entry->single_exit_cycle_cnt = single_exit_cycle_cnt;
+		__entry->multi_enter_cycles = multi_enter_cycles;
+		__entry->multi_enter_cycle_cnt = multi_enter_cycle_cnt;
+		__entry->multi_exit_cycles = multi_exit_cycles;
+		__entry->multi_exit_cycle_cnt = multi_exit_cycle_cnt;
+		__entry->timer_rate = timer_rate;
+		__entry->mode = mode;
+	),
+
+	TP_printk("%u:%4u:%4u:%4u:%4u:%4u:%4u:%4u:%4u:%4u:%4u",
+		(unsigned int) __entry->cpu,
+		(unsigned int) __entry->single_enter_cycles,
+		(unsigned int) __entry->single_enter_cycle_cnt,
+		(unsigned int) __entry->single_exit_cycles,
+		(unsigned int) __entry->single_exit_cycle_cnt,
+		(unsigned int) __entry->multi_enter_cycles,
+		(unsigned int) __entry->multi_enter_cycle_cnt,
+		(unsigned int) __entry->multi_exit_cycles,
+		(unsigned int) __entry->multi_exit_cycle_cnt,
+		(unsigned int) __entry->timer_rate,
+		(unsigned int) __entry->mode)
+);
+
+DEFINE_EVENT(timer_status, single_mode_timeout,
+	TP_PROTO(unsigned int cpu, unsigned int single_enter_cycles,
+		unsigned int single_enter_cycle_cnt,
+		unsigned int single_exit_cycles,
+		unsigned int single_exit_cycle_cnt,
+		unsigned int multi_enter_cycles,
+		unsigned int multi_enter_cycle_cnt,
+		unsigned int multi_exit_cycles,
+		unsigned int multi_exit_cycle_cnt, unsigned int timer_rate,
+		unsigned int mode),
+	TP_ARGS(cpu, single_enter_cycles, single_enter_cycle_cnt,
+		single_exit_cycles, single_exit_cycle_cnt, multi_enter_cycles,
+		multi_enter_cycle_cnt, multi_exit_cycles, multi_exit_cycle_cnt,
+		timer_rate, mode)
+);
+
+DEFINE_EVENT(timer_status, single_cycle_exit_timer_start,
+	TP_PROTO(unsigned int cpu, unsigned int single_enter_cycles,
+		unsigned int single_enter_cycle_cnt,
+		unsigned int single_exit_cycles,
+		unsigned int single_exit_cycle_cnt,
+		unsigned int multi_enter_cycles,
+		unsigned int multi_enter_cycle_cnt,
+		unsigned int multi_exit_cycles,
+		unsigned int multi_exit_cycle_cnt, unsigned int timer_rate,
+		unsigned int mode),
+	TP_ARGS(cpu, single_enter_cycles, single_enter_cycle_cnt,
+		single_exit_cycles, single_exit_cycle_cnt, multi_enter_cycles,
+		multi_enter_cycle_cnt, multi_exit_cycles, multi_exit_cycle_cnt,
+		timer_rate, mode)
+);
+
+DEFINE_EVENT(timer_status, single_cycle_exit_timer_stop,
+	TP_PROTO(unsigned int cpu, unsigned int single_enter_cycles,
+		unsigned int single_enter_cycle_cnt,
+		unsigned int single_exit_cycles,
+		unsigned int single_exit_cycle_cnt,
+		unsigned int multi_enter_cycles,
+		unsigned int multi_enter_cycle_cnt,
+		unsigned int multi_exit_cycles,
+		unsigned int multi_exit_cycle_cnt, unsigned int timer_rate,
+		unsigned int mode),
+	TP_ARGS(cpu, single_enter_cycles, single_enter_cycle_cnt,
+		single_exit_cycles, single_exit_cycle_cnt, multi_enter_cycles,
+		multi_enter_cycle_cnt, multi_exit_cycles, multi_exit_cycle_cnt,
+		timer_rate, mode)
+);
+
+DECLARE_EVENT_CLASS(perf_cl_peak_timer_status,
+	TP_PROTO(unsigned int cpu, unsigned int perf_cl_peak_enter_cycles,
+		unsigned int perf_cl_peak_enter_cycle_cnt,
+		unsigned int perf_cl_peak_exit_cycles,
+		unsigned int perf_cl_peak_exit_cycle_cnt,
+		unsigned int timer_rate,
+		unsigned int mode),
+	TP_ARGS(cpu, perf_cl_peak_enter_cycles, perf_cl_peak_enter_cycle_cnt,
+		perf_cl_peak_exit_cycles, perf_cl_peak_exit_cycle_cnt,
+		timer_rate, mode),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(unsigned int, perf_cl_peak_enter_cycles)
+		__field(unsigned int, perf_cl_peak_enter_cycle_cnt)
+		__field(unsigned int, perf_cl_peak_exit_cycles)
+		__field(unsigned int, perf_cl_peak_exit_cycle_cnt)
+		__field(unsigned int, timer_rate)
+		__field(unsigned int, mode)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->perf_cl_peak_enter_cycles = perf_cl_peak_enter_cycles;
+		__entry->perf_cl_peak_enter_cycle_cnt =
+				perf_cl_peak_enter_cycle_cnt;
+		__entry->perf_cl_peak_exit_cycles = perf_cl_peak_exit_cycles;
+		__entry->perf_cl_peak_exit_cycle_cnt =
+				perf_cl_peak_exit_cycle_cnt;
+		__entry->timer_rate = timer_rate;
+		__entry->mode = mode;
+	),
+
+	TP_printk("%u:%4u:%4u:%4u:%4u:%4u:%4u",
+		(unsigned int) __entry->cpu,
+		(unsigned int) __entry->perf_cl_peak_enter_cycles,
+		(unsigned int) __entry->perf_cl_peak_enter_cycle_cnt,
+		(unsigned int) __entry->perf_cl_peak_exit_cycles,
+		(unsigned int) __entry->perf_cl_peak_exit_cycle_cnt,
+		(unsigned int) __entry->timer_rate,
+		(unsigned int) __entry->mode)
+);
+
+DEFINE_EVENT(perf_cl_peak_timer_status, perf_cl_peak_exit_timer_start,
+	TP_PROTO(unsigned int cpu, unsigned int perf_cl_peak_enter_cycles,
+		unsigned int perf_cl_peak_enter_cycle_cnt,
+		unsigned int perf_cl_peak_exit_cycles,
+		unsigned int perf_cl_peak_exit_cycle_cnt,
+		unsigned int timer_rate,
+		unsigned int mode),
+	TP_ARGS(cpu, perf_cl_peak_enter_cycles, perf_cl_peak_enter_cycle_cnt,
+		perf_cl_peak_exit_cycles, perf_cl_peak_exit_cycle_cnt,
+		timer_rate, mode)
+);
+
+
+DEFINE_EVENT(perf_cl_peak_timer_status, perf_cl_peak_exit_timer_stop,
+	TP_PROTO(unsigned int cpu, unsigned int perf_cl_peak_enter_cycles,
+		unsigned int perf_cl_peak_enter_cycle_cnt,
+		unsigned int perf_cl_peak_exit_cycles,
+		unsigned int perf_cl_peak_exit_cycle_cnt,
+		unsigned int timer_rate,
+		unsigned int mode),
+	TP_ARGS(cpu, perf_cl_peak_enter_cycles, perf_cl_peak_enter_cycle_cnt,
+		perf_cl_peak_exit_cycles, perf_cl_peak_exit_cycle_cnt,
+		timer_rate, mode)
+);
+
 #endif /* _TRACE_POWER_H */
 
 /* This part must be outside protection */
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index 817feba..ea68202 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -449,6 +449,7 @@
  * @IPA_HW_v3_1: IPA hardware version 3.1
  * @IPA_HW_v3_5: IPA hardware version 3.5
  * @IPA_HW_v3_5_1: IPA hardware version 3.5.1
+ * @IPA_HW_v4_0: IPA hardware version 4.0
  */
 enum ipa_hw_type {
 	IPA_HW_None = 0,
@@ -463,9 +464,12 @@
 	IPA_HW_v3_1 = 11,
 	IPA_HW_v3_5 = 12,
 	IPA_HW_v3_5_1 = 13,
+	IPA_HW_v4_0 = 14,
 	IPA_HW_MAX
 };
 
+#define IPA_HW_v4_0 IPA_HW_v4_0
+
 /**
  * struct ipa_rule_attrib - attributes of a routing/filtering
  * rule, all in LE
diff --git a/include/uapi/media/Kbuild b/include/uapi/media/Kbuild
index 478f7fe..98844ac 100644
--- a/include/uapi/media/Kbuild
+++ b/include/uapi/media/Kbuild
@@ -1,5 +1,6 @@
 header-y += cam_cpas.h
 header-y += cam_defs.h
+header-y += cam_icp.h
 header-y += cam_isp.h
 header-y += cam_isp_vfe.h
 header-y += cam_isp_ife.h
diff --git a/include/uapi/media/cam_defs.h b/include/uapi/media/cam_defs.h
index cf56211..a4557d1 100644
--- a/include/uapi/media/cam_defs.h
+++ b/include/uapi/media/cam_defs.h
@@ -121,9 +121,6 @@
 #define CAM_FORMAT_Y_ONLY                       45
 #define CAM_FORMAT_MAX                          46
 
-
-/* camera packet */
-
 /* camera rotaion */
 #define CAM_ROTATE_CW_0_DEGREE                  0
 #define CAM_ROTATE_CW_90_DEGREE                 1
@@ -343,7 +340,6 @@
 
 };
 
-/* Release Device */
 /**
  * struct cam_release_dev_cmd - Control payload for release devices
  *
@@ -355,7 +351,6 @@
 	int32_t                 dev_handle;
 };
 
-/* Start/Stop device */
 /**
  * struct cam_start_stop_dev_cmd - Control payload for start/stop device
  *
@@ -368,7 +363,6 @@
 	int32_t                 dev_handle;
 };
 
-/* Configure Device */
 /**
  * struct cam_config_dev_cmd - Command payload for configure device
  *
@@ -386,7 +380,6 @@
 	uint64_t                packet_handle;
 };
 
-/* Query Device Caps */
 /**
  * struct cam_query_cap_cmd - Payload for query device capability
  *
@@ -401,7 +394,6 @@
 	uint64_t        caps_handle;
 };
 
-/* Acquire Device */
 /**
  * struct cam_acquire_dev_cmd - Control payload for acquire devices
  *
diff --git a/include/uapi/media/cam_icp.h b/include/uapi/media/cam_icp.h
new file mode 100644
index 0000000..9351d2d
--- /dev/null
+++ b/include/uapi/media/cam_icp.h
@@ -0,0 +1,155 @@
+#ifndef __UAPI_CAM_ICP_H__
+#define __UAPI_CAM_ICP_H__
+
+#include "cam_defs.h"
+
+/* icp, ipe, bps, cdm(ipe/bps) are used in querycap */
+#define CAM_ICP_DEV_TYPE_A5      1
+#define CAM_ICP_DEV_TYPE_IPE     2
+#define CAM_ICP_DEV_TYPE_BPS     3
+#define CAM_ICP_DEV_TYPE_IPE_CDM 4
+#define CAM_ICP_DEV_TYPE_BPS_CDM 5
+#define CAM_ICP_DEV_TYPE_MAX     5
+
+/* definitions needed for icp acquire device */
+#define CAM_ICP_RES_TYPE_BPS        1
+#define CAM_ICP_RES_TYPE_IPE_RT     2
+#define CAM_ICP_RES_TYPE_IPE        3
+#define CAM_ICP_RES_TYPE_MAX        4
+
+/* packet opcode types */
+#define CAM_ICP_OPCODE_IPE_UPDATE 0
+#define CAM_ICP_OPCODE_BPS_UPDATE 1
+
+/* IPE input port resource type */
+#define CAM_ICP_IPE_INPUT_IMAGE_FULL            0x0
+#define CAM_ICP_IPE_INPUT_IMAGE_DS4             0x1
+#define CAM_ICP_IPE_INPUT_IMAGE_DS16            0x2
+#define CAM_ICP_IPE_INPUT_IMAGE_DS64            0x3
+#define CAM_ICP_IPE_INPUT_IMAGE_FULL_REF        0x4
+#define CAM_ICP_IPE_INPUT_IMAGE_DS4_REF         0x5
+#define CAM_ICP_IPE_INPUT_IMAGE_DS16_REF        0x6
+#define CAM_ICP_IPE_INPUT_IMAGE_DS64_REF        0x7
+
+/* IPE output port resource type */
+#define CAM_ICP_IPE_OUTPUT_IMAGE_DISPLAY        0x8
+#define CAM_ICP_IPE_OUTPUT_IMAGE_VIDEO          0x9
+#define CAM_ICP_IPE_OUTPUT_IMAGE_FULL_REF       0xA
+#define CAM_ICP_IPE_OUTPUT_IMAGE_DS4_REF        0xB
+#define CAM_ICP_IPE_OUTPUT_IMAGE_DS16_REF       0xC
+#define CAM_ICP_IPE_OUTPUT_IMAGE_DS64_REF       0xD
+
+#define CAM_ICP_IPE_IMAGE_MAX                   0xE
+
+/* BPS input port resource type */
+#define CAM_ICP_BPS_INPUT_IMAGE                 0x0
+
+/* BPS output port resource type */
+#define CAM_ICP_BPS_OUTPUT_IMAGE_FULL           0x1
+#define CAM_ICP_BPS_OUTPUT_IMAGE_DS4            0x2
+#define CAM_ICP_BPS_OUTPUT_IMAGE_DS16           0x3
+#define CAM_ICP_BPS_OUTPUT_IMAGE_DS64           0x4
+#define CAM_ICP_BPS_OUTPUT_IMAGE_STATS_BG       0x5
+#define CAM_ICP_BPS_OUTPUT_IMAGE_STATS_BHIST    0x6
+#define CAM_ICP_BPS_OUTPUT_IMAGE_REG1           0x7
+#define CAM_ICP_BPS_OUTPUT_IMAGE_REG2           0x8
+
+#define CAM_ICP_BPS_IO_IMAGES_MAX               0x9
+
+/**
+ * struct cam_icp_dev_ver - Device information for particular hw type
+ *
+ * This is used to get device version info of
+ * ICP, IPE, BPS and CDM related IPE and BPS from firmware
+ * and use this info in CAM_QUERY_CAP IOCTL
+ *
+ * @dev_type: hardware type for the cap info(icp, ipe, bps, cdm(ipe/bps))
+ * @reserved: reserved field
+ * @hw_ver: major, minor and incr values of a device version
+ */
+struct cam_icp_dev_ver {
+	uint32_t dev_type;
+	uint32_t reserved;
+	struct cam_hw_version hw_ver;
+};
+
+/**
+ * struct cam_icp_ver - ICP version info
+ *
+ * This structure is used for fw and api version
+ * this is used to get firmware version and api version from firmware
+ * and use this info in CAM_QUERY_CAP IOCTL
+ *
+ * @major: FW version major
+ * @minor: FW version minor
+ * @revision: FW version increment
+ */
+struct cam_icp_ver {
+	uint32_t major;
+	uint32_t minor;
+	uint32_t revision;
+	uint32_t reserved;
+};
+
+/**
+ * struct cam_icp_query_cap_cmd - ICP query device capability payload
+ *
+ * @dev_iommu_handle: icp iommu handles for secure/non secure modes
+ * @cdm_iommu_handle: iommu handles for secure/non secure modes
+ * @fw_version: firmware version info
+ * @api_version: api version info
+ * @num_ipe: number of ipes
+ * @num_bps: number of bps
+ * @dev_ver: returned device capability array
+ */
+struct cam_icp_query_cap_cmd {
+	struct cam_iommu_handle dev_iommu_handle;
+	struct cam_iommu_handle cdm_iommu_handle;
+	struct cam_icp_ver fw_version;
+	struct cam_icp_ver api_version;
+	uint32_t num_ipe;
+	uint32_t num_bps;
+	struct cam_icp_dev_ver dev_ver[CAM_ICP_DEV_TYPE_MAX];
+};
+
+/**
+ * struct cam_icp_res_info - ICP output resource info
+ *
+ * @format: format of the resource
+ * @width:  width in pixels
+ * @height: height in lines
+ * @fps:  fps
+ */
+struct cam_icp_res_info {
+	uint32_t format;
+	uint32_t width;
+	uint32_t height;
+	uint32_t fps;
+};
+
+/**
+ * struct cam_icp_acquire_dev_info - An ICP device info
+ *
+ * @scratch_mem_size: Output param - size of scratch memory
+ * @dev_type: device type (IPE_RT/IPE_NON_RT/BPS)
+ * @io_config_cmd_size: size of IO config command
+ * @io_config_cmd_handle: IO config command for each acquire
+ * @secure_mode: camera mode (secure/non secure)
+ * @chain_info: chaining info of FW device handles
+ * @in_res: resource info used for clock and bandwidth calculation
+ * @num_out_res: number of output resources
+ * @out_res: output resource
+ */
+struct cam_icp_acquire_dev_info {
+	uint32_t scratch_mem_size;
+	uint32_t dev_type;
+	uint32_t io_config_cmd_size;
+	int32_t  io_config_cmd_handle;
+	uint32_t secure_mode;
+	int32_t chain_info;
+	struct cam_icp_res_info in_res;
+	uint32_t num_out_res;
+	struct cam_icp_res_info out_res[1];
+} __attribute__((__packed__));
+
+#endif /* __UAPI_CAM_ICP_H__ */
diff --git a/include/uapi/media/cam_sensor.h b/include/uapi/media/cam_sensor.h
index bb4805c..83f1a02 100644
--- a/include/uapi/media/cam_sensor.h
+++ b/include/uapi/media/cam_sensor.h
@@ -6,7 +6,7 @@
 #include <media/cam_defs.h>
 
 #define CAM_SENSOR_PROBE_CMD   (CAM_COMMON_OPCODE_MAX + 1)
-
+#define CAM_SENSOR_MAX_LED_TRIGGERS 3
 /**
  * struct cam_sensor_query_cap - capabilities info for sensor
  *
@@ -42,6 +42,7 @@
  * @version          :  CSIphy version
  * @clk lane         :  Of the 5 lanes, informs lane configured
  *                      as clock lane
+ * @reserved
  */
 struct cam_csiphy_query_cap {
 	uint32_t            slot_info;
@@ -54,6 +55,7 @@
  * struct cam_actuator_query_cap - capabilities info for actuator
  *
  * @slot_info        :  Indicates about the slotId or cell Index
+ * @reserved
  */
 struct cam_actuator_query_cap {
 	uint32_t            slot_info;
@@ -85,6 +87,7 @@
  * @data_mask       :   Data mask if only few bits are valid
  * @camera_id       :   Indicates the slot to which camera
  *                      needs to be probed
+ * @reserved
  */
 struct cam_cmd_probe {
 	uint8_t     data_type;
@@ -99,9 +102,10 @@
 } __attribute__((packed));
 
 /**
- * struct cam_power_settings - Contains sensor slave info
+ * struct cam_power_settings - Contains sensor power setting info
  *
  * @power_seq_type  :   Type of power sequence
+ * @reserved
  * @config_val_low  :   Lower 32 bit value configuration value
  * @config_val_high :   Higher 32 bit value configuration value
  *
@@ -117,8 +121,9 @@
  * struct cam_cmd_power - Explains about the power settings
  *
  * @count           :    Number of power settings follows
+ * @reserved
  * @cmd_type        :    Explains type of command
- *
+ * @power_settings  :    Contains power setting info
  */
 struct cam_cmd_power {
 	uint16_t                    count;
@@ -135,7 +140,7 @@
  * @ cmd_type        :   Command buffer type
  * @ data_type       :   I2C data type
  * @ addr_type       :   I2C address type
- *
+ * @ reserved
  */
 struct i2c_rdwr_header {
 	uint16_t    count;
@@ -160,7 +165,8 @@
 
 /**
  * struct cam_cmd_i2c_random_wr - I2C random write command
- *
+ * @ header            :   header of READ/WRITE I2C command
+ * @ random_wr_payload :   payload for I2C random write
  */
 struct cam_cmd_i2c_random_wr {
 	struct i2c_rdwr_header       header;
@@ -170,7 +176,7 @@
 /**
  * struct cam_cmd_read - I2C read command
  * @ reg_data        :   Register data
- *
+ * @ reserved
  */
 struct cam_cmd_read {
 	uint32_t                reg_data;
@@ -179,8 +185,9 @@
 
 /**
  * struct cam_cmd_i2c_continuous_wr - I2C continuous write command
+ * @ header          :   header of READ/WRITE I2C command
  * @ reg_addr        :   Register address
- *
+ * @ data_read       :   I2C read command
  */
 struct cam_cmd_i2c_continuous_wr {
 	struct i2c_rdwr_header  header;
@@ -190,7 +197,8 @@
 
 /**
  * struct cam_cmd_i2c_random_rd - I2C random read command
- *
+ * @ header          :   header of READ/WRITE I2C command
+ * @ data_read       :   I2C read command
  */
 struct cam_cmd_i2c_random_rd {
 	struct i2c_rdwr_header  header;
@@ -199,6 +207,7 @@
 
 /**
  * struct cam_cmd_i2c_continuous_rd - I2C continuous continuous read command
+ * @ header          :   header of READ/WRITE I2C command
  * @ reg_addr        :   Register address
  *
  */
@@ -214,6 +223,7 @@
  * @op_code         :   Opcode
  * @cmd_type        :   Explains type of command
  * @timeout         :   Timeout for retries
+ * @reserved
  * @reg_addr        :   Register Address
  * @reg_data        :   Register data
  * @data_mask       :   Data mask if only few bits are valid
@@ -237,7 +247,7 @@
  * struct cam_cmd_unconditional_wait - Un-conditional wait command
  * @delay           :   Delay
  * @op_code         :   Opcode
- *
+ * @cmd_type        :   Explains type of command
  */
 struct cam_cmd_unconditional_wait {
 	int16_t     delay;
@@ -252,6 +262,7 @@
  * @csiphy_3phase :  Total number of lanes
  * @combo_mode    :  Info regarding combo_mode is enable / disable
  * @lane_cnt      :  Total number of lanes
+ * @reserved
  * @3phase        :  Details whether 3Phase / 2Phase operation
  * @settle_time   :  Settling time in ms
  * @data_rate     :  Data rate
@@ -272,6 +283,7 @@
  * cam_csiphy_acquire_dev_info : Information needed for
  *                        csiphy at the time of acquire
  * @combo_mode     :    Indicates the device mode of operation
+ * @reserved
  *
  */
 struct cam_csiphy_acquire_dev_info {
@@ -284,6 +296,7 @@
  * @device_handle  :    Updates device handle
  * @session_handle :    Session handle for acquiring device
  * @handle_type    :    Resource handle type
+ * @reserved
  * @info_handle    :    Handle to additional info
  *                      needed for sensor sub modules
  *
@@ -301,6 +314,7 @@
  * @session_handle :    Session handle for acquiring device
  * @device_handle  :    Updates device handle
  * @handle_type    :    Resource handle type
+ * @reserved
  * @info_handle    :    Information Needed at the time of streamOn
  *
  */
@@ -312,4 +326,92 @@
 	uint64_t    info_handle;
 } __attribute__((packed));
 
+/**
+ * struct cam_flash_init : Init command for the flash
+ * @flash_type  :    flash hw type
+ * @reserved
+ * @cmd_type    :    command buffer type
+ */
+struct cam_flash_init {
+	uint8_t     flash_type;
+	uint16_t    reserved;
+	uint8_t     cmd_type;
+} __attribute__((packed));
+
+/**
+ * struct cam_flash_set_rer : RedEyeReduction command buffer
+ *
+ * @count             :   Number of flash leds
+ * @opcode            :   Command buffer opcode
+ *			CAM_FLASH_FIRE_RER
+ * @cmd_type          :   command buffer operation type
+ * @num_iteration     :   Number of led turn on/off sequence
+ * @reserved
+ * @led_on_delay_ms   :   flash led turn on time in ms
+ * @led_off_delay_ms  :   flash led turn off time in ms
+ * @led_current_ma    :   flash led current in ma
+ *
+ */
+struct cam_flash_set_rer {
+	uint16_t    count;
+	uint8_t     opcode;
+	uint8_t     cmd_type;
+	uint16_t    num_iteration;
+	uint16_t    reserved;
+	uint32_t    led_on_delay_ms;
+	uint32_t    led_off_delay_ms;
+	uint32_t    led_current_ma[CAM_SENSOR_MAX_LED_TRIGGERS];
+} __attribute__((packed));
+
+/**
+ * struct cam_flash_set_on_off : led turn on/off command buffer
+ *
+ * @count              :   Number of Flash leds
+ * @opcode             :   command buffer opcodes
+ *			CAM_FLASH_FIRE_LOW
+ *			CAM_FLASH_FIRE_HIGH
+ *			CAM_FLASH_OFF
+ * @cmd_type           :   command buffer operation type
+ * @led_current_ma     :   flash led current in ma
+ *
+ */
+struct cam_flash_set_on_off {
+	uint16_t    count;
+	uint8_t     opcode;
+	uint8_t     cmd_type;
+	uint32_t    led_current_ma[CAM_SENSOR_MAX_LED_TRIGGERS];
+} __attribute__((packed));
+
+/**
+ * struct cam_flash_query_curr : query current command buffer
+ *
+ * @reserved
+ * @opcode            :   command buffer opcode
+ * @cmd_type          :   command buffer operation type
+ * @query_current_ma  :   battery current in ma
+ *
+ */
+struct cam_flash_query_curr {
+	uint16_t    reserved;
+	uint8_t     opcode;
+	uint8_t     cmd_type;
+	uint32_t    query_current_ma;
+} __attribute__ ((packed));
+
+/**
+ * struct cam_flash_query_cap  :  capabilities info for flash
+ *
+ * @slot_info           :  Indicates about the slotId or cell Index
+ * @max_current_flash   :  max supported current for flash
+ * @max_duration_flash  :  max flash turn on duration
+ * @max_current_torch   :  max supported current for torch
+ *
+ */
+struct cam_flash_query_cap_info {
+	uint32_t    slot_info;
+	uint32_t    max_current_flash[CAM_SENSOR_MAX_LED_TRIGGERS];
+	uint32_t    max_duration_flash[CAM_SENSOR_MAX_LED_TRIGGERS];
+	uint32_t    max_current_torch[CAM_SENSOR_MAX_LED_TRIGGERS];
+} __attribute__ ((packed));
+
 #endif
diff --git a/include/uapi/media/msm_vidc.h b/include/uapi/media/msm_vidc.h
index ac559f2..7161102 100644
--- a/include/uapi/media/msm_vidc.h
+++ b/include/uapi/media/msm_vidc.h
@@ -238,6 +238,9 @@
 	MSM_VIDC_EXTRADATA_FRAME_BITS_INFO = 0x00000010,
 	MSM_VIDC_EXTRADATA_VQZIP_SEI = 0x00000011,
 	MSM_VIDC_EXTRADATA_ROI_QP = 0x00000013,
+#define MSM_VIDC_EXTRADATA_VPX_COLORSPACE_INFO \
+	MSM_VIDC_EXTRADATA_VPX_COLORSPACE_INFO
+	MSM_VIDC_EXTRADATA_VPX_COLORSPACE_INFO = 0x00000014,
 #define MSM_VIDC_EXTRADATA_MASTERING_DISPLAY_COLOUR_SEI \
 	MSM_VIDC_EXTRADATA_MASTERING_DISPLAY_COLOUR_SEI
 	MSM_VIDC_EXTRADATA_MASTERING_DISPLAY_COLOUR_SEI = 0x00000015,
@@ -252,9 +255,6 @@
 	MSM_VIDC_EXTRADATA_OUTPUT_CROP
 	MSM_VIDC_EXTRADATA_OUTPUT_CROP = 0x0700000F,
 	MSM_VIDC_EXTRADATA_DIGITAL_ZOOM = 0x07000010,
-#define MSM_VIDC_EXTRADATA_VPX_COLORSPACE_INFO \
-	MSM_VIDC_EXTRADATA_VPX_COLORSPACE_INFO
-	MSM_VIDC_EXTRADATA_VPX_COLORSPACE_INFO = 0x070000011,
 	MSM_VIDC_EXTRADATA_MULTISLICE_INFO = 0x7F100000,
 	MSM_VIDC_EXTRADATA_NUM_CONCEALED_MB = 0x7F100001,
 	MSM_VIDC_EXTRADATA_INDEX = 0x7F100002,
diff --git a/include/xen/xen.h b/include/xen/xen.h
index f0f0252..0c0e3ef 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -38,7 +38,8 @@
  */
 #include <xen/features.h>
 #define xen_pvh_domain() (xen_pv_domain() && \
-			  xen_feature(XENFEAT_auto_translated_physmap))
+			  xen_feature(XENFEAT_auto_translated_physmap) && \
+			  xen_have_vector_callback)
 #else
 #define xen_pvh_domain()	(0)
 #endif
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 7c9f94c..44c17f4 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -279,7 +279,8 @@
 	[BPF_EXIT >> 4] = "exit",
 };
 
-static void print_bpf_insn(struct bpf_insn *insn)
+static void print_bpf_insn(const struct bpf_verifier_env *env,
+			   const struct bpf_insn *insn)
 {
 	u8 class = BPF_CLASS(insn->code);
 
@@ -343,9 +344,19 @@
 				insn->code,
 				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
 				insn->src_reg, insn->imm);
-		} else if (BPF_MODE(insn->code) == BPF_IMM) {
-			verbose("(%02x) r%d = 0x%x\n",
-				insn->code, insn->dst_reg, insn->imm);
+		} else if (BPF_MODE(insn->code) == BPF_IMM &&
+			   BPF_SIZE(insn->code) == BPF_DW) {
+			/* At this point, we already made sure that the second
+			 * part of the ldimm64 insn is accessible.
+			 */
+			u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
+			bool map_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD;
+
+			if (map_ptr && !env->allow_ptr_leaks)
+				imm = 0;
+
+			verbose("(%02x) r%d = 0x%llx\n", insn->code,
+				insn->dst_reg, (unsigned long long)imm);
 		} else {
 			verbose("BUG_ld_%02x\n", insn->code);
 			return;
@@ -1749,6 +1760,17 @@
 			return 0;
 		} else if (opcode == BPF_ADD &&
 			   BPF_CLASS(insn->code) == BPF_ALU64 &&
+			   dst_reg->type == PTR_TO_STACK &&
+			   ((BPF_SRC(insn->code) == BPF_X &&
+			     regs[insn->src_reg].type == CONST_IMM) ||
+			    BPF_SRC(insn->code) == BPF_K)) {
+			if (BPF_SRC(insn->code) == BPF_X)
+				dst_reg->imm += regs[insn->src_reg].imm;
+			else
+				dst_reg->imm += insn->imm;
+			return 0;
+		} else if (opcode == BPF_ADD &&
+			   BPF_CLASS(insn->code) == BPF_ALU64 &&
 			   (dst_reg->type == PTR_TO_PACKET ||
 			    (BPF_SRC(insn->code) == BPF_X &&
 			     regs[insn->src_reg].type == PTR_TO_PACKET))) {
@@ -2663,7 +2685,7 @@
 
 		if (log_level) {
 			verbose("%d: ", insn_idx);
-			print_bpf_insn(insn);
+			print_bpf_insn(env, insn);
 		}
 
 		err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx);
diff --git a/kernel/configs/android-base.config b/kernel/configs/android-base.config
index 3c32c74..b7b997f 100644
--- a/kernel/configs/android-base.config
+++ b/kernel/configs/android-base.config
@@ -3,7 +3,6 @@
 # CONFIG_DEVMEM is not set
 # CONFIG_FHANDLE is not set
 # CONFIG_INET_LRO is not set
-# CONFIG_MODULES is not set
 # CONFIG_OABI_COMPAT is not set
 # CONFIG_SYSVIPC is not set
 # CONFIG_USELIB is not set
@@ -86,7 +85,6 @@
 CONFIG_NETFILTER_XT_MATCH_POLICY=y
 CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
-CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA=y
 CONFIG_NETFILTER_XT_MATCH_SOCKET=y
 CONFIG_NETFILTER_XT_MATCH_STATE=y
diff --git a/kernel/kthread.c b/kernel/kthread.c
index c2c911a..b65854c 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -854,7 +854,6 @@
 
 	list_add(&work->node, &worker->delayed_work_list);
 	work->worker = worker;
-	timer_stats_timer_set_start_info(&dwork->timer);
 	timer->expires = jiffies + delay;
 	add_timer(timer);
 }
diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c
index bae3b2b..86a167b 100644
--- a/kernel/sched/tune.c
+++ b/kernel/sched/tune.c
@@ -1066,10 +1066,6 @@
 			 * Assume we have EM data only at the CPU and
 			 * the upper CLUSTER level
 			 */
-			BUG_ON(!cpumask_equal(
-				sched_group_cpus(sg),
-				sched_group_cpus(sd2->parent->groups)
-				));
 			break;
 		}
 	}
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index 5819ca0..b9b881eb 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -9,7 +9,6 @@
 endif
 obj-$(CONFIG_GENERIC_SCHED_CLOCK)		+= sched_clock.o
 obj-$(CONFIG_TICK_ONESHOT)			+= tick-oneshot.o tick-sched.o
-obj-$(CONFIG_TIMER_STATS)			+= timer_stats.o
 obj-$(CONFIG_DEBUG_FS)				+= timekeeping_debug.o
 obj-$(CONFIG_TEST_UDELAY)			+= test_udelay.o
 
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index b1c7852..9792763 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -767,34 +767,6 @@
 	clock_was_set_delayed();
 }
 
-static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
-{
-#ifdef CONFIG_TIMER_STATS
-	if (timer->start_site)
-		return;
-	timer->start_site = __builtin_return_address(0);
-	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
-	timer->start_pid = current->pid;
-#endif
-}
-
-static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
-{
-#ifdef CONFIG_TIMER_STATS
-	timer->start_site = NULL;
-#endif
-}
-
-static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
-{
-#ifdef CONFIG_TIMER_STATS
-	if (likely(!timer_stats_active))
-		return;
-	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
-				 timer->function, timer->start_comm, 0);
-#endif
-}
-
 /*
  * Counterpart to lock_hrtimer_base above:
  */
@@ -938,7 +910,6 @@
 		 * rare case and less expensive than a smp call.
 		 */
 		debug_deactivate(timer);
-		timer_stats_hrtimer_clear_start_info(timer);
 		reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
 
 		if (!restart)
@@ -997,8 +968,6 @@
 	/* Switch the timer base, if necessary: */
 	new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
 
-	timer_stats_hrtimer_set_start_info(timer);
-
 	/* Update pinned state */
 	timer->state &= ~HRTIMER_STATE_PINNED;
 	timer->state |= (!!(mode & HRTIMER_MODE_PINNED)) << HRTIMER_PINNED_SHIFT;
@@ -1139,12 +1108,6 @@
 	base = hrtimer_clockid_to_base(clock_id);
 	timer->base = &cpu_base->clock_base[base];
 	timerqueue_init(&timer->node);
-
-#ifdef CONFIG_TIMER_STATS
-	timer->start_site = NULL;
-	timer->start_pid = -1;
-	memset(timer->start_comm, 0, TASK_COMM_LEN);
-#endif
 }
 
 /**
@@ -1228,7 +1191,6 @@
 	raw_write_seqcount_barrier(&cpu_base->seq);
 
 	__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
-	timer_stats_account_hrtimer(timer);
 	fn = timer->function;
 
 	/*
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 5463c3b..adede73 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -575,38 +575,6 @@
 	trigger_dyntick_cpu(base, timer);
 }
 
-#ifdef CONFIG_TIMER_STATS
-void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
-{
-	if (timer->start_site)
-		return;
-
-	timer->start_site = addr;
-	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
-	timer->start_pid = current->pid;
-}
-
-static void timer_stats_account_timer(struct timer_list *timer)
-{
-	void *site;
-
-	/*
-	 * start_site can be concurrently reset by
-	 * timer_stats_timer_clear_start_info()
-	 */
-	site = READ_ONCE(timer->start_site);
-	if (likely(!site))
-		return;
-
-	timer_stats_update_stats(timer, timer->start_pid, site,
-				 timer->function, timer->start_comm,
-				 timer->flags);
-}
-
-#else
-static void timer_stats_account_timer(struct timer_list *timer) {}
-#endif
-
 #ifdef CONFIG_DEBUG_OBJECTS_TIMERS
 
 static struct debug_obj_descr timer_debug_descr;
@@ -793,11 +761,6 @@
 {
 	timer->entry.pprev = NULL;
 	timer->flags = flags | raw_smp_processor_id();
-#ifdef CONFIG_TIMER_STATS
-	timer->start_site = NULL;
-	timer->start_pid = -1;
-	memset(timer->start_comm, 0, TASK_COMM_LEN);
-#endif
 	lockdep_init_map(&timer->lockdep_map, name, key, 0);
 }
 
@@ -1011,8 +974,6 @@
 		base = lock_timer_base(timer, &flags);
 	}
 
-	timer_stats_timer_set_start_info(timer);
-
 	ret = detach_if_pending(timer, base, false);
 	if (!ret && pending_only)
 		goto out_unlock;
@@ -1140,7 +1101,6 @@
 	struct timer_base *new_base, *base;
 	unsigned long flags;
 
-	timer_stats_timer_set_start_info(timer);
 	BUG_ON(timer_pending(timer) || !timer->function);
 
 	new_base = get_timer_cpu_base(timer->flags, cpu);
@@ -1186,7 +1146,6 @@
 
 	debug_assert_init(timer);
 
-	timer_stats_timer_clear_start_info(timer);
 	if (timer_pending(timer)) {
 		base = lock_timer_base(timer, &flags);
 		ret = detach_if_pending(timer, base, true);
@@ -1214,10 +1173,9 @@
 
 	base = lock_timer_base(timer, &flags);
 
-	if (base->running_timer != timer) {
-		timer_stats_timer_clear_start_info(timer);
+	if (base->running_timer != timer)
 		ret = detach_if_pending(timer, base, true);
-	}
+
 	spin_unlock_irqrestore(&base->lock, flags);
 
 	return ret;
@@ -1341,7 +1299,6 @@
 		unsigned long data;
 
 		timer = hlist_entry(head->first, struct timer_list, entry);
-		timer_stats_account_timer(timer);
 
 		base->running_timer = timer;
 		detach_timer(timer, true);
@@ -1916,7 +1873,6 @@
 void __init init_timers(void)
 {
 	init_timer_cpus();
-	init_timer_stats();
 	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
 }
 
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index ba7d8b2..83aa1f8 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -62,21 +62,11 @@
 print_timer(struct seq_file *m, struct hrtimer *taddr, struct hrtimer *timer,
 	    int idx, u64 now)
 {
-#ifdef CONFIG_TIMER_STATS
-	char tmp[TASK_COMM_LEN + 1];
-#endif
 	SEQ_printf(m, " #%d: ", idx);
 	print_name_offset(m, taddr);
 	SEQ_printf(m, ", ");
 	print_name_offset(m, timer->function);
 	SEQ_printf(m, ", S:%02x", timer->state);
-#ifdef CONFIG_TIMER_STATS
-	SEQ_printf(m, ", ");
-	print_name_offset(m, timer->start_site);
-	memcpy(tmp, timer->start_comm, TASK_COMM_LEN);
-	tmp[TASK_COMM_LEN] = 0;
-	SEQ_printf(m, ", %s/%d", tmp, timer->start_pid);
-#endif
 	SEQ_printf(m, "\n");
 	SEQ_printf(m, " # expires at %Lu-%Lu nsecs [in %Ld to %Ld nsecs]\n",
 		(unsigned long long)ktime_to_ns(hrtimer_get_softexpires(timer)),
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
deleted file mode 100644
index 087204c..0000000
--- a/kernel/time/timer_stats.c
+++ /dev/null
@@ -1,425 +0,0 @@
-/*
- * kernel/time/timer_stats.c
- *
- * Collect timer usage statistics.
- *
- * Copyright(C) 2006, Red Hat, Inc., Ingo Molnar
- * Copyright(C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
- *
- * timer_stats is based on timer_top, a similar functionality which was part of
- * Con Kolivas dyntick patch set. It was developed by Daniel Petrini at the
- * Instituto Nokia de Tecnologia - INdT - Manaus. timer_top's design was based
- * on dynamic allocation of the statistics entries and linear search based
- * lookup combined with a global lock, rather than the static array, hash
- * and per-CPU locking which is used by timer_stats. It was written for the
- * pre hrtimer kernel code and therefore did not take hrtimers into account.
- * Nevertheless it provided the base for the timer_stats implementation and
- * was a helpful source of inspiration. Kudos to Daniel and the Nokia folks
- * for this effort.
- *
- * timer_top.c is
- *	Copyright (C) 2005 Instituto Nokia de Tecnologia - INdT - Manaus
- *	Written by Daniel Petrini <d.pensator@gmail.com>
- *	timer_top.c was released under the GNU General Public License version 2
- *
- * We export the addresses and counting of timer functions being called,
- * the pid and cmdline from the owner process if applicable.
- *
- * Start/stop data collection:
- * # echo [1|0] >/proc/timer_stats
- *
- * Display the information collected so far:
- * # cat /proc/timer_stats
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/proc_fs.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/sched.h>
-#include <linux/seq_file.h>
-#include <linux/kallsyms.h>
-
-#include <asm/uaccess.h>
-
-/*
- * This is our basic unit of interest: a timer expiry event identified
- * by the timer, its start/expire functions and the PID of the task that
- * started the timer. We count the number of times an event happens:
- */
-struct entry {
-	/*
-	 * Hash list:
-	 */
-	struct entry		*next;
-
-	/*
-	 * Hash keys:
-	 */
-	void			*timer;
-	void			*start_func;
-	void			*expire_func;
-	pid_t			pid;
-
-	/*
-	 * Number of timeout events:
-	 */
-	unsigned long		count;
-	u32			flags;
-
-	/*
-	 * We save the command-line string to preserve
-	 * this information past task exit:
-	 */
-	char			comm[TASK_COMM_LEN + 1];
-
-} ____cacheline_aligned_in_smp;
-
-/*
- * Spinlock protecting the tables - not taken during lookup:
- */
-static DEFINE_RAW_SPINLOCK(table_lock);
-
-/*
- * Per-CPU lookup locks for fast hash lookup:
- */
-static DEFINE_PER_CPU(raw_spinlock_t, tstats_lookup_lock);
-
-/*
- * Mutex to serialize state changes with show-stats activities:
- */
-static DEFINE_MUTEX(show_mutex);
-
-/*
- * Collection status, active/inactive:
- */
-int __read_mostly timer_stats_active;
-
-/*
- * Beginning/end timestamps of measurement:
- */
-static ktime_t time_start, time_stop;
-
-/*
- * tstat entry structs only get allocated while collection is
- * active and never freed during that time - this simplifies
- * things quite a bit.
- *
- * They get freed when a new collection period is started.
- */
-#define MAX_ENTRIES_BITS	10
-#define MAX_ENTRIES		(1UL << MAX_ENTRIES_BITS)
-
-static unsigned long nr_entries;
-static struct entry entries[MAX_ENTRIES];
-
-static atomic_t overflow_count;
-
-/*
- * The entries are in a hash-table, for fast lookup:
- */
-#define TSTAT_HASH_BITS		(MAX_ENTRIES_BITS - 1)
-#define TSTAT_HASH_SIZE		(1UL << TSTAT_HASH_BITS)
-#define TSTAT_HASH_MASK		(TSTAT_HASH_SIZE - 1)
-
-#define __tstat_hashfn(entry)						\
-	(((unsigned long)(entry)->timer       ^				\
-	  (unsigned long)(entry)->start_func  ^				\
-	  (unsigned long)(entry)->expire_func ^				\
-	  (unsigned long)(entry)->pid		) & TSTAT_HASH_MASK)
-
-#define tstat_hashentry(entry)	(tstat_hash_table + __tstat_hashfn(entry))
-
-static struct entry *tstat_hash_table[TSTAT_HASH_SIZE] __read_mostly;
-
-static void reset_entries(void)
-{
-	nr_entries = 0;
-	memset(entries, 0, sizeof(entries));
-	memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
-	atomic_set(&overflow_count, 0);
-}
-
-static struct entry *alloc_entry(void)
-{
-	if (nr_entries >= MAX_ENTRIES)
-		return NULL;
-
-	return entries + nr_entries++;
-}
-
-static int match_entries(struct entry *entry1, struct entry *entry2)
-{
-	return entry1->timer       == entry2->timer	  &&
-	       entry1->start_func  == entry2->start_func  &&
-	       entry1->expire_func == entry2->expire_func &&
-	       entry1->pid	   == entry2->pid;
-}
-
-/*
- * Look up whether an entry matching this item is present
- * in the hash already. Must be called with irqs off and the
- * lookup lock held:
- */
-static struct entry *tstat_lookup(struct entry *entry, char *comm)
-{
-	struct entry **head, *curr, *prev;
-
-	head = tstat_hashentry(entry);
-	curr = *head;
-
-	/*
-	 * The fastpath is when the entry is already hashed,
-	 * we do this with the lookup lock held, but with the
-	 * table lock not held:
-	 */
-	while (curr) {
-		if (match_entries(curr, entry))
-			return curr;
-
-		curr = curr->next;
-	}
-	/*
-	 * Slowpath: allocate, set up and link a new hash entry:
-	 */
-	prev = NULL;
-	curr = *head;
-
-	raw_spin_lock(&table_lock);
-	/*
-	 * Make sure we have not raced with another CPU:
-	 */
-	while (curr) {
-		if (match_entries(curr, entry))
-			goto out_unlock;
-
-		prev = curr;
-		curr = curr->next;
-	}
-
-	curr = alloc_entry();
-	if (curr) {
-		*curr = *entry;
-		curr->count = 0;
-		curr->next = NULL;
-		memcpy(curr->comm, comm, TASK_COMM_LEN);
-
-		smp_mb(); /* Ensure that curr is initialized before insert */
-
-		if (prev)
-			prev->next = curr;
-		else
-			*head = curr;
-	}
- out_unlock:
-	raw_spin_unlock(&table_lock);
-
-	return curr;
-}
-
-/**
- * timer_stats_update_stats - Update the statistics for a timer.
- * @timer:	pointer to either a timer_list or a hrtimer
- * @pid:	the pid of the task which set up the timer
- * @startf:	pointer to the function which did the timer setup
- * @timerf:	pointer to the timer callback function of the timer
- * @comm:	name of the process which set up the timer
- * @tflags:	The flags field of the timer
- *
- * When the timer is already registered, then the event counter is
- * incremented. Otherwise the timer is registered in a free slot.
- */
-void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
-			      void *timerf, char *comm, u32 tflags)
-{
-	/*
-	 * It doesn't matter which lock we take:
-	 */
-	raw_spinlock_t *lock;
-	struct entry *entry, input;
-	unsigned long flags;
-
-	if (likely(!timer_stats_active))
-		return;
-
-	lock = &per_cpu(tstats_lookup_lock, raw_smp_processor_id());
-
-	input.timer = timer;
-	input.start_func = startf;
-	input.expire_func = timerf;
-	input.pid = pid;
-	input.flags = tflags;
-
-	raw_spin_lock_irqsave(lock, flags);
-	if (!timer_stats_active)
-		goto out_unlock;
-
-	entry = tstat_lookup(&input, comm);
-	if (likely(entry))
-		entry->count++;
-	else
-		atomic_inc(&overflow_count);
-
- out_unlock:
-	raw_spin_unlock_irqrestore(lock, flags);
-}
-
-static void print_name_offset(struct seq_file *m, unsigned long addr)
-{
-	char symname[KSYM_NAME_LEN];
-
-	if (lookup_symbol_name(addr, symname) < 0)
-		seq_printf(m, "<%p>", (void *)addr);
-	else
-		seq_printf(m, "%s", symname);
-}
-
-static int tstats_show(struct seq_file *m, void *v)
-{
-	struct timespec64 period;
-	struct entry *entry;
-	unsigned long ms;
-	long events = 0;
-	ktime_t time;
-	int i;
-
-	mutex_lock(&show_mutex);
-	/*
-	 * If still active then calculate up to now:
-	 */
-	if (timer_stats_active)
-		time_stop = ktime_get();
-
-	time = ktime_sub(time_stop, time_start);
-
-	period = ktime_to_timespec64(time);
-	ms = period.tv_nsec / 1000000;
-
-	seq_puts(m, "Timer Stats Version: v0.3\n");
-	seq_printf(m, "Sample period: %ld.%03ld s\n", (long)period.tv_sec, ms);
-	if (atomic_read(&overflow_count))
-		seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
-	seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
-
-	for (i = 0; i < nr_entries; i++) {
-		entry = entries + i;
-		if (entry->flags & TIMER_DEFERRABLE) {
-			seq_printf(m, "%4luD, %5d %-16s ",
-				entry->count, entry->pid, entry->comm);
-		} else {
-			seq_printf(m, " %4lu, %5d %-16s ",
-				entry->count, entry->pid, entry->comm);
-		}
-
-		print_name_offset(m, (unsigned long)entry->start_func);
-		seq_puts(m, " (");
-		print_name_offset(m, (unsigned long)entry->expire_func);
-		seq_puts(m, ")\n");
-
-		events += entry->count;
-	}
-
-	ms += period.tv_sec * 1000;
-	if (!ms)
-		ms = 1;
-
-	if (events && period.tv_sec)
-		seq_printf(m, "%ld total events, %ld.%03ld events/sec\n",
-			   events, events * 1000 / ms,
-			   (events * 1000000 / ms) % 1000);
-	else
-		seq_printf(m, "%ld total events\n", events);
-
-	mutex_unlock(&show_mutex);
-
-	return 0;
-}
-
-/*
- * After a state change, make sure all concurrent lookup/update
- * activities have stopped:
- */
-static void sync_access(void)
-{
-	unsigned long flags;
-	int cpu;
-
-	for_each_online_cpu(cpu) {
-		raw_spinlock_t *lock = &per_cpu(tstats_lookup_lock, cpu);
-
-		raw_spin_lock_irqsave(lock, flags);
-		/* nothing */
-		raw_spin_unlock_irqrestore(lock, flags);
-	}
-}
-
-static ssize_t tstats_write(struct file *file, const char __user *buf,
-			    size_t count, loff_t *offs)
-{
-	char ctl[2];
-
-	if (count != 2 || *offs)
-		return -EINVAL;
-
-	if (copy_from_user(ctl, buf, count))
-		return -EFAULT;
-
-	mutex_lock(&show_mutex);
-	switch (ctl[0]) {
-	case '0':
-		if (timer_stats_active) {
-			timer_stats_active = 0;
-			time_stop = ktime_get();
-			sync_access();
-		}
-		break;
-	case '1':
-		if (!timer_stats_active) {
-			reset_entries();
-			time_start = ktime_get();
-			smp_mb();
-			timer_stats_active = 1;
-		}
-		break;
-	default:
-		count = -EINVAL;
-	}
-	mutex_unlock(&show_mutex);
-
-	return count;
-}
-
-static int tstats_open(struct inode *inode, struct file *filp)
-{
-	return single_open(filp, tstats_show, NULL);
-}
-
-static const struct file_operations tstats_fops = {
-	.open		= tstats_open,
-	.read		= seq_read,
-	.write		= tstats_write,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
-
-void __init init_timer_stats(void)
-{
-	int cpu;
-
-	for_each_possible_cpu(cpu)
-		raw_spin_lock_init(&per_cpu(tstats_lookup_lock, cpu));
-}
-
-static int __init init_tstats_procfs(void)
-{
-	struct proc_dir_entry *pe;
-
-	pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
-	if (!pe)
-		return -ENOMEM;
-	return 0;
-}
-__initcall(init_tstats_procfs);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 7ae9b24..812b8f8 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1529,8 +1529,6 @@
 		return;
 	}
 
-	timer_stats_timer_set_start_info(&dwork->timer);
-
 	dwork->wq = wq;
 	dwork->cpu = cpu;
 	timer->expires = jiffies + delay;
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 64ec3fd..22eff06 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1003,20 +1003,6 @@
 
 	  If unsure, say N.
 
-config TIMER_STATS
-	bool "Collect kernel timers statistics"
-	depends on DEBUG_KERNEL && PROC_FS
-	help
-	  If you say Y here, additional code will be inserted into the
-	  timer routines to collect statistics about kernel timers being
-	  reprogrammed. The statistics can be read from /proc/timer_stats.
-	  The statistics collection is started by writing 1 to /proc/timer_stats,
-	  writing 0 stops it. This feature is useful to collect information
-	  about timer usage patterns in kernel and userspace. This feature
-	  is lightweight if enabled in the kernel config but not activated
-	  (it defaults to deactivated on bootup and will only be activated
-	  if some application like powertop activates it explicitly).
-
 config DEBUG_TASK_STACK_SCAN_OFF
 	bool "Disable kmemleak task stack scan by default"
 	depends on DEBUG_KMEMLEAK
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 0362da0..2e38502 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -4656,6 +4656,51 @@
 		{ },
 		{ { 0, 1 } },
 	},
+	{
+		/* Mainly testing JIT + imm64 here. */
+		"JMP_JGE_X: ldimm64 test 1",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_LD_IMM64(R2, 2),
+			BPF_JMP_REG(BPF_JGE, R1, R2, 2),
+			BPF_LD_IMM64(R0, 0xffffffffffffffffUL),
+			BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeUL),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xeeeeeeeeU } },
+	},
+	{
+		"JMP_JGE_X: ldimm64 test 2",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, 3),
+			BPF_LD_IMM64(R2, 2),
+			BPF_JMP_REG(BPF_JGE, R1, R2, 0),
+			BPF_LD_IMM64(R0, 0xffffffffffffffffUL),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0xffffffffU } },
+	},
+	{
+		"JMP_JGE_X: ldimm64 test 3",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_LD_IMM64(R1, 3),
+			BPF_LD_IMM64(R2, 2),
+			BPF_JMP_REG(BPF_JGE, R1, R2, 4),
+			BPF_LD_IMM64(R0, 0xffffffffffffffffUL),
+			BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeUL),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
 	/* BPF_JMP | BPF_JNE | BPF_X */
 	{
 		"JMP_JNE_X: if (3 != 2) return 1",
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index b7f9ae7..b490af6 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1059,7 +1059,7 @@
 		return err;
 	}
 
-	if (nla_put(skb, IFLA_PHYS_PORT_NAME, strlen(name), name))
+	if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
 		return -EMSGSIZE;
 
 	return 0;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 8bc6c4e..39e9acf 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -89,6 +89,7 @@
 #include <linux/netfilter_ipv4.h>
 #include <linux/random.h>
 #include <linux/slab.h>
+#include <linux/netfilter/xt_qtaguid.h>
 
 #include <asm/uaccess.h>
 
@@ -412,6 +413,9 @@
 	if (sk) {
 		long timeout;
 
+#ifdef CONFIG_NETFILTER_XT_MATCH_QTAGUID
+		qtaguid_untag(sock, true);
+#endif
 		/* Applications forget to leave groups before exiting */
 		ip_mc_drop_socket(sk);
 
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 7525f5e..ab0bbcb 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -356,6 +356,9 @@
 			       rt->dst.dev->mtu);
 		return -EMSGSIZE;
 	}
+	if (length < sizeof(struct iphdr))
+		return -EINVAL;
+
 	if (flags&MSG_PROBE)
 		goto out;
 
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index c67ece1..7d86fc5 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -264,13 +264,15 @@
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct lp *lp = inet_csk_ca(sk);
+	u32 delta;
 
 	if (sample->rtt_us > 0)
 		tcp_lp_rtt_sample(sk, sample->rtt_us);
 
 	/* calc inference */
-	if (tcp_time_stamp > tp->rx_opt.rcv_tsecr)
-		lp->inference = 3 * (tcp_time_stamp - tp->rx_opt.rcv_tsecr);
+	delta = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
+	if ((s32)delta > 0)
+		lp->inference = 3 * delta;
 
 	/* test if within inference */
 	if (lp->last_drop && (tcp_time_stamp - lp->last_drop < lp->inference))
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 8615a6b..64e1ba4 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -543,6 +543,7 @@
 			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
 		newtp->rx_opt.mss_clamp = req->mss;
 		tcp_ecn_openreq_child(newtp, req);
+		newtp->fastopen_req = NULL;
 		newtp->fastopen_rsk = NULL;
 		newtp->syn_data_acked = 0;
 		newtp->rack.mstamp.v64 = 0;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 0e7c05b..e6bf011 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1246,7 +1246,7 @@
  * eventually). The difference is that pulled data not copied, but
  * immediately discarded.
  */
-static void __pskb_trim_head(struct sk_buff *skb, int len)
+static int __pskb_trim_head(struct sk_buff *skb, int len)
 {
 	struct skb_shared_info *shinfo;
 	int i, k, eat;
@@ -1256,7 +1256,7 @@
 		__skb_pull(skb, eat);
 		len -= eat;
 		if (!len)
-			return;
+			return 0;
 	}
 	eat = len;
 	k = 0;
@@ -1282,23 +1282,28 @@
 	skb_reset_tail_pointer(skb);
 	skb->data_len -= len;
 	skb->len = skb->data_len;
+	return len;
 }
 
 /* Remove acked data from a packet in the transmit queue. */
 int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
 {
+	u32 delta_truesize;
+
 	if (skb_unclone(skb, GFP_ATOMIC))
 		return -ENOMEM;
 
-	__pskb_trim_head(skb, len);
+	delta_truesize = __pskb_trim_head(skb, len);
 
 	TCP_SKB_CB(skb)->seq += len;
 	skb->ip_summed = CHECKSUM_PARTIAL;
 
-	skb->truesize	     -= len;
-	sk->sk_wmem_queued   -= len;
-	sk_mem_uncharge(sk, len);
-	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
+	if (delta_truesize) {
+		skb->truesize	   -= delta_truesize;
+		sk->sk_wmem_queued -= delta_truesize;
+		sk_mem_uncharge(sk, delta_truesize);
+		sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
+	}
 
 	/* Any change of skb->len requires recalculation of tso factor. */
 	if (tcp_skb_pcount(skb) > 1)
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 553138d..58d7c1d 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3318,7 +3318,8 @@
 				      idev->dev, 0, 0);
 	}
 
-	addrconf_dad_start(ifp);
+	if (ifp->state == INET6_IFADDR_STATE_PREDAD)
+		addrconf_dad_start(ifp);
 
 	return 0;
 }
@@ -3531,6 +3532,7 @@
  */
 static struct notifier_block ipv6_dev_notf = {
 	.notifier_call = addrconf_notify,
+	.priority = ADDRCONF_NOTIFY_PRIORITY,
 };
 
 static void addrconf_type_change(struct net_device *dev, unsigned long event)
@@ -3667,7 +3669,7 @@
 		if (keep) {
 			/* set state to skip the notifier below */
 			state = INET6_IFADDR_STATE_DEAD;
-			ifa->state = 0;
+			ifa->state = INET6_IFADDR_STATE_PREDAD;
 			if (!(ifa->flags & IFA_F_NODAD))
 				ifa->flags |= IFA_F_TENTATIVE;
 
@@ -6319,6 +6321,8 @@
 		goto errlo;
 	}
 
+	ip6_route_init_special_entries();
+
 	for (i = 0; i < IN6_ADDR_HSIZE; i++)
 		INIT_HLIST_HEAD(&inet6_addr_lst[i]);
 
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 1a34da0..83c7d2b 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -630,6 +630,8 @@
 		ipv6_local_error(sk, EMSGSIZE, fl6, rt->dst.dev->mtu);
 		return -EMSGSIZE;
 	}
+	if (length < sizeof(struct ipv6hdr))
+		return -EINVAL;
 	if (flags&MSG_PROBE)
 		goto out;
 
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 7d17670..0b21d61 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3455,7 +3455,10 @@
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 	struct net *net = dev_net(dev);
 
-	if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) {
+	if (!(dev->flags & IFF_LOOPBACK))
+		return NOTIFY_OK;
+
+	if (event == NETDEV_REGISTER) {
 		net->ipv6.ip6_null_entry->dst.dev = dev;
 		net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
@@ -3464,6 +3467,12 @@
 		net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
 		net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
 #endif
+	 } else if (event == NETDEV_UNREGISTER) {
+		in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev);
+#ifdef CONFIG_IPV6_MULTIPLE_TABLES
+		in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev);
+		in6_dev_put(net->ipv6.ip6_blk_hole_entry->rt6i_idev);
+#endif
 	}
 
 	return NOTIFY_OK;
@@ -3770,9 +3779,24 @@
 
 static struct notifier_block ip6_route_dev_notifier = {
 	.notifier_call = ip6_route_dev_notify,
-	.priority = 0,
+	.priority = ADDRCONF_NOTIFY_PRIORITY - 10,
 };
 
+void __init ip6_route_init_special_entries(void)
+{
+	/* Registering of the loopback is done before this portion of code,
+	 * the loopback reference in rt6_info will not be taken, do it
+	 * manually for init_net */
+	init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
+	init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
+  #ifdef CONFIG_IPV6_MULTIPLE_TABLES
+	init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
+	init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
+	init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
+	init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
+  #endif
+}
+
 int __init ip6_route_init(void)
 {
 	int ret;
@@ -3799,17 +3823,6 @@
 
 	ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
 
-	/* Registering of the loopback is done before this portion of code,
-	 * the loopback reference in rt6_info will not be taken, do it
-	 * manually for init_net */
-	init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
-	init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
-  #ifdef CONFIG_IPV6_MULTIPLE_TABLES
-	init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
-	init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
-	init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
-	init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
-  #endif
 	ret = fib6_init();
 	if (ret)
 		goto out_register_subsys;
diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c
index 3c7ae04..1fbe4b6 100644
--- a/net/netfilter/xt_qtaguid.c
+++ b/net/netfilter/xt_qtaguid.c
@@ -321,7 +321,7 @@
 			 st_entry->tag,
 			 get_uid_from_tag(st_entry->tag));
 		rb_erase(&st_entry->sock_node, st_to_free_tree);
-		sockfd_put(st_entry->socket);
+		sock_put(st_entry->sk);
 		kfree(st_entry);
 	}
 }
@@ -1922,12 +1922,12 @@
 {
 	struct sock_tag *sock_tag_entry = v;
 	uid_t uid;
-	long f_count;
 
 	CT_DEBUG("qtaguid: proc ctrl pid=%u tgid=%u uid=%u\n",
 		 current->pid, current->tgid, from_kuid(&init_user_ns, current_fsuid()));
 
 	if (sock_tag_entry != SEQ_START_TOKEN) {
+		int sk_ref_count;
 		uid = get_uid_from_tag(sock_tag_entry->tag);
 		CT_DEBUG("qtaguid: proc_read(): sk=%p tag=0x%llx (uid=%u) "
 			 "pid=%u\n",
@@ -1936,13 +1936,13 @@
 			 uid,
 			 sock_tag_entry->pid
 			);
-		f_count = atomic_long_read(
-			&sock_tag_entry->socket->file->f_count);
+		sk_ref_count = atomic_read(
+			&sock_tag_entry->sk->sk_refcnt);
 		seq_printf(m, "sock=%pK tag=0x%llx (uid=%u) pid=%u "
-			   "f_count=%lu\n",
+			   "f_count=%d\n",
 			   sock_tag_entry->sk,
 			   sock_tag_entry->tag, uid,
-			   sock_tag_entry->pid, f_count);
+			   sock_tag_entry->pid, sk_ref_count);
 	} else {
 		seq_printf(m, "events: sockets_tagged=%llu "
 			   "sockets_untagged=%llu "
@@ -2238,8 +2238,8 @@
 			from_kuid(&init_user_ns, current_fsuid()));
 		goto err;
 	}
-	CT_DEBUG("qtaguid: ctrl_tag(%s): socket->...->f_count=%ld ->sk=%p\n",
-		 input, atomic_long_read(&el_socket->file->f_count),
+	CT_DEBUG("qtaguid: ctrl_tag(%s): socket->...->sk_refcnt=%d ->sk=%p\n",
+		 input, atomic_read(&el_socket->sk->sk_refcnt),
 		 el_socket->sk);
 	if (argc < 3) {
 		acct_tag = make_atag_from_value(0);
@@ -2283,16 +2283,9 @@
 		struct tag_ref *prev_tag_ref_entry;
 
 		CT_DEBUG("qtaguid: ctrl_tag(%s): retag for sk=%p "
-			 "st@%p ...->f_count=%ld\n",
+			 "st@%p ...->sk_refcnt=%d\n",
 			 input, el_socket->sk, sock_tag_entry,
-			 atomic_long_read(&el_socket->file->f_count));
-		/*
-		 * This is a re-tagging, so release the sock_fd that was
-		 * locked at the time of the 1st tagging.
-		 * There is still the ref from this call's sockfd_lookup() so
-		 * it can be done within the spinlock.
-		 */
-		sockfd_put(sock_tag_entry->socket);
+			 atomic_read(&el_socket->sk->sk_refcnt));
 		prev_tag_ref_entry = lookup_tag_ref(sock_tag_entry->tag,
 						    &uid_tag_data_entry);
 		BUG_ON(IS_ERR_OR_NULL(prev_tag_ref_entry));
@@ -2312,8 +2305,12 @@
 			res = -ENOMEM;
 			goto err_tag_unref_put;
 		}
+		/*
+		 * Hold the sk refcount here to make sure the sk pointer cannot
+		 * be freed and reused
+		 */
+		sock_hold(el_socket->sk);
 		sock_tag_entry->sk = el_socket->sk;
-		sock_tag_entry->socket = el_socket;
 		sock_tag_entry->pid = current->tgid;
 		sock_tag_entry->tag = combine_atag_with_uid(acct_tag, uid_int);
 		spin_lock_bh(&uid_tag_data_tree_lock);
@@ -2340,10 +2337,11 @@
 		atomic64_inc(&qtu_events.sockets_tagged);
 	}
 	spin_unlock_bh(&sock_tag_list_lock);
-	/* We keep the ref to the socket (file) until it is untagged */
-	CT_DEBUG("qtaguid: ctrl_tag(%s): done st@%p ...->f_count=%ld\n",
+	/* We keep the ref to the sk until it is untagged */
+	CT_DEBUG("qtaguid: ctrl_tag(%s): done st@%p ...->sk_refcnt=%d\n",
 		 input, sock_tag_entry,
-		 atomic_long_read(&el_socket->file->f_count));
+		 atomic_read(&el_socket->sk->sk_refcnt));
+	sockfd_put(el_socket);
 	return 0;
 
 err_tag_unref_put:
@@ -2351,8 +2349,8 @@
 	tag_ref_entry->num_sock_tags--;
 	free_tag_ref_from_utd_entry(tag_ref_entry, uid_tag_data_entry);
 err_put:
-	CT_DEBUG("qtaguid: ctrl_tag(%s): done. ...->f_count=%ld\n",
-		 input, atomic_long_read(&el_socket->file->f_count) - 1);
+	CT_DEBUG("qtaguid: ctrl_tag(%s): done. ...->sk_refcnt=%d\n",
+		 input, atomic_read(&el_socket->sk->sk_refcnt) - 1);
 	/* Release the sock_fd that was grabbed by sockfd_lookup(). */
 	sockfd_put(el_socket);
 	return res;
@@ -2368,17 +2366,13 @@
 	int sock_fd = 0;
 	struct socket *el_socket;
 	int res, argc;
-	struct sock_tag *sock_tag_entry;
-	struct tag_ref *tag_ref_entry;
-	struct uid_tag_data *utd_entry;
-	struct proc_qtu_data *pqd_entry;
 
 	argc = sscanf(input, "%c %d", &cmd, &sock_fd);
 	CT_DEBUG("qtaguid: ctrl_untag(%s): argc=%d cmd=%c sock_fd=%d\n",
 		 input, argc, cmd, sock_fd);
 	if (argc < 2) {
 		res = -EINVAL;
-		goto err;
+		return res;
 	}
 	el_socket = sockfd_lookup(sock_fd, &res);  /* This locks the file */
 	if (!el_socket) {
@@ -2386,17 +2380,31 @@
 			" sock_fd=%d err=%d pid=%u tgid=%u uid=%u\n",
 			input, sock_fd, res, current->pid, current->tgid,
 			from_kuid(&init_user_ns, current_fsuid()));
-		goto err;
+		return res;
 	}
 	CT_DEBUG("qtaguid: ctrl_untag(%s): socket->...->f_count=%ld ->sk=%p\n",
 		 input, atomic_long_read(&el_socket->file->f_count),
 		 el_socket->sk);
+	res = qtaguid_untag(el_socket, false);
+	sockfd_put(el_socket);
+	return res;
+}
+
+int qtaguid_untag(struct socket *el_socket, bool kernel)
+{
+	int res;
+	pid_t pid;
+	struct sock_tag *sock_tag_entry;
+	struct tag_ref *tag_ref_entry;
+	struct uid_tag_data *utd_entry;
+	struct proc_qtu_data *pqd_entry;
+
 	spin_lock_bh(&sock_tag_list_lock);
 	sock_tag_entry = get_sock_stat_nl(el_socket->sk);
 	if (!sock_tag_entry) {
 		spin_unlock_bh(&sock_tag_list_lock);
 		res = -EINVAL;
-		goto err_put;
+		return res;
 	}
 	/*
 	 * The socket already belongs to the current process
@@ -2408,20 +2416,26 @@
 	BUG_ON(!tag_ref_entry);
 	BUG_ON(tag_ref_entry->num_sock_tags <= 0);
 	spin_lock_bh(&uid_tag_data_tree_lock);
+	if (kernel)
+		pid = sock_tag_entry->pid;
+	else
+		pid = current->tgid;
 	pqd_entry = proc_qtu_data_tree_search(
-		&proc_qtu_data_tree, current->tgid);
+		&proc_qtu_data_tree, pid);
 	/*
 	 * TODO: remove if, and start failing.
 	 * At first, we want to catch user-space code that is not
 	 * opening the /dev/xt_qtaguid.
 	 */
-	if (IS_ERR_OR_NULL(pqd_entry))
+	if (IS_ERR_OR_NULL(pqd_entry) || !sock_tag_entry->list.next) {
 		pr_warn_once("qtaguid: %s(): "
 			     "User space forgot to open /dev/xt_qtaguid? "
-			     "pid=%u tgid=%u uid=%u\n", __func__,
-			     current->pid, current->tgid, from_kuid(&init_user_ns, current_fsuid()));
-	else
+			     "pid=%u tgid=%u sk_pid=%u, uid=%u\n", __func__,
+			     current->pid, current->tgid, sock_tag_entry->pid,
+			     from_kuid(&init_user_ns, current_fsuid()));
+	} else {
 		list_del(&sock_tag_entry->list);
+	}
 	spin_unlock_bh(&uid_tag_data_tree_lock);
 	/*
 	 * We don't free tag_ref from the utd_entry here,
@@ -2430,30 +2444,17 @@
 	tag_ref_entry->num_sock_tags--;
 	spin_unlock_bh(&sock_tag_list_lock);
 	/*
-	 * Release the sock_fd that was grabbed at tag time,
-	 * and once more for the sockfd_lookup() here.
+	 * Release the sock_fd that was grabbed at tag time.
 	 */
-	sockfd_put(sock_tag_entry->socket);
-	CT_DEBUG("qtaguid: ctrl_untag(%s): done. st@%p ...->f_count=%ld\n",
-		 input, sock_tag_entry,
-		 atomic_long_read(&el_socket->file->f_count) - 1);
-	sockfd_put(el_socket);
+	sock_put(sock_tag_entry->sk);
+	CT_DEBUG("qtaguid: done. st@%p ...->sk_refcnt=%d\n",
+		 sock_tag_entry,
+		 atomic_read(&el_socket->sk->sk_refcnt));
 
 	kfree(sock_tag_entry);
 	atomic64_inc(&qtu_events.sockets_untagged);
 
 	return 0;
-
-err_put:
-	CT_DEBUG("qtaguid: ctrl_untag(%s): done. socket->...->f_count=%ld\n",
-		 input, atomic_long_read(&el_socket->file->f_count) - 1);
-	/* Release the sock_fd that was grabbed by sockfd_lookup(). */
-	sockfd_put(el_socket);
-	return res;
-
-err:
-	CT_DEBUG("qtaguid: ctrl_untag(%s): done.\n", input);
-	return res;
 }
 
 static ssize_t qtaguid_ctrl_parse(const char *input, size_t count)
diff --git a/net/netfilter/xt_qtaguid_internal.h b/net/netfilter/xt_qtaguid_internal.h
index 6dc14a9..8178fbd 100644
--- a/net/netfilter/xt_qtaguid_internal.h
+++ b/net/netfilter/xt_qtaguid_internal.h
@@ -256,8 +256,6 @@
 struct sock_tag {
 	struct rb_node sock_node;
 	struct sock *sk;  /* Only used as a number, never dereferenced */
-	/* The socket is needed for sockfd_put() */
-	struct socket *socket;
 	/* Used to associate with a given pid */
 	struct list_head list;   /* in proc_qtu_data.sock_tag_list */
 	pid_t pid;
diff --git a/net/netfilter/xt_qtaguid_print.c b/net/netfilter/xt_qtaguid_print.c
index f6a00a3..2a7190d 100644
--- a/net/netfilter/xt_qtaguid_print.c
+++ b/net/netfilter/xt_qtaguid_print.c
@@ -24,7 +24,7 @@
 #include <linux/rbtree.h>
 #include <linux/slab.h>
 #include <linux/spinlock_types.h>
-
+#include <net/sock.h>
 
 #include "xt_qtaguid_internal.h"
 #include "xt_qtaguid_print.h"
@@ -237,10 +237,10 @@
 	tag_str = pp_tag_t(&st->tag);
 	res = kasprintf(GFP_ATOMIC, "sock_tag@%p{"
 			"sock_node=rb_node{...}, "
-			"sk=%p socket=%p (f_count=%lu), list=list_head{...}, "
+			"sk=%p (f_count=%d), list=list_head{...}, "
 			"pid=%u, tag=%s}",
-			st, st->sk, st->socket, atomic_long_read(
-				&st->socket->file->f_count),
+			st, st->sk, atomic_read(
+				&st->sk->sk_refcnt),
 			st->pid, tag_str);
 	_bug_on_err_or_null(res);
 	kfree(tag_str);
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 9b4260d..a9a7128 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -796,8 +796,7 @@
 }
 EXPORT_SYMBOL(rfkill_resume_polling);
 
-#ifdef CONFIG_RFKILL_PM
-static int rfkill_suspend(struct device *dev)
+static __maybe_unused int rfkill_suspend(struct device *dev)
 {
 	struct rfkill *rfkill = to_rfkill(dev);
 
@@ -807,7 +806,7 @@
 	return 0;
 }
 
-static int rfkill_resume(struct device *dev)
+static __maybe_unused int rfkill_resume(struct device *dev)
 {
 	struct rfkill *rfkill = to_rfkill(dev);
 	bool cur;
@@ -827,19 +826,13 @@
 }
 
 static SIMPLE_DEV_PM_OPS(rfkill_pm_ops, rfkill_suspend, rfkill_resume);
-#define RFKILL_PM_OPS (&rfkill_pm_ops)
-#else
-#define RFKILL_PM_OPS NULL
-#endif
 
 static struct class rfkill_class = {
 	.name		= "rfkill",
 	.dev_release	= rfkill_release,
 	.dev_groups	= rfkill_dev_groups,
 	.dev_uevent	= rfkill_dev_uevent,
-#ifdef CONFIG_RFKILL_PM
-	.pm		= RFKILL_PM_OPS,
-#endif
+	.pm		= IS_ENABLED(CONFIG_RFKILL_PM) ? &rfkill_pm_ops : NULL,
 };
 
 bool rfkill_blocked(struct rfkill *rfkill)
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 44ac85f..d0ca0db 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -241,7 +241,7 @@
 
 	.uinfo = {
 		.auth = {
-			.icv_truncbits = 96,
+			.icv_truncbits = 128,
 			.icv_fullbits = 256,
 		}
 	},
diff --git a/samples/bpf/test_verifier.c b/samples/bpf/test_verifier.c
index 369ffaa..dc7dec9 100644
--- a/samples/bpf/test_verifier.c
+++ b/samples/bpf/test_verifier.c
@@ -1218,16 +1218,22 @@
 		.result = ACCEPT,
 	},
 	{
-		"unpriv: obfuscate stack pointer",
+		"stack pointer arithmetic",
 		.insns = {
-			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
-			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
-			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_MOV64_IMM(BPF_REG_1, 4),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
+			BPF_ST_MEM(0, BPF_REG_2, 4, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
+			BPF_ST_MEM(0, BPF_REG_2, 4, 0),
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_EXIT_INSN(),
 		},
-		.errstr_unpriv = "R2 pointer arithmetic",
-		.result_unpriv = REJECT,
 		.result = ACCEPT,
 	},
 	{
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index bc44626..5cb7e04 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2155,7 +2155,20 @@
 		/* cancel the pending probing work */
 		chip = card->private_data;
 		hda = container_of(chip, struct hda_intel, chip);
+		/* FIXME: below is an ugly workaround.
+		 * Both device_release_driver() and driver_probe_device()
+		 * take *both* the device's and its parent's lock before
+		 * calling the remove() and probe() callbacks.  The codec
+		 * probe takes the locks of both the codec itself and its
+		 * parent, i.e. the PCI controller dev.  Meanwhile, when
+		 * the PCI controller is unbound, it takes its lock, too
+		 * ==> ouch, a deadlock!
+		 * As a workaround, we unlock temporarily here the controller
+		 * device during cancel_work_sync() call.
+		 */
+		device_unlock(&pci->dev);
 		cancel_work_sync(&hda->probe_work);
+		device_lock(&pci->dev);
 
 		snd_card_free(card);
 	}
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 9c6f471..17224de 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -945,7 +945,7 @@
 	tristate
 
 config SND_SOC_WCD_SPI
-	depends on CONFIG_SPI
+	depends on SPI
 	tristate
 
 config SND_SOC_WL1273
diff --git a/sound/soc/codecs/wcd934x/wcd934x.c b/sound/soc/codecs/wcd934x/wcd934x.c
index 4b6fcb0b..c0a32f3 100644
--- a/sound/soc/codecs/wcd934x/wcd934x.c
+++ b/sound/soc/codecs/wcd934x/wcd934x.c
@@ -8516,6 +8516,7 @@
 	{WCD934X_HPH_L_TEST, 0x01, 0x01},
 	{WCD934X_HPH_R_TEST, 0x01, 0x01},
 	{WCD934X_CPE_FLL_CONFIG_CTL_2, 0xFF, 0x20},
+	{WCD934X_MBHC_NEW_CTL_2, 0x0C, 0x00},
 };
 
 static const struct tavil_reg_mask_val tavil_codec_reg_init_1_1_val[] = {
diff --git a/sound/soc/codecs/wcd_cpe_core.c b/sound/soc/codecs/wcd_cpe_core.c
index cf014d7..0c2f41a 100644
--- a/sound/soc/codecs/wcd_cpe_core.c
+++ b/sound/soc/codecs/wcd_cpe_core.c
@@ -1725,10 +1725,10 @@
 	if (pos)
 		copy_count = pos - buf;
 
-	if (copy_count > WCD_CPE_IMAGE_FNAME_MAX) {
+	if (copy_count > (WCD_CPE_IMAGE_FNAME_MAX - 1)) {
 		dev_err(core->dev,
 			"%s: Invalid length %d, max allowed %d\n",
-			__func__, copy_count, WCD_CPE_IMAGE_FNAME_MAX);
+			__func__, copy_count, WCD_CPE_IMAGE_FNAME_MAX - 1);
 		return -EINVAL;
 	}
 
diff --git a/tools/power/cpupower/utils/helpers/cpuid.c b/tools/power/cpupower/utils/helpers/cpuid.c
index 93b0aa7..39c2c7d 100644
--- a/tools/power/cpupower/utils/helpers/cpuid.c
+++ b/tools/power/cpupower/utils/helpers/cpuid.c
@@ -156,6 +156,7 @@
 					 */
 			case 0x2C:	/* Westmere EP - Gulftown */
 				cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO;
+				break;
 			case 0x2A:	/* SNB */
 			case 0x2D:	/* SNB Xeon */
 			case 0x3A:	/* IVB */
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index a89f80a..6300c1a 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -5,7 +5,7 @@
 .PHONY: all all_32 all_64 warn_32bit_failure clean
 
 TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_syscall test_mremap_vdso \
-			check_initial_reg_state sigreturn ldt_gdt iopl \
+			check_initial_reg_state sigreturn ldt_gdt iopl mpx-mini-test \
 			protection_keys
 TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \
 			test_FCMOV test_FCOMI test_FISTTP \