Merge "ARM: dts: msm: Enable vdd_mss on SDM670"
diff --git a/Documentation/devicetree/bindings/arm/msm/glink_smem_native_xprt.txt b/Documentation/devicetree/bindings/arm/msm/glink_smem_native_xprt.txt
index f68c8e4..7011d5c 100644
--- a/Documentation/devicetree/bindings/arm/msm/glink_smem_native_xprt.txt
+++ b/Documentation/devicetree/bindings/arm/msm/glink_smem_native_xprt.txt
@@ -11,6 +11,7 @@
 -label : the name of the subsystem this link connects to
 
 Optional properties:
+-cpu-affinity: Cores to pin the interrupt and receiving work thread to.
 -qcom,qos-config: Reference to the qos configuration elements. It depends on
 		ramp-time.
 -qcom,ramp-time: Worst case time in microseconds to transition to this power
@@ -36,6 +37,7 @@
 		qcom,irq-mask = <0x1000>;
 		interrupts = <0 25 1>;
 		label = "lpass";
+		cpu-affinity = <1 2>;
 		qcom,qos-config = <&glink_qos_adsp>;
 		qcom,ramp-time = <0x10>,
 				     <0x20>,
diff --git a/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt b/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt
index a6537eb..105dcac 100644
--- a/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt
+++ b/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt
@@ -110,6 +110,10 @@
 			   on behalf of the subsystem driver.
 - qcom,mdm-link-info: a string indicating additional info about the physical link.
 			For example: "devID_domain.bus.slot" in case of PCIe.
+- qcom,mdm-auto-boot: Boolean. Indicates that this instance of esoc boots independently.
+- qcom,mdm-statusline-not-a-powersource: Boolean. If set, the status line to the esoc
+		device is not a power source.
+- qcom,mdm-userspace-handle-shutdown: Boolean. If set, userspace handles shutdown requests.
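+
+		For illustration only (not taken from an actual target), an esoc node
+		using these flags would simply list them, since presence alone enables
+		each feature:
+			qcom,mdm-auto-boot;
+			qcom,mdm-statusline-not-a-powersource;
+			qcom,mdm-userspace-handle-shutdown;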
 
 Example:
 	mdm0: qcom,mdm0 {
diff --git a/Documentation/devicetree/bindings/arm/msm/msm.txt b/Documentation/devicetree/bindings/arm/msm/msm.txt
index 327a7d4..61226c9 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm.txt
@@ -107,6 +107,9 @@
 - MSM8953
   compatible = "qcom,msm8953"
 
+- SDM450
+  compatible = "qcom,sdm450"
+
 - MSM8937
   compatible = "qcom,msm8937"
 
@@ -310,6 +313,9 @@
 compatible = "qcom,msm8953-mtp"
 compatible = "qcom,msm8953-qrd"
 compatible = "qcom,msm8953-qrd-sku3"
+compatible = "qcom,sdm450-mtp"
+compatible = "qcom,sdm450-cdp"
+compatible = "qcom,sdm450-qrd"
 compatible = "qcom,mdm9640-cdp"
 compatible = "qcom,mdm9640-mtp"
 compatible = "qcom,mdm9640-rumi"
diff --git a/Documentation/devicetree/bindings/arm/msm/qdss_mhi.txt b/Documentation/devicetree/bindings/arm/msm/qdss_mhi.txt
new file mode 100644
index 0000000..928a4f4
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/qdss_mhi.txt
@@ -0,0 +1,15 @@
+Qualcomm Technologies, Inc. QDSS bridge Driver
+
+This driver enables routing of debug data from the modem
+subsystem to the APSS host.
+
+Required properties:
+-compatible : "qcom,qdss-mhi".
+-qcom,mhi : phandle of MHI Device to connect to.
+
+Example:
+	qcom,qdss-mhi {
+		compatible = "qcom,qdss-mhi";
+		qcom,mhi = <&mhi_0>;
+	};
+
diff --git a/Documentation/devicetree/bindings/batterydata/batterydata.txt b/Documentation/devicetree/bindings/batterydata/batterydata.txt
new file mode 100644
index 0000000..884b19c
--- /dev/null
+++ b/Documentation/devicetree/bindings/batterydata/batterydata.txt
@@ -0,0 +1,268 @@
+Battery Profile Data
+
+Battery Data is a collection of battery profile data made available to
+the QPNP Charger and BMS drivers via device tree.
+
+qcom,battery-data node required properties:
+- qcom,rpull-up-kohm : The vadc pullup resistor's resistance value in kOhms.
+- qcom,vref-batt-therm-uv : The vadc voltage used to make readings.
+			For Qualcomm Technologies, Inc. VADCs, this should be
+			1800000uV.
+
+qcom,battery-data node optional properties:
+- qcom,batt-id-range-pct : The allowed variation between the upper and lower
+			bound within which a given battery ID resistance is
+			considered valid. This value is expressed as a percentage
+			of the kohm resistance specified by qcom,batt-id-kohm.
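+			For example (value is illustrative only), allowing a
+			15 percent window around the nominal resistance:
+			qcom,batt-id-range-pct = <15>;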
+
+qcom,battery-data can also include any number of child nodes. These child
+nodes will be treated as battery profile data nodes.
+
+Profile data node required properties:
+- qcom,fcc-mah : Full charge capacity of the battery in milliamp-hours
+- qcom,default-rbatt-mohm : The nominal battery resistance value
+- qcom,rbatt-capacitive-mohm : The capacitive resistance of the battery.
+- qcom,flat-ocv-threshold-uv : The threshold under which the battery can be
+			considered to be in the flat portion of the discharge
+			curve.
+- qcom,max-voltage-uv : The maximum rated voltage of the battery
+- qcom,v-cutoff-uv : The cutoff voltage of the battery at which the device
+			should shutdown gracefully.
+- qcom,chg-term-ua : The termination charging current of the battery.
+- qcom,batt-id-kohm : The battery id resistance of the battery. It can be
+			specified as an array to support multiple IDs for one battery
+			module when the ID resistance of some battery modules spans
+			several ranges.
+- qcom,battery-type : A string indicating the type of battery.
+- qcom,fg-profile-data : An array of hexadecimal values used to configure more
+			complex fuel gauge peripherals which have a large number
+			of coefficients used in hardware state machines and thus
+			influence the final output of the state of charge read
+			by software.
+
+Profile data node optional properties:
+- qcom,chg-rslow-comp-c1 : A constant for rslow compensation in the fuel gauge.
+			This will be provided by the profiling tool for
+			additional fuel gauge accuracy during charging.
+- qcom,chg-rslow-comp-c2 : A constant for rslow compensation in the fuel gauge.
+			This will be provided by the profiling tool for
+			additional fuel gauge accuracy during charging.
+- qcom,chg-rslow-comp-thr : A constant for rslow compensation in the fuel gauge.
+			This will be provided by the profiling tool for
+			additional fuel gauge accuracy during charging.
+- qcom,chg-rs-to-rslow: A constant for rslow compensation in the fuel gauge.
+			This will be provided by the profiling tool for
+			additional fuel gauge accuracy during charging.
+- qcom,fastchg-current-ma: Specifies the maximum fastcharge current.
+- qcom,fg-cc-cv-threshold-mv: Voltage threshold in mV for transition from constant
+			charge (CC) to constant voltage (CV). This value should
+			be 10 mV less than the float voltage.
+			This property should only be specified if the
+			"qcom,autoadjust-vfloat" property is specified in the
+			charger driver, to ensure proper operation.
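+			For example (values are illustrative), with a float
+			voltage of 4350 mV this would be set 10 mV lower:
+			qcom,fg-cc-cv-threshold-mv = <4340>;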
+- qcom,thermal-coefficients: Byte array of thermal coefficients for reading
+			battery thermistor. This should be exactly 6 bytes
+			in length.
+			Example: [01 02 03 04 05 06]
+- qcom,soc-based-step-chg: A bool property to indicate if the battery will
+			perform SoC (State of Charge) based step charging.
+			If yes, the low and high thresholds defined in
+			"qcom,step-chg-ranges" tuples should be assigned as
+			SoC values in percentage.
+- qcom,step-chg-ranges: Array of tuples in which each tuple describes one range
+			of the step charging setting.
+			A range contains the following 3 integer elements:
+			[0]: the low threshold of battery voltage in uV
+			     or SoC (State of Charge) in percentage when
+			     SoC based step charge is used;
+			[1]: the high threshold of battery voltage in uV
+			     or SoC in percentage when SoC based step charge
+			     is used;
+			[2]: the FCC (full charging current) in uA when battery
+			     voltage or SoC falls between the low and high
+			     thresholds.
+			The threshold values of the ranges should be in ascending
+			order and shouldn't overlap. A maximum of 8 ranges is supported.
+- qcom,jeita-fcc-ranges: Array of tuples in which each tuple describes one range
+			of the sw-jeita FCC (full charging current) setting.
+			A range contains the following 3 integer elements:
+			[0]: the low threshold of battery temperature in deci-degree;
+			[1]: the high threshold of battery temperature in deci-degree;
+			[2]: the FCC in uA when battery temperature falls between
+			     the low and high thresholds.
+			The threshold values of the ranges should be in ascending
+			order and shouldn't overlap. A maximum of 8 ranges is supported.
+- qcom,jeita-fv-ranges: Array of tuples in which each tuple describes one range
+			of the sw-jeita FV (float voltage) setting.
+			A range contains the following 3 integer elements:
+			[0]: the low threshold of battery temperature in deci-degree;
+			[1]: the high threshold of battery temperature in deci-degree;
+			[2]: the FV in uV when battery temperature falls between
+			     the low and high thresholds.
+			The threshold values of the ranges should be in ascending
+			order and shouldn't overlap. A maximum of 8 ranges is supported.
+
+Profile data node required subnodes:
+- qcom,fcc-temp-lut : A 1-dimensional lookup table node that encodes
+			temperature to fcc lookup. The units for this lookup
+			table should be degrees celsius to milliamp-hours.
+- qcom,pc-temp-ocv-lut : A 2-dimensional lookup table node that encodes
+			temperature and percent charge to open circuit voltage
+			lookup. The units for this lookup table should be
+			degrees celsius and percent to millivolts.
+- qcom,rbatt-sf-lut : A 2-dimensional lookup table node that encodes
+			temperature and percent charge to battery internal
+			resistance lookup. The units for this lookup table
+			should be degrees celsius and percent to milliohms.
+
+Profile data node optional subnodes:
+- qcom,ibat-acc-lut: A 2-dimensional lookup table that encodes temperature
+			and battery current to battery ACC (apparent charge
+			capacity). The units for this lookup table should be
+			temperature in degrees celsius, ibat in milli-amps
+			and ACC in milli-ampere-hour.
+
+Lookup table required properties:
+- qcom,lut-col-legend : An array that encodes the legend of the lookup table's
+			columns. The length of this array will determine the
+			lookup table's width.
+- qcom,lut-data : An array that encodes the lookup table's data. The size of this
+			array should be equal to the size of qcom,lut-col-legend
+			multiplied by 1 if it's a 1-dimensional table, or
+			the size of qcom,lut-row-legend if it's a 2-dimensional
+			table. The data should be in a flattened row-major
+			representation.
+
+Lookup table optional properties:
+- qcom,lut-row-legend : An array that encodes the legend of the lookup table's rows.
+			If this property exists, then it is assumed that the
+			lookup table is a 2-dimensional table.
+
+Example:
+
+In msm8974-mtp.dtsi:
+
+mtp_batterydata: qcom,battery-data {
+	qcom,rpull-up-kohm = <100>;
+	qcom,vref-batt-therm-uv = <1800000>;
+
+	/include/ "batterydata-palladium.dtsi"
+	/include/ "batterydata-mtp-3000mah.dtsi"
+};
+
+&pm8941_bms {
+	qcom,battery-data = <&mtp_batterydata>;
+};
+
+In batterydata-palladium.dtsi:
+
+qcom,palladium-batterydata {
+	qcom,fcc-mah = <1500>;
+	qcom,default-rbatt-mohm = <236>;
+	qcom,rbatt-capacitive-mohm = <50>;
+	qcom,flat-ocv-threshold-uv = <3800000>;
+	qcom,max-voltage-uv = <4200000>;
+	qcom,v-cutoff-uv = <3400000>;
+	qcom,chg-term-ua = <100000>;
+	qcom,batt-id-kohm = <75>;
+	qcom,step-chg-ranges = <3600000 4000000 3000000
+				4001000 4200000 2800000
+				4201000 4400000 2000000>;
+	qcom,jeita-fcc-ranges = <0      100     600000
+				 101    200     2000000
+				 201    450     3000000
+				 451    550     600000>;
+	qcom,jeita-fv-ranges = <0      100     4200000
+				101    450     4350000
+				451    550     4200000>;
+	qcom,battery-type = "palladium_1500mah";
+
+	qcom,fcc-temp-lut {
+		qcom,lut-col-legend = <(-20) 0 25 40 65>;
+		qcom,lut-data = <1492 1492 1493 1483 1502>;
+	};
+
+	qcom,pc-temp-ocv-lut {
+		qcom,lut-col-legend = <(-20) 0 25 40 65>;
+		qcom,lut-row-legend = <100 95 90 85 80 75 70>,
+				<65 60 55 50 45 40 35>,
+				<30 25 20 15 10 9 8>,
+				<7 6 5 4 3 2 1 0>;
+		qcom,lut-data = <4173 4167 4163 4156 4154>,
+			<4104 4107 4108 4102 4104>,
+			<4057 4072 4069 4061 4060>,
+			<3973 4009 4019 4016 4020>,
+			<3932 3959 3981 3982 3983>,
+			<3899 3928 3954 3950 3950>,
+			<3868 3895 3925 3921 3920>,
+			<3837 3866 3898 3894 3892>,
+			<3812 3841 3853 3856 3862>,
+			<3794 3818 3825 3823 3822>,
+			<3780 3799 3804 3804 3803>,
+			<3768 3787 3790 3788 3788>,
+			<3757 3779 3778 3775 3776>,
+			<3747 3772 3771 3766 3765>,
+			<3736 3763 3766 3760 3746>,
+			<3725 3749 3756 3747 3729>,
+			<3714 3718 3734 3724 3706>,
+			<3701 3703 3696 3689 3668>,
+			<3675 3695 3682 3675 3662>,
+			<3670 3691 3680 3673 3661>,
+			<3661 3686 3679 3672 3656>,
+			<3649 3680 3676 3669 3641>,
+			<3633 3669 3667 3655 3606>,
+			<3610 3647 3640 3620 3560>,
+			<3580 3607 3596 3572 3501>,
+			<3533 3548 3537 3512 3425>,
+			<3457 3468 3459 3429 3324>,
+			<3328 3348 3340 3297 3172>,
+			<3000 3000 3000 3000 3000>;
+	};
+
+	qcom,rbatt-sf-lut {
+		qcom,lut-col-legend = <(-20) 0 25 40 65>;
+		qcom,lut-row-legend = <100 95 90 85 80 75 70>,
+				<65 60 55 50 45 40 35>,
+				<30 25 20 15 10 9 8>,
+				<7 6 5 4 3 2 1 0>;
+		qcom,lut-data = <357 187 100 91 91>,
+			<400 208 105 94 94>,
+			<390 204 106 95 96>,
+			<391 201 108 98 98>,
+			<391 202 110 98 100>,
+			<390 200 110 99 102>,
+			<389 200 110 99 102>,
+			<393 202 101 93 100>,
+			<407 205 99 89 94>,
+			<428 208 100 91 96>,
+			<455 212 102 92 98>,
+			<495 220 104 93 101>,
+			<561 232 107 95 102>,
+			<634 245 112 98 98>,
+			<714 258 114 98 98>,
+			<791 266 114 97 100>,
+			<871 289 108 95 97>,
+			<973 340 124 108 105>,
+			<489 241 109 96 99>,
+			<511 246 110 96 99>,
+			<534 252 111 95 98>,
+			<579 263 112 96 96>,
+			<636 276 111 95 97>,
+			<730 294 109 96 99>,
+			<868 328 112 98 104>,
+			<1089 374 119 101 115>,
+			<1559 457 128 105 213>,
+			<12886 1026 637 422 3269>,
+			<170899 127211 98968 88907 77102>;
+	};
+
+	qcom,ibat-acc-lut {
+		qcom,lut-col-legend = <(-20) 0 25>;
+		qcom,lut-row-legend = <0 250 500 1000>;
+		qcom,lut-data = <1470 1470 1473>,
+				<1406 1406 1430>,
+				<1247 1247 1414>,
+				<764 764 1338>;
+	};
+};
+
diff --git a/Documentation/devicetree/bindings/devfreq/arm-memlat-mon.txt b/Documentation/devicetree/bindings/devfreq/arm-memlat-mon.txt
index 6f2fac7..3786412 100644
--- a/Documentation/devicetree/bindings/devfreq/arm-memlat-mon.txt
+++ b/Documentation/devicetree/bindings/devfreq/arm-memlat-mon.txt
@@ -4,7 +4,7 @@
 to measure the parameters for latency driven memory access patterns.
 
 Required properties:
-- compatible:			Must be "qcom,arm-memlat-mon"
+- compatible:			Must be "qcom,arm-memlat-mon" or "qcom,arm-cpu-mon"
 - qcom,cpulist:			List of CPU phandles to be monitored in a cluster
 - qcom,target-dev:		The DT device that corresponds to this master port
 - qcom,core-dev-table:		A mapping table of core frequency to a required bandwidth vote at the
diff --git a/Documentation/devicetree/bindings/display/msm/dsi.txt b/Documentation/devicetree/bindings/display/msm/dsi.txt
index 3534f04..fc95288 100644
--- a/Documentation/devicetree/bindings/display/msm/dsi.txt
+++ b/Documentation/devicetree/bindings/display/msm/dsi.txt
@@ -121,6 +121,12 @@
 					If ping pong split is enabled, this time should not be higher
 					than two times the dsi link rate time.
 					If the property is not specified, then the default value is 14000 us.
+- qcom,panel-allow-phy-poweroff:	A boolean property indicating that the panel allows the PHY power supply
+					to be turned off during idle screen. The panel should be able to handle the
+					DSI lanes in floating state (not LP00 or LP11) for this property to be
+					enabled. Software turns off the PHY PMIC power supply, PHY LDO and DSI
+					lane LDO during idle screen (footswitch control off) when this property
+					is enabled.
+- qcom,dsi-phy-regulator-min-datarate-bps:  Minimum per lane data rate (bps) to turn on PHY regulator.
 
 [1] Documentation/devicetree/bindings/clocks/clock-bindings.txt
 [2] Documentation/devicetree/bindings/graph.txt
@@ -229,4 +235,6 @@
 		vddio-supply = <&pma8084_l12>;
 
 		qcom,dsi-phy-regulator-ldo-mode;
+		qcom,panel-allow-phy-poweroff;
+		qcom,dsi-phy-regulator-min-datarate-bps = <1200000000>;
 	};
diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt
index 22b4e91..4b4c274 100644
--- a/Documentation/devicetree/bindings/display/msm/sde.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde.txt
@@ -125,6 +125,11 @@
 				configuration value.
 - qcom,sde-ubwc-swizzle:	Property to specify the default UBWC swizzle
 				configuration value.
+- qcom,sde-smart-panel-align-mode: A u32 property to specify the align mode for
+				split display on smart panel. Possible values:
+				0x0 - no alignment
+				0xc - align at start of frame
+				0xd - align at start of line
 - qcom,sde-panic-per-pipe:	Boolean property to indicate if panic signal
 				control feature is available on each source pipe.
 - qcom,sde-has-src-split:	Boolean property to indicate if source split
@@ -366,6 +371,8 @@
 - qcom,sde-cdp-setting:		Array of 2 cell property, with a format of
 				<read enable, write enable> for cdp use cases in
 				order of <real_time>, and <non_real_time>.
+- qcom,sde-qos-cpu-mask:	A u32 value indicating desired PM QoS CPU affine mask.
+- qcom,sde-qos-cpu-dma-latency:	A u32 value indicating desired PM QoS CPU DMA latency in usec.
 - qcom,sde-inline-rot-xin:	An integer array of xin-ids related to inline
 				rotation.
 - qcom,sde-inline-rot-xin-type:	A string array indicating the type of xin,
@@ -533,6 +540,7 @@
     qcom,sde-ubwc-version = <0x100>;
     qcom,sde-ubwc-static = <0x100>;
     qcom,sde-ubwc-swizzle = <0>;
+    qcom,sde-smart-panel-align-mode = <0xd>;
     qcom,sde-panic-per-pipe;
     qcom,sde-has-src-split;
     qcom,sde-has-dim-layer;
@@ -611,6 +619,9 @@
 
     qcom,sde-cdp-setting = <1 1>, <1 0>;
 
+    qcom,sde-qos-cpu-mask = <0x3>;
+    qcom,sde-qos-cpu-dma-latency = <300>;
+
     qcom,sde-vbif-off = <0 0>;
     qcom,sde-vbif-id = <0 1>;
     qcom,sde-vbif-default-ot-rd-limit = <32>;
diff --git a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
index 32c31af..806c458 100644
--- a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
@@ -340,13 +340,15 @@
 					"single_roi": default enable mode, only single roi is sent to panel
 					"dual_roi": two rois are merged into one big roi. Panel ddic should be able
 					to process two roi's along with the DCS command to send two rois.
-					disabled if property is not specified.
+					Disabled if the property is not specified. This property is specified
+					per timing node to support resolution restrictions.
 - qcom,mdss-dsi-horizontal-line-idle:	List of width ranges (EC - SC) in pixels indicating
 					additional idle time in dsi clock cycles that is needed
 					to compensate for smaller line width.
 - qcom,partial-update-roi-merge:	Boolean indicates roi combination is need
 					and function has been provided for dcs
-					2A/2B command.
+					2A/2B command. This property is specified per timing node to support
+					resolution restrictions.
 - qcom,dcs-cmd-by-left:			Boolean to indicate that dcs command are sent
 					through the left DSI controller only in a dual-dsi configuration
 - qcom,mdss-dsi-panel-hdr-enabled:      Boolean to indicate HDR support in panel.
@@ -383,7 +385,8 @@
 - qcom,suspend-ulps-enabled:		Boolean to enable support for ULPS mode for panels during suspend state.
 - qcom,panel-roi-alignment:		Specifies the panel ROI alignment restrictions on its
 					left, top, width, height alignments and minimum width and
-					height values
+					height values. This property is specified per timing node to support
+					per-resolution alignment restrictions.
 - qcom,esd-check-enabled:		Boolean used to enable ESD recovery feature.
 - qcom,mdss-dsi-panel-status-command:	A byte stream formed by multiple dcs packets based on
 					qcom dsi controller protocol, to read the panel status.
@@ -654,7 +657,6 @@
 		qcom,mdss-tear-check-rd-ptr-trigger-intr = <1281>;
 		qcom,mdss-tear-check-frame-rate = <6000>;
 		qcom,mdss-dsi-reset-sequence = <1 2>, <0 10>, <1 10>;
-		qcom,partial-update-enabled = "single_roi";
 		qcom,dcs-cmd-by-left;
 		qcom,mdss-dsi-lp11-init;
 		qcom,mdss-dsi-init-delay-us = <100>;
@@ -662,7 +664,6 @@
 		mdss-dsi-tx-eot-append;
 		qcom,ulps-enabled;
 		qcom,suspend-ulps-enabled;
-		qcom,panel-roi-alignment = <4 4 2 2 20 20>;
 		qcom,esd-check-enabled;
 		qcom,mdss-dsi-panel-status-command = [06 01 00 01 05 00 02 0A 08];
 		qcom,mdss-dsi-panel-status-command-state = "dsi_lp_mode";
@@ -721,6 +722,8 @@
 				qcom,mdss-dsc-config-by-manufacture-cmd;
 				qcom,display-topology = <1 1 1>;
 				qcom,default-topology-index = <0>;
+				qcom,partial-update-enabled = "single_roi";
+				qcom,panel-roi-alignment = <4 4 2 2 20 20>;
 			};
 		};
 		qcom,panel-supply-entries {
diff --git a/Documentation/devicetree/bindings/drm/msm/sde-dsi.txt b/Documentation/devicetree/bindings/drm/msm/sde-dsi.txt
index 641cc26..a89b834 100644
--- a/Documentation/devicetree/bindings/drm/msm/sde-dsi.txt
+++ b/Documentation/devicetree/bindings/drm/msm/sde-dsi.txt
@@ -100,3 +100,4 @@
 					controller. This must be enabled for debugging purpose
 					only with simulator panel. It should not be enabled for
 					normal DSI panels.
+- qcom,null-insertion-enabled:	A boolean to enable the NULL packet insertion feature for the DSI controller.
diff --git a/Documentation/devicetree/bindings/gpu/adreno.txt b/Documentation/devicetree/bindings/gpu/adreno.txt
index b18d573..69174ca 100644
--- a/Documentation/devicetree/bindings/gpu/adreno.txt
+++ b/Documentation/devicetree/bindings/gpu/adreno.txt
@@ -5,14 +5,17 @@
 Required properties:
 - label:		A string used as a descriptive name for the device.
 - compatible:		Must be "qcom,kgsl-3d0" and "qcom,kgsl-3d"
-- reg:			Specifies the register base address and size. The second interval
-			specifies the shader memory base address and size.
+- reg:			Specifies the register base address and size, the shader memory
+			base address and size (if it exists), and the base address and size
+			of the CX_DBGC block (if it exists).
 - reg-names:		Resource names used for the physical address of device registers
 			and shader memory. "kgsl_3d0_reg_memory" gives the physical address
 			and length of device registers while "kgsl_3d0_shader_memory" gives
 			physical address and length of device shader memory.  If
 			specified, "qfprom_memory" gives the range for the efuse
-			registers used for various configuration options.
+			registers used for various configuration options. If specified,
+			"kgsl_3d0_cx_dbgc_memory" gives the physical address and length
+			of the CX DBGC block.
 - interrupts:		Interrupt mapping for GPU IRQ.
 - interrupt-names:	String property to describe the name of the interrupt.
 - qcom,id:		An integer used as an identification number for the device.
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt b/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt
index 54365b1..1127544 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt
@@ -54,6 +54,21 @@
 - hw-trdhld : should contain internal hold time for SDA
 - hw-tsp : should contain filtering of glitches
 
+* Qualcomm Technologies, Inc. MSM Camera Sensor Resource Manager
+
+MSM camera sensor resource manager node contains properties of shared camera
+sensor resource.
+
+Required properties:
+- compatible : should be manufacturer name followed by sensor name
+  - "qcom,cam-res-mgr"
+Optional properties:
+- shared-gpios : should contain the gpios that are shared by two or more
+  cameras which may be opened together.
+- pinctrl-names: List of names to assign the shared pin state defined in pinctrl device node
+- pinctrl-<0..n>: Lists phandles each pointing to the pin configuration node within a pin
+  controller. These pin configurations are installed in the pinctrl device node.
+
 * Qualcomm Technologies, Inc. MSM Sensor
 
 MSM sensor node contains properties of camera sensor
@@ -334,6 +349,15 @@
          rgltr-load-current = <100000>;
     };
 
+    qcom,cam-res-mgr {
+         compatible = "qcom,cam-res-mgr";
+         status = "ok";
+         shared-gpios = <18 19>;
+         pinctrl-names = "cam_res_mgr_default", "cam_res_mgr_suspend";
+         pinctrl-0 = <&cam_res_mgr_active>;
+         pinctrl-1 = <&cam_res_mgr_suspend>;
+    };
+
     qcom,cam-sensor@0 {
          cell-index = <0>;
          compatible = "qcom,camera";
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-icp.txt b/Documentation/devicetree/bindings/media/video/msm-cam-icp.txt
index 36dad1a..ffc0e96 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cam-icp.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-icp.txt
@@ -131,6 +131,11 @@
   Value type: <string>
   Definition: Name of firmware image.
 
+- ubwc-cfg
+  Usage: required
+  Value type: <u32>
+  Definition: UBWC configuration.
+
 Examples:
 a5: qcom,a5@ac00000 {
 	cell-index = <0>;
@@ -169,6 +174,7 @@
 	clock-rates = <0 0 0 80000000 0 0 0 0 600000000 0 0>;
 	clock-cntl-level = "turbo";
 	fw_name = "CAMERA_ICP.elf";
+	ubwc-cfg = <0x7F 0x1FF>;
 };
 
 qcom,ipe0 {
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-lrme.txt b/Documentation/devicetree/bindings/media/video/msm-cam-lrme.txt
new file mode 100644
index 0000000..9a37922
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-lrme.txt
@@ -0,0 +1,149 @@
+* Qualcomm Technologies, Inc. MSM Camera LRME
+
+The MSM camera Low Resolution Motion Estimation device provides dependency
+definitions for enabling Camera LRME HW. MSM camera LRME is implemented in
+multiple device nodes. The root LRME device node has properties defined to
+hint the driver about the LRME HW nodes available during the probe sequence.
+Each node has multiple properties defined for interrupts, clocks and
+regulators.
+
+=======================
+Required Node Structure
+=======================
+LRME root interface node takes care of the handling LRME high level
+driver handling and controls underlying LRME hardware present.
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,cam-lrme"
+
+- compat-hw-name
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,lrme"
+
+- num-lrme
+  Usage: required
+  Value type: <u32>
+  Definition: Number of supported LRME HW blocks
+
+Example:
+	qcom,cam-lrme {
+		compatible = "qcom,cam-lrme";
+		compat-hw-name = "qcom,lrme";
+		num-lrme = <1>;
+	};
+
+=======================
+Required Node Structure
+=======================
+The LRME node provides the Low Resolution Motion Estimation hardware driver
+with the device register map, interrupt map, clocks and regulators.
+
+- cell-index
+  Usage: required
+  Value type: <u32>
+  Definition: Node instance number
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,lrme"
+
+- reg-names
+  Usage: optional
+  Value type: <string>
+  Definition: Name of the register resources
+
+- reg
+  Usage: optional
+  Value type: <u32>
+  Definition: Register values
+
+- reg-cam-base
+  Usage: optional
+  Value type: <u32>
+  Definition: Offset of the register space relative to the
+              camera base register space
+
+- interrupt-names
+  Usage: optional
+  Value type: <string>
+  Definition: Name of the interrupt
+
+- interrupts
+  Usage: optional
+  Value type: <u32>
+  Definition: Interrupt line associated with LRME HW
+
+- regulator-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the regulator resources for LRME HW
+
+- camss-supply
+  Usage: required
+  Value type: <phandle>
+  Definition: Regulator reference corresponding to the names listed
+              in "regulator-names"
+
+- clock-names
+  Usage: required
+  Value type: <string>
+  Definition: List of clock names required for LRME HW
+
+- clocks
+  Usage: required
+  Value type: <phandle>
+  Definition: List of clocks required for LRME HW
+
+- clock-rates
+  Usage: required
+  Value type: <u32>
+  Definition: List of clock rates
+
+- clock-cntl-level
+  Usage: required
+  Value type: <string>
+  Definition: List of strings corresponding to the clock-rates levels
+  Supported strings: minsvs, lowsvs, svs, svs_l1, nominal, turbo
+
+- src-clock-name
+  Usage: required
+  Value type: <string>
+  Definition: Source clock name
+
+Examples:
+	cam_lrme: qcom,lrme@ac6b000 {
+		cell-index = <0>;
+		compatible = "qcom,lrme";
+		reg-names = "lrme";
+		reg = <0xac6b000 0xa00>;
+		reg-cam-base = <0x6b000>;
+		interrupt-names = "lrme";
+		interrupts = <0 476 0>;
+		regulator-names = "camss";
+		camss-supply = <&titan_top_gdsc>;
+		clock-names = "camera_ahb",
+			"camera_axi",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"camnoc_axi_clk",
+			"lrme_clk_src",
+			"lrme_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+			<&clock_camcc CAM_CC_LRME_CLK_SRC>,
+			<&clock_camcc CAM_CC_LRME_CLK>;
+		clock-rates = <0 0 0 0 0 0 0>,
+			<0 0 0 0 0 19200000 19200000>,
+			<0 0 0 0 0 19200000 19200000>,
+			<0 0 0 0 0 19200000 19200000>;
+		clock-cntl-level = "lowsvs", "svs", "svs_l1", "turbo";
+		src-clock-name = "lrme_core_clk_src";
+	};
+
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-vfe.txt b/Documentation/devicetree/bindings/media/video/msm-cam-vfe.txt
index 1c18228..99f2c7a 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cam-vfe.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-vfe.txt
@@ -12,6 +12,7 @@
 ======================================
 First Level Node - CAM VFE device
 ======================================
+Required properties:
 - compatible
   Usage: required
   Value type: <string>
@@ -74,6 +75,22 @@
   Value type: <string>
   Definition: Source clock name.
 
+Optional properties:
+- clock-names-option
+  Usage: optional
+  Value type: <string>
+  Definition: Optional clock names.
+
+- clocks-option
+  Usage: required if clock-names-option defined
+  Value type: <phandle>
+  Definition: List of optional clocks used for VFE HW.
+
+- clock-rates-option
+  Usage: required if clock-names-option defined
+  Value type: <u32>
+  Definition: List of clock rates for the optional clocks.
+
 Example:
 	qcom,vfe0@acaf000 {
 		cell-index = <0>;
@@ -105,5 +122,8 @@
 			<&clock_camcc CAM_CC_IFE_0_AXI_CLK>,
 		clock-rates = <0 0 80000000 0 320000000 0 384000000 0 0 0>;
 		src-clock-name = "ife_clk_src";
+		clock-names-option = "ife_dsp_clk";
+		clocks-option = <&clock_camcc CAM_CC_IFE_0_DSP_CLK>;
+		clock-rates-option = <600000000>;
 		status = "ok";
 	};
diff --git a/Documentation/devicetree/bindings/net/qcom,emac-dwc-eqos.txt b/Documentation/devicetree/bindings/net/qcom,emac-dwc-eqos.txt
new file mode 100644
index 0000000..8e56180
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/qcom,emac-dwc-eqos.txt
@@ -0,0 +1,43 @@
+Qualcomm Technologies Inc. EMAC Gigabit Ethernet controller
+
+This network controller consists of the MAC and
+RGMII IO Macro for interfacing with PHY.
+
+Required properties:
+
+emac_hw node:
+- compatible: Should be "qcom,emac-dwc-eqos"
+- reg: Offset and length of the register regions for the mac and io-macro
+- interrupts: Interrupt number used by this controller
+- io-macro-info: Internal io-macro-info
+
+Internal io-macro-info:
+- io-macro-bypass-mode: <0 or 1> internal or external delay configuration
+- io-interface: <rgmii/mii/rmii> PHY interface used
+
+Example:
+
+soc {
+	emac_hw: qcom,emac@00020000 {
+			compatible = "qcom,emac-dwc-eqos";
+			reg = <0x20000 0x10000>,
+				  <0x36000 0x100>;
+			reg-names = "emac-base", "rgmii-base";
+			interrupts = <0 62 4>, <0 60 4>,
+					<0 49 4>, <0 50 4>,
+					<0 51 4>, <0 52 4>,
+					<0 53 4>, <0 54 4>,
+					<0 55 4>, <0 56 4>,
+					<0 57 4>;
+			interrupt-names = "sbd-intr", "lpi-intr",
+				"tx-ch0-intr", "tx-ch1-intr",
+				"tx-ch2-intr", "tx-ch3-intr",
+				"tx-ch4-intr", "rx-ch0-intr",
+				"rx-ch1-intr", "rx-ch2-intr",
+				"rx-ch3-intr";
+			io-macro-info {
+				io-macro-bypass-mode = <0>;
+				io-interface = "rgmii";
+			};
+		};
+}
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
index 05fa6e4..8795aff 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
@@ -137,12 +137,6 @@
 		be based off battery voltage. For both SOC and battery voltage,
 		charger receives the signal from FG to resume charging.
 
-- qcom,micro-usb
-  Usage:      optional
-  Value type: <empty>
-  Definition: Boolean flag which indicates that the platform only support
-		micro usb port.
-
 - qcom,suspend-input-on-debug-batt
   Usage:      optional
   Value type: <empty>
@@ -183,6 +177,15 @@
   Value type: bool
   Definition: Boolean flag which when present enables sw compensation for jeita
 
+- qcom,battery-data
+  Usage:      optional
+  Value type: <u32>
+  Definition: Specifies the phandle of the node which contains the battery
+		profiles supported on the device. This is only specified
+		when the step charging and sw-jeita configurations are to be
+		obtained from the following properties defined in the battery
+		profile: qcom,step-chg-ranges, qcom,jeita-fcc-ranges,
+		qcom,jeita-fv-ranges.
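+		For illustration (the phandle name is hypothetical and must match the
+		battery data node on the target):
+		qcom,battery-data = <&mtp_batterydata>;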
+
 =============================================
 Second Level Nodes - SMB2 Charger Peripherals
 =============================================
diff --git a/Documentation/devicetree/bindings/regulator/cpr4-apss-regulator.txt b/Documentation/devicetree/bindings/regulator/cpr4-apss-regulator.txt
index 54ab182..05792b0 100644
--- a/Documentation/devicetree/bindings/regulator/cpr4-apss-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/cpr4-apss-regulator.txt
@@ -165,7 +165,7 @@
 		    and Turbo.
 
 - qcom,cpr-fuse-combos
-	Usage:      required
+	Usage:      optional
 	Value type: <u32>
 	Definition: Specifies the number of fuse combinations being supported by
 		    the device.  This value is utilized by several other
@@ -178,6 +178,11 @@
 		    The last 8 fuse combos correspond to speed bin fuse value 7
 		    along with CPR revision fuse values 0 to 7.
 
+		    This property must be specified unless qcom,cpr-fuse-combo-map
+		    is present. In that case, qcom,cpr-fuse-combos is implicitly
+		    assumed to have a value equal to the number of tuple lists (rows)
+		    found in the qcom,cpr-fuse-combo-map property.
+
 - qcom,cpr-speed-bins
 	Usage:      optional
 	Value type: <u32>
@@ -368,6 +373,27 @@
 		    speed bins 1-to-1 or exactly 1 list which is used regardless
 		    of the speed bin found on a given chip.
 
+- qcom,cpr-fuse-combo-map
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: A grouping of integer tuple lists where each tuple list (row)
+		    defines a mapping from combinations of fuse parameter ranges
+		    to fuse combo ID (i.e., map row index). Each tuple defines the
+		    beginning and ending fuse parameter value that matches. The
+		    number of tuples in each row is equal to the number of selection
+		    fuse parameters used in fuse combo logic. For MSM8953, the fuse
+		    parameters are "speed-bin", "cpr fuse revision", and "foundry id".
+		    The tuples in each row correspond to the fuses in order:
+		    "speed-bin", "cpr fuse revision" and "foundry id". An example entry
+		    for speed-bin '3', cpr fuse revisions >= '2', and foundry '2' is
+		    as shown below:
+		         <3 3>, <2 7>, <2 2>
+
+		    The number of rows in the property is arbitrary but used to size
+		    other properties. qcom,cpr-fuse-combos must be set to the number
+		    of rows specified in this property. For MSM8953, the maximum number
+		    of rows for this property is 512 (8 * 8 * 8).
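+
+		    For illustration only (fuse values are hypothetical), a two-row map
+		    consisting of the entry above plus a catch-all row, together with the
+		    matching combo count, could be written as:
+
+		    qcom,cpr-fuse-combo-map =
+				<3 3>, <2 7>, <2 2>,
+				<0 7>, <0 7>, <0 7>;
+		    qcom,cpr-fuse-combos = <2>;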
+
 =======
 Example
 =======
diff --git a/Documentation/devicetree/bindings/regulator/mem-acc-regulator.txt b/Documentation/devicetree/bindings/regulator/mem-acc-regulator.txt
index 5515457..f4f549a 100644
--- a/Documentation/devicetree/bindings/regulator/mem-acc-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/mem-acc-regulator.txt
@@ -159,6 +159,21 @@
 				not specified, then "qcom,override-cornerX-reg-config" must contain a single
 				register configuration sequence list which is then applied unconditionally.
 				This property can only be specified if qcom,cornerX-reg-config property is already defined.
+- qcom,override-acc-range-fuse-list:	Array of tuples that define the selection parameters used for selecting the override
+				mem-acc configuration. The fused values for these selection parameters are used by the
+				qcom,override-fuse-range-map to identify the correct set of override properties.
+				Each tuple contains 4 elements as defined below:
+				  [0] => the fuse row number of the selector
+				  [1] => LSB bit position of the bits
+				  [2] => number of bits
+				  [3] => fuse reading method, 0 for direct reading or 1 for SCM reading
+- qcom,override-fuse-range-map:	Array of tuples where each tuple specifies the allowed range for all the selection parameters
+				defined in qcom,override-acc-range-fuse-list. The fused values of these selection parameters
+				are compared against their allowed range in each tuple, starting from the 0th tuple, and the
+				first matching tuple index is used to select the right tuples from the other override properties.
+				Either qcom,override-fuse-range-map or qcom,override-fuse-version-map is used to select
+				the override configuration. The qcom,override-fuse-range-map is used if both the
+				properties are specified.
 
 mem_acc_vreg_corner: regulator@fd4aa044 {
 	compatible = "qcom,mem-acc-regulator";
@@ -184,6 +199,13 @@
 	qcom,override-fuse-version-map = <0>,
 					 <2>,
 					 <(-1)>;
+	qcom,override-acc-range-fuse-list =
+					<37 40 3 0>,
+					<36 30 8 0>;
+	qcom,override-fuse-range-map =
+				<0 0>, <  0   0>, <49 63>,
+				<1 1>, <  0   0>, <50 63>,
+				<0 1>, < 95 255>, < 0 63>;
 	qcom,override-corner-acc-map =	<0 0 1>,
 					<0 1 2>,
 					<0 1 1>;
diff --git a/Documentation/devicetree/bindings/regulator/rpmh-regulator.txt b/Documentation/devicetree/bindings/regulator/rpmh-regulator.txt
index 9deb7d4..7de891e 100644
--- a/Documentation/devicetree/bindings/regulator/rpmh-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/rpmh-regulator.txt
@@ -1,12 +1,13 @@
 Qualcomm Technologies, Inc. RPMh Regulators
 
-rpmh-regulator devices support PMIC regulator management via the VRM and ARC
-RPMh accelerators.  The APPS processor communicates with these hardware blocks
-via an RSC using command packets.  The VRM allows changing four parameters for
-a given regulator: enable state, output voltage, operating mode, and minimum
-headroom voltage.  The ARC allows changing only a single parameter for a given
-regulator: its operating level.  This operating level is fed into CPR which then
-decides upon a final explicit voltage for the regulator.
+rpmh-regulator devices support PMIC regulator management via the VRM, ARC and
+XOB RPMh accelerators.  The APPS processor communicates with these hardware
+blocks via an RSC using command packets.  The VRM allows changing four
+parameters for a given regulator: enable state, output voltage, operating mode,
+and minimum headroom voltage.  The ARC allows changing only a single parameter
+for a given regulator: its operating level.  This operating level is fed into
+CPR which then decides upon a final explicit voltage for the regulator.  The XOB
+allows changing only a single parameter for a given regulator: its enable state.
 
 =======================
 Required Node Structure
@@ -24,9 +25,10 @@
 - compatible
 	Usage:      required
 	Value type: <string>
-	Definition: Must be "qcom,rpmh-vrm-regulator" or
-		    "qcom,rpmh-arc-regulator" depending upon the hardware type,
-		    VRM or ARC, of the RPMh managed regulator resource.
+	Definition: Must be "qcom,rpmh-vrm-regulator", "qcom,rpmh-arc-regulator"
+		    or "qcom,rpmh-xob-regulator" depending upon the hardware
+		    type, VRM, ARC or XOB, of the RPMh managed regulator
+		    resource.
 
 - mboxes
 	Usage:      required
@@ -116,8 +118,8 @@
  - regulator-enable-ramp-delay
 	Usage:      optional
 	Value type: <u32>
-	Definition: For VRM resources, the time in microseconds to delay after
-		    enabling a regulator.
+	Definition: For VRM and XOB resources, the time in microseconds to delay
+		    after enabling a regulator.
 
 - qcom,set
 	Usage:      required
@@ -130,7 +132,7 @@
 		    one of RPMH_REGULATOR_SET_* (i.e. 1, 2, or 3).
 
 - qcom,init-enable
-	Usage:      optional; VRM regulators only
+	Usage:      optional; VRM and XOB regulators only
 	Value type: <u32>
 	Definition: Specifies the initial enable state to request for a VRM
 		    regulator.  Supported values are 0 (regulator disabled) and
@@ -267,3 +269,15 @@
 		qcom,init-voltage = <1000000>;
 	};
 };
+
+rpmh-regulator-ldoc1 {
+	compatible = "qcom,rpmh-xob-regulator";
+	mboxes = <&apps_rsc 0>;
+	qcom,resource-name = "ldoc1";
+	pm855l_l1: regulator-pm855l-l1 {
+		regulator-name = "pm855l_l1";
+		qcom,set = <RPMH_REGULATOR_SET_ALL>;
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/usb/dwc3.txt b/Documentation/devicetree/bindings/usb/dwc3.txt
index 6838afd..5d3b232 100644
--- a/Documentation/devicetree/bindings/usb/dwc3.txt
+++ b/Documentation/devicetree/bindings/usb/dwc3.txt
@@ -59,6 +59,8 @@
  - snps,xhci-imod-value: Interrupt moderation interval for host mode
 	(in increments of 250nsec).
  - usb-core-id: Differentiates between different controllers present on a device.
+ - snps,bus-suspend-enable: If present, the controller supports low power mode
+	during bus suspend.
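+	For example (illustrative only), a controller supporting this simply carries
+	the flag in its node:
+		snps,bus-suspend-enable;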
 
 This is usually a subnode to DWC3 glue to which it is connected.
 
diff --git a/Documentation/devicetree/bindings/usb/msm-phy.txt b/Documentation/devicetree/bindings/usb/msm-phy.txt
index 6109fad..9ee2cc6 100644
--- a/Documentation/devicetree/bindings/usb/msm-phy.txt
+++ b/Documentation/devicetree/bindings/usb/msm-phy.txt
@@ -159,6 +159,7 @@
    "efuse_addr": EFUSE address to read and update analog tune parameter.
    "emu_phy_base" : phy base address used for programming emulation target phy.
    "ref_clk_addr" : ref_clk bcr address used for on/off ref_clk before reset.
+   "refgen_north_bg_reg" : address used to read REFGEN status for overriding QUSB PHY register.
  - clocks: a list of phandles to the PHY clocks. Use as per
    Documentation/devicetree/bindings/clock/clock-bindings.txt
  - clock-names: Names of the clocks in 1-1 correspondence with the "clocks"
@@ -191,7 +192,8 @@
 			 0x210 /* QUSB2PHY_PWR_CTRL1 */
 			 0x230 /* QUSB2PHY_INTR_CTRL */
 			 0x0a8 /* QUSB2PHY_PLL_CORE_INPUT_OVERRIDE */
-			 0x254>; /* QUSB2PHY_TEST1 */
+			 0x254 /* QUSB2PHY_TEST1 */
+			 0x198>; /* QUSB2PHY_PLL_BIAS_CONTROL_2 */
 		qcom,efuse-bit-pos = <21>;
 		qcom,efuse-num-bits = <3>;
 
diff --git a/Makefile b/Makefile
index dd43551..1e85d9b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 58
+SUBLEVEL = 60
 EXTRAVERSION =
 NAME = Roaring Lionus
 
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 63da745..393c23f 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1487,6 +1487,7 @@
 
 config HOTPLUG_CPU
 	bool "Support for hot-pluggable CPUs"
+	select GENERIC_IRQ_MIGRATION
 	depends on SMP
 	help
 	  Say Y here to experiment with turning CPUs off and on.  CPUs
diff --git a/arch/arm/boot/dts/qcom/msm-smb138x.dtsi b/arch/arm/boot/dts/qcom/msm-smb138x.dtsi
new file mode 100644
index 0000000..fa21dd7
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm-smb138x.dtsi
@@ -0,0 +1,137 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+
+&i2c_7 {
+	status = "okay";
+	smb138x: qcom,smb138x@8 {
+		compatible = "qcom,i2c-pmic";
+		reg = <0x8>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		interrupt-parent = <&spmi_bus>;
+		interrupts = <0x0 0xd1 0x0 IRQ_TYPE_LEVEL_LOW>;
+		interrupt_names = "smb138x";
+		interrupt-controller;
+		#interrupt-cells = <3>;
+		qcom,periph-map = <0x10 0x11 0x12 0x13 0x14 0x16 0x36>;
+
+		smb138x_revid: qcom,revid@100 {
+			compatible = "qcom,qpnp-revid";
+			reg = <0x100 0x100>;
+		};
+
+		smb138x_tadc: qcom,tadc@3600 {
+			compatible = "qcom,tadc";
+			reg = <0x3600 0x100>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			#io-channel-cells = <1>;
+			interrupt-parent = <&smb138x>;
+			interrupts = <0x36 0x0 IRQ_TYPE_EDGE_BOTH>;
+			interrupt-names = "eoc";
+
+			batt_temp@0 {
+				reg = <0>;
+				qcom,rbias = <68100>;
+				qcom,rtherm-at-25degc = <68000>;
+				qcom,beta-coefficient = <3450>;
+			};
+
+			skin_temp@1 {
+				reg = <1>;
+				qcom,rbias = <33000>;
+				qcom,rtherm-at-25degc = <68000>;
+				qcom,beta-coefficient = <3450>;
+			};
+
+			die_temp@2 {
+				reg = <2>;
+				qcom,scale = <(-1306)>;
+				qcom,offset = <397904>;
+			};
+
+			batt_i@3 {
+				reg = <3>;
+				qcom,channel = <3>;
+				qcom,scale = <(-20000000)>;
+			};
+
+			batt_v@4 {
+				reg = <4>;
+				qcom,scale = <5000000>;
+			};
+
+			input_i@5 {
+				reg = <5>;
+				qcom,scale = <14285714>;
+			};
+
+			input_v@6 {
+				reg = <6>;
+				qcom,scale = <25000000>;
+			};
+
+			otg_i@7 {
+				reg = <7>;
+				qcom,scale = <5714286>;
+			};
+		};
+
+		smb1381_charger: qcom,smb1381-charger@1000 {
+			compatible = "qcom,smb138x-parallel-slave";
+			qcom,pmic-revid = <&smb138x_revid>;
+			reg = <0x1000 0x700>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			interrupt-parent = <&smb138x>;
+			io-channels =
+				<&smb138x_tadc 1>,
+				<&smb138x_tadc 2>,
+				<&smb138x_tadc 3>,
+				<&smb138x_tadc 14>,
+				<&smb138x_tadc 15>,
+				<&smb138x_tadc 16>,
+				<&smb138x_tadc 17>;
+			io-channel-names =
+				"connector_temp",
+				"charger_temp",
+				"batt_i",
+				"connector_temp_thr1",
+				"connector_temp_thr2",
+				"connector_temp_thr3",
+				"charger_temp_max";
+
+			qcom,chgr@1000 {
+				reg = <0x1000 0x100>;
+				interrupts = <0x10 0x1 IRQ_TYPE_EDGE_RISING>;
+				interrupt-names = "chg-state-change";
+			};
+
+			qcom,chgr-misc@1600 {
+				reg = <0x1600 0x100>;
+				interrupts = <0x16 0x1 IRQ_TYPE_EDGE_RISING>,
+					     <0x16 0x6 IRQ_TYPE_EDGE_RISING>;
+				interrupt-names = "wdog-bark",
+						  "temperature-change";
+			};
+		};
+	};
+};
+
+&smb1381_charger {
+	smb138x_vbus: qcom,smb138x-vbus {
+		status = "disabled";
+		regulator-name = "smb138x-vbus";
+	};
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
index e1136da..2b89ee8 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
@@ -10,9 +10,8 @@
  * GNU General Public License for more details.
  */
 
-
+#include <dt-bindings/soc/qcom,tcs-mbox.h>
 #include "skeleton.dtsi"
-
 #include <dt-bindings/clock/qcom,rpmh.h>
 #include <dt-bindings/clock/qcom,gcc-sdxpoorwills.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -175,7 +174,7 @@
 		reg = <0x831000 0x200>;
 		interrupts = <0 26 0>;
 		status = "disabled";
-		clocks = <&clock_gcc GCC_BLSP1_UART2_APPS_CLK>,
+		clocks = <&clock_gcc GCC_BLSP1_UART3_APPS_CLK>,
 			<&clock_gcc GCC_BLSP1_AHB_CLK>;
 		clock-names = "core", "iface";
 	};
@@ -415,6 +414,52 @@
 		/* GPIO output to mss */
 		qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_1_out 0 0>;
 	};
+
+	apps_rsc: mailbox@17840000 {
+		compatible = "qcom,tcs-drv";
+		label = "apps_rsc";
+		reg = <0x17840000 0x100>, <0x17840d00 0x3000>;
+		interrupts = <0 17 0>;
+		#mbox-cells = <1>;
+		qcom,drv-id = <1>;
+		qcom,tcs-config = <ACTIVE_TCS  2>,
+				<SLEEP_TCS     2>,
+				<WAKE_TCS      2>,
+				<CONTROL_TCS   1>;
+	};
+
+	cmd_db: qcom,cmd-db@ca0000c {
+		compatible = "qcom,cmd-db";
+		reg = <0xca0000c 8>;
+	};
+
+	system_pm {
+		compatible = "qcom,system-pm";
+		mboxes = <&apps_rsc 0>;
+	};
+
+	emac_hw: qcom,emac@00020000 {
+		compatible = "qcom,emac-dwc-eqos";
+		reg = <0x20000 0x10000>,
+		      <0x36000 0x100>;
+		reg-names = "emac-base", "rgmii-base";
+		interrupts = <0 62 4>, <0 60 4>,
+			<0 45 4>, <0 49 4>,
+			<0 50 4>, <0 51 4>,
+			<0 52 4>, <0 53 4>,
+			<0 54 4>, <0 55 4>,
+			<0 56 4>, <0 57 4>;
+		interrupt-names = "sbd-intr", "lpi-intr",
+			"wol-intr", "tx-ch0-intr",
+			"tx-ch1-intr", "tx-ch2-intr",
+			"tx-ch3-intr", "tx-ch4-intr",
+			"rx-ch0-intr", "rx-ch1-intr",
+			"rx-ch2-intr", "rx-ch3-intr";
+		io-macro-info {
+			io-macro-bypass-mode = <0>;
+			io-interface = "rgmii";
+		};
+	};
 };
 
 #include "pmxpoorwills.dtsi"
diff --git a/arch/arm/configs/sdxpoorwills-perf_defconfig b/arch/arm/configs/sdxpoorwills-perf_defconfig
index 0d35667..0508ae3 100644
--- a/arch/arm/configs/sdxpoorwills-perf_defconfig
+++ b/arch/arm/configs/sdxpoorwills-perf_defconfig
@@ -219,8 +219,14 @@
 CONFIG_SMB138X_CHARGER=y
 CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
 CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
 CONFIG_THERMAL_QPNP=y
 CONFIG_THERMAL_TSENS=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
 CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR=y
@@ -228,6 +234,9 @@
 CONFIG_REGULATOR_QPNP=y
 CONFIG_SOUND=y
 CONFIG_SND=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
 CONFIG_SND_SOC=y
 CONFIG_UHID=y
 CONFIG_HID_APPLE=y
@@ -266,9 +275,11 @@
 CONFIG_USB_CONFIGFS_MASS_STORAGE=y
 CONFIG_USB_CONFIGFS_F_FS=y
 CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_UAC1=y
 CONFIG_USB_CONFIGFS_F_DIAG=y
 CONFIG_USB_CONFIGFS_F_CDEV=y
 CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
 CONFIG_MMC=y
 CONFIG_MMC_PARANOID_SD_INIT=y
 CONFIG_MMC_BLOCK_MINORS=32
@@ -282,6 +293,8 @@
 CONFIG_QCOM_SPS_DMA=y
 CONFIG_UIO=y
 CONFIG_STAGING=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
 CONFIG_GSI=y
 CONFIG_IPA3=y
 CONFIG_RMNET_IPA3=y
diff --git a/arch/arm/configs/sdxpoorwills_defconfig b/arch/arm/configs/sdxpoorwills_defconfig
index 6c24910..cf95fbd 100644
--- a/arch/arm/configs/sdxpoorwills_defconfig
+++ b/arch/arm/configs/sdxpoorwills_defconfig
@@ -212,8 +212,14 @@
 CONFIG_SMB138X_CHARGER=y
 CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
 CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
 CONFIG_THERMAL_QPNP=y
 CONFIG_THERMAL_TSENS=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
 CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_MFD_SYSCON=y
@@ -226,6 +232,9 @@
 CONFIG_FB=y
 CONFIG_SOUND=y
 CONFIG_SND=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
 CONFIG_SND_SOC=y
 CONFIG_UHID=y
 CONFIG_HID_APPLE=y
@@ -264,9 +273,11 @@
 CONFIG_USB_CONFIGFS_MASS_STORAGE=y
 CONFIG_USB_CONFIGFS_F_FS=y
 CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_UAC1=y
 CONFIG_USB_CONFIGFS_F_DIAG=y
 CONFIG_USB_CONFIGFS_F_CDEV=y
 CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
 CONFIG_MMC=y
 CONFIG_MMC_PARANOID_SD_INIT=y
 CONFIG_MMC_BLOCK_MINORS=32
@@ -279,6 +290,8 @@
 CONFIG_QCOM_SPS_DMA=y
 CONFIG_UIO=y
 CONFIG_STAGING=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
 CONFIG_GSI=y
 CONFIG_IPA3=y
 CONFIG_RMNET_IPA3=y
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index 9edea10..41e9107 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -32,6 +32,9 @@
 #define arch_scale_cpu_capacity scale_cpu_capacity
 extern unsigned long scale_cpu_capacity(struct sched_domain *sd, int cpu);
 
+#define arch_update_cpu_capacity update_cpu_power_capacity
+extern void update_cpu_power_capacity(int cpu);
+
 #else
 
 static inline void init_cpu_topology(void) { }
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index bbf60e3..ab509d6 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -466,17 +466,6 @@
 }
 
 static void (*__smp_cross_call)(const struct cpumask *, unsigned int);
-DEFINE_PER_CPU(bool, pending_ipi);
-static void smp_cross_call_common(const struct cpumask *cpumask,
-						unsigned int func)
-{
-	unsigned int cpu;
-
-	for_each_cpu(cpu, cpumask)
-		per_cpu(pending_ipi, cpu) = true;
-
-	__smp_cross_call(cpumask, func);
-}
 
 void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
 {
@@ -501,6 +490,18 @@
 	__smp_cross_call(target, ipinr);
 }
 
+DEFINE_PER_CPU(bool, pending_ipi);
+static void smp_cross_call_common(const struct cpumask *cpumask,
+						unsigned int func)
+{
+	unsigned int cpu;
+
+	for_each_cpu(cpu, cpumask)
+		per_cpu(pending_ipi, cpu) = true;
+
+	smp_cross_call(cpumask, func);
+}
+
 void show_ipi_list(struct seq_file *p, int prec)
 {
 	unsigned int cpu, i;
@@ -539,7 +540,7 @@
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	smp_cross_call_common(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+	smp_cross_call_common(cpumask_of(cpu), IPI_CALL_FUNC);
 }
 
 #ifdef CONFIG_IRQ_WORK
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 2b6c530..28dcd44 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -42,6 +42,16 @@
  */
 static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
 
+unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
+{
+	return per_cpu(cpu_scale, cpu);
+}
+
+static void set_power_scale(unsigned int cpu, unsigned long power)
+{
+	per_cpu(cpu_scale, cpu) = power;
+}
+
 unsigned long scale_cpu_capacity(struct sched_domain *sd, int cpu)
 {
 #ifdef CONFIG_CPU_FREQ
@@ -397,6 +407,23 @@
 	return &cpu_topology[cpu].thread_sibling;
 }
 
+static void update_cpu_power(unsigned int cpu)
+{
+	if (!cpu_capacity(cpu))
+		return;
+
+	set_power_scale(cpu, cpu_capacity(cpu) / middle_capacity);
+
+	pr_info("CPU%u: update cpu_power %lu\n",
+		cpu, arch_scale_freq_power(NULL, cpu));
+}
+
+void update_cpu_power_capacity(int cpu)
+{
+	update_cpu_power(cpu);
+	update_cpu_capacity(cpu);
+}
+
 static void update_siblings_masks(unsigned int cpuid)
 {
 	struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
diff --git a/arch/arm/mach-qcom/Kconfig b/arch/arm/mach-qcom/Kconfig
index f4d7965..4761bc5 100644
--- a/arch/arm/mach-qcom/Kconfig
+++ b/arch/arm/mach-qcom/Kconfig
@@ -51,5 +51,28 @@
 	select COMMON_CLK
 	select COMMON_CLK_QCOM
 	select QCOM_GDSC
+
+config ARCH_MSM8953
+	bool "Enable support for MSM8953"
+	select CPU_V7
+	select HAVE_ARM_ARCH_TIMER
+	select PINCTRL
+	select QCOM_SCM if SMP
+	select PM_DEVFREQ
+	select COMMON_CLK
+	select COMMON_CLK_QCOM
+	select QCOM_GDSC
+
+config ARCH_SDM450
+	bool "Enable support for SDM450"
+	select CPU_V7
+	select HAVE_ARM_ARCH_TIMER
+	select PINCTRL
+	select QCOM_SCM if SMP
+	select PM_DEVFREQ
+	select COMMON_CLK
+	select COMMON_CLK_QCOM
+	select QCOM_GDSC
+
 endmenu
 endif
diff --git a/arch/arm/mach-qcom/Makefile b/arch/arm/mach-qcom/Makefile
index d893b27..828e9c9 100644
--- a/arch/arm/mach-qcom/Makefile
+++ b/arch/arm/mach-qcom/Makefile
@@ -1,3 +1,5 @@
 obj-$(CONFIG_USE_OF) += board-dt.o
 obj-$(CONFIG_SMP)	+= platsmp.o
 obj-$(CONFIG_ARCH_SDXPOORWILLS) += board-poorwills.o
+obj-$(CONFIG_ARCH_MSM8953) += board-msm8953.o
+obj-$(CONFIG_ARCH_SDM450) += board-sdm450.o
diff --git a/arch/arm/mach-qcom/board-msm8953.c b/arch/arm/mach-qcom/board-msm8953.c
new file mode 100644
index 0000000..cae3bf7
--- /dev/null
+++ b/arch/arm/mach-qcom/board-msm8953.c
@@ -0,0 +1,32 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include "board-dt.h"
+#include <asm/mach/map.h>
+#include <asm/mach/arch.h>
+
+static const char *msm8953_dt_match[] __initconst = {
+	"qcom,msm8953",
+	NULL
+};
+
+static void __init msm8953_init(void)
+{
+	board_dt_populate(NULL);
+}
+
+DT_MACHINE_START(MSM8953_DT,
+	"Qualcomm Technologies, Inc. MSM8953 (Flattened Device Tree)")
+	.init_machine		= msm8953_init,
+	.dt_compat		= msm8953_dt_match,
+MACHINE_END
diff --git a/arch/arm/mach-qcom/board-sdm450.c b/arch/arm/mach-qcom/board-sdm450.c
new file mode 100644
index 0000000..5f68ede
--- /dev/null
+++ b/arch/arm/mach-qcom/board-sdm450.c
@@ -0,0 +1,32 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include "board-dt.h"
+#include <asm/mach/map.h>
+#include <asm/mach/arch.h>
+
+static const char *sdm450_dt_match[] __initconst = {
+	"qcom,sdm450",
+	NULL
+};
+
+static void __init sdm450_init(void)
+{
+	board_dt_populate(NULL);
+}
+
+DT_MACHINE_START(SDM450_DT,
+	"Qualcomm Technologies, Inc. SDM450 (Flattened Device Tree)")
+	.init_machine		= sdm450_init,
+	.dt_compat		= sdm450_dt_match,
+MACHINE_END
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 5d73327..0bb7673 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -150,7 +150,7 @@
 }
 
 #ifdef CONFIG_ARCH_WANT_KMAP_ATOMIC_FLUSH
-static void kmap_remove_unused_cpu(int cpu)
+int kmap_remove_unused_cpu(unsigned int cpu)
 {
 	int start_idx, idx, type;
 
@@ -167,6 +167,7 @@
 			set_top_pte(vaddr, __pte(0));
 	}
 	pagefault_enable();
+	return 0;
 }
 
 static void kmap_remove_unused(void *unused)
@@ -179,27 +180,4 @@
 	on_each_cpu(kmap_remove_unused, NULL, 1);
 }
 
-static int hotplug_kmap_atomic_callback(struct notifier_block *nfb,
-					unsigned long action, void *hcpu)
-{
-	switch (action & (~CPU_TASKS_FROZEN)) {
-	case CPU_DYING:
-		kmap_remove_unused_cpu((int)hcpu);
-		break;
-	default:
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block hotplug_kmap_atomic_notifier = {
-	.notifier_call = hotplug_kmap_atomic_callback,
-};
-
-static int __init init_kmap_atomic(void)
-{
-	return register_hotcpu_notifier(&hotplug_kmap_atomic_notifier);
-}
-early_initcall(init_kmap_atomic);
 #endif
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index e1454fb..8edfbf2 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -148,6 +148,15 @@
 	  This enables support for the MSM8953 chipset. If you do not
 	  wish to build a kernel that runs on this chipset, say 'N' here.
 
+config ARCH_SDM450
+	bool "Enable support for Qualcomm Technologies, Inc. SDM450"
+	depends on ARCH_QCOM
+	select COMMON_CLK_QCOM
+	select QCOM_GDSC
+	help
+	  This enables support for the SDM450 chipset. If you do not
+	  wish to build a kernel that runs on this chipset, say 'N' here.
+
 config ARCH_ROCKCHIP
 	bool "Rockchip Platforms"
 	select ARCH_HAS_RESET_CONTROLLER
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index aec9930..40a6aab 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -35,7 +35,13 @@
 		sda845-v2-hdk-overlay.dtbo \
 		sda845-v2-4k-panel-mtp-overlay.dtbo \
 		sda845-v2-4k-panel-cdp-overlay.dtbo \
-		sda845-v2-4k-panel-qrd-overlay.dtbo
+		sda845-v2-4k-panel-qrd-overlay.dtbo \
+		sda845-v2.1-cdp-overlay.dtbo \
+		sda845-v2.1-mtp-overlay.dtbo \
+		sda845-v2.1-qrd-overlay.dtbo \
+		sda845-v2.1-4k-panel-cdp-overlay.dtbo \
+		sda845-v2.1-4k-panel-mtp-overlay.dtbo \
+		sda845-v2.1-4k-panel-qrd-overlay.dtbo
 
 sdm845-cdp-overlay.dtbo-base := sdm845.dtb
 sdm845-mtp-overlay.dtbo-base := sdm845.dtb
@@ -70,6 +76,12 @@
 sda845-v2-4k-panel-mtp-overlay.dtbo-base := sda845-v2.dtb
 sda845-v2-4k-panel-cdp-overlay.dtbo-base := sda845-v2.dtb
 sda845-v2-4k-panel-qrd-overlay.dtbo-base := sda845-v2.dtb
+sda845-v2.1-cdp-overlay.dtbo-base := sda845-v2.1.dtb
+sda845-v2.1-mtp-overlay.dtbo-base := sda845-v2.1.dtb
+sda845-v2.1-qrd-overlay.dtbo-base := sda845-v2.1.dtb
+sda845-v2.1-4k-panel-cdp-overlay.dtbo-base := sda845-v2.1.dtb
+sda845-v2.1-4k-panel-mtp-overlay.dtbo-base := sda845-v2.1.dtb
+sda845-v2.1-4k-panel-qrd-overlay.dtbo-base := sda845-v2.1.dtb
 else
 dtb-$(CONFIG_ARCH_SDM845) += sdm845-sim.dtb \
 	sdm845-rumi.dtb \
@@ -176,6 +188,13 @@
 ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y)
 else
 dtb-$(CONFIG_ARCH_MSM8953) += msm8953-mtp.dtb
+dtb-$(CONFIG_ARCH_SDM450) += sdm450-rcm.dtb \
+	sdm450-cdp.dtb \
+	sdm450-mtp.dtb \
+	sdm450-qrd.dtb \
+	sdm450-pmi8940-mtp.dtb \
+	sdm450-pmi8937-mtp.dtb \
+	sdm450-iot-mtp.dtb
 endif
 
 always		:= $(dtb-y)
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi
index 5b5fbb8..1a8ce91 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi
@@ -210,6 +210,11 @@
 					15 01 00 00 00 00 02 E5 01
 					/* CMD mode(10) VDO mode(03) */
 					15 01 00 00 00 00 02 BB 10
+					/* NVT SDC */
+					15 01 00 00 00 00 02 C0 00
+					/* GRAM Slide Parameter */
+					29 01 00 00 00 00 0C C9 01 01 70
+					00 0A 06 67 04 C5 12 18
 					/* Non Reload MTP */
 					15 01 00 00 00 00 02 FB 01
 					/* SlpOut + DispOn */
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt36850-truly-dualmipi-wqhd-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt36850-truly-dualmipi-wqhd-cmd.dtsi
new file mode 100644
index 0000000..c059443
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt36850-truly-dualmipi-wqhd-cmd.dtsi
@@ -0,0 +1,92 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+	dsi_dual_nt36850_truly_cmd: qcom,mdss_dsi_nt36850_truly_wqhd_cmd {
+		qcom,mdss-dsi-panel-name =
+			"Dual nt36850 cmd mode dsi truly panel without DSC";
+		qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-color-order = "rgb_swap_rgb";
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+		qcom,mdss-dsi-lane-map = "lane_map_0123";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-tx-eot-append;
+		qcom,cmd-sync-wait-broadcast;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-wr-mem-start = <0x2c>;
+		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
+		qcom,mdss-dsi-te-pin-select = <1>;
+		qcom,mdss-dsi-te-dcs-command = <1>;
+		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-te-pin;
+		qcom,mdss-dsi-panel-timings =
+			[da 34 24 00 64 68 28 38 2a 03 04 00];
+		qcom,mdss-dsi-t-clk-pre = <0x29>;
+		qcom,mdss-dsi-t-clk-post = <0x03>;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-lp11-init;
+		qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+		qcom,mdss-dsi-bl-min-level = <1>;
+		qcom,mdss-dsi-bl-max-level = <4095>;
+		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+		qcom,mdss-dsi-display-timings {
+			timing@0 {
+				qcom,mdss-dsi-panel-framerate = <60>;
+				qcom,mdss-dsi-panel-width = <720>;
+				qcom,mdss-dsi-panel-height = <2560>;
+				qcom,mdss-dsi-h-front-porch = <120>;
+				qcom,mdss-dsi-h-back-porch = <140>;
+				qcom,mdss-dsi-h-pulse-width = <20>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <20>;
+				qcom,mdss-dsi-v-front-porch = <8>;
+				qcom,mdss-dsi-v-pulse-width = <4>;
+				qcom,mdss-dsi-h-left-border = <0>;
+				qcom,mdss-dsi-h-right-border = <0>;
+				qcom,mdss-dsi-v-top-border = <0>;
+				qcom,mdss-dsi-v-bottom-border = <0>;
+				qcom,mdss-dsi-on-command = [
+					15 01 00 00 00 00 02 ff 10
+					15 01 00 00 00 00 02 fb 01
+					15 01 00 00 00 00 02 36 00
+					15 01 00 00 00 00 02 35 00
+					39 01 00 00 00 00 03 44 03 e8
+					15 01 00 00 00 00 02 51 ff
+					15 01 00 00 00 00 02 53 2c
+					15 01 00 00 00 00 02 55 01
+					05 01 00 00 0a 00 02 20 00
+					15 01 00 00 00 00 02 bb 10
+					05 01 00 00 78 00 02 11 00
+					05 01 00 00 14 00 02 29 00
+				];
+				qcom,mdss-dsi-off-command = [
+					05 01 00 00 14 00 02 28 00
+					05 01 00 00 78 00 02 10 00
+				];
+				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+				qcom,mdss-dsi-h-sync-pulse = <0>;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sim-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sim-cmd.dtsi
index 50da1bf..45ac042 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sim-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sim-cmd.dtsi
@@ -41,6 +41,7 @@
 		qcom,mdss-dsi-te-using-wd;
 		qcom,mdss-dsi-te-using-te-pin;
 		qcom,mdss-dsi-panel-hdr-enabled;
+		qcom,ulps-enabled;
 		qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
 			17000 15500 30000 8000 3000>;
 		qcom,mdss-dsi-panel-peak-brightness = <4200000>;
@@ -49,26 +50,22 @@
 
 		qcom,mdss-dsi-display-timings {
 			timing@0{
-				qcom,mdss-dsi-panel-width = <640>;
-				qcom,mdss-dsi-panel-height = <480>;
-				qcom,mdss-dsi-h-front-porch = <20>;
-				qcom,mdss-dsi-h-back-porch = <20>;
-				qcom,mdss-dsi-h-pulse-width = <16>;
+				qcom,mdss-dsi-panel-width = <1440>;
+				qcom,mdss-dsi-panel-height = <2560>;
+				qcom,mdss-dsi-h-front-porch = <120>;
+				qcom,mdss-dsi-h-back-porch = <100>;
+				qcom,mdss-dsi-h-pulse-width = <40>;
 				qcom,mdss-dsi-h-sync-skew = <0>;
-				qcom,mdss-dsi-v-back-porch = <16>;
-				qcom,mdss-dsi-v-front-porch = <4>;
-				qcom,mdss-dsi-v-pulse-width = <1>;
+				qcom,mdss-dsi-v-back-porch = <100>;
+				qcom,mdss-dsi-v-front-porch = <100>;
+				qcom,mdss-dsi-v-pulse-width = <40>;
 				qcom,mdss-dsi-h-left-border = <0>;
 				qcom,mdss-dsi-h-right-border = <0>;
 				qcom,mdss-dsi-v-top-border = <0>;
 				qcom,mdss-dsi-v-bottom-border = <0>;
-				qcom,mdss-dsi-h-sync-pulse = <0>;
 				qcom,mdss-dsi-panel-framerate = <60>;
-				qcom,mdss-dsi-hor-line-idle = <0 40 256>,
-								<40 120 128>,
-								<120 240 64>;
 				qcom,mdss-dsi-panel-timings =
-					[cd 32 22 00 60 64 26 34 29 03 04 00];
+					[00 21 09 09 24 23 08 08 08 03 04 00];
 				qcom,mdss-dsi-on-command =
 					[29 01 00 00 00 00 02 b0 03
 					05 01 00 00 0a 00 01 00
@@ -98,6 +95,124 @@
 					[05 01 00 00 32 00 02 28 00
 					05 01 00 00 78 00 02 10 00];
 				qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+
+				qcom,compression-mode = "dsc";
+				qcom,mdss-dsc-slice-height = <40>;
+				qcom,mdss-dsc-slice-width = <720>;
+				qcom,mdss-dsc-slice-per-pkt = <1>;
+				qcom,mdss-dsc-bit-per-component = <8>;
+				qcom,mdss-dsc-bit-per-pixel = <8>;
+				qcom,mdss-dsc-block-prediction-enable;
+			};
+			timing@1{
+				qcom,mdss-dsi-panel-width = <1080>;
+				qcom,mdss-dsi-panel-height = <1920>;
+				qcom,mdss-dsi-h-front-porch = <120>;
+				qcom,mdss-dsi-h-back-porch = <460>;
+				qcom,mdss-dsi-h-pulse-width = <40>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <100>;
+				qcom,mdss-dsi-v-front-porch = <740>;
+				qcom,mdss-dsi-v-pulse-width = <40>;
+				qcom,mdss-dsi-h-left-border = <0>;
+				qcom,mdss-dsi-h-right-border = <0>;
+				qcom,mdss-dsi-v-top-border = <0>;
+				qcom,mdss-dsi-v-bottom-border = <0>;
+				qcom,mdss-dsi-panel-framerate = <60>;
+				qcom,mdss-dsi-panel-timings =
+					[00 21 09 09 24 23 08 08 08 03 04 00];
+				qcom,mdss-dsi-on-command =
+					[29 01 00 00 00 00 02 b0 03
+					05 01 00 00 0a 00 01 00
+					/* Soft reset, wait 10ms */
+					15 01 00 00 0a 00 02 3a 77
+					/* Set Pixel format (24 bpp) */
+					39 01 00 00 0a 00 05 2a 00 00 04 ff
+					/* Set Column address */
+					39 01 00 00 0a 00 05 2b 00 00 05 9f
+					/* Set page address */
+					15 01 00 00 0a 00 02 35 00
+					/* Set tear on */
+					39 01 00 00 0a 00 03 44 00 00
+					/* Set tear scan line */
+					15 01 00 00 0a 00 02 51 ff
+					/* write display brightness */
+					15 01 00 00 0a 00 02 53 24
+					 /* write control brightness */
+					15 01 00 00 0a 00 02 55 00
+					/* CABC brightness */
+					05 01 00 00 78 00 01 11
+					/* exit sleep mode, wait 120ms */
+					05 01 00 00 10 00 01 29];
+					/* Set display on, wait 16ms */
+				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-off-command =
+					[05 01 00 00 32 00 02 28 00
+					05 01 00 00 78 00 02 10 00];
+				qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+
+				qcom,compression-mode = "dsc";
+				qcom,mdss-dsc-slice-height = <40>;
+				qcom,mdss-dsc-slice-width = <540>;
+				qcom,mdss-dsc-slice-per-pkt = <1>;
+				qcom,mdss-dsc-bit-per-component = <8>;
+				qcom,mdss-dsc-bit-per-pixel = <8>;
+				qcom,mdss-dsc-block-prediction-enable;
+			};
+			timing@2{
+				qcom,mdss-dsi-panel-width = <720>;
+				qcom,mdss-dsi-panel-height = <1280>;
+				qcom,mdss-dsi-h-front-porch = <100>;
+				qcom,mdss-dsi-h-back-porch = <840>;
+				qcom,mdss-dsi-h-pulse-width = <40>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <100>;
+				qcom,mdss-dsi-v-front-porch = <1380>;
+				qcom,mdss-dsi-v-pulse-width = <40>;
+				qcom,mdss-dsi-h-left-border = <0>;
+				qcom,mdss-dsi-h-right-border = <0>;
+				qcom,mdss-dsi-v-top-border = <0>;
+				qcom,mdss-dsi-v-bottom-border = <0>;
+				qcom,mdss-dsi-panel-framerate = <60>;
+				qcom,mdss-dsi-panel-timings =
+					[00 21 09 09 24 23 08 08 08 03 04 00];
+				qcom,mdss-dsi-on-command =
+					[29 01 00 00 00 00 02 b0 03
+					05 01 00 00 0a 00 01 00
+					/* Soft reset, wait 10ms */
+					15 01 00 00 0a 00 02 3a 77
+					/* Set Pixel format (24 bpp) */
+					39 01 00 00 0a 00 05 2a 00 00 04 ff
+					/* Set Column address */
+					39 01 00 00 0a 00 05 2b 00 00 05 9f
+					/* Set page address */
+					15 01 00 00 0a 00 02 35 00
+					/* Set tear on */
+					39 01 00 00 0a 00 03 44 00 00
+					/* Set tear scan line */
+					15 01 00 00 0a 00 02 51 ff
+					/* write display brightness */
+					15 01 00 00 0a 00 02 53 24
+					 /* write control brightness */
+					15 01 00 00 0a 00 02 55 00
+					/* CABC brightness */
+					05 01 00 00 78 00 01 11
+					/* exit sleep mode, wait 120ms */
+					05 01 00 00 10 00 01 29];
+					/* Set display on, wait 16ms */
+				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-off-command =
+					[05 01 00 00 32 00 02 28 00
+					05 01 00 00 78 00 02 10 00];
+				qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+
+				qcom,compression-mode = "dsc";
+				qcom,mdss-dsc-slice-height = <40>;
+				qcom,mdss-dsc-slice-width = <360>;
+				qcom,mdss-dsc-slice-per-pkt = <1>;
+				qcom,mdss-dsc-bit-per-component = <8>;
+				qcom,mdss-dsc-bit-per-pixel = <8>;
+				qcom,mdss-dsc-block-prediction-enable;
 			};
 		};
 	};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi
index 895cbc5..9a4e318 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi
@@ -71,20 +71,20 @@
 				qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
 			};
 			timing@1{
-				qcom,mdss-dsi-panel-width = <1280>;
-				qcom,mdss-dsi-panel-height = <1440>;
-				qcom,mdss-dsi-h-front-porch = <120>;
-				qcom,mdss-dsi-h-back-porch = <44>;
+				qcom,mdss-dsi-panel-width = <720>;
+				qcom,mdss-dsi-panel-height = <2560>;
+				qcom,mdss-dsi-h-front-porch = <100>;
+				qcom,mdss-dsi-h-back-porch = <32>;
 				qcom,mdss-dsi-h-pulse-width = <16>;
 				qcom,mdss-dsi-h-sync-skew = <0>;
-				qcom,mdss-dsi-v-back-porch = <4>;
+				qcom,mdss-dsi-v-back-porch = <7>;
 				qcom,mdss-dsi-v-front-porch = <8>;
-				qcom,mdss-dsi-v-pulse-width = <4>;
+				qcom,mdss-dsi-v-pulse-width = <1>;
+				qcom,mdss-dsi-h-sync-pulse = <0>;
 				qcom,mdss-dsi-h-left-border = <0>;
 				qcom,mdss-dsi-h-right-border = <0>;
 				qcom,mdss-dsi-v-top-border = <0>;
 				qcom,mdss-dsi-v-bottom-border = <0>;
-				qcom,mdss-dsi-h-sync-pulse = <0>;
 				qcom,mdss-dsi-panel-framerate = <60>;
 				qcom,mdss-dsi-on-command =
 					[/* exit sleep mode, wait 0ms */
diff --git a/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-mlp356477-2800mah.dtsi b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-mlp356477-2800mah.dtsi
index 98dbf1c..4720238 100644
--- a/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-mlp356477-2800mah.dtsi
+++ b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-mlp356477-2800mah.dtsi
@@ -14,9 +14,15 @@
 	/* #mlp356477_2800mah_averaged_MasterSlave_Aug14th2017*/
 	qcom,max-voltage-uv = <4400000>;
 	qcom,fg-cc-cv-threshold-mv = <4390>;
-	qcom,fastchg-current-ma = <2800>;
+	qcom,fastchg-current-ma = <4200>;
 	qcom,batt-id-kohm = <82>;
 	qcom,battery-beta = <4250>;
+	qcom,jeita-fcc-ranges = <0   150   560000
+				 151 450  4200000
+				 451 550  2380000>;
+	qcom,jeita-fv-ranges =  <0   150  4150000
+				 151 450  4400000
+				 451 550  4150000>;
 	qcom,battery-type =
 		"mlp356477_2800mah_averaged_masterslave_aug14th2017";
 	qcom,checksum = <0x71B8>;
diff --git a/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-mlp446579-3800mah.dtsi b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-mlp446579-3800mah.dtsi
index ca43a45..75504d4 100644
--- a/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-mlp446579-3800mah.dtsi
+++ b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-mlp446579-3800mah.dtsi
@@ -17,6 +17,12 @@
 	qcom,fastchg-current-ma = <3800>;
 	qcom,batt-id-kohm = <91>;
 	qcom,battery-beta = <4250>;
+	qcom,jeita-fcc-ranges = <0   150  760000
+				 151 450  3800000
+				 451 550  1900000>;
+	qcom,jeita-fv-ranges =  <0   150  4150000
+				 151 450  4400000
+				 451 550  4150000>;
 	qcom,battery-type = "mlp446579_3800mah_averaged_masterslave_oct9th2017";
 	qcom,checksum = <0x3F0A>;
 	qcom,gui-version = "PMI8998GUI - 2.0.0.58";
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm670.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm670.dtsi
index 715b59c..fc468f5 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm670.dtsi
@@ -21,6 +21,7 @@
 		#iommu-cells = <1>;
 		qcom,dynamic;
 		qcom,use-3-lvl-tables;
+		qcom,disable-atos;
 		#global-interrupts = <2>;
 		qcom,regulator-names = "vdd";
 		vdd-supply = <&gpu_cx_gdsc>;
@@ -34,12 +35,8 @@
 				<GIC_SPI 369 IRQ_TYPE_EDGE_RISING>,
 				<GIC_SPI 370 IRQ_TYPE_EDGE_RISING>,
 				<GIC_SPI 371 IRQ_TYPE_EDGE_RISING>;
-		clock-names = "gcc_ddrss_gpu_axi_clk",
-				"gcc_gpu_memnoc_gfx_clk",
-				"gpu_cc_cx_gmu_clk";
-		clocks = <&clock_gcc GCC_DDRSS_GPU_AXI_CLK>,
-			<&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>,
-			<&clock_gpucc GPU_CC_CX_GMU_CLK>;
+		clock-names = "gcc_gpu_memnoc_gfx_clk";
+		clocks = <&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>;
 		attach-impl-defs =
 				<0x6000 0x2378>,
 				<0x6060 0x1055>,
@@ -294,6 +291,7 @@
 	};
 
 	kgsl_iommu_test_device {
+		status = "disabled";
 		compatible = "iommu-debug-test";
 		/*
 		 * 0x7 isn't a valid sid, but should pass the sid sanity check.
@@ -307,9 +305,19 @@
 	apps_iommu_test_device {
 		compatible = "iommu-debug-test";
 		/*
-		 * This SID belongs to QUP1-GSI. We can't use a fake SID for
+		 * This SID belongs to TSIF. We can't use a fake SID for
 		 * the apps_smmu device.
 		 */
-		iommus = <&apps_smmu 0x16 0x0>;
+		iommus = <&apps_smmu 0x20 0xf>;
+	};
+
+	apps_iommu_coherent_test_device {
+		compatible = "iommu-debug-test";
+		/*
+		 * This SID belongs to TSIF. We can't use a fake SID for
+		 * the apps_smmu device.
+		 */
+		iommus = <&apps_smmu 0x20 0xf>;
+		dma-coherent;
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi b/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi
index 2fd1bc4..b20feef8 100644
--- a/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi
@@ -117,7 +117,7 @@
 
 		dai_mi2s4: qcom,msm-dai-q6-mi2s-quin {
 			compatible = "qcom,msm-dai-q6-mi2s";
-			qcom,msm-dai-q6-mi2s-dev-id = <5>;
+			qcom,msm-dai-q6-mi2s-dev-id = <4>;
 			qcom,msm-mi2s-rx-lines = <1>;
 			qcom,msm-mi2s-tx-lines = <2>;
 		};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-cdp.dtsi b/arch/arm64/boot/dts/qcom/msm8953-cdp.dtsi
new file mode 100644
index 0000000..243aaf5
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-cdp.dtsi
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&blsp1_uart0 {
+	status = "ok";
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart_console_active>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-qrd.dtsi b/arch/arm64/boot/dts/qcom/msm8953-qrd.dtsi
new file mode 100644
index 0000000..243aaf5
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-qrd.dtsi
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&blsp1_uart0 {
+	status = "ok";
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart_console_active>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8953.dtsi b/arch/arm64/boot/dts/qcom/msm8953.dtsi
index c4a30bf..e90c30b 100644
--- a/arch/arm64/boot/dts/qcom/msm8953.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953.dtsi
@@ -348,6 +348,32 @@
 		};
 	};
 
+	cpubw_compute: qcom,cpubw-compute {
+		compatible = "qcom,arm-cpu-mon";
+		qcom,cpulist = < &CPU0 &CPU1 &CPU2 &CPU3
+				&CPU4 &CPU5 &CPU6 &CPU7 >;
+		qcom,target-dev = <&cpubw>;
+		qcom,core-dev-table =
+				 <  652800  1611>,
+				 < 1036800  3221>,
+				 < 1401600  5859>,
+				 < 1689600  6445>,
+				 < 1804800  7104>,
+				 < 1958400  7104>,
+				 < 2208000  7104>;
+	};
+
+	mincpubw_compute: qcom,mincpubw-compute {
+		compatible = "qcom,arm-cpu-mon";
+		qcom,cpulist = < &CPU0 &CPU1 &CPU2 &CPU3
+				&CPU4 &CPU5 &CPU6 &CPU7 >;
+		qcom,target-dev = <&mincpubw>;
+		qcom,core-dev-table =
+				<  652800 1611 >,
+				< 1401600 3221 >,
+				< 2208000 5859 >;
+	};
+
 	qcom,ipc-spinlock@1905000 {
 		compatible = "qcom,ipc-spinlock-sfpb";
 		reg = <0x1905000 0x8000>;
diff --git a/arch/arm64/boot/dts/qcom/pmi8998.dtsi b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
index 8d8bd63..c65430b1 100644
--- a/arch/arm64/boot/dts/qcom/pmi8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
@@ -102,6 +102,7 @@
 
 			qcom,thermal-mitigation
 					= <3000000 1500000 1000000 500000>;
+			qcom,auto-recharge-soc;
 
 			qcom,chgr@1000 {
 				reg = <0x1000 0x100>;
@@ -282,6 +283,9 @@
 			qcom,fg-esr-timer-asleep = <256 256>;
 			qcom,fg-esr-timer-charging = <0 96>;
 			qcom,cycle-counter-en;
+			qcom,hold-soc-while-full;
+			qcom,fg-auto-recharge-soc;
+			qcom,fg-recharge-soc-thr = <98>;
 			status = "okay";
 
 			qcom,fg-batt-soc@4000 {
diff --git a/arch/arm64/boot/dts/qcom/qcs605-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/qcs605-cdp-overlay.dts
index fe7a027..01471b6 100644
--- a/arch/arm64/boot/dts/qcom/qcs605-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/qcs605-cdp-overlay.dts
@@ -21,6 +21,7 @@
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 
 #include "sdm670-cdp.dtsi"
+#include "sdm670-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. QCS605 PM660 + PM660L CDP";
diff --git a/arch/arm64/boot/dts/qcom/qcs605-cdp.dts b/arch/arm64/boot/dts/qcom/qcs605-cdp.dts
index 7b38a58..ea10fa0 100644
--- a/arch/arm64/boot/dts/qcom/qcs605-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/qcs605-cdp.dts
@@ -16,6 +16,7 @@
 
 #include "qcs605.dtsi"
 #include "sdm670-cdp.dtsi"
+#include "sdm670-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. QCS605 PM660 + PM660L CDP";
diff --git a/arch/arm64/boot/dts/qcom/qcs605-external-codec-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/qcs605-external-codec-mtp-overlay.dts
index 1f439ae..44fae6a 100644
--- a/arch/arm64/boot/dts/qcom/qcs605-external-codec-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/qcs605-external-codec-mtp-overlay.dts
@@ -21,6 +21,7 @@
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 
 #include "sdm670-mtp.dtsi"
+#include "sdm670-external-codec.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. QCS605 PM660 + PM660L Ext. Audio Codec MTP";
diff --git a/arch/arm64/boot/dts/qcom/qcs605-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/qcs605-mtp-overlay.dts
index 7327440..7955242 100644
--- a/arch/arm64/boot/dts/qcom/qcs605-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/qcs605-mtp-overlay.dts
@@ -21,6 +21,7 @@
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 
 #include "sdm670-mtp.dtsi"
+#include "sdm670-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. QCS605 PM660 + PM660L MTP";
diff --git a/arch/arm64/boot/dts/qcom/qcs605-mtp.dts b/arch/arm64/boot/dts/qcom/qcs605-mtp.dts
index bc7b376..dc3c7ce 100644
--- a/arch/arm64/boot/dts/qcom/qcs605-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/qcs605-mtp.dts
@@ -16,6 +16,7 @@
 
 #include "qcs605.dtsi"
 #include "sdm670-mtp.dtsi"
+#include "sdm670-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. QCS605 PM660 + PM660L MTP";
diff --git a/arch/arm64/boot/dts/qcom/sda670-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sda670-cdp-overlay.dts
index 141ed59..12a130c 100644
--- a/arch/arm64/boot/dts/qcom/sda670-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sda670-cdp-overlay.dts
@@ -21,6 +21,7 @@
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 
 #include "sdm670-cdp.dtsi"
+#include "sdm670-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDA670 PM660 + PM660L CDP";
diff --git a/arch/arm64/boot/dts/qcom/sda670-cdp.dts b/arch/arm64/boot/dts/qcom/sda670-cdp.dts
index fcb340e..9cd9960 100644
--- a/arch/arm64/boot/dts/qcom/sda670-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sda670-cdp.dts
@@ -16,6 +16,7 @@
 
 #include "sda670.dtsi"
 #include "sdm670-cdp.dtsi"
+#include "sdm670-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDA670 PM660 + PM660L CDP";
diff --git a/arch/arm64/boot/dts/qcom/sda670-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sda670-mtp-overlay.dts
index af8e8f1..b3f5a0b 100644
--- a/arch/arm64/boot/dts/qcom/sda670-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sda670-mtp-overlay.dts
@@ -21,6 +21,7 @@
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 
 #include "sdm670-mtp.dtsi"
+#include "sdm670-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDA670 PM660 + PM660L MTP";
diff --git a/arch/arm64/boot/dts/qcom/sda670-mtp.dts b/arch/arm64/boot/dts/qcom/sda670-mtp.dts
index 2123b44..253ec0c 100644
--- a/arch/arm64/boot/dts/qcom/sda670-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sda670-mtp.dts
@@ -16,6 +16,7 @@
 
 #include "sda670.dtsi"
 #include "sdm670-mtp.dtsi"
+#include "sdm670-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDA670 PM660 + PM660L MTP";
diff --git a/arch/arm64/boot/dts/qcom/sda670-pm660a-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sda670-pm660a-cdp-overlay.dts
index 3e1365d..7701c0b 100644
--- a/arch/arm64/boot/dts/qcom/sda670-pm660a-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sda670-pm660a-cdp-overlay.dts
@@ -22,6 +22,7 @@
 
 #include "sdm670-cdp.dtsi"
 #include "pm660a.dtsi"
+#include "sdm670-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDA670 PM660 + PM660A CDP";
diff --git a/arch/arm64/boot/dts/qcom/sda670-pm660a-cdp.dts b/arch/arm64/boot/dts/qcom/sda670-pm660a-cdp.dts
index 6cbf224..e6f8d50 100644
--- a/arch/arm64/boot/dts/qcom/sda670-pm660a-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sda670-pm660a-cdp.dts
@@ -17,6 +17,7 @@
 #include "sda670.dtsi"
 #include "sdm670-cdp.dtsi"
 #include "pm660a.dtsi"
+#include "sdm670-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDA670 PM660 + PM660A CDP";
diff --git a/arch/arm64/boot/dts/qcom/sda670-pm660a-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sda670-pm660a-mtp-overlay.dts
index 9855b11..0b355ab 100644
--- a/arch/arm64/boot/dts/qcom/sda670-pm660a-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sda670-pm660a-mtp-overlay.dts
@@ -22,6 +22,7 @@
 
 #include "sdm670-mtp.dtsi"
 #include "pm660a.dtsi"
+#include "sdm670-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDA670 PM660 + PM660A MTP";
diff --git a/arch/arm64/boot/dts/qcom/sda670-pm660a-mtp.dts b/arch/arm64/boot/dts/qcom/sda670-pm660a-mtp.dts
index ffb6aa3..0d7e34a 100644
--- a/arch/arm64/boot/dts/qcom/sda670-pm660a-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sda670-pm660a-mtp.dts
@@ -17,6 +17,7 @@
 #include "sda670.dtsi"
 #include "sdm670-mtp.dtsi"
 #include "pm660a.dtsi"
+#include "sdm670-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDA670 PM660 + PM660A MTP";
diff --git a/arch/arm64/boot/dts/qcom/sda845-v2-hdk-overlay.dts b/arch/arm64/boot/dts/qcom/sda845-v2-hdk-overlay.dts
index f836f50..de20f87 100644
--- a/arch/arm64/boot/dts/qcom/sda845-v2-hdk-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sda845-v2-hdk-overlay.dts
@@ -19,7 +19,9 @@
 #include <dt-bindings/clock/qcom,rpmh.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 
+#include "sdm845-sde-display.dtsi"
 #include "sda845-v2-hdk.dtsi"
+#include "sdm845-hdk-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDA845 v2 HDK";
diff --git a/arch/arm64/boot/dts/qcom/sda845-v2-hdk.dtsi b/arch/arm64/boot/dts/qcom/sda845-v2-hdk.dtsi
index 53617dc..d212554 100644
--- a/arch/arm64/boot/dts/qcom/sda845-v2-hdk.dtsi
+++ b/arch/arm64/boot/dts/qcom/sda845-v2-hdk.dtsi
@@ -18,3 +18,7 @@
 		#include "fg-gen3-batterydata-mlp356477-2800mah.dtsi"
 	};
 };
+
+&sdhc_2 {
+	cd-gpios = <&tlmm 126 GPIO_ACTIVE_LOW>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sda845-v2.1-4k-panel-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sda845-v2.1-4k-panel-cdp-overlay.dts
new file mode 100644
index 0000000..d49fdb6
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda845-v2.1-4k-panel-cdp-overlay.dts
@@ -0,0 +1,66 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sdm845-sde-display.dtsi"
+#include "sdm845-cdp.dtsi"
+#include "sdm845-cdp-audio-overlay.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. sda845 v2.1 4K Display Panel CDP";
+	compatible = "qcom,sda845-cdp", "qcom,sda845", "qcom,cdp";
+	qcom,msm-id = <341 0x20001>;
+	qcom,board-id = <1 1>;
+};
+
+&dsi_nt35597_truly_dsc_cmd_display {
+	/delete-property/ qcom,dsi-display-active;
+};
+
+&mdss_mdp {
+	connectors = <&sde_rscc &sde_wb>;
+};
+
+&dsi_sharp_4k_dsc_video {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+	qcom,panel-mode-gpio = <&tlmm 52 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_sharp_4k_dsc_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+	qcom,panel-mode-gpio = <&tlmm 52 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_sharp_4k_dsc_video_display {
+	qcom,dsi-display-active;
+};
diff --git a/arch/arm64/boot/dts/qcom/sda845-v2.1-4k-panel-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sda845-v2.1-4k-panel-mtp-overlay.dts
new file mode 100644
index 0000000..c797492
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda845-v2.1-4k-panel-mtp-overlay.dts
@@ -0,0 +1,66 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sdm845-sde-display.dtsi"
+#include "sdm845-mtp.dtsi"
+#include "sdm845-audio-overlay.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. sda845 v2.1 4K Display Panel MTP";
+	compatible = "qcom,sda845-mtp", "qcom,sda845", "qcom,mtp";
+	qcom,msm-id = <341 0x20001>;
+	qcom,board-id = <8 1>;
+};
+
+&dsi_nt35597_truly_dsc_cmd_display {
+	/delete-property/ qcom,dsi-display-active;
+};
+
+&mdss_mdp {
+	connectors = <&sde_rscc &sde_wb &sde_dp>;
+};
+
+&dsi_sharp_4k_dsc_video {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+	qcom,panel-mode-gpio = <&tlmm 52 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_sharp_4k_dsc_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+	qcom,panel-mode-gpio = <&tlmm 52 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_sharp_4k_dsc_video_display {
+	qcom,dsi-display-active;
+};
diff --git a/arch/arm64/boot/dts/qcom/sda845-v2.1-4k-panel-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/sda845-v2.1-4k-panel-qrd-overlay.dts
new file mode 100644
index 0000000..221a1d7
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda845-v2.1-4k-panel-qrd-overlay.dts
@@ -0,0 +1,64 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sdm845-sde-display.dtsi"
+#include "sdm845-qrd.dtsi"
+#include "sdm845-qrd-audio-overlay.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. sda845 v2.1 4K Display Panel QRD";
+	compatible = "qcom,sda845-qrd", "qcom,sda845", "qcom,qrd";
+	qcom,msm-id = <341 0x20001>;
+	qcom,board-id = <11 1>;
+};
+
+&dsi_nt35597_truly_dsc_cmd_display {
+	/delete-property/ qcom,dsi-display-active;
+};
+
+&dsi_sharp_4k_dsc_video {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+	qcom,panel-mode-gpio = <&tlmm 52 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+	qcom,mdss-dsi-panel-orientation = "180";
+};
+
+&dsi_sharp_4k_dsc_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+	qcom,panel-mode-gpio = <&tlmm 52 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+	qcom,mdss-dsi-panel-orientation = "180";
+};
+
+&dsi_sharp_4k_dsc_video_display {
+	qcom,dsi-display-active;
+};
diff --git a/arch/arm64/boot/dts/qcom/sda845-v2.1-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sda845-v2.1-cdp-overlay.dts
new file mode 100644
index 0000000..64af617
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda845-v2.1-cdp-overlay.dts
@@ -0,0 +1,32 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sdm845-sde-display.dtsi"
+#include "sdm845-cdp.dtsi"
+#include "sdm845-cdp-audio-overlay.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. sda845 v2.1 CDP";
+	compatible = "qcom,sda845-cdp", "qcom,sda845", "qcom,cdp";
+	qcom,msm-id = <341 0x20001>;
+	qcom,board-id = <1 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sda845-v2.1-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sda845-v2.1-mtp-overlay.dts
new file mode 100644
index 0000000..931f0e2
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda845-v2.1-mtp-overlay.dts
@@ -0,0 +1,32 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sdm845-sde-display.dtsi"
+#include "sdm845-mtp.dtsi"
+#include "sdm845-audio-overlay.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. sda845 v2.1 MTP";
+	compatible = "qcom,sda845-mtp", "qcom,sda845", "qcom,mtp";
+	qcom,msm-id = <341 0x20001>;
+	qcom,board-id = <8 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sda845-v2.1-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/sda845-v2.1-qrd-overlay.dts
new file mode 100644
index 0000000..d279fce
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda845-v2.1-qrd-overlay.dts
@@ -0,0 +1,31 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sdm845-sde-display.dtsi"
+#include "sdm845-qrd.dtsi"
+#include "sdm845-qrd-audio-overlay.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDA845 v2.1 QRD";
+	compatible = "qcom,sda845-qrd", "qcom,sda845", "qcom,qrd";
+	qcom,msm-id = <341 0x20001>;
+	qcom,board-id = <11 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sda845-v2.1.dts b/arch/arm64/boot/dts/qcom/sda845-v2.1.dts
new file mode 100644
index 0000000..9706587
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda845-v2.1.dts
@@ -0,0 +1,21 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sda845-v2.1.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDA845 v2.1 SoC";
+	compatible = "qcom,sda845";
+	qcom,board-id = <0 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sda845-v2.1.dtsi b/arch/arm64/boot/dts/qcom/sda845-v2.1.dtsi
new file mode 100644
index 0000000..fe70be1
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda845-v2.1.dtsi
@@ -0,0 +1,18 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdm845-v2.1.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDA845 v2.1";
+	qcom,msm-id = <341 0x20001>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm450-cdp.dts b/arch/arm64/boot/dts/qcom/sdm450-cdp.dts
new file mode 100644
index 0000000..3e06872
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm450-cdp.dts
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sdm450.dtsi"
+#include "msm8953-cdp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM450 + PMI8950 CDP";
+	compatible = "qcom,sdm450-cdp", "qcom,sdm450", "qcom,cdp";
+	qcom,board-id = <1 0>;
+	qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm450-iot-mtp.dts b/arch/arm64/boot/dts/qcom/sdm450-iot-mtp.dts
new file mode 100644
index 0000000..7fac030
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm450-iot-mtp.dts
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sdm450.dtsi"
+#include "msm8953-mtp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM450 + PMI8950 IOT MTP";
+	compatible = "qcom,sdm450-mtp", "qcom,sdm450", "qcom,mtp";
+	qcom,board-id = <8 2>;
+	qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm450-mtp.dts b/arch/arm64/boot/dts/qcom/sdm450-mtp.dts
new file mode 100644
index 0000000..2524b80
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm450-mtp.dts
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sdm450.dtsi"
+#include "msm8953-mtp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM450 + PMI8950 MTP";
+	compatible = "qcom,sdm450-mtp", "qcom,sdm450", "qcom,mtp";
+	qcom,board-id = <8 0>;
+	qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm450-pmi8937-mtp.dts b/arch/arm64/boot/dts/qcom/sdm450-pmi8937-mtp.dts
new file mode 100644
index 0000000..6a6a09e
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi8937-mtp.dts
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sdm450.dtsi"
+#include "msm8953-mtp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM450 + PMI8937 MTP";
+	compatible = "qcom,sdm450-mtp", "qcom,sdm450", "qcom,mtp";
+	qcom,board-id = <8 0>;
+	qcom,pmic-id = <0x010016 0x020037 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm450-pmi8940-mtp.dts b/arch/arm64/boot/dts/qcom/sdm450-pmi8940-mtp.dts
new file mode 100644
index 0000000..3c4e802
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi8940-mtp.dts
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sdm450.dtsi"
+#include "msm8953-mtp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM450 + PMI8940 MTP";
+	compatible = "qcom,sdm450-mtp", "qcom,sdm450", "qcom,mtp";
+	qcom,board-id = <8 0>;
+	qcom,pmic-id = <0x010016 0x020040 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm450-qrd.dts b/arch/arm64/boot/dts/qcom/sdm450-qrd.dts
new file mode 100644
index 0000000..3c2e25b
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm450-qrd.dts
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sdm450.dtsi"
+#include "msm8953-qrd.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM450 + PMI8950 QRD";
+	compatible = "qcom,sdm450-qrd", "qcom,sdm450", "qcom,qrd";
+	qcom,board-id = <0x5000b 0>;
+	qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm450-rcm.dts b/arch/arm64/boot/dts/qcom/sdm450-rcm.dts
new file mode 100644
index 0000000..4ab131a
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm450-rcm.dts
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sdm450.dtsi"
+#include "msm8953-cdp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM450 + PMI8950 RCM";
+	compatible = "qcom,sdm450-cdp", "qcom,sdm450", "qcom,cdp";
+	qcom,board-id = <21 0>;
+	qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm450.dtsi b/arch/arm64/boot/dts/qcom/sdm450.dtsi
new file mode 100644
index 0000000..8087399
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm450.dtsi
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8953.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM450";
+	compatible = "qcom,sdm450";
+	qcom,msm-id = <338 0x0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm670-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm670-audio-overlay.dtsi
index dfb8142..58c290d 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-audio-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-audio-overlay.dtsi
@@ -64,6 +64,8 @@
 		"RX_BIAS", "INT_MCLK0",
 		"SPK_RX_BIAS", "INT_MCLK0",
 		"INT_LDO_H", "INT_MCLK0",
+		"RX_I2S_CLK", "INT_MCLK0",
+		"TX_I2S_CLK", "INT_MCLK0",
 		"MIC BIAS External", "Handset Mic",
 		"MIC BIAS External2", "Headset Mic",
 		"MIC BIAS External", "Secondary Mic",
diff --git a/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi b/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi
index bbf6683..b26ec5c 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi
@@ -72,7 +72,7 @@
 			<&dai_quin_tdm_rx_0>, <&dai_quin_tdm_tx_0>;
 		asoc-cpu-names = "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
 			"msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
-			"msm-dai-q6-mi2s.5",
+			"msm-dai-q6-mi2s.4",
 			"msm-dai-q6-auxpcm.1", "msm-dai-q6-auxpcm.2",
 			"msm-dai-q6-auxpcm.3", "msm-dai-q6-auxpcm.4",
 			"msm-dai-q6-auxpcm.5",
@@ -136,7 +136,7 @@
 			<&dai_quin_tdm_rx_0>, <&dai_quin_tdm_tx_0>;
 		asoc-cpu-names = "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
 			"msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
-			"msm-dai-q6-mi2s.5",
+			"msm-dai-q6-mi2s.4",
 			"msm-dai-q6-mi2s.7", "msm-dai-q6-mi2s.8",
 			"msm-dai-q6-mi2s.9", "msm-dai-q6-mi2s.10",
 			"msm-dai-q6-mi2s.11", "msm-dai-q6-mi2s.12",
diff --git a/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-cdp.dtsi
new file mode 100644
index 0000000..c4ca6c5
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-cdp.dtsi
@@ -0,0 +1,387 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	led_flash_rear: qcom,camera-flash@0 {
+		cell-index = <0>;
+		reg = <0x00 0x00>;
+		compatible = "qcom,camera-flash";
+		flash-source = <&pm660l_flash0 &pm660l_flash1>;
+		torch-source = <&pm660l_torch0 &pm660l_torch1>;
+		switch-source = <&pm660l_switch0>;
+		status = "ok";
+	};
+
+	led_flash_front: qcom,camera-flash@1 {
+		cell-index = <1>;
+		reg = <0x01 0x00>;
+		compatible = "qcom,camera-flash";
+		flash-source = <&pm660l_flash2>;
+		torch-source = <&pm660l_torch2>;
+		switch-source = <&pm660l_switch1>;
+		status = "ok";
+	};
+
+	actuator_regulator: gpio-regulator@0 {
+		compatible = "regulator-fixed";
+		reg = <0x00 0x00>;
+		regulator-name = "actuator_regulator";
+		regulator-min-microvolt = <2800000>;
+		regulator-max-microvolt = <2800000>;
+		regulator-enable-ramp-delay = <100>;
+		enable-active-high;
+		gpio = <&tlmm 27 0>;
+	};
+
+	camera_ldo: gpio-regulator@2 {
+		compatible = "regulator-fixed";
+		reg = <0x02 0x00>;
+		regulator-name = "camera_ldo";
+		regulator-min-microvolt = <1352000>;
+		regulator-max-microvolt = <1352000>;
+		regulator-enable-ramp-delay = <233>;
+		enable-active-high;
+		gpio = <&pm660l_gpios 3 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&camera_dvdd_en_default>;
+		vin-supply = <&pm660_s6>;
+	};
+
+	camera_rear_ldo: gpio-regulator@1 {
+		compatible = "regulator-fixed";
+		reg = <0x01 0x00>;
+		regulator-name = "camera_rear_ldo";
+		regulator-min-microvolt = <1352000>;
+		regulator-max-microvolt = <1352000>;
+		regulator-enable-ramp-delay = <135>;
+		enable-active-high;
+		gpio = <&pm660l_gpios 4 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&camera_rear_dvdd_en_default>;
+		vin-supply = <&pm660_s6>;
+	};
+
+	camera_vio_ldo: gpio-regulator@3 {
+		compatible = "regulator-fixed";
+		reg = <0x03 0x00>;
+		regulator-name = "camera_vio_ldo";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		regulator-enable-ramp-delay = <233>;
+		enable-active-high;
+		gpio = <&tlmm 29 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&cam_sensor_rear_vio>;
+		vin-supply = <&pm660_s4>;
+	};
+
+	camera_vana_ldo: gpio-regulator@4 {
+		compatible = "regulator-fixed";
+		reg = <0x04 0x00>;
+		regulator-name = "camera_vana_ldo";
+		regulator-min-microvolt = <2850000>;
+		regulator-max-microvolt = <2850000>;
+		regulator-enable-ramp-delay = <233>;
+		enable-active-high;
+		gpio = <&tlmm 8 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&cam_sensor_rear_vana>;
+		vin-supply = <&pm660l_bob>;
+	};
+};
+
+&cam_cci {
+	qcom,cam-res-mgr {
+		compatible = "qcom,cam-res-mgr";
+		status = "ok";
+	};
+
+	actuator_rear: qcom,actuator@0 {
+		cell-index = <0>;
+		reg = <0x0>;
+		compatible = "qcom,actuator";
+		cci-master = <0>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2800000>;
+		rgltr-max-voltage = <2800000>;
+		rgltr-load-current = <0>;
+	};
+
+	actuator_front: qcom,actuator@1 {
+		cell-index = <1>;
+		reg = <0x1>;
+		compatible = "qcom,actuator";
+		cci-master = <1>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2800000>;
+		rgltr-max-voltage = <2800000>;
+		rgltr-load-current = <0>;
+	};
+
+	ois_rear: qcom,ois@0 {
+		cell-index = <0>;
+		reg = <0x0>;
+		compatible = "qcom,ois";
+		cci-master = <0>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2800000>;
+		rgltr-max-voltage = <2800000>;
+		rgltr-load-current = <0>;
+		status = "disabled";
+	};
+
+	eeprom_rear: qcom,eeprom@0 {
+		cell-index = <0>;
+		reg = <0>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&camera_vio_ldo>;
+		cam_vana-supply = <&camera_vana_ldo>;
+		cam_vdig-supply = <&camera_rear_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1800000 2850000 1352000 0 2800000>;
+		rgltr-max-voltage = <1800000 2850000 1352000 0 2800000>;
+		rgltr-load-current = <0 80000 105000 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_active
+				&cam_sensor_rear_active>;
+		pinctrl-1 = <&cam_sensor_mclk0_suspend
+				&cam_sensor_rear_suspend>;
+		gpios = <&tlmm 13 0>,
+			<&tlmm 30 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK0",
+					"CAM_RESET0";
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	eeprom_rear_aux: qcom,eeprom@1 {
+		cell-index = <1>;
+		reg = <0x1>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&camera_vio_ldo>;
+		cam_vana-supply = <&camera_vana_ldo>;
+		cam_vdig-supply = <&camera_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vdig", "cam_vio", "cam_vana",
+			"cam_clk", "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1352000 1800000 2850000 0 2800000>;
+		rgltr-max-voltage = <1352000 1800000 2850000 0 2800000>;
+		rgltr-load-current = <105000 0 80000 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk1_active
+				&cam_sensor_rear2_active>;
+		pinctrl-1 = <&cam_sensor_mclk1_suspend
+				&cam_sensor_rear2_suspend>;
+		gpios = <&tlmm 14 0>,
+			<&tlmm 28 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
+					"CAM_RESET1";
+		sensor-position = <0>;
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	eeprom_front: qcom,eeprom@2 {
+		cell-index = <2>;
+		reg = <0x2>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&camera_vio_ldo>;
+		cam_vana-supply = <&camera_vana_ldo>;
+		cam_vdig-supply = <&camera_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1800000 2850000 1352000 0 2800000>;
+		rgltr-max-voltage = <1800000 2850000 1352000 0 2800000>;
+		rgltr-load-current = <0 80000 105000 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk2_active
+				 &cam_sensor_front_active>;
+		pinctrl-1 = <&cam_sensor_mclk2_suspend
+				 &cam_sensor_front_suspend>;
+		gpios = <&tlmm 15 0>,
+			<&tlmm 9 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK2",
+					"CAM_RESET2";
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor@0 {
+		cell-index = <0>;
+		compatible = "qcom,cam-sensor";
+		reg = <0x0>;
+		csiphy-sd-index = <0>;
+		sensor-position-roll = <270>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <180>;
+		led-flash-src = <&led_flash_rear>;
+		actuator-src = <&actuator_rear>;
+		ois-src = <&ois_rear>;
+		eeprom-src = <&eeprom_rear>;
+		cam_vio-supply = <&camera_vio_ldo>;
+		cam_vana-supply = <&camera_vana_ldo>;
+		cam_vdig-supply = <&camera_rear_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1800000 2850000 1352000 0>;
+		rgltr-max-voltage = <1800000 2850000 1352000 0>;
+		rgltr-load-current = <0 80000 105000 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_active
+				&cam_sensor_rear_active>;
+		pinctrl-1 = <&cam_sensor_mclk0_suspend
+				&cam_sensor_rear_suspend>;
+		gpios = <&tlmm 13 0>,
+			<&tlmm 30 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK0",
+					"CAM_RESET0";
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor@1 {
+		cell-index = <1>;
+		compatible = "qcom,cam-sensor";
+		reg = <0x1>;
+		csiphy-sd-index = <1>;
+		sensor-position-roll = <90>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <180>;
+		eeprom-src = <&eeprom_rear_aux>;
+		cam_vio-supply = <&camera_vio_ldo>;
+		cam_vana-supply = <&camera_vana_ldo>;
+		cam_vdig-supply = <&camera_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vdig", "cam_vio", "cam_vana",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1352000 1800000 2850000 0>;
+		rgltr-max-voltage = <1352000 1800000 2850000 0>;
+		rgltr-load-current = <105000 0 80000 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk1_active
+				&cam_sensor_rear2_active>;
+		pinctrl-1 = <&cam_sensor_mclk1_suspend
+				&cam_sensor_rear2_suspend>;
+		gpios = <&tlmm 14 0>,
+			<&tlmm 28 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
+					"CAM_RESET1";
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor@2 {
+		cell-index = <2>;
+		compatible = "qcom,cam-sensor";
+		reg = <0x02>;
+		csiphy-sd-index = <2>;
+		sensor-position-roll = <270>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <0>;
+		eeprom-src = <&eeprom_front>;
+		actuator-src = <&actuator_front>;
+		led-flash-src = <&led_flash_front>;
+		cam_vio-supply = <&camera_vio_ldo>;
+		cam_vana-supply = <&camera_vana_ldo>;
+		cam_vdig-supply = <&camera_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1800000 2850000 1352000 0>;
+		rgltr-max-voltage = <1800000 2850000 1352000 0>;
+		rgltr-load-current = <0 80000 105000 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk2_active
+				 &cam_sensor_front_active>;
+		pinctrl-1 = <&cam_sensor_mclk2_suspend
+				 &cam_sensor_front_suspend>;
+		gpios = <&tlmm 15 0>,
+			<&tlmm 9 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK2",
+					"CAM_RESET2";
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-mtp.dtsi
new file mode 100644
index 0000000..c4ca6c5
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-mtp.dtsi
@@ -0,0 +1,387 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	led_flash_rear: qcom,camera-flash@0 {
+		cell-index = <0>;
+		reg = <0x00 0x00>;
+		compatible = "qcom,camera-flash";
+		flash-source = <&pm660l_flash0 &pm660l_flash1>;
+		torch-source = <&pm660l_torch0 &pm660l_torch1>;
+		switch-source = <&pm660l_switch0>;
+		status = "ok";
+	};
+
+	led_flash_front: qcom,camera-flash@1 {
+		cell-index = <1>;
+		reg = <0x01 0x00>;
+		compatible = "qcom,camera-flash";
+		flash-source = <&pm660l_flash2>;
+		torch-source = <&pm660l_torch2>;
+		switch-source = <&pm660l_switch1>;
+		status = "ok";
+	};
+
+	actuator_regulator: gpio-regulator@0 {
+		compatible = "regulator-fixed";
+		reg = <0x00 0x00>;
+		regulator-name = "actuator_regulator";
+		regulator-min-microvolt = <2800000>;
+		regulator-max-microvolt = <2800000>;
+		regulator-enable-ramp-delay = <100>;
+		enable-active-high;
+		gpio = <&tlmm 27 0>;
+	};
+
+	camera_ldo: gpio-regulator@2 {
+		compatible = "regulator-fixed";
+		reg = <0x02 0x00>;
+		regulator-name = "camera_ldo";
+		regulator-min-microvolt = <1352000>;
+		regulator-max-microvolt = <1352000>;
+		regulator-enable-ramp-delay = <233>;
+		enable-active-high;
+		gpio = <&pm660l_gpios 3 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&camera_dvdd_en_default>;
+		vin-supply = <&pm660_s6>;
+	};
+
+	camera_rear_ldo: gpio-regulator@1 {
+		compatible = "regulator-fixed";
+		reg = <0x01 0x00>;
+		regulator-name = "camera_rear_ldo";
+		regulator-min-microvolt = <1352000>;
+		regulator-max-microvolt = <1352000>;
+		regulator-enable-ramp-delay = <135>;
+		enable-active-high;
+		gpio = <&pm660l_gpios 4 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&camera_rear_dvdd_en_default>;
+		vin-supply = <&pm660_s6>;
+	};
+
+	camera_vio_ldo: gpio-regulator@3 {
+		compatible = "regulator-fixed";
+		reg = <0x03 0x00>;
+		regulator-name = "camera_vio_ldo";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		regulator-enable-ramp-delay = <233>;
+		enable-active-high;
+		gpio = <&tlmm 29 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&cam_sensor_rear_vio>;
+		vin-supply = <&pm660_s4>;
+	};
+
+	camera_vana_ldo: gpio-regulator@4 {
+		compatible = "regulator-fixed";
+		reg = <0x04 0x00>;
+		regulator-name = "camera_vana_ldo";
+		regulator-min-microvolt = <2850000>;
+		regulator-max-microvolt = <2850000>;
+		regulator-enable-ramp-delay = <233>;
+		enable-active-high;
+		gpio = <&tlmm 8 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&cam_sensor_rear_vana>;
+		vin-supply = <&pm660l_bob>;
+	};
+};
+
+&cam_cci {
+	qcom,cam-res-mgr {
+		compatible = "qcom,cam-res-mgr";
+		status = "ok";
+	};
+
+	actuator_rear: qcom,actuator@0 {
+		cell-index = <0>;
+		reg = <0x0>;
+		compatible = "qcom,actuator";
+		cci-master = <0>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2800000>;
+		rgltr-max-voltage = <2800000>;
+		rgltr-load-current = <0>;
+	};
+
+	actuator_front: qcom,actuator@1 {
+		cell-index = <1>;
+		reg = <0x1>;
+		compatible = "qcom,actuator";
+		cci-master = <1>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2800000>;
+		rgltr-max-voltage = <2800000>;
+		rgltr-load-current = <0>;
+	};
+
+	ois_rear: qcom,ois@0 {
+		cell-index = <0>;
+		reg = <0x0>;
+		compatible = "qcom,ois";
+		cci-master = <0>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2800000>;
+		rgltr-max-voltage = <2800000>;
+		rgltr-load-current = <0>;
+		status = "disabled";
+	};
+
+	eeprom_rear: qcom,eeprom@0 {
+		cell-index = <0>;
+		reg = <0>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&camera_vio_ldo>;
+		cam_vana-supply = <&camera_vana_ldo>;
+		cam_vdig-supply = <&camera_rear_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1800000 2850000 1352000 0 2800000>;
+		rgltr-max-voltage = <1800000 2850000 1352000 0 2800000>;
+		rgltr-load-current = <0 80000 105000 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_active
+				&cam_sensor_rear_active>;
+		pinctrl-1 = <&cam_sensor_mclk0_suspend
+				&cam_sensor_rear_suspend>;
+		gpios = <&tlmm 13 0>,
+			<&tlmm 30 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK0",
+					"CAM_RESET0";
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	eeprom_rear_aux: qcom,eeprom@1 {
+		cell-index = <1>;
+		reg = <0x1>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&camera_vio_ldo>;
+		cam_vana-supply = <&camera_vana_ldo>;
+		cam_vdig-supply = <&camera_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vdig", "cam_vio", "cam_vana",
+			"cam_clk", "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1352000 1800000 2850000 0 2800000>;
+		rgltr-max-voltage = <1352000 1800000 2850000 0 2800000>;
+		rgltr-load-current = <105000 0 80000 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk1_active
+				&cam_sensor_rear2_active>;
+		pinctrl-1 = <&cam_sensor_mclk1_suspend
+				&cam_sensor_rear2_suspend>;
+		gpios = <&tlmm 14 0>,
+			<&tlmm 28 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
+					"CAM_RESET1";
+		sensor-position = <0>;
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	eeprom_front: qcom,eeprom@2 {
+		cell-index = <2>;
+		reg = <0x2>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&camera_vio_ldo>;
+		cam_vana-supply = <&camera_vana_ldo>;
+		cam_vdig-supply = <&camera_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1800000 2850000 1352000 0 2800000>;
+		rgltr-max-voltage = <1800000 2850000 1352000 0 2800000>;
+		rgltr-load-current = <0 80000 105000 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk2_active
+				 &cam_sensor_front_active>;
+		pinctrl-1 = <&cam_sensor_mclk2_suspend
+				 &cam_sensor_front_suspend>;
+		gpios = <&tlmm 15 0>,
+			<&tlmm 9 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK2",
+					"CAM_RESET2";
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor@0 {
+		cell-index = <0>;
+		compatible = "qcom,cam-sensor";
+		reg = <0x0>;
+		csiphy-sd-index = <0>;
+		sensor-position-roll = <270>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <180>;
+		led-flash-src = <&led_flash_rear>;
+		actuator-src = <&actuator_rear>;
+		ois-src = <&ois_rear>;
+		eeprom-src = <&eeprom_rear>;
+		cam_vio-supply = <&camera_vio_ldo>;
+		cam_vana-supply = <&camera_vana_ldo>;
+		cam_vdig-supply = <&camera_rear_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1800000 2850000 1352000 0>;
+		rgltr-max-voltage = <1800000 2850000 1352000 0>;
+		rgltr-load-current = <0 80000 105000 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_active
+				&cam_sensor_rear_active>;
+		pinctrl-1 = <&cam_sensor_mclk0_suspend
+				&cam_sensor_rear_suspend>;
+		gpios = <&tlmm 13 0>,
+			<&tlmm 30 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK0",
+					"CAM_RESET0";
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor@1 {
+		cell-index = <1>;
+		compatible = "qcom,cam-sensor";
+		reg = <0x1>;
+		csiphy-sd-index = <1>;
+		sensor-position-roll = <90>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <180>;
+		eeprom-src = <&eeprom_rear_aux>;
+		cam_vio-supply = <&camera_vio_ldo>;
+		cam_vana-supply = <&camera_vana_ldo>;
+		cam_vdig-supply = <&camera_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vdig", "cam_vio", "cam_vana",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1352000 1800000 2850000 0>;
+		rgltr-max-voltage = <1352000 1800000 2850000 0>;
+		rgltr-load-current = <105000 0 80000 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk1_active
+				&cam_sensor_rear2_active>;
+		pinctrl-1 = <&cam_sensor_mclk1_suspend
+				&cam_sensor_rear2_suspend>;
+		gpios = <&tlmm 14 0>,
+			<&tlmm 28 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
+					"CAM_RESET1";
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor@2 {
+		cell-index = <2>;
+		compatible = "qcom,cam-sensor";
+		reg = <0x02>;
+		csiphy-sd-index = <2>;
+		sensor-position-roll = <270>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <0>;
+		eeprom-src = <&eeprom_front>;
+		actuator-src = <&actuator_front>;
+		led-flash-src = <&led_flash_front>;
+		cam_vio-supply = <&camera_vio_ldo>;
+		cam_vana-supply = <&camera_vana_ldo>;
+		cam_vdig-supply = <&camera_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1800000 2850000 1352000 0>;
+		rgltr-max-voltage = <1800000 2850000 1352000 0>;
+		rgltr-load-current = <0 80000 105000 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk2_active
+				 &cam_sensor_front_active>;
+		pinctrl-1 = <&cam_sensor_mclk2_suspend
+				 &cam_sensor_front_suspend>;
+		gpios = <&tlmm 15 0>,
+			<&tlmm 9 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK2",
+					"CAM_RESET2";
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-qrd.dtsi
new file mode 100644
index 0000000..6506f98
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-qrd.dtsi
@@ -0,0 +1,484 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	led_flash_rear: qcom,camera-flash@0 {
+		cell-index = <0>;
+		reg = <0x00 0x00>;
+		compatible = "qcom,camera-flash";
+		flash-source = <&pm660l_flash0 &pm660l_flash1>;
+		torch-source = <&pm660l_torch0 &pm660l_torch1>;
+		switch-source = <&pm660l_switch0>;
+		status = "ok";
+	};
+
+	actuator_regulator: gpio-regulator@0 {
+		compatible = "regulator-fixed";
+		reg = <0x00 0x00>;
+		regulator-name = "actuator_regulator";
+		regulator-min-microvolt = <2800000>;
+		regulator-max-microvolt = <2800000>;
+		regulator-enable-ramp-delay = <100>;
+		enable-active-high;
+		gpio = <&tlmm 27 0>;
+		vin-supply = <&pm660l_bob>;
+	};
+
+	cam_avdd_gpio_regulator: gpio-regulator@1 {
+		compatible = "regulator-fixed";
+		reg = <0x01 0x00>;
+		regulator-name = "cam_avdd_gpio_regulator";
+		regulator-min-microvolt = <2850000>;
+		regulator-max-microvolt = <2850000>;
+		regulator-enable-ramp-delay = <135>;
+		enable-active-high;
+		gpio = <&tlmm 100 0>;
+		vin-supply = <&pm660l_bob>;
+	};
+
+	cam_dvdd_gpio_regulator: gpio-regulator@2 {
+		compatible = "regulator-fixed";
+		reg = <0x02 0x00>;
+		regulator-name = "cam_dvdd_gpio_regulator";
+		regulator-min-microvolt = <1200000>;
+		regulator-max-microvolt = <1200000>;
+		regulator-enable-ramp-delay = <233>;
+		enable-active-high;
+		gpio = <&pm660l_gpios 4 0>;
+		vin-supply = <&pm660_s6>;
+	};
+
+	cam_iovdd_gpio_regulator: gpio-regulator@3 {
+		compatible = "regulator-fixed";
+		reg = <0x03 0x00>;
+		regulator-name = "cam_iovdd_gpio_regulator";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		regulator-enable-ramp-delay = <233>;
+		enable-active-high;
+		gpio = <&tlmm 29 0>;
+		vin-supply = <&pm660_s4>;
+	};
+
+	cam_rear_avdd_gpio_regulator: gpio-regulator@4 {
+		compatible = "regulator-fixed";
+		reg = <0x04 0x00>;
+		regulator-name = "cam_rear_avdd_gpio_regulator";
+		regulator-min-microvolt = <2850000>;
+		regulator-max-microvolt = <2850000>;
+		regulator-enable-ramp-delay = <233>;
+		enable-active-high;
+		gpio = <&tlmm 8 0>;
+		vin-supply = <&pm660l_bob>;
+	};
+
+	cam_rear_dvdd_gpio_regulator: gpio-regulator@5 {
+		compatible = "regulator-fixed";
+		reg = <0x05 0x00>;
+		regulator-name = "cam_rear_dvdd_gpio_regulator";
+		regulator-min-microvolt = <1200000>;
+		regulator-max-microvolt = <1200000>;
+		regulator-enable-ramp-delay = <233>;
+		enable-active-high;
+		gpio = <&pm660l_gpios 3 0>;
+		vin-supply = <&pm660_s6>;
+	};
+};
+
+&tlmm {
+	cam_sensor_rear_active: cam_sensor_rear_active {
+		/* RESET */
+		mux {
+			pins = "gpio30";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio30";
+			bias-disable; /* No PULL */
+			drive-strength = <2>; /* 2 MA */
+		};
+	};
+
+	cam_sensor_rear_suspend: cam_sensor_rear_suspend {
+		/* RESET */
+		mux {
+			pins = "gpio30";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio30";
+			bias-disable; /* No PULL */
+			drive-strength = <2>; /* 2 MA */
+		};
+	};
+
+	cam_sensor_rear2_active: cam_sensor_rear2_active {
+		/* RESET */
+		mux {
+			pins = "gpio9";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio9";
+			bias-disable; /* No PULL */
+			drive-strength = <2>; /* 2 MA */
+		};
+	};
+
+	cam_sensor_rear2_suspend: cam_sensor_rear2_suspend {
+		/* RESET */
+		mux {
+			pins = "gpio9";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio9";
+			bias-disable; /* No PULL */
+			drive-strength = <2>; /* 2 MA */
+		};
+	};
+
+	cam_sensor_front_active: cam_sensor_front_active {
+		/* RESET */
+		mux {
+			pins = "gpio28";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio28";
+			bias-disable; /* No PULL */
+			drive-strength = <2>; /* 2 MA */
+		};
+	};
+
+	cam_sensor_front_suspend: cam_sensor_front_suspend {
+		/* RESET */
+		mux {
+			pins = "gpio28";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio28";
+			bias-disable; /* No PULL */
+			drive-strength = <2>; /* 2 MA */
+		};
+	};
+};
+
+&cam_cci {
+	actuator_rear: qcom,actuator@0 {
+		cell-index = <0>;
+		reg = <0x0>;
+		compatible = "qcom,actuator";
+		cci-master = <0>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2800000>;
+		rgltr-max-voltage = <2800000>;
+		rgltr-load-current = <0>;
+	};
+
+	actuator_rear_aux: qcom,actuator@1 {
+		cell-index = <1>;
+		reg = <0x1>;
+		compatible = "qcom,actuator";
+		cci-master = <1>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2800000>;
+		rgltr-max-voltage = <2800000>;
+		rgltr-load-current = <0>;
+	};
+
+	actuator_front: qcom,actuator@2 {
+		cell-index = <2>;
+		reg = <0x2>;
+		compatible = "qcom,actuator";
+		cci-master = <1>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2800000>;
+		rgltr-max-voltage = <2800000>;
+		rgltr-load-current = <0>;
+	};
+
+	eeprom_rear: qcom,eeprom@0 {
+		cell-index = <0>;
+		reg = <0>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&cam_iovdd_gpio_regulator>;
+		cam_vana-supply = <&cam_rear_avdd_gpio_regulator>;
+		cam_vdig-supply = <&cam_rear_dvdd_gpio_regulator>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 0 0 0>;
+		rgltr-max-voltage = <0 0 0 0>;
+		rgltr-load-current = <0 0 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_active
+				&cam_sensor_rear_active>;
+		pinctrl-1 = <&cam_sensor_mclk0_suspend
+				&cam_sensor_rear_suspend>;
+		gpios = <&tlmm 13 0>,
+			<&tlmm 30 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK0",
+					"CAM_RESET0";
+		sensor-position = <0>;
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	eeprom_rear_aux: qcom,eeprom@1 {
+		cell-index = <1>;
+		reg = <0x1>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&cam_iovdd_gpio_regulator>;
+		cam_vana-supply = <&cam_avdd_gpio_regulator>;
+		cam_vdig-supply = <&cam_dvdd_gpio_regulator>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 0 0 0>;
+		rgltr-max-voltage = <0 0 0 0>;
+		rgltr-load-current = <0 0 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk1_active
+				 &cam_sensor_front_active>;
+		pinctrl-1 = <&cam_sensor_mclk1_suspend
+				 &cam_sensor_front_suspend>;
+		gpios = <&tlmm 14 0>,
+			<&tlmm 28 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
+					"CAM_RESET1";
+		sensor-position = <0>;
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	eeprom_front: qcom,eeprom@2 {
+		cell-index = <2>;
+		reg = <0x2>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&cam_iovdd_gpio_regulator>;
+		cam_vana-supply = <&cam_avdd_gpio_regulator>;
+		cam_vdig-supply = <&cam_dvdd_gpio_regulator>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 0 0 0>;
+		rgltr-max-voltage = <0 0 0 0>;
+		rgltr-load-current = <0 0 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk2_active
+				 &cam_sensor_rear2_active>;
+		pinctrl-1 = <&cam_sensor_mclk2_suspend
+				 &cam_sensor_rear2_suspend>;
+		gpios = <&tlmm 15 0>,
+			<&tlmm 9 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK2",
+					"CAM_RESET2";
+		sensor-position = <1>;
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor@0 {
+		cell-index = <0>;
+		compatible = "qcom,cam-sensor";
+		reg = <0x0>;
+		csiphy-sd-index = <0>;
+		sensor-position-roll = <90>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <180>;
+		led-flash-src = <&led_flash_rear>;
+		actuator-src = <&actuator_rear>;
+		eeprom-src = <&eeprom_rear>;
+		cam_vio-supply = <&cam_iovdd_gpio_regulator>;
+		cam_vana-supply = <&cam_rear_avdd_gpio_regulator>;
+		cam_vdig-supply = <&cam_rear_dvdd_gpio_regulator>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 0 0 0>;
+		rgltr-max-voltage = <0 0 0 0>;
+		rgltr-load-current = <0 0 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_active
+				&cam_sensor_rear_active>;
+		pinctrl-1 = <&cam_sensor_mclk0_suspend
+				&cam_sensor_rear_suspend>;
+		gpios = <&tlmm 13 0>,
+			<&tlmm 30 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK0",
+					"CAM_RESET0";
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor@1 {
+		cell-index = <1>;
+		compatible = "qcom,cam-sensor";
+		reg = <0x1>;
+		csiphy-sd-index = <1>;
+		sensor-position-roll = <90>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <180>;
+		led-flash-src = <&led_flash_rear>;
+		actuator-src = <&actuator_rear_aux>;
+		eeprom-src = <&eeprom_rear_aux>;
+		cam_vio-supply = <&cam_iovdd_gpio_regulator>;
+		cam_vana-supply = <&cam_avdd_gpio_regulator>;
+		cam_vdig-supply = <&cam_dvdd_gpio_regulator>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 0 0 0>;
+		rgltr-max-voltage = <0 0 0 0>;
+		rgltr-load-current = <0 0 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk1_active
+				&cam_sensor_front_active>;
+		pinctrl-1 = <&cam_sensor_mclk1_suspend
+				&cam_sensor_front_suspend>;
+		gpios = <&tlmm 14 0>,
+			<&tlmm 28 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
+					"CAM_RESET1";
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor@2 {
+		cell-index = <2>;
+		compatible = "qcom,cam-sensor";
+		reg = <0x02>;
+		csiphy-sd-index = <2>;
+		sensor-position-roll = <270>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <0>;
+		eeprom-src = <&eeprom_front>;
+		actuator-src = <&actuator_front>;
+		cam_vio-supply = <&cam_iovdd_gpio_regulator>;
+		cam_vana-supply = <&cam_avdd_gpio_regulator>;
+		cam_vdig-supply = <&cam_dvdd_gpio_regulator>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 0 0 0>;
+		rgltr-max-voltage = <0 0 0 0>;
+		rgltr-load-current = <0 0 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk2_active
+				 &cam_sensor_rear2_active>;
+		pinctrl-1 = <&cam_sensor_mclk2_suspend
+				 &cam_sensor_rear2_suspend>;
+		gpios = <&tlmm 15 0>,
+			<&tlmm 9 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK2",
+					"CAM_RESET2";
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+};
+
+&pm660l_gpios {
+	gpio@c300 { /* GPIO4 - CAMERA SENSOR 1/2 VDIG */
+		qcom,mode = <1>;                /* Output */
+		qcom,pull = <5>;                /* No Pull */
+		qcom,vin-sel = <0>;             /* VIN1 GPIO_LV */
+		qcom,src-sel = <0>;             /* GPIO */
+		qcom,invert = <0>;              /* No Invert */
+		qcom,master-en = <1>;           /* Enable GPIO */
+		status = "ok";
+	};
+
+	gpio@c200 { /* GPIO3 - CAMERA SENSOR 0 VDIG */
+		qcom,mode = <1>;                /* Output */
+		qcom,pull = <5>;                /* No Pull */
+		qcom,vin-sel = <0>;             /* VIN1 GPIO_LV */
+		qcom,src-sel = <0>;             /* GPIO */
+		qcom,invert = <0>;              /* No Invert */
+		qcom,master-en = <1>;           /* Enable GPIO */
+		status = "ok";
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm670-camera.dtsi
new file mode 100644
index 0000000..34b8740
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-camera.dtsi
@@ -0,0 +1,1065 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	qcom,cam-req-mgr {
+		compatible = "qcom,cam-req-mgr";
+		status = "ok";
+	};
+
+	cam_csiphy0: qcom,csiphy@ac65000 {
+		cell-index = <0>;
+		compatible = "qcom,csiphy-v1.0", "qcom,csiphy";
+		reg = <0x0ac65000 0x1000>;
+		reg-names = "csiphy";
+		reg-cam-base = <0x65000>;
+		interrupts = <0 477 0>;
+		interrupt-names = "csiphy";
+		regulator-names = "gdscr", "refgen";
+		gdscr-supply = <&titan_top_gdsc>;
+		refgen-supply = <&refgen>;
+		csi-vdd-voltage = <1200000>;
+		mipi-csi-vdd-supply = <&pm660_l1>;
+		clocks = <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
+			<&clock_camcc CAM_CC_CSIPHY0_CLK>,
+			<&clock_camcc CAM_CC_CSI0PHYTIMER_CLK_SRC>,
+			<&clock_camcc CAM_CC_CSI0PHYTIMER_CLK>;
+		clock-names = "camnoc_axi_clk",
+			"soc_ahb_clk",
+			"slow_ahb_src_clk",
+			"cpas_ahb_clk",
+			"cphy_rx_clk_src",
+			"csiphy0_clk",
+			"csi0phytimer_clk_src",
+			"csi0phytimer_clk";
+		clock-cntl-level = "turbo";
+		clock-rates =
+			<0 0 0 0 384000000 0 269333333 0>;
+		status = "ok";
+	};
+
+	cam_csiphy1: qcom,csiphy@ac66000 {
+		cell-index = <1>;
+		compatible = "qcom,csiphy-v1.0", "qcom,csiphy";
+		reg = <0xac66000 0x1000>;
+		reg-names = "csiphy";
+		reg-cam-base = <0x66000>;
+		interrupts = <0 478 0>;
+		interrupt-names = "csiphy";
+		regulator-names = "gdscr", "refgen";
+		gdscr-supply = <&titan_top_gdsc>;
+		refgen-supply = <&refgen>;
+		csi-vdd-voltage = <1200000>;
+		mipi-csi-vdd-supply = <&pm660_l1>;
+		clocks = <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
+			<&clock_camcc CAM_CC_CSIPHY1_CLK>,
+			<&clock_camcc CAM_CC_CSI1PHYTIMER_CLK_SRC>,
+			<&clock_camcc CAM_CC_CSI1PHYTIMER_CLK>;
+		clock-names = "camnoc_axi_clk",
+			"soc_ahb_clk",
+			"slow_ahb_src_clk",
+			"cpas_ahb_clk",
+			"cphy_rx_clk_src",
+			"csiphy1_clk",
+			"csi1phytimer_clk_src",
+			"csi1phytimer_clk";
+		clock-cntl-level = "turbo";
+		clock-rates =
+			<0 0 0 0 384000000 0 269333333 0>;
+
+		status = "ok";
+	};
+
+	cam_csiphy2: qcom,csiphy@ac67000 {
+		cell-index = <2>;
+		compatible = "qcom,csiphy-v1.0", "qcom,csiphy";
+		reg = <0xac67000 0x1000>;
+		reg-names = "csiphy";
+		reg-cam-base = <0x67000>;
+		interrupts = <0 479 0>;
+		interrupt-names = "csiphy";
+		regulator-names = "gdscr", "refgen";
+		gdscr-supply = <&titan_top_gdsc>;
+		refgen-supply = <&refgen>;
+		csi-vdd-voltage = <1200000>;
+		mipi-csi-vdd-supply = <&pm660_l1>;
+		clocks = <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
+			<&clock_camcc CAM_CC_CSIPHY2_CLK>,
+			<&clock_camcc CAM_CC_CSI2PHYTIMER_CLK_SRC>,
+			<&clock_camcc CAM_CC_CSI2PHYTIMER_CLK>;
+		clock-names = "camnoc_axi_clk",
+			"soc_ahb_clk",
+			"slow_ahb_src_clk",
+			"cpas_ahb_clk",
+			"cphy_rx_clk_src",
+			"csiphy2_clk",
+			"csi2phytimer_clk_src",
+			"csi2phytimer_clk";
+		clock-cntl-level = "turbo";
+		clock-rates =
+			<0 0 0 0 384000000 0 269333333 0>;
+		status = "ok";
+	};
+
+	cam_cci: qcom,cci@ac4a000 {
+		cell-index = <0>;
+		compatible = "qcom,cci";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0xac4a000 0x4000>;
+		reg-names = "cci";
+		reg-cam-base = <0x4a000>;
+		interrupt-names = "cci";
+		interrupts = <0 460 0>;
+		status = "ok";
+		gdscr-supply = <&titan_top_gdsc>;
+		regulator-names = "gdscr";
+		clocks = <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_CCI_CLK>,
+			<&clock_camcc CAM_CC_CCI_CLK_SRC>;
+		clock-names = "camnoc_axi_clk",
+			"soc_ahb_clk",
+			"slow_ahb_src_clk",
+			"cpas_ahb_clk",
+			"cci_clk",
+			"cci_clk_src";
+		src-clock-name = "cci_clk_src";
+		clock-cntl-level = "lowsvs";
+		clock-rates = <0 0 0 0 0 37500000>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cci0_active &cci1_active>;
+		pinctrl-1 = <&cci0_suspend &cci1_suspend>;
+		gpios = <&tlmm 17 0>,
+			<&tlmm 18 0>,
+			<&tlmm 19 0>,
+			<&tlmm 20 0>;
+		gpio-req-tbl-num = <0 1 2 3>;
+		gpio-req-tbl-flags = <1 1 1 1>;
+		gpio-req-tbl-label = "CCI_I2C_DATA0",
+					"CCI_I2C_CLK0",
+					"CCI_I2C_DATA1",
+					"CCI_I2C_CLK1";
+
+		i2c_freq_100Khz: qcom,i2c_standard_mode {
+			hw-thigh = <201>;
+			hw-tlow = <174>;
+			hw-tsu-sto = <204>;
+			hw-tsu-sta = <231>;
+			hw-thd-dat = <22>;
+			hw-thd-sta = <162>;
+			hw-tbuf = <227>;
+			hw-scl-stretch-en = <0>;
+			hw-trdhld = <6>;
+			hw-tsp = <3>;
+			cci-clk-src = <37500000>;
+			status = "ok";
+		};
+
+		i2c_freq_400Khz: qcom,i2c_fast_mode {
+			hw-thigh = <38>;
+			hw-tlow = <56>;
+			hw-tsu-sto = <40>;
+			hw-tsu-sta = <40>;
+			hw-thd-dat = <22>;
+			hw-thd-sta = <35>;
+			hw-tbuf = <62>;
+			hw-scl-stretch-en = <0>;
+			hw-trdhld = <6>;
+			hw-tsp = <3>;
+			cci-clk-src = <37500000>;
+			status = "ok";
+		};
+
+		i2c_freq_custom: qcom,i2c_custom_mode {
+			hw-thigh = <38>;
+			hw-tlow = <56>;
+			hw-tsu-sto = <40>;
+			hw-tsu-sta = <40>;
+			hw-thd-dat = <22>;
+			hw-thd-sta = <35>;
+			hw-tbuf = <62>;
+			hw-scl-stretch-en = <1>;
+			hw-trdhld = <6>;
+			hw-tsp = <3>;
+			cci-clk-src = <37500000>;
+			status = "ok";
+		};
+
+		i2c_freq_1Mhz: qcom,i2c_fast_plus_mode {
+			hw-thigh = <16>;
+			hw-tlow = <22>;
+			hw-tsu-sto = <17>;
+			hw-tsu-sta = <18>;
+			hw-thd-dat = <16>;
+			hw-thd-sta = <15>;
+			hw-tbuf = <24>;
+			hw-scl-stretch-en = <0>;
+			hw-trdhld = <3>;
+			hw-tsp = <3>;
+			cci-clk-src = <37500000>;
+			status = "ok";
+		};
+	};
+
+	qcom,cam_smmu {
+		compatible = "qcom,msm-cam-smmu";
+		status = "ok";
+
+		msm_cam_smmu_ife {
+			compatible = "qcom,msm-cam-smmu-cb";
+			iommus = <&apps_smmu 0x808 0x0>,
+				<&apps_smmu 0x810 0x8>,
+				<&apps_smmu 0xc08 0x0>,
+				<&apps_smmu 0xc10 0x8>;
+			label = "ife";
+			ife_iova_mem_map: iova-mem-map {
+				/* IO region is approximately 3.4 GB */
+				iova-mem-region-io {
+					iova-region-name = "io";
+					iova-region-start = <0x7400000>;
+					iova-region-len = <0xd8c00000>;
+					iova-region-id = <0x3>;
+					status = "ok";
+				};
+			};
+		};
+
+		msm_cam_smmu_jpeg {
+			compatible = "qcom,msm-cam-smmu-cb";
+			iommus = <&apps_smmu 0x1060 0x8>,
+				<&apps_smmu 0x1068 0x8>;
+			label = "jpeg";
+			jpeg_iova_mem_map: iova-mem-map {
+				/* IO region is approximately 3.4 GB */
+				iova-mem-region-io {
+					iova-region-name = "io";
+					iova-region-start = <0x7400000>;
+					iova-region-len = <0xd8c00000>;
+					iova-region-id = <0x3>;
+					status = "ok";
+				};
+			};
+		};
+
+		msm_cam_icp_fw {
+			compatible = "qcom,msm-cam-smmu-fw-dev";
+			label = "icp";
+			memory-region = <&pil_camera_mem>;
+		};
+
+		msm_cam_smmu_icp {
+			compatible = "qcom,msm-cam-smmu-cb";
+			iommus = <&apps_smmu 0x107A 0x0>,
+				<&apps_smmu 0x1020 0x8>,
+				<&apps_smmu 0x1040 0x8>,
+				<&apps_smmu 0x1030 0x0>,
+				<&apps_smmu 0x1050 0x0>;
+			label = "icp";
+			icp_iova_mem_map: iova-mem-map {
+				iova-mem-region-firmware {
+					/* Firmware region is 5MB */
+					iova-region-name = "firmware";
+					iova-region-start = <0x0>;
+					iova-region-len = <0x500000>;
+					iova-region-id = <0x0>;
+					status = "ok";
+				};
+
+				iova-mem-region-shared {
+					/* Shared region is 100MB long */
+					iova-region-name = "shared";
+					iova-region-start = <0x7400000>;
+					iova-region-len = <0x6400000>;
+					iova-region-id = <0x1>;
+					iova-granularity = <0x15>;
+					status = "ok";
+				};
+
+				iova-mem-region-secondary-heap {
+					/* Secondary heap region is 1MB long */
+					iova-region-name = "secheap";
+					iova-region-start = <0xd800000>;
+					iova-region-len = <0x100000>;
+					iova-region-id = <0x4>;
+					status = "ok";
+				};
+
+				iova-mem-region-io {
+					/* IO region is approximately 3.3 GB */
+					iova-region-name = "io";
+					iova-region-start = <0xd900000>;
+					iova-region-len = <0xd2700000>;
+					iova-region-id = <0x3>;
+					status = "ok";
+				};
+			};
+		};
+
+		msm_cam_smmu_cpas_cdm {
+			compatible = "qcom,msm-cam-smmu-cb";
+			iommus = <&apps_smmu 0x1000 0x0>;
+			label = "cpas-cdm0";
+			cpas_cdm_iova_mem_map: iova-mem-map {
+				iova-mem-region-io {
+					/* IO region is approximately 3.4 GB */
+					iova-region-name = "io";
+					iova-region-start = <0x7400000>;
+					iova-region-len = <0xd8c00000>;
+					iova-region-id = <0x3>;
+					status = "ok";
+				};
+			};
+		};
+
+		msm_cam_smmu_secure {
+			compatible = "qcom,msm-cam-smmu-cb";
+			label = "cam-secure";
+			qcom,secure-cb;
+		};
+
+		msm_cam_smmu_fd {
+			compatible = "qcom,msm-cam-smmu-cb";
+			iommus = <&apps_smmu 0x1070 0x0>;
+			label = "fd";
+			fd_iova_mem_map: iova-mem-map {
+				iova-mem-region-io {
+					/* IO region is approximately 3.4 GB */
+					iova-region-name = "io";
+					iova-region-start = <0x7400000>;
+					iova-region-len = <0xd8c00000>;
+					iova-region-id = <0x3>;
+					status = "ok";
+				};
+			};
+		};
+	};
+
+	qcom,cam-cpas@ac40000 {
+		cell-index = <0>;
+		compatible = "qcom,cam-cpas";
+		label = "cpas";
+		arch-compat = "cpas_top";
+		status = "ok";
+		reg-names = "cam_cpas_top", "cam_camnoc";
+		reg = <0xac40000 0x1000>,
+			<0xac42000 0x5000>;
+		reg-cam-base = <0x40000 0x42000>;
+		interrupt-names = "cpas_camnoc";
+		interrupts = <0 459 0>;
+		qcom,cpas-hw-ver = <0x170110>; /* Titan v170 v1.1.0 */
+		regulator-names = "camss-vdd";
+		camss-vdd-supply = <&titan_top_gdsc>;
+		clock-names = "gcc_ahb_clk",
+			"gcc_axi_clk",
+			"soc_ahb_clk",
+			"slow_ahb_clk_src",
+			"cpas_ahb_clk",
+			"camnoc_axi_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
+		src-clock-name = "slow_ahb_clk_src";
+		clock-rates = <0 0 0 0 0 0>,
+			<0 0 0 19200000 0 0>,
+			<0 0 0 80000000 0 0>,
+			<0 0 0 80000000 0 0>,
+			<0 0 0 80000000 0 0>,
+			<0 0 0 80000000 0 0>,
+			<0 0 0 80000000 0 0>;
+		clock-cntl-level = "suspend", "minsvs", "lowsvs", "svs",
+			"svs_l1", "nominal", "turbo";
+		qcom,msm-bus,name = "cam_ahb";
+		qcom,msm-bus,num-cases = <7>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 0>,
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 76500>,
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 120000>,
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 150000>,
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 150000>,
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 300000>,
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 300000>;
+		vdd-corners = <RPMH_REGULATOR_LEVEL_OFF
+			RPMH_REGULATOR_LEVEL_RETENTION
+			RPMH_REGULATOR_LEVEL_MIN_SVS
+			RPMH_REGULATOR_LEVEL_LOW_SVS
+			RPMH_REGULATOR_LEVEL_SVS
+			RPMH_REGULATOR_LEVEL_SVS_L1
+			RPMH_REGULATOR_LEVEL_NOM
+			RPMH_REGULATOR_LEVEL_NOM_L1
+			RPMH_REGULATOR_LEVEL_NOM_L2
+			RPMH_REGULATOR_LEVEL_TURBO
+			RPMH_REGULATOR_LEVEL_TURBO_L1>;
+		vdd-corner-ahb-mapping = "suspend", "suspend",
+			"minsvs", "lowsvs", "svs", "svs_l1",
+			"nominal", "nominal", "nominal",
+			"turbo", "turbo";
+		client-id-based;
+		client-names =
+			"csiphy0", "csiphy1", "csiphy2", "cci0",
+			"csid0", "csid1", "csid2",
+			"ife0", "ife1", "ife2", "ipe0",
+			"ipe1", "cam-cdm-intf0", "cpas-cdm0", "bps0",
+			"icp0", "jpeg-dma0", "jpeg-enc0", "fd0";
+		client-axi-port-names =
+			"cam_hf_1", "cam_hf_2", "cam_hf_2", "cam_sf_1",
+			"cam_hf_1", "cam_hf_2", "cam_hf_2",
+			"cam_hf_1", "cam_hf_2", "cam_hf_2", "cam_sf_1",
+			"cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1",
+			"cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1";
+		client-bus-camnoc-based;
+		qcom,axi-port-list {
+			qcom,axi-port1 {
+				qcom,axi-port-name = "cam_hf_1";
+				qcom,axi-port-mnoc {
+					qcom,msm-bus,name = "cam_hf_1_mnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+					<MSM_BUS_MASTER_CAMNOC_HF0
+					MSM_BUS_SLAVE_EBI_CH0 0 0>,
+					<MSM_BUS_MASTER_CAMNOC_HF0
+					MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+				qcom,axi-port-camnoc {
+					qcom,msm-bus,name = "cam_hf_1_camnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+					<MSM_BUS_MASTER_CAMNOC_HF0_UNCOMP
+					MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>,
+					<MSM_BUS_MASTER_CAMNOC_HF0_UNCOMP
+					MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>;
+				};
+			};
+			qcom,axi-port2 {
+				qcom,axi-port-name = "cam_hf_2";
+				qcom,axi-port-mnoc {
+					qcom,msm-bus,name = "cam_hf_2_mnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+					<MSM_BUS_MASTER_CAMNOC_HF1
+					MSM_BUS_SLAVE_EBI_CH0 0 0>,
+					<MSM_BUS_MASTER_CAMNOC_HF1
+					MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+				qcom,axi-port-camnoc {
+					qcom,msm-bus,name = "cam_hf_2_camnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+					<MSM_BUS_MASTER_CAMNOC_HF1_UNCOMP
+					MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>,
+					<MSM_BUS_MASTER_CAMNOC_HF1_UNCOMP
+					MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>;
+				};
+			};
+			qcom,axi-port3 {
+				qcom,axi-port-name = "cam_sf_1";
+				qcom,axi-port-mnoc {
+					qcom,msm-bus,name = "cam_sf_1_mnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+					<MSM_BUS_MASTER_CAMNOC_SF
+					MSM_BUS_SLAVE_EBI_CH0 0 0>,
+					<MSM_BUS_MASTER_CAMNOC_SF
+					MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+				qcom,axi-port-camnoc {
+					qcom,msm-bus,name = "cam_sf_1_camnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+					<MSM_BUS_MASTER_CAMNOC_SF_UNCOMP
+					MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>,
+					<MSM_BUS_MASTER_CAMNOC_SF_UNCOMP
+					MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>;
+				};
+			};
+		};
+	};
+
+	qcom,cam-cdm-intf {
+		compatible = "qcom,cam-cdm-intf";
+		cell-index = <0>;
+		label = "cam-cdm-intf";
+		num-hw-cdm = <1>;
+		cdm-client-names = "vfe",
+			"jpegdma",
+			"jpegenc",
+			"fd";
+		status = "ok";
+	};
+
+	qcom,cpas-cdm0@ac48000 {
+		cell-index = <0>;
+		compatible = "qcom,cam170-cpas-cdm0";
+		label = "cpas-cdm";
+		reg = <0xac48000 0x1000>;
+		reg-names = "cpas-cdm";
+		reg-cam-base = <0x48000>;
+		interrupts = <0 461 0>;
+		interrupt-names = "cpas-cdm";
+		regulator-names = "camss";
+		camss-supply = <&titan_top_gdsc>;
+		clock-names = "gcc_camera_ahb",
+			"gcc_camera_axi",
+			"cam_cc_soc_ahb_clk",
+			"cam_cc_cpas_ahb_clk",
+			"cam_cc_camnoc_axi_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
+		clock-rates = <0 0 0 0 0>;
+		clock-cntl-level = "svs";
+		cdm-client-names = "ife";
+		status = "ok";
+	};
+
+	qcom,cam-isp {
+		compatible = "qcom,cam-isp";
+		arch-compat = "ife";
+		status = "ok";
+	};
+
+	cam_csid0: qcom,csid0@acb3000 {
+		cell-index = <0>;
+		compatible = "qcom,csid170";
+		reg-names = "csid";
+		reg = <0xacb3000 0x1000>;
+		reg-cam-base = <0xb3000>;
+		interrupt-names = "csid";
+		interrupts = <0 464 0>;
+		regulator-names = "camss", "ife0";
+		camss-supply = <&titan_top_gdsc>;
+		ife0-supply = <&ife_0_gdsc>;
+		clock-names = "camera_ahb",
+			"camera_axi",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"slow_ahb_clk_src",
+			"ife_csid_clk",
+			"ife_csid_clk_src",
+			"ife_cphy_rx_clk",
+			"cphy_rx_clk_src",
+			"ife_clk",
+			"ife_clk_src",
+			"camnoc_axi_clk",
+			"ife_axi_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_0_CSID_CLK>,
+			<&clock_camcc CAM_CC_IFE_0_CSID_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_0_CPHY_RX_CLK>,
+			<&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_0_CLK>,
+			<&clock_camcc CAM_CC_IFE_0_CLK_SRC>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+			<&clock_camcc CAM_CC_IFE_0_AXI_CLK>;
+		clock-rates =
+			<0 0 0 0 0 0 384000000 0 0 0 404000000 0 0>,
+			<0 0 0 0 0 0 538000000 0 0 0 600000000 0 0>;
+		clock-cntl-level = "svs", "turbo";
+		src-clock-name = "ife_csid_clk_src";
+		status = "ok";
+	};
+
+	cam_vfe0: qcom,vfe0@acaf000 {
+		cell-index = <0>;
+		compatible = "qcom,vfe170";
+		reg-names = "ife";
+		reg = <0xacaf000 0x4000>;
+		reg-cam-base = <0xaf000>;
+		interrupt-names = "ife";
+		interrupts = <0 465 0>;
+		regulator-names = "camss", "ife0";
+		camss-supply = <&titan_top_gdsc>;
+		ife0-supply = <&ife_0_gdsc>;
+		clock-names = "camera_ahb",
+			"camera_axi",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"slow_ahb_clk_src",
+			"ife_clk",
+			"ife_clk_src",
+			"camnoc_axi_clk",
+			"ife_axi_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_0_CLK>,
+			<&clock_camcc CAM_CC_IFE_0_CLK_SRC>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+			<&clock_camcc CAM_CC_IFE_0_AXI_CLK>;
+		clock-rates =
+			<0 0 0 0 0 0 404000000 0 0>,
+			<0 0 0 0 0 0 480000000 0 0>,
+			<0 0 0 0 0 0 600000000 0 0>;
+		clock-cntl-level = "svs", "svs_l1", "turbo";
+		src-clock-name = "ife_clk_src";
+		clock-names-option = "ife_dsp_clk";
+		clocks-option = <&clock_camcc CAM_CC_IFE_0_DSP_CLK>;
+		clock-rates-option = <600000000>;
+		status = "ok";
+	};
+
+	cam_csid1: qcom,csid1@acba000 {
+		cell-index = <1>;
+		compatible = "qcom,csid170";
+		reg-names = "csid";
+		reg = <0xacba000 0x1000>;
+		reg-cam-base = <0xba000>;
+		interrupt-names = "csid";
+		interrupts = <0 466 0>;
+		regulator-names = "camss", "ife1";
+		camss-supply = <&titan_top_gdsc>;
+		ife1-supply = <&ife_1_gdsc>;
+		clock-names = "camera_ahb",
+			"camera_axi",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"slow_ahb_clk_src",
+			"ife_csid_clk",
+			"ife_csid_clk_src",
+			"ife_cphy_rx_clk",
+			"cphy_rx_clk_src",
+			"ife_clk",
+			"ife_clk_src",
+			"camnoc_axi_clk",
+			"ife_axi_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_1_CSID_CLK>,
+			<&clock_camcc CAM_CC_IFE_1_CSID_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_1_CPHY_RX_CLK>,
+			<&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_1_CLK>,
+			<&clock_camcc CAM_CC_IFE_1_CLK_SRC>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+			<&clock_camcc CAM_CC_IFE_1_AXI_CLK>;
+		clock-rates =
+			<0 0 0 0 0 0 384000000 0 0 0 404000000 0 0>,
+			<0 0 0 0 0 0 538000000 0 0 0 600000000 0 0>;
+		clock-cntl-level = "svs", "turbo";
+		src-clock-name = "ife_csid_clk_src";
+		status = "ok";
+	};
+
+	cam_vfe1: qcom,vfe1@acb6000 {
+		cell-index = <1>;
+		compatible = "qcom,vfe170";
+		reg-names = "ife";
+		reg = <0xacb6000 0x4000>;
+		reg-cam-base = <0xb6000>;
+		interrupt-names = "ife";
+		interrupts = <0 467 0>;
+		regulator-names = "camss", "ife1";
+		camss-supply = <&titan_top_gdsc>;
+		ife1-supply = <&ife_1_gdsc>;
+		clock-names = "camera_ahb",
+			"camera_axi",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"slow_ahb_clk_src",
+			"ife_clk",
+			"ife_clk_src",
+			"camnoc_axi_clk",
+			"ife_axi_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_1_CLK>,
+			<&clock_camcc CAM_CC_IFE_1_CLK_SRC>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+			<&clock_camcc CAM_CC_IFE_1_AXI_CLK>;
+		clock-rates =
+			<0 0 0 0 0 0 404000000 0 0>,
+			<0 0 0 0 0 0 480000000 0 0>,
+			<0 0 0 0 0 0 600000000 0 0>;
+		clock-cntl-level = "svs", "svs_l1", "turbo";
+		src-clock-name = "ife_clk_src";
+		clock-names-option = "ife_dsp_clk";
+		clocks-option = <&clock_camcc CAM_CC_IFE_1_DSP_CLK>;
+		clock-rates-option = <600000000>;
+		status = "ok";
+	};
+
+	cam_csid_lite: qcom,csid-lite@acc8000 {
+		cell-index = <2>;
+		compatible = "qcom,csid-lite170";
+		reg-names = "csid-lite";
+		reg = <0xacc8000 0x1000>;
+		reg-cam-base = <0xc8000>;
+		interrupt-names = "csid-lite";
+		interrupts = <0 468 0>;
+		regulator-names = "camss";
+		camss-supply = <&titan_top_gdsc>;
+		clock-names = "camera_ahb",
+			"camera_axi",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"slow_ahb_clk_src",
+			"ife_csid_clk",
+			"ife_csid_clk_src",
+			"ife_cphy_rx_clk",
+			"cphy_rx_clk_src",
+			"ife_clk",
+			"ife_clk_src",
+			"camnoc_axi_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_LITE_CSID_CLK>,
+			<&clock_camcc CAM_CC_IFE_LITE_CSID_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_LITE_CPHY_RX_CLK>,
+			<&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_LITE_CLK>,
+			<&clock_camcc CAM_CC_IFE_LITE_CLK_SRC>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
+		clock-rates =
+			<0 0 0 0 0 0 384000000 0 0 0 404000000 0>,
+			<0 0 0 0 0 0 538000000 0 0 0 600000000 0>;
+		clock-cntl-level = "svs", "turbo";
+		src-clock-name = "ife_csid_clk_src";
+		status = "ok";
+	};
+
+	cam_vfe_lite: qcom,vfe-lite@acc4000 {
+		cell-index = <2>;
+		compatible = "qcom,vfe-lite170";
+		reg-names = "ife-lite";
+		reg = <0xacc4000 0x4000>;
+		reg-cam-base = <0xc4000>;
+		interrupt-names = "ife-lite";
+		interrupts = <0 469 0>;
+		regulator-names = "camss";
+		camss-supply = <&titan_top_gdsc>;
+		clock-names = "camera_ahb",
+			"camera_axi",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"slow_ahb_clk_src",
+			"ife_clk",
+			"ife_clk_src",
+			"camnoc_axi_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_LITE_CLK>,
+			<&clock_camcc CAM_CC_IFE_LITE_CLK_SRC>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
+		clock-rates =
+			<0 0 0 0 0 0 404000000 0>,
+			<0 0 0 0 0 0 480000000 0>,
+			<0 0 0 0 0 0 600000000 0>;
+		clock-cntl-level = "svs", "svs_l1", "turbo";
+		src-clock-name = "ife_clk_src";
+		status = "ok";
+	};
+
+	qcom,cam-icp {
+		compatible = "qcom,cam-icp";
+		compat-hw-name = "qcom,a5",
+			"qcom,ipe0",
+			"qcom,ipe1",
+			"qcom,bps";
+		num-a5 = <1>;
+		num-ipe = <2>;
+		num-bps = <1>;
+		status = "ok";
+	};
+
+	cam_a5: qcom,a5@ac00000 {
+		cell-index = <0>;
+		compatible = "qcom,cam-a5";
+		reg = <0xac00000 0x6000>,
+			<0xac10000 0x8000>,
+			<0xac18000 0x3000>;
+		reg-names = "a5_qgic", "a5_sierra", "a5_csr";
+		reg-cam-base = <0x00000 0x10000 0x18000>;
+		interrupts = <0 463 0>;
+		interrupt-names = "a5";
+		regulator-names = "camss-vdd";
+		camss-vdd-supply = <&titan_top_gdsc>;
+		clock-names = "gcc_cam_ahb_clk",
+			"gcc_cam_axi_clk",
+			"soc_fast_ahb",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"camnoc_axi_clk",
+			"icp_apb_clk",
+			"icp_clk",
+			"icp_clk_src";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+				<&clock_gcc GCC_CAMERA_AXI_CLK>,
+				<&clock_camcc CAM_CC_FAST_AHB_CLK_SRC>,
+				<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+				<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+				<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+				<&clock_camcc CAM_CC_ICP_APB_CLK>,
+				<&clock_camcc CAM_CC_ICP_CLK>,
+				<&clock_camcc CAM_CC_ICP_CLK_SRC>;
+
+		clock-rates =
+			<0 0 200000000 0 0 0 0 400000000>,
+			<0 0 200000000 0 0 0 0 600000000>;
+		clock-cntl-level = "svs", "turbo";
+		fw_name = "CAMERA_ICP.elf";
+		ubwc-cfg = <0x77 0x1DF>;
+		status = "ok";
+	};
+
+	cam_ipe0: qcom,ipe0 {
+		cell-index = <0>;
+		compatible = "qcom,cam-ipe";
+		regulator-names = "ipe0-vdd";
+		ipe0-vdd-supply = <&ipe_0_gdsc>;
+		clock-names = "ipe_0_ahb_clk",
+			"ipe_0_areg_clk",
+			"ipe_0_axi_clk",
+			"ipe_0_clk",
+			"ipe_0_clk_src";
+		src-clock-name = "ipe_0_clk_src";
+		clocks = <&clock_camcc CAM_CC_IPE_0_AHB_CLK>,
+				<&clock_camcc CAM_CC_IPE_0_AREG_CLK>,
+				<&clock_camcc CAM_CC_IPE_0_AXI_CLK>,
+				<&clock_camcc CAM_CC_IPE_0_CLK>,
+				<&clock_camcc CAM_CC_IPE_0_CLK_SRC>;
+
+		clock-rates = <0 0 0 0 240000000>,
+			<0 0 0 0 404000000>,
+			<0 0 0 0 480000000>,
+			<0 0 0 0 538000000>,
+			<0 0 0 0 600000000>;
+		clock-cntl-level = "lowsvs", "svs",
+			"svs_l1", "nominal", "turbo";
+		status = "ok";
+	};
+
+	cam_ipe1: qcom,ipe1 {
+		cell-index = <1>;
+		compatible = "qcom,cam-ipe";
+		regulator-names = "ipe1-vdd";
+		ipe1-vdd-supply = <&ipe_1_gdsc>;
+		clock-names = "ipe_1_ahb_clk",
+			"ipe_1_areg_clk",
+			"ipe_1_axi_clk",
+			"ipe_1_clk",
+			"ipe_1_clk_src";
+		src-clock-name = "ipe_1_clk_src";
+		clocks = <&clock_camcc CAM_CC_IPE_1_AHB_CLK>,
+				<&clock_camcc CAM_CC_IPE_1_AREG_CLK>,
+				<&clock_camcc CAM_CC_IPE_1_AXI_CLK>,
+				<&clock_camcc CAM_CC_IPE_1_CLK>,
+				<&clock_camcc CAM_CC_IPE_1_CLK_SRC>;
+
+		clock-rates = <0 0 0 0 240000000>,
+			<0 0 0 0 404000000>,
+			<0 0 0 0 480000000>,
+			<0 0 0 0 538000000>,
+			<0 0 0 0 600000000>;
+		clock-cntl-level = "lowsvs", "svs",
+			"svs_l1", "nominal", "turbo";
+		status = "ok";
+	};
+
+	cam_bps: qcom,bps {
+		cell-index = <0>;
+		compatible = "qcom,cam-bps";
+		regulator-names = "bps-vdd";
+		bps-vdd-supply = <&bps_gdsc>;
+		clock-names = "bps_ahb_clk",
+			"bps_areg_clk",
+			"bps_axi_clk",
+			"bps_clk",
+			"bps_clk_src";
+		src-clock-name = "bps_clk_src";
+		clocks = <&clock_camcc CAM_CC_BPS_AHB_CLK>,
+				<&clock_camcc CAM_CC_BPS_AREG_CLK>,
+				<&clock_camcc CAM_CC_BPS_AXI_CLK>,
+				<&clock_camcc CAM_CC_BPS_CLK>,
+				<&clock_camcc CAM_CC_BPS_CLK_SRC>;
+
+		clock-rates = <0 0 0 0 200000000>,
+			<0 0 0 0 404000000>,
+			<0 0 0 0 480000000>,
+			<0 0 0 0 600000000>,
+			<0 0 0 0 600000000>;
+		clock-cntl-level = "lowsvs", "svs",
+			"svs_l1", "nominal", "turbo";
+		status = "ok";
+	};
+
+	qcom,cam-jpeg {
+		compatible = "qcom,cam-jpeg";
+		compat-hw-name = "qcom,jpegenc",
+			"qcom,jpegdma";
+		num-jpeg-enc = <1>;
+		num-jpeg-dma = <1>;
+		status = "ok";
+	};
+
+	cam_jpeg_enc: qcom,jpegenc@ac4e000 {
+		cell-index = <0>;
+		compatible = "qcom,cam_jpeg_enc";
+		reg-names = "jpege_hw";
+		reg = <0xac4e000 0x4000>;
+		reg-cam-base = <0x4e000>;
+		interrupt-names = "jpeg";
+		interrupts = <0 474 0>;
+		regulator-names = "camss-vdd";
+		camss-vdd-supply = <&titan_top_gdsc>;
+		clock-names = "camera_ahb",
+			"camera_axi",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"camnoc_axi_clk",
+			"jpegenc_clk_src",
+			"jpegenc_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+			<&clock_camcc CAM_CC_JPEG_CLK_SRC>,
+			<&clock_camcc CAM_CC_JPEG_CLK>;
+
+		clock-rates = <0 0 0 0 0 600000000 0>;
+		src-clock-name = "jpegenc_clk_src";
+		clock-cntl-level = "nominal";
+		status = "ok";
+	};
+
+	cam_jpeg_dma: qcom,jpegdma@ac52000 {
+		cell-index = <0>;
+		compatible = "qcom,cam_jpeg_dma";
+		reg-names = "jpegdma_hw";
+		reg = <0xac52000 0x4000>;
+		reg-cam-base = <0x52000>;
+		interrupt-names = "jpegdma";
+		interrupts = <0 475 0>;
+		regulator-names = "camss-vdd";
+		camss-vdd-supply = <&titan_top_gdsc>;
+		clock-names = "camera_ahb",
+			"camera_axi",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"camnoc_axi_clk",
+			"jpegdma_clk_src",
+			"jpegdma_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+			<&clock_camcc CAM_CC_JPEG_CLK_SRC>,
+			<&clock_camcc CAM_CC_JPEG_CLK>;
+
+		clock-rates = <0 0 0 0 0 600000000 0>;
+		src-clock-name = "jpegdma_clk_src";
+		clock-cntl-level = "nominal";
+		status = "ok";
+	};
+
+	qcom,cam-fd {
+		compatible = "qcom,cam-fd";
+		compat-hw-name = "qcom,fd";
+		num-fd = <1>;
+		status = "ok";
+	};
+
+	cam_fd: qcom,fd@ac5a000 {
+		cell-index = <0>;
+		compatible = "qcom,fd41";
+		reg-names = "fd_core", "fd_wrapper";
+		reg = <0xac5a000 0x1000>,
+			<0xac5b000 0x400>;
+		reg-cam-base = <0x5a000 0x5b000>;
+		interrupt-names = "fd";
+		interrupts = <0 462 0>;
+		regulator-names = "camss-vdd";
+		camss-vdd-supply = <&titan_top_gdsc>;
+		clock-names = "gcc_ahb_clk",
+			"gcc_axi_clk",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"camnoc_axi_clk",
+			"fd_core_clk_src",
+			"fd_core_clk",
+			"fd_core_uar_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+			<&clock_camcc CAM_CC_FD_CORE_CLK_SRC>,
+			<&clock_camcc CAM_CC_FD_CORE_CLK>,
+			<&clock_camcc CAM_CC_FD_CORE_UAR_CLK>;
+		src-clock-name = "fd_core_clk_src";
+		clock-cntl-level = "svs", "svs_l1", "turbo";
+		clock-rates =
+			<0 0 0 0 0 400000000 0 0>,
+			<0 0 0 0 0 538000000 0 0>,
+			<0 0 0 0 0 600000000 0 0>;
+		status = "ok";
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
index 0320bd9..521b048 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
@@ -13,6 +13,7 @@
 #include <dt-bindings/gpio/gpio.h>
 #include "sdm670-pmic-overlay.dtsi"
 #include "sdm670-sde-display.dtsi"
+#include "sdm670-camera-sensor-cdp.dtsi"
 
 &ufsphy_mem {
 	compatible = "qcom,ufs-phy-qmp-v3";
@@ -40,6 +41,12 @@
 	status = "ok";
 };
 
+&pm660l_switch1 {
+	pinctrl-names = "led_enable", "led_disable";
+	pinctrl-0 = <&flash_led3_front_en>;
+	pinctrl-1 = <&flash_led3_front_dis>;
+};
+
 &qupv3_se9_2uart {
 	status = "disabled";
 };
@@ -266,9 +273,7 @@
 };
 
 &dsi_rm67195_amoled_fhd_cmd {
-	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
-	qcom,mdss-dsi-bl-min-level = <1>;
-	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
 	qcom,panel-supply-entries = <&dsi_panel_pwr_supply_labibb_amoled>;
 	qcom,platform-reset-gpio = <&tlmm 75 0>;
 	qcom,platform-te-gpio = <&tlmm 10 0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi
index 980ade2..34fe19f 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi
@@ -562,6 +562,7 @@
 				     <13 32>;
 		qcom,cmb-elem-size = <3 64>,
 				     <7 64>,
+				     <9 64>,
 				     <13 64>;
 
 		clocks = <&clock_aop QDSS_CLK>;
@@ -625,6 +626,15 @@
 			};
 
 			port@6 {
+				reg = <9>;
+				tpda_in_tpdm_prng: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_prng_out_tpda>;
+				};
+			};
+
+			port@7 {
 				reg = <11>;
 					tpda_in_tpdm_north: endpoint {
 					slave-mode;
@@ -633,7 +643,7 @@
 				};
 			};
 
-			port@7 {
+			port@8 {
 				reg = <12>;
 				tpda_in_tpdm_qm: endpoint {
 					slave-mode;
@@ -642,7 +652,7 @@
 				};
 			};
 
-			port@8 {
+			port@9 {
 				reg = <13>;
 				tpda_in_tpdm_pimem: endpoint {
 					slave-mode;
@@ -743,6 +753,24 @@
 		};
 	};
 
+	tpdm_prng: tpdm@684c000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b968>;
+		reg = <0x684c000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-prng";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+		port {
+			tpdm_prng_out_tpda: endpoint {
+				remote-endpoint = <&tpda_in_tpdm_prng>;
+			};
+		};
+	};
+
 	tpdm_center: tpdm@6c28000 {
 		compatible = "arm,primecell";
 		arm,primecell-periphid = <0x0003b968>;
@@ -1387,7 +1415,8 @@
 	};
 
 	cti0_wcss: cti@69a4000 {
-		compatible = "arm,coresight-cti";
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
 		reg = <0x69a4000 0x1000>;
 		reg-names = "cti-base";
 
@@ -1398,7 +1427,8 @@
 	};
 
 	cti1_wcss: cti@69a5000 {
-		compatible = "arm,coresight-cti";
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
 		reg = <0x69a5000 0x1000>;
 		reg-names = "cti-base";
 
@@ -1409,7 +1439,8 @@
 	};
 
 	cti2_wcss: cti@69a6000 {
-		compatible = "arm,coresight-cti";
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
 		reg = <0x69a6000 0x1000>;
 		reg-names = "cti-base";
 
@@ -1420,7 +1451,8 @@
 	};
 
 	cti_mss_q6: cti@683b000 {
-		compatible = "arm,coresight-cti";
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
 		reg = <0x683b000 0x1000>;
 		reg-names = "cti-base";
 
@@ -1431,7 +1463,8 @@
 	};
 
 	cti_turing: cti@6867000 {
-		compatible = "arm,coresight-cti";
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
 		reg = <0x6867000 0x1000>;
 		reg-names = "cti-base";
 
@@ -1442,7 +1475,8 @@
 	};
 
 	cti2_ssc_sdc: cti@6b10000 {
-		compatible = "arm,coresight-cti";
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
 		reg = <0x6b10000 0x1000>;
 		reg-names = "cti-base";
 
@@ -1453,7 +1487,8 @@
 	};
 
 	cti1_ssc: cti@6b11000 {
-		compatible = "arm,coresight-cti";
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
 		reg = <0x6b11000 0x1000>;
 		reg-names = "cti-base";
 
@@ -1464,7 +1499,8 @@
 	};
 
 	cti0_ssc_q6: cti@6b1b000 {
-		compatible = "arm,coresight-cti";
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
 		reg = <0x6b1b000 0x1000>;
 		reg-names = "cti-base";
 
@@ -1475,7 +1511,8 @@
 	};
 
 	cti_ssc_noc: cti@6b1e000 {
-		compatible = "arm,coresight-cti";
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
 		reg = <0x6b1e000 0x1000>;
 		reg-names = "cti-base";
 
@@ -1486,7 +1523,8 @@
 	};
 
 	cti6_ssc_noc: cti@6b1f000 {
-		compatible = "arm,coresight-cti";
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
 		reg = <0x6b1f000 0x1000>;
 		reg-names = "cti-base";
 
@@ -1497,7 +1535,8 @@
 	};
 
 	cti0_swao: cti@6b04000 {
-		compatible = "arm,coresight-cti";
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
 		reg = <0x6b04000 0x1000>;
 		reg-names = "cti-base";
 
@@ -1508,7 +1547,8 @@
 	};
 
 	cti1_swao: cti@6b05000 {
-		compatible = "arm,coresight-cti";
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
 		reg = <0x6b05000 0x1000>;
 		reg-names = "cti-base";
 
@@ -1519,7 +1559,8 @@
 	};
 
 	cti2_swao: cti@6b06000 {
-		compatible = "arm,coresight-cti";
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
 		reg = <0x6b06000 0x1000>;
 		reg-names = "cti-base";
 
@@ -1530,7 +1571,8 @@
 	};
 
 	cti3_swao: cti@6b07000 {
-		compatible = "arm,coresight-cti";
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
 		reg = <0x6b07000 0x1000>;
 		reg-names = "cti-base";
 
@@ -1541,7 +1583,8 @@
 	};
 
 	cti_aop_m3: cti@6b21000 {
-		compatible = "arm,coresight-cti";
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
 		reg = <0x6b21000 0x1000>;
 		reg-names = "cti-base";
 
@@ -1552,7 +1595,8 @@
 	};
 
 	cti_titan: cti@6c13000 {
-		compatible = "arm,coresight-cti";
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
 		reg = <0x6c13000 0x1000>;
 		reg-names = "cti-base";
 
@@ -1563,7 +1607,8 @@
 	};
 
 	cti_venus_arm9: cti@6c20000 {
-		compatible = "arm,coresight-cti";
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
 		reg = <0x6c20000 0x1000>;
 		reg-names = "cti-base";
 
@@ -1574,7 +1619,8 @@
 	};
 
 	cti0_apss: cti@78e0000 {
-		compatible = "arm,coresight-cti";
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
 		reg = <0x78e0000 0x1000>;
 		reg-names = "cti-base";
 
@@ -1585,7 +1631,8 @@
 	};
 
 	cti1_apss: cti@78f0000 {
-		compatible = "arm,coresight-cti";
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
 		reg = <0x78f0000 0x1000>;
 		reg-names = "cti-base";
 
@@ -1596,7 +1643,8 @@
 	};
 
 	cti2_apss: cti@7900000 {
-		compatible = "arm,coresight-cti";
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
 		reg = <0x7900000 0x1000>;
 		reg-names = "cti-base";
 
@@ -1951,7 +1999,7 @@
 		compatible = "qcom,coresight-remote-etm";
 
 		coresight-name = "coresight-audio-etm0";
-		qcom,inst-id = <2>;
+		qcom,inst-id = <5>;
 
 		port {
 			audio_etm0_out_funnel_in1: endpoint {
@@ -2182,6 +2230,22 @@
 		};
 	};
 
+	ipcb_tgu: tgu@6b0c000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b999>;
+		reg = <0x6b0c000 0x1000>;
+		reg-names = "tgu-base";
+		tgu-steps = <3>;
+		tgu-conditions = <4>;
+		tgu-regs = <4>;
+		tgu-timer-counters = <8>;
+
+		coresight-name = "coresight-tgu-ipcb";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
 	funnel_apss: funnel@7800000 {
 		compatible = "arm,primecell";
 		arm,primecell-periphid = <0x0003b908>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-ext-codec-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm670-ext-codec-audio-overlay.dtsi
index 775cf48..14a3e93 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-ext-codec-audio-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-ext-codec-audio-overlay.dtsi
@@ -47,16 +47,6 @@
 	status = "okay";
 };
 
-&soc {
-	wcd_buck_vreg_gpio: msm_cdc_pinctrl@94 {
-		status = "okay";
-		compatible = "qcom,msm-cdc-pinctrl";
-		pinctrl-names = "aud_active", "aud_sleep";
-		pinctrl-0 = <&wcd_buck_vsel_default>;
-		pinctrl-1 = <&wcd_buck_vsel_default>;
-	};
-};
-
 &wcd9xxx_intc {
 	status = "okay";
 };
@@ -79,8 +69,6 @@
 
 &wcd934x_cdc {
 	status = "okay";
-	qcom,has-buck-vsel-gpio;
-	qcom,buck-vsel-gpio-node = <&wcd_buck_vreg_gpio>;
 };
 
 &clock_audio_lnbb {
diff --git a/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi
index 4783396..41a66e9 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi
@@ -46,8 +46,9 @@
 		label = "kgsl-3d0";
 		compatible = "qcom,kgsl-3d0", "qcom,kgsl-3d";
 		status = "ok";
-		reg = <0x5000000 0x40000>;
-		reg-names = "kgsl_3d0_reg_memory";
+		reg = <0x5000000 0x40000
+			0x780000 0x6300>;
+		reg-names = "kgsl_3d0_reg_memory", "qfprom_memory";
 		interrupts = <0 300 0>;
 		interrupt-names = "kgsl_3d0_irq";
 		qcom,id = <0>;
@@ -126,6 +127,8 @@
 		/* Context aware jump target power level */
 		qcom,ca-target-pwrlevel = <1>;
 
+		qcom,gpu-speed-bin = <0x41a0 0x1fe00000 21>;
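+		/*
+		 * Presumed cell layout above: <fuse offset, mask, shift>;
+		 * (0x1fe00000 >> 21) selects an 8-bit speed-bin field read
+		 * from the qfprom_memory region added to the reg property.
+		 */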
+
 		/* GPU Mempools */
 		qcom,gpu-mempools {
 			#address-cells = <1>;
@@ -158,57 +161,209 @@
 			};
 		};
 
-		/* Power levels */
-		qcom,gpu-pwrlevels {
+		/*
+		 * Speed-bin zero is the default speed bin.
+		 * For the rest of the speed bins, the speed-bin value is
+		 * calculated as FMAX/4.8 MHz, rounded up to zero decimal
+		 * places.
+		 */
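+		/*
+		 * Worked example (illustrative): an FMAX of 430 MHz gives
+		 * ceil(430 / 4.8) = 90 and an FMAX of 700 MHz gives
+		 * ceil(700 / 4.8) = 146, matching the speed-bin values used
+		 * in the bins below.
+		 */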
+		qcom,gpu-pwrlevel-bins {
 			#address-cells = <1>;
 			#size-cells = <0>;
 
-			compatible = "qcom,gpu-pwrlevels";
+			compatible = "qcom,gpu-pwrlevel-bins";
 
-			/* SVS_L1 */
-			qcom,gpu-pwrlevel@0 {
-				reg = <0>;
-				qcom,gpu-freq = <430000000>;
-				qcom,bus-freq = <11>;
-				qcom,bus-min = <8>;
-				qcom,bus-max = <11>;
+			qcom,gpu-pwrlevels-0 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				qcom,speed-bin = <0>;
+
+				qcom,initial-pwrlevel = <3>;
+
+				/* SVS_L1 */
+				qcom,gpu-pwrlevel@0 {
+					reg = <0>;
+					qcom,gpu-freq = <430000000>;
+					qcom,bus-freq = <11>;
+					qcom,bus-min = <8>;
+					qcom,bus-max = <11>;
+				};
+
+				/* SVS */
+				qcom,gpu-pwrlevel@1 {
+					reg = <1>;
+					qcom,gpu-freq = <355000000>;
+					qcom,bus-freq = <8>;
+					qcom,bus-min = <5>;
+					qcom,bus-max = <9>;
+				};
+
+				/* LOW SVS */
+				qcom,gpu-pwrlevel@2 {
+					reg = <2>;
+					qcom,gpu-freq = <267000000>;
+					qcom,bus-freq = <6>;
+					qcom,bus-min = <4>;
+					qcom,bus-max = <8>;
+				};
+
+				/* MIN SVS */
+				qcom,gpu-pwrlevel@3 {
+					reg = <3>;
+					qcom,gpu-freq = <180000000>;
+					qcom,bus-freq = <4>;
+					qcom,bus-min = <3>;
+					qcom,bus-max = <4>;
+				};
+
+				/* XO */
+				qcom,gpu-pwrlevel@4 {
+					reg = <4>;
+					qcom,gpu-freq = <0>;
+					qcom,bus-freq = <0>;
+					qcom,bus-min = <0>;
+					qcom,bus-max = <0>;
+				};
 			};
 
-			/* SVS */
-			qcom,gpu-pwrlevel@1 {
-				reg = <1>;
-				qcom,gpu-freq = <355000000>;
-				qcom,bus-freq = <8>;
-				qcom,bus-min = <5>;
-				qcom,bus-max = <9>;
+			qcom,gpu-pwrlevels-1 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				qcom,speed-bin = <90>;
+
+				qcom,initial-pwrlevel = <3>;
+
+				/* SVS_L1 */
+				qcom,gpu-pwrlevel@0 {
+					reg = <0>;
+					qcom,gpu-freq = <430000000>;
+					qcom,bus-freq = <11>;
+					qcom,bus-min = <8>;
+					qcom,bus-max = <11>;
+				};
+
+				/* SVS */
+				qcom,gpu-pwrlevel@1 {
+					reg = <1>;
+					qcom,gpu-freq = <355000000>;
+					qcom,bus-freq = <8>;
+					qcom,bus-min = <5>;
+					qcom,bus-max = <9>;
+				};
+
+				/* LOW SVS */
+				qcom,gpu-pwrlevel@2 {
+					reg = <2>;
+					qcom,gpu-freq = <267000000>;
+					qcom,bus-freq = <6>;
+					qcom,bus-min = <4>;
+					qcom,bus-max = <8>;
+				};
+
+				/* MIN SVS */
+				qcom,gpu-pwrlevel@3 {
+					reg = <3>;
+					qcom,gpu-freq = <180000000>;
+					qcom,bus-freq = <4>;
+					qcom,bus-min = <3>;
+					qcom,bus-max = <4>;
+				};
+
+				/* XO */
+				qcom,gpu-pwrlevel@4 {
+					reg = <4>;
+					qcom,gpu-freq = <0>;
+					qcom,bus-freq = <0>;
+					qcom,bus-min = <0>;
+					qcom,bus-max = <0>;
+				};
+
 			};
 
-			/* LOW SVS */
-			qcom,gpu-pwrlevel@2 {
-				reg = <2>;
-				qcom,gpu-freq = <267000000>;
-				qcom,bus-freq = <6>;
-				qcom,bus-min = <4>;
-				qcom,bus-max = <8>;
+			qcom,gpu-pwrlevels-2 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				qcom,speed-bin = <146>;
+
+				qcom,initial-pwrlevel = <6>;
+
+				/* TURBO */
+				qcom,gpu-pwrlevel@0 {
+					reg = <0>;
+					qcom,gpu-freq = <700000000>;
+					qcom,bus-freq = <11>;
+					qcom,bus-min = <8>;
+					qcom,bus-max = <11>;
+				};
+
+				/* NOM_L1 */
+				qcom,gpu-pwrlevel@1 {
+					reg = <1>;
+					qcom,gpu-freq = <650000000>;
+					qcom,bus-freq = <11>;
+					qcom,bus-min = <8>;
+					qcom,bus-max = <11>;
+				};
+
+				/* NOM */
+				qcom,gpu-pwrlevel@2 {
+					reg = <2>;
+					qcom,gpu-freq = <565000000>;
+					qcom,bus-freq = <11>;
+					qcom,bus-min = <8>;
+					qcom,bus-max = <11>;
+				};
+
+				/* SVS_L1 */
+				qcom,gpu-pwrlevel@3 {
+					reg = <3>;
+					qcom,gpu-freq = <430000000>;
+					qcom,bus-freq = <11>;
+					qcom,bus-min = <8>;
+					qcom,bus-max = <11>;
+				};
+
+				/* SVS */
+				qcom,gpu-pwrlevel@4 {
+					reg = <4>;
+					qcom,gpu-freq = <355000000>;
+					qcom,bus-freq = <8>;
+					qcom,bus-min = <5>;
+					qcom,bus-max = <9>;
+				};
+
+				/* LOW SVS */
+				qcom,gpu-pwrlevel@5 {
+					reg = <5>;
+					qcom,gpu-freq = <267000000>;
+					qcom,bus-freq = <6>;
+					qcom,bus-min = <4>;
+					qcom,bus-max = <8>;
+				};
+
+				/* MIN SVS */
+				qcom,gpu-pwrlevel@6 {
+					reg = <6>;
+					qcom,gpu-freq = <180000000>;
+					qcom,bus-freq = <4>;
+					qcom,bus-min = <3>;
+					qcom,bus-max = <4>;
+				};
+
+				/* XO */
+				qcom,gpu-pwrlevel@7 {
+					reg = <7>;
+					qcom,gpu-freq = <0>;
+					qcom,bus-freq = <0>;
+					qcom,bus-min = <0>;
+					qcom,bus-max = <0>;
+				};
+
 			};
 
-			/* MIN SVS */
-			qcom,gpu-pwrlevel@3 {
-				reg = <3>;
-				qcom,gpu-freq = <180000000>;
-				qcom,bus-freq = <4>;
-				qcom,bus-min = <3>;
-				qcom,bus-max = <4>;
-			};
-
-			/* XO */
-			qcom,gpu-pwrlevel@4 {
-				reg = <4>;
-				qcom,gpu-freq = <0>;
-				qcom,bus-freq = <0>;
-				qcom,bus-min = <0>;
-				qcom,bus-max = <0>;
-			};
 		};
 
 	};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
index 57408d2..ef1fc08 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
@@ -13,6 +13,7 @@
 #include <dt-bindings/gpio/gpio.h>
 #include "sdm670-pmic-overlay.dtsi"
 #include "sdm670-sde-display.dtsi"
+#include "sdm670-camera-sensor-mtp.dtsi"
 #include "smb1355.dtsi"
 
 &ufsphy_mem {
@@ -41,6 +42,12 @@
 	status = "ok";
 };
 
+&pm660l_switch1 {
+	pinctrl-names = "led_enable", "led_disable";
+	pinctrl-0 = <&flash_led3_front_en>;
+	pinctrl-1 = <&flash_led3_front_dis>;
+};
+
 &qupv3_se9_2uart {
 	status = "disabled";
 };
@@ -321,9 +328,7 @@
 };
 
 &dsi_rm67195_amoled_fhd_cmd {
-	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
-	qcom,mdss-dsi-bl-min-level = <1>;
-	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
 	qcom,panel-supply-entries = <&dsi_panel_pwr_supply_labibb_amoled>;
 	qcom,platform-reset-gpio = <&tlmm 75 0>;
 	qcom,platform-te-gpio = <&tlmm 10 0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
index 2ddab5b..d4953c1 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
@@ -1558,6 +1558,36 @@
 			};
 		};
 
+		flash_led3_front {
+			flash_led3_front_en: flash_led3_front_en {
+				mux {
+					pins = "gpio21";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio21";
+					drive-strength = <2>;
+					output-high;
+					bias-disable;
+				};
+			};
+
+			flash_led3_front_dis: flash_led3_front_dis {
+				mux {
+					pins = "gpio21";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio21";
+					drive-strength = <2>;
+					output-low;
+					bias-disable;
+				};
+			};
+		};
+
 		/* Pinctrl setting for CAMERA GPIO key */
 		key_cam_snapshot {
 			key_cam_snapshot_default: key_cam_snapshot_default {
@@ -1685,6 +1715,281 @@
 				drive-strength = <2>;
 			};
 		};
+
+		cci0_active: cci0_active {
+			mux {
+				/* CLK, DATA */
+				pins = "gpio17","gpio18";
+				function = "cci_i2c";
+			};
+
+			config {
+				pins = "gpio17","gpio18";
+				bias-pull-up; /* PULL UP */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cci0_suspend: cci0_suspend {
+			mux {
+				/* CLK, DATA */
+				pins = "gpio17","gpio18";
+				function = "cci_i2c";
+			};
+
+			config {
+				pins = "gpio17","gpio18";
+				bias-pull-down; /* PULL DOWN */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cci1_active: cci1_active {
+			mux {
+				/* CLK, DATA */
+				pins = "gpio19","gpio20";
+				function = "cci_i2c";
+			};
+
+			config {
+				pins = "gpio19","gpio20";
+				bias-pull-up; /* PULL UP */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cci1_suspend: cci1_suspend {
+			mux {
+				/* CLK, DATA */
+				pins = "gpio19","gpio20";
+				function = "cci_i2c";
+			};
+
+			config {
+				pins = "gpio19","gpio20";
+				bias-pull-down; /* PULL DOWN */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cam_sensor_rear_active: cam_sensor_rear_active {
+			/* RESET */
+			mux {
+				pins = "gpio30";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio30";
+				bias-disable; /* No PULL */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cam_sensor_rear_suspend: cam_sensor_rear_suspend {
+			/* RESET */
+			mux {
+				pins = "gpio30";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio30";
+				bias-pull-down; /* PULL DOWN */
+				drive-strength = <2>; /* 2 MA */
+				output-low;
+			};
+		};
+
+		cam_sensor_rear_vana: cam_sensor_rear_vana {
+			/*  AVDD LDO */
+			mux {
+				pins = "gpio8";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio8";
+				bias-disable; /* No PULL */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cam_sensor_rear_vio: cam_sensor_rear_vio {
+			/* DOVDD LDO */
+			mux {
+				pins = "gpio29";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio29";
+				bias-disable; /* No PULL */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cam_sensor_mclk0_active: cam_sensor_mclk0_active {
+			/* MCLK0 */
+			mux {
+				pins = "gpio13";
+				function = "cam_mclk";
+			};
+
+			config {
+				pins = "gpio13";
+				bias-disable; /* No PULL */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cam_sensor_mclk0_suspend: cam_sensor_mclk0_suspend {
+			/* MCLK0 */
+			mux {
+				pins = "gpio13";
+				function = "cam_mclk";
+			};
+
+			config {
+				pins = "gpio13";
+				bias-pull-down; /* PULL DOWN */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cam_sensor_front_active: cam_sensor_front_active {
+			/* RESET  */
+			mux {
+				pins = "gpio9";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio9";
+				bias-disable; /* No PULL */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cam_sensor_front_suspend: cam_sensor_front_suspend {
+			/* RESET */
+			mux {
+				pins = "gpio9";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio9";
+				bias-pull-down; /* PULL DOWN */
+				drive-strength = <2>; /* 2 MA */
+				output-low;
+			};
+		};
+
+		cam_sensor_rear2_active: cam_sensor_rear2_active {
+			/* RESET */
+			mux {
+				pins = "gpio28";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio28";
+				bias-disable; /* No PULL */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cam_sensor_rear2_suspend: cam_sensor_rear2_suspend {
+			/* RESET */
+			mux {
+				pins = "gpio28";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio28";
+				bias-pull-down; /* PULL DOWN */
+				drive-strength = <2>; /* 2 MA */
+				output-low;
+			};
+		};
+
+		cam_sensor_mclk1_active: cam_sensor_mclk1_active {
+			/* MCLK1 */
+			mux {
+				pins = "gpio14";
+				function = "cam_mclk";
+			};
+
+			config {
+				pins = "gpio14";
+				bias-disable; /* No PULL */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cam_sensor_mclk1_suspend: cam_sensor_mclk1_suspend {
+			/* MCLK1 */
+			mux {
+				pins = "gpio14";
+				function = "cam_mclk";
+			};
+
+			config {
+				pins = "gpio14";
+				bias-pull-down; /* PULL DOWN */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cam_sensor_mclk2_active: cam_sensor_mclk2_active {
+			/* MCLK2 */
+			mux {
+				pins = "gpio15";
+				function = "cam_mclk";
+			};
+
+			config {
+				pins = "gpio15";
+				bias-disable; /* No PULL */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cam_sensor_mclk2_suspend: cam_sensor_mclk2_suspend {
+			/* MCLK2 */
+			mux {
+				pins = "gpio15";
+				function = "cam_mclk";
+			};
+
+			config {
+				pins = "gpio15";
+				bias-pull-down; /* PULL DOWN */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+	};
+};
+
+&pm660l_gpios {
+	camera_rear_dvdd_en {
+		camera_rear_dvdd_en_default: camera_rear_dvdd_en_default {
+			pins = "gpio4";
+			function = "normal";
+			power-source = <0>;
+			output-low;
+		};
+	};
+
+	camera_dvdd_en {
+		camera_dvdd_en_default: camera_dvdd_en_default {
+			pins = "gpio3";
+			function = "normal";
+			power-source = <0>;
+			output-low;
+		};
 	};
 };
 
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pm660a-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-pm660a-cdp-overlay.dts
index 5b67765..0ea4b1f 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pm660a-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-pm660a-cdp-overlay.dts
@@ -33,3 +33,10 @@
 		       <0x0001001b 0x0202001a 0x0 0x0>;
 };
 
+&dsi_dual_nt35597_truly_video_display {
+	/delete-property/ qcom,dsi-display-active;
+};
+
+&dsi_rm67195_amoled_fhd_cmd_display {
+	qcom,dsi-display-active;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pm660a-cdp.dts b/arch/arm64/boot/dts/qcom/sdm670-pm660a-cdp.dts
index 26f5e78..1cf52f5 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pm660a-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-pm660a-cdp.dts
@@ -26,3 +26,11 @@
 		       <0x0001001b 0x0002001a 0x0 0x0>,
 		       <0x0001001b 0x0202001a 0x0 0x0>;
 };
+
+&dsi_dual_nt35597_truly_video_display {
+	/delete-property/ qcom,dsi-display-active;
+};
+
+&dsi_rm67195_amoled_fhd_cmd_display {
+	qcom,dsi-display-active;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pmic-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pmic-overlay.dtsi
index c39978e..220487a 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pmic-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pmic-overlay.dtsi
@@ -35,6 +35,7 @@
 		qcom,thermal-mitigation
 				= <3000000 2500000 2000000 1500000
 					1000000 500000>;
+		qcom,auto-recharge-soc;
 
 		qcom,chgr@1000 {
 			reg = <0x1000 0x100>;
@@ -178,6 +179,9 @@
 		qcom,fg-esr-timer-asleep = <256 256>;
 		qcom,fg-esr-timer-charging = <0 96>;
 		qcom,cycle-counter-en;
+		qcom,hold-soc-while-full;
+		qcom,fg-auto-recharge-soc;
+		qcom,fg-recharge-soc-thr = <98>;
 		status = "okay";
 
 		qcom,fg-batt-soc@4000 {
@@ -376,5 +380,5 @@
 };
 
 &usb0 {
-	extcon = <&pm660_pdphy>, <&pm660_pdphy>, <0> /* <&eud> */;
+	extcon = <&pm660_pdphy>, <&pm660_pdphy>, <&eud>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
index 5fc4065..93e4c51 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
@@ -11,9 +11,11 @@
  */
 
 #include <dt-bindings/gpio/gpio.h>
+#include "sdm670-camera-sensor-qrd.dtsi"
 #include "sdm670-pmic-overlay.dtsi"
 #include "sdm670-audio-overlay.dtsi"
 #include "smb1355.dtsi"
+#include "sdm670-sde-display.dtsi"
 
 &qupv3_se9_2uart {
 	status = "disabled";
@@ -71,6 +73,11 @@
 	qcom,fg-bmd-en-delay-ms = <300>;
 };
 
+&pm660_charger {
+	qcom,battery-data = <&qrd_batterydata>;
+	qcom,sw-jeita-enable;
+};
+
 &tlmm {
 	smb_int_default: smb_int_default {
 		mux {
@@ -165,3 +172,116 @@
 	qcom,wsa-devs = <&wsa881x_211_en>, <&wsa881x_213_en>;
 	qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrLeft";
 };
+
+&sdhc_1 {
+	vdd-supply = <&pm660l_l4>;
+	qcom,vdd-voltage-level = <2960000 2960000>;
+	qcom,vdd-current-level = <200 570000>;
+
+	vdd-io-supply = <&pm660_l8>;
+	qcom,vdd-io-always-on;
+	qcom,vdd-io-lpm-sup;
+	qcom,vdd-io-voltage-level = <1800000 1800000>;
+	qcom,vdd-io-current-level = <200 325000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc1_clk_on  &sdc1_cmd_on &sdc1_data_on &sdc1_rclk_on>;
+	pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off &sdc1_rclk_off>;
+
+	status = "ok";
+};
+
+&sdhc_2 {
+	vdd-supply = <&pm660l_l5>;
+	qcom,vdd-voltage-level = <2960000 2960000>;
+	qcom,vdd-current-level = <200 800000>;
+
+	vdd-io-supply = <&pm660l_l2>;
+	qcom,vdd-io-voltage-level = <1800000 2960000>;
+	qcom,vdd-io-current-level = <200 22000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
+
+	cd-gpios = <&tlmm 96 0>;
+
+	status = "ok";
+};
+
+&tlmm {
+	pmx_ts_rst_active {
+		ts_rst_active: ts_rst_active {
+			mux {
+				pins = "gpio99";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio99";
+				drive-strength = <16>;
+				bias-pull-up;
+			};
+		};
+	};
+
+	pmx_ts_rst_suspend {
+		ts_rst_suspend: ts_rst_suspend {
+			mux {
+				pins = "gpio99";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio99";
+				drive-strength = <2>;
+				bias-pull-down;
+			};
+		};
+	};
+};
+
+&soc {
+	hbtp {
+		compatible = "qcom,hbtp-input";
+		pinctrl-names = "pmx_ts_active", "pmx_ts_suspend";
+		pinctrl-0 = <&ts_rst_active>;
+		pinctrl-1 = <&ts_rst_suspend>;
+		vcc_ana-supply = <&pm660l_l3>;
+		vcc_dig-supply = <&pm660_l13>;
+		qcom,afe-load = <20000>;
+		qcom,afe-vtg-min = <3000000>;
+		qcom,afe-vtg-max = <3000000>;
+		qcom,dig-load = <40000>;
+		qcom,dig-vtg-min = <1800000>;
+		qcom,dig-vtg-max = <1800000>;
+		qcom,fb-resume-delay-us = <1000>;
+		qcom,afe-force-power-on;
+		qcom,afe-power-on-delay-us = <6>;
+		qcom,afe-power-off-delay-us = <6>;
+	};
+};
+
+&dsi_dual_nt36850_truly_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,panel-mode-gpio = <&tlmm 76 0>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+};
+
+&dsi_dual_nt36850_truly_cmd_display {
+	 qcom,dsi-display-active;
+};
+
+&pm660l_wled {
+	status = "okay";
+	qcom,led-strings-list = [01 02];
+};
+
+&mdss_mdp {
+	#cooling-cells = <2>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi b/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi
index 657363f..c388f4a 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi
@@ -78,6 +78,9 @@
 		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S0_CLK>,
 			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
 			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		dmas = <&gpi_dma0 0 0 3 64 0>,
+			<&gpi_dma0 1 0 3 64 0>;
+		dma-names = "tx", "rx";
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se0_i2c_active>;
 		pinctrl-1 = <&qupv3_se0_i2c_sleep>;
@@ -95,6 +98,9 @@
 		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S1_CLK>,
 			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
 			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		dmas = <&gpi_dma0 0 1 3 64 0>,
+			<&gpi_dma0 1 1 3 64 0>;
+		dma-names = "tx", "rx";
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se1_i2c_active>;
 		pinctrl-1 = <&qupv3_se1_i2c_sleep>;
@@ -112,6 +118,9 @@
 		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S2_CLK>,
 			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
 			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		dmas = <&gpi_dma0 0 2 3 64 0>,
+			<&gpi_dma0 1 2 3 64 0>;
+		dma-names = "tx", "rx";
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se2_i2c_active>;
 		pinctrl-1 = <&qupv3_se2_i2c_sleep>;
@@ -129,6 +138,9 @@
 		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S3_CLK>,
 			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
 			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		dmas = <&gpi_dma0 0 3 3 64 0>,
+			<&gpi_dma0 1 3 3 64 0>;
+		dma-names = "tx", "rx";
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se3_i2c_active>;
 		pinctrl-1 = <&qupv3_se3_i2c_sleep>;
@@ -146,6 +158,9 @@
 		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S4_CLK>,
 			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
 			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		dmas = <&gpi_dma0 0 4 3 64 0>,
+			<&gpi_dma0 1 4 3 64 0>;
+		dma-names = "tx", "rx";
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se4_i2c_active>;
 		pinctrl-1 = <&qupv3_se4_i2c_sleep>;
@@ -163,6 +178,9 @@
 		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S5_CLK>,
 			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
 			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		dmas = <&gpi_dma0 0 5 3 64 0>,
+			<&gpi_dma0 1 5 3 64 0>;
+		dma-names = "tx", "rx";
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se5_i2c_active>;
 		pinctrl-1 = <&qupv3_se5_i2c_sleep>;
@@ -180,6 +198,9 @@
 		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S6_CLK>,
 			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
 			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		dmas = <&gpi_dma0 0 6 3 64 0>,
+			<&gpi_dma0 1 6 3 64 0>;
+		dma-names = "tx", "rx";
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se6_i2c_active>;
 		pinctrl-1 = <&qupv3_se6_i2c_sleep>;
@@ -197,6 +218,9 @@
 		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S7_CLK>,
 			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
 			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		dmas = <&gpi_dma0 0 7 3 64 0>,
+			<&gpi_dma0 1 7 3 64 0>;
+		dma-names = "tx", "rx";
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se7_i2c_active>;
 		pinctrl-1 = <&qupv3_se7_i2c_sleep>;
@@ -435,6 +459,9 @@
 		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S0_CLK>,
 			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
 			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		dmas = <&gpi_dma1 0 0 3 64 0>,
+			<&gpi_dma1 1 0 3 64 0>;
+		dma-names = "tx", "rx";
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se8_i2c_active>;
 		pinctrl-1 = <&qupv3_se8_i2c_sleep>;
@@ -452,6 +479,9 @@
 		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S1_CLK>,
 			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
 			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		dmas = <&gpi_dma1 0 1 3 64 0>,
+			<&gpi_dma1 1 1 3 64 0>;
+		dma-names = "tx", "rx";
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se9_i2c_active>;
 		pinctrl-1 = <&qupv3_se9_i2c_sleep>;
@@ -469,6 +499,9 @@
 		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S2_CLK>,
 			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
 			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		dmas = <&gpi_dma1 0 2 3 64 0>,
+			<&gpi_dma1 1 2 3 64 0>;
+		dma-names = "tx", "rx";
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se10_i2c_active>;
 		pinctrl-1 = <&qupv3_se10_i2c_sleep>;
@@ -486,6 +519,9 @@
 		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S3_CLK>,
 			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
 			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		dmas = <&gpi_dma1 0 3 3 64 0>,
+			<&gpi_dma1 1 3 3 64 0>;
+		dma-names = "tx", "rx";
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se11_i2c_active>;
 		pinctrl-1 = <&qupv3_se11_i2c_sleep>;
@@ -503,6 +539,9 @@
 		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S4_CLK>,
 			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
 			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		dmas = <&gpi_dma1 0 4 3 64 0>,
+			<&gpi_dma1 1 4 3 64 0>;
+		dma-names = "tx", "rx";
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se12_i2c_active>;
 		pinctrl-1 = <&qupv3_se12_i2c_sleep>;
@@ -520,6 +559,9 @@
 		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S5_CLK>,
 			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
 			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		dmas = <&gpi_dma1 0 5 3 64 0>,
+			<&gpi_dma1 1 5 3 64 0>;
+		dma-names = "tx", "rx";
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se13_i2c_active>;
 		pinctrl-1 = <&qupv3_se13_i2c_sleep>;
@@ -537,6 +579,9 @@
 		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S6_CLK>,
 			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
 			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		dmas = <&gpi_dma1 0 6 3 64 0>,
+			<&gpi_dma1 1 6 3 64 0>;
+		dma-names = "tx", "rx";
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se14_i2c_active>;
 		pinctrl-1 = <&qupv3_se14_i2c_sleep>;
@@ -554,6 +599,9 @@
 		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S7_CLK>,
 			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
 			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		dmas = <&gpi_dma1 0 7 3 64 0>,
+			<&gpi_dma1 1 7 3 64 0>;
+		dma-names = "tx", "rx";
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se15_i2c_active>;
 		pinctrl-1 = <&qupv3_se15_i2c_sleep>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
index 53213f8..6404bcf 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
@@ -25,6 +25,7 @@
 #include "dsi-panel-nt35695b-truly-fhd-video.dtsi"
 #include "dsi-panel-nt35695b-truly-fhd-cmd.dtsi"
 #include "dsi-panel-rm67195-amoled-fhd-cmd.dtsi"
+#include "dsi-panel-nt36850-truly-dualmipi-wqhd-cmd.dtsi"
 #include <dt-bindings/clock/mdss-10nm-pll-clk.h>
 
 &soc {
@@ -108,9 +109,9 @@
 
 		qcom,panel-supply-entry@0 {
 			reg = <0>;
-			qcom,supply-name = "wqhd-vddio";
+			qcom,supply-name = "vddio";
 			qcom,supply-min-voltage = <1800000>;
-			qcom,supply-max-voltage = <1950000>;
+			qcom,supply-max-voltage = <1800000>;
 			qcom,supply-enable-load = <32000>;
 			qcom,supply-disable-load = <80>;
 		};
@@ -420,8 +421,10 @@
 
 		qcom,dsi-panel = <&dsi_rm67195_amoled_fhd_cmd>;
 		vddio-supply = <&pm660_l11>;
-		lab-supply = <&lcdb_ldo_vreg>;
-		ibb-supply = <&lcdb_ncp_vreg>;
+		vdda-3p3-supply = <&pm660l_l6>;
+		lab-supply = <&lab_regulator>;
+		ibb-supply = <&ibb_regulator>;
+		oledb-supply = <&pm660a_oledb>;
 	};
 
 	dsi_nt35695b_truly_fhd_video_display: qcom,dsi-display@13 {
@@ -469,6 +472,29 @@
 		ibb-supply = <&lcdb_ncp_vreg>;
 	};
 
+	dsi_dual_nt36850_truly_cmd_display: qcom,dsi-display@15 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_dual_nt36850_truly_cmd_display";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
+		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+		qcom,platform-te-gpio = <&tlmm 10 0>;
+		qcom,platform-reset-gpio = <&tlmm 75 0>;
+
+		qcom,dsi-panel = <&dsi_dual_nt36850_truly_cmd>;
+		vddio-supply = <&pm660_l11>;
+		lab-supply = <&lcdb_ldo_vreg>;
+		ibb-supply = <&lcdb_ncp_vreg>;
+	};
+
 	sde_wb: qcom,wb-display@0 {
 		compatible = "qcom,wb-display";
 		cell-index = <0>;
@@ -503,6 +529,11 @@
 &dsi_dual_nt35597_truly_video {
 	qcom,mdss-dsi-t-clk-post = <0x0D>;
 	qcom,mdss-dsi-t-clk-pre = <0x2D>;
+	qcom,mdss-dsi-min-refresh-rate = <53>;
+	qcom,mdss-dsi-max-refresh-rate = <60>;
+	qcom,mdss-dsi-pan-enable-dynamic-fps;
+	qcom,mdss-dsi-pan-fps-update =
+		"dfps_immediate_porch_mode_vfp";
 	qcom,mdss-dsi-display-timings {
 		timing@0{
 			qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
@@ -517,6 +548,9 @@
 &dsi_dual_nt35597_truly_cmd {
 	qcom,mdss-dsi-t-clk-post = <0x0D>;
 	qcom,mdss-dsi-t-clk-pre = <0x2D>;
+	qcom,ulps-enabled;
+	qcom,partial-update-enabled = "single_roi";
+	qcom,panel-roi-alignment = <720 128 720 128 1440 128>;
 	qcom,mdss-dsi-display-timings {
 		timing@0{
 			qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
@@ -531,6 +565,7 @@
 &dsi_nt35597_truly_dsc_cmd {
 	qcom,mdss-dsi-t-clk-post = <0x0b>;
 	qcom,mdss-dsi-t-clk-pre = <0x23>;
+	qcom,ulps-enabled;
 	qcom,mdss-dsi-display-timings {
 		timing@0{
 			qcom,mdss-dsi-panel-phy-timings = [00 15 05 05 20 1f 05
@@ -546,6 +581,11 @@
 &dsi_nt35597_truly_dsc_video {
 	qcom,mdss-dsi-t-clk-post = <0x0b>;
 	qcom,mdss-dsi-t-clk-pre = <0x23>;
+	qcom,mdss-dsi-min-refresh-rate = <53>;
+	qcom,mdss-dsi-max-refresh-rate = <60>;
+	qcom,mdss-dsi-pan-enable-dynamic-fps;
+	qcom,mdss-dsi-pan-fps-update =
+		"dfps_immediate_porch_mode_vfp";
 	qcom,mdss-dsi-display-timings {
 		timing@0{
 			qcom,mdss-dsi-panel-phy-timings = [00 15 05 05 20 1f 05
@@ -683,6 +723,9 @@
 &dsi_dual_nt35597_cmd {
 	qcom,mdss-dsi-t-clk-post = <0x0d>;
 	qcom,mdss-dsi-t-clk-pre = <0x2d>;
+	qcom,ulps-enabled;
+	qcom,partial-update-enabled = "single_roi";
+	qcom,panel-roi-alignment = <720 128 720 128 1440 128>;
 	qcom,mdss-dsi-display-timings {
 		 timing@0 {
 			qcom,mdss-dsi-panel-timings = [00 1c 08 07 23 22 07 07
@@ -723,6 +766,7 @@
 &dsi_nt35695b_truly_fhd_cmd {
 	qcom,mdss-dsi-t-clk-post = <0x07>;
 	qcom,mdss-dsi-t-clk-pre = <0x1c>;
+	qcom,ulps-enabled;
 	qcom,mdss-dsi-display-timings {
 		timing@0 {
 			qcom,mdss-dsi-panel-phy-timings = [00 1c 05 06 0b 0c
@@ -732,3 +776,17 @@
 		};
 	};
 };
+
+&dsi_dual_nt36850_truly_cmd {
+	qcom,mdss-dsi-t-clk-post = <0x0E>;
+	qcom,mdss-dsi-t-clk-pre = <0x30>;
+	qcom,mdss-dsi-display-timings {
+		timing@0{
+			qcom,mdss-dsi-panel-phy-timings = [00 1f 08 08 24 23 08
+				08 05 03 04 00];
+			qcom,display-topology = <2 0 2>,
+				<1 0 2>;
+			qcom,default-topology-index = <0>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm670-sde.dtsi
index 8a811b7..2b80c22 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-sde.dtsi
@@ -46,6 +46,8 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 
+		#power-domain-cells = <0>;
+
 		/* hw blocks */
 		qcom,sde-off = <0x1000>;
 		qcom,sde-len = <0x45C>;
@@ -67,6 +69,11 @@
 		qcom,sde-dspp-off = <0x55000 0x57000>;
 		qcom,sde-dspp-size = <0x17e0>;
 
+		qcom,sde-dest-scaler-top-off = <0x00061000>;
+		qcom,sde-dest-scaler-top-size = <0xc>;
+		qcom,sde-dest-scaler-off = <0x800 0x1000>;
+		qcom,sde-dest-scaler-size = <0x800>;
+
 		qcom,sde-wb-off = <0x66000>;
 		qcom,sde-wb-size = <0x2c8>;
 		qcom,sde-wb-xin-id = <6>;
@@ -124,11 +131,15 @@
 		qcom,sde-mixer-blendstages = <0xb>;
 		qcom,sde-highest-bank-bit = <0x1>;
 		qcom,sde-ubwc-version = <0x200>;
+		qcom,sde-smart-panel-align-mode = <0xc>;
 		qcom,sde-panic-per-pipe;
 		qcom,sde-has-cdp;
 		qcom,sde-has-src-split;
 		qcom,sde-has-dim-layer;
 		qcom,sde-has-idle-pc;
+		qcom,sde-has-dest-scaler;
+		qcom,sde-max-dest-scaler-input-linewidth = <2048>;
+		qcom,sde-max-dest-scaler-output-linewidth = <2560>;
 		qcom,sde-max-bw-low-kbps = <9600000>;
 		qcom,sde-max-bw-high-kbps = <9600000>;
 		qcom,sde-dram-channels = <2>;
@@ -147,7 +158,18 @@
 
 		qcom,sde-danger-lut = <0x0000000f 0x0000ffff 0x00000000
 			0x00000000>;
-		qcom,sde-safe-lut = <0xfffc 0xff00 0xffff 0xffff>;
+		qcom,sde-safe-lut-linear =
+			<4 0xfff8>,
+			<0 0xfff0>;
+		qcom,sde-safe-lut-macrotile =
+			<10 0xfe00>,
+			<11 0xfc00>,
+			<12 0xf800>,
+			<0 0xf000>;
+		qcom,sde-safe-lut-nrt =
+			<0 0xffff>;
+		qcom,sde-safe-lut-cwb =
+			<0 0xffff>;
 		qcom,sde-qos-lut-linear =
 			<4 0x00000000 0x00000357>,
 			<5 0x00000000 0x00003357>,
@@ -358,6 +380,8 @@
 		interrupt-parent = <&mdss_mdp>;
 		interrupts = <2 0>;
 
+		power-domains = <&mdss_mdp>;
+
 		/* Offline rotator QoS setting */
 		qcom,mdss-rot-vbif-qos-setting = <3 3 3 3 3 3 3 3>;
 		qcom,mdss-rot-vbif-memtype = <3 3>;
@@ -423,6 +447,7 @@
 					"pixel_clk", "pixel_clk_rcg",
 					"esc_clk";
 
+		qcom,null-insertion-enabled;
 		qcom,ctrl-supply-entries {
 			#address-cells = <1>;
 			#size-cells = <0>;
@@ -456,6 +481,7 @@
 		<&clock_dispcc DISP_CC_MDSS_ESC1_CLK>;
 		clock-names = "byte_clk", "byte_clk_rcg", "byte_intf_clk",
 				"pixel_clk", "pixel_clk_rcg", "esc_clk";
+		qcom,null-insertion-enabled;
 		qcom,ctrl-supply-entries {
 			#address-cells = <1>;
 			#size-cells = <0>;
@@ -544,7 +570,10 @@
 		vdda-1p2-supply = <&pm660_l1>;
 		vdda-0p9-supply = <&pm660l_l1>;
 
-		reg =	<0xae90000 0xa84>,
+		reg =	<0xae90000 0x0dc>,
+			<0xae90200 0x0c0>,
+			<0xae90400 0x508>,
+			<0xae90a00 0x094>,
 			<0x88eaa00 0x200>,
 			<0x88ea200 0x200>,
 			<0x88ea600 0x200>,
@@ -553,7 +582,9 @@
 			<0x88ea030 0x10>,
 			<0x88e8000 0x20>,
 			<0x0aee1000 0x034>;
-		reg-names = "dp_ctrl", "dp_phy", "dp_ln_tx0", "dp_ln_tx1",
+		/* dp_ctrl: dp_ahb, dp_aux, dp_link, dp_p0 */
+		reg-names = "dp_ahb", "dp_aux", "dp_link",
+			"dp_p0", "dp_phy", "dp_ln_tx0", "dp_ln_tx1",
 			"dp_mmss_cc", "qfprom_physical", "dp_pll",
 			"usb3_dp_com", "hdcp_physical";
 
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index 68507a5..409f97e 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -368,7 +368,7 @@
 				 979200    382
 				1132800    408
 				1363200    448
-				1563000    586
+				1536000    586
 				1747200    641
 				1843200    659
 				1996800    696
@@ -409,7 +409,7 @@
 				 979200    38
 				1132800    40
 				1363200    44
-				1563000    58
+				1536000    58
 				1747200    64
 				1843200    65
 				1996800    69
@@ -517,7 +517,7 @@
 		qseecom_mem: qseecom_region {
 			compatible = "shared-dma-pool";
 			alloc-ranges = <0 0x00000000 0 0xffffffff>;
-			reusable;
+			no-map;
 			alignment = <0 0x400000>;
 			size = <0 0x1400000>;
 		};
@@ -538,6 +538,11 @@
 			size = <0 0x5c00000>;
 		};
 
+		cont_splash_memory: cont_splash_region@9d400000 {
+			reg = <0x0 0x9d400000 0x0 0x02400000>;
+			label = "cont_splash_region";
+		};
+
 		dump_mem: mem_dump_region {
 			compatible = "shared-dma-pool";
 			reusable;
@@ -715,9 +720,9 @@
 				 <&clock_gcc GCC_CE1_AXI_CLK>;
 		qcom,ce-opp-freq = <171430000>;
 		qcom,request-bw-before-clk;
-		qcom,smmu-s1-bypass;
-		iommus = <&apps_smmu 0x706 0x3>,
-			 <&apps_smmu 0x716 0x3>;
+		qcom,smmu-s1-enable;
+		iommus = <&apps_smmu 0x706 0x1>,
+			 <&apps_smmu 0x716 0x1>;
 	};
 
 	qcom_crypto: qcrypto@1de0000 {
@@ -752,9 +757,9 @@
 		qcom,use-sw-aead-algo;
 		qcom,use-sw-ahash-algo;
 		qcom,use-sw-hmac-algo;
-		qcom,smmu-s1-bypass;
-		iommus = <&apps_smmu 0x704 0x3>,
-			 <&apps_smmu 0x714 0x3>;
+		qcom,smmu-s1-enable;
+		iommus = <&apps_smmu 0x704 0x1>,
+			 <&apps_smmu 0x714 0x1>;
 	};
 
 	qcom,qbt1000 {
@@ -775,6 +780,7 @@
 		qcom,disk-encrypt-pipe-pair = <2>;
 		qcom,support-fde;
 		qcom,no-clock-support;
+		qcom,fde-key-size;
 		qcom,appsbl-qseecom-support;
 		qcom,msm-bus,name = "qseecom-noc";
 		qcom,msm-bus,num-cases = <4>;
@@ -1083,6 +1089,12 @@
 		qcom,rtb-size = <0x100000>;
 	};
 
+	qcom,mpm2-sleep-counter@c221000 {
+		compatible = "qcom,mpm2-sleep-counter";
+		reg = <0x0c221000 0x1000>;
+		clock-frequency = <32768>;
+	};
+
 	qcom,msm-imem@146bf000 {
 		compatible = "qcom,msm-imem";
 		reg = <0x146bf000 0x1000>;
@@ -1122,7 +1134,7 @@
 	};
 
 	gpi_dma0: qcom,gpi-dma@0x800000 {
-		#dma-cells = <6>;
+		#dma-cells = <5>;
 		compatible = "qcom,gpi-dma";
 		reg = <0x800000 0x60000>;
 		reg-names = "gpi-top";
@@ -1134,11 +1146,13 @@
 		qcom,gpii-mask = <0xfa>;
 		qcom,ev-factor = <2>;
 		iommus = <&apps_smmu 0x0016 0x0>;
+		qcom,smmu-cfg = <0x1>;
+		qcom,iova-range = <0x0 0x100000 0x0 0x100000>;
 		status = "ok";
 	};
 
 	gpi_dma1: qcom,gpi-dma@0xa00000 {
-		#dma-cells = <6>;
+		#dma-cells = <5>;
 		compatible = "qcom,gpi-dma";
 		reg = <0xa00000 0x60000>;
 		reg-names = "gpi-top";
@@ -1149,6 +1163,8 @@
 		qcom,max-num-gpii = <13>;
 		qcom,gpii-mask = <0xfa>;
 		qcom,ev-factor = <2>;
+		qcom,smmu-cfg = <0x1>;
+		qcom,iova-range = <0x0 0x100000 0x0 0x100000>;
 		iommus = <&apps_smmu 0x06d6 0x0>;
 		status = "ok";
 	};
@@ -1584,7 +1600,8 @@
 		interrupts = <GIC_SPI 492 IRQ_TYPE_LEVEL_HIGH>;
 		reg = <0x88e0000 0x2000>;
 		reg-names = "eud_base";
-		status = "disabled";
+		clocks = <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
+		clock-names = "cfg_ahb_clk";
 	};
 
 	qcom,llcc@1100000 {
@@ -1825,6 +1842,7 @@
 		reg = <0x0 0x200000>;
 		reg-names = "rmtfs";
 		qcom,client-id = <0x00000001>;
+		qcom,guard-memory;
 	};
 
 	qcom,msm_gsi {
@@ -2126,6 +2144,28 @@
 		status = "ok";
 	};
 
+	sdcc1_ice: sdcc1ice@7c8000 {
+		compatible = "qcom,ice";
+		reg = <0x7c8000 0x8000>;
+		qcom,enable-ice-clk;
+		clock-names = "ice_core_clk_src", "ice_core_clk",
+				"bus_clk", "iface_clk";
+		clocks = <&clock_gcc GCC_SDCC1_ICE_CORE_CLK_SRC>,
+			<&clock_gcc GCC_SDCC1_ICE_CORE_CLK>,
+			<&clock_gcc GCC_SDCC1_APPS_CLK>,
+			<&clock_gcc GCC_SDCC1_AHB_CLK>;
+		qcom,op-freq-hz = <300000000>, <0>, <0>, <0>;
+		qcom,msm-bus,name = "sdcc_ice_noc";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<150 512 0 0>,    /* No vote */
+			<150 512 1000 0>; /* Max. bandwidth */
+		qcom,bus-vector-names = "MIN",
+					"MAX";
+		qcom,instance-type = "sdcc";
+	};
+
 	sdhc_1: sdhci@7c4000 {
 		compatible = "qcom,sdhci-msm-v5";
 		reg = <0x7C4000 0x1000>, <0x7C5000 0x1000>;
@@ -2136,6 +2176,7 @@
 
 		qcom,bus-width = <8>;
 		qcom,large-address-bus;
+		sdhc-msm-crypto = <&sdcc1_ice>;
 
 		qcom,clk-rates = <400000 20000000 25000000 50000000 100000000
 						192000000 384000000>;
@@ -2508,6 +2549,7 @@
 		qcom,target-dev = <&l3_cpu0>;
 		qcom,cachemiss-ev = <0x17>;
 		qcom,core-dev-table =
+			<  576000  300000000 >,
 			<  748800  556800000 >,
 			<  998400  806400000 >,
 			< 1209660  940800000 >,
@@ -2565,6 +2607,67 @@
 		};
 	};
 
+	mincpu0bw: qcom,mincpu0bw {
+		compatible = "qcom,devbw";
+		governor = "powersave";
+		qcom,src-dst-ports = <1 512>;
+		qcom,active-only;
+		qcom,bw-tbl =
+			< MHZ_TO_MBPS( 100, 4) >, /* 381 MB/s */
+			< MHZ_TO_MBPS( 200, 4) >, /* 762 MB/s */
+			< MHZ_TO_MBPS( 300, 4) >, /* 1144 MB/s */
+			< MHZ_TO_MBPS( 451, 4) >, /* 1720 MB/s */
+			< MHZ_TO_MBPS( 547, 4) >, /* 2086 MB/s */
+			< MHZ_TO_MBPS( 681, 4) >, /* 2597 MB/s */
+			< MHZ_TO_MBPS( 768, 4) >, /* 2929 MB/s */
+			< MHZ_TO_MBPS(1017, 4) >, /* 3879 MB/s */
+			< MHZ_TO_MBPS(1353, 4) >, /* 5161 MB/s */
+			< MHZ_TO_MBPS(1555, 4) >, /* 5931 MB/s */
+			< MHZ_TO_MBPS(1804, 4) >; /* 6881 MB/s */
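+
+		/*
+		 * Illustrative note: the MB/s comments above appear to follow
+		 * MHZ_TO_MBPS(mhz, w) ~= mhz * 10^6 * w / 2^20, e.g.
+		 * 100 MHz x 4 bytes ~= 381 MB/s.
+		 */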
+	};
+
+	mincpu6bw: qcom,mincpu6bw {
+		compatible = "qcom,devbw";
+		governor = "powersave";
+		qcom,src-dst-ports = <1 512>;
+		qcom,active-only;
+		qcom,bw-tbl =
+			< MHZ_TO_MBPS( 100, 4) >, /* 381 MB/s */
+			< MHZ_TO_MBPS( 200, 4) >, /* 762 MB/s */
+			< MHZ_TO_MBPS( 300, 4) >, /* 1144 MB/s */
+			< MHZ_TO_MBPS( 451, 4) >, /* 1720 MB/s */
+			< MHZ_TO_MBPS( 547, 4) >, /* 2086 MB/s */
+			< MHZ_TO_MBPS( 681, 4) >, /* 2597 MB/s */
+			< MHZ_TO_MBPS( 768, 4) >, /* 2929 MB/s */
+			< MHZ_TO_MBPS(1017, 4) >, /* 3879 MB/s */
+			< MHZ_TO_MBPS(1353, 4) >, /* 5161 MB/s */
+			< MHZ_TO_MBPS(1555, 4) >, /* 5931 MB/s */
+			< MHZ_TO_MBPS(1804, 4) >; /* 6881 MB/s */
+	};
+
+	devfreq_compute0: qcom,devfreq-compute0 {
+		compatible = "qcom,arm-cpu-mon";
+		qcom,cpulist = <&CPU0 &CPU1 &CPU2 &CPU3 &CPU4 &CPU5>;
+		qcom,target-dev = <&mincpu0bw>;
+		qcom,core-dev-table =
+				<  748800 MHZ_TO_MBPS( 300, 4) >,
+				< 1209660 MHZ_TO_MBPS( 451, 4) >,
+				< 1612800 MHZ_TO_MBPS( 547, 4) >,
+				< 1708000 MHZ_TO_MBPS( 768, 4) >;
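+		/*
+		 * Presumed semantics: each row pairs a CPU frequency threshold
+		 * (kHz) with the minimum bandwidth to vote on mincpu0bw.
+		 */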
+	};
+
+	devfreq_compute6: qcom,devfreq-compute6 {
+		compatible = "qcom,arm-cpu-mon";
+		qcom,cpulist = <&CPU6 &CPU7>;
+		qcom,target-dev = <&mincpu6bw>;
+		qcom,core-dev-table =
+				< 1132800 MHZ_TO_MBPS( 300, 4) >,
+				< 1363200 MHZ_TO_MBPS( 547, 4) >,
+				< 1747200 MHZ_TO_MBPS( 768, 4) >,
+				< 1996800 MHZ_TO_MBPS(1017, 4) >,
+				< 2457600 MHZ_TO_MBPS(1804, 4) >;
+	};
+
 	cpu_pmu: cpu-pmu {
 		compatible = "arm,armv8-pmuv3";
 		qcom,irq-is-percpu;
@@ -2730,6 +2833,7 @@
 #include "sdm670-audio.dtsi"
 #include "sdm670-usb.dtsi"
 #include "sdm670-gpu.dtsi"
+#include "sdm670-camera.dtsi"
 #include "sdm670-thermal.dtsi"
 #include "sdm670-bus.dtsi"
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-qrd-overlay.dts
index c6622d4..f9c6f65 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-qrd-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-qrd-overlay.dts
@@ -62,3 +62,7 @@
 &dsi_sharp_4k_dsc_video_display {
 	qcom,dsi-display-active;
 };
+
+&mdss_mdp {
+	connectors = <&sde_rscc &sde_wb &sde_dp>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
index 7ca2645..d8a6dc3 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
@@ -22,10 +22,20 @@
 		status = "ok";
 	};
 
-	led_flash_front: qcom,camera-flash@1 {
+	led_flash_rear_aux: qcom,camera-flash@1 {
 		cell-index = <1>;
 		reg = <0x01 0x00>;
 		compatible = "qcom,camera-flash";
+		flash-source = <&pmi8998_flash0 &pmi8998_flash1>;
+		torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
+		switch-source = <&pmi8998_switch0>;
+		status = "ok";
+	};
+
+	led_flash_front: qcom,camera-flash@2 {
+		cell-index = <2>;
+		reg = <0x02 0x00>;
+		compatible = "qcom,camera-flash";
 		flash-source = <&pmi8998_flash2>;
 		torch-source = <&pmi8998_torch2>;
 		switch-source = <&pmi8998_switch1>;
@@ -74,6 +84,11 @@
 };
 
 &cam_cci {
+	qcom,cam-res-mgr {
+		compatible = "qcom,cam-res-mgr";
+		status = "ok";
+	};
+
 	actuator_rear: qcom,actuator@0 {
 		cell-index = <0>;
 		reg = <0x0>;
@@ -87,7 +102,7 @@
 		rgltr-load-current = <0>;
 	};
 
-	actuator_front: qcom,actuator@1 {
+	actuator_rear_aux: qcom,actuator@1 {
 		cell-index = <1>;
 		reg = <0x1>;
 		compatible = "qcom,actuator";
@@ -100,6 +115,19 @@
 		rgltr-load-current = <0>;
 	};
 
+	actuator_front: qcom,actuator@2 {
+		cell-index = <2>;
+		reg = <0x2>;
+		compatible = "qcom,actuator";
+		cci-master = <1>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2800000>;
+		rgltr-max-voltage = <2800000>;
+		rgltr-load-current = <0>;
+	};
+
 	ois_rear: qcom,ois@0 {
 		cell-index = <0>;
 		reg = <0x0>;
@@ -289,9 +317,11 @@
 		compatible = "qcom,cam-sensor";
 		reg = <0x1>;
 		csiphy-sd-index = <1>;
-		sensor-position-roll = <90>;
+		sensor-position-roll = <270>;
 		sensor-position-pitch = <0>;
 		sensor-position-yaw = <180>;
+		actuator-src = <&actuator_rear_aux>;
+		led-flash-src = <&led_flash_rear_aux>;
 		eeprom-src = <&eeprom_rear_aux>;
 		cam_vdig-supply = <&camera_ldo>;
 		cam_vio-supply = <&pm8998_lvs1>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
index aa55698..952ba29 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
@@ -22,10 +22,20 @@
 		status = "ok";
 	};
 
-	led_flash_front: qcom,camera-flash@1 {
+	led_flash_rear_aux: qcom,camera-flash@1 {
 		cell-index = <1>;
 		reg = <0x01 0x00>;
 		compatible = "qcom,camera-flash";
+		flash-source = <&pmi8998_flash0 &pmi8998_flash1>;
+		torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
+		switch-source = <&pmi8998_switch0>;
+		status = "ok";
+	};
+
+	led_flash_front: qcom,camera-flash@2 {
+		cell-index = <2>;
+		reg = <0x02 0x00>;
+		compatible = "qcom,camera-flash";
 		flash-source = <&pmi8998_flash2>;
 		torch-source = <&pmi8998_torch2>;
 		switch-source = <&pmi8998_switch1>;
@@ -74,6 +84,11 @@
 };
 
 &cam_cci {
+	qcom,cam-res-mgr {
+		compatible = "qcom,cam-res-mgr";
+		status = "ok";
+	};
+
 	actuator_rear: qcom,actuator@0 {
 		cell-index = <0>;
 		reg = <0x0>;
@@ -87,7 +102,7 @@
 		rgltr-load-current = <0>;
 	};
 
-	actuator_front: qcom,actuator@1 {
+	actuator_rear_aux: qcom,actuator@1 {
 		cell-index = <1>;
 		reg = <0x1>;
 		compatible = "qcom,actuator";
@@ -100,6 +115,19 @@
 		rgltr-load-current = <0>;
 	};
 
+	actuator_front: qcom,actuator@2 {
+		cell-index = <2>;
+		reg = <0x2>;
+		compatible = "qcom,actuator";
+		cci-master = <1>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2800000>;
+		rgltr-max-voltage = <2800000>;
+		rgltr-load-current = <0>;
+	};
+
 	ois_rear: qcom,ois@0 {
 		cell-index = <0>;
 		reg = <0x0>;
@@ -289,9 +317,11 @@
 		compatible = "qcom,cam-sensor";
 		reg = <0x1>;
 		csiphy-sd-index = <1>;
-		sensor-position-roll = <90>;
+		sensor-position-roll = <270>;
 		sensor-position-pitch = <0>;
 		sensor-position-yaw = <180>;
+		actuator-src = <&actuator_rear_aux>;
+		led-flash-src = <&led_flash_rear_aux>;
 		eeprom-src = <&eeprom_rear_aux>;
 		cam_vdig-supply = <&camera_ldo>;
 		cam_vio-supply = <&pm8998_lvs1>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-qvr.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-qvr.dtsi
new file mode 100644
index 0000000..8ad5f3c
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-qvr.dtsi
@@ -0,0 +1,379 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	led_flash_rear: qcom,camera-flash@0 {
+		cell-index = <0>;
+		reg = <0x00 0x00>;
+		compatible = "qcom,camera-flash";
+		flash-source = <&pmi8998_flash0 &pmi8998_flash1>;
+		torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
+		switch-source = <&pmi8998_switch0>;
+		status = "ok";
+	};
+
+	led_flash_front: qcom,camera-flash@1 {
+		cell-index = <1>;
+		reg = <0x01 0x00>;
+		compatible = "qcom,camera-flash";
+		flash-source = <&pmi8998_flash2>;
+		torch-source = <&pmi8998_torch2>;
+		switch-source = <&pmi8998_switch1>;
+		status = "ok";
+	};
+
+	actuator_regulator: gpio-regulator@0 {
+		compatible = "regulator-fixed";
+		reg = <0x00 0x00>;
+		regulator-name = "actuator_regulator";
+		regulator-min-microvolt = <2800000>;
+		regulator-max-microvolt = <2800000>;
+		regulator-enable-ramp-delay = <100>;
+		enable-active-high;
+		gpio = <&tlmm 27 0>;
+		vin-supply = <&pmi8998_bob>;
+	};
+
+	camera_rear_ldo: gpio-regulator@1 {
+		compatible = "regulator-fixed";
+		reg = <0x01 0x00>;
+		regulator-name = "camera_rear_ldo";
+		regulator-min-microvolt = <1050000>;
+		regulator-max-microvolt = <1050000>;
+		regulator-enable-ramp-delay = <135>;
+		enable-active-high;
+		gpio = <&pm8998_gpios 12 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&camera_rear_dvdd_en_default>;
+		vin-supply = <&pm8998_s3>;
+	};
+
+	camera_ldo: gpio-regulator@2 {
+		compatible = "regulator-fixed";
+		reg = <0x02 0x00>;
+		regulator-name = "camera_ldo";
+		regulator-min-microvolt = <1050000>;
+		regulator-max-microvolt = <1050000>;
+		regulator-enable-ramp-delay = <233>;
+		enable-active-high;
+		gpio = <&pm8998_gpios 9 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&camera_dvdd_en_default>;
+		vin-supply = <&pm8998_s3>;
+	};
+};
+
+&cam_cci {
+	actuator_rear: qcom,actuator@0 {
+		cell-index = <0>;
+		reg = <0x0>;
+		compatible = "qcom,actuator";
+		cci-master = <0>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2800000>;
+		rgltr-max-voltage = <2800000>;
+		rgltr-load-current = <0>;
+	};
+
+	actuator_rear_aux: qcom,actuator@1 {
+		cell-index = <1>;
+		reg = <0x1>;
+		compatible = "qcom,actuator";
+		cci-master = <1>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2800000>;
+		rgltr-max-voltage = <2800000>;
+		rgltr-load-current = <0>;
+	};
+
+	ois_rear: qcom,ois@0 {
+		cell-index = <0>;
+		reg = <0x0>;
+		compatible = "qcom,ois";
+		cci-master = <0>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2800000>;
+		rgltr-max-voltage = <2800000>;
+		rgltr-load-current = <0>;
+		status = "disabled";
+	};
+
+	eeprom_rear: qcom,eeprom@0 {
+		cell-index = <0>;
+		reg = <0>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&pm8998_lvs1>;
+		cam_vana-supply = <&pmi8998_bob>;
+		cam_vdig-supply = <&camera_rear_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 3312000 1050000 0>;
+		rgltr-max-voltage = <0 3600000 1050000 0>;
+		rgltr-load-current = <0 80000 105000 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_active
+				&cam_sensor_rear_active>;
+		pinctrl-1 = <&cam_sensor_mclk0_suspend
+				&cam_sensor_rear_suspend>;
+		gpios = <&tlmm 13 0>,
+			<&tlmm 80 0>,
+			<&tlmm 79 0>,
+			<&tlmm 27 0>;
+		gpio-reset = <1>;
+		gpio-vana = <2>;
+		gpio-vaf = <3>;
+		gpio-req-tbl-num = <0 1 2 3>;
+		gpio-req-tbl-flags = <1 0 0 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK0",
+					"CAM_RESET0",
+					"CAM_VANA0",
+					"CAM_VAF";
+		sensor-position = <0>;
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	eeprom_rear_aux: qcom,eeprom@1 {
+		cell-index = <1>;
+		reg = <0x1>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&pm8998_lvs1>;
+		cam_vana-supply = <&pmi8998_bob>;
+		cam_vdig-supply = <&camera_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 3312000 1050000 0>;
+		rgltr-max-voltage = <0 3600000 1050000 0>;
+		rgltr-load-current = <0 80000 105000 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk1_active
+				 &cam_sensor_front_active>;
+		pinctrl-1 = <&cam_sensor_mclk1_suspend
+				 &cam_sensor_front_suspend>;
+		gpios = <&tlmm 14 0>,
+			<&tlmm 28 0>,
+			<&tlmm 8 0>,
+			<&tlmm 27 0>;
+		gpio-reset = <1>;
+		gpio-vana = <2>;
+		gpio-vaf = <3>;
+		gpio-req-tbl-num = <0 1 2 3>;
+		gpio-req-tbl-flags = <1 0 0 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
+					"CAM_RESET1",
+					"CAM_VANA1",
+					"CAM_VAF";
+		sensor-position = <0>;
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	eeprom_front: qcom,eeprom@2 {
+		cell-index = <2>;
+		reg = <0x2>;
+		compatible = "qcom,eeprom";
+		cam_vdig-supply = <&camera_ldo>;
+		cam_vio-supply = <&pm8998_lvs1>;
+		cam_vana-supply = <&pmi8998_bob>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vdig", "cam_vio", "cam_vana",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1050000 0 3312000 0>;
+		rgltr-max-voltage = <1050000 0 3600000 0>;
+		rgltr-load-current = <105000 0 80000 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk2_active
+				 &cam_sensor_rear2_active>;
+		pinctrl-1 = <&cam_sensor_mclk2_suspend
+				 &cam_sensor_rear2_suspend>;
+		gpios = <&tlmm 15 0>,
+			<&tlmm 9 0>,
+			<&tlmm 8 0>;
+		gpio-reset = <1>;
+		gpio-vana = <2>;
+		gpio-req-tbl-num = <0 1 2>;
+		gpio-req-tbl-flags = <1 0 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK2",
+					"CAM_RESET2",
+					"CAM_VANA2";
+		sensor-position = <1>;
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor@0 {
+		cell-index = <0>;
+		compatible = "qcom,cam-sensor";
+		reg = <0x0>;
+		csiphy-sd-index = <0>;
+		sensor-position-roll = <270>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <180>;
+		led-flash-src = <&led_flash_rear>;
+		actuator-src = <&actuator_rear>;
+		ois-src = <&ois_rear>;
+		eeprom-src = <&eeprom_rear>;
+		cam_vio-supply = <&pm8998_lvs1>;
+		cam_vana-supply = <&pmi8998_bob>;
+		cam_vdig-supply = <&camera_rear_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 3312000 1050000 0>;
+		rgltr-max-voltage = <0 3600000 1050000 0>;
+		rgltr-load-current = <0 80000 105000 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_active
+				&cam_sensor_rear_active>;
+		pinctrl-1 = <&cam_sensor_mclk0_suspend
+				&cam_sensor_rear_suspend>;
+		gpios = <&tlmm 13 0>,
+			<&tlmm 80 0>,
+			<&tlmm 79 0>;
+		gpio-reset = <1>;
+		gpio-vana = <2>;
+		gpio-req-tbl-num = <0 1 2>;
+		gpio-req-tbl-flags = <1 0 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK0",
+					"CAM_RESET0",
+					"CAM_VANA";
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor@1 {
+		cell-index = <1>;
+		compatible = "qcom,cam-sensor";
+		reg = <0x1>;
+		csiphy-sd-index = <1>;
+		sensor-position-roll = <90>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <180>;
+		eeprom-src = <&eeprom_rear_aux>;
+		actuator-src = <&actuator_rear_aux>;
+		led-flash-src = <&led_flash_front>;
+		cam_vio-supply = <&pm8998_lvs1>;
+		cam_vana-supply = <&pmi8998_bob>;
+		cam_vdig-supply = <&camera_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 3312000 1050000 0>;
+		rgltr-max-voltage = <0 3600000 1050000 0>;
+		rgltr-load-current = <0 80000 105000 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk1_active
+				 &cam_sensor_front_active>;
+		pinctrl-1 = <&cam_sensor_mclk1_suspend
+				 &cam_sensor_front_suspend>;
+		gpios = <&tlmm 14 0>,
+			<&tlmm 28 0>,
+			<&tlmm 8 0>;
+		gpio-reset = <1>;
+		gpio-vana = <2>;
+		gpio-req-tbl-num = <0 1 2>;
+		gpio-req-tbl-flags = <1 0 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
+					"CAM_RESET1",
+					"CAM_VANA1";
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor@2 {
+		cell-index = <2>;
+		compatible = "qcom,cam-sensor";
+		reg = <0x02>;
+		csiphy-sd-index = <2>;
+		sensor-position-roll = <270>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <0>;
+		eeprom-src = <&eeprom_front>;
+		cam_vdig-supply = <&camera_ldo>;
+		cam_vio-supply = <&pm8998_lvs1>;
+		cam_vana-supply = <&pmi8998_bob>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vdig", "cam_vio", "cam_vana",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1050000 0 3312000 0>;
+		rgltr-max-voltage = <1050000 0 3600000 0>;
+		rgltr-load-current = <105000 0 80000 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk2_active
+				&cam_sensor_rear2_active>;
+		pinctrl-1 = <&cam_sensor_mclk2_suspend
+				&cam_sensor_rear2_suspend>;
+		gpios = <&tlmm 15 0>,
+			<&tlmm 9 0>,
+			<&tlmm 8 0>;
+		gpio-reset = <1>;
+		gpio-vana = <2>;
+		gpio-req-tbl-num = <0 1 2>;
+		gpio-req-tbl-flags = <1 0 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK2",
+					"CAM_RESET2",
+					"CAM_VANA2";
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
index 46c5660..5a566e3 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
@@ -45,9 +45,10 @@
 			"csiphy0_clk",
 			"csi0phytimer_clk_src",
 			"csi0phytimer_clk";
-		clock-cntl-level = "turbo";
+		clock-cntl-level = "svs", "turbo";
 		clock-rates =
-			<0 0 0 0 320000000 0 269333333 0>;
+			<0 0 0 0 320000000 0 269333333 0>,
+			<0 0 0 0 384000000 0 269333333 0>;
 		status = "ok";
 	};
 
@@ -79,9 +80,10 @@
 			"csiphy1_clk",
 			"csi1phytimer_clk_src",
 			"csi1phytimer_clk";
-		clock-cntl-level = "turbo";
+		clock-cntl-level = "svs", "turbo";
 		clock-rates =
-			<0 0 0 0 320000000 0 269333333 0>;
+			<0 0 0 0 320000000 0 269333333 0>,
+			<0 0 0 0 384000000 0 269333333 0>;
 
 		status = "ok";
 	};
@@ -114,9 +116,10 @@
 			"csiphy2_clk",
 			"csi2phytimer_clk_src",
 			"csi2phytimer_clk";
-		clock-cntl-level = "turbo";
+		clock-cntl-level = "svs", "turbo";
 		clock-rates =
-			<0 0 0 0 320000000 0 269333333 0>;
+			<0 0 0 0 320000000 0 269333333 0>,
+			<0 0 0 0 384000000 0 269333333 0>;
 		status = "ok";
 	};
 
@@ -146,7 +149,7 @@
 			"cci_clk",
 			"cci_clk_src";
 		src-clock-name = "cci_clk_src";
-		clock-cntl-level = "turbo";
+		clock-cntl-level = "lowsvs";
 		clock-rates = <0 0 0 0 0 37500000>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cci0_active &cci1_active>;
@@ -399,17 +402,17 @@
 			<MSM_BUS_MASTER_AMPSS_M0
 			MSM_BUS_SLAVE_CAMERA_CFG 0 0>,
 			<MSM_BUS_MASTER_AMPSS_M0
-			MSM_BUS_SLAVE_CAMERA_CFG 0 180000>,
+			MSM_BUS_SLAVE_CAMERA_CFG 0 76500>,
 			<MSM_BUS_MASTER_AMPSS_M0
-			MSM_BUS_SLAVE_CAMERA_CFG 0 180000>,
+			MSM_BUS_SLAVE_CAMERA_CFG 0 76500>,
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 150000>,
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 150000>,
 			<MSM_BUS_MASTER_AMPSS_M0
 			MSM_BUS_SLAVE_CAMERA_CFG 0 300000>,
 			<MSM_BUS_MASTER_AMPSS_M0
-			MSM_BUS_SLAVE_CAMERA_CFG 0 300000>,
-			<MSM_BUS_MASTER_AMPSS_M0
-			MSM_BUS_SLAVE_CAMERA_CFG 0 640000>,
-			<MSM_BUS_MASTER_AMPSS_M0
-			MSM_BUS_SLAVE_CAMERA_CFG 0 640000>;
+			MSM_BUS_SLAVE_CAMERA_CFG 0 300000>;
 		vdd-corners = <RPMH_REGULATOR_LEVEL_OFF
 			RPMH_REGULATOR_LEVEL_RETENTION
 			RPMH_REGULATOR_LEVEL_MIN_SVS
@@ -431,13 +434,14 @@
 			"csid0", "csid1", "csid2",
 			"ife0", "ife1", "ife2", "ipe0",
 			"ipe1", "cam-cdm-intf0", "cpas-cdm0", "bps0",
-			"icp0", "jpeg-dma0", "jpeg-enc0", "fd0";
+			"icp0", "jpeg-dma0", "jpeg-enc0", "fd0", "lrmecpas";
 		client-axi-port-names =
 			"cam_hf_1", "cam_hf_2", "cam_hf_2", "cam_sf_1",
 			"cam_hf_1", "cam_hf_2", "cam_hf_2",
 			"cam_hf_1", "cam_hf_2", "cam_hf_2", "cam_sf_1",
 			"cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1",
-			"cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1";
+			"cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1",
+			"cam_sf_1";
 		client-bus-camnoc-based;
 		qcom,axi-port-list {
 			qcom,axi-port1 {
@@ -526,7 +530,8 @@
 		cdm-client-names = "vfe",
 			"jpegdma",
 			"jpegenc",
-			"fd";
+			"fd",
+			"lrmecdm";
 		status = "ok";
 	};
 
@@ -600,8 +605,10 @@
 			<&clock_camcc CAM_CC_IFE_0_CLK_SRC>,
 			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
 			<&clock_camcc CAM_CC_IFE_0_AXI_CLK>;
-		clock-rates = <0 0 0 0 0 0 500000000 0 0 0 600000000 0 0>;
-		clock-cntl-level = "turbo";
+		clock-rates =
+			<0 0 0 0 0 0 384000000 0 0 0 404000000 0 0>,
+			<0 0 0 0 0 0 538000000 0 0 0 600000000 0 0>;
+		clock-cntl-level = "svs", "turbo";
 		src-clock-name = "ife_csid_clk_src";
 		status = "ok";
 	};
@@ -635,12 +642,15 @@
 			<&clock_camcc CAM_CC_IFE_0_CLK_SRC>,
 			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
 			<&clock_camcc CAM_CC_IFE_0_AXI_CLK>;
-		clock-rates = <0 0 0 0 0 0 600000000 0 0>;
-		clock-cntl-level = "turbo";
+		clock-rates =
+			<0 0 0 0 0 0 404000000 0 0>,
+			<0 0 0 0 0 0 480000000 0 0>,
+			<0 0 0 0 0 0 600000000 0 0>;
+		clock-cntl-level = "svs", "svs_l1", "turbo";
 		src-clock-name = "ife_clk_src";
 		clock-names-option =  "ife_dsp_clk";
 		clocks-option = <&clock_camcc CAM_CC_IFE_0_DSP_CLK>;
-		clock-rates-option = <404000000>;
+		clock-rates-option = <600000000>;
 		status = "ok";
 	};
 
@@ -681,8 +691,10 @@
 			<&clock_camcc CAM_CC_IFE_1_CLK_SRC>,
 			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
 			<&clock_camcc CAM_CC_IFE_1_AXI_CLK>;
-		clock-rates = <0 0 0 0 0 0 500000000 0 0 0 600000000 0 0>;
-		clock-cntl-level = "turbo";
+		clock-rates =
+			<0 0 0 0 0 0 384000000 0 0 0 404000000 0 0>,
+			<0 0 0 0 0 0 538000000 0 0 0 600000000 0 0>;
+		clock-cntl-level = "svs", "turbo";
 		src-clock-name = "ife_csid_clk_src";
 		status = "ok";
 	};
@@ -716,12 +728,15 @@
 			<&clock_camcc CAM_CC_IFE_1_CLK_SRC>,
 			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
 			<&clock_camcc CAM_CC_IFE_1_AXI_CLK>;
-		clock-rates = <0 0 0 0 0 0 600000000 0 0>;
-		clock-cntl-level = "turbo";
+		clock-rates =
+			<0 0 0 0 0 0 404000000 0 0>,
+			<0 0 0 0 0 0 480000000 0 0>,
+			<0 0 0 0 0 0 600000000 0 0>;
+		clock-cntl-level = "svs", "svs_l1", "turbo";
 		src-clock-name = "ife_clk_src";
 		clock-names-option =  "ife_dsp_clk";
 		clocks-option = <&clock_camcc CAM_CC_IFE_1_DSP_CLK>;
-		clock-rates-option = <404000000>;
+		clock-rates-option = <600000000>;
 		status = "ok";
 	};
 
@@ -759,8 +774,10 @@
 			<&clock_camcc CAM_CC_IFE_LITE_CLK>,
 			<&clock_camcc CAM_CC_IFE_LITE_CLK_SRC>,
 			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
-		clock-rates = <0 0 0 0 0 0 384000000 0 0 0 404000000 0>;
-		clock-cntl-level = "turbo";
+		clock-rates =
+			<0 0 0 0 0 0 384000000 0 0 0 404000000 0>,
+			<0 0 0 0 0 0 538000000 0 0 0 600000000 0>;
+		clock-cntl-level = "svs";
 		src-clock-name = "ife_csid_clk_src";
 		status = "ok";
 	};
@@ -791,8 +808,11 @@
 			<&clock_camcc CAM_CC_IFE_LITE_CLK>,
 			<&clock_camcc CAM_CC_IFE_LITE_CLK_SRC>,
 			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
-		clock-rates = <0 0 0 0 0 0 404000000 0>;
-		clock-cntl-level = "turbo";
+		clock-rates =
+			<0 0 0 0 0 0 404000000 0>,
+			<0 0 0 0 0 0 480000000 0>,
+			<0 0 0 0 0 0 600000000 0>;
+		clock-cntl-level = "svs", "svs_l1", "turbo";
 		src-clock-name = "ife_clk_src";
 		status = "ok";
 	};
@@ -838,9 +858,12 @@
 				<&clock_camcc CAM_CC_ICP_CLK>,
 				<&clock_camcc CAM_CC_ICP_CLK_SRC>;
 
-		clock-rates = <0 0 400000000 0 0 0 0 600000000>;
-		clock-cntl-level = "turbo";
+		clock-rates =
+			<0 0 200000000 0 0 0 0 400000000>,
+			<0 0 200000000 0 0 0 0 600000000>;
+		clock-cntl-level = "svs", "turbo";
 		fw_name = "CAMERA_ICP.elf";
+		ubwc-cfg = <0x7F 0x1FF>;
 		status = "ok";
 	};
 
@@ -861,7 +884,8 @@
 				<&clock_camcc CAM_CC_IPE_0_CLK>,
 				<&clock_camcc CAM_CC_IPE_0_CLK_SRC>;
 
-		clock-rates = <0 0 0 0 240000000>,
+		clock-rates =
+			<0 0 0 0 240000000>,
 			<0 0 0 0 404000000>,
 			<0 0 0 0 480000000>,
 			<0 0 0 0 538000000>,
@@ -1031,8 +1055,11 @@
 			<&clock_camcc CAM_CC_FD_CORE_CLK>,
 			<&clock_camcc CAM_CC_FD_CORE_UAR_CLK>;
 		src-clock-name = "fd_core_clk_src";
-		clock-cntl-level = "svs";
-		clock-rates = <0 0 0 0 0 400000000 0 0>;
+		clock-cntl-level = "svs", "svs_l1", "turbo";
+		clock-rates =
+			<0 0 0 0 0 400000000 0 0>,
+			<0 0 0 0 0 538000000 0 0>,
+			<0 0 0 0 0 600000000 0 0>;
 		status = "ok";
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
index a61d96e..8e36887 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
@@ -602,6 +602,7 @@
 				     <13 32>;
 		qcom,cmb-elem-size = <3 64>,
 				     <7 64>,
+				     <9 64>,
 				     <13 64>;
 
 		clocks = <&clock_aop QDSS_CLK>;
@@ -674,6 +675,15 @@
 			};
 
 			port@7 {
+				reg = <9>;
+				tpda_in_tpdm_prng: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_prng_out_tpda>;
+				};
+			};
+
+			port@8 {
 				reg = <10>;
 				tpda_in_tpdm_qm: endpoint {
 					slave-mode;
@@ -682,7 +692,7 @@
 				};
 			};
 
-			port@8 {
+			port@9 {
 				reg = <11>;
 				tpda_in_tpdm_north: endpoint {
 					slave-mode;
@@ -691,7 +701,7 @@
 				};
 			};
 
-			port@9 {
+			port@10 {
 				reg = <13>;
 				tpda_in_tpdm_pimem: endpoint {
 					slave-mode;
@@ -1329,6 +1339,24 @@
 		};
 	};
 
+	tpdm_prng: tpdm@684c000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b968>;
+		reg = <0x684c000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-prng";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+		port{
+			tpdm_prng_out_tpda: endpoint {
+				remote-endpoint = <&tpda_in_tpdm_prng>;
+			};
+		};
+	};
+
 	tpdm_vsense: tpdm@6840000 {
 		compatible = "arm,primecell";
 		arm,primecell-periphid = <0x0003b968>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
index eac21c8..ee0ad1f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
@@ -47,8 +47,8 @@
 		label = "kgsl-3d0";
 		compatible = "qcom,kgsl-3d0", "qcom,kgsl-3d";
 		status = "ok";
-		reg = <0x5000000 0x40000>;
-		reg-names = "kgsl_3d0_reg_memory";
+		reg = <0x5000000 0x40000>, <0x5061000 0x800>;
+		reg-names = "kgsl_3d0_reg_memory", "kgsl_3d0_cx_dbgc_memory";
 		interrupts = <0 300 0>;
 		interrupt-names = "kgsl_3d0_irq";
 		qcom,id = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-hdk-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm845-hdk-audio-overlay.dtsi
new file mode 100644
index 0000000..492f07b
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-hdk-audio-overlay.dtsi
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdm845-audio-overlay.dtsi"
+
+&snd_934x {
+	qcom,model = "sdm845-tavil-hdk-snd-card";
+
+	qcom,audio-routing =
+		"AIF4 VI", "MCLK",
+		"RX_BIAS", "MCLK",
+		"MADINPUT", "MCLK",
+		"AMIC2", "MIC BIAS2",
+		"MIC BIAS2", "Headset Mic",
+		"DMIC0", "MIC BIAS1",
+		"MIC BIAS1", "Digital Mic0",
+		"DMIC1", "MIC BIAS1",
+		"MIC BIAS1", "Digital Mic1",
+		"DMIC2", "MIC BIAS3",
+		"MIC BIAS3", "Digital Mic2",
+		"DMIC5", "MIC BIAS4",
+		"MIC BIAS4", "Digital Mic5",
+		"SpkrLeft IN", "SPK1 OUT";
+
+	qcom,wsa-max-devs = <1>;
+	qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0213>;
+	qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrLeft";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi b/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi
index 1a8de22..f38f5f8 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi
@@ -154,6 +154,12 @@
 	/delete-property/ switch-source;
 };
 
+&led_flash_rear_aux {
+	/delete-property/ flash-source;
+	/delete-property/ torch-source;
+	/delete-property/ switch-source;
+};
+
 &led_flash_front {
 	/delete-property/ flash-source;
 	/delete-property/ torch-source;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-qvr-overlay.dts
index b1b81d1..58f5782 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-qvr-overlay.dts
@@ -22,6 +22,7 @@
 #include "sdm845-sde-display.dtsi"
 #include "sdm845-qvr.dtsi"
 #include "sdm845-qvr-audio-overlay.dtsi"
+#include "sdm845-camera-sensor-qvr.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM845 v2 QVR";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
index c06b806..5513c92 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
@@ -15,6 +15,7 @@
 
 #include "sdm845-v2.dtsi"
 #include "sdm845-qvr.dtsi"
+#include "sdm845-camera-sensor-qvr.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM845 QVR";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
index 2d701a5..54d25e1 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
@@ -15,6 +15,27 @@
 #include "smb1355.dtsi"
 
 &vendor {
+	bluetooth: bt_wcn3990 {
+		compatible = "qca,wcn3990";
+		qca,bt-vdd-io-supply = <&pm8998_s3>;
+		qca,bt-vdd-xtal-supply = <&pm8998_s5>;
+		qca,bt-vdd-core-supply = <&pm8998_l7>;
+		qca,bt-vdd-pa-supply = <&pm8998_l17>;
+		qca,bt-vdd-ldo-supply = <&pm8998_l25>;
+
+		qca,bt-vdd-io-voltage-level = <1352000 1352000>;
+		qca,bt-vdd-xtal-voltage-level = <2040000 2040000>;
+		qca,bt-vdd-core-voltage-level = <1800000 1800000>;
+		qca,bt-vdd-pa-voltage-level = <1304000 1304000>;
+		qca,bt-vdd-ldo-voltage-level = <3312000 3312000>;
+
+		qca,bt-vdd-io-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-xtal-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-core-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-pa-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-ldo-current-level = <1>; /* LPM/PFM */
+	};
+
 	qvr_batterydata: qcom,battery-data {
 		qcom,batt-id-range-pct = <15>;
 		#include "fg-gen3-batterydata-mlp446579-3800mah.dtsi"
@@ -25,11 +46,20 @@
 	vbus-supply = <&smb2_vbus>;
 };
 
+&qupv3_se6_4uart {
+	status = "ok";
+};
+
 &pmi8998_fg {
 	qcom,battery-data = <&qvr_batterydata>;
 	qcom,fg-bmd-en-delay-ms = <300>;
 };
 
+&pmi8998_charger {
+	qcom,battery-data = <&qvr_batterydata>;
+	qcom,sw-jeita-enable;
+};
+
 &qupv3_se10_i2c {
 	status = "ok";
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
index 4254fcd..1e8c943 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
@@ -504,6 +504,13 @@
 &dsi_dual_nt35597_truly_cmd {
 	qcom,mdss-dsi-t-clk-post = <0x0D>;
 	qcom,mdss-dsi-t-clk-pre = <0x2D>;
+	qcom,esd-check-enabled;
+	qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+	qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+	qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode";
+	qcom,mdss-dsi-panel-status-value = <0x9c>;
+	qcom,mdss-dsi-panel-on-check-value = <0x9c>;
+	qcom,mdss-dsi-panel-status-read-length = <1>;
 	qcom,mdss-dsi-display-timings {
 		timing@0{
 			qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
@@ -518,6 +525,14 @@
 &dsi_nt35597_truly_dsc_cmd {
 	qcom,mdss-dsi-t-clk-post = <0x0b>;
 	qcom,mdss-dsi-t-clk-pre = <0x23>;
+	qcom,ulps-enabled;
+	qcom,esd-check-enabled;
+	qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+	qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+	qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode";
+	qcom,mdss-dsi-panel-status-value = <0x9c>;
+	qcom,mdss-dsi-panel-on-check-value = <0x9c>;
+	qcom,mdss-dsi-panel-status-read-length = <1>;
 	qcom,mdss-dsi-display-timings {
 		timing@0{
 			qcom,mdss-dsi-panel-phy-timings = [00 15 05 05 20 1f 05
@@ -633,15 +648,35 @@
 };
 
 &dsi_sim_cmd {
-	qcom,mdss-dsi-t-clk-post = <0x0d>;
-	qcom,mdss-dsi-t-clk-pre = <0x2d>;
+	qcom,mdss-dsi-t-clk-post = <0x0c>;
+	qcom,mdss-dsi-t-clk-pre = <0x29>;
 	qcom,mdss-dsi-display-timings {
 		timing@0{
-			qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
-				07 05 03 04 00];
 			qcom,display-topology = <1 0 1>,
-						<2 0 1>;
-			qcom,default-topology-index = <0>;
+						<2 2 1>;
+			qcom,default-topology-index = <1>;
+			qcom,panel-roi-alignment = <720 40 720 40 720 40>;
+			qcom,partial-update-enabled = "single_roi";
+			qcom,mdss-dsi-panel-phy-timings = [00 1a 06 06 22 20 07
+				07 04 03 04 00];
+		};
+		timing@1{
+			qcom,display-topology = <1 0 1>,
+						<2 2 1>;
+			qcom,default-topology-index = <1>;
+			qcom,panel-roi-alignment = <540 40 540 40 540 40>;
+			qcom,partial-update-enabled = "single_roi";
+			qcom,mdss-dsi-panel-phy-timings = [00 1a 06 06 22 20 07
+				07 04 03 04 00];
+		};
+		timing@2{
+			qcom,display-topology = <1 0 1>,
+						<2 2 1>;
+			qcom,default-topology-index = <1>;
+			qcom,panel-roi-alignment = <360 40 360 40 360 40>;
+			qcom,partial-update-enabled = "single_roi";
+			qcom,mdss-dsi-panel-phy-timings = [00 1a 06 06 22 20 07
+				07 04 03 04 00];
 		};
 	};
 };
@@ -657,8 +692,8 @@
 			qcom,default-topology-index = <0>;
 		};
 		timing@1{
-			qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
-				07 05 03 04 00];
+			qcom,mdss-dsi-panel-phy-timings = [00 30 0c 0d 2a 27 0c
+				0d 09 03 04 00];
 			qcom,display-topology = <2 0 2>,
 						<1 0 2>;
 			qcom,default-topology-index = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-pll.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-pll.dtsi
index b9eac3c..967865b 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-pll.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-pll.dtsi
@@ -23,6 +23,8 @@
 		clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>;
 		clock-names = "iface_clk";
 		clock-rate = <0>;
+		qcom,dsi-pll-ssc-en;
+		qcom,dsi-pll-ssc-mode = "down-spread";
 		gdsc-supply = <&mdss_core_gdsc>;
 		qcom,platform-supply-entries {
 			#address-cells = <1>;
@@ -50,6 +52,8 @@
 		clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>;
 		clock-names = "iface_clk";
 		clock-rate = <0>;
+		qcom,dsi-pll-ssc-en;
+		qcom,dsi-pll-ssc-mode = "down-spread";
 		gdsc-supply = <&mdss_core_gdsc>;
 		qcom,platform-supply-entries {
 			#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index 7c8eab4..0b8e6fd 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -134,6 +134,7 @@
 		qcom,sde-mixer-blendstages = <0xb>;
 		qcom,sde-highest-bank-bit = <0x2>;
 		qcom,sde-ubwc-version = <0x200>;
+		qcom,sde-smart-panel-align-mode = <0xc>;
 		qcom,sde-panic-per-pipe;
 		qcom,sde-has-cdp;
 		qcom,sde-has-src-split;
@@ -202,6 +203,9 @@
 
 		qcom,sde-cdp-setting = <1 1>, <1 0>;
 
+		qcom,sde-qos-cpu-mask = <0x3>;
+		qcom,sde-qos-cpu-dma-latency = <300>;
+
 		qcom,sde-inline-rotator = <&mdss_rotator 0>;
 		qcom,sde-inline-rot-xin = <10 11>;
 		qcom,sde-inline-rot-xin-type = "sspp", "wb";
@@ -451,7 +455,7 @@
 		clock-names = "byte_clk", "byte_clk_rcg", "byte_intf_clk",
 					"pixel_clk", "pixel_clk_rcg",
 					"esc_clk";
-
+		qcom,null-insertion-enabled;
 		qcom,ctrl-supply-entries {
 			#address-cells = <1>;
 			#size-cells = <0>;
@@ -485,6 +489,7 @@
 		<&clock_dispcc DISP_CC_MDSS_ESC1_CLK>;
 		clock-names = "byte_clk", "byte_clk_rcg", "byte_intf_clk",
 				"pixel_clk", "pixel_clk_rcg", "esc_clk";
+		qcom,null-insertion-enabled;
 		qcom,ctrl-supply-entries {
 			#address-cells = <1>;
 			#size-cells = <0>;
@@ -519,6 +524,8 @@
 						00 00 00 00
 						00 00 00 80];
 		qcom,platform-regulator-settings = [1d 1d 1d 1d 1d];
+		qcom,panel-allow-phy-poweroff;
+		qcom,dsi-phy-regulator-min-datarate-bps = <1200000000>;
 		qcom,phy-supply-entries {
 			#address-cells = <1>;
 			#size-cells = <0>;
@@ -552,6 +559,8 @@
 						00 00 00 00
 						00 00 00 00
 						00 00 00 80];
+		qcom,panel-allow-phy-poweroff;
+		qcom,dsi-phy-regulator-min-datarate-bps = <1200000000>;
 		qcom,phy-supply-entries {
 			#address-cells = <1>;
 			#size-cells = <0>;
@@ -574,7 +583,10 @@
 		vdda-1p2-supply = <&pm8998_l26>;
 		vdda-0p9-supply = <&pm8998_l1>;
 
-		reg =	<0xae90000 0xa84>,
+		reg =	<0xae90000 0x0dc>,
+			<0xae90200 0x0c0>,
+			<0xae90400 0x508>,
+			<0xae90a00 0x094>,
 			<0x88eaa00 0x200>,
 			<0x88ea200 0x200>,
 			<0x88ea600 0x200>,
@@ -583,7 +595,9 @@
 			<0x88ea030 0x10>,
 			<0x88e8000 0x20>,
 			<0x0aee1000 0x034>;
-		reg-names = "dp_ctrl", "dp_phy", "dp_ln_tx0", "dp_ln_tx1",
+		/* dp_ctrl: dp_ahb, dp_aux, dp_link, dp_p0 */
+		reg-names = "dp_ahb", "dp_aux", "dp_link",
+			"dp_p0", "dp_phy", "dp_ln_tx0", "dp_ln_tx1",
 			"dp_mmss_cc", "qfprom_physical", "dp_pll",
 			"usb3_dp_com", "hdcp_physical";
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
index ba397e5..70fe3e7 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
@@ -119,8 +119,9 @@
 		compatible = "qcom,qusb2phy-v2";
 		reg = <0x088e2000 0x400>,
 			<0x007801e8 0x4>,
-			<0x088e0000 0x2000>;
-		reg-names = "qusb_phy_base", "efuse_addr", "eud_base";
+			<0x088e7014 0x4>;
+		reg-names = "qusb_phy_base", "efuse_addr",
+				"refgen_north_bg_reg_addr";
 
 		qcom,efuse-bit-pos = <25>;
 		qcom,efuse-num-bits = <3>;
@@ -134,7 +135,8 @@
 			 0x210 /* QUSB2PHY_PWR_CTRL1 */
 			 0x230 /* QUSB2PHY_INTR_CTRL */
 			 0x0a8 /* QUSB2PHY_PLL_CORE_INPUT_OVERRIDE */
-			 0x254>; /* QUSB2PHY_TEST1 */
+			 0x254 /* QUSB2PHY_TEST1 */
+			 0x198>; /* PLL_BIAS_CONTROL_2 */
 
 		qcom,qusb-phy-init-seq =
 			/* <value reg_offset> */
@@ -222,6 +224,8 @@
 			 0x14fc 0x80 0x00 /* RXA_RX_OFFSET_ADAPTOR_CNTRL2 */
 			 0x1504 0x03 0x00 /* RXA_SIGDET_CNTRL */
 			 0x150c 0x16 0x00 /* RXA_SIGDET_DEGLITCH_CNTRL */
+			 0x1564 0x05 0x00 /* RXA_RX_MODE_00 */
+			 0x14c0 0x03 0x00 /* RXA_VGA_CAL_CNTRL2 */
 			 0x1830 0x0b 0x00 /* RXB_UCDR_FASTLOCK_FO_GAIN */
 			 0x18d4 0x0f 0x00 /* RXB_RX_EQU_ADAPTOR_CNTRL2 */
 			 0x18d8 0x4e 0x00 /* RXB_RX_EQU_ADAPTOR_CNTRL3 */
@@ -230,6 +234,8 @@
 			 0x18fc 0x80 0x00 /* RXB_RX_OFFSET_ADAPTOR_CNTRL2 */
 			 0x1904 0x03 0x00 /* RXB_SIGDET_CNTRL */
 			 0x190c 0x16 0x00 /* RXB_SIGDET_DEGLITCH_CNTRL */
+			 0x1964 0x05 0x00 /* RXB_RX_MODE_00 */
+			 0x18c0 0x03 0x00 /* RXB_VGA_CAL_CNTRL2 */
 			 0x1260 0x10 0x00 /* TXA_HIGHZ_DRVR_EN */
 			 0x12a4 0x12 0x00 /* TXA_RCV_DETECT_LVL_2 */
 			 0x128c 0x16 0x00 /* TXA_LANE_MODE_1 */
@@ -270,6 +276,8 @@
 			 0x1c48 0x0d 0x00 /* PCS_TXDEEMPH_M3P5DB_V4 */
 			 0x1c4c 0x15 0x00 /* PCS_TXDEEMPH_M6DB_LS */
 			 0x1c50 0x0d 0x00 /* PCS_TXDEEMPH_M3P5DB_LS */
+			 0x1e0c 0x21 0x00 /* PCS_REFGEN_REQ_CONFIG1 */
+			 0x1e10 0x60 0x00 /* PCS_REFGEN_REQ_CONFIG2 */
 			 0x1c5c 0x02 0x00 /* PCS_RATE_SLEW_CNTRL */
 			 0x1ca0 0x04 0x00 /* PCS_PWRUP_RESET_DLY_TIME_AUXCLK */
 			 0x1c8c 0x44 0x00 /* PCS_TSYNC_RSYNC_TIME */
@@ -280,6 +288,7 @@
 			 0x1cb8 0x75 0x00 /* PCS_RXEQTRAINING_WAIT_TIME */
 			 0x1cb0 0x86 0x00 /* PCS_LFPS_TX_ECSTART_EQTLOCK */
 			 0x1cbc 0x13 0x00 /* PCS_RXEQTRAINING_RUN_TIME */
+			 0x1cac 0x04 0x00 /* PCS_LFPS_DET_HIGH_COUNT_VAL */
 			 0xffffffff 0xffffffff 0x00>;
 
 		qcom,qmp-phy-reg-offset =
@@ -396,8 +405,10 @@
 	/* Secondary USB port related QUSB2 PHY */
 	qusb_phy1: qusb@88e3000 {
 		compatible = "qcom,qusb2phy-v2";
-		reg = <0x088e3000 0x400>;
-		reg-names = "qusb_phy_base";
+		reg = <0x088e3000 0x400>,
+			<0x088e7014 0x4>;
+		reg-names = "qusb_phy_base",
+				"refgen_north_bg_reg_addr";
 
 		vdd-supply = <&pm8998_l1>;
 		vdda18-supply = <&pm8998_l12>;
@@ -409,7 +420,8 @@
 			 0x210 /* QUSB2PHY_PWR_CTRL1 */
 			 0x230 /* QUSB2PHY_INTR_CTRL */
 			 0x0a8 /* QUSB2PHY_PLL_CORE_INPUT_OVERRIDE */
-			 0x254>; /* QUSB2PHY_TEST1 */
+			 0x254 /* QUSB2PHY_TEST1 */
+			 0x198>; /* PLL_BIAS_CONTROL_2 */
 
 		qcom,qusb-phy-init-seq =
 			/* <value reg_offset> */
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
index a456765..d2ee9eb 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
@@ -157,6 +157,33 @@
 		compatible = "qcom,msm-cam-smmu";
 		status = "ok";
 
+		msm_cam_smmu_lrme {
+			compatible = "qcom,msm-cam-smmu-cb";
+			iommus = <&apps_smmu 0x1038 0x0>,
+				<&apps_smmu 0x1058 0x0>,
+				<&apps_smmu 0x1039 0x0>,
+				<&apps_smmu 0x1059 0x0>;
+			label = "lrme";
+			lrme_iova_mem_map: iova-mem-map {
+				iova-mem-region-shared {
+					/* Shared region is 100MB long */
+					iova-region-name = "shared";
+					iova-region-start = <0x7400000>;
+					iova-region-len = <0x6400000>;
+					iova-region-id = <0x1>;
+					status = "ok";
+				};
+				/* IO region is approximately 3.3 GB */
+				iova-mem-region-io {
+					iova-region-name = "io";
+					iova-region-start = <0xd800000>;
+					iova-region-len = <0xd2800000>;
+					iova-region-id = <0x3>;
+					status = "ok";
+				};
+			};
+		};
+
 		msm_cam_smmu_ife {
 			compatible = "qcom,msm-cam-smmu-cb";
 			iommus = <&apps_smmu 0x808 0x0>,
@@ -297,17 +324,17 @@
 			<MSM_BUS_MASTER_AMPSS_M0
 			MSM_BUS_SLAVE_CAMERA_CFG 0 0>,
 			<MSM_BUS_MASTER_AMPSS_M0
-			MSM_BUS_SLAVE_CAMERA_CFG 0 153000>,
+			MSM_BUS_SLAVE_CAMERA_CFG 0 76500>,
 			<MSM_BUS_MASTER_AMPSS_M0
-			MSM_BUS_SLAVE_CAMERA_CFG 0 153000>,
+			MSM_BUS_SLAVE_CAMERA_CFG 0 76500>,
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 150000>,
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 150000>,
 			<MSM_BUS_MASTER_AMPSS_M0
 			MSM_BUS_SLAVE_CAMERA_CFG 0 300000>,
 			<MSM_BUS_MASTER_AMPSS_M0
-			MSM_BUS_SLAVE_CAMERA_CFG 0 300000>,
-			<MSM_BUS_MASTER_AMPSS_M0
-			MSM_BUS_SLAVE_CAMERA_CFG 0 600000>,
-			<MSM_BUS_MASTER_AMPSS_M0
-			MSM_BUS_SLAVE_CAMERA_CFG 0 600000>;
+			MSM_BUS_SLAVE_CAMERA_CFG 0 300000>;
 		vdd-corners = <RPMH_REGULATOR_LEVEL_OFF
 			RPMH_REGULATOR_LEVEL_RETENTION
 			RPMH_REGULATOR_LEVEL_MIN_SVS
@@ -329,13 +356,14 @@
 			"csid0", "csid1", "csid2",
 			"ife0", "ife1", "ife2", "ipe0",
 			"ipe1", "cam-cdm-intf0", "cpas-cdm0", "bps0",
-			"icp0", "jpeg-dma0", "jpeg-enc0", "fd0";
+			"icp0", "jpeg-dma0", "jpeg-enc0", "fd0", "lrmecpas0";
 		client-axi-port-names =
 			"cam_hf_1", "cam_hf_2", "cam_hf_2", "cam_hf_2",
 			"cam_sf_1", "cam_hf_1", "cam_hf_2", "cam_hf_2",
 			"cam_hf_1", "cam_hf_2", "cam_hf_2", "cam_sf_1",
 			"cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1",
-			"cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1";
+			"cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1",
+			"cam_sf_1";
 		client-bus-camnoc-based;
 		qcom,axi-port-list {
 			qcom,axi-port1 {
@@ -415,4 +443,44 @@
 			};
 		};
 	};
+
+	qcom,cam-lrme {
+		compatible = "qcom,cam-lrme";
+		arch-compat = "lrme";
+		status = "ok";
+	};
+
+	cam_lrme: qcom,lrme@ac6b000 {
+		cell-index = <0>;
+		compatible = "qcom,lrme";
+		reg-names = "lrme";
+		reg = <0xac6b000 0xa00>;
+		reg-cam-base = <0x6b000>;
+		interrupt-names = "lrme";
+		interrupts = <0 476 0>;
+		regulator-names = "camss";
+		camss-supply = <&titan_top_gdsc>;
+		clock-names = "camera_ahb",
+			"camera_axi",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"camnoc_axi_clk",
+			"lrme_clk_src",
+			"lrme_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+			<&clock_camcc CAM_CC_LRME_CLK_SRC>,
+			<&clock_camcc CAM_CC_LRME_CLK>;
+		clock-rates = <0 0 0 0 0 200000000 200000000>,
+			<0 0 0 0 0 269000000 269000000>,
+			<0 0 0 0 0 320000000 320000000>,
+			<0 0 0 0 0 400000000 400000000>;
+
+		clock-cntl-level = "lowsvs", "svs", "svs_l1", "turbo";
+		src-clock-name = "lrme_clk_src";
+		status = "ok";
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
index cfa4517..db2fcc1 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
@@ -29,6 +29,32 @@
 };
 
 &soc {
+	qcom,memshare {
+		compatible = "qcom,memshare";
+
+		qcom,client_1 {
+			compatible = "qcom,memshare-peripheral";
+			qcom,peripheral-size = <0x0>;
+			qcom,client-id = <0>;
+			qcom,allocate-boot-time;
+			label = "modem";
+		};
+
+		qcom,client_2 {
+			compatible = "qcom,memshare-peripheral";
+			qcom,peripheral-size = <0x0>;
+			qcom,client-id = <2>;
+			label = "modem";
+		};
+
+		mem_client_3_size: qcom,client_3 {
+			compatible = "qcom,memshare-peripheral";
+			qcom,peripheral-size = <0x500000>;
+			qcom,client-id = <1>;
+			label = "modem";
+		};
+	};
+
 	gpu_gx_domain_addr: syscon@0x5091508 {
 		compatible = "syscon";
 		reg = <0x5091508 0x4>;
@@ -239,9 +265,8 @@
 		< 1324800 1036800000 >,
 		< 1420800 1132800000 >,
 		< 1516800 1209600000 >,
-		< 1612800 1401600000 >,
-		< 1689600 1497600000 >,
-		< 1766400 1593600000 >;
+		< 1689600 1305600000 >,
+		< 1766400 1401600000 >;
 };
 
 &devfreq_l3lat_4 {
@@ -251,8 +276,9 @@
 		< 1132800  748800000 >,
 		< 1363200  940800000 >,
 		< 1689600 1209600000 >,
-		< 1996800 1401600000 >,
-		< 2400000 1593600000 >;
+		< 1996800 1305600000 >,
+		< 2400000 1401600000 >,
+		< 2745600 1593600000 >;
 };
 
 &bwmon {
@@ -278,6 +304,13 @@
 	};
 };
 
+&devfreq_compute {
+	qcom,core-dev-table =
+		< 1881600 MHZ_TO_MBPS( 200, 4) >,
+		< 2649600 MHZ_TO_MBPS(1017, 4) >,
+		< 2745600 MHZ_TO_MBPS(1804, 4) >;
+};
+
 &clock_gcc {
 	compatible = "qcom,gcc-sdm845-v2", "syscon";
 };
@@ -432,6 +465,9 @@
 			2553600 12045
 			2649600 15686
 			2745600 25586
+			2764800 30000
+			2784000 35000
+			2803200 40000
 		>;
 		idle-cost-data = <
 			100 80 60 40
@@ -495,6 +531,9 @@
 			2553600 145
 			2649600 150
 			2745600 155
+			2764800 160
+			2784000 165
+			2803200 170
 		>;
 		idle-cost-data = <
 			4 3 2 1
@@ -623,7 +662,7 @@
 			    0x40 0x194 /* PLL_BIAS_CONTROL_1 */
 			    0x20 0x198 /* PLL_BIAS_CONTROL_2 */
 			    0x21 0x214 /* PWR_CTRL2 */
-			    0x07 0x220 /* IMP_CTRL1 */
+			    0x08 0x220 /* IMP_CTRL1 */
 			    0x58 0x224 /* IMP_CTRL2 */
 			    0x45 0x240 /* TUNE1 */
 			    0x29 0x244 /* TUNE2 */
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 86705d6..e8e9ce7 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -592,7 +592,7 @@
 			alloc-ranges = <0 0x00000000 0 0xffffffff>;
 			reusable;
 			alignment = <0 0x400000>;
-			size = <0 0xc00000>;
+			size = <0 0x1000000>;
 		};
 
 		qseecom_mem: qseecom_region {
@@ -1131,6 +1131,15 @@
 		};
 	};
 
+	devfreq_compute: qcom,devfreq-compute {
+		compatible = "qcom,arm-cpu-mon";
+		qcom,cpulist = <&CPU4 &CPU5 &CPU6 &CPU7>;
+		qcom,target-dev = <&mincpubw>;
+		qcom,core-dev-table =
+			< 1881600 MHZ_TO_MBPS(200, 4) >,
+			< 2208000 MHZ_TO_MBPS(681, 4) >;
+	};
+
 	clock_rpmh: qcom,rpmhclk {
 		compatible = "qcom,rpmh-clk-sdm845";
 		#clock-cells = <1>;
@@ -1368,7 +1377,13 @@
 		<123 512 2097152 0>, <1 757 102400 0>,  /* HS G3 RB */
 		<123 512 298189 0>, <1 757 1000 0>,  /* HS G1 RB L2 */
 		<123 512 596378 0>, <1 757 1000 0>,  /* HS G2 RB L2 */
-		<123 512 4194304 0>, <1 757 204800 0>, /* HS G3 RB L2 */
+		/* As UFS works in HS G3 RB L2 mode, the aggregated
+		 * bandwidth (AB) should take care of providing the
+		 * optimum requested throughput. However, as tested,
+		 * in order to scale up the CNOC clock, the instantaneous
+		 * bandwidth (IB) needs to be given a proper value too.
+		 */
+		<123 512 4194304 0>, <1 757 204800 409600>, /* HS G3 RB L2 */
 		<123 512 7643136 0>, <1 757 307200 0>; /* Max. bandwidth */
 
 		qcom,bus-vector-names = "MIN",
@@ -2178,6 +2193,7 @@
 		qcom,irq-mask = <0x100>;
 		interrupts = <GIC_SPI 156 IRQ_TYPE_EDGE_RISING>;
 		label = "lpass";
+		cpu-affinity = <1 2>;
 		qcom,qos-config = <&glink_qos_adsp>;
 		qcom,ramp-time = <0xaf>;
 	};
@@ -3545,6 +3561,11 @@
 			qcom,dump-id = <0xec>;
 		};
 
+		fcm_dump {
+			qcom,dump-size = <0x400>;
+			qcom,dump-id = <0xee>;
+		};
+
 		rpm_sw_dump {
 			qcom,dump-size = <0x28000>;
 			qcom,dump-id = <0xea>;
diff --git a/arch/arm64/configs/msm8953-perf_defconfig b/arch/arm64/configs/msm8953-perf_defconfig
index 6b83b36..12365b3 100644
--- a/arch/arm64/configs/msm8953-perf_defconfig
+++ b/arch/arm64/configs/msm8953-perf_defconfig
@@ -51,6 +51,7 @@
 # CONFIG_IOSCHED_DEADLINE is not set
 CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_MSM8953=y
+CONFIG_ARCH_SDM450=y
 CONFIG_SCHED_MC=y
 CONFIG_NR_CPUS=8
 CONFIG_PREEMPT=y
diff --git a/arch/arm64/configs/msm8953_defconfig b/arch/arm64/configs/msm8953_defconfig
index 791d349..8757cc3 100644
--- a/arch/arm64/configs/msm8953_defconfig
+++ b/arch/arm64/configs/msm8953_defconfig
@@ -55,6 +55,7 @@
 # CONFIG_IOSCHED_DEADLINE is not set
 CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_MSM8953=y
+CONFIG_ARCH_SDM450=y
 CONFIG_SCHED_MC=y
 CONFIG_NR_CPUS=8
 CONFIG_PREEMPT=y
diff --git a/arch/arm64/configs/sdm670-perf_defconfig b/arch/arm64/configs/sdm670-perf_defconfig
index 0228c36..371c77e 100644
--- a/arch/arm64/configs/sdm670-perf_defconfig
+++ b/arch/arm64/configs/sdm670-perf_defconfig
@@ -21,6 +21,7 @@
 CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_BLK_CGROUP=y
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_CGROUP_BPF=y
 CONFIG_SCHED_CORE_CTL=y
@@ -52,6 +53,7 @@
 CONFIG_MODULE_SIG_FORCE=y
 CONFIG_MODULE_SIG_SHA512=y
 CONFIG_PARTITION_ADVANCED=y
+CONFIG_CFQ_GROUP_IOSCHED=y
 CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_SDM670=y
 CONFIG_PCI=y
@@ -69,6 +71,7 @@
 CONFIG_CP15_BARRIER_EMULATION=y
 CONFIG_SETEND_EMULATION=y
 # CONFIG_ARM64_VHE is not set
+# CONFIG_EFI is not set
 CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_COMPAT=y
@@ -279,8 +282,6 @@
 CONFIG_PPP_ASYNC=y
 CONFIG_PPP_SYNC_TTY=y
 CONFIG_USB_USBNET=y
-CONFIG_WIL6210=m
-# CONFIG_WIL6210_TRACING is not set
 CONFIG_WCNSS_MEM_PRE_ALLOC=y
 CONFIG_CLD_LL_CORE=y
 CONFIG_CNSS_GENL=y
diff --git a/arch/arm64/configs/sdm670_defconfig b/arch/arm64/configs/sdm670_defconfig
index 529d605..f6c3ec7 100644
--- a/arch/arm64/configs/sdm670_defconfig
+++ b/arch/arm64/configs/sdm670_defconfig
@@ -22,6 +22,8 @@
 CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_BLK_CGROUP=y
+CONFIG_DEBUG_BLK_CGROUP=y
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_CGROUP_BPF=y
 CONFIG_SCHED_CORE_CTL=y
@@ -54,6 +56,7 @@
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_PARTITION_ADVANCED=y
 # CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_CFQ_GROUP_IOSCHED=y
 CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_SDM670=y
 CONFIG_PCI=y
@@ -264,6 +267,7 @@
 CONFIG_MD=y
 CONFIG_BLK_DEV_DM=y
 CONFIG_DM_CRYPT=y
+CONFIG_DM_REQ_CRYPT=y
 CONFIG_DM_UEVENT=y
 CONFIG_DM_VERITY=y
 CONFIG_DM_VERITY_FEC=y
@@ -284,7 +288,6 @@
 CONFIG_PPP_ASYNC=y
 CONFIG_PPP_SYNC_TTY=y
 CONFIG_USB_USBNET=y
-CONFIG_WIL6210=m
 CONFIG_WCNSS_MEM_PRE_ALLOC=y
 CONFIG_CLD_LL_CORE=y
 CONFIG_CNSS_GENL=y
@@ -300,8 +303,6 @@
 # CONFIG_SERIO_SERPORT is not set
 # CONFIG_VT is not set
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVMEM is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_MSM_GENI=y
 CONFIG_SERIAL_MSM_GENI_CONSOLE=y
 CONFIG_DIAG_CHAR=y
@@ -454,6 +455,7 @@
 CONFIG_EDAC=y
 CONFIG_EDAC_MM_EDAC=y
 CONFIG_EDAC_KRYO3XX_ARM64=y
+CONFIG_EDAC_KRYO3XX_ARM64_PANIC_ON_CE=y
 CONFIG_EDAC_KRYO3XX_ARM64_PANIC_ON_UE=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_QPNP=y
@@ -646,6 +648,7 @@
 CONFIG_CORESIGHT_TPDM=y
 CONFIG_CORESIGHT_CTI=y
 CONFIG_CORESIGHT_EVENT=y
+CONFIG_CORESIGHT_TGU=y
 CONFIG_CORESIGHT_HWEVENT=y
 CONFIG_CORESIGHT_DUMMY=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index 0300ef5..1cfa935 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -534,6 +534,7 @@
 CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
 CONFIG_QSEE_IPC_IRQ_BRIDGE=y
 CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_ARM_MEMLAT_MON=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index 18ce445..eceb4be 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -301,8 +301,6 @@
 # CONFIG_SERIO_SERPORT is not set
 # CONFIG_VT is not set
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVMEM is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_MSM_GENI=y
 CONFIG_SERIAL_MSM_GENI_CONSOLE=y
 CONFIG_DIAG_CHAR=y
@@ -550,6 +548,7 @@
 CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
 CONFIG_MSM_REMOTEQDSS=y
 CONFIG_QSEE_IPC_IRQ_BRIDGE=y
 CONFIG_QCOM_BIMC_BWMON=y
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 15fef4c..0522f50 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -31,6 +31,7 @@
 #include <linux/vmalloc.h>
 #include <linux/swiotlb.h>
 #include <linux/io.h>
+#include <linux/pci.h>
 
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
@@ -1223,12 +1224,12 @@
 
 	addr = addr & PAGE_MASK;
 	size = PAGE_ALIGN(size);
-	if (mapping->min_iova_align)
+	if (mapping->min_iova_align) {
 		guard_len = ALIGN(size, mapping->min_iova_align) - size;
-	else
+		iommu_unmap(mapping->domain, addr + size, guard_len);
+	} else {
 		guard_len = 0;
-
-	iommu_unmap(mapping->domain, addr + size, guard_len);
+	}
 
 	start = (addr - mapping->base) >> PAGE_SHIFT;
 	count = (size + guard_len) >> PAGE_SHIFT;
@@ -1978,10 +1979,10 @@
 {
 	unsigned int bitmap_size = BITS_TO_LONGS(mapping->bits) * sizeof(long);
 	int vmid = VMID_HLOS;
-	bool min_iova_align = 0;
+	int min_iova_align = 0;
 
 	iommu_domain_get_attr(mapping->domain,
-			DOMAIN_ATTR_QCOM_MMU500_ERRATA_MIN_ALIGN,
+			DOMAIN_ATTR_MMU500_ERRATA_MIN_ALIGN,
 			&min_iova_align);
 	iommu_domain_get_attr(mapping->domain,
 			DOMAIN_ATTR_SECURE_VMID, &vmid);
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 23de307..41e60a9 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -742,7 +742,7 @@
 10:	ldd	0(%r25), %r25
 11:	ldd	0(%r24), %r24
 #else
-	/* Load new value into r22/r23 - high/low */
+	/* Load old value into r22/r23 - high/low */
 10:	ldw	0(%r25), %r22
 11:	ldw	4(%r25), %r23
 	/* Load new value into fr4 for atomic store later */
@@ -834,11 +834,11 @@
 	copy	%r0, %r28
 #else
 	/* Compare first word */
-19:	ldw,ma	0(%r26), %r29
+19:	ldw	0(%r26), %r29
 	sub,=	%r29, %r22, %r0
 	b,n	cas2_end
 	/* Compare second word */
-20:	ldw,ma	4(%r26), %r29
+20:	ldw	4(%r26), %r29
 	sub,=	%r29, %r23, %r0
 	b,n	cas2_end
 	/* Perform the store */
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 70963c8..fc0df0f 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -601,8 +601,7 @@
 		break;
 #endif
 	case KVM_CAP_PPC_HTM:
-		r = cpu_has_feature(CPU_FTR_TM_COMP) &&
-		    is_kvmppc_hv_enabled(kvm);
+		r = cpu_has_feature(CPU_FTR_TM_COMP) && hv_enabled;
 		break;
 	default:
 		r = 0;
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index cdc0dea..13dbcc0 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -34,6 +34,7 @@
 #include <linux/mm.h>
 
 #include <asm/microcode_intel.h>
+#include <asm/intel-family.h>
 #include <asm/processor.h>
 #include <asm/tlbflush.h>
 #include <asm/setup.h>
@@ -1046,6 +1047,18 @@
 	return 0;
 }
 
+static bool is_blacklisted(unsigned int cpu)
+{
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+	if (c->x86 == 6 && c->x86_model == INTEL_FAM6_BROADWELL_X) {
+		pr_err_once("late loading on model 79 is disabled.\n");
+		return true;
+	}
+
+	return false;
+}
+
 static enum ucode_state request_microcode_fw(int cpu, struct device *device,
 					     bool refresh_fw)
 {
@@ -1054,6 +1067,9 @@
 	const struct firmware *firmware;
 	enum ucode_state ret;
 
+	if (is_blacklisted(cpu))
+		return UCODE_NFOUND;
+
 	sprintf(name, "intel-ucode/%02x-%02x-%02x",
 		c->x86, c->x86_model, c->x86_mask);
 
@@ -1078,6 +1094,9 @@
 static enum ucode_state
 request_microcode_user(int cpu, const void __user *buf, size_t size)
 {
+	if (is_blacklisted(cpu))
+		return UCODE_NFOUND;
+
 	return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
 }
 
diff --git a/block/bio.c b/block/bio.c
index 07f287b..e14a897 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -589,7 +589,7 @@
 	bio->bi_opf = bio_src->bi_opf;
 	bio->bi_iter = bio_src->bi_iter;
 	bio->bi_io_vec = bio_src->bi_io_vec;
-
+	bio->bi_dio_inode = bio_src->bi_dio_inode;
 	bio_clone_blkcg_association(bio, bio_src);
 }
 EXPORT_SYMBOL(__bio_clone_fast);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index abde370..0272fac 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -6,7 +6,7 @@
 #include <linux/bio.h>
 #include <linux/blkdev.h>
 #include <linux/scatterlist.h>
-
+#include <linux/pfk.h>
 #include <trace/events/block.h>
 
 #include "blk.h"
@@ -725,6 +725,11 @@
 	}
 }
 
+static bool crypto_not_mergeable(const struct bio *bio, const struct bio *nxt)
+{
+	return !pfk_allow_merge_bio(bio, nxt);
+}
+
 /*
  * Has to be called with the request spinlock acquired
  */
@@ -752,6 +757,8 @@
 	    !blk_write_same_mergeable(req->bio, next->bio))
 		return 0;
 
+	if (crypto_not_mergeable(req->bio, next->bio))
+		return 0;
 	/*
 	 * If we are allowed to merge, then append bio list
 	 * from next to rq and release next. merge_requests_fn
@@ -862,6 +869,8 @@
 	    !blk_write_same_mergeable(rq->bio, bio))
 		return false;
 
+	if (crypto_not_mergeable(rq->bio, bio))
+		return false;
 	return true;
 }
 
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index c7c3d4e..4ac4910 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2948,10 +2948,11 @@
 
 	/*
 	 * SSD device without seek penalty, disable idling. But only do so
-	 * for devices that support queuing, otherwise we still have a problem
-	 * with sync vs async workloads.
+	 * for devices that support queuing (and when group idle is 0),
+	 * otherwise we still have a problem with sync vs async workloads.
 	 */
-	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
+	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag &&
+		!cfqd->cfq_group_idle)
 		return;
 
 	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c
index af4cd86..d140d8bb 100644
--- a/crypto/asymmetric_keys/pkcs7_parser.c
+++ b/crypto/asymmetric_keys/pkcs7_parser.c
@@ -88,6 +88,9 @@
 	bool want = false;
 
 	sinfo = msg->signed_infos;
+	if (!sinfo)
+		goto inconsistent;
+
 	if (sinfo->authattrs) {
 		want = true;
 		msg->have_authattrs = true;
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 256a1d5..1ac1e5e 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -465,9 +465,8 @@
 };
 
 enum binder_deferred_state {
-	BINDER_DEFERRED_PUT_FILES    = 0x01,
-	BINDER_DEFERRED_FLUSH        = 0x02,
-	BINDER_DEFERRED_RELEASE      = 0x04,
+	BINDER_DEFERRED_FLUSH        = 0x01,
+	BINDER_DEFERRED_RELEASE      = 0x02,
 };
 
 /**
@@ -504,8 +503,6 @@
  *                        (invariant after initialized)
  * @tsk                   task_struct for group_leader of process
  *                        (invariant after initialized)
- * @files                 files_struct for process
- *                        (invariant after initialized)
  * @deferred_work_node:   element for binder_deferred_list
  *                        (protected by binder_deferred_lock)
  * @deferred_work:        bitmap of deferred work to perform
@@ -552,7 +549,6 @@
 	struct list_head waiting_threads;
 	int pid;
 	struct task_struct *tsk;
-	struct files_struct *files;
 	struct hlist_node deferred_work_node;
 	int deferred_work;
 	bool is_dead;
@@ -901,22 +897,34 @@
 static void binder_free_proc(struct binder_proc *proc);
 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
 
+struct files_struct *binder_get_files_struct(struct binder_proc *proc)
+{
+	return get_files_struct(proc->tsk);
+}
+
 static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
 {
-	struct files_struct *files = proc->files;
+	struct files_struct *files;
 	unsigned long rlim_cur;
 	unsigned long irqs;
+	int ret;
 
+	files = binder_get_files_struct(proc);
 	if (files == NULL)
 		return -ESRCH;
 
-	if (!lock_task_sighand(proc->tsk, &irqs))
-		return -EMFILE;
+	if (!lock_task_sighand(proc->tsk, &irqs)) {
+		ret = -EMFILE;
+		goto err;
+	}
 
 	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
 	unlock_task_sighand(proc->tsk, &irqs);
 
-	return __alloc_fd(files, 0, rlim_cur, flags);
+	ret = __alloc_fd(files, 0, rlim_cur, flags);
+err:
+	put_files_struct(files);
+	return ret;
 }
 
 /*
@@ -925,8 +933,12 @@
 static void task_fd_install(
 	struct binder_proc *proc, unsigned int fd, struct file *file)
 {
-	if (proc->files)
-		__fd_install(proc->files, fd, file);
+	struct files_struct *files = binder_get_files_struct(proc);
+
+	if (files) {
+		__fd_install(files, fd, file);
+		put_files_struct(files);
+	}
 }
 
 /*
@@ -934,18 +946,20 @@
  */
 static long task_close_fd(struct binder_proc *proc, unsigned int fd)
 {
+	struct files_struct *files = binder_get_files_struct(proc);
 	int retval;
 
-	if (proc->files == NULL)
+	if (files == NULL)
 		return -ESRCH;
 
-	retval = __close_fd(proc->files, fd);
+	retval = __close_fd(files, fd);
 	/* can't restart close syscall because file table entry was cleared */
 	if (unlikely(retval == -ERESTARTSYS ||
 		     retval == -ERESTARTNOINTR ||
 		     retval == -ERESTARTNOHAND ||
 		     retval == -ERESTART_RESTARTBLOCK))
 		retval = -EINTR;
+	put_files_struct(files);
 
 	return retval;
 }
@@ -4757,7 +4771,6 @@
 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
 		     (unsigned long)pgprot_val(vma->vm_page_prot));
 	binder_alloc_vma_close(&proc->alloc);
-	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
 }
 
 static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -4799,10 +4812,8 @@
 	vma->vm_private_data = proc;
 
 	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
-	if (ret)
-		return ret;
-	proc->files = get_files_struct(current);
-	return 0;
+
+	return ret;
 
 err_bad_arg:
 	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
@@ -4981,8 +4992,6 @@
 	struct rb_node *n;
 	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
 
-	BUG_ON(proc->files);
-
 	mutex_lock(&binder_procs_lock);
 	hlist_del(&proc->proc_node);
 	mutex_unlock(&binder_procs_lock);
@@ -5064,8 +5073,6 @@
 static void binder_deferred_func(struct work_struct *work)
 {
 	struct binder_proc *proc;
-	struct files_struct *files;
-
 	int defer;
 
 	do {
@@ -5082,21 +5089,11 @@
 		}
 		mutex_unlock(&binder_deferred_lock);
 
-		files = NULL;
-		if (defer & BINDER_DEFERRED_PUT_FILES) {
-			files = proc->files;
-			if (files)
-				proc->files = NULL;
-		}
-
 		if (defer & BINDER_DEFERRED_FLUSH)
 			binder_deferred_flush(proc);
 
 		if (defer & BINDER_DEFERRED_RELEASE)
 			binder_deferred_release(proc); /* frees proc */
-
-		if (files)
-			put_files_struct(files);
 	} while (proc);
 }
 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 9057411..3ad1bcf 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -560,7 +560,7 @@
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
 				   alloc->pid, buffer->data,
-				   prev->data, next->data);
+				   prev->data, next ? next->data : NULL);
 		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
 					 buffer_start_page(buffer) + PAGE_SIZE);
 	}
@@ -984,7 +984,7 @@
 	return ret;
 }
 
-struct shrinker binder_shrinker = {
+static struct shrinker binder_shrinker = {
 	.count_objects = binder_shrink_count,
 	.scan_objects = binder_shrink_scan,
 	.seeks = DEFAULT_SEEKS,
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index ce68c1e..882f1c9 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -131,7 +131,11 @@
 /* drivers/base/power/main.c */
 extern struct list_head dpm_list;	/* The active device list */
 
+#ifdef CONFIG_QCOM_SHOW_RESUME_IRQ
 extern int msm_show_resume_irq_mask;
+#else
+#define msm_show_resume_irq_mask 0
+#endif
 
 static inline struct device *to_device(struct list_head *entry)
 {
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index c7f3969..70db4d5 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -720,7 +720,7 @@
 			if (mbus->hw_io_coherency)
 				w->mbus_attr |= ATTR_HW_COHERENCY;
 			w->base = base & DDR_BASE_CS_LOW_MASK;
-			w->size = (size | ~DDR_SIZE_MASK) + 1;
+			w->size = (u64)(size | ~DDR_SIZE_MASK) + 1;
 		}
 	}
 	mvebu_mbus_dram_info.num_cs = cs;
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 7de9b79c..122ebd2 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -1506,7 +1506,7 @@
 	int cid = fl->cid;
 	int interrupted = 0;
 	int err = 0;
-	struct timespec invoket;
+	struct timespec invoket = {0};
 
 	if (fl->profile)
 		getnstimeofday(&invoket);
@@ -1723,7 +1723,7 @@
 			goto bail;
 
 		inbuf.pgid = current->tgid;
-		inbuf.namelen = strlen(proc_name)+1;
+		inbuf.namelen = init->filelen;
 		inbuf.pageslen = 0;
 		if (!me->staticpd_flags) {
 			inbuf.pageslen = 1;
@@ -1775,6 +1775,7 @@
 		err = -ENOTTY;
 	}
 bail:
+	kfree(proc_name);
 	if (err && (init->flags == FASTRPC_INIT_CREATE_STATIC))
 		me->staticpd_flags = 0;
 	if (mem && err) {
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
index 2df62e4..b30bfad 100644
--- a/drivers/char/diag/diag_masks.c
+++ b/drivers/char/diag/diag_masks.c
@@ -28,8 +28,7 @@
 #define DIAG_SET_FEATURE_MASK(x) (feature_bytes[(x)/8] |= (1 << (x & 0x7)))
 
 #define diag_check_update(x)	\
-	(!info || (info && (info->peripheral_mask & MD_PERIPHERAL_MASK(x))) \
-	|| (info && (info->peripheral_mask & MD_PERIPHERAL_PD_MASK(x)))) \
+	(!info || (info && (info->peripheral_mask & MD_PERIPHERAL_MASK(x)))) \
 
 struct diag_mask_info msg_mask;
 struct diag_mask_info msg_bt_mask;
@@ -87,16 +86,15 @@
 
 static void diag_send_log_mask_update(uint8_t peripheral, int equip_id)
 {
-	int i;
-	int err = 0;
-	int send_once = 0;
+	int err = 0, send_once = 0, i;
 	int header_len = sizeof(struct diag_ctrl_log_mask);
 	uint8_t *buf = NULL, *temp = NULL;
 	uint8_t upd = 0;
-	uint32_t mask_size = 0;
+	uint32_t mask_size = 0, pd_mask = 0;
 	struct diag_ctrl_log_mask ctrl_pkt;
 	struct diag_mask_info *mask_info = NULL;
 	struct diag_log_mask_t *mask = NULL;
+	struct diagfwd_info *fwd_info = NULL;
 
 	if (peripheral >= NUM_PERIPHERALS)
 		return;
@@ -108,13 +106,14 @@
 		return;
 	}
 
+	MD_PERIPHERAL_PD_MASK(TYPE_CNTL, peripheral, pd_mask);
+
 	if (driver->md_session_mask != 0) {
 		if (driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral)) {
 			if (driver->md_session_map[peripheral])
 				mask_info =
 				driver->md_session_map[peripheral]->log_mask;
-		} else if (driver->md_session_mask &
-				MD_PERIPHERAL_PD_MASK(peripheral)) {
+		} else if (driver->md_session_mask & pd_mask) {
 			upd = diag_mask_to_pd_value(driver->md_session_mask);
 			if (upd && driver->md_session_map[upd])
 				mask_info =
@@ -213,12 +212,12 @@
 {
 	uint8_t *buf = NULL, *temp = NULL;
 	uint8_t upd = 0;
+	uint32_t pd_mask = 0;
+	int num_bytes = EVENT_COUNT_TO_BYTES(driver->last_event_id);
+	int write_len = 0, err = 0, i = 0, temp_len = 0;
 	struct diag_ctrl_event_mask header;
 	struct diag_mask_info *mask_info = NULL;
-	int num_bytes = EVENT_COUNT_TO_BYTES(driver->last_event_id);
-	int write_len = 0;
-	int err = 0;
-	int temp_len = 0;
+	struct diagfwd_info *fwd_info = NULL;
 
 	if (num_bytes <= 0 || num_bytes > driver->event_mask_size) {
 		pr_debug("diag: In %s, invalid event mask length %d\n",
@@ -236,13 +235,14 @@
 		return;
 	}
 
+	MD_PERIPHERAL_PD_MASK(TYPE_CNTL, peripheral, pd_mask);
+
 	if (driver->md_session_mask != 0) {
 		if (driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral)) {
 			if (driver->md_session_map[peripheral])
 				mask_info =
 				driver->md_session_map[peripheral]->event_mask;
-		} else if (driver->md_session_mask &
-				MD_PERIPHERAL_PD_MASK(peripheral)) {
+		} else if (driver->md_session_mask & pd_mask) {
 			upd = diag_mask_to_pd_value(driver->md_session_mask);
 			if (upd && driver->md_session_map[upd])
 				mask_info =
@@ -310,17 +310,16 @@
 
 static void diag_send_msg_mask_update(uint8_t peripheral, int first, int last)
 {
-	int i;
-	int err = 0;
+	int i, err = 0, temp_len = 0;
 	int header_len = sizeof(struct diag_ctrl_msg_mask);
-	int temp_len = 0;
 	uint8_t *buf = NULL, *temp = NULL;
 	uint8_t upd = 0;
-	uint32_t mask_size = 0;
+	uint8_t msg_mask_tbl_count_local;
+	uint32_t mask_size = 0, pd_mask = 0;
 	struct diag_mask_info *mask_info = NULL;
 	struct diag_msg_mask_t *mask = NULL;
 	struct diag_ctrl_msg_mask header;
-	uint8_t msg_mask_tbl_count_local;
+	struct diagfwd_info *fwd_info = NULL;
 
 	if (peripheral >= NUM_PERIPHERALS)
 		return;
@@ -332,13 +331,14 @@
 		return;
 	}
 
+	MD_PERIPHERAL_PD_MASK(TYPE_CNTL, peripheral, pd_mask);
+
 	if (driver->md_session_mask != 0) {
 		if (driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral)) {
 			if (driver->md_session_map[peripheral])
 				mask_info =
 				driver->md_session_map[peripheral]->msg_mask;
-		} else if (driver->md_session_mask &
-				MD_PERIPHERAL_PD_MASK(peripheral)) {
+		} else if (driver->md_session_mask & pd_mask) {
 			upd = diag_mask_to_pd_value(driver->md_session_mask);
 			if (upd && driver->md_session_map[upd])
 				mask_info =
@@ -510,7 +510,7 @@
 	if (driver->supports_apps_hdlc_encoding)
 		DIAG_SET_FEATURE_MASK(F_DIAG_APPS_HDLC_ENCODE);
 	if (driver->supports_apps_header_untagging) {
-		if (peripheral == PERIPHERAL_MODEM) {
+		if (driver->feature[peripheral].untag_header) {
 			DIAG_SET_FEATURE_MASK(F_DIAG_PKT_HEADER_UNTAG);
 			driver->peripheral_untag[peripheral] =
 				ENABLE_PKT_HEADER_UNTAGGING;
@@ -692,18 +692,15 @@
 				 unsigned char *dest_buf, int dest_len,
 				 struct diag_md_session_t *info)
 {
-	int i;
-	int write_len = 0;
+	uint32_t mask_size = 0, offset = 0;
+	uint32_t *temp = NULL;
+	int write_len = 0, i = 0, found = 0, peripheral;
 	int header_len = sizeof(struct diag_msg_build_mask_t);
-	int found = 0;
-	uint32_t mask_size = 0;
-	uint32_t offset = 0;
 	struct diag_msg_mask_t *mask = NULL;
 	struct diag_msg_build_mask_t *req = NULL;
 	struct diag_msg_build_mask_t rsp;
 	struct diag_mask_info *mask_info = NULL;
 	struct diag_msg_mask_t *mask_next = NULL;
-	uint32_t *temp = NULL;
 
 	mask_info = (!info) ? &msg_mask : info->msg_mask;
 	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
@@ -799,11 +796,18 @@
 		mask_size = dest_len - write_len;
 	memcpy(dest_buf + write_len, src_buf + header_len, mask_size);
 	write_len += mask_size;
-	for (i = 0; i < NUM_PERIPHERALS; i++) {
+	for (i = 0; i < NUM_MD_SESSIONS; i++) {
+		if (i == APPS_DATA)
+			continue;
 		if (!diag_check_update(i))
 			continue;
+		if (i > NUM_PERIPHERALS)
+			peripheral = diag_search_peripheral_by_pd(i);
+		else
+			peripheral = i;
 		mutex_lock(&driver->md_session_lock);
-		diag_send_msg_mask_update(i, req->ssid_first, req->ssid_last);
+		diag_send_msg_mask_update(peripheral, req->ssid_first,
+			req->ssid_last);
 		mutex_unlock(&driver->md_session_lock);
 	}
 end:
@@ -814,8 +818,7 @@
 				     unsigned char *dest_buf, int dest_len,
 				     struct diag_md_session_t *info)
 {
-	int i;
-	int write_len = 0;
+	int i, write_len = 0, peripheral;
 	int header_len = sizeof(struct diag_msg_config_rsp_t);
 	struct diag_msg_config_rsp_t rsp;
 	struct diag_msg_config_rsp_t *req = NULL;
@@ -863,11 +866,17 @@
 	memcpy(dest_buf, &rsp, header_len);
 	write_len += header_len;
 
-	for (i = 0; i < NUM_PERIPHERALS; i++) {
+	for (i = 0; i < NUM_MD_SESSIONS; i++) {
+		if (i == APPS_DATA)
+			continue;
 		if (!diag_check_update(i))
 			continue;
+		if (i > NUM_PERIPHERALS)
+			peripheral = diag_search_peripheral_by_pd(i);
+		else
+			peripheral = i;
 		mutex_lock(&driver->md_session_lock);
-		diag_send_msg_mask_update(i, ALL_SSID, ALL_SSID);
+		diag_send_msg_mask_update(peripheral, ALL_SSID, ALL_SSID);
 		mutex_unlock(&driver->md_session_lock);
 	}
 
@@ -914,9 +923,7 @@
 				      unsigned char *dest_buf, int dest_len,
 				      struct diag_md_session_t *info)
 {
-	int i;
-	int write_len = 0;
-	int mask_len = 0;
+	int i, write_len = 0, mask_len = 0, peripheral;
 	int header_len = sizeof(struct diag_event_mask_config_t);
 	struct diag_event_mask_config_t rsp;
 	struct diag_event_mask_config_t *req;
@@ -959,11 +966,17 @@
 	memcpy(dest_buf + write_len, mask_info->ptr, mask_len);
 	write_len += mask_len;
 
-	for (i = 0; i < NUM_PERIPHERALS; i++) {
+	for (i = 0; i < NUM_MD_SESSIONS; i++) {
+		if (i == APPS_DATA)
+			continue;
 		if (!diag_check_update(i))
 			continue;
+		if (i > NUM_PERIPHERALS)
+			peripheral = diag_search_peripheral_by_pd(i);
+		else
+			peripheral = i;
 		mutex_lock(&driver->md_session_lock);
-		diag_send_event_mask_update(i);
+		diag_send_event_mask_update(peripheral);
 		mutex_unlock(&driver->md_session_lock);
 	}
 
@@ -974,8 +987,7 @@
 				  unsigned char *dest_buf, int dest_len,
 				  struct diag_md_session_t *info)
 {
-	int i;
-	int write_len = 0;
+	int write_len = 0, i, peripheral;
 	uint8_t toggle = 0;
 	struct diag_event_report_t header;
 	struct diag_mask_info *mask_info = NULL;
@@ -1008,11 +1020,17 @@
 	 */
 	header.cmd_code = DIAG_CMD_EVENT_TOGGLE;
 	header.padding = 0;
-	for (i = 0; i < NUM_PERIPHERALS; i++) {
+	for (i = 0; i < NUM_MD_SESSIONS; i++) {
+		if (i == APPS_DATA)
+			continue;
 		if (!diag_check_update(i))
 			continue;
+		if (i > NUM_PERIPHERALS)
+			peripheral = diag_search_peripheral_by_pd(i);
+		else
+			peripheral = i;
 		mutex_lock(&driver->md_session_lock);
-		diag_send_event_mask_update(i);
+		diag_send_event_mask_update(peripheral);
 		mutex_unlock(&driver->md_session_lock);
 	}
 	memcpy(dest_buf, &header, sizeof(header));
@@ -1149,19 +1167,17 @@
 				 unsigned char *dest_buf, int dest_len,
 				 struct diag_md_session_t *info)
 {
-	int i;
-	int write_len = 0;
+	int i, peripheral, write_len = 0;
 	int status = LOG_STATUS_SUCCESS;
-	int read_len = 0;
-	int payload_len = 0;
+	int read_len = 0, payload_len = 0;
 	int req_header_len = sizeof(struct diag_log_config_req_t);
 	int rsp_header_len = sizeof(struct diag_log_config_set_rsp_t);
 	uint32_t mask_size = 0;
 	struct diag_log_config_req_t *req;
 	struct diag_log_config_set_rsp_t rsp;
 	struct diag_log_mask_t *mask = NULL;
-	unsigned char *temp_buf = NULL;
 	struct diag_mask_info *mask_info = NULL;
+	unsigned char *temp_buf = NULL;
 
 	mask_info = (!info) ? &log_mask : info->log_mask;
 	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
@@ -1264,11 +1280,17 @@
 	memcpy(dest_buf + write_len, src_buf + read_len, payload_len);
 	write_len += payload_len;
 
-	for (i = 0; i < NUM_PERIPHERALS; i++) {
+	for (i = 0; i < NUM_MD_SESSIONS; i++) {
+		if (i == APPS_DATA)
+			continue;
 		if (!diag_check_update(i))
 			continue;
+		if (i > NUM_PERIPHERALS)
+			peripheral = diag_search_peripheral_by_pd(i);
+		else
+			peripheral = i;
 		mutex_lock(&driver->md_session_lock);
-		diag_send_log_mask_update(i, req->equip_id);
+		diag_send_log_mask_update(peripheral, req->equip_id);
 		mutex_unlock(&driver->md_session_lock);
 	}
 end:
@@ -1282,8 +1304,7 @@
 	struct diag_mask_info *mask_info = NULL;
 	struct diag_log_mask_t *mask = NULL;
 	struct diag_log_config_rsp_t header;
-	int write_len = 0;
-	int i;
+	int write_len = 0, i, peripheral;
 
 	mask_info = (!info) ? &log_mask : info->log_mask;
 	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
@@ -1317,11 +1338,17 @@
 	header.status = LOG_STATUS_SUCCESS;
 	memcpy(dest_buf, &header, sizeof(struct diag_log_config_rsp_t));
 	write_len += sizeof(struct diag_log_config_rsp_t);
-	for (i = 0; i < NUM_PERIPHERALS; i++) {
+	for (i = 0; i < NUM_MD_SESSIONS; i++) {
+		if (i == APPS_DATA)
+			continue;
 		if (!diag_check_update(i))
 			continue;
+		if (i > NUM_PERIPHERALS)
+			peripheral = diag_search_peripheral_by_pd(i);
+		else
+			peripheral = i;
 		mutex_lock(&driver->md_session_lock);
-		diag_send_log_mask_update(i, ALL_EQUIP_ID);
+		diag_send_log_mask_update(peripheral, ALL_EQUIP_ID);
 		mutex_unlock(&driver->md_session_lock);
 	}
 
@@ -1355,8 +1382,7 @@
 
 static int diag_create_msg_mask_table(void)
 {
-	int i;
-	int err = 0;
+	int i, err = 0;
 	struct diag_msg_mask_t *mask = (struct diag_msg_mask_t *)msg_mask.ptr;
 	struct diag_ssid_range_t range;
 
@@ -1377,8 +1403,7 @@
 
 static int diag_create_build_time_mask(void)
 {
-	int i;
-	int err = 0;
+	int i, err = 0;
 	const uint32_t *tbl = NULL;
 	uint32_t tbl_size = 0;
 	struct diag_msg_mask_t *build_mask = NULL;
@@ -1574,8 +1599,7 @@
 
 int diag_log_mask_copy(struct diag_mask_info *dest, struct diag_mask_info *src)
 {
-	int i;
-	int err = 0;
+	int i, err = 0;
 	struct diag_log_mask_t *src_mask = NULL;
 	struct diag_log_mask_t *dest_mask = NULL;
 
@@ -1635,8 +1659,7 @@
 
 static int diag_msg_mask_init(void)
 {
-	int err = 0;
-	int i;
+	int err = 0, i;
 
 	err = __diag_mask_init(&msg_mask, MSG_MASK_SIZE, APPS_BUF_SIZE);
 	if (err)
@@ -1657,12 +1680,10 @@
 
 int diag_msg_mask_copy(struct diag_mask_info *dest, struct diag_mask_info *src)
 {
-	int i;
-	int err = 0;
+	int i, err = 0, mask_size = 0;
 	struct diag_msg_mask_t *src_mask = NULL;
 	struct diag_msg_mask_t *dest_mask = NULL;
 	struct diag_ssid_range_t range;
-	int mask_size = 0;
 
 	if (!src || !dest)
 		return -EINVAL;
@@ -1767,8 +1788,7 @@
 
 static int diag_log_mask_init(void)
 {
-	int err = 0;
-	int i;
+	int err = 0, i;
 
 	err = __diag_mask_init(&log_mask, LOG_MASK_SIZE, APPS_BUF_SIZE);
 	if (err)
@@ -1801,8 +1821,7 @@
 
 static int diag_event_mask_init(void)
 {
-	int err = 0;
-	int i;
+	int err = 0, i;
 
 	err = __diag_mask_init(&event_mask, EVENT_MASK_SIZE, APPS_BUF_SIZE);
 	if (err)
@@ -1855,11 +1874,8 @@
 int diag_copy_to_user_msg_mask(char __user *buf, size_t count,
 			       struct diag_md_session_t *info)
 {
-	int i;
-	int err = 0;
-	int len = 0;
-	int copy_len = 0;
-	int total_len = 0;
+	int i, err = 0, len = 0;
+	int copy_len = 0, total_len = 0;
 	struct diag_msg_mask_userspace_t header;
 	struct diag_mask_info *mask_info = NULL;
 	struct diag_msg_mask_t *mask = NULL;
@@ -1927,11 +1943,8 @@
 int diag_copy_to_user_log_mask(char __user *buf, size_t count,
 			       struct diag_md_session_t *info)
 {
-	int i;
-	int err = 0;
-	int len = 0;
-	int copy_len = 0;
-	int total_len = 0;
+	int i, err = 0, len = 0;
+	int copy_len = 0, total_len = 0;
 	struct diag_log_mask_userspace_t header;
 	struct diag_log_mask_t *mask = NULL;
 	struct diag_mask_info *mask_info = NULL;
@@ -2014,8 +2027,7 @@
 int diag_process_apps_masks(unsigned char *buf, int len,
 			    struct diag_md_session_t *info)
 {
-	int size = 0;
-	int sub_cmd = 0;
+	int size = 0, sub_cmd = 0;
 	int (*hdlr)(unsigned char *src_buf, int src_len,
 		    unsigned char *dest_buf, int dest_len,
 		    struct diag_md_session_t *info) = NULL;
diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c
index dabb1f4..6377677 100644
--- a/drivers/char/diag/diag_memorydevice.c
+++ b/drivers/char/diag/diag_memorydevice.c
@@ -152,15 +152,20 @@
 		return -EIO;
 
 	ch = &diag_md[id];
+	if (!ch)
+		return -EINVAL;
 
 	spin_lock_irqsave(&ch->lock, flags);
 	for (i = 0; i < ch->num_tbl_entries && !found; i++) {
 		if (ch->tbl[i].buf != buf)
 			continue;
 		found = 1;
-		pr_err_ratelimited("diag: trying to write the same buffer buf: %pK, ctxt: %d len: %d at i: %d back to the table, proc: %d, mode: %d\n",
-				   buf, ctx, ch->tbl[i].len,
-				   i, id, driver->logging_mode);
+		pr_err_ratelimited("diag: trying to write the same buffer buf: %pK, len: %d, back to the table for p: %d, t: %d, buf_num: %d, proc: %d, i: %d\n",
+				   buf, ch->tbl[i].len, GET_BUF_PERIPHERAL(ctx),
+				   GET_BUF_TYPE(ctx), GET_BUF_NUM(ctx), id, i);
+		ch->tbl[i].buf = NULL;
+		ch->tbl[i].len = 0;
+		ch->tbl[i].ctx = 0;
 	}
 	spin_unlock_irqrestore(&ch->lock, flags);
 
@@ -194,6 +199,7 @@
 
 		found = 1;
 		driver->data_ready[i] |= USER_SPACE_DATA_TYPE;
+		atomic_inc(&driver->data_ready_notif[i]);
 		pr_debug("diag: wake up logging process\n");
 		wake_up_interruptible(&driver->wait_q);
 	}
@@ -224,7 +230,7 @@
 		ch = &diag_md[i];
 		for (j = 0; j < ch->num_tbl_entries && !err; j++) {
 			entry = &ch->tbl[j];
-			if (entry->len <= 0)
+			if (entry->len <= 0 || entry->buf == NULL)
 				continue;
 
 			peripheral = diag_md_get_peripheral(entry->ctx);
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index c9ae689..74180e5 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -25,6 +25,8 @@
 #include <linux/atomic.h>
 #include "diagfwd_bridge.h"
 
+#define THRESHOLD_CLIENT_LIMIT	50
+
 /* Size of the USB buffers used for read and write*/
 #define USB_MAX_OUT_BUF 4096
 #define APPS_BUF_SIZE	4096
@@ -33,7 +35,7 @@
 
 #define DIAG_MAX_REQ_SIZE	(16 * 1024)
 #define DIAG_MAX_RSP_SIZE	(16 * 1024)
-#define APF_DIAG_PADDING	256
+#define APF_DIAG_PADDING	0
 /*
  * In the worst case, the HDLC buffer can be at most twice the size of the
  * original packet. Add 3 bytes for 16 bit CRC (2 bytes) and a delimiter
@@ -69,12 +71,17 @@
 #define DIAG_CON_CDSP		(0x0040)	/* Bit mask for CDSP */
 
 #define DIAG_CON_UPD_WLAN		(0x1000) /*Bit mask for WLAN PD*/
+#define DIAG_CON_UPD_AUDIO		(0x2000) /*Bit mask for AUDIO PD*/
+#define DIAG_CON_UPD_SENSORS	(0x4000) /*Bit mask for SENSORS PD*/
+
 #define DIAG_CON_NONE		(0x0000)	/* Bit mask for No SS*/
 #define DIAG_CON_ALL		(DIAG_CON_APSS | DIAG_CON_MPSS \
 				| DIAG_CON_LPASS | DIAG_CON_WCNSS \
 				| DIAG_CON_SENSORS | DIAG_CON_WDSP \
 				| DIAG_CON_CDSP)
-#define DIAG_CON_UPD_ALL	(DIAG_CON_UPD_WLAN)
+#define DIAG_CON_UPD_ALL	(DIAG_CON_UPD_WLAN \
+				| DIAG_CON_UPD_AUDIO \
+				| DIAG_CON_UPD_SENSORS)
 
 #define DIAG_STM_MODEM	0x01
 #define DIAG_STM_LPASS	0x02
@@ -214,16 +221,23 @@
 #define APPS_DATA		(NUM_PERIPHERALS)
 
 #define UPD_WLAN		7
-#define NUM_UPD			1
-#define MAX_PERIPHERAL_UPD			1
+#define UPD_AUDIO		8
+#define UPD_SENSORS		9
+#define NUM_UPD			3
+
+#define MAX_PERIPHERAL_UPD			2
 /* Number of sessions possible in Memory Device Mode. +1 for Apps data */
 #define NUM_MD_SESSIONS		(NUM_PERIPHERALS \
 					+ NUM_UPD + 1)
 
 #define MD_PERIPHERAL_MASK(x)	(1 << x)
 
-#define MD_PERIPHERAL_PD_MASK(x)					\
-	((x == PERIPHERAL_MODEM) ? (1 << UPD_WLAN) : 0)\
+#define MD_PERIPHERAL_PD_MASK(x, peripheral, pd_mask)	\
+do {						\
+fwd_info = &peripheral_info[x][peripheral];	\
+for (i = 0; i <= fwd_info->num_pd - 2; i++)	\
+	pd_mask |= (1 << fwd_info->upd_diag_id[i].pd);\
+} while (0)
 
 /*
  * Number of stm processors includes all the peripherals and
@@ -306,6 +320,8 @@
 struct diag_id_tbl_t {
 	struct list_head link;
 	uint8_t diag_id;
+	uint8_t pd_val;
+	uint8_t peripheral;
 	char *process_name;
 } __packed;
 struct diag_id_t {
@@ -452,6 +468,10 @@
 	uint32_t peripheral_mask;
 	uint32_t pd_mask;
 	uint8_t mode_param;
+	uint8_t diag_id;
+	uint8_t pd_val;
+	uint8_t reserved;
+	int peripheral;
 } __packed;
 
 struct diag_md_session_t {
@@ -525,6 +545,7 @@
 	wait_queue_head_t wait_q;
 	struct diag_client_map *client_map;
 	int *data_ready;
+	atomic_t data_ready_notif[THRESHOLD_CLIENT_LIMIT];
 	int num_clients;
 	int polling_reg_flag;
 	int use_device_tree;
@@ -693,7 +714,10 @@
 int diag_mask_param(void);
 void diag_clear_masks(struct diag_md_session_t *info);
 uint8_t diag_mask_to_pd_value(uint32_t peripheral_mask);
-
+int diag_query_pd(char *process_name);
+int diag_search_peripheral_by_pd(uint8_t pd_val);
+uint8_t diag_search_diagid_by_pd(uint8_t pd_val,
+	uint8_t *diag_id, int *peripheral);
 void diag_record_stats(int type, int flag);
 
 struct diag_md_session_t *diag_md_session_get_pid(int pid);
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 54e6486..354e676 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -136,7 +136,6 @@
 
 /* This is the max number of user-space clients supported at initialization*/
 static unsigned int max_clients = 15;
-static unsigned int threshold_client_limit = 50;
 module_param(max_clients, uint, 0000);
 
 /* Timer variables */
@@ -324,7 +323,7 @@
 		if (i < driver->num_clients) {
 			diag_add_client(i, file);
 		} else {
-			if (i < threshold_client_limit) {
+			if (i < THRESHOLD_CLIENT_LIMIT) {
 				driver->num_clients++;
 				temp = krealloc(driver->client_map
 					, (driver->num_clients) * sizeof(struct
@@ -354,11 +353,17 @@
 			}
 		}
 		driver->data_ready[i] = 0x0;
+		atomic_set(&driver->data_ready_notif[i], 0);
 		driver->data_ready[i] |= MSG_MASKS_TYPE;
+		atomic_inc(&driver->data_ready_notif[i]);
 		driver->data_ready[i] |= EVENT_MASKS_TYPE;
+		atomic_inc(&driver->data_ready_notif[i]);
 		driver->data_ready[i] |= LOG_MASKS_TYPE;
+		atomic_inc(&driver->data_ready_notif[i]);
 		driver->data_ready[i] |= DCI_LOG_MASKS_TYPE;
+		atomic_inc(&driver->data_ready_notif[i]);
 		driver->data_ready[i] |= DCI_EVENT_MASKS_TYPE;
+		atomic_inc(&driver->data_ready_notif[i]);
 
 		if (driver->ref_count == 0)
 			diag_mempool_init();
@@ -395,24 +400,22 @@
 		ret |= DIAG_CON_CDSP;
 	if (peripheral_mask & MD_PERIPHERAL_MASK(UPD_WLAN))
 		ret |= DIAG_CON_UPD_WLAN;
+	if (peripheral_mask & MD_PERIPHERAL_MASK(UPD_AUDIO))
+		ret |= DIAG_CON_UPD_AUDIO;
+	if (peripheral_mask & MD_PERIPHERAL_MASK(UPD_SENSORS))
+		ret |= DIAG_CON_UPD_SENSORS;
 	return ret;
 }
 
 uint8_t diag_mask_to_pd_value(uint32_t peripheral_mask)
 {
 	uint8_t upd = 0;
-	uint32_t pd_mask = 0;
 
-	pd_mask = diag_translate_kernel_to_user_mask(peripheral_mask);
-	switch (pd_mask) {
-	case DIAG_CON_UPD_WLAN:
-		upd = UPD_WLAN;
-		break;
-	default:
-		DIAG_LOG(DIAG_DEBUG_MASKS,
-		"asking for mask update with no pd mask set\n");
+	for (upd = UPD_WLAN; upd < NUM_MD_SESSIONS; upd++) {
+		if (peripheral_mask & (1 << upd))
+			return upd;
 	}
-	return upd;
+	return 0;
 }
 
 int diag_mask_param(void)
@@ -1619,18 +1622,19 @@
 		ret |= (1 << PERIPHERAL_CDSP);
 	if (peripheral_mask & DIAG_CON_UPD_WLAN)
 		ret |= (1 << UPD_WLAN);
-
+	if (peripheral_mask & DIAG_CON_UPD_AUDIO)
+		ret |= (1 << UPD_AUDIO);
+	if (peripheral_mask & DIAG_CON_UPD_SENSORS)
+		ret |= (1 << UPD_SENSORS);
 	return ret;
 }
 
 static int diag_switch_logging(struct diag_logging_mode_param_t *param)
 {
 	int new_mode, i = 0;
-	int curr_mode;
-	int err = 0;
-	uint8_t do_switch = 1;
-	uint32_t peripheral_mask = 0;
-	uint8_t peripheral, upd;
+	int curr_mode, err = 0;
+	uint8_t do_switch = 1, peripheral = 0;
+	uint32_t peripheral_mask = 0, pd_mask = 0;
 
 	if (!param)
 		return -EINVAL;
@@ -1642,30 +1646,42 @@
 	}
 
 	if (param->pd_mask) {
-		switch (param->pd_mask) {
-		case DIAG_CON_UPD_WLAN:
-			peripheral = PERIPHERAL_MODEM;
-			upd = UPD_WLAN;
-			break;
-		default:
+		pd_mask = diag_translate_mask(param->pd_mask);
+		for (i = UPD_WLAN; i < NUM_MD_SESSIONS; i++) {
+			if (pd_mask & (1 << i)) {
+				if (diag_search_diagid_by_pd(i, &param->diag_id,
+					&param->peripheral)) {
+					param->pd_val = i;
+					break;
+				}
+			}
+		}
+		if (!param->diag_id) {
 			DIAG_LOG(DIAG_DEBUG_USERSPACE,
-			"asking for mode switch with no pd mask set\n");
+			"diag_id support is not present for the pd mask = %d\n",
+			param->pd_mask);
 			return -EINVAL;
 		}
 
+		DIAG_LOG(DIAG_DEBUG_USERSPACE,
+			"diag: pd_mask = %d, diag_id = %d, peripheral = %d, pd_val = %d\n",
+			param->pd_mask, param->diag_id,
+			param->peripheral, param->pd_val);
+
+		peripheral = param->peripheral;
 		if (driver->md_session_map[peripheral] &&
 			(MD_PERIPHERAL_MASK(peripheral) &
 			diag_mux->mux_mask)) {
 			DIAG_LOG(DIAG_DEBUG_USERSPACE,
 			"diag_fr: User PD is already logging onto active peripheral logging\n");
-			i = upd - UPD_WLAN;
+			i = param->pd_val - UPD_WLAN;
 			driver->pd_session_clear[i] = 0;
 			return -EINVAL;
 		}
 		peripheral_mask =
 			diag_translate_mask(param->pd_mask);
 		param->peripheral_mask = peripheral_mask;
-		i = upd - UPD_WLAN;
+		i = param->pd_val - UPD_WLAN;
 		if (!driver->pd_session_clear[i]) {
 			driver->pd_logging_mode[i] = 1;
 			driver->num_pd_session += 1;
@@ -1834,6 +1850,7 @@
 	}
 
 	driver->data_ready[i] |= DEINIT_TYPE;
+	atomic_inc(&driver->data_ready_notif[i]);
 	mutex_unlock(&driver->diagchar_mutex);
 	wake_up_interruptible(&driver->wait_q);
 
@@ -2020,11 +2037,126 @@
 	return 0;
 }
 
+/*
+ * diag_search_peripheral_by_pd(uint8_t pd_val)
+ *
+ * Returns the peripheral that hosts the given pd by searching
+ * the diag_id table.
+ *
+ */
+
+int diag_search_peripheral_by_pd(uint8_t pd_val)
+{
+	struct list_head *start;
+	struct list_head *temp;
+	struct diag_id_tbl_t *item = NULL;
+
+	mutex_lock(&driver->diag_id_mutex);
+	list_for_each_safe(start, temp, &driver->diag_id_list) {
+		item = list_entry(start, struct diag_id_tbl_t, link);
+		if (pd_val == item->pd_val) {
+			mutex_unlock(&driver->diag_id_mutex);
+			return item->peripheral;
+		}
+	}
+	mutex_unlock(&driver->diag_id_mutex);
+	return -EINVAL;
+}
+
+/*
+ * diag_search_diagid_by_pd(uint8_t pd_val,
+ *	uint8_t *diag_id, int *peripheral)
+ *
+ * Looks up the pd passed as an argument and, on a match, updates
+ * the caller's peripheral and diag_id.
+ *
+ */
+
+uint8_t diag_search_diagid_by_pd(uint8_t pd_val,
+	uint8_t *diag_id, int *peripheral)
+{
+	struct list_head *start;
+	struct list_head *temp;
+	struct diag_id_tbl_t *item = NULL;
+
+	mutex_lock(&driver->diag_id_mutex);
+	list_for_each_safe(start, temp, &driver->diag_id_list) {
+		item = list_entry(start, struct diag_id_tbl_t, link);
+		if (pd_val == item->pd_val) {
+			*peripheral = item->peripheral;
+			*diag_id = item->diag_id;
+			mutex_unlock(&driver->diag_id_mutex);
+			return 1;
+		}
+	}
+	mutex_unlock(&driver->diag_id_mutex);
+	return 0;
+}
+
+/*
+ * diag_query_pd_name(char *process_name, char *search_str)
+ *
+ * The function searches for the pd-specific string in the process name
+ * string received in the control packet from the peripheral.
+ *
+ */
+
+static int diag_query_pd_name(char *process_name, char *search_str)
+{
+	if (!process_name)
+		return -EINVAL;
+
+	if (strnstr(process_name, search_str, strlen(process_name)))
+		return 1;
+
+	return 0;
+}
+
+/*
+ * diag_query_pd(char *process_name)
+ *
+ * The function returns the PD information based on the presence of the
+ * pd-specific string in the process name received from the peripheral.
+ *
+ */
+
+int diag_query_pd(char *process_name)
+{
+	if (!process_name)
+		return -EINVAL;
+
+	if (diag_query_pd_name(process_name, "modem/root_pd"))
+		return PERIPHERAL_MODEM;
+	if (diag_query_pd_name(process_name, "adsp/root_pd"))
+		return PERIPHERAL_LPASS;
+	if (diag_query_pd_name(process_name, "slpi/root_pd"))
+		return PERIPHERAL_SENSORS;
+	if (diag_query_pd_name(process_name, "cdsp/root_pd"))
+		return PERIPHERAL_CDSP;
+	if (diag_query_pd_name(process_name, "wlan_pd"))
+		return UPD_WLAN;
+	if (diag_query_pd_name(process_name, "audio_pd"))
+		return UPD_AUDIO;
+	if (diag_query_pd_name(process_name, "sensor_pd"))
+		return UPD_SENSORS;
+
+	return -EINVAL;
+}
+
+/*
+ * diag_ioctl_query_pd_logging(struct diag_logging_mode_param_t *param)
+ *
+ * IOCTL handler that, based on the parameter received, checks which
+ * peripheral hosts the PD and validates that the peripheral supports the
+ * diag_id and tagging features.
+ *
+ */
+
 static int diag_ioctl_query_pd_logging(struct diag_logging_mode_param_t *param)
 {
-	int ret = -EINVAL;
-	int peripheral;
-	char *p_str = NULL;
+	int ret = -EINVAL, i = 0;
+	int peripheral = -EINVAL;
+	uint32_t pd_mask = 0;
 
 	if (!param)
 		return -EINVAL;
@@ -2035,17 +2167,21 @@
 		return -EINVAL;
 	}
 
-	switch (param->pd_mask) {
-	case DIAG_CON_UPD_WLAN:
-		peripheral = PERIPHERAL_MODEM;
-		p_str = "MODEM";
-		break;
-	default:
-		DIAG_LOG(DIAG_DEBUG_USERSPACE,
-		"Invalid pd mask, returning EINVAL\n");
-		return -EINVAL;
+	if (param->pd_mask) {
+		pd_mask = diag_translate_mask(param->pd_mask);
+		for (i = UPD_WLAN; i < NUM_MD_SESSIONS; i++) {
+			if (pd_mask & (1 << i)) {
+				peripheral = diag_search_peripheral_by_pd(i);
+				break;
+			}
+		}
+		if (peripheral < 0) {
+			DIAG_LOG(DIAG_DEBUG_USERSPACE,
+			"diag_id support is not present for the pd mask = %d\n",
+			param->pd_mask);
+			return -EINVAL;
+		}
 	}
-
 	mutex_lock(&driver->diag_cntl_mutex);
 	DIAG_LOG(DIAG_DEBUG_USERSPACE,
 	"diag: %s: Untagging support on APPS is %s\n", __func__,
@@ -2053,8 +2189,8 @@
 	"present" : "absent"));
 
 	DIAG_LOG(DIAG_DEBUG_USERSPACE,
-	"diag: %s: Tagging support on %s is %s\n",
-	__func__, p_str,
+	"diag: %s: Tagging support on peripheral = %d is %s\n",
+	__func__, peripheral,
 	(driver->feature[peripheral].untag_header ?
 	"present" : "absent"));
 
@@ -2966,16 +3102,6 @@
 	return 0;
 }
 
-static int check_data_ready(int index)
-{
-	int data_type = 0;
-
-	mutex_lock(&driver->diagchar_mutex);
-	data_type = driver->data_ready[index];
-	mutex_unlock(&driver->diagchar_mutex);
-	return data_type;
-}
-
 static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
 			  loff_t *ppos)
 {
@@ -3002,7 +3128,8 @@
 		pr_err("diag: bad address from user side\n");
 		return -EFAULT;
 	}
-	wait_event_interruptible(driver->wait_q, (check_data_ready(index)) > 0);
+	wait_event_interruptible(driver->wait_q,
+			atomic_read(&driver->data_ready_notif[index]) > 0);
 
 	mutex_lock(&driver->diagchar_mutex);
 
@@ -3013,6 +3140,7 @@
 		/*Copy the type of data being passed*/
 		data_type = driver->data_ready[index] & USER_SPACE_DATA_TYPE;
 		driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
 		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
 		if (ret == -EFAULT)
 			goto exit;
@@ -3029,11 +3157,13 @@
 		 * memory device any more, the condition needs to be cleared.
 		 */
 		driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
 	}
 
 	if (driver->data_ready[index] & HDLC_SUPPORT_TYPE) {
 		data_type = driver->data_ready[index] & HDLC_SUPPORT_TYPE;
 		driver->data_ready[index] ^= HDLC_SUPPORT_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
 		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
 		if (ret == -EFAULT)
 			goto exit;
@@ -3058,6 +3188,7 @@
 		if (ret == -EFAULT)
 			goto exit;
 		driver->data_ready[index] ^= DEINIT_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
 		mutex_unlock(&driver->diagchar_mutex);
 		diag_remove_client_entry(file);
 		return ret;
@@ -3075,6 +3206,7 @@
 		if (write_len > 0)
 			ret += write_len;
 		driver->data_ready[index] ^= MSG_MASKS_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
 		goto exit;
 	}
 
@@ -3101,6 +3233,7 @@
 				goto exit;
 		}
 		driver->data_ready[index] ^= EVENT_MASKS_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
 		goto exit;
 	}
 
@@ -3117,6 +3250,7 @@
 		if (write_len > 0)
 			ret += write_len;
 		driver->data_ready[index] ^= LOG_MASKS_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
 		goto exit;
 	}
 
@@ -3133,6 +3267,7 @@
 		if (ret == -EFAULT)
 			goto exit;
 		driver->data_ready[index] ^= PKT_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
 		driver->in_busy_pktdata = 0;
 		goto exit;
 	}
@@ -3150,6 +3285,7 @@
 			goto exit;
 
 		driver->data_ready[index] ^= DCI_PKT_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
 		driver->in_busy_dcipktdata = 0;
 		goto exit;
 	}
@@ -3171,6 +3307,7 @@
 			goto exit;
 
 		driver->data_ready[index] ^= DCI_EVENT_MASKS_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
 		goto exit;
 	}
 
@@ -3190,6 +3327,7 @@
 		if (ret == -EFAULT)
 			goto exit;
 		driver->data_ready[index] ^= DCI_LOG_MASKS_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
 		goto exit;
 	}
 
@@ -3221,6 +3359,7 @@
 			exit_stat = diag_copy_dci(buf, count, entry, &ret);
 			mutex_lock(&driver->diagchar_mutex);
 			driver->data_ready[index] ^= DCI_DATA_TYPE;
+			atomic_dec(&driver->data_ready_notif[index]);
 			mutex_unlock(&driver->diagchar_mutex);
 			if (exit_stat == 1) {
 				mutex_unlock(&driver->dci_mutex);
@@ -3742,7 +3881,7 @@
 		goto fail;
 	mutex_init(&driver->diag_id_mutex);
 	INIT_LIST_HEAD(&driver->diag_id_list);
-	diag_add_diag_id_to_list(DIAG_ID_APPS, "APPS");
+	diag_add_diag_id_to_list(DIAG_ID_APPS, "APPS", APPS_DATA, APPS_DATA);
 	pr_debug("diagchar initialized now");
 	ret = diagfwd_bridge_init();
 	if (ret)
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index fc67c1a..34624ad 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -226,6 +226,7 @@
 			 * situation.
 			 */
 			driver->data_ready[i] |= USER_SPACE_DATA_TYPE;
+			atomic_inc(&driver->data_ready_notif[i]);
 			pr_debug("diag: Force wakeup of logging process\n");
 			wake_up_interruptible(&driver->wait_q);
 			break;
@@ -491,8 +492,10 @@
 
 	mutex_lock(&driver->diagchar_mutex);
 	for (i = 0; i < driver->num_clients; i++)
-		if (driver->client_map[i].pid != 0)
+		if (driver->client_map[i].pid != 0) {
 			driver->data_ready[i] |= type;
+			atomic_inc(&driver->data_ready_notif[i]);
+		}
 	wake_up_interruptible(&driver->wait_q);
 	mutex_unlock(&driver->diagchar_mutex);
 }
@@ -509,6 +512,8 @@
 					driver->client_map[j].pid ==
 					driver->md_session_map[i]->pid) {
 					driver->data_ready[j] |= type;
+					atomic_inc(
+						&driver->data_ready_notif[j]);
 					break;
 				}
 			}
@@ -524,6 +529,7 @@
 	for (i = 0; i < driver->num_clients; i++)
 		if (driver->client_map[i].pid == process_id) {
 			driver->data_ready[i] |= data_type;
+			atomic_inc(&driver->data_ready_notif[i]);
 			break;
 		}
 	wake_up_interruptible(&driver->wait_q);
@@ -993,6 +999,8 @@
 	struct diag_cmd_reg_entry_t entry;
 	struct diag_cmd_reg_entry_t *temp_entry = NULL;
 	struct diag_cmd_reg_t *reg_item = NULL;
+	struct diagfwd_info *fwd_info = NULL;
+	uint32_t pd_mask = 0;
 
 	if (!buf)
 		return -EIO;
@@ -1030,12 +1038,13 @@
 	temp_entry = diag_cmd_search(&entry, ALL_PROC);
 	if (temp_entry) {
 		reg_item = container_of(temp_entry, struct diag_cmd_reg_t,
-								entry);
+					entry);
 		if (info) {
+			MD_PERIPHERAL_PD_MASK(TYPE_CMD, reg_item->proc,
+				pd_mask);
 			if ((MD_PERIPHERAL_MASK(reg_item->proc) &
 				info->peripheral_mask) ||
-				(MD_PERIPHERAL_PD_MASK(reg_item->proc) &
-				info->peripheral_mask))
+				(pd_mask & info->peripheral_mask))
 				write_len = diag_send_data(reg_item, buf, len);
 		} else {
 			if (MD_PERIPHERAL_MASK(reg_item->proc) &
@@ -1657,6 +1666,9 @@
 	switch (type) {
 	case TYPE_DATA:
 		if (peripheral >= 0 && peripheral < NUM_PERIPHERALS) {
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"Marking buffer as free after write done p: %d, t: %d, buf_num: %d\n",
+				peripheral, type, num);
 			diagfwd_write_done(peripheral, type, num);
 			diag_ws_on_copy(DIAG_WS_MUX);
 		} else if (peripheral == APPS_DATA) {
@@ -1671,6 +1683,9 @@
 	case TYPE_CMD:
 		if (peripheral >= 0 && peripheral < NUM_PERIPHERALS) {
 			diagfwd_write_done(peripheral, type, num);
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"Marking buffer as free after write done p: %d, t: %d, buf_num: %d\n",
+				peripheral, type, num);
 		}
 		if (peripheral == APPS_DATA ||
 				ctxt == DIAG_MEMORY_DEVICE_MODE) {
@@ -1787,6 +1802,9 @@
 	}
 	kmemleak_not_leak(driver->data_ready);
 
+	for (i = 0; i < THRESHOLD_CLIENT_LIMIT; i++)
+		atomic_set(&driver->data_ready_notif[i], 0);
+
 	if (driver->apps_req_buf == NULL) {
 		driver->apps_req_buf = kzalloc(DIAG_MAX_REQ_SIZE, GFP_KERNEL);
 		if (!driver->apps_req_buf)
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
index d8c107e..26661e6 100644
--- a/drivers/char/diag/diagfwd_cntl.c
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -694,7 +694,8 @@
 	}
 }
 
-int diag_add_diag_id_to_list(uint8_t diag_id, char *process_name)
+int diag_add_diag_id_to_list(uint8_t diag_id, char *process_name,
+	uint8_t pd_val, uint8_t peripheral)
 {
 	struct diag_id_tbl_t *new_item = NULL;
 
@@ -713,6 +714,8 @@
 	}
 	kmemleak_not_leak(new_item->process_name);
 	new_item->diag_id = diag_id;
+	new_item->pd_val = pd_val;
+	new_item->peripheral = peripheral;
 	strlcpy(new_item->process_name, process_name, strlen(process_name) + 1);
 	INIT_LIST_HEAD(&new_item->link);
 	mutex_lock(&driver->diag_id_mutex);
@@ -747,54 +750,58 @@
 {
 	struct diag_ctrl_diagid *header = NULL;
 	struct diag_ctrl_diagid ctrl_pkt;
-	struct diagfwd_info *fwd_info_data = NULL;
-	struct diagfwd_info *fwd_info_cmd = NULL;
+	struct diagfwd_info *fwd_info = NULL;
 	char *process_name = NULL;
 	int err = 0;
+	int pd_val;
 	char *root_str = NULL;
 	uint8_t local_diag_id = 0;
-	uint8_t new_request = 0, i = 0;
+	uint8_t new_request = 0, i = 0, ch_type = 0;
 
 	if (!buf || len == 0 || peripheral >= NUM_PERIPHERALS)
 		return;
 
-	fwd_info_data = &peripheral_info[TYPE_DATA][peripheral];
-	if (!fwd_info_data)
-		return;
-
-	fwd_info_cmd = &peripheral_info[TYPE_CMD][peripheral];
-	if (!fwd_info_cmd)
-		return;
-
 	header = (struct diag_ctrl_diagid *)buf;
 	process_name = (char *)&header->process_name;
 	if (diag_query_diag_id(process_name, &local_diag_id))
 		ctrl_pkt.diag_id = local_diag_id;
 	else {
 		diag_id++;
-		diag_add_diag_id_to_list(diag_id, process_name);
-		ctrl_pkt.diag_id = diag_id;
 		new_request = 1;
+		pd_val = diag_query_pd(process_name);
+		if (pd_val < 0)
+			return;
+		diag_add_diag_id_to_list(diag_id, process_name,
+			pd_val, peripheral);
+		ctrl_pkt.diag_id = diag_id;
 	}
 	root_str = strnstr(process_name, DIAG_ID_ROOT_STRING,
 		strlen(process_name));
 
 	if (new_request) {
-		fwd_info_data->num_pd++;
-		fwd_info_cmd->num_pd++;
-		if (root_str) {
-			fwd_info_cmd->diagid_root = ctrl_pkt.diag_id;
-			fwd_info_data->diagid_root = ctrl_pkt.diag_id;
-		} else {
-			i = fwd_info_cmd->num_pd - 2;
-			if (i >= 0 && i < MAX_PERIPHERAL_UPD)
-				fwd_info_cmd->diagid_user[i] =
-				ctrl_pkt.diag_id;
+		for (ch_type = 0; ch_type < NUM_TYPES; ch_type++) {
+			if (ch_type == TYPE_DCI ||
+				ch_type == TYPE_DCI_CMD)
+				continue;
+			fwd_info = &peripheral_info[ch_type][peripheral];
+			fwd_info->num_pd++;
 
-			i = fwd_info_data->num_pd - 2;
-			if (i >= 0 && i < MAX_PERIPHERAL_UPD)
-				fwd_info_data->diagid_user[i] =
-				ctrl_pkt.diag_id;
+			if (root_str) {
+				fwd_info->root_diag_id.diagid_val =
+					ctrl_pkt.diag_id;
+				fwd_info->root_diag_id.reg_str =
+					process_name;
+				fwd_info->root_diag_id.pd = pd_val;
+			} else {
+				i = fwd_info->num_pd - 2;
+				if (i >= 0 && i < MAX_PERIPHERAL_UPD) {
+					fwd_info->upd_diag_id[i].diagid_val =
+						ctrl_pkt.diag_id;
+					fwd_info->upd_diag_id[i].reg_str =
+						process_name;
+					fwd_info->upd_diag_id[i].pd = pd_val;
+				}
+			}
 		}
 	}
 
@@ -827,7 +834,8 @@
 			driver->diag_id_sent[peripheral] = 1;
 			diag_send_updates_peripheral(peripheral);
 		}
-		diagfwd_buffers_init(fwd_info_data);
+		fwd_info = &peripheral_info[TYPE_DATA][peripheral];
+		diagfwd_buffers_init(fwd_info);
 		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
 		"diag: diag_id sent = %d to peripheral = %d with diag_id = %d for %s :\n",
 			driver->diag_id_sent[peripheral], peripheral,
diff --git a/drivers/char/diag/diagfwd_cntl.h b/drivers/char/diag/diagfwd_cntl.h
index 8b22d7e..1d8d167 100644
--- a/drivers/char/diag/diagfwd_cntl.h
+++ b/drivers/char/diag/diagfwd_cntl.h
@@ -275,7 +275,8 @@
 } __packed;
 
 int diagfwd_cntl_init(void);
-int diag_add_diag_id_to_list(uint8_t diag_id, char *process_name);
+int diag_add_diag_id_to_list(uint8_t diag_id,
+	char *process_name, uint8_t pd_val, uint8_t peripheral);
 void diagfwd_cntl_channel_init(void);
 void diagfwd_cntl_exit(void);
 void diag_cntl_channel_open(struct diagfwd_info *p_info);
diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c
index 4d4b660..4d6ae23 100644
--- a/drivers/char/diag/diagfwd_peripheral.c
+++ b/drivers/char/diag/diagfwd_peripheral.c
@@ -30,6 +30,7 @@
 #include "diag_mux.h"
 #include "diag_ipc_logging.h"
 #include "diagfwd_glink.h"
+#include "diag_memorydevice.h"
 
 struct data_header {
 	uint8_t control_char;
@@ -187,8 +188,10 @@
 
 static int check_bufsize_for_encoding(struct diagfwd_buf_t *buf, uint32_t len)
 {
+	int i, ctx = 0;
 	uint32_t max_size = 0;
 	unsigned char *temp_buf = NULL;
+	struct diag_md_info *ch = NULL;
 
 	if (!buf || len == 0)
 		return -EINVAL;
@@ -202,11 +205,31 @@
 		}
 
 		if (buf->len < max_size) {
+			if (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE) {
+				ch = &diag_md[DIAG_LOCAL_PROC];
+				for (i = 0; ch != NULL &&
+						i < ch->num_tbl_entries; i++) {
+					if (ch->tbl[i].buf == buf->data) {
+						ctx = ch->tbl[i].ctx;
+						ch->tbl[i].buf = NULL;
+						ch->tbl[i].len = 0;
+						ch->tbl[i].ctx = 0;
+						DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+						"Flushed mdlog table entries before reallocating data buffer, p:%d, t:%d\n",
+						GET_BUF_PERIPHERAL(ctx),
+						GET_BUF_TYPE(ctx));
+						break;
+					}
+				}
+			}
 			temp_buf = krealloc(buf->data, max_size +
 						APF_DIAG_PADDING,
 					    GFP_KERNEL);
 			if (!temp_buf)
 				return -ENOMEM;
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"Reallocated data buffer: %pK with size: %d\n",
+			temp_buf, max_size);
 			buf->data = temp_buf;
 			buf->len = max_size;
 		}
@@ -215,10 +238,19 @@
 	return buf->len;
 }
 
+/*
+ * diag_md_get_peripheral(int ctxt)
+ *
+ * The context (ctxt) encodes the peripheral, channel type, buffer number
+ * and diag_id. The function decodes ctxt, checks for an active user PD
+ * session using the diag_id and returns the PD if active, else the peripheral.
+ *
+ */
 int diag_md_get_peripheral(int ctxt)
 {
-	int pd = 0, i = 0;
+	uint8_t diag_id = 0, i = 0, pd = 0;
 	int type = 0, peripheral = -EINVAL;
+	int index = 0;
 	struct diagfwd_info *fwd_info = NULL;
 
 	peripheral = GET_BUF_PERIPHERAL(ctxt);
@@ -246,22 +278,22 @@
 	if (!fwd_info)
 		return -EINVAL;
 
-	pd = GET_PD_CTXT(ctxt);
+	diag_id = GET_PD_CTXT(ctxt);
 
-	if (driver->num_pd_session) {
-		if (pd == fwd_info->diagid_root) {
-			if (peripheral > NUM_PERIPHERALS)
+	if (driver->num_pd_session &&
+		driver->feature[peripheral].untag_header) {
+		if (diag_id == fwd_info->root_diag_id.diagid_val) {
+			if (peripheral != fwd_info->root_diag_id.pd)
 				peripheral = -EINVAL;
 		} else {
 			for (i = 0; i <= (fwd_info->num_pd - 2); i++) {
-				if (pd == fwd_info->diagid_user[i]) {
-					switch (peripheral) {
-					case PERIPHERAL_MODEM:
-					if (driver->pd_logging_mode[0])
-						peripheral = UPD_WLAN;
-					break;
-					default:
-						peripheral = -EINVAL;
+				if (diag_id ==
+					fwd_info->upd_diag_id[i].diagid_val) {
+					pd = fwd_info->upd_diag_id[i].pd;
+					index = pd - UPD_WLAN;
+					if ((index >= 0 && index < NUM_UPD) &&
+					driver->pd_logging_mode[index]) {
+						peripheral = pd;
 						break;
 					}
 				}
@@ -377,12 +409,28 @@
 	mutex_unlock(&fwd_info->data_mutex);
 	mutex_unlock(&driver->hdlc_disable_mutex);
 	if (buf) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"Marking buffer as free p: %d, t: %d, buf_num: %d\n",
+			fwd_info->peripheral, fwd_info->type,
+			GET_BUF_NUM(buf->ctxt));
 		diagfwd_write_done(fwd_info->peripheral, fwd_info->type,
 				   GET_BUF_NUM(buf->ctxt));
 	}
 	diagfwd_queue_read(fwd_info);
 }
 
+/*
+ * diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info,
+ *			   unsigned char *buf, int len)
+ *
+ * Data received from the peripheral can contain data from both the core
+ * and user PDs. The function segregates the data based on the diag_id in
+ * the header of each packet chunk and copies it to PD-specific buffers.
+ * It sets the buffer context using the diag_id so the stream can later be
+ * split according to the active PD logging sessions.
+ *
+ */
+
 static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info,
 				   unsigned char *buf, int len)
 {
@@ -458,8 +506,10 @@
 				*(uint16_t *) (temp_buf_main + 2);
 			if (packet_len > PERIPHERAL_BUF_SZ)
 				goto end;
-			if ((*temp_buf_main) == fwd_info->diagid_root) {
-				ctxt_cpd = fwd_info->diagid_root;
+			if ((*temp_buf_main) ==
+				fwd_info->root_diag_id.diagid_val) {
+				ctxt_cpd =
+					fwd_info->root_diag_id.diagid_val;
 				len_cpd += packet_len;
 				if (temp_buf_cpd) {
 					memcpy(temp_buf_cpd,
@@ -467,46 +517,25 @@
 					temp_buf_cpd += packet_len;
 				}
 			} else {
-				for (i = 0; i <= (fwd_info->num_pd - 2); i++)
-					if ((*temp_buf_main) ==
-						fwd_info->diagid_user[i])
-						break;
-				ctxt_upd[i] = fwd_info->diagid_user[i];
-				if (temp_buf_upd[i]) {
-					memcpy(temp_buf_upd[i],
-					(temp_buf_main + 4), packet_len);
-					temp_buf_upd[i] += packet_len;
+				for (i = 0; i <= (fwd_info->num_pd - 2); i++) {
+				if ((*temp_buf_main) ==
+					fwd_info->upd_diag_id[i].diagid_val) {
+					ctxt_upd[i] =
+					fwd_info->upd_diag_id[i].diagid_val;
+					if (temp_buf_upd[i]) {
+						memcpy(temp_buf_upd[i],
+							(temp_buf_main + 4),
+							packet_len);
+						temp_buf_upd[i] += packet_len;
+					}
+					len_upd[i] += packet_len;
+					}
 				}
-				len_upd[i] += packet_len;
 			}
 			len = len - 4;
 			temp_buf_main += (packet_len + 4);
 			processed += packet_len;
 		}
-		for (i = 0; i <= (fwd_info->num_pd - 2); i++) {
-			if (fwd_info->type == TYPE_DATA && len_upd[i]) {
-				if (flag_buf_1) {
-					fwd_info->upd_len[i][0] = len_upd[i];
-					temp_fwdinfo_upd =
-						fwd_info->buf_upd[i][0];
-				} else {
-					fwd_info->upd_len[i][1] = len_upd[i];
-					temp_fwdinfo_upd =
-						fwd_info->buf_upd[i][1];
-				}
-				temp_fwdinfo_upd->ctxt &= 0x00FFFFFF;
-				temp_fwdinfo_upd->ctxt |=
-					(SET_PD_CTXT(ctxt_upd[i]));
-				atomic_set(&temp_fwdinfo_upd->in_busy, 1);
-				diagfwd_data_process_done(fwd_info,
-					temp_fwdinfo_upd, len_upd[i]);
-			} else {
-				if (flag_buf_1)
-					fwd_info->upd_len[i][0] = 0;
-				if (flag_buf_2)
-					fwd_info->upd_len[i][1] = 0;
-			}
-		}
 
 		if (flag_buf_1) {
 			fwd_info->cpd_len_1 = len_cpd;
@@ -520,14 +549,31 @@
 					fwd_info->upd_len[i][1] = len_upd[i];
 		}
 
+		for (i = 0; i <= (fwd_info->num_pd - 2); i++) {
+			if (fwd_info->type == TYPE_DATA && len_upd[i]) {
+				if (flag_buf_1)
+					temp_fwdinfo_upd =
+						fwd_info->buf_upd[i][0];
+				else
+					temp_fwdinfo_upd =
+						fwd_info->buf_upd[i][1];
+				temp_fwdinfo_upd->ctxt &= 0x00FFFFFF;
+				temp_fwdinfo_upd->ctxt |=
+					(SET_PD_CTXT(ctxt_upd[i]));
+				atomic_set(&temp_fwdinfo_upd->in_busy, 1);
+				diagfwd_data_process_done(fwd_info,
+					temp_fwdinfo_upd, len_upd[i]);
+			} else {
+				if (flag_buf_1)
+					fwd_info->upd_len[i][0] = 0;
+				if (flag_buf_2)
+					fwd_info->upd_len[i][1] = 0;
+			}
+		}
+
 		if (len_cpd) {
-			if (flag_buf_1)
-				fwd_info->cpd_len_1 = len_cpd;
-			else
-				fwd_info->cpd_len_2 = len_cpd;
 			temp_fwdinfo_cpd->ctxt &= 0x00FFFFFF;
-			temp_fwdinfo_cpd->ctxt |=
-				(SET_PD_CTXT(ctxt_cpd));
+			temp_fwdinfo_cpd->ctxt |= (SET_PD_CTXT(ctxt_cpd));
 			diagfwd_data_process_done(fwd_info,
 				temp_fwdinfo_cpd, len_cpd);
 		} else {
@@ -543,6 +589,10 @@
 end:
 	diag_ws_release();
 	if (temp_fwdinfo_cpd) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"Marking buffer as free p: %d, t: %d, buf_num: %d\n",
+			fwd_info->peripheral, fwd_info->type,
+			GET_BUF_NUM(temp_fwdinfo_cpd->ctxt));
 		diagfwd_write_done(fwd_info->peripheral, fwd_info->type,
 				   GET_BUF_NUM(temp_fwdinfo_cpd->ctxt));
 	}
@@ -663,6 +713,10 @@
 	mutex_unlock(&fwd_info->data_mutex);
 	mutex_unlock(&driver->hdlc_disable_mutex);
 	if (temp_buf) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"Marking buffer as free p: %d, t: %d, buf_num: %d\n",
+			fwd_info->peripheral, fwd_info->type,
+			GET_BUF_NUM(temp_buf->ctxt));
 		diagfwd_write_done(fwd_info->peripheral, fwd_info->type,
 				   GET_BUF_NUM(temp_buf->ctxt));
 	}
@@ -742,6 +796,16 @@
 		else if (fwd_info->buf_2 && fwd_info->buf_2->data_raw == buf)
 			atomic_set(&fwd_info->buf_2->in_busy, 0);
 	}
+	if (fwd_info->buf_1 && !atomic_read(&(fwd_info->buf_1->in_busy))) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"Buffer 1 for core PD is marked free, p: %d, t: %d\n",
+			fwd_info->peripheral, fwd_info->type);
+	}
+	if (fwd_info->buf_2 && !atomic_read(&(fwd_info->buf_2->in_busy))) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"Buffer 2 for core PD is marked free, p: %d, t: %d\n",
+			fwd_info->peripheral, fwd_info->type);
+	}
 }
 
 int diagfwd_peripheral_init(void)
@@ -776,12 +840,13 @@
 			fwd_info->cpd_len_1 = 0;
 			fwd_info->cpd_len_2 = 0;
 			fwd_info->num_pd = 0;
+			fwd_info->root_diag_id.diagid_val = 0;
 			mutex_init(&fwd_info->buf_mutex);
 			mutex_init(&fwd_info->data_mutex);
 			spin_lock_init(&fwd_info->write_buf_lock);
 
 			for (i = 0; i < MAX_PERIPHERAL_UPD; i++) {
-				fwd_info->diagid_user[i] = 0;
+				fwd_info->upd_diag_id[i].diagid_val = 0;
 				fwd_info->upd_len[i][0] = 0;
 				fwd_info->upd_len[i][1] = 0;
 				fwd_info->buf_upd[i][0] = NULL;
@@ -803,12 +868,13 @@
 			fwd_info->num_pd = 0;
 			fwd_info->cpd_len_1 = 0;
 			fwd_info->cpd_len_2 = 0;
+			fwd_info->root_diag_id.diagid_val = 0;
 			spin_lock_init(&fwd_info->write_buf_lock);
 			mutex_init(&fwd_info->buf_mutex);
 			mutex_init(&fwd_info->data_mutex);
 
 			for (i = 0; i < MAX_PERIPHERAL_UPD; i++) {
-				fwd_info->diagid_user[i] = 0;
+				fwd_info->upd_diag_id[i].diagid_val = 0;
 				fwd_info->upd_len[i][0] = 0;
 				fwd_info->upd_len[i][1] = 0;
 				fwd_info->buf_upd[i][0] = NULL;
@@ -1086,7 +1152,7 @@
 
 	if (type == TYPE_CMD) {
 		if (driver->feature[peripheral].diag_id_support)
-			if (!fwd_info->diagid_root ||
+			if (!fwd_info->root_diag_id.diagid_val ||
 				(!driver->diag_id_sent[peripheral])) {
 			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
 				 "diag: diag_id is not assigned yet\n");
@@ -1137,10 +1203,28 @@
 	if (!fwd_info->inited)
 		return;
 
-	if (fwd_info->buf_1)
-		atomic_set(&fwd_info->buf_1->in_busy, 0);
-	if (fwd_info->buf_2)
-		atomic_set(&fwd_info->buf_2->in_busy, 0);
+	/*
+	 * The logging mode here still reflects the previous mode
+	 * and will be updated to the new mode later.
+	 *
+	 * Keeping the buffers busy for Memory Device Mode.
+	 */
+
+	if ((driver->logging_mode != DIAG_USB_MODE) ||
+		driver->usb_connected) {
+		if (fwd_info->buf_1) {
+			atomic_set(&fwd_info->buf_1->in_busy, 0);
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"Buffer 1 for core PD is marked free, p: %d, t: %d\n",
+			fwd_info->peripheral, fwd_info->type);
+		}
+		if (fwd_info->buf_2) {
+			atomic_set(&fwd_info->buf_2->in_busy, 0);
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"Buffer 2 for core PD is marked free, p: %d, t: %d\n",
+			fwd_info->peripheral, fwd_info->type);
+		}
+	}
 
 	if (fwd_info->p_ops && fwd_info->p_ops->open)
 		fwd_info->p_ops->open(fwd_info->ctxt);
@@ -1265,10 +1349,18 @@
 	if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->close)
 		fwd_info->c_ops->close(fwd_info);
 
-	if (fwd_info->buf_1 && fwd_info->buf_1->data)
+	if (fwd_info->buf_1 && fwd_info->buf_1->data) {
 		atomic_set(&fwd_info->buf_1->in_busy, 0);
-	if (fwd_info->buf_2 && fwd_info->buf_2->data)
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"Buffer 1 for core PD is marked free, p: %d, t: %d\n",
+			fwd_info->peripheral, fwd_info->type);
+	}
+	if (fwd_info->buf_2 && fwd_info->buf_2->data) {
 		atomic_set(&fwd_info->buf_2->in_busy, 0);
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"Buffer 2 for core PD is marked free, p: %d, t: %d\n",
+				fwd_info->peripheral, fwd_info->type);
+	}
 
 	for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
 		if (fwd_info->buf_ptr[i])
@@ -1294,6 +1386,9 @@
 	 * in_busy flags. No need to queue read in this case.
 	 */
 	if (len == 0) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"Read Length is 0, resetting the diag buffers p: %d, t: %d\n",
+			fwd_info->peripheral, fwd_info->type);
 		diagfwd_reset_buffers(fwd_info, buf);
 		diag_ws_release();
 		return 0;
@@ -1306,7 +1401,7 @@
 	return 0;
 }
 
-void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt)
+void diagfwd_write_done(uint8_t peripheral, uint8_t type, int buf_num)
 {
 	int i = 0, upd_valid_len = 0;
 	struct diagfwd_info *fwd_info = NULL;
@@ -1318,8 +1413,14 @@
 	if (!fwd_info)
 		return;
 
-	if (ctxt == 1 && fwd_info->buf_1) {
-		/* Buffer 1 for core PD is freed */
+	if (buf_num == 1 && fwd_info->buf_1) {
+		/*
+		 * The core PD buffer data has been processed and its
+		 * length is reset to zero.
+		 *
+		 * Check whether any user PD buffer still holds data
+		 * before freeing the core PD buffer.
+		 */
 		fwd_info->cpd_len_1 = 0;
 		for (i = 0; i <= (fwd_info->num_pd - 2); i++) {
 			if (fwd_info->upd_len[i][0]) {
@@ -1327,10 +1428,24 @@
 				break;
 			}
 		}
-		if (!upd_valid_len)
+		/*
+		 * Do not free the core pd buffer if valid data
+		 * is present in any user PD buffer.
+		 */
+		if (!upd_valid_len) {
 			atomic_set(&fwd_info->buf_1->in_busy, 0);
-	} else if (ctxt == 2 && fwd_info->buf_2) {
-		/* Buffer 2 for core PD is freed */
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"Buffer 1 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
+				fwd_info->peripheral, fwd_info->type, buf_num);
+		}
+	} else if (buf_num == 2 && fwd_info->buf_2) {
+		/*
+		 * The core PD buffer data has been processed and its
+		 * length is reset to zero.
+		 *
+		 * Check whether any user PD buffer still holds data
+		 * before freeing the core PD buffer.
+		 */
 		fwd_info->cpd_len_2 = 0;
 		for (i = 0; i <= (fwd_info->num_pd - 2); i++) {
 			if (fwd_info->upd_len[i][1]) {
@@ -1338,30 +1453,86 @@
 				break;
 			}
 		}
-		if (!upd_valid_len)
+		/*
+		 * Do not free the core pd buffer if valid data
+		 * is present in any user PD buffer
+		 */
+		if (!upd_valid_len) {
 			atomic_set(&fwd_info->buf_2->in_busy, 0);
-	} else if (ctxt >= 3 && (ctxt % 2)) {
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"Buffer 2 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
+				fwd_info->peripheral, fwd_info->type, buf_num);
+		}
+	} else if (buf_num >= 3 && (buf_num % 2)) {
+		/*
+		 * Go through each User PD buffer, validate the
+		 * request for freeing the buffer by validating
+		 * the buffer number.
+		 *
+		 */
 		for (i = 0; i <= (fwd_info->num_pd - 2); i++) {
-			if (fwd_info->buf_upd[i][0]) {
+			if (fwd_info->buf_upd[i][0] &&
+				(buf_num == ((2 * i) + 3))) {
 				/* Buffer 1 for ith user PD is freed */
-			atomic_set(&fwd_info->buf_upd[i][0]->in_busy, 0);
-			fwd_info->upd_len[i][0] = 0;
+				atomic_set(&fwd_info->buf_upd[i][0]->in_busy,
+					0);
+				fwd_info->upd_len[i][0] = 0;
 			}
-		if (!fwd_info->cpd_len_1)
+			/*
+			 * Check if there is any data in user PD buffer other
+			 * than buffer requested for freeing.
+			 *
+			 */
+			if (fwd_info->upd_len[i][0])
+				upd_valid_len = 1;
+		}
+		/*
+		 * Mark the core pd buffer free if there is no
+		 * data present in core PD buffer and other User PD buffer.
+		 *
+		 */
+		if (!upd_valid_len && !fwd_info->cpd_len_1) {
 			atomic_set(&fwd_info->buf_1->in_busy, 0);
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+				"Buffer 1 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
+				fwd_info->peripheral, fwd_info->type, buf_num);
 		}
-	} else if (ctxt >= 4 && !(ctxt % 2)) {
+	} else if (buf_num >= 4 && !(buf_num % 2)) {
+		/*
+		 * Go through each User PD buffer, validate the
+		 * request for freeing the buffer by validating
+		 * the buffer number.
+		 *
+		 */
 		for (i = 0; i <= (fwd_info->num_pd - 2); i++) {
-			if (fwd_info->buf_upd[i][1]) {
+			if (fwd_info->buf_upd[i][1] &&
+				(buf_num == ((2 * i) + 4))) {
 				/* Buffer 2 for ith user PD is freed */
-			atomic_set(&fwd_info->buf_upd[i][0]->in_busy, 0);
-			fwd_info->upd_len[i][1] = 0;
+				atomic_set(&fwd_info->buf_upd[i][1]->in_busy,
+					0);
+				fwd_info->upd_len[i][1] = 0;
 			}
-		if (!fwd_info->cpd_len_2)
-			atomic_set(&fwd_info->buf_2->in_busy, 0);
+			/*
+			 * Check if there is any data in user PD buffer other
+			 * than buffer requested for freeing.
+			 *
+			 */
+			if (fwd_info->upd_len[i][1])
+				upd_valid_len = 1;
 		}
+		/*
+		 * Mark the core pd buffer free if there is no
+		 * data present in core PD buffer and other User PD buffer.
+		 *
+		 */
+		if (!upd_valid_len && !fwd_info->cpd_len_2) {
+			atomic_set(&fwd_info->buf_2->in_busy, 0);
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"Buffer 2 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
+			fwd_info->peripheral, fwd_info->type, buf_num);
+		}
 	} else
-		pr_err("diag: In %s, invalid ctxt %d\n", __func__, ctxt);
+		pr_err("diag: In %s, invalid buf_num %d\n", __func__, buf_num);
 
 	diagfwd_queue_read(fwd_info);
 }
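
The buf_num convention that diagfwd_write_done() now validates maps the core PD buffers to 1 and 2, and buffers 1/2 of user PD i to 2*i+3 and 2*i+4. A minimal userspace sketch of that mapping (not part of the patch; helper names are illustrative):

#include <assert.h>
#include <stdio.h>

/* buffer 1 or 2 of user PD 'pd_index' maps to buf_num 2*i+3 or 2*i+4 */
static int upd_buf_num(int pd_index, int which)
{
	return 2 * pd_index + (which == 1 ? 3 : 4);
}

/* inverse mapping, valid only for buf_num >= 3 (user PD buffers) */
static void upd_decode(int buf_num, int *pd_index, int *which)
{
	*which = (buf_num % 2) ? 1 : 2;
	*pd_index = (buf_num - (*which == 1 ? 3 : 4)) / 2;
}

int main(void)
{
	int pd, which;

	upd_decode(upd_buf_num(0, 1), &pd, &which);
	assert(pd == 0 && which == 1);		/* buf_num 3 */
	upd_decode(upd_buf_num(1, 2), &pd, &which);
	assert(pd == 1 && which == 2);		/* buf_num 6 */
	printf("buf_num mapping holds\n");
	return 0;
}
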
diff --git a/drivers/char/diag/diagfwd_peripheral.h b/drivers/char/diag/diagfwd_peripheral.h
index b16670e..6ddce32 100644
--- a/drivers/char/diag/diagfwd_peripheral.h
+++ b/drivers/char/diag/diagfwd_peripheral.h
@@ -62,6 +62,12 @@
 	void (*queue_read)(void *ctxt);
 };
 
+struct diag_id_info {
+	uint8_t diagid_val;
+	uint8_t pd;
+	char *reg_str;
+};
+
 struct diagfwd_info {
 	uint8_t peripheral;
 	uint8_t type;
@@ -69,8 +75,6 @@
 	uint8_t inited;
 	uint8_t ch_open;
 	uint8_t num_pd;
-	uint8_t diagid_root;
-	uint8_t diagid_user[MAX_PERIPHERAL_UPD];
 	int cpd_len_1;
 	int cpd_len_2;
 	int upd_len[MAX_PERIPHERAL_UPD][2];
@@ -81,6 +85,8 @@
 	struct mutex buf_mutex;
 	struct mutex data_mutex;
 	void *ctxt;
+	struct diag_id_info root_diag_id;
+	struct diag_id_info upd_diag_id[MAX_PERIPHERAL_UPD];
 	struct diagfwd_buf_t *buf_1;
 	struct diagfwd_buf_t *buf_2;
 	struct diagfwd_buf_t *buf_upd[MAX_PERIPHERAL_UPD][2];
@@ -113,7 +119,7 @@
 void diagfwd_deregister(uint8_t peripheral, uint8_t type, void *ctxt);
 
 int diagfwd_write(uint8_t peripheral, uint8_t type, void *buf, int len);
-void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt);
+void diagfwd_write_done(uint8_t peripheral, uint8_t type, int buf_num);
 void diagfwd_buffers_init(struct diagfwd_info *fwd_info);
 
 /*
diff --git a/drivers/char/hw_random/msm_rng.c b/drivers/char/hw_random/msm_rng.c
index 7641a6a..d5dd8ae 100644
--- a/drivers/char/hw_random/msm_rng.c
+++ b/drivers/char/hw_random/msm_rng.c
@@ -138,8 +138,6 @@
 
 		/* read FIFO */
 		val = readl_relaxed(base + PRNG_DATA_OUT_OFFSET);
-		if (!val)
-			break;	/* no data to read so just bail */
 
 		/* write data back to callers pointer */
 		*(retdata++) = val;
diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
index 3b13c9b..d4f27d7 100644
--- a/drivers/clk/qcom/dispcc-sdm845.c
+++ b/drivers/clk/qcom/dispcc-sdm845.c
@@ -390,7 +390,7 @@
 	F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
 	F(171428571, P_GPLL0_OUT_MAIN, 3.5, 0, 0),
 	F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
-	F(286670000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+	F(286666667, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
 	F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
 	F(344000000, P_DISP_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
 	F(430000000, P_DISP_CC_PLL0_OUT_MAIN, 2, 0, 0),
diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c
index 55d14ff..53446a2 100644
--- a/drivers/clk/qcom/gpucc-sdm845.c
+++ b/drivers/clk/qcom/gpucc-sdm845.c
@@ -292,6 +292,7 @@
 	F(430000000, P_CRC_DIV,  1, 0, 0),
 	F(565000000, P_CRC_DIV,  1, 0, 0),
 	F(650000000, P_CRC_DIV,  1, 0, 0),
+	F(700000000, P_CRC_DIV,  1, 0, 0),
 	F(750000000, P_CRC_DIV,  1, 0, 0),
 	F(780000000, P_CRC_DIV,  1, 0, 0),
 	{ }
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
index 89ed5cd..dd02a8f 100644
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
@@ -762,6 +762,10 @@
 		return -EINVAL;
 	}
 
+	/* Skip vco recalculation for continuous splash use case */
+	if (pll->handoff_resources)
+		return 0;
+
 	rc = mdss_pll_resource_enable(pll, true);
 	if (rc) {
 		pr_err("failed to enable pll (%d) resource, rc=%d\n",
@@ -814,6 +818,19 @@
 	if (!vco->priv)
 		pr_err("vco priv is null\n");
 
+	/*
+	 * Calculate the vco rate from HW registers only for handoff cases.
+	 * For other cases where a vco_10nm_set_rate() has already been
+	 * called, just return the rate that was set earlier. This is due
+	 * to the fact that recalculating VCO rate requires us to read the
+	 * correct value of the pll_out_div divider clock, which is only set
+	 * afterwards.
+	 */
+	if (pll->vco_current_rate != 0) {
+		pr_debug("returning vco rate = %lld\n", pll->vco_current_rate);
+		return pll->vco_current_rate;
+	}
+
 	rc = mdss_pll_resource_enable(pll, true);
 	if (rc) {
 		pr_err("failed to enable pll(%d) resource, rc=%d\n",
@@ -821,6 +838,9 @@
 		return 0;
 	}
 
+	if (!dsi_pll_10nm_lock_status(pll))
+		pll->handoff_resources = true;
+
 	dec = MDSS_PLL_REG_R(pll->pll_base, PLL_DECIMAL_DIV_START_1);
 	dec &= 0xFF;
 
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
index 9a7e37c..e1d7373 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -117,7 +117,8 @@
 	/* Turn off the clock (and clear the event) */
 	disable_timer(cs5535_event_clock);
 
-	if (clockevent_state_shutdown(&cs5535_clockevent))
+	if (clockevent_state_detached(&cs5535_clockevent) ||
+	    clockevent_state_shutdown(&cs5535_clockevent))
 		return IRQ_HANDLED;
 
 	/* Clear the counter */
diff --git a/drivers/cpufreq/qcom-cpufreq.c b/drivers/cpufreq/qcom-cpufreq.c
index f968ffd9..d310380 100644
--- a/drivers/cpufreq/qcom-cpufreq.c
+++ b/drivers/cpufreq/qcom-cpufreq.c
@@ -42,6 +42,8 @@
 };
 
 static DEFINE_PER_CPU(struct cpufreq_suspend_t, suspend_data);
+static DEFINE_PER_CPU(int, cached_resolve_idx);
+static DEFINE_PER_CPU(unsigned int, cached_resolve_freq);
 
 static int set_cpu_freq(struct cpufreq_policy *policy, unsigned int new_freq,
 			unsigned int index)
@@ -74,6 +76,7 @@
 	int ret = 0;
 	int index;
 	struct cpufreq_frequency_table *table;
+	int first_cpu = cpumask_first(policy->related_cpus);
 
 	mutex_lock(&per_cpu(suspend_data, policy->cpu).suspend_mutex);
 
@@ -88,13 +91,11 @@
 	}
 
 	table = policy->freq_table;
-	if (!table) {
-		pr_err("cpufreq: Failed to get frequency table for CPU%u\n",
-		       policy->cpu);
-		ret = -ENODEV;
-		goto done;
-	}
-	index = cpufreq_frequency_table_target(policy, target_freq, relation);
+	if (per_cpu(cached_resolve_freq, first_cpu) == target_freq)
+		index = per_cpu(cached_resolve_idx, first_cpu);
+	else
+		index = cpufreq_frequency_table_target(policy, target_freq,
+						       relation);
 
 	pr_debug("CPU[%d] target %d relation %d (%d-%d) selected %d\n",
 		policy->cpu, target_freq, relation,
@@ -107,6 +108,23 @@
 	return ret;
 }
 
+static unsigned int msm_cpufreq_resolve_freq(struct cpufreq_policy *policy,
+					     unsigned int target_freq)
+{
+	int index;
+	int first_cpu = cpumask_first(policy->related_cpus);
+	unsigned int freq;
+
+	index = cpufreq_frequency_table_target(policy, target_freq,
+					       CPUFREQ_RELATION_L);
+	freq = policy->freq_table[index].frequency;
+
+	per_cpu(cached_resolve_idx, first_cpu) = index;
+	per_cpu(cached_resolve_freq, first_cpu) = freq;
+
+	return freq;
+}
+
 static int msm_cpufreq_verify(struct cpufreq_policy *policy)
 {
 	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
@@ -296,6 +314,7 @@
 	.init		= msm_cpufreq_init,
 	.verify		= msm_cpufreq_verify,
 	.target		= msm_cpufreq_target,
+	.resolve_freq	= msm_cpufreq_resolve_freq,
 	.get		= msm_cpufreq_get_freq,
 	.name		= "msm",
 	.attr		= msm_freq_attr,
@@ -462,6 +481,7 @@
 	for_each_possible_cpu(cpu) {
 		mutex_init(&(per_cpu(suspend_data, cpu).suspend_mutex));
 		per_cpu(suspend_data, cpu).device_suspended = 0;
+		per_cpu(cached_resolve_freq, cpu) = UINT_MAX;
 	}
 
 	rc = platform_driver_register(&msm_cpufreq_plat_driver);
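
The cached_resolve_idx/cached_resolve_freq pair above lets msm_cpufreq_target() skip the frequency-table walk when the governor asks for the frequency it has just resolved. A rough userspace analog of that caching (table values and helper names are invented for illustration):

#include <stdio.h>
#include <limits.h>

static const unsigned int freq_table[] = { 300000, 600000, 1200000, 1800000 };
#define NFREQ (sizeof(freq_table) / sizeof(freq_table[0]))

static unsigned int cached_freq = UINT_MAX;
static int cached_idx = -1;

/* lowest table frequency >= target */
static int table_lookup(unsigned int target)
{
	for (unsigned int i = 0; i < NFREQ; i++)
		if (freq_table[i] >= target)
			return (int)i;
	return (int)NFREQ - 1;
}

static unsigned int resolve_freq(unsigned int target)
{
	cached_idx = table_lookup(target);
	cached_freq = freq_table[cached_idx];
	return cached_freq;
}

static unsigned int set_target(unsigned int target)
{
	int idx = (target == cached_freq) ? cached_idx : table_lookup(target);

	return freq_table[idx];
}

int main(void)
{
	unsigned int f = resolve_freq(700000);	/* caches the 1200000 entry */

	printf("resolved %u kHz, target picks %u kHz\n", f, set_target(f));
	return 0;
}
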
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 78ab946..a3e1de0 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -613,16 +613,36 @@
 
 #ifdef CONFIG_SMP
 
+static void wake_up_idle_cpus(void *v)
+{
+	int cpu;
+	struct cpumask cpus;
+
+	preempt_disable();
+	if (v) {
+		cpumask_andnot(&cpus, v, cpu_isolated_mask);
+		cpumask_and(&cpus, &cpus, cpu_online_mask);
+	} else
+		cpumask_andnot(&cpus, cpu_online_mask, cpu_isolated_mask);
+
+	for_each_cpu(cpu, &cpus) {
+		if (cpu == smp_processor_id())
+			continue;
+		wake_up_if_idle(cpu);
+	}
+	preempt_enable();
+}
+
 /*
  * This function gets called when a part of the kernel has a new latency
- * requirement.  This means we need to get all processors out of their C-state,
- * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
- * wakes them all right up.
+ * requirement.  This means we only need to get the processors whose QoS
+ * requirement changed out of their C-state so they can recalculate a new
+ * suitable C-state. Just do a cross-cpu IPI on those CPUs to wake them up.
  */
 static int cpuidle_latency_notify(struct notifier_block *b,
 		unsigned long l, void *v)
 {
-	wake_up_all_idle_cpus();
+	wake_up_idle_cpus(v);
 	return NOTIFY_OK;
 }
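
wake_up_idle_cpus() above narrows the IPI targets with cpumask_andnot()/cpumask_and(): the CPUs named by the QoS request, minus isolated CPUs, intersected with the online mask. The same arithmetic with plain bitmasks (example mask values only):

#include <stdio.h>

int main(void)
{
	unsigned long online   = 0xffUL;	/* CPUs 0-7 online */
	unsigned long isolated = 0x30UL;	/* CPUs 4 and 5 isolated */
	unsigned long affected = 0x3cUL;	/* QoS request covers CPUs 2-5 */
	unsigned long to_wake  = (affected & ~isolated) & online;

	printf("IPI mask 0x%lx\n", to_wake);	/* 0xc: only CPUs 2 and 3 */
	return 0;
}
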
 
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index 630cda2..fc1b4e4 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -1356,7 +1356,8 @@
 	struct lpm_cpu *cpu = per_cpu(cpu_lpm, dev->cpu);
 	bool success = false;
 	const struct cpumask *cpumask = get_cpu_mask(dev->cpu);
-	int64_t start_time = ktime_to_ns(ktime_get()), end_time;
+	ktime_t start = ktime_get();
+	uint64_t start_time = ktime_to_ns(start), end_time;
 	struct power_params *pwr_params;
 
 	pwr_params = &cpu->levels[idx].pwr;
@@ -1381,9 +1382,7 @@
 	cluster_unprepare(cpu->parent, cpumask, idx, true, end_time);
 	cpu_unprepare(cpu, idx, true);
 	sched_set_cpu_cstate(smp_processor_id(), 0, 0, 0);
-	end_time = ktime_to_ns(ktime_get()) - start_time;
-	do_div(end_time, 1000);
-	dev->last_residency = end_time;
+	dev->last_residency = ktime_us_delta(ktime_get(), start);
 	update_history(dev, idx);
 	trace_cpu_idle_exit(idx, success);
 	local_irq_enable();
diff --git a/drivers/cpuidle/lpm-levels.h b/drivers/cpuidle/lpm-levels.h
index b3364b4..71416f7 100644
--- a/drivers/cpuidle/lpm-levels.h
+++ b/drivers/cpuidle/lpm-levels.h
@@ -125,7 +125,7 @@
 uint32_t *get_per_cpu_min_residency(int cpu);
 extern struct lpm_cluster *lpm_root_node;
 
-#if CONFIG_SMP
+#if defined(CONFIG_SMP)
 extern DEFINE_PER_CPU(bool, pending_ipi);
 static inline bool is_IPI_pending(const struct cpumask *mask)
 {
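
The lpm-levels.h change swaps "#if CONFIG_SMP" for "#if defined(CONFIG_SMP)": a plain #if on an undefined macro is silently treated as 0 and trips -Wundef, while defined() states the intent explicitly. A small standalone illustration (FEATURE_X is a made-up symbol, not a kernel config option):

#include <stdio.h>

/* #define FEATURE_X 1 */

#if FEATURE_X			/* -Wundef warns: FEATURE_X is not defined */
static const char *plain_if = "on";
#else
static const char *plain_if = "off";
#endif

#if defined(FEATURE_X)		/* explicit test, no warning */
static const char *defined_if = "on";
#else
static const char *defined_if = "off";
#endif

int main(void)
{
	printf("plain #if: %s, #if defined(): %s\n", plain_if, defined_if);
	return 0;
}
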
diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c
index 6fa91ae..182097c 100644
--- a/drivers/crypto/msm/ice.c
+++ b/drivers/crypto/msm/ice.c
@@ -25,26 +25,8 @@
 #include <soc/qcom/scm.h>
 #include <soc/qcom/qseecomi.h>
 #include "iceregs.h"
-
-#ifdef CONFIG_PFK
 #include <linux/pfk.h>
-#else
-#include <linux/bio.h>
-static inline int pfk_load_key_start(const struct bio *bio,
-	struct ice_crypto_setting *ice_setting, bool *is_pfe, bool async)
-{
-	return 0;
-}
 
-static inline int pfk_load_key_end(const struct bio *bio, bool *is_pfe)
-{
-	return 0;
-}
-
-static inline void pfk_clear_on_reset(void)
-{
-}
-#endif
 
 #define TZ_SYSCALL_CREATE_SMC_ID(o, s, f) \
 	((uint32_t)((((o & 0x3f) << 24) | (s & 0xff) << 8) | (f & 0xff)))
@@ -144,6 +126,9 @@
 		return -EPERM;
 	}
 
+	if (!setting)
+		return -EINVAL;
+
 	if ((short)(crypto_data->key_index) >= 0) {
 
 		memcpy(&setting->crypto_data, crypto_data,
@@ -1451,7 +1436,7 @@
 	int ret = 0;
 	bool is_pfe = false;
 
-	if (!pdev || !req || !setting) {
+	if (!pdev || !req) {
 		pr_err("%s: Invalid params passed\n", __func__);
 		return -EINVAL;
 	}
@@ -1470,6 +1455,6 @@
 		/* It is not an error to have a request with no  bio */
 		return 0;
 	}
 
 	ret = pfk_load_key_start(req->bio, &pfk_crypto_data, &is_pfe, async);
 	if (is_pfe) {
@@ -1633,7 +1619,7 @@
 
 	list_for_each_entry(ice_dev, &ice_devices, list) {
 		if (!strcmp(ice_dev->ice_instance_type, storage_type)) {
-			pr_info("%s: found ice device %p\n", __func__, ice_dev);
+			pr_debug("%s: ice device %pK\n", __func__, ice_dev);
 			return ice_dev;
 		}
 	}
diff --git a/drivers/devfreq/arm-memlat-mon.c b/drivers/devfreq/arm-memlat-mon.c
index 9943c8c..1dca479 100644
--- a/drivers/devfreq/arm-memlat-mon.c
+++ b/drivers/devfreq/arm-memlat-mon.c
@@ -31,6 +31,7 @@
 #include "governor.h"
 #include "governor_memlat.h"
 #include <linux/perf_event.h>
+#include <linux/of_device.h>
 
 enum ev_index {
 	INST_IDX,
@@ -63,6 +64,10 @@
 	struct list_head mon_list;
 };
 
+struct memlat_mon_spec {
+	bool is_compute;
+};
+
 #define to_cpustats(cpu_grp, cpu) \
 	(&cpu_grp->cpustats[cpu - cpumask_first(&cpu_grp->cpus)])
 #define to_devstats(cpu_grp, cpu) \
@@ -96,6 +101,9 @@
 	unsigned long ev_count;
 	u64 total, enabled, running;
 
+	if (!event->pevent)
+		return 0;
+
 	total = perf_event_read_value(event->pevent, &enabled, &running);
 	ev_count = total - event->prev_count;
 	event->prev_count = total;
@@ -314,6 +322,7 @@
 	struct device *dev = &pdev->dev;
 	struct memlat_hwmon *hw;
 	struct cpu_grp_info *cpu_grp;
+	const struct memlat_mon_spec *spec;
 	int cpu, ret;
 	u32 event_id;
 
@@ -348,6 +357,22 @@
 
 	cpu_grp->event_ids[CYC_IDX] = CYC_EV;
 
+	for_each_cpu(cpu, &cpu_grp->cpus)
+		to_devstats(cpu_grp, cpu)->id = cpu;
+
+	hw->start_hwmon = &start_hwmon;
+	hw->stop_hwmon = &stop_hwmon;
+	hw->get_cnt = &get_cnt;
+
+	spec = of_device_get_match_data(dev);
+	if (spec && spec->is_compute) {
+		ret = register_compute(dev, hw);
+		if (ret)
+			pr_err("Compute Gov registration failed\n");
+
+		return ret;
+	}
+
 	ret = of_property_read_u32(dev->of_node, "qcom,cachemiss-ev",
 				   &event_id);
 	if (ret) {
@@ -372,24 +397,21 @@
 	else
 		cpu_grp->event_ids[STALL_CYC_IDX] = event_id;
 
-	for_each_cpu(cpu, &cpu_grp->cpus)
-		to_devstats(cpu_grp, cpu)->id = cpu;
-
-	hw->start_hwmon = &start_hwmon;
-	hw->stop_hwmon = &stop_hwmon;
-	hw->get_cnt = &get_cnt;
-
 	ret = register_memlat(dev, hw);
-	if (ret) {
+	if (ret)
 		pr_err("Mem Latency Gov registration failed\n");
-		return ret;
-	}
 
-	return 0;
+	return ret;
 }
 
+static const struct memlat_mon_spec spec[] = {
+	[0] = { false },
+	[1] = { true },
+};
+
 static const struct of_device_id memlat_match_table[] = {
-	{ .compatible = "qcom,arm-memlat-mon" },
+	{ .compatible = "qcom,arm-memlat-mon", .data = &spec[0] },
+	{ .compatible = "qcom,arm-cpu-mon", .data = &spec[1] },
 	{}
 };
 
diff --git a/drivers/devfreq/governor_bw_hwmon.c b/drivers/devfreq/governor_bw_hwmon.c
index a1d9b50..3026bc2 100644
--- a/drivers/devfreq/governor_bw_hwmon.c
+++ b/drivers/devfreq/governor_bw_hwmon.c
@@ -48,9 +48,6 @@
 	unsigned int hyst_trigger_count;
 	unsigned int hyst_length;
 	unsigned int idle_mbps;
-	unsigned int low_power_ceil_mbps;
-	unsigned int low_power_io_percent;
-	unsigned int low_power_delay;
 	unsigned int mbps_zones[NUM_MBPS_ZONES];
 
 	unsigned long prev_ab;
@@ -65,7 +62,6 @@
 	unsigned long hyst_mbps;
 	unsigned long hyst_trig_win;
 	unsigned long hyst_en;
-	unsigned long above_low_power;
 	unsigned long prev_req;
 	unsigned int wake;
 	unsigned int down_cnt;
@@ -317,7 +313,7 @@
 	unsigned long meas_mbps_zone;
 	unsigned long hist_lo_tol, hyst_lo_tol;
 	struct bw_hwmon *hw = node->hw;
-	unsigned int new_bw, io_percent;
+	unsigned int new_bw, io_percent = node->io_percent;
 	ktime_t ts;
 	unsigned int ms = 0;
 
@@ -353,17 +349,6 @@
 			node->hist_mem--;
 	}
 
-	/* Keep track of whether we are in low power mode consistently. */
-	if (meas_mbps > node->low_power_ceil_mbps)
-		node->above_low_power = node->low_power_delay;
-	if (node->above_low_power)
-		node->above_low_power--;
-
-	if (node->above_low_power)
-		io_percent = node->io_percent;
-	else
-		io_percent = node->low_power_io_percent;
-
 	/*
 	 * The AB value that corresponds to the lowest mbps zone greater than
 	 * or equal to the "frequency" the current measurement will pick.
@@ -785,9 +770,6 @@
 gov_attr(hyst_trigger_count, 0U, 90U);
 gov_attr(hyst_length, 0U, 90U);
 gov_attr(idle_mbps, 0U, 2000U);
-gov_attr(low_power_ceil_mbps, 0U, 2500U);
-gov_attr(low_power_io_percent, 1U, 100U);
-gov_attr(low_power_delay, 1U, 60U);
 gov_list_attr(mbps_zones, NUM_MBPS_ZONES, 0U, UINT_MAX);
 
 static struct attribute *dev_attr[] = {
@@ -804,9 +786,6 @@
 	&dev_attr_hyst_trigger_count.attr,
 	&dev_attr_hyst_length.attr,
 	&dev_attr_idle_mbps.attr,
-	&dev_attr_low_power_ceil_mbps.attr,
-	&dev_attr_low_power_io_percent.attr,
-	&dev_attr_low_power_delay.attr,
 	&dev_attr_mbps_zones.attr,
 	&dev_attr_throttle_adj.attr,
 	NULL,
@@ -940,9 +919,6 @@
 	node->guard_band_mbps = 100;
 	node->decay_rate = 90;
 	node->io_percent = 16;
-	node->low_power_ceil_mbps = 0;
-	node->low_power_io_percent = 16;
-	node->low_power_delay = 60;
 	node->bw_step = 190;
 	node->sample_ms = 50;
 	node->up_scale = 0;
diff --git a/drivers/devfreq/governor_memlat.c b/drivers/devfreq/governor_memlat.c
index 9688502..12a90d4 100644
--- a/drivers/devfreq/governor_memlat.c
+++ b/drivers/devfreq/governor_memlat.c
@@ -48,7 +48,8 @@
 static LIST_HEAD(memlat_list);
 static DEFINE_MUTEX(list_lock);
 
-static int use_cnt;
+static int memlat_use_cnt;
+static int compute_use_cnt;
 static DEFINE_MUTEX(state_lock);
 
 #define show_attr(name) \
@@ -240,8 +241,7 @@
 		if (hw->core_stats[i].mem_count)
 			ratio /= hw->core_stats[i].mem_count;
 
-		if (!hw->core_stats[i].inst_count
-		    || !hw->core_stats[i].freq)
+		if (!hw->core_stats[i].freq)
 			continue;
 
 		trace_memlat_dev_meas(dev_name(df->dev.parent),
@@ -280,16 +280,26 @@
 gov_attr(ratio_ceil, 1U, 10000U);
 gov_attr(stall_floor, 0U, 100U);
 
-static struct attribute *dev_attr[] = {
+static struct attribute *memlat_dev_attr[] = {
 	&dev_attr_ratio_ceil.attr,
 	&dev_attr_stall_floor.attr,
 	&dev_attr_freq_map.attr,
 	NULL,
 };
 
-static struct attribute_group dev_attr_group = {
+static struct attribute *compute_dev_attr[] = {
+	&dev_attr_freq_map.attr,
+	NULL,
+};
+
+static struct attribute_group memlat_dev_attr_group = {
 	.name = "mem_latency",
-	.attrs = dev_attr,
+	.attrs = memlat_dev_attr,
+};
+
+static struct attribute_group compute_dev_attr_group = {
+	.name = "compute",
+	.attrs = compute_dev_attr,
 };
 
 #define MIN_MS	10U
@@ -338,6 +348,12 @@
 	.event_handler = devfreq_memlat_ev_handler,
 };
 
+static struct devfreq_governor devfreq_gov_compute = {
+	.name = "compute",
+	.get_target_freq = devfreq_memlat_get_freq,
+	.event_handler = devfreq_memlat_ev_handler,
+};
+
 #define NUM_COLS	2
 static struct core_dev_map *init_core_dev_map(struct device *dev,
 		char *prop_name)
@@ -380,20 +396,17 @@
 	return tbl;
 }
 
-int register_memlat(struct device *dev, struct memlat_hwmon *hw)
+static struct memlat_node *register_common(struct device *dev,
+					   struct memlat_hwmon *hw)
 {
-	int ret = 0;
 	struct memlat_node *node;
 
 	if (!hw->dev && !hw->of_node)
-		return -EINVAL;
+		return ERR_PTR(-EINVAL);
 
 	node = devm_kzalloc(dev, sizeof(*node), GFP_KERNEL);
 	if (!node)
-		return -ENOMEM;
-
-	node->gov = &devfreq_gov_memlat;
-	node->attr_grp = &dev_attr_group;
+		return ERR_PTR(-ENOMEM);
 
 	node->ratio_ceil = 10;
 	node->hw = hw;
@@ -401,20 +414,68 @@
 	hw->freq_map = init_core_dev_map(dev, "qcom,core-dev-table");
 	if (!hw->freq_map) {
 		dev_err(dev, "Couldn't find the core-dev freq table!\n");
-		return -EINVAL;
+		return ERR_PTR(-EINVAL);
 	}
 
 	mutex_lock(&list_lock);
 	list_add_tail(&node->list, &memlat_list);
 	mutex_unlock(&list_lock);
 
+	return node;
+}
+
+int register_compute(struct device *dev, struct memlat_hwmon *hw)
+{
+	struct memlat_node *node;
+	int ret = 0;
+
+	node = register_common(dev, hw);
+	if (IS_ERR(node)) {
+		ret = PTR_ERR(node);
+		goto out;
+	}
+
 	mutex_lock(&state_lock);
-	if (!use_cnt)
-		ret = devfreq_add_governor(&devfreq_gov_memlat);
+	node->gov = &devfreq_gov_compute;
+	node->attr_grp = &compute_dev_attr_group;
+
+	if (!compute_use_cnt)
+		ret = devfreq_add_governor(&devfreq_gov_compute);
 	if (!ret)
-		use_cnt++;
+		compute_use_cnt++;
 	mutex_unlock(&state_lock);
 
+out:
+	if (!ret)
+		dev_info(dev, "Compute governor registered.\n");
+	else
+		dev_err(dev, "Compute governor registration failed!\n");
+
+	return ret;
+}
+
+int register_memlat(struct device *dev, struct memlat_hwmon *hw)
+{
+	struct memlat_node *node;
+	int ret = 0;
+
+	node = register_common(dev, hw);
+	if (IS_ERR(node)) {
+		ret = PTR_ERR(node);
+		goto out;
+	}
+
+	mutex_lock(&state_lock);
+	node->gov = &devfreq_gov_memlat;
+	node->attr_grp = &memlat_dev_attr_group;
+
+	if (!memlat_use_cnt)
+		ret = devfreq_add_governor(&devfreq_gov_memlat);
+	if (!ret)
+		memlat_use_cnt++;
+	mutex_unlock(&state_lock);
+
+out:
 	if (!ret)
 		dev_info(dev, "Memory Latency governor registered.\n");
 	else
diff --git a/drivers/devfreq/governor_memlat.h b/drivers/devfreq/governor_memlat.h
index f2ba534..6491c6c 100644
--- a/drivers/devfreq/governor_memlat.h
+++ b/drivers/devfreq/governor_memlat.h
@@ -74,10 +74,16 @@
 
 #ifdef CONFIG_DEVFREQ_GOV_MEMLAT
 int register_memlat(struct device *dev, struct memlat_hwmon *hw);
+int register_compute(struct device *dev, struct memlat_hwmon *hw);
 int update_memlat(struct memlat_hwmon *hw);
 #else
 static inline int register_memlat(struct device *dev,
-					struct memlat_hwmon *hw)
+				  struct memlat_hwmon *hw)
+{
+	return 0;
+}
+static inline int register_compute(struct device *dev,
+				   struct memlat_hwmon *hw)
 {
 	return 0;
 }
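
The governor_memlat split above moves the shared device setup into register_common() and keeps a separate use count per governor, so devfreq_add_governor() runs only on the first registration of each governor. A compact userspace sketch of that refcounted pattern (names and printouts are illustrative):

#include <stdio.h>

static int memlat_use_cnt;
static int compute_use_cnt;

/* stand-in for devfreq_add_governor() */
static int add_governor(const char *name)
{
	printf("governor '%s' added\n", name);
	return 0;
}

static int register_gov(const char *name, int *use_cnt)
{
	int ret = 0;

	if (!*use_cnt)
		ret = add_governor(name);
	if (!ret)
		(*use_cnt)++;
	return ret;
}

int main(void)
{
	register_gov("mem_latency", &memlat_use_cnt);
	register_gov("compute", &compute_use_cnt);
	register_gov("compute", &compute_use_cnt);	/* no second add */
	return 0;
}
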
diff --git a/drivers/edac/kryo3xx_arm64_edac.c b/drivers/edac/kryo3xx_arm64_edac.c
index 5ca93a6..cf3fdde 100644
--- a/drivers/edac/kryo3xx_arm64_edac.c
+++ b/drivers/edac/kryo3xx_arm64_edac.c
@@ -30,10 +30,11 @@
 #endif
 
 #ifdef CONFIG_EDAC_KRYO3XX_ARM64_PANIC_ON_CE
-#define ARM64_ERP_PANIC_ON_CE 1
+static bool panic_on_ce = true;
 #else
-#define ARM64_ERP_PANIC_ON_CE 0
+static bool panic_on_ce;
 #endif
+module_param_named(panic_on_ce, panic_on_ce, bool, 0664);
 
 #ifdef CONFIG_EDAC_KRYO3XX_ARM64_PANIC_ON_UE
 #define ARM64_ERP_PANIC_ON_UE 1
@@ -238,6 +239,8 @@
 	else
 		edac_printk(KERN_CRIT, EDAC_CPU,
 			"Way: %d\n", (int) KRYO3XX_ERRXMISC_WAY(errxmisc) >> 2);
+
+	edev_ctl->panic_on_ce = panic_on_ce;
 	errors[errorcode].func(edev_ctl, smp_processor_id(),
 				level, errors[errorcode].msg);
 }
@@ -360,11 +363,34 @@
 	return IRQ_HANDLED;
 }
 
+static void initialize_registers(void *info)
+{
+	set_errxctlr_el1();
+	set_errxmisc_overflow();
+}
+
+static void init_regs_on_cpu(bool all_cpus)
+{
+	int cpu;
+
+	write_errselr_el1(0);
+	if (all_cpus) {
+		for_each_possible_cpu(cpu)
+			smp_call_function_single(cpu, initialize_registers,
+						NULL, 1);
+	} else
+		initialize_registers(NULL);
+
+	write_errselr_el1(1);
+	initialize_registers(NULL);
+}
+
 static int kryo3xx_pmu_cpu_pm_notify(struct notifier_block *self,
 				unsigned long action, void *v)
 {
 	switch (action) {
 	case CPU_PM_EXIT:
+		init_regs_on_cpu(false);
 		kryo3xx_check_l3_scu_error(panic_handler_drvdata->edev_ctl);
 		kryo3xx_check_l1_l2_ecc(panic_handler_drvdata->edev_ctl);
 		break;
@@ -373,23 +399,14 @@
 	return NOTIFY_OK;
 }
 
-static void initialize_registers(void *info)
-{
-	set_errxctlr_el1();
-	set_errxmisc_overflow();
-}
-
 static int kryo3xx_cpu_erp_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct erp_drvdata *drv;
 	int rc = 0;
 	int fail = 0;
-	int cpu;
 
-	for_each_possible_cpu(cpu)
-		smp_call_function_single(cpu, initialize_registers, NULL, 1);
-
+	init_regs_on_cpu(true);
 
 	drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
 
@@ -413,7 +430,7 @@
 	drv->edev_ctl->mod_name = dev_name(dev);
 	drv->edev_ctl->dev_name = dev_name(dev);
 	drv->edev_ctl->ctl_name = "cache";
-	drv->edev_ctl->panic_on_ce = ARM64_ERP_PANIC_ON_CE;
+	drv->edev_ctl->panic_on_ce = panic_on_ce;
 	drv->edev_ctl->panic_on_ue = ARM64_ERP_PANIC_ON_UE;
 	drv->nb_pm.notifier_call = kryo3xx_pmu_cpu_pm_notify;
 	platform_set_drvdata(pdev, drv);
diff --git a/drivers/esoc/Kconfig b/drivers/esoc/Kconfig
index a56c7e0..3c65f69 100644
--- a/drivers/esoc/Kconfig
+++ b/drivers/esoc/Kconfig
@@ -38,7 +38,7 @@
 	  allow logging of different esoc driver traces.
 
 config ESOC_MDM_4x
-	bool "Add support for external mdm9x25/mdm9x35/mdm9x45/mdm9x55"
+	bool "Add support for external mdm9x25/mdm9x35/mdm9x55"
 	help
 	  In some Qualcomm Technologies, Inc. boards, an external modem such as
 	  mdm9x25 or mdm9x35 is connected to a primary msm. The primary soc can
@@ -49,7 +49,7 @@
 	tristate "Command engine for 4x series external modems"
 	help
 	  Provides a command engine to control the behavior of an external modem
-	  such as mdm9x25/mdm9x35/mdm9x45/mdm9x55/QSC. Allows the primary soc to put the
+	  such as mdm9x25/mdm9x35/mdm9x55/QSC. Allows the primary soc to put the
 	  external modem in a specific mode. Also listens for events on the
 	  external modem.
 
diff --git a/drivers/esoc/esoc-mdm-4x.c b/drivers/esoc/esoc-mdm-4x.c
index 7eb0458..677e21d 100644
--- a/drivers/esoc/esoc-mdm-4x.c
+++ b/drivers/esoc/esoc-mdm-4x.c
@@ -88,12 +88,10 @@
 		return;
 	if (mdm->irq_mask & IRQ_ERRFATAL) {
 		enable_irq(mdm->errfatal_irq);
-		irq_set_irq_wake(mdm->errfatal_irq, 1);
 		mdm->irq_mask &= ~IRQ_ERRFATAL;
 	}
 	if (mdm->irq_mask & IRQ_STATUS) {
 		enable_irq(mdm->status_irq);
-		irq_set_irq_wake(mdm->status_irq, 1);
 		mdm->irq_mask &= ~IRQ_STATUS;
 	}
 	if (mdm->irq_mask & IRQ_PBLRDY) {
@@ -107,12 +105,10 @@
 	if (!mdm)
 		return;
 	if (!(mdm->irq_mask & IRQ_ERRFATAL)) {
-		irq_set_irq_wake(mdm->errfatal_irq, 0);
 		disable_irq_nosync(mdm->errfatal_irq);
 		mdm->irq_mask |= IRQ_ERRFATAL;
 	}
 	if (!(mdm->irq_mask & IRQ_STATUS)) {
-		irq_set_irq_wake(mdm->status_irq, 0);
 		disable_irq_nosync(mdm->status_irq);
 		mdm->irq_mask |= IRQ_STATUS;
 	}
@@ -179,26 +175,48 @@
 	struct device *dev = mdm->dev;
 	int ret;
 	bool graceful_shutdown = false;
+	u32 status, err_fatal;
 
 	switch (cmd) {
 	case ESOC_PWR_ON:
+		if (esoc->auto_boot) {
+			/*
+			 * If the esoc has already booted, we would have missed
+			 * the status change interrupt. Read the status and
+			 * err_fatal signals to determine the esoc state.
+			 */
+			esoc->clink_ops->get_status(&status, esoc);
+			esoc->clink_ops->get_err_fatal(&err_fatal, esoc);
+			if (err_fatal)
+				return -EIO;
+			if (status && !mdm->ready) {
+				mdm->ready = true;
+				esoc->clink_ops->notify(ESOC_BOOT_DONE, esoc);
+			}
+		}
 		gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 0);
-		mdm_enable_irqs(mdm);
 		mdm->init = 1;
 		mdm_do_first_power_on(mdm);
+		mdm_enable_irqs(mdm);
 		break;
 	case ESOC_PWR_OFF:
 		mdm_disable_irqs(mdm);
 		mdm->debug = 0;
 		mdm->ready = false;
 		mdm->trig_cnt = 0;
+		if (esoc->primary)
+			break;
 		graceful_shutdown = true;
-		ret = sysmon_send_shutdown(&esoc->subsys);
-		if (ret) {
-			dev_err(mdm->dev, "sysmon shutdown fail, ret = %d\n",
-									ret);
-			graceful_shutdown = false;
-			goto force_poff;
+		if (!esoc->userspace_handle_shutdown) {
+			ret = sysmon_send_shutdown(&esoc->subsys);
+			if (ret) {
+				dev_err(mdm->dev,
+				 "sysmon shutdown fail, ret = %d\n", ret);
+				graceful_shutdown = false;
+				goto force_poff;
+			}
+		} else {
+			esoc_clink_queue_request(ESOC_REQ_SEND_SHUTDOWN, esoc);
 		}
 		dev_dbg(mdm->dev, "Waiting for status gpio go low\n");
 		status_down = false;
@@ -229,12 +247,17 @@
 				esoc->subsys.sysmon_shutdown_ret);
 		}
 
+		if (esoc->primary)
+			break;
 		/*
 		 * Force a shutdown of the mdm. This is required in order
 		 * to prevent the mdm from immediately powering back on
-		 * after the shutdown
+	 * after the shutdown. Avoid setting status to 0 if the line is
+	 * monitored by multiple mdms (it might be wrongly interpreted as
+	 * a primary crash).
 		 */
-		gpio_set_value(MDM_GPIO(mdm, AP2MDM_STATUS), 0);
+		if (esoc->statusline_not_a_powersource == false)
+			gpio_set_value(MDM_GPIO(mdm, AP2MDM_STATUS), 0);
 		esoc_clink_queue_request(ESOC_REQ_SHUTDOWN, esoc);
 		mdm_power_down(mdm);
 		mdm_update_gpio_configs(mdm, GPIO_UPDATE_BOOTING_CONFIG);
@@ -250,9 +273,12 @@
 		 */
 		mdm->ready = false;
 		cancel_delayed_work(&mdm->mdm2ap_status_check_work);
-		gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 1);
-		dev_dbg(mdm->dev, "set ap2mdm errfatal to force reset\n");
-		msleep(mdm->ramdump_delay_ms);
+		if (!mdm->esoc->auto_boot) {
+			gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 1);
+			dev_dbg(mdm->dev,
+				"set ap2mdm errfatal to force reset\n");
+			msleep(mdm->ramdump_delay_ms);
+		}
 		break;
 	case ESOC_EXE_DEBUG:
 		mdm->debug = 1;
@@ -380,6 +406,8 @@
 		status_down = false;
 		dev_dbg(dev, "signal apq err fatal for graceful restart\n");
 		gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 1);
+		if (esoc->primary)
+			break;
 		timeout = local_clock();
 		do_div(timeout, NSEC_PER_MSEC);
 		timeout += MDM_MODEM_TIMEOUT;
@@ -421,7 +449,8 @@
 		goto mdm_pwroff_irq;
 	esoc = mdm->esoc;
 	dev_err(dev, "%s: mdm sent errfatal interrupt\n",
-					 __func__);
+					__func__);
+	subsys_set_crash_status(esoc->subsys_dev, true);
 	/* disable irq ?*/
 	esoc_clink_evt_notify(ESOC_ERR_FATAL, esoc);
 	return IRQ_HANDLED;
@@ -442,11 +471,26 @@
 		return IRQ_HANDLED;
 	dev = mdm->dev;
 	esoc = mdm->esoc;
+	/*
+	 * On auto boot devices, it is possible to receive a status
+	 * change interrupt before the esoc_clink structure is
+	 * initialized. Ignore such interrupts.
+	 */
+	if (!esoc)
+		return IRQ_HANDLED;
 	value = gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS));
 	if (value == 0 && mdm->ready) {
 		dev_err(dev, "unexpected reset external modem\n");
+		subsys_set_crash_status(esoc->subsys_dev, true);
 		esoc_clink_evt_notify(ESOC_UNEXPECTED_RESET, esoc);
 	} else if (value == 1) {
+		/*
+		 * In auto_boot cases, bailout early if mdm
+		 * is up already.
+		 */
+		if (esoc->auto_boot && mdm->ready)
+			return IRQ_HANDLED;
+
 		cancel_delayed_work(&mdm->mdm2ap_status_check_work);
 		dev_dbg(dev, "status = 1: mdm is now ready\n");
 		mdm->ready = true;
@@ -454,6 +498,8 @@
 		queue_work(mdm->mdm_queue, &mdm->mdm_status_work);
 		if (mdm->get_restart_reason)
 			queue_work(mdm->mdm_queue, &mdm->restart_reason_work);
+		if (esoc->auto_boot)
+			esoc->clink_ops->notify(ESOC_BOOT_DONE, esoc);
 	}
 	return IRQ_HANDLED;
 }
@@ -582,13 +628,21 @@
 						&mdm->ramdump_delay_ms);
 	if (ret)
 		mdm->ramdump_delay_ms = DEF_RAMDUMP_DELAY;
-	/* Multilple gpio_request calls are allowed */
+	/*
+	 * In certain scenarios, multiple esoc devices are monitoring
+	 * the same AP2MDM_STATUS line, but only one of them will have a
+	 * successful gpio_request call. Initialize gpio only if request
+	 * succeeds.
+	 */
 	if (gpio_request(MDM_GPIO(mdm, AP2MDM_STATUS), "AP2MDM_STATUS"))
 		dev_err(dev, "Failed to configure AP2MDM_STATUS gpio\n");
-	/* Multilple gpio_request calls are allowed */
+	else
+		gpio_direction_output(MDM_GPIO(mdm, AP2MDM_STATUS), 0);
 	if (gpio_request(MDM_GPIO(mdm, AP2MDM_ERRFATAL), "AP2MDM_ERRFATAL"))
 		dev_err(dev, "%s Failed to configure AP2MDM_ERRFATAL gpio\n",
 			   __func__);
+	else
+		gpio_direction_output(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 0);
 	if (gpio_request(MDM_GPIO(mdm, MDM2AP_STATUS), "MDM2AP_STATUS")) {
 		dev_err(dev, "%s Failed to configure MDM2AP_STATUS gpio\n",
 			   __func__);
@@ -621,9 +675,6 @@
 		}
 	}
 
-	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_STATUS), 0);
-	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 0);
-
 	if (gpio_is_valid(MDM_GPIO(mdm, AP2MDM_CHNLRDY)))
 		gpio_direction_output(MDM_GPIO(mdm, AP2MDM_CHNLRDY), 0);
 
@@ -646,6 +697,7 @@
 		goto errfatal_err;
 	}
 	mdm->errfatal_irq = irq;
+	irq_set_irq_wake(mdm->errfatal_irq, 1);
 
 errfatal_err:
 	 /* status irq */
@@ -664,6 +716,7 @@
 		goto status_err;
 	}
 	mdm->status_irq = irq;
+	irq_set_irq_wake(mdm->status_irq, 1);
 status_err:
 	if (gpio_is_valid(MDM_GPIO(mdm, MDM2AP_PBLRDY))) {
 		irq =  platform_get_irq_byname(pdev, "plbrdy_irq");
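
The esoc-mdm-4x change above drives AP2MDM_STATUS/AP2MDM_ERRFATAL low only when the gpio_request() call, which may race with another esoc instance sharing the line, actually succeeded. A toy userspace model of that request-then-configure ordering (the fake GPIO bookkeeping is invented for the example):

#include <stdio.h>
#include <stdbool.h>

static bool gpio_claimed[64];

/* stand-in for gpio_request(): only the first caller wins */
static int fake_gpio_request(int gpio)
{
	if (gpio_claimed[gpio])
		return -1;
	gpio_claimed[gpio] = true;
	return 0;
}

static void claim_status_gpio(const char *who, int gpio)
{
	if (fake_gpio_request(gpio))
		fprintf(stderr, "%s: failed to configure status gpio\n", who);
	else
		printf("%s: gpio %d requested, driving it low\n", who, gpio);
}

int main(void)
{
	claim_status_gpio("esoc0", 12);
	claim_status_gpio("esoc1", 12);	/* loses the race, skips the init */
	return 0;
}
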
diff --git a/drivers/esoc/esoc-mdm-dbg-eng.c b/drivers/esoc/esoc-mdm-dbg-eng.c
index 309c820..a61588a 100644
--- a/drivers/esoc/esoc-mdm-dbg-eng.c
+++ b/drivers/esoc/esoc-mdm-dbg-eng.c
@@ -269,7 +269,7 @@
 {
 	unsigned int i;
 	unsigned long flags;
-	size_t count;
+	size_t count = 0;
 
 	spin_lock_irqsave(&req_lock, flags);
 	for (i = 0; i < ARRAY_SIZE(req_to_str); i++) {
diff --git a/drivers/esoc/esoc-mdm-drv.c b/drivers/esoc/esoc-mdm-drv.c
index 31cd8c4..77ae84b 100644
--- a/drivers/esoc/esoc-mdm-drv.c
+++ b/drivers/esoc/esoc-mdm-drv.c
@@ -13,6 +13,7 @@
 #include <linux/delay.h>
 #include <linux/workqueue.h>
 #include <linux/reboot.h>
+#include <linux/of.h>
 #include "esoc.h"
 #include "mdm-dbg.h"
 
@@ -74,7 +75,14 @@
 		break;
 	case ESOC_UNEXPECTED_RESET:
 	case ESOC_ERR_FATAL:
-		if (mdm_drv->mode == CRASH)
+		/*
+		 * Modem can crash while we are waiting for boot_done during
+		 * a subsystem_get(). Setting mode to CRASH will prevent a
+		 * subsequent subsystem_get() from entering poweron ops. Avoid
+		 * this by setting mode to CRASH only if the device was up and
+		 * running.
+		 */
+		if (mdm_drv->mode == CRASH || mdm_drv->mode != RUN)
 			return;
 		mdm_drv->mode = CRASH;
 		queue_work(mdm_drv->mdm_queue, &mdm_drv->ssr_work);
@@ -164,8 +172,9 @@
 								subsys);
 	struct mdm_drv *mdm_drv = esoc_get_drv_data(esoc_clink);
 	const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops;
+	int timeout = INT_MAX;
 
-	if (!esoc_req_eng_enabled(esoc_clink)) {
+	if (!esoc_clink->auto_boot && !esoc_req_eng_enabled(esoc_clink)) {
 		dev_dbg(&esoc_clink->dev, "Wait for req eng registration\n");
 		wait_for_completion(&mdm_drv->req_eng_wait);
 	}
@@ -190,8 +199,17 @@
 			return ret;
 		}
 	}
-	wait_for_completion(&mdm_drv->boot_done);
-	if (mdm_drv->boot_fail) {
+
+	/*
+	 * In the autoboot case, we could wait forever for boot
+	 * completion if the esoc fails to boot, since there is no
+	 * helper application to alert the esoc driver about the
+	 * failure. Bound the wait in that case.
+	 */
+	if (esoc_clink->auto_boot)
+		timeout = 10 * HZ;
+	ret = wait_for_completion_timeout(&mdm_drv->boot_done, timeout);
+	if (mdm_drv->boot_fail || ret <= 0) {
 		dev_err(&esoc_clink->dev, "booting failed\n");
 		return -EIO;
 	}
@@ -219,10 +237,12 @@
 
 static int mdm_register_ssr(struct esoc_clink *esoc_clink)
 {
-	esoc_clink->subsys.shutdown = mdm_subsys_shutdown;
-	esoc_clink->subsys.ramdump = mdm_subsys_ramdumps;
-	esoc_clink->subsys.powerup = mdm_subsys_powerup;
-	esoc_clink->subsys.crash_shutdown = mdm_crash_shutdown;
+	struct subsys_desc *subsys = &esoc_clink->subsys;
+
+	subsys->shutdown = mdm_subsys_shutdown;
+	subsys->ramdump = mdm_subsys_ramdumps;
+	subsys->powerup = mdm_subsys_powerup;
+	subsys->crash_shutdown = mdm_crash_shutdown;
 	return esoc_clink_register_ssr(esoc_clink);
 }
 
diff --git a/drivers/esoc/esoc-mdm-pon.c b/drivers/esoc/esoc-mdm-pon.c
index 47d54db..0e85776 100644
--- a/drivers/esoc/esoc-mdm-pon.c
+++ b/drivers/esoc/esoc-mdm-pon.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -68,6 +68,9 @@
 	struct device *dev = mdm->dev;
 
 	dev_dbg(dev, "Powering on modem for the first time\n");
+	if (mdm->esoc->auto_boot)
+		return 0;
+
 	mdm_toggle_soft_reset(mdm, false);
 	/* Add a delay to allow PON sequence to complete*/
 	mdelay(50);
@@ -134,6 +137,9 @@
 
 static void mdm4x_cold_reset(struct mdm_ctrl *mdm)
 {
+	if (!gpio_is_valid(MDM_GPIO(mdm, AP2MDM_SOFT_RESET)))
+		return;
+
 	dev_dbg(mdm->dev, "Triggering mdm cold reset");
 	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
 			!!mdm->soft_reset_inverted);
@@ -201,15 +207,6 @@
 	.setup = mdm4x_pon_setup,
 };
 
-struct mdm_pon_ops mdm9x45_pon_ops = {
-	.pon = mdm4x_do_first_power_on,
-	.soft_reset = mdm4x_toggle_soft_reset,
-	.poff_force = mdm4x_power_down,
-	.cold_reset = mdm4x_cold_reset,
-	.dt_init = mdm4x_pon_dt_init,
-	.setup = mdm4x_pon_setup,
-};
-
 struct mdm_pon_ops mdm9x55_pon_ops = {
 	.pon = mdm4x_do_first_power_on,
 	.soft_reset = mdm9x55_toggle_soft_reset,
diff --git a/drivers/esoc/esoc-mdm.h b/drivers/esoc/esoc-mdm.h
index fa3a576..621d913 100644
--- a/drivers/esoc/esoc-mdm.h
+++ b/drivers/esoc/esoc-mdm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -33,8 +33,6 @@
 #define MDM9x35_PCIE			"PCIe"
 #define MDM9x35_DUAL_LINK		"HSIC+PCIe"
 #define MDM9x35_HSIC			"HSIC"
-#define MDM9x45_LABEL			"MDM9x45"
-#define MDM9x45_PCIE			"PCIe"
 #define MDM9x55_LABEL			"MDM9x55"
 #define MDM9x55_PCIE			"PCIe"
 #define MDM2AP_STATUS_TIMEOUT_MS	120000L
@@ -151,6 +149,5 @@
 
 extern struct mdm_pon_ops mdm9x25_pon_ops;
 extern struct mdm_pon_ops mdm9x35_pon_ops;
-extern struct mdm_pon_ops mdm9x45_pon_ops;
 extern struct mdm_pon_ops mdm9x55_pon_ops;
 #endif
diff --git a/drivers/esoc/esoc.h b/drivers/esoc/esoc.h
index e6794c2..df3c9df 100644
--- a/drivers/esoc/esoc.h
+++ b/drivers/esoc/esoc.h
@@ -60,6 +60,12 @@
  * @subsys_desc: descriptor for subsystem restart
  * @subsys_dev: ssr device handle.
  * @np: device tree node for esoc_clink.
+ * @auto_boot: boots independently.
+ * @primary: primary esoc controls (reset/poweroff) all secondary
+ *	 esocs, but not the other way around.
+ * @statusline_not_a_powersource: True if status line to esoc is not a
+ *				power source.
+ * @userspace_handle_shutdown: True if user space handles shutdown requests.
  */
 struct esoc_clink {
 	const char *name;
@@ -79,6 +85,10 @@
 	struct subsys_desc subsys;
 	struct subsys_device *subsys_dev;
 	struct device_node *np;
+	bool auto_boot;
+	bool primary;
+	bool statusline_not_a_powersource;
+	bool userspace_handle_shutdown;
 };
 
 /**
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 08cd0bd..3907439 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -825,7 +825,7 @@
 {
 	uint32_t reference_clock, tmp;
 	struct cgs_display_info info = {0};
-	struct cgs_mode_info mode_info;
+	struct cgs_mode_info mode_info = {0};
 
 	info.mode_info = &mode_info;
 
@@ -3718,10 +3718,9 @@
 	uint32_t ref_clock;
 	uint32_t refresh_rate = 0;
 	struct cgs_display_info info = {0};
-	struct cgs_mode_info mode_info;
+	struct cgs_mode_info mode_info = {0};
 
 	info.mode_info = &mode_info;
-
 	cgs_get_active_displays_info(hwmgr->device, &info);
 	num_active_displays = info.display_count;
 
@@ -3737,6 +3736,7 @@
 	frame_time_in_us = 1000000 / refresh_rate;
 
 	pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
+
 	data->frame_time_x2 = frame_time_in_us * 2 / 100;
 
 	display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index a4d81cf..ef80ec6 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -530,7 +530,7 @@
 
 	drm_mode_object_unregister(blob->dev, &blob->base);
 
-	kfree(blob);
+	vfree(blob);
 }
 
 /**
@@ -557,7 +557,7 @@
 	if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
 		return ERR_PTR(-EINVAL);
 
-	blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
+	blob = vmalloc(sizeof(struct drm_property_blob)+length);
 	if (!blob)
 		return ERR_PTR(-ENOMEM);
 
@@ -573,7 +573,7 @@
 	ret = drm_mode_object_get_reg(dev, &blob->base, DRM_MODE_OBJECT_BLOB,
 				      true, drm_property_free_blob);
 	if (ret) {
-		kfree(blob);
+		vfree(blob);
 		return ERR_PTR(-EINVAL);
 	}
 
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
index c4a60dc..0a03298 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.c
+++ b/drivers/gpu/drm/msm/dp/dp_audio.c
@@ -44,6 +44,8 @@
 	u32 channels;
 
 	struct completion hpd_comp;
+	struct workqueue_struct *notify_workqueue;
+	struct delayed_work notify_delayed_work;
 
 	struct dp_audio dp_audio;
 };
@@ -428,7 +430,7 @@
 	audio->engine_on = enable;
 }
 
-static struct dp_audio_private *get_audio_get_data(struct platform_device *pdev)
+static struct dp_audio_private *dp_audio_get_data(struct platform_device *pdev)
 {
 	struct msm_ext_disp_data *ext_data;
 	struct dp_audio *dp_audio;
@@ -459,18 +461,22 @@
 	int rc = 0;
 	struct dp_audio_private *audio;
 
-	audio = get_audio_get_data(pdev);
+	audio = dp_audio_get_data(pdev);
 	if (IS_ERR(audio)) {
 		rc = PTR_ERR(audio);
 		goto end;
 	}
 
+	mutex_lock(&audio->dp_audio.ops_lock);
+
 	audio->channels = params->num_of_channels;
 
 	dp_audio_setup_sdp(audio);
 	dp_audio_setup_acr(audio);
 	dp_audio_safe_to_exit_level(audio);
 	dp_audio_enable(audio, true);
+
+	mutex_unlock(&audio->dp_audio.ops_lock);
 end:
 	return rc;
 }
@@ -482,7 +488,7 @@
 	struct dp_audio_private *audio;
 	struct sde_edid_ctrl *edid;
 
-	audio = get_audio_get_data(pdev);
+	audio = dp_audio_get_data(pdev);
 	if (IS_ERR(audio)) {
 		rc = PTR_ERR(audio);
 		goto end;
@@ -510,18 +516,12 @@
 	int rc = 0;
 	struct dp_audio_private *audio;
 
-	audio = get_audio_get_data(pdev);
+	audio = dp_audio_get_data(pdev);
 	if (IS_ERR(audio)) {
 		rc = PTR_ERR(audio);
 		goto end;
 	}
 
-	if (!audio->panel) {
-		pr_err("invalid panel data\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
 	return audio->session_on;
 end:
 	return rc;
@@ -532,7 +532,7 @@
 	int rc = 0;
 	struct dp_audio_private *audio;
 
-	audio = get_audio_get_data(pdev);
+	audio = dp_audio_get_data(pdev);
 	if (IS_ERR(audio)) {
 		rc = PTR_ERR(audio);
 		goto end;
@@ -547,16 +547,13 @@
 {
 	struct dp_audio_private *audio;
 
-	audio = get_audio_get_data(pdev);
+	audio = dp_audio_get_data(pdev);
 	if (IS_ERR(audio))
 		return;
 
-	if (!audio->panel) {
-		pr_err("invalid panel data\n");
-		return;
-	}
-
+	mutex_lock(&audio->dp_audio.ops_lock);
 	dp_audio_enable(audio, false);
+	mutex_unlock(&audio->dp_audio.ops_lock);
 
 	complete_all(&audio->hpd_comp);
 
@@ -568,7 +565,7 @@
 	int rc = 0, ack_hpd;
 	struct dp_audio_private *audio;
 
-	audio = get_audio_get_data(pdev);
+	audio = dp_audio_get_data(pdev);
 	if (IS_ERR(audio)) {
 		rc = PTR_ERR(audio);
 		goto end;
@@ -596,6 +593,24 @@
 	return rc;
 }
 
+static int dp_audio_codec_ready(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct dp_audio_private *audio;
+
+	audio = dp_audio_get_data(pdev);
+	if (IS_ERR(audio)) {
+		pr_err("invalid input\n");
+		rc = PTR_ERR(audio);
+		goto end;
+	}
+
+	queue_delayed_work(audio->notify_workqueue,
+			&audio->notify_delayed_work, HZ/4);
+end:
+	return rc;
+}
+
 static int dp_audio_init_ext_disp(struct dp_audio_private *audio)
 {
 	int rc = 0;
@@ -617,6 +632,7 @@
 	ops->get_intf_id        = dp_audio_get_intf_id;
 	ops->teardown_done      = dp_audio_teardown_done;
 	ops->acknowledge        = dp_audio_ack_done;
+	ops->ready              = dp_audio_codec_ready;
 
 	if (!audio->pdev->dev.of_node) {
 		pr_err("cannot find audio dev.of_node\n");
@@ -648,6 +664,31 @@
 	return rc;
 }
 
+static int dp_audio_notify(struct dp_audio_private *audio, u32 state)
+{
+	int rc = 0;
+	struct msm_ext_disp_init_data *ext = &audio->ext_audio_data;
+
+	rc = ext->intf_ops.audio_notify(audio->ext_pdev,
+			EXT_DISPLAY_TYPE_DP, state);
+	if (rc) {
+		pr_err("failed to notify audio. state=%d err=%d\n", state, rc);
+		goto end;
+	}
+
+	reinit_completion(&audio->hpd_comp);
+	rc = wait_for_completion_timeout(&audio->hpd_comp, HZ * 5);
+	if (!rc) {
+		pr_err("timeout. state=%d err=%d\n", state, rc);
+		rc = -ETIMEDOUT;
+		goto end;
+	}
+
+	pr_debug("success\n");
+end:
+	return rc;
+}
+
 static int dp_audio_on(struct dp_audio *dp_audio)
 {
 	int rc = 0;
@@ -656,11 +697,14 @@
 
 	if (!dp_audio) {
 		pr_err("invalid input\n");
-		rc = -EINVAL;
-		goto end;
+		return -EINVAL;
 	}
 
 	audio = container_of(dp_audio, struct dp_audio_private, dp_audio);
+	if (IS_ERR(audio)) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
 
 	ext = &audio->ext_audio_data;
 
@@ -674,21 +718,9 @@
 		goto end;
 	}
 
-	rc = ext->intf_ops.audio_notify(audio->ext_pdev,
-			EXT_DISPLAY_TYPE_DP,
-			EXT_DISPLAY_CABLE_CONNECT);
-	if (rc) {
-		pr_err("failed to notify audio, err=%d\n", rc);
+	rc = dp_audio_notify(audio, EXT_DISPLAY_CABLE_CONNECT);
+	if (rc)
 		goto end;
-	}
-
-	reinit_completion(&audio->hpd_comp);
-	rc = wait_for_completion_timeout(&audio->hpd_comp, HZ * 5);
-	if (!rc) {
-		pr_err("timeout\n");
-		rc = -ETIMEDOUT;
-		goto end;
-	}
 
 	pr_debug("success\n");
 end:
@@ -700,6 +732,7 @@
 	int rc = 0;
 	struct dp_audio_private *audio;
 	struct msm_ext_disp_init_data *ext;
+	bool work_pending = false;
 
 	if (!dp_audio) {
 		pr_err("invalid input\n");
@@ -709,21 +742,13 @@
 	audio = container_of(dp_audio, struct dp_audio_private, dp_audio);
 	ext = &audio->ext_audio_data;
 
-	rc = ext->intf_ops.audio_notify(audio->ext_pdev,
-			EXT_DISPLAY_TYPE_DP,
-			EXT_DISPLAY_CABLE_DISCONNECT);
-	if (rc) {
-		pr_err("failed to notify audio, err=%d\n", rc);
-		goto end;
-	}
+	work_pending = cancel_delayed_work_sync(&audio->notify_delayed_work);
+	if (work_pending)
+		pr_debug("pending notification work completed\n");
 
-	reinit_completion(&audio->hpd_comp);
-	rc = wait_for_completion_timeout(&audio->hpd_comp, HZ * 5);
-	if (!rc) {
-		pr_err("timeout\n");
-		rc = -ETIMEDOUT;
+	rc = dp_audio_notify(audio, EXT_DISPLAY_CABLE_DISCONNECT);
+	if (rc)
 		goto end;
-	}
 
 	pr_debug("success\n");
 end:
@@ -739,6 +764,35 @@
 	return rc;
 }
 
+static void dp_audio_notify_work_fn(struct work_struct *work)
+{
+	struct dp_audio_private *audio;
+	struct delayed_work *dw = to_delayed_work(work);
+
+	audio = container_of(dw, struct dp_audio_private, notify_delayed_work);
+
+	dp_audio_notify(audio, EXT_DISPLAY_CABLE_CONNECT);
+}
+
+static int dp_audio_create_notify_workqueue(struct dp_audio_private *audio)
+{
+	audio->notify_workqueue = create_workqueue("sdm_dp_audio_notify");
+	if (IS_ERR_OR_NULL(audio->notify_workqueue)) {
+		pr_err("Error creating notify_workqueue\n");
+		return -EPERM;
+	}
+
+	INIT_DELAYED_WORK(&audio->notify_delayed_work, dp_audio_notify_work_fn);
+
+	return 0;
+}
+
+static void dp_audio_destroy_notify_workqueue(struct dp_audio_private *audio)
+{
+	if (audio->notify_workqueue)
+		destroy_workqueue(audio->notify_workqueue);
+}
+
 struct dp_audio *dp_audio_get(struct platform_device *pdev,
 			struct dp_panel *panel,
 			struct dp_catalog_audio *catalog)
@@ -759,6 +813,10 @@
 		goto error;
 	}
 
+	rc = dp_audio_create_notify_workqueue(audio);
+	if (rc)
+		goto error_notify_workqueue;
+
 	init_completion(&audio->hpd_comp);
 
 	audio->pdev = pdev;
@@ -767,18 +825,23 @@
 
 	dp_audio = &audio->dp_audio;
 
+	mutex_init(&dp_audio->ops_lock);
+
 	dp_audio->on  = dp_audio_on;
 	dp_audio->off = dp_audio_off;
 
 	rc = dp_audio_init_ext_disp(audio);
 	if (rc) {
-		devm_kfree(&pdev->dev, audio);
-		goto error;
+		goto error_ext_disp;
 	}
 
 	catalog->init(catalog);
 
 	return dp_audio;
+error_ext_disp:
+	dp_audio_destroy_notify_workqueue(audio);
+error_notify_workqueue:
+	devm_kfree(&pdev->dev, audio);
 error:
 	return ERR_PTR(rc);
 }
@@ -791,6 +854,9 @@
 		return;
 
 	audio = container_of(dp_audio, struct dp_audio_private, dp_audio);
+	mutex_destroy(&dp_audio->ops_lock);
+
+	dp_audio_destroy_notify_workqueue(audio);
 
 	devm_kfree(&audio->pdev->dev, audio);
 }
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.h b/drivers/gpu/drm/msm/dp/dp_audio.h
index d6e6b74..807444b 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.h
+++ b/drivers/gpu/drm/msm/dp/dp_audio.h
@@ -29,6 +29,8 @@
 	u32 lane_count;
 	u32 bw_code;
 
+	struct mutex ops_lock;
+
 	/**
 	 * on()
 	 *
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index b9b996a..fc7cb22 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -84,7 +84,7 @@
 	}
 
 	dp_catalog_get_priv(aux);
-	base = catalog->io->ctrl_io.base;
+	base = catalog->io->dp_aux.base;
 
 	return dp_read(base + DP_AUX_DATA);
 end:
@@ -104,7 +104,7 @@
 	}
 
 	dp_catalog_get_priv(aux);
-	base = catalog->io->ctrl_io.base;
+	base = catalog->io->dp_aux.base;
 
 	dp_write(base + DP_AUX_DATA, aux->data);
 end:
@@ -124,7 +124,7 @@
 	}
 
 	dp_catalog_get_priv(aux);
-	base = catalog->io->ctrl_io.base;
+	base = catalog->io->dp_aux.base;
 
 	dp_write(base + DP_AUX_TRANS_CTRL, aux->data);
 end:
@@ -145,7 +145,7 @@
 	}
 
 	dp_catalog_get_priv(aux);
-	base = catalog->io->ctrl_io.base;
+	base = catalog->io->dp_aux.base;
 
 	if (read) {
 		data = dp_read(base + DP_AUX_TRANS_CTRL);
@@ -195,7 +195,7 @@
 	}
 
 	dp_catalog_get_priv(aux);
-	base = catalog->io->ctrl_io.base;
+	base = catalog->io->dp_aux.base;
 
 	aux_ctrl = dp_read(base + DP_AUX_CTRL);
 
@@ -220,7 +220,7 @@
 	}
 
 	dp_catalog_get_priv(aux);
-	base = catalog->io->ctrl_io.base;
+	base = catalog->io->dp_aux.base;
 
 	aux_ctrl = dp_read(base + DP_AUX_CTRL);
 
@@ -297,7 +297,7 @@
 {
 	u32 ack;
 	struct dp_catalog_private *catalog;
-	void __iomem *base;
+	void __iomem *ahb_base;
 
 	if (!aux) {
 		pr_err("invalid input\n");
@@ -305,14 +305,14 @@
 	}
 
 	dp_catalog_get_priv(aux);
-	base = catalog->io->ctrl_io.base;
+	ahb_base = catalog->io->dp_ahb.base;
 
-	aux->isr = dp_read(base + DP_INTR_STATUS);
+	aux->isr = dp_read(ahb_base + DP_INTR_STATUS);
 	aux->isr &= ~DP_INTR_MASK1;
 	ack = aux->isr & DP_INTERRUPT_STATUS1;
 	ack <<= 1;
 	ack |= DP_INTR_MASK1;
-	dp_write(base + DP_INTR_STATUS, ack);
+	dp_write(ahb_base + DP_INTR_STATUS, ack);
 }
 
 /* controller related catalog functions */
@@ -327,106 +327,107 @@
 	}
 
 	dp_catalog_get_priv(ctrl);
-	base = catalog->io->ctrl_io.base;
+	base = catalog->io->dp_ahb.base;
 
 	return dp_read(base + DP_HDCP_STATUS);
 }
 
-static void dp_catalog_ctrl_setup_infoframe_sdp(struct dp_catalog_ctrl *ctrl)
+static void dp_catalog_panel_setup_infoframe_sdp(struct dp_catalog_panel *panel)
 {
 	struct dp_catalog_private *catalog;
+	struct drm_msm_ext_hdr_metadata *hdr;
 	void __iomem *base;
 	u32 header, data;
 
-	if (!ctrl) {
+	if (!panel) {
 		pr_err("invalid input\n");
 		return;
 	}
 
-	dp_catalog_get_priv(ctrl);
-	base = catalog->io->ctrl_io.base;
+	dp_catalog_get_priv(panel);
+	hdr = &panel->hdr_data.hdr_meta;
+	base = catalog->io->dp_link.base;
 
 	header = dp_read(base + MMSS_DP_VSCEXT_0);
-	header |= ctrl->hdr_data.vsc_hdr_byte1;
+	header |= panel->hdr_data.vscext_header_byte1;
 	dp_write(base + MMSS_DP_VSCEXT_0, header);
 
 	header = dp_read(base + MMSS_DP_VSCEXT_1);
-	header |= ctrl->hdr_data.vsc_hdr_byte1;
+	header |= panel->hdr_data.vscext_header_byte2;
 	dp_write(base + MMSS_DP_VSCEXT_1, header);
 
 	header = dp_read(base + MMSS_DP_VSCEXT_1);
-	header |= ctrl->hdr_data.vsc_hdr_byte1;
+	header |= panel->hdr_data.vscext_header_byte3;
 	dp_write(base + MMSS_DP_VSCEXT_1, header);
 
-	header =  ctrl->hdr_data.version;
-	header |=  ctrl->hdr_data.length << 8;
-	header |= ctrl->hdr_data.eotf << 16;
-	header |= (ctrl->hdr_data.descriptor_id << 24);
+	header =  panel->hdr_data.version;
+	header |=  panel->hdr_data.length << 8;
+	header |= hdr->eotf << 16;
 	dp_write(base + MMSS_DP_VSCEXT_2, header);
 
-	data = (DP_GET_LSB(ctrl->hdr_data.display_primaries_x[0]) |
-		(DP_GET_MSB(ctrl->hdr_data.display_primaries_x[0]) << 8) |
-		(DP_GET_LSB(ctrl->hdr_data.display_primaries_y[0]) << 16) |
-		(DP_GET_MSB(ctrl->hdr_data.display_primaries_y[0]) << 24));
+	data = (DP_GET_LSB(hdr->display_primaries_x[0]) |
+		(DP_GET_MSB(hdr->display_primaries_x[0]) << 8) |
+		(DP_GET_LSB(hdr->display_primaries_y[0]) << 16) |
+		(DP_GET_MSB(hdr->display_primaries_y[0]) << 24));
 	dp_write(base + MMSS_DP_VSCEXT_3, data);
 
-	data = (DP_GET_LSB(ctrl->hdr_data.display_primaries_x[1]) |
-		(DP_GET_MSB(ctrl->hdr_data.display_primaries_x[1]) << 8) |
-		(DP_GET_LSB(ctrl->hdr_data.display_primaries_y[1]) << 16) |
-		(DP_GET_MSB(ctrl->hdr_data.display_primaries_y[1]) << 24));
+	data = (DP_GET_LSB(hdr->display_primaries_x[1]) |
+		(DP_GET_MSB(hdr->display_primaries_x[1]) << 8) |
+		(DP_GET_LSB(hdr->display_primaries_y[1]) << 16) |
+		(DP_GET_MSB(hdr->display_primaries_y[1]) << 24));
 	dp_write(base + MMSS_DP_VSCEXT_4, data);
 
-	data = (DP_GET_LSB(ctrl->hdr_data.display_primaries_x[2]) |
-		(DP_GET_MSB(ctrl->hdr_data.display_primaries_x[2]) << 8) |
-		(DP_GET_LSB(ctrl->hdr_data.display_primaries_y[2]) << 16) |
-		(DP_GET_MSB(ctrl->hdr_data.display_primaries_y[2]) << 24));
+	data = (DP_GET_LSB(hdr->display_primaries_x[2]) |
+		(DP_GET_MSB(hdr->display_primaries_x[2]) << 8) |
+		(DP_GET_LSB(hdr->display_primaries_y[2]) << 16) |
+		(DP_GET_MSB(hdr->display_primaries_y[2]) << 24));
 	dp_write(base + MMSS_DP_VSCEXT_5, data);
 
-	data = (DP_GET_LSB(ctrl->hdr_data.white_point_x) |
-		(DP_GET_MSB(ctrl->hdr_data.white_point_x) << 8) |
-		(DP_GET_LSB(ctrl->hdr_data.white_point_y) << 16) |
-		(DP_GET_MSB(ctrl->hdr_data.white_point_y) << 24));
+	data = (DP_GET_LSB(hdr->white_point_x) |
+		(DP_GET_MSB(hdr->white_point_x) << 8) |
+		(DP_GET_LSB(hdr->white_point_y) << 16) |
+		(DP_GET_MSB(hdr->white_point_y) << 24));
 	dp_write(base + MMSS_DP_VSCEXT_6, data);
 
-	data = (DP_GET_LSB(ctrl->hdr_data.max_luminance) |
-		(DP_GET_MSB(ctrl->hdr_data.max_luminance) << 8) |
-		(DP_GET_LSB(ctrl->hdr_data.min_luminance) << 16) |
-		(DP_GET_MSB(ctrl->hdr_data.min_luminance) << 24));
+	data = (DP_GET_LSB(hdr->max_luminance) |
+		(DP_GET_MSB(hdr->max_luminance) << 8) |
+		(DP_GET_LSB(hdr->min_luminance) << 16) |
+		(DP_GET_MSB(hdr->min_luminance) << 24));
 	dp_write(base + MMSS_DP_VSCEXT_7, data);
 
-	data = (DP_GET_LSB(ctrl->hdr_data.max_content_light_level) |
-		(DP_GET_MSB(ctrl->hdr_data.max_content_light_level) << 8) |
-		(DP_GET_LSB(ctrl->hdr_data.max_average_light_level) << 16) |
-		(DP_GET_MSB(ctrl->hdr_data.max_average_light_level) << 24));
+	data = (DP_GET_LSB(hdr->max_content_light_level) |
+		(DP_GET_MSB(hdr->max_content_light_level) << 8) |
+		(DP_GET_LSB(hdr->max_average_light_level) << 16) |
+		(DP_GET_MSB(hdr->max_average_light_level) << 24));
 	dp_write(base + MMSS_DP_VSCEXT_8, data);
 
 	dp_write(base + MMSS_DP_VSCEXT_9, 0x00);
 }
 
-static void dp_catalog_ctrl_setup_vsc_sdp(struct dp_catalog_ctrl *ctrl)
+static void dp_catalog_panel_setup_vsc_sdp(struct dp_catalog_panel *panel)
 {
 	struct dp_catalog_private *catalog;
 	void __iomem *base;
 	u32 value;
 
-	if (!ctrl) {
+	if (!panel) {
 		pr_err("invalid input\n");
 		return;
 	}
 
-	dp_catalog_get_priv(ctrl);
-	base = catalog->io->ctrl_io.base;
+	dp_catalog_get_priv(panel);
+	base = catalog->io->dp_link.base;
 
 	value = dp_read(base + MMSS_DP_GENERIC0_0);
-	value |= ctrl->hdr_data.vsc_hdr_byte1;
+	value |= panel->hdr_data.vsc_header_byte1;
 	dp_write(base + MMSS_DP_GENERIC0_0, value);
 
 	value = dp_read(base + MMSS_DP_GENERIC0_1);
-	value |= ctrl->hdr_data.vsc_hdr_byte2;
+	value |= panel->hdr_data.vsc_header_byte2;
 	dp_write(base + MMSS_DP_GENERIC0_1, value);
 
 	value = dp_read(base + MMSS_DP_GENERIC0_1);
-	value |= ctrl->hdr_data.vsc_hdr_byte3;
+	value |= panel->hdr_data.vsc_header_byte3;
 	dp_write(base + MMSS_DP_GENERIC0_1, value);
 
 	dp_write(base + MMSS_DP_GENERIC0_2, 0x00);
@@ -434,25 +435,31 @@
 	dp_write(base + MMSS_DP_GENERIC0_4, 0x00);
 	dp_write(base + MMSS_DP_GENERIC0_5, 0x00);
 
-	dp_write(base + MMSS_DP_GENERIC0_6, ctrl->hdr_data.pkt_payload);
+	value = (panel->hdr_data.colorimetry & 0xF) |
+		((panel->hdr_data.pixel_encoding & 0xF) << 4) |
+		((panel->hdr_data.bpc & 0x7) << 8) |
+		((panel->hdr_data.dynamic_range & 0x1) << 15) |
+		((panel->hdr_data.content_type & 0x7) << 16);
+
+	dp_write(base + MMSS_DP_GENERIC0_6, value);
 	dp_write(base + MMSS_DP_GENERIC0_7, 0x00);
 	dp_write(base + MMSS_DP_GENERIC0_8, 0x00);
 	dp_write(base + MMSS_DP_GENERIC0_9, 0x00);
 }
 
-static void dp_catalog_ctrl_config_hdr(struct dp_catalog_ctrl *ctrl)
+static void dp_catalog_panel_config_hdr(struct dp_catalog_panel *panel)
 {
 	struct dp_catalog_private *catalog;
 	void __iomem *base;
 	u32 cfg, cfg2;
 
-	if (!ctrl) {
+	if (!panel) {
 		pr_err("invalid input\n");
 		return;
 	}
 
-	dp_catalog_get_priv(ctrl);
-	base = catalog->io->ctrl_io.base;
+	dp_catalog_get_priv(panel);
+	base = catalog->io->dp_link.base;
 
 	cfg = dp_read(base + MMSS_DP_SDP_CFG);
 	/* VSCEXT_SDP_EN */
@@ -468,8 +475,8 @@
 	cfg2 |= BIT(16);
 	dp_write(base + MMSS_DP_SDP_CFG2, cfg2);
 
-	dp_catalog_ctrl_setup_vsc_sdp(ctrl);
-	dp_catalog_ctrl_setup_infoframe_sdp(ctrl);
+	dp_catalog_panel_setup_vsc_sdp(panel);
+	dp_catalog_panel_setup_infoframe_sdp(panel);
 
 	cfg = dp_read(base + DP_MISC1_MISC0);
 	/* Indicates presence of VSC */
@@ -481,7 +488,7 @@
 	/* Send VSC */
 	cfg |= BIT(7);
 
-	switch (ctrl->hdr_data.bpc) {
+	switch (panel->hdr_data.bpc) {
 	default:
 	case 10:
 		cfg |= BIT(9);
@@ -511,7 +518,7 @@
 	}
 
 	dp_catalog_get_priv(ctrl);
-	base = catalog->io->ctrl_io.base;
+	base = catalog->io->dp_link.base;
 
 	dp_write(base + DP_VALID_BOUNDARY, ctrl->valid_boundary);
 	dp_write(base + DP_TU, ctrl->dp_tu);
@@ -529,7 +536,7 @@
 	}
 
 	dp_catalog_get_priv(ctrl);
-	base = catalog->io->ctrl_io.base;
+	base = catalog->io->dp_link.base;
 
 	dp_write(base + DP_STATE_CTRL, state);
 }
@@ -537,7 +544,7 @@
 static void dp_catalog_ctrl_config_ctrl(struct dp_catalog_ctrl *ctrl, u32 cfg)
 {
 	struct dp_catalog_private *catalog;
-	void __iomem *base;
+	void __iomem *link_base;
 
 	if (!ctrl) {
 		pr_err("invalid input\n");
@@ -545,13 +552,11 @@
 	}
 
 	dp_catalog_get_priv(ctrl);
-	base = catalog->io->ctrl_io.base;
+	link_base = catalog->io->dp_link.base;
 
 	pr_debug("DP_CONFIGURATION_CTRL=0x%x\n", cfg);
 
-	dp_write(base + DP_CONFIGURATION_CTRL, cfg);
-	dp_write(base + DP_MAINLINK_LEVELS, 0xa08);
-	dp_write(base + MMSS_DP_ASYNC_FIFO_CONFIG, 0x1);
+	dp_write(link_base + DP_CONFIGURATION_CTRL, cfg);
 }
 
 static void dp_catalog_ctrl_lane_mapping(struct dp_catalog_ctrl *ctrl)
@@ -565,9 +570,9 @@
 	}
 
 	dp_catalog_get_priv(ctrl);
-	base = catalog->io->ctrl_io.base;
+	base = catalog->io->dp_link.base;
 
-	dp_write(base + DP_LOGICAL2PHYSCIAL_LANE_MAPPING, 0xe4);
+	dp_write(base + DP_LOGICAL2PHYSICAL_LANE_MAPPING, 0xe4);
 }
 
 static void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog_ctrl *ctrl,
@@ -583,7 +588,7 @@
 	}
 
 	dp_catalog_get_priv(ctrl);
-	base = catalog->io->ctrl_io.base;
+	base = catalog->io->dp_link.base;
 
 	if (enable) {
 		dp_write(base + DP_MAINLINK_CTRL, 0x02000000);
@@ -614,7 +619,7 @@
 	}
 
 	dp_catalog_get_priv(ctrl);
-	base = catalog->io->ctrl_io.base;
+	base = catalog->io->dp_link.base;
 
 	misc_val |= (tb << 5);
 	misc_val |= BIT(0); /* Configure clock to synchronous mode */
@@ -680,7 +685,7 @@
 			nvid *= 3;
 	}
 
-	base_ctrl = catalog->io->ctrl_io.base;
+	base_ctrl = catalog->io->dp_link.base;
 	pr_debug("mvid=0x%x, nvid=0x%x\n", mvid, nvid);
 	dp_write(base_ctrl + DP_SOFTWARE_MVID, mvid);
 	dp_write(base_ctrl + DP_SOFTWARE_NVID, nvid);
@@ -700,7 +705,7 @@
 	}
 
 	dp_catalog_get_priv(ctrl);
-	base = catalog->io->ctrl_io.base;
+	base = catalog->io->dp_link.base;
 
 	bit = 1;
 	bit <<= (pattern - 1);
@@ -754,7 +759,57 @@
 	dp_write(base + USB3_DP_COM_RESET_OVRD_CTRL, 0x00);
 	/* make sure phy is brought out of reset */
 	wmb();
+}
 
+static void dp_catalog_panel_tpg_cfg(struct dp_catalog_panel *panel,
+	bool enable)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!panel) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(panel);
+	base = catalog->io->dp_p0.base;
+
+	if (!enable) {
+		dp_write(base + MMSS_DP_TPG_MAIN_CONTROL, 0x0);
+		dp_write(base + MMSS_DP_BIST_ENABLE, 0x0);
+		dp_write(base + MMSS_DP_TIMING_ENGINE_EN, 0x0);
+		wmb(); /* ensure Timing generator is turned off */
+		return;
+	}
+
+	dp_write(base + MMSS_DP_INTF_CONFIG, 0x0);
+	dp_write(base + MMSS_DP_INTF_HSYNC_CTL, panel->hsync_ctl);
+	dp_write(base + MMSS_DP_INTF_VSYNC_PERIOD_F0, panel->vsync_period *
+			panel->hsync_period);
+	dp_write(base + MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0, panel->v_sync_width *
+			panel->hsync_period);
+	dp_write(base + MMSS_DP_INTF_VSYNC_PERIOD_F1, 0);
+	dp_write(base + MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1, 0);
+	dp_write(base + MMSS_DP_INTF_DISPLAY_HCTL, panel->display_hctl);
+	dp_write(base + MMSS_DP_INTF_ACTIVE_HCTL, 0);
+	dp_write(base + MMSS_INTF_DISPLAY_V_START_F0, panel->display_v_start);
+	dp_write(base + MMSS_DP_INTF_DISPLAY_V_END_F0, panel->display_v_end);
+	dp_write(base + MMSS_INTF_DISPLAY_V_START_F1, 0);
+	dp_write(base + MMSS_DP_INTF_DISPLAY_V_END_F1, 0);
+	dp_write(base + MMSS_DP_INTF_ACTIVE_V_START_F0, 0);
+	dp_write(base + MMSS_DP_INTF_ACTIVE_V_END_F0, 0);
+	dp_write(base + MMSS_DP_INTF_ACTIVE_V_START_F1, 0);
+	dp_write(base + MMSS_DP_INTF_ACTIVE_V_END_F1, 0);
+	dp_write(base + MMSS_DP_INTF_POLARITY_CTL, 0);
+	wmb(); /* ensure TPG registers are programmed */
+
+	dp_write(base + MMSS_DP_TPG_MAIN_CONTROL, 0x100);
+	dp_write(base + MMSS_DP_TPG_VIDEO_CONFIG, 0x5);
+	wmb(); /* ensure TPG config is programmed */
+	dp_write(base + MMSS_DP_BIST_ENABLE, 0x1);
+	dp_write(base + MMSS_DP_TIMING_ENGINE_EN, 0x1);
+	wmb(); /* ensure Timing generator is turned on */
 }
 
 static void dp_catalog_ctrl_reset(struct dp_catalog_ctrl *ctrl)
@@ -769,7 +824,7 @@
 	}
 
 	dp_catalog_get_priv(ctrl);
-	base = catalog->io->ctrl_io.base;
+	base = catalog->io->dp_ahb.base;
 
 	sw_reset = dp_read(base + DP_SW_RESET);
 
@@ -794,7 +849,7 @@
 	}
 
 	dp_catalog_get_priv(ctrl);
-	base = catalog->io->ctrl_io.base;
+	base = catalog->io->dp_link.base;
 
 	while (--cnt) {
 		/* DP_MAINLINK_READY */
@@ -821,7 +876,7 @@
 	}
 
 	dp_catalog_get_priv(ctrl);
-	base = catalog->io->ctrl_io.base;
+	base = catalog->io->dp_ahb.base;
 
 	if (enable) {
 		dp_write(base + DP_INTR_STATUS, DP_INTR_MASK1);
@@ -843,7 +898,7 @@
 	}
 
 	dp_catalog_get_priv(ctrl);
-	base = catalog->io->ctrl_io.base;
+	base = catalog->io->dp_aux.base;
 
 	if (en) {
 		u32 reftimer = dp_read(base + DP_DP_HPD_REFTIMER);
@@ -874,7 +929,7 @@
 	}
 
 	dp_catalog_get_priv(ctrl);
-	base = catalog->io->ctrl_io.base;
+	base = catalog->io->dp_ahb.base;
 
 	ctrl->isr = dp_read(base + DP_INTR_STATUS2);
 	ctrl->isr &= ~DP_INTR_MASK2;
@@ -895,7 +950,7 @@
 	}
 
 	dp_catalog_get_priv(ctrl);
-	base = catalog->io->ctrl_io.base;
+	base = catalog->io->dp_ahb.base;
 
 	dp_write(base + DP_PHY_CTRL, 0x5); /* bit 0 & 2 */
 	usleep_range(1000, 1010); /* h/w recommended delay */
@@ -984,7 +1039,7 @@
 
 	dp_catalog_get_priv(ctrl);
 
-	base = catalog->io->ctrl_io.base;
+	base = catalog->io->dp_link.base;
 
 	dp_write(base + DP_STATE_CTRL, 0x0);
 
@@ -1012,7 +1067,7 @@
 		/* 1111100000111110 */
 		dp_write(base + DP_TEST_80BIT_CUSTOM_PATTERN_REG2, 0x0000F83E);
 		break;
-	case DP_TEST_PHY_PATTERN_HBR2_CTS_EYE_PATTERN:
+	case DP_TEST_PHY_PATTERN_CP2520_PATTERN_1:
 		value = BIT(16);
 		dp_write(base + DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, value);
 		value |= 0xFC;
@@ -1020,6 +1075,10 @@
 		dp_write(base + DP_MAINLINK_LEVELS, 0x2);
 		dp_write(base + DP_STATE_CTRL, 0x10);
 		break;
+	case DP_TEST_PHY_PATTERN_CP2520_PATTERN_3:
+		dp_write(base + DP_MAINLINK_CTRL, 0x11);
+		dp_write(base + DP_STATE_CTRL, 0x8);
+		break;
 	default:
 		pr_debug("No valid test pattern requested: 0x%x\n", pattern);
 		return;
@@ -1041,7 +1100,7 @@
 
 	dp_catalog_get_priv(ctrl);
 
-	base = catalog->io->ctrl_io.base;
+	base = catalog->io->dp_link.base;
 
 	return dp_read(base + DP_MAINLINK_READY);
 }
@@ -1058,7 +1117,7 @@
 	}
 
 	dp_catalog_get_priv(panel);
-	base = catalog->io->ctrl_io.base;
+	base = catalog->io->dp_link.base;
 
 	dp_write(base + DP_TOTAL_HOR_VER, panel->total);
 	dp_write(base + DP_START_HOR_VER_FROM_SYNC, panel->sync_start);
@@ -1118,7 +1177,7 @@
 		return;
 
 	dp_catalog_get_priv(audio);
-	base = catalog->io->ctrl_io.base;
+	base = catalog->io->dp_link.base;
 
 	/* AUDIO_TIMESTAMP_SDP_EN */
 	sdp_cfg |= BIT(1);
@@ -1157,7 +1216,7 @@
 
 	dp_catalog_get_priv(audio);
 
-	base    = catalog->io->ctrl_io.base;
+	base    = catalog->io->dp_link.base;
 	sdp_map = catalog->audio_map;
 	sdp     = audio->sdp_type;
 	header  = audio->sdp_header;
@@ -1179,7 +1238,7 @@
 
 	dp_catalog_get_priv(audio);
 
-	base    = catalog->io->ctrl_io.base;
+	base    = catalog->io->dp_link.base;
 	sdp_map = catalog->audio_map;
 	sdp     = audio->sdp_type;
 	header  = audio->sdp_header;
@@ -1197,7 +1256,7 @@
 	dp_catalog_get_priv(audio);
 
 	select = audio->data;
-	base   = catalog->io->ctrl_io.base;
+	base   = catalog->io->dp_link.base;
 
 	acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14);
 
@@ -1214,7 +1273,7 @@
 
 	dp_catalog_get_priv(audio);
 
-	base   = catalog->io->ctrl_io.base;
+	base   = catalog->io->dp_link.base;
 	safe_to_exit_level = audio->data;
 
 	mainlink_levels = dp_read(base + DP_MAINLINK_LEVELS);
@@ -1236,7 +1295,7 @@
 
 	dp_catalog_get_priv(audio);
 
-	base   = catalog->io->ctrl_io.base;
+	base   = catalog->io->dp_link.base;
 	enable = !!audio->data;
 
 	audio_ctrl = dp_read(base + MMSS_DP_AUDIO_CFG);
@@ -1287,7 +1346,6 @@
 		.phy_lane_cfg   = dp_catalog_ctrl_phy_lane_cfg,
 		.update_vx_px   = dp_catalog_ctrl_update_vx_px,
 		.get_interrupt  = dp_catalog_ctrl_get_interrupt,
-		.config_hdr     = dp_catalog_ctrl_config_hdr,
 		.update_transfer_unit = dp_catalog_ctrl_update_transfer_unit,
 		.read_hdcp_status     = dp_catalog_ctrl_read_hdcp_status,
 		.send_phy_pattern    = dp_catalog_ctrl_send_phy_pattern,
@@ -1304,6 +1362,8 @@
 	};
 	struct dp_catalog_panel panel = {
 		.timing_cfg = dp_catalog_panel_timing_cfg,
+		.config_hdr = dp_catalog_panel_config_hdr,
+		.tpg_config = dp_catalog_panel_tpg_cfg,
 	};
 
 	if (!io) {
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
index aca2f18..c70e8d1 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.h
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.h
@@ -15,6 +15,8 @@
 #ifndef _DP_CATALOG_H_
 #define _DP_CATALOG_H_
 
+#include <drm/msm_drm.h>
+
 #include "dp_parser.h"
 
 /* interrupts */
@@ -34,30 +36,27 @@
 #define DP_INTR_FRAME_END		BIT(6)
 #define DP_INTR_CRC_UPDATED		BIT(9)
 
-#define HDR_PRIMARIES_COUNT   3
-
 struct dp_catalog_hdr_data {
-	u32 vsc_hdr_byte0;
-	u32 vsc_hdr_byte1;
-	u32 vsc_hdr_byte2;
-	u32 vsc_hdr_byte3;
-	u32 pkt_payload;
+	u32 vsc_header_byte0;
+	u32 vsc_header_byte1;
+	u32 vsc_header_byte2;
+	u32 vsc_header_byte3;
+
+	u32 vscext_header_byte0;
+	u32 vscext_header_byte1;
+	u32 vscext_header_byte2;
+	u32 vscext_header_byte3;
 
 	u32 bpc;
 
 	u32 version;
 	u32 length;
-	u32 eotf;
-	u32 descriptor_id;
+	u32 pixel_encoding;
+	u32 colorimetry;
+	u32 dynamic_range;
+	u32 content_type;
 
-	u32 display_primaries_x[HDR_PRIMARIES_COUNT];
-	u32 display_primaries_y[HDR_PRIMARIES_COUNT];
-	u32 white_point_x;
-	u32 white_point_y;
-	u32 max_luminance;
-	u32 min_luminance;
-	u32 max_content_light_level;
-	u32 max_average_light_level;
+	struct drm_msm_ext_hdr_metadata hdr_meta;
 };
 
 struct dp_catalog_aux {
@@ -83,7 +82,6 @@
 	u32 valid_boundary;
 	u32 valid_boundary2;
 	u32 isr;
-	struct dp_catalog_hdr_data hdr_data;
 
 	void (*state_ctrl)(struct dp_catalog_ctrl *ctrl, u32 state);
 	void (*config_ctrl)(struct dp_catalog_ctrl *ctrl, u32 config);
@@ -104,7 +102,6 @@
 	void (*update_vx_px)(struct dp_catalog_ctrl *ctrl, u8 v_level,
 				u8 p_level);
 	void (*get_interrupt)(struct dp_catalog_ctrl *ctrl);
-	void (*config_hdr)(struct dp_catalog_ctrl *ctrl);
 	void (*update_transfer_unit)(struct dp_catalog_ctrl *ctrl);
 	u32 (*read_hdcp_status)(struct dp_catalog_ctrl *ctrl);
 	void (*send_phy_pattern)(struct dp_catalog_ctrl *ctrl,
@@ -148,7 +145,20 @@
 	u32 width_blanking;
 	u32 dp_active;
 
+	struct dp_catalog_hdr_data hdr_data;
+
+	/* TPG */
+	u32 hsync_period;
+	u32 vsync_period;
+	u32 display_v_start;
+	u32 display_v_end;
+	u32 v_sync_width;
+	u32 hsync_ctl;
+	u32 display_hctl;
+
 	int (*timing_cfg)(struct dp_catalog_panel *panel);
+	void (*config_hdr)(struct dp_catalog_panel *panel);
+	void (*tpg_config)(struct dp_catalog_panel *panel, bool enable);
 };
 
 struct dp_catalog {
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index 13ca6b2..65672c9 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -40,6 +40,7 @@
 #define MR_LINK_SYMBOL_ERM 0x80
 #define MR_LINK_PRBS7 0x100
 #define MR_LINK_CUSTOM80 0x200
+#define MR_LINK_TRAINING4  0x40
 
 struct dp_vc_tu_mapping_table {
 	u32 vic;
@@ -973,7 +974,7 @@
 	}
 
 	/* print success info as this is a result of user initiated action */
-	pr_debug("link training #2 successful\n");
+	pr_info("link training #2 successful\n");
 
 end:
 	dp_ctrl_state_ctrl(ctrl, 0);
@@ -1072,7 +1073,8 @@
 	return ctrl->power->clk_enable(ctrl->power, DP_CTRL_PM, false);
 }
 
-static int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip)
+static int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl,
+	bool flip, bool multi_func)
 {
 	struct dp_ctrl_private *ctrl;
 	struct dp_catalog_ctrl *catalog;
@@ -1087,8 +1089,10 @@
 	ctrl->orientation = flip;
 	catalog = ctrl->catalog;
 
-	catalog->usb_reset(ctrl->catalog, flip);
-	catalog->phy_reset(ctrl->catalog);
+	if (!multi_func) {
+		catalog->usb_reset(ctrl->catalog, flip);
+		catalog->phy_reset(ctrl->catalog);
+	}
 	catalog->enable_irq(ctrl->catalog, true);
 
 	return 0;
@@ -1214,9 +1218,6 @@
 	u32 pattern_sent = 0x0;
 	u32 pattern_requested = ctrl->link->phy_params.phy_test_pattern_sel;
 
-	pr_debug("request: %s\n",
-			dp_link_get_phy_test_pattern(pattern_requested));
-
 	ctrl->catalog->update_vx_px(ctrl->catalog,
 			ctrl->link->phy_params.v_level,
 			ctrl->link->phy_params.p_level);
@@ -1224,6 +1225,9 @@
 	ctrl->link->send_test_response(ctrl->link);
 
 	pattern_sent = ctrl->catalog->read_phy_pattern(ctrl->catalog);
+	pr_debug("pattern_request: %s. pattern_sent: 0x%x\n",
+			dp_link_get_phy_test_pattern(pattern_requested),
+			pattern_sent);
 
 	switch (pattern_sent) {
 	case MR_LINK_TRAINING1:
@@ -1235,7 +1239,7 @@
 		if ((pattern_requested ==
 				DP_TEST_PHY_PATTERN_SYMBOL_ERR_MEASUREMENT_CNT)
 			|| (pattern_requested ==
-				DP_TEST_PHY_PATTERN_HBR2_CTS_EYE_PATTERN))
+				DP_TEST_PHY_PATTERN_CP2520_PATTERN_1))
 			success = true;
 		break;
 	case MR_LINK_PRBS7:
@@ -1247,40 +1251,57 @@
 				DP_TEST_PHY_PATTERN_80_BIT_CUSTOM_PATTERN)
 			success = true;
 		break;
+	case MR_LINK_TRAINING4:
+		if (pattern_requested ==
+				DP_TEST_PHY_PATTERN_CP2520_PATTERN_3)
+			success = true;
+		break;
 	default:
 		success = false;
-		return;
+		break;
 	}
 
 	pr_debug("%s: %s\n", success ? "success" : "failed",
 			dp_link_get_phy_test_pattern(pattern_requested));
 }
 
-static void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl)
+static bool dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl)
 {
 	struct dp_ctrl_private *ctrl;
 	u32 sink_request = 0x0;
+	bool req_handled = false;
 
 	if (!dp_ctrl) {
 		pr_err("invalid input\n");
-		return;
+		goto end;
 	}
 
 	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
 	sink_request = ctrl->link->sink_request;
 
 	if (sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
-		pr_info("PHY_TEST_PATTERN request\n");
+		pr_info("PHY_TEST_PATTERN\n");
 		dp_ctrl_process_phy_test_request(ctrl);
+
+		req_handled = true;
 	}
 
-	if (sink_request & DP_LINK_STATUS_UPDATED)
+	if (sink_request & DP_LINK_STATUS_UPDATED) {
+		pr_info("DP_LINK_STATUS_UPDATED\n");
 		dp_ctrl_link_maintenance(ctrl);
 
+		req_handled = true;
+	}
+
 	if (sink_request & DP_TEST_LINK_TRAINING) {
+		pr_info("DP_TEST_LINK_TRAINING\n");
 		ctrl->link->send_test_response(ctrl->link);
 		dp_ctrl_link_maintenance(ctrl);
+
+		req_handled = true;
 	}
+end:
+	return req_handled;
 }
 
 static void dp_ctrl_reset(struct dp_ctrl *dp_ctrl)
@@ -1442,6 +1463,7 @@
 	ctrl->aux      = in->aux;
 	ctrl->link     = in->link;
 	ctrl->catalog  = in->catalog;
+	ctrl->dev  = in->dev;
 
 	dp_ctrl = &ctrl->dp_ctrl;
 
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
index d6d10ed..aaac0ab 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.h
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h
@@ -23,7 +23,7 @@
 #include "dp_catalog.h"
 
 struct dp_ctrl {
-	int (*init)(struct dp_ctrl *dp_ctrl, bool flip);
+	int (*init)(struct dp_ctrl *dp_ctrl, bool flip, bool multi_func);
 	void (*deinit)(struct dp_ctrl *dp_ctrl);
 	int (*on)(struct dp_ctrl *dp_ctrl);
 	void (*off)(struct dp_ctrl *dp_ctrl);
@@ -31,7 +31,7 @@
 	void (*push_idle)(struct dp_ctrl *dp_ctrl);
 	void (*abort)(struct dp_ctrl *dp_ctrl);
 	void (*isr)(struct dp_ctrl *dp_ctrl);
-	void (*handle_sink_request)(struct dp_ctrl *dp_ctrl);
+	bool (*handle_sink_request)(struct dp_ctrl *dp_ctrl);
 };
 
 struct dp_ctrl_in {
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c
index 92ac0ec..a530642 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.c
+++ b/drivers/gpu/drm/msm/dp/dp_debug.c
@@ -29,6 +29,11 @@
 
 struct dp_debug_private {
 	struct dentry *root;
+	u8 *edid;
+	u32 edid_size;
+
+	u8 *dpcd;
+	u32 dpcd_size;
 
 	struct dp_usbpd *usbpd;
 	struct dp_link *link;
@@ -39,6 +44,138 @@
 	struct dp_debug dp_debug;
 };
 
+static ssize_t dp_debug_write_edid(struct file *file,
+		const char __user *user_buff, size_t count, loff_t *ppos)
+{
+	struct dp_debug_private *debug = file->private_data;
+	u8 *buf = NULL, *buf_t = NULL, *edid = NULL;
+	const int char_to_nib = 2;
+	size_t edid_size = 0;
+	size_t size = 0, edid_buf_index = 0;
+	ssize_t rc = count;
+
+	if (!debug)
+		return -ENODEV;
+
+	if (*ppos)
+		goto bail;
+
+	size = min_t(size_t, count, SZ_1K);
+
+	buf = kzalloc(size, GFP_KERNEL);
+	if (!buf) {
+		rc = -ENOMEM;
+		goto bail;
+	}
+
+	if (copy_from_user(buf, user_buff, size))
+		goto bail;
+
+	edid_size = size / char_to_nib;
+	buf_t = buf;
+
+	memset(debug->edid, 0, debug->edid_size);
+
+	if (edid_size != debug->edid_size) {
+		pr_debug("clearing debug edid\n");
+		goto bail;
+	}
+
+	while (edid_size--) {
+		char t[3];
+		int d;
+
+		memcpy(t, buf_t, sizeof(char) * char_to_nib);
+		t[char_to_nib] = '\0';
+
+		if (kstrtoint(t, 16, &d)) {
+			pr_err("kstrtoint error\n");
+			goto bail;
+		}
+
+		if (edid_buf_index < debug->edid_size)
+			debug->edid[edid_buf_index++] = d;
+
+		buf_t += char_to_nib;
+	}
+
+	print_hex_dump(KERN_DEBUG, "DEBUG EDID: ", DUMP_PREFIX_NONE,
+		16, 1, debug->edid, debug->edid_size, false);
+
+	edid = debug->edid;
+bail:
+	kfree(buf);
+	debug->panel->set_edid(debug->panel, edid);
+	return rc;
+}
+
+static ssize_t dp_debug_write_dpcd(struct file *file,
+		const char __user *user_buff, size_t count, loff_t *ppos)
+{
+	struct dp_debug_private *debug = file->private_data;
+	u8 *buf = NULL, *buf_t = NULL, *dpcd = NULL;
+	const int char_to_nib = 2;
+	size_t dpcd_size = 0;
+	size_t size = 0, dpcd_buf_index = 0;
+	ssize_t rc = count;
+
+	pr_debug("count=%zu\n", count);
+
+	if (!debug)
+		return -ENODEV;
+
+	if (*ppos)
+		goto bail;
+
+	size = min_t(size_t, count, SZ_32);
+
+	buf = kzalloc(size, GFP_KERNEL);
+	if (!buf) {
+		rc = -ENOMEM;
+		goto bail;
+	}
+
+	if (copy_from_user(buf, user_buff, size))
+		goto bail;
+
+	dpcd_size = size / char_to_nib;
+	buf_t = buf;
+
+	memset(debug->dpcd, 0, debug->dpcd_size);
+
+	if (dpcd_size != debug->dpcd_size) {
+		pr_debug("clearing debug dpcd\n");
+		goto bail;
+	}
+
+	while (dpcd_size--) {
+		char t[3];
+		int d;
+
+		memcpy(t, buf_t, sizeof(char) * char_to_nib);
+		t[char_to_nib] = '\0';
+
+		if (kstrtoint(t, 16, &d)) {
+			pr_err("kstrtoint error\n");
+			goto bail;
+		}
+
+		if (dpcd_buf_index < debug->dpcd_size)
+			debug->dpcd[dpcd_buf_index++] = d;
+
+		buf_t += char_to_nib;
+	}
+
+	print_hex_dump(KERN_DEBUG, "DEBUG DPCD: ", DUMP_PREFIX_NONE,
+		8, 1, debug->dpcd, debug->dpcd_size, false);
+
+	dpcd = debug->dpcd;
+bail:
+	kfree(buf);
+	debug->panel->set_dpcd(debug->panel, dpcd);
+	return rc;
+}
+
 static ssize_t dp_debug_write_hpd(struct file *file,
 		const char __user *user_buff, size_t count, loff_t *ppos)
 {
@@ -143,6 +280,44 @@
 	return len;
 }
 
+static ssize_t dp_debug_tpg_write(struct file *file,
+		const char __user *user_buff, size_t count, loff_t *ppos)
+{
+	struct dp_debug_private *debug = file->private_data;
+	char buf[SZ_8];
+	size_t len = 0;
+	u32 tpg_state = 0;
+
+	if (!debug)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;
+
+	/* Leave room for termination char */
+	len = min_t(size_t, count, SZ_8 - 1);
+	if (copy_from_user(buf, user_buff, len))
+		goto bail;
+
+	buf[len] = '\0';
+
+	if (kstrtouint(buf, 10, &tpg_state) != 0)
+		goto bail;
+
+	tpg_state &= 0x1;
+	pr_debug("tpg_state: %d\n", tpg_state);
+
+	if (tpg_state == debug->dp_debug.tpg_state)
+		goto bail;
+
+	if (debug->panel)
+		debug->panel->tpg_config(debug->panel, tpg_state);
+
+	debug->dp_debug.tpg_state = tpg_state;
+bail:
+	return len;
+}
+
 static ssize_t dp_debug_read_connected(struct file *file,
 		char __user *user_buff, size_t count, loff_t *ppos)
 {
@@ -198,6 +373,7 @@
 		goto error;
 	}
 
+	mutex_lock(&connector->dev->mode_config.mutex);
 	list_for_each_entry(mode, &connector->modes, head) {
 		len += snprintf(buf + len, SZ_4K - len,
 		"%s %d %d %d %d %d %d %d %d %d %d 0x%x\n",
@@ -206,6 +382,7 @@
 		mode->htotal, mode->vdisplay, mode->vsync_start,
 		mode->vsync_end, mode->vtotal, mode->flags);
 	}
+	mutex_unlock(&connector->dev->mode_config.mutex);
 
 	if (copy_to_user(user_buff, buf, len)) {
 		kfree(buf);
@@ -415,6 +592,28 @@
 	return len;
 }
 
+static ssize_t dp_debug_tpg_read(struct file *file,
+	char __user *user_buff, size_t count, loff_t *ppos)
+{
+	struct dp_debug_private *debug = file->private_data;
+	char buf[SZ_8];
+	u32 len = 0;
+
+	if (!debug)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;
+
+	len += snprintf(buf, SZ_8, "%d\n", debug->dp_debug.tpg_state);
+
+	if (copy_to_user(user_buff, buf, len))
+		return -EFAULT;
+
+	*ppos += len;
+	return len;
+}
+
 static const struct file_operations dp_debug_fops = {
 	.open = simple_open,
 	.read = dp_debug_read_info,
@@ -431,6 +630,16 @@
 	.write = dp_debug_write_hpd,
 };
 
+static const struct file_operations edid_fops = {
+	.open = simple_open,
+	.write = dp_debug_write_edid,
+};
+
+static const struct file_operations dpcd_fops = {
+	.open = simple_open,
+	.write = dp_debug_write_dpcd,
+};
+
 static const struct file_operations connected_fops = {
 	.open = simple_open,
 	.read = dp_debug_read_connected,
@@ -442,15 +651,18 @@
 	.write = dp_debug_bw_code_write,
 };
 
+static const struct file_operations tpg_fops = {
+	.open = simple_open,
+	.read = dp_debug_tpg_read,
+	.write = dp_debug_tpg_write,
+};
+
 static int dp_debug_init(struct dp_debug *dp_debug)
 {
 	int rc = 0;
 	struct dp_debug_private *debug = container_of(dp_debug,
 		struct dp_debug_private, dp_debug);
-	struct dentry *dir, *file, *edid_modes;
-	struct dentry *hpd, *connected;
-	struct dentry *max_bw_code;
-	struct dentry *root = debug->root;
+	struct dentry *dir, *file;
 
 	dir = debugfs_create_dir(DEBUG_NAME, NULL);
 	if (IS_ERR_OR_NULL(dir)) {
@@ -460,6 +672,8 @@
 		goto error;
 	}
 
+	debug->root = dir;
+
 	file = debugfs_create_file("dp_debug", 0444, dir,
 				debug, &dp_debug_fops);
 	if (IS_ERR_OR_NULL(file)) {
@@ -469,46 +683,73 @@
 		goto error_remove_dir;
 	}
 
-	edid_modes = debugfs_create_file("edid_modes", 0644, dir,
+	file = debugfs_create_file("edid_modes", 0644, dir,
 					debug, &edid_modes_fops);
-	if (IS_ERR_OR_NULL(edid_modes)) {
-		rc = PTR_ERR(edid_modes);
+	if (IS_ERR_OR_NULL(file)) {
+		rc = PTR_ERR(file);
 		pr_err("[%s] debugfs create edid_modes failed, rc=%d\n",
 		       DEBUG_NAME, rc);
 		goto error_remove_dir;
 	}
 
-	hpd = debugfs_create_file("hpd", 0644, dir,
+	file = debugfs_create_file("hpd", 0644, dir,
 					debug, &hpd_fops);
-	if (IS_ERR_OR_NULL(hpd)) {
-		rc = PTR_ERR(hpd);
+	if (IS_ERR_OR_NULL(file)) {
+		rc = PTR_ERR(file);
 		pr_err("[%s] debugfs hpd failed, rc=%d\n",
 			DEBUG_NAME, rc);
 		goto error_remove_dir;
 	}
 
-	connected = debugfs_create_file("connected", 0444, dir,
+	file = debugfs_create_file("connected", 0444, dir,
 					debug, &connected_fops);
-	if (IS_ERR_OR_NULL(connected)) {
-		rc = PTR_ERR(connected);
+	if (IS_ERR_OR_NULL(file)) {
+		rc = PTR_ERR(file);
 		pr_err("[%s] debugfs connected failed, rc=%d\n",
 			DEBUG_NAME, rc);
 		goto error_remove_dir;
 	}
 
-	max_bw_code = debugfs_create_file("max_bw_code", 0644, dir,
+	file = debugfs_create_file("max_bw_code", 0644, dir,
 			debug, &bw_code_fops);
-	if (IS_ERR_OR_NULL(max_bw_code)) {
-		rc = PTR_ERR(max_bw_code);
+	if (IS_ERR_OR_NULL(file)) {
+		rc = PTR_ERR(file);
 		pr_err("[%s] debugfs max_bw_code failed, rc=%d\n",
 		       DEBUG_NAME, rc);
 		goto error_remove_dir;
 	}
 
-	root = dir;
-	return rc;
+	file = debugfs_create_file("edid", 0644, dir,
+					debug, &edid_fops);
+	if (IS_ERR_OR_NULL(file)) {
+		rc = PTR_ERR(file);
+		pr_err("[%s] debugfs edid failed, rc=%d\n",
+			DEBUG_NAME, rc);
+		goto error_remove_dir;
+	}
+
+	file = debugfs_create_file("dpcd", 0644, dir,
+					debug, &dpcd_fops);
+	if (IS_ERR_OR_NULL(file)) {
+		rc = PTR_ERR(file);
+		pr_err("[%s] debugfs dpcd failed, rc=%d\n",
+			DEBUG_NAME, rc);
+		goto error_remove_dir;
+	}
+
+	file = debugfs_create_file("tpg_ctrl", 0644, dir,
+			debug, &tpg_fops);
+	if (IS_ERR_OR_NULL(file)) {
+		rc = PTR_ERR(file);
+		pr_err("[%s] debugfs tpg failed, rc=%d\n",
+		       DEBUG_NAME, rc);
+		goto error_remove_dir;
+	}
+
+	return 0;
+
 error_remove_dir:
-	debugfs_remove(dir);
+	debugfs_remove_recursive(dir);
 error:
 	return rc;
 }
@@ -533,6 +774,24 @@
 		goto error;
 	}
 
+	debug->edid = devm_kzalloc(dev, SZ_256, GFP_KERNEL);
+	if (!debug->edid) {
+		rc = -ENOMEM;
+		devm_kfree(dev, debug);
+		goto error;
+	}
+
+	debug->edid_size = SZ_256;
+
+	debug->dpcd = devm_kzalloc(dev, SZ_16, GFP_KERNEL);
+	if (!debug->dpcd) {
+		rc = -ENOMEM;
+		devm_kfree(dev, debug);
+		goto error;
+	}
+
+	debug->dpcd_size = SZ_16;
+
 	debug->dp_debug.debug_en = false;
 	debug->usbpd = usbpd;
 	debug->link = link;
@@ -565,7 +824,7 @@
 
 	debug = container_of(dp_debug, struct dp_debug_private, dp_debug);
 
-	debugfs_remove(debug->root);
+	debugfs_remove_recursive(debug->root);
 
 	return 0;
 }
@@ -581,5 +840,7 @@
 
 	dp_debug_deinit(dp_debug);
 
+	devm_kfree(debug->dev, debug->edid);
+	devm_kfree(debug->dev, debug->dpcd);
 	devm_kfree(debug->dev, debug);
 }
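
Example (illustrative sketch, not taken from this series): a minimal userspace
program exercising the debugfs nodes added above. The debugfs mount point and
directory name are assumptions here (DEBUG_NAME is defined outside this hunk);
the payload sizes follow the checks in dp_debug_write_edid() and
dp_debug_write_dpcd(), i.e. exactly 256 EDID bytes (512 hex characters) and
16 DPCD bytes (32 hex characters), otherwise the override is cleared.

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	static int dp_debug_write_node(const char *path, const char *payload)
	{
		int fd = open(path, O_WRONLY);
		ssize_t ret;

		if (fd < 0)
			return -1;

		ret = write(fd, payload, strlen(payload));
		close(fd);

		return (ret < 0) ? -1 : 0;
	}

	int main(void)
	{
		/* directory name below is hypothetical; see DEBUG_NAME */
		dp_debug_write_node("/sys/kernel/debug/dp_debug/tpg_ctrl", "1");

		/* an EDID override would be written as 512 hex characters */
		/* dp_debug_write_node("/sys/kernel/debug/dp_debug/edid", hex_edid); */

		return 0;
	}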
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h
index d5a9301..6e3e9a9 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.h
+++ b/drivers/gpu/drm/msm/dp/dp_debug.h
@@ -25,6 +25,7 @@
  * @vdisplay: used to filter out vdisplay value
  * @hdisplay: used to filter out hdisplay value
  * @vrefresh: used to filter out vrefresh value
+ * @tpg_state: specifies whether tpg feature is enabled
  */
 struct dp_debug {
 	bool debug_en;
@@ -32,6 +33,7 @@
 	int vdisplay;
 	int hdisplay;
 	int vrefresh;
+	bool tpg_state;
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 7fbc63a..c0623d8 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -277,7 +277,6 @@
 static int dp_display_initialize_hdcp(struct dp_display_private *dp)
 {
 	struct sde_hdcp_init_data hdcp_init_data;
-	struct resource *res;
 	int rc = 0;
 
 	if (!dp) {
@@ -293,15 +292,6 @@
 		goto error;
 	}
 
-	res = platform_get_resource_byname(dp->pdev,
-		IORESOURCE_MEM, "dp_ctrl");
-	if (!res) {
-		pr_err("Error getting dp ctrl resource\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	hdcp_init_data.phy_addr      = res->start;
 	hdcp_init_data.client_id     = HDCP_CLIENT_DP;
 	hdcp_init_data.drm_aux       = dp->aux->drm_aux;
 	hdcp_init_data.cb_data       = (void *)dp;
@@ -310,6 +300,10 @@
 	hdcp_init_data.sec_access    = true;
 	hdcp_init_data.notify_status = dp_display_notify_hdcp_status_cb;
 	hdcp_init_data.core_io       = &dp->parser->io.ctrl_io;
+	hdcp_init_data.dp_ahb        = &dp->parser->io.dp_ahb;
+	hdcp_init_data.dp_aux        = &dp->parser->io.dp_aux;
+	hdcp_init_data.dp_link       = &dp->parser->io.dp_link;
+	hdcp_init_data.dp_p0         = &dp->parser->io.dp_p0;
 	hdcp_init_data.qfprom_io     = &dp->parser->io.qfprom_io;
 	hdcp_init_data.hdcp_io       = &dp->parser->io.hdcp_io;
 	hdcp_init_data.revision      = &dp->panel->link_info.revision;
@@ -363,24 +357,12 @@
 	dp->dp_display.drm_dev = drm;
 	priv = drm->dev_private;
 
-	rc = dp->parser->parse(dp->parser);
-	if (rc) {
-		pr_err("device tree parsing failed\n");
-		goto end;
-	}
-
 	rc = dp->aux->drm_aux_register(dp->aux);
 	if (rc) {
 		pr_err("DRM DP AUX register failed\n");
 		goto end;
 	}
 
-	rc = dp->panel->sde_edid_register(dp->panel);
-	if (rc) {
-		pr_err("DRM DP EDID register failed\n");
-		goto end;
-	}
-
 	rc = dp->power->power_client_init(dp->power, &priv->phandle);
 	if (rc) {
 		pr_err("Power client create failed\n");
@@ -414,7 +396,6 @@
 	}
 
 	(void)dp->power->power_client_deinit(dp->power);
-	(void)dp->panel->sde_edid_deregister(dp->panel);
 	(void)dp->aux->drm_aux_deregister(dp->aux);
 	dp_display_deinitialize_hdcp(dp);
 }
@@ -512,7 +493,6 @@
 static int dp_display_process_hpd_high(struct dp_display_private *dp)
 {
 	int rc = 0;
-	u32 max_pclk_from_edid = 0;
 	struct edid *edid;
 
 	dp->aux->init(dp->aux, dp->parser->aux_cfg);
@@ -538,11 +518,7 @@
 
 	dp->panel->handle_sink_request(dp->panel);
 
-	max_pclk_from_edid = dp->panel->get_max_pclk(dp->panel);
-
-	dp->dp_display.max_pclk_khz = min(max_pclk_from_edid,
-		dp->parser->max_pclk_khz);
-
+	dp->dp_display.max_pclk_khz = dp->parser->max_pclk_khz;
 notify:
 	dp_display_send_hpd_notification(dp, true);
 
@@ -563,7 +539,7 @@
 		flip = true;
 
 	dp->power->init(dp->power, flip);
-	dp->ctrl->init(dp->ctrl, flip);
+	dp->ctrl->init(dp->ctrl, flip, dp->usbpd->multi_func);
 	enable_irq(dp->irq);
 	dp->core_initialized = true;
 }
@@ -695,6 +671,8 @@
 
 static int dp_display_handle_hpd_irq(struct dp_display_private *dp)
 {
+	bool req_handled;
+
 	if (dp->link->sink_request & DS_PORT_STATUS_CHANGED) {
 		dp_display_send_hpd_notification(dp, false);
 
@@ -706,7 +684,18 @@
 		return dp_display_process_hpd_high(dp);
 	}
 
-	dp->ctrl->handle_sink_request(dp->ctrl);
+	mutex_lock(&dp->audio->ops_lock);
+	req_handled = dp->ctrl->handle_sink_request(dp->ctrl);
+	mutex_unlock(&dp->audio->ops_lock);
+
+	/*
+	 * Reconfigure audio if a test request was handled, since the test
+	 * could have changed the controller's state.
+	 */
+	if (req_handled && dp->audio_supported) {
+		dp->audio->off(dp->audio);
+		dp->audio->on(dp->audio);
+	}
 
 	dp_display_handle_video_request(dp);
 
@@ -806,6 +795,12 @@
 		goto error_parser;
 	}
 
+	rc = dp->parser->parse(dp->parser);
+	if (rc) {
+		pr_err("device tree parsing failed\n");
+		goto error_catalog;
+	}
+
 	dp->catalog = dp_catalog_get(dev, &dp->parser->io);
 	if (IS_ERR(dp->catalog)) {
 		rc = PTR_ERR(dp->catalog);
@@ -909,6 +904,7 @@
 static int dp_display_set_mode(struct dp_display *dp_display,
 		struct dp_display_mode *mode)
 {
+	const u32 num_components = 3, default_bpp = 24;
 	struct dp_display_private *dp;
 
 	if (!dp_display) {
@@ -918,8 +914,16 @@
 	dp = container_of(dp_display, struct dp_display_private, dp_display);
 
 	mutex_lock(&dp->session_lock);
+	mode->timing.bpp =
+		dp_display->connector->display_info.bpc * num_components;
+	if (!mode->timing.bpp)
+		mode->timing.bpp = default_bpp;
+
+	mode->timing.bpp = dp->panel->get_mode_bpp(dp->panel,
+			mode->timing.bpp, mode->timing.pixel_clk_khz);
+
 	dp->panel->pinfo = mode->timing;
-	dp->panel->init_info(dp->panel);
+	dp->panel->init(dp->panel);
 	mutex_unlock(&dp->session_lock);
 
 	return 0;
@@ -950,6 +954,10 @@
 	}
 
 	rc = dp->ctrl->on(dp->ctrl);
+
+	if (dp->debug->tpg_state)
+		dp->panel->tpg_config(dp->panel, true);
+
 	if (!rc)
 		dp->power_on = true;
 end:
@@ -1054,6 +1062,7 @@
 	}
 
 	dp->ctrl->off(dp->ctrl);
+	dp->panel->deinit(dp->panel);
 
 	dp->power_on = false;
 
@@ -1113,10 +1122,35 @@
 	return 0;
 }
 
-static int dp_display_validate_mode(struct dp_display *dp,
-	struct dp_display_mode *mode)
+static int dp_display_validate_mode(struct dp_display *dp, u32 mode_pclk_khz)
 {
-	return 0;
+	const u32 num_components = 3, default_bpp = 24;
+	struct dp_display_private *dp_display;
+	struct drm_dp_link *link_info;
+	u32 mode_rate_khz = 0, supported_rate_khz = 0, mode_bpp = 0;
+
+	if (!dp || !mode_pclk_khz) {
+		pr_err("invalid params\n");
+		return -EINVAL;
+	}
+
+	dp_display = container_of(dp, struct dp_display_private, dp_display);
+	link_info = &dp_display->panel->link_info;
+
+	mode_bpp = dp->connector->display_info.bpc * num_components;
+	if (!mode_bpp)
+		mode_bpp = default_bpp;
+
+	mode_bpp = dp_display->panel->get_mode_bpp(dp_display->panel,
+			mode_bpp, mode_pclk_khz);
+
+	mode_rate_khz = mode_pclk_khz * mode_bpp;
+	supported_rate_khz = link_info->num_lanes * link_info->rate * 8;
+
+	if (mode_rate_khz > supported_rate_khz)
+		return MODE_BAD;
+
+	return MODE_OK;
 }
 
 static int dp_display_get_modes(struct dp_display *dp,
@@ -1139,36 +1173,20 @@
 	return ret;
 }
 
-static bool dp_display_check_video_test(struct dp_display *dp)
-{
-	struct dp_display_private *dp_display;
 
-	if (!dp) {
-		pr_err("invalid params\n");
-		return false;
+static int dp_display_pre_kickoff(struct dp_display *dp_display,
+			struct drm_msm_ext_hdr_metadata *hdr)
+{
+	struct dp_display_private *dp;
+
+	if (!dp_display) {
+		pr_err("invalid input\n");
+		return -EINVAL;
 	}
 
-	dp_display = container_of(dp, struct dp_display_private, dp_display);
+	dp = container_of(dp_display, struct dp_display_private, dp_display);
 
-	if (dp_display->panel->video_test)
-		return true;
-
-	return false;
-}
-
-static int dp_display_get_test_bpp(struct dp_display *dp)
-{
-	struct dp_display_private *dp_display;
-
-	if (!dp) {
-		pr_err("invalid params\n");
-		return 0;
-	}
-
-	dp_display = container_of(dp, struct dp_display_private, dp_display);
-
-	return dp_link_bit_depth_to_bpp(
-		dp_display->link->test_video.test_bit_depth);
+	return dp->panel->setup_hdr(dp->panel, hdr);
 }
 
 static int dp_display_probe(struct platform_device *pdev)
@@ -1212,8 +1230,7 @@
 	g_dp_display->request_irq   = dp_request_irq;
 	g_dp_display->get_debug     = dp_get_debug;
 	g_dp_display->send_hpd_event    = dp_display_send_hpd_event;
-	g_dp_display->is_video_test = dp_display_check_video_test;
-	g_dp_display->get_test_bpp = dp_display_get_test_bpp;
+	g_dp_display->pre_kickoff   = dp_display_pre_kickoff;
 
 	rc = component_add(&pdev->dev, &dp_display_comp_ops);
 	if (rc) {
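
The reworked mode validation reduces to a bandwidth comparison:
dp_display_validate_mode() rejects a mode when pclk_khz * bpp exceeds
num_lanes * rate * 8, and dp_panel_get_mode_bpp() (further down in this series)
steps the bpp down by 6 (30 -> 24 -> 18) until the mode fits that budget.
A minimal sketch of the check with illustrative numbers, assuming the link rate
is stored in kHz per lane as in struct drm_dp_link:

	#include <linux/types.h>

	/*
	 * 4-lane HBR2 link: 4 * 540000 * 8 = 17,280,000
	 * 4K60 mode (594,000 kHz pclk): 30 bpp -> 17,820,000 (rejected),
	 *                               24 bpp -> 14,256,000 (accepted)
	 */
	static bool dp_mode_fits_link(u32 pclk_khz, u32 bpp,
				      u32 lanes, u32 rate_khz)
	{
		u64 mode_rate_khz = (u64)pclk_khz * bpp;
		u64 link_rate_khz = (u64)lanes * rate_khz * 8;

		return mode_rate_khz <= link_rate_khz;
	}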
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index 5539d61..2d314c7 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -16,6 +16,7 @@
 #define _DP_DISPLAY_H_
 
 #include <drm/drmP.h>
+#include <drm/msm_drm.h>
 
 #include "dp_panel.h"
 
@@ -34,8 +35,7 @@
 
 	int (*set_mode)(struct dp_display *dp_display,
 			struct dp_display_mode *mode);
-	int (*validate_mode)(struct dp_display *dp_display,
-			struct dp_display_mode *mode);
+	int (*validate_mode)(struct dp_display *dp_display, u32 mode_pclk_khz);
 	int (*get_modes)(struct dp_display *dp_display,
 		struct dp_display_mode *dp_mode);
 	int (*prepare)(struct dp_display *dp_display);
@@ -43,8 +43,8 @@
 	int (*request_irq)(struct dp_display *dp_display);
 	struct dp_debug *(*get_debug)(struct dp_display *dp_display);
 	void (*send_hpd_event)(struct dp_display *dp_display);
-	bool (*is_video_test)(struct dp_display *dp_display);
-	int (*get_test_bpp)(struct dp_display *dp_display);
+	int (*pre_kickoff)(struct dp_display *dp_display,
+				struct drm_msm_ext_hdr_metadata *hdr_meta);
 };
 
 int dp_display_get_num_of_displays(void);
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 170734f..1915254 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -29,8 +29,6 @@
 static void convert_to_dp_mode(const struct drm_display_mode *drm_mode,
 			struct dp_display_mode *dp_mode, struct dp_display *dp)
 {
-	const u32 num_components = 3;
-
 	memset(dp_mode, 0, sizeof(*dp_mode));
 
 	dp_mode->timing.h_active = drm_mode->hdisplay;
@@ -49,15 +47,6 @@
 	dp_mode->timing.v_front_porch = drm_mode->vsync_start -
 					 drm_mode->vdisplay;
 
-	if (dp->is_video_test(dp))
-		dp_mode->timing.bpp = dp->get_test_bpp(dp);
-	else
-		dp_mode->timing.bpp = dp->connector->display_info.bpc *
-		num_components;
-
-	if (!dp_mode->timing.bpp)
-		dp_mode->timing.bpp = 24;
-
 	dp_mode->timing.refresh_rate = drm_mode->vrefresh;
 
 	dp_mode->timing.pixel_clk_khz = drm_mode->clock;
@@ -254,7 +243,6 @@
 				  const struct drm_display_mode *mode,
 				  struct drm_display_mode *adjusted_mode)
 {
-	int rc = 0;
 	bool ret = true;
 	struct dp_display_mode dp_mode;
 	struct dp_bridge *bridge;
@@ -270,14 +258,7 @@
 	dp = bridge->display;
 
 	convert_to_dp_mode(mode, &dp_mode, dp);
-
-	rc = dp->validate_mode(dp, &dp_mode);
-	if (rc) {
-		pr_err("[%d] mode is not valid, rc=%d\n", bridge->id, rc);
-		ret = false;
-	} else {
-		convert_to_drm_mode(&dp_mode, adjusted_mode);
-	}
+	convert_to_drm_mode(&dp_mode, adjusted_mode);
 end:
 	return ret;
 }
@@ -292,9 +273,22 @@
 	.mode_set     = dp_bridge_mode_set,
 };
 
+int dp_connector_pre_kickoff(struct drm_connector *connector,
+		void *display,
+		struct msm_display_kickoff_params *params)
+{
+	struct dp_display *dp = display;
+
+	if (!connector || !display || !params) {
+		pr_err("invalid params\n");
+		return -EINVAL;
+	}
+
+	return dp->pre_kickoff(dp, params->hdr_meta);
+}
+
 int dp_connector_post_init(struct drm_connector *connector,
-		void *info,
-		void *display)
+		void *info, void *display, struct msm_mode_info *mode_info)
 {
 	struct dp_display *dp_display = display;
 
@@ -528,5 +522,5 @@
 			mode->picture_aspect_ratio != debug->aspect_ratio))
 		return MODE_BAD;
 
-	return MODE_OK;
+	return dp_disp->validate_mode(dp_disp, mode->clock);
 }
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.h b/drivers/gpu/drm/msm/dp/dp_drm.h
index eb78e71..e856be1 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.h
+++ b/drivers/gpu/drm/msm/dp/dp_drm.h
@@ -32,15 +32,28 @@
 };
 
 /**
+ * dp_connector_pre_kickoff - callback to perform pre kickoff initialization
+ * @connector: Pointer to drm connector structure
+ * @display: Pointer to private display handle
+ * @params: Pointer to kickoff parameters
+ * Returns: Zero on success
+ */
+int dp_connector_pre_kickoff(struct drm_connector *connector,
+		void *display,
+		struct msm_display_kickoff_params *params);
+
+/**
  * dp_connector_post_init - callback to perform additional initialization steps
  * @connector: Pointer to drm connector structure
  * @info: Pointer to sde connector info structure
  * @display: Pointer to private display handle
+ * @mode_info: Pointer to mode info structure
  * Returns: Zero on success
  */
 int dp_connector_post_init(struct drm_connector *connector,
 		void *info,
-		void *display);
+		void *display,
+		struct msm_mode_info *mode_info);
 
 /**
  * dp_connector_detect - callback to determine if connector is connected
diff --git a/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c b/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c
index 016e1b8..0e1490f 100644
--- a/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c
+++ b/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c
@@ -234,7 +234,7 @@
 
 static void dp_hdcp2p2_set_interrupts(struct dp_hdcp2p2_ctrl *ctrl, bool enable)
 {
-	void __iomem *base = ctrl->init_data.core_io->base;
+	void __iomem *base = ctrl->init_data.dp_ahb->base;
 	struct dp_hdcp2p2_interrupts *intr = ctrl->intr;
 
 	while (intr && intr->reg) {
@@ -740,13 +740,13 @@
 	struct dp_hdcp2p2_interrupts *intr;
 	u32 hdcp_int_val = 0;
 
-	if (!ctrl || !ctrl->init_data.core_io) {
+	if (!ctrl || !ctrl->init_data.dp_ahb) {
 		pr_err("invalid input\n");
 		rc = -EINVAL;
 		goto end;
 	}
 
-	io = ctrl->init_data.core_io;
+	io = ctrl->init_data.dp_ahb;
 	intr = ctrl->intr;
 
 	while (intr && intr->reg) {
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index 0cf488d..84ba4ef 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -680,7 +680,8 @@
 	case DP_TEST_PHY_PATTERN_SYMBOL_ERR_MEASUREMENT_CNT:
 	case DP_TEST_PHY_PATTERN_PRBS7:
 	case DP_TEST_PHY_PATTERN_80_BIT_CUSTOM_PATTERN:
-	case DP_TEST_PHY_PATTERN_HBR2_CTS_EYE_PATTERN:
+	case DP_TEST_PHY_PATTERN_CP2520_PATTERN_1:
+	case DP_TEST_PHY_PATTERN_CP2520_PATTERN_3:
 		return true;
 	default:
 		return false;
diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h
index b1d9249..4bb7be5 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.h
+++ b/drivers/gpu/drm/msm/dp/dp_link.h
@@ -121,9 +121,12 @@
 	case DP_TEST_PHY_PATTERN_80_BIT_CUSTOM_PATTERN:
 		return DP_LINK_ENUM_STR(
 			DP_TEST_PHY_PATTERN_80_BIT_CUSTOM_PATTERN);
-	case DP_TEST_PHY_PATTERN_HBR2_CTS_EYE_PATTERN:
-		return DP_LINK_ENUM_STR(
-			DP_TEST_PHY_PATTERN_HBR2_CTS_EYE_PATTERN);
+	case DP_TEST_PHY_PATTERN_CP2520_PATTERN_1:
+		return DP_LINK_ENUM_STR(DP_TEST_PHY_PATTERN_CP2520_PATTERN_1);
+	case DP_TEST_PHY_PATTERN_CP2520_PATTERN_2:
+		return DP_LINK_ENUM_STR(DP_TEST_PHY_PATTERN_CP2520_PATTERN_2);
+	case DP_TEST_PHY_PATTERN_CP2520_PATTERN_3:
+		return DP_LINK_ENUM_STR(DP_TEST_PHY_PATTERN_CP2520_PATTERN_3);
 	default:
 		return "unknown";
 	}
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index fc3fb56..041581d 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -30,6 +30,9 @@
 	struct dp_link *link;
 	struct dp_catalog_panel *catalog;
 	bool aux_cfg_update_done;
+	bool custom_edid;
+	bool custom_dpcd;
+	bool panel_on;
 };
 
 static const struct dp_panel_info fail_safe = {
@@ -69,12 +72,17 @@
 	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
 	link_info = &dp_panel->link_info;
 
-	rlen = drm_dp_dpcd_read(panel->aux->drm_aux, DP_DPCD_REV,
-		dpcd, (DP_RECEIVER_CAP_SIZE + 1));
-	if (rlen < (DP_RECEIVER_CAP_SIZE + 1)) {
-		pr_err("dpcd read failed, rlen=%d\n", rlen);
-		rc = -EINVAL;
-		goto end;
+	if (!panel->custom_dpcd) {
+		rlen = drm_dp_dpcd_read(panel->aux->drm_aux, DP_DPCD_REV,
+			dp_panel->dpcd, (DP_RECEIVER_CAP_SIZE + 1));
+		if (rlen < (DP_RECEIVER_CAP_SIZE + 1)) {
+			pr_err("dpcd read failed, rlen=%d\n", rlen);
+			rc = -EINVAL;
+			goto end;
+		}
+
+		print_hex_dump(KERN_DEBUG, "[drm-dp] SINK DPCD: ",
+			DUMP_PREFIX_NONE, 8, 1, dp_panel->dpcd, rlen, false);
 	}
 
 	link_info->revision = dp_panel->dpcd[DP_DPCD_REV];
@@ -133,6 +141,52 @@
 	link_info->num_lanes = default_num_lanes;
 	pr_debug("link_rate=%d num_lanes=%d\n",
 		link_info->rate, link_info->num_lanes);
+
+	return 0;
+}
+
+static int dp_panel_set_edid(struct dp_panel *dp_panel, u8 *edid)
+{
+	struct dp_panel_private *panel;
+
+	if (!dp_panel) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+	if (edid) {
+		dp_panel->edid_ctrl->edid = (struct edid *)edid;
+		panel->custom_edid = true;
+	} else {
+		panel->custom_edid = false;
+	}
+
+	return 0;
+}
+
+static int dp_panel_set_dpcd(struct dp_panel *dp_panel, u8 *dpcd)
+{
+	struct dp_panel_private *panel;
+	u8 *dp_dpcd;
+
+	if (!dp_panel) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	dp_dpcd = dp_panel->dpcd;
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+	if (dpcd) {
+		memcpy(dp_dpcd, dpcd, DP_RECEIVER_CAP_SIZE + 1);
+		panel->custom_dpcd = true;
+	} else {
+		panel->custom_dpcd = false;
+	}
+
 	return 0;
 }
 
@@ -150,6 +204,11 @@
 
 	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
 
+	if (panel->custom_edid) {
+		pr_debug("skip edid read in debug mode\n");
+		return 0;
+	}
+
 	do {
 		sde_get_edid(connector, &panel->aux->drm_aux->ddc,
 			(void **)&dp_panel->edid_ctrl);
@@ -159,6 +218,12 @@
 			panel->aux->reconfig(panel->aux);
 			panel->aux_cfg_update_done = true;
 		} else {
+			u8 *buf = (u8 *)dp_panel->edid_ctrl->edid;
+			u32 size = buf[0x7F] ? 256 : 128;
+
+			print_hex_dump(KERN_DEBUG, "[drm-dp] SINK EDID: ",
+				DUMP_PREFIX_NONE, 16, 1, buf, size, false);
+
 			return 0;
 		}
 	} while (retry_cnt < max_retry);
@@ -204,32 +269,48 @@
 	return 0;
 }
 
-static u32 dp_panel_get_max_pclk(struct dp_panel *dp_panel)
+static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel,
+		u32 mode_edid_bpp, u32 mode_pclk_khz)
 {
 	struct drm_dp_link *link_info;
-	const u8 num_components = 3;
-	u32 bpc = 0, bpp = 0, max_data_rate_khz = 0, max_pclk_rate_khz = 0;
+	const u32 max_supported_bpp = 30, min_supported_bpp = 18;
+	u32 bpp = 0, data_rate_khz = 0;
 
-	if (!dp_panel) {
+	bpp = min_t(u32, mode_edid_bpp, max_supported_bpp);
+
+	link_info = &dp_panel->link_info;
+	data_rate_khz = link_info->num_lanes * link_info->rate * 8;
+
+	while (bpp > min_supported_bpp) {
+		if (mode_pclk_khz * bpp <= data_rate_khz)
+			break;
+		bpp -= 6;
+	}
+
+	return bpp;
+}
+
+static u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel,
+		u32 mode_edid_bpp, u32 mode_pclk_khz)
+{
+	struct dp_panel_private *panel;
+	u32 bpp = mode_edid_bpp;
+
+	if (!dp_panel || !mode_edid_bpp || !mode_pclk_khz) {
 		pr_err("invalid input\n");
 		return 0;
 	}
 
-	link_info = &dp_panel->link_info;
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
 
-	bpc = sde_get_sink_bpc(dp_panel->edid_ctrl);
-	bpp = bpc * num_components;
-	if (!bpp)
-		bpp = DP_PANEL_DEFAULT_BPP;
+	if (dp_panel->video_test)
+		bpp = dp_link_bit_depth_to_bpp(
+				panel->link->test_video.test_bit_depth);
+	else
+		bpp = dp_panel_get_supported_bpp(dp_panel, mode_edid_bpp,
+				mode_pclk_khz);
 
-	max_data_rate_khz = (link_info->num_lanes * link_info->rate * 8);
-	max_pclk_rate_khz = max_data_rate_khz / bpp;
-
-	pr_debug("bpp=%d, max_lane_cnt=%d\n", bpp, link_info->num_lanes);
-	pr_debug("max_data_rate=%dKHz, max_pclk_rate=%dKHz\n",
-		max_data_rate_khz, max_pclk_rate_khz);
-
-	return max_pclk_rate_khz;
+	return bpp;
 }
 
 static void dp_panel_set_test_mode(struct dp_panel_private *panel,
@@ -320,6 +401,58 @@
 	}
 }
 
+static void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable)
+{
+	u32 hsync_start_x, hsync_end_x;
+	struct dp_catalog_panel *catalog;
+	struct dp_panel_private *panel;
+	struct dp_panel_info *pinfo;
+
+	if (!dp_panel) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+	catalog = panel->catalog;
+	pinfo = &panel->dp_panel.pinfo;
+
+	if (!panel->panel_on) {
+		pr_debug("DP panel not enabled, handle TPG on next panel on\n");
+		return;
+	}
+
+	if (!enable) {
+		panel->catalog->tpg_config(catalog, false);
+		return;
+	}
+
+	/* TPG config */
+	catalog->hsync_period = pinfo->h_sync_width + pinfo->h_back_porch +
+			pinfo->h_active + pinfo->h_front_porch;
+	catalog->vsync_period = pinfo->v_sync_width + pinfo->v_back_porch +
+			pinfo->v_active + pinfo->v_front_porch;
+
+	catalog->display_v_start = ((pinfo->v_sync_width +
+			pinfo->v_back_porch) * catalog->hsync_period);
+	catalog->display_v_end = ((catalog->vsync_period -
+			pinfo->v_front_porch) * catalog->hsync_period) - 1;
+
+	catalog->display_v_start += pinfo->h_sync_width + pinfo->h_back_porch;
+	catalog->display_v_end -= pinfo->h_front_porch;
+
+	hsync_start_x = pinfo->h_back_porch + pinfo->h_sync_width;
+	hsync_end_x = catalog->hsync_period - pinfo->h_front_porch - 1;
+
+	catalog->v_sync_width = pinfo->v_sync_width;
+
+	catalog->hsync_ctl = (catalog->hsync_period << 16) |
+			pinfo->h_sync_width;
+	catalog->display_hctl = (hsync_end_x << 16) | hsync_start_x;
+
+	panel->catalog->tpg_config(catalog, true);
+}
+
 static int dp_panel_timing_cfg(struct dp_panel *dp_panel)
 {
 	int rc = 0;
@@ -379,38 +512,27 @@
 	catalog->dp_active = data;
 
 	panel->catalog->timing_cfg(catalog);
+	panel->panel_on = true;
 end:
 	return rc;
 }
 
-static int dp_panel_edid_register(struct dp_panel *dp_panel)
+static int dp_panel_edid_register(struct dp_panel_private *panel)
 {
 	int rc = 0;
 
-	if (!dp_panel) {
-		pr_err("invalid input\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	dp_panel->edid_ctrl = sde_edid_init();
-	if (!dp_panel->edid_ctrl) {
+	panel->dp_panel.edid_ctrl = sde_edid_init();
+	if (!panel->dp_panel.edid_ctrl) {
 		pr_err("sde edid init for DP failed\n");
 		rc = -ENOMEM;
-		goto end;
 	}
-end:
+
 	return rc;
 }
 
-static void dp_panel_edid_deregister(struct dp_panel *dp_panel)
+static void dp_panel_edid_deregister(struct dp_panel_private *panel)
 {
-	if (!dp_panel) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	sde_edid_deinit((void **)&dp_panel->edid_ctrl);
+	sde_edid_deinit((void **)&panel->dp_panel.edid_ctrl);
 }
 
 static int dp_panel_init_panel_info(struct dp_panel *dp_panel)
@@ -445,13 +567,31 @@
 	pr_info("bpp = %d\n", pinfo->bpp);
 	pr_info("active low (h|v)=(%d|%d)\n", pinfo->h_active_low,
 		pinfo->v_active_low);
-
-	pinfo->bpp = max_t(u32, 18, min_t(u32, pinfo->bpp, 30));
-	pr_info("updated bpp = %d\n", pinfo->bpp);
 end:
 	return rc;
 }
 
+static int dp_panel_deinit_panel_info(struct dp_panel *dp_panel)
+{
+	int rc = 0;
+	struct dp_panel_private *panel;
+
+	if (!dp_panel) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+	if (!panel->custom_edid)
+		sde_free_edid((void **)&dp_panel->edid_ctrl);
+
+	memset(&dp_panel->pinfo, 0, sizeof(dp_panel->pinfo));
+	panel->panel_on = false;
+
+	return rc;
+}
+
 static u32 dp_panel_get_min_req_link_rate(struct dp_panel *dp_panel)
 {
 	const u32 encoding_factx10 = 8;
@@ -478,6 +618,87 @@
 	return min_link_rate_khz;
 }
 
+enum dp_panel_hdr_pixel_encoding {
+	RGB,
+	YCbCr444,
+	YCbCr422,
+	YCbCr420,
+	YONLY,
+	RAW,
+};
+
+enum dp_panel_hdr_rgb_colorimetry {
+	sRGB,
+	RGB_WIDE_GAMUT_FIXED_POINT,
+	RGB_WIDE_GAMUT_FLOATING_POINT,
+	ADOBERGB,
+	DCI_P3,
+	CUSTOM_COLOR_PROFILE,
+	ITU_R_BT_2020_RGB,
+};
+
+enum dp_panel_hdr_dynamic_range {
+	VESA,
+	CEA,
+};
+
+enum dp_panel_hdr_content_type {
+	NOT_DEFINED,
+	GRAPHICS,
+	PHOTO,
+	VIDEO,
+	GAME,
+};
+
+static int dp_panel_setup_hdr(struct dp_panel *dp_panel,
+		struct drm_msm_ext_hdr_metadata *hdr_meta)
+{
+	int rc = 0;
+	struct dp_panel_private *panel;
+	struct dp_catalog_hdr_data *hdr;
+
+	if (!hdr_meta || !hdr_meta->hdr_state)
+		goto end;
+
+	if (!dp_panel) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+	hdr = &panel->catalog->hdr_data;
+
+	hdr->vsc_header_byte0 = 0x00;
+	hdr->vsc_header_byte1 = 0x07;
+	hdr->vsc_header_byte2 = 0x05;
+	hdr->vsc_header_byte3 = 0x13;
+
+	/* VSC SDP Payload for DB16 */
+	hdr->pixel_encoding = RGB;
+	hdr->colorimetry = ITU_R_BT_2020_RGB;
+
+	/* VSC SDP Payload for DB17 */
+	hdr->dynamic_range = CEA;
+	hdr->bpc = 10;
+
+	/* VSC SDP Payload for DB18 */
+	hdr->content_type = GRAPHICS;
+
+	hdr->vscext_header_byte0 = 0x00;
+	hdr->vscext_header_byte1 = 0x87;
+	hdr->vscext_header_byte2 = 0x1D;
+	hdr->vscext_header_byte3 = 0x13 << 2;
+
+	hdr->version = 0x01;
+
+	memcpy(&hdr->hdr_meta, hdr_meta, sizeof(hdr->hdr_meta));
+
+	panel->catalog->config_hdr(panel->catalog);
+end:
+	return rc;
+}
+
 struct dp_panel *dp_panel_get(struct dp_panel_in *in)
 {
 	int rc = 0;
@@ -505,15 +726,20 @@
 	panel->aux_cfg_update_done = false;
 	dp_panel->max_bw_code = DP_LINK_BW_8_1;
 
-	dp_panel->sde_edid_register = dp_panel_edid_register;
-	dp_panel->sde_edid_deregister = dp_panel_edid_deregister;
-	dp_panel->init_info = dp_panel_init_panel_info;
+	dp_panel->init = dp_panel_init_panel_info;
+	dp_panel->deinit = dp_panel_deinit_panel_info;
 	dp_panel->timing_cfg = dp_panel_timing_cfg;
 	dp_panel->read_sink_caps = dp_panel_read_sink_caps;
 	dp_panel->get_min_req_link_rate = dp_panel_get_min_req_link_rate;
-	dp_panel->get_max_pclk = dp_panel_get_max_pclk;
+	dp_panel->get_mode_bpp = dp_panel_get_mode_bpp;
 	dp_panel->get_modes = dp_panel_get_modes;
 	dp_panel->handle_sink_request = dp_panel_handle_sink_request;
+	dp_panel->set_edid = dp_panel_set_edid;
+	dp_panel->set_dpcd = dp_panel_set_dpcd;
+	dp_panel->tpg_config = dp_panel_tpg_config;
+
+	dp_panel_edid_register(panel);
+	dp_panel->setup_hdr = dp_panel_setup_hdr;
 
 	return dp_panel;
 error:
@@ -529,5 +755,6 @@
 
 	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
 
+	dp_panel_edid_deregister(panel);
 	devm_kfree(panel->dev, panel);
 }
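
Worked example (illustrative only) of the TPG timing math added in
dp_panel_tpg_config(), using a standard 1080p60 timing (horizontal
sync/back/active/front = 44/148/1920/88, vertical = 5/36/1080/4). Vertical
positions are expressed in pixels from frame start, i.e. line count times
hsync_period, which is what the registers programmed by
dp_catalog_panel_tpg_cfg() expect:

	/*
	 * hsync_period    = 44 + 148 + 1920 + 88          = 2200
	 * vsync_period    = 5 + 36 + 1080 + 4             = 1125
	 * display_v_start = (5 + 36) * 2200 + (44 + 148)  = 90392
	 * display_v_end   = (1125 - 4) * 2200 - 1 - 88    = 2466111
	 * hsync_ctl       = (2200 << 16) | 44
	 * display_hctl    = ((2200 - 88 - 1) << 16) | (148 + 44)
	 */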
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index 01a978a..6cc3f4d 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -15,6 +15,8 @@
 #ifndef _DP_PANEL_H_
 #define _DP_PANEL_H_
 
+#include <drm/msm_drm.h>
+
 #include "dp_aux.h"
 #include "dp_link.h"
 #include "dp_usbpd.h"
@@ -59,12 +61,11 @@
 
 struct dp_panel {
 	/* dpcd raw data */
-	u8 dpcd[DP_RECEIVER_CAP_SIZE];
+	u8 dpcd[DP_RECEIVER_CAP_SIZE + 1];
 	u8 ds_ports[DP_MAX_DOWNSTREAM_PORTS];
 
 	struct drm_dp_link link_info;
 	struct sde_edid_ctrl *edid_ctrl;
-	struct drm_connector *connector;
 	struct dp_panel_info pinfo;
 	bool video_test;
 
@@ -74,17 +75,22 @@
 	/* debug */
 	u32 max_bw_code;
 
-	int (*sde_edid_register)(struct dp_panel *dp_panel);
-	void (*sde_edid_deregister)(struct dp_panel *dp_panel);
-	int (*init_info)(struct dp_panel *dp_panel);
+	int (*init)(struct dp_panel *dp_panel);
+	int (*deinit)(struct dp_panel *dp_panel);
 	int (*timing_cfg)(struct dp_panel *dp_panel);
 	int (*read_sink_caps)(struct dp_panel *dp_panel,
 		struct drm_connector *connector);
 	u32 (*get_min_req_link_rate)(struct dp_panel *dp_panel);
-	u32 (*get_max_pclk)(struct dp_panel *dp_panel);
+	u32 (*get_mode_bpp)(struct dp_panel *dp_panel, u32 mode_max_bpp,
+			u32 mode_pclk_khz);
 	int (*get_modes)(struct dp_panel *dp_panel,
 		struct drm_connector *connector, struct dp_display_mode *mode);
 	void (*handle_sink_request)(struct dp_panel *dp_panel);
+	int (*set_edid)(struct dp_panel *dp_panel, u8 *edid);
+	int (*set_dpcd)(struct dp_panel *dp_panel, u8 *dpcd);
+	int (*setup_hdr)(struct dp_panel *dp_panel,
+		struct drm_msm_ext_hdr_metadata *hdr_meta);
+	void (*tpg_config)(struct dp_panel *dp_panel, bool enable);
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.c b/drivers/gpu/drm/msm/dp/dp_parser.c
index f7201ec..c112cdc 100644
--- a/drivers/gpu/drm/msm/dp/dp_parser.c
+++ b/drivers/gpu/drm/msm/dp/dp_parser.c
@@ -22,7 +22,10 @@
 {
 	struct dp_io *io = &parser->io;
 
-	msm_dss_iounmap(&io->ctrl_io);
+	msm_dss_iounmap(&io->dp_ahb);
+	msm_dss_iounmap(&io->dp_aux);
+	msm_dss_iounmap(&io->dp_link);
+	msm_dss_iounmap(&io->dp_p0);
 	msm_dss_iounmap(&io->phy_io);
 	msm_dss_iounmap(&io->ln_tx0_io);
 	msm_dss_iounmap(&io->ln_tx0_io);
@@ -47,7 +50,25 @@
 		goto err;
 	}
 
-	rc = msm_dss_ioremap_byname(pdev, &io->ctrl_io, "dp_ctrl");
+	rc = msm_dss_ioremap_byname(pdev, &io->dp_ahb, "dp_ahb");
+	if (rc) {
+		pr_err("unable to remap dp io resources\n");
+		goto err;
+	}
+
+	rc = msm_dss_ioremap_byname(pdev, &io->dp_aux, "dp_aux");
+	if (rc) {
+		pr_err("unable to remap dp io resources\n");
+		goto err;
+	}
+
+	rc = msm_dss_ioremap_byname(pdev, &io->dp_link, "dp_link");
+	if (rc) {
+		pr_err("unable to remap dp io resources\n");
+		goto err;
+	}
+
+	rc = msm_dss_ioremap_byname(pdev, &io->dp_p0, "dp_p0");
 	if (rc) {
 		pr_err("unable to remap dp io resources\n");
 		goto err;
@@ -441,6 +462,22 @@
 	mp->num_clk = 0;
 }
 
+static void dp_parser_put_gpio_data(struct device *dev,
+	struct dss_module_power *mp)
+{
+	if (!mp) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	if (mp->gpio_config) {
+		devm_kfree(dev, mp->gpio_config);
+		mp->gpio_config = NULL;
+	}
+
+	mp->num_gpio = 0;
+}
+
 static int dp_parser_init_clk_data(struct dp_parser *parser)
 {
 	int num_clk = 0, i = 0, rc = 0;
@@ -637,11 +674,9 @@
 	power = parser->mp;
 
 	for (i = 0; i < DP_MAX_PM; i++) {
-		struct dss_module_power *mp = &power[i];
-
-		devm_kfree(&parser->pdev->dev, mp->clk_config);
-		devm_kfree(&parser->pdev->dev, mp->vreg_config);
-		devm_kfree(&parser->pdev->dev, mp->gpio_config);
+		dp_parser_put_clk_data(&parser->pdev->dev, &power[i]);
+		dp_parser_put_vreg_data(&parser->pdev->dev, &power[i]);
+		dp_parser_put_gpio_data(&parser->pdev->dev, &power[i]);
 	}
 
 	devm_kfree(&parser->pdev->dev, parser);
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.h b/drivers/gpu/drm/msm/dp/dp_parser.h
index 76a72a2..72da381 100644
--- a/drivers/gpu/drm/msm/dp/dp_parser.h
+++ b/drivers/gpu/drm/msm/dp/dp_parser.h
@@ -58,7 +58,10 @@
 /**
  * struct dp_ctrl_resource - controller's IO related data
  *
- * @ctrl_io: controller's mapped memory address
+ * @dp_ahb: controller's ahb mapped memory address
+ * @dp_aux: controller's aux mapped memory address
+ * @dp_link: controller's link mapped memory address
+ * @dp_p0: controller's p0 mapped memory address
  * @phy_io: phy's mapped memory address
  * @ln_tx0_io: USB-DP lane TX0's mapped memory address
  * @ln_tx1_io: USB-DP lane TX1's mapped memory address
@@ -70,6 +73,10 @@
  */
 struct dp_io {
 	struct dss_io_data ctrl_io;
+	struct dss_io_data dp_ahb;
+	struct dss_io_data dp_aux;
+	struct dss_io_data dp_link;
+	struct dss_io_data dp_p0;
 	struct dss_io_data phy_io;
 	struct dss_io_data ln_tx0_io;
 	struct dss_io_data ln_tx1_io;
diff --git a/drivers/gpu/drm/msm/dp/dp_reg.h b/drivers/gpu/drm/msm/dp/dp_reg.h
index 25d035d..4e2194e 100644
--- a/drivers/gpu/drm/msm/dp/dp_reg.h
+++ b/drivers/gpu/drm/msm/dp/dp_reg.h
@@ -25,137 +25,158 @@
 #define DP_INTR_STATUS2				(0x00000024)
 #define DP_INTR_STATUS3				(0x00000028)
 
-#define DP_DP_HPD_CTRL				(0x00000200)
-#define DP_DP_HPD_INT_STATUS			(0x00000204)
-#define DP_DP_HPD_INT_ACK			(0x00000208)
-#define DP_DP_HPD_INT_MASK			(0x0000020C)
-#define DP_DP_HPD_REFTIMER			(0x00000218)
-#define DP_DP_HPD_EVENT_TIME_0			(0x0000021C)
-#define DP_DP_HPD_EVENT_TIME_1			(0x00000220)
-#define DP_AUX_CTRL				(0x00000230)
-#define DP_AUX_DATA				(0x00000234)
-#define DP_AUX_TRANS_CTRL			(0x00000238)
-#define DP_TIMEOUT_COUNT			(0x0000023C)
-#define DP_AUX_LIMITS				(0x00000240)
-#define DP_AUX_STATUS				(0x00000244)
+#define DP_DP_HPD_CTRL				(0x00000000)
+#define DP_DP_HPD_INT_STATUS			(0x00000004)
+#define DP_DP_HPD_INT_ACK			(0x00000008)
+#define DP_DP_HPD_INT_MASK			(0x0000000C)
+#define DP_DP_HPD_REFTIMER			(0x00000018)
+#define DP_DP_HPD_EVENT_TIME_0			(0x0000001C)
+#define DP_DP_HPD_EVENT_TIME_1			(0x00000020)
+#define DP_AUX_CTRL				(0x00000030)
+#define DP_AUX_DATA				(0x00000034)
+#define DP_AUX_TRANS_CTRL			(0x00000038)
+#define DP_TIMEOUT_COUNT			(0x0000003C)
+#define DP_AUX_LIMITS				(0x00000040)
+#define DP_AUX_STATUS				(0x00000044)
 
 #define DP_DPCD_CP_IRQ				(0x201)
 #define DP_DPCD_RXSTATUS			(0x69493)
 
-#define DP_INTERRUPT_TRANS_NUM			(0x000002A0)
+#define DP_INTERRUPT_TRANS_NUM			(0x000000A0)
 
-#define DP_MAINLINK_CTRL			(0x00000400)
-#define DP_STATE_CTRL				(0x00000404)
-#define DP_CONFIGURATION_CTRL			(0x00000408)
-#define DP_SOFTWARE_MVID			(0x00000410)
-#define DP_SOFTWARE_NVID			(0x00000418)
-#define DP_TOTAL_HOR_VER			(0x0000041C)
-#define DP_START_HOR_VER_FROM_SYNC		(0x00000420)
-#define DP_HSYNC_VSYNC_WIDTH_POLARITY		(0x00000424)
-#define DP_ACTIVE_HOR_VER			(0x00000428)
-#define DP_MISC1_MISC0				(0x0000042C)
-#define DP_VALID_BOUNDARY			(0x00000430)
-#define DP_VALID_BOUNDARY_2			(0x00000434)
-#define DP_LOGICAL2PHYSCIAL_LANE_MAPPING	(0x00000438)
+#define DP_MAINLINK_CTRL			(0x00000000)
+#define DP_STATE_CTRL				(0x00000004)
+#define DP_CONFIGURATION_CTRL			(0x00000008)
+#define DP_SOFTWARE_MVID			(0x00000010)
+#define DP_SOFTWARE_NVID			(0x00000018)
+#define DP_TOTAL_HOR_VER			(0x0000001C)
+#define DP_START_HOR_VER_FROM_SYNC		(0x00000020)
+#define DP_HSYNC_VSYNC_WIDTH_POLARITY		(0x00000024)
+#define DP_ACTIVE_HOR_VER			(0x00000028)
+#define DP_MISC1_MISC0				(0x0000002C)
+#define DP_VALID_BOUNDARY			(0x00000030)
+#define DP_VALID_BOUNDARY_2			(0x00000034)
+#define DP_LOGICAL2PHYSICAL_LANE_MAPPING	(0x00000038)
 
-#define DP_MAINLINK_READY			(0x00000440)
-#define DP_MAINLINK_LEVELS			(0x00000444)
-#define DP_TU					(0x0000044C)
+#define DP_MAINLINK_READY			(0x00000040)
+#define DP_MAINLINK_LEVELS			(0x00000044)
+#define DP_TU					(0x0000004C)
 
-#define DP_HBR2_COMPLIANCE_SCRAMBLER_RESET	(0x00000454)
-#define DP_TEST_80BIT_CUSTOM_PATTERN_REG0	(0x000004C0)
-#define DP_TEST_80BIT_CUSTOM_PATTERN_REG1	(0x000004C4)
-#define DP_TEST_80BIT_CUSTOM_PATTERN_REG2	(0x000004C8)
+#define DP_HBR2_COMPLIANCE_SCRAMBLER_RESET	(0x00000054)
+#define DP_TEST_80BIT_CUSTOM_PATTERN_REG0	(0x000000C0)
+#define DP_TEST_80BIT_CUSTOM_PATTERN_REG1	(0x000000C4)
+#define DP_TEST_80BIT_CUSTOM_PATTERN_REG2	(0x000000C8)
 
-#define MMSS_DP_MISC1_MISC0			(0x0000042C)
-#define MMSS_DP_AUDIO_TIMING_GEN		(0x00000480)
-#define MMSS_DP_AUDIO_TIMING_RBR_32		(0x00000484)
-#define MMSS_DP_AUDIO_TIMING_HBR_32		(0x00000488)
-#define MMSS_DP_AUDIO_TIMING_RBR_44		(0x0000048C)
-#define MMSS_DP_AUDIO_TIMING_HBR_44		(0x00000490)
-#define MMSS_DP_AUDIO_TIMING_RBR_48		(0x00000494)
-#define MMSS_DP_AUDIO_TIMING_HBR_48		(0x00000498)
+#define MMSS_DP_MISC1_MISC0			(0x0000002C)
+#define MMSS_DP_AUDIO_TIMING_GEN		(0x00000080)
+#define MMSS_DP_AUDIO_TIMING_RBR_32		(0x00000084)
+#define MMSS_DP_AUDIO_TIMING_HBR_32		(0x00000088)
+#define MMSS_DP_AUDIO_TIMING_RBR_44		(0x0000008C)
+#define MMSS_DP_AUDIO_TIMING_HBR_44		(0x00000090)
+#define MMSS_DP_AUDIO_TIMING_RBR_48		(0x00000094)
+#define MMSS_DP_AUDIO_TIMING_HBR_48		(0x00000098)
 
-#define MMSS_DP_PSR_CRC_RG			(0x00000554)
-#define MMSS_DP_PSR_CRC_B			(0x00000558)
+#define MMSS_DP_PSR_CRC_RG			(0x00000154)
+#define MMSS_DP_PSR_CRC_B			(0x00000158)
 
-#define DP_COMPRESSION_MODE_CTRL		(0x00000580)
+#define DP_COMPRESSION_MODE_CTRL		(0x00000180)
 
-#define MMSS_DP_AUDIO_CFG			(0x00000600)
-#define MMSS_DP_AUDIO_STATUS			(0x00000604)
-#define MMSS_DP_AUDIO_PKT_CTRL			(0x00000608)
-#define MMSS_DP_AUDIO_PKT_CTRL2			(0x0000060C)
-#define MMSS_DP_AUDIO_ACR_CTRL			(0x00000610)
-#define MMSS_DP_AUDIO_CTRL_RESET		(0x00000614)
+#define MMSS_DP_AUDIO_CFG			(0x00000200)
+#define MMSS_DP_AUDIO_STATUS			(0x00000204)
+#define MMSS_DP_AUDIO_PKT_CTRL			(0x00000208)
+#define MMSS_DP_AUDIO_PKT_CTRL2			(0x0000020C)
+#define MMSS_DP_AUDIO_ACR_CTRL			(0x00000210)
+#define MMSS_DP_AUDIO_CTRL_RESET		(0x00000214)
 
-#define MMSS_DP_SDP_CFG				(0x00000628)
-#define MMSS_DP_SDP_CFG2			(0x0000062C)
-#define MMSS_DP_AUDIO_TIMESTAMP_0		(0x00000630)
-#define MMSS_DP_AUDIO_TIMESTAMP_1		(0x00000634)
+#define MMSS_DP_SDP_CFG				(0x00000228)
+#define MMSS_DP_SDP_CFG2			(0x0000022C)
+#define MMSS_DP_SDP_CFG3			(0x0000024C)
+#define MMSS_DP_AUDIO_TIMESTAMP_0		(0x00000230)
+#define MMSS_DP_AUDIO_TIMESTAMP_1		(0x00000234)
 
-#define MMSS_DP_AUDIO_STREAM_0			(0x00000640)
-#define MMSS_DP_AUDIO_STREAM_1			(0x00000644)
+#define MMSS_DP_AUDIO_STREAM_0			(0x00000240)
+#define MMSS_DP_AUDIO_STREAM_1			(0x00000244)
 
-#define MMSS_DP_EXTENSION_0			(0x00000650)
-#define MMSS_DP_EXTENSION_1			(0x00000654)
-#define MMSS_DP_EXTENSION_2			(0x00000658)
-#define MMSS_DP_EXTENSION_3			(0x0000065C)
-#define MMSS_DP_EXTENSION_4			(0x00000660)
-#define MMSS_DP_EXTENSION_5			(0x00000664)
-#define MMSS_DP_EXTENSION_6			(0x00000668)
-#define MMSS_DP_EXTENSION_7			(0x0000066C)
-#define MMSS_DP_EXTENSION_8			(0x00000670)
-#define MMSS_DP_EXTENSION_9			(0x00000674)
-#define MMSS_DP_AUDIO_COPYMANAGEMENT_0		(0x00000678)
-#define MMSS_DP_AUDIO_COPYMANAGEMENT_1		(0x0000067C)
-#define MMSS_DP_AUDIO_COPYMANAGEMENT_2		(0x00000680)
-#define MMSS_DP_AUDIO_COPYMANAGEMENT_3		(0x00000684)
-#define MMSS_DP_AUDIO_COPYMANAGEMENT_4		(0x00000688)
-#define MMSS_DP_AUDIO_COPYMANAGEMENT_5		(0x0000068C)
-#define MMSS_DP_AUDIO_ISRC_0			(0x00000690)
-#define MMSS_DP_AUDIO_ISRC_1			(0x00000694)
-#define MMSS_DP_AUDIO_ISRC_2			(0x00000698)
-#define MMSS_DP_AUDIO_ISRC_3			(0x0000069C)
-#define MMSS_DP_AUDIO_ISRC_4			(0x000006A0)
-#define MMSS_DP_AUDIO_ISRC_5			(0x000006A4)
-#define MMSS_DP_AUDIO_INFOFRAME_0		(0x000006A8)
-#define MMSS_DP_AUDIO_INFOFRAME_1		(0x000006AC)
-#define MMSS_DP_AUDIO_INFOFRAME_2		(0x000006B0)
+#define MMSS_DP_EXTENSION_0			(0x00000250)
+#define MMSS_DP_EXTENSION_1			(0x00000254)
+#define MMSS_DP_EXTENSION_2			(0x00000258)
+#define MMSS_DP_EXTENSION_3			(0x0000025C)
+#define MMSS_DP_EXTENSION_4			(0x00000260)
+#define MMSS_DP_EXTENSION_5			(0x00000264)
+#define MMSS_DP_EXTENSION_6			(0x00000268)
+#define MMSS_DP_EXTENSION_7			(0x0000026C)
+#define MMSS_DP_EXTENSION_8			(0x00000270)
+#define MMSS_DP_EXTENSION_9			(0x00000274)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_0		(0x00000278)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_1		(0x0000027C)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_2		(0x00000280)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_3		(0x00000284)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_4		(0x00000288)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_5		(0x0000028C)
+#define MMSS_DP_AUDIO_ISRC_0			(0x00000290)
+#define MMSS_DP_AUDIO_ISRC_1			(0x00000294)
+#define MMSS_DP_AUDIO_ISRC_2			(0x00000298)
+#define MMSS_DP_AUDIO_ISRC_3			(0x0000029C)
+#define MMSS_DP_AUDIO_ISRC_4			(0x000002A0)
+#define MMSS_DP_AUDIO_ISRC_5			(0x000002A4)
+#define MMSS_DP_AUDIO_INFOFRAME_0		(0x000002A8)
+#define MMSS_DP_AUDIO_INFOFRAME_1		(0x000002AC)
+#define MMSS_DP_AUDIO_INFOFRAME_2		(0x000002B0)
 
-#define MMSS_DP_GENERIC0_0			(0x00000700)
-#define MMSS_DP_GENERIC0_1			(0x00000704)
-#define MMSS_DP_GENERIC0_2			(0x00000708)
-#define MMSS_DP_GENERIC0_3			(0x0000070C)
-#define MMSS_DP_GENERIC0_4			(0x00000710)
-#define MMSS_DP_GENERIC0_5			(0x00000714)
-#define MMSS_DP_GENERIC0_6			(0x00000718)
-#define MMSS_DP_GENERIC0_7			(0x0000071C)
-#define MMSS_DP_GENERIC0_8			(0x00000720)
-#define MMSS_DP_GENERIC0_9			(0x00000724)
-#define MMSS_DP_GENERIC1_0			(0x00000728)
-#define MMSS_DP_GENERIC1_1			(0x0000072C)
-#define MMSS_DP_GENERIC1_2			(0x00000730)
-#define MMSS_DP_GENERIC1_3			(0x00000734)
-#define MMSS_DP_GENERIC1_4			(0x00000738)
-#define MMSS_DP_GENERIC1_5			(0x0000073C)
-#define MMSS_DP_GENERIC1_6			(0x00000740)
-#define MMSS_DP_GENERIC1_7			(0x00000744)
-#define MMSS_DP_GENERIC1_8			(0x00000748)
-#define MMSS_DP_GENERIC1_9			(0x0000074C)
+#define MMSS_DP_GENERIC0_0			(0x00000300)
+#define MMSS_DP_GENERIC0_1			(0x00000304)
+#define MMSS_DP_GENERIC0_2			(0x00000308)
+#define MMSS_DP_GENERIC0_3			(0x0000030C)
+#define MMSS_DP_GENERIC0_4			(0x00000310)
+#define MMSS_DP_GENERIC0_5			(0x00000314)
+#define MMSS_DP_GENERIC0_6			(0x00000318)
+#define MMSS_DP_GENERIC0_7			(0x0000031C)
+#define MMSS_DP_GENERIC0_8			(0x00000320)
+#define MMSS_DP_GENERIC0_9			(0x00000324)
+#define MMSS_DP_GENERIC1_0			(0x00000328)
+#define MMSS_DP_GENERIC1_1			(0x0000032C)
+#define MMSS_DP_GENERIC1_2			(0x00000330)
+#define MMSS_DP_GENERIC1_3			(0x00000334)
+#define MMSS_DP_GENERIC1_4			(0x00000338)
+#define MMSS_DP_GENERIC1_5			(0x0000033C)
+#define MMSS_DP_GENERIC1_6			(0x00000340)
+#define MMSS_DP_GENERIC1_7			(0x00000344)
+#define MMSS_DP_GENERIC1_8			(0x00000348)
+#define MMSS_DP_GENERIC1_9			(0x0000034C)
 
-#define MMSS_DP_VSCEXT_0			(0x000006D0)
-#define MMSS_DP_VSCEXT_1			(0x000006D4)
-#define MMSS_DP_VSCEXT_2			(0x000006D8)
-#define MMSS_DP_VSCEXT_3			(0x000006DC)
-#define MMSS_DP_VSCEXT_4			(0x000006E0)
-#define MMSS_DP_VSCEXT_5			(0x000006E4)
-#define MMSS_DP_VSCEXT_6			(0x000006E8)
-#define MMSS_DP_VSCEXT_7			(0x000006EC)
-#define MMSS_DP_VSCEXT_8			(0x000006F0)
-#define MMSS_DP_VSCEXT_9			(0x000006F4)
+#define MMSS_DP_VSCEXT_0			(0x000002D0)
+#define MMSS_DP_VSCEXT_1			(0x000002D4)
+#define MMSS_DP_VSCEXT_2			(0x000002D8)
+#define MMSS_DP_VSCEXT_3			(0x000002DC)
+#define MMSS_DP_VSCEXT_4			(0x000002E0)
+#define MMSS_DP_VSCEXT_5			(0x000002E4)
+#define MMSS_DP_VSCEXT_6			(0x000002E8)
+#define MMSS_DP_VSCEXT_7			(0x000002EC)
+#define MMSS_DP_VSCEXT_8			(0x000002F0)
+#define MMSS_DP_VSCEXT_9			(0x000002F4)
 
-#define MMSS_DP_TIMING_ENGINE_EN		(0x00000A10)
-#define MMSS_DP_ASYNC_FIFO_CONFIG		(0x00000A88)
+#define MMSS_DP_BIST_ENABLE			(0x00000000)
+#define MMSS_DP_TIMING_ENGINE_EN		(0x00000010)
+#define MMSS_DP_INTF_CONFIG			(0x00000014)
+#define MMSS_DP_INTF_HSYNC_CTL			(0x00000018)
+#define MMSS_DP_INTF_VSYNC_PERIOD_F0		(0x0000001C)
+#define MMSS_DP_INTF_VSYNC_PERIOD_F1		(0x00000020)
+#define MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0	(0x00000024)
+#define MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1	(0x00000028)
+#define MMSS_INTF_DISPLAY_V_START_F0		(0x0000002C)
+#define MMSS_INTF_DISPLAY_V_START_F1		(0x00000030)
+#define MMSS_DP_INTF_DISPLAY_V_END_F0		(0x00000034)
+#define MMSS_DP_INTF_DISPLAY_V_END_F1		(0x00000038)
+#define MMSS_DP_INTF_ACTIVE_V_START_F0		(0x0000003C)
+#define MMSS_DP_INTF_ACTIVE_V_START_F1		(0x00000040)
+#define MMSS_DP_INTF_ACTIVE_V_END_F0		(0x00000044)
+#define MMSS_DP_INTF_ACTIVE_V_END_F1		(0x00000048)
+#define MMSS_DP_INTF_DISPLAY_HCTL		(0x0000004C)
+#define MMSS_DP_INTF_ACTIVE_HCTL		(0x00000050)
+#define MMSS_DP_INTF_POLARITY_CTL		(0x00000058)
+#define MMSS_DP_TPG_MAIN_CONTROL		(0x00000060)
+#define MMSS_DP_TPG_VIDEO_CONFIG		(0x00000064)
+#define MMSS_DP_ASYNC_FIFO_CONFIG		(0x00000088)
 
 /*DP PHY Register offsets */
 #define DP_PHY_REVISION_ID0                     (0x00000000)
@@ -197,14 +218,14 @@
 /* DP HDCP 1.3 registers */
 #define DP_HDCP_CTRL                                   (0x0A0)
 #define DP_HDCP_STATUS                                 (0x0A4)
-#define DP_HDCP_SW_UPPER_AKSV                          (0x298)
-#define DP_HDCP_SW_LOWER_AKSV                          (0x29C)
-#define DP_HDCP_ENTROPY_CTRL0                          (0x750)
-#define DP_HDCP_ENTROPY_CTRL1                          (0x75C)
+#define DP_HDCP_SW_UPPER_AKSV                          (0x098)
+#define DP_HDCP_SW_LOWER_AKSV                          (0x09C)
+#define DP_HDCP_ENTROPY_CTRL0                          (0x350)
+#define DP_HDCP_ENTROPY_CTRL1                          (0x35C)
 #define DP_HDCP_SHA_STATUS                             (0x0C8)
 #define DP_HDCP_RCVPORT_DATA2_0                        (0x0B0)
-#define DP_HDCP_RCVPORT_DATA3                          (0x2A4)
-#define DP_HDCP_RCVPORT_DATA4                          (0x2A8)
+#define DP_HDCP_RCVPORT_DATA3                          (0x0A4)
+#define DP_HDCP_RCVPORT_DATA4                          (0x0A8)
 #define DP_HDCP_RCVPORT_DATA5                          (0x0C0)
 #define DP_HDCP_RCVPORT_DATA6                          (0x0C4)
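
The offset rebasing above pairs with the dp_parser change that splits the old dp_ctrl region into dp_ahb, dp_aux, dp_link and dp_p0 blocks: every register is now addressed relative to its block's ioremapped base instead of a single controller base. A minimal sketch, assuming the AUX registers live in the dp_aux block (the helper name is made up):

/* Illustration only. */
static inline u32 example_dp_read(struct dss_io_data *io, u32 offset)
{
	return readl_relaxed(io->base + offset);
}

/*
 * e.g. DP_AUX_CTRL, previously 0x230 off the dp_ctrl base, is now 0x30
 * within the dp_aux block:
 *
 *	val = example_dp_read(&parser->io.dp_aux, DP_AUX_CTRL);
 */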
 
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
index 5d9d21f..0ddb47f 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
@@ -62,6 +62,11 @@
 	ctrl->ops.debug_bus = dsi_ctrl_hw_cmn_debug_bus;
 	ctrl->ops.get_cmd_read_data = dsi_ctrl_hw_cmn_get_cmd_read_data;
 	ctrl->ops.clear_rdbk_register = dsi_ctrl_hw_cmn_clear_rdbk_reg;
+	ctrl->ops.ctrl_reset = dsi_ctrl_hw_cmn_ctrl_reset;
+	ctrl->ops.mask_error_intr = dsi_ctrl_hw_cmn_mask_error_intr;
+	ctrl->ops.error_intr_ctrl = dsi_ctrl_hw_cmn_error_intr_ctrl;
+	ctrl->ops.get_error_mask = dsi_ctrl_hw_cmn_get_error_mask;
+	ctrl->ops.get_hw_version = dsi_ctrl_hw_cmn_get_hw_version;
 
 	switch (version) {
 	case DSI_CTRL_VERSION_1_4:
@@ -76,6 +81,8 @@
 		ctrl->ops.clamp_disable = dsi_ctrl_hw_14_clamp_disable;
 		ctrl->ops.reg_dump_to_buffer =
 			dsi_ctrl_hw_14_reg_dump_to_buffer;
+		ctrl->ops.schedule_dma_cmd = NULL;
+		ctrl->ops.get_cont_splash_status = NULL;
 		break;
 	case DSI_CTRL_VERSION_2_0:
 		ctrl->ops.setup_lane_map = dsi_ctrl_hw_20_setup_lane_map;
@@ -88,9 +95,13 @@
 		ctrl->ops.ulps_ops.get_lanes_in_ulps = NULL;
 		ctrl->ops.clamp_enable = NULL;
 		ctrl->ops.clamp_disable = NULL;
+		ctrl->ops.schedule_dma_cmd = NULL;
+		ctrl->ops.get_cont_splash_status = NULL;
 		break;
 	case DSI_CTRL_VERSION_2_2:
 		ctrl->ops.phy_reset_config = dsi_ctrl_hw_22_phy_reset_config;
+		ctrl->ops.get_cont_splash_status =
+			dsi_ctrl_hw_22_get_cont_splash_status;
 		ctrl->ops.setup_lane_map = dsi_ctrl_hw_20_setup_lane_map;
 		ctrl->ops.wait_for_lane_idle =
 			dsi_ctrl_hw_20_wait_for_lane_idle;
@@ -101,6 +112,7 @@
 		ctrl->ops.ulps_ops.get_lanes_in_ulps = NULL;
 		ctrl->ops.clamp_enable = NULL;
 		ctrl->ops.clamp_disable = NULL;
+		ctrl->ops.schedule_dma_cmd = dsi_ctrl_hw_22_schedule_dma_cmd;
 		break;
 	default:
 		break;
@@ -113,6 +125,7 @@
  * @version:     DSI controller version.
  * @index:       DSI controller instance ID.
  * @phy_isolation_enabled:       DSI controller works isolated from phy.
+ * @null_insertion_enabled:      DSI controller inserts null packet.
  *
  * This function setups the catalog information in the dsi_ctrl_hw object.
  *
@@ -120,7 +133,7 @@
  */
 int dsi_catalog_ctrl_setup(struct dsi_ctrl_hw *ctrl,
 		   enum dsi_ctrl_version version, u32 index,
-		   bool phy_isolation_enabled)
+		   bool phy_isolation_enabled, bool null_insertion_enabled)
 {
 	int rc = 0;
 
@@ -131,6 +144,7 @@
 	}
 
 	ctrl->index = index;
+	ctrl->null_insertion_enabled = null_insertion_enabled;
 	set_bit(DSI_CTRL_VIDEO_TPG, ctrl->feature_map);
 	set_bit(DSI_CTRL_CMD_TPG, ctrl->feature_map);
 	set_bit(DSI_CTRL_VARIABLE_REFRESH_RATE, ctrl->feature_map);
@@ -194,6 +208,7 @@
 	phy->ops.ulps_ops.is_lanes_in_ulps =
 		dsi_phy_hw_v3_0_is_lanes_in_ulps;
 	phy->ops.phy_timing_val = dsi_phy_hw_timing_val_v3_0;
+	phy->ops.phy_lane_reset = dsi_phy_hw_v3_0_lane_reset;
 }
 
 /**
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
index 186a5b5..735f61f 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
@@ -24,6 +24,7 @@
  * @version:     DSI controller version.
  * @index:       DSI controller instance ID.
  * @phy_isolation_enabled:       DSI controller works isolated from phy.
+ * @null_insertion_enabled:      DSI controller inserts null packet.
  *
  * This function setups the catalog information in the dsi_ctrl_hw object.
  *
@@ -31,7 +32,7 @@
  */
 int dsi_catalog_ctrl_setup(struct dsi_ctrl_hw *ctrl,
 		   enum dsi_ctrl_version version, u32 index,
-		   bool phy_isolation_enabled);
+		   bool phy_isolation_enabled, bool null_insertion_enabled);
 
 /**
  * dsi_catalog_phy_setup() - return catalog info for dsi phy hardware
@@ -101,6 +102,7 @@
 bool dsi_phy_hw_v3_0_is_lanes_in_ulps(u32 lanes, u32 ulps_lanes);
 int dsi_phy_hw_timing_val_v3_0(struct dsi_phy_per_lane_cfgs *timing_cfg,
 		u32 *timing_val, u32 size);
+int dsi_phy_hw_v3_0_lane_reset(struct dsi_phy_hw *phy);
 
 /* DSI controller common ops */
 u32 dsi_ctrl_hw_cmn_get_interrupt_status(struct dsi_ctrl_hw *ctrl);
@@ -176,6 +178,14 @@
 				     u32 rx_byte,
 				     u32 pkt_size, u32 *hw_read_cnt);
 void dsi_ctrl_hw_cmn_clear_rdbk_reg(struct dsi_ctrl_hw *ctrl);
+void dsi_ctrl_hw_22_schedule_dma_cmd(struct dsi_ctrl_hw *ctrl, int line_on);
+int dsi_ctrl_hw_cmn_ctrl_reset(struct dsi_ctrl_hw *ctrl,
+			int mask);
+void dsi_ctrl_hw_cmn_mask_error_intr(struct dsi_ctrl_hw *ctrl, u32 idx,
+			bool en);
+void dsi_ctrl_hw_cmn_error_intr_ctrl(struct dsi_ctrl_hw *ctrl, bool en);
+u32 dsi_ctrl_hw_cmn_get_error_mask(struct dsi_ctrl_hw *ctrl);
+u32 dsi_ctrl_hw_cmn_get_hw_version(struct dsi_ctrl_hw *ctrl);
 
 /* Definitions specific to 1.4 DSI controller hardware */
 int dsi_ctrl_hw_14_wait_for_lane_idle(struct dsi_ctrl_hw *ctrl, u32 lanes);
@@ -204,4 +214,7 @@
 					  char *buf,
 					  u32 size);
 
+/* Definitions specific to 2.2 DSI controller hardware */
+bool dsi_ctrl_hw_22_get_cont_splash_status(struct dsi_ctrl_hw *ctrl);
+
 #endif /* _DSI_CATALOG_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h b/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h
index 2a84a2d..1fd10d9 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h
@@ -198,6 +198,13 @@
 };
 
 /**
+ * dsi_display_clk_mngr_update_splash_status() - Update splash status
+ * @clk_mgr:     Structure containing DSI clock information
+ * @status:     Splash status
+ */
+void dsi_display_clk_mngr_update_splash_status(void *clk_mgr, bool status);
+
+/**
  * dsi_display_clk_mgr_register() - Register DSI clock manager
  * @info:     Structure containing DSI clock information
  */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
index 560964e..61406fe 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
@@ -46,6 +46,7 @@
 	post_clockon_cb post_clkon_cb;
 	pre_clockon_cb pre_clkon_cb;
 
+	bool is_cont_splash_enabled;
 	void *priv_data;
 };
 
@@ -287,7 +288,18 @@
 static int dsi_link_clk_set_rate(struct dsi_link_clks *l_clks)
 {
 	int rc = 0;
+	struct dsi_clk_mngr *mngr;
 
+	mngr = container_of(l_clks, struct dsi_clk_mngr, link_clks[0]);
+
+	/*
+	 * Ideally, continuous splash state should not be needed inside the
+	 * clock manager. However, in the current driver the flag is set by
+	 * the MDP driver and there is no interface event associated with it,
+	 * so it is checked here directly.
+	 */
+	if (mngr->is_cont_splash_enabled)
+		return 0;
+
 	rc = clk_set_rate(l_clks->clks.esc_clk, l_clks->freq.esc_clk_rate);
 	if (rc) {
 		pr_err("clk_set_rate failed for esc_clk rc = %d\n", rc);
@@ -1143,6 +1155,19 @@
 	return rc;
 }
 
+void dsi_display_clk_mngr_update_splash_status(void *clk_mgr, bool status)
+{
+	struct dsi_clk_mngr *mngr;
+
+	if (!clk_mgr) {
+		pr_err("Invalid params\n");
+		return;
+	}
+
+	mngr = (struct dsi_clk_mngr *)clk_mgr;
+	mngr->is_cont_splash_enabled = status;
+}
+
 void *dsi_display_clk_mngr_register(struct dsi_clk_info *info)
 {
 	struct dsi_clk_mngr *mngr;
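
A hedged sketch of the expected producer of this flag (the caller below is hypothetical; only dsi_display_clk_mngr_update_splash_status() is added by this patch): once the display driver has read the continuous-splash state from the controller, it pushes the flag into the clock manager so link clock rates are left untouched while the bootloader-configured pipe is still live.

/*
 * Hypothetical caller; clk_mngr is the handle returned by
 * dsi_display_clk_mngr_register().
 */
static void example_sync_splash_state(void *clk_mngr, bool splash_enabled)
{
	dsi_display_clk_mngr_update_splash_status(clk_mngr, splash_enabled);
}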
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index a51c1a7..a74216b 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -905,7 +905,15 @@
 	u32 v_total = 0, v_blank = 0, sleep_ms = 0, fps = 0, ret;
 	struct dsi_mode_info *timing;
 
-	if (dsi_ctrl->host_config.panel_mode != DSI_OP_VIDEO_MODE)
+	/*
+	 * No need to wait if the panel is not video mode or
+	 * if DSI controller supports command DMA scheduling or
+	 * if we are sending init commands.
+	 */
+	if ((dsi_ctrl->host_config.panel_mode != DSI_OP_VIDEO_MODE) ||
+		(dsi_ctrl->version >= DSI_CTRL_VERSION_2_2) ||
+		(dsi_ctrl->current_state.vid_engine_state !=
+					DSI_CTRL_ENGINE_ON))
 		return;
 
 	dsi_ctrl->hw.ops.clear_interrupt_status(&dsi_ctrl->hw,
@@ -943,8 +951,9 @@
 	u32 hw_flags = 0;
 	u32 length = 0;
 	u8 *buffer = NULL;
-	u32 cnt = 0;
+	u32 cnt = 0, line_no = 0x1;
 	u8 *cmdbuf;
+	struct dsi_mode_info *timing;
 
 	rc = mipi_dsi_create_packet(&packet, msg);
 	if (rc) {
@@ -999,6 +1008,17 @@
 				  true : false;
 	}
 
+	timing = &(dsi_ctrl->host_config.video_timing);
+	if (timing)
+		line_no += timing->v_back_porch + timing->v_sync_width +
+				timing->v_active;
+	if ((dsi_ctrl->host_config.panel_mode == DSI_OP_VIDEO_MODE) &&
+		dsi_ctrl->hw.ops.schedule_dma_cmd &&
+		(dsi_ctrl->current_state.vid_engine_state ==
+					DSI_CTRL_ENGINE_ON))
+		dsi_ctrl->hw.ops.schedule_dma_cmd(&dsi_ctrl->hw,
+				line_no);
+
 	hw_flags |= (flags & DSI_CTRL_CMD_DEFER_TRIGGER) ?
 			DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER : 0;
 
@@ -1021,6 +1041,9 @@
 		dsi_ctrl_wait_for_video_done(dsi_ctrl);
 		dsi_ctrl_enable_status_interrupt(dsi_ctrl,
 					DSI_SINT_CMD_MODE_DMA_DONE, NULL);
+		if (dsi_ctrl->hw.ops.mask_error_intr)
+			dsi_ctrl->hw.ops.mask_error_intr(&dsi_ctrl->hw,
+					BIT(DSI_FIFO_OVERFLOW), true);
 		reinit_completion(&dsi_ctrl->irq_info.cmd_dma_done);
 
 		if (flags & DSI_CTRL_CMD_FETCH_MEMORY) {
@@ -1038,7 +1061,8 @@
 				msecs_to_jiffies(DSI_CTRL_TX_TO_MS));
 
 		if (ret == 0) {
-			u32 status = 0;
+			u32 status = dsi_ctrl->hw.ops.get_interrupt_status(
+								&dsi_ctrl->hw);
 			u32 mask = DSI_CMD_MODE_DMA_DONE;
 
 			if (status & mask) {
@@ -1060,6 +1084,9 @@
 			}
 		}
 
+		if (dsi_ctrl->hw.ops.mask_error_intr)
+			dsi_ctrl->hw.ops.mask_error_intr(&dsi_ctrl->hw,
+					BIT(DSI_FIFO_OVERFLOW), false);
 		dsi_ctrl->hw.ops.reset_cmd_fifo(&dsi_ctrl->hw);
 	}
 error:
@@ -1448,6 +1475,9 @@
 	dsi_ctrl->phy_isolation_enabled = of_property_read_bool(of_node,
 				    "qcom,dsi-phy-isolation-enabled");
 
+	dsi_ctrl->null_insertion_enabled = of_property_read_bool(of_node,
+					"qcom,null-insertion-enabled");
+
 	return 0;
 }
 
@@ -1505,7 +1535,8 @@
 	}
 
 	rc = dsi_catalog_ctrl_setup(&dsi_ctrl->hw, dsi_ctrl->version,
-		    dsi_ctrl->cell_index, dsi_ctrl->phy_isolation_enabled);
+		dsi_ctrl->cell_index, dsi_ctrl->phy_isolation_enabled,
+		dsi_ctrl->null_insertion_enabled);
 	if (rc) {
 		pr_err("Catalog does not support version (%d)\n",
 		       dsi_ctrl->version);
@@ -1645,7 +1676,9 @@
 	mutex_lock(&ctrl->ctrl_lock);
 	if (ctrl->refcount == 1) {
 		pr_err("[%s] Device in use\n", ctrl->name);
+		mutex_unlock(&ctrl->ctrl_lock);
 		ctrl = ERR_PTR(-EBUSY);
+		return ctrl;
 	} else {
 		ctrl->refcount++;
 	}
@@ -1918,7 +1951,7 @@
 	}
 
 	dsi_ctrl->hw.ops.enable_status_interrupts(&dsi_ctrl->hw, 0x0);
-	dsi_ctrl->hw.ops.enable_error_interrupts(&dsi_ctrl->hw, 0x0);
+	dsi_ctrl->hw.ops.enable_error_interrupts(&dsi_ctrl->hw, 0xFF00E0);
 	dsi_ctrl->hw.ops.ctrl_en(&dsi_ctrl->hw, true);
 
 	mutex_unlock(&dsi_ctrl->ctrl_lock);
@@ -1969,33 +2002,77 @@
 static void dsi_ctrl_handle_error_status(struct dsi_ctrl *dsi_ctrl,
 				unsigned long int error)
 {
-	pr_err("%s: %lu\n", __func__, error);
+	struct dsi_event_cb_info cb_info;
+
+	cb_info = dsi_ctrl->irq_info.irq_err_cb;
+
+	/* disable error interrupts */
+	if (dsi_ctrl->hw.ops.error_intr_ctrl)
+		dsi_ctrl->hw.ops.error_intr_ctrl(&dsi_ctrl->hw, false);
+
+	/* clear error interrupts first */
+	if (dsi_ctrl->hw.ops.clear_error_status)
+		dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
+					error);
 
 	/* DTLN PHY error */
-	if (error & 0x3000e00)
-		if (dsi_ctrl->hw.ops.clear_error_status)
-			dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
-					0x3000e00);
+	if (error & 0x3000E00)
+		pr_err("dsi PHY contention error: 0x%lx\n", error);
+
+	/* TX timeout error */
+	if (error & 0xE0) {
+		if (error & 0xA0) {
+			if (cb_info.event_cb) {
+				cb_info.event_idx = DSI_LP_Rx_TIMEOUT;
+				(void)cb_info.event_cb(cb_info.event_usr_ptr,
+							cb_info.event_idx,
+							dsi_ctrl->cell_index,
+							0, 0, 0, 0);
+			}
+		}
+		pr_err("tx timeout error: 0x%lx\n", error);
+	}
 
 	/* DSI FIFO OVERFLOW error */
-	if (error & 0xf0000) {
-		if (dsi_ctrl->hw.ops.clear_error_status)
-			dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
-					0xf0000);
+	if (error & 0xF0000) {
+		u32 mask = 0;
+
+		if (dsi_ctrl->hw.ops.get_error_mask)
+			mask = dsi_ctrl->hw.ops.get_error_mask(&dsi_ctrl->hw);
+		/* no need to report FIFO overflow if already masked */
+		if (cb_info.event_cb && !(mask & 0xf0000)) {
+			cb_info.event_idx = DSI_FIFO_OVERFLOW;
+			(void)cb_info.event_cb(cb_info.event_usr_ptr,
+						cb_info.event_idx,
+						dsi_ctrl->cell_index,
+						0, 0, 0, 0);
+			pr_err("dsi FIFO OVERFLOW error: 0x%lx\n", error);
+		}
 	}
 
 	/* DSI FIFO UNDERFLOW error */
-	if (error & 0xf00000) {
-		if (dsi_ctrl->hw.ops.clear_error_status)
-			dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
-					0xf00000);
+	if (error & 0xF00000) {
+		if (cb_info.event_cb) {
+			cb_info.event_idx = DSI_FIFO_UNDERFLOW;
+			(void)cb_info.event_cb(cb_info.event_usr_ptr,
+						cb_info.event_idx,
+						dsi_ctrl->cell_index,
+						0, 0, 0, 0);
+		}
+		pr_err("dsi FIFO UNDERFLOW error: 0x%lx\n", error);
 	}
 
 	/* DSI PLL UNLOCK error */
 	if (error & BIT(8))
-		if (dsi_ctrl->hw.ops.clear_error_status)
-			dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
-					BIT(8));
+		pr_err("dsi PLL unlock error: 0x%lx\n", error);
+
+	/* ACK error */
+	if (error & 0xF)
+		pr_err("ack error: 0x%lx\n", error);
+
+	/* re-enable DSI error interrupts */
+	if (dsi_ctrl->hw.ops.error_intr_ctrl)
+		dsi_ctrl->hw.ops.error_intr_ctrl(&dsi_ctrl->hw, true);
 }
 
 /**
@@ -2009,39 +2086,28 @@
 	struct dsi_ctrl *dsi_ctrl;
 	struct dsi_event_cb_info cb_info;
 	unsigned long flags;
-	uint32_t cell_index, status, i;
-	uint64_t errors;
+	uint32_t status = 0x0, i;
+	uint64_t errors = 0x0;
 
 	if (!ptr)
 		return IRQ_NONE;
 	dsi_ctrl = ptr;
 
-	/* clear status interrupts */
+	/* check status interrupts */
 	if (dsi_ctrl->hw.ops.get_interrupt_status)
 		status = dsi_ctrl->hw.ops.get_interrupt_status(&dsi_ctrl->hw);
-	else
-		status = 0x0;
 
-	if (dsi_ctrl->hw.ops.clear_interrupt_status)
-		dsi_ctrl->hw.ops.clear_interrupt_status(&dsi_ctrl->hw, status);
-
-	spin_lock_irqsave(&dsi_ctrl->irq_info.irq_lock, flags);
-	cell_index = dsi_ctrl->cell_index;
-	spin_unlock_irqrestore(&dsi_ctrl->irq_info.irq_lock, flags);
-
-	/* clear error interrupts */
+	/* check error interrupts */
 	if (dsi_ctrl->hw.ops.get_error_status)
 		errors = dsi_ctrl->hw.ops.get_error_status(&dsi_ctrl->hw);
-	else
-		errors = 0x0;
 
-	if (errors) {
-		/* handle DSI error recovery */
+	/* clear interrupts */
+	if (dsi_ctrl->hw.ops.clear_interrupt_status)
+		dsi_ctrl->hw.ops.clear_interrupt_status(&dsi_ctrl->hw, 0x0);
+
+	/* handle DSI error recovery */
+	if (status & DSI_ERROR)
 		dsi_ctrl_handle_error_status(dsi_ctrl, errors);
-		if (dsi_ctrl->hw.ops.clear_error_status)
-			dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
-							errors);
-	}
 
 	if (status & DSI_CMD_MODE_DMA_DONE) {
 		dsi_ctrl_disable_status_interrupt(dsi_ctrl,
@@ -2062,9 +2128,16 @@
 	}
 
 	if (status & DSI_BTA_DONE) {
+		u32 fifo_overflow_mask = (DSI_DLN0_HS_FIFO_OVERFLOW |
+					DSI_DLN1_HS_FIFO_OVERFLOW |
+					DSI_DLN2_HS_FIFO_OVERFLOW |
+					DSI_DLN3_HS_FIFO_OVERFLOW);
 		dsi_ctrl_disable_status_interrupt(dsi_ctrl,
 					DSI_SINT_BTA_DONE);
 		complete_all(&dsi_ctrl->irq_info.bta_done);
+		if (dsi_ctrl->hw.ops.clear_error_status)
+			dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
+					fifo_overflow_mask);
 	}
 
 	for (i = 0; status && i < DSI_STATUS_INTERRUPT_COUNT; ++i) {
@@ -2077,7 +2150,8 @@
 			if (cb_info.event_cb)
 				(void)cb_info.event_cb(cb_info.event_usr_ptr,
 						cb_info.event_idx,
-						cell_index, irq, 0, 0, 0);
+						dsi_ctrl->cell_index,
+						irq, 0, 0, 0);
 		}
 		status >>= 1;
 	}
@@ -2229,6 +2303,7 @@
 /**
  * dsi_ctrl_host_init() - Initialize DSI host hardware.
  * @dsi_ctrl:        DSI controller handle.
+ * @is_splash_enabled:        boolean signifying splash status.
  *
  * Initializes DSI controller hardware with host configuration provided by
  * dsi_ctrl_update_host_config(). Initialization can be performed only during
@@ -2237,7 +2312,7 @@
  *
  * Return: error code.
  */
-int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl)
+int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl, bool is_splash_enabled)
 {
 	int rc = 0;
 
@@ -2254,37 +2329,42 @@
 		goto error;
 	}
 
-	dsi_ctrl->hw.ops.setup_lane_map(&dsi_ctrl->hw,
+	/*
+	 * For continuous splash use cases, skip these HW operations since the
+	 * bootloader has already programmed them.
+	 */
+	if (!is_splash_enabled) {
+		dsi_ctrl->hw.ops.setup_lane_map(&dsi_ctrl->hw,
 					&dsi_ctrl->host_config.lane_map);
 
-	dsi_ctrl->hw.ops.host_setup(&dsi_ctrl->hw,
+		dsi_ctrl->hw.ops.host_setup(&dsi_ctrl->hw,
 				    &dsi_ctrl->host_config.common_config);
 
-	if (dsi_ctrl->host_config.panel_mode == DSI_OP_CMD_MODE) {
-		dsi_ctrl->hw.ops.cmd_engine_setup(&dsi_ctrl->hw,
+		if (dsi_ctrl->host_config.panel_mode == DSI_OP_CMD_MODE) {
+			dsi_ctrl->hw.ops.cmd_engine_setup(&dsi_ctrl->hw,
 					&dsi_ctrl->host_config.common_config,
 					&dsi_ctrl->host_config.u.cmd_engine);
 
-		dsi_ctrl->hw.ops.setup_cmd_stream(&dsi_ctrl->hw,
+			dsi_ctrl->hw.ops.setup_cmd_stream(&dsi_ctrl->hw,
 				&dsi_ctrl->host_config.video_timing,
 				dsi_ctrl->host_config.video_timing.h_active * 3,
 				0x0,
 				NULL);
-	} else {
-		dsi_ctrl->hw.ops.video_engine_setup(&dsi_ctrl->hw,
+		} else {
+			dsi_ctrl->hw.ops.video_engine_setup(&dsi_ctrl->hw,
 					&dsi_ctrl->host_config.common_config,
 					&dsi_ctrl->host_config.u.video_engine);
-		dsi_ctrl->hw.ops.set_video_timing(&dsi_ctrl->hw,
+			dsi_ctrl->hw.ops.set_video_timing(&dsi_ctrl->hw,
 					  &dsi_ctrl->host_config.video_timing);
+		}
 	}
 
 	dsi_ctrl_setup_isr(dsi_ctrl);
 
 	dsi_ctrl->hw.ops.enable_status_interrupts(&dsi_ctrl->hw, 0x0);
-	dsi_ctrl->hw.ops.enable_error_interrupts(&dsi_ctrl->hw, 0x0);
+	dsi_ctrl->hw.ops.enable_error_interrupts(&dsi_ctrl->hw, 0xFF00E0);
 
-	pr_debug("[DSI_%d]Host initialization complete\n",
-		dsi_ctrl->cell_index);
+	pr_debug("[DSI_%d]Host initialization complete, continuous splash status:%d\n",
+		dsi_ctrl->cell_index, is_splash_enabled);
 	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_HOST_INIT, 0x1);
 error:
 	mutex_unlock(&dsi_ctrl->ctrl_lock);
@@ -2304,6 +2384,48 @@
 	return 0;
 }
 
+int dsi_ctrl_reset(struct dsi_ctrl *dsi_ctrl, int mask)
+{
+	int rc = 0;
+
+	if (!dsi_ctrl)
+		return -EINVAL;
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+	rc = dsi_ctrl->hw.ops.ctrl_reset(&dsi_ctrl->hw, mask);
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+
+	return rc;
+}
+
+int dsi_ctrl_get_hw_version(struct dsi_ctrl *dsi_ctrl)
+{
+	int rc = 0;
+
+	if (!dsi_ctrl)
+		return -EINVAL;
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+	rc = dsi_ctrl->hw.ops.get_hw_version(&dsi_ctrl->hw);
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+
+	return rc;
+}
+
+int dsi_ctrl_vid_engine_en(struct dsi_ctrl *dsi_ctrl, bool on)
+{
+	int rc = 0;
+
+	if (!dsi_ctrl)
+		return -EINVAL;
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+	dsi_ctrl->hw.ops.video_engine_en(&dsi_ctrl->hw, on);
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+
+	return rc;
+}
+
 /**
  * dsi_ctrl_host_deinit() - De-Initialize DSI host hardware.
  * @dsi_ctrl:        DSI controller handle.
@@ -2502,8 +2624,12 @@
 
 	if ((flags & DSI_CTRL_CMD_BROADCAST) &&
 		(flags & DSI_CTRL_CMD_BROADCAST_MASTER)) {
+		dsi_ctrl_wait_for_video_done(dsi_ctrl);
 		dsi_ctrl_enable_status_interrupt(dsi_ctrl,
 					DSI_SINT_CMD_MODE_DMA_DONE, NULL);
+		if (dsi_ctrl->hw.ops.mask_error_intr)
+			dsi_ctrl->hw.ops.mask_error_intr(&dsi_ctrl->hw,
+					BIT(DSI_FIFO_OVERFLOW), true);
 		reinit_completion(&dsi_ctrl->irq_info.cmd_dma_done);
 
 		/* trigger command */
@@ -2534,6 +2660,9 @@
 						dsi_ctrl->cell_index);
 			}
 		}
+		if (dsi_ctrl->hw.ops.mask_error_intr)
+			dsi_ctrl->hw.ops.mask_error_intr(&dsi_ctrl->hw,
+					BIT(DSI_FIFO_OVERFLOW), false);
 	}
 
 	mutex_unlock(&dsi_ctrl->ctrl_lock);
@@ -2563,6 +2692,43 @@
 }
 
 /**
+ * dsi_ctrl_update_host_engine_state_for_cont_splash() -
+ *            set engine state for dsi controller during continuous splash
+ * @dsi_ctrl:          DSI controller handle.
+ * @state:             Engine state.
+ *
+ * Set host engine state for DSI controller during continuous splash.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_update_host_engine_state_for_cont_splash(struct dsi_ctrl *dsi_ctrl,
+					enum dsi_engine_state state)
+{
+	int rc = 0;
+
+	if (!dsi_ctrl || (state >= DSI_CTRL_ENGINE_MAX)) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+
+	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_HOST_ENGINE, state);
+	if (rc) {
+		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+		       dsi_ctrl->cell_index, rc);
+		goto error;
+	}
+
+	pr_debug("[DSI_%d] Set host engine state = %d\n", dsi_ctrl->cell_index,
+		 state);
+	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_HOST_ENGINE, state);
+error:
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
  * dsi_ctrl_set_power_state() - set power state for dsi controller
  * @dsi_ctrl:          DSI controller handle.
  * @state:             Power state.
@@ -2732,8 +2898,6 @@
 		return -EINVAL;
 	}
 
-	mutex_lock(&dsi_ctrl->ctrl_lock);
-
 	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_CMD_ENGINE, state);
 	if (rc) {
 		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
@@ -2750,7 +2914,6 @@
 		 state);
 	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_CMD_ENGINE, state);
 error:
-	mutex_unlock(&dsi_ctrl->ctrl_lock);
 	return rc;
 }
 
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
index 0e5c0bd..a33bbfe 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
@@ -146,6 +146,7 @@
  * @irq_stat_mask:       Hardware mask of currently enabled interrupts.
  * @irq_stat_refcount:   Number of times each interrupt has been requested.
  * @irq_stat_cb:         Status IRQ callback definitions.
+ * @irq_err_cb:          IRQ callback definition to handle DSI errors.
  * @cmd_dma_done:          Completion signal for DSI_CMD_MODE_DMA_DONE interrupt
  * @vid_frame_done:        Completion signal for DSI_VIDEO_MODE_FRAME_DONE int.
  * @cmd_frame_done:        Completion signal for DSI_CMD_FRAME_DONE interrupt.
@@ -156,6 +157,7 @@
 	uint32_t irq_stat_mask;
 	int irq_stat_refcount[DSI_STATUS_INTERRUPT_COUNT];
 	struct dsi_event_cb_info irq_stat_cb[DSI_STATUS_INTERRUPT_COUNT];
+	struct dsi_event_cb_info irq_err_cb;
 
 	struct completion cmd_dma_done;
 	struct completion vid_frame_done;
@@ -177,6 +179,7 @@
  * @current_state:       Current driver and hardware state.
  * @clk_cb:		 Callback for DSI clock control.
  * @irq_info:            Interrupt information.
+ * @recovery_cb:         Recovery callback to SDE.
  * @clk_info:            Clock information.
  * @clk_freq:            DSi Link clock frequency information.
  * @pwr_info:            Power information.
@@ -195,6 +198,8 @@
  * @misr_cache:          Cached Frame MISR value
  * @phy_isolation_enabled:    A boolean property allows to isolate the phy from
  *                          dsi controller and run only dsi controller.
+ * @null_insertion_enabled:  A boolean property to allow dsi controller to
+ *                           insert null packet.
  */
 struct dsi_ctrl {
 	struct platform_device *pdev;
@@ -213,6 +218,7 @@
 	struct clk_ctrl_cb clk_cb;
 
 	struct dsi_ctrl_interrupts irq_info;
+	struct dsi_event_cb_info recovery_cb;
 
 	/* Clock and power states */
 	struct dsi_ctrl_clk_info clk_info;
@@ -239,6 +245,7 @@
 	u32 misr_cache;
 
 	bool phy_isolation_enabled;
+	bool null_insertion_enabled;
 };
 
 /**
@@ -392,6 +399,7 @@
 /**
  * dsi_ctrl_host_init() - Initialize DSI host hardware.
  * @dsi_ctrl:        DSI controller handle.
+ * @is_splash_enabled:       boolean signifying splash status.
  *
  * Initializes DSI controller hardware with host configuration provided by
  * dsi_ctrl_update_host_config(). Initialization can be performed only during
@@ -400,7 +408,7 @@
  *
  * Return: error code.
  */
-int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl);
+int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl, bool is_splash_enabled);
 
 /**
  * dsi_ctrl_host_deinit() - De-Initialize DSI host hardware.
@@ -490,6 +498,17 @@
 int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags);
 
 /**
+ * dsi_ctrl_update_host_engine_state_for_cont_splash() - update engine
+ *                                 states for cont splash usecase
+ * @dsi_ctrl:              DSI controller handle.
+ * @state:                 DSI engine state
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_update_host_engine_state_for_cont_splash(struct dsi_ctrl *dsi_ctrl,
+				enum dsi_engine_state state);
+
+/**
  * dsi_ctrl_set_power_state() - set power state for dsi controller
  * @dsi_ctrl:          DSI controller handle.
  * @state:             Power state.
@@ -636,4 +655,24 @@
  */
 void dsi_ctrl_drv_unregister(void);
 
+/**
+ * dsi_ctrl_reset() - Reset DSI PHY CLK/DATA lane
+ * @dsi_ctrl:        DSI controller handle.
+ * @mask:	     Mask to indicate if CLK and/or DATA lane needs reset.
+ */
+int dsi_ctrl_reset(struct dsi_ctrl *dsi_ctrl, int mask);
+
+/**
+ * dsi_ctrl_get_hw_version() - read dsi controller hw revision
+ * @dsi_ctrl:        DSI controller handle.
+ */
+int dsi_ctrl_get_hw_version(struct dsi_ctrl *dsi_ctrl);
+
+/**
+ * dsi_ctrl_vid_engine_en() - Control DSI video engine HW state
+ * @dsi_ctrl:        DSI controller handle.
+ * @on:		variable to control video engine ON/OFF.
+ */
+int dsi_ctrl_vid_engine_en(struct dsi_ctrl *dsi_ctrl, bool on);
+
 #endif /* _DSI_CTRL_H_ */
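
A hedged sketch of how the new exports could be combined in an error-recovery path; the ordering and the helper name are assumptions, not something this header mandates.

/* Hypothetical recovery helper. */
static int example_recover_from_phy_error(struct dsi_ctrl *ctrl, int lane_mask)
{
	int rc;

	/* pause the video engine before touching the lanes */
	rc = dsi_ctrl_vid_engine_en(ctrl, false);
	if (rc)
		return rc;

	/* reset the clock/data lanes indicated by lane_mask */
	rc = dsi_ctrl_reset(ctrl, lane_mask);

	/* resume video output so the pipe is not left stalled */
	(void)dsi_ctrl_vid_engine_en(ctrl, true);

	return rc;
}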
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
index 714a450..c77065c 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
@@ -96,6 +96,7 @@
  * @DSI_SINT_DESKEW_DONE:              The deskew calibration operation done.
  * @DSI_SINT_DYN_BLANK_DMA_DONE:       The dynamic blankin DMA operation has
  *                                     completed.
+ * @DSI_SINT_ERROR:                    DSI error has happened.
  */
 enum dsi_status_int_index {
 	DSI_SINT_CMD_MODE_DMA_DONE = 0,
@@ -108,6 +109,7 @@
 	DSI_SINT_DYN_REFRESH_DONE = 7,
 	DSI_SINT_DESKEW_DONE = 8,
 	DSI_SINT_DYN_BLANK_DMA_DONE = 9,
+	DSI_SINT_ERROR = 10,
 
 	DSI_STATUS_INTERRUPT_COUNT
 };
@@ -126,6 +128,7 @@
  * @DSI_DESKEW_DONE:              The deskew calibration operation has completed
  * @DSI_DYN_BLANK_DMA_DONE:       The dynamic blankin DMA operation has
  *                                completed.
+ * @DSI_ERROR:                    DSI error has happened.
  */
 enum dsi_status_int_type {
 	DSI_CMD_MODE_DMA_DONE = BIT(DSI_SINT_CMD_MODE_DMA_DONE),
@@ -137,7 +140,8 @@
 	DSI_CMD_FRAME_DONE = BIT(DSI_SINT_CMD_FRAME_DONE),
 	DSI_DYN_REFRESH_DONE = BIT(DSI_SINT_DYN_REFRESH_DONE),
 	DSI_DESKEW_DONE = BIT(DSI_SINT_DESKEW_DONE),
-	DSI_DYN_BLANK_DMA_DONE = BIT(DSI_SINT_DYN_BLANK_DMA_DONE)
+	DSI_DYN_BLANK_DMA_DONE = BIT(DSI_SINT_DYN_BLANK_DMA_DONE),
+	DSI_ERROR = BIT(DSI_SINT_ERROR)
 };
 
 /**
@@ -175,6 +179,7 @@
  * @DSI_EINT_DLN1_LP1_CONTENTION:        PHY level contention while lane 1 high.
  * @DSI_EINT_DLN2_LP1_CONTENTION:        PHY level contention while lane 2 high.
  * @DSI_EINT_DLN3_LP1_CONTENTION:        PHY level contention while lane 3 high.
+ * @DSI_EINT_PANEL_SPECIFIC_ERR:         DSI Protocol violation error.
  */
 enum dsi_error_int_index {
 	DSI_EINT_RDBK_SINGLE_ECC_ERR = 0,
@@ -209,6 +214,7 @@
 	DSI_EINT_DLN1_LP1_CONTENTION = 29,
 	DSI_EINT_DLN2_LP1_CONTENTION = 30,
 	DSI_EINT_DLN3_LP1_CONTENTION = 31,
+	DSI_EINT_PANEL_SPECIFIC_ERR = 32,
 
 	DSI_ERROR_INTERRUPT_COUNT
 };
@@ -248,6 +254,7 @@
  * @DSI_DLN1_LP1_CONTENTION:        PHY level contention while lane 1 is high.
  * @DSI_DLN2_LP1_CONTENTION:        PHY level contention while lane 2 is high.
  * @DSI_DLN3_LP1_CONTENTION:        PHY level contention while lane 3 is high.
+ * @DSI_PANEL_SPECIFIC_ERR:         DSI Protocol violation.
  */
 enum dsi_error_int_type {
 	DSI_RDBK_SINGLE_ECC_ERR = BIT(DSI_EINT_RDBK_SINGLE_ECC_ERR),
@@ -282,6 +289,7 @@
 	DSI_DLN1_LP1_CONTENTION = BIT(DSI_EINT_DLN1_LP1_CONTENTION),
 	DSI_DLN2_LP1_CONTENTION = BIT(DSI_EINT_DLN2_LP1_CONTENTION),
 	DSI_DLN3_LP1_CONTENTION = BIT(DSI_EINT_DLN3_LP1_CONTENTION),
+	DSI_PANEL_SPECIFIC_ERR = BIT(DSI_EINT_PANEL_SPECIFIC_ERR),
 };
 
 /**
@@ -533,6 +541,12 @@
 				 u32 *hw_read_cnt);
 
 	/**
+	 * get_cont_splash_status() - get continuous splash status
+	 * @ctrl:           Pointer to the controller host hardware.
+	 */
+	bool (*get_cont_splash_status)(struct dsi_ctrl_hw *ctrl);
+
+	/**
 	 * wait_for_lane_idle() - wait for DSI lanes to go to idle state
 	 * @ctrl:          Pointer to the controller host hardware.
 	 * @lanes:         ORed list of lanes (enum dsi_data_lanes) which need
@@ -721,6 +735,48 @@
 	 * @ctrl:         Pointer to the controller host hardware.
 	 */
 	void (*clear_rdbk_register)(struct dsi_ctrl_hw *ctrl);
+
+	/**
+	 * schedule_dma_cmd() - Schedule DMA command transfer on a
+	 *                      particular blanking line.
+	 * @ctrl:         Pointer to the controller host hardware.
+	 * @line_no:      Blanking line number on which DMA command
+	 *                needs to be sent.
+	 */
+	void (*schedule_dma_cmd)(struct dsi_ctrl_hw *ctrl, int line_no);
+
+	/**
+	 * ctrl_reset() - Reset DSI lanes to recover from DSI errors
+	 * @ctrl:         Pointer to the controller host hardware.
+	 * @mask:         Indicates the error type.
+	 */
+	int (*ctrl_reset)(struct dsi_ctrl_hw *ctrl, int mask);
+
+	/**
+	 * mask_error_intr() - Mask/Unmask particular DSI error interrupts
+	 * @ctrl:         Pointer to the controller host hardware.
+	 * @idx:	  Indicates the errors to be masked.
+	 * @en:		  Bool for mask or unmask of the error
+	 */
+	void (*mask_error_intr)(struct dsi_ctrl_hw *ctrl, u32 idx, bool en);
+
+	/**
+	 * error_intr_ctrl() - Mask/Unmask master DSI error interrupt
+	 * @ctrl:         Pointer to the controller host hardware.
+	 * @en:		  Bool for mask or unmask of DSI error
+	 */
+	void (*error_intr_ctrl)(struct dsi_ctrl_hw *ctrl, bool en);
+
+	/**
+	 * get_error_mask() - get DSI error interrupt mask status
+	 * @ctrl:         Pointer to the controller host hardware.
+	 */
+	u32 (*get_error_mask)(struct dsi_ctrl_hw *ctrl);
+
+	/**
+	 * get_hw_version() - get DSI controller hw version
+	 * @ctrl:         Pointer to the controller host hardware.
+	 */
+	u32 (*get_hw_version)(struct dsi_ctrl_hw *ctrl);
 };
 
 /*
@@ -739,6 +795,8 @@
  * @supported_errors:       Number of supported errors.
  * @phy_isolation_enabled:    A boolean property allows to isolate the phy from
  *                          dsi controller and run only dsi controller.
+ * @null_insertion_enabled:  A boolean property to allow dsi controller to
+ *                           insert null packet.
  */
 struct dsi_ctrl_hw {
 	void __iomem *base;
@@ -758,6 +816,7 @@
 	u64 supported_errors;
 
 	bool phy_isolation_enabled;
+	bool null_insertion_enabled;
 };
 
 #endif /* _DSI_CTRL_HW_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_2.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_2.c
index 1b1e811..650c2e0 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_2.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_2.c
@@ -17,10 +17,14 @@
 #include "dsi_ctrl_hw.h"
 #include "dsi_ctrl_reg.h"
 #include "dsi_hw.h"
+#include "dsi_catalog.h"
 
 /* Equivalent to register DISP_CC_MISC_CMD */
 #define DISP_CC_CLAMP_REG_OFF 0x00
 
+/* register to configure DMA scheduling */
+#define DSI_DMA_SCHEDULE_CTRL 0x100
+
 /**
  * dsi_ctrl_hw_22_phy_reset_config() - to configure clamp control during ulps
  * @ctrl:          Pointer to the controller host hardware.
@@ -40,3 +44,38 @@
 		reg |= BIT(ctrl->index);
 	DSI_DISP_CC_W32(ctrl, DISP_CC_CLAMP_REG_OFF, reg);
 }
+
+/**
+ * dsi_ctrl_hw_22_schedule_dma_cmd() - to schedule DMA command transfer
+ * @ctrl:         Pointer to the controller host hardware.
+ * @line_no:      Line number at which command needs to be sent.
+ */
+void dsi_ctrl_hw_22_schedule_dma_cmd(struct dsi_ctrl_hw *ctrl, int line_no)
+{
+	u32 reg = 0;
+
+	reg = DSI_R32(ctrl, DSI_DMA_SCHEDULE_CTRL);
+	reg |= BIT(28);
+	reg |= (line_no & 0xffff);
+
+	DSI_W32(ctrl, DSI_DMA_SCHEDULE_CTRL, reg);
+}
+
+/**
+ * dsi_ctrl_hw_22_get_cont_splash_status() - to verify whether continuous
+ *                                           splash is enabled or not
+ * @ctrl:          Pointer to the controller host hardware.
+ *
+ * Return:         Return Continuous splash status
+ */
+bool dsi_ctrl_hw_22_get_cont_splash_status(struct dsi_ctrl_hw *ctrl)
+{
+	u32 reg = 0;
+
+	/*
+	 * DSI scratch register 1 is used by the bootloader to indicate
+	 * whether continuous splash is enabled.
+	 */
+	reg = DSI_R32(ctrl, DSI_SCRATCH_REGISTER_1);
+	return reg == 0x1;
+}
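
The blanking line fed into schedule_dma_cmd() is derived from the panel timing in dsi_ctrl.c; a condensed restatement of that computation (the helper name is made up, the formula mirrors the dsi_ctrl.c hunk above):

/* Sketch: the DMA trigger lands one line past the active region. */
static int example_dma_schedule_line(const struct dsi_mode_info *timing)
{
	return 1 + timing->v_back_porch + timing->v_sync_width +
		timing->v_active;
}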
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
index 2959e94..c2c8f57 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
@@ -334,7 +334,7 @@
 	u32 width_final, stride_final;
 	u32 height_final;
 	u32 stream_total = 0, stream_ctrl = 0;
-	u32 reg_ctrl = 0, reg_ctrl2 = 0;
+	u32 reg_ctrl = 0, reg_ctrl2 = 0, data = 0;
 
 	if (roi && (!roi->w || !roi->h))
 		return;
@@ -391,6 +391,9 @@
 		height_final = mode->v_active;
 	}
 
+	/* HS Timer value */
+	DSI_W32(ctrl, DSI_HS_TIMER_CTRL, 0x3FD08);
+
 	stream_ctrl = (stride_final + 1) << 16;
 	stream_ctrl |= (vc_id & 0x3) << 8;
 	stream_ctrl |= 0x39; /* packet data type */
@@ -405,6 +408,14 @@
 	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM0_TOTAL, stream_total);
 	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM1_TOTAL, stream_total);
 
+	if (ctrl->null_insertion_enabled) {
+		/* enable null packet insertion */
+		data = (vc_id << 1);
+		data |= 0 << 16;
+		data |= 0x1;
+		DSI_W32(ctrl, DSI_COMMAND_MODE_NULL_INSERTION_CTRL, data);
+	}
+
 	pr_debug("ctrl %d stream_ctrl 0x%x stream_total 0x%x\n", ctrl->index,
 			stream_ctrl, stream_total);
 }
@@ -834,6 +845,8 @@
 		ints |= DSI_DYN_REFRESH_DONE;
 	if (reg & BIT(30))
 		ints |= DSI_DESKEW_DONE;
+	if (reg & BIT(24))
+		ints |= DSI_ERROR;
 
 	pr_debug("[DSI_%d] Interrupt status = 0x%x, INT_CTRL=0x%x\n",
 		 ctrl->index, ints, reg);
@@ -870,6 +883,12 @@
 	if (ints & DSI_DESKEW_DONE)
 		reg |= BIT(30);
 
+	/*
+	 * Do not clear the error status here; it is cleared by the
+	 * error handler.
+	 */
+	reg &= ~BIT(24);
 	DSI_W32(ctrl, DSI_INT_CTRL, reg);
 
 	pr_debug("[DSI_%d] Clear interrupts, ints = 0x%x, INT_CTRL=0x%x\n",
@@ -936,7 +955,7 @@
 	u32 timeout_errors;
 	u32 clk_error;
 	u32 dsi_status;
-	u64 errors = 0;
+	u64 errors = 0, shift = 0x1;
 
 	dln0_phy_err = DSI_R32(ctrl, DSI_DLN0_PHY_ERR);
 	if (dln0_phy_err & BIT(0))
@@ -983,6 +1002,8 @@
 		errors |= DSI_RDBK_INCOMPLETE_PKT;
 	if (ack_error & BIT(24))
 		errors |= DSI_PERIPH_ERROR_PKT;
+	if (ack_error & BIT(15))
+		errors |= (shift << DSI_EINT_PANEL_SPECIFIC_ERR);
 
 	timeout_errors = DSI_R32(ctrl, DSI_TIMEOUT_STATUS);
 	if (timeout_errors & BIT(0))
@@ -1020,7 +1041,6 @@
 	u32 timeout_error = 0;
 	u32 clk_error = 0;
 	u32 dsi_status = 0;
-	u32 int_ctrl = 0;
 
 	if (errors & DSI_RDBK_SINGLE_ECC_ERR)
 		ack_error |= BIT(16);
@@ -1032,6 +1052,8 @@
 		ack_error |= BIT(23);
 	if (errors & DSI_PERIPH_ERROR_PKT)
 		ack_error |= BIT(24);
+	if (errors & DSI_PANEL_SPECIFIC_ERR)
+		ack_error |= BIT(15);
 
 	if (errors & DSI_LP_RX_TIMEOUT)
 		timeout_error |= BIT(4);
@@ -1080,14 +1102,14 @@
 
 	DSI_W32(ctrl, DSI_DLN0_PHY_ERR, dln0_phy_err);
 	DSI_W32(ctrl, DSI_FIFO_STATUS, fifo_status);
+	/* Writing of an extra 0 is needed to clear ack error bits */
 	DSI_W32(ctrl, DSI_ACK_ERR_STATUS, ack_error);
+	wmb(); /* make sure register is committed */
+	DSI_W32(ctrl, DSI_ACK_ERR_STATUS, 0x0);
 	DSI_W32(ctrl, DSI_TIMEOUT_STATUS, timeout_error);
 	DSI_W32(ctrl, DSI_CLK_STATUS, clk_error);
 	DSI_W32(ctrl, DSI_STATUS, dsi_status);
 
-	int_ctrl = DSI_R32(ctrl, DSI_INT_CTRL);
-	int_ctrl |= BIT(24);
-	DSI_W32(ctrl, DSI_INT_CTRL, int_ctrl);
 	pr_debug("[DSI_%d] clear errors = 0x%llx, phy=0x%x, fifo=0x%x",
 		 ctrl->index, errors, dln0_phy_err, fifo_status);
 	pr_debug("[DSI_%d] ack=0x%x, timeout=0x%x, clk=0x%x, dsi=0x%x\n",
@@ -1348,3 +1370,102 @@
 	DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
 }
 
+int dsi_ctrl_hw_cmn_ctrl_reset(struct dsi_ctrl_hw *ctrl,
+		int mask)
+{
+	int rc = 0;
+	u32 data;
+
+	pr_debug("DSI CTRL and PHY reset. ctrl-num = %d %d\n",
+			ctrl->index, mask);
+
+	data = DSI_R32(ctrl, 0x0004);
+	/* Disable DSI video mode */
+	DSI_W32(ctrl, 0x004, (data & ~BIT(1)));
+	wmb(); /* ensure register committed */
+	/* Disable DSI controller */
+	DSI_W32(ctrl, 0x004, (data & ~(BIT(0) | BIT(1))));
+	wmb(); /* ensure register committed */
+	/* "Force On" all dynamic clocks */
+	DSI_W32(ctrl, 0x11c, 0x100a00);
+
+	/* DSI_SW_RESET */
+	DSI_W32(ctrl, 0x118, 0x1);
+	wmb(); /* ensure register is committed */
+	DSI_W32(ctrl, 0x118, 0x0);
+	wmb(); /* ensure register is committed */
+
+	/* Remove "Force On" all dynamic clocks */
+	DSI_W32(ctrl, 0x11c, 0x00);
+	/* Enable DSI controller */
+	DSI_W32(ctrl, 0x004, (data & ~BIT(1)));
+	wmb(); /* ensure register committed */
+
+	return rc;
+}
+
+void dsi_ctrl_hw_cmn_mask_error_intr(struct dsi_ctrl_hw *ctrl, u32 idx, bool en)
+{
+	u32 reg = 0;
+
+	reg = DSI_R32(ctrl, 0x10c);
+
+	if (idx & BIT(DSI_FIFO_OVERFLOW)) {
+		if (en)
+			reg |= (0xf << 16);
+		else
+			reg &= ~(0xf << 16);
+	}
+
+	if (idx & BIT(DSI_FIFO_UNDERFLOW)) {
+		if (en)
+			reg |= (0xf << 26);
+		else
+			reg &= ~(0xf << 26);
+	}
+
+	if (idx & BIT(DSI_LP_Rx_TIMEOUT)) {
+		if (en)
+			reg |= (0x7 << 23);
+		else
+			reg &= ~(0x7 << 23);
+	}
+
+	DSI_W32(ctrl, 0x10c, reg);
+	wmb(); /* ensure error is masked */
+}
+
+void dsi_ctrl_hw_cmn_error_intr_ctrl(struct dsi_ctrl_hw *ctrl, bool en)
+{
+	u32 reg = 0;
+	u32 dsi_total_mask = 0x2222AA02;
+
+	reg = DSI_R32(ctrl, 0x110);
+	reg &= dsi_total_mask;
+
+	if (en)
+		reg |= (BIT(24) | BIT(25));
+	else
+		reg &= ~BIT(25);
+
+	DSI_W32(ctrl, 0x110, reg);
+	wmb(); /* ensure error is masked */
+}
+
+u32 dsi_ctrl_hw_cmn_get_error_mask(struct dsi_ctrl_hw *ctrl)
+{
+	u32 reg = 0;
+
+	reg = DSI_R32(ctrl, 0x10c);
+
+	return reg;
+}
+
+u32 dsi_ctrl_hw_cmn_get_hw_version(struct dsi_ctrl_hw *ctrl)
+{
+	u32 reg = 0;
+
+	reg = DSI_R32(ctrl, 0x0);
+
+	return reg;
+}
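
dsi_ctrl.c uses the mask helpers above in a bracketing pattern around blocking command DMA; a condensed sketch of that pattern (the surrounding transfer code is elided):

/* Sketch of the masking window around a command DMA transfer. */
static void example_masked_cmd_window(struct dsi_ctrl_hw *hw)
{
	/* hide transient FIFO overflows while the command is in flight */
	hw->ops.mask_error_intr(hw, BIT(DSI_FIFO_OVERFLOW), true);

	/* ... trigger the DMA and wait for CMD_MODE_DMA_DONE here ... */

	hw->ops.mask_error_intr(hw, BIT(DSI_FIFO_OVERFLOW), false);
}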
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
index 2f0d25f..d45f849 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
@@ -358,6 +358,7 @@
  * @clk_rate_hz:      DSI bit clock rate per lane in Hz.
  * @dsc_enabled:      DSC compression enabled.
  * @dsc:              DSC compression configuration.
+ * @roi_caps:         Panel ROI capabilities.
  */
 struct dsi_mode_info {
 	u32 h_active;
@@ -377,6 +378,7 @@
 	u64 clk_rate_hz;
 	bool dsc_enabled;
 	struct msm_display_dsc_info *dsc;
+	struct msm_roi_caps roi_caps;
 };
 
 /**
@@ -505,6 +507,7 @@
  * @topology:             Topology selected for the panel
  * @dsc:                  DSC compression info
  * @dsc_enabled:          DSC compression enabled
+ * @roi_caps:		  Panel ROI capabilities
  */
 struct dsi_display_mode_priv_info {
 	struct dsi_panel_cmd_set cmd_sets[DSI_CMD_SET_MAX];
@@ -520,6 +523,7 @@
 	struct msm_display_topology topology;
 	struct msm_display_dsc_info dsc;
 	bool dsc_enabled;
+	struct msm_roi_caps roi_caps;
 };
 
 /**
@@ -581,4 +585,16 @@
 		uint32_t data2, uint32_t data3);
 };
 
+/**
+ * enum dsi_error_status - various dsi errors
+ * @DSI_FIFO_OVERFLOW:     DSI FIFO Overflow error
+ * @DSI_FIFO_UNDERFLOW:    DSI FIFO Underflow error
+ * @DSI_LP_Rx_TIMEOUT:     DSI LP/RX Timeout error
+ */
+enum dsi_error_status {
+	DSI_FIFO_OVERFLOW = 1,
+	DSI_FIFO_UNDERFLOW,
+	DSI_LP_Rx_TIMEOUT,
+};
+
 #endif /* _DSI_DEFS_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index 8bca4e4..c9c1d4c 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -36,6 +36,8 @@
 
 #define MISR_BUFF_SIZE	256
 
+#define MAX_NAME_SIZE	64
+
 static DEFINE_MUTEX(dsi_display_list_lock);
 static LIST_HEAD(dsi_display_list);
 static char dsi_display_primary[MAX_CMDLINE_PARAM_LEN];
@@ -131,18 +133,19 @@
 	int i;
 	struct dsi_display_ctrl *m_ctrl, *ctrl;
 
+	m_ctrl = &display->ctrl[display->cmd_master_idx];
+	mutex_lock(&m_ctrl->ctrl->ctrl_lock);
+
 	if (display->cmd_engine_refcount > 0) {
 		display->cmd_engine_refcount++;
-		return 0;
+		goto done;
 	}
 
-	m_ctrl = &display->ctrl[display->cmd_master_idx];
-
 	rc = dsi_ctrl_set_cmd_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_ON);
 	if (rc) {
 		pr_err("[%s] failed to enable cmd engine, rc=%d\n",
 		       display->name, rc);
-		goto error;
+		goto done;
 	}
 
 	for (i = 0; i < display->ctrl_count; i++) {
@@ -160,10 +163,11 @@
 	}
 
 	display->cmd_engine_refcount++;
-	return rc;
+	goto done;
 error_disable_master:
 	(void)dsi_ctrl_set_cmd_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
-error:
+done:
+	mutex_unlock(&m_ctrl->ctrl->ctrl_lock);
 	return rc;
 }
 
@@ -173,15 +177,17 @@
 	int i;
 	struct dsi_display_ctrl *m_ctrl, *ctrl;
 
+	m_ctrl = &display->ctrl[display->cmd_master_idx];
+	mutex_lock(&m_ctrl->ctrl->ctrl_lock);
+
 	if (display->cmd_engine_refcount == 0) {
 		pr_err("[%s] Invalid refcount\n", display->name);
-		return 0;
+		goto done;
 	} else if (display->cmd_engine_refcount > 1) {
 		display->cmd_engine_refcount--;
-		return 0;
+		goto done;
 	}
 
-	m_ctrl = &display->ctrl[display->cmd_master_idx];
 	for (i = 0; i < display->ctrl_count; i++) {
 		ctrl = &display->ctrl[i];
 		if (!ctrl->ctrl || (ctrl == m_ctrl))
@@ -203,6 +209,8 @@
 
 error:
 	display->cmd_engine_refcount = 0;
+done:
+	mutex_unlock(&m_ctrl->ctrl->ctrl_lock);
 	return rc;
 }
 
@@ -255,11 +263,18 @@
 	if (!panel)
 		return -EINVAL;
 
+	/* acquire panel_lock to make sure no commands are in progress */
+	dsi_panel_acquire_panel_lock(panel);
+
 	config = &(panel->esd_config);
 	lenp = config->status_valid_params ?: config->status_cmds_rlen;
 	count = config->status_cmd.count;
 	cmds = config->status_cmd.cmds;
-	flags = (DSI_CTRL_CMD_FETCH_MEMORY | DSI_CTRL_CMD_READ);
+	flags = (DSI_CTRL_CMD_FETCH_MEMORY | DSI_CTRL_CMD_READ);
+	if (cmds->last_command) {
+		cmds->msg.flags |= MIPI_DSI_MSG_LASTCOMMAND;
+		flags |= DSI_CTRL_CMD_LAST_COMMAND;
+	}
 
 	for (i = 0; i < count; ++i) {
 		memset(config->status_buf, 0x0, SZ_4K);
@@ -277,6 +292,8 @@
 	}
 
 error:
+	/* release panel_lock */
+	dsi_panel_release_panel_lock(panel);
 	return rc;
 }
 
@@ -372,7 +389,7 @@
 	struct dsi_display *dsi_display = display;
 	struct dsi_panel *panel;
 	u32 status_mode;
-	int rc = 0;
+	int rc = 0x1;
 
 	if (dsi_display == NULL)
 		return -EINVAL;
@@ -381,6 +398,14 @@
 
 	status_mode = panel->esd_config.status_mode;
 
+	mutex_lock(&dsi_display->display_lock);
+
+	if (!panel->panel_initialized) {
+		pr_debug("Panel not initialized\n");
+		mutex_unlock(&dsi_display->display_lock);
+		return rc;
+	}
+
 	dsi_display_clk_ctrl(dsi_display->dsi_clk_handle,
 		DSI_ALL_CLKS, DSI_CLK_ON);
 
@@ -397,6 +422,7 @@
 
 	dsi_display_clk_ctrl(dsi_display->dsi_clk_handle,
 		DSI_ALL_CLKS, DSI_CLK_OFF);
+	mutex_unlock(&dsi_display->display_lock);
 
 	return rc;
 }
@@ -453,6 +479,32 @@
 	}
 }
 
+/**
+ * dsi_display_get_cont_splash_status - Get continuous splash status.
+ * @dsi_display:         DSI display handle.
+ *
+ * Return: boolean to signify whether continuous splash is enabled.
+ */
+static bool dsi_display_get_cont_splash_status(struct dsi_display *display)
+{
+	u32 val = 0;
+	int i;
+	struct dsi_display_ctrl *ctrl;
+	struct dsi_ctrl_hw *hw;
+
+	for (i = 0; i < display->ctrl_count ; i++) {
+		ctrl = &(display->ctrl[i]);
+		if (!ctrl || !ctrl->ctrl)
+			continue;
+
+		hw = &(ctrl->ctrl->hw);
+		val = hw->ops.get_cont_splash_status(hw);
+		if (!val)
+			return false;
+	}
+	return true;
+}
+
 int dsi_display_set_power(struct drm_connector *connector,
 		int power_mode, void *disp)
 {
@@ -674,6 +726,8 @@
 {
 	int rc = 0;
 	struct dentry *dir, *dump_file, *misr_data;
+	char name[MAX_NAME_SIZE];
+	int i;
 
 	dir = debugfs_create_dir(display->name, NULL);
 	if (IS_ERR_OR_NULL(dir)) {
@@ -707,6 +761,35 @@
 		goto error_remove_dir;
 	}
 
+	for (i = 0; i < display->ctrl_count; i++) {
+		struct msm_dsi_phy *phy = display->ctrl[i].phy;
+
+		if (!phy || !phy->name)
+			continue;
+
+		snprintf(name, ARRAY_SIZE(name),
+				"%s_allow_phy_power_off", phy->name);
+		dump_file = debugfs_create_bool(name, 0600, dir,
+				&phy->allow_phy_power_off);
+		if (IS_ERR_OR_NULL(dump_file)) {
+			rc = PTR_ERR(dump_file);
+			pr_err("[%s] debugfs create %s failed, rc=%d\n",
+			       display->name, name, rc);
+			goto error_remove_dir;
+		}
+
+		snprintf(name, ARRAY_SIZE(name),
+				"%s_regulator_min_datarate_bps", phy->name);
+		dump_file = debugfs_create_u32(name, 0600, dir,
+				&phy->regulator_min_datarate_bps);
+		if (IS_ERR_OR_NULL(dump_file)) {
+			rc = PTR_ERR(dump_file);
+			pr_err("[%s] debugfs create %s failed, rc=%d\n",
+			       display->name, name, rc);
+			goto error_remove_dir;
+		}
+	}
+
 	display->root = dir;
 	return rc;
 error_remove_dir:
@@ -924,7 +1007,7 @@
 /**
  * dsi_display_phy_idle_on() - enable DSI PHY while coming out of idle screen.
  * @dsi_display:         DSI display handle.
- * @enable:           enable/disable DSI PHY.
+ * @mmss_clamp:          True if clamp is enabled.
  *
  * Return: error code.
  */
@@ -971,7 +1054,6 @@
 /**
  * dsi_display_phy_idle_off() - disable DSI PHY while going to idle screen.
  * @dsi_display:         DSI display handle.
- * @enable:           enable/disable DSI PHY.
  *
  * Return: error code.
  */
@@ -986,9 +1068,16 @@
 		return -EINVAL;
 	}
 
-	if (!display->panel->allow_phy_power_off) {
-		pr_debug("panel doesn't support this feature\n");
-		return 0;
+	for (i = 0; i < display->ctrl_count; i++) {
+		struct msm_dsi_phy *phy = display->ctrl[i].phy;
+
+		if (!phy)
+			continue;
+
+		if (!phy->allow_phy_power_off) {
+			pr_debug("phy doesn't support this feature\n");
+			return 0;
+		}
 	}
 
 	m_ctrl = &display->ctrl[display->cmd_master_idx];
@@ -1037,6 +1126,13 @@
 	case SDE_CONN_EVENT_CMD_DONE:
 		irq_status_idx = DSI_SINT_CMD_FRAME_DONE;
 		break;
+	case SDE_CONN_EVENT_VID_FIFO_OVERFLOW:
+	case SDE_CONN_EVENT_CMD_FIFO_UNDERFLOW:
+		if (event_info) {
+			for (i = 0; i < display->ctrl_count; i++)
+				display->ctrl[i].ctrl->recovery_cb =
+							*event_info;
+		}
+		break;
 	default:
 		/* nothing to do */
 		pr_debug("[%s] unhandled event %d\n", display->name, event_idx);
@@ -1055,6 +1151,30 @@
 	}
 }
 
+/**
+ * dsi_config_host_engine_state_for_cont_splash()- update host engine state
+ *                                                 during continuous splash.
+ * @display: Handle to dsi display
+ *
+ */
+static void dsi_config_host_engine_state_for_cont_splash
+					(struct dsi_display *display)
+{
+	int i;
+	struct dsi_display_ctrl *ctrl;
+	enum dsi_engine_state host_state = DSI_CTRL_ENGINE_ON;
+
+	/* Sequence does not matter for split dsi usecases */
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl)
+			continue;
+
+		dsi_ctrl_update_host_engine_state_for_cont_splash(ctrl->ctrl,
+							host_state);
+	}
+}
+
 static int dsi_display_ctrl_power_on(struct dsi_display *display)
 {
 	int rc = 0;
@@ -1452,7 +1572,8 @@
 
 	for (i = 0 ; i < display->ctrl_count; i++) {
 		ctrl = &display->ctrl[i];
-		rc = dsi_ctrl_host_init(ctrl->ctrl);
+		rc = dsi_ctrl_host_init(ctrl->ctrl,
+				display->is_cont_splash_enabled);
 		if (rc) {
 			pr_err("[%s] failed to init host_%d, rc=%d\n",
 			       display->name, i, rc);
@@ -1493,6 +1614,14 @@
 	int i;
 	struct dsi_display_ctrl *m_ctrl, *ctrl;
 
+	/* Host engine states are already taken care of for the
+	 * continuous splash case
+	 */
+	if (display->is_cont_splash_enabled) {
+		pr_debug("cont splash enabled, host enable not required\n");
+		return 0;
+	}
+
 	m_ctrl = &display->ctrl[display->cmd_master_idx];
 
 	rc = dsi_ctrl_set_host_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_ON);
@@ -1631,7 +1760,8 @@
 	rc = dsi_phy_enable(m_ctrl->phy,
 			    &display->config,
 			    m_src,
-			    true);
+			    true,
+			    display->is_cont_splash_enabled);
 	if (rc) {
 		pr_err("[%s] failed to enable DSI PHY, rc=%d\n",
 		       display->name, rc);
@@ -1646,7 +1776,8 @@
 		rc = dsi_phy_enable(ctrl->phy,
 				    &display->config,
 				    DSI_PLL_SOURCE_NON_NATIVE,
-				    true);
+				    true,
+				    display->is_cont_splash_enabled);
 		if (rc) {
 			pr_err("[%s] failed to enable DSI PHY, rc=%d\n",
 			       display->name, rc);
@@ -1760,6 +1891,14 @@
 	int i;
 	struct dsi_display_ctrl *m_ctrl, *ctrl;
 
+	/* For continuous splash use case ctrl states are updated
+	 * separately and hence we do an early return
+	 */
+	if (display->is_cont_splash_enabled) {
+		pr_debug("cont splash enabled, phy sw reset not required\n");
+		return 0;
+	}
+
 	m_ctrl = &display->ctrl[display->cmd_master_idx];
 
 	rc = dsi_ctrl_phy_sw_reset(m_ctrl->ctrl);
@@ -1785,6 +1924,61 @@
 	return rc;
 }
 
+static void dsi_display_aspace_cb_locked(void *cb_data, bool is_detach)
+{
+	struct dsi_display *display;
+	struct dsi_display_ctrl *display_ctrl;
+	int rc, cnt;
+
+	if (!cb_data) {
+		pr_err("aspace cb called with invalid cb_data\n");
+		return;
+	}
+	display = (struct dsi_display *)cb_data;
+
+	/*
+	 * acquire panel_lock to make sure no commands are in-progress
+	 * while detaching the non-secure context banks
+	 */
+	dsi_panel_acquire_panel_lock(display->panel);
+
+	if (is_detach) {
+		/* invalidate the stored iova */
+		display->cmd_buffer_iova = 0;
+
+		/* return the virtual address mapping */
+		msm_gem_put_vaddr_locked(display->tx_cmd_buf);
+		msm_gem_vunmap(display->tx_cmd_buf);
+
+	} else {
+		rc = msm_gem_get_iova_locked(display->tx_cmd_buf,
+				display->aspace, &(display->cmd_buffer_iova));
+		if (rc) {
+			pr_err("failed to get the iova rc %d\n", rc);
+			goto end;
+		}
+
+		display->vaddr =
+			(void *) msm_gem_get_vaddr_locked(display->tx_cmd_buf);
+
+		if (IS_ERR_OR_NULL(display->vaddr)) {
+			pr_err("failed to get va rc %d\n", rc);
+			goto end;
+		}
+	}
+
+	for (cnt = 0; cnt < display->ctrl_count; cnt++) {
+		display_ctrl = &display->ctrl[cnt];
+		display_ctrl->ctrl->cmd_buffer_size = display->cmd_buffer_size;
+		display_ctrl->ctrl->cmd_buffer_iova = display->cmd_buffer_iova;
+		display_ctrl->ctrl->vaddr = display->vaddr;
+	}
+
+end:
+	/* release panel_lock */
+	dsi_panel_release_panel_lock(display->panel);
+}
+
 static int dsi_host_attach(struct mipi_dsi_host *host,
 			   struct mipi_dsi_device *dsi)
 {
@@ -1802,7 +1996,6 @@
 {
 	struct dsi_display *display = to_dsi_display(host);
 	struct dsi_display_ctrl *display_ctrl;
-	struct msm_gem_address_space *aspace = NULL;
 	int rc = 0, cnt = 0;
 
 	if (!host || !msg) {
@@ -1846,19 +2039,27 @@
 			goto error_disable_cmd_engine;
 		}
 
-		aspace = msm_gem_smmu_address_space_get(display->drm_dev,
-				MSM_SMMU_DOMAIN_UNSECURE);
-		if (!aspace) {
+		display->aspace = msm_gem_smmu_address_space_get(
+				display->drm_dev, MSM_SMMU_DOMAIN_UNSECURE);
+		if (!display->aspace) {
 			pr_err("failed to get aspace\n");
 			rc = -EINVAL;
 			goto free_gem;
 		}
 
-		rc = msm_gem_get_iova(display->tx_cmd_buf, aspace,
+		/* register to aspace */
+		rc = msm_gem_address_space_register_cb(display->aspace,
+				dsi_display_aspace_cb_locked, (void *)display);
+		if (rc) {
+			pr_err("failed to register callback %d", rc);
+			goto free_gem;
+		}
+
+		rc = msm_gem_get_iova(display->tx_cmd_buf, display->aspace,
 					&(display->cmd_buffer_iova));
 		if (rc) {
 			pr_err("failed to get the iova rc %d\n", rc);
-			goto free_gem;
+			goto free_aspace_cb;
 		}
 
 		display->vaddr =
@@ -1910,7 +2111,10 @@
 	}
 	return rc;
 put_iova:
-	msm_gem_put_iova(display->tx_cmd_buf, aspace);
+	msm_gem_put_iova(display->tx_cmd_buf, display->aspace);
+free_aspace_cb:
+	msm_gem_address_space_unregister_cb(display->aspace,
+			dsi_display_aspace_cb_locked, display);
 free_gem:
 	mutex_lock(&display->drm_dev->struct_mutex);
 	msm_gem_free_object(display->tx_cmd_buf);
@@ -2299,6 +2503,7 @@
 		 *     not be changed during static screen.
 		 */
 
+		pr_debug("updating power states for ctrl and phy\n");
 		rc = dsi_display_ctrl_power_on(display);
 		if (rc) {
 			pr_err("[%s] failed to power on dsi controllers, rc=%d\n",
@@ -2965,6 +3170,20 @@
 		}
 	}
 
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+
+		if (!ctrl->phy || !ctrl->ctrl)
+			continue;
+
+		rc = dsi_phy_set_clk_freq(ctrl->phy, &ctrl->ctrl->clk_freq);
+		if (rc) {
+			pr_err("[%s] failed to set phy clk freq, rc=%d\n",
+			       display->name, rc);
+			goto error;
+		}
+	}
+
 	if (priv_info->phy_timing_len) {
 		for (i = 0; i < display->ctrl_count; i++) {
 			ctrl = &display->ctrl[i];
@@ -3042,6 +3261,110 @@
 }
 
 /**
+ * dsi_display_splash_res_init() - Initialize resources for continuous splash
+ * @display:    Pointer to dsi display
+ * Returns:     Zero on success
+ */
+static int dsi_display_splash_res_init(struct dsi_display *display)
+{
+	int rc = 0;
+
+	/* Vote for gdsc required to read register address space */
+
+	display->cont_splash_client = sde_power_client_create(display->phandle,
+						"cont_splash_client");
+	rc = sde_power_resource_enable(display->phandle,
+			display->cont_splash_client, true);
+	if (rc) {
+		pr_err("failed to vote gdsc for continuous splash, rc=%d\n",
+							rc);
+		return -EINVAL;
+	}
+
+	/* Verify whether continuous splash is enabled or not */
+	display->is_cont_splash_enabled =
+		dsi_display_get_cont_splash_status(display);
+	if (!display->is_cont_splash_enabled) {
+		pr_err("Continuous splash is not enabled\n");
+		goto splash_disabled;
+	}
+
+	/* Update splash status for clock manager */
+	dsi_display_clk_mngr_update_splash_status(display->clk_mngr,
+				display->is_cont_splash_enabled);
+
+	/* Vote for Core clk and link clk. Votes on ctrl and phy
+	 * regulators are implicit from the pre-clk-on callback
+	 */
+	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
+			DSI_ALL_CLKS, DSI_CLK_ON);
+	if (rc) {
+		pr_err("[%s] failed to enable DSI link clocks, rc=%d\n",
+		       display->name, rc);
+		goto clk_manager_update;
+	}
+
+	/* Vote on panel regulator will be removed during suspend path */
+	rc = dsi_pwr_enable_regulator(&display->panel->power_info, true);
+	if (rc) {
+		pr_err("[%s] failed to enable vregs, rc=%d\n",
+				display->panel->name, rc);
+		goto clks_disabled;
+	}
+
+	dsi_config_host_engine_state_for_cont_splash(display);
+
+	return rc;
+
+clks_disabled:
+	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
+			DSI_ALL_CLKS, DSI_CLK_OFF);
+
+clk_manager_update:
+	/* Update splash status for clock manager */
+	dsi_display_clk_mngr_update_splash_status(display->clk_mngr,
+				false);
+
+splash_disabled:
+	(void)sde_power_resource_enable(display->phandle,
+			display->cont_splash_client, false);
+	display->is_cont_splash_enabled = false;
+	return rc;
+}
+
+/**
+ * dsi_display_splash_res_cleanup() - cleanup for continuous splash
+ * @display:    Pointer to dsi display
+ * Returns:     Zero on success
+ */
+int dsi_display_splash_res_cleanup(struct dsi_display *display)
+{
+	int rc = 0;
+
+	if (!display->is_cont_splash_enabled)
+		return 0;
+
+	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
+			DSI_ALL_CLKS, DSI_CLK_OFF);
+	if (rc)
+		pr_err("[%s] failed to disable DSI link clocks, rc=%d\n",
+		       display->name, rc);
+
+	rc = sde_power_resource_enable(display->phandle,
+			display->cont_splash_client, false);
+	if (rc)
+		pr_err("failed to remove vote on gdsc for continuous splash, rc=%d\n",
+				rc);
+
+	display->is_cont_splash_enabled = false;
+	/* Update splash status for clock manager */
+	dsi_display_clk_mngr_update_splash_status(display->clk_mngr,
+				display->is_cont_splash_enabled);
+
+	return rc;
+}
+
+/**
  * dsi_display_bind - bind dsi device with controlling device
  * @dev:        Pointer to base of platform device
  * @master:     Pointer to container of drm device
@@ -3127,6 +3450,7 @@
 		}
 	}
 
+	display->phandle = &priv->phandle;
 	info.pre_clkoff_cb = dsi_pre_clkoff_cb;
 	info.pre_clkon_cb = dsi_pre_clkon_cb;
 	info.post_clkoff_cb = dsi_post_clkoff_cb;
@@ -3203,6 +3527,12 @@
 
 	pr_info("Successfully bind display panel '%s'\n", display->name);
 	display->drm_dev = drm;
+
+	/* Initialize resources for continuous splash */
+	rc = dsi_display_splash_res_init(display);
+	if (rc)
+		pr_err("Continuous splash resource init failed, rc=%d\n", rc);
+
 	goto error;
 
 error_host_deinit:
@@ -3622,9 +3952,6 @@
 	if (display->panel->esd_config.esd_enabled)
 		info->capabilities |= MSM_DISPLAY_ESD_ENABLED;
 
-	memcpy(&info->roi_caps, &display->panel->roi_caps,
-			sizeof(info->roi_caps));
-
 error:
 	mutex_unlock(&display->display_lock);
 	return rc;
@@ -4030,6 +4357,232 @@
 	return rc;
 }
 
+static void dsi_display_handle_fifo_underflow(struct work_struct *work)
+{
+	struct dsi_display *display = NULL;
+
+	display = container_of(work, struct dsi_display, fifo_underflow_work);
+	if (!display)
+		return;
+	pr_debug("handle DSI FIFO underflow error\n");
+
+	dsi_display_clk_ctrl(display->dsi_clk_handle,
+			DSI_ALL_CLKS, DSI_CLK_ON);
+	dsi_display_soft_reset(display);
+	dsi_display_clk_ctrl(display->dsi_clk_handle,
+			DSI_ALL_CLKS, DSI_CLK_OFF);
+}
+
+static void dsi_display_handle_fifo_overflow(struct work_struct *work)
+{
+	struct dsi_display *display = NULL;
+	struct dsi_display_ctrl *ctrl;
+	int i, rc;
+	int mask = BIT(20); /* clock lane */
+	int (*cb_func)(void *event_usr_ptr,
+		uint32_t event_idx, uint32_t instance_idx,
+		uint32_t data0, uint32_t data1,
+		uint32_t data2, uint32_t data3);
+	void *data;
+	u32 version = 0;
+
+	display = container_of(work, struct dsi_display, fifo_overflow_work);
+	if (!display || !display->panel ||
+			(display->panel->panel_mode != DSI_OP_VIDEO_MODE))
+		return;
+
+	pr_debug("handle DSI FIFO overflow error\n");
+	dsi_display_clk_ctrl(display->dsi_clk_handle,
+			DSI_ALL_CLKS, DSI_CLK_ON);
+
+	/*
+	 * below recovery sequence is not applicable to
+	 * hw version 2.0.0, 2.1.0 and 2.2.0, so return early.
+	 */
+	ctrl = &display->ctrl[display->clk_master_idx];
+	version = dsi_ctrl_get_hw_version(ctrl->ctrl);
+	if (!version || (version < 0x20020001))
+		goto end;
+
+	/* reset ctrl and lanes */
+	for (i = 0 ; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		rc = dsi_ctrl_reset(ctrl->ctrl, mask);
+		rc = dsi_phy_lane_reset(ctrl->phy);
+	}
+
+	/* wait for display line count to be in active area */
+	ctrl = &display->ctrl[display->clk_master_idx];
+	if (ctrl->ctrl->recovery_cb.event_cb) {
+		cb_func = ctrl->ctrl->recovery_cb.event_cb;
+		data = ctrl->ctrl->recovery_cb.event_usr_ptr;
+		rc = cb_func(data, SDE_CONN_EVENT_VID_FIFO_OVERFLOW,
+				display->clk_master_idx, 0, 0, 0, 0);
+		if (rc < 0) {
+			pr_debug("sde callback failed\n");
+			goto end;
+		}
+	}
+
+	/* Enable Video mode for DSI controller */
+	for (i = 0 ; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		dsi_ctrl_vid_engine_en(ctrl->ctrl, true);
+	}
+	/*
+	 * Add sufficient delay to make sure
+	 * pixel transmission has started
+	 */
+	udelay(200);
+end:
+	dsi_display_clk_ctrl(display->dsi_clk_handle,
+			DSI_ALL_CLKS, DSI_CLK_OFF);
+}
+
+static void dsi_display_handle_lp_rx_timeout(struct work_struct *work)
+{
+	struct dsi_display *display = NULL;
+	struct dsi_display_ctrl *ctrl;
+	int i, rc;
+	int mask = (BIT(20) | (0xF << 16)); /* clock lane and 4 data lanes */
+	int (*cb_func)(void *event_usr_ptr,
+		uint32_t event_idx, uint32_t instance_idx,
+		uint32_t data0, uint32_t data1,
+		uint32_t data2, uint32_t data3);
+	void *data;
+	u32 version = 0;
+
+	display = container_of(work, struct dsi_display, lp_rx_timeout_work);
+	if (!display || (display->panel->panel_mode != DSI_OP_VIDEO_MODE))
+		return;
+	pr_debug("handle DSI LP RX Timeout error\n");
+
+	dsi_display_clk_ctrl(display->dsi_clk_handle,
+			DSI_ALL_CLKS, DSI_CLK_ON);
+
+	/*
+	 * below recovery sequence is not applicable to
+	 * hw version 2.0.0, 2.1.0 and 2.2.0, so return early.
+	 */
+	ctrl = &display->ctrl[display->clk_master_idx];
+	version = dsi_ctrl_get_hw_version(ctrl->ctrl);
+	if (!version || (version < 0x20020001))
+		goto end;
+
+	/* reset ctrl and lanes */
+	for (i = 0 ; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		rc = dsi_ctrl_reset(ctrl->ctrl, mask);
+		rc = dsi_phy_lane_reset(ctrl->phy);
+	}
+
+	ctrl = &display->ctrl[display->clk_master_idx];
+	if (ctrl->ctrl->recovery_cb.event_cb) {
+		cb_func = ctrl->ctrl->recovery_cb.event_cb;
+		data = ctrl->ctrl->recovery_cb.event_usr_ptr;
+		rc = cb_func(data, SDE_CONN_EVENT_VID_FIFO_OVERFLOW,
+				display->clk_master_idx, 0, 0, 0, 0);
+		if (rc < 0) {
+			pr_debug("Target is in suspend/shutdown\n");
+			goto end;
+		}
+	}
+
+	/* Enable Video mode for DSI controller */
+	for (i = 0 ; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		dsi_ctrl_vid_engine_en(ctrl->ctrl, true);
+	}
+
+	/*
+	 * Add sufficient delay to make sure
+	 * pixel transmission has started
+	 */
+	udelay(200);
+end:
+	dsi_display_clk_ctrl(display->dsi_clk_handle,
+			DSI_ALL_CLKS, DSI_CLK_OFF);
+}
+
+static int dsi_display_cb_error_handler(void *data,
+		uint32_t event_idx, uint32_t instance_idx,
+		uint32_t data0, uint32_t data1,
+		uint32_t data2, uint32_t data3)
+{
+	struct dsi_display *display =  data;
+
+	if (!display)
+		return -EINVAL;
+
+	switch (event_idx) {
+	case DSI_FIFO_UNDERFLOW:
+		queue_work(display->err_workq, &display->fifo_underflow_work);
+		break;
+	case DSI_FIFO_OVERFLOW:
+		queue_work(display->err_workq, &display->fifo_overflow_work);
+		break;
+	case DSI_LP_Rx_TIMEOUT:
+		queue_work(display->err_workq, &display->lp_rx_timeout_work);
+		break;
+	default:
+		pr_warn("unhandled error interrupt: %d\n", event_idx);
+		break;
+	}
+
+	return 0;
+}
+
+static void dsi_display_register_error_handler(struct dsi_display *display)
+{
+	int i = 0;
+	struct dsi_display_ctrl *ctrl;
+	struct dsi_event_cb_info event_info;
+
+	if (!display)
+		return;
+
+	display->err_workq = create_singlethread_workqueue("dsi_err_workq");
+	if (!display->err_workq) {
+		pr_err("failed to create dsi workq!\n");
+		return;
+	}
+
+	INIT_WORK(&display->fifo_underflow_work,
+				dsi_display_handle_fifo_underflow);
+	INIT_WORK(&display->fifo_overflow_work,
+				dsi_display_handle_fifo_overflow);
+	INIT_WORK(&display->lp_rx_timeout_work,
+				dsi_display_handle_lp_rx_timeout);
+
+	memset(&event_info, 0, sizeof(event_info));
+
+	event_info.event_cb = dsi_display_cb_error_handler;
+	event_info.event_usr_ptr = display;
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		ctrl->ctrl->irq_info.irq_err_cb = event_info;
+	}
+}
+
+static void dsi_display_unregister_error_handler(struct dsi_display *display)
+{
+	int i = 0;
+	struct dsi_display_ctrl *ctrl;
+
+	if (!display)
+		return;
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		memset(&ctrl->ctrl->irq_info.irq_err_cb,
+				0, sizeof(struct dsi_event_cb_info));
+	}
+
+	if (display->err_workq)
+		destroy_workqueue(display->err_workq);
+}
+
 int dsi_display_prepare(struct dsi_display *display)
 {
 	int rc = 0;
@@ -4059,11 +4612,18 @@
 		goto error;
 	}
 
-	rc = dsi_panel_pre_prepare(display->panel);
-	if (rc) {
-		pr_err("[%s] panel pre-prepare failed, rc=%d\n",
-		       display->name, rc);
-		goto error;
+	if (!display->is_cont_splash_enabled) {
+		/*
+		 * For the continuous splash usecase we skip panel
+		 * pre-prepare since the regulator vote is already
+		 * taken care of in splash resource init
+		 */
+		rc = dsi_panel_pre_prepare(display->panel);
+		if (rc) {
+			pr_err("[%s] panel pre-prepare failed, rc=%d\n",
+					display->name, rc);
+			goto error;
+		}
 	}
 
 	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
@@ -4100,6 +4660,8 @@
 		       display->name, rc);
 		goto error_phy_disable;
 	}
+	/* Set up DSI ERROR event callback */
+	dsi_display_register_error_handler(display);
 
 	rc = dsi_display_ctrl_host_enable(display);
 	if (rc) {
@@ -4122,12 +4684,19 @@
 		goto error_ctrl_link_off;
 	}
 
-	rc = dsi_panel_prepare(display->panel);
-	if (rc) {
-		pr_err("[%s] panel prepare failed, rc=%d\n", display->name, rc);
-		goto error_ctrl_link_off;
+	if (!display->is_cont_splash_enabled) {
+		/*
+		 * For the continuous splash usecase we skip panel
+		 * prepare since the panel is already in the
+		 * active state and panel-on commands are not needed
+		 */
+		rc = dsi_panel_prepare(display->panel);
+		if (rc) {
+			pr_err("[%s] panel prepare failed, rc=%d\n",
+					display->name, rc);
+			goto error_ctrl_link_off;
+		}
 	}
-
 	goto error;
 
 error_ctrl_link_off:
@@ -4155,13 +4724,20 @@
 		struct dsi_rect *out_roi)
 {
 	const struct dsi_rect *bounds = &ctrl->ctrl->mode_bounds;
+	struct dsi_display_mode *cur_mode;
+	struct msm_roi_caps *roi_caps;
 	struct dsi_rect req_roi = { 0 };
 	int rc = 0;
 
-	if (req_rois->num_rects > display->panel->roi_caps.num_roi) {
+	cur_mode = display->panel->cur_mode;
+	if (!cur_mode)
+		return 0;
+
+	roi_caps = &cur_mode->priv_info->roi_caps;
+	if (req_rois->num_rects > roi_caps->num_roi) {
 		pr_err("request for %d rois greater than max %d\n",
 				req_rois->num_rects,
-				display->panel->roi_caps.num_roi);
+				roi_caps->num_roi);
 		rc = -EINVAL;
 		goto exit;
 	}
@@ -4198,13 +4774,20 @@
 static int dsi_display_set_roi(struct dsi_display *display,
 		struct msm_roi_list *rois)
 {
+	struct dsi_display_mode *cur_mode;
+	struct msm_roi_caps *roi_caps;
 	int rc = 0;
 	int i;
 
 	if (!display || !rois || !display->panel)
 		return -EINVAL;
 
-	if (!display->panel->roi_caps.enabled)
+	cur_mode = display->panel->cur_mode;
+	if (!cur_mode)
+		return 0;
+
+	roi_caps = &cur_mode->priv_info->roi_caps;
+	if (!roi_caps->enabled)
 		return 0;
 
 	for (i = 0; i < display->ctrl_count; i++) {
@@ -4259,6 +4842,46 @@
 	return rc;
 }
 
+int dsi_display_config_ctrl_for_cont_splash(struct dsi_display *display)
+{
+	int rc = 0;
+
+	if (!display || !display->panel) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	if (!display->panel->cur_mode) {
+		pr_err("no valid mode set for the display");
+		return -EINVAL;
+	}
+
+	if (!display->is_cont_splash_enabled)
+		return 0;
+
+	if (display->config.panel_mode == DSI_OP_VIDEO_MODE) {
+		rc = dsi_display_vid_engine_enable(display);
+		if (rc) {
+			pr_err("[%s]failed to enable DSI video engine, rc=%d\n",
+			       display->name, rc);
+			goto error_out;
+		}
+	} else if (display->config.panel_mode == DSI_OP_CMD_MODE) {
+		rc = dsi_display_cmd_engine_enable(display);
+		if (rc) {
+			pr_err("[%s]failed to enable DSI cmd engine, rc=%d\n",
+			       display->name, rc);
+			goto error_out;
+		}
+	} else {
+		pr_err("[%s] Invalid configuration\n", display->name);
+		rc = -EINVAL;
+	}
+
+error_out:
+	return rc;
+}
+
 int dsi_display_enable(struct dsi_display *display)
 {
 	int rc = 0;
@@ -4274,6 +4897,25 @@
 		return -EINVAL;
 	}
 
+	/* Engine states and panel states are populated during splash
+	 * resource init and hence we return early
+	 */
+	if (display->is_cont_splash_enabled) {
+
+		dsi_display_config_ctrl_for_cont_splash(display);
+
+		rc = dsi_display_splash_res_cleanup(display);
+		if (rc) {
+			pr_err("Continuous splash res cleanup failed, rc=%d\n",
+				rc);
+			return -EINVAL;
+		}
+
+		display->panel->panel_initialized = true;
+		pr_debug("cont splash enabled, display enable not required\n");
+		return 0;
+	}
+
 	mutex_lock(&display->display_lock);
 
 	mode = display->panel->cur_mode;
@@ -4481,6 +5123,9 @@
 		pr_err("[%s] failed to disable Link clocks, rc=%d\n",
 		       display->name, rc);
 
+	/* Free up DSI ERROR event callback */
+	dsi_display_unregister_error_handler(display);
+
 	rc = dsi_display_ctrl_deinit(display);
 	if (rc)
 		pr_err("[%s] failed to deinit controller, rc=%d\n",
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
index da4f5eb..886641b 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -128,6 +128,7 @@
  * @display_type:     Display type as defined in device tree.
  * @list:             List pointer.
  * @is_active:        Is display active.
+ * @is_cont_splash_enabled:  Is continuous splash enabled
  * @display_lock:     Mutex for dsi_display interface.
  * @ctrl_count:       Number of DSI interfaces required by panel.
  * @ctrl:             Controller information for DSI display.
@@ -165,6 +166,7 @@
 	const char *display_type;
 	struct list_head list;
 	bool is_active;
+	bool is_cont_splash_enabled;
 	struct mutex display_lock;
 
 	u32 ctrl_count;
@@ -192,11 +194,15 @@
 	u32 cmd_buffer_size;
 	u32 cmd_buffer_iova;
 	void *vaddr;
+	struct msm_gem_address_space *aspace;
 
 	struct mipi_dsi_host host;
 	struct dsi_bridge    *bridge;
 	u32 cmd_engine_refcount;
 
+	struct sde_power_handle *phandle;
+	struct sde_power_client *cont_splash_client;
+
 	void *clk_mngr;
 	void *dsi_clk_handle;
 	void *mdp_clk_handle;
@@ -206,6 +212,11 @@
 
 	bool misr_enable;
 	u32 misr_frame_count;
+	/* multiple dsi error handlers */
+	struct workqueue_struct *err_workq;
+	struct work_struct fifo_underflow_work;
+	struct work_struct fifo_overflow_work;
+	struct work_struct lp_rx_timeout_work;
 };
 
 int dsi_display_dev_probe(struct platform_device *pdev);
@@ -359,6 +370,22 @@
 int dsi_display_prepare(struct dsi_display *display);
 
 /**
+ * dsi_display_splash_res_cleanup() - cleanup for continuous splash
+ * @display:    Pointer to dsi display
+ * Returns:     Zero on success
+ */
+int dsi_display_splash_res_cleanup(struct dsi_display *display);
+
+/**
+ * dsi_display_config_ctrl_for_cont_splash()- Enable engine modes for DSI
+ *                                     controller during continuous splash
+ * @display: Handle to DSI display
+ *
+ * Return:        returns error code
+ */
+int dsi_display_config_ctrl_for_cont_splash(struct dsi_display *display);
+
+/**
  * dsi_display_enable() - enable display
  * @display:            Handle to display.
  *
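
For reference, a minimal sketch of how the two new exports are used together on the first frame after boot, mirroring the handover done in dsi_display_enable(); the helper name is illustrative and error handling is trimmed:

static int example_cont_splash_handover(struct dsi_display *display)
{
	int rc;

	/* Nothing to do when the bootloader did not light up the panel */
	if (!display->is_cont_splash_enabled)
		return 0;

	/* Engines were left running by the bootloader; just configure them */
	rc = dsi_display_config_ctrl_for_cont_splash(display);
	if (rc)
		return rc;

	/* Drop the clock/gdsc votes taken in splash resource init */
	rc = dsi_display_splash_res_cleanup(display);
	if (!rc)
		display->panel->panel_initialized = true;

	return rc;
}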
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
index 280c754..91da637 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
@@ -65,7 +65,7 @@
 		dsi_mode->dsi_mode_flags |= DSI_MODE_FLAG_VRR;
 }
 
-static void convert_to_drm_mode(const struct dsi_display_mode *dsi_mode,
+void dsi_convert_to_drm_mode(const struct dsi_display_mode *dsi_mode,
 				struct drm_display_mode *drm_mode)
 {
 	memset(drm_mode, 0, sizeof(*drm_mode));
@@ -129,6 +129,9 @@
 		return;
 	}
 
+	if (!c_bridge || !c_bridge->display) {
+		pr_err("Incorrect bridge details\n");
+		return;
+	}
+
 	/* By this point mode should have been validated through mode_fixup */
 	rc = dsi_display_set_mode(c_bridge->display,
 			&(c_bridge->dsi_mode), 0x0);
@@ -157,11 +160,16 @@
 	rc = dsi_display_enable(c_bridge->display);
 	if (rc) {
 		pr_err("[%d] DSI display enable failed, rc=%d\n",
-		       c_bridge->id, rc);
+				c_bridge->id, rc);
 		(void)dsi_display_unprepare(c_bridge->display);
 	}
 	SDE_ATRACE_END("dsi_display_enable");
 	SDE_ATRACE_END("dsi_bridge_pre_enable");
+
+	rc = dsi_display_splash_res_cleanup(c_bridge->display);
+	if (rc)
+		pr_err("Continuous splash pipeline cleanup failed, rc=%d\n",
+									rc);
 }
 
 static void dsi_bridge_enable(struct drm_bridge *bridge)
@@ -274,7 +282,7 @@
 
 	if (bridge->encoder && bridge->encoder->crtc) {
 
-		convert_to_dsi_mode(&bridge->encoder->crtc->mode,
+		convert_to_dsi_mode(&bridge->encoder->crtc->state->mode,
 							&cur_dsi_mode);
 		rc = dsi_display_validate_mode_vrr(c_bridge->display,
 					&cur_dsi_mode, &dsi_mode);
@@ -290,7 +298,7 @@
 			dsi_mode.dsi_mode_flags |= DSI_MODE_FLAG_DMS;
 	}
 
-	convert_to_drm_mode(&dsi_mode, adjusted_mode);
+	dsi_convert_to_drm_mode(&dsi_mode, adjusted_mode);
 
 	return true;
 }
@@ -330,6 +338,11 @@
 			sizeof(dsi_mode.priv_info->dsc));
 	}
 
+	if (dsi_mode.priv_info->roi_caps.enabled) {
+		memcpy(&mode_info->roi_caps, &dsi_mode.priv_info->roi_caps,
+			sizeof(dsi_mode.priv_info->roi_caps));
+	}
+
 	return 0;
 }
 
@@ -344,8 +357,7 @@
 };
 
 int dsi_conn_post_init(struct drm_connector *connector,
-		void *info,
-		void *display)
+		void *info, void *display, struct msm_mode_info *mode_info)
 {
 	struct dsi_display *dsi_display = display;
 	struct dsi_panel *panel;
@@ -444,23 +456,23 @@
 		break;
 	}
 
-	if (panel->roi_caps.enabled) {
+	if (mode_info && mode_info->roi_caps.enabled) {
 		sde_kms_info_add_keyint(info, "partial_update_num_roi",
-				panel->roi_caps.num_roi);
+				mode_info->roi_caps.num_roi);
 		sde_kms_info_add_keyint(info, "partial_update_xstart",
-				panel->roi_caps.align.xstart_pix_align);
+				mode_info->roi_caps.align.xstart_pix_align);
 		sde_kms_info_add_keyint(info, "partial_update_walign",
-				panel->roi_caps.align.width_pix_align);
+				mode_info->roi_caps.align.width_pix_align);
 		sde_kms_info_add_keyint(info, "partial_update_wmin",
-				panel->roi_caps.align.min_width);
+				mode_info->roi_caps.align.min_width);
 		sde_kms_info_add_keyint(info, "partial_update_ystart",
-				panel->roi_caps.align.ystart_pix_align);
+				mode_info->roi_caps.align.ystart_pix_align);
 		sde_kms_info_add_keyint(info, "partial_update_halign",
-				panel->roi_caps.align.height_pix_align);
+				mode_info->roi_caps.align.height_pix_align);
 		sde_kms_info_add_keyint(info, "partial_update_hmin",
-				panel->roi_caps.align.min_height);
+				mode_info->roi_caps.align.min_height);
 		sde_kms_info_add_keyint(info, "partial_update_roimerge",
-				panel->roi_caps.merge_rois);
+				mode_info->roi_caps.merge_rois);
 	}
 
 end:
@@ -554,7 +566,7 @@
 		struct drm_display_mode *m;
 
 		memset(&drm_mode, 0x0, sizeof(drm_mode));
-		convert_to_drm_mode(&modes[i], &drm_mode);
+		dsi_convert_to_drm_mode(&modes[i], &drm_mode);
 		m = drm_mode_duplicate(connector->dev, &drm_mode);
 		if (!m) {
 			pr_err("failed to add mode %ux%u\n",
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
index 828e65d..9a47969 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
@@ -37,11 +37,13 @@
  * @connector: Pointer to drm connector structure
  * @info: Pointer to sde connector info structure
  * @display: Pointer to private display handle
+ * @mode_info: Pointer to mode info structure
  * Returns: Zero on success
  */
 int dsi_conn_post_init(struct drm_connector *connector,
 		void *info,
-		void *display);
+		void *display,
+		struct msm_mode_info *mode_info);
 
 /**
  * dsi_conn_detect - callback to determine if connector is connected
@@ -128,4 +130,12 @@
  */
 int dsi_conn_post_kickoff(struct drm_connector *connector);
 
+/**
+ * dsi_convert_to_drm_mode - Update drm mode with dsi mode information
+ * @dsi_mode: input parameter. structure having dsi mode information.
+ * @drm_mode: output parameter. DRM mode set for the display
+ */
+void dsi_convert_to_drm_mode(const struct dsi_display_mode *dsi_mode,
+				struct drm_display_mode *drm_mode);
+
 #endif /* _DSI_DRM_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index 956db0e..d0cb51b 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -493,7 +493,6 @@
 	}
 
 	for (i = 0; i < count; i++) {
-		/* TODO:  handle last command */
 		if (state == DSI_CMD_SET_STATE_LP)
 			cmds->msg.flags |= MIPI_DSI_MSG_USE_LPM;
 
@@ -746,6 +745,10 @@
 	if (rc)
 		pr_err("qcom,mdss-dsi-h-sync-skew is not defined, rc=%d\n", rc);
 
+	pr_debug("panel horz active:%d front_portch:%d back_porch:%d sync_skew:%d\n",
+		mode->h_active, mode->h_front_porch, mode->h_back_porch,
+		mode->h_sync_width);
+
 	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-panel-height",
 				  &mode->v_active);
 	if (rc) {
@@ -777,6 +780,9 @@
 		       rc);
 		goto error;
 	}
+	pr_debug("panel vert active:%d front_portch:%d back_porch:%d pulse_width:%d\n",
+		mode->v_active, mode->v_front_porch, mode->v_back_porch,
+		mode->v_sync_width);
 
 error:
 	return rc;
@@ -2176,7 +2182,9 @@
 
 	intf_width = mode->timing.h_active;
 	if (intf_width % priv_info->dsc.slice_width) {
-		pr_err("invalid slice width for the panel\n");
+		pr_err("invalid slice width for the intf width:%d slice width:%d\n",
+			intf_width, priv_info->dsc.slice_width);
+		rc = -EINVAL;
 		goto error;
 	}
 
@@ -2395,21 +2403,37 @@
 	return rc;
 }
 
-static int dsi_panel_parse_partial_update_caps(struct dsi_panel *panel,
-					       struct device_node *of_node)
+static int dsi_panel_parse_partial_update_caps(struct dsi_display_mode *mode,
+				struct device_node *of_node)
 {
-	struct msm_roi_caps *roi_caps = &panel->roi_caps;
+	struct msm_roi_caps *roi_caps = NULL;
 	const char *data;
 	int rc = 0;
 
+	if (!mode || !mode->priv_info) {
+		pr_err("invalid arguments\n");
+		return -EINVAL;
+	}
+
+	roi_caps = &mode->priv_info->roi_caps;
+
 	memset(roi_caps, 0, sizeof(*roi_caps));
 
 	data = of_get_property(of_node, "qcom,partial-update-enabled", NULL);
 	if (data) {
 		if (!strcmp(data, "dual_roi"))
 			roi_caps->num_roi = 2;
-		else
+		else if (!strcmp(data, "single_roi"))
 			roi_caps->num_roi = 1;
+		else {
+			pr_info(
+			"invalid value for qcom,partial-update-enabled: %s\n",
+			data);
+			return 0;
+		}
+	} else {
+		pr_info("partial update disabled as the property is not set\n");
+		return 0;
 	}
 
 	roi_caps->merge_rois = of_property_read_bool(of_node,
@@ -2422,7 +2446,7 @@
 
 	if (roi_caps->enabled)
 		rc = dsi_panel_parse_roi_alignment(of_node,
-				&panel->roi_caps.align);
+				&roi_caps->align);
 
 	if (rc)
 		memset(roi_caps, 0, sizeof(*roi_caps));
@@ -2739,10 +2763,6 @@
 	if (rc)
 		pr_err("failed to parse hdr config, rc=%d\n", rc);
 
-	rc = dsi_panel_parse_partial_update_caps(panel, of_node);
-	if (rc)
-		pr_debug("failed to partial update caps, rc=%d\n", rc);
-
 	rc = dsi_panel_get_mode_count(panel, of_node);
 	if (rc) {
 		pr_err("failed to get mode count, rc=%d\n", rc);
@@ -3051,6 +3071,10 @@
 			"failed to parse panel phy timings, rc=%d\n", rc);
 			goto parse_fail;
 		}
+
+		rc = dsi_panel_parse_partial_update_caps(mode, child_np);
+		if (rc)
+			pr_err("failed to partial update caps, rc=%d\n", rc);
 	}
 	goto done;
 
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
index f63fd27..ea67f45 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
@@ -156,8 +156,6 @@
 	enum dsi_op_mode panel_mode;
 
 	struct dsi_dfps_capabilities dfps_caps;
-	struct msm_roi_caps roi_caps;
-
 	struct dsi_panel_phy_props phy_props;
 
 	struct dsi_display_mode *cur_mode;
@@ -193,6 +191,16 @@
 	return panel->panel_initialized;
 }
 
+static inline void dsi_panel_acquire_panel_lock(struct dsi_panel *panel)
+{
+	mutex_lock(&panel->panel_lock);
+}
+
+static inline void dsi_panel_release_panel_lock(struct dsi_panel *panel)
+{
+	mutex_unlock(&panel->panel_lock);
+}
+
 struct dsi_panel *dsi_panel_get(struct device *parent,
 				struct device_node *of_node,
 				int topology_override);
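
For reference, the intended use of the new lock helpers is to bracket any out-of-band panel access, exactly as dsi_display_read_status() and the aspace callback do in this patch; a minimal illustrative sketch (function name and body are placeholders):

static void example_panel_side_access(struct dsi_panel *panel)
{
	dsi_panel_acquire_panel_lock(panel);
	/* reads/writes that must not race with in-flight panel commands */
	dsi_panel_release_panel_lock(panel);
}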
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
index a91dba8..197d448 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
@@ -33,6 +33,8 @@
 
 #define DSI_PHY_DEFAULT_LABEL "MDSS PHY CTRL"
 
+#define BITS_PER_BYTE	8
+
 struct dsi_phy_list_item {
 	struct msm_dsi_phy *phy;
 	struct list_head list;
@@ -290,6 +292,14 @@
 
 	/* Actual timing values are dependent on panel */
 	timing->count_per_lane = phy->ver_info->timing_cfg_count;
+
+	phy->allow_phy_power_off = of_property_read_bool(pdev->dev.of_node,
+			"qcom,panel-allow-phy-poweroff");
+
+	of_property_read_u32(pdev->dev.of_node,
+			"qcom,dsi-phy-regulator-min-datarate-bps",
+			&phy->regulator_min_datarate_bps);
+
 	return 0;
 err:
 	lane->count_per_lane = 0;
@@ -641,7 +651,8 @@
 			goto error;
 		}
 
-		if (dsi_phy->dsi_phy_state == DSI_PHY_ENGINE_OFF) {
+		if (dsi_phy->dsi_phy_state == DSI_PHY_ENGINE_OFF &&
+				dsi_phy->regulator_required) {
 			rc = dsi_pwr_enable_regulator(
 				&dsi_phy->pwr_info.phy_pwr, true);
 			if (rc) {
@@ -652,7 +663,8 @@
 			}
 		}
 	} else {
-		if (dsi_phy->dsi_phy_state == DSI_PHY_ENGINE_OFF) {
+		if (dsi_phy->dsi_phy_state == DSI_PHY_ENGINE_OFF &&
+				dsi_phy->regulator_required) {
 			rc = dsi_pwr_enable_regulator(
 				&dsi_phy->pwr_info.phy_pwr, false);
 			if (rc) {
@@ -787,6 +799,7 @@
  * @config:             DSI host configuration.
  * @pll_source:         Source PLL for PHY clock.
  * @skip_validation:    Validation will not be performed on parameters.
+ * @is_cont_splash_enabled:    True if continuous splash is enabled.
  *
  * Validates and enables DSI PHY.
  *
@@ -795,7 +808,8 @@
 int dsi_phy_enable(struct msm_dsi_phy *phy,
 		   struct dsi_host_config *config,
 		   enum dsi_phy_pll_source pll_source,
-		   bool skip_validation)
+		   bool skip_validation,
+		   bool is_cont_splash_enabled)
 {
 	int rc = 0;
 
@@ -829,7 +843,10 @@
 		goto error;
 	}
 
-	dsi_phy_enable_hw(phy);
+	if (!is_cont_splash_enabled) {
+		dsi_phy_enable_hw(phy);
+		pr_debug("cont splash not enabled, phy enable required\n");
+	}
 	phy->dsi_phy_state = DSI_PHY_ENGINE_ON;
 
 error:
@@ -838,6 +855,21 @@
 	return rc;
 }
 
+int dsi_phy_lane_reset(struct msm_dsi_phy *phy)
+{
+	int ret = 0;
+
+	if (!phy)
+		return ret;
+
+	mutex_lock(&phy->phy_lock);
+	if (phy->hw.ops.phy_lane_reset)
+		ret = phy->hw.ops.phy_lane_reset(&phy->hw);
+	mutex_unlock(&phy->phy_lock);
+
+	return ret;
+}
+
 /**
  * dsi_phy_disable() - disable DSI PHY hardware.
  * @phy:        DSI PHY handle.
@@ -876,6 +908,8 @@
 		return -EINVAL;
 	}
 
+	pr_debug("[%s] enable=%d\n", phy->name, enable);
+
 	mutex_lock(&phy->phy_lock);
 	if (enable) {
 		if (phy->hw.ops.phy_idle_on)
@@ -884,7 +918,17 @@
 		if (phy->hw.ops.regulator_enable)
 			phy->hw.ops.regulator_enable(&phy->hw,
 				&phy->cfg.regulators);
+
+		if (phy->hw.ops.enable)
+			phy->hw.ops.enable(&phy->hw, &phy->cfg);
+
+		phy->dsi_phy_state = DSI_PHY_ENGINE_ON;
 	} else {
+		phy->dsi_phy_state = DSI_PHY_ENGINE_OFF;
+
+		if (phy->hw.ops.disable)
+			phy->hw.ops.disable(&phy->hw, &phy->cfg);
+
 		if (phy->hw.ops.phy_idle_off)
 			phy->hw.ops.phy_idle_off(&phy->hw);
 	}
@@ -894,6 +938,33 @@
 }
 
 /**
+ * dsi_phy_set_clk_freq() - set DSI PHY clock frequency setting
+ * @phy:          DSI PHY handle
+ * @clk_freq:     link clock frequency
+ *
+ * Return: error code.
+ */
+int dsi_phy_set_clk_freq(struct msm_dsi_phy *phy,
+		struct link_clk_freq *clk_freq)
+{
+	if (!phy || !clk_freq) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	phy->regulator_required = clk_freq->byte_clk_rate >
+		(phy->regulator_min_datarate_bps / BITS_PER_BYTE);
+
+	pr_debug("[%s] lane_datarate=%u min_datarate=%u required=%d\n",
+			phy->name,
+			clk_freq->byte_clk_rate * BITS_PER_BYTE,
+			phy->regulator_min_datarate_bps,
+			phy->regulator_required);
+
+	return 0;
+}
+
+/**
  * dsi_phy_set_timing_params() - timing parameters for the panel
  * @phy:          DSI PHY handle
  * @timing:       array holding timing params.
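
For reference, the regulator_required decision made in dsi_phy_set_clk_freq() reduces to a single comparison; the helper and the example numbers below are illustrative assumptions (BITS_PER_BYTE is 8):

static bool example_phy_needs_regulator(u32 byte_clk_rate_hz,
					u32 regulator_min_datarate_bps)
{
	/*
	 * e.g. with qcom,dsi-phy-regulator-min-datarate-bps = 1200000000
	 * (1.2 Gbps per lane), a mode clocking the byte clock at 187500000 Hz
	 * (1.5 Gbps per lane) keeps the regulator on: 187500000 > 150000000.
	 */
	return byte_clk_rate_hz > (regulator_min_datarate_bps / 8);
}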
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
index e721486..a158812 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
@@ -67,6 +67,9 @@
  * @mode:              Current mode.
  * @data_lanes:        Number of data lanes used.
  * @dst_format:        Destination format.
+ * @allow_phy_power_off: True if PHY is allowed to power off when idle
+ * @regulator_min_datarate_bps: Minimum per lane data rate to turn on regulator
+ * @regulator_required: True if phy regulator is required
  */
 struct msm_dsi_phy {
 	struct platform_device *pdev;
@@ -88,6 +91,10 @@
 	struct dsi_mode_info mode;
 	enum dsi_data_lanes data_lanes;
 	enum dsi_pixel_format dst_format;
+
+	bool allow_phy_power_off;
+	u32 regulator_min_datarate_bps;
+	bool regulator_required;
 };
 
 /**
@@ -159,6 +166,7 @@
  * @config:             DSI host configuration.
  * @pll_source:         Source PLL for PHY clock.
  * @skip_validation:    Validation will not be performed on parameters.
+ * @is_cont_splash_enabled:    True if continuous splash is enabled.
  *
  * Validates and enables DSI PHY.
  *
@@ -167,7 +175,8 @@
 int dsi_phy_enable(struct msm_dsi_phy *dsi_phy,
 		   struct dsi_host_config *config,
 		   enum dsi_phy_pll_source pll_source,
-		   bool skip_validation);
+		   bool skip_validation,
+		   bool is_cont_splash_enabled);
 
 /**
  * dsi_phy_disable() - disable DSI PHY hardware.
@@ -209,6 +218,16 @@
 int dsi_phy_idle_ctrl(struct msm_dsi_phy *phy, bool enable);
 
 /**
+ * dsi_phy_set_clk_freq() - set DSI PHY clock frequency setting
+ * @phy:          DSI PHY handle
+ * @clk_freq:     link clock frequency
+ *
+ * Return: error code.
+ */
+int dsi_phy_set_clk_freq(struct msm_dsi_phy *phy,
+		struct link_clk_freq *clk_freq);
+
+/**
  * dsi_phy_set_timing_params() - timing parameters for the panel
  * @phy:          DSI PHY handle
  * @timing:       array holding timing params.
@@ -223,6 +242,14 @@
 			      u32 *timing, u32 size);
 
 /**
+ * dsi_phy_lane_reset() - Reset DSI PHY lanes in case of error
+ * @phy:	DSI PHY handle
+ *
+ * Return: error code.
+ */
+int dsi_phy_lane_reset(struct msm_dsi_phy *phy);
+
+/**
  * dsi_phy_drv_register() - register platform driver for dsi phy
  */
 void dsi_phy_drv_register(void);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
index 51c2f46..efebd99 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
@@ -233,6 +233,13 @@
 	int (*phy_timing_val)(struct dsi_phy_per_lane_cfgs *timing_val,
 				u32 *timing, u32 size);
 
+	/**
+	 * phy_lane_reset() - Reset dsi phy lanes in case of error.
+	 * @phy:      Pointer to DSI PHY hardware object.
+	 * Return:    error code.
+	 */
+	int (*phy_lane_reset)(struct dsi_phy_hw *phy);
+
 	void *timing_ops;
 	struct phy_ulps_config_ops ulps_ops;
 };
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c
index 371239d..8d91141 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c
@@ -17,6 +17,7 @@
 #include <linux/iopoll.h>
 #include "dsi_hw.h"
 #include "dsi_phy_hw.h"
+#include "dsi_catalog.h"
 
 #define DSIPHY_CMN_CLK_CFG0						0x010
 #define DSIPHY_CMN_CLK_CFG1						0x014
@@ -373,6 +374,29 @@
 		 lanes);
 }
 
+int dsi_phy_hw_v3_0_lane_reset(struct dsi_phy_hw *phy)
+{
+	int ret = 0, loop = 10, u_dly = 200;
+	u32 ln_status = 0;
+
+	while ((ln_status != 0x1f) && loop) {
+		DSI_W32(phy, DSIPHY_CMN_LANE_CTRL3, 0x1f);
+		wmb(); /* ensure register is committed */
+		loop--;
+		udelay(u_dly);
+		ln_status = DSI_R32(phy, DSIPHY_CMN_LANE_STATUS1);
+		pr_debug("trial no: %d\n", loop);
+	}
+
+	if (ln_status != 0x1f)
+		pr_debug("could not reset phy lanes\n");
+
+	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL3, 0x0);
+	wmb(); /* ensure register is committed */
+
+	return ret;
+}
+
 void dsi_phy_hw_v3_0_ulps_exit(struct dsi_phy_hw *phy,
 			struct dsi_phy_cfg *cfg, u32 lanes)
 {
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 28f2e7c..95bdc36 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -16,6 +16,9 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/msm_drm_notify.h>
+#include <linux/notifier.h>
+
 #include "msm_drv.h"
 #include "msm_kms.h"
 #include "msm_gem.h"
@@ -30,6 +33,47 @@
 	struct kthread_work commit_work;
 };
 
+static BLOCKING_NOTIFIER_HEAD(msm_drm_notifier_list);
+
+/**
+ * msm_drm_register_client - register a client notifier
+ * @nb: notifier block to callback on events
+ *
+ * This function registers a notifier callback function
+ * to msm_drm_notifier_list, which would be called when
+ * received unblank/power down event.
+ */
+int msm_drm_register_client(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_register(&msm_drm_notifier_list,
+						nb);
+}
+
+/**
+ * msm_drm_unregister_client - unregister a client notifier
+ * @nb: notifier block to callback on events
+ *
+ * This function unregisters the callback function from
+ * msm_drm_notifier_list.
+ */
+int msm_drm_unregister_client(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_unregister(&msm_drm_notifier_list,
+						  nb);
+}
+
+/**
+ * msm_drm_notifier_call_chain - notify clients of drm_events
+ * @val: event MSM_DRM_EARLY_EVENT_BLANK or MSM_DRM_EVENT_BLANK
+ * @v: notifier data, including display id and display blank
+ *     event (unblank or power down).
+ */
+static int msm_drm_notifier_call_chain(unsigned long val, void *v)
+{
+	return blocking_notifier_call_chain(&msm_drm_notifier_list, val,
+					    v);
+}
+
 /* block until specified crtcs are no longer pending update, and
  * atomically mark them as pending update
  */
@@ -97,7 +141,8 @@
 	struct drm_connector_state *old_conn_state;
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *old_crtc_state;
-	int i;
+	struct msm_drm_notifier notifier_data;
+	int i, blank;
 
 	SDE_ATRACE_BEGIN("msm_disable");
 	for_each_connector_in_state(old_state, connector, old_conn_state, i) {
@@ -144,6 +189,11 @@
 		DRM_DEBUG_ATOMIC("disabling [ENCODER:%d:%s]\n",
 				 encoder->base.id, encoder->name);
 
+		blank = MSM_DRM_BLANK_POWERDOWN;
+		notifier_data.data = &blank;
+		notifier_data.id = crtc_idx;
+		msm_drm_notifier_call_chain(MSM_DRM_EARLY_EVENT_BLANK,
+					     &notifier_data);
 		/*
 		 * Each encoder has at most one connector (since we always steal
 		 * it away), so we won't call disable hooks twice.
@@ -159,6 +209,8 @@
 			funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
 
 		drm_bridge_post_disable(encoder->bridge);
+		msm_drm_notifier_call_chain(MSM_DRM_EVENT_BLANK,
+					    &notifier_data);
 	}
 
 	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
@@ -296,10 +348,11 @@
 	struct drm_crtc_state *old_crtc_state;
 	struct drm_connector *connector;
 	struct drm_connector_state *old_conn_state;
+	struct msm_drm_notifier notifier_data;
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_kms *kms = priv->kms;
 	int bridge_enable_count = 0;
-	int i;
+	int i, blank;
 
 	SDE_ATRACE_BEGIN("msm_enable");
 	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
@@ -350,6 +403,12 @@
 		DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n",
 				 encoder->base.id, encoder->name);
 
+		blank = MSM_DRM_BLANK_UNBLANK;
+		notifier_data.data = &blank;
+		notifier_data.id =
+			connector->state->crtc->index;
+		msm_drm_notifier_call_chain(MSM_DRM_EARLY_EVENT_BLANK,
+					    &notifier_data);
 		/*
 		 * Each encoder has at most one connector (since we always steal
 		 * it away), so we won't call enable hooks twice.
@@ -391,6 +450,8 @@
 				 encoder->base.id, encoder->name);
 
 		drm_bridge_enable(encoder->bridge);
+		msm_drm_notifier_call_chain(MSM_DRM_EVENT_BLANK,
+					    &notifier_data);
 	}
 	SDE_ATRACE_END("msm_enable");
 }
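
For reference, a client of the new blank/unblank notifier chain would look roughly like the sketch below. The callback and notifier_block names are illustrative; the msm_drm_* constants and the msm_drm_notifier layout come from this patch, and the register/unregister prototypes are assumed to be exported via linux/msm_drm_notify.h:

static int example_drm_notifier_cb(struct notifier_block *nb,
				   unsigned long event, void *data)
{
	struct msm_drm_notifier *evdata = data;
	int blank;

	if (event != MSM_DRM_EVENT_BLANK || !evdata || !evdata->data)
		return 0;

	blank = *(int *)evdata->data;
	if (blank == MSM_DRM_BLANK_UNBLANK)
		pr_debug("display %d unblanked\n", evdata->id);
	else if (blank == MSM_DRM_BLANK_POWERDOWN)
		pr_debug("display %d powered down\n", evdata->id);

	return 0;
}

static struct notifier_block example_drm_nb = {
	.notifier_call = example_drm_notifier_cb,
};

/* In a client driver: msm_drm_register_client(&example_drm_nb) at probe,
 * msm_drm_unregister_client(&example_drm_nb) at remove.
 */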
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index c0f796c..33778f8e 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -215,12 +215,16 @@
 	struct msm_kms *kms = priv->kms;
 	struct vblank_event *vbl_ev, *tmp;
 	unsigned long flags;
+	LIST_HEAD(tmp_head);
 
 	spin_lock_irqsave(&vbl_ctrl->lock, flags);
 	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
 		list_del(&vbl_ev->node);
-		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+		list_add_tail(&vbl_ev->node, &tmp_head);
+	}
+	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
 
+	list_for_each_entry_safe(vbl_ev, tmp, &tmp_head, node) {
 		if (vbl_ev->enable)
 			kms->funcs->enable_vblank(kms,
 						priv->crtcs[vbl_ev->crtc_id]);
@@ -229,11 +233,7 @@
 						priv->crtcs[vbl_ev->crtc_id]);
 
 		kfree(vbl_ev);
-
-		spin_lock_irqsave(&vbl_ctrl->lock, flags);
 	}
-
-	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
 }
 
 static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
@@ -691,6 +691,14 @@
 
 	drm_mode_config_reset(ddev);
 
+	if (kms && kms->funcs && kms->funcs->cont_splash_config) {
+		ret = kms->funcs->cont_splash_config(kms);
+		if (ret) {
+			dev_err(dev, "kms cont_splash config failed.\n");
+			goto fail;
+		}
+	}
+
 #ifdef CONFIG_DRM_FBDEV_EMULATION
 	if (fbdev)
 		priv->fbdev = msm_fbdev_init(ddev);
@@ -1365,7 +1373,7 @@
 		if (node->event.type != event->type ||
 			obj->id != node->info.object_id)
 			continue;
-		len = event->length + sizeof(struct drm_msm_event_resp);
+		len = event->length + sizeof(struct msm_drm_event);
 		if (node->base.file_priv->event_space < len) {
 			DRM_ERROR("Insufficient space %d for event %x len %d\n",
 				node->base.file_priv->event_space, event->type,
@@ -1379,7 +1387,8 @@
 		notify->base.event = &notify->event;
 		notify->base.pid = node->base.pid;
 		notify->event.type = node->event.type;
-		notify->event.length = len;
+		notify->event.length = event->length +
+					sizeof(struct drm_msm_event_resp);
 		memcpy(&notify->info, &node->info, sizeof(notify->info));
 		memcpy(notify->data, payload, event->length);
 		ret = drm_event_reserve_init_locked(dev, node->base.file_priv,
@@ -1766,6 +1775,14 @@
 		struct device_node *np = dev->of_node;
 		unsigned int i;
 
+		for (i = 0; ; i++) {
+			node = of_parse_phandle(np, "connectors", i);
+			if (!node)
+				break;
+
+			component_match_add(dev, matchptr, compare_of, node);
+		}
+
 		for (i = 0; i < MAX_DSI_ACTIVE_DISPLAY; i++) {
 			node = dsi_display_get_boot_display(i);
 
@@ -1777,13 +1794,6 @@
 			}
 		}
 
-		for (i = 0; ; i++) {
-			node = of_parse_phandle(np, "connectors", i);
-			if (!node)
-				break;
-
-			component_match_add(dev, matchptr, compare_of, node);
-		}
 		return 0;
 	}
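
For reference, the vblank_ctrl rework above follows a common kernel idiom: detach the pending nodes onto a private list while holding the spinlock, then do the heavy work unlocked. A self-contained sketch of the same idiom with illustrative types (this version uses list_splice_init() instead of the per-node moves in the patch):

struct example_ev {
	struct list_head node;
	int payload;
};

static void example_drain(struct list_head *pending, spinlock_t *lock)
{
	struct example_ev *ev, *tmp;
	unsigned long flags;
	LIST_HEAD(local);

	/* Detach every queued node in one step while holding the lock */
	spin_lock_irqsave(lock, flags);
	list_splice_init(pending, &local);
	spin_unlock_irqrestore(lock, flags);

	/* Process and free the nodes without the lock held */
	list_for_each_entry_safe(ev, tmp, &local, node)
		kfree(ev);
}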
 
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 8554574..5bb474d 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -162,8 +162,11 @@
 enum msm_mdp_conn_property {
 	/* blob properties, always put these first */
 	CONNECTOR_PROP_SDE_INFO,
+	CONNECTOR_PROP_MODE_INFO,
 	CONNECTOR_PROP_HDR_INFO,
+	CONNECTOR_PROP_EXT_HDR_INFO,
 	CONNECTOR_PROP_PP_DITHER,
+	CONNECTOR_PROP_HDR_METADATA,
 
 	/* # of blob properties */
 	CONNECTOR_PROP_BLOBCOUNT,
@@ -229,11 +232,13 @@
  * @MSM_ENC_COMMIT_DONE - wait for the driver to flush the registers to HW
  * @MSM_ENC_TX_COMPLETE - wait for the HW to transfer the frame to panel
  * @MSM_ENC_VBLANK - wait for the HW VBLANK event (for driver-internal waiters)
+ * @MSM_ENC_ACTIVE_REGION - wait for the TG to be in active pixel region
  */
 enum msm_event_wait {
 	MSM_ENC_COMMIT_DONE = 0,
 	MSM_ENC_TX_COMPLETE,
 	MSM_ENC_VBLANK,
+	MSM_ENC_ACTIVE_REGION,
 };
 
 /**
@@ -409,6 +414,7 @@
  * @clk_rate:	     DSI bit clock per lane in HZ.
  * @topology:        supported topology for the mode
  * @comp_info:       compression info supported
+ * @roi_caps:        panel roi capabilities
  */
 struct msm_mode_info {
 	uint32_t frame_rate;
@@ -419,6 +425,7 @@
 	uint64_t clk_rate;
 	struct msm_display_topology topology;
 	struct msm_compression_info comp_info;
+	struct msm_roi_caps roi_caps;
 };
 
 /**
@@ -480,6 +487,7 @@
  */
 struct msm_display_kickoff_params {
 	struct msm_roi_list *rois;
+	struct drm_msm_ext_hdr_metadata *hdr_meta;
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index f5cdf64..e8bf244 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -42,15 +42,31 @@
 		struct drm_file *file_priv,
 		unsigned int *handle)
 {
-	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+	struct msm_framebuffer *msm_fb;
+
+	if (!fb) {
+		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+		return -EINVAL;
+	}
+
+	msm_fb = to_msm_framebuffer(fb);
+
 	return drm_gem_handle_create(file_priv,
 			msm_fb->planes[0], handle);
 }
 
 static void msm_framebuffer_destroy(struct drm_framebuffer *fb)
 {
-	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
-	int i, n = drm_format_num_planes(fb->pixel_format);
+	struct msm_framebuffer *msm_fb;
+	int i, n;
+
+	if (!fb) {
+		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+		return;
+	}
+
+	msm_fb = to_msm_framebuffer(fb);
+	n = drm_format_num_planes(fb->pixel_format);
 
 	DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
 
@@ -73,9 +89,16 @@
 #ifdef CONFIG_DEBUG_FS
 void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
 {
-	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
-	int i, n = drm_format_num_planes(fb->pixel_format);
+	struct msm_framebuffer *msm_fb;
+	int i, n;
 
+	if (!fb) {
+		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+		return;
+	}
+
+	msm_fb = to_msm_framebuffer(fb);
+	n = drm_format_num_planes(fb->pixel_format);
 	seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n",
 			fb->width, fb->height, (char *)&fb->pixel_format,
 			drm_framebuffer_read_refcount(fb), fb->base.id);
@@ -90,8 +113,14 @@
 
 void msm_framebuffer_set_kmap(struct drm_framebuffer *fb, bool enable)
 {
-	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+	struct msm_framebuffer *msm_fb;
 
+	if (!fb) {
+		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+		return;
+	}
+
+	msm_fb = to_msm_framebuffer(fb);
 	if (enable)
 		msm_fb->flags |= MSM_FRAMEBUFFER_FLAG_KMAP;
 	else
@@ -100,10 +129,17 @@
 
 static int msm_framebuffer_kmap(struct drm_framebuffer *fb)
 {
-	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
-	int i, n = drm_format_num_planes(fb->pixel_format);
+	struct msm_framebuffer *msm_fb;
+	int i, n;
 	struct drm_gem_object *bo;
 
+	if (!fb) {
+		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+		return -EINVAL;
+	}
+
+	msm_fb = to_msm_framebuffer(fb);
+	n = drm_format_num_planes(fb->pixel_format);
 	if (atomic_inc_return(&msm_fb->kmap_count) > 1)
 		return 0;
 
@@ -124,10 +160,17 @@
 
 static void msm_framebuffer_kunmap(struct drm_framebuffer *fb)
 {
-	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
-	int i, n = drm_format_num_planes(fb->pixel_format);
+	struct msm_framebuffer *msm_fb;
+	int i, n;
 	struct drm_gem_object *bo;
 
+	if (!fb) {
+		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+		return;
+	}
+
+	msm_fb = to_msm_framebuffer(fb);
+	n = drm_format_num_planes(fb->pixel_format);
 	if (atomic_dec_return(&msm_fb->kmap_count) > 0)
 		return;
 
@@ -151,10 +194,17 @@
 int msm_framebuffer_prepare(struct drm_framebuffer *fb,
 		struct msm_gem_address_space *aspace)
 {
-	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
-	int ret, i, n = drm_format_num_planes(fb->pixel_format);
+	struct msm_framebuffer *msm_fb;
+	int ret, i, n;
 	uint32_t iova;
 
+	if (!fb) {
+		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+		return -EINVAL;
+	}
+
+	msm_fb = to_msm_framebuffer(fb);
+	n = drm_format_num_planes(fb->pixel_format);
 	for (i = 0; i < n; i++) {
 		ret = msm_gem_get_iova(msm_fb->planes[i], aspace, &iova);
 		DBG("FB[%u]: iova[%d]: %08x (%d)", fb->base.id, i, iova, ret);
@@ -171,8 +221,16 @@
 void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
 		struct msm_gem_address_space *aspace)
 {
-	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
-	int i, n = drm_format_num_planes(fb->pixel_format);
+	struct msm_framebuffer *msm_fb;
+	int i, n;
+
+	if (fb == NULL) {
+		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+		return;
+	}
+
+	msm_fb = to_msm_framebuffer(fb);
+	n = drm_format_num_planes(fb->pixel_format);
 
 	if (msm_fb->flags & MSM_FRAMEBUFFER_FLAG_KMAP)
 		msm_framebuffer_kunmap(fb);
@@ -184,7 +242,14 @@
 uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
 		struct msm_gem_address_space *aspace, int plane)
 {
-	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+	struct msm_framebuffer *msm_fb;
+
+	if (!fb) {
+		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+		return -EINVAL;
+	}
+
+	msm_fb = to_msm_framebuffer(fb);
 	if (!msm_fb->planes[plane])
 		return 0;
 	return msm_gem_iova(msm_fb->planes[plane], aspace) + fb->offsets[plane];
@@ -193,9 +258,15 @@
 uint32_t msm_framebuffer_phys(struct drm_framebuffer *fb,
 		int plane)
 {
-	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+	struct msm_framebuffer *msm_fb;
 	dma_addr_t phys_addr;
 
+	if (!fb) {
+		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+		return -EINVAL;
+	}
+
+	msm_fb = to_msm_framebuffer(fb);
 	if (!msm_fb->planes[plane])
 		return 0;
 
@@ -208,7 +279,14 @@
 
 struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
 {
-	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+	struct msm_framebuffer *msm_fb;
+
+	if (!fb) {
+		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+		return ERR_PTR(-EINVAL);
+	}
+
+	msm_fb = to_msm_framebuffer(fb);
 	return msm_fb->planes[plane];
 }
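
Every msm_fb.c entry point above now guards against a NULL framebuffer and logs the offending caller through the %pS printk extension with __builtin_return_address(0). The sketch below shows how such a guard could be wrapped in a helper; msm_fb_valid() is hypothetical and not part of the patch, and the comment notes why the patch open-codes the check instead:

	/*
	 * Hypothetical helper, for illustration only. Because
	 * __builtin_return_address(0) is evaluated inside this helper, %pS
	 * would print the guarded msm_framebuffer_*() function rather than
	 * its caller -- one reason the checks above are open-coded at every
	 * entry point.
	 */
	static bool msm_fb_valid(struct drm_framebuffer *fb)
	{
		if (!fb) {
			DRM_ERROR("from:%pS null fb\n",
					__builtin_return_address(0));
			return false;
		}

		return true;
	}
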
 
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 304faa6..db9e7ee 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -104,6 +104,8 @@
 	struct msm_gem_address_space *(*get_address_space)(
 			struct msm_kms *kms,
 			unsigned int domain);
+	/* handle continuous splash */
+	int (*cont_splash_config)(struct msm_kms *kms);
 };
 
 struct msm_kms {
diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c
index 92d1865..7c879651 100644
--- a/drivers/gpu/drm/msm/msm_smmu.c
+++ b/drivers/gpu/drm/msm/msm_smmu.c
@@ -33,6 +33,10 @@
 #define SZ_4G	(((size_t) SZ_1G) * 4)
 #endif
 
+#ifndef SZ_2G
+#define SZ_2G	(((size_t) SZ_1G) * 2)
+#endif
+
 struct msm_smmu_client {
 	struct device *dev;
 	struct dma_iommu_mapping *mmu_mapping;
@@ -300,26 +304,26 @@
 static struct msm_smmu_domain msm_smmu_domains[MSM_SMMU_DOMAIN_MAX] = {
 	[MSM_SMMU_DOMAIN_UNSECURE] = {
 		.label = "mdp_ns",
-		.va_start = SZ_128K,
-		.va_size = SZ_4G - SZ_128K,
+		.va_start = SZ_2G,
+		.va_size = SZ_4G - SZ_2G,
 		.secure = false,
 	},
 	[MSM_SMMU_DOMAIN_SECURE] = {
 		.label = "mdp_s",
-		.va_start = SZ_128K,
-		.va_size = SZ_4G - SZ_128K,
+		.va_start = SZ_2G,
+		.va_size = SZ_4G - SZ_2G,
 		.secure = true,
 	},
 	[MSM_SMMU_DOMAIN_NRT_UNSECURE] = {
 		.label = "rot_ns",
-		.va_start = SZ_128K,
-		.va_size = SZ_4G - SZ_128K,
+		.va_start = SZ_2G,
+		.va_size = SZ_4G - SZ_2G,
 		.secure = false,
 	},
 	[MSM_SMMU_DOMAIN_NRT_SECURE] = {
 		.label = "rot_s",
-		.va_start = SZ_128K,
-		.va_size = SZ_4G - SZ_128K,
+		.va_start = SZ_2G,
+		.va_size = SZ_4G - SZ_2G,
 		.secure = true,
 	},
 };
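
The table above narrows every SMMU domain's IOVA window from [SZ_128K, 4 GiB) to [2 GiB, 4 GiB): the start address moves up, but each window still ends on the 4 GiB boundary. A standalone, userspace-style check of that arithmetic (64-bit math, not driver code):

	#include <assert.h>

	int main(void)
	{
		unsigned long long sz_1g = 1ULL << 30;
		unsigned long long va_start = 2 * sz_1g;           /* SZ_2G */
		unsigned long long va_size = 4 * sz_1g - va_start; /* SZ_4G - SZ_2G */

		/* The remapped window still ends exactly at the 4 GiB mark. */
		assert(va_start + va_size == 4 * sz_1g);
		return 0;
	}
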
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c
index 2c5b7ea..c2419dc 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.c
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c
@@ -1016,8 +1016,10 @@
 		}
 	}
 
-	if (!found)
+	if (!found) {
+		ret = -ENOENT;
 		goto exit;
+	}
 
 	/**
 	 * sde_crtc is virtual ensure that hardware has been attached to the
@@ -1028,7 +1030,7 @@
 	    sde_crtc->num_mixers > ARRAY_SIZE(sde_crtc->mixers)) {
 		DRM_ERROR("Invalid mixer config act cnt %d max cnt %ld\n",
 			sde_crtc->num_mixers, ARRAY_SIZE(sde_crtc->mixers));
-		ret = -EINVAL;
+		ret = -EPERM;
 		goto exit;
 	}
 
@@ -1896,7 +1898,7 @@
 
 	if (!hw_dspp) {
 		DRM_ERROR("invalid dspp\n");
-		ret = -EINVAL;
+		ret = -EPERM;
 		goto exit;
 	}
 
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 88dd60da..c99fb0c 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -16,10 +16,12 @@
 
 #include "sde_kms.h"
 #include "sde_connector.h"
+#include "sde_encoder.h"
 #include <linux/backlight.h>
 #include "dsi_drm.h"
 #include "dsi_display.h"
 #include "sde_crtc.h"
+#include "sde_rm.h"
 
 #define BL_NODE_NAME_SIZE 32
 
@@ -152,7 +154,7 @@
 {
 	struct sde_connector *c_conn;
 	unsigned long irq_flags;
-	void (*cb_func)(uint32_t event_idx,
+	int (*cb_func)(uint32_t event_idx,
 			uint32_t instance_idx, void *usr,
 			uint32_t data0, uint32_t data1,
 			uint32_t data2, uint32_t data3);
@@ -175,7 +177,7 @@
 	spin_unlock_irqrestore(&c_conn->event_lock, irq_flags);
 
 	if (cb_func)
-		cb_func(event_idx, instance_idx, usr,
+		rc = cb_func(event_idx, instance_idx, usr,
 			data0, data1, data2, data3);
 	else
 		rc = -EAGAIN;
@@ -185,7 +187,7 @@
 
 int sde_connector_register_event(struct drm_connector *connector,
 		uint32_t event_idx,
-		void (*cb_func)(uint32_t event_idx,
+		int (*cb_func)(uint32_t event_idx,
 			uint32_t instance_idx, void *usr,
 			uint32_t data0, uint32_t data1,
 			uint32_t data2, uint32_t data3),
@@ -345,6 +347,39 @@
 	return 0;
 }
 
+int sde_connector_get_mode_info(struct drm_connector_state *conn_state,
+	struct msm_mode_info *mode_info)
+{
+	struct sde_connector_state *sde_conn_state = NULL;
+
+	if (!conn_state || !mode_info) {
+		SDE_ERROR("Invalid arguments\n");
+		return -EINVAL;
+	}
+
+	sde_conn_state = to_sde_connector_state(conn_state);
+	memcpy(mode_info, &sde_conn_state->mode_info,
+		sizeof(sde_conn_state->mode_info));
+
+	return 0;
+}
+
+static int sde_connector_handle_disp_recovery(uint32_t event_idx,
+			uint32_t instance_idx, void *usr,
+			uint32_t data0, uint32_t data1,
+			uint32_t data2, uint32_t data3)
+{
+	struct sde_connector *c_conn = usr;
+	int rc = 0;
+
+	if (!c_conn)
+		return -EINVAL;
+
+	rc = sde_kms_handle_recovery(c_conn->encoder);
+
+	return rc;
+}
+
 int sde_connector_get_info(struct drm_connector *connector,
 		struct msm_display_info *info)
 {
@@ -441,6 +476,54 @@
 	return rc;
 }
 
+static int _sde_connector_update_bl_scale(struct sde_connector *c_conn, int idx)
+{
+	struct drm_connector conn;
+	struct dsi_display *dsi_display;
+	struct dsi_backlight_config *bl_config;
+	uint64_t value;
+	int rc = 0;
+
+	if (!c_conn) {
+		SDE_ERROR("Invalid params sde_connector null\n");
+		return -EINVAL;
+	}
+
+	conn = c_conn->base;
+	dsi_display = c_conn->display;
+	if (!dsi_display || !dsi_display->panel) {
+		SDE_ERROR("Invalid param(s) dsi_display %pK, panel %pK\n",
+			dsi_display,
+			((dsi_display) ? dsi_display->panel : NULL));
+		return -EINVAL;
+	}
+
+	bl_config = &dsi_display->panel->bl_config;
+	value = sde_connector_get_property(conn.state, idx);
+
+	if (idx == CONNECTOR_PROP_BL_SCALE) {
+		if (value > MAX_BL_SCALE_LEVEL)
+			bl_config->bl_scale = MAX_BL_SCALE_LEVEL;
+		else
+			bl_config->bl_scale = (u32)value;
+	} else if (idx == CONNECTOR_PROP_AD_BL_SCALE) {
+		if (value > MAX_AD_BL_SCALE_LEVEL)
+			bl_config->bl_scale_ad = MAX_AD_BL_SCALE_LEVEL;
+		else
+			bl_config->bl_scale_ad = (u32)value;
+	} else {
+		SDE_DEBUG("invalid idx %d\n", idx);
+		return 0;
+	}
+
+	SDE_DEBUG("bl_scale = %u, bl_scale_ad = %u, bl_level = %u\n",
+		bl_config->bl_scale, bl_config->bl_scale_ad,
+		bl_config->bl_level);
+	rc = c_conn->ops.set_backlight(dsi_display, bl_config->bl_level);
+
+	return rc;
+}
+
 int sde_connector_pre_kickoff(struct drm_connector *connector)
 {
 	struct sde_connector *c_conn;
@@ -471,6 +554,10 @@
 			_sde_connector_update_power_locked(c_conn);
 			mutex_unlock(&c_conn->lock);
 			break;
+		case CONNECTOR_PROP_BL_SCALE:
+		case CONNECTOR_PROP_AD_BL_SCALE:
+			_sde_connector_update_bl_scale(c_conn, idx);
+			break;
 		default:
 			/* nothing to do for most properties */
 			break;
@@ -481,6 +568,7 @@
 		return 0;
 
 	params.rois = &c_state->rois;
+	params.hdr_meta = &c_state->hdr_meta;
 
 	SDE_EVT32_VERBOSE(connector->base.id);
 
@@ -531,6 +619,8 @@
 		drm_property_unreference_blob(c_conn->blob_hdr);
 	if (c_conn->blob_dither)
 		drm_property_unreference_blob(c_conn->blob_dither);
+	if (c_conn->blob_mode_info)
+		drm_property_unreference_blob(c_conn->blob_mode_info);
 	msm_property_destroy(&c_conn->property_info);
 
 	if (c_conn->bl_device)
@@ -671,44 +761,81 @@
 	return &c_state->base;
 }
 
-static int _sde_connector_roi_v1_check_roi(
-		struct sde_connector *c_conn,
-		struct drm_clip_rect *roi_conn,
-		const struct msm_roi_caps *caps)
+int sde_connector_roi_v1_check_roi(struct drm_connector_state *conn_state)
 {
-	const struct msm_roi_alignment *align = &caps->align;
-	int w = roi_conn->x2 - roi_conn->x1;
-	int h = roi_conn->y2 - roi_conn->y1;
+	const struct msm_roi_alignment *align = NULL;
+	struct sde_connector *c_conn = NULL;
+	struct msm_mode_info mode_info;
+	struct sde_connector_state *c_state;
+	int i, w, h;
 
-	if (w <= 0 || h <= 0) {
-		SDE_ERROR_CONN(c_conn, "invalid conn roi w %d h %d\n", w, h);
+	if (!conn_state)
 		return -EINVAL;
-	}
 
-	if (w < align->min_width || w % align->width_pix_align) {
-		SDE_ERROR_CONN(c_conn,
-				"invalid conn roi width %d min %d align %d\n",
-				w, align->min_width, align->width_pix_align);
-		return -EINVAL;
-	}
+	memset(&mode_info, 0, sizeof(mode_info));
 
-	if (h < align->min_height || h % align->height_pix_align) {
-		SDE_ERROR_CONN(c_conn,
-				"invalid conn roi height %d min %d align %d\n",
-				h, align->min_height, align->height_pix_align);
-		return -EINVAL;
-	}
+	c_state = to_sde_connector_state(conn_state);
+	c_conn = to_sde_connector(conn_state->connector);
 
-	if (roi_conn->x1 % align->xstart_pix_align) {
-		SDE_ERROR_CONN(c_conn, "invalid conn roi x1 %d align %d\n",
-				roi_conn->x1, align->xstart_pix_align);
-		return -EINVAL;
-	}
+	memcpy(&mode_info, &c_state->mode_info, sizeof(c_state->mode_info));
 
-	if (roi_conn->y1 % align->ystart_pix_align) {
-		SDE_ERROR_CONN(c_conn, "invalid conn roi y1 %d align %d\n",
-				roi_conn->y1, align->ystart_pix_align);
-		return -EINVAL;
+	if (!mode_info.roi_caps.enabled)
+		return 0;
+
+	if (c_state->rois.num_rects > mode_info.roi_caps.num_roi) {
+		SDE_ERROR_CONN(c_conn, "too many rects specified: %d > %d\n",
+				c_state->rois.num_rects,
+				mode_info.roi_caps.num_roi);
+		return -E2BIG;
+	}
+
+	align = &mode_info.roi_caps.align;
+	for (i = 0; i < c_state->rois.num_rects; ++i) {
+		struct drm_clip_rect *roi_conn;
+
+		roi_conn = &c_state->rois.roi[i];
+		w = roi_conn->x2 - roi_conn->x1;
+		h = roi_conn->y2 - roi_conn->y1;
+
+		SDE_EVT32_VERBOSE(DRMID(&c_conn->base),
+				roi_conn->x1, roi_conn->y1,
+				roi_conn->x2, roi_conn->y2);
+
+		if (w <= 0 || h <= 0) {
+			SDE_ERROR_CONN(c_conn, "invalid conn roi w %d h %d\n",
+					w, h);
+			return -EINVAL;
+		}
+
+		if (w < align->min_width || w % align->width_pix_align) {
+			SDE_ERROR_CONN(c_conn,
+					"invalid conn roi width %d min %d align %d\n",
+					w, align->min_width,
+					align->width_pix_align);
+			return -EINVAL;
+		}
+
+		if (h < align->min_height || h % align->height_pix_align) {
+			SDE_ERROR_CONN(c_conn,
+					"invalid conn roi height %d min %d align %d\n",
+					h, align->min_height,
+					align->height_pix_align);
+			return -EINVAL;
+		}
+
+		if (roi_conn->x1 % align->xstart_pix_align) {
+			SDE_ERROR_CONN(c_conn,
+					"invalid conn roi x1 %d align %d\n",
+					roi_conn->x1, align->xstart_pix_align);
+			return -EINVAL;
+		}
+
+		if (roi_conn->y1 % align->ystart_pix_align) {
+			SDE_ERROR_CONN(c_conn,
+					"invalid conn roi y1 %d align %d\n",
+					roi_conn->y1, align->ystart_pix_align);
+			return -EINVAL;
+		}
 	}
 
 	return 0;
@@ -720,27 +847,13 @@
 		void *usr_ptr)
 {
 	struct sde_drm_roi_v1 roi_v1;
-	struct msm_display_info display_info;
-	struct msm_roi_caps *caps;
-	int i, rc;
+	int i;
 
 	if (!c_conn || !c_state) {
 		SDE_ERROR("invalid args\n");
 		return -EINVAL;
 	}
 
-	rc = sde_connector_get_info(&c_conn->base, &display_info);
-	if (rc) {
-		SDE_ERROR_CONN(c_conn, "display get info error: %d\n", rc);
-		return rc;
-	}
-
-	caps = &display_info.roi_caps;
-	if (!caps->enabled) {
-		SDE_ERROR_CONN(c_conn, "display roi capability is disabled\n");
-		return -ENOTSUPP;
-	}
-
 	memset(&c_state->rois, 0, sizeof(c_state->rois));
 
 	if (!usr_ptr) {
@@ -760,22 +873,14 @@
 		return 0;
 	}
 
-	if (roi_v1.num_rects > SDE_MAX_ROI_V1 ||
-			roi_v1.num_rects > caps->num_roi) {
-		SDE_ERROR_CONN(c_conn, "too many rects specified: %d\n",
+	if (roi_v1.num_rects > SDE_MAX_ROI_V1) {
+		SDE_ERROR_CONN(c_conn, "num roi rects more than supported: %d\n",
 				roi_v1.num_rects);
 		return -EINVAL;
 	}
 
 	c_state->rois.num_rects = roi_v1.num_rects;
 	for (i = 0; i < roi_v1.num_rects; ++i) {
-		int rc;
-
-		rc = _sde_connector_roi_v1_check_roi(c_conn, &roi_v1.roi[i],
-				caps);
-		if (rc)
-			return rc;
-
 		c_state->rois.roi[i] = roi_v1.roi[i];
 		SDE_DEBUG_CONN(c_conn, "roi%d: roi (%d,%d) (%d,%d)\n", i,
 				c_state->rois.roi[i].x1,
@@ -787,40 +892,62 @@
 	return 0;
 }
 
-static int _sde_connector_update_bl_scale(struct sde_connector *c_conn,
-		int idx,
-		uint64_t value)
+static int _sde_connector_set_ext_hdr_info(
+	struct sde_connector *c_conn,
+	struct sde_connector_state *c_state,
+	void *usr_ptr)
 {
-	struct dsi_display *dsi_display = c_conn->display;
-	struct dsi_backlight_config *bl_config;
-	int rc = 0;
+	struct drm_connector *connector;
+	struct drm_msm_ext_hdr_metadata *hdr_meta;
+	int i;
 
-	if (!dsi_display || !dsi_display->panel) {
-		pr_err("Invalid params(s) dsi_display %pK, panel %pK\n",
-			dsi_display,
-			((dsi_display) ? dsi_display->panel : NULL));
+	if (!c_conn || !c_state) {
+		SDE_ERROR_CONN(c_conn, "invalid args\n");
 		return -EINVAL;
 	}
 
-	bl_config = &dsi_display->panel->bl_config;
-	if (idx == CONNECTOR_PROP_BL_SCALE) {
-		bl_config->bl_scale = value;
-		if (value > MAX_BL_SCALE_LEVEL)
-			bl_config->bl_scale = MAX_BL_SCALE_LEVEL;
-		SDE_DEBUG("set to panel: bl_scale = %u, bl_level = %u\n",
-			bl_config->bl_scale, bl_config->bl_level);
-		rc = c_conn->ops.set_backlight(dsi_display,
-					       bl_config->bl_level);
-	} else if (idx == CONNECTOR_PROP_AD_BL_SCALE) {
-		bl_config->bl_scale_ad = value;
-		if (value > MAX_AD_BL_SCALE_LEVEL)
-			bl_config->bl_scale_ad = MAX_AD_BL_SCALE_LEVEL;
-		SDE_DEBUG("set to panel: bl_scale_ad = %u, bl_level = %u\n",
-			bl_config->bl_scale_ad, bl_config->bl_level);
-		rc = c_conn->ops.set_backlight(dsi_display,
-					       bl_config->bl_level);
+	connector = &c_conn->base;
+
+	if (!connector->hdr_supported) {
+		SDE_ERROR_CONN(c_conn, "sink doesn't support HDR\n");
+		return -ENOTSUPP;
 	}
-	return rc;
+
+	memset(&c_state->hdr_meta, 0, sizeof(c_state->hdr_meta));
+
+	if (!usr_ptr) {
+		SDE_DEBUG_CONN(c_conn, "hdr metadata cleared\n");
+		return 0;
+	}
+
+	if (copy_from_user(&c_state->hdr_meta,
+		(void __user *)usr_ptr,
+			sizeof(*hdr_meta))) {
+		SDE_ERROR_CONN(c_conn, "failed to copy hdr metadata\n");
+		return -EFAULT;
+	}
+
+	hdr_meta = &c_state->hdr_meta;
+
+	SDE_DEBUG_CONN(c_conn, "hdr_state %d\n", hdr_meta->hdr_state);
+	SDE_DEBUG_CONN(c_conn, "hdr_supported %d\n", hdr_meta->hdr_supported);
+	SDE_DEBUG_CONN(c_conn, "eotf %d\n", hdr_meta->eotf);
+	SDE_DEBUG_CONN(c_conn, "white_point_x %d\n", hdr_meta->white_point_x);
+	SDE_DEBUG_CONN(c_conn, "white_point_y %d\n", hdr_meta->white_point_y);
+	SDE_DEBUG_CONN(c_conn, "max_luminance %d\n", hdr_meta->max_luminance);
+	SDE_DEBUG_CONN(c_conn, "max_content_light_level %d\n",
+				hdr_meta->max_content_light_level);
+	SDE_DEBUG_CONN(c_conn, "max_average_light_level %d\n",
+				hdr_meta->max_average_light_level);
+
+	for (i = 0; i < HDR_PRIMARIES_COUNT; i++) {
+		SDE_DEBUG_CONN(c_conn, "display_primaries_x [%d]\n",
+				   hdr_meta->display_primaries_x[i]);
+		SDE_DEBUG_CONN(c_conn, "display_primaries_y [%d]\n",
+				   hdr_meta->display_primaries_y[i]);
+	}
+
+	return 0;
 }
 
 static int sde_connector_atomic_set_property(struct drm_connector *connector,
@@ -831,6 +958,7 @@
 	struct sde_connector *c_conn;
 	struct sde_connector_state *c_state;
 	int idx, rc;
+	uint64_t fence_fd;
 
 	if (!connector || !state || !property) {
 		SDE_ERROR("invalid argument(s), conn %pK, state %pK, prp %pK\n",
@@ -869,18 +997,37 @@
 					c_conn->fb_kmap);
 		}
 		break;
-	case CONNECTOR_PROP_BL_SCALE:
-	case CONNECTOR_PROP_AD_BL_SCALE:
-		rc = _sde_connector_update_bl_scale(c_conn, idx, val);
+	case CONNECTOR_PROP_RETIRE_FENCE:
+		rc = sde_fence_create(&c_conn->retire_fence, &fence_fd, 0);
+		if (rc) {
+			SDE_ERROR("fence create failed rc:%d\n", rc);
+			goto end;
+		}
+
+		rc = copy_to_user((uint64_t __user *)val, &fence_fd,
+			sizeof(uint64_t));
+		if (rc) {
+			SDE_ERROR("copy to user failed rc:%d\n", rc);
+			/* fence will be released with timeline update */
+			put_unused_fd(fence_fd);
+			rc = -EFAULT;
+			goto end;
+		}
+		break;
+	case CONNECTOR_PROP_ROI_V1:
+		rc = _sde_connector_set_roi_v1(c_conn, c_state, (void *)val);
+		if (rc)
+			SDE_ERROR_CONN(c_conn, "invalid roi_v1, rc: %d\n", rc);
 		break;
 	default:
 		break;
 	}
 
-	if (idx == CONNECTOR_PROP_ROI_V1) {
-		rc = _sde_connector_set_roi_v1(c_conn, c_state, (void *)val);
+	if (idx == CONNECTOR_PROP_HDR_METADATA) {
+		rc = _sde_connector_set_ext_hdr_info(c_conn,
+			c_state, (void *)val);
 		if (rc)
-			SDE_ERROR_CONN(c_conn, "invalid roi_v1, rc: %d\n", rc);
+			SDE_ERROR_CONN(c_conn, "cannot set hdr info %d\n", rc);
 	}
 
 	/* check for custom property handling */
@@ -948,6 +1095,19 @@
 	return rc;
 }
 
+void sde_conn_timeline_status(struct drm_connector *conn)
+{
+	struct sde_connector *c_conn;
+
+	if (!conn) {
+		SDE_ERROR("invalid connector\n");
+		return;
+	}
+
+	c_conn = to_sde_connector(conn);
+	sde_fence_timeline_status(&c_conn->retire_fence, &conn->base);
+}
+
 void sde_connector_prepare_fence(struct drm_connector *connector)
 {
 	if (!connector) {
@@ -981,6 +1141,29 @@
 	sde_fence_signal(&to_sde_connector(connector)->retire_fence, ts, true);
 }
 
+static void sde_connector_update_hdr_props(struct drm_connector *connector)
+{
+	struct sde_connector *c_conn = to_sde_connector(connector);
+	struct drm_msm_ext_hdr_properties hdr = {};
+
+	hdr.hdr_supported = connector->hdr_supported;
+
+	if (hdr.hdr_supported) {
+		hdr.hdr_eotf = connector->hdr_eotf;
+		hdr.hdr_metadata_type_one = connector->hdr_metadata_type_one;
+		hdr.hdr_max_luminance = connector->hdr_max_luminance;
+		hdr.hdr_avg_luminance = connector->hdr_avg_luminance;
+		hdr.hdr_min_luminance = connector->hdr_min_luminance;
+
+		msm_property_set_blob(&c_conn->property_info,
+			      &c_conn->blob_ext_hdr,
+			      &hdr,
+			      sizeof(hdr),
+			      CONNECTOR_PROP_EXT_HDR_INFO);
+
+	}
+}
+
 static enum drm_connector_status
 sde_connector_detect(struct drm_connector *connector, bool force)
 {
@@ -1189,12 +1372,39 @@
 	/* debugfs under connector->debugfs are deleted by drm_debugfs */
 }
 
+static int sde_connector_fill_modes(struct drm_connector *connector,
+		uint32_t max_width, uint32_t max_height)
+{
+	int rc, mode_count = 0;
+	struct sde_connector *sde_conn = NULL;
+
+	sde_conn = to_sde_connector(connector);
+	if (!sde_conn) {
+		SDE_ERROR("invalid arguments\n");
+		return 0;
+	}
+
+	mode_count = drm_helper_probe_single_connector_modes(connector,
+			max_width, max_height);
+
+	rc = sde_connector_set_blob_data(connector,
+				connector->state,
+				CONNECTOR_PROP_MODE_INFO);
+	if (rc) {
+		SDE_ERROR_CONN(sde_conn,
+			"failed to setup mode info prop, rc = %d\n", rc);
+		return 0;
+	}
+
+	return mode_count;
+}
+
 static const struct drm_connector_funcs sde_connector_ops = {
 	.dpms =                   sde_connector_dpms,
 	.reset =                  sde_connector_atomic_reset,
 	.detect =                 sde_connector_detect,
 	.destroy =                sde_connector_destroy,
-	.fill_modes =             drm_helper_probe_single_connector_modes,
+	.fill_modes =             sde_connector_fill_modes,
 	.atomic_duplicate_state = sde_connector_atomic_duplicate_state,
 	.atomic_destroy_state =   sde_connector_atomic_destroy_state,
 	.atomic_set_property =    sde_connector_atomic_set_property,
@@ -1207,6 +1417,7 @@
 static int sde_connector_get_modes(struct drm_connector *connector)
 {
 	struct sde_connector *c_conn;
+	int mode_count = 0;
 
 	if (!connector) {
 		SDE_ERROR("invalid connector\n");
@@ -1219,7 +1430,15 @@
 		return 0;
 	}
 
-	return c_conn->ops.get_modes(connector, c_conn->display);
+	mode_count = c_conn->ops.get_modes(connector, c_conn->display);
+	if (!mode_count) {
+		SDE_ERROR_CONN(c_conn, "failed to get modes\n");
+		return 0;
+	}
+
+	sde_connector_update_hdr_props(connector);
+
+	return mode_count;
 }
 
 static enum drm_mode_status
@@ -1316,6 +1535,162 @@
 	.best_encoder = sde_connector_best_encoder,
 };
 
+static int sde_connector_populate_mode_info(struct drm_connector *conn,
+	struct sde_kms_info *info)
+{
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+	struct sde_connector *c_conn = NULL;
+	struct drm_display_mode *mode;
+	struct msm_mode_info mode_info;
+	int rc = 0;
+
+	if (!conn || !conn->dev || !conn->dev->dev_private) {
+		SDE_ERROR("invalid arguments\n");
+		return -EINVAL;
+	}
+
+	priv = conn->dev->dev_private;
+	sde_kms = to_sde_kms(priv->kms);
+
+	c_conn = to_sde_connector(conn);
+	if (!c_conn->ops.get_mode_info) {
+		SDE_ERROR_CONN(c_conn, "get_mode_info not defined\n");
+		return -EINVAL;
+	}
+
+	list_for_each_entry(mode, &conn->modes, head) {
+		int topology_idx = 0;
+
+		memset(&mode_info, 0, sizeof(mode_info));
+
+		rc = c_conn->ops.get_mode_info(mode, &mode_info,
+			sde_kms->catalog->max_mixer_width,
+			c_conn->display);
+		if (rc) {
+			SDE_ERROR_CONN(c_conn,
+				"failed to get mode info for mode %s\n",
+				mode->name);
+			continue;
+		}
+
+		sde_kms_info_add_keystr(info, "mode_name", mode->name);
+
+		topology_idx = (int)sde_rm_get_topology_name(
+							mode_info.topology);
+		if (topology_idx < SDE_RM_TOPOLOGY_MAX) {
+			sde_kms_info_add_keystr(info, "topology",
+					e_topology_name[topology_idx].name);
+		} else {
+			SDE_ERROR_CONN(c_conn, "invalid topology\n");
+			continue;
+		}
+
+		if (!mode_info.roi_caps.num_roi)
+			continue;
+
+		sde_kms_info_add_keyint(info, "partial_update_num_roi",
+			mode_info.roi_caps.num_roi);
+		sde_kms_info_add_keyint(info, "partial_update_xstart",
+			mode_info.roi_caps.align.xstart_pix_align);
+		sde_kms_info_add_keyint(info, "partial_update_walign",
+			mode_info.roi_caps.align.width_pix_align);
+		sde_kms_info_add_keyint(info, "partial_update_wmin",
+			mode_info.roi_caps.align.min_width);
+		sde_kms_info_add_keyint(info, "partial_update_ystart",
+			mode_info.roi_caps.align.ystart_pix_align);
+		sde_kms_info_add_keyint(info, "partial_update_halign",
+			mode_info.roi_caps.align.height_pix_align);
+		sde_kms_info_add_keyint(info, "partial_update_hmin",
+			mode_info.roi_caps.align.min_height);
+		sde_kms_info_add_keyint(info, "partial_update_roimerge",
+			mode_info.roi_caps.merge_rois);
+	}
+
+	return rc;
+}
+
+int sde_connector_set_blob_data(struct drm_connector *conn,
+		struct drm_connector_state *state,
+		enum msm_mdp_conn_property prop_id)
+{
+	struct sde_kms_info *info;
+	struct sde_connector *c_conn = NULL;
+	struct sde_connector_state *sde_conn_state = NULL;
+	struct msm_mode_info mode_info;
+	struct drm_property_blob *blob = NULL;
+	int rc = 0;
+
+	c_conn = to_sde_connector(conn);
+	if (!c_conn) {
+		SDE_ERROR("invalid argument\n");
+		return -EINVAL;
+	}
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	sde_kms_info_reset(info);
+
+	switch (prop_id) {
+	case CONNECTOR_PROP_SDE_INFO:
+		memset(&mode_info, 0, sizeof(mode_info));
+
+		if (state) {
+			sde_conn_state = to_sde_connector_state(state);
+			memcpy(&mode_info, &sde_conn_state->mode_info,
+					sizeof(sde_conn_state->mode_info));
+		} else {
+			/**
+			 * connector state is assigned only on first
+			 * atomic_commit. But this function is allowed to be
+			 * invoked during probe/init sequence. So not throwing
+			 * an error.
+			 */
+			SDE_DEBUG_CONN(c_conn, "invalid connector state\n");
+		}
+
+		if (!c_conn->ops.post_init) {
+			SDE_ERROR_CONN(c_conn, "post_init not defined\n");
+			goto exit;
+		}
+
+		rc = c_conn->ops.post_init(conn, info, c_conn->display,
+				&mode_info);
+		if (rc) {
+			SDE_ERROR_CONN(c_conn, "post-init failed, %d\n", rc);
+			goto exit;
+		}
+
+		blob = c_conn->blob_caps;
+		break;
+	case CONNECTOR_PROP_MODE_INFO:
+		rc = sde_connector_populate_mode_info(conn, info);
+		if (rc) {
+			SDE_ERROR_CONN(c_conn,
+					"mode info population failed, %d\n",
+					rc);
+			goto exit;
+		}
+		blob = c_conn->blob_mode_info;
+		break;
+	default:
+		SDE_ERROR_CONN(c_conn, "invalid prop_id: %d\n", prop_id);
+		goto exit;
+	}
+
+	msm_property_set_blob(&c_conn->property_info,
+			&blob,
+			SDE_KMS_INFO_DATA(info),
+			SDE_KMS_INFO_DATALEN(info),
+			prop_id);
+exit:
+	kfree(info);
+
+	return rc;
+}
+
 struct drm_connector *sde_connector_init(struct drm_device *dev,
 		struct drm_encoder *encoder,
 		struct drm_panel *panel,
@@ -1326,7 +1701,6 @@
 {
 	struct msm_drm_private *priv;
 	struct sde_kms *sde_kms;
-	struct sde_kms_info *info;
 	struct sde_connector *c_conn = NULL;
 	struct dsi_display *dsi_display;
 	struct msm_display_info display_info;
@@ -1423,35 +1797,25 @@
 			CONNECTOR_PROP_COUNT, CONNECTOR_PROP_BLOBCOUNT,
 			sizeof(struct sde_connector_state));
 
-	if (c_conn->ops.post_init) {
-		info = kmalloc(sizeof(*info), GFP_KERNEL);
-		if (!info) {
-			SDE_ERROR("failed to allocate info buffer\n");
-			rc = -ENOMEM;
-			goto error_cleanup_fence;
-		}
+	msm_property_install_blob(&c_conn->property_info,
+			"capabilities",
+			DRM_MODE_PROP_IMMUTABLE,
+			CONNECTOR_PROP_SDE_INFO);
 
-		sde_kms_info_reset(info);
-		rc = c_conn->ops.post_init(&c_conn->base, info, display);
-		if (rc) {
-			SDE_ERROR("post-init failed, %d\n", rc);
-			kfree(info);
-			goto error_cleanup_fence;
-		}
-
-		msm_property_install_blob(&c_conn->property_info,
-				"capabilities",
-				DRM_MODE_PROP_IMMUTABLE,
-				CONNECTOR_PROP_SDE_INFO);
-
-		msm_property_set_blob(&c_conn->property_info,
-				&c_conn->blob_caps,
-				SDE_KMS_INFO_DATA(info),
-				SDE_KMS_INFO_DATALEN(info),
-				CONNECTOR_PROP_SDE_INFO);
-		kfree(info);
+	rc = sde_connector_set_blob_data(&c_conn->base,
+			NULL,
+			CONNECTOR_PROP_SDE_INFO);
+	if (rc) {
+		SDE_ERROR_CONN(c_conn,
+			"failed to setup connector info, rc = %d\n", rc);
+		goto error_cleanup_fence;
 	}
 
+	msm_property_install_blob(&c_conn->property_info,
+			"mode_properties",
+			DRM_MODE_PROP_IMMUTABLE,
+			CONNECTOR_PROP_MODE_INFO);
+
 	if (connector_type == DRM_MODE_CONNECTOR_DSI) {
 		dsi_display = (struct dsi_display *)(display);
 		if (dsi_display && dsi_display->panel &&
@@ -1470,16 +1834,32 @@
 	}
 
 	rc = sde_connector_get_info(&c_conn->base, &display_info);
-	if (!rc && display_info.roi_caps.enabled) {
-		msm_property_install_volatile_range(
-				&c_conn->property_info, "sde_drm_roi_v1", 0x0,
-				0, ~0, 0, CONNECTOR_PROP_ROI_V1);
-	}
+	if (!rc && (connector_type == DRM_MODE_CONNECTOR_DSI) &&
+			(display_info.capabilities & MSM_DISPLAY_CAP_VID_MODE))
+		sde_connector_register_event(&c_conn->base,
+			SDE_CONN_EVENT_VID_FIFO_OVERFLOW,
+			sde_connector_handle_disp_recovery,
+			c_conn);
+
+	msm_property_install_volatile_range(
+			&c_conn->property_info, "sde_drm_roi_v1", 0x0,
+			0, ~0, 0, CONNECTOR_PROP_ROI_V1);
+
 	/* install PP_DITHER properties */
 	_sde_connector_install_dither_property(dev, sde_kms, c_conn);
 
-	msm_property_install_range(&c_conn->property_info, "RETIRE_FENCE",
-			0x0, 0, INR_OPEN_MAX, 0, CONNECTOR_PROP_RETIRE_FENCE);
+	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+		msm_property_install_blob(&c_conn->property_info,
+				"ext_hdr_properties",
+				DRM_MODE_PROP_IMMUTABLE,
+				CONNECTOR_PROP_EXT_HDR_INFO);
+	}
+
+	msm_property_install_volatile_range(&c_conn->property_info,
+		"hdr_metadata", 0x0, 0, ~0, 0, CONNECTOR_PROP_HDR_METADATA);
+
+	msm_property_install_volatile_range(&c_conn->property_info,
+		"RETIRE_FENCE", 0x0, 0, ~0, 0, CONNECTOR_PROP_RETIRE_FENCE);
 
 	msm_property_install_range(&c_conn->property_info, "autorefresh",
 			0x0, 0, AUTOREFRESH_MAX_FRAME_CNT, 0,
@@ -1530,6 +1910,8 @@
 		drm_property_unreference_blob(c_conn->blob_hdr);
 	if (c_conn->blob_dither)
 		drm_property_unreference_blob(c_conn->blob_dither);
+	if (c_conn->blob_mode_info)
+		drm_property_unreference_blob(c_conn->blob_mode_info);
 
 	msm_property_destroy(&c_conn->property_info);
 error_cleanup_fence:
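
With the changes above, sde_connector_fill_modes() regenerates the new "mode_properties" blob after every mode probe, and sde_connector_set_blob_data() fills it with the key/value text built by the sde_kms_info_add_key*() helpers. A hedged userspace sketch of reading that blob back, assuming stock libdrm; dump_mode_properties() is illustrative and not part of any existing tool, and parsing the key/value text is left out:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <xf86drm.h>
	#include <xf86drmMode.h>

	/* Print the raw "mode_properties" blob text exposed on one connector. */
	static void dump_mode_properties(int fd, uint32_t connector_id)
	{
		drmModeObjectProperties *props;
		uint32_t i;

		props = drmModeObjectGetProperties(fd, connector_id,
						   DRM_MODE_OBJECT_CONNECTOR);
		if (!props)
			return;

		for (i = 0; i < props->count_props; i++) {
			drmModePropertyRes *prop =
				drmModeGetProperty(fd, props->props[i]);

			if (prop && !strcmp(prop->name, "mode_properties")) {
				drmModePropertyBlobRes *blob =
					drmModeGetPropertyBlob(fd,
							props->prop_values[i]);

				if (blob) {
					fwrite(blob->data, 1, blob->length,
					       stdout);
					drmModeFreePropertyBlob(blob);
				}
			}
			drmModeFreeProperty(prop);
		}
		drmModeFreeObjectProperties(props);
	}
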
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index 005d050..18fc66d 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -38,11 +38,13 @@
 	 * @connector: Pointer to drm connector structure
 	 * @info: Pointer to sde connector info structure
 	 * @display: Pointer to private display handle
+	 * @mode_info: Pointer to mode info structure
 	 * Returns: Zero on success
 	 */
 	int (*post_init)(struct drm_connector *connector,
 			void *info,
-			void *display);
+			void *display,
+			struct msm_mode_info *mode_info);
 
 	/**
 	 * detect - determine if connector is connected
@@ -228,6 +230,8 @@
 enum sde_connector_events {
 	SDE_CONN_EVENT_VID_DONE, /* video mode frame done */
 	SDE_CONN_EVENT_CMD_DONE, /* command mode frame done */
+	SDE_CONN_EVENT_VID_FIFO_OVERFLOW, /* dsi fifo overflow error */
+	SDE_CONN_EVENT_CMD_FIFO_UNDERFLOW, /* dsi fifo underflow error */
 	SDE_CONN_EVENT_COUNT,
 };
 
@@ -235,9 +239,10 @@
  * struct sde_connector_evt - local event registration entry structure
  * @cb_func: Pointer to desired callback function
  * @usr: User pointer to pass to callback on event trigger
+ * Returns: Zero on success, negative on failure
  */
 struct sde_connector_evt {
-	void (*cb_func)(uint32_t event_idx,
+	int (*cb_func)(uint32_t event_idx,
 			uint32_t instance_idx, void *usr,
 			uint32_t data0, uint32_t data1,
 			uint32_t data2, uint32_t data3);
@@ -264,7 +269,9 @@
  * @property_data: Array of private data for generic property handling
  * @blob_caps: Pointer to blob structure for 'capabilities' property
  * @blob_hdr: Pointer to blob structure for 'hdr_properties' property
+ * @blob_ext_hdr: Pointer to blob structure for 'ext_hdr_properties' property
  * @blob_dither: Pointer to blob structure for default dither config
+ * @blob_mode_info: Pointer to blob structure for 'mode_properties' property
  * @fb_kmap: true if kernel mapping of framebuffer is requested
  * @event_table: Array of registered events
  * @event_lock: Lock object for event_table
@@ -296,7 +303,9 @@
 	struct msm_property_data property_data[CONNECTOR_PROP_COUNT];
 	struct drm_property_blob *blob_caps;
 	struct drm_property_blob *blob_hdr;
+	struct drm_property_blob *blob_ext_hdr;
 	struct drm_property_blob *blob_dither;
+	struct drm_property_blob *blob_mode_info;
 
 	bool fb_kmap;
 	struct sde_connector_evt event_table[SDE_CONN_EVENT_COUNT];
@@ -354,6 +363,10 @@
  * @property_values: Local cache of current connector property values
  * @rois: Regions of interest structure for mapping CRTC to Connector output
  * @property_blobs: blob properties
+ * @mode_info: local copy of msm_mode_info struct
+ * @hdr_meta: HDR metadata info passed from userspace
+ * @old_topology_name: topology of previous atomic state. Remove this in later
+ *	kernel versions that provide drm_atomic_state old_state pointers
  */
 struct sde_connector_state {
 	struct drm_connector_state base;
@@ -363,6 +376,9 @@
 
 	struct msm_roi_list rois;
 	struct drm_property_blob *property_blobs[CONNECTOR_PROP_BLOBCOUNT];
+	struct msm_mode_info mode_info;
+	struct drm_msm_ext_hdr_metadata hdr_meta;
+	enum sde_rm_topology_name old_topology_name;
 };
 
 /**
@@ -415,6 +431,43 @@
 }
 
 /**
+ * sde_connector_get_old_topology_name - helper accessor to retrieve
+ *	topology_name for the previous mode
+ * @state: pointer to drm connector state
+ * Returns: cached value of the previous topology, or SDE_RM_TOPOLOGY_NONE
+ */
+static inline enum sde_rm_topology_name sde_connector_get_old_topology_name(
+		struct drm_connector_state *state)
+{
+	struct sde_connector_state *c_state = to_sde_connector_state(state);
+
+	if (!state)
+		return SDE_RM_TOPOLOGY_NONE;
+
+	return c_state->old_topology_name;
+}
+
+/**
+ * sde_connector_set_old_topology_name - helper to cache value of previous
+ *	mode's topology
+ * @state: pointer to drm connector state
+ * Returns: 0 on success, negative errno on failure
+ */
+static inline int sde_connector_set_old_topology_name(
+		struct drm_connector_state *state,
+		enum sde_rm_topology_name top)
+{
+	struct sde_connector_state *c_state = to_sde_connector_state(state);
+
+	if (!state)
+		return -EINVAL;
+
+	c_state->old_topology_name = top;
+
+	return 0;
+}
+
+/**
  * sde_connector_get_lp - helper accessor to retrieve LP state
  * @connector: pointer to drm connector
  * Returns: value of the CONNECTOR_PROP_LP property or 0
@@ -532,7 +585,7 @@
  */
 int sde_connector_register_event(struct drm_connector *connector,
 		uint32_t event_idx,
-		void (*cb_func)(uint32_t event_idx,
+		int (*cb_func)(uint32_t event_idx,
 			uint32_t instance_idx, void *usr,
 			uint32_t data0, uint32_t data1,
 			uint32_t data2, uint32_t data3),
@@ -593,6 +646,24 @@
 		struct drm_connector_state *state, void **cfg, size_t *len);
 
 /**
+ * sde_connector_set_blob_data - set connector blob property data
+ * @conn: Pointer to drm_connector struct
+ * @state: Pointer to the drm_connector_state struct
+ * @prop_id: property id to be populated
+ * Returns: Zero on success
+ */
+int sde_connector_set_blob_data(struct drm_connector *conn,
+		struct drm_connector_state *state,
+		enum msm_mdp_conn_property prop_id);
+
+/**
+ * sde_connector_roi_v1_check_roi - validate connector ROI
+ * @conn_state: Pointer to drm_connector_state struct
+ * Returns: Zero on success
+ */
+int sde_connector_roi_v1_check_roi(struct drm_connector_state *conn_state);
+
+/**
  * sde_connector_schedule_status_work - manage ESD thread
  * conn: Pointer to drm_connector struct
  * @en: flag to start/stop ESD thread
@@ -610,4 +681,18 @@
 		struct drm_connector *connector,
 		struct drm_connector_state *connector_state);
 
+/**
+ * sde_connector_get_mode_info - get information of the current mode in the
+ *                               given connector state.
+ * @conn_state: Pointer to the DRM connector state object
+ * @mode_info: Pointer to the mode info structure
+ */
+int sde_connector_get_mode_info(struct drm_connector_state *conn_state,
+	struct msm_mode_info *mode_info);
+
+/**
+ * sde_conn_timeline_status - current buffer timeline status
+ * @conn: Pointer to drm_connector struct
+ */
+void sde_conn_timeline_status(struct drm_connector *conn);
 #endif /* _SDE_CONNECTOR_H_ */
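
The header changes above switch the connector event callback from a void to an int return so handlers can report status back through the trigger path, as sde_connector_handle_disp_recovery() does for SDE_CONN_EVENT_VID_FIFO_OVERFLOW in sde_connector.c. A sketch of hooking the new CMD-FIFO-underflow event with the updated prototype; my_underflow_handler() and my_register_underflow() are hypothetical names, not driver code:

	/* Hypothetical handler matching the new int-returning callback type. */
	static int my_underflow_handler(uint32_t event_idx, uint32_t instance_idx,
			void *usr, uint32_t data0, uint32_t data1,
			uint32_t data2, uint32_t data3)
	{
		struct sde_connector *c_conn = usr;

		if (!c_conn)
			return -EINVAL;

		/* Attempt recovery; a nonzero return propagates to the trigger path. */
		return 0;
	}

	/* Registration mirrors the VID_FIFO_OVERFLOW hookup in sde_connector_init(). */
	static void my_register_underflow(struct sde_connector *c_conn)
	{
		sde_connector_register_event(&c_conn->base,
				SDE_CONN_EVENT_CMD_FIFO_UNDERFLOW,
				my_underflow_handler, c_conn);
	}
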
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.c b/drivers/gpu/drm/msm/sde/sde_core_irq.c
index a0846ff..b6c6234 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_irq.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.c
@@ -55,9 +55,22 @@
 	spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
 
 	if (cb_tbl_error) {
-		SDE_ERROR("irq has no registered callback, idx %d enables %d\n",
-				irq_idx, enable_counts);
-		SDE_EVT32_IRQ(irq_idx, enable_counts, SDE_EVTLOG_ERROR);
+		/*
+		 * If enable count is zero and callback list is empty, then it's
+		 * not a fatal issue. Log this case as debug. If the enable
+		 * count is nonzero and callback list is empty, then it's a real
+		 * issue. Log this case as error to ensure we don't have silent
+		 * IRQs running.
+		 */
+		if (!enable_counts) {
+			SDE_DEBUG("irq has no callback, idx %d enables %d\n",
+					irq_idx, enable_counts);
+			SDE_EVT32_IRQ(irq_idx, enable_counts);
+		} else {
+			SDE_ERROR("irq has no callback, idx %d enables %d\n",
+					irq_idx, enable_counts);
+			SDE_EVT32_IRQ(irq_idx, enable_counts, SDE_EVTLOG_ERROR);
+		}
 	}
 
 	/*
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 8f80e99..aced5cd 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -48,6 +48,9 @@
 #define MEM_PROTECT_SD_CTRL_SWITCH 0x18
 #define MDP_DEVICE_ID            0x1A
 
+#define SDE_PSTATES_MAX (SDE_STAGE_MAX * 4)
+#define SDE_MULTIRECT_PLANE_MAX (SDE_STAGE_MAX * 2)
+
 struct sde_crtc_custom_events {
 	u32 event;
 	int (*func)(struct drm_crtc *crtc, bool en,
@@ -58,12 +61,15 @@
 	bool en, struct sde_irq_callback *ad_irq);
 static int sde_crtc_idle_interrupt_handler(struct drm_crtc *crtc_drm,
 	bool en, struct sde_irq_callback *idle_irq);
+static int sde_crtc_pm_event_handler(struct drm_crtc *crtc, bool en,
+		struct sde_irq_callback *noirq);
 
 static struct sde_crtc_custom_events custom_events[] = {
 	{DRM_EVENT_AD_BACKLIGHT, sde_cp_ad_interrupt},
 	{DRM_EVENT_CRTC_POWER, sde_crtc_power_interrupt_handler},
 	{DRM_EVENT_IDLE_NOTIFY, sde_crtc_idle_interrupt_handler},
 	{DRM_EVENT_HISTOGRAM, sde_cp_hist_interrupt},
+	{DRM_EVENT_SDE_POWER, sde_crtc_pm_event_handler},
 };
 
 /* default input fence timeout, in ms */
@@ -604,18 +610,6 @@
 		return;
 }
 
-/**
- * sde_crtc_destroy_dest_scaler - free memory allocated for scaler lut
- * @sde_crtc: Pointer to sde crtc
- */
-static void _sde_crtc_destroy_dest_scaler(struct sde_crtc *sde_crtc)
-{
-	if (!sde_crtc)
-		return;
-
-	kfree(sde_crtc->scl3_lut_cfg);
-}
-
 static void sde_crtc_destroy(struct drm_crtc *crtc)
 {
 	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
@@ -629,7 +623,6 @@
 		drm_property_unreference_blob(sde_crtc->blob_info);
 	msm_property_destroy(&sde_crtc->property_info);
 	sde_cp_crtc_destroy_properties(crtc);
-	_sde_crtc_destroy_dest_scaler(sde_crtc);
 
 	sde_fence_deinit(&sde_crtc->output_fence);
 	_sde_crtc_deinit_events(sde_crtc);
@@ -846,6 +839,11 @@
 				cstate->user_roi_list.roi[i].y1,
 				cstate->user_roi_list.roi[i].x2,
 				cstate->user_roi_list.roi[i].y2);
+		SDE_EVT32_VERBOSE(DRMID(crtc),
+				cstate->user_roi_list.roi[i].x1,
+				cstate->user_roi_list.roi[i].y1,
+				cstate->user_roi_list.roi[i].x2,
+				cstate->user_roi_list.roi[i].y2);
 	}
 
 	return 0;
@@ -889,6 +887,7 @@
 
 	for_each_connector_in_state(state->state, conn, conn_state, i) {
 		struct sde_connector_state *sde_conn_state;
+		struct sde_rect conn_roi;
 
 		if (!conn_state || conn_state->crtc != crtc)
 			continue;
@@ -915,12 +914,19 @@
 					sde_crtc->name);
 			return -EINVAL;
 		}
+
+		sde_kms_rect_merge_rectangles(&sde_conn_state->rois, &conn_roi);
+		SDE_EVT32_VERBOSE(DRMID(crtc), DRMID(conn),
+				conn_roi.x, conn_roi.y,
+				conn_roi.w, conn_roi.h);
 	}
 
 	sde_kms_rect_merge_rectangles(&crtc_state->user_roi_list, crtc_roi);
 
 	SDE_DEBUG("%s: crtc roi (%d,%d,%d,%d)\n", sde_crtc->name,
 			crtc_roi->x, crtc_roi->y, crtc_roi->w, crtc_roi->h);
+	SDE_EVT32_VERBOSE(DRMID(crtc), crtc_roi->x, crtc_roi->y, crtc_roi->w,
+			crtc_roi->h);
 
 	return 0;
 }
@@ -1178,14 +1184,56 @@
 		struct drm_crtc_state *state)
 {
 	struct sde_crtc *sde_crtc;
-	int lm_idx;
-	int rc;
+	struct sde_crtc_state *sde_crtc_state;
+	struct msm_mode_info mode_info;
+	struct drm_connector *conn;
+	struct drm_connector_state *conn_state;
+	int rc, lm_idx, i;
 
 	if (!crtc || !state)
 		return -EINVAL;
 
+	memset(&mode_info, 0, sizeof(mode_info));
+
 	sde_crtc = to_sde_crtc(crtc);
 
+	if (hweight_long(state->connector_mask) != 1) {
+		SDE_ERROR("invalid connector count(%d) for crtc: %d\n",
+			(int)hweight_long(state->connector_mask),
+			crtc->base.id);
+		return -EINVAL;
+	}
+
+	for_each_connector_in_state(state->state, conn, conn_state, i) {
+		rc = sde_connector_get_mode_info(conn_state, &mode_info);
+		if (rc) {
+			SDE_ERROR("failed to get mode info\n");
+			return -EINVAL;
+		}
+		break;
+	}
+
+	if (!mode_info.roi_caps.enabled)
+		return 0;
+
+	sde_crtc_state = to_sde_crtc_state(state);
+	if (sde_crtc_state->user_roi_list.num_rects >
+					mode_info.roi_caps.num_roi) {
+		SDE_ERROR("roi count is more than supported limit, %d > %d\n",
+				sde_crtc_state->user_roi_list.num_rects,
+				mode_info.roi_caps.num_roi);
+		return -E2BIG;
+	}
+
+	/**
+	 * TODO: Need to check against ROI alignment restrictions if partial
+	 * update support is added for destination scaler configurations
+	 */
+	if (sde_crtc_state->num_ds_enabled) {
+		SDE_ERROR("DS and PU concurrency is not supported\n");
+		return -EINVAL;
+	}
+
 	rc = _sde_crtc_set_crtc_roi(crtc, state);
 	if (rc)
 		return rc;
@@ -1754,16 +1802,11 @@
 	size_t len = 0;
 	int ret = 0;
 
-	if (!sde_crtc || !cstate || !sde_crtc->scl3_lut_cfg) {
+	if (!sde_crtc || !cstate) {
 		SDE_ERROR("invalid args\n");
 		return -EINVAL;
 	}
 
-	if (sde_crtc->scl3_lut_cfg->is_configured) {
-		SDE_DEBUG("%s: lut already configured\n", sde_crtc->name);
-		return 0;
-	}
-
 	lut_data = msm_property_get_blob(&sde_crtc->property_info,
 			&cstate->property_state, &len, lut_idx);
 	if (!lut_data || !len) {
@@ -1773,7 +1816,7 @@
 		len = 0;
 	}
 
-	cfg = sde_crtc->scl3_lut_cfg;
+	cfg = &cstate->scl3_lut_cfg;
 
 	switch (lut_idx) {
 	case CRTC_PROP_DEST_SCALER_LUT_ED:
@@ -1790,15 +1833,31 @@
 		break;
 	default:
 		ret = -EINVAL;
-		SDE_ERROR("invalid LUT index = %d", lut_idx);
+		SDE_ERROR("%s:invalid LUT idx(%d)\n", sde_crtc->name, lut_idx);
+		SDE_EVT32(DRMID(&sde_crtc->base), lut_idx, SDE_EVTLOG_ERROR);
 		break;
 	}
 
 	cfg->is_configured = cfg->dir_lut && cfg->cir_lut && cfg->sep_lut;
 
+	SDE_EVT32_VERBOSE(DRMID(&sde_crtc->base), ret, lut_idx, len,
+			cfg->is_configured);
 	return ret;
 }
 
+void sde_crtc_timeline_status(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc;
+
+	if (!crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	sde_fence_timeline_status(&sde_crtc->output_fence, &crtc->base);
+}
+
 /**
  * sde_crtc_secure_ctrl - Initiates the operations to swtich  between secure
  *                       and non-secure mode
@@ -1923,6 +1982,44 @@
 	return ret;
 }
 
+static int _sde_validate_hw_resources(struct sde_crtc *sde_crtc)
+{
+	int i;
+
+	/**
+	 * Check if sufficient hw resources are
+	 * available as per target caps & topology
+	 */
+	if (!sde_crtc) {
+		SDE_ERROR("invalid argument\n");
+		return -EINVAL;
+	}
+
+	if (!sde_crtc->num_mixers ||
+		sde_crtc->num_mixers > CRTC_DUAL_MIXERS) {
+		SDE_ERROR("%s: invalid number mixers: %d\n",
+			sde_crtc->name, sde_crtc->num_mixers);
+		SDE_EVT32(DRMID(&sde_crtc->base), sde_crtc->num_mixers,
+			SDE_EVTLOG_ERROR);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < sde_crtc->num_mixers; i++) {
+		if (!sde_crtc->mixers[i].hw_lm || !sde_crtc->mixers[i].hw_ctl
+			|| !sde_crtc->mixers[i].hw_ds) {
+			SDE_ERROR("%s:insufficient resources for mixer(%d)\n",
+				sde_crtc->name, i);
+			SDE_EVT32(DRMID(&sde_crtc->base), sde_crtc->num_mixers,
+				i, sde_crtc->mixers[i].hw_lm,
+				sde_crtc->mixers[i].hw_ctl,
+				sde_crtc->mixers[i].hw_ds, SDE_EVTLOG_ERROR);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
 /**
  * _sde_crtc_dest_scaler_setup - Set up dest scaler block
  * @crtc: Pointer to drm crtc
@@ -1939,37 +2036,49 @@
 	u32 flush_mask = 0, op_mode = 0;
 	u32 lm_idx = 0, num_mixers = 0;
 	int i, count = 0;
+	bool ds_dirty = false;
 
 	if (!crtc)
 		return;
 
-	sde_crtc   = to_sde_crtc(crtc);
+	sde_crtc = to_sde_crtc(crtc);
 	cstate = to_sde_crtc_state(crtc->state);
-	kms    = _sde_crtc_get_kms(crtc);
+	kms = _sde_crtc_get_kms(crtc);
 	num_mixers = sde_crtc->num_mixers;
+	count = cstate->num_ds;
 
 	SDE_DEBUG("crtc%d\n", crtc->base.id);
+	SDE_EVT32(DRMID(crtc), num_mixers, count, cstate->ds_dirty,
+		sde_crtc->ds_reconfig, cstate->num_ds_enabled);
 
-	if (!cstate->ds_dirty) {
+	/**
+	 * destination scaler configuration will be done either
+	 * on set property or on power collapse (idle/suspend)
+	 */
+	ds_dirty = (cstate->ds_dirty || sde_crtc->ds_reconfig);
+	if (sde_crtc->ds_reconfig) {
+		SDE_DEBUG("reconfigure dest scaler block\n");
+		sde_crtc->ds_reconfig = false;
+	}
+
+	if (!ds_dirty) {
 		SDE_DEBUG("no change in settings, skip commit\n");
 	} else if (!kms || !kms->catalog) {
-		SDE_ERROR("invalid parameters\n");
+		SDE_ERROR("crtc%d:invalid parameters\n", crtc->base.id);
 	} else if (!kms->catalog->mdp[0].has_dest_scaler) {
 		SDE_DEBUG("dest scaler feature not supported\n");
-	} else if (num_mixers > CRTC_DUAL_MIXERS) {
-		SDE_ERROR("invalid number mixers: %d\n", num_mixers);
-	} else if (!sde_crtc->scl3_lut_cfg->is_configured) {
-		SDE_DEBUG("no LUT data available\n");
+	} else if (_sde_validate_hw_resources(sde_crtc)) {
+		/* do nothing */
+	} else if (!cstate->scl3_lut_cfg.is_configured) {
+		SDE_ERROR("crtc%d:no LUT data available\n", crtc->base.id);
 	} else {
-		count = cstate->num_ds_enabled ? cstate->num_ds : num_mixers;
-
 		for (i = 0; i < count; i++) {
 			cfg = &cstate->ds_cfg[i];
 
 			if (!cfg->flags)
 				continue;
 
-			lm_idx = cfg->ndx;
+			lm_idx = cfg->idx;
 			hw_lm  = sde_crtc->mixers[lm_idx].hw_lm;
 			hw_ctl = sde_crtc->mixers[lm_idx].hw_ctl;
 			hw_ds  = sde_crtc->mixers[lm_idx].hw_ds;
@@ -1983,7 +2092,7 @@
 					CRTC_DUAL_MIXERS) ?
 					SDE_DS_OP_MODE_DUAL : 0;
 				hw_ds->ops.setup_opmode(hw_ds, op_mode);
-				SDE_EVT32(DRMID(crtc), op_mode);
+				SDE_EVT32_VERBOSE(DRMID(crtc), op_mode);
 			}
 
 			/* Setup scaler */
@@ -1992,33 +2101,23 @@
 					SDE_DRM_DESTSCALER_ENHANCER_UPDATE)) {
 				if (hw_ds->ops.setup_scaler)
 					hw_ds->ops.setup_scaler(hw_ds,
-							cfg->scl3_cfg,
-							sde_crtc->scl3_lut_cfg);
+						&cfg->scl3_cfg,
+						&cstate->scl3_lut_cfg);
 
-				/**
-				 * Clear the flags as the block doesn't have to
-				 * be programmed in each commit if no updates
-				 */
-				cfg->flags &= ~SDE_DRM_DESTSCALER_SCALE_UPDATE;
-				cfg->flags &=
-					~SDE_DRM_DESTSCALER_ENHANCER_UPDATE;
 			}
 
 			/*
 			 * Dest scaler shares the flush bit of the LM in control
 			 */
-			if (cfg->set_lm_flush && hw_lm && hw_ctl &&
-				hw_ctl->ops.get_bitmask_mixer) {
+			if (hw_ctl->ops.get_bitmask_mixer) {
 				flush_mask = hw_ctl->ops.get_bitmask_mixer(
 						hw_ctl, hw_lm->idx);
 				SDE_DEBUG("Set lm[%d] flush = %d",
 					hw_lm->idx, flush_mask);
 				hw_ctl->ops.update_pending_flush(hw_ctl,
-								flush_mask);
+							flush_mask);
 			}
-			cfg->set_lm_flush = false;
 		}
-		cstate->ds_dirty = false;
 	}
 }
 
@@ -2180,47 +2279,6 @@
 	SDE_ATRACE_END("signal_retire_fence");
 }
 
-/* _sde_crtc_idle_notify - signal idle timeout to client */
-static void _sde_crtc_idle_notify(struct sde_crtc *sde_crtc)
-{
-	struct drm_crtc *crtc;
-	struct drm_event event;
-	int ret = 0;
-
-	if (!sde_crtc) {
-		SDE_ERROR("invalid sde crtc\n");
-		return;
-	}
-
-	crtc = &sde_crtc->base;
-	event.type = DRM_EVENT_IDLE_NOTIFY;
-	event.length = sizeof(u32);
-	msm_mode_object_event_notify(&crtc->base, crtc->dev, &event,
-								(u8 *)&ret);
-
-	SDE_DEBUG("crtc:%d idle timeout notified\n", crtc->base.id);
-}
-
-/*
- * sde_crtc_handle_event - crtc frame event handle.
- * This API must manage only non-IRQ context events.
- */
-static bool _sde_crtc_handle_event(struct sde_crtc *sde_crtc, u32 event)
-{
-	bool event_processed = false;
-
-	/**
-	 * idle events are originated from commit thread and can be processed
-	 * in same context
-	 */
-	if (event & SDE_ENCODER_FRAME_EVENT_IDLE) {
-		_sde_crtc_idle_notify(sde_crtc);
-		event_processed = true;
-	}
-
-	return event_processed;
-}
-
 static void sde_crtc_frame_event_work(struct kthread_work *work)
 {
 	struct msm_drm_private *priv;
@@ -2314,15 +2372,6 @@
 	SDE_ATRACE_END("crtc_frame_event");
 }
 
-/*
- * sde_crtc_frame_event_cb - crtc frame event callback API. CRTC module
- * registers this API to encoder for all frame event callbacks like
- * release_fence, retire_fence, frame_error, frame_done, idle_timeout,
- * etc. Encoder may call different events from different context - IRQ,
- * user thread, commit_thread, etc. Each event should be carefully
- * reviewed and should be processed in proper task context to avoid scheduling
- * delay or properly manage the irq context's bottom half processing.
- */
 static void sde_crtc_frame_event_cb(void *data, u32 event)
 {
 	struct drm_crtc *crtc = (struct drm_crtc *)data;
@@ -2331,7 +2380,6 @@
 	struct sde_crtc_frame_event *fevent;
 	unsigned long flags;
 	u32 crtc_id;
-	bool event_processed = false;
 
 	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
 		SDE_ERROR("invalid parameters\n");
@@ -2344,11 +2392,6 @@
 	SDE_DEBUG("crtc%d\n", crtc->base.id);
 	SDE_EVT32_VERBOSE(DRMID(crtc), event);
 
-	/* try to process the event in caller context */
-	event_processed = _sde_crtc_handle_event(sde_crtc, event);
-	if (event_processed)
-		return;
-
 	spin_lock_irqsave(&sde_crtc->spin_lock, flags);
 	fevent = list_first_entry_or_null(&sde_crtc->frame_event_list,
 			struct sde_crtc_frame_event, list);
@@ -2389,24 +2432,6 @@
 		sde_crtc_secure_ctrl(crtc, true);
 }
 
-/* _sde_crtc_set_idle_timeout - update idle timeout wait duration */
-static void _sde_crtc_set_idle_timeout(struct drm_crtc *crtc, u64 val)
-{
-	struct drm_encoder *encoder;
-
-	if (!crtc) {
-		SDE_ERROR("invalid crtc\n");
-		return;
-	}
-
-	drm_for_each_encoder(encoder, crtc->dev) {
-		if (encoder->crtc != crtc)
-			continue;
-
-		sde_encoder_set_idle_timeout(encoder, (u32) val);
-	}
-}
-
 /**
  * _sde_crtc_set_input_fence_timeout - update ns version of in fence timeout
  * @cstate: Pointer to sde crtc state
@@ -2490,28 +2515,6 @@
 }
 
 /**
- * _sde_crtc_dest_scaler_init - allocate memory for scaler lut
- * @sde_crtc    :  Pointer to sde crtc
- * @catalog :  Pointer to mdss catalog info
- */
-static void _sde_crtc_dest_scaler_init(struct sde_crtc *sde_crtc,
-				struct sde_mdss_cfg *catalog)
-{
-	if (!sde_crtc || !catalog)
-		return;
-
-	if (!catalog->mdp[0].has_dest_scaler) {
-		SDE_DEBUG("dest scaler feature not supported\n");
-		return;
-	}
-
-	sde_crtc->scl3_lut_cfg = kzalloc(sizeof(struct sde_hw_scaler3_lut_cfg),
-				GFP_KERNEL);
-	if (!sde_crtc->scl3_lut_cfg)
-		SDE_ERROR("failed to create scale LUT for dest scaler");
-}
-
-/**
  * _sde_crtc_set_dest_scaler - copy dest scaler settings from userspace
  * @sde_crtc   :  Pointer to sde crtc
  * @cstate :  Pointer to sde crtc state
@@ -2525,7 +2528,7 @@
 	struct sde_drm_dest_scaler_cfg *ds_cfg_usr;
 	struct sde_drm_scaler_v2 scaler_v2;
 	void __user *scaler_v2_usr;
-	int i, count, ret = 0;
+	int i, count;
 
 	if (!sde_crtc || !cstate) {
 		SDE_ERROR("invalid sde_crtc/state\n");
@@ -2534,15 +2537,14 @@
 
 	SDE_DEBUG("crtc %s\n", sde_crtc->name);
 
-	cstate->num_ds = 0;
-	cstate->ds_dirty = false;
 	if (!usr_ptr) {
 		SDE_DEBUG("ds data removed\n");
 		return 0;
 	}
 
 	if (copy_from_user(&ds_data, usr_ptr, sizeof(ds_data))) {
-		SDE_ERROR("failed to copy dest scaler data from user\n");
+		SDE_ERROR("%s:failed to copy dest scaler data from user\n",
+			sde_crtc->name);
 		return -EINVAL;
 	}
 
@@ -2552,11 +2554,10 @@
 		return 0;
 	}
 
-	if (!sde_crtc->num_mixers || count > sde_crtc->num_mixers ||
-		(count && (count != sde_crtc->num_mixers) &&
-		!(ds_data.ds_cfg[0].flags & SDE_DRM_DESTSCALER_PU_ENABLE))) {
-		SDE_ERROR("invalid config:num ds(%d), mixers(%d),flags(%d)\n",
-			count, sde_crtc->num_mixers, ds_data.ds_cfg[0].flags);
+	if (count > SDE_MAX_DS_COUNT) {
+		SDE_ERROR("%s: invalid config: num_ds(%d) max(%d)\n",
+			sde_crtc->name, count, SDE_MAX_DS_COUNT);
+		SDE_EVT32(DRMID(&sde_crtc->base), count, SDE_EVTLOG_ERROR);
 		return -EINVAL;
 	}
 
@@ -2564,49 +2565,35 @@
 	for (i = 0; i < count; i++) {
 		ds_cfg_usr = &ds_data.ds_cfg[i];
 
-		cstate->ds_cfg[i].ndx = ds_cfg_usr->index;
+		cstate->ds_cfg[i].idx = ds_cfg_usr->index;
 		cstate->ds_cfg[i].flags = ds_cfg_usr->flags;
 		cstate->ds_cfg[i].lm_width = ds_cfg_usr->lm_width;
 		cstate->ds_cfg[i].lm_height = ds_cfg_usr->lm_height;
-		cstate->ds_cfg[i].scl3_cfg = NULL;
+		memset(&scaler_v2, 0, sizeof(scaler_v2));
 
 		if (ds_cfg_usr->scaler_cfg) {
 			scaler_v2_usr =
 			(void __user *)((uintptr_t)ds_cfg_usr->scaler_cfg);
 
-			memset(&scaler_v2, 0, sizeof(scaler_v2));
-
-			cstate->ds_cfg[i].scl3_cfg =
-				kzalloc(sizeof(struct sde_hw_scaler3_cfg),
-					GFP_KERNEL);
-
-			if (!cstate->ds_cfg[i].scl3_cfg) {
-				ret = -ENOMEM;
-				goto err;
-			}
-
 			if (copy_from_user(&scaler_v2, scaler_v2_usr,
 					sizeof(scaler_v2))) {
-				SDE_ERROR("scale data:copy from user failed\n");
-				ret = -EINVAL;
-				goto err;
+				SDE_ERROR("%s:scaler: copy from user failed\n",
+					sde_crtc->name);
+				return -EINVAL;
 			}
-
-			sde_set_scaler_v2(cstate->ds_cfg[i].scl3_cfg,
-					&scaler_v2);
-
-			SDE_DEBUG("en(%d)dir(%d)de(%d) src(%dx%d) dst(%dx%d)\n",
-				scaler_v2.enable, scaler_v2.dir_en,
-				scaler_v2.de.enable, scaler_v2.src_width[0],
-				scaler_v2.src_height[0], scaler_v2.dst_width,
-				scaler_v2.dst_height);
-			SDE_EVT32_VERBOSE(DRMID(&sde_crtc->base),
-				scaler_v2.enable, scaler_v2.dir_en,
-				scaler_v2.de.enable, scaler_v2.src_width[0],
-				scaler_v2.src_height[0], scaler_v2.dst_width,
-				scaler_v2.dst_height);
 		}
 
+		sde_set_scaler_v2(&cstate->ds_cfg[i].scl3_cfg, &scaler_v2);
+
+		SDE_DEBUG("en(%d)dir(%d)de(%d) src(%dx%d) dst(%dx%d)\n",
+			scaler_v2.enable, scaler_v2.dir_en, scaler_v2.de.enable,
+			scaler_v2.src_width[0], scaler_v2.src_height[0],
+			scaler_v2.dst_width, scaler_v2.dst_height);
+		SDE_EVT32_VERBOSE(DRMID(&sde_crtc->base),
+			scaler_v2.enable, scaler_v2.dir_en, scaler_v2.de.enable,
+			scaler_v2.src_width[0], scaler_v2.src_height[0],
+			scaler_v2.dst_width, scaler_v2.dst_height);
+
 		SDE_DEBUG("ds cfg[%d]-ndx(%d) flags(%d) lm(%dx%d)\n",
 			i, ds_cfg_usr->index, ds_cfg_usr->flags,
 			ds_cfg_usr->lm_width, ds_cfg_usr->lm_height);
@@ -2617,13 +2604,9 @@
 
 	cstate->num_ds = count;
 	cstate->ds_dirty = true;
+	SDE_EVT32_VERBOSE(DRMID(&sde_crtc->base), count, cstate->ds_dirty);
+
 	return 0;
-
-err:
-	for (; i >= 0; i--)
-		kfree(cstate->ds_cfg[i].scl3_cfg);
-
-	return ret;
 }
 
 /**
@@ -2641,7 +2624,7 @@
 	struct sde_hw_ds *hw_ds;
 	struct sde_hw_ds_cfg *cfg;
 	u32 i, ret = 0, lm_idx;
-	u32 num_ds_enable = 0;
+	u32 num_ds_enable = 0, hdisplay = 0;
 	u32 max_in_width = 0, max_out_width = 0;
 	u32 prev_lm_width = 0, prev_lm_height = 0;
 
@@ -2655,13 +2638,13 @@
 
 	SDE_DEBUG("crtc%d\n", crtc->base.id);
 
-	if (!cstate->ds_dirty && !cstate->num_ds_enabled) {
+	if (!cstate->ds_dirty) {
 		SDE_DEBUG("dest scaler property not set, skip validation\n");
 		return 0;
 	}
 
 	if (!kms || !kms->catalog) {
-		SDE_ERROR("invalid parameters\n");
+		SDE_ERROR("crtc%d: invalid parameters\n", crtc->base.id);
 		return -EINVAL;
 	}
 
@@ -2671,40 +2654,13 @@
 	}
 
 	if (!sde_crtc->num_mixers) {
-		SDE_ERROR("mixers not allocated\n");
-		return -EINVAL;
+		SDE_DEBUG("mixers not allocated\n");
+		return 0;
 	}
 
-	/**
-	 * Check if sufficient hw resources are
-	 * available as per target caps & topology
-	 */
-	if (sde_crtc->num_mixers > CRTC_DUAL_MIXERS) {
-		SDE_ERROR("invalid config: mixers(%d) max(%d)\n",
-			sde_crtc->num_mixers, CRTC_DUAL_MIXERS);
-		ret = -EINVAL;
+	ret = _sde_validate_hw_resources(sde_crtc);
+	if (ret)
 		goto err;
-	}
-
-	for (i = 0; i < sde_crtc->num_mixers; i++) {
-		if (!sde_crtc->mixers[i].hw_lm || !sde_crtc->mixers[i].hw_ds) {
-			SDE_ERROR("insufficient HW resources allocated\n");
-			ret = -EINVAL;
-			goto err;
-		}
-	}
-
-	/**
-	 * Check if DS needs to be enabled or disabled
-	 * In case of enable, validate the data
-	 */
-	if (!cstate->ds_dirty || !cstate->num_ds ||
-		!(cstate->ds_cfg[0].flags & SDE_DRM_DESTSCALER_ENABLE)) {
-		SDE_DEBUG("disable dest scaler,dirty(%d)num(%d)flags(%d)\n",
-			cstate->ds_dirty, cstate->num_ds,
-			cstate->ds_cfg[0].flags);
-		goto disable;
-	}
 
 	/**
 	 * No of dest scalers shouldn't exceed hw ds block count and
@@ -2714,17 +2670,30 @@
 	if (cstate->num_ds > kms->catalog->ds_count ||
 		((cstate->num_ds != sde_crtc->num_mixers) &&
 		!(cstate->ds_cfg[0].flags & SDE_DRM_DESTSCALER_PU_ENABLE))) {
-		SDE_ERROR("invalid cfg: num_ds(%d), hw_ds_cnt(%d) flags(%d)\n",
-			cstate->num_ds, kms->catalog->ds_count,
+		SDE_ERROR("crtc%d: num_ds(%d), hw_ds_cnt(%d) flags(%d)\n",
+			crtc->base.id, cstate->num_ds, kms->catalog->ds_count,
 			cstate->ds_cfg[0].flags);
 		ret = -EINVAL;
 		goto err;
 	}
 
+	/**
+	 * Check if DS needs to be enabled or disabled
+	 * In case of enable, validate the data
+	 */
+	if (!(cstate->ds_cfg[0].flags & SDE_DRM_DESTSCALER_ENABLE)) {
+		SDE_DEBUG("disable dest scaler, num(%d) flags(%d)\n",
+			cstate->num_ds, cstate->ds_cfg[0].flags);
+		goto disable;
+	}
+
+	/* Display resolution */
+	hdisplay = mode->hdisplay/sde_crtc->num_mixers;
+
 	/* Validate the DS data */
 	for (i = 0; i < cstate->num_ds; i++) {
 		cfg = &cstate->ds_cfg[i];
-		lm_idx = cfg->ndx;
+		lm_idx = cfg->idx;
 
 		/**
 		 * Validate against topology
@@ -2733,8 +2702,10 @@
 		 */
 		if (lm_idx >= sde_crtc->num_mixers || (i != lm_idx &&
 			!(cfg->flags & SDE_DRM_DESTSCALER_PU_ENABLE))) {
-			SDE_ERROR("invalid user data(%d):idx(%d), flags(%d)\n",
-				i, lm_idx, cfg->flags);
+			SDE_ERROR("crtc%d: ds_cfg id(%d):idx(%d), flags(%d)\n",
+				crtc->base.id, i, lm_idx, cfg->flags);
+			SDE_EVT32(DRMID(crtc), i, lm_idx, cfg->flags,
+				SDE_EVTLOG_ERROR);
 			ret = -EINVAL;
 			goto err;
 		}
@@ -2753,14 +2724,13 @@
 		}
 
 		/* Check LM width and height */
-		if (cfg->lm_width > (mode->hdisplay/sde_crtc->num_mixers) ||
-			cfg->lm_height > mode->vdisplay ||
-			!cfg->lm_width || !cfg->lm_height) {
-			SDE_ERROR("invalid lm size[%d,%d] display [%d,%d]\n",
-				cfg->lm_width,
-				cfg->lm_height,
-				mode->hdisplay/sde_crtc->num_mixers,
-				mode->vdisplay);
+		if (cfg->lm_width > hdisplay || cfg->lm_height > mode->vdisplay
+			|| !cfg->lm_width || !cfg->lm_height) {
+			SDE_ERROR("crtc%d: lm size[%d,%d] display [%d,%d]\n",
+				crtc->base.id, cfg->lm_width, cfg->lm_height,
+				hdisplay, mode->vdisplay);
+			SDE_EVT32(DRMID(crtc),  cfg->lm_width, cfg->lm_height,
+				hdisplay, mode->vdisplay, SDE_EVTLOG_ERROR);
 			ret = -E2BIG;
 			goto err;
 		}
@@ -2771,9 +2741,13 @@
 		} else {
 			if (cfg->lm_width != prev_lm_width ||
 				cfg->lm_height != prev_lm_height) {
-				SDE_ERROR("lm size:left[%d,%d], right[%d %d]\n",
-					cfg->lm_width, cfg->lm_height,
-					prev_lm_width, prev_lm_height);
+				SDE_ERROR("crtc%d:lm left[%d,%d]right[%d %d]\n",
+					crtc->base.id, cfg->lm_width,
+					cfg->lm_height, prev_lm_width,
+					prev_lm_height);
+				SDE_EVT32(DRMID(crtc), cfg->lm_width,
+					cfg->lm_height, prev_lm_width,
+					prev_lm_height, SDE_EVTLOG_ERROR);
 				ret = -EINVAL;
 				goto err;
 			}
@@ -2782,22 +2756,40 @@
 		/* Check scaler data */
 		if (cfg->flags & SDE_DRM_DESTSCALER_SCALE_UPDATE ||
 			cfg->flags & SDE_DRM_DESTSCALER_ENHANCER_UPDATE) {
-			if (!cfg->scl3_cfg) {
-				ret = -EINVAL;
-				SDE_ERROR("null scale data\n");
-				goto err;
-			}
-			if (cfg->scl3_cfg->src_width[0] > max_in_width ||
-				cfg->scl3_cfg->dst_width > max_out_width ||
-				!cfg->scl3_cfg->src_width[0] ||
-				!cfg->scl3_cfg->dst_width) {
-				SDE_ERROR("scale width(%d %d) for ds-%d:\n",
-					cfg->scl3_cfg->src_width[0],
-					cfg->scl3_cfg->dst_width,
+
+			/**
+			 * Scaler src and dst width shouldn't exceed the maximum
+			 * width limitation. Also, if there is no partial update
+			 * dst width and height must match display resolution.
+			 */
+			if (cfg->scl3_cfg.src_width[0] > max_in_width ||
+				cfg->scl3_cfg.dst_width > max_out_width ||
+				!cfg->scl3_cfg.src_width[0] ||
+				!cfg->scl3_cfg.dst_width ||
+				(!(cfg->flags & SDE_DRM_DESTSCALER_PU_ENABLE)
+				 && (cfg->scl3_cfg.dst_width != hdisplay ||
+				 cfg->scl3_cfg.dst_height != mode->vdisplay))) {
+				SDE_ERROR("crtc%d: ", crtc->base.id);
+				SDE_ERROR("src_w(%d) dst(%dx%d) display(%dx%d)",
+					cfg->scl3_cfg.src_width[0],
+					cfg->scl3_cfg.dst_width,
+					cfg->scl3_cfg.dst_height,
+					hdisplay, mode->vdisplay);
+				SDE_ERROR("num_mixers(%d) flags(%d) ds-%d:\n",
+					sde_crtc->num_mixers, cfg->flags,
 					hw_ds->idx - DS_0);
 				SDE_ERROR("scale_en = %d, DE_en =%d\n",
-					cfg->scl3_cfg->enable,
-					cfg->scl3_cfg->de.enable);
+					cfg->scl3_cfg.enable,
+					cfg->scl3_cfg.de.enable);
+
+				SDE_EVT32(DRMID(crtc), cfg->scl3_cfg.enable,
+					cfg->scl3_cfg.de.enable, cfg->flags,
+					max_in_width, max_out_width,
+					cfg->scl3_cfg.src_width[0],
+					cfg->scl3_cfg.dst_width,
+					cfg->scl3_cfg.dst_height, hdisplay,
+					mode->vdisplay, sde_crtc->num_mixers,
+					SDE_EVTLOG_ERROR);
 
 				cfg->flags &=
 					~SDE_DRM_DESTSCALER_SCALE_UPDATE;
@@ -2812,36 +2804,34 @@
 		if (cfg->flags & SDE_DRM_DESTSCALER_ENABLE)
 			num_ds_enable++;
 
-		/**
-		 * Validation successful, indicator for flush to be issued
-		 */
-		cfg->set_lm_flush = true;
-
-		SDE_DEBUG("ds[%d]: flags = 0x%X\n",
+		SDE_DEBUG("ds[%d]: flags[0x%X]\n",
 			hw_ds->idx - DS_0, cfg->flags);
+		SDE_EVT32_VERBOSE(DRMID(crtc), hw_ds->idx - DS_0, cfg->flags);
 	}
 
 disable:
-	SDE_DEBUG("dest scaler enable status, old = %d, new = %d",
-		cstate->num_ds_enabled, num_ds_enable);
-	SDE_EVT32(DRMID(crtc), cstate->num_ds_enabled, num_ds_enable,
-		cstate->ds_dirty);
+	SDE_DEBUG("dest scaler status : %d -> %d\n",
+		cstate->num_ds_enabled,	num_ds_enable);
+	SDE_EVT32_VERBOSE(DRMID(crtc), cstate->num_ds_enabled, num_ds_enable,
+			cstate->num_ds, cstate->ds_dirty);
 
 	if (cstate->num_ds_enabled != num_ds_enable) {
 		/* Disabling destination scaler */
 		if (!num_ds_enable) {
-			for (i = 0; i < sde_crtc->num_mixers; i++) {
+			for (i = 0; i < cstate->num_ds; i++) {
 				cfg = &cstate->ds_cfg[i];
-				cfg->ndx = i;
+				cfg->idx = i;
 				/* Update scaler settings in disable case */
 				cfg->flags = SDE_DRM_DESTSCALER_SCALE_UPDATE;
-				cfg->scl3_cfg->enable = 0;
-				cfg->scl3_cfg->de.enable = 0;
-				cfg->set_lm_flush = true;
+				cfg->scl3_cfg.enable = 0;
+				cfg->scl3_cfg.de.enable = 0;
 			}
 		}
 		cstate->num_ds_enabled = num_ds_enable;
 		cstate->ds_dirty = true;
+	} else {
+		if (!cstate->num_ds_enabled)
+			cstate->ds_dirty = false;
 	}
 
 	return 0;
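
The validation above divides mode->hdisplay evenly across the layer mixers and bounds each destination-scaler LM rectangle against that share. A small, self-contained sketch of the same bound (lm_size_ok is an illustrative name, not a driver function); for example, a 1440x2560 mode on two mixers gives a 720-pixel-wide share per mixer:

	#include <stdbool.h>
	#include <stdint.h>

	/* illustrative only: mirrors the LM-size bound used in the check above */
	static bool lm_size_ok(uint32_t lm_w, uint32_t lm_h,
			       uint32_t hdisplay, uint32_t vdisplay,
			       uint32_t num_mixers)
	{
		uint32_t per_mixer_w = num_mixers ? hdisplay / num_mixers : 0;

		return lm_w && lm_h && lm_w <= per_mixer_w && lm_h <= vdisplay;
	}

	/*
	 * lm_size_ok(720, 2560, 1440, 2560, 2) -> true
	 * lm_size_ok(721, 2560, 1440, 2560, 2) -> false (exceeds the per-mixer share)
	 */
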
@@ -3097,6 +3087,12 @@
 	_sde_crtc_blend_setup(crtc, true);
 	_sde_crtc_dest_scaler_setup(crtc);
 
+	/* cancel the idle notify delayed work */
+	if (sde_encoder_check_mode(sde_crtc->mixers[0].encoder,
+					MSM_DISPLAY_CAP_VID_MODE) &&
+		kthread_cancel_delayed_work_sync(&sde_crtc->idle_notify_work))
+		SDE_DEBUG("idle notify work cancelled\n");
+
 	/*
 	 * Since CP properties use AXI buffer to program the
 	 * HW, check if context bank is in attached
@@ -3128,6 +3124,7 @@
 	struct msm_drm_thread *event_thread;
 	unsigned long flags;
 	struct sde_crtc_state *cstate;
+	int idle_time = 0;
 
 	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
 		SDE_ERROR("invalid crtc\n");
@@ -3153,6 +3150,7 @@
 	}
 
 	event_thread = &priv->event_thread[crtc->index];
+	idle_time = sde_crtc_get_property(cstate, CRTC_PROP_IDLE_TIMEOUT);
 
 	if (sde_crtc->event) {
 		SDE_DEBUG("already received sde_crtc->event\n");
@@ -3183,6 +3181,15 @@
 	/* wait for acquire fences before anything else is done */
 	_sde_crtc_wait_for_fences(crtc);
 
+	/* schedule the idle notify delayed work */
+	if (idle_time && sde_encoder_check_mode(sde_crtc->mixers[0].encoder,
+						MSM_DISPLAY_CAP_VID_MODE)) {
+		kthread_queue_delayed_work(&event_thread->worker,
+					&sde_crtc->idle_notify_work,
+					msecs_to_jiffies(idle_time));
+		SDE_DEBUG("schedule idle notify work in %dms\n", idle_time);
+	}
+
 	if (!cstate->rsc_update) {
 		drm_for_each_encoder(encoder, dev) {
 			if (encoder->crtc != crtc)
@@ -3481,6 +3488,60 @@
 	return -EAGAIN;
 }
 
+/**
+ * _sde_crtc_prepare_for_kickoff_rot - rotator related kickoff preparation
+ * @dev: Pointer to drm device
+ * @crtc: Pointer to crtc structure
+ * Returns: true on preparation errors
+ */
+static bool _sde_crtc_prepare_for_kickoff_rot(struct drm_device *dev,
+		struct drm_crtc *crtc)
+{
+	struct drm_encoder *encoder;
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
+
+	if (!crtc || !dev) {
+		SDE_ERROR("invalid argument(s)\n");
+		return false;
+	}
+	sde_crtc = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(crtc->state);
+
+	/* default to ASYNC mode for inline rotation */
+	cstate->sbuf_cfg.rot_op_mode = sde_crtc->sbuf_flush_mask ?
+		SDE_CTL_ROT_OP_MODE_INLINE_ASYNC : SDE_CTL_ROT_OP_MODE_OFFLINE;
+
+	if (cstate->sbuf_cfg.rot_op_mode == SDE_CTL_ROT_OP_MODE_OFFLINE)
+		return false;
+
+	/* extra steps needed for inline ASYNC modes */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc != crtc)
+			continue;
+
+		/*
+		 * For inline ASYNC modes, the flush bits are not written
+		 * to hardware atomically, so avoid using it if a video
+		 * mode encoder is active on this CRTC.
+		 */
+		if (sde_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO) {
+			cstate->sbuf_cfg.rot_op_mode =
+				SDE_CTL_ROT_OP_MODE_INLINE_SYNC;
+			return false;
+		}
+	}
+
+	/*
+	 * For ASYNC inline modes, kick off the rotator now so that the H/W
+	 * can start as soon as it's ready.
+	 */
+	if (_sde_crtc_commit_kickoff_rot(crtc, cstate))
+		return true;
+
+	return false;
+}
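
The helper above centralizes the rotator operating-mode decision: offline when no sbuf flush bits are pending, inline-sync when a video-mode interface shares the CRTC (the flush bits are not written atomically), inline-async otherwise. A reduced sketch of that priority order; the enum values stand in for the SDE_CTL_ROT_OP_MODE_* constants and pick_rot_mode is an illustrative name:

	#include <stdbool.h>

	enum rot_mode { ROT_OFFLINE, ROT_INLINE_SYNC, ROT_INLINE_ASYNC };

	/* illustrative: same priority order as _sde_crtc_prepare_for_kickoff_rot */
	static enum rot_mode pick_rot_mode(bool sbuf_pending, bool video_intf_attached)
	{
		if (!sbuf_pending)
			return ROT_OFFLINE;	/* no inline rotation queued */
		if (video_intf_attached)
			return ROT_INLINE_SYNC;	/* async flush unsafe with video-mode interfaces */
		return ROT_INLINE_ASYNC;	/* default: rotator can be kicked off early */
	}
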
+
 void sde_crtc_commit_kickoff(struct drm_crtc *crtc,
 		struct drm_crtc_state *old_state)
 {
@@ -3500,7 +3561,6 @@
 	dev = crtc->dev;
 	sde_crtc = to_sde_crtc(crtc);
 	sde_kms = _sde_crtc_get_kms(crtc);
-	is_error = false;
 	reset_req = false;
 
 	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private) {
@@ -3521,9 +3581,7 @@
 
 	SDE_ATRACE_BEGIN("crtc_commit");
 
-	/* default to ASYNC mode for inline rotation */
-	cstate->sbuf_cfg.rot_op_mode = sde_crtc->sbuf_flush_mask ?
-		SDE_CTL_ROT_OP_MODE_INLINE_ASYNC : SDE_CTL_ROT_OP_MODE_OFFLINE;
+	is_error = _sde_crtc_prepare_for_kickoff_rot(dev, crtc);
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 		struct sde_encoder_kickoff_params params = { 0 };
@@ -3540,29 +3598,9 @@
 				crtc->state);
 		if (sde_encoder_prepare_for_kickoff(encoder, &params))
 			reset_req = true;
-
-		/*
-		 * For inline ASYNC modes, the flush bits are not written
-		 * to hardware atomically, so avoid using it if a video
-		 * mode encoder is active on this CRTC.
-		 */
-		if (cstate->sbuf_cfg.rot_op_mode ==
-				SDE_CTL_ROT_OP_MODE_INLINE_ASYNC &&
-				sde_encoder_get_intf_mode(encoder) ==
-				INTF_MODE_VIDEO)
-			cstate->sbuf_cfg.rot_op_mode =
-				SDE_CTL_ROT_OP_MODE_INLINE_SYNC;
 	}
 
 	/*
-	 * For ASYNC inline modes, kick off the rotator now so that the H/W
-	 * can start as soon as it's ready.
-	 */
-	if (cstate->sbuf_cfg.rot_op_mode == SDE_CTL_ROT_OP_MODE_INLINE_ASYNC)
-		if (_sde_crtc_commit_kickoff_rot(crtc, cstate))
-			is_error = true;
-
-	/*
 	 * Optionally attempt h/w recovery if any errors were detected while
 	 * preparing for the kickoff
 	 */
@@ -3777,6 +3815,9 @@
 			old_cstate, cstate,
 			&cstate->property_state, cstate->property_values);
 
+	/* clear destination scaler dirty bit */
+	cstate->ds_dirty = false;
+
 	/* duplicate base helper */
 	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);
 
@@ -3843,19 +3884,22 @@
 {
 	struct drm_crtc *crtc = arg;
 	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
 	struct drm_plane *plane;
 	struct drm_encoder *encoder;
 	struct sde_crtc_mixer *m;
-	u32 i, misr_status;
+	u32 i, misr_status, power_on;
 	unsigned long flags;
 	struct sde_crtc_irq_info *node = NULL;
 	int ret = 0;
+	struct drm_event event;
 
 	if (!crtc) {
 		SDE_ERROR("invalid crtc\n");
 		return;
 	}
 	sde_crtc = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(crtc->state);
 
 	mutex_lock(&sde_crtc->crtc_lock);
 
@@ -3884,6 +3928,12 @@
 
 		sde_cp_crtc_post_ipc(crtc);
 
+		event.type = DRM_EVENT_SDE_POWER;
+		event.length = sizeof(power_on);
+		power_on = 1;
+		msm_mode_object_event_notify(&crtc->base, crtc->dev, &event,
+				(u8 *)&power_on);
+
 		for (i = 0; i < sde_crtc->num_mixers; ++i) {
 			m = &sde_crtc->mixers[i];
 			if (!m->hw_lm || !m->hw_lm->ops.setup_misr ||
@@ -3895,6 +3945,21 @@
 		}
 		break;
 	case SDE_POWER_EVENT_PRE_DISABLE:
+		drm_for_each_encoder(encoder, crtc->dev) {
+			if (encoder->crtc != crtc)
+				continue;
+			/*
+			 * disable the vsync source after updating the
+			 * rsc state. The rsc state update might include a
+			 * vsync wait, and the vsync source must be disabled
+			 * after it. This avoids generating any vsync from
+			 * this point until mode-2 entry. It is a SW
+			 * workaround for a HW limitation and should not be
+			 * removed without checking the updated design.
+			 */
+			sde_encoder_control_te(encoder, false);
+		}
+
 		for (i = 0; i < sde_crtc->num_mixers; ++i) {
 			m = &sde_crtc->mixers[i];
 			if (!m->hw_lm || !m->hw_lm->ops.collect_misr ||
@@ -3929,6 +3994,19 @@
 			sde_plane_set_revalidate(plane, true);
 
 		sde_cp_crtc_suspend(crtc);
+
+		/**
+		 * destination scaler, if enabled, should be reconfigured
+		 * in the next frame update
+		 */
+		if (cstate->num_ds_enabled)
+			sde_crtc->ds_reconfig = true;
+
+		event.type = DRM_EVENT_SDE_POWER;
+		event.length = sizeof(power_on);
+		power_on = 0;
+		msm_mode_object_event_notify(&crtc->base, crtc->dev, &event,
+				(u8 *)&power_on);
 		break;
 	default:
 		SDE_DEBUG("event:%d not handled\n", event_type);
@@ -3960,10 +4038,6 @@
 
 	SDE_DEBUG("crtc%d\n", crtc->base.id);
 
-	for (i = 0; i < cstate->num_connectors; i++)
-		sde_connector_schedule_status_work(cstate->connectors[i],
-							false);
-
 	if (sde_kms_is_suspend_state(crtc->dev))
 		_sde_crtc_set_suspend(crtc, true);
 
@@ -3978,6 +4052,10 @@
 	msm_mode_object_event_notify(&crtc->base, crtc->dev, &event,
 			(u8 *)&power_on);
 
+	/* destination scaler, if enabled, should be reconfigured on resume */
+	if (cstate->num_ds_enabled)
+		sde_crtc->ds_reconfig = true;
+
 	/* wait for frame_event_done completion */
 	if (_sde_crtc_wait_for_frame_done(crtc))
 		SDE_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
@@ -4060,15 +4138,13 @@
 	struct sde_crtc_irq_info *node = NULL;
 	struct drm_event event;
 	u32 power_on;
-	int ret, i;
-	struct sde_crtc_state *cstate;
+	int ret;
 
 	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
 		SDE_ERROR("invalid crtc\n");
 		return;
 	}
 	priv = crtc->dev->dev_private;
-	cstate = to_sde_crtc_state(crtc->state);
 
 	SDE_DEBUG("crtc%d\n", crtc->base.id);
 	SDE_EVT32_VERBOSE(DRMID(crtc));
@@ -4132,9 +4208,6 @@
 		SDE_POWER_EVENT_POST_ENABLE | SDE_POWER_EVENT_POST_DISABLE |
 		SDE_POWER_EVENT_PRE_DISABLE,
 		sde_crtc_handle_power_event, crtc, sde_crtc->name);
-
-	for (i = 0; i < cstate->num_connectors; i++)
-		sde_connector_schedule_status_work(cstate->connectors[i], true);
 }
 
 struct plane_state {
@@ -4239,13 +4312,14 @@
 }
 
 static int _sde_crtc_check_secure_state(struct drm_crtc *crtc,
-		struct drm_crtc_state *state)
+		struct drm_crtc_state *state, struct plane_state pstates[],
+		int cnt)
 {
 	struct drm_encoder *encoder;
 	struct sde_crtc_state *cstate;
 	uint32_t secure;
 	uint32_t fb_ns = 0, fb_sec = 0, fb_sec_dir = 0;
-	int encoder_cnt = 0;
+	int encoder_cnt = 0, i;
 	int rc;
 
 	if (!crtc || !state) {
@@ -4255,31 +4329,40 @@
 
 	cstate = to_sde_crtc_state(state);
 
-	secure = sde_crtc_get_property(cstate,
-			CRTC_PROP_SECURITY_LEVEL);
+	secure = sde_crtc_get_property(cstate, CRTC_PROP_SECURITY_LEVEL);
 
-	rc = _sde_crtc_find_plane_fb_modes(state,
-			&fb_ns,
-			&fb_sec,
-			&fb_sec_dir);
+	rc = _sde_crtc_find_plane_fb_modes(state, &fb_ns, &fb_sec, &fb_sec_dir);
 	if (rc)
 		return rc;
 
-	/**
-	 * validate planes
-	 * fb_sec_dir is for secure camera preview and secure display  use case,
-	 * fb_sec is for secure video playback,
-	 * fb_ns is for normal non secure use cases.
-	 */
-	if ((secure == SDE_DRM_SEC_ONLY) &&
-			(fb_ns || fb_sec || (fb_sec && fb_sec_dir))) {
-		SDE_ERROR(
-		"crtc%d: invalid planes fb_modes Sec:%d, NS:%d, Sec_Dir:%d\n",
+	if (secure == SDE_DRM_SEC_ONLY) {
+		/*
+		 * validate planes - only fb_sec_dir is allowed during sec_crtc
+		 * - fb_sec_dir is for secure camera preview and
+		 * secure display use case
+		 * - fb_sec is for secure video playback
+		 * - fb_ns is for normal non secure use cases
+		 */
+		if (fb_ns || fb_sec) {
+			SDE_ERROR(
+			 "crtc%d: invalid fb_modes Sec:%d, NS:%d, Sec_Dir:%d\n",
 				crtc->base.id, fb_sec, fb_ns, fb_sec_dir);
-		return -EINVAL;
+			return -EINVAL;
+		}
+
+		/* only one blending stage is allowed in sec_crtc */
+		for (i = 1; i < cnt; i++) {
+			if (pstates[i].stage != pstates[i-1].stage) {
+				SDE_ERROR(
+				  "crtc%d: invalid blend stages %d:%d, %d:%d\n",
+				  crtc->base.id, i, pstates[i].stage,
+				  i-1, pstates[i-1].stage);
+				return -EINVAL;
+			}
+		}
 	}
 
-	/**
+	/*
 	 * secure_crtc is not allowed in a shared topology
 	 * across different encoders.
 	 */
@@ -4288,17 +4371,15 @@
 			if (encoder->crtc ==  crtc)
 				encoder_cnt++;
 
-		if (encoder_cnt >
-			MAX_ALLOWED_ENCODER_CNT_PER_SECURE_CRTC) {
-			SDE_ERROR(
-				"crtc%d, invalid virtual encoder crtc%d\n",
-				crtc->base.id,
-				encoder_cnt);
+		if (encoder_cnt > MAX_ALLOWED_ENCODER_CNT_PER_SECURE_CRTC) {
+			SDE_ERROR("crtc%d, invalid virtual encoder crtc%d\n",
+				crtc->base.id, encoder_cnt);
 			return -EINVAL;
 
 		}
 	}
 	SDE_DEBUG("crtc:%d Secure validation successful\n", crtc->base.id);
+
 	return 0;
 }
 
@@ -4306,7 +4387,7 @@
 		struct drm_crtc_state *state)
 {
 	struct sde_crtc *sde_crtc;
-	struct plane_state pstates[SDE_STAGE_MAX * 4];
+	struct plane_state *pstates = NULL;
 	struct sde_crtc_state *cstate;
 
 	const struct drm_plane_state *pstate;
@@ -4315,7 +4396,7 @@
 
 	int cnt = 0, rc = 0, mixer_width, i, z_pos;
 
-	struct sde_multirect_plane_states multirect_plane[SDE_STAGE_MAX * 2];
+	struct sde_multirect_plane_states *multirect_plane = NULL;
 	int multirect_count = 0;
 	const struct drm_plane_state *pipe_staged[SSPP_MAX];
 	int left_zpos_cnt = 0, right_zpos_cnt = 0;
@@ -4334,6 +4415,17 @@
 		goto end;
 	}
 
+	pstates = kzalloc(SDE_PSTATES_MAX *
+			sizeof(struct plane_state), GFP_KERNEL);
+
+	multirect_plane = kzalloc(SDE_MULTIRECT_PLANE_MAX *
+		sizeof(struct sde_multirect_plane_states), GFP_KERNEL);
+
+	if (!pstates || !multirect_plane) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
 	mode = &state->adjusted_mode;
 	SDE_DEBUG("%s: check", sde_crtc->name);
 
@@ -4355,10 +4447,6 @@
 	_sde_crtc_setup_is_ppsplit(state);
 	_sde_crtc_setup_lm_bounds(crtc, state);
 
-	rc = _sde_crtc_check_secure_state(crtc, state);
-	if (rc)
-		return rc;
-
 	 /* get plane state for all drm planes associated with crtc state */
 	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
 		if (IS_ERR_OR_NULL(pstate)) {
@@ -4367,7 +4455,7 @@
 					sde_crtc->name, plane->base.id, rc);
 			goto end;
 		}
-		if (cnt >= ARRAY_SIZE(pstates))
+		if (cnt >= SDE_PSTATES_MAX)
 			continue;
 
 		pstates[cnt].sde_pstate = to_sde_plane_state(pstate);
@@ -4432,6 +4520,10 @@
 	/* assign mixer stages based on sorted zpos property */
 	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
 
+	rc = _sde_crtc_check_secure_state(crtc, state, pstates, cnt);
+	if (rc)
+		goto end;
+
 	rc = _sde_crtc_excl_dim_layer_check(state, pstates, cnt);
 	if (rc)
 		goto end;
@@ -4577,6 +4669,8 @@
 	}
 
 end:
+	kfree(pstates);
+	kfree(multirect_plane);
 	_sde_crtc_rp_free_unused(&cstate->rp);
 	return rc;
 }
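
The atomic_check hunk above moves two large scratch arrays off the kernel stack onto the heap and releases them at the shared end: label on every exit path. A minimal sketch of that allocation pattern, with a hypothetical bound and element type standing in for SDE_PSTATES_MAX and struct plane_state:

	#include <linux/slab.h>
	#include <linux/errno.h>

	#define PSTATES_MAX 32			/* hypothetical bound */

	struct scratch_state { int stage; };	/* hypothetical element type */

	static int check_with_heap_scratch(void)
	{
		struct scratch_state *pstates;
		int rc = 0;

		/* zeroed heap allocation instead of a large on-stack array */
		pstates = kcalloc(PSTATES_MAX, sizeof(*pstates), GFP_KERNEL);
		if (!pstates)
			return -ENOMEM;

		/* ... validation work using pstates[] sets rc ... */

		kfree(pstates);		/* the single exit frees on success and failure alike */
		return rc;
	}
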
@@ -4709,8 +4803,8 @@
 		"input_fence_timeout", 0x0, 0, SDE_CRTC_MAX_INPUT_FENCE_TIMEOUT,
 		SDE_CRTC_INPUT_FENCE_TIMEOUT, CRTC_PROP_INPUT_FENCE_TIMEOUT);
 
-	msm_property_install_range(&sde_crtc->property_info, "output_fence",
-			0x0, 0, INR_OPEN_MAX, 0, CRTC_PROP_OUTPUT_FENCE);
+	msm_property_install_volatile_range(&sde_crtc->property_info,
+		"output_fence", 0x0, 0, ~0, 0, CRTC_PROP_OUTPUT_FENCE);
 
 	msm_property_install_range(&sde_crtc->property_info,
 			"output_fence_offset", 0x0, 0, 1, 0,
@@ -4754,7 +4848,7 @@
 			CRTC_PROP_ROT_CLK);
 
 	msm_property_install_range(&sde_crtc->property_info,
-		"idle_time", IDLE_TIMEOUT, 0, U64_MAX, 0,
+		"idle_time", 0, 0, U64_MAX, 0,
 		CRTC_PROP_IDLE_TIMEOUT);
 
 	msm_property_install_blob(&sde_crtc->property_info, "capabilities",
@@ -4830,6 +4924,7 @@
 	}
 
 	sde_kms_info_add_keyint(info, "has_src_split", catalog->has_src_split);
+	sde_kms_info_add_keyint(info, "has_hdr", catalog->has_hdr);
 	if (catalog->perf.max_bw_low)
 		sde_kms_info_add_keyint(info, "max_bandwidth_low",
 				catalog->perf.max_bw_low * 1000LL);
@@ -4881,6 +4976,47 @@
 	kfree(info);
 }
 
+static int _sde_crtc_get_output_fence(struct drm_crtc *crtc,
+	const struct drm_crtc_state *state, uint64_t *val)
+{
+	struct drm_encoder *encoder;
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
+	uint32_t offset, i;
+	bool conn_offset = 0, is_cmd = true;
+
+	sde_crtc = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(state);
+
+	for (i = 0; i < cstate->num_connectors; ++i) {
+		conn_offset = sde_connector_needs_offset(cstate->connectors[i]);
+		if (conn_offset)
+			break;
+	}
+
+	/**
+	 * set the cmd flag only when all the encoders attached
+	 * to the crtc are in cmd mode. Consider all other cases
+	 * as video mode.
+	 */
+	drm_for_each_encoder(encoder, crtc->dev) {
+		if (encoder->crtc == crtc)
+			is_cmd = sde_encoder_check_mode(encoder,
+					MSM_DISPLAY_CAP_CMD_MODE);
+	}
+
+	offset = sde_crtc_get_property(cstate, CRTC_PROP_OUTPUT_FENCE_OFFSET);
+
+	/**
+	 * set the offset to 0 only for cmd mode panels, so
+	 * the release fence for the current frame can be
+	 * triggered right after PP_DONE interrupt.
+	 */
+	offset = is_cmd ? 0 : (offset + conn_offset);
+
+	return sde_fence_create(&sde_crtc->output_fence, val, offset);
+}
+
 /**
  * sde_crtc_atomic_set_property - atomically set a crtc drm property
  * @crtc: Pointer to drm crtc structure
@@ -4896,69 +5032,89 @@
 {
 	struct sde_crtc *sde_crtc;
 	struct sde_crtc_state *cstate;
-	int idx, ret = -EINVAL;
+	int idx, ret;
+	uint64_t fence_fd;
 
 	if (!crtc || !state || !property) {
 		SDE_ERROR("invalid argument(s)\n");
-	} else {
-		sde_crtc = to_sde_crtc(crtc);
-		cstate = to_sde_crtc_state(state);
-		ret = msm_property_atomic_set(&sde_crtc->property_info,
-				&cstate->property_state, property, val);
-		if (!ret) {
-			idx = msm_property_index(&sde_crtc->property_info,
-					property);
-			switch (idx) {
-			case CRTC_PROP_INPUT_FENCE_TIMEOUT:
-				_sde_crtc_set_input_fence_timeout(cstate);
-				break;
-			case CRTC_PROP_DIM_LAYER_V1:
-				_sde_crtc_set_dim_layer_v1(cstate,
-							(void __user *)val);
-				break;
-			case CRTC_PROP_ROI_V1:
-				ret = _sde_crtc_set_roi_v1(state,
-							(void __user *)val);
-				break;
-			case CRTC_PROP_DEST_SCALER:
-				ret = _sde_crtc_set_dest_scaler(sde_crtc,
-						cstate, (void __user *)val);
-				break;
-			case CRTC_PROP_DEST_SCALER_LUT_ED:
-			case CRTC_PROP_DEST_SCALER_LUT_CIR:
-			case CRTC_PROP_DEST_SCALER_LUT_SEP:
-				ret = _sde_crtc_set_dest_scaler_lut(sde_crtc,
-								cstate, idx);
-				break;
-			case CRTC_PROP_CORE_CLK:
-			case CRTC_PROP_CORE_AB:
-			case CRTC_PROP_CORE_IB:
-				cstate->bw_control = true;
-				break;
-			case CRTC_PROP_LLCC_AB:
-			case CRTC_PROP_LLCC_IB:
-			case CRTC_PROP_DRAM_AB:
-			case CRTC_PROP_DRAM_IB:
-				cstate->bw_control = true;
-				cstate->bw_split_vote = true;
-				break;
-			case CRTC_PROP_IDLE_TIMEOUT:
-				_sde_crtc_set_idle_timeout(crtc, val);
-			default:
-				/* nothing to do */
-				break;
-			}
-		} else {
-			ret = sde_cp_crtc_set_property(crtc,
-					property, val);
-		}
-		if (ret)
-			DRM_ERROR("failed to set the property\n");
-
-		SDE_DEBUG("crtc%d %s[%d] <= 0x%llx ret=%d\n", crtc->base.id,
-				property->name, property->base.id, val, ret);
+		return -EINVAL;
 	}
 
+	sde_crtc = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(state);
+
+	/* check with cp property system first */
+	ret = sde_cp_crtc_set_property(crtc, property, val);
+	if (ret != -ENOENT)
+		goto exit;
+
+	/* if not handled by cp, check msm_property system */
+	ret = msm_property_atomic_set(&sde_crtc->property_info,
+			&cstate->property_state, property, val);
+	if (ret)
+		goto exit;
+
+	idx = msm_property_index(&sde_crtc->property_info, property);
+	switch (idx) {
+	case CRTC_PROP_INPUT_FENCE_TIMEOUT:
+		_sde_crtc_set_input_fence_timeout(cstate);
+		break;
+	case CRTC_PROP_DIM_LAYER_V1:
+		_sde_crtc_set_dim_layer_v1(cstate, (void __user *)val);
+		break;
+	case CRTC_PROP_ROI_V1:
+		ret = _sde_crtc_set_roi_v1(state, (void __user *)val);
+		break;
+	case CRTC_PROP_DEST_SCALER:
+		ret = _sde_crtc_set_dest_scaler(sde_crtc, cstate,
+				(void __user *)val);
+		break;
+	case CRTC_PROP_DEST_SCALER_LUT_ED:
+	case CRTC_PROP_DEST_SCALER_LUT_CIR:
+	case CRTC_PROP_DEST_SCALER_LUT_SEP:
+		ret = _sde_crtc_set_dest_scaler_lut(sde_crtc, cstate, idx);
+		break;
+	case CRTC_PROP_CORE_CLK:
+	case CRTC_PROP_CORE_AB:
+	case CRTC_PROP_CORE_IB:
+		cstate->bw_control = true;
+		break;
+	case CRTC_PROP_LLCC_AB:
+	case CRTC_PROP_LLCC_IB:
+	case CRTC_PROP_DRAM_AB:
+	case CRTC_PROP_DRAM_IB:
+		cstate->bw_control = true;
+		cstate->bw_split_vote = true;
+		break;
+	case CRTC_PROP_OUTPUT_FENCE:
+		ret = _sde_crtc_get_output_fence(crtc, state, &fence_fd);
+		if (ret) {
+			SDE_ERROR("fence create failed rc:%d\n", ret);
+			goto exit;
+		}
+
+		ret = copy_to_user((uint64_t __user *)val, &fence_fd,
+				sizeof(uint64_t));
+		if (ret) {
+			SDE_ERROR("copy to user failed rc:%d\n", ret);
+			put_unused_fd(fence_fd);
+			ret = -EFAULT;
+			goto exit;
+		}
+		break;
+	default:
+		/* nothing to do */
+		break;
+	}
+
+exit:
+	if (ret)
+		SDE_ERROR("%s: failed to set property%d %s: %d\n", crtc->name,
+				DRMID(property), property->name, ret);
+	else
+		SDE_DEBUG("%s: %s[%d] <= 0x%llx\n", crtc->name, property->name,
+				property->base.id, val);
+
 	return ret;
 }
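
With output_fence installed as a volatile range property, the release-fence fd is now returned through the user-supplied pointer while set_property runs in the atomic commit path. A hedged user-space sketch under the assumption that the standard libdrm atomic API is used; req, crtc_id and prop_output_fence are placeholders the caller is assumed to have already obtained:

	#include <stdint.h>
	#include <xf86drmMode.h>

	/*
	 * Pass a pointer to a u64 as the property value; the driver writes the
	 * release-fence fd into it while processing the atomic commit.
	 */
	static int add_output_fence(drmModeAtomicReq *req, uint32_t crtc_id,
				    uint32_t prop_output_fence, uint64_t *fence_fd)
	{
		*fence_fd = 0;
		return drmModeAtomicAddProperty(req, crtc_id, prop_output_fence,
						(uint64_t)(uintptr_t)fence_fd);
	}

	/*
	 * After drmModeAtomicCommit() succeeds, *fence_fd holds a sync-file fd
	 * that can be polled and must be closed when no longer needed.
	 */
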
 
@@ -4992,62 +5148,29 @@
 {
 	struct sde_crtc *sde_crtc;
 	struct sde_crtc_state *cstate;
-	struct drm_encoder *encoder;
-	int i, ret = -EINVAL;
-	bool conn_offset = 0;
-	bool is_cmd = true;
+	int ret = -EINVAL, i;
 
 	if (!crtc || !state) {
 		SDE_ERROR("invalid argument(s)\n");
-	} else {
-		sde_crtc = to_sde_crtc(crtc);
-		cstate = to_sde_crtc_state(state);
-
-		for (i = 0; i < cstate->num_connectors; ++i) {
-			conn_offset = sde_connector_needs_offset(
-						cstate->connectors[i]);
-			if (conn_offset)
-				break;
-		}
-
-		/**
-		 * set the cmd flag only when all the encoders attached
-		 * to the crtc are in cmd mode. Consider all other cases
-		 * as video mode.
-		 */
-		drm_for_each_encoder(encoder, crtc->dev) {
-			if (encoder->crtc == crtc)
-				is_cmd = sde_encoder_check_mode(encoder,
-						MSM_DISPLAY_CAP_CMD_MODE);
-		}
-
-		i = msm_property_index(&sde_crtc->property_info, property);
-		if (i == CRTC_PROP_OUTPUT_FENCE) {
-			uint32_t offset = sde_crtc_get_property(cstate,
-					CRTC_PROP_OUTPUT_FENCE_OFFSET);
-
-			/**
-			 * set the offset to 0 only for cmd mode panels, so
-			 * the release fence for the current frame can be
-			 * triggered right after PP_DONE interrupt.
-			 */
-			offset = is_cmd ? 0 : (offset + conn_offset);
-
-			ret = sde_fence_create(&sde_crtc->output_fence, val,
-								offset);
-			if (ret)
-				SDE_ERROR("fence create failed\n");
-		} else {
-			ret = msm_property_atomic_get(&sde_crtc->property_info,
-					&cstate->property_state,
-					property, val);
-			if (ret)
-				ret = sde_cp_crtc_get_property(crtc,
-					property, val);
-		}
-		if (ret)
-			DRM_ERROR("get property failed\n");
+		goto end;
 	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(state);
+
+	i = msm_property_index(&sde_crtc->property_info, property);
+	if (i == CRTC_PROP_OUTPUT_FENCE) {
+		ret = _sde_crtc_get_output_fence(crtc, state, val);
+	} else {
+		ret = msm_property_atomic_get(&sde_crtc->property_info,
+			&cstate->property_state, property, val);
+		if (ret)
+			ret = sde_cp_crtc_get_property(crtc, property, val);
+	}
+	if (ret)
+		DRM_ERROR("get property failed\n");
+
+end:
 	return ret;
 }
 
@@ -5545,6 +5668,30 @@
 	return rc;
 }
 
+/*
+ * __sde_crtc_idle_notify_work - signal idle timeout to user space
+ */
+static void __sde_crtc_idle_notify_work(struct kthread_work *work)
+{
+	struct sde_crtc *sde_crtc = container_of(work, struct sde_crtc,
+				idle_notify_work.work);
+	struct drm_crtc *crtc;
+	struct drm_event event;
+	int ret = 0;
+
+	if (!sde_crtc) {
+		SDE_ERROR("invalid sde crtc\n");
+	} else {
+		crtc = &sde_crtc->base;
+		event.type = DRM_EVENT_IDLE_NOTIFY;
+		event.length = sizeof(u32);
+		msm_mode_object_event_notify(&crtc->base, crtc->dev,
+				&event, (u8 *)&ret);
+
+		SDE_DEBUG("crtc[%d]: idle timeout notified\n", crtc->base.id);
+	}
+}
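
The idle-notify path is built on the kthread delayed-work API: the work is initialized once at crtc init, (re)queued around each commit with the property-supplied idle time, and cancelled synchronously when a new frame comes in. A minimal standalone sketch of that lifecycle; my_ctx and the my_* functions are illustrative names, not driver symbols:

	#include <linux/kthread.h>
	#include <linux/jiffies.h>
	#include <linux/printk.h>

	struct my_ctx {
		struct kthread_worker *worker;		/* e.g. the crtc event thread */
		struct kthread_delayed_work idle_work;
	};

	static void my_idle_work_fn(struct kthread_work *work)
	{
		/* runs on the worker thread once the delay expires without a cancel */
	}

	static void my_init(struct my_ctx *c)
	{
		kthread_init_delayed_work(&c->idle_work, my_idle_work_fn);
	}

	static void my_arm(struct my_ctx *c, u32 idle_ms)
	{
		if (idle_ms)
			kthread_queue_delayed_work(c->worker, &c->idle_work,
						   msecs_to_jiffies(idle_ms));
	}

	static void my_disarm(struct my_ctx *c)
	{
		/* returns true only if a pending or running work was cancelled */
		if (kthread_cancel_delayed_work_sync(&c->idle_work))
			pr_debug("idle work cancelled before it could run\n");
	}
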
+
 /* initialize crtc */
 struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_plane *plane)
 {
@@ -5612,13 +5759,13 @@
 
 	sde_crtc_install_properties(crtc, kms->catalog);
 
-	/* Init dest scaler */
-	_sde_crtc_dest_scaler_init(sde_crtc, kms->catalog);
-
 	/* Install color processing properties */
 	sde_cp_crtc_init(crtc);
 	sde_cp_crtc_install_properties(crtc);
 
+	kthread_init_delayed_work(&sde_crtc->idle_notify_work,
+					__sde_crtc_idle_notify_work);
+
 	SDE_DEBUG("%s: successfully initialized crtc\n", sde_crtc->name);
 	return crtc;
 }
@@ -5760,8 +5907,30 @@
 	return 0;
 }
 
+static int sde_crtc_pm_event_handler(struct drm_crtc *crtc, bool en,
+		struct sde_irq_callback *noirq)
+{
+	/*
+	 * IRQ object noirq is not being used here since there is
+	 * no crtc irq from pm event.
+	 */
+	return 0;
+}
+
 static int sde_crtc_idle_interrupt_handler(struct drm_crtc *crtc_drm,
 	bool en, struct sde_irq_callback *irq)
 {
 	return 0;
 }
+
+/**
+ * sde_crtc_update_cont_splash_mixer_settings - update mixer settings
+ *	during device bootup for cont_splash use case
+ * @crtc: Pointer to drm crtc structure
+ */
+void sde_crtc_update_cont_splash_mixer_settings(
+		struct drm_crtc *crtc)
+{
+	_sde_crtc_setup_mixers(crtc);
+	crtc->enabled = true;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index cdf2ed1..1d5b65e 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -188,6 +188,7 @@
  * @enabled       : whether the SDE CRTC is currently enabled. updated in the
  *                  commit-thread, not state-swap time which is earlier, so
  *                  safe to make decisions on during VBLANK on/off work
+ * @ds_reconfig   : force reconfiguration of the destination scaler block
  * @feature_list  : list of color processing features supported on a crtc
  * @active_list   : list of color processing features are active
  * @dirty_list    : list of color processing features are dirty
@@ -211,11 +212,11 @@
  * @misr_data     : store misr data before turning off the clocks.
  * @sbuf_flush_mask: flush mask for inline rotator
  * @sbuf_flush_mask_old: inline rotator flush mask for previous commit
+ * @idle_notify_work: delayed worker to notify idle timeout to user space
  * @power_event   : registered power event handle
  * @cur_perf      : current performance committed to clock/bandwidth driver
  * @rp_lock       : serialization lock for resource pool
  * @rp_head       : list of active resource pool
- * @scl3_cfg_lut  : qseed3 lut config
  */
 struct sde_crtc {
 	struct drm_crtc base;
@@ -226,7 +227,6 @@
 	u32 num_mixers;
 	bool mixers_swapped;
 	struct sde_crtc_mixer mixers[CRTC_DUAL_MIXERS];
-	struct sde_hw_scaler3_lut_cfg *scl3_lut_cfg;
 
 	struct drm_pending_vblank_event *event;
 	u32 vsync_count;
@@ -248,6 +248,7 @@
 	bool suspend;
 	bool enabled;
 
+	bool ds_reconfig;
 	struct list_head feature_list;
 	struct list_head active_list;
 	struct list_head dirty_list;
@@ -276,6 +277,7 @@
 
 	u32 sbuf_flush_mask;
 	u32 sbuf_flush_mask_old;
+	struct kthread_delayed_work idle_notify_work;
 
 	struct sde_power_event *power_event;
 
@@ -374,6 +376,7 @@
  * @num_ds_enabled: Number of destination scalers enabled
  * @ds_dirty: Boolean to indicate if dirty or not
  * @ds_cfg: Destination scaler config
+ * @scl3_lut_cfg: QSEED3 lut config
  * @new_perf: new performance state being requested
  * @sbuf_cfg: stream buffer configuration
  * @sbuf_prefill_line: number of line for inline rotator prefetch
@@ -403,6 +406,7 @@
 	uint32_t num_ds_enabled;
 	bool ds_dirty;
 	struct sde_hw_ds_cfg ds_cfg[SDE_MAX_DS_COUNT];
+	struct sde_hw_scaler3_lut_cfg scl3_lut_cfg;
 
 	struct sde_core_perf_params new_perf;
 	struct sde_ctl_sbuf_cfg sbuf_cfg;
@@ -724,4 +728,18 @@
 int sde_crtc_helper_reset_custom_properties(struct drm_crtc *crtc,
 		struct drm_crtc_state *crtc_state);
 
+/**
+ * sde_crtc_timeline_status - current buffer timeline status
+ * @crtc: Pointer to crtc
+ */
+void sde_crtc_timeline_status(struct drm_crtc *crtc);
+
+/**
+ * sde_crtc_update_cont_splash_mixer_settings - update mixer settings
+ *	during device bootup for cont_splash use case
+ * @crtc: Pointer to drm crtc structure
+ */
+void sde_crtc_update_cont_splash_mixer_settings(
+		struct drm_crtc *crtc);
+
 #endif /* _SDE_CRTC_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 1616705..d7a3f24 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -73,6 +73,10 @@
 
 #define IDLE_SHORT_TIMEOUT	1
 
+#define FAULT_TOLERENCE_DELTA_IN_MS 2
+
+#define FAULT_TOLERENCE_WAIT_IN_MS 5
+
 /* Maximum number of VSYNC wait attempts for RSC state transition */
 #define MAX_RSC_WAIT	5
 
@@ -88,7 +92,7 @@
  *	This event happens at INTERRUPT level.
  *	Event signals the end of the data transfer after the PP FRAME_DONE
  *	event. At the end of this event, a delayed work is scheduled to go to
- *	IDLE_PC state after IDLE_TIMEOUT time.
+ *	IDLE_PC state after IDLE_POWERCOLLAPSE_DURATION time.
  * @SDE_ENC_RC_EVENT_PRE_STOP:
  *	This event happens at NORMAL priority.
  *	This event, when received during the ON state, set RSC to IDLE, and
@@ -114,9 +118,9 @@
  *	with new vtotal.
  * @SDE_ENC_RC_EVENT_ENTER_IDLE:
  *	This event happens at NORMAL priority from a work item.
- *	Event signals that there were no frame updates for IDLE_TIMEOUT time.
- *	This would disable MDP/DSI core clocks and request RSC with IDLE state
- *	and change the resource state to IDLE.
+ *	Event signals that there were no frame updates for
+ *	IDLE_POWERCOLLAPSE_DURATION time. This would disable MDP/DSI core clocks
+ *	and request RSC with IDLE state and change the resource state to IDLE.
  */
 enum sde_enc_rc_events {
 	SDE_ENC_RC_EVENT_KICKOFF = 1,
@@ -177,13 +181,10 @@
  *				Bit0 = phys_encs[0] etc.
  * @crtc_frame_event_cb:	callback handler for frame event
  * @crtc_frame_event_cb_data:	callback handler private data
- * @frame_done_timeout:		frame done timeout in Hz
- * @frame_done_timer:		watchdog timer for frame done event
  * @vsync_event_timer:		vsync timer
  * @rsc_client:			rsc client pointer
  * @rsc_state_init:		boolean to indicate rsc config init
  * @disp_info:			local copy of msm_display_info struct
- * @mode_info:			local copy of msm_mode_info struct
  * @misr_enable:		misr enable/disable status
  * @misr_frame_count:		misr frame count before start capturing the data
  * @idle_pc_supported:		indicate if idle power collaps is supported
@@ -198,7 +199,6 @@
  * @rsc_config:			rsc configuration for display vtotal, fps, etc.
  * @cur_conn_roi:		current connector roi
  * @prv_conn_roi:		previous connector roi to optimize if unchanged
- * @idle_timeout:		idle timeout duration in milliseconds
  */
 struct sde_encoder_virt {
 	struct drm_encoder base;
@@ -224,14 +224,11 @@
 	void (*crtc_frame_event_cb)(void *, u32 event);
 	void *crtc_frame_event_cb_data;
 
-	atomic_t frame_done_timeout;
-	struct timer_list frame_done_timer;
 	struct timer_list vsync_event_timer;
 
 	struct sde_rsc_client *rsc_client;
 	bool rsc_state_init;
 	struct msm_display_info disp_info;
-	struct msm_mode_info mode_info;
 	bool misr_enable;
 	u32 misr_frame_count;
 
@@ -246,38 +243,75 @@
 	struct sde_rsc_cmd_config rsc_config;
 	struct sde_rect cur_conn_roi;
 	struct sde_rect prv_conn_roi;
-
-	u32 idle_timeout;
 };
 
 #define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base)
 
-bool sde_encoder_is_dsc_enabled(struct drm_encoder *drm_enc)
-
+static struct drm_connector_state *_sde_encoder_get_conn_state(
+		struct drm_encoder *drm_enc)
 {
-	struct sde_encoder_virt *sde_enc;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+	struct list_head *connector_list;
+	struct drm_connector *conn_iter;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid argument\n");
+		return NULL;
+	}
+
+	priv = drm_enc->dev->dev_private;
+	sde_kms = to_sde_kms(priv->kms);
+	connector_list = &sde_kms->dev->mode_config.connector_list;
+
+	list_for_each_entry(conn_iter, connector_list, head)
+		if (conn_iter->encoder == drm_enc)
+			return conn_iter->state;
+
+	return NULL;
+}
+
+static int _sde_encoder_get_mode_info(struct drm_encoder *drm_enc,
+		struct msm_mode_info *mode_info)
+{
+	struct drm_connector_state *conn_state;
+
+	if (!drm_enc || !mode_info) {
+		SDE_ERROR("invalid arguments\n");
+		return -EINVAL;
+	}
+
+	conn_state = _sde_encoder_get_conn_state(drm_enc);
+	if (!conn_state) {
+		SDE_ERROR("invalid connector state for the encoder: %d\n",
+			drm_enc->base.id);
+		return -EINVAL;
+	}
+
+	return sde_connector_get_mode_info(conn_state, mode_info);
+}
+
+static bool _sde_encoder_is_dsc_enabled(struct drm_encoder *drm_enc)
+{
 	struct msm_compression_info *comp_info;
+	struct msm_mode_info mode_info;
+	int rc = 0;
 
 	if (!drm_enc)
 		return false;
 
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	comp_info = &sde_enc->mode_info.comp_info;
+	rc = _sde_encoder_get_mode_info(drm_enc, &mode_info);
+	if (rc) {
+		SDE_ERROR("failed to get mode info, enc: %d\n",
+			drm_enc->base.id);
+		return false;
+	}
+
+	comp_info = &mode_info.comp_info;
 
 	return (comp_info->comp_type == MSM_DISPLAY_COMPRESSION_DSC);
 }
 
-void sde_encoder_set_idle_timeout(struct drm_encoder *drm_enc, u32 idle_timeout)
-{
-	struct sde_encoder_virt *sde_enc;
-
-	if (!drm_enc)
-		return;
-
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	sde_enc->idle_timeout = idle_timeout;
-}
-
 bool sde_encoder_is_dsc_merge(struct drm_encoder *drm_enc)
 {
 	enum sde_rm_topology_name topology;
@@ -536,7 +570,8 @@
 		struct drm_connector_state *conn_state)
 {
 	struct sde_encoder_virt *sde_enc = NULL;
-	int i = 0;
+	struct msm_mode_info mode_info;
+	int rc, i = 0;
 
 	if (!hw_res || !drm_enc || !conn_state) {
 		SDE_ERROR("invalid argument(s), drm_enc %d, res %d, state %d\n",
@@ -558,7 +593,18 @@
 			phys->ops.get_hw_resources(phys, hw_res, conn_state);
 	}
 
-	hw_res->topology = sde_enc->mode_info.topology;
+	/**
+	 * NOTE: Do not use _sde_encoder_get_mode_info here, as this function
+	 * is called from the atomic_check phase. Use the API below to get the
+	 * mode information of the temporary conn_state that was passed in.
+	 */
+	rc = sde_connector_get_mode_info(conn_state, &mode_info);
+	if (rc) {
+		SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
+		return;
+	}
+
+	hw_res->topology = mode_info.topology;
 	hw_res->is_primary = sde_enc->disp_info.is_primary;
 }
 
@@ -699,6 +745,8 @@
 	const struct drm_display_mode *mode;
 	struct drm_display_mode *adj_mode;
 	struct sde_connector *sde_conn = NULL;
+	struct sde_connector_state *sde_conn_state = NULL;
+	struct sde_crtc_state *sde_crtc_state = NULL;
 	int i = 0;
 	int ret = 0;
 
@@ -716,7 +764,10 @@
 	mode = &crtc_state->mode;
 	adj_mode = &crtc_state->adjusted_mode;
 	sde_conn = to_sde_connector(conn_state->connector);
-	SDE_EVT32(DRMID(drm_enc));
+	sde_conn_state = to_sde_connector_state(conn_state);
+	sde_crtc_state = to_sde_crtc_state(crtc_state);
+
+	SDE_EVT32(DRMID(drm_enc), drm_atomic_crtc_needs_modeset(crtc_state));
 
 	/*
 	 * display drivers may populate private fields of the drm display mode
@@ -745,12 +796,59 @@
 		}
 	}
 
+	if (!ret && drm_atomic_crtc_needs_modeset(crtc_state)) {
+		struct sde_rect mode_roi, roi;
+
+		mode_roi.x = 0;
+		mode_roi.y = 0;
+		mode_roi.w = crtc_state->adjusted_mode.hdisplay;
+		mode_roi.h = crtc_state->adjusted_mode.vdisplay;
+
+		if (sde_conn_state->rois.num_rects) {
+			sde_kms_rect_merge_rectangles(
+					&sde_conn_state->rois, &roi);
+			if (!sde_kms_rect_is_equal(&mode_roi, &roi)) {
+				SDE_ERROR_ENC(sde_enc,
+					"roi (%d,%d,%d,%d) on connector invalid during modeset\n",
+					roi.x, roi.y, roi.w, roi.h);
+				ret = -EINVAL;
+			}
+		}
+
+		if (sde_crtc_state->user_roi_list.num_rects) {
+			sde_kms_rect_merge_rectangles(
+					&sde_crtc_state->user_roi_list, &roi);
+			if (!sde_kms_rect_is_equal(&mode_roi, &roi)) {
+				SDE_ERROR_ENC(sde_enc,
+					"roi (%d,%d,%d,%d) on crtc invalid during modeset\n",
+					roi.x, roi.y, roi.w, roi.h);
+				ret = -EINVAL;
+			}
+		}
+
+		if (ret)
+			return ret;
+	}
+
+	if (!ret) {
+		/**
+		 * record topology in previous atomic state to be able to handle
+		 * topology transitions correctly.
+		 */
+		enum sde_rm_topology_name old_top;
+
+		old_top  = sde_connector_get_property(conn_state,
+				CONNECTOR_PROP_TOPOLOGY_NAME);
+		ret = sde_connector_set_old_topology_name(conn_state, old_top);
+		if (ret)
+			return ret;
+	}
 
 	if (!ret && sde_conn && drm_atomic_crtc_needs_modeset(crtc_state)) {
 		struct msm_display_topology *topology = NULL;
 
 		ret = sde_conn->ops.get_mode_info(adj_mode,
-				&sde_enc->mode_info,
+				&sde_conn_state->mode_info,
 				sde_kms->catalog->max_mixer_width,
 				sde_conn->display);
 		if (ret) {
@@ -775,7 +873,7 @@
 		 * de-activating crtc.
 		 */
 		if (crtc_state->active)
-			topology = &sde_enc->mode_info.topology;
+			topology = &sde_conn_state->mode_info.topology;
 
 		ret = sde_rm_update_topology(conn_state, topology);
 		if (ret) {
@@ -783,6 +881,24 @@
 				"RM failed to update topology, rc: %d\n", ret);
 			return ret;
 		}
+
+		ret = sde_connector_set_blob_data(conn_state->connector,
+				conn_state,
+				CONNECTOR_PROP_SDE_INFO);
+		if (ret) {
+			SDE_ERROR_ENC(sde_enc,
+				"connector failed to update info, rc: %d\n",
+				ret);
+			return ret;
+		}
+
+	}
+
+	ret = sde_connector_roi_v1_check_roi(conn_state);
+	if (ret) {
+		SDE_ERROR_ENC(sde_enc, "connector roi check failed, rc: %d",
+				ret);
+		return ret;
 	}
 
 	if (!ret)
@@ -948,14 +1064,23 @@
 	struct sde_hw_dsc *hw_dsc = sde_enc->hw_dsc[0];
 	struct sde_encoder_phys *enc_master = sde_enc->cur_master;
 	const struct sde_rect *roi = &sde_enc->cur_conn_roi;
-	struct msm_display_dsc_info *dsc =
-		&sde_enc->mode_info.comp_info.dsc_info;
+	struct msm_mode_info mode_info;
+	struct msm_display_dsc_info *dsc = NULL;
+	int rc;
 
-	if (dsc == NULL || hw_dsc == NULL || hw_pp == NULL || !enc_master) {
+	if (hw_dsc == NULL || hw_pp == NULL || !enc_master) {
 		SDE_ERROR_ENC(sde_enc, "invalid params for DSC\n");
 		return -EINVAL;
 	}
 
+	rc = _sde_encoder_get_mode_info(&sde_enc->base, &mode_info);
+	if (rc) {
+		SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
+		return -EINVAL;
+	}
+
+	dsc = &mode_info.comp_info.dsc_info;
+
 	_sde_encoder_dsc_update_pic_dim(dsc, roi->w, roi->h);
 
 	this_frame_slices = roi->w / dsc->slice_width;
@@ -992,8 +1117,9 @@
 	struct sde_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
 	struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
 	struct msm_display_dsc_info dsc[MAX_CHANNELS_PER_ENC];
+	struct msm_mode_info mode_info;
 	bool half_panel_partial_update;
-	int i;
+	int i, rc;
 
 	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
 		hw_pp[i] = sde_enc->hw_pp[i];
@@ -1005,6 +1131,12 @@
 		}
 	}
 
+	rc = _sde_encoder_get_mode_info(&sde_enc->base, &mode_info);
+	if (rc) {
+		SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
+		return -EINVAL;
+	}
+
 	half_panel_partial_update =
 			hweight_long(params->affected_displays) == 1;
 
@@ -1014,8 +1146,8 @@
 	if (enc_master->intf_mode == INTF_MODE_VIDEO)
 		dsc_common_mode |= DSC_MODE_VIDEO;
 
-	memcpy(&dsc[0], &sde_enc->mode_info.comp_info.dsc_info, sizeof(dsc[0]));
-	memcpy(&dsc[1], &sde_enc->mode_info.comp_info.dsc_info, sizeof(dsc[1]));
+	memcpy(&dsc[0], &mode_info.comp_info.dsc_info, sizeof(dsc[0]));
+	memcpy(&dsc[1], &mode_info.comp_info.dsc_info, sizeof(dsc[1]));
 
 	/*
 	 * Since both DSC use same pic dimension, set same pic dimension
@@ -1078,10 +1210,10 @@
 	const struct sde_rect *roi = &sde_enc->cur_conn_roi;
 	struct sde_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
 	struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
-	struct msm_display_dsc_info *dsc =
-		&sde_enc->mode_info.comp_info.dsc_info;
+	struct msm_display_dsc_info *dsc = NULL;
+	struct msm_mode_info mode_info;
 	bool half_panel_partial_update;
-	int i;
+	int i, rc;
 
 	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
 		hw_pp[i] = sde_enc->hw_pp[i];
@@ -1093,6 +1225,14 @@
 		}
 	}
 
+	rc = _sde_encoder_get_mode_info(&sde_enc->base, &mode_info);
+	if (rc) {
+		SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
+		return -EINVAL;
+	}
+
+	dsc = &mode_info.comp_info.dsc_info;
+
 	half_panel_partial_update =
 			hweight_long(params->affected_displays) == 1;
 
@@ -1181,7 +1321,17 @@
 	}
 
 	SDE_DEBUG_ENC(sde_enc, "topology:%d\n", topology);
-	SDE_EVT32(DRMID(&sde_enc->base));
+	SDE_EVT32(DRMID(&sde_enc->base), topology,
+			sde_enc->cur_conn_roi.x,
+			sde_enc->cur_conn_roi.y,
+			sde_enc->cur_conn_roi.w,
+			sde_enc->cur_conn_roi.h,
+			sde_enc->prv_conn_roi.x,
+			sde_enc->prv_conn_roi.y,
+			sde_enc->prv_conn_roi.w,
+			sde_enc->prv_conn_roi.h,
+			sde_enc->base.crtc->state->adjusted_mode.hdisplay,
+			sde_enc->base.crtc->state->adjusted_mode.vdisplay);
 
 	if (sde_kms_rect_is_equal(&sde_enc->cur_conn_roi,
 			&sde_enc->prv_conn_roi))
@@ -1215,8 +1365,8 @@
 	struct sde_kms *sde_kms;
 	struct sde_hw_mdp *hw_mdptop;
 	struct drm_encoder *drm_enc;
-	struct msm_mode_info *mode_info;
-	int i;
+	struct msm_mode_info mode_info;
+	int i, rc = 0;
 
 	if (!sde_enc || !disp_info) {
 		SDE_ERROR("invalid param sde_enc:%d or disp_info:%d\n",
@@ -1245,9 +1395,9 @@
 		return;
 	}
 
-	mode_info = &sde_enc->mode_info;
-	if (!mode_info) {
-		SDE_ERROR("invalid mode info\n");
+	rc = _sde_encoder_get_mode_info(drm_enc, &mode_info);
+	if (rc) {
+		SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
 		return;
 	}
 
@@ -1257,7 +1407,7 @@
 			vsync_cfg.ppnumber[i] = sde_enc->hw_pp[i]->idx;
 
 		vsync_cfg.pp_count = sde_enc->num_phys_encs;
-		vsync_cfg.frame_rate = mode_info->frame_rate;
+		vsync_cfg.frame_rate = mode_info.frame_rate;
 		if (is_dummy)
 			vsync_cfg.vsync_source = SDE_VSYNC_SOURCE_WD_TIMER_1;
 		else if (disp_info->is_te_using_watchdog_timer)
@@ -1308,11 +1458,12 @@
 	struct sde_rsc_cmd_config *rsc_config;
 	int ret, prefill_lines;
 	struct msm_display_info *disp_info;
-	struct msm_mode_info *mode_info;
+	struct msm_mode_info mode_info;
 	int wait_vblank_crtc_id = SDE_RSC_INVALID_CRTC_ID;
 	int wait_count = 0;
 	struct drm_crtc *primary_crtc;
 	int pipe = -1;
+	int rc = 0;
 
 	if (!drm_enc || !drm_enc->crtc || !drm_enc->dev) {
 		SDE_ERROR("invalid arguments\n");
@@ -1322,7 +1473,6 @@
 	sde_enc = to_sde_encoder_virt(drm_enc);
 	crtc = drm_enc->crtc;
 	disp_info = &sde_enc->disp_info;
-	mode_info = &sde_enc->mode_info;
 	rsc_config = &sde_enc->rsc_config;
 
 	if (!sde_enc->rsc_client) {
@@ -1330,6 +1480,12 @@
 		return 0;
 	}
 
+	rc = _sde_encoder_get_mode_info(drm_enc, &mode_info);
+	if (rc) {
+		SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
+		return 0;
+	}
+
 	/**
 	 * only primary command mode panel can request CMD state.
 	 * all other panels/displays can request for VID state including
@@ -1339,20 +1495,20 @@
 		(((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) &&
 		  disp_info->is_primary) ? SDE_RSC_CMD_STATE :
 		SDE_RSC_VID_STATE) : SDE_RSC_IDLE_STATE;
-	prefill_lines = config ? mode_info->prefill_lines +
-		config->inline_rotate_prefill : mode_info->prefill_lines;
+	prefill_lines = config ? mode_info.prefill_lines +
+		config->inline_rotate_prefill : mode_info.prefill_lines;
 
 	/* compare specific items and reconfigure the rsc */
-	if ((rsc_config->fps != mode_info->frame_rate) ||
-	    (rsc_config->vtotal != mode_info->vtotal) ||
+	if ((rsc_config->fps != mode_info.frame_rate) ||
+	    (rsc_config->vtotal != mode_info.vtotal) ||
 	    (rsc_config->prefill_lines != prefill_lines) ||
-	    (rsc_config->jitter_numer != mode_info->jitter_numer) ||
-	    (rsc_config->jitter_denom != mode_info->jitter_denom)) {
-		rsc_config->fps = mode_info->frame_rate;
-		rsc_config->vtotal = mode_info->vtotal;
+	    (rsc_config->jitter_numer != mode_info.jitter_numer) ||
+	    (rsc_config->jitter_denom != mode_info.jitter_denom)) {
+		rsc_config->fps = mode_info.frame_rate;
+		rsc_config->vtotal = mode_info.vtotal;
 		rsc_config->prefill_lines = prefill_lines;
-		rsc_config->jitter_numer = mode_info->jitter_numer;
-		rsc_config->jitter_denom = mode_info->jitter_denom;
+		rsc_config->jitter_numer = mode_info.jitter_numer;
+		rsc_config->jitter_denom = mode_info.jitter_denom;
 		sde_enc->rsc_state_init = false;
 	}
 
@@ -1503,9 +1659,7 @@
 static void _sde_encoder_resource_control_rsc_update(
 		struct drm_encoder *drm_enc, bool enable)
 {
-	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
 	struct sde_encoder_rsc_config rsc_cfg = { 0 };
-	int i;
 
 	if (enable) {
 		rsc_cfg.inline_rotate_prefill =
@@ -1514,22 +1668,6 @@
 		_sde_encoder_update_rsc_client(drm_enc, &rsc_cfg, true);
 	} else {
 		_sde_encoder_update_rsc_client(drm_enc, NULL, false);
-
-		/**
-		 * disable the vsync source after updating the rsc state. rsc
-		 * state update might have vsync wait and vsync source must be
-		 * disabled after it. It will avoid generating any vsync from
-		 * this point till mode-2 entry. It is SW workaround for
-		 * HW limitation and should not be removed without checking the
-		 * updated design.
-		 */
-		for (i = 0; i < sde_enc->num_phys_encs; i++) {
-			struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
-
-			if (phys && phys->ops.prepare_idle_pc)
-				phys->ops.prepare_idle_pc(phys);
-		}
-
 	}
 }
 
@@ -1581,7 +1719,7 @@
 		u32 sw_event)
 {
 	bool autorefresh_enabled = false;
-	unsigned int lp, idle_timeout;
+	unsigned int lp, idle_pc_duration;
 	struct sde_encoder_virt *sde_enc;
 	struct msm_drm_private *priv;
 	struct msm_drm_thread *disp_thread;
@@ -1705,18 +1843,18 @@
 			lp = SDE_MODE_DPMS_ON;
 
 		if (lp == SDE_MODE_DPMS_LP2)
-			idle_timeout = IDLE_SHORT_TIMEOUT;
+			idle_pc_duration = IDLE_SHORT_TIMEOUT;
 		else
-			idle_timeout = sde_enc->idle_timeout;
+			idle_pc_duration = IDLE_POWERCOLLAPSE_DURATION;
 
-		if (!autorefresh_enabled && idle_timeout)
+		if (!autorefresh_enabled)
 			kthread_queue_delayed_work(
 				&disp_thread->worker,
 				&sde_enc->delayed_off_work,
-				msecs_to_jiffies(idle_timeout));
+				msecs_to_jiffies(idle_pc_duration));
 		SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
 				autorefresh_enabled,
-				idle_timeout, SDE_EVTLOG_FUNC_CASE2);
+				idle_pc_duration, SDE_EVTLOG_FUNC_CASE2);
 		SDE_DEBUG_ENC(sde_enc, "sw_event:%d, work scheduled\n",
 				sw_event);
 		break;
@@ -1761,8 +1899,11 @@
 		break;
 
 	case SDE_ENC_RC_EVENT_STOP:
-		mutex_lock(&sde_enc->rc_lock);
+		/* cancel vsync event work and timer */
+		kthread_cancel_work_sync(&sde_enc->vsync_event_work);
+		del_timer_sync(&sde_enc->vsync_event_timer);
 
+		mutex_lock(&sde_enc->rc_lock);
 		/* return if the resource control is already in OFF state */
 		if (sde_enc->rc_state == SDE_ENC_RC_STATE_OFF) {
 			SDE_DEBUG_ENC(sde_enc, "sw_event:%d, rc in OFF state\n",
@@ -1927,6 +2068,7 @@
 	struct sde_kms *sde_kms;
 	struct list_head *connector_list;
 	struct drm_connector *conn = NULL, *conn_iter;
+	struct sde_connector_state *sde_conn_state = NULL;
 	struct sde_connector *sde_conn = NULL;
 	struct sde_rm_hw_iter dsc_iter, pp_iter;
 	int i = 0, ret;
@@ -1958,13 +2100,15 @@
 	}
 
 	sde_conn = to_sde_connector(conn);
-	if (sde_conn) {
-		ret = sde_conn->ops.get_mode_info(adj_mode, &sde_enc->mode_info,
+	sde_conn_state = to_sde_connector_state(conn->state);
+	if (sde_conn && sde_conn_state) {
+		ret = sde_conn->ops.get_mode_info(adj_mode,
+				&sde_conn_state->mode_info,
 				sde_kms->catalog->max_mixer_width,
 				sde_conn->display);
 		if (ret) {
 			SDE_ERROR_ENC(sde_enc,
-				"invalid topology for the mode\n");
+				"failed to get mode info from the display\n");
 			return;
 		}
 	}
@@ -2035,6 +2179,30 @@
 						SDE_ENC_RC_EVENT_POST_MODESET);
 }
 
+void sde_encoder_control_te(struct drm_encoder *drm_enc, bool enable)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct sde_encoder_phys *phys;
+	int i;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid parameters\n");
+		return;
+	}
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	if (!sde_enc) {
+		SDE_ERROR("invalid sde encoder\n");
+		return;
+	}
+
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		phys = sde_enc->phys_encs[i];
+		if (phys && phys->ops.control_te)
+			phys->ops.control_te(phys, enable);
+	}
+}
+
 static void _sde_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
 {
 	struct sde_encoder_virt *sde_enc = NULL;
@@ -2072,6 +2240,7 @@
 				sde_kms->catalog);
 
 	_sde_encoder_update_vsync_source(sde_enc, &sde_enc->disp_info, false);
+	sde_encoder_control_te(drm_enc, true);
 
 	memset(&sde_enc->prv_conn_roi, 0, sizeof(sde_enc->prv_conn_roi));
 	memset(&sde_enc->cur_conn_roi, 0, sizeof(sde_enc->cur_conn_roi));
@@ -2107,13 +2276,22 @@
 	int i, ret = 0;
 	struct msm_compression_info *comp_info = NULL;
 	struct drm_display_mode *cur_mode = NULL;
+	struct msm_mode_info mode_info;
+	struct drm_connector *drm_conn = NULL;
 
 	if (!drm_enc) {
 		SDE_ERROR("invalid encoder\n");
 		return;
 	}
 	sde_enc = to_sde_encoder_virt(drm_enc);
-	comp_info = &sde_enc->mode_info.comp_info;
+
+	ret = _sde_encoder_get_mode_info(drm_enc, &mode_info);
+	if (ret) {
+		SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
+		return;
+	}
+
+	comp_info = &mode_info.comp_info;
 	cur_mode = &sde_enc->base.crtc->state->adjusted_mode;
 
 	SDE_DEBUG_ENC(sde_enc, "\n");
@@ -2175,6 +2353,10 @@
 		sde_enc->cur_master->ops.enable(sde_enc->cur_master);
 
 	_sde_encoder_virt_enable_helper(drm_enc);
+
+	/* Enable ESD thread */
+	drm_conn = sde_enc->cur_master->connector;
+	sde_connector_schedule_status_work(drm_conn, true);
 }
 
 static void sde_encoder_virt_disable(struct drm_encoder *drm_enc)
@@ -2182,6 +2364,8 @@
 	struct sde_encoder_virt *sde_enc = NULL;
 	struct msm_drm_private *priv;
 	struct sde_kms *sde_kms;
+	struct drm_connector *drm_conn = NULL;
+	enum sde_intf_mode intf_mode;
 	int i = 0;
 
 	if (!drm_enc) {
@@ -2200,19 +2384,44 @@
 
 	priv = drm_enc->dev->dev_private;
 	sde_kms = to_sde_kms(priv->kms);
+	intf_mode = sde_encoder_get_intf_mode(drm_enc);
 
 	SDE_EVT32(DRMID(drm_enc));
 
+	/* Disable ESD thread */
+	drm_conn = sde_enc->cur_master->connector;
+	sde_connector_schedule_status_work(drm_conn, false);
+
 	/* wait for idle */
 	sde_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
 
-	sde_encoder_resource_control(drm_enc, SDE_ENC_RC_EVENT_PRE_STOP);
+	/*
+	 * For primary command mode encoders, execute the resource control
+	 * pre-stop operations before the physical encoders are disabled, to
+	 * allow the rsc to transition its states properly.
+	 *
+	 * For other encoder types, rsc should not be enabled until after
+	 * they have been fully disabled, so delay the pre-stop operations
+	 * until after the physical disable calls have returned.
+	 */
+	if (sde_enc->disp_info.is_primary && intf_mode == INTF_MODE_CMD) {
+		sde_encoder_resource_control(drm_enc,
+				SDE_ENC_RC_EVENT_PRE_STOP);
+		for (i = 0; i < sde_enc->num_phys_encs; i++) {
+			struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
 
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+			if (phys && phys->ops.disable)
+				phys->ops.disable(phys);
+		}
+	} else {
+		for (i = 0; i < sde_enc->num_phys_encs; i++) {
+			struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
 
-		if (phys && phys->ops.disable)
-			phys->ops.disable(phys);
+			if (phys && phys->ops.disable)
+				phys->ops.disable(phys);
+		}
+		sde_encoder_resource_control(drm_enc,
+				SDE_ENC_RC_EVENT_PRE_STOP);
 	}
 
 	/*
@@ -2222,12 +2431,6 @@
 	 */
 	_sde_encoder_dsc_disable(sde_enc);
 
-	/* after phys waits for frame-done, should be no more frames pending */
-	if (atomic_xchg(&sde_enc->frame_done_timeout, 0)) {
-		SDE_ERROR("enc%d timeout pending\n", drm_enc->base.id);
-		del_timer_sync(&sde_enc->frame_done_timer);
-	}
-
 	sde_encoder_resource_control(drm_enc, SDE_ENC_RC_EVENT_STOP);
 
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
@@ -2383,9 +2586,6 @@
 		}
 
 		if (!sde_enc->frame_busy_mask[0]) {
-			atomic_set(&sde_enc->frame_done_timeout, 0);
-			del_timer(&sde_enc->frame_done_timer);
-
 			sde_encoder_resource_control(drm_enc,
 					SDE_ENC_RC_EVENT_FRAME_DONE);
 
@@ -2413,9 +2613,6 @@
 
 	sde_encoder_resource_control(&sde_enc->base,
 						SDE_ENC_RC_EVENT_ENTER_IDLE);
-
-	sde_encoder_frame_done_callback(&sde_enc->base, NULL,
-				SDE_ENCODER_FRAME_EVENT_IDLE);
 }
 
 /**
@@ -2534,26 +2731,46 @@
 	}
 }
 
-int sde_encoder_helper_wait_event_timeout(
-		int32_t drm_id,
-		int32_t hw_id,
-		struct sde_encoder_wait_info *info)
+static int _sde_encoder_wait_timeout(int32_t drm_id, int32_t hw_id,
+	s64 timeout_ms, struct sde_encoder_wait_info *info)
 {
 	int rc = 0;
-	s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms;
-	s64 jiffies = msecs_to_jiffies(info->timeout_ms);
-	s64 time;
+	s64 wait_time_jiffies = msecs_to_jiffies(timeout_ms);
+	ktime_t cur_ktime;
+	ktime_t exp_ktime = ktime_add_ms(ktime_get(), timeout_ms);
 
 	do {
 		rc = wait_event_timeout(*(info->wq),
-				atomic_read(info->atomic_cnt) == 0, jiffies);
-		time = ktime_to_ms(ktime_get());
+			atomic_read(info->atomic_cnt) == 0, wait_time_jiffies);
+		cur_ktime = ktime_get();
 
-		SDE_EVT32_VERBOSE(drm_id, hw_id, rc, time, expected_time,
-				atomic_read(info->atomic_cnt));
+		SDE_EVT32(drm_id, hw_id, rc, ktime_to_ms(cur_ktime),
+			timeout_ms, atomic_read(info->atomic_cnt));
 	/* If we timed out, counter is valid and time is less, wait again */
 	} while (atomic_read(info->atomic_cnt) && (rc == 0) &&
-			(time < expected_time));
+			(ktime_compare_safe(exp_ktime, cur_ktime) > 0));
+
+	return rc;
+}
+
+int sde_encoder_helper_wait_event_timeout(int32_t drm_id, int32_t hw_id,
+	struct sde_encoder_wait_info *info)
+{
+	int rc;
+	ktime_t exp_ktime = ktime_add_ms(ktime_get(), info->timeout_ms);
+
+	rc = _sde_encoder_wait_timeout(drm_id, hw_id, info->timeout_ms, info);
+
+	/*
+	 * Handle the disabled-irq case, where the timer irq can also be
+	 * delayed: if the wait timed out but the deadline was overshot by
+	 * more than FAULT_TOLERENCE_DELTA_IN_MS, wait again for an
+	 * additional FAULT_TOLERENCE_WAIT_IN_MS.
+	 */
+	if (atomic_read(info->atomic_cnt) && (!rc) &&
+	    (ktime_compare_safe(ktime_get(), ktime_add_ms(exp_ktime,
+	     FAULT_TOLERENCE_DELTA_IN_MS)) > 0))
+		rc = _sde_encoder_wait_timeout(drm_id, hw_id,
+			FAULT_TOLERENCE_WAIT_IN_MS, info);
 
 	return rc;
 }
@@ -2771,6 +2988,8 @@
 
 	SDE_DEBUG_ENC(sde_enc, "affected_displays 0x%lx num_active_phys %d\n",
 			params->affected_displays, num_active_phys);
+	SDE_EVT32_VERBOSE(DRMID(drm_enc), params->affected_displays,
+			num_active_phys);
 
 	/* for left/right only update, ppsplit master switches interface */
 	_sde_encoder_ppsplit_swap_intf_for_right_only_update(drm_enc,
@@ -2980,7 +3199,6 @@
 	struct sde_encoder_virt *sde_enc;
 	struct msm_drm_private *priv;
 	struct msm_drm_thread *event_thread;
-	bool autorefresh_enabled = false;
 
 	if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private ||
 			!drm_enc->crtc) {
@@ -3002,28 +3220,16 @@
 		return;
 	}
 
-	if (sde_enc->cur_master &&
-		sde_enc->cur_master->ops.is_autorefresh_enabled)
-		autorefresh_enabled =
-			sde_enc->cur_master->ops.is_autorefresh_enabled(
-						sde_enc->cur_master);
-
-	/*
-	 * Queue work to update the vsync event timer
-	 * if autorefresh is enabled.
-	 */
-	SDE_EVT32_VERBOSE(autorefresh_enabled);
-	if (autorefresh_enabled)
-		kthread_queue_work(&event_thread->worker,
+	kthread_queue_work(&event_thread->worker,
 				&sde_enc->vsync_event_work);
-	else
-		del_timer(&sde_enc->vsync_event_timer);
 }
 
 static void sde_encoder_vsync_event_work_handler(struct kthread_work *work)
 {
 	struct sde_encoder_virt *sde_enc = container_of(work,
 			struct sde_encoder_virt, vsync_event_work);
+	bool autorefresh_enabled = false;
+	int rc = 0;
 	ktime_t wakeup_time;
 
 	if (!sde_enc) {
@@ -3031,12 +3237,32 @@
 		return;
 	}
 
-	if (_sde_encoder_wakeup_time(&sde_enc->base, &wakeup_time))
+	rc = _sde_encoder_power_enable(sde_enc, true);
+	if (rc) {
+		SDE_ERROR_ENC(sde_enc, "sde enc power enable failed:%d\n", rc);
 		return;
+	}
+
+	if (sde_enc->cur_master &&
+		sde_enc->cur_master->ops.is_autorefresh_enabled)
+		autorefresh_enabled =
+			sde_enc->cur_master->ops.is_autorefresh_enabled(
+						sde_enc->cur_master);
+
+	/* Update timer if autorefresh is enabled else return */
+	if (!autorefresh_enabled)
+		goto exit;
+
+	rc = _sde_encoder_wakeup_time(&sde_enc->base, &wakeup_time);
+	if (rc)
+		goto exit;
 
 	SDE_EVT32_VERBOSE(ktime_to_ms(wakeup_time));
 	mod_timer(&sde_enc->vsync_event_timer,
 			nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
+
+exit:
+	_sde_encoder_power_enable(sde_enc, false);
 }
 
 int sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
@@ -3101,7 +3327,7 @@
 		}
 	}
 
-	if (sde_encoder_is_dsc_enabled(drm_enc)) {
+	if (_sde_encoder_is_dsc_enabled(drm_enc)) {
 		rc = _sde_encoder_dsc_setup(sde_enc, params);
 		if (rc) {
 			SDE_ERROR_ENC(sde_enc, "failed to setup DSC: %d\n", rc);
@@ -3167,12 +3393,6 @@
 
 	SDE_DEBUG_ENC(sde_enc, "\n");
 
-	atomic_set(&sde_enc->frame_done_timeout,
-			SDE_FRAME_DONE_TIMEOUT * 1000 /
-			drm_enc->crtc->state->adjusted_mode.vrefresh);
-	mod_timer(&sde_enc->frame_done_timer, jiffies +
-		((atomic_read(&sde_enc->frame_done_timeout) * HZ) / 1000));
-
 	/* create a 'no pipes' commit to release buffers on errors */
 	if (is_error)
 		_sde_encoder_reset_ctl_hw(drm_enc);
@@ -3733,36 +3953,6 @@
 	return ret;
 }
 
-static void sde_encoder_frame_done_timeout(unsigned long data)
-{
-	struct drm_encoder *drm_enc = (struct drm_encoder *) data;
-	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
-	struct msm_drm_private *priv;
-	u32 event;
-
-	if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
-		SDE_ERROR("invalid parameters\n");
-		return;
-	}
-	priv = drm_enc->dev->dev_private;
-
-	if (!sde_enc->frame_busy_mask[0] || !sde_enc->crtc_frame_event_cb) {
-		SDE_DEBUG_ENC(sde_enc, "invalid timeout\n");
-		SDE_EVT32(DRMID(drm_enc), sde_enc->frame_busy_mask[0], 0);
-		return;
-	} else if (!atomic_xchg(&sde_enc->frame_done_timeout, 0)) {
-		SDE_ERROR_ENC(sde_enc, "invalid timeout\n");
-		SDE_EVT32(DRMID(drm_enc), 0, 1);
-		return;
-	}
-
-	SDE_ERROR_ENC(sde_enc, "frame done timeout\n");
-
-	event = SDE_ENCODER_FRAME_EVENT_ERROR;
-	SDE_EVT32(DRMID(drm_enc), event);
-	sde_enc->crtc_frame_event_cb(sde_enc->crtc_frame_event_cb_data, event);
-}
-
 static const struct drm_encoder_helper_funcs sde_encoder_helper_funcs = {
 	.mode_set = sde_encoder_virt_mode_set,
 	.disable = sde_encoder_virt_disable,
@@ -3806,10 +3996,6 @@
 	drm_encoder_init(dev, drm_enc, &sde_encoder_funcs, drm_enc_mode, NULL);
 	drm_encoder_helper_add(drm_enc, &sde_encoder_helper_funcs);
 
-	atomic_set(&sde_enc->frame_done_timeout, 0);
-	setup_timer(&sde_enc->frame_done_timer, sde_encoder_frame_done_timeout,
-			(unsigned long) sde_enc);
-
 	if ((disp_info->intf_type == DRM_MODE_CONNECTOR_DSI) &&
 			disp_info->is_primary)
 		setup_timer(&sde_enc->vsync_event_timer,
@@ -3828,7 +4014,6 @@
 	mutex_init(&sde_enc->rc_lock);
 	kthread_init_delayed_work(&sde_enc->delayed_off_work,
 			sde_encoder_off_work);
-	sde_enc->idle_timeout = IDLE_TIMEOUT;
 	sde_enc->vblank_enabled = false;
 
 	kthread_init_work(&sde_enc->vsync_event_work,
@@ -3875,6 +4060,9 @@
 		case MSM_ENC_VBLANK:
 			fn_wait = phys->ops.wait_for_vblank;
 			break;
+		case MSM_ENC_ACTIVE_REGION:
+			fn_wait = phys->ops.wait_for_active;
+			break;
 		default:
 			SDE_ERROR_ENC(sde_enc, "unknown wait event %d\n",
 					event);
@@ -3916,3 +4104,176 @@
 
 	return INTF_MODE_NONE;
 }
+
+/**
+ * sde_encoder_update_caps_for_cont_splash - update encoder settings during
+ *	device bootup when cont_splash is enabled
+ * @encoder:    Pointer to drm encoder structure
+ * @Return:	0 on success, negative error code otherwise
+ */
+int sde_encoder_update_caps_for_cont_splash(struct drm_encoder *encoder)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+	struct drm_connector *conn = NULL;
+	struct sde_connector *sde_conn = NULL;
+	struct sde_connector_state *sde_conn_state = NULL;
+	struct drm_display_mode *drm_mode = NULL;
+	struct sde_rm_hw_iter dsc_iter, pp_iter, ctl_iter;
+	int ret = 0, i;
+
+	if (!encoder) {
+		SDE_ERROR("invalid drm enc\n");
+		return -EINVAL;
+	}
+
+	if (!encoder->dev || !encoder->dev->dev_private) {
+		SDE_ERROR("drm device invalid\n");
+		return -EINVAL;
+	}
+
+	priv = encoder->dev->dev_private;
+	if (!priv->kms) {
+		SDE_ERROR("invalid kms\n");
+		return -EINVAL;
+	}
+
+	sde_kms = to_sde_kms(priv->kms);
+	sde_enc = to_sde_encoder_virt(encoder);
+	if (!priv->num_connectors) {
+		SDE_ERROR_ENC(sde_enc, "No connectors registered\n");
+		return -EINVAL;
+	}
+	SDE_DEBUG_ENC(sde_enc,
+			"num of connectors: %d\n", priv->num_connectors);
+
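+	/* find the connector currently attached to this encoder */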
+	for (i = 0; i < priv->num_connectors; i++) {
+		SDE_DEBUG_ENC(sde_enc, "connector id: %d\n",
+				priv->connectors[i]->base.id);
+		sde_conn = to_sde_connector(priv->connectors[i]);
+		if (!sde_conn->encoder) {
+			SDE_DEBUG_ENC(sde_enc,
+				"encoder not attached to connector\n");
+			continue;
+		}
+		if (sde_conn->encoder->base.id
+				== encoder->base.id) {
+			conn = (priv->connectors[i]);
+			break;
+		}
+	}
+
+	if (!conn || !conn->state) {
+		SDE_ERROR_ENC(sde_enc, "connector not found\n");
+		return -EINVAL;
+	}
+
+	sde_conn_state = to_sde_connector_state(conn->state);
+
+	if (!sde_conn->ops.get_mode_info) {
+		SDE_ERROR_ENC(sde_enc, "conn: get_mode_info ops not found\n");
+		return -EINVAL;
+	}
+
+	ret = sde_conn->ops.get_mode_info(&encoder->crtc->state->adjusted_mode,
+				&sde_conn_state->mode_info,
+				sde_kms->catalog->max_mixer_width,
+				sde_conn->display);
+	if (ret) {
+		SDE_ERROR_ENC(sde_enc,
+			"conn: ->get_mode_info failed. ret=%d\n", ret);
+		return ret;
+	}
+
+	ret = sde_rm_reserve(&sde_kms->rm, encoder, encoder->crtc->state,
+			conn->state, false);
+	if (ret) {
+		SDE_ERROR_ENC(sde_enc,
+			"failed to reserve hw resources, %d\n", ret);
+		return ret;
+	}
+
+	if (conn->encoder) {
+		conn->state->best_encoder = conn->encoder;
+		SDE_DEBUG_ENC(sde_enc,
+			"configured cstate->best_encoder to ID = %d\n",
+			conn->state->best_encoder->base.id);
+	} else {
+		SDE_ERROR_ENC(sde_enc, "No encoder mapped to connector=%d\n",
+				conn->base.id);
+	}
+
+	SDE_DEBUG_ENC(sde_enc, "connector topology = %llu\n",
+			sde_connector_get_topology_name(conn));
+	drm_mode = &encoder->crtc->state->adjusted_mode;
+	SDE_DEBUG_ENC(sde_enc, "hdisplay = %d, vdisplay = %d\n",
+			drm_mode->hdisplay, drm_mode->vdisplay);
+	drm_set_preferred_mode(conn, drm_mode->hdisplay, drm_mode->vdisplay);
+
+	if (encoder->bridge) {
+		SDE_DEBUG_ENC(sde_enc, "Bridge mapped to encoder\n");
+		/*
+		 * For cont-splash use case, we update the mode
+		 * configurations manually, which skips the usual
+		 * mode set call when the actual frame is pushed
+		 * from the framework. The bridge needs to
+		 * be updated with the current drm mode by
+		 * calling the bridge mode set ops.
+		 */
+		if (encoder->bridge->funcs) {
+			SDE_DEBUG_ENC(sde_enc, "calling mode_set\n");
+			encoder->bridge->funcs->mode_set(encoder->bridge,
+						drm_mode, drm_mode);
+		}
+	} else {
+		SDE_ERROR_ENC(sde_enc, "No bridge attached to encoder\n");
+	}
+
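+	/* retrieve the reserved HW blocks and cache them on the encoder */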
+	sde_rm_init_hw_iter(&pp_iter, encoder->base.id, SDE_HW_BLK_PINGPONG);
+	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+		sde_enc->hw_pp[i] = NULL;
+		if (!sde_rm_get_hw(&sde_kms->rm, &pp_iter))
+			break;
+		sde_enc->hw_pp[i] = (struct sde_hw_pingpong *) pp_iter.hw;
+	}
+
+	sde_rm_init_hw_iter(&dsc_iter, encoder->base.id, SDE_HW_BLK_DSC);
+	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+		sde_enc->hw_dsc[i] = NULL;
+		if (!sde_rm_get_hw(&sde_kms->rm, &dsc_iter))
+			break;
+		sde_enc->hw_dsc[i] = (struct sde_hw_dsc *) dsc_iter.hw;
+	}
+
+	sde_rm_init_hw_iter(&ctl_iter, encoder->base.id, SDE_HW_BLK_CTL);
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+		phys->hw_ctl = NULL;
+		if (!sde_rm_get_hw(&sde_kms->rm, &ctl_iter))
+			break;
+		phys->hw_ctl = (struct sde_hw_ctl *) ctl_iter.hw;
+	}
+
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+		if (!phys) {
+			SDE_ERROR_ENC(sde_enc,
+				"phys encoders not initialized\n");
+			return -EINVAL;
+		}
+
+		phys->hw_pp = sde_enc->hw_pp[i];
+		if (phys->ops.cont_splash_mode_set)
+			phys->ops.cont_splash_mode_set(phys, drm_mode);
+
+		if (phys->ops.is_master && phys->ops.is_master(phys)) {
+			phys->connector = conn;
+			sde_enc->cur_master = phys;
+		}
+	}
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
index 40d3903..f8a3cf3 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -29,9 +29,8 @@
 #define SDE_ENCODER_FRAME_EVENT_PANEL_DEAD		BIT(2)
 #define SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE	BIT(3)
 #define SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE	BIT(4)
-#define SDE_ENCODER_FRAME_EVENT_IDLE			BIT(5)
 
-#define IDLE_TIMEOUT	(66 - 16/2)
+#define IDLE_POWERCOLLAPSE_DURATION	(66 - 16/2)
 
 /**
  * Encoder functions and data types
@@ -163,19 +162,19 @@
 enum sde_intf_mode sde_encoder_get_intf_mode(struct drm_encoder *encoder);
 
 /**
+ * sde_encoder_control_te - control enabling/disabling VSYNC_IN_EN
+ * @encoder:	encoder pointer
+ * @enable:	boolean to indicate enable/disable
+ */
+void sde_encoder_control_te(struct drm_encoder *encoder, bool enable);
+
+/**
  * sde_encoder_virt_restore - restore the encoder configs
  * @encoder:	encoder pointer
  */
 void sde_encoder_virt_restore(struct drm_encoder *encoder);
 
 /**
- * sde_encoder_is_dsc_enabled - check if encoder is in DSC mode
- * @drm_enc: Pointer to drm encoder object
- * @Return: true if encoder is in DSC mode
- */
-bool sde_encoder_is_dsc_enabled(struct drm_encoder *drm_enc);
-
-/**
  * sde_encoder_is_dsc_merge - check if encoder is in DSC merge mode
  * @drm_enc: Pointer to drm encoder object
  * @Return: true if encoder is in DSC merge mode
@@ -214,12 +213,11 @@
 void sde_encoder_prepare_commit(struct drm_encoder *drm_enc);
 
 /**
- * sde_encoder_set_idle_timeout - set the idle timeout for video
- *                    and command mode encoders.
- * @drm_enc:    Pointer to previously created drm encoder structure
- * @idle_timeout:    idle timeout duration in milliseconds
+ * sde_encoder_update_caps_for_cont_splash - update encoder settings during
+ *	device bootup when cont_splash is enabled
+ * @encoder:    Pointer to drm encoder structure
+ * @Return:     0 on success, negative error code otherwise
  */
-void sde_encoder_set_idle_timeout(struct drm_encoder *drm_enc,
-							u32 idle_timeout);
+int sde_encoder_update_caps_for_cont_splash(struct drm_encoder *encoder);
 
 #endif /* __SDE_ENCODER_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index d00400d..edfdc0b 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -99,6 +99,8 @@
  *				encoder. Can be switched at enable time. Based
  *				on split_role and current mode (CMD/VID).
  * @mode_fixup:			DRM Call. Fixup a DRM mode.
+ * @cont_splash_mode_set:	mode set using the HW resources already
+ *                              configured while cont splash is enabled.
  * @mode_set:			DRM Call. Set a DRM mode.
  *				This likely caches the mode, for use at enable.
  * @enable:			DRM Call. Enable a DRM mode.
@@ -126,14 +128,14 @@
  *				SDE_ENC_ERR_NEEDS_HW_RESET state
  * @irq_control:		Handler to enable/disable all the encoder IRQs
  * @update_split_role:		Update the split role of the phys enc
- * @prepare_idle_pc:		phys encoder can update the vsync_enable status
- *                              on idle power collapse prepare
+ * @control_te:			Interface to control the vsync_enable status
  * @restore:			Restore all the encoder configs.
  * @is_autorefresh_enabled:	provides the autorefresh current
  *                              enable/disable state.
  * @get_line_count:		Obtain current vertical line count
  * @wait_dma_trigger:		Returns true if lut dma has to trigger and wait
  *                              unitl transaction is complete.
+ * @wait_for_active:		Wait for display scan line to be in active area
  */
 
 struct sde_encoder_phys_ops {
@@ -147,6 +149,8 @@
 	void (*mode_set)(struct sde_encoder_phys *encoder,
 			struct drm_display_mode *mode,
 			struct drm_display_mode *adjusted_mode);
+	void (*cont_splash_mode_set)(struct sde_encoder_phys *encoder,
+			struct drm_display_mode *adjusted_mode);
 	void (*enable)(struct sde_encoder_phys *encoder);
 	void (*disable)(struct sde_encoder_phys *encoder);
 	int (*atomic_check)(struct sde_encoder_phys *encoder,
@@ -174,11 +178,12 @@
 	void (*irq_control)(struct sde_encoder_phys *phys, bool enable);
 	void (*update_split_role)(struct sde_encoder_phys *phys_enc,
 			enum sde_enc_split_role role);
-	void (*prepare_idle_pc)(struct sde_encoder_phys *phys_enc);
+	void (*control_te)(struct sde_encoder_phys *phys_enc, bool enable);
 	void (*restore)(struct sde_encoder_phys *phys);
 	bool (*is_autorefresh_enabled)(struct sde_encoder_phys *phys);
 	int (*get_line_count)(struct sde_encoder_phys *phys);
 	bool (*wait_dma_trigger)(struct sde_encoder_phys *phys);
+	int (*wait_for_active)(struct sde_encoder_phys *phys);
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index 164f2e1..756984b 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -359,6 +359,21 @@
 	irq->irq_idx = -EINVAL;
 }
 
+static void sde_encoder_phys_cmd_cont_splash_mode_set(
+		struct sde_encoder_phys *phys_enc,
+		struct drm_display_mode *adj_mode)
+{
+	if (!phys_enc || !adj_mode) {
+		SDE_ERROR("invalid args\n");
+		return;
+	}
+
+	phys_enc->cached_mode = *adj_mode;
+	phys_enc->enable_state = SDE_ENC_ENABLED;
+
+	_sde_encoder_phys_cmd_setup_irq_hw_idx(phys_enc);
+}
+
 static void sde_encoder_phys_cmd_mode_set(
 		struct sde_encoder_phys *phys_enc,
 		struct drm_display_mode *mode,
@@ -474,6 +489,21 @@
 			phys_enc->split_role == ENC_ROLE_SLAVE;
 }
 
+static bool _sde_encoder_phys_is_disabling_ppsplit_slave(
+		struct sde_encoder_phys *phys_enc)
+{
+	enum sde_rm_topology_name old_top;
+
+	if (!phys_enc || !phys_enc->connector ||
+			phys_enc->split_role != ENC_ROLE_SLAVE)
+		return false;
+
+	old_top = sde_connector_get_old_topology_name(
+			phys_enc->connector->state);
+
+	return old_top == SDE_RM_TOPOLOGY_PPSPLIT;
+}
+
 static int _sde_encoder_phys_cmd_poll_write_pointer_started(
 		struct sde_encoder_phys *phys_enc)
 {
@@ -663,7 +693,15 @@
 {
 	struct sde_encoder_phys_cmd *cmd_enc;
 
-	if (!phys_enc || _sde_encoder_phys_is_ppsplit_slave(phys_enc))
+	if (!phys_enc)
+		return;
+
+	/*
+	 * Pingpong split slaves do not register for IRQs;
+	 * check both the old and the new topologies.
+	 */
+	if (_sde_encoder_phys_is_ppsplit_slave(phys_enc) ||
+			_sde_encoder_phys_is_disabling_ppsplit_slave(phys_enc))
 		return;
 
 	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
@@ -891,7 +929,7 @@
 	return cfg.enable;
 }
 
-static void _sde_encoder_phys_cmd_connect_te(
+static void sde_encoder_phys_cmd_connect_te(
 		struct sde_encoder_phys *phys_enc, bool enable)
 {
 	if (!phys_enc || !phys_enc->hw_pp ||
@@ -902,12 +940,6 @@
 	phys_enc->hw_pp->ops.connect_external_te(phys_enc->hw_pp, enable);
 }
 
-static void sde_encoder_phys_cmd_prepare_idle_pc(
-		struct sde_encoder_phys *phys_enc)
-{
-	_sde_encoder_phys_cmd_connect_te(phys_enc, false);
-}
-
 static int sde_encoder_phys_cmd_get_line_count(
 		struct sde_encoder_phys *phys_enc)
 {
@@ -1029,6 +1061,7 @@
 			to_sde_encoder_phys_cmd(phys_enc);
 	struct sde_encoder_wait_info wait_info;
 	int ret;
+	bool frame_pending = true;
 
 	if (!phys_enc || !phys_enc->hw_ctl) {
 		SDE_ERROR("invalid argument(s)\n");
@@ -1046,10 +1079,17 @@
 	ret = sde_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_CTL_START,
 			&wait_info);
 	if (ret == -ETIMEDOUT) {
-		SDE_ERROR_CMDENC(cmd_enc, "ctl start interrupt wait failed\n");
-		ret = -EINVAL;
-	} else if (!ret)
-		ret = 0;
+		struct sde_hw_ctl *ctl = phys_enc->hw_ctl;
+
+		if (ctl && ctl->ops.get_start_state)
+			frame_pending = ctl->ops.get_start_state(ctl);
+
+		if (frame_pending)
+			SDE_ERROR_CMDENC(cmd_enc,
+					"ctl start interrupt wait failed\n");
+		else
+			ret = 0;
+	}
 
 	return ret;
 }
@@ -1214,19 +1254,6 @@
 	SDE_DEBUG_CMDENC(cmd_enc, "disabled autorefresh\n");
 }
 
-static void sde_encoder_phys_cmd_handle_post_kickoff(
-		struct sde_encoder_phys *phys_enc)
-{
-	if (!phys_enc)
-		return;
-
-	/**
-	 * re-enable external TE, either for the first time after enabling
-	 * or if disabled for Autorefresh
-	 */
-	_sde_encoder_phys_cmd_connect_te(phys_enc, true);
-}
-
 static void sde_encoder_phys_cmd_trigger_start(
 		struct sde_encoder_phys *phys_enc)
 {
@@ -1253,6 +1280,7 @@
 	ops->prepare_commit = sde_encoder_phys_cmd_prepare_commit;
 	ops->is_master = sde_encoder_phys_cmd_is_master;
 	ops->mode_set = sde_encoder_phys_cmd_mode_set;
+	ops->cont_splash_mode_set = sde_encoder_phys_cmd_cont_splash_mode_set;
 	ops->mode_fixup = sde_encoder_phys_cmd_mode_fixup;
 	ops->enable = sde_encoder_phys_cmd_enable;
 	ops->disable = sde_encoder_phys_cmd_disable;
@@ -1270,11 +1298,11 @@
 	ops->irq_control = sde_encoder_phys_cmd_irq_control;
 	ops->update_split_role = sde_encoder_phys_cmd_update_split_role;
 	ops->restore = sde_encoder_phys_cmd_enable_helper;
-	ops->prepare_idle_pc = sde_encoder_phys_cmd_prepare_idle_pc;
+	ops->control_te = sde_encoder_phys_cmd_connect_te;
 	ops->is_autorefresh_enabled =
 			sde_encoder_phys_cmd_is_autorefresh_enabled;
-	ops->handle_post_kickoff = sde_encoder_phys_cmd_handle_post_kickoff;
 	ops->get_line_count = sde_encoder_phys_cmd_get_line_count;
+	ops->wait_for_active = NULL;
 }
 
 struct sde_encoder_phys *sde_encoder_phys_cmd_init(
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index eef249a..47aa5e9 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -36,6 +36,10 @@
 /* maximum number of consecutive kickoff errors */
 #define KICKOFF_MAX_ERRORS	2
 
+/* Poll time to do recovery during active region */
+#define POLL_TIME_USEC_FOR_LN_CNT 500
+#define MAX_POLL_CNT 10
+
 static bool sde_encoder_phys_vid_is_master(
 		struct sde_encoder_phys *phys_enc)
 {
@@ -501,6 +505,21 @@
 		irq->hw_idx = phys_enc->intf_idx;
 }
 
+static void sde_encoder_phys_vid_cont_splash_mode_set(
+		struct sde_encoder_phys *phys_enc,
+		struct drm_display_mode *adj_mode)
+{
+	if (!phys_enc || !adj_mode) {
+		SDE_ERROR("invalid args\n");
+		return;
+	}
+
+	phys_enc->cached_mode = *adj_mode;
+	phys_enc->enable_state = SDE_ENC_ENABLED;
+
+	_sde_encoder_phys_vid_setup_irq_hw_idx(phys_enc);
+}
+
 static void sde_encoder_phys_vid_mode_set(
 		struct sde_encoder_phys *phys_enc,
 		struct drm_display_mode *mode,
@@ -725,7 +744,8 @@
 		struct sde_encoder_phys *phys_enc, bool notify)
 {
 	struct sde_encoder_wait_info wait_info;
-	int ret;
+	int ret = 0;
+	u32 event = 0;
 
 	if (!phys_enc) {
 		pr_err("invalid encoder\n");
@@ -738,11 +758,10 @@
 
 	if (!sde_encoder_phys_vid_is_master(phys_enc)) {
 		/* signal done for slave video encoder, unless it is pp-split */
-		if (!_sde_encoder_phys_is_ppsplit(phys_enc) &&
-			notify && phys_enc->parent_ops.handle_frame_done)
-			phys_enc->parent_ops.handle_frame_done(
-					phys_enc->parent, phys_enc,
-					SDE_ENCODER_FRAME_EVENT_DONE);
+		if (!_sde_encoder_phys_is_ppsplit(phys_enc) && notify) {
+			event = SDE_ENCODER_FRAME_EVENT_DONE;
+			goto end;
+		}
 		return 0;
 	}
 
@@ -750,13 +769,20 @@
 	ret = sde_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_VSYNC,
 			&wait_info);
 
-	if (ret == -ETIMEDOUT) {
-		sde_encoder_helper_report_irq_timeout(phys_enc, INTR_IDX_VSYNC);
-	} else if (!ret && notify && phys_enc->parent_ops.handle_frame_done)
+	if (ret == -ETIMEDOUT)
+		event = SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE
+				| SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE
+				| SDE_ENCODER_FRAME_EVENT_ERROR;
+	else if (!ret && notify)
+		event = SDE_ENCODER_FRAME_EVENT_DONE;
+
+end:
+	SDE_EVT32(DRMID(phys_enc->parent), event, notify, ret,
+			ret ? SDE_EVTLOG_FATAL : 0);
+	if (phys_enc->parent_ops.handle_frame_done && event)
 		phys_enc->parent_ops.handle_frame_done(
 				phys_enc->parent, phys_enc,
 				SDE_ENCODER_FRAME_EVENT_DONE);
-
 	return ret;
 }
 
@@ -860,6 +886,9 @@
 		sde_encoder_phys_inc_pending(phys_enc);
 	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
 
+	if (!sde_encoder_phys_vid_is_master(phys_enc))
+		goto exit;
+
 	/*
 	 * Wait for a vsync so we know the ENABLE=0 latched before
 	 * the (connector) source of the vsync's gets disabled,
@@ -868,7 +897,16 @@
 	 * the settings changes for the new modeset (like new
 	 * scanout buffer) don't latch properly..
 	 */
-	if (sde_encoder_phys_vid_is_master(phys_enc)) {
+	ret = sde_encoder_phys_vid_control_vblank_irq(phys_enc, true);
+	if (ret) {
+		SDE_ERROR_VIDENC(vid_enc,
+				"failed to enable vblank irq: %d\n",
+				ret);
+		SDE_EVT32(DRMID(phys_enc->parent),
+				vid_enc->hw_intf->idx - INTF_0, ret,
+				SDE_EVTLOG_FUNC_CASE1,
+				SDE_EVTLOG_ERROR);
+	} else {
 		ret = _sde_encoder_phys_vid_wait_for_vblank(phys_enc, false);
 		if (ret) {
 			atomic_set(&phys_enc->pending_kickoff_cnt, 0);
@@ -876,10 +914,13 @@
 					"failure waiting for disable: %d\n",
 					ret);
 			SDE_EVT32(DRMID(phys_enc->parent),
-					vid_enc->hw_intf->idx - INTF_0, ret);
+					vid_enc->hw_intf->idx - INTF_0, ret,
+					SDE_EVTLOG_FUNC_CASE2,
+					SDE_EVTLOG_ERROR);
 		}
+		sde_encoder_phys_vid_control_vblank_irq(phys_enc, false);
 	}
-
+exit:
 	phys_enc->enable_state = SDE_ENC_DISABLED;
 }
 
@@ -981,10 +1022,77 @@
 	return vid_enc->hw_intf->ops.get_line_count(vid_enc->hw_intf);
 }
 
+static int sde_encoder_phys_vid_wait_for_active(
+			struct sde_encoder_phys *phys_enc)
+{
+	struct drm_display_mode mode;
+	struct sde_encoder_phys_vid *vid_enc;
+	u32 ln_cnt, min_ln_cnt, active_lns_cnt;
+	u32 clk_period, time_of_line;
+	u32 delay, retry = MAX_POLL_CNT;
+
+	vid_enc =  to_sde_encoder_phys_vid(phys_enc);
+
+	if (!vid_enc->hw_intf || !vid_enc->hw_intf->ops.get_line_count) {
+		SDE_ERROR_VIDENC(vid_enc, "invalid vid_enc params\n");
+		return -EINVAL;
+	}
+
+	mode = phys_enc->cached_mode;
+
+	/*
+	 * Calculate clk_period in picoseconds to maintain good
+	 * accuracy with a high pixel clock rate; the result fits
+	 * within a 17-bit range.
+	 */
+	clk_period = DIV_ROUND_UP_ULL(1000000000, mode.clock);
+	if (!clk_period) {
+		SDE_ERROR_VIDENC(vid_enc, "Unable to calculate clock period\n");
+		return -EINVAL;
+	}
+
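+	/* active region spans line counts [min_ln_cnt, min_ln_cnt + vdisplay) */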
+	min_ln_cnt = (mode.vtotal - mode.vsync_start) +
+		(mode.vsync_end - mode.vsync_start);
+	active_lns_cnt = mode.vdisplay;
+	time_of_line = mode.htotal * clk_period;
+
+	/* delay in micro seconds */
+	delay = (time_of_line * (min_ln_cnt +
+		(mode.vsync_start - mode.vdisplay))) / 1000000;
+
+	/*
+	 * Cap the per-iteration delay used when polling
+	 * the line count for the active region.
+	 */
+	if (delay > POLL_TIME_USEC_FOR_LN_CNT)
+		delay = POLL_TIME_USEC_FOR_LN_CNT;
+
+	while (retry) {
+		ln_cnt = vid_enc->hw_intf->ops.get_line_count(vid_enc->hw_intf);
+
+		if ((ln_cnt >= min_ln_cnt) &&
+			(ln_cnt < (active_lns_cnt + min_ln_cnt))) {
+			SDE_DEBUG_VIDENC(vid_enc,
+					"line count within active region, line_cnt=%d\n",
+					ln_cnt);
+			return 0;
+		}
+
+		SDE_ERROR_VIDENC(vid_enc, "line count %d not in active region\n",
+				ln_cnt);
+		/* Add delay so that line count is in active region */
+		udelay(delay);
+		retry--;
+	}
+
+	return -EINVAL;
+}
+
 static void sde_encoder_phys_vid_init_ops(struct sde_encoder_phys_ops *ops)
 {
 	ops->is_master = sde_encoder_phys_vid_is_master;
 	ops->mode_set = sde_encoder_phys_vid_mode_set;
+	ops->cont_splash_mode_set = sde_encoder_phys_vid_cont_splash_mode_set;
 	ops->mode_fixup = sde_encoder_phys_vid_mode_fixup;
 	ops->enable = sde_encoder_phys_vid_enable;
 	ops->disable = sde_encoder_phys_vid_disable;
@@ -1004,6 +1112,7 @@
 	ops->hw_reset = sde_encoder_helper_hw_reset;
 	ops->get_line_count = sde_encoder_phys_vid_get_line_count;
 	ops->wait_dma_trigger = sde_encoder_phys_vid_wait_dma_trigger;
+	ops->wait_for_active = sde_encoder_phys_vid_wait_for_active;
 }
 
 struct sde_encoder_phys *sde_encoder_phys_vid_init(
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.c b/drivers/gpu/drm/msm/sde/sde_fence.c
index 816339b..686c640 100644
--- a/drivers/gpu/drm/msm/sde/sde_fence.c
+++ b/drivers/gpu/drm/msm/sde/sde_fence.c
@@ -402,3 +402,30 @@
 
 	_sde_fence_trigger(ctx, ts);
 }
+
+void sde_fence_timeline_status(struct sde_fence_context *ctx,
+					struct drm_mode_object *drm_obj)
+{
+	char *obj_name;
+
+	if (!ctx || !drm_obj) {
+		SDE_ERROR("invalid input params\n");
+		return;
+	}
+
+	switch (drm_obj->type) {
+	case DRM_MODE_OBJECT_CRTC:
+		obj_name = "crtc";
+		break;
+	case DRM_MODE_OBJECT_CONNECTOR:
+		obj_name = "connector";
+		break;
+	default:
+		obj_name = "unknown";
+		break;
+	}
+
+	SDE_ERROR("drm obj:%s id:%d type:0x%x done_count:%d commit_count:%d\n",
+		obj_name, drm_obj->id, drm_obj->type, ctx->done_count,
+		ctx->commit_count);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.h b/drivers/gpu/drm/msm/sde/sde_fence.h
index 029175b..29d2ec7 100644
--- a/drivers/gpu/drm/msm/sde/sde_fence.h
+++ b/drivers/gpu/drm/msm/sde/sde_fence.h
@@ -132,6 +132,15 @@
  */
 void sde_fence_signal(struct sde_fence_context *fence, ktime_t ts,
 		bool reset_timeline);
+
+/**
+ * sde_fence_timeline_status - prints fence timeline status
+ * @ctx: Pointer to the fence context container
+ * @drm_obj: Pointer to the drm object associated with the fence timeline
+ */
+void sde_fence_timeline_status(struct sde_fence_context *ctx,
+					struct drm_mode_object *drm_obj);
+
 #else
 static inline void *sde_sync_get(uint64_t fd)
 {
@@ -185,6 +194,12 @@
 {
 	return 0;
 }
+
+static inline void sde_fence_timeline_status(struct sde_fence_context *ctx,
+					struct drm_mode_object *drm_obj)
+{
+	/* do nothing */
+}
 #endif /* IS_ENABLED(CONFIG_SW_SYNC) */
 
 #endif /* _SDE_FENCE_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index b8b0967..ddff6ee 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -15,6 +15,7 @@
 #include <linux/of_address.h>
 #include <linux/platform_device.h>
 #include <linux/soc/qcom/llcc-qcom.h>
+#include <linux/pm_qos.h>
 
 #include "sde_hw_mdss.h"
 #include "sde_hw_catalog.h"
@@ -115,6 +116,8 @@
 		"NV12/5/1/1.25 AB24/5/1/1.25 XB24/5/1/1.25"
 #define DEFAULT_MAX_PER_PIPE_BW			2400000
 #define DEFAULT_AMORTIZABLE_THRESHOLD		25
+#define DEFAULT_CPU_MASK			0
+#define DEFAULT_CPU_DMA_LATENCY			PM_QOS_DEFAULT_VALUE
 
 /*************************************************************
  *  DTSI PROPERTY INDEX
@@ -145,6 +148,7 @@
 	SMART_DMA_REV,
 	IDLE_PC,
 	DEST_SCALER,
+	SMART_PANEL_ALIGN_MODE,
 	SDE_PROP_MAX,
 };
 
@@ -176,6 +180,8 @@
 	PERF_QOS_LUT_NRT,
 	PERF_QOS_LUT_CWB,
 	PERF_CDP_SETTING,
+	PERF_CPU_MASK,
+	PERF_CPU_DMA_LATENCY,
 	PERF_PROP_MAX,
 };
 
@@ -398,6 +404,8 @@
 	{SMART_DMA_REV, "qcom,sde-smart-dma-rev", false, PROP_TYPE_STRING},
 	{IDLE_PC, "qcom,sde-has-idle-pc", false, PROP_TYPE_BOOL},
 	{DEST_SCALER, "qcom,sde-has-dest-scaler", false, PROP_TYPE_BOOL},
+	{SMART_PANEL_ALIGN_MODE, "qcom,sde-smart-panel-align-mode",
+			false, PROP_TYPE_U32},
 };
 
 static struct sde_prop_type sde_perf_prop[] = {
@@ -448,6 +456,9 @@
 
 	{PERF_CDP_SETTING, "qcom,sde-cdp-setting", false,
 			PROP_TYPE_U32_ARRAY},
+	{PERF_CPU_MASK, "qcom,sde-qos-cpu-mask", false, PROP_TYPE_U32},
+	{PERF_CPU_DMA_LATENCY, "qcom,sde-qos-cpu-dma-latency", false,
+			PROP_TYPE_U32},
 };
 
 static struct sde_prop_type sspp_prop[] = {
@@ -2729,6 +2740,9 @@
 	cfg->mdp[0].has_dest_scaler =
 		PROP_VALUE_ACCESS(prop_value, DEST_SCALER, 0);
 
+	cfg->mdp[0].smart_panel_align_mode =
+		PROP_VALUE_ACCESS(prop_value, SMART_PANEL_ALIGN_MODE, 0);
+
 	rc = of_property_read_string(np, sde_prop[QSEED_TYPE].prop_name, &type);
 	if (!rc && !strcmp(type, "qseedv3")) {
 		cfg->qseed_type = SDE_SSPP_SCALER_QSEED3;
@@ -3083,6 +3097,15 @@
 		cfg->has_cdp = true;
 	}
 
+	cfg->perf.cpu_mask =
+			prop_exists[PERF_CPU_MASK] ?
+			PROP_VALUE_ACCESS(prop_value, PERF_CPU_MASK, 0) :
+			DEFAULT_CPU_MASK;
+	cfg->perf.cpu_dma_latency =
+			prop_exists[PERF_CPU_DMA_LATENCY] ?
+			PROP_VALUE_ACCESS(prop_value, PERF_CPU_DMA_LATENCY, 0) :
+			DEFAULT_CPU_DMA_LATENCY;
+
 freeprop:
 	kfree(prop_value);
 end:
@@ -3147,6 +3170,12 @@
 		goto end;
 	}
 
+	if (IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_300) ||
+	    IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_301) ||
+	    IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_400) ||
+	    IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_401))
+		sde_cfg->has_hdr = true;
+
 	index = sde_copy_formats(sde_cfg->dma_formats, dma_list_size,
 		0, plane_formats, ARRAY_SIZE(plane_formats));
 	index += sde_copy_formats(sde_cfg->dma_formats, dma_list_size,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index d56ad06..1cd65ea 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -524,6 +524,7 @@
  * @ubwc_static:       ubwc static configuration
  * @ubwc_swizzle:      ubwc default swizzle setting
  * @has_dest_scaler:   indicates support of destination scaler
+ * @smart_panel_align_mode: split display smart panel align modes
  * @clk_ctrls          clock control register definition
  */
 struct sde_mdp_cfg {
@@ -532,6 +533,7 @@
 	u32 ubwc_static;
 	u32 ubwc_swizzle;
 	bool has_dest_scaler;
+	u32 smart_panel_align_mode;
 	struct sde_clk_ctrl_reg clk_ctrls[SDE_CLK_CTRL_MAX];
 };
 
@@ -866,6 +868,8 @@
  * @sfe_lut_tbl: LUT tables for safe signals
  * @qos_lut_tbl: LUT tables for QoS signals
  * @cdp_cfg            cdp use case configurations
+ * @cpu_mask:          pm_qos cpu mask value
+ * @cpu_dma_latency:   pm_qos cpu dma latency value
  */
 struct sde_perf_cfg {
 	u32 max_bw_low;
@@ -890,6 +894,8 @@
 	struct sde_qos_lut_tbl sfe_lut_tbl[SDE_QOS_LUT_USAGE_MAX];
 	struct sde_qos_lut_tbl qos_lut_tbl[SDE_QOS_LUT_USAGE_MAX];
 	struct sde_perf_cdp_cfg cdp_cfg[SDE_PERF_CDP_USAGE_MAX];
+	u32 cpu_mask;
+	u32 cpu_dma_latency;
 };
 
 /**
@@ -913,6 +919,7 @@
  * @has_sbuf           indicate if stream buffer is available
  * @sbuf_headroom      stream buffer headroom in lines
  * @has_idle_pc        indicate if idle power collapse feature is supported
+ * @has_hdr            HDR feature support
  * @dma_formats        Supported formats for dma pipe
  * @cursor_formats     Supported formats for cursor pipe
  * @vig_formats        Supported formats for vig pipe
@@ -941,6 +948,7 @@
 	u32 vbif_qos_nlvl;
 	u32 ts_prefill_rev;
 
+	bool has_hdr;
 	u32 mdss_count;
 	struct sde_mdss_base_cfg mdss[MAX_BLOCKS];
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
index ea4ca4f..426ecf1 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
@@ -42,6 +42,47 @@
 
 #define SDE_REG_RESET_TIMEOUT_US        2000
 
+#define MDP_CTL_FLUSH(n) (0x2000 + (0x200 * (n)) + CTL_FLUSH)
+#define CTL_FLUSH_LM_BIT(n) (6 + (n))
+#define CTL_TOP_LM_OFFSET(index, lm) \
+	(0x2000 + (0x200 * (index)) + ((lm) * 0x4))
+
+int sde_unstage_pipe_for_cont_splash(struct sde_splash_data *data,
+		void __iomem *mmio)
+{
+	int i, j;
+	u32 op_mode;
+
+	if (!data) {
+		pr_err("invalid splash data\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < data->ctl_top_cnt; i++) {
+		struct ctl_top *top = &data->top[i];
+		u8 ctl_id = data->ctl_ids[i] - CTL_0;
+		u32 regval = 0;
+
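+		/* replace the staged layers on each LM with border fill and flush */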
+		op_mode = readl_relaxed(mmio + MDP_CTL_FLUSH(ctl_id));
+
+		/* Set border fill */
+		regval |= CTL_MIXER_BORDER_OUT;
+
+		for (j = 0; j < top->ctl_lm_cnt; j++) {
+			u8 lm_id = top->lm[j].lm_id - LM_0;
+
+			writel_relaxed(regval,
+			mmio + CTL_TOP_LM_OFFSET(ctl_id, lm_id));
+
+			op_mode |= BIT(CTL_FLUSH_LM_BIT(lm_id));
+		}
+		op_mode |= CTL_FLUSH_MASK_CTL;
+
+		writel_relaxed(op_mode, mmio + MDP_CTL_FLUSH(ctl_id));
+	}
+	return 0;
+}
+
 static struct sde_ctl_cfg *_ctl_offset(enum sde_ctl ctl,
 		struct sde_mdss_cfg *m,
 		void __iomem *addr,
@@ -83,6 +124,11 @@
 	SDE_REG_WRITE(&ctx->hw, CTL_START, 0x1);
 }
 
+static inline int sde_hw_ctl_get_start_state(struct sde_hw_ctl *ctx)
+{
+	return SDE_REG_READ(&ctx->hw, CTL_START);
+}
+
 static inline void sde_hw_ctl_trigger_pending(struct sde_hw_ctl *ctx)
 {
 	SDE_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
@@ -608,6 +654,7 @@
 	ops->get_bitmask_cdm = sde_hw_ctl_get_bitmask_cdm;
 	ops->get_bitmask_wb = sde_hw_ctl_get_bitmask_wb;
 	ops->reg_dma_flush = sde_hw_reg_dma_flush;
+	ops->get_start_state = sde_hw_ctl_get_start_state;
 
 	if (cap & BIT(SDE_CTL_SBUF)) {
 		ops->get_bitmask_rot = sde_hw_ctl_get_bitmask_rot;
@@ -616,6 +663,27 @@
 	}
 };
 
+#define CTL_BASE_OFFSET	0x2000
+#define CTL_TOP_OFFSET(index) (CTL_BASE_OFFSET + (0x200 * (index)) + CTL_TOP)
+
+void sde_get_ctl_top_for_cont_splash(void __iomem *mmio,
+		struct ctl_top *top, int index)
+{
+	if (!mmio || !top) {
+		SDE_ERROR("invalid input parameters\n");
+		return;
+	}
+
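+	/* read and decode the CTL_TOP register fields for this CTL block */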
+	top->value = readl_relaxed(mmio + CTL_TOP_OFFSET(index));
+	top->intf_sel = (top->value >> 4) & 0xf;
+	top->pp_sel = (top->value >> 8) & 0x7;
+	top->dspp_sel = (top->value >> 11) & 0x3;
+	top->mode_sel = (top->value >> 17) & 0x1;
+
+	SDE_DEBUG("ctl[%d]_top->0x%x,pp_sel=0x%x,dspp_sel=0x%x,intf_sel=0x%x\n",
+	       index, top->value, top->pp_sel, top->dspp_sel, top->intf_sel);
+}
+
 static struct sde_hw_blk_ops sde_hw_ops = {
 	.start = NULL,
 	.stop = NULL,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
index fe4e194..f8594da 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
@@ -225,6 +225,12 @@
 	 */
 	void (*reg_dma_flush)(struct sde_hw_ctl *ctx, bool blocking);
 
+	/**
+	 * check if ctl start trigger state to confirm the frame pending
+	 * check the ctl start trigger state to confirm whether a
+	 * frame is still pending
+	 */
+	int (*get_start_state)(struct sde_hw_ctl *ctx);
 };
 
 /**
@@ -254,6 +260,24 @@
 };
 
 /**
+ * sde_unstage_pipe_for_cont_splash - Unstage pipes for continuous splash
+ * @data: pointer to sde splash data
+ * @mmio: mapped register io address of MDP
+ * @return: error code
+ */
+int sde_unstage_pipe_for_cont_splash(struct sde_splash_data *data,
+		void __iomem *mmio);
+
+/**
+ * sde_get_ctl_top_for_cont_splash - retrieve the current CTL_TOP configuration
+ * @mmio: mapped register io address of MDP
+ * @top: pointer to the current "ctl_top" structure that needs updating
+ * @index: ctl_top index
+ */
+void sde_get_ctl_top_for_cont_splash(void __iomem *mmio,
+		struct ctl_top *top, int index);
+
+/**
  * sde_hw_ctl - convert base object sde_hw_base to container
  * @hw: Pointer to base hardware block
  * return: Pointer to hardware block container
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ds.h b/drivers/gpu/drm/msm/sde/sde_hw_ds.h
index d81cfaf..6e97c5d 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ds.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ds.h
@@ -27,20 +27,18 @@
 #define SDE_DS_OP_MODE_DUAL BIT(16)
 
 /* struct sde_hw_ds_cfg - destination scaler config
- * @ndx          : DS selection index
+ * @idx          : DS selection index
  * @flags        : Flag to switch between mode for DS
  * @lm_width     : Layer mixer width configuration
  * @lm_heigh     : Layer mixer height configuration
- * @set_lm_flush : LM flush bit
- * @scl3_cfg     : Pointer to sde_hw_scaler3_cfg.
+ * @scl3_cfg     : Configuration data for scaler
  */
 struct sde_hw_ds_cfg {
-	u32 ndx;
+	u32 idx;
 	int flags;
 	u32 lm_width;
 	u32 lm_height;
-	bool set_lm_flush;
-	struct sde_hw_scaler3_cfg *scl3_cfg;
+	struct sde_hw_scaler3_cfg scl3_cfg;
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.c b/drivers/gpu/drm/msm/sde/sde_hw_lm.c
index 4e677c2..134db51 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_lm.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_lm.c
@@ -278,6 +278,43 @@
 	}
 };
 
+#define CTL_BASE_OFFSET	0x2000
+#define CTL_TOP_LM_OFFSET(index, lm)	\
+	(CTL_BASE_OFFSET + (0x200 * (index)) + ((lm) * 0x4))
+
+int sde_get_ctl_lm_for_cont_splash(void __iomem *mmio, int max_lm_cnt,
+		u8 lm_cnt, u8 *lm_ids, struct ctl_top *top, int index)
+{
+	int j;
+	struct sde_splash_lm_hw *lm;
+
+	if (!mmio || !top || !lm_ids) {
+		SDE_ERROR("invalid input parameters\n");
+		return 0;
+	}
+
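+	/* a non-zero LM register value means the mixer is staged on this CTL */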
+	lm = top->lm;
+	for (j = 0; j < max_lm_cnt; j++) {
+		lm[top->ctl_lm_cnt].lm_reg_value = readl_relaxed(mmio
+			      + CTL_TOP_LM_OFFSET(index, j));
+		SDE_DEBUG("ctl[%d]_top --> lm[%d]=0x%x, j=%d\n",
+			index, top->ctl_lm_cnt,
+			lm[top->ctl_lm_cnt].lm_reg_value, j);
+		SDE_DEBUG("lm_cnt = %d\n", lm_cnt);
+		if (lm[top->ctl_lm_cnt].lm_reg_value) {
+			lm[top->ctl_lm_cnt].ctl_id = index;
+			lm_ids[lm_cnt++] = j + LM_0;
+			lm[top->ctl_lm_cnt].lm_id = j + LM_0;
+			SDE_DEBUG("ctl_id=%d, lm[%d].lm_id = %d\n",
+				lm[top->ctl_lm_cnt].ctl_id,
+				top->ctl_lm_cnt,
+				lm[top->ctl_lm_cnt].lm_id);
+			top->ctl_lm_cnt++;
+		}
+	}
+	return top->ctl_lm_cnt;
+}
+
 static struct sde_hw_blk_ops sde_hw_ops = {
 	.start = NULL,
 	.stop = NULL,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.h b/drivers/gpu/drm/msm/sde/sde_hw_lm.h
index 8a146bd..a2307ec 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_lm.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_lm.h
@@ -117,6 +117,19 @@
 }
 
 /**
+ * sde_get_ctl_lm_for_cont_splash - retrieve the current LM blocks
+ * @mmio: mapped register io address of MDP
+ * @max_lm_cnt: number of LM blocks supported in the hw
+ * @lm_cnt: number of LM blocks already active
+ * @lm_ids: pointer to store the active LM block IDs
+ * @top: pointer to the current "ctl_top" structure
+ * @index: ctl_top index
+ * return: number of active LM blocks for this CTL block
+ */
+int sde_get_ctl_lm_for_cont_splash(void __iomem *mmio, int max_lm_cnt,
+		u8 lm_cnt, u8 *lm_ids, struct ctl_top *top, int index);
+
+/**
  * sde_hw_lm_init(): Initializes the mixer hw driver object.
  * should be called once before accessing every mixer.
  * @idx:  mixer index for which driver object is required
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
index 952ee8f..3125ebf 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
@@ -535,4 +535,64 @@
 	struct sde_rect rect;
 };
 
+/**
+ * struct sde_splash_lm_hw - Struct contains LM block properties
+ * @lm_id:	stores the current LM ID
+ * @ctl_id:	stores the current CTL ID associated with the LM.
+ * @lm_reg_value: stores the LM block register value
+ */
+struct sde_splash_lm_hw {
+	u8 lm_id;
+	u8 ctl_id;
+	u32 lm_reg_value;
+};
+
+/**
+ * struct ctl_top - Struct contains CTL block properties
+ * @value:	Store the CTL block register value
+ * @mode_sel:	stores the mode selected in the CTL block
+ * @dspp_sel:	stores the dspp selected in the CTL block
+ * @pp_sel:	stores the pp selected in the CTL block
+ * @intf_sel:	stores the intf selected in the CTL block
+ * @lm:		list of LM blocks associated with the CTL block
+ * @ctl_lm_cnt:	stores the active number of MDSS "LM" blocks in the CTL block
+ */
+struct ctl_top {
+	u32 value;
+	u8 mode_sel;
+	u8 dspp_sel;
+	u8 pp_sel;
+	u8 intf_sel;
+	struct sde_splash_lm_hw lm[LM_MAX - LM_0];
+	u8 ctl_lm_cnt;
+};
+
+/**
+ * struct sde_splash_data - Struct contains details of continuous splash
+ *	memory region and initial pipeline configuration.
+ * @smmu_handoff_pending: boolean to notify handoff from splash memory to smmu
+ * @splash_base:	Base address of continuous splash region reserved
+ *                      by bootloader
+ * @splash_size:	Size of continuous splash region
+ * @top:	struct ctl_top objects
+ * @ctl_ids:	stores the valid MDSS ctl block ids for the current mode
+ * @lm_ids:	stores the valid MDSS layer mixer block ids for the current mode
+ * @dsc_ids:	stores the valid MDSS DSC block ids for the current mode
+ * @ctl_top_cnt: stores the active number of MDSS "top" blks of the current mode
+ * @lm_cnt:	stores the active number of MDSS "LM" blks for the current mode
+ * @dsc_cnt:	stores the active number of MDSS "dsc" blks for the current mode
+ */
+struct sde_splash_data {
+	bool smmu_handoff_pending;
+	unsigned long splash_base;
+	u32 splash_size;
+	struct ctl_top top[CTL_MAX - CTL_0];
+	u8 ctl_ids[CTL_MAX - CTL_0];
+	u8 lm_ids[LM_MAX - LM_0];
+	u8 dsc_ids[DSC_MAX - DSC_0];
+	u8 ctl_top_cnt;
+	u8 lm_cnt;
+	u8 dsc_cnt;
+};
+
 #endif  /* _SDE_HW_MDSS_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
index d8f79f1..050e21b 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
@@ -357,6 +357,49 @@
 	}
 };
 
+#define MDP_PP_DSC_OFFSET(index) (0x71000 + (0x800 * (index)) + 0x0a0)
+#define MDP_PP_AUTOREFRESH_OFFSET(index) (0x71000 + (0x800 * (index)) + 0x030)
+
+int sde_get_pp_dsc_for_cont_splash(void __iomem *mmio,
+			int max_dsc_cnt, u8 *dsc_ids)
+{
+	int index;
+	int value, dsc_cnt = 0;
+
+	if (!mmio || !dsc_ids) {
+		SDE_ERROR("invalid input parameters\n");
+		return 0;
+	}
+
+	SDE_DEBUG("max_dsc_cnt = %d\n", max_dsc_cnt);
+	for (index = 0; index < max_dsc_cnt; index++) {
+		value = readl_relaxed(mmio
+			      + MDP_PP_DSC_OFFSET(index));
+		SDE_DEBUG("DSC[%d]=0x%x\n",
+					index, value);
+		SDE_DEBUG("dsc_cnt = %d\n", dsc_cnt);
+		if (value) {
+			dsc_ids[dsc_cnt] = index + DSC_0;
+			dsc_cnt++;
+		}
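+		/* disable autorefresh if it was left enabled on this pingpong */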
+		value = readl_relaxed(mmio
+			      + MDP_PP_AUTOREFRESH_OFFSET(index));
+		SDE_DEBUG("AUTOREFRESH[%d]=0x%x\n",
+					index, value);
+		if (value) {
+			SDE_DEBUG("Disabling autorefresh\n");
+			writel_relaxed(0x0, mmio
+				+ MDP_PP_AUTOREFRESH_OFFSET(index));
+			/*
+			 * Wait for one frame update so that the autorefresh
+			 * disable takes effect
+			 */
+			usleep_range(16000, 20000);
+		}
+	}
+	return dsc_cnt;
+}
+
 static struct sde_hw_blk_ops sde_hw_ops = {
 	.start = NULL,
 	.stop = NULL,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
index 389b2d2..fef49f4 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
@@ -161,6 +161,16 @@
 }
 
 /**
+ * sde_get_pp_dsc_for_cont_splash - retrieve the current dsc enabled blocks
+ * @mmio: mapped register io address of MDP
+ * @max_dsc_cnt: number of DSC blocks supported in the hw
+ * @dsc_ids: pointer to store the active DSC block IDs
+ * return: number of active DSC blocks
+ */
+int sde_get_pp_dsc_for_cont_splash(void __iomem *mmio,
+		int max_dsc_cnt, u8 *dsc_ids);
+
+/**
  * sde_hw_pingpong_init - initializes the pingpong driver for the passed
  *	pingpong idx.
  * @idx:  Pingpong index for which driver object is required
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c
index 3326aa2..cf65784 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c
@@ -509,8 +509,13 @@
 			last_cmd_buf[i] =
 			    alloc_reg_dma_buf_v1(REG_DMA_HEADERS_BUFFER_SZ);
 			if (IS_ERR_OR_NULL(last_cmd_buf[i])) {
-				rc = -EINVAL;
-				break;
+				/*
+				 * This will allow reg dma to fall back to
+				 * AHB domain
+				 */
+				pr_info("Failed to allocate reg dma, ret:%ld\n",
+						PTR_ERR(last_cmd_buf[i]));
+				return 0;
 			}
 		}
 	}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
index acecf1a..e7aa6ea 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
@@ -259,7 +259,8 @@
  * Setup source pixel format, flip,
  */
 static void sde_hw_sspp_setup_format(struct sde_hw_pipe *ctx,
-		const struct sde_format *fmt, u32 flags,
+		const struct sde_format *fmt,
+		bool blend_enabled, u32 flags,
 		enum sde_sspp_multirect_index rect_mode)
 {
 	struct sde_hw_blk_reg_map *c;
@@ -328,7 +329,8 @@
 			SDE_FETCH_CONFIG_RESET_VALUE |
 			ctx->mdp->highest_bank_bit << 18);
 		if (IS_UBWC_20_SUPPORTED(ctx->catalog->ubwc_version)) {
-			fast_clear = fmt->alpha_enable ? BIT(31) : 0;
+			fast_clear = (fmt->alpha_enable && blend_enabled) ?
+				BIT(31) : 0;
 			SDE_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
 					fast_clear | (ctx->mdp->ubwc_swizzle) |
 					(ctx->mdp->highest_bank_bit << 4));
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
index d32c9d8..fdf215f 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
@@ -293,12 +293,14 @@
 	/**
 	 * setup_format - setup pixel format cropping rectangle, flip
 	 * @ctx: Pointer to pipe context
-	 * @cfg: Pointer to pipe config structure
+	 * @fmt: Pointer to sde_format structure
+	 * @blend_enabled: flag indicating blend enabled or disabled on plane
 	 * @flags: Extra flags for format config
 	 * @index: rectangle index in multirect
 	 */
 	void (*setup_format)(struct sde_hw_pipe *ctx,
-			const struct sde_format *fmt, u32 flags,
+			const struct sde_format *fmt,
+			bool blend_enabled, u32 flags,
 			enum sde_sspp_multirect_index index);
 
 	/**
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.c b/drivers/gpu/drm/msm/sde/sde_hw_top.c
index b48022f..51e4ba2 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_top.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.c
@@ -86,6 +86,9 @@
 				lower_pipe = FLD_SMART_PANEL_FREE_RUN;
 
 			upper_pipe = lower_pipe;
+
+			/* smart panel align mode */
+			lower_pipe |= BIT(mdp->caps->smart_panel_align_mode);
 		} else {
 			if (cfg->intf == INTF_2) {
 				lower_pipe = FLD_INTF_1_SW_TRG_MUX;
diff --git a/drivers/gpu/drm/msm/sde/sde_irq.c b/drivers/gpu/drm/msm/sde/sde_irq.c
index d6a1d30..10435da 100644
--- a/drivers/gpu/drm/msm/sde/sde_irq.c
+++ b/drivers/gpu/drm/msm/sde/sde_irq.c
@@ -104,7 +104,8 @@
 	}
 
 	/* disable irq until power event enables it */
-	irq_set_status_flags(sde_kms->irq_num, IRQ_NOAUTOEN);
+	if (!sde_kms->cont_splash_en)
+		irq_set_status_flags(sde_kms->irq_num, IRQ_NOAUTOEN);
 }
 
 int sde_irq_postinstall(struct msm_kms *kms)
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index f6f04fd..2acbb0c 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -20,8 +20,11 @@
 
 #include <drm/drm_crtc.h>
 #include <linux/debugfs.h>
+#include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/dma-buf.h>
+#include <linux/memblock.h>
+#include <linux/bootmem.h>
 
 #include "msm_drv.h"
 #include "msm_mmu.h"
@@ -84,6 +87,7 @@
 
 static int sde_kms_hw_init(struct msm_kms *kms);
 static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms);
+static int _sde_kms_mmu_init(struct sde_kms *sde_kms);
 static int _sde_kms_register_events(struct msm_kms *kms,
 		struct drm_mode_object *obj, u32 event, bool en);
 bool sde_is_custom_client(void)
@@ -491,6 +495,30 @@
 	return 0;
 }
 
+static int _sde_kms_release_splash_buffer(unsigned int mem_addr,
+							unsigned int size)
+{
+	unsigned long pfn_start, pfn_end, pfn_idx;
+	int ret = 0;
+
+	if (!mem_addr || !size) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	pfn_start = mem_addr >> PAGE_SHIFT;
+	pfn_end = (mem_addr + size) >> PAGE_SHIFT;
+
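+	/* return the reserved splash region to the kernel page allocator */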
+	ret = memblock_free(mem_addr, size);
+	if (ret) {
+		SDE_ERROR("continuous splash memory free failed:%d\n", ret);
+		return ret;
+	}
+	for (pfn_idx = pfn_start; pfn_idx < pfn_end; pfn_idx++)
+		free_reserved_page(pfn_to_page(pfn_idx));
+
+	return ret;
+}
+
 static void sde_kms_prepare_commit(struct msm_kms *kms,
 		struct drm_atomic_state *state)
 {
@@ -498,6 +526,11 @@
 	struct msm_drm_private *priv;
 	struct drm_device *dev;
 	struct drm_encoder *encoder;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	int i, rc = 0;
+	struct drm_plane *plane;
+	bool commit_no_planes = true;
 
 	if (!kms)
 		return;
@@ -510,9 +543,38 @@
 
 	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
-		if (encoder->crtc != NULL)
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+				head) {
+			if (encoder->crtc != crtc)
+				continue;
+
 			sde_encoder_prepare_commit(encoder);
+		}
+	}
+
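+	/* check whether this commit stages any planes during splash handoff */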
+	if (sde_kms->splash_data.smmu_handoff_pending) {
+		list_for_each_entry(plane, &dev->mode_config.plane_list, head)
+			if (plane->state != NULL &&
+					plane->state->crtc != NULL)
+				commit_no_planes = false;
+	}
+
+	if (sde_kms->splash_data.smmu_handoff_pending && commit_no_planes) {
+
+		rc = sde_unstage_pipe_for_cont_splash(&sde_kms->splash_data,
+						sde_kms->mmio);
+		if (rc)
+			SDE_ERROR("pipe unstaging failed: %d\n", rc);
+
+		rc = _sde_kms_release_splash_buffer(
+				sde_kms->splash_data.splash_base,
+				sde_kms->splash_data.splash_size);
+		if (rc)
+			SDE_ERROR("release of splash memory failed %d\n", rc);
+
+		sde_kms->splash_data.smmu_handoff_pending = false;
+	}
 
 	/*
 	 * NOTE: for secure use cases we want to apply the new HW
@@ -575,6 +637,14 @@
 	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
 
 	SDE_EVT32_VERBOSE(SDE_EVTLOG_FUNC_EXIT);
+
+	if (sde_kms->cont_splash_en) {
+		SDE_DEBUG("Disabling cont_splash feature\n");
+		sde_kms->cont_splash_en = false;
+		sde_power_resource_enable(&priv->phandle,
+				sde_kms->core_client, false);
+		SDE_DEBUG("removing vote for MDP resources\n");
+	}
 }
 
 static void sde_kms_wait_for_commit_done(struct msm_kms *kms,
@@ -774,6 +844,7 @@
 		.get_dst_format = dsi_display_get_dst_format,
 		.post_kickoff = dsi_conn_post_kickoff,
 		.check_status = dsi_display_check_status,
+		.enable_event = dsi_conn_enable_event
 	};
 	static const struct sde_connector_ops wb_ops = {
 		.post_init =    sde_wb_connector_post_init,
@@ -795,6 +866,7 @@
 		.get_mode_info  = dp_connector_get_mode_info,
 		.send_hpd_event = dp_connector_send_hpd_event,
 		.check_status = NULL,
+		.pre_kickoff  = dp_connector_pre_kickoff,
 	};
 	struct msm_display_info info;
 	struct drm_encoder *encoder;
@@ -1089,6 +1161,30 @@
 }
 
 /**
+ * sde_kms_timeline_status - provides current timeline status
+ *    This API should be called without mode config lock.
+ * @dev: Pointer to drm device
+ */
+void sde_kms_timeline_status(struct drm_device *dev)
+{
+	struct drm_crtc *crtc;
+	struct drm_connector *conn;
+
+	if (!dev) {
+		SDE_ERROR("invalid drm device node\n");
+		return;
+	}
+
+	drm_for_each_crtc(crtc, dev)
+		sde_crtc_timeline_status(crtc);
+
+	mutex_lock(&dev->mode_config.mutex);
+	drm_for_each_connector(conn, dev)
+		sde_conn_timeline_status(conn);
+	mutex_unlock(&dev->mode_config.mutex);
+}
+
+/**
  * struct sde_kms_fbo_fb - framebuffer creation list
  * @list: list of framebuffer attached to framebuffer object
  * @fb: Pointer to framebuffer attached to framebuffer object
@@ -1815,10 +1911,10 @@
 	struct sde_kms *sde_kms;
 	struct drm_device *dev;
 	struct drm_crtc *crtc;
-	struct drm_crtc *sec_crtc = NULL, *temp_crtc = NULL;
+	struct drm_crtc *cur_crtc = NULL, *global_crtc = NULL;
 	struct drm_crtc_state *crtc_state;
-	int secure_crtc_cnt = 0, active_crtc_cnt = 0;
-	int secure_global_crtc_cnt = 0, active_mode_crtc_cnt = 0;
+	int active_crtc_cnt = 0, global_active_crtc_cnt = 0;
+	bool sec_session = false, global_sec_session = false;
 	int i;
 
 	if (!kms || !state) {
@@ -1826,56 +1922,64 @@
 		SDE_ERROR("invalid arguments\n");
 	}
 
-	/* iterate state object for active and secure crtc */
+	sde_kms = to_sde_kms(kms);
+	dev = sde_kms->dev;
+
+	/* iterate state object for active secure/non-secure crtc */
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
 		if (!crtc_state->active)
 			continue;
+
 		active_crtc_cnt++;
 		if (sde_crtc_get_secure_level(crtc, crtc_state) ==
-				SDE_DRM_SEC_ONLY) {
-			sec_crtc = crtc;
-			secure_crtc_cnt++;
-		}
+				SDE_DRM_SEC_ONLY)
+			sec_session = true;
+
+		cur_crtc = crtc;
 	}
 
-	/* bail out from further validation if no secure ctrc */
-	if (!secure_crtc_cnt)
-		return 0;
-
-	if ((secure_crtc_cnt > MAX_ALLOWED_SECURE_CLIENT_CNT) ||
-		(secure_crtc_cnt &&
-		 (active_crtc_cnt > MAX_ALLOWED_CRTC_CNT_DURING_SECURE))) {
-		SDE_ERROR("Secure check failed active:%d, secure:%d\n",
-				active_crtc_cnt, secure_crtc_cnt);
-		return -EPERM;
-	}
-
-	sde_kms = to_sde_kms(kms);
-	dev = sde_kms->dev;
 	/* iterate global list for active and secure crtc */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-
 		if (!crtc->state->active)
 			continue;
 
-		active_mode_crtc_cnt++;
-
+		global_active_crtc_cnt++;
 		if (sde_crtc_get_secure_level(crtc, crtc->state) ==
-				SDE_DRM_SEC_ONLY) {
-			secure_global_crtc_cnt++;
-			temp_crtc = crtc;
-		}
+				SDE_DRM_SEC_ONLY)
+			global_sec_session = true;
+
+		global_crtc = crtc;
 	}
 
-	/**
-	 * if more than one crtc is active fail
-	 * check if the previous and current commit secure
-	 * are same
+	/*
+	 * - fail secure crtc commit, if any other crtc session is already
+	 *   in progress
+	 * - fail non-secure crtc commit, if any secure crtc session is already
+	 *   in progress
 	 */
-	if (secure_crtc_cnt && ((active_mode_crtc_cnt > 1) ||
-			(secure_global_crtc_cnt && (temp_crtc != sec_crtc))))
-		SDE_ERROR("Secure check failed active:%d crtc_id:%d\n",
-				active_mode_crtc_cnt, temp_crtc->base.id);
+	if (global_sec_session || sec_session) {
+		if ((global_active_crtc_cnt >
+					MAX_ALLOWED_CRTC_CNT_DURING_SECURE) ||
+		    (active_crtc_cnt > MAX_ALLOWED_CRTC_CNT_DURING_SECURE)) {
+			SDE_ERROR(
+			"Secure check failed global_active:%d active:%d\n",
+				global_active_crtc_cnt, active_crtc_cnt);
+			return -EPERM;
+
+		/*
+		 * As only one crtc is allowed during secure session, the crtc
+		 * in this commit should match with the global crtc, if it
+		 * exists
+		 */
+		} else if (global_crtc && (global_crtc != cur_crtc)) {
+			SDE_ERROR(
+			    "crtc%d-sec%d not allowed during crtc%d-sec%d\n",
+				cur_crtc->base.id, sec_session,
+				global_crtc->base.id, global_sec_session);
+			return -EPERM;
+		}
+
+	}
 
 	return 0;
 }
@@ -1974,6 +2078,142 @@
 
 }
 
+static int _sde_kms_gen_drm_mode(struct sde_kms *sde_kms,
+				void *display,
+				struct drm_display_mode *drm_mode)
+{
+	struct dsi_display_mode *modes = NULL;
+	u32 count = 0;
+	u32 size = 0;
+	int rc = 0;
+
+	rc = dsi_display_get_mode_count(display, &count);
+	if (rc) {
+		SDE_ERROR("failed to get num of modes, rc=%d\n", rc);
+		return rc;
+	}
+
+	SDE_DEBUG("num of modes = %d\n", count);
+	size = count * sizeof(*modes);
+	modes = kzalloc(size, GFP_KERNEL);
+	if (!modes) {
+		count = 0;
+		goto end;
+	}
+
+	rc = dsi_display_get_modes(display, modes);
+	if (rc) {
+		SDE_ERROR("failed to get modes, rc=%d\n", rc);
+		count = 0;
+		goto error;
+	}
+
+	/* TODO: currently consider modes[0] as the preferred mode */
+	dsi_convert_to_drm_mode(&modes[0], drm_mode);
+
+	SDE_DEBUG("hdisplay = %d, vdisplay = %d\n",
+		drm_mode->hdisplay, drm_mode->vdisplay);
+	drm_mode_set_name(drm_mode);
+	drm_mode_set_crtcinfo(drm_mode, 0);
+error:
+	kfree(modes);
+end:
+	return rc;
+}
+
+static int sde_kms_cont_splash_config(struct msm_kms *kms)
+{
+	void *display;
+	struct dsi_display *dsi_display;
+	struct msm_display_info info;
+	struct drm_encoder *encoder = NULL;
+	struct drm_crtc *crtc = NULL;
+	int i, rc = 0;
+	struct drm_display_mode *drm_mode = NULL;
+	struct drm_device *dev;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+
+	if (!kms) {
+		SDE_ERROR("invalid kms\n");
+		return -EINVAL;
+	}
+
+	sde_kms = to_sde_kms(kms);
+	dev = sde_kms->dev;
+	if (!dev || !dev->platformdev) {
+		SDE_ERROR("invalid device\n");
+		return -EINVAL;
+	}
+
+	if (!sde_kms->cont_splash_en) {
+		DRM_INFO("cont_splash feature not enabled\n");
+		return rc;
+	}
+
+	/* Currently, we only support one dsi display configuration */
+	/* dsi */
+	for (i = 0; i < sde_kms->dsi_display_count; ++i) {
+		display = sde_kms->dsi_displays[i];
+		dsi_display = (struct dsi_display *)display;
+		SDE_DEBUG("display->name = %s\n", dsi_display->name);
+
+		if (dsi_display->bridge->base.encoder) {
+			encoder = dsi_display->bridge->base.encoder;
+			SDE_DEBUG("encoder name = %s\n", encoder->name);
+		}
+		memset(&info, 0x0, sizeof(info));
+		rc = dsi_display_get_info(&info, display);
+		if (rc) {
+			SDE_ERROR("dsi get_info %d failed\n", i);
+			encoder = NULL;
+			continue;
+		}
+		SDE_DEBUG("info.is_connected = %s, info.is_primary = %s\n",
+			((info.is_connected) ? "true" : "false"),
+			((info.is_primary) ? "true" : "false"));
+		break;
+	}
+
+	if (!encoder) {
+		SDE_ERROR("encoder not initialized\n");
+		return -EINVAL;
+	}
+
+	priv = sde_kms->dev->dev_private;
+	encoder->crtc = priv->crtcs[0];
+	crtc = encoder->crtc;
+	SDE_DEBUG("crtc id = %d\n", crtc->base.id);
+
+	crtc->state->encoder_mask = (1 << drm_encoder_index(encoder));
+	drm_mode = drm_mode_create(encoder->dev);
+	if (!drm_mode) {
+		SDE_ERROR("drm_mode create failed\n");
+		return -EINVAL;
+	}
+	_sde_kms_gen_drm_mode(sde_kms, display, drm_mode);
+	SDE_DEBUG("drm_mode->name = %s, id=%d, type=0x%x, flags=0x%x\n",
+			drm_mode->name, drm_mode->base.id,
+			drm_mode->type, drm_mode->flags);
+
+	/* Update CRTC drm structure */
+	crtc->state->active = true;
+	rc = drm_atomic_set_mode_for_crtc(crtc->state, drm_mode);
+	if (rc) {
+		SDE_ERROR("Failed: set mode for crtc. rc = %d\n", rc);
+		return rc;
+	}
+	drm_mode_copy(&crtc->state->adjusted_mode, drm_mode);
+	drm_mode_copy(&crtc->mode, drm_mode);
+
+	/* Update encoder structure */
+	sde_encoder_update_caps_for_cont_splash(encoder);
+
+	sde_crtc_update_cont_splash_mixer_settings(crtc);
+
+	return rc;
+}
+
 static int sde_kms_pm_suspend(struct device *dev)
 {
 	struct drm_device *ddev;
@@ -2154,6 +2394,7 @@
 	.pm_suspend      = sde_kms_pm_suspend,
 	.pm_resume       = sde_kms_pm_resume,
 	.destroy         = sde_kms_destroy,
+	.cont_splash_config = sde_kms_cont_splash_config,
 	.register_events = _sde_kms_register_events,
 	.get_address_space = _sde_kms_get_address_space,
 	.postopen = _sde_kms_post_open,
@@ -2229,6 +2470,103 @@
 	return ret;
 }
 
+static void _sde_kms_pm_qos_add_request(struct sde_kms *sde_kms)
+{
+	struct pm_qos_request *req;
+	u32 cpu_mask;
+	u32 cpu_dma_latency;
+	int cpu;
+
+	if (!sde_kms || !sde_kms->catalog)
+		return;
+
+	cpu_mask = sde_kms->catalog->perf.cpu_mask;
+	cpu_dma_latency = sde_kms->catalog->perf.cpu_dma_latency;
+	if (!cpu_mask)
+		return;
+
+	req = &sde_kms->pm_qos_cpu_req;
+	req->type = PM_QOS_REQ_AFFINE_CORES;
+	cpumask_clear(&req->cpus_affine);
+	for_each_possible_cpu(cpu) {
+		if ((1 << cpu) & cpu_mask)
+			cpumask_set_cpu(cpu, &req->cpus_affine);
+	}
+	pm_qos_add_request(req, PM_QOS_CPU_DMA_LATENCY, cpu_dma_latency);
+
+	SDE_EVT32_VERBOSE(cpu_mask, cpu_dma_latency);
+}
+
+static void _sde_kms_pm_qos_remove_request(struct sde_kms *sde_kms)
+{
+	if (!sde_kms || !sde_kms->catalog || !sde_kms->catalog->perf.cpu_mask)
+		return;
+
+	pm_qos_remove_request(&sde_kms->pm_qos_cpu_req);
+}
+
+/* the caller needs to enable the clocks before calling this function */
+static int _sde_kms_cont_splash_res_init(struct sde_kms *sde_kms)
+{
+	struct sde_mdss_cfg *cat;
+	struct drm_device *dev;
+	struct msm_drm_private *priv;
+	struct sde_splash_data *splash_data;
+	int i;
+	int ctl_top_cnt;
+
+	if (!sde_kms || !sde_kms->catalog) {
+		SDE_ERROR("invalid kms\n");
+		return -EINVAL;
+	}
+	cat = sde_kms->catalog;
+	dev = sde_kms->dev;
+	priv = dev->dev_private;
+	splash_data = &sde_kms->splash_data;
+	SDE_DEBUG("mixer_count=%d, ctl_count=%d, dsc_count=%d\n",
+			cat->mixer_count,
+			cat->ctl_count,
+			cat->dsc_count);
+
+	ctl_top_cnt = cat->ctl_count;
+
+	if (ctl_top_cnt > ARRAY_SIZE(splash_data->top)) {
+		SDE_ERROR("Mismatch in ctl_top array size\n");
+		return -EINVAL;
+	}
+	for (i = 0; i < ctl_top_cnt; i++) {
+		sde_get_ctl_top_for_cont_splash(sde_kms->mmio,
+				&splash_data->top[i], i);
+		if (splash_data->top[i].intf_sel) {
+			splash_data->lm_cnt +=
+				sde_get_ctl_lm_for_cont_splash
+					(sde_kms->mmio,
+					sde_kms->catalog->mixer_count,
+					splash_data->lm_cnt,
+					splash_data->lm_ids,
+					&splash_data->top[i], i);
+			splash_data->ctl_ids[splash_data->ctl_top_cnt]
+							= i + CTL_0;
+			splash_data->ctl_top_cnt++;
+			sde_kms->cont_splash_en = true;
+		}
+	}
+
+	/* Skip DSC blk reads if cont_splash is disabled */
+	if (!sde_kms->cont_splash_en)
+		return 0;
+
+	splash_data->dsc_cnt =
+		sde_get_pp_dsc_for_cont_splash(sde_kms->mmio,
+				sde_kms->catalog->dsc_count,
+				splash_data->dsc_ids);
+	SDE_DEBUG("splash_data: ctl_top_cnt=%d, lm_cnt=%d, dsc_cnt=%d\n",
+		splash_data->ctl_top_cnt, splash_data->lm_cnt,
+		splash_data->dsc_cnt);
+
+	return 0;
+}
+
 static void sde_kms_handle_power_event(u32 event_type, void *usr)
 {
 	struct sde_kms *sde_kms = usr;
@@ -2244,7 +2582,9 @@
 	if (event_type == SDE_POWER_EVENT_POST_ENABLE) {
 		sde_irq_update(msm_kms, true);
 		sde_vbif_init_memtypes(sde_kms);
+		_sde_kms_pm_qos_add_request(sde_kms);
 	} else if (event_type == SDE_POWER_EVENT_PRE_DISABLE) {
+		_sde_kms_pm_qos_remove_request(sde_kms);
 		sde_irq_update(msm_kms, false);
 	}
 }
@@ -2299,6 +2639,43 @@
 	return rc;
 }
 
+static int _sde_kms_get_splash_data(struct sde_splash_data *data)
+{
+	int ret = 0;
+	struct device_node *parent, *node;
+	struct resource r;
+
+	if (!data)
+		return -EINVAL;
+
+	parent = of_find_node_by_path("/reserved-memory");
+	if (!parent) {
+		SDE_ERROR("failed to find reserved-memory node\n");
+		return -EINVAL;
+	}
+
+	node = of_find_node_by_name(parent, "cont_splash_region");
+	if (!node) {
+		SDE_ERROR("failed to find splash memory reservation\n");
+		return -EINVAL;
+	}
+
+	if (of_address_to_resource(node, 0, &r)) {
+		SDE_ERROR("failed to find data for  splash memory\n");
+		return -EINVAL;
+	}
+
+	data->splash_base = (unsigned long)r.start;
+	data->splash_size = (r.end - r.start) + 1;
+
+	pr_info("found continuous splash base address:%lx size:%x\n",
+						data->splash_base,
+						data->splash_size);
+	data->smmu_handoff_pending = true;
+
+	return ret;
+}
+
 static int sde_kms_hw_init(struct msm_kms *kms)
 {
 	struct sde_kms *sde_kms;
@@ -2396,6 +2773,12 @@
 		goto error;
 	}
 
+	rc = _sde_kms_get_splash_data(&sde_kms->splash_data);
+	if (rc) {
+		SDE_ERROR("sde splash data fetch failed: %d\n", rc);
+		goto error;
+	}
+
 	rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
 		true);
 	if (rc) {
@@ -2419,15 +2802,7 @@
 
 	sde_dbg_init_dbg_buses(sde_kms->core_rev);
 
-	/*
-	 * Now we need to read the HW catalog and initialize resources such as
-	 * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
-	 */
-	rc = _sde_kms_mmu_init(sde_kms);
-	if (rc) {
-		SDE_ERROR("sde_kms_mmu_init failed: %d\n", rc);
-		goto power_error;
-	}
+	_sde_kms_cont_splash_res_init(sde_kms);
 
 	/* Initialize reg dma block which is a singleton */
 	rc = sde_reg_dma_init(sde_kms->reg_dma, sde_kms->catalog,
@@ -2437,6 +2812,12 @@
 		goto power_error;
 	}
 
+	rc = _sde_kms_mmu_init(sde_kms);
+	if (rc) {
+		SDE_ERROR("sde_kms_mmu_init failed: %d\n", rc);
+		goto power_error;
+	}
+
 	rc = sde_rm_init(&sde_kms->rm, sde_kms->catalog, sde_kms->mmio,
 			sde_kms->dev);
 	if (rc) {
@@ -2554,7 +2935,11 @@
 		SDE_DEBUG("added genpd provider %s\n", sde_kms->genpd.name);
 	}
 
-	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+	if (sde_kms->cont_splash_en)
+		SDE_DEBUG("Skipping MDP Resources disable\n");
+	else
+		sde_power_resource_enable(&priv->phandle,
+						sde_kms->core_client, false);
 
 	return 0;
 
@@ -2623,3 +3008,9 @@
 
 	return ret;
 }
+
+int sde_kms_handle_recovery(struct drm_encoder *encoder)
+{
+	SDE_EVT32(DRMID(encoder), MSM_ENC_ACTIVE_REGION);
+	return sde_encoder_wait_for_event(encoder, MSM_ENC_ACTIVE_REGION);
+}
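The secure-session check reworked above reduces to two rules: while any secure session is involved, at most MAX_ALLOWED_CRTC_CNT_DURING_SECURE CRTCs may be active, and the CRTC being committed must match the CRTC that is already active globally. A minimal standalone sketch of that rule, for illustration only (the helper name, CRTC ids and the limit of 1 are assumptions, not driver code):

	#include <stdbool.h>
	#include <stdio.h>

	/*
	 * Illustration of the rule enforced above: with any secure session in
	 * the picture, only one CRTC may be active and the committing CRTC
	 * must match the globally active one. The limit of 1 mirrors what
	 * MAX_ALLOWED_CRTC_CNT_DURING_SECURE is assumed to be. A global_crtc
	 * of -1 means no CRTC is active globally yet.
	 */
	static int secure_commit_allowed(int cur_crtc, bool sec_session,
					 int global_crtc, bool global_sec_session,
					 int active_cnt, int global_active_cnt)
	{
		if (!sec_session && !global_sec_session)
			return 0;	/* nothing secure: no extra rule */

		if (active_cnt > 1 || global_active_cnt > 1)
			return -1;	/* more than one active CRTC */

		if (global_crtc >= 0 && global_crtc != cur_crtc)
			return -1;	/* commit targets a different CRTC */

		return 0;
	}

	int main(void)
	{
		/* secure commit on the already-active CRTC 32: allowed */
		printf("%d\n", secure_commit_allowed(32, true, 32, false, 1, 1));
		/* non-secure commit on CRTC 33 while CRTC 32 runs secure: rejected */
		printf("%d\n", secure_commit_allowed(33, false, 32, true, 1, 1));
		return 0;
	}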
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index f047305..26c45e2 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -21,6 +21,7 @@
 
 #include <linux/msm_ion.h>
 #include <linux/pm_domain.h>
+#include <linux/pm_qos.h>
 
 #include "msm_drv.h"
 #include "msm_kms.h"
@@ -30,6 +31,7 @@
 #include "sde_hw_catalog.h"
 #include "sde_hw_ctl.h"
 #include "sde_hw_lm.h"
+#include "sde_hw_pingpong.h"
 #include "sde_hw_interrupts.h"
 #include "sde_hw_wb.h"
 #include "sde_hw_top.h"
@@ -182,6 +184,7 @@
 
 	struct msm_gem_address_space *aspace[MSM_SMMU_DOMAIN_MAX];
 	struct sde_power_client *core_client;
+	struct pm_qos_request pm_qos_cpu_req;
 
 	struct ion_client *iclient;
 	struct sde_power_event *power_event;
@@ -212,7 +215,8 @@
 
 	struct sde_rm rm;
 	bool rm_init;
-
+	struct sde_splash_data splash_data;
+	bool cont_splash_en;
 	struct sde_hw_vbif *hw_vbif[VBIF_MAX];
 	struct sde_hw_mdp *hw_mdp;
 	int dsi_display_count;
@@ -366,7 +370,8 @@
  * @S: Pointer to sde_kms_info structure
  * Returns: Pointer to byte data
  */
-#define SDE_KMS_INFO_DATA(S)    ((S) ? ((struct sde_kms_info *)(S))->data : 0)
+#define SDE_KMS_INFO_DATA(S)    ((S) ? ((struct sde_kms_info *)(S))->data \
+							: NULL)
 
 /**
  * SDE_KMS_INFO_DATALEN - Macro for accessing sde_kms_info data length
@@ -562,4 +567,17 @@
 int sde_kms_mmu_attach(struct sde_kms *sde_kms, bool secure_only);
 int sde_kms_mmu_detach(struct sde_kms *sde_kms, bool secure_only);
 
+/**
+ * sde_kms_timeline_status - provides current timeline status
+ * @dev: Pointer to drm device
+ */
+void sde_kms_timeline_status(struct drm_device *dev);
+
+/**
+ * sde_kms_handle_recovery - handler function for FIFO overflow issue
+ * @encoder: pointer to drm encoder structure
+ * return: 0 on success; error code otherwise
+ */
+int sde_kms_handle_recovery(struct drm_encoder *encoder);
+
 #endif /* __sde_kms_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 2faceea..067c4604 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -38,14 +38,6 @@
 #include "sde_color_processing.h"
 #include "sde_hw_rot.h"
 
-static bool suspend_blank = true;
-module_param(suspend_blank, bool, 0400);
-MODULE_PARM_DESC(suspend_blank,
-		"If set, active planes will force their outputs to black,\n"
-		"by temporarily enabling the color fill, when recovering\n"
-		"from a system resume instead of attempting to display the\n"
-		"last provided frame buffer.");
-
 #define SDE_DEBUG_PLANE(pl, fmt, ...) SDE_DEBUG("plane%d " fmt,\
 		(pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
 
@@ -826,6 +818,7 @@
 				SDE_ERROR_PLANE(psde, "%ums timeout on %08X\n",
 						wait_ms, prefix);
 				psde->is_error = true;
+				sde_kms_timeline_status(plane->dev);
 				ret = -ETIMEDOUT;
 				break;
 			case -ERESTARTSYS:
@@ -1458,6 +1451,7 @@
 	const struct sde_format *fmt;
 	const struct drm_plane *plane;
 	struct sde_plane_state *pstate;
+	bool blend_enable = true;
 
 	if (!psde || !psde->base.state) {
 		SDE_ERROR("invalid plane\n");
@@ -1480,6 +1474,9 @@
 	 */
 	fmt = sde_get_sde_format(DRM_FORMAT_ABGR8888);
 
+	blend_enable = (SDE_DRM_BLEND_OP_OPAQUE !=
+			sde_plane_get_property(pstate, PLANE_PROP_BLEND_OP));
+
 	/* update sspp */
 	if (fmt && psde->pipe_hw->ops.setup_solidfill) {
 		psde->pipe_hw->ops.setup_solidfill(psde->pipe_hw,
@@ -1495,7 +1492,7 @@
 
 		if (psde->pipe_hw->ops.setup_format)
 			psde->pipe_hw->ops.setup_format(psde->pipe_hw,
-					fmt, SDE_SSPP_SOLID_FILL,
+					fmt, blend_enable, SDE_SSPP_SOLID_FILL,
 					pstate->multirect_index);
 
 		if (psde->pipe_hw->ops.setup_rects)
@@ -3499,10 +3496,6 @@
 	else if (psde->pipe_hw && psde->csc_ptr && psde->pipe_hw->ops.setup_csc)
 		psde->pipe_hw->ops.setup_csc(psde->pipe_hw, psde->csc_ptr);
 
-	/* force black color fill during suspend */
-	if (sde_kms_is_suspend_state(plane->dev) && suspend_blank)
-		_sde_plane_color_fill(psde, 0x0, 0x0);
-
 	/* flag h/w flush complete */
 	if (plane->state)
 		pstate->pending = false;
@@ -3538,6 +3531,7 @@
 	struct sde_rect src, dst;
 	const struct sde_rect *crtc_roi;
 	bool q16_data = true;
+	bool blend_enabled = true;
 	int idx;
 
 	if (!plane) {
@@ -3772,8 +3766,12 @@
 		if (rstate->out_rotation & DRM_REFLECT_Y)
 			src_flags |= SDE_SSPP_FLIP_UD;
 
+		blend_enabled = (SDE_DRM_BLEND_OP_OPAQUE !=
+			sde_plane_get_property(pstate, PLANE_PROP_BLEND_OP));
+
 		/* update format */
-		psde->pipe_hw->ops.setup_format(psde->pipe_hw, fmt, src_flags,
+		psde->pipe_hw->ops.setup_format(psde->pipe_hw, fmt,
+				blend_enabled, src_flags,
 				pstate->multirect_index);
 
 		if (psde->pipe_hw->ops.setup_cdp) {
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.c b/drivers/gpu/drm/msm/sde/sde_rm.c
index 21fbcb5..c2c1f75 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.c
+++ b/drivers/gpu/drm/msm/sde/sde_rm.c
@@ -175,6 +175,18 @@
 	iter->type = type;
 }
 
+enum sde_rm_topology_name sde_rm_get_topology_name(
+	struct msm_display_topology topology)
+{
+	int i;
+
+	for (i = 0; i < SDE_RM_TOPOLOGY_MAX; i++)
+		if (RM_IS_TOPOLOGY_MATCH(g_top_table[i], topology))
+			return g_top_table[i].top_name;
+
+	return SDE_RM_TOPOLOGY_NONE;
+}
+
 static bool _sde_rm_get_hw_locked(struct sde_rm *rm, struct sde_rm_hw_iter *i)
 {
 	struct list_head *blk_list;
@@ -714,7 +726,8 @@
 static int _sde_rm_reserve_lms(
 		struct sde_rm *rm,
 		struct sde_rm_rsvp *rsvp,
-		struct sde_rm_requirements *reqs)
+		struct sde_rm_requirements *reqs,
+		u8 *_lm_ids)
 
 {
 	struct sde_rm_hw_blk *lm[MAX_BLOCKS];
@@ -742,6 +755,14 @@
 		lm_count = 0;
 		lm[lm_count] = iter_i.blk;
 
+		SDE_DEBUG("blk id = %d, _lm_ids[%d] = %d\n",
+			iter_i.blk->id,
+			lm_count,
+			_lm_ids ? _lm_ids[lm_count] : -1);
+
+		if (_lm_ids && (lm[lm_count])->id != _lm_ids[lm_count])
+			continue;
+
 		if (!_sde_rm_check_lm_and_get_connected_blks(
 				rm, rsvp, reqs, lm[lm_count],
 				&dspp[lm_count], &ds[lm_count],
@@ -765,6 +786,14 @@
 				continue;
 
 			lm[lm_count] = iter_j.blk;
+			SDE_DEBUG("blk id = %d, _lm_ids[%d] = %d\n",
+				iter_i.blk->id,
+				lm_count,
+				_lm_ids ? _lm_ids[lm_count] : -1);
+
+			if (_lm_ids && (lm[lm_count])->id != _lm_ids[lm_count])
+				continue;
+
 			++lm_count;
 		}
 	}
@@ -818,7 +847,8 @@
 		struct sde_rm *rm,
 		struct sde_rm_rsvp *rsvp,
 		struct sde_rm_requirements *reqs,
-		const struct sde_rm_topology_def *top)
+		const struct sde_rm_topology_def *top,
+		u8 *_ctl_ids)
 {
 	struct sde_rm_hw_blk *ctls[MAX_BLOCKS];
 	struct sde_rm_hw_iter iter;
@@ -845,7 +875,7 @@
 		 * bypass rest feature checks on finding CTL preferred
 		 * for primary displays.
 		 */
-		if (!primary_pref) {
+		if (!primary_pref && !_ctl_ids) {
 			if (top->needs_split_display != has_split_display)
 				continue;
 
@@ -860,6 +890,14 @@
 		}
 
 		ctls[i] = iter.blk;
+
+		SDE_DEBUG("blk id = %d, _ctl_ids[%d] = %d\n",
+			iter.blk->id, i,
+			_ctl_ids ? _ctl_ids[i] : -1);
+
+		if (_ctl_ids && (ctls[i]->id != _ctl_ids[i]))
+			continue;
+
 		SDE_DEBUG("ctl %d match\n", iter.blk->id);
 
 		if (++i == top->num_ctl)
@@ -880,7 +918,8 @@
 static int _sde_rm_reserve_dsc(
 		struct sde_rm *rm,
 		struct sde_rm_rsvp *rsvp,
-		const struct sde_rm_topology_def *top)
+		const struct sde_rm_topology_def *top,
+		u8 *_dsc_ids)
 {
 	struct sde_rm_hw_iter iter;
 	int alloc_count = 0;
@@ -895,6 +934,14 @@
 		if (RESERVED_BY_OTHER(iter.blk, rsvp))
 			continue;
 
+		SDE_DEBUG("blk id = %d, _dsc_ids[%d] = %d\n",
+			iter.blk->id,
+			alloc_count,
+			_dsc_ids ? _dsc_ids[alloc_count] : -1);
+
+		if (_dsc_ids && (iter.blk->id != _dsc_ids[alloc_count]))
+			continue;
+
 		iter.blk->rsvp_nxt = rsvp;
 		SDE_EVT32(iter.blk->type, rsvp->enc_id, iter.blk->id);
 
@@ -1043,10 +1090,10 @@
 	 * - Check mixers without DSPPs
 	 * - Only then allow to grab from mixers with DSPP capability
 	 */
-	ret = _sde_rm_reserve_lms(rm, rsvp, reqs);
+	ret = _sde_rm_reserve_lms(rm, rsvp, reqs, NULL);
 	if (ret && !RM_RQ_DSPP(reqs)) {
 		reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DSPP);
-		ret = _sde_rm_reserve_lms(rm, rsvp, reqs);
+		ret = _sde_rm_reserve_lms(rm, rsvp, reqs, NULL);
 	}
 
 	if (ret) {
@@ -1059,11 +1106,11 @@
 	 * - Check mixers without Split Display
 	 * - Only then allow to grab from CTLs with split display capability
 	 */
-	_sde_rm_reserve_ctls(rm, rsvp, reqs, reqs->topology);
+	_sde_rm_reserve_ctls(rm, rsvp, reqs, reqs->topology, NULL);
 	if (ret && !reqs->topology->needs_split_display) {
 		memcpy(&topology, reqs->topology, sizeof(topology));
 		topology.needs_split_display = true;
-		_sde_rm_reserve_ctls(rm, rsvp, reqs, &topology);
+		_sde_rm_reserve_ctls(rm, rsvp, reqs, &topology, NULL);
 	}
 	if (ret) {
 		SDE_ERROR("unable to find appropriate CTL\n");
@@ -1075,7 +1122,104 @@
 	if (ret)
 		return ret;
 
-	ret = _sde_rm_reserve_dsc(rm, rsvp, reqs->topology);
+	ret = _sde_rm_reserve_dsc(rm, rsvp, reqs->topology, NULL);
+	if (ret)
+		return ret;
+
+	return ret;
+}
+
+static int _sde_rm_make_next_rsvp_for_cont_splash(
+		struct sde_rm *rm,
+		struct drm_encoder *enc,
+		struct drm_crtc_state *crtc_state,
+		struct drm_connector_state *conn_state,
+		struct sde_rm_rsvp *rsvp,
+		struct sde_rm_requirements *reqs)
+{
+	int ret;
+	struct sde_rm_topology_def topology;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+	int i;
+
+	if (!enc->dev || !enc->dev->dev_private) {
+		SDE_ERROR("drm device invalid\n");
+		return -EINVAL;
+	}
+	priv = enc->dev->dev_private;
+	if (!priv->kms) {
+		SDE_ERROR("invalid kms\n");
+		return -EINVAL;
+	}
+	sde_kms = to_sde_kms(priv->kms);
+
+	for (i = 0; i < sde_kms->splash_data.lm_cnt; i++)
+		SDE_DEBUG("splash_data.lm_ids[%d] = %d\n",
+			i, sde_kms->splash_data.lm_ids[i]);
+
+	if (sde_kms->splash_data.lm_cnt !=
+			reqs->topology->num_lm)
+		SDE_DEBUG("Configured splash screen LMs != needed LM cnt\n");
+
+	/* Create reservation info, tag reserved blocks with it as we go */
+	rsvp->seq = ++rm->rsvp_next_seq;
+	rsvp->enc_id = enc->base.id;
+	rsvp->topology = reqs->topology->top_name;
+	list_add_tail(&rsvp->list, &rm->rsvps);
+
+	/*
+	 * Assign LMs and blocks whose usage is tied to them: DSPP & Pingpong.
+	 * Do assignment preferring to give away low-resource mixers first:
+	 * - Check mixers without DSPPs
+	 * - Only then allow to grab from mixers with DSPP capability
+	 */
+	ret = _sde_rm_reserve_lms(rm, rsvp, reqs,
+				sde_kms->splash_data.lm_ids);
+	if (ret && !RM_RQ_DSPP(reqs)) {
+		reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DSPP);
+		ret = _sde_rm_reserve_lms(rm, rsvp, reqs,
+					sde_kms->splash_data.lm_ids);
+	}
+
+	if (ret) {
+		SDE_ERROR("unable to find appropriate mixers\n");
+		return ret;
+	}
+
+	/*
+	 * Do assignment preferring to give away low-resource CTLs first:
+	 * - Check mixers without Split Display
+	 * - Only then allow to grab from CTLs with split display capability
+	 */
+	for (i = 0; i < sde_kms->splash_data.ctl_top_cnt; i++)
+		SDE_DEBUG("splash_data.ctl_ids[%d] = %d\n",
+			i, sde_kms->splash_data.ctl_ids[i]);
+
+	ret = _sde_rm_reserve_ctls(rm, rsvp, reqs, reqs->topology,
+			sde_kms->splash_data.ctl_ids);
+	if (ret && !reqs->topology->needs_split_display) {
+		memcpy(&topology, reqs->topology, sizeof(topology));
+		topology.needs_split_display = true;
+		ret = _sde_rm_reserve_ctls(rm, rsvp, reqs, &topology,
+				sde_kms->splash_data.ctl_ids);
+	}
+	if (ret) {
+		SDE_ERROR("unable to find appropriate CTL\n");
+		return ret;
+	}
+
+	/* Assign INTFs, WBs, and blks whose usage is tied to them: CTL & CDM */
+	ret = _sde_rm_reserve_intf_related_hw(rm, rsvp, &reqs->hw_res);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < sde_kms->splash_data.dsc_cnt; i++)
+		SDE_DEBUG("splash_data.dsc_ids[%d] = %d\n",
+			i, sde_kms->splash_data.dsc_ids[i]);
+
+	ret = _sde_rm_reserve_dsc(rm, rsvp, reqs->topology,
+				sde_kms->splash_data.dsc_ids);
 	if (ret)
 		return ret;
 
@@ -1319,6 +1463,8 @@
 {
 	struct sde_rm_rsvp *rsvp_cur, *rsvp_nxt;
 	struct sde_rm_requirements reqs;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
 	int ret;
 
 	if (!rm || !enc || !crtc_state || !conn_state) {
@@ -1326,8 +1472,20 @@
 		return -EINVAL;
 	}
 
+	if (!enc->dev || !enc->dev->dev_private) {
+		SDE_ERROR("drm device invalid\n");
+		return -EINVAL;
+	}
+	priv = enc->dev->dev_private;
+	if (!priv->kms) {
+		SDE_ERROR("invalid kms\n");
+		return -EINVAL;
+	}
+	sde_kms = to_sde_kms(priv->kms);
+
 	/* Check if this is just a page-flip */
-	if (!drm_atomic_crtc_needs_modeset(crtc_state))
+	if (!sde_kms->cont_splash_en &&
+			!drm_atomic_crtc_needs_modeset(crtc_state))
 		return 0;
 
 	SDE_DEBUG("reserving hw for conn %d enc %d crtc %d test_only %d\n",
@@ -1378,8 +1536,14 @@
 	}
 
 	/* Check the proposed reservation, store it in hw's "next" field */
-	ret = _sde_rm_make_next_rsvp(rm, enc, crtc_state, conn_state,
+	if (sde_kms->cont_splash_en) {
+		SDE_DEBUG("cont_splash feature enabled\n");
+		ret = _sde_rm_make_next_rsvp_for_cont_splash(rm, enc,
+				crtc_state, conn_state, rsvp_nxt, &reqs);
+	} else {
+		ret = _sde_rm_make_next_rsvp(rm, enc, crtc_state, conn_state,
 			rsvp_nxt, &reqs);
+	}
 
 	_sde_rm_print_rsvps(rm, SDE_RM_STAGE_AFTER_RSVPNEXT);
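The new _lm_ids/_ctl_ids/_dsc_ids parameters threaded through the reservation helpers above let the continuous-splash path pin the reservation to exactly the blocks the bootloader already programmed: a candidate block is skipped unless its id matches the next pre-recorded id. A small standalone sketch of that filtering pattern, for illustration only (struct and function names here are invented):

	#include <stddef.h>
	#include <stdio.h>

	struct hw_blk {
		int id;
		int reserved;
	};

	/*
	 * Walk the candidate blocks in order and reserve them, but when a
	 * fixed id list is supplied (continuous-splash case), only accept the
	 * block whose id matches the next entry in that list -- mirroring the
	 * "continue" checks added to _sde_rm_reserve_lms/_ctls/_dsc.
	 */
	static int reserve_blocks(struct hw_blk *blks, size_t nblks,
				  const unsigned char *fixed_ids, size_t want)
	{
		size_t got = 0, i;

		for (i = 0; i < nblks && got < want; i++) {
			if (blks[i].reserved)
				continue;
			if (fixed_ids && blks[i].id != fixed_ids[got])
				continue;	/* not the block the splash config used */
			blks[i].reserved = 1;
			got++;
		}
		return got == want ? 0 : -1;
	}

	int main(void)
	{
		struct hw_blk lms[] = { {1, 0}, {2, 0}, {3, 0}, {4, 0} };
		const unsigned char splash_lm_ids[] = { 3 };	/* LM the bootloader used */

		/* normal path: first free block wins; splash path: only LM 3 is valid */
		printf("free pick: %d\n", reserve_blocks(lms, 4, NULL, 1));
		printf("pinned pick: %d\n", reserve_blocks(lms + 1, 3, splash_lm_ids, 1));
		return 0;
	}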
 
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.h b/drivers/gpu/drm/msm/sde/sde_rm.h
index 3b9b82f..0545609 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.h
+++ b/drivers/gpu/drm/msm/sde/sde_rm.h
@@ -107,6 +107,15 @@
 };
 
 /**
+ * sde_rm_get_topology_name - get the name of the given topology config
+ * @topology: msm_display_topology topology config
+ * @Return: name of the given topology
+ */
+enum sde_rm_topology_name sde_rm_get_topology_name(
+	struct msm_display_topology topology);
+
+
+/**
  * sde_rm_init - Read hardware catalog and create reservation tracking objects
  *	for all HW blocks.
  * @rm: SDE Resource Manager handle
diff --git a/drivers/gpu/drm/msm/sde/sde_wb.c b/drivers/gpu/drm/msm/sde/sde_wb.c
index 8076e0c..a4c8518 100644
--- a/drivers/gpu/drm/msm/sde/sde_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_wb.c
@@ -353,8 +353,7 @@
 }
 
 int sde_wb_connector_post_init(struct drm_connector *connector,
-		void *info,
-		void *display)
+		void *info, void *display, struct msm_mode_info *mode_info)
 {
 	struct sde_connector *c_conn;
 	struct sde_wb_device *wb_dev = display;
diff --git a/drivers/gpu/drm/msm/sde/sde_wb.h b/drivers/gpu/drm/msm/sde/sde_wb.h
index c3f9e06..5e31664 100644
--- a/drivers/gpu/drm/msm/sde/sde_wb.h
+++ b/drivers/gpu/drm/msm/sde/sde_wb.h
@@ -133,11 +133,13 @@
  * @connector: Pointer to drm connector structure
  * @info: Pointer to connector info
  * @display: Pointer to private display structure
+ * @mode_info: Pointer to the mode info structure
  * Returns: Zero on success
  */
 int sde_wb_connector_post_init(struct drm_connector *connector,
 		void *info,
-		void *display);
+		void *display,
+		struct msm_mode_info *mode_info);
 
 /**
  * sde_wb_connector_detect - perform writeback connection status detection
@@ -280,7 +282,8 @@
 static inline
 int sde_wb_connector_post_init(struct drm_connector *connector,
 		void *info,
-		void *display)
+		void *display,
+		struct msm_mode_info *mode_info)
 {
 	return 0;
 }
diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c
index c2ce9f0..295e841 100644
--- a/drivers/gpu/drm/msm/sde_dbg.c
+++ b/drivers/gpu/drm/msm/sde_dbg.c
@@ -2558,7 +2558,8 @@
 
 	mutex_lock(&sde_dbg_base.mutex);
 
-	sde_evtlog_dump_all(sde_dbg_base.evtlog);
+	if (dump_all)
+		sde_evtlog_dump_all(sde_dbg_base.evtlog);
 
 	if (dump_all || !blk_arr || !len) {
 		_sde_dump_reg_all();
@@ -2711,7 +2712,7 @@
 		return -EINVAL;
 
 	len = sde_evtlog_dump_to_buffer(sde_dbg_base.evtlog, evtlog_buf,
-			SDE_EVTLOG_BUF_MAX);
+			SDE_EVTLOG_BUF_MAX, true);
 	if (copy_to_user(buff, evtlog_buf, len))
 		return -EFAULT;
 	*ppos += len;
diff --git a/drivers/gpu/drm/msm/sde_dbg.h b/drivers/gpu/drm/msm/sde_dbg.h
index 786451c3..7b1b4c6 100644
--- a/drivers/gpu/drm/msm/sde_dbg.h
+++ b/drivers/gpu/drm/msm/sde_dbg.h
@@ -17,7 +17,8 @@
 #include <linux/debugfs.h>
 #include <linux/list.h>
 
-#define SDE_EVTLOG_DATA_LIMITER	(-1)
+/* select an uncommon hex value for the limiter */
+#define SDE_EVTLOG_DATA_LIMITER	(0xC0DEBEEF)
 #define SDE_EVTLOG_FUNC_ENTRY	0x1111
 #define SDE_EVTLOG_FUNC_EXIT	0x2222
 #define SDE_EVTLOG_FUNC_CASE1	0x3333
@@ -66,7 +67,7 @@
  * number must be greater than print entry to prevent out of bound evtlog
  * entry array access.
  */
-#define SDE_EVTLOG_ENTRY	(SDE_EVTLOG_PRINT_ENTRY * 4)
+#define SDE_EVTLOG_ENTRY	(SDE_EVTLOG_PRINT_ENTRY * 8)
 #define SDE_EVTLOG_MAX_DATA 15
 #define SDE_EVTLOG_BUF_MAX 512
 #define SDE_EVTLOG_BUF_ALIGN 32
@@ -87,12 +88,14 @@
 };
 
 /**
+ * @last_dump: Index of last entry to be output during evtlog dumps
  * @filter_list: Linked list of currently active filter strings
  */
 struct sde_dbg_evtlog {
 	struct sde_dbg_evtlog_log logs[SDE_EVTLOG_ENTRY];
 	u32 first;
 	u32 last;
+	u32 last_dump;
 	u32 curr;
 	u32 next;
 	u32 enable;
@@ -197,10 +200,12 @@
  * @evtlog:		pointer to evtlog
  * @evtlog_buf:		target buffer to print into
  * @evtlog_buf_size:	size of target buffer
+ * @update_last_entry:	whether to update the dump end marker to the newest entry
  * Returns:		number of bytes written to buffer
  */
 ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog,
-		char *evtlog_buf, ssize_t evtlog_buf_size);
+		char *evtlog_buf, ssize_t evtlog_buf_size,
+		bool update_last_entry);
 
 /**
  * sde_dbg_init_dbg_buses - initialize debug bus dumping support for the chipset
@@ -352,7 +357,8 @@
 }
 
 static inline ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog,
-		char *evtlog_buf, ssize_t evtlog_buf_size)
+		char *evtlog_buf, ssize_t evtlog_buf_size,
+		bool update_last_entry)
 {
 	return 0;
 }
diff --git a/drivers/gpu/drm/msm/sde_dbg_evtlog.c b/drivers/gpu/drm/msm/sde_dbg_evtlog.c
index 67c664f..9a75179 100644
--- a/drivers/gpu/drm/msm/sde_dbg_evtlog.c
+++ b/drivers/gpu/drm/msm/sde_dbg_evtlog.c
@@ -105,27 +105,32 @@
 }
 
 /* always dump the last entries which are not dumped yet */
-static bool _sde_evtlog_dump_calc_range(struct sde_dbg_evtlog *evtlog)
+static bool _sde_evtlog_dump_calc_range(struct sde_dbg_evtlog *evtlog,
+		bool update_last_entry)
 {
 	if (!evtlog)
 		return false;
 
 	evtlog->first = evtlog->next;
 
-	if (evtlog->last == evtlog->first)
+	if (update_last_entry)
+		evtlog->last_dump = evtlog->last;
+
+	if (evtlog->last_dump == evtlog->first)
 		return false;
 
-	if (evtlog->last < evtlog->first) {
+	if (evtlog->last_dump < evtlog->first) {
 		evtlog->first %= SDE_EVTLOG_ENTRY;
-		if (evtlog->last < evtlog->first)
-			evtlog->last += SDE_EVTLOG_ENTRY;
+		if (evtlog->last_dump < evtlog->first)
+			evtlog->last_dump += SDE_EVTLOG_ENTRY;
 	}
 
-	if ((evtlog->last - evtlog->first) > SDE_EVTLOG_PRINT_ENTRY) {
+	if ((evtlog->last_dump - evtlog->first) > SDE_EVTLOG_PRINT_ENTRY) {
 		pr_info("evtlog skipping %d entries, last=%d\n",
-			evtlog->last - evtlog->first - SDE_EVTLOG_PRINT_ENTRY,
-			evtlog->last - 1);
-		evtlog->first = evtlog->last - SDE_EVTLOG_PRINT_ENTRY;
+			evtlog->last_dump - evtlog->first -
+			SDE_EVTLOG_PRINT_ENTRY,
+			evtlog->last_dump - 1);
+		evtlog->first = evtlog->last_dump - SDE_EVTLOG_PRINT_ENTRY;
 	}
 	evtlog->next = evtlog->first + 1;
 
@@ -133,7 +138,8 @@
 }
 
 ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog,
-		char *evtlog_buf, ssize_t evtlog_buf_size)
+		char *evtlog_buf, ssize_t evtlog_buf_size,
+		bool update_last_entry)
 {
 	int i;
 	ssize_t off = 0;
@@ -146,7 +152,7 @@
 	spin_lock_irqsave(&evtlog->spin_lock, flags);
 
 	/* update markers, exit if nothing to print */
-	if (!_sde_evtlog_dump_calc_range(evtlog))
+	if (!_sde_evtlog_dump_calc_range(evtlog, update_last_entry))
 		goto exit;
 
 	log = &evtlog->logs[evtlog->first % SDE_EVTLOG_ENTRY];
@@ -179,12 +185,16 @@
 void sde_evtlog_dump_all(struct sde_dbg_evtlog *evtlog)
 {
 	char buf[SDE_EVTLOG_BUF_MAX];
+	bool update_last_entry = true;
 
 	if (!evtlog)
 		return;
 
-	while (sde_evtlog_dump_to_buffer(evtlog, buf, sizeof(buf)))
+	while (sde_evtlog_dump_to_buffer(
+				evtlog, buf, sizeof(buf), update_last_entry)) {
 		pr_info("%s", buf);
+		update_last_entry = false;
+	}
 }
 
 struct sde_dbg_evtlog *sde_evtlog_init(void)
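The last_dump marker added above exists because sde_evtlog_dump_all() drains the log in SDE_EVTLOG_BUF_MAX-sized chunks: only the first chunk snapshots the write cursor, so later chunks keep a stable end point even while new events keep arriving. A standalone sketch of the range calculation, for illustration only (the buffer sizes below are stand-ins for SDE_EVTLOG_ENTRY and SDE_EVTLOG_PRINT_ENTRY, not the real values):

	#include <stdio.h>

	#define ENTRY	256	/* stands in for SDE_EVTLOG_ENTRY */
	#define PRINT	64	/* stands in for SDE_EVTLOG_PRINT_ENTRY */

	/*
	 * Mirror of _sde_evtlog_dump_calc_range(): 'first'/'next' are read
	 * cursors, 'last' is the free-running write cursor, and 'last_dump'
	 * is the snapshot of 'last' taken on the first chunk of a dump so
	 * later chunks use the same end marker.
	 */
	struct evtlog { unsigned first, next, last, last_dump; };

	static int calc_range(struct evtlog *e, int update_last_entry)
	{
		e->first = e->next;

		if (update_last_entry)
			e->last_dump = e->last;

		if (e->last_dump == e->first)
			return 0;			/* nothing new to print */

		if (e->last_dump < e->first) {		/* write cursor wrapped */
			e->first %= ENTRY;
			if (e->last_dump < e->first)
				e->last_dump += ENTRY;
		}

		if (e->last_dump - e->first > PRINT) {	/* keep only the newest PRINT */
			printf("skipping %u entries\n",
			       e->last_dump - e->first - PRINT);
			e->first = e->last_dump - PRINT;
		}
		e->next = e->first + 1;
		return 1;
	}

	int main(void)
	{
		/* 1000 events logged so far, nothing dumped yet */
		struct evtlog e = { .first = 0, .next = 0, .last = 1000, .last_dump = 0 };

		calc_range(&e, 1);	/* first chunk snapshots last_dump = 1000 */
		printf("dump [%u..%u)\n", e.first, e.last_dump);
		return 0;
	}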
diff --git a/drivers/gpu/drm/msm/sde_edid_parser.c b/drivers/gpu/drm/msm/sde_edid_parser.c
index 791a6ca..c2ba3b97 100644
--- a/drivers/gpu/drm/msm/sde_edid_parser.c
+++ b/drivers/gpu/drm/msm/sde_edid_parser.c
@@ -566,45 +566,6 @@
 	return rc;
 }
 
-u32 sde_get_sink_bpc(void *input)
-{
-	struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(input);
-	struct edid *edid = edid_ctrl->edid;
-
-	if (!edid) {
-		SDE_ERROR("invalid edid input\n");
-		return 0;
-	}
-
-	if ((edid->revision < 3) || !(edid->input & DRM_EDID_INPUT_DIGITAL))
-		return 0;
-
-	if (edid->revision < 4) {
-		if (edid->input & DRM_EDID_DIGITAL_TYPE_DVI)
-			return 8;
-		else
-			return 0;
-	}
-
-	switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) {
-	case DRM_EDID_DIGITAL_DEPTH_6:
-		return 6;
-	case DRM_EDID_DIGITAL_DEPTH_8:
-		return 8;
-	case DRM_EDID_DIGITAL_DEPTH_10:
-		return 10;
-	case DRM_EDID_DIGITAL_DEPTH_12:
-		return 12;
-	case DRM_EDID_DIGITAL_DEPTH_14:
-		return 14;
-	case DRM_EDID_DIGITAL_DEPTH_16:
-		return 16;
-	case DRM_EDID_DIGITAL_DEPTH_UNDEF:
-	default:
-		return 0;
-	}
-}
-
 u8 sde_get_edid_checksum(void *input)
 {
 	struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(input);
diff --git a/drivers/gpu/drm/msm/sde_edid_parser.h b/drivers/gpu/drm/msm/sde_edid_parser.h
index 07bdf50..fd56116 100644
--- a/drivers/gpu/drm/msm/sde_edid_parser.h
+++ b/drivers/gpu/drm/msm/sde_edid_parser.h
@@ -138,14 +138,6 @@
 bool sde_detect_hdmi_monitor(void *edid_ctrl);
 
 /**
- * sde_get_sink_bpc() - return the bpc of sink device.
- * @edid_ctrl:     Handle to the edid_ctrl structure.
- *
- * Return: bpc supported by the sink.
- */
-u32 sde_get_sink_bpc(void *edid_ctrl);
-
-/**
  * sde_get_edid_checksum() - return the checksum of last block of EDID.
  * @input:     Handle to the edid_ctrl structure.
  *
diff --git a/drivers/gpu/drm/msm/sde_hdcp.h b/drivers/gpu/drm/msm/sde_hdcp.h
index 05d290b..6c44260 100644
--- a/drivers/gpu/drm/msm/sde_hdcp.h
+++ b/drivers/gpu/drm/msm/sde_hdcp.h
@@ -42,6 +42,10 @@
 
 struct sde_hdcp_init_data {
 	struct dss_io_data *core_io;
+	struct dss_io_data *dp_ahb;
+	struct dss_io_data *dp_aux;
+	struct dss_io_data *dp_link;
+	struct dss_io_data *dp_p0;
 	struct dss_io_data *qfprom_io;
 	struct dss_io_data *hdcp_io;
 	struct drm_dp_aux *drm_aux;
diff --git a/drivers/gpu/drm/msm/sde_hdcp_1x.c b/drivers/gpu/drm/msm/sde_hdcp_1x.c
index 3673d125..c012f9d 100644
--- a/drivers/gpu/drm/msm/sde_hdcp_1x.c
+++ b/drivers/gpu/drm/msm/sde_hdcp_1x.c
@@ -256,12 +256,15 @@
 	u32 ksv_lsb_addr, ksv_msb_addr;
 	u32 aksv_lsb, aksv_msb;
 	u8 aksv[5];
-	struct dss_io_data *io;
+	struct dss_io_data *dp_ahb;
+	struct dss_io_data *dp_aux;
+	struct dss_io_data *dp_link;
 	struct dss_io_data *qfprom_io;
 	struct sde_hdcp_1x *hdcp = input;
 	struct sde_hdcp_reg_set *reg_set;
 
-	if (!hdcp || !hdcp->init_data.core_io ||
+	if (!hdcp || !hdcp->init_data.dp_ahb ||
+		!hdcp->init_data.dp_aux ||
 		!hdcp->init_data.qfprom_io) {
 		pr_err("invalid input\n");
 		rc = -EINVAL;
@@ -276,7 +279,9 @@
 		goto end;
 	}
 
-	io = hdcp->init_data.core_io;
+	dp_ahb = hdcp->init_data.dp_ahb;
+	dp_aux = hdcp->init_data.dp_aux;
+	dp_link = hdcp->init_data.dp_link;
 	qfprom_io = hdcp->init_data.qfprom_io;
 	reg_set = &hdcp->reg_set;
 
@@ -327,18 +332,18 @@
 		goto end;
 	}
 
-	DSS_REG_W(io, reg_set->aksv_lsb, aksv_lsb);
-	DSS_REG_W(io, reg_set->aksv_msb, aksv_msb);
+	DSS_REG_W(dp_aux, reg_set->aksv_lsb, aksv_lsb);
+	DSS_REG_W(dp_aux, reg_set->aksv_msb, aksv_msb);
 
 	/* Setup seed values for random number An */
-	DSS_REG_W(io, reg_set->entropy_ctrl0, 0xB1FFB0FF);
-	DSS_REG_W(io, reg_set->entropy_ctrl1, 0xF00DFACE);
+	DSS_REG_W(dp_link, reg_set->entropy_ctrl0, 0xB1FFB0FF);
+	DSS_REG_W(dp_link, reg_set->entropy_ctrl1, 0xF00DFACE);
 
 	/* make sure hw is programmed */
 	wmb();
 
 	/* enable hdcp engine */
-	DSS_REG_W(io, reg_set->ctrl, 0x1);
+	DSS_REG_W(dp_ahb, reg_set->ctrl, 0x1);
 
 	hdcp->hdcp_state = HDCP_STATE_AUTHENTICATING;
 end:
@@ -415,7 +420,7 @@
 	struct dss_io_data *io;
 	struct sde_hdcp_int_set *isr;
 
-	io = hdcp->init_data.core_io;
+	io = hdcp->init_data.dp_ahb;
 	isr = &hdcp->int_set;
 
 	intr_reg = DSS_REG_R(io, isr->int_reg);
@@ -462,7 +467,8 @@
 	int rc;
 	u32 link0_status;
 	struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
-	struct dss_io_data *io = hdcp->init_data.core_io;
+	struct dss_io_data *dp_ahb = hdcp->init_data.dp_ahb;
+	struct dss_io_data *dp_aux = hdcp->init_data.dp_aux;
 
 	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
 		pr_err("invalid state\n");
@@ -470,7 +476,7 @@
 	}
 
 	/* Wait for HDCP keys to be checked and validated */
-	rc = readl_poll_timeout(io->base + reg_set->status, link0_status,
+	rc = readl_poll_timeout(dp_ahb->base + reg_set->status, link0_status,
 				((link0_status >> reg_set->keys_offset) & 0x7)
 					== HDCP_KEYS_STATE_VALID ||
 				!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING),
@@ -484,10 +490,10 @@
 	 * 1.1_Features turned off by default.
 	 * No need to write AInfo since 1.1_Features is disabled.
 	 */
-	DSS_REG_W(io, reg_set->data4, 0);
+	DSS_REG_W(dp_aux, reg_set->data4, 0);
 
 	/* Wait for An0 and An1 bit to be ready */
-	rc = readl_poll_timeout(io->base + reg_set->status, link0_status,
+	rc = readl_poll_timeout(dp_ahb->base + reg_set->status, link0_status,
 				(link0_status & (BIT(8) | BIT(9))) ||
 				!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING),
 				HDCP_POLL_SLEEP_US, HDCP_POLL_TIMEOUT_US);
@@ -554,7 +560,8 @@
 
 static int sde_hdcp_1x_read_an_aksv_from_hw(struct sde_hdcp_1x *hdcp)
 {
-	struct dss_io_data *io = hdcp->init_data.core_io;
+	struct dss_io_data *dp_ahb = hdcp->init_data.dp_ahb;
+	struct dss_io_data *dp_aux = hdcp->init_data.dp_aux;
 	struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
 
 	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
@@ -562,21 +569,21 @@
 		return -EINVAL;
 	}
 
-	hdcp->an_0 = DSS_REG_R(io, reg_set->data5);
+	hdcp->an_0 = DSS_REG_R(dp_ahb, reg_set->data5);
 	if (hdcp->init_data.client_id == HDCP_CLIENT_DP) {
 		udelay(1);
-		hdcp->an_0 = DSS_REG_R(io, reg_set->data5);
+		hdcp->an_0 = DSS_REG_R(dp_ahb, reg_set->data5);
 	}
 
-	hdcp->an_1 = DSS_REG_R(io, reg_set->data6);
+	hdcp->an_1 = DSS_REG_R(dp_ahb, reg_set->data6);
 	if (hdcp->init_data.client_id == HDCP_CLIENT_DP) {
 		udelay(1);
-		hdcp->an_1 = DSS_REG_R(io, reg_set->data6);
+		hdcp->an_1 = DSS_REG_R(dp_ahb, reg_set->data6);
 	}
 
 	/* Read AKSV */
-	hdcp->aksv_0 = DSS_REG_R(io, reg_set->data3);
-	hdcp->aksv_1 = DSS_REG_R(io, reg_set->data4);
+	hdcp->aksv_0 = DSS_REG_R(dp_aux, reg_set->data3);
+	hdcp->aksv_1 = DSS_REG_R(dp_aux, reg_set->data4);
 
 	return 0;
 }
@@ -649,7 +656,7 @@
 	u32 const r0_read_delay_us = 1;
 	u32 const r0_read_timeout_us = r0_read_delay_us * 10;
 	struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
-	struct dss_io_data *io = hdcp->init_data.core_io;
+	struct dss_io_data *io = hdcp->init_data.dp_ahb;
 
 	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
 		pr_err("invalid state\n");
@@ -910,7 +917,7 @@
 	int i, rc = 0;
 	u8 *ksv_fifo = hdcp->current_tp.ksv_list;
 	u32 ksv_bytes = hdcp->sink_addr.ksv_fifo.len;
-	struct dss_io_data *io = hdcp->init_data.core_io;
+	struct dss_io_data *io = hdcp->init_data.dp_ahb;
 	struct dss_io_data *sec_io = hdcp->init_data.hdcp_io;
 	struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
 	u32 sha_status = 0, status;
@@ -1087,7 +1094,8 @@
 
 static void sde_hdcp_1x_cache_topology(struct sde_hdcp_1x *hdcp)
 {
-	if (!hdcp || !hdcp->init_data.core_io) {
+	if (!hdcp || !hdcp->init_data.dp_ahb || !hdcp->init_data.dp_aux ||
+		!hdcp->init_data.dp_link || !hdcp->init_data.dp_p0) {
 		pr_err("invalid input\n");
 		return;
 	}
@@ -1146,6 +1154,7 @@
 		DSS_REG_W_ND(io, REG_HDMI_DDC_ARBITRATION, DSS_REG_R(io,
 				REG_HDMI_DDC_ARBITRATION) & ~(BIT(4)));
 	else if (hdcp->init_data.client_id == HDCP_CLIENT_DP) {
+		io = hdcp->init_data.dp_aux;
 		DSS_REG_W(io, DP_DP_HPD_REFTIMER, 0x10013);
 	}
 
@@ -1224,12 +1233,12 @@
 	struct sde_hdcp_int_set *isr;
 	u32 ret = 0, reg;
 
-	if (!hdcp || !hdcp->init_data.core_io) {
+	if (!hdcp || !hdcp->init_data.dp_ahb) {
 		pr_err("invalid input\n");
 		return -EINVAL;
 	}
 
-	io = hdcp->init_data.core_io;
+	io = hdcp->init_data.dp_ahb;
 	reg_set = &hdcp->reg_set;
 	isr = &hdcp->int_set;
 
@@ -1264,12 +1273,12 @@
 	int rc = 0;
 	u32 reg;
 
-	if (!hdcp || !hdcp->init_data.core_io) {
+	if (!hdcp || !hdcp->init_data.dp_ahb) {
 		pr_err("invalid input\n");
 		return;
 	}
 
-	io = hdcp->init_data.core_io;
+	io = hdcp->init_data.dp_ahb;
 	reg_set = &hdcp->reg_set;
 	isr = &hdcp->int_set;
 
@@ -1327,13 +1336,13 @@
 	struct sde_hdcp_reg_set *reg_set;
 	struct sde_hdcp_int_set *isr;
 
-	if (!hdcp || !hdcp->init_data.core_io) {
+	if (!hdcp || !hdcp->init_data.dp_ahb) {
 		pr_err("invalid input\n");
 		rc = -EINVAL;
 		goto error;
 	}
 
-	io = hdcp->init_data.core_io;
+	io = hdcp->init_data.dp_ahb;
 	reg_set = &hdcp->reg_set;
 	isr = &hdcp->int_set;
 
@@ -1531,8 +1540,7 @@
 		.off = sde_hdcp_1x_off
 	};
 
-	if (!init_data || !init_data->core_io || !init_data->qfprom_io ||
-		!init_data->mutex || !init_data->notify_status ||
+	if (!init_data || !init_data->mutex || !init_data->notify_status ||
 		!init_data->workq || !init_data->cb_data) {
 		pr_err("invalid input\n");
 		goto error;
diff --git a/drivers/gpu/drm/msm/sde_power_handle.c b/drivers/gpu/drm/msm/sde_power_handle.c
index 7a0da3d..43fcf0d 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.c
+++ b/drivers/gpu/drm/msm/sde_power_handle.c
@@ -360,7 +360,7 @@
 		ab_quota_nrt = max_t(u64, ab_quota_nrt,
 				SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA);
 		ib_quota_nrt = max_t(u64, ib_quota_nrt,
-				SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA);
+				SDE_POWER_HANDLE_ENABLE_NRT_BUS_IB_QUOTA);
 	} else {
 		ab_quota_rt = min_t(u64, ab_quota_rt,
 				SDE_POWER_HANDLE_DISABLE_BUS_AB_QUOTA);
@@ -379,30 +379,30 @@
 		struct msm_bus_vectors *vect = NULL;
 		struct msm_bus_scale_pdata *bw_table =
 			pdbus->data_bus_scale_table;
-		u32 nrt_axi_port_cnt = pdbus->nrt_axi_port_cnt;
-		u32 total_axi_port_cnt = pdbus->axi_port_cnt;
-		u32 rt_axi_port_cnt = total_axi_port_cnt - nrt_axi_port_cnt;
+		u32 nrt_data_paths_cnt = pdbus->nrt_data_paths_cnt;
+		u32 total_data_paths_cnt = pdbus->data_paths_cnt;
+		u32 rt_data_paths_cnt = total_data_paths_cnt -
+			nrt_data_paths_cnt;
 
-		if (!bw_table || !total_axi_port_cnt ||
-		    total_axi_port_cnt > MAX_AXI_PORT_COUNT) {
+		if (!bw_table || !total_data_paths_cnt ||
+		    total_data_paths_cnt > MAX_AXI_PORT_COUNT) {
 			pr_err("invalid input\n");
 			return -EINVAL;
 		}
 
-		if (pdbus->bus_channels) {
+		if (nrt_data_paths_cnt) {
+
+			ab_quota_rt = div_u64(ab_quota_rt, rt_data_paths_cnt);
+			ab_quota_nrt = div_u64(ab_quota_nrt,
+						nrt_data_paths_cnt);
+
 			ib_quota_rt = div_u64(ib_quota_rt,
-						pdbus->bus_channels);
+						rt_data_paths_cnt);
 			ib_quota_nrt = div_u64(ib_quota_nrt,
-						pdbus->bus_channels);
-		}
+						nrt_data_paths_cnt);
 
-		if (nrt_axi_port_cnt) {
-
-			ab_quota_rt = div_u64(ab_quota_rt, rt_axi_port_cnt);
-			ab_quota_nrt = div_u64(ab_quota_nrt, nrt_axi_port_cnt);
-
-			for (i = 0; i < total_axi_port_cnt; i++) {
-				if (i < rt_axi_port_cnt) {
+			for (i = 0; i < total_data_paths_cnt; i++) {
+				if (i < rt_data_paths_cnt) {
 					ab_quota[i] = ab_quota_rt;
 					ib_quota[i] = ib_quota_rt;
 				} else {
@@ -412,10 +412,11 @@
 			}
 		} else {
 			ab_quota[0] = div_u64(ab_quota_rt + ab_quota_nrt,
-					total_axi_port_cnt);
-			ib_quota[0] = ib_quota_rt + ib_quota_nrt;
+					total_data_paths_cnt);
+			ib_quota[0] = div_u64(ib_quota_rt + ib_quota_nrt,
+					total_data_paths_cnt);
 
-			for (i = 1; i < total_axi_port_cnt; i++) {
+			for (i = 1; i < total_data_paths_cnt; i++) {
 				ab_quota[i] = ab_quota[0];
 				ib_quota[i] = ib_quota[0];
 			}
@@ -424,7 +425,7 @@
 		new_uc_idx = (pdbus->curr_bw_uc_idx %
 			(bw_table->num_usecases - 1)) + 1;
 
-		for (i = 0; i < total_axi_port_cnt; i++) {
+		for (i = 0; i < total_data_paths_cnt; i++) {
 			vect = &bw_table->usecase[new_uc_idx].vectors[i];
 			vect->ab = ab_quota[i];
 			vect->ib = ib_quota[i];
@@ -432,8 +433,8 @@
 			pr_debug(
 				"%s uc_idx=%d %s path idx=%d ab=%llu ib=%llu\n",
 				bw_table->name,
-				new_uc_idx, (i < rt_axi_port_cnt) ? "rt" : "nrt"
-				, i, vect->ab, vect->ib);
+				new_uc_idx, (i < rt_data_paths_cnt) ?
+				"rt" : "nrt", i, vect->ab, vect->ib);
 		}
 	}
 	pdbus->curr_bw_uc_idx = new_uc_idx;
@@ -518,10 +519,10 @@
 		rc = 0;
 	}
 
-	pdbus->nrt_axi_port_cnt = 0;
+	pdbus->nrt_data_paths_cnt = 0;
 	rc = of_property_read_u32(pdev->dev.of_node,
 			"qcom,sde-num-nrt-paths",
-			&pdbus->nrt_axi_port_cnt);
+			&pdbus->nrt_data_paths_cnt);
 	if (rc) {
 		pr_debug("number of axi port property not specified\n");
 		rc = 0;
@@ -535,7 +536,7 @@
 			pr_err("Error. qcom,msm-bus,num-paths not found\n");
 			return rc;
 		}
-		pdbus->axi_port_cnt = paths;
+		pdbus->data_paths_cnt = paths;
 
 		pdbus->data_bus_scale_table =
 				msm_bus_pdata_from_node(pdev, node);
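The quota rework above changes how bandwidth is spread over the bus data paths: when NRT paths exist, both the AB (average) and IB (instantaneous) quotas are now divided per path within each class, instead of replicating the full IB value, and the no-NRT case likewise divides the combined IB across all paths. A worked standalone sketch of the NRT-present split, for illustration only (the real driver operates on u64 values with div_u64; the path counts and bandwidth numbers here are made up):

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Split aggregated AB/IB bandwidth quotas across real-time and
	 * non-real-time data paths the way the reworked
	 * _sde_power_data_bus_set_quota() does when NRT paths exist.
	 * Assumes both path counts are non-zero, as in that branch.
	 */
	static void split_quota(uint64_t ab_rt, uint64_t ib_rt,
				uint64_t ab_nrt, uint64_t ib_nrt,
				unsigned rt_paths, unsigned nrt_paths,
				uint64_t *ab, uint64_t *ib)
	{
		unsigned total = rt_paths + nrt_paths, i;

		for (i = 0; i < total; i++) {
			if (i < rt_paths) {
				ab[i] = ab_rt / rt_paths;
				ib[i] = ib_rt / rt_paths;
			} else {
				ab[i] = ab_nrt / nrt_paths;
				ib[i] = ib_nrt / nrt_paths;
			}
		}
	}

	int main(void)
	{
		uint64_t ab[3], ib[3];
		unsigned i;

		/* 2 RT paths + 1 NRT path, 800 MB/s AB and 400 MB/s IB on the RT side */
		split_quota(800000000ULL, 400000000ULL, 100000000ULL, 0ULL, 2, 1, ab, ib);
		for (i = 0; i < 3; i++)
			printf("path %u: ab=%llu ib=%llu\n", i,
			       (unsigned long long)ab[i], (unsigned long long)ib[i]);
		return 0;
	}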
diff --git a/drivers/gpu/drm/msm/sde_power_handle.h b/drivers/gpu/drm/msm/sde_power_handle.h
index 6e00184..9cc78aa 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.h
+++ b/drivers/gpu/drm/msm/sde_power_handle.h
@@ -18,7 +18,8 @@
 
 #define SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA	0
 #define SDE_POWER_HANDLE_DISABLE_BUS_AB_QUOTA	0
-#define SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA	400000000
+#define SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA		400000000
+#define SDE_POWER_HANDLE_ENABLE_NRT_BUS_IB_QUOTA	0
 #define SDE_POWER_HANDLE_DISABLE_BUS_IB_QUOTA	0
 
 #include <linux/sde_io_util.h>
@@ -101,8 +102,8 @@
  * struct sde_power_data_handle: power handle struct for data bus
  * @data_bus_scale_table: pointer to bus scaling table
  * @data_bus_hdl: current data bus handle
- * @axi_port_cnt: number of rt axi ports
- * @nrt_axi_port_cnt: number of nrt axi ports
+ * @data_paths_cnt: number of rt data path ports
+ * @nrt_data_paths_cnt: number of nrt data path ports
  * @bus_channels: number of memory bus channels
  * @curr_bw_uc_idx: current use case index of data bus
  * @ao_bw_uc_idx: active only use case index of data bus
@@ -115,8 +116,8 @@
 struct sde_power_data_bus_handle {
 	struct msm_bus_scale_pdata *data_bus_scale_table;
 	u32 data_bus_hdl;
-	u32 axi_port_cnt;
-	u32 nrt_axi_port_cnt;
+	u32 data_paths_cnt;
+	u32 nrt_data_paths_cnt;
 	u32 bus_channels;
 	u32 curr_bw_uc_idx;
 	u32 ao_bw_uc_idx;
diff --git a/drivers/gpu/drm/msm/sde_rsc_hw.c b/drivers/gpu/drm/msm/sde_rsc_hw.c
index e957779..654a2ad 100644
--- a/drivers/gpu/drm/msm/sde_rsc_hw.c
+++ b/drivers/gpu/drm/msm/sde_rsc_hw.c
@@ -187,34 +187,34 @@
 						0x39e038a8, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x10,
 						0x888babec, rsc->debug_mode);
-	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x14,
-						0xa806a020, rsc->debug_mode);
 
 	/* Mode - 2 sequence */
+	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x14,
+						0xaaa8a020, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x18,
-						0xa138ebaa, rsc->debug_mode);
+						0xe1a138eb, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x1c,
-						0xaca581e1, rsc->debug_mode);
+						0xe0aca581, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x20,
-						0xe2a2ede0, rsc->debug_mode);
+						0x82e2a2ed, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x24,
-						0xea8a3982, rsc->debug_mode);
+						0x8cea8a39, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x28,
-						0xa920888c, rsc->debug_mode);
+						0xe9a92088, rsc->debug_mode);
 
-	/* tcs sleep sequence */
+	/* tcs sleep & wake sequence */
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x2c,
-						0x89e6a6e9, rsc->debug_mode);
+						0x2089e6a6, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x30,
-						0xa7e9a920, rsc->debug_mode);
+						0xe7a7e9a9, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x34,
-						0x002089e7, rsc->debug_mode);
+						0x00002089, rsc->debug_mode);
 
 	/* branch address */
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_CFG_BR_ADDR_0_DRV0,
-						0x2b, rsc->debug_mode);
+						0x2a, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_CFG_BR_ADDR_1_DRV0,
-						0x31, rsc->debug_mode);
+						0x30, rsc->debug_mode);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
index 8e2e24a..44e116f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
@@ -39,5 +39,5 @@
 g84_bsp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine)
 {
 	return nvkm_xtensa_new_(&g84_bsp, device, index,
-				true, 0x103000, pengine);
+				device->chipset != 0x92, 0x103000, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index 5df9669..240872a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -240,6 +240,8 @@
 			mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
 		}
 
+		mmu->func->flush(vm);
+
 		nvkm_memory_del(&pgt);
 	}
 }
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index 728e897..b75ecdf 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -764,6 +764,12 @@
 #define A6XX_VBIF_PERF_PWR_CNT_HIGH1            0x3119
 #define A6XX_VBIF_PERF_PWR_CNT_HIGH2            0x311a
 
+/* GBIF countables */
+#define GBIF_AXI0_READ_DATA_TOTAL_BEATS    34
+#define GBIF_AXI1_READ_DATA_TOTAL_BEATS    35
+#define GBIF_AXI0_WRITE_DATA_TOTAL_BEATS   46
+#define GBIF_AXI1_WRITE_DATA_TOTAL_BEATS   47
+
 /* GBIF registers */
 #define A6XX_GBIF_HALT                    0x3c45
 #define A6XX_GBIF_HALT_ACK                0x3c46
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index a56a593..d0e6d73 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -346,7 +346,7 @@
 		.major = 3,
 		.minor = 0,
 		.patchid = ANY_ID,
-		.features = ADRENO_64BIT | ADRENO_RPMH |
+		.features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_IFPC |
 			ADRENO_GPMU | ADRENO_CONTENT_PROTECTION | ADRENO_LM,
 		.sqefw_name = "a630_sqe.fw",
 		.zap_name = "a630_zap",
@@ -355,8 +355,8 @@
 		.num_protected_regs = 0x20,
 		.busy_mask = 0xFFFFFFFE,
 		.gpmufw_name = "a630_gmu.bin",
-		.gpmu_major = 0x0,
-		.gpmu_minor = 0x005,
+		.gpmu_major = 0x1,
+		.gpmu_minor = 0x001,
 		.gpmu_tsens = 0x000C000D,
 		.max_power = 5448,
 	},
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index ae5a78d..9f09aba 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -17,6 +17,7 @@
 #include <linux/of_device.h>
 #include <linux/delay.h>
 #include <linux/input.h>
+#include <linux/io.h>
 #include <soc/qcom/scm.h>
 
 #include <linux/msm-bus-board.h>
@@ -36,6 +37,7 @@
 #include "adreno_trace.h"
 
 #include "a3xx_reg.h"
+#include "a6xx_reg.h"
 #include "adreno_snapshot.h"
 
 /* Include the master list of GPU cores that are supported */
@@ -610,7 +612,7 @@
 	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 	struct adreno_irq *irq_params = gpudev->irq;
 	irqreturn_t ret = IRQ_NONE;
-	unsigned int status = 0, fence = 0, tmp, int_bit;
+	unsigned int status = 0, fence = 0, fence_retries = 0, tmp, int_bit;
 	int i;
 
 	atomic_inc(&adreno_dev->pending_irq_refcnt);
@@ -627,13 +629,24 @@
 
 	/*
 	 * If the AHB fence is not in ALLOW mode when we receive an RBBM
-	 * interrupt, something went wrong. Set a fault and change the
-	 * fence to ALLOW so we can clear the interrupt.
+	 * interrupt, something went wrong. This means that we cannot proceed
+	 * since the IRQ status and clear registers are not accessible.
+	 * This is usually harmless because the GMU will abort power collapse
+	 * and change the fence back to ALLOW. Poll so that this can happen.
 	 */
-	adreno_readreg(adreno_dev, ADRENO_REG_GMU_AO_AHB_FENCE_CTRL, &fence);
-	if (fence != 0) {
-		KGSL_DRV_CRIT_RATELIMIT(device, "AHB fence is stuck in ISR\n");
-		return ret;
+	if (kgsl_gmu_isenabled(device)) {
+		do {
+			adreno_readreg(adreno_dev,
+					ADRENO_REG_GMU_AO_AHB_FENCE_CTRL,
+					&fence);
+
+			if (fence_retries == FENCE_RETRY_MAX) {
+				KGSL_DRV_CRIT_RATELIMIT(device,
+						"AHB fence stuck in ISR\n");
+				return ret;
+			}
+			fence_retries++;
+		} while (fence != 0);
 	}
 
 	adreno_readreg(adreno_dev, ADRENO_REG_RBBM_INT_0_STATUS, &status);
@@ -1022,6 +1035,28 @@
 }
 #endif
 
+static void adreno_cx_dbgc_probe(struct kgsl_device *device)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct resource *res;
+
+	res = platform_get_resource_byname(device->pdev, IORESOURCE_MEM,
+					   "kgsl_3d0_cx_dbgc_memory");
+
+	if (res == NULL)
+		return;
+
+	adreno_dev->cx_dbgc_base = res->start - device->reg_phys;
+	adreno_dev->cx_dbgc_len = resource_size(res);
+	adreno_dev->cx_dbgc_virt = devm_ioremap(device->dev,
+					device->reg_phys +
+						adreno_dev->cx_dbgc_base,
+					adreno_dev->cx_dbgc_len);
+
+	if (adreno_dev->cx_dbgc_virt == NULL)
+		KGSL_DRV_WARN(device, "cx_dbgc ioremap failed\n");
+}
+
 static int adreno_probe(struct platform_device *pdev)
 {
 	struct kgsl_device *device;
@@ -1072,6 +1107,9 @@
 		return status;
 	}
 
+	/* Probe for the optional CX_DBGC block */
+	adreno_cx_dbgc_probe(device);
+
 	/*
 	 * qcom,iommu-secure-id is used to identify MMUs that can handle secure
 	 * content but that is only part of the story - the GPU also has to be
@@ -1589,26 +1627,104 @@
 			}
 		}
 
-		/* VBIF DDR cycles */
-		if (adreno_dev->ram_cycles_lo == 0) {
-			ret = adreno_perfcounter_get(adreno_dev,
-				KGSL_PERFCOUNTER_GROUP_VBIF,
-				VBIF_AXI_TOTAL_BEATS,
-				&adreno_dev->ram_cycles_lo, NULL,
-				PERFCOUNTER_FLAG_KERNEL);
+		if (adreno_has_gbif(adreno_dev)) {
+			if (adreno_dev->starved_ram_lo_ch1 == 0) {
+				ret = adreno_perfcounter_get(adreno_dev,
+					KGSL_PERFCOUNTER_GROUP_VBIF_PWR, 1,
+					&adreno_dev->starved_ram_lo_ch1, NULL,
+					PERFCOUNTER_FLAG_KERNEL);
 
-			if (ret) {
-				KGSL_DRV_ERR(device,
-					"Unable to get perf counters for bus DCVS\n");
-				adreno_dev->ram_cycles_lo = 0;
+				if (ret) {
+					KGSL_DRV_ERR(device,
+						"Unable to get perf counters for bus DCVS\n");
+					adreno_dev->starved_ram_lo_ch1 = 0;
+				}
+			}
+
+			if (adreno_dev->ram_cycles_lo == 0) {
+				ret = adreno_perfcounter_get(adreno_dev,
+					KGSL_PERFCOUNTER_GROUP_VBIF,
+					GBIF_AXI0_READ_DATA_TOTAL_BEATS,
+					&adreno_dev->ram_cycles_lo, NULL,
+					PERFCOUNTER_FLAG_KERNEL);
+
+				if (ret) {
+					KGSL_DRV_ERR(device,
+						"Unable to get perf counters for bus DCVS\n");
+					adreno_dev->ram_cycles_lo = 0;
+				}
+			}
+
+			if (adreno_dev->ram_cycles_lo_ch1_read == 0) {
+				ret = adreno_perfcounter_get(adreno_dev,
+					KGSL_PERFCOUNTER_GROUP_VBIF,
+					GBIF_AXI1_READ_DATA_TOTAL_BEATS,
+					&adreno_dev->ram_cycles_lo_ch1_read,
+					NULL,
+					PERFCOUNTER_FLAG_KERNEL);
+
+				if (ret) {
+					KGSL_DRV_ERR(device,
+						"Unable to get perf counters for bus DCVS\n");
+					adreno_dev->ram_cycles_lo_ch1_read = 0;
+				}
+			}
+
+			if (adreno_dev->ram_cycles_lo_ch0_write == 0) {
+				ret = adreno_perfcounter_get(adreno_dev,
+					KGSL_PERFCOUNTER_GROUP_VBIF,
+					GBIF_AXI0_WRITE_DATA_TOTAL_BEATS,
+					&adreno_dev->ram_cycles_lo_ch0_write,
+					NULL,
+					PERFCOUNTER_FLAG_KERNEL);
+
+				if (ret) {
+					KGSL_DRV_ERR(device,
+						"Unable to get perf counters for bus DCVS\n");
+					adreno_dev->ram_cycles_lo_ch0_write = 0;
+				}
+			}
+
+			if (adreno_dev->ram_cycles_lo_ch1_write == 0) {
+				ret = adreno_perfcounter_get(adreno_dev,
+					KGSL_PERFCOUNTER_GROUP_VBIF,
+					GBIF_AXI1_WRITE_DATA_TOTAL_BEATS,
+					&adreno_dev->ram_cycles_lo_ch1_write,
+					NULL,
+					PERFCOUNTER_FLAG_KERNEL);
+
+				if (ret) {
+					KGSL_DRV_ERR(device,
+						"Unable to get perf counters for bus DCVS\n");
+					adreno_dev->ram_cycles_lo_ch1_write = 0;
+				}
+			}
+		} else {
+			/* VBIF DDR cycles */
+			if (adreno_dev->ram_cycles_lo == 0) {
+				ret = adreno_perfcounter_get(adreno_dev,
+					KGSL_PERFCOUNTER_GROUP_VBIF,
+					VBIF_AXI_TOTAL_BEATS,
+					&adreno_dev->ram_cycles_lo, NULL,
+					PERFCOUNTER_FLAG_KERNEL);
+
+				if (ret) {
+					KGSL_DRV_ERR(device,
+						"Unable to get perf counters for bus DCVS\n");
+					adreno_dev->ram_cycles_lo = 0;
+				}
 			}
 		}
 	}
 
 	/* Clear the busy_data stats - we're starting over from scratch */
 	adreno_dev->busy_data.gpu_busy = 0;
-	adreno_dev->busy_data.vbif_ram_cycles = 0;
-	adreno_dev->busy_data.vbif_starved_ram = 0;
+	adreno_dev->busy_data.bif_ram_cycles = 0;
+	adreno_dev->busy_data.bif_ram_cycles_read_ch1 = 0;
+	adreno_dev->busy_data.bif_ram_cycles_write_ch0 = 0;
+	adreno_dev->busy_data.bif_ram_cycles_write_ch1 = 0;
+	adreno_dev->busy_data.bif_starved_ram = 0;
+	adreno_dev->busy_data.bif_starved_ram_ch1 = 0;
 
 	/* Restore performance counter registers with saved values */
 	adreno_perfcounter_restore(adreno_dev);
@@ -1761,8 +1877,19 @@
 	 * because some idle level transitions require VBIF and MMU.
 	 */
 	if (gpudev->wait_for_lowest_idle &&
-			gpudev->wait_for_lowest_idle(adreno_dev))
-		return -EINVAL;
+			gpudev->wait_for_lowest_idle(adreno_dev)) {
+		struct gmu_device *gmu = &device->gmu;
+
+		set_bit(GMU_FAULT, &gmu->flags);
+		gmu_snapshot(device);
+		/*
+		 * Assume a GMU hang after 10ms without a response.
+		 * It should be relatively safe to clear the VBIF and
+		 * stop the MMU later. An early return from adreno_stop
+		 * would cause a kernel panic in adreno_start.
+		 */
+		error = -EINVAL;
+	}
 
 	adreno_vbif_clear_pending_transactions(device);
 
@@ -1776,7 +1903,7 @@
 
 	clear_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);
 
-	return 0;
+	return error;
 }
 
 static inline bool adreno_try_soft_reset(struct kgsl_device *device, int fault)
@@ -2431,8 +2558,12 @@
 
 	/* Clear the busy_data stats - we're starting over from scratch */
 	adreno_dev->busy_data.gpu_busy = 0;
-	adreno_dev->busy_data.vbif_ram_cycles = 0;
-	adreno_dev->busy_data.vbif_starved_ram = 0;
+	adreno_dev->busy_data.bif_ram_cycles = 0;
+	adreno_dev->busy_data.bif_ram_cycles_read_ch1 = 0;
+	adreno_dev->busy_data.bif_ram_cycles_write_ch0 = 0;
+	adreno_dev->busy_data.bif_ram_cycles_write_ch1 = 0;
+	adreno_dev->busy_data.bif_starved_ram = 0;
+	adreno_dev->busy_data.bif_starved_ram_ch1 = 0;
 
 	/* Set the page table back to the default page table */
 	adreno_ringbuffer_set_global(adreno_dev, 0);
@@ -2770,6 +2901,56 @@
 	rmb();
 }
 
+bool adreno_is_cx_dbgc_register(struct kgsl_device *device,
+		unsigned int offsetwords)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+	return adreno_dev->cx_dbgc_virt &&
+		(offsetwords >= (adreno_dev->cx_dbgc_base >> 2)) &&
+		(offsetwords < (adreno_dev->cx_dbgc_base +
+				adreno_dev->cx_dbgc_len) >> 2);
+}
+
+void adreno_cx_dbgc_regread(struct kgsl_device *device,
+	unsigned int offsetwords, unsigned int *value)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	unsigned int cx_dbgc_offset;
+
+	if (!adreno_is_cx_dbgc_register(device, offsetwords))
+		return;
+
+	cx_dbgc_offset = (offsetwords << 2) - adreno_dev->cx_dbgc_base;
+	*value = __raw_readl(adreno_dev->cx_dbgc_virt + cx_dbgc_offset);
+
+	/*
+	 * ensure this read finishes before the next one.
+	 * i.e. act like normal readl()
+	 */
+	rmb();
+}
+
+void adreno_cx_dbgc_regwrite(struct kgsl_device *device,
+	unsigned int offsetwords, unsigned int value)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	unsigned int cx_dbgc_offset;
+
+	if (!adreno_is_cx_dbgc_register(device, offsetwords))
+		return;
+
+	cx_dbgc_offset = (offsetwords << 2) - adreno_dev->cx_dbgc_base;
+	trace_kgsl_regwrite(device, offsetwords, value);
+
+	/*
+	 * ensure previous writes post before this one,
+	 * i.e. act like normal writel()
+	 */
+	wmb();
+	__raw_writel(value, adreno_dev->cx_dbgc_virt + cx_dbgc_offset);
+}
+
 /**
  * adreno_waittimestamp - sleep while waiting for the specified timestamp
  * @device - pointer to a KGSL device structure
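
Note: like kgsl_regread()/kgsl_regwrite(), the new CX_DBGC accessors take register offsets in dwords, and they silently ignore offsets outside the mapped window, so callers need at most one existence check. A usage sketch mirroring the snapshot code later in this patch:

	unsigned int sel, val = 0;

	sel = (block_id << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
		(index << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);

	/* Both calls are no-ops unless the optional block was mapped at probe */
	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, sel);
	adreno_cx_dbgc_regread(device, A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2, &val);
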
@@ -2948,7 +3129,8 @@
 
 		if (adreno_is_a6xx(adreno_dev)) {
 			/* clock sourced from XO */
-			stats->busy_time = gpu_busy * 10 / 192;
+			stats->busy_time = gpu_busy * 10;
+			do_div(stats->busy_time, 192);
 		} else {
 			/* clock sourced from GFX3D */
 			stats->busy_time = adreno_ticks_to_us(gpu_busy,
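
Note: the do_div() change is needed because stats->busy_time is a 64-bit value and a plain '/' would emit 64-bit division helpers on 32-bit builds. do_div() divides its first argument in place and returns the remainder; a minimal illustration of converting 19.2 MHz XO ticks to microseconds:

	#include <asm/div64.h>

	u64 busy_time = (u64)gpu_busy * 10;	/* XO runs at 19.2 ticks per us */
	u32 rem;

	rem = do_div(busy_time, 192);	/* busy_time now holds ticks * 10 / 192 */
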
@@ -2962,12 +3144,36 @@
 		if (adreno_dev->ram_cycles_lo != 0)
 			ram_cycles = counter_delta(device,
 				adreno_dev->ram_cycles_lo,
-				&busy->vbif_ram_cycles);
+				&busy->bif_ram_cycles);
+
+		if (adreno_has_gbif(adreno_dev)) {
+			if (adreno_dev->ram_cycles_lo_ch1_read != 0)
+				ram_cycles += counter_delta(device,
+					adreno_dev->ram_cycles_lo_ch1_read,
+					&busy->bif_ram_cycles_read_ch1);
+
+			if (adreno_dev->ram_cycles_lo_ch0_write != 0)
+				ram_cycles += counter_delta(device,
+					adreno_dev->ram_cycles_lo_ch0_write,
+					&busy->bif_ram_cycles_write_ch0);
+
+			if (adreno_dev->ram_cycles_lo_ch1_write != 0)
+				ram_cycles += counter_delta(device,
+					adreno_dev->ram_cycles_lo_ch1_write,
+					&busy->bif_ram_cycles_write_ch1);
+		}
 
 		if (adreno_dev->starved_ram_lo != 0)
 			starved_ram = counter_delta(device,
 				adreno_dev->starved_ram_lo,
-				&busy->vbif_starved_ram);
+				&busy->bif_starved_ram);
+
+		if (adreno_has_gbif(adreno_dev)) {
+			if (adreno_dev->starved_ram_lo_ch1 != 0)
+				starved_ram += counter_delta(device,
+					adreno_dev->starved_ram_lo_ch1,
+					&busy->bif_starved_ram_ch1);
+		}
 
 		stats->ram_time = ram_cycles;
 		stats->ram_wait = starved_ram;
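
Note: counter_delta() is unchanged by this patch. For readers of the per-channel accumulation above, a sketch of the wraparound-safe delta it is assumed to compute (the real helper lives elsewhere in adreno.c):

/* Sketch (assumed behaviour): delta of a 32-bit HW counter across samples */
static unsigned int counter_delta(struct kgsl_device *device,
		unsigned int reg, unsigned int *counter)
{
	unsigned int val, ret = 0;

	kgsl_regread(device, reg, &val);

	/* The first sample only primes the cached value */
	if (*counter != 0)
		ret = val - *counter;	/* unsigned math handles wraparound */

	*counter = val;
	return ret;
}
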
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index d6cba9d..c5db02a 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -164,6 +164,9 @@
 /* Number of times to try hard reset */
 #define NUM_TIMES_RESET_RETRY 5
 
+/* Number of times to poll the AHB fence in ISR */
+#define FENCE_RETRY_MAX 100
+
 /* One cannot wait forever for the core to idle, so set an upper limit to the
  * amount of time to wait for the core to go idle
  */
@@ -279,8 +282,12 @@
 
 struct adreno_busy_data {
 	unsigned int gpu_busy;
-	unsigned int vbif_ram_cycles;
-	unsigned int vbif_starved_ram;
+	unsigned int bif_ram_cycles;
+	unsigned int bif_ram_cycles_read_ch1;
+	unsigned int bif_ram_cycles_write_ch0;
+	unsigned int bif_ram_cycles_write_ch1;
+	unsigned int bif_starved_ram;
+	unsigned int bif_starved_ram_ch1;
 	unsigned int throttle_cycles[ADRENO_GPMU_THROTTLE_COUNTERS];
 };
 
@@ -397,8 +404,18 @@
  * @pwron_fixup_dwords: Number of dwords in the command buffer
  * @input_work: Work struct for turning on the GPU after a touch event
  * @busy_data: Struct holding GPU VBIF busy stats
- * @ram_cycles_lo: Number of DDR clock cycles for the monitor session
- * @perfctr_pwr_lo: Number of cycles VBIF is stalled by DDR
+ * @ram_cycles_lo: Number of DDR clock cycles for the monitor session (Only
+ * DDR channel 0 read cycles in case of GBIF)
+ * @ram_cycles_lo_ch1_read: Number of DDR channel 1 Read clock cycles for
+ * the monitor session
+ * @ram_cycles_lo_ch0_write: Number of DDR channel 0 Write clock cycles for
+ * the monitor session
+ * @ram_cycles_lo_ch1_write: Number of DDR channel 1 Write clock cycles for
+ * the monitor session
+ * @starved_ram_lo: Number of cycles VBIF/GBIF is stalled by DDR (Only channel 0
+ * stall cycles in case of GBIF)
+ * @starved_ram_lo_ch1: Number of cycles GBIF is stalled by DDR channel 1
+ * @perfctr_pwr_lo: GPU busy cycles
  * @halt: Atomic variable to check whether the GPU is currently halted
  * @pending_irq_refcnt: Atomic variable to keep track of running IRQ handlers
  * @ctx_d_debugfs: Context debugfs node
@@ -434,6 +451,9 @@
 	unsigned int chipid;
 	unsigned long gmem_base;
 	unsigned long gmem_size;
+	unsigned long cx_dbgc_base;
+	unsigned int cx_dbgc_len;
+	void __iomem *cx_dbgc_virt;
 	const struct adreno_gpu_core *gpucore;
 	struct adreno_firmware fw[2];
 	size_t gpmu_cmds_size;
@@ -455,7 +475,11 @@
 	struct work_struct input_work;
 	struct adreno_busy_data busy_data;
 	unsigned int ram_cycles_lo;
+	unsigned int ram_cycles_lo_ch1_read;
+	unsigned int ram_cycles_lo_ch0_write;
+	unsigned int ram_cycles_lo_ch1_write;
 	unsigned int starved_ram_lo;
+	unsigned int starved_ram_lo_ch1;
 	unsigned int perfctr_pwr_lo;
 	atomic_t halt;
 	atomic_t pending_irq_refcnt;
@@ -1060,6 +1084,13 @@
 		unsigned int *val);
 void adreno_efuse_unmap(struct adreno_device *adreno_dev);
 
+bool adreno_is_cx_dbgc_register(struct kgsl_device *device,
+		unsigned int offset);
+void adreno_cx_dbgc_regread(struct kgsl_device *adreno_device,
+		unsigned int offsetwords, unsigned int *value);
+void adreno_cx_dbgc_regwrite(struct kgsl_device *device,
+		unsigned int offsetwords, unsigned int value);
+
 #define ADRENO_TARGET(_name, _id) \
 static inline int adreno_is_##_name(struct adreno_device *adreno_dev) \
 { \
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index 92f51bc..6f6acf7 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -156,7 +156,7 @@
 	{A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
 	{A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
 	{A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
-	{A6XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
+	{A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
 	{A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
 	{A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
 	{A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
@@ -1079,7 +1079,7 @@
 		if ((value & mask) == expected_ret)
 			return 0;
 		/* Wait 100us to reduce unnecessary AHB bus traffic */
-		udelay(100);
+		usleep_range(10, 100);
 	} while (!time_after(jiffies, t));
 
 	/* Double check one last time */
@@ -1225,7 +1225,6 @@
 		unsigned int clear_mask)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-	struct gmu_device *gmu = &device->gmu;
 	int ret = 0;
 
 	if (!kgsl_gmu_isenabled(device))
@@ -1239,9 +1238,7 @@
 			GPU_START_TIMEOUT,
 			check_mask)) {
 		ret = -ETIMEDOUT;
-		dev_err(&gmu->pdev->dev,
-			"OOB set timed out, mask %x\n", set_mask);
-		WARN_ON(true);
+		WARN(1, "OOB set timed out, mask %x\n", set_mask);
 	}
 
 	kgsl_gmu_regwrite(device, A6XX_GMU_GMU2HOST_INTR_CLR, clear_mask);
@@ -1844,7 +1841,7 @@
 		}
 
 		/* Wait 100us to reduce unnecessary AHB bus traffic */
-		udelay(100);
+		usleep_range(10, 100);
 	}
 
 	/* Check one last time */
@@ -1858,8 +1855,7 @@
 			return 0;
 	}
 
-	dev_err(&gmu->pdev->dev,
-			"Timeout waiting for lowest idle level: %d\n", reg);
+	WARN(1, "Timeout waiting for lowest idle level: %d\n", reg);
 	return -ETIMEDOUT;
 }
 
@@ -2964,6 +2960,41 @@
 	return 0;
 }
 
+static void a6xx_efuse_speed_bin(struct adreno_device *adreno_dev)
+{
+	unsigned int val;
+	unsigned int speed_bin[3];
+	struct kgsl_device *device = &adreno_dev->dev;
+
+	if (of_property_read_u32_array(device->pdev->dev.of_node,
+		"qcom,gpu-speed-bin", speed_bin, 3))
+		return;
+
+	adreno_efuse_read_u32(adreno_dev, speed_bin[0], &val);
+
+	adreno_dev->speed_bin = (val & speed_bin[1]) >> speed_bin[2];
+}
+
+static const struct {
+	int (*check)(struct adreno_device *adreno_dev);
+	void (*func)(struct adreno_device *adreno_dev);
+} a6xx_efuse_funcs[] = {
+	{ adreno_is_a615, a6xx_efuse_speed_bin },
+};
+
+static void a6xx_check_features(struct adreno_device *adreno_dev)
+{
+	unsigned int i;
+
+	if (adreno_efuse_map(adreno_dev))
+		return;
+	for (i = 0; i < ARRAY_SIZE(a6xx_efuse_funcs); i++) {
+		if (a6xx_efuse_funcs[i].check(adreno_dev))
+			a6xx_efuse_funcs[i].func(adreno_dev);
+	}
+
+	adreno_efuse_unmap(adreno_dev);
+}
 static void a6xx_platform_setup(struct adreno_device *adreno_dev)
 {
 	uint64_t addr;
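
Note: a6xx_efuse_speed_bin() reads the three cells of "qcom,gpu-speed-bin" as <fuse-offset mask shift>. A worked example with made-up values:

	/* Hypothetical: qcom,gpu-speed-bin = <0x41a0 0xe0 5>, fuse word reads 0x60 */
	unsigned int val = 0x60;			/* adreno_efuse_read_u32() result */
	unsigned int speed_bin = (val & 0xe0) >> 5;	/* (0x60 & 0xe0) >> 5 == 3 */
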
@@ -2982,7 +3013,8 @@
 
 		a6xx_perfcounter_groups[KGSL_PERFCOUNTER_GROUP_VBIF_PWR].regs =
 				a6xx_perfcounters_gbif_pwr;
-		a6xx_perfcounter_groups[KGSL_PERFCOUNTER_GROUP_VBIF].reg_count
+		a6xx_perfcounter_groups[
+			KGSL_PERFCOUNTER_GROUP_VBIF_PWR].reg_count
 				= ARRAY_SIZE(a6xx_perfcounters_gbif_pwr);
 
 		gpudev->vbif_xin_halt_ctrl0_mask =
@@ -2990,6 +3022,9 @@
 	} else
 		gpudev->vbif_xin_halt_ctrl0_mask =
 				A6XX_VBIF_XIN_HALT_CTRL0_MASK;
+
+	/* Check efuse bits for various capabilities */
+	a6xx_check_features(adreno_dev);
 }
 
 
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index 4357518..c1a76bc 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -195,35 +195,6 @@
 	unsigned int ctxt_id;
 };
 
-static const unsigned int a6xx_hlsq_non_ctx_registers[] = {
-	0xBE00, 0xBE01, 0xBE04, 0xBE05, 0xBE08, 0xBE09, 0xBE10, 0xBE15,
-	0xBE20, 0xBE23,
-};
-
-static const unsigned int a6xx_sp_non_ctx_registers[] = {
-	0xAE00, 0xAE04, 0xAE0C, 0xAE0C, 0xAE0F, 0xAE2B, 0xAE30, 0xAE32,
-	0xAE35, 0xAE35, 0xAE3A, 0xAE3F, 0xAE50, 0xAE52,
-};
-
-static const unsigned int a6xx_tp_non_ctx_registers[] = {
-	0xB600, 0xB601, 0xB604, 0xB605, 0xB610, 0xB61B, 0xB620, 0xB623,
-};
-
-static struct a6xx_non_ctx_dbgahb_registers {
-	unsigned int regbase;
-	unsigned int statetype;
-	const unsigned int *regs;
-	unsigned int num_sets;
-	unsigned int offset;
-} a6xx_non_ctx_dbgahb[] = {
-	{ 0x0002F800, 0x40, a6xx_hlsq_non_ctx_registers,
-		ARRAY_SIZE(a6xx_hlsq_non_ctx_registers) / 2 },
-	{ 0x0002B800, 0x20, a6xx_sp_non_ctx_registers,
-		ARRAY_SIZE(a6xx_sp_non_ctx_registers) / 2 },
-	{ 0x0002D800, 0x0, a6xx_tp_non_ctx_registers,
-		ARRAY_SIZE(a6xx_tp_non_ctx_registers) / 2 },
-};
-
 static const unsigned int a6xx_vbif_ver_20xxxxxx_registers[] = {
 	/* VBIF */
 	0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x302D, 0x3030, 0x3031,
@@ -332,6 +303,15 @@
 	/* VFD */
 	0xA600, 0xA601, 0xA603, 0xA603, 0xA60A, 0xA60A, 0xA610, 0xA617,
 	0xA630, 0xA630,
+	/* SP */
+	0xAE00, 0xAE04, 0xAE0C, 0xAE0C, 0xAE0F, 0xAE2B, 0xAE30, 0xAE32,
+	0xAE35, 0xAE35, 0xAE3A, 0xAE3F, 0xAE50, 0xAE52,
+	/* TP */
+	0xB600, 0xB601, 0xB604, 0xB605, 0xB610, 0xB61B, 0xB620, 0xB623,
+	/* HLSQ */
+	0xBE00, 0xBE01, 0xBE04, 0xBE05, 0xBE08, 0xBE09, 0xBE10, 0xBE15,
+	0xBE20, 0xBE23,
+
 };
 
 /*
@@ -437,7 +417,6 @@
 	A6XX_DBGBUS_VBIF, 0x100,
 };
 
-static void __iomem *a6xx_cx_dbgc;
 static const struct adreno_debugbus_block a6xx_cx_dbgc_debugbus_blocks[] = {
 	{ A6XX_DBGBUS_GMU_CX, 0x100, },
 	{ A6XX_DBGBUS_CX, 0x100, },
@@ -848,106 +827,6 @@
 	return data_size + sizeof(*header);
 }
 
-static size_t a6xx_legacy_snapshot_non_ctx_dbgahb(struct kgsl_device *device,
-				u8 *buf, size_t remain, void *priv)
-{
-	struct kgsl_snapshot_regs *header =
-				(struct kgsl_snapshot_regs *)buf;
-	struct a6xx_non_ctx_dbgahb_registers *regs =
-				(struct a6xx_non_ctx_dbgahb_registers *)priv;
-	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
-	int count = 0;
-	unsigned int read_sel;
-	int i, j;
-
-	if (!device->snapshot_legacy)
-		return 0;
-
-	/* Figure out how many registers we are going to dump */
-	for (i = 0; i < regs->num_sets; i++) {
-		int start = regs->regs[i * 2];
-		int end = regs->regs[i * 2 + 1];
-
-		count += (end - start + 1);
-	}
-
-	if (remain < (count * 8) + sizeof(*header)) {
-		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
-		return 0;
-	}
-
-	header->count = count;
-
-	read_sel = (regs->statetype & 0xff) << 8;
-	kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);
-
-	for (i = 0; i < regs->num_sets; i++) {
-		unsigned int start = regs->regs[2 * i];
-		unsigned int end = regs->regs[2 * i + 1];
-
-		for (j = start; j <= end; j++) {
-			unsigned int val;
-
-			val = a6xx_read_dbgahb(device, regs->regbase, j);
-			*data++ = j;
-			*data++ = val;
-
-		}
-	}
-	return (count * 8) + sizeof(*header);
-}
-
-static size_t a6xx_snapshot_non_ctx_dbgahb(struct kgsl_device *device, u8 *buf,
-				size_t remain, void *priv)
-{
-	struct kgsl_snapshot_regs *header =
-				(struct kgsl_snapshot_regs *)buf;
-	struct a6xx_non_ctx_dbgahb_registers *regs =
-				(struct a6xx_non_ctx_dbgahb_registers *)priv;
-	unsigned int count = 0;
-	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
-	unsigned int i, k;
-	unsigned int *src;
-
-	if (crash_dump_valid == false)
-		return a6xx_legacy_snapshot_non_ctx_dbgahb(device, buf, remain,
-				regs);
-
-	if (remain < sizeof(*header)) {
-		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
-		return 0;
-	}
-
-	remain -= sizeof(*header);
-
-	src = (unsigned int *)(a6xx_crashdump_registers.hostptr + regs->offset);
-
-	for (i = 0; i < regs->num_sets; i++) {
-		unsigned int start;
-		unsigned int end;
-
-		start = regs->regs[2 * i];
-		end = regs->regs[(2 * i) + 1];
-
-		if (remain < (end - start + 1) * 8) {
-			SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
-			goto out;
-		}
-
-		remain -= ((end - start) + 1) * 8;
-
-		for (k = start; k <= end; k++, count++) {
-			*data++ = k;
-			*data++ = *src++;
-		}
-	}
-out:
-	header->count = count;
-
-	/* Return the size of the section */
-	return (count * 8) + sizeof(*header);
-}
-
 static void a6xx_snapshot_dbgahb_regs(struct kgsl_device *device,
 				struct kgsl_snapshot *snapshot)
 {
@@ -967,12 +846,6 @@
 				a6xx_snapshot_cluster_dbgahb, &info);
 		}
 	}
-
-	for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
-		kgsl_snapshot_add_section(device,
-			KGSL_SNAPSHOT_SECTION_REGS, snapshot,
-			a6xx_snapshot_non_ctx_dbgahb, &a6xx_non_ctx_dbgahb[i]);
-	}
 }
 
 static size_t a6xx_legacy_snapshot_mvc(struct kgsl_device *device, u8 *buf,
@@ -1254,46 +1127,6 @@
 	return size;
 }
 
-static void _cx_dbgc_regread(unsigned int offsetwords, unsigned int *value)
-{
-	void __iomem *reg;
-
-	if (WARN((offsetwords < A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) ||
-		(offsetwords > A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2),
-		"Read beyond CX_DBGC block: 0x%x\n", offsetwords))
-		return;
-
-	reg = a6xx_cx_dbgc +
-		((offsetwords - A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) << 2);
-	*value = __raw_readl(reg);
-
-	/*
-	 * ensure this read finishes before the next one.
-	 * i.e. act like normal readl()
-	 */
-	rmb();
-}
-
-static void _cx_dbgc_regwrite(unsigned int offsetwords, unsigned int value)
-{
-	void __iomem *reg;
-
-	if (WARN((offsetwords < A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) ||
-		(offsetwords > A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2),
-		"Write beyond CX_DBGC block: 0x%x\n", offsetwords))
-		return;
-
-	reg = a6xx_cx_dbgc +
-		((offsetwords - A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) << 2);
-
-	/*
-	 * ensure previous writes post before this one,
-	 * i.e. act like normal writel()
-	 */
-	wmb();
-	__raw_writel(value, reg);
-}
-
 /* a6xx_cx_dbgc_debug_bus_read() - Read data from trace bus */
 static void a6xx_cx_debug_bus_read(struct kgsl_device *device,
 	unsigned int block_id, unsigned int index, unsigned int *val)
@@ -1303,10 +1136,10 @@
 	reg = (block_id << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
 			(index << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);
 
-	_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
-	_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
-	_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_C, reg);
-	_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_D, reg);
+	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
+	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
+	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_SEL_C, reg);
+	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_SEL_D, reg);
 
 	/*
 	 * There needs to be a delay of 1 us to ensure enough time for correct
@@ -1314,9 +1147,9 @@
 	 */
 	udelay(1);
 
-	_cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
+	adreno_cx_dbgc_regread(device, A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
 	val++;
-	_cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
+	adreno_cx_dbgc_regread(device, A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
 }
 
 /*
@@ -1398,50 +1231,42 @@
 	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
 	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);
 
-	a6xx_cx_dbgc = ioremap(device->reg_phys +
-			(A6XX_CX_DBGC_CFG_DBGBUS_SEL_A << 2),
-			(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 -
-				A6XX_CX_DBGC_CFG_DBGBUS_SEL_A + 1) << 2);
-
-	if (a6xx_cx_dbgc) {
-		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLT,
+	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_CNTLT,
 		(0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
 		(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
 		(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));
 
-		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLM,
-			0xf << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE_SHIFT);
+	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_CNTLM,
+		0xf << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE_SHIFT);
 
-		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0);
-		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0);
-		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0);
-		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0);
+	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0);
+	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0);
+	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0);
+	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0);
 
-		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0,
-			(0 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
-			(1 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
-			(2 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
-			(3 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
-			(4 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
-			(5 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
-			(6 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
-			(7 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
-		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1,
-			(8 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
-			(9 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
-			(10 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
-			(11 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
-			(12 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
-			(13 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
-			(14 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
-			(15 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));
+	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0,
+		(0 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
+		(1 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
+		(2 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
+		(3 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
+		(4 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
+		(5 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
+		(6 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
+		(7 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
+	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1,
+		(8 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
+		(9 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
+		(10 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
+		(11 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
+		(12 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
+		(13 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
+		(14 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
+		(15 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));
 
-		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0);
-		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0);
-		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0);
-		_cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);
-	} else
-		KGSL_DRV_ERR(device, "Unable to ioremap CX_DBGC_CFG block\n");
+	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0);
+	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0);
+	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0);
+	adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);
 
 	for (i = 0; i < ARRAY_SIZE(a6xx_dbgc_debugbus_blocks); i++) {
 		kgsl_snapshot_add_section(device,
@@ -1457,14 +1282,14 @@
 				snapshot, a6xx_snapshot_vbif_debugbus_block,
 				(void *) &a6xx_vbif_debugbus_blocks);
 
-	if (a6xx_cx_dbgc) {
+	/* Dump the CX debugbus data if the block exists */
+	if (adreno_is_cx_dbgc_register(device, A6XX_CX_DBGC_CFG_DBGBUS_SEL_A)) {
 		for (i = 0; i < ARRAY_SIZE(a6xx_cx_dbgc_debugbus_blocks); i++) {
 			kgsl_snapshot_add_section(device,
 				KGSL_SNAPSHOT_SECTION_DEBUGBUS,
 				snapshot, a6xx_snapshot_cx_dbgc_debugbus_block,
 				(void *) &a6xx_cx_dbgc_debugbus_blocks[i]);
 		}
-		iounmap(a6xx_cx_dbgc);
 	}
 }
 
@@ -1769,40 +1594,6 @@
 	return qwords;
 }
 
-static int _a6xx_crashdump_init_non_ctx_dbgahb(uint64_t *ptr, uint64_t *offset)
-{
-	int qwords = 0;
-	unsigned int i, k;
-	unsigned int count;
-
-	for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
-		struct a6xx_non_ctx_dbgahb_registers *regs =
-				&a6xx_non_ctx_dbgahb[i];
-
-		regs->offset = *offset;
-
-		/* Program the aperture */
-		ptr[qwords++] = (regs->statetype & 0xff) << 8;
-		ptr[qwords++] =	(((uint64_t)A6XX_HLSQ_DBG_READ_SEL << 44)) |
-					(1 << 21) | 1;
-
-		for (k = 0; k < regs->num_sets; k++) {
-			unsigned int start = regs->regs[2 * k];
-
-			count = REG_PAIR_COUNT(regs->regs, k);
-			ptr[qwords++] =
-				a6xx_crashdump_registers.gpuaddr + *offset;
-			ptr[qwords++] =
-				(((uint64_t)(A6XX_HLSQ_DBG_AHB_READ_APERTURE +
-					start - regs->regbase / 4) << 44)) |
-							count;
-
-			*offset += count * sizeof(unsigned int);
-		}
-	}
-	return qwords;
-}
-
 void a6xx_crashdump_init(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -1894,26 +1685,6 @@
 		}
 	}
 
-	/*
-	 * Calculate the script and data size for non context debug
-	 * AHB registers
-	 */
-	for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
-		struct a6xx_non_ctx_dbgahb_registers *regs =
-				&a6xx_non_ctx_dbgahb[i];
-
-		/* 16 bytes for programming the aperture */
-		script_size += 16;
-
-		/* Reading each pair of registers takes 16 bytes */
-		script_size += 16 * regs->num_sets;
-
-		/* A dword per register read from the cluster list */
-		for (k = 0; k < regs->num_sets; k++)
-			data_size += REG_PAIR_COUNT(regs->regs, k) *
-				sizeof(unsigned int);
-	}
-
 	/* Now allocate the script and data buffers */
 
 	/* The script buffers needs 2 extra qwords on the end */
@@ -1964,8 +1735,6 @@
 
 	ptr += _a6xx_crashdump_init_ctx_dbgahb(ptr, &offset);
 
-	ptr += _a6xx_crashdump_init_non_ctx_dbgahb(ptr, &offset);
-
 	*ptr++ = 0;
 	*ptr++ = 0;
 }
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index 902dc0a..0caf55b 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -681,7 +681,7 @@
 	 * then set up the timer.  If this misses, then preemption is indeed a
 	 * thing and the timer will be set up in due time
 	 */
-	if (!adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE)) {
+	if (adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE)) {
 		if (drawqueue_is_current(dispatch_q))
 			mod_timer(&dispatcher->timer, dispatch_q->expires);
 	}
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index d984c6d..b81be8f 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -505,6 +505,8 @@
 		kgsl_drawobj_destroy(list[i]);
 	}
 
+	debugfs_remove_recursive(drawctxt->debug_root);
+
 	/*
 	 * internal_timestamp is set in adreno_ringbuffer_addcmds,
 	 * which holds the device mutex.
@@ -562,8 +564,6 @@
 
 	mutex_unlock(&device->mutex);
 
-	debugfs_remove_recursive(drawctxt->debug_root);
-
 	/* wake threads waiting to submit commands from this context */
 	wake_up_all(&drawctxt->waiting);
 	wake_up_all(&drawctxt->wq);
diff --git a/drivers/gpu/msm/adreno_ioctl.c b/drivers/gpu/msm/adreno_ioctl.c
index 8b283ae..13d71982 100644
--- a/drivers/gpu/msm/adreno_ioctl.c
+++ b/drivers/gpu/msm/adreno_ioctl.c
@@ -143,7 +143,7 @@
 
 	if (WARN_ON(_IOC_SIZE(cmds[i].cmd) > sizeof(data))) {
 		if (__ratelimit(&_rs))
-			WARN(1, "data too big for ioctl 0x%08X: %d/%ld\n",
+			WARN(1, "data too big for ioctl 0x%08X: %d/%zu\n",
 				cmd, _IOC_SIZE(cmds[i].cmd), sizeof(data));
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/msm/adreno_perfcounter.c b/drivers/gpu/msm/adreno_perfcounter.c
index 9ea8069..03db16d 100644
--- a/drivers/gpu/msm/adreno_perfcounter.c
+++ b/drivers/gpu/msm/adreno_perfcounter.c
@@ -171,17 +171,23 @@
  */
 inline void adreno_perfcounter_save(struct adreno_device *adreno_dev)
 {
+	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 	struct adreno_perfcounters *counters = ADRENO_PERFCOUNTERS(adreno_dev);
 	struct adreno_perfcount_group *group;
 	unsigned int counter, groupid;
-	int ret;
+	int ret = 0;
 
 	if (counters == NULL)
 		return;
 
-	ret = adreno_perfcntr_active_oob_get(adreno_dev);
+	if (gpudev->oob_set)
+		ret = gpudev->oob_set(adreno_dev, OOB_PERFCNTR_SET_MASK,
+				OOB_PERFCNTR_CHECK_MASK,
+				OOB_PERFCNTR_CLEAR_MASK);
+
+	/* if oob_set timed out, clear the mask and return */
 	if (ret)
-		return;
+		goto done;
 
 	for (groupid = 0; groupid < counters->group_count; groupid++) {
 		group = &(counters->groups[groupid]);
@@ -203,7 +209,9 @@
 		}
 	}
 
-	adreno_perfcntr_active_oob_put(adreno_dev);
+done:
+	if (gpudev->oob_clear)
+		gpudev->oob_clear(adreno_dev, OOB_PERFCNTR_CLEAR_MASK);
 }
 
 static int adreno_perfcounter_enable(struct adreno_device *adreno_dev,
@@ -633,25 +641,26 @@
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct adreno_perfcount_register *reg;
-	unsigned int shift = counter << 3;
 
 	reg = &counters->groups[KGSL_PERFCOUNTER_GROUP_VBIF].regs[counter];
 
 	if (adreno_has_gbif(adreno_dev)) {
+		unsigned int shift = counter << 3;
+		unsigned int perfctr_mask = 1 << counter;
 		/*
 		 * Write 1, followed by 0 to CLR register for
 		 * clearing the counter
 		 */
 		kgsl_regrmw(device, reg->select - GBIF_PERF_CLR_REG_SEL_OFF,
-			1 << counter, 1);
+			perfctr_mask, perfctr_mask);
 		kgsl_regrmw(device, reg->select - GBIF_PERF_CLR_REG_SEL_OFF,
-			1 << counter, 0);
+			perfctr_mask, 0);
 		/* select the desired countable */
 		kgsl_regrmw(device, reg->select,
 			GBIF_PERF_RMW_MASK << shift, countable << shift);
 		/* enable counter */
 		kgsl_regrmw(device, reg->select - GBIF_PERF_EN_REG_SEL_OFF,
-			1 << counter, 1);
+			perfctr_mask, perfctr_mask);
 
 	} else {
 		/*
@@ -680,17 +689,17 @@
 	reg = &counters->groups[KGSL_PERFCOUNTER_GROUP_VBIF_PWR].regs[counter];
 
 	if (adreno_has_gbif(adreno_dev)) {
+		unsigned int perfctr_mask = GBIF_PWR_RMW_MASK << counter;
 		/*
 		 * Write 1, followed by 0 to CLR register for
 		 * clearing the counter
 		 */
 		kgsl_regrmw(device, reg->select + GBIF_PWR_CLR_REG_EN_OFF,
-			GBIF_PWR_RMW_MASK << counter, 1);
+			perfctr_mask, perfctr_mask);
 		kgsl_regrmw(device, reg->select + GBIF_PWR_CLR_REG_EN_OFF,
-			GBIF_PWR_RMW_MASK << counter, 0);
+			perfctr_mask, 0);
 		/* Enable the counter */
-		kgsl_regrmw(device, reg->select,
-			GBIF_PWR_RMW_MASK << counter, 1);
+		kgsl_regrmw(device, reg->select, perfctr_mask, perfctr_mask);
 	} else {
 		/*
 		 * Write 1, followed by 0 to CLR register for
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 1adfeb2..70043db 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -537,6 +537,9 @@
 			total_sizedwords += 1;
 	}
 
+	if (gpudev->set_marker)
+		total_sizedwords += 4;
+
 	ringcmds = adreno_ringbuffer_allocspace(rb, total_sizedwords);
 	if (IS_ERR(ringcmds))
 		return PTR_ERR(ringcmds);
@@ -556,6 +559,9 @@
 		*ringcmds++ = KGSL_CMD_INTERNAL_IDENTIFIER;
 	}
 
+	if (gpudev->set_marker)
+		ringcmds += gpudev->set_marker(ringcmds, 1);
+
 	if (flags & KGSL_CMD_FLAGS_PWRON_FIXUP) {
 		/* Disable protected mode for the fixup */
 		*ringcmds++ = cp_packet(adreno_dev, CP_SET_PROTECTED_MODE, 1);
@@ -674,6 +680,9 @@
 		*ringcmds++ = timestamp;
 	}
 
+	if (gpudev->set_marker)
+		ringcmds += gpudev->set_marker(ringcmds, 0);
+
 	if (adreno_is_a3xx(adreno_dev)) {
 		/* Dummy set-constant to trigger context rollover */
 		*ringcmds++ = cp_packet(adreno_dev, CP_SET_CONSTANT, 2);
@@ -898,9 +907,6 @@
 			dwords += 8;
 	}
 
-	if (gpudev->set_marker)
-		dwords += 4;
-
 	if (gpudev->ccu_invalidate)
 		dwords += 4;
 
@@ -933,9 +939,6 @@
 			gpu_ticks_submitted));
 	}
 
-	if (gpudev->set_marker)
-		cmds += gpudev->set_marker(cmds, 1);
-
 	if (numibs) {
 		list_for_each_entry(ib, &cmdobj->cmdlist, node) {
 			/*
@@ -960,9 +963,6 @@
 	if (gpudev->ccu_invalidate)
 		cmds += gpudev->ccu_invalidate(adreno_dev, cmds);
 
-	if (gpudev->set_marker)
-		cmds += gpudev->set_marker(cmds, 0);
-
 	if (adreno_is_preemption_execution_enabled(adreno_dev)) {
 		if (gpudev->preemption_yield_enable)
 			cmds += gpudev->preemption_yield_enable(cmds);
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 31868a0..2e1ceea 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -2846,10 +2846,8 @@
 	bool ret = (kgsl_driver.full_cache_threshold != 0) &&
 		(size >= kgsl_driver.full_cache_threshold) &&
 		(op == KGSL_GPUMEM_CACHE_FLUSH);
-	if (ret) {
-		trace_kgsl_mem_sync_full_cache(actual_count, op_size);
+	if (ret)
 		flush_cache_all();
-	}
 	return ret;
 }
 #endif
@@ -2913,8 +2911,10 @@
 		entries[actual_count++] = entry;
 
 		full_flush  = check_full_flush(op_size, param->op);
-		if (full_flush)
+		if (full_flush) {
+			trace_kgsl_mem_sync_full_cache(actual_count, op_size);
 			break;
+		}
 
 		last_id = id;
 	}
@@ -3002,8 +3002,10 @@
 			size += (entries[i]->memdesc.size - objs[i].offset);
 
 		full_flush = check_full_flush(size, objs[i].op);
-		if (full_flush)
+		if (full_flush) {
+			trace_kgsl_mem_sync_full_cache(i, size);
 			break;
+		}
 
 		ptr += sizeof(*objs);
 	}
diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c
index e339a08..834706a 100644
--- a/drivers/gpu/msm/kgsl_debugfs.c
+++ b/drivers/gpu/msm/kgsl_debugfs.c
@@ -303,6 +303,7 @@
 	if (!(m->flags & KGSL_MEMFLAGS_SPARSE_VIRT))
 		return 0;
 
+	spin_lock(&entry->bind_lock);
 	node = rb_first(&entry->bind_tree);
 
 	while (node != NULL) {
@@ -313,6 +314,7 @@
 				obj->v_off, obj->size, obj->p_off);
 		node = rb_next(node);
 	}
+	spin_unlock(&entry->bind_lock);
 
 	seq_putc(s, '\n');
 
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index df25c28..56496f7 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -18,6 +18,7 @@
 #include <linux/msm-bus.h>
 #include <linux/msm-bus-board.h>
 #include <linux/pm_opp.h>
+#include <linux/io.h>
 #include <soc/qcom/cmd-db.h>
 
 #include "kgsl_device.h"
@@ -59,8 +60,6 @@
 	unsigned int image_start;
 };
 
-static void gmu_snapshot(struct kgsl_device *device);
-
 struct gmu_iommu_context {
 	const char *name;
 	struct device *dev;
@@ -183,8 +182,8 @@
 
 	if (ret) {
 		dev_err(&gmu->pdev->dev,
-				"gmu map err: gaddr=0x%016llX, paddr=0x%016llX\n",
-				md->gmuaddr, md->physaddr);
+				"gmu map err: gaddr=0x%016llX, paddr=0x%pa\n",
+				md->gmuaddr, &(md->physaddr));
 		free_gmu_mem(gmu, md);
 	}
 
@@ -1339,7 +1338,7 @@
 	return 0;
 }
 
-static void gmu_snapshot(struct kgsl_device *device)
+void gmu_snapshot(struct kgsl_device *device)
 {
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct gmu_device *gmu = &device->gmu;
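
Note: the format change above relies on the kernel's %pa specifier, which prints a phys_addr_t correctly whether it is 32 or 64 bits wide and takes a pointer to the variable rather than its value:

	phys_addr_t pa = md->physaddr;

	dev_err(&gmu->pdev->dev, "gmu map err: paddr=%pa\n", &pa);
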
diff --git a/drivers/gpu/msm/kgsl_gmu.h b/drivers/gpu/msm/kgsl_gmu.h
index e0c857f..60d9cf8 100644
--- a/drivers/gpu/msm/kgsl_gmu.h
+++ b/drivers/gpu/msm/kgsl_gmu.h
@@ -233,6 +233,7 @@
 	unsigned int fault_count;
 };
 
+void gmu_snapshot(struct kgsl_device *device);
 bool kgsl_gmu_isenabled(struct kgsl_device *device);
 int gmu_probe(struct kgsl_device *device);
 void gmu_remove(struct kgsl_device *device);
diff --git a/drivers/gpu/msm/kgsl_hfi.c b/drivers/gpu/msm/kgsl_hfi.c
index 2cc60b5..eef5f45 100644
--- a/drivers/gpu/msm/kgsl_hfi.c
+++ b/drivers/gpu/msm/kgsl_hfi.c
@@ -594,12 +594,12 @@
 
 	gmu->ver = ver;
 	if (major != FW_VER_MAJOR(ver))
-		dev_err(dev, "FW version major %d error (expect %d)\n",
+		WARN_ONCE(1, "FW version major %d error (expect %d)\n",
 				FW_VER_MAJOR(ver),
 				adreno_dev->gpucore->gpmu_major);
 
 	if (minor > FW_VER_MINOR(ver))
-		dev_err(dev, "FW version minor %d error (expect %d)\n",
+		WARN_ONCE(1, "FW version minor %d error (expect %d)\n",
 				FW_VER_MINOR(ver),
 				adreno_dev->gpucore->gpmu_minor);
 
diff --git a/drivers/gpu/msm/kgsl_ioctl.c b/drivers/gpu/msm/kgsl_ioctl.c
index bfce4d4..9b02e19 100644
--- a/drivers/gpu/msm/kgsl_ioctl.c
+++ b/drivers/gpu/msm/kgsl_ioctl.c
@@ -145,7 +145,7 @@
 
 	if (_IOC_SIZE(cmds[nr].cmd) > sizeof(data)) {
 		if (__ratelimit(&_rs))
-			WARN(1, "data too big for ioctl 0x%08X: %d/%ld\n",
+			WARN(1, "data too big for ioctl 0x%08X: %d/%zu\n",
 				cmd, _IOC_SIZE(cmds[nr].cmd), sizeof(data));
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index c02046a..dc0e733 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -126,9 +126,11 @@
 		if (memdesc == NULL)
 			continue;
 
-		seq_printf(s, "0x%16.16llX-0x%16.16llX %16llu %s\n",
-			memdesc->gpuaddr, memdesc->gpuaddr + memdesc->size - 1,
-			memdesc->size, global_pt_entries[i].name);
+		seq_printf(s, "0x%pK-0x%pK %16llu %s\n",
+			(uint64_t *)(uintptr_t) memdesc->gpuaddr,
+			(uint64_t *)(uintptr_t) (memdesc->gpuaddr +
+			memdesc->size - 1), memdesc->size,
+			global_pt_entries[i].name);
 	}
 }
 
diff --git a/drivers/gpu/msm/kgsl_pool.c b/drivers/gpu/msm/kgsl_pool.c
index c31a85b..5da8c1d 100644
--- a/drivers/gpu/msm/kgsl_pool.c
+++ b/drivers/gpu/msm/kgsl_pool.c
@@ -422,6 +422,24 @@
 	__free_pages(page, page_order);
 }
 
+/*
+ * Return true if a pool of the specified page size is configured, or if
+ * no pools are configured at all; otherwise return false.
+ */
+bool kgsl_pool_avaialable(int page_size)
+{
+	int i;
+
+	if (!kgsl_num_pools)
+		return true;
+
+	for (i = 0; i < kgsl_num_pools; i++)
+		if (ilog2(page_size >> PAGE_SHIFT) == kgsl_pools[i].pool_order)
+			return true;
+
+	return false;
+}
+
 static void kgsl_pool_reserve_pages(void)
 {
 	int i, j;
diff --git a/drivers/gpu/msm/kgsl_pool.h b/drivers/gpu/msm/kgsl_pool.h
index d55e1ad..8091afb 100644
--- a/drivers/gpu/msm/kgsl_pool.h
+++ b/drivers/gpu/msm/kgsl_pool.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -40,5 +40,6 @@
 int kgsl_pool_alloc_page(int *page_size, struct page **pages,
 			unsigned int pages_len, unsigned int *align);
 void kgsl_pool_free_page(struct page *p);
+bool kgsl_pool_avaialable(int size);
 #endif /* __KGSL_POOL_H */
 
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index 5061f6a..de5df54 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -27,7 +27,6 @@
 #include "kgsl_device.h"
 #include "kgsl_log.h"
 #include "kgsl_mmu.h"
-#include "kgsl_pool.h"
 
 /*
  * The user can set this from debugfs to force failed memory allocations to
@@ -1057,7 +1056,7 @@
 	else if (type < ARRAY_SIZE(memtype_str) && memtype_str[type] != NULL)
 		strlcpy(name, memtype_str[type], name_size);
 	else
-		snprintf(name, name_size, "unknown(%3d)", type);
+		snprintf(name, name_size, "VK/others(%3d)", type);
 }
 EXPORT_SYMBOL(kgsl_get_memory_usage);
 
diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h
index 5466a49..55bb34f 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.h
+++ b/drivers/gpu/msm/kgsl_sharedmem.h
@@ -368,6 +368,8 @@
 	}
 }
 
+#include "kgsl_pool.h"
+
 /**
  * kgsl_get_page_size() - Get supported pagesize
  * @size: Size of the page
@@ -378,11 +380,14 @@
 #ifndef CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS
 static inline int kgsl_get_page_size(size_t size, unsigned int align)
 {
-	if (align >= ilog2(SZ_1M) && size >= SZ_1M)
+	if (align >= ilog2(SZ_1M) && size >= SZ_1M &&
+		kgsl_pool_avaialable(SZ_1M))
 		return SZ_1M;
-	else if (align >= ilog2(SZ_64K) && size >= SZ_64K)
+	else if (align >= ilog2(SZ_64K) && size >= SZ_64K &&
+		kgsl_pool_avaialable(SZ_64K))
 		return SZ_64K;
-	else if (align >= ilog2(SZ_8K) && size >= SZ_8K)
+	else if (align >= ilog2(SZ_8K) && size >= SZ_8K &&
+		kgsl_pool_avaialable(SZ_8K))
 		return SZ_8K;
 	else
 		return PAGE_SIZE;
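
Note: a quick check of the order arithmetic behind the kgsl_pool_avaialable() calls above, assuming 4 KB pages:

	/*
	 * With 4 KB pages:
	 *   ilog2(SZ_8K  >> PAGE_SHIFT) == 1
	 *   ilog2(SZ_64K >> PAGE_SHIFT) == 4
	 *   ilog2(SZ_1M  >> PAGE_SHIFT) == 8
	 * so kgsl_get_page_size() only returns a 64 KB chunk size when some
	 * pool was created with pool_order == 4, and otherwise falls through
	 * to the next smaller supported size and finally to PAGE_SIZE.
	 */
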
diff --git a/drivers/hwtracing/coresight/coresight-ost.c b/drivers/hwtracing/coresight/coresight-ost.c
index 3399c27..a5075ba 100644
--- a/drivers/hwtracing/coresight/coresight-ost.c
+++ b/drivers/hwtracing/coresight/coresight-ost.c
@@ -123,13 +123,14 @@
 
 static int stm_trace_data_header(void __iomem *addr)
 {
-	char hdr[16];
+	char hdr[24];
 	int len = 0;
 
-	*(uint16_t *)(hdr) = STM_MAKE_VERSION(0, 1);
+	*(uint16_t *)(hdr) = STM_MAKE_VERSION(0, 2);
 	*(uint16_t *)(hdr + 2) = STM_HEADER_MAGIC;
 	*(uint32_t *)(hdr + 4) = raw_smp_processor_id();
 	*(uint64_t *)(hdr + 8) = sched_clock();
+	*(uint64_t *)(hdr + 16) = task_tgid_nr(get_current());
 
 	len += stm_ost_send(addr, hdr, sizeof(hdr));
 	len += stm_ost_send(addr, current->comm, TASK_COMM_LEN);
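
Note: with the added TGID field the OST data header grows from 16 to 24 bytes. An equivalent view of the layout the code above builds byte-by-byte (struct shown for illustration only):

	/* Illustrative layout of the 24-byte header written into hdr[] above */
	struct stm_ost_data_hdr {
		u16 version;	/* STM_MAKE_VERSION(0, 2) */
		u16 magic;	/* STM_HEADER_MAGIC */
		u32 cpu;	/* raw_smp_processor_id() */
		u64 timestamp;	/* sched_clock() */
		u64 tgid;	/* task_tgid_nr(current), new in version 0.2 */
	} __packed;		/* 24 bytes, matching char hdr[24] */
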
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 8477292..7aea288 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -340,12 +340,15 @@
 			data->word = dma_buffer[0] | (dma_buffer[1] << 8);
 			break;
 		case I2C_SMBUS_BLOCK_DATA:
-		case I2C_SMBUS_I2C_BLOCK_DATA:
 			if (desc->rxbytes != dma_buffer[0] + 1)
 				return -EMSGSIZE;
 
 			memcpy(data->block, dma_buffer, desc->rxbytes);
 			break;
+		case I2C_SMBUS_I2C_BLOCK_DATA:
+			memcpy(&data->block[1], dma_buffer, desc->rxbytes);
+			data->block[0] = desc->rxbytes;
+			break;
 		}
 		return 0;
 	}
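
Note: the split matters because of how the SMBus core expects block results to be laid out; the buffer semantics the fix above relies on:

	/*
	 * i2c_smbus_data block layout expected by the I2C core:
	 *   block[0]           = number of valid data bytes
	 *   block[1..block[0]] = payload
	 *
	 * An SMBus Block Read returns the count on the wire, so dma_buffer
	 * already looks like [count, d0, d1, ...] and is copied as-is.  An
	 * I2C block read carries no count byte, so the driver copies the
	 * payload to &block[1] and fills in block[0] = desc->rxbytes itself.
	 */
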
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index c21ca7b..8f1c5f2 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -94,6 +94,12 @@
 #define SB800_PIIX4_PORT_IDX_ALT	0x2e
 #define SB800_PIIX4_PORT_IDX_SEL	0x2f
 #define SB800_PIIX4_PORT_IDX_MASK	0x06
+#define SB800_PIIX4_PORT_IDX_SHIFT	1
+
+/* On kerncz, SmBus0Sel is at bits 20:19 of PMx00 DecodeEn */
+#define SB800_PIIX4_PORT_IDX_KERNCZ		0x02
+#define SB800_PIIX4_PORT_IDX_MASK_KERNCZ	0x18
+#define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ	3
 
 /* insmod parameters */
 
@@ -149,6 +155,8 @@
  */
 static DEFINE_MUTEX(piix4_mutex_sb800);
 static u8 piix4_port_sel_sb800;
+static u8 piix4_port_mask_sb800;
+static u8 piix4_port_shift_sb800;
 static const char *piix4_main_port_names_sb800[PIIX4_MAX_ADAPTERS] = {
 	" port 0", " port 2", " port 3", " port 4"
 };
@@ -347,7 +355,19 @@
 
 	/* Find which register is used for port selection */
 	if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD) {
-		piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT;
+		switch (PIIX4_dev->device) {
+		case PCI_DEVICE_ID_AMD_KERNCZ_SMBUS:
+			piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ;
+			piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK_KERNCZ;
+			piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ;
+			break;
+		case PCI_DEVICE_ID_AMD_HUDSON2_SMBUS:
+		default:
+			piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT;
+			piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
+			piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
+			break;
+		}
 	} else {
 		mutex_lock(&piix4_mutex_sb800);
 		outb_p(SB800_PIIX4_PORT_IDX_SEL, SB800_PIIX4_SMB_IDX);
@@ -355,6 +375,8 @@
 		piix4_port_sel_sb800 = (port_sel & 0x01) ?
 				       SB800_PIIX4_PORT_IDX_ALT :
 				       SB800_PIIX4_PORT_IDX;
+		piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
+		piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
 		mutex_unlock(&piix4_mutex_sb800);
 	}
 
@@ -616,8 +638,8 @@
 	smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
 
 	port = adapdata->port;
-	if ((smba_en_lo & SB800_PIIX4_PORT_IDX_MASK) != port)
-		outb_p((smba_en_lo & ~SB800_PIIX4_PORT_IDX_MASK) | port,
+	if ((smba_en_lo & piix4_port_mask_sb800) != port)
+		outb_p((smba_en_lo & ~piix4_port_mask_sb800) | port,
 		       SB800_PIIX4_SMB_IDX + 1);
 
 	retval = piix4_access(adap, addr, flags, read_write,
@@ -706,7 +728,7 @@
 
 	adapdata->smba = smba;
 	adapdata->sb800_main = sb800_main;
-	adapdata->port = port << 1;
+	adapdata->port = port << piix4_port_shift_sb800;
 
 	/* set up the sysfs linkage to our parent device */
 	adap->dev.parent = &dev->dev;
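
Note: a worked example of the two port-index encodings for, say, adapter port 2:

	/*
	 * SB800/Hudson-2: port << 1 = 0x04, matched against mask 0x06
	 * KERNCZ:         port << 3 = 0x10, matched against mask 0x18
	 *                 (SmBus0Sel, bits 20:19 of PMx00 DecodeEn, as seen
	 *                 through the SB800_PIIX4_SMB_IDX data byte)
	 * The access path only rewrites the field when
	 * (smba_en_lo & piix4_port_mask_sb800) != adapdata->port.
	 */
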
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 793cbb5..1bfb98e 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -72,7 +72,10 @@
 #define I2C_NACK		GP_IRQ1
 #define I2C_BUS_PROTO		GP_IRQ3
 #define I2C_ARB_LOST		GP_IRQ4
-#define DM_I2C_RX_ERR		((GP_IRQ1 | GP_IRQ3 | GP_IRQ4) >> 4)
+#define DM_I2C_CB_ERR		((BIT(GP_IRQ1) | BIT(GP_IRQ3) | BIT(GP_IRQ4)) \
+									<< 5)
+
+#define I2C_AUTO_SUSPEND_DELAY	250
 
 enum i2c_se_mode {
 	UNINITIALIZED,
@@ -223,7 +226,7 @@
 	struct i2c_msg *cur = gi2c->cur;
 
 	if (!cur || (m_stat & M_CMD_FAILURE_EN) ||
-		    (dm_rx_st & (DM_I2C_RX_ERR)) ||
+		    (dm_rx_st & (DM_I2C_CB_ERR)) ||
 		    (m_stat & M_CMD_ABORT_EN)) {
 
 		if (m_stat & M_GP_IRQ_1_EN)
@@ -349,13 +352,33 @@
 				m_stat, cb_str->cb_event);
 }
 
+static void gi2c_gsi_cb_err(struct msm_gpi_dma_async_tx_cb_param *cb,
+								char *xfer)
+{
+	struct geni_i2c_dev *gi2c = cb->userdata;
+
+	if (cb->status & DM_I2C_CB_ERR) {
+		GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
+			    "%s TCE Unexpected Err, stat:0x%x\n",
+				xfer, cb->status);
+		if (cb->status & (BIT(GP_IRQ1) << 5))
+			geni_i2c_err(gi2c, I2C_NACK);
+		if (cb->status & (BIT(GP_IRQ3) << 5))
+			geni_i2c_err(gi2c, I2C_BUS_PROTO);
+		if (cb->status & (BIT(GP_IRQ4) << 5))
+			geni_i2c_err(gi2c, I2C_ARB_LOST);
+	}
+}
+
 static void gi2c_gsi_tx_cb(void *ptr)
 {
 	struct msm_gpi_dma_async_tx_cb_param *tx_cb = ptr;
 	struct geni_i2c_dev *gi2c = tx_cb->userdata;
 
-	if (!(gi2c->cur->flags & I2C_M_RD))
+	if (!(gi2c->cur->flags & I2C_M_RD)) {
+		gi2c_gsi_cb_err(tx_cb, "TX");
 		complete(&gi2c->xfer);
+	}
 }
 
 static void gi2c_gsi_rx_cb(void *ptr)
@@ -364,17 +387,7 @@
 	struct geni_i2c_dev *gi2c = rx_cb->userdata;
 
 	if (gi2c->cur->flags & I2C_M_RD) {
-		if (rx_cb->status & DM_I2C_RX_ERR) {
-			GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
-				    "RX TCE Unexpected Err, stat:0x%x\n",
-				    rx_cb->status);
-			if (rx_cb->status & GP_IRQ1)
-				geni_i2c_err(gi2c, I2C_NACK);
-			if (rx_cb->status & GP_IRQ3)
-				geni_i2c_err(gi2c, I2C_BUS_PROTO);
-			if (rx_cb->status & GP_IRQ4)
-				geni_i2c_err(gi2c, I2C_ARB_LOST);
-		}
+		gi2c_gsi_cb_err(rx_cb, "RX");
 		complete(&gi2c->xfer);
 	}
 }
@@ -385,12 +398,22 @@
 	struct geni_i2c_dev *gi2c = i2c_get_adapdata(adap);
 	int i, ret = 0, timeout = 0;
 
+	ret = pinctrl_select_state(gi2c->i2c_rsc.geni_pinctrl,
+				gi2c->i2c_rsc.geni_gpio_active);
+	if (ret) {
+		GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
+			"%s: Error %d pinctrl_select_state active\n",
+			__func__, ret);
+		return ret;
+	}
+
 	if (!gi2c->tx_c) {
 		gi2c->tx_c = dma_request_slave_channel(gi2c->dev, "tx");
 		if (!gi2c->tx_c) {
 			GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
 				    "tx dma req slv chan ret :%d\n", ret);
-			return -EIO;
+			ret = -EIO;
+			goto geni_i2c_gsi_xfer_out;
 		}
 		gi2c->tx_ev.init.callback = gi2c_ev_cb;
 		gi2c->tx_ev.init.cb_param = gi2c;
@@ -400,7 +423,7 @@
 		if (ret) {
 			GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
 				    "tx dma slave config ret :%d\n", ret);
-			return ret;
+			goto geni_i2c_gsi_xfer_out;
 		}
 	}
 	if (!gi2c->rx_c) {
@@ -408,7 +431,8 @@
 		if (!gi2c->rx_c) {
 			GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
 				    "rx dma req slv chan ret :%d\n", ret);
-			return -EIO;
+			ret = -EIO;
+			goto geni_i2c_gsi_xfer_out;
 		}
 		gi2c->rx_ev.init.cb_param = gi2c;
 		gi2c->rx_ev.init.callback = gi2c_ev_cb;
@@ -418,7 +442,7 @@
 		if (ret) {
 			GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
 				    "rx dma slave config ret :%d\n", ret);
-			return ret;
+			goto geni_i2c_gsi_xfer_out;
 		}
 	}
 
@@ -502,7 +526,7 @@
 				GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
 					    "prep_slave_sg for rx failed\n");
 				gi2c->err = -ENOMEM;
-				return gi2c->err;
+				goto geni_i2c_gsi_xfer_out;
 			}
 			gi2c->rx_desc->callback = gi2c_gsi_rx_cb;
 			gi2c->rx_desc->callback_param = &gi2c->rx_cb;
@@ -534,7 +558,7 @@
 			GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
 				    "prep_slave_sg for tx failed\n");
 			gi2c->err = -ENOMEM;
-			return gi2c->err;
+			goto geni_i2c_gsi_xfer_out;
 		}
 		gi2c->tx_desc->callback = gi2c_gsi_tx_cb;
 		gi2c->tx_desc->callback_param = &gi2c->tx_cb;
@@ -559,10 +583,15 @@
 		if (gi2c->err) {
 			dmaengine_terminate_all(gi2c->tx_c);
 			gi2c->cfg_sent = 0;
-			return gi2c->err;
+			goto geni_i2c_gsi_xfer_out;
 		}
 	}
-	return gi2c->err;
+geni_i2c_gsi_xfer_out:
+	if (!ret && gi2c->err)
+		ret = gi2c->err;
+	pinctrl_select_state(gi2c->i2c_rsc.geni_pinctrl,
+				gi2c->i2c_rsc.geni_gpio_sleep);
+	return ret;
 }
 
 static int geni_i2c_xfer(struct i2c_adapter *adap,
@@ -686,7 +715,9 @@
 geni_i2c_txn_ret:
 	if (ret == 0)
 		ret = num;
-	pm_runtime_put_sync(gi2c->dev);
+
+	pm_runtime_mark_last_busy(gi2c->dev);
+	pm_runtime_put_autosuspend(gi2c->dev);
 	gi2c->cur = NULL;
 	gi2c->err = 0;
 	dev_dbg(gi2c->dev, "i2c txn ret:%d\n", ret);
@@ -830,6 +861,8 @@
 	strlcpy(gi2c->adap.name, "Geni-I2C", sizeof(gi2c->adap.name));
 
 	pm_runtime_set_suspended(gi2c->dev);
+	pm_runtime_set_autosuspend_delay(gi2c->dev, I2C_AUTO_SUSPEND_DELAY);
+	pm_runtime_use_autosuspend(gi2c->dev);
 	pm_runtime_enable(gi2c->dev);
 	i2c_add_adapter(&gi2c->adap);
 
@@ -858,10 +891,13 @@
 {
 	struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
 
-	if (gi2c->se_mode == FIFO_SE_DMA)
+	if (gi2c->se_mode == FIFO_SE_DMA) {
 		disable_irq(gi2c->irq);
-
-	se_geni_resources_off(&gi2c->i2c_rsc);
+		se_geni_resources_off(&gi2c->i2c_rsc);
+	} else {
+		/* GPIOs are already in sleep state, so just turn clocks off */
+		se_geni_clks_off(&gi2c->i2c_rsc);
+	}
 	return 0;
 }
 
@@ -876,7 +912,12 @@
 		snprintf(ipc_name, I2C_NAME_SIZE, "i2c-%d", gi2c->adap.nr);
 		gi2c->ipcl = ipc_log_context_create(2, ipc_name, 0);
 	}
-	ret = se_geni_resources_on(&gi2c->i2c_rsc);
+
+	if (gi2c->se_mode != GSI_ONLY)
+		ret = se_geni_resources_on(&gi2c->i2c_rsc);
+	else
+		ret = se_geni_clks_on(&gi2c->i2c_rsc);
+
 	if (ret)
 		return ret;
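
Note: together the probe and transfer changes move the adapter to runtime-PM autosuspend. The standard pattern, for reference (the get_sync side is unchanged and not shown in this diff):

	/* probe(): arm autosuspend with a 250 ms inactivity window */
	pm_runtime_set_autosuspend_delay(gi2c->dev, I2C_AUTO_SUSPEND_DELAY);
	pm_runtime_use_autosuspend(gi2c->dev);
	pm_runtime_enable(gi2c->dev);

	/* xfer(): hold the device active while busy, then let it idle out */
	pm_runtime_get_sync(gi2c->dev);
	/* ... perform the transfer ... */
	pm_runtime_mark_last_busy(gi2c->dev);
	pm_runtime_put_autosuspend(gi2c->dev);	/* suspends ~250 ms after last use */
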
 
diff --git a/drivers/iio/dummy/iio_simple_dummy_events.c b/drivers/iio/dummy/iio_simple_dummy_events.c
index ed63ffd..7ec2a0b 100644
--- a/drivers/iio/dummy/iio_simple_dummy_events.c
+++ b/drivers/iio/dummy/iio_simple_dummy_events.c
@@ -72,6 +72,7 @@
 				st->event_en = state;
 			else
 				return -EINVAL;
+			break;
 		default:
 			return -EINVAL;
 		}
diff --git a/drivers/input/misc/hbtp_input.c b/drivers/input/misc/hbtp_input.c
index 66d1499..e174102 100644
--- a/drivers/input/misc/hbtp_input.c
+++ b/drivers/input/misc/hbtp_input.c
@@ -30,10 +30,7 @@
 #include <linux/delay.h>
 #include <linux/completion.h>
 
-#if defined(CONFIG_FB)
-#include <linux/notifier.h>
-#include <linux/fb.h>
-#endif
+#include <linux/msm_drm_notify.h>
 
 #define HBTP_INPUT_NAME			"hbtp_input"
 #define DISP_COORDS_SIZE		2
@@ -41,6 +38,7 @@
 #define HBTP_PINCTRL_VALID_STATE_CNT		(2)
 #define HBTP_HOLD_DURATION_US			(10)
 #define HBTP_PINCTRL_DDIC_SEQ_NUM		(4)
+#define HBTP_WAIT_TIMEOUT_MS			2000
 
 struct hbtp_data {
 	struct platform_device *pdev;
@@ -50,9 +48,7 @@
 	struct mutex sensormutex;
 	struct hbtp_sensor_data *sensor_data;
 	bool touch_status[HBTP_MAX_FINGER];
-#if defined(CONFIG_FB)
-	struct notifier_block fb_notif;
-#endif
+	struct notifier_block dsi_panel_notif;
 	struct pinctrl *ts_pinctrl;
 	struct pinctrl_state *gpio_state_active;
 	struct pinctrl_state *gpio_state_suspend;
@@ -61,7 +57,7 @@
 	struct pinctrl_state *ddic_rst_state_suspend;
 	u32 ts_pinctrl_seq_delay;
 	u32 ddic_pinctrl_seq_delay[HBTP_PINCTRL_DDIC_SEQ_NUM];
-	u32 fb_resume_seq_delay;
+	u32 dsi_panel_resume_seq_delay;
 	bool lcd_on;
 	bool power_suspended;
 	bool power_sync_enabled;
@@ -99,64 +95,51 @@
 
 static struct kobject *sensor_kobject;
 
-#if defined(CONFIG_FB)
-static int hbtp_fb_suspend(struct hbtp_data *ts);
-static int hbtp_fb_early_resume(struct hbtp_data *ts);
-static int hbtp_fb_resume(struct hbtp_data *ts);
-#endif
+static int hbtp_dsi_panel_suspend(struct hbtp_data *ts);
+static int hbtp_dsi_panel_early_resume(struct hbtp_data *ts);
 
-#if defined(CONFIG_FB)
-static int fb_notifier_callback(struct notifier_block *self,
+static int dsi_panel_notifier_callback(struct notifier_block *self,
 				 unsigned long event, void *data)
 {
 	int blank;
-	struct fb_event *evdata = data;
+	struct msm_drm_notifier *evdata = data;
 	struct hbtp_data *hbtp_data =
-	container_of(self, struct hbtp_data, fb_notif);
+	container_of(self, struct hbtp_data, dsi_panel_notif);
 
-	if (evdata && evdata->data && hbtp_data &&
-		(event == FB_EARLY_EVENT_BLANK ||
-		event == FB_R_EARLY_EVENT_BLANK)) {
+	if (!evdata || (evdata->id != 0))
+		return 0;
+
+	if (hbtp_data && (event == MSM_DRM_EARLY_EVENT_BLANK)) {
 		blank = *(int *)(evdata->data);
-		if (event == FB_EARLY_EVENT_BLANK) {
-			if (blank == FB_BLANK_UNBLANK) {
-				pr_debug("%s: receives EARLY_BLANK:UNBLANK\n",
+		if (blank == MSM_DRM_BLANK_UNBLANK) {
+			pr_debug("%s: receives EARLY_BLANK:UNBLANK\n",
 					__func__);
-				hbtp_data->lcd_on = true;
-				hbtp_fb_early_resume(hbtp_data);
-			} else if (blank == FB_BLANK_POWERDOWN) {
-				pr_debug("%s: receives EARLY_BLANK:POWERDOWN\n",
-					__func__);
-				hbtp_data->lcd_on = false;
-			}
-		} else if (event == FB_R_EARLY_EVENT_BLANK) {
-			if (blank == FB_BLANK_UNBLANK) {
-				pr_debug("%s: receives R_EARLY_BALNK:UNBLANK\n",
-					__func__);
-				hbtp_data->lcd_on = false;
-				hbtp_fb_suspend(hbtp_data);
-			} else if (blank == FB_BLANK_POWERDOWN) {
-				pr_debug("%s: receives R_EARLY_BALNK:POWERDOWN\n",
-					__func__);
-				hbtp_data->lcd_on = true;
-			}
+			hbtp_data->lcd_on = true;
+			hbtp_dsi_panel_early_resume(hbtp_data);
+		} else if (blank == MSM_DRM_BLANK_POWERDOWN) {
+			pr_debug("%s: receives EARLY_BLANK:POWERDOWN\n",
+				__func__);
+			hbtp_data->lcd_on = false;
+		} else {
+			pr_err("%s: receives wrong data EARLY_BLANK:%d\n",
+				__func__, blank);
 		}
 	}
 
-	if (evdata && evdata->data && hbtp_data &&
-		event == FB_EVENT_BLANK) {
+	if (hbtp_data && event == MSM_DRM_EVENT_BLANK) {
 		blank = *(int *)(evdata->data);
-		if (blank == FB_BLANK_POWERDOWN) {
+		if (blank == MSM_DRM_BLANK_POWERDOWN) {
 			pr_debug("%s: receives BLANK:POWERDOWN\n", __func__);
-			hbtp_fb_suspend(hbtp_data);
-		} else if (blank == FB_BLANK_UNBLANK) {
+			hbtp_dsi_panel_suspend(hbtp_data);
+		} else if (blank == MSM_DRM_BLANK_UNBLANK) {
 			pr_debug("%s: receives BLANK:UNBLANK\n", __func__);
-			hbtp_fb_resume(hbtp_data);
+		} else {
+			pr_err("%s: receives wrong data BLANK:%d\n",
+				__func__, blank);
 		}
 	}
 	return 0;
 }
-#endif
 
 static ssize_t hbtp_sensor_roi_show(struct file *dev, struct kobject *kobj,
 		struct bin_attribute *attr, char *buf, loff_t pos,
@@ -1134,7 +1117,7 @@
 	}
 
 	if (of_property_read_u32(np, "qcom,fb-resume-delay-us",
-			&data->fb_resume_seq_delay)) {
+			&data->dsi_panel_resume_seq_delay)) {
 		dev_warn(&data->pdev->dev, "Can not find fb resume seq delay\n");
 	}
 
@@ -1164,7 +1147,7 @@
 	return rc;
 }
 
-static int hbtp_fb_suspend(struct hbtp_data *ts)
+static int hbtp_dsi_panel_suspend(struct hbtp_data *ts)
 {
 	int rc;
 	char *envp[2] = {HBTP_EVENT_TYPE_DISPLAY, NULL};
@@ -1189,29 +1172,30 @@
 			goto err_power_disable;
 		}
 		ts->power_suspended = true;
-	}
+		if (ts->input_dev) {
+			kobject_uevent_env(&ts->input_dev->dev.kobj,
+					KOBJ_OFFLINE, envp);
 
-	if (ts->input_dev) {
-		kobject_uevent_env(&ts->input_dev->dev.kobj,
-				KOBJ_OFFLINE, envp);
-
-		if (ts->power_sig_enabled) {
-			pr_debug("%s: power_sig is enabled, wait for signal\n",
-				__func__);
-			mutex_unlock(&hbtp->mutex);
-			rc = wait_for_completion_interruptible(
-				&hbtp->power_suspend_sig);
-			if (rc != 0) {
-				pr_err("%s: wait for suspend is interrupted\n",
+			if (ts->power_sig_enabled) {
+				pr_debug("%s: power_sig is enabled, wait for signal\n",
+					__func__);
+				mutex_unlock(&hbtp->mutex);
+				rc = wait_for_completion_interruptible_timeout(
+					&hbtp->power_suspend_sig,
+					msecs_to_jiffies(HBTP_WAIT_TIMEOUT_MS));
+				if (rc <= 0) {
+					pr_err("%s: wait for suspend is interrupted\n",
+						__func__);
+				}
+				mutex_lock(&hbtp->mutex);
+				pr_debug("%s: Wait is done for suspend\n",
+					__func__);
+			} else {
+				pr_debug("%s: power_sig is NOT enabled\n",
 					__func__);
 			}
-			mutex_lock(&hbtp->mutex);
-			pr_debug("%s: Wait is done for suspend\n", __func__);
-		} else {
-			pr_debug("%s: power_sig is NOT enabled", __func__);
 		}
 	}
-
 	mutex_unlock(&hbtp->mutex);
 	return 0;
 err_power_disable:
@@ -1221,15 +1205,12 @@
 	return rc;
 }
 
-static int hbtp_fb_early_resume(struct hbtp_data *ts)
+static int hbtp_dsi_panel_early_resume(struct hbtp_data *ts)
 {
 	char *envp[2] = {HBTP_EVENT_TYPE_DISPLAY, NULL};
 	int rc;
 
 	mutex_lock(&hbtp->mutex);
-
-	pr_debug("%s: hbtp_fb_early_resume\n", __func__);
-
 	if (ts->pdev && ts->power_sync_enabled) {
 		pr_debug("%s: power_sync is enabled\n", __func__);
 		if (!ts->power_suspended) {
@@ -1261,9 +1242,10 @@
 				pr_err("%s: power_sig is enabled, wait for signal\n",
 					__func__);
 				mutex_unlock(&hbtp->mutex);
-				rc = wait_for_completion_interruptible(
-					&hbtp->power_resume_sig);
-				if (rc != 0) {
+				rc = wait_for_completion_interruptible_timeout(
+					&hbtp->power_resume_sig,
+					msecs_to_jiffies(HBTP_WAIT_TIMEOUT_MS));
+				if (rc <= 0) {
 					pr_err("%s: wait for resume is interrupted\n",
 						__func__);
 				}
@@ -1274,12 +1256,13 @@
 					__func__);
 			}
 
-			if (ts->fb_resume_seq_delay) {
-				usleep_range(ts->fb_resume_seq_delay,
-					ts->fb_resume_seq_delay +
+			if (ts->dsi_panel_resume_seq_delay) {
+				usleep_range(ts->dsi_panel_resume_seq_delay,
+					ts->dsi_panel_resume_seq_delay +
 					HBTP_HOLD_DURATION_US);
-				pr_err("%s: fb_resume_seq_delay = %u\n",
-					__func__, ts->fb_resume_seq_delay);
+				pr_err("%s: dsi_panel_resume_seq_delay = %u\n",
+					__func__,
+					ts->dsi_panel_resume_seq_delay);
 			}
 		}
 	}
@@ -1293,22 +1276,6 @@
 	return rc;
 }
 
-static int hbtp_fb_resume(struct hbtp_data *ts)
-{
-	char *envp[2] = {HBTP_EVENT_TYPE_DISPLAY, NULL};
-
-	mutex_lock(&hbtp->mutex);
-	if (!ts->power_sync_enabled) {
-		pr_debug("%s: power_sync is disabled, send uevent\n", __func__);
-		if (ts->input_dev) {
-			kobject_uevent_env(&ts->input_dev->dev.kobj,
-				KOBJ_ONLINE, envp);
-		}
-	}
-	mutex_unlock(&hbtp->mutex);
-	return 0;
-}
-
 static int hbtp_pdev_probe(struct platform_device *pdev)
 {
 	int error;
@@ -1434,14 +1401,16 @@
 		mutex_unlock(&hbtp->mutex);
 		return ret;
 	}
-	if (status) {
-		pr_debug("hbtp: display power on!\n");
-		kobject_uevent_env(&hbtp->input_dev->dev.kobj,
-			KOBJ_ONLINE, envp);
-	} else {
-		pr_debug("hbtp: display power off!\n");
-		kobject_uevent_env(&hbtp->input_dev->dev.kobj,
-			KOBJ_OFFLINE, envp);
+	if (!hbtp->power_sync_enabled) {
+		if (status) {
+			pr_debug("hbtp: display power on!\n");
+			kobject_uevent_env(&hbtp->input_dev->dev.kobj,
+				KOBJ_ONLINE, envp);
+		} else {
+			pr_debug("hbtp: display power off!\n");
+			kobject_uevent_env(&hbtp->input_dev->dev.kobj,
+				KOBJ_OFFLINE, envp);
+		}
 	}
 	mutex_unlock(&hbtp->mutex);
 	return count;
@@ -1462,6 +1431,7 @@
 		__ATTR(display_pwr, 0660, hbtp_display_pwr_show,
 			hbtp_display_pwr_store);
 
+
 static int __init hbtp_init(void)
 {
 	int error = 0;
@@ -1485,15 +1455,13 @@
 		goto err_misc_reg;
 	}
 
-#if defined(CONFIG_FB)
-	hbtp->fb_notif.notifier_call = fb_notifier_callback;
-	error = fb_register_client(&hbtp->fb_notif);
+	hbtp->dsi_panel_notif.notifier_call = dsi_panel_notifier_callback;
+	error = msm_drm_register_client(&hbtp->dsi_panel_notif);
 	if (error) {
-		pr_err("%s: Unable to register fb_notifier: %d\n",
+		pr_err("%s: Unable to register dsi_panel_notifier: %d\n",
 			HBTP_INPUT_NAME, error);
-		goto err_fb_reg;
+		goto err_dsi_panel_reg;
 	}
-#endif
 
 	sensor_kobject = kobject_create_and_add("hbtpsensor", kernel_kobj);
 	if (!sensor_kobject) {
@@ -1542,10 +1510,8 @@
 err_sysfs_create_capdata:
 	kobject_put(sensor_kobject);
 err_kobject_create:
-#if defined(CONFIG_FB)
-	fb_unregister_client(&hbtp->fb_notif);
-err_fb_reg:
-#endif
+	msm_drm_unregister_client(&hbtp->dsi_panel_notif);
+err_dsi_panel_reg:
 	misc_deregister(&hbtp_input_misc);
 err_misc_reg:
 	kfree(hbtp->sensor_data);
@@ -1566,9 +1532,7 @@
 	if (hbtp->input_dev)
 		input_unregister_device(hbtp->input_dev);
 
-#if defined(CONFIG_FB)
-	fb_unregister_client(&hbtp->fb_notif);
-#endif
+	msm_drm_unregister_client(&hbtp->dsi_panel_notif);
 
 	platform_driver_unregister(&hbtp_pdev_driver);
 
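The hbtp changes above swap the CONFIG_FB notifier for the msm_drm panel notifier and, more importantly, bound the suspend/resume waits with wait_for_completion_interruptible_timeout() so a userspace helper that never signals can no longer block the path forever. The sketch below is not from the patch; it only illustrates the return-value convention behind the new "rc <= 0" checks (negative means interrupted by a signal, zero means timed out, positive means completed with jiffies to spare). The helper name and the 2000 ms figure are made up; HBTP_WAIT_TIMEOUT_MS is the driver's own (unshown) constant.

/*
 * Illustrative sketch only -- not part of the patch. "sig" stands in for
 * hbtp->power_suspend_sig / power_resume_sig.
 */
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int wait_for_userspace_ack(struct completion *sig)
{
	long rc;

	rc = wait_for_completion_interruptible_timeout(sig,
						msecs_to_jiffies(2000));
	if (rc < 0)
		return rc;		/* -ERESTARTSYS: hit by a signal */
	if (rc == 0)
		return -ETIMEDOUT;	/* userspace never acked */
	return 0;			/* completed, rc jiffies remained */
}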
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 681dce1..b8c50d8 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1240,6 +1240,7 @@
 	{ "ELAN0605", 0 },
 	{ "ELAN0609", 0 },
 	{ "ELAN060B", 0 },
+	{ "ELAN0611", 0 },
 	{ "ELAN1000", 0 },
 	{ }
 };
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index abf09ac..339a0e2 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -230,13 +230,17 @@
 
 	/* Walk  this report and pull out the info we need */
 	while (i < length) {
-		prefix = report[i];
-
-		/* Skip over prefix */
-		i++;
+		prefix = report[i++];
 
 		/* Determine data size and save the data in the proper variable */
-		size = PREF_SIZE(prefix);
+		size = (1U << PREF_SIZE(prefix)) >> 1;
+		if (i + size > length) {
+			dev_err(ddev,
+				"Not enough data (need %d, have %d)\n",
+				i + size, length);
+			break;
+		}
+
 		switch (size) {
 		case 1:
 			data = report[i];
@@ -244,8 +248,7 @@
 		case 2:
 			data16 = get_unaligned_le16(&report[i]);
 			break;
-		case 3:
-			size = 4;
+		case 4:
 			data32 = get_unaligned_le32(&report[i]);
 			break;
 		}
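The gtco fix computes the HID short-item data length directly and refuses to read past the end of the report descriptor. As a standalone aside (not driver code), the expression (1U << PREF_SIZE(prefix)) >> 1 maps the 2-bit size code {0,1,2,3} to the {0,1,2,4} data-byte lengths that the old switch handled with a "case 3: size = 4" special case:

#include <stdio.h>

int main(void)
{
	unsigned int code;

	/* HID short items encode 0, 1, 2 or 4 data bytes in two bits. */
	for (code = 0; code < 4; code++)
		printf("size code %u -> %u data bytes\n",
		       code, (1U << code) >> 1);
	return 0;	/* prints 0, 1, 2, 4 */
}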
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 333836c..eae12c9 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -433,7 +433,7 @@
 #define ARM_SMMU_OPT_3LVL_TABLES	(1 << 4)
 #define ARM_SMMU_OPT_NO_ASID_RETENTION	(1 << 5)
 #define ARM_SMMU_OPT_DISABLE_ATOS	(1 << 6)
-#define ARM_SMMU_OPT_QCOM_MMU500_ERRATA1	(1 << 7)
+#define ARM_SMMU_OPT_MMU500_ERRATA1	(1 << 7)
 	u32				options;
 	enum arm_smmu_arch_version	version;
 	enum arm_smmu_implementation	model;
@@ -559,7 +559,7 @@
 	{ ARM_SMMU_OPT_3LVL_TABLES, "qcom,use-3-lvl-tables" },
 	{ ARM_SMMU_OPT_NO_ASID_RETENTION, "qcom,no-asid-retention" },
 	{ ARM_SMMU_OPT_DISABLE_ATOS, "qcom,disable-atos" },
-	{ ARM_SMMU_OPT_QCOM_MMU500_ERRATA1, "qcom,mmu500-errata-1" },
+	{ ARM_SMMU_OPT_MMU500_ERRATA1, "qcom,mmu500-errata-1" },
 	{ 0, NULL},
 };
 
@@ -1792,7 +1792,7 @@
 		quirks |= IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE;
 
 	tlb = &arm_smmu_gather_ops;
-	if (smmu->options & ARM_SMMU_OPT_QCOM_MMU500_ERRATA1)
+	if (smmu->options & ARM_SMMU_OPT_MMU500_ERRATA1)
 		tlb = &qsmmuv500_errata1_smmu_gather_ops;
 
 	ret = arm_smmu_alloc_cb(domain, smmu, dev);
@@ -2899,7 +2899,7 @@
 			& (1 << DOMAIN_ATTR_CB_STALL_DISABLE));
 		ret = 0;
 		break;
-	case DOMAIN_ATTR_QCOM_MMU500_ERRATA_MIN_ALIGN:
+	case DOMAIN_ATTR_MMU500_ERRATA_MIN_ALIGN:
 		*((int *)data) = smmu_domain->qsmmuv500_errata2_min_align;
 		ret = 0;
 		break;
@@ -3192,65 +3192,6 @@
 	arm_smmu_power_off(smmu->pwr);
 }
 
-static unsigned long arm_smmu_reg_read(struct iommu_domain *domain,
-				       unsigned long offset)
-{
-	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-	struct arm_smmu_device *smmu;
-	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
-	void __iomem *cb_base;
-	unsigned long val;
-
-	if (offset >= SZ_4K) {
-		pr_err("Invalid offset: 0x%lx\n", offset);
-		return 0;
-	}
-
-	smmu = smmu_domain->smmu;
-	if (!smmu) {
-		WARN(1, "Can't read registers of a detached domain\n");
-		val = 0;
-		return val;
-	}
-
-	if (arm_smmu_power_on(smmu->pwr))
-		return 0;
-
-	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
-	val = readl_relaxed(cb_base + offset);
-
-	arm_smmu_power_off(smmu->pwr);
-	return val;
-}
-
-static void arm_smmu_reg_write(struct iommu_domain *domain,
-			       unsigned long offset, unsigned long val)
-{
-	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-	struct arm_smmu_device *smmu;
-	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
-	void __iomem *cb_base;
-
-	if (offset >= SZ_4K) {
-		pr_err("Invalid offset: 0x%lx\n", offset);
-		return;
-	}
-
-	smmu = smmu_domain->smmu;
-	if (!smmu) {
-		WARN(1, "Can't read registers of a detached domain\n");
-		return;
-	}
-
-	if (arm_smmu_power_on(smmu->pwr))
-		return;
-
-	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
-	writel_relaxed(val, cb_base + offset);
-
-	arm_smmu_power_off(smmu->pwr);
-}
-
 static void arm_smmu_tlbi_domain(struct iommu_domain *domain)
 {
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -3292,8 +3233,6 @@
 	.of_xlate		= arm_smmu_of_xlate,
 	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
 	.trigger_fault		= arm_smmu_trigger_fault,
-	.reg_read		= arm_smmu_reg_read,
-	.reg_write		= arm_smmu_reg_write,
 	.tlbi_domain		= arm_smmu_tlbi_domain,
 	.enable_config_clocks	= arm_smmu_enable_config_clocks,
 	.disable_config_clocks	= arm_smmu_disable_config_clocks,
@@ -4334,16 +4273,20 @@
 {
 	static bool registered;
 	int ret = 0;
+	ktime_t cur;
 
 	if (registered)
 		return 0;
 
+	cur = ktime_get();
 	ret = platform_driver_register(&qsmmuv500_tbu_driver);
 	if (ret)
 		return ret;
 
 	ret = platform_driver_register(&arm_smmu_driver);
 	registered = !ret;
+	trace_smmu_init(ktime_us_delta(ktime_get(), cur));
+
 	return ret;
 }
 
@@ -4534,13 +4477,14 @@
 	struct qsmmuv500_archdata *data =
 			get_qsmmuv500_archdata(smmu_domain->smmu);
 	ktime_t cur;
+	unsigned long flags;
 	bool errata;
 
 	cur = ktime_get();
 	trace_errata_tlbi_start(dev, 0);
 
 	errata = qsmmuv500_errata1_required(smmu_domain, data);
-	remote_spin_lock(&data->errata1_lock);
+	remote_spin_lock_irqsave(&data->errata1_lock, flags);
 	if (errata) {
 		s64 delta;
 
@@ -4554,7 +4498,7 @@
 	} else {
 		__qsmmuv500_errata1_tlbiall(smmu_domain);
 	}
-	remote_spin_unlock(&data->errata1_lock);
+	remote_spin_unlock_irqrestore(&data->errata1_lock, flags);
 
 	trace_errata_tlbi_end(dev, ktime_us_delta(ktime_get(), cur));
 }
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index a65214b..57ae0dd 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -132,7 +132,7 @@
 	int min_iova_align = 0;
 
 	iommu_domain_get_attr(domain,
-			DOMAIN_ATTR_QCOM_MMU500_ERRATA_MIN_ALIGN,
+			DOMAIN_ATTR_MMU500_ERRATA_MIN_ALIGN,
 			&min_iova_align);
 	iommu_domain_get_attr(domain, DOMAIN_ATTR_SECURE_VMID, &vmid);
 	if (vmid >= VMID_LAST || vmid < 0)
@@ -290,11 +290,12 @@
 	unsigned long shift = iova_shift(iovad);
 	unsigned long guard_len;
 
-	if (cookie->min_iova_align)
+	if (cookie->min_iova_align) {
 		guard_len = ALIGN(size, cookie->min_iova_align) - size;
-	else
+		iommu_unmap(domain, iova + size, guard_len);
+	} else {
 		guard_len = 0;
-	iommu_unmap(domain, iova + size, guard_len);
+	}
 
 	free_iova_fast(iovad, iova >> shift, (size + guard_len) >> shift);
 }
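Both dma-iommu.c hunks restructure the MMU-500 errata guard handling so iommu_unmap() is issued only when a non-zero guard region actually exists; previously it ran unconditionally with a possibly zero guard_len. The arithmetic itself is unchanged. A standalone illustration with hypothetical sizes (4 KiB pages, a 64 KiB minimum alignment) follows; ALIGN_UP mirrors the kernel's ALIGN() for power-of-two alignments:

#include <stdio.h>
#include <stddef.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
	size_t size = 12288;		/* 3 x 4 KiB pages mapped */
	size_t min_iova_align = 65536;	/* hypothetical 64 KiB minimum */
	size_t guard_len = ALIGN_UP(size, min_iova_align) - size;

	/* Only this padding is unmapped separately: 53248 bytes here. */
	printf("guard_len = %zu bytes\n", guard_len);
	return 0;
}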
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index 73090df..ad7ee11 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -14,11 +14,13 @@
 #include <linux/dma-mapping.h>
 #include <linux/dma-mapping-fast.h>
 #include <linux/io-pgtable-fast.h>
+#include <linux/pci.h>
 #include <linux/vmalloc.h>
 #include <asm/cacheflush.h>
 #include <asm/dma-iommu.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
+#include <trace/events/iommu.h>
 
 #include <soc/qcom/secure_buffer.h>
 #include <linux/arm-smmu-errata.h>
@@ -309,13 +311,14 @@
 	unsigned long nbits;
 	unsigned long guard_len;
 
-	if (mapping->min_iova_align)
+	if (mapping->min_iova_align) {
 		guard_len = ALIGN(size, mapping->min_iova_align) - size;
-	else
+		iommu_unmap(mapping->domain, iova + size, guard_len);
+	} else {
 		guard_len = 0;
+	}
 	nbits = (size + guard_len) >> FAST_PAGE_SHIFT;
 
-	iommu_unmap(mapping->domain, iova + size, guard_len);
 
 	/*
 	 * We don't invalidate TLBs on unmap.  We invalidate TLBs on map
@@ -403,6 +406,8 @@
 	fast_dmac_clean_range(mapping, pmd, pmd + nptes);
 
 	spin_unlock_irqrestore(&mapping->lock, flags);
+
+	trace_map(mapping->domain, iova, phys_to_map, len, prot);
 	return iova + offset_from_phys_to_map;
 
 fail_free_iova:
@@ -432,8 +437,10 @@
 	spin_lock_irqsave(&mapping->lock, flags);
 	av8l_fast_unmap_public(pmd, len);
 	fast_dmac_clean_range(mapping, pmd, pmd + nptes);
-	__fast_smmu_free_iova(mapping, iova, len);
+	__fast_smmu_free_iova(mapping, iova - offset, len);
 	spin_unlock_irqrestore(&mapping->lock, flags);
+
+	trace_unmap(mapping->domain, iova - offset, len, len);
 }
 
 static void fast_smmu_sync_single_for_cpu(struct device *dev,
@@ -738,7 +745,7 @@
 
 	iommu_unmap(mapping->domain, addr - offset, len);
 	spin_lock_irqsave(&mapping->lock, flags);
-	__fast_smmu_free_iova(mapping, addr, len);
+	__fast_smmu_free_iova(mapping, addr - offset, len);
 	spin_unlock_irqrestore(&mapping->lock, flags);
 }
 
@@ -893,7 +900,7 @@
 	int min_iova_align = 0;
 
 	iommu_domain_get_attr(mapping->domain,
-			DOMAIN_ATTR_QCOM_MMU500_ERRATA_MIN_ALIGN,
+			DOMAIN_ATTR_MMU500_ERRATA_MIN_ALIGN,
 			&min_iova_align);
 	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_SECURE_VMID, &vmid);
 	if (vmid >= VMID_LAST || vmid < 0)
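dma-mapping-fast.c gets the same guard-length restructuring and additionally frees the IOVA at the page-aligned base (iova - offset, addr - offset) instead of at the offset DMA handle the caller holds, so the allocator frees exactly what it originally handed out. A standalone sketch of that base/offset arithmetic, with made-up addresses, is below:

#include <stdio.h>
#include <stdint.h>

#define FAST_PAGE_SIZE	4096u

int main(void)
{
	uint64_t handle = 0x10002234;			 /* dma addr seen by the caller */
	uint64_t offset = handle & (FAST_PAGE_SIZE - 1); /* 0x234 within the page */
	uint64_t base   = handle - offset;		 /* 0x10002000: allocator-owned base */

	printf("unmap/free at base 0x%llx (offset 0x%llx)\n",
	       (unsigned long long)base, (unsigned long long)offset);
	return 0;
}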
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 83cbf20..c333a36 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -426,6 +426,7 @@
 	if (ret)
 		goto err_put_group;
 
+
 	/* Notify any listeners about change to group. */
 	blocking_notifier_call_chain(&group->notifier,
 				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
@@ -1077,6 +1078,7 @@
 	domain->type = type;
 	/* Assume all sizes by default; the driver may override this later */
 	domain->pgsize_bitmap  = bus->iommu_ops->pgsize_bitmap;
+	memset(domain->name, 0, IOMMU_DOMAIN_NAME_LEN);
 
 	return domain;
 }
@@ -1105,6 +1107,11 @@
 	if (!ret) {
 		trace_attach_device_to_domain(dev);
 		iommu_debug_attach_device(domain, dev);
+
+		if (!strnlen(domain->name, IOMMU_DOMAIN_NAME_LEN)) {
+			strlcpy(domain->name, dev_name(dev),
+				IOMMU_DOMAIN_NAME_LEN);
+		}
 	}
 	return ret;
 }
@@ -1398,7 +1405,7 @@
 	if (ret)
 		iommu_unmap(domain, orig_iova, orig_size - size);
 	else
-		trace_map(orig_iova, orig_paddr, orig_size);
+		trace_map(domain, orig_iova, orig_paddr, orig_size, prot);
 
 	return ret;
 }
@@ -1451,11 +1458,23 @@
 		unmapped += unmapped_page;
 	}
 
-	trace_unmap(orig_iova, size, unmapped);
+	trace_unmap(domain, orig_iova, size, unmapped);
 	return unmapped;
 }
 EXPORT_SYMBOL_GPL(iommu_unmap);
 
+size_t iommu_map_sg(struct iommu_domain *domain,
+				  unsigned long iova, struct scatterlist *sg,
+				  unsigned int nents, int prot)
+{
+	size_t mapped;
+
+	mapped = domain->ops->map_sg(domain, iova, sg, nents, prot);
+	trace_map_sg(domain, iova, mapped, prot);
+	return mapped;
+}
+EXPORT_SYMBOL(iommu_map_sg);
+
 size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
 			 struct scatterlist *sg, unsigned int nents, int prot)
 {
@@ -1519,8 +1538,49 @@
 }
 EXPORT_SYMBOL_GPL(iommu_domain_window_disable);
 
-struct dentry *iommu_debugfs_top;
+/**
+ * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
+ * @domain: the iommu domain where the fault has happened
+ * @dev: the device where the fault has happened
+ * @iova: the faulting address
+ * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
+ *
+ * This function should be called by the low-level IOMMU implementations
+ * whenever IOMMU faults happen, to allow high-level users, that are
+ * interested in such events, to know about them.
+ *
+ * This event may be useful for several possible use cases:
+ * - mere logging of the event
+ * - dynamic TLB/PTE loading
+ * - if restarting of the faulting device is required
+ *
+ * Returns 0 on success and an appropriate error code otherwise (if dynamic
+ * PTE/TLB loading will one day be supported, implementations will be able
+ * to tell whether it succeeded or not according to this return value).
+ *
+ * Specifically, -ENOSYS is returned if a fault handler isn't installed
+ * (though fault handlers can also return -ENOSYS, in case they want to
+ * elicit the default behavior of the IOMMU drivers).
+ */
+int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
+		       unsigned long iova, int flags)
+{
+	int ret = -ENOSYS;
 
+	/*
+	 * if upper layers showed interest and installed a fault handler,
+	 * invoke it.
+	 */
+	if (domain->handler)
+		ret = domain->handler(domain, dev, iova, flags,
+						domain->handler_token);
+
+	trace_io_page_fault(dev, iova, flags);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(report_iommu_fault);
+
+struct dentry *iommu_debugfs_top;
 static int __init iommu_init(void)
 {
 	iommu_group_kset = kset_create_and_add("iommu_groups",
@@ -1617,30 +1677,6 @@
 		domain->ops->trigger_fault(domain, flags);
 }
 
-/**
- * iommu_reg_read() - read an IOMMU register
- *
- * Reads the IOMMU register at the given offset.
- */
-unsigned long iommu_reg_read(struct iommu_domain *domain, unsigned long offset)
-{
-	if (domain->ops->reg_read)
-		return domain->ops->reg_read(domain, offset);
-	return 0;
-}
-
-/**
- * iommu_reg_write() - write an IOMMU register
- *
- * Writes the given value to the IOMMU register at the given offset.
- */
-void iommu_reg_write(struct iommu_domain *domain, unsigned long offset,
-		     unsigned long val)
-{
-	if (domain->ops->reg_write)
-		domain->ops->reg_write(domain, offset, val);
-}
-
 void iommu_get_dm_regions(struct device *dev, struct list_head *list)
 {
 	const struct iommu_ops *ops = dev->bus->iommu_ops;
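iommu.c adds an exported report_iommu_fault() (with its kerneldoc) and an iommu_map_sg() wrapper that emits the new trace_map_sg event. The fragment below is not from the patch: it only sketches how a low-level SMMU driver's context-fault path would typically forward a fault through this helper, with -ENOSYS meaning no handler was installed via iommu_set_fault_handler(); the function name and its shape are illustrative.

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>

static irqreturn_t example_context_fault(struct iommu_domain *domain,
					  struct device *dev,
					  unsigned long iova, bool is_write)
{
	int ret;

	ret = report_iommu_fault(domain, dev, iova,
			is_write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ);
	if (ret == -ENOSYS)
		dev_err(dev, "unhandled fault at IOVA 0x%lx\n", iova);

	return IRQ_HANDLED;
}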
diff --git a/drivers/irqchip/qcom/pdc-sdm670.c b/drivers/irqchip/qcom/pdc-sdm670.c
index 7bd6333..21bb58e 100644
--- a/drivers/irqchip/qcom/pdc-sdm670.c
+++ b/drivers/irqchip/qcom/pdc-sdm670.c
@@ -120,13 +120,13 @@
 	{106, 653}, /* core_bi_px_gpio_132 */
 	{107, 654}, /* core_bi_px_gpio_133 */
 	{108, 655}, /* core_bi_px_gpio_145 */
+	{115, 662}, /* core_bi_px_gpio_41 */
+	{116, 663}, /* core_bi_px_gpio_89 */
+	{117, 664}, /* core_bi_px_gpio_31 */
+	{118, 665}, /* core_bi_px_gpio_49 */
 	{119, 666}, /* core_bi_px_to_mpm[2] */
 	{120, 667}, /* core_bi_px_to_mpm[3] */
 	{121, 668}, /* core_bi_px_to_mpm[4] */
-	{122, 669}, /* core_bi_px_gpio_41 */
-	{123, 670}, /* core_bi_px_gpio_89 */
-	{124, 671}, /* core_bi_px_gpio_31 */
-	{125, 95}, /* core_bi_px_gpio_49 */
 	{-1}
 };
 
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 9f340bf..9dc85cd 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -63,7 +63,7 @@
 again:
 	spin_lock_irqsave(&chan->lock, flags);
 
-	if (!chan->msg_count || chan->active_req)
+	if (!chan->msg_count || (chan->active_req && err != -EAGAIN))
 		goto exit;
 
 	count = chan->msg_count;
diff --git a/drivers/mailbox/qcom-rpmh-mailbox.c b/drivers/mailbox/qcom-rpmh-mailbox.c
index 7bf8a18..160b858 100644
--- a/drivers/mailbox/qcom-rpmh-mailbox.c
+++ b/drivers/mailbox/qcom-rpmh-mailbox.c
@@ -29,7 +29,7 @@
 #include <linux/of_irq.h>
 #include <linux/platform_device.h>
 #include <linux/spinlock.h>
-
+#include <asm/arch_timer.h>
 #include <asm-generic/io.h>
 
 #include <soc/qcom/tcs.h>
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index f2d39a9..0eadf08 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -4028,6 +4028,9 @@
 		hcount = 3 + dfil->todo;
 		if (hcount > count)
 			hcount = count;
+		if (hcount == 0)
+			return done;
+
 		result = dvb_dmxdev_buffer_read(dfil, &dfil->buffer,
 						file->f_flags & O_NONBLOCK,
 						buf, hcount, ppos);
diff --git a/drivers/media/platform/msm/camera/Makefile b/drivers/media/platform/msm/camera/Makefile
index 48fa1c0..9e0aee9 100644
--- a/drivers/media/platform/msm/camera/Makefile
+++ b/drivers/media/platform/msm/camera/Makefile
@@ -10,3 +10,4 @@
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_icp/
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_jpeg/
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_fd/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_lrme/
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c
index 3fbb3f0..6d699cf 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c
@@ -67,11 +67,15 @@
 	return false;
 }
 
-void cam_cdm_cpas_cb(uint32_t client_handle, void *userdata,
-	enum cam_camnoc_irq_type evt_type, uint32_t evt_data)
+bool cam_cdm_cpas_cb(uint32_t client_handle, void *userdata,
+	struct cam_cpas_irq_data *irq_data)
 {
-	CAM_ERR(CAM_CDM, "CPAS error callback type=%d with data=%x", evt_type,
-		evt_data);
+	if (!irq_data)
+		return false;
+
+	CAM_DBG(CAM_CDM, "CPAS error callback type=%d", irq_data->irq_type);
+
+	return false;
 }
 
 struct cam_cdm_utils_ops *cam_cdm_get_ops(
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h
index fa3ae04..497832b 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h
@@ -32,8 +32,8 @@
 	uint32_t arg_size);
 bool cam_cdm_set_cam_hw_version(
 	uint32_t ver, struct cam_hw_version *cam_version);
-void cam_cdm_cpas_cb(uint32_t client_handle, void *userdata,
-	enum cam_camnoc_irq_type evt_type, uint32_t evt_data);
+bool cam_cdm_cpas_cb(uint32_t client_handle, void *userdata,
+	struct cam_cpas_irq_data *irq_data);
 struct cam_cdm_utils_ops *cam_cdm_get_ops(
 	uint32_t ver, struct cam_hw_version *cam_version, bool by_cam_version);
 int cam_virtual_cdm_submit_bl(struct cam_hw_info *cdm_hw,
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.c b/drivers/media/platform/msm/camera/cam_core/cam_context.c
index f7938e9..d039d75 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.c
@@ -14,6 +14,7 @@
 #include <linux/uaccess.h>
 #include "cam_context.h"
 #include "cam_debug_util.h"
+#include "cam_node.h"
 
 static int cam_context_handle_hw_event(void *context, uint32_t evt_id,
 	void *evt_data)
@@ -332,14 +333,15 @@
 			ctx, cmd);
 	else
 		/* stop device can be optional for some driver */
-		CAM_WARN(CAM_CORE, "No stop device in dev %d, state %d",
-			ctx->dev_hdl, ctx->state);
+		CAM_WARN(CAM_CORE, "No stop device in dev %d, name %s, state %d",
+			ctx->dev_hdl, ctx->dev_name, ctx->state);
 	mutex_unlock(&ctx->ctx_mutex);
 
 	return rc;
 }
 
 int cam_context_init(struct cam_context *ctx,
+	const char *dev_name,
 	struct cam_req_mgr_kmd_ops *crm_node_intf,
 	struct cam_hw_mgr_intf *hw_mgr_intf,
 	struct cam_ctx_request *req_list,
@@ -359,8 +361,10 @@
 	ctx->session_hdl = -1;
 	INIT_LIST_HEAD(&ctx->list);
 	mutex_init(&ctx->ctx_mutex);
+	mutex_init(&ctx->sync_mutex);
 	spin_lock_init(&ctx->lock);
 
+	ctx->dev_name = dev_name;
 	ctx->ctx_crm_intf = NULL;
 	ctx->crm_ctx_intf = crm_node_intf;
 	ctx->hw_mgr_intf = hw_mgr_intf;
@@ -375,6 +379,7 @@
 	for (i = 0; i < req_size; i++) {
 		INIT_LIST_HEAD(&ctx->req_list[i].list);
 		list_add_tail(&ctx->req_list[i].list, &ctx->free_req_list);
+		ctx->req_list[i].ctx = ctx;
 	}
 	ctx->state = CAM_CTX_AVAILABLE;
 	ctx->state_machine = NULL;
@@ -400,3 +405,20 @@
 
 	return 0;
 }
+
+void cam_context_putref(struct cam_context *ctx)
+{
+	kref_put(&ctx->refcount, cam_node_put_ctxt_to_free_list);
+	CAM_DBG(CAM_CORE, "ctx device hdl %ld, ref count %d",
+		ctx->dev_hdl, atomic_read(&(ctx->refcount.refcount)));
+}
+
+void cam_context_getref(struct cam_context *ctx)
+{
+	if (kref_get_unless_zero(&ctx->refcount) == 0) {
+		/* should never happen */
+		WARN(1, "cam_context_getref fail\n");
+	}
+	CAM_DBG(CAM_CORE, "ctx device hdl %ld, ref count %d",
+		ctx->dev_hdl, atomic_read(&(ctx->refcount.refcount)));
+}
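cam_context.c now ties context lifetime to a kref: cam_context_getref() relies on kref_get_unless_zero(), so a reference can only be taken while the count is still non-zero, and the release callback (cam_node_put_ctxt_to_free_list(), added further down) returns the context to the node's free list. A small standalone analogue of the get-unless-zero rule in plain C11 atomics (not kernel code) is sketched below:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int refcount = 1;		/* object starts with one owner */

static int get_unless_zero(atomic_int *cnt)
{
	int old = atomic_load(cnt);

	while (old != 0) {
		if (atomic_compare_exchange_weak(cnt, &old, old + 1))
			return 1;	/* reference taken */
	}
	return 0;			/* object already on its way out */
}

static void put_ref(atomic_int *cnt)
{
	if (atomic_fetch_sub(cnt, 1) == 1)
		printf("last reference dropped -> release the object\n");
}

int main(void)
{
	if (get_unless_zero(&refcount))
		put_ref(&refcount);
	put_ref(&refcount);		/* initial owner's put releases it */
	return 0;
}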
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.h b/drivers/media/platform/msm/camera/cam_core/cam_context.h
index 10285cb..6d1589e 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.h
@@ -15,6 +15,7 @@
 
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
+#include <linux/kref.h>
 #include "cam_req_mgr_interface.h"
 #include "cam_hw_mgr_intf.h"
 
@@ -54,6 +55,8 @@
  * @num_out_map_entries:   Number of out map entries
  * @num_in_acked:          Number of in fence acked
  * @num_out_acked:         Number of out fence acked
+ * @flushed:               Request is flushed
+ * @ctx:                   The context to which this request belongs
  *
  */
 struct cam_ctx_request {
@@ -69,6 +72,8 @@
 	uint32_t                      num_out_map_entries;
 	uint32_t                      num_in_acked;
 	uint32_t                      num_out_acked;
+	int                           flushed;
+	struct cam_context           *ctx;
 };
 
 /**
@@ -135,6 +140,7 @@
 /**
  * struct cam_context - camera context object for the subdevice node
  *
+ * @dev_name:              String giving name of device associated
  * @list:                  Link list entry
  * @sessoin_hdl:           Session handle
  * @dev_hdl:               Device handle
@@ -155,9 +161,13 @@
  * @state_machine:         Top level state machine
  * @ctx_priv:              Private context pointer
  * @ctxt_to_hw_map:        Context to hardware mapping pointer
+ * @refcount:              Context object refcount
+ * @node:                  The main node to which this context belongs
+ * @sync_mutex:            mutex to sync with sync cb thread
  *
  */
 struct cam_context {
+	const char                  *dev_name;
 	struct list_head             list;
 	int32_t                      session_hdl;
 	int32_t                      dev_hdl;
@@ -183,6 +193,10 @@
 
 	void                        *ctx_priv;
 	void                        *ctxt_to_hw_map;
+
+	struct kref                  refcount;
+	void                        *node;
+	struct mutex                 sync_mutex;
 };
 
 /**
@@ -331,6 +345,7 @@
  * @brief:        Camera context initialize function
  *
  * @ctx:                   Object pointer for cam_context
+ * @dev_name:              String giving name of device associated
  * @crm_node_intf:         Function table for crm to context interface
  * @hw_mgr_intf:           Function table for context to hw interface
  * @req_list:              Requests storage
@@ -338,10 +353,30 @@
  *
  */
 int cam_context_init(struct cam_context *ctx,
+		const char *dev_name,
 		struct cam_req_mgr_kmd_ops *crm_node_intf,
 		struct cam_hw_mgr_intf *hw_mgr_intf,
 		struct cam_ctx_request *req_list,
 		uint32_t req_size);
 
+/**
+ * cam_context_putref()
+ *
+ * @brief:       Put back context reference.
+ *
+ * @ctx:                  Context for which ref is returned
+ *
+ */
+void cam_context_putref(struct cam_context *ctx);
+
+/**
+ * cam_context_getref()
+ *
+ * @brief:       Take a context reference.
+ *
+ * @ctx:                  Context for which ref is taken
+ *
+ */
+void cam_context_getref(struct cam_context *ctx);
 
 #endif  /* _CAM_CONTEXT_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
index 714891e..f8c0692 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
@@ -18,6 +18,7 @@
 #include <media/cam_defs.h>
 
 #include "cam_context.h"
+#include "cam_context_utils.h"
 #include "cam_mem_mgr.h"
 #include "cam_node.h"
 #include "cam_req_mgr_util.h"
@@ -25,6 +26,15 @@
 #include "cam_trace.h"
 #include "cam_debug_util.h"
 
+static inline int cam_context_validate_thread(void)
+{
+	if (in_interrupt()) {
+		WARN(1, "Invalid execution context\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
 int cam_context_buf_done_from_hw(struct cam_context *ctx,
 	void *done_event_data, uint32_t bubble_state)
 {
@@ -33,17 +43,23 @@
 	struct cam_ctx_request *req;
 	struct cam_hw_done_event_data *done =
 		(struct cam_hw_done_event_data *)done_event_data;
+	int rc;
 
 	if (!ctx || !done) {
 		CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, done);
 		return -EINVAL;
 	}
 
+	rc = cam_context_validate_thread();
+	if (rc)
+		return rc;
+
+	spin_lock(&ctx->lock);
 	if (list_empty(&ctx->active_req_list)) {
 		CAM_ERR(CAM_CTXT, "no active request");
+		spin_unlock(&ctx->lock);
 		return -EIO;
 	}
-
 	req = list_first_entry(&ctx->active_req_list,
 		struct cam_ctx_request, list);
 
@@ -52,15 +68,22 @@
 	if (done->request_id != req->request_id) {
 		CAM_ERR(CAM_CTXT, "mismatch: done req[%lld], active req[%lld]",
 			done->request_id, req->request_id);
+		spin_unlock(&ctx->lock);
 		return -EIO;
 	}
 
 	if (!req->num_out_map_entries) {
 		CAM_ERR(CAM_CTXT, "no output fence to signal");
+		spin_unlock(&ctx->lock);
 		return -EIO;
 	}
 
+	/*
+	 * another thread may be adding/removing from the active
+	 * list, so hold the lock
+	 */
 	list_del_init(&req->list);
+	spin_unlock(&ctx->lock);
 	if (!bubble_state)
 		result = CAM_SYNC_STATE_SIGNALED_SUCCESS;
 	else
@@ -71,41 +94,34 @@
 		req->out_map_entries[j].sync_id = -1;
 	}
 
+	/*
+	 * another thread may be adding/removing from free list,
+	 * so hold the lock
+	 */
+	spin_lock(&ctx->lock);
 	list_add_tail(&req->list, &ctx->free_req_list);
+	req->ctx = NULL;
+	spin_unlock(&ctx->lock);
 
 	return 0;
 }
 
-int cam_context_apply_req_to_hw(struct cam_context *ctx,
+static int cam_context_apply_req_to_hw(struct cam_ctx_request *req,
 	struct cam_req_mgr_apply_request *apply)
 {
 	int rc = 0;
-	struct cam_ctx_request *req;
+	struct cam_context *ctx = req->ctx;
 	struct cam_hw_config_args cfg;
 
-	if (!ctx || !apply) {
-		CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, apply);
-		rc = -EINVAL;
-		goto end;
-	}
-
 	if (!ctx->hw_mgr_intf) {
 		CAM_ERR(CAM_CTXT, "HW interface is not ready");
 		rc = -EFAULT;
 		goto end;
 	}
 
-	if (list_empty(&ctx->pending_req_list)) {
-		CAM_ERR(CAM_CTXT, "No available request for Apply id %lld",
-			apply->request_id);
-		rc = -EFAULT;
-		goto end;
-	}
-
 	spin_lock(&ctx->lock);
-	req = list_first_entry(&ctx->pending_req_list,
-		struct cam_ctx_request, list);
 	list_del_init(&req->list);
+	list_add_tail(&req->list, &ctx->active_req_list);
 	spin_unlock(&ctx->lock);
 
 	cfg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
@@ -114,11 +130,13 @@
 	cfg.out_map_entries = req->out_map_entries;
 	cfg.num_out_map_entries = req->num_out_map_entries;
 	cfg.priv = req->req_priv;
-	list_add_tail(&req->list, &ctx->active_req_list);
 
 	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
-	if (rc)
+	if (rc) {
+		spin_lock(&ctx->lock);
 		list_del_init(&req->list);
+		spin_unlock(&ctx->lock);
+	}
 
 end:
 	return rc;
@@ -126,39 +144,51 @@
 
 static void cam_context_sync_callback(int32_t sync_obj, int status, void *data)
 {
-	struct cam_context *ctx = data;
-	struct cam_ctx_request *req = NULL;
+	struct cam_ctx_request *req = data;
+	struct cam_context *ctx = NULL;
 	struct cam_req_mgr_apply_request apply;
+	int rc;
 
-	if (!ctx) {
+	if (!req) {
 		CAM_ERR(CAM_CTXT, "Invalid input param");
 		return;
 	}
-
-	spin_lock(&ctx->lock);
-	if (!list_empty(&ctx->pending_req_list))
-		req = list_first_entry(&ctx->pending_req_list,
-			struct cam_ctx_request, list);
-	spin_unlock(&ctx->lock);
-
-	if (!req) {
-		CAM_ERR(CAM_CTXT, "No more request obj free");
+	rc = cam_context_validate_thread();
+	if (rc)
 		return;
-	}
 
+	ctx = req->ctx;
 	req->num_in_acked++;
 	if (req->num_in_acked == req->num_in_map_entries) {
 		apply.request_id = req->request_id;
-		cam_context_apply_req_to_hw(ctx, &apply);
+		/*
+		 * take mutex to ensure that another thread does
+		 * not flush the request while this
+		 * thread is submitting it to h/w. The submit to
+		 * h/w and adding to the active list should happen
+		 * in a critical section which is provided by this
+		 * mutex.
+		 */
+		mutex_lock(&ctx->sync_mutex);
+		if (!req->flushed) {
+			cam_context_apply_req_to_hw(req, &apply);
+			mutex_unlock(&ctx->sync_mutex);
+		} else {
+			mutex_unlock(&ctx->sync_mutex);
+			req->ctx = NULL;
+			req->flushed = 0;
+			spin_lock(&ctx->lock);
+			list_add_tail(&req->list, &ctx->free_req_list);
+			spin_unlock(&ctx->lock);
+		}
 	}
+	cam_context_putref(ctx);
 }
 
 int32_t cam_context_release_dev_to_hw(struct cam_context *ctx,
 	struct cam_release_dev_cmd *cmd)
 {
-	int i;
 	struct cam_hw_release_args arg;
-	struct cam_ctx_request *req;
 
 	if (!ctx) {
 		CAM_ERR(CAM_CTXT, "Invalid input param");
@@ -170,12 +200,9 @@
 		return -EINVAL;
 	}
 
+	cam_context_stop_dev_to_hw(ctx);
 	arg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
-	if ((list_empty(&ctx->active_req_list)) &&
-		(list_empty(&ctx->pending_req_list)))
-		arg.active_req = false;
-	else
-		arg.active_req = true;
+	arg.active_req = false;
 
 	ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &arg);
 	ctx->ctxt_to_hw_map = NULL;
@@ -184,38 +211,6 @@
 	ctx->dev_hdl = -1;
 	ctx->link_hdl = -1;
 
-	while (!list_empty(&ctx->active_req_list)) {
-		req = list_first_entry(&ctx->active_req_list,
-			struct cam_ctx_request, list);
-		list_del_init(&req->list);
-		CAM_DBG(CAM_CTXT, "signal fence in active list, num %d",
-			req->num_out_map_entries);
-		for (i = 0; i < req->num_out_map_entries; i++) {
-			if (req->out_map_entries[i].sync_id > 0)
-				cam_sync_signal(req->out_map_entries[i].sync_id,
-					CAM_SYNC_STATE_SIGNALED_ERROR);
-		}
-		list_add_tail(&req->list, &ctx->free_req_list);
-	}
-
-	while (!list_empty(&ctx->pending_req_list)) {
-		req = list_first_entry(&ctx->pending_req_list,
-			struct cam_ctx_request, list);
-		list_del_init(&req->list);
-		for (i = 0; i < req->num_in_map_entries; i++)
-			if (req->in_map_entries[i].sync_id > 0)
-				cam_sync_deregister_callback(
-					cam_context_sync_callback, ctx,
-					req->in_map_entries[i].sync_id);
-		CAM_DBG(CAM_CTXT, "signal fence in pending list, num %d",
-			req->num_out_map_entries);
-		for (i = 0; i < req->num_out_map_entries; i++)
-			if (req->out_map_entries[i].sync_id > 0)
-				cam_sync_signal(req->out_map_entries[i].sync_id,
-					CAM_SYNC_STATE_SIGNALED_ERROR);
-		list_add_tail(&req->list, &ctx->free_req_list);
-	}
-
 	return 0;
 }
 
@@ -241,6 +236,9 @@
 		rc = -EFAULT;
 		goto end;
 	}
+	rc = cam_context_validate_thread();
+	if (rc)
+		return rc;
 
 	spin_lock(&ctx->lock);
 	if (!list_empty(&ctx->free_req_list)) {
@@ -258,6 +256,7 @@
 
 	memset(req, 0, sizeof(*req));
 	INIT_LIST_HEAD(&req->list);
+	req->ctx = ctx;
 
 	/* for config dev, only memory handle is supported */
 	/* map packet from the memhandle */
@@ -303,10 +302,18 @@
 		list_add_tail(&req->list, &ctx->pending_req_list);
 		spin_unlock(&ctx->lock);
 		for (i = 0; i < req->num_in_map_entries; i++) {
+			cam_context_getref(ctx);
 			rc = cam_sync_register_callback(
 					cam_context_sync_callback,
-					(void *)ctx,
+					(void *)req,
 					req->in_map_entries[i].sync_id);
+			if (rc) {
+				CAM_ERR(CAM_CTXT,
+					"Failed to register fence cb: %d ret = %d",
+					req->in_map_entries[i].sync_id, rc);
+				cam_context_putref(ctx);
+				goto free_req;
+			}
 			CAM_DBG(CAM_CTXT, "register in fence cb: %d ret = %d",
 				req->in_map_entries[i].sync_id, rc);
 		}
@@ -318,6 +325,7 @@
 free_req:
 	spin_lock(&ctx->lock);
 	list_add_tail(&req->list, &ctx->free_req_list);
+	req->ctx = NULL;
 	spin_unlock(&ctx->lock);
 end:
 	return rc;
@@ -452,6 +460,7 @@
 	uint32_t i;
 	struct cam_hw_stop_args stop;
 	struct cam_ctx_request *req;
+	struct list_head temp_list;
 
 	if (!ctx) {
 		CAM_ERR(CAM_CTXT, "Invalid input param");
@@ -465,6 +474,32 @@
 		goto end;
 	}
 
+	rc = cam_context_validate_thread();
+	if (rc)
+		goto end;
+
+	/*
+	 * flush pending requests, take the sync lock to synchronize with the
+	 * sync callback thread so that the sync cb thread does not try to
+	 * submit a request to h/w while the request is being flushed
+	 */
+	mutex_lock(&ctx->sync_mutex);
+	INIT_LIST_HEAD(&temp_list);
+	spin_lock(&ctx->lock);
+	list_splice_init(&ctx->pending_req_list, &temp_list);
+	spin_unlock(&ctx->lock);
+	while (!list_empty(&temp_list)) {
+		req = list_first_entry(&temp_list,
+				struct cam_ctx_request, list);
+		list_del_init(&req->list);
+		req->flushed = 1;
+		for (i = 0; i < req->num_out_map_entries; i++)
+			if (req->out_map_entries[i].sync_id != -1)
+				cam_sync_signal(req->out_map_entries[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+	}
+	mutex_unlock(&ctx->sync_mutex);
+
 	/* stop hw first */
 	if (ctx->ctxt_to_hw_map) {
 		stop.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
@@ -473,22 +508,17 @@
 				&stop);
 	}
 
-	/* flush pending and active queue */
-	while (!list_empty(&ctx->pending_req_list)) {
-		req = list_first_entry(&ctx->pending_req_list,
-				struct cam_ctx_request, list);
-		list_del_init(&req->list);
-		CAM_DBG(CAM_CTXT, "signal fence in pending list. fence num %d",
-			req->num_out_map_entries);
-		for (i = 0; i < req->num_out_map_entries; i++)
-			if (req->out_map_entries[i].sync_id != -1)
-				cam_sync_signal(req->out_map_entries[i].sync_id,
-					CAM_SYNC_STATE_SIGNALED_ERROR);
-		list_add_tail(&req->list, &ctx->free_req_list);
-	}
+	/*
+	 * flush the active queue; at this point the h/w layer below no longer
+	 * holds any reference to requests in the active queue.
+	 */
+	INIT_LIST_HEAD(&temp_list);
+	spin_lock(&ctx->lock);
+	list_splice_init(&ctx->active_req_list, &temp_list);
+	spin_unlock(&ctx->lock);
 
-	while (!list_empty(&ctx->active_req_list)) {
-		req = list_first_entry(&ctx->active_req_list,
+	while (!list_empty(&temp_list)) {
+		req = list_first_entry(&temp_list,
 				struct cam_ctx_request, list);
 		list_del_init(&req->list);
 		CAM_DBG(CAM_CTXT, "signal fence in active list. fence num %d",
@@ -497,7 +527,14 @@
 			if (req->out_map_entries[i].sync_id != -1)
 				cam_sync_signal(req->out_map_entries[i].sync_id,
 					CAM_SYNC_STATE_SIGNALED_ERROR);
+		/*
+		 * The spin lock should be taken here to guard the free list,
+		 * as the sync cb thread could be adding a pending req to the free list
+		 */
+		spin_lock(&ctx->lock);
 		list_add_tail(&req->list, &ctx->free_req_list);
+		req->ctx = NULL;
+		spin_unlock(&ctx->lock);
 	}
 
 end:
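The reworked stop path drains the pending and active queues onto a local list while holding ctx->lock and only then signals the output fences, with ctx->sync_mutex keeping the sync-callback thread from submitting a request that is being flushed. A minimal sketch of that splice-and-drain idiom (assuming the patch's struct cam_ctx_request with its 'list' member; not part of the patch) follows:

#include <linux/list.h>
#include <linux/spinlock.h>

static void drain_and_signal(spinlock_t *lock, struct list_head *pending)
{
	LIST_HEAD(temp_list);
	struct cam_ctx_request *req, *tmp;

	spin_lock(lock);
	list_splice_init(pending, &temp_list);	/* "pending" is now empty */
	spin_unlock(lock);

	/* Potentially slow fence signalling happens outside the lock. */
	list_for_each_entry_safe(req, tmp, &temp_list, list) {
		list_del_init(&req->list);
		/* signal req's output fences with an error status here */
	}
}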
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.h b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.h
index f7982eb..45d9e56 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.h
@@ -17,8 +17,6 @@
 
 int cam_context_buf_done_from_hw(struct cam_context *ctx,
 	void *done_event_data, uint32_t bubble_state);
-int cam_context_apply_req_to_hw(struct cam_context *ctx,
-	struct cam_req_mgr_apply_request *apply);
 int32_t cam_context_release_dev_to_hw(struct cam_context *ctx,
 	struct cam_release_dev_cmd *cmd);
 int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
index aab75d5..4746152 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
@@ -203,8 +203,8 @@
  * @hw_write:              Function pointer for Write hardware registers
  * @hw_cmd:                Function pointer for any customized commands for the
  *                         hardware manager
- * @download_fw:           Function pointer for firmware downloading
- * @hw_close:              Function pointer for subdev close
+ * @hw_open:               Function pointer for HW init
+ * @hw_close:              Function pointer for HW deinit
  *
  */
 struct cam_hw_mgr_intf {
@@ -220,7 +220,7 @@
 	int (*hw_read)(void *hw_priv, void *read_args);
 	int (*hw_write)(void *hw_priv, void *write_args);
 	int (*hw_cmd)(void *hw_priv, void *write_args);
-	int (*download_fw)(void *hw_priv, void *fw_download_args);
+	int (*hw_open)(void *hw_priv, void *fw_download_args);
 	int (*hw_close)(void *hw_priv, void *hw_close_args);
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_node.c b/drivers/media/platform/msm/camera/cam_core/cam_node.c
index 11e9290..1f0213e 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_node.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_node.c
@@ -18,6 +18,34 @@
 #include "cam_trace.h"
 #include "cam_debug_util.h"
 
+static struct cam_context *cam_node_get_ctxt_from_free_list(
+		struct cam_node *node)
+{
+	struct cam_context *ctx = NULL;
+
+	mutex_lock(&node->list_mutex);
+	if (!list_empty(&node->free_ctx_list)) {
+		ctx = list_first_entry(&node->free_ctx_list,
+			struct cam_context, list);
+		list_del_init(&ctx->list);
+	}
+	mutex_unlock(&node->list_mutex);
+	if (ctx)
+		kref_init(&ctx->refcount);
+	return ctx;
+}
+
+void cam_node_put_ctxt_to_free_list(struct kref *ref)
+{
+	struct cam_context *ctx =
+		container_of(ref, struct cam_context, refcount);
+	struct cam_node *node = ctx->node;
+
+	mutex_lock(&node->list_mutex);
+	list_add_tail(&ctx->list, &node->free_ctx_list);
+	mutex_unlock(&node->list_mutex);
+}
+
 static int __cam_node_handle_query_cap(struct cam_node *node,
 	struct cam_query_cap_cmd *query)
 {
@@ -45,13 +73,7 @@
 	if (!acquire)
 		return -EINVAL;
 
-	mutex_lock(&node->list_mutex);
-	if (!list_empty(&node->free_ctx_list)) {
-		ctx = list_first_entry(&node->free_ctx_list,
-			struct cam_context, list);
-		list_del_init(&ctx->list);
-	}
-	mutex_unlock(&node->list_mutex);
+	ctx = cam_node_get_ctxt_from_free_list(node);
 	if (!ctx) {
 		rc = -ENOMEM;
 		goto err;
@@ -66,9 +88,7 @@
 
 	return 0;
 free_ctx:
-	mutex_lock(&node->list_mutex);
-	list_add_tail(&ctx->list, &node->free_ctx_list);
-	mutex_unlock(&node->list_mutex);
+	cam_context_putref(ctx);
 err:
 	return rc;
 }
@@ -207,9 +227,7 @@
 		CAM_ERR(CAM_CORE, "destroy device handle is failed node %s",
 			node->name);
 
-	mutex_lock(&node->list_mutex);
-	list_add_tail(&ctx->list, &node->free_ctx_list);
-	mutex_unlock(&node->list_mutex);
+	cam_context_putref(ctx);
 	return rc;
 }
 
@@ -312,8 +330,7 @@
 		if (node->ctx_list[i].dev_hdl >= 0) {
 			cam_context_shutdown(&(node->ctx_list[i]));
 			cam_destroy_device_hdl(node->ctx_list[i].dev_hdl);
-			list_add_tail(&(node->ctx_list[i].list),
-				&node->free_ctx_list);
+			cam_context_putref(&(node->ctx_list[i]));
 		}
 	}
 
@@ -358,6 +375,7 @@
 		}
 		INIT_LIST_HEAD(&ctx_list[i].list);
 		list_add_tail(&ctx_list[i].list, &node->free_ctx_list);
+		ctx_list[i].node = node;
 	}
 
 	node->state = CAM_NODE_STATE_INIT;
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_node.h b/drivers/media/platform/msm/camera/cam_core/cam_node.h
index 02e153d..4303ee3 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_node.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_node.h
@@ -13,6 +13,7 @@
 #ifndef _CAM_NODE_H_
 #define _CAM_NODE_H_
 
+#include <linux/kref.h>
 #include "cam_context.h"
 #include "cam_hw_mgr_intf.h"
 #include "cam_req_mgr_interface.h"
@@ -97,4 +98,14 @@
 int cam_node_init(struct cam_node *node, struct cam_hw_mgr_intf *hw_mgr_intf,
 	struct cam_context *ctx_list, uint32_t ctx_size, char *name);
 
+/**
+ * cam_node_put_ctxt_to_free_list()
+ *
+ * @brief:       Put context in node free list.
+ *
+ * @ref:         Context's kref object
+ *
+ */
+void cam_node_put_ctxt_to_free_list(struct kref *ref);
+
 #endif /* _CAM_NODE_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
index 8518862..fc84d9d 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
@@ -21,6 +21,8 @@
 #include "cam_cpas_hw_intf.h"
 #include "cam_cpas_soc.h"
 
+#define CAM_CPAS_AXI_MIN_BW (2048 * 1024)
+
 int cam_cpas_util_reg_update(struct cam_hw_info *cpas_hw,
 	enum cam_cpas_reg_base reg_base, struct cam_cpas_reg *reg_info)
 {
@@ -116,6 +118,12 @@
 	bus_client->curr_vote_level = idx;
 	mutex_unlock(&bus_client->lock);
 
+	if ((ab > 0) && (ab < CAM_CPAS_AXI_MIN_BW))
+		ab = CAM_CPAS_AXI_MIN_BW;
+
+	if ((ib > 0) && (ib < CAM_CPAS_AXI_MIN_BW))
+		ib = CAM_CPAS_AXI_MIN_BW;
+
 	pdata = bus_client->pdata;
 	path = &(pdata->usecase[idx]);
 	path->vectors[0].ab = ab;
@@ -362,7 +370,7 @@
 	list_for_each_entry_safe(curr_port, temp_port,
 		&cpas_core->axi_ports_list_head, sibling_port) {
 		rc = cam_cpas_util_vote_bus_client_bw(&curr_port->mnoc_bus,
-			mnoc_bw, 0);
+			mnoc_bw, mnoc_bw);
 		if (rc) {
 			CAM_ERR(CAM_CPAS,
 				"Failed in mnoc vote, enable=%d, rc=%d",
@@ -372,7 +380,7 @@
 
 		if (soc_private->axi_camnoc_based) {
 			cam_cpas_util_vote_bus_client_bw(
-				&curr_port->camnoc_bus, camnoc_bw, 0);
+				&curr_port->camnoc_bus, 0, camnoc_bw);
 			if (rc) {
 				CAM_ERR(CAM_CPAS,
 					"Failed in mnoc vote, enable=%d, %d",
@@ -563,7 +571,7 @@
 		camnoc_bw, mnoc_bw);
 
 	rc = cam_cpas_util_vote_bus_client_bw(&axi_port->mnoc_bus,
-		mnoc_bw, 0);
+		mnoc_bw, mnoc_bw);
 	if (rc) {
 		CAM_ERR(CAM_CPAS,
 			"Failed in mnoc vote ab[%llu] ib[%llu] rc=%d",
@@ -573,11 +581,11 @@
 
 	if (soc_private->axi_camnoc_based) {
 		rc = cam_cpas_util_vote_bus_client_bw(&axi_port->camnoc_bus,
-			camnoc_bw, 0);
+			0, camnoc_bw);
 		if (rc) {
 			CAM_ERR(CAM_CPAS,
 				"Failed camnoc vote ab[%llu] ib[%llu] rc=%d",
-				camnoc_bw, camnoc_bw, rc);
+				0ULL, camnoc_bw, rc);
 			goto unlock_axi_port;
 		}
 	}
@@ -1250,7 +1258,7 @@
 	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
 	int i;
 
-	for (i = 0; i < CPAS_MAX_CLIENTS; i++) {
+	for (i = 0; i < CAM_CPAS_MAX_CLIENTS; i++) {
 		mutex_init(&cpas_core->client_mutex[i]);
 		cpas_core->cpas_client[i] = NULL;
 	}
@@ -1263,7 +1271,7 @@
 	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
 	int i;
 
-	for (i = 0; i < CPAS_MAX_CLIENTS; i++) {
+	for (i = 0; i < CAM_CPAS_MAX_CLIENTS; i++) {
 		if (cpas_core->cpas_client[i]) {
 			cam_cpas_hw_unregister_client(cpas_hw, i);
 			cpas_core->cpas_client[i] = NULL;
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
index bbc99b7..aa3663d 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
@@ -17,13 +17,14 @@
 #include "cam_cpas_hw_intf.h"
 #include "cam_common_util.h"
 
-#define CPAS_MAX_CLIENTS 20
+#define CAM_CPAS_MAX_CLIENTS 30
 #define CAM_CPAS_INFLIGHT_WORKS 5
 
 #define CAM_CPAS_GET_CLIENT_IDX(handle) (handle)
 #define CAM_CPAS_GET_CLIENT_HANDLE(indx) (indx)
 
-#define CAM_CPAS_CLIENT_VALID(indx) ((indx >= 0) && (indx < CPAS_MAX_CLIENTS))
+#define CAM_CPAS_CLIENT_VALID(indx) \
+	((indx >= 0) && (indx < CAM_CPAS_MAX_CLIENTS))
 #define CAM_CPAS_CLIENT_REGISTERED(cpas_core, indx)        \
 	((CAM_CPAS_CLIENT_VALID(indx)) && \
 	(cpas_core->cpas_client[indx]))
@@ -176,8 +177,8 @@
  */
 struct cam_cpas {
 	struct cam_cpas_hw_caps hw_caps;
-	struct cam_cpas_client *cpas_client[CPAS_MAX_CLIENTS];
-	struct mutex client_mutex[CPAS_MAX_CLIENTS];
+	struct cam_cpas_client *cpas_client[CAM_CPAS_MAX_CLIENTS];
+	struct mutex client_mutex[CAM_CPAS_MAX_CLIENTS];
 	uint32_t num_clients;
 	uint32_t registered_clients;
 	uint32_t streamon_clients;
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
index 0ba3bb2..d5108f6 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
@@ -79,22 +79,24 @@
 
 int cam_cpas_get_hw_info(uint32_t *camera_family,
 	struct cam_hw_version *camera_version,
-	struct cam_hw_version *cpas_version)
+	struct cam_hw_version *cpas_version,
+	uint32_t *cam_caps)
 {
 	if (!CAM_CPAS_INTF_INITIALIZED()) {
 		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
 		return -ENODEV;
 	}
 
-	if (!camera_family || !camera_version || !cpas_version) {
-		CAM_ERR(CAM_CPAS, "invalid input %pK %pK %pK", camera_family,
-			camera_version, cpas_version);
+	if (!camera_family || !camera_version || !cpas_version || !cam_caps) {
+		CAM_ERR(CAM_CPAS, "invalid input %pK %pK %pK %pK",
+			camera_family, camera_version, cpas_version, cam_caps);
 		return -EINVAL;
 	}
 
 	*camera_family = g_cpas_intf->hw_caps.camera_family;
 	*camera_version = g_cpas_intf->hw_caps.camera_version;
 	*cpas_version = g_cpas_intf->hw_caps.cpas_version;
+	*cam_caps = g_cpas_intf->hw_caps.camera_capability;
 
 	return 0;
 }
@@ -364,6 +366,7 @@
 	switch (cmd->op_code) {
 	case CAM_QUERY_CAP: {
 		struct cam_cpas_query_cap query;
+		uint32_t cam_cpas;
 
 		rc = copy_from_user(&query, (void __user *) cmd->handle,
 			sizeof(query));
@@ -374,7 +377,7 @@
 		}
 
 		rc = cam_cpas_get_hw_info(&query.camera_family,
-			&query.camera_version, &query.cpas_version);
+			&query.camera_version, &query.cpas_version, &cam_cpas);
 		if (rc)
 			break;
 
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
index b2ad513..d4fc039 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
@@ -14,8 +14,8 @@
 #define _CAM_CPAS_SOC_H_
 
 #include "cam_soc_util.h"
+#include "cam_cpas_hw.h"
 
-#define CAM_CPAS_MAX_CLIENTS 20
 #define CAM_REGULATOR_LEVEL_MAX 16
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
index 4b0cc74..0e5ce85 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
@@ -24,6 +24,18 @@
 
 struct cam_camnoc_info *camnoc_info;
 
+#define CAMNOC_SLAVE_MAX_ERR_CODE 7
+static const char * const camnoc_salve_err_code[] = {
+	"Target Error",              /* err code 0 */
+	"Address decode error",      /* err code 1 */
+	"Unsupported request",       /* err code 2 */
+	"Disconnected target",       /* err code 3 */
+	"Security violation",        /* err code 4 */
+	"Hidden security violation", /* err code 5 */
+	"Timeout Error",             /* err code 6 */
+	"Unknown Error",             /* unknown err code */
+};
+
 static int cam_cpastop_get_hw_info(struct cam_hw_info *cpas_hw,
 	struct cam_cpas_hw_caps *hw_caps)
 {
@@ -106,91 +118,155 @@
 }
 
 static int cam_cpastop_handle_errlogger(struct cam_cpas *cpas_core,
-	struct cam_hw_soc_info *soc_info)
+	struct cam_hw_soc_info *soc_info,
+	struct cam_camnoc_irq_slave_err_data *slave_err)
 {
-	uint32_t reg_value[4];
-	int i;
-	int size = camnoc_info->error_logger_size;
 	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+	int err_code_index = 0;
 
-	for (i = 0; (i + 3) < size; i = i + 4) {
-		reg_value[0] = cam_io_r_mb(
-			soc_info->reg_map[camnoc_index].mem_base +
-			camnoc_info->error_logger[i]);
-		reg_value[1] = cam_io_r_mb(
-			soc_info->reg_map[camnoc_index].mem_base +
-			camnoc_info->error_logger[i + 1]);
-		reg_value[2] = cam_io_r_mb(
-			soc_info->reg_map[camnoc_index].mem_base +
-			camnoc_info->error_logger[i + 2]);
-		reg_value[3] = cam_io_r_mb(
-			soc_info->reg_map[camnoc_index].mem_base +
-			camnoc_info->error_logger[i + 3]);
-		CAM_ERR(CAM_CPAS,
-			"offset[0x%x] values [0x%x] [0x%x] [0x%x] [0x%x]",
-			camnoc_info->error_logger[i], reg_value[0],
-			reg_value[1], reg_value[2], reg_value[3]);
+	if (!camnoc_info->err_logger) {
+		CAM_ERR_RATE_LIMIT(CAM_CPAS, "Invalid err logger info");
+		return -EINVAL;
 	}
 
-	if ((i + 2) < size) {
-		reg_value[0] = cam_io_r_mb(
-			soc_info->reg_map[camnoc_index].mem_base +
-			camnoc_info->error_logger[i]);
-		reg_value[1] = cam_io_r_mb(
-			soc_info->reg_map[camnoc_index].mem_base +
-			camnoc_info->error_logger[i + 1]);
-		reg_value[2] = cam_io_r_mb(
-			soc_info->reg_map[camnoc_index].mem_base +
-			camnoc_info->error_logger[i + 2]);
-		CAM_ERR(CAM_CPAS, "offset[0x%x] values [0x%x] [0x%x] [0x%x]",
-			camnoc_info->error_logger[i], reg_value[0],
-			reg_value[1], reg_value[2]);
-		i = i + 3;
-	}
+	slave_err->mainctrl.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->mainctrl);
 
-	if ((i + 1) < size) {
-		reg_value[0] = cam_io_r_mb(
-			soc_info->reg_map[camnoc_index].mem_base +
-			camnoc_info->error_logger[i]);
-		reg_value[1] = cam_io_r_mb(
-			soc_info->reg_map[camnoc_index].mem_base +
-			camnoc_info->error_logger[i + 1]);
-		CAM_ERR(CAM_CPAS, "offset[0x%x] values [0x%x] [0x%x]",
-			camnoc_info->error_logger[i], reg_value[0],
-			reg_value[1]);
-		i = i + 2;
-	}
+	slave_err->errvld.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errvld);
 
-	if (i < size) {
-		reg_value[0] = cam_io_r_mb(
-			soc_info->reg_map[camnoc_index].mem_base +
-			camnoc_info->error_logger[i]);
-		CAM_ERR(CAM_CPAS, "offset[0x%x] values [0x%x]",
-			camnoc_info->error_logger[i], reg_value[0]);
-	}
+	slave_err->errlog0_low.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errlog0_low);
+
+	slave_err->errlog0_high.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errlog0_high);
+
+	slave_err->errlog1_low.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errlog1_low);
+
+	slave_err->errlog1_high.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errlog1_high);
+
+	slave_err->errlog2_low.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errlog2_low);
+
+	slave_err->errlog2_high.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errlog2_high);
+
+	slave_err->errlog3_low.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errlog3_low);
+
+	slave_err->errlog3_high.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errlog3_high);
+
+	CAM_ERR_RATE_LIMIT(CAM_CPAS,
+		"Possible memory configuration issue, fault at SMMU raised as CAMNOC SLAVE_IRQ");
+
+	CAM_ERR_RATE_LIMIT(CAM_CPAS,
+		"mainctrl[0x%x 0x%x] errvld[0x%x 0x%x] stall_en=%d, fault_en=%d, err_vld=%d",
+		camnoc_info->err_logger->mainctrl,
+		slave_err->mainctrl.value,
+		camnoc_info->err_logger->errvld,
+		slave_err->errvld.value,
+		slave_err->mainctrl.stall_en,
+		slave_err->mainctrl.fault_en,
+		slave_err->errvld.err_vld);
+
+	err_code_index = slave_err->errlog0_low.err_code;
+	if (err_code_index > CAMNOC_SLAVE_MAX_ERR_CODE)
+		err_code_index = CAMNOC_SLAVE_MAX_ERR_CODE;
+
+	CAM_ERR_RATE_LIMIT(CAM_CPAS,
+		"errlog0 low[0x%x 0x%x] high[0x%x 0x%x] loginfo_vld=%d, word_error=%d, non_secure=%d, device=%d, opc=%d, err_code=%d(%s) sizef=%d, addr_space=%d, len1=%d",
+		camnoc_info->err_logger->errlog0_low,
+		slave_err->errlog0_low.value,
+		camnoc_info->err_logger->errlog0_high,
+		slave_err->errlog0_high.value,
+		slave_err->errlog0_low.loginfo_vld,
+		slave_err->errlog0_low.word_error,
+		slave_err->errlog0_low.non_secure,
+		slave_err->errlog0_low.device,
+		slave_err->errlog0_low.opc,
+		slave_err->errlog0_low.err_code,
+		camnoc_salve_err_code[err_code_index],
+		slave_err->errlog0_low.sizef,
+		slave_err->errlog0_low.addr_space,
+		slave_err->errlog0_high.len1);
+
+	CAM_ERR_RATE_LIMIT(CAM_CPAS,
+		"errlog1_low[0x%x 0x%x]  errlog1_high[0x%x 0x%x] errlog2_low[0x%x 0x%x]  errlog2_high[0x%x 0x%x] errlog3_low[0x%x 0x%x]  errlog3_high[0x%x 0x%x]",
+		camnoc_info->err_logger->errlog1_low,
+		slave_err->errlog1_low.value,
+		camnoc_info->err_logger->errlog1_high,
+		slave_err->errlog1_high.value,
+		camnoc_info->err_logger->errlog2_low,
+		slave_err->errlog2_low.value,
+		camnoc_info->err_logger->errlog2_high,
+		slave_err->errlog2_high.value,
+		camnoc_info->err_logger->errlog3_low,
+		slave_err->errlog3_low.value,
+		camnoc_info->err_logger->errlog3_high,
+		slave_err->errlog3_high.value);
 
 	return 0;
 }
 
-static int cam_cpastop_handle_ubwc_err(struct cam_cpas *cpas_core,
-	struct cam_hw_soc_info *soc_info, int i)
+static int cam_cpastop_handle_ubwc_enc_err(struct cam_cpas *cpas_core,
+	struct cam_hw_soc_info *soc_info, int i,
+	struct cam_camnoc_irq_ubwc_enc_data *enc_err)
 {
-	uint32_t reg_value;
 	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
 
-	reg_value = cam_io_r_mb(soc_info->reg_map[camnoc_index].mem_base +
+	enc_err->encerr_status.value =
+		cam_io_r_mb(soc_info->reg_map[camnoc_index].mem_base +
 		camnoc_info->irq_err[i].err_status.offset);
 
-	CAM_ERR(CAM_CPAS,
-		"Dumping ubwc error status [%d]: offset[0x%x] value[0x%x]",
-		i, camnoc_info->irq_err[i].err_status.offset, reg_value);
+	/* Let clients handle the UBWC errors */
+	CAM_DBG(CAM_CPAS,
+		"ubwc enc err [%d]: offset[0x%x] value[0x%x]",
+		i, camnoc_info->irq_err[i].err_status.offset,
+		enc_err->encerr_status.value);
 
-	return reg_value;
+	return 0;
 }
 
-static int cam_cpastop_handle_ahb_timeout_err(struct cam_hw_info *cpas_hw)
+static int cam_cpastop_handle_ubwc_dec_err(struct cam_cpas *cpas_core,
+	struct cam_hw_soc_info *soc_info, int i,
+	struct cam_camnoc_irq_ubwc_dec_data *dec_err)
 {
-	CAM_ERR(CAM_CPAS, "ahb timout error");
+	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+
+	dec_err->decerr_status.value =
+		cam_io_r_mb(soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->irq_err[i].err_status.offset);
+
+	/* Let clients handle the UBWC errors */
+	CAM_DBG(CAM_CPAS,
+		"ubwc dec err status [%d]: offset[0x%x] value[0x%x] thr_err=%d, fcl_err=%d, len_md_err=%d, format_err=%d",
+		i, camnoc_info->irq_err[i].err_status.offset,
+		dec_err->decerr_status.value,
+		dec_err->decerr_status.thr_err,
+		dec_err->decerr_status.fcl_err,
+		dec_err->decerr_status.len_md_err,
+		dec_err->decerr_status.format_err);
+
+	return 0;
+}
+
+static int cam_cpastop_handle_ahb_timeout_err(struct cam_hw_info *cpas_hw,
+	struct cam_camnoc_irq_ahb_timeout_data *ahb_err)
+{
+	CAM_ERR_RATE_LIMIT(CAM_CPAS, "ahb timeout error");
 
 	return 0;
 }
@@ -228,10 +304,11 @@
 }
 
 static void cam_cpastop_notify_clients(struct cam_cpas *cpas_core,
-	enum cam_camnoc_hw_irq_type irq_type, uint32_t irq_data)
+	struct cam_cpas_irq_data *irq_data)
 {
 	int i;
 	struct cam_cpas_client *cpas_client;
+	bool error_handled = false;
 
 	CAM_DBG(CAM_CPAS,
 		"Notify CB : num_clients=%d, registered=%d, started=%d",
@@ -243,13 +320,15 @@
 			cpas_client = cpas_core->cpas_client[i];
 			if (cpas_client->data.cam_cpas_client_cb) {
 				CAM_DBG(CAM_CPAS,
-					"Calling client CB %d : %d 0x%x",
-					i, irq_type, irq_data);
-				cpas_client->data.cam_cpas_client_cb(
+					"Calling client CB %d : %d",
+					i, irq_data->irq_type);
+				error_handled =
+					cpas_client->data.cam_cpas_client_cb(
 					cpas_client->data.client_handle,
 					cpas_client->data.userdata,
-					(enum cam_camnoc_irq_type)irq_type,
 					irq_data);
+				if (error_handled)
+					break;
 			}
 		}
 	}
@@ -263,7 +342,7 @@
 	struct cam_hw_soc_info *soc_info;
 	int i;
 	enum cam_camnoc_hw_irq_type irq_type;
-	uint32_t irq_data;
+	struct cam_cpas_irq_data irq_data;
 
 	payload = container_of(work, struct cam_cpas_work_payload, work);
 	if (!payload) {
@@ -280,23 +359,30 @@
 			(camnoc_info->irq_err[i].enable)) {
 			irq_type = camnoc_info->irq_err[i].irq_type;
 			CAM_ERR(CAM_CPAS, "Error occurred, type=%d", irq_type);
-			irq_data = 0;
+			memset(&irq_data, 0x0, sizeof(irq_data));
+			irq_data.irq_type = (enum cam_camnoc_irq_type)irq_type;
 
 			switch (irq_type) {
 			case CAM_CAMNOC_HW_IRQ_SLAVE_ERROR:
-				irq_data = cam_cpastop_handle_errlogger(
-					cpas_core, soc_info);
+				cam_cpastop_handle_errlogger(
+					cpas_core, soc_info,
+					&irq_data.u.slave_err);
 				break;
 			case CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR:
 			case CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR:
-			case CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR:
 			case CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR:
-				irq_data = cam_cpastop_handle_ubwc_err(
-					cpas_core, soc_info, i);
+				cam_cpastop_handle_ubwc_enc_err(
+					cpas_core, soc_info, i,
+					&irq_data.u.enc_err);
+				break;
+			case CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR:
+				cam_cpastop_handle_ubwc_dec_err(
+					cpas_core, soc_info, i,
+					&irq_data.u.dec_err);
 				break;
 			case CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT:
-				irq_data = cam_cpastop_handle_ahb_timeout_err(
-					cpas_hw);
+				cam_cpastop_handle_ahb_timeout_err(
+					cpas_hw, &irq_data.u.ahb_err);
 				break;
 			case CAM_CAMNOC_HW_IRQ_CAMNOC_TEST:
 				CAM_DBG(CAM_CPAS, "TEST IRQ");
@@ -306,8 +392,7 @@
 				break;
 			}
 
-			cam_cpastop_notify_clients(cpas_core, irq_type,
-				irq_data);
+			cam_cpastop_notify_clients(cpas_core, &irq_data);
 
 			payload->irq_status &=
 				~camnoc_info->irq_err[i].sbm_port;
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h
index e3639a6..73f7e9b 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h
@@ -173,6 +173,34 @@
 };
 
 /**
+ * struct cam_camnoc_err_logger_info : CAMNOC error logger register offsets
+ *
+ * @mainctrl: Register offset for mainctrl
+ * @errvld: Register offset for errvld
+ * @errlog0_low: Register offset for errlog0_low
+ * @errlog0_high: Register offset for errlog0_high
+ * @errlog1_low: Register offset for errlog1_low
+ * @errlog1_high: Register offset for errlog1_high
+ * @errlog2_low: Register offset for errlog2_low
+ * @errlog2_high: Register offset for errlog2_high
+ * @errlog3_low: Register offset for errlog3_low
+ * @errlog3_high: Register offset for errlog3_high
+ *
+ */
+struct cam_camnoc_err_logger_info {
+	uint32_t mainctrl;
+	uint32_t errvld;
+	uint32_t errlog0_low;
+	uint32_t errlog0_high;
+	uint32_t errlog1_low;
+	uint32_t errlog1_high;
+	uint32_t errlog2_low;
+	uint32_t errlog2_high;
+	uint32_t errlog3_low;
+	uint32_t errlog3_high;
+};
+
+/**
  * struct cam_camnoc_info : Overall CAMNOC settings info
  *
  * @specific: Pointer to CAMNOC SPECIFICTONTTPTR settings
@@ -180,8 +208,7 @@
  * @irq_sbm: Pointer to CAMNOC IRQ SBM settings
  * @irq_err: Pointer to CAMNOC IRQ Error settings
  * @irq_err_size: Array size of IRQ Error settings
- * @error_logger: Pointer to CAMNOC IRQ Error logger read registers
- * @error_logger_size: Array size of IRQ Error logger
+ * @err_logger: Pointer to CAMNOC IRQ Error logger read registers
  * @errata_wa_list: HW Errata workaround info
  *
  */
@@ -191,8 +218,7 @@
 	struct cam_camnoc_irq_sbm *irq_sbm;
 	struct cam_camnoc_irq_err *irq_err;
 	int irq_err_size;
-	uint32_t *error_logger;
-	int error_logger_size;
+	struct cam_camnoc_err_logger_info *err_logger;
 	struct cam_cpas_hw_errata_wa_list *errata_wa_list;
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h
index b30cd05..2654b47 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h
@@ -498,19 +498,17 @@
 	}
 };
 
-uint32_t slave_error_logger[] = {
-	0x2700, /* ERRLOGGER_SWID_LOW */
-	0x2704, /* ERRLOGGER_SWID_HIGH */
-	0x2708, /* ERRLOGGER_MAINCTL_LOW */
-	0x2710, /* ERRLOGGER_ERRVLD_LOW */
-	0x2720, /* ERRLOGGER_ERRLOG0_LOW */
-	0x2724, /* ERRLOGGER_ERRLOG0_HIGH */
-	0x2728, /* ERRLOGGER_ERRLOG1_LOW */
-	0x272c, /* ERRLOGGER_ERRLOG1_HIGH */
-	0x2730, /* ERRLOGGER_ERRLOG2_LOW */
-	0x2734, /* ERRLOGGER_ERRLOG2_HIGH */
-	0x2738, /* ERRLOGGER_ERRLOG3_LOW */
-	0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
+static struct cam_camnoc_err_logger_info cam170_cpas100_err_logger_offsets = {
+	.mainctrl     =  0x2708, /* ERRLOGGER_MAINCTL_LOW */
+	.errvld       =  0x2710, /* ERRLOGGER_ERRVLD_LOW */
+	.errlog0_low  =  0x2720, /* ERRLOGGER_ERRLOG0_LOW */
+	.errlog0_high =  0x2724, /* ERRLOGGER_ERRLOG0_HIGH */
+	.errlog1_low  =  0x2728, /* ERRLOGGER_ERRLOG1_LOW */
+	.errlog1_high =  0x272c, /* ERRLOGGER_ERRLOG1_HIGH */
+	.errlog2_low  =  0x2730, /* ERRLOGGER_ERRLOG2_LOW */
+	.errlog2_high =  0x2734, /* ERRLOGGER_ERRLOG2_HIGH */
+	.errlog3_low  =  0x2738, /* ERRLOGGER_ERRLOG3_LOW */
+	.errlog3_high =  0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
 };
 
 static struct cam_cpas_hw_errata_wa_list cam170_cpas100_errata_wa_list = {
@@ -533,9 +531,7 @@
 	.irq_err = &cam_cpas100_irq_err[0],
 	.irq_err_size = sizeof(cam_cpas100_irq_err) /
 		sizeof(cam_cpas100_irq_err[0]),
-	.error_logger = &slave_error_logger[0],
-	.error_logger_size = sizeof(slave_error_logger) /
-		sizeof(slave_error_logger[0]),
+	.err_logger = &cam170_cpas100_err_logger_offsets,
 	.errata_wa_list = &cam170_cpas100_errata_wa_list,
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v170_110.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v170_110.h
index 55cb07b..4418fb1 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v170_110.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v170_110.h
@@ -258,14 +258,14 @@
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.masked_value = 0,
 			.offset = 0x430, /* SPECIFIC_IFE02_PRIORITYLUT_LOW */
-			.value = 0x66665433,
+			.value = 0x44443333,
 		},
 		.priority_lut_high = {
 			.enable = true,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.masked_value = 0,
 			.offset = 0x434, /* SPECIFIC_IFE02_PRIORITYLUT_HIGH */
-			.value = 0x66666666,
+			.value = 0x66665555,
 		},
 		.urgency = {
 			.enable = true,
@@ -306,14 +306,14 @@
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.masked_value = 0,
 			.offset = 0x830, /* SPECIFIC_IFE13_PRIORITYLUT_LOW */
-			.value = 0x66665433,
+			.value = 0x44443333,
 		},
 		.priority_lut_high = {
 			.enable = true,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.masked_value = 0,
 			.offset = 0x834, /* SPECIFIC_IFE13_PRIORITYLUT_HIGH */
-			.value = 0x66666666,
+			.value = 0x66665555,
 		},
 		.urgency = {
 			.enable = true,
@@ -505,19 +505,17 @@
 	},
 };
 
-static uint32_t cam_cpas110_slave_error_logger[] = {
-	0x2700, /* ERRLOGGER_SWID_LOW */
-	0x2704, /* ERRLOGGER_SWID_HIGH */
-	0x2708, /* ERRLOGGER_MAINCTL_LOW */
-	0x2710, /* ERRLOGGER_ERRVLD_LOW */
-	0x2720, /* ERRLOGGER_ERRLOG0_LOW */
-	0x2724, /* ERRLOGGER_ERRLOG0_HIGH */
-	0x2728, /* ERRLOGGER_ERRLOG1_LOW */
-	0x272c, /* ERRLOGGER_ERRLOG1_HIGH */
-	0x2730, /* ERRLOGGER_ERRLOG2_LOW */
-	0x2734, /* ERRLOGGER_ERRLOG2_HIGH */
-	0x2738, /* ERRLOGGER_ERRLOG3_LOW */
-	0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
+static struct cam_camnoc_err_logger_info cam170_cpas110_err_logger_offsets = {
+	.mainctrl     =  0x2708, /* ERRLOGGER_MAINCTL_LOW */
+	.errvld       =  0x2710, /* ERRLOGGER_ERRVLD_LOW */
+	.errlog0_low  =  0x2720, /* ERRLOGGER_ERRLOG0_LOW */
+	.errlog0_high =  0x2724, /* ERRLOGGER_ERRLOG0_HIGH */
+	.errlog1_low  =  0x2728, /* ERRLOGGER_ERRLOG1_LOW */
+	.errlog1_high =  0x272c, /* ERRLOGGER_ERRLOG1_HIGH */
+	.errlog2_low  =  0x2730, /* ERRLOGGER_ERRLOG2_LOW */
+	.errlog2_high =  0x2734, /* ERRLOGGER_ERRLOG2_HIGH */
+	.errlog3_low  =  0x2738, /* ERRLOGGER_ERRLOG3_LOW */
+	.errlog3_high =  0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
 };
 
 static struct cam_cpas_hw_errata_wa_list cam170_cpas110_errata_wa_list = {
@@ -540,9 +538,7 @@
 	.irq_err = &cam_cpas110_irq_err[0],
 	.irq_err_size = sizeof(cam_cpas110_irq_err) /
 		sizeof(cam_cpas110_irq_err[0]),
-	.error_logger = &cam_cpas110_slave_error_logger[0],
-	.error_logger_size = sizeof(cam_cpas110_slave_error_logger) /
-		sizeof(cam_cpas110_slave_error_logger[0]),
+	.err_logger = &cam170_cpas110_err_logger_offsets,
 	.errata_wa_list = &cam170_cpas110_errata_wa_list,
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
index aa8b266..c844ef7 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
@@ -82,6 +82,183 @@
 };
 
 /**
+ * struct cam_camnoc_irq_slave_err_data : Data for Slave error.
+ *
+ * @mainctrl     : Err logger mainctrl info
+ * @errvld       : Err logger errvld info
+ * @errlog0_low  : Err logger errlog0_low info
+ * @errlog0_high : Err logger errlog0_high info
+ * @errlog1_low  : Err logger errlog1_low info
+ * @errlog1_high : Err logger errlog1_high info
+ * @errlog2_low  : Err logger errlog2_low info
+ * @errlog2_high : Err logger errlog2_high info
+ * @errlog3_low  : Err logger errlog3_low info
+ * @errlog3_high : Err logger errlog3_high info
+ *
+ */
+struct cam_camnoc_irq_slave_err_data {
+	union {
+		struct {
+			uint32_t stall_en : 1; /* bit 0 */
+			uint32_t fault_en : 1; /* bit 1 */
+			uint32_t rsv      : 30; /* bits 2-31 */
+		};
+		uint32_t value;
+	} mainctrl;
+	union {
+		struct {
+			uint32_t err_vld : 1; /* bit 0 */
+			uint32_t rsv     : 31; /* bits 1-31 */
+		};
+		uint32_t value;
+	} errvld;
+	union {
+		struct {
+			uint32_t loginfo_vld : 1; /* bit 0 */
+			uint32_t word_error  : 1; /* bit 1 */
+			uint32_t non_secure  : 1; /* bit 2 */
+			uint32_t device      : 1; /* bit 3 */
+			uint32_t opc         : 3; /* bits 4 - 6 */
+			uint32_t rsv0        : 1; /* bit 7 */
+			uint32_t err_code    : 3; /* bits 8 - 10 */
+			uint32_t sizef       : 3; /* bits 11 - 13 */
+			uint32_t rsv1        : 2; /* bits 14 - 15 */
+			uint32_t addr_space  : 6; /* bits 16 - 21 */
+			uint32_t rsv2        : 10; /* bits 22 - 31 */
+		};
+		uint32_t value;
+	}  errlog0_low;
+	union {
+		struct {
+			uint32_t len1 : 10; /* bits 0 - 9 */
+			uint32_t rsv  : 22; /* bits 10 - 31 */
+		};
+		uint32_t value;
+	} errlog0_high;
+	union {
+		struct {
+			uint32_t path : 16; /* bits 0 - 15 */
+			uint32_t rsv  : 16; /* bits 16 - 31 */
+		};
+		uint32_t value;
+	} errlog1_low;
+	union {
+		struct {
+			uint32_t extid : 18; /* bits 0 - 17 */
+			uint32_t rsv   : 14; /* bits 18 - 31 */
+		};
+		uint32_t value;
+	} errlog1_high;
+	union {
+		struct {
+			uint32_t errlog2_lsb : 32; /* bits 0 - 31 */
+		};
+		uint32_t value;
+	} errlog2_low;
+	union {
+		struct {
+			uint32_t errlog2_msb : 16; /* bits 0 - 15 */
+			uint32_t rsv         : 16; /* bits 16 - 31 */
+		};
+		uint32_t value;
+	} errlog2_high;
+	union {
+		struct {
+			uint32_t errlog3_lsb : 32; /* bits 0 - 31 */
+		};
+		uint32_t value;
+	} errlog3_low;
+	union {
+		struct {
+			uint32_t errlog3_msb : 32; /* bits 0 - 31 */
+		};
+		uint32_t value;
+	} errlog3_high;
+};
+
+/**
+ * struct cam_camnoc_irq_ubwc_enc_data : Data for UBWC Encode error.
+ *
+ * @encerr_status : Encode error status
+ *
+ */
+struct cam_camnoc_irq_ubwc_enc_data {
+	union {
+		struct {
+			uint32_t encerrstatus : 3; /* bits 0 - 2 */
+			uint32_t rsv          : 29; /* bits 3 - 31 */
+		};
+		uint32_t value;
+	} encerr_status;
+};
+
+/**
+ * struct cam_camnoc_irq_ubwc_dec_data : Data for UBWC Decode error.
+ *
+ * @decerr_status : Decoder error status
+ * @thr_err       : Set to 1 if at least one of the bflc_len fields in the
+ *                  bit stream exceeds its threshold value. This error is
+ *                  possible only for RGBA1010102, TP10, and RGB565 formats
+ * @fcl_err       : Set to 1 if a fast clear is used with a legal non-RGB
+ *                  format
+ * @len_md_err    : Set to 1 if the calculated burst length does not match
+ *                  the burst length specified by the metadata value
+ * @format_err    : Set to 1 if the format is illegal:
+ *                  1. bad format: 2, 3, 6
+ *                  2. For 32B MAL, metadata=6
+ *                  3. For 32B MAL RGB565, metadata != 0, 1, 7
+ *                  4. For 64B MAL RGB565, metadata[3:1] == 1, 2
+ *
+ */
+struct cam_camnoc_irq_ubwc_dec_data {
+	union {
+		struct {
+			uint32_t thr_err    : 1; /* bit 0 */
+			uint32_t fcl_err    : 1; /* bit 1 */
+			uint32_t len_md_err : 1; /* bit 2 */
+			uint32_t format_err : 1; /* bit 3 */
+			uint32_t rsv        : 28; /* bits 4 - 31 */
+		};
+		uint32_t value;
+	} decerr_status;
+};
+
+struct cam_camnoc_irq_ahb_timeout_data {
+	uint32_t data;
+};
+
+/**
+ * struct cam_cpas_irq_data : CAMNOC IRQ data
+ *
+ * @irq_type  : To identify the type of IRQ
+ * @u         : Union of irq err data information
+ * @slave_err : Data for Slave error.
+ *              Valid if type is CAM_CAMNOC_IRQ_SLAVE_ERROR
+ * @enc_err   : Data for UBWC Encode error.
+ *              Valid if type is one of below:
+ *              CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR
+ *              CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR
+ *              CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR
+ * @dec_err   : Data for UBWC Decode error.
+ *              Valid if type is CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR
+ * @ahb_err   : Data for AHB timeout error.
+ *              Valid if type is CAM_CAMNOC_IRQ_AHB_TIMEOUT
+ *
+ */
+struct cam_cpas_irq_data {
+	enum cam_camnoc_irq_type irq_type;
+	union {
+		struct cam_camnoc_irq_slave_err_data   slave_err;
+		struct cam_camnoc_irq_ubwc_enc_data    enc_err;
+		struct cam_camnoc_irq_ubwc_dec_data    dec_err;
+		struct cam_camnoc_irq_ahb_timeout_data ahb_err;
+	} u;
+};
+
+/**
  * struct cam_cpas_register_params : Register params for cpas client
  *
  * @identifier        : Input identifier string which is the device label
@@ -107,11 +284,10 @@
 	uint32_t        cell_index;
 	struct device  *dev;
 	void           *userdata;
-	void          (*cam_cpas_client_cb)(
+	bool          (*cam_cpas_client_cb)(
 			uint32_t                  client_handle,
 			void                     *userdata,
-			enum cam_camnoc_irq_type  event_type,
-			uint32_t                  event_data);
+			struct cam_cpas_irq_data *irq_data);
 	uint32_t        client_handle;
 };
 
@@ -314,6 +490,7 @@
  *                   CAM_FAMILY_CPAS_SS
  * @camera_version : Camera platform version
  * @cpas_version   : Camera cpas version
+ * @cam_caps       : Camera capability
  *
  * @return 0 on success.
  *
@@ -321,7 +498,8 @@
 int cam_cpas_get_hw_info(
 	uint32_t                 *camera_family,
 	struct cam_hw_version    *camera_version,
-	struct cam_hw_version    *cpas_version);
+	struct cam_hw_version    *cpas_version,
+	uint32_t                 *cam_caps);
 
 /**
  * cam_cpas_get_cpas_hw_version()
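Since cam_cpas_client_cb now receives a struct cam_cpas_irq_data pointer and returns a bool that short-circuits further client notification, a client callback follows the shape below. This is a sketch modeled on the FD and A5 callbacks updated later in this patch; the function name is illustrative:

static bool sample_cpas_client_cb(uint32_t client_handle, void *userdata,
	struct cam_cpas_irq_data *irq_data)
{
	if (!irq_data)
		return false;

	switch (irq_data->irq_type) {
	case CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR:
		/* decode-error details are carried in irq_data->u.dec_err */
		return true;	/* handled: stop notifying other clients */
	default:
		break;
	}

	return false;	/* not handled: let other clients see the IRQ */
}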
diff --git a/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c b/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c
index f23c4c1..78c1dd3 100644
--- a/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c
+++ b/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c
@@ -17,6 +17,8 @@
 #include "cam_fd_context.h"
 #include "cam_trace.h"
 
+static const char fd_dev_name[] = "fd";
+
 /* Functions in Available state */
 static int __cam_fd_ctx_acquire_dev_in_available(struct cam_context *ctx,
 	struct cam_acquire_dev_cmd *cmd)
@@ -208,8 +210,8 @@
 
 	memset(fd_ctx, 0, sizeof(*fd_ctx));
 
-	rc = cam_context_init(base_ctx, NULL, hw_intf, fd_ctx->req_base,
-		CAM_CTX_REQ_MAX);
+	rc = cam_context_init(base_ctx, fd_dev_name, NULL, hw_intf,
+		fd_ctx->req_base, CAM_CTX_REQ_MAX);
 	if (rc) {
 		CAM_ERR(CAM_FD, "Camera Context Base init failed, rc=%d", rc);
 		return rc;
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
index 37e6954..bff42f4 100644
--- a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
@@ -865,6 +865,8 @@
 	}
 
 	hw_device->ready_to_process = false;
+	hw_device->cur_hw_ctx = hw_ctx;
+	hw_device->req_id = frame_req->request_id;
 	mutex_unlock(&hw_device->lock);
 
 	rc = cam_fd_mgr_util_put_frame_req(
@@ -1026,6 +1028,8 @@
 	 */
 	mutex_lock(&hw_device->lock);
 	hw_device->ready_to_process = true;
+	hw_device->req_id = -1;
+	hw_device->cur_hw_ctx = NULL;
 	CAM_DBG(CAM_FD, "ready_to_process=%d", hw_device->ready_to_process);
 	mutex_unlock(&hw_device->lock);
 
@@ -1206,6 +1210,7 @@
 	if (rc)
 		CAM_ERR(CAM_FD, "Failed in release device, rc=%d", rc);
 
+	hw_ctx->ctx_in_use = false;
 	list_del_init(&hw_ctx->list);
 	cam_fd_mgr_util_put_ctx(&hw_mgr->free_ctx_list, &hw_ctx);
 
@@ -1261,6 +1266,82 @@
 	return rc;
 }
 
+static int cam_fd_mgr_hw_flush(void *hw_mgr_priv,
+	struct cam_fd_hw_mgr_ctx *hw_ctx)
+{
+	int rc = 0;
+	struct cam_fd_mgr_frame_request *frame_req, *req_temp;
+	struct cam_fd_hw_stop_args hw_stop_args;
+	struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
+	struct cam_fd_device *hw_device;
+
+	if (!hw_mgr_priv || !hw_ctx) {
+		CAM_ERR(CAM_FD, "Invalid arguments %pK %pK",
+			hw_mgr_priv, hw_ctx);
+		return -EINVAL;
+	}
+
+	if (!hw_ctx->ctx_in_use) {
+		CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx);
+		return -EPERM;
+	}
+	CAM_DBG(CAM_FD, "ctx index=%d, hw_ctx=%d", hw_ctx->ctx_index,
+		hw_ctx->device_index);
+
+	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
+		return rc;
+	}
+
+	mutex_lock(&hw_mgr->frame_req_mutex);
+	list_for_each_entry_safe(frame_req, req_temp,
+		&hw_mgr->frame_pending_list_high, list) {
+		if (frame_req->hw_ctx != hw_ctx)
+			continue;
+
+		list_del_init(&frame_req->list);
+	}
+
+	list_for_each_entry_safe(frame_req, req_temp,
+		&hw_mgr->frame_pending_list_normal, list) {
+		if (frame_req->hw_ctx != hw_ctx)
+			continue;
+
+		list_del_init(&frame_req->list);
+	}
+
+	list_for_each_entry_safe(frame_req, req_temp,
+		&hw_mgr->frame_processing_list, list) {
+		if (frame_req->hw_ctx != hw_ctx)
+			continue;
+
+		list_del_init(&frame_req->list);
+	}
+	mutex_unlock(&hw_mgr->frame_req_mutex);
+
+	mutex_lock(&hw_device->lock);
+	if ((hw_device->ready_to_process == true) ||
+		(hw_device->cur_hw_ctx != hw_ctx))
+		goto end;
+
+	if (hw_device->hw_intf->hw_ops.stop) {
+		hw_stop_args.hw_ctx = hw_ctx;
+		rc = hw_device->hw_intf->hw_ops.stop(
+			hw_device->hw_intf->hw_priv, &hw_stop_args,
+			sizeof(hw_stop_args));
+		if (rc) {
+			CAM_ERR(CAM_FD, "Failed in HW Stop %d", rc);
+			goto end;
+		}
+		hw_device->ready_to_process = true;
+	}
+
+end:
+	mutex_unlock(&hw_device->lock);
+	return rc;
+}
+
 static int cam_fd_mgr_hw_stop(void *hw_mgr_priv, void *mgr_stop_args)
 {
 	struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
@@ -1268,7 +1349,6 @@
 		(struct cam_hw_stop_args *)mgr_stop_args;
 	struct cam_fd_hw_mgr_ctx *hw_ctx;
 	struct cam_fd_device *hw_device;
-	struct cam_fd_hw_stop_args hw_stop_args;
 	struct cam_fd_hw_deinit_args hw_deinit_args;
 	int rc = 0;
 
@@ -1295,21 +1375,9 @@
 	CAM_DBG(CAM_FD, "FD Device ready_to_process = %d",
 		hw_device->ready_to_process);
 
-	if ((hw_device->hw_intf->hw_ops.stop) &&
-		(hw_device->ready_to_process == false)) {
-		/*
-		 * Even if device is in processing state, we should submit
-		 * stop command only if this ctx is running on hw
-		 */
-		hw_stop_args.hw_ctx = hw_ctx;
-		rc = hw_device->hw_intf->hw_ops.stop(
-			hw_device->hw_intf->hw_priv, &hw_stop_args,
-			sizeof(hw_stop_args));
-		if (rc) {
-			CAM_ERR(CAM_FD, "Failed in HW Stop %d", rc);
-			return rc;
-		}
-	}
+	rc = cam_fd_mgr_hw_flush(hw_mgr, hw_ctx);
+	if (rc)
+		CAM_ERR(CAM_FD, "FD failed to flush");
 
 	if (hw_device->hw_intf->hw_ops.deinit) {
 		hw_deinit_args.hw_ctx = hw_ctx;
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.h b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.h
index 135e006..db5d100 100644
--- a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.h
@@ -80,14 +80,18 @@
  * @num_ctxts        : Number of context currently running on this device
  * @valid            : Whether this device is valid
  * @lock             : Lock used for protectin
+ * @cur_hw_ctx       : current hw context running in the device
+ * @req_id           : current processing req id
  */
 struct cam_fd_device {
-	struct cam_fd_hw_caps    hw_caps;
-	struct cam_hw_intf      *hw_intf;
-	bool                     ready_to_process;
-	uint32_t                 num_ctxts;
-	bool                     valid;
-	struct mutex             lock;
+	struct cam_fd_hw_caps     hw_caps;
+	struct cam_hw_intf       *hw_intf;
+	bool                      ready_to_process;
+	uint32_t                  num_ctxts;
+	bool                      valid;
+	struct mutex              lock;
+	struct cam_fd_hw_mgr_ctx *cur_hw_ctx;
+	int64_t                   req_id;
 };
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c
index d9be53d..51c8e4a 100644
--- a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c
@@ -643,6 +643,7 @@
 	struct cam_fd_hw_init_args *init_args =
 		(struct cam_fd_hw_init_args *)init_hw_args;
 	int rc = 0;
+	unsigned long flags;
 
 	if (!fd_hw || !init_args) {
 		CAM_ERR(CAM_FD, "Invalid argument %pK %pK", fd_hw, init_args);
@@ -671,6 +672,11 @@
 		goto unlock_return;
 	}
 
+	spin_lock_irqsave(&fd_core->spin_lock, flags);
+	fd_hw->hw_state = CAM_HW_STATE_POWER_UP;
+	fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
+	spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+
 	rc = cam_fd_hw_reset(hw_priv, NULL, 0);
 	if (rc) {
 		CAM_ERR(CAM_FD, "Reset Failed, rc=%d", rc);
@@ -679,15 +685,10 @@
 
 	cam_fd_hw_util_enable_power_on_settings(fd_hw);
 
-	fd_hw->hw_state = CAM_HW_STATE_POWER_UP;
-	fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
-
 cdm_streamon:
 	fd_hw->open_count++;
 	CAM_DBG(CAM_FD, "FD HW Init ref count after %d", fd_hw->open_count);
 
-	mutex_unlock(&fd_hw->hw_mutex);
-
 	if (init_args->ctx_hw_private) {
 		struct cam_fd_ctx_hw_private *ctx_hw_private =
 			init_args->ctx_hw_private;
@@ -696,15 +697,24 @@
 		if (rc) {
 			CAM_ERR(CAM_FD, "CDM StreamOn fail :handle=0x%x, rc=%d",
 				ctx_hw_private->cdm_handle, rc);
-			return rc;
+			fd_hw->open_count--;
+			if (!fd_hw->open_count)
+				goto disable_soc;
 		}
 	}
 
+	mutex_unlock(&fd_hw->hw_mutex);
+
 	return rc;
 
 disable_soc:
 	if (cam_fd_soc_disable_resources(&fd_hw->soc_info))
 		CAM_ERR(CAM_FD, "Error in disable soc resources");
+
+	spin_lock_irqsave(&fd_core->spin_lock, flags);
+	fd_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	fd_core->core_state = CAM_FD_CORE_STATE_POWERDOWN;
+	spin_unlock_irqrestore(&fd_core->spin_lock, flags);
 unlock_return:
 	mutex_unlock(&fd_hw->hw_mutex);
 	return rc;
@@ -717,6 +727,7 @@
 	struct cam_fd_hw_deinit_args *deinit_args =
 		(struct cam_fd_hw_deinit_args *)deinit_hw_args;
 	int rc = 0;
+	unsigned long flags;
 
 	if (!fd_hw || !deinit_hw_args) {
 		CAM_ERR(CAM_FD, "Invalid argument");
@@ -754,8 +765,9 @@
 	/* With the ref_cnt correct, this should never happen */
 	WARN_ON(!fd_core);
 
+	spin_lock_irqsave(&fd_core->spin_lock, flags);
 	fd_core->core_state = CAM_FD_CORE_STATE_POWERDOWN;
-
+	spin_unlock_irqrestore(&fd_core->spin_lock, flags);
 positive_ref_cnt:
 	if (deinit_args->ctx_hw_private) {
 		struct cam_fd_ctx_hw_private *ctx_hw_private =
@@ -792,7 +804,8 @@
 	soc_info = &fd_hw->soc_info;
 
 	spin_lock_irqsave(&fd_core->spin_lock, flags);
-	if (fd_core->core_state == CAM_FD_CORE_STATE_RESET_PROGRESS) {
+	if ((fd_core->core_state == CAM_FD_CORE_STATE_POWERDOWN) ||
+		(fd_core->core_state == CAM_FD_CORE_STATE_RESET_PROGRESS)) {
 		CAM_ERR(CAM_FD, "Reset not allowed in %d state",
 			fd_core->core_state);
 		spin_unlock_irqrestore(&fd_core->spin_lock, flags);
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.c b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.c
index 9045dc1..f27d016 100644
--- a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.c
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.c
@@ -20,11 +20,16 @@
 #include "cam_fd_hw_core.h"
 #include "cam_fd_hw_soc.h"
 
-static void cam_fd_hw_util_cpas_callback(uint32_t handle, void *userdata,
-	enum cam_camnoc_irq_type event_type, uint32_t event_data)
+static bool cam_fd_hw_util_cpas_callback(uint32_t handle, void *userdata,
+	struct cam_cpas_irq_data *irq_data)
 {
-	CAM_DBG(CAM_FD, "CPAS hdl=%d, udata=%pK, event=%d, event_data=%d",
-		handle, userdata, event_type, event_data);
+	if (!irq_data)
+		return false;
+
+	CAM_DBG(CAM_FD, "CPAS hdl=%d, udata=%pK, irq_type=%d",
+		handle, userdata, irq_data->irq_type);
+
+	return false;
 }
 
 static int cam_fd_hw_soc_util_setup_regbase_indices(
diff --git a/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
index 15bd98c..0c37994 100644
--- a/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
+++ b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
@@ -26,6 +26,8 @@
 #include "cam_trace.h"
 #include "cam_debug_util.h"
 
+static const char icp_dev_name[] = "icp";
+
 static int __cam_icp_acquire_dev_in_available(struct cam_context *ctx,
 	struct cam_acquire_dev_cmd *cmd)
 {
@@ -171,8 +173,8 @@
 		goto err;
 	}
 
-	rc = cam_context_init(ctx->base, NULL, hw_intf, ctx->req_base,
-		CAM_CTX_REQ_MAX);
+	rc = cam_context_init(ctx->base, icp_dev_name, NULL, hw_intf,
+		ctx->req_base, CAM_CTX_REQ_MAX);
 	if (rc) {
 		CAM_ERR(CAM_ICP, "Camera Context Base init failed");
 		goto err;
diff --git a/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c b/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c
index 905cc97..51499de 100644
--- a/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c
+++ b/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c
@@ -76,7 +76,7 @@
 	}
 
 	hw_mgr_intf = &node->hw_mgr_intf;
-	rc = hw_mgr_intf->download_fw(hw_mgr_intf->hw_mgr_priv, NULL);
+	rc = hw_mgr_intf->hw_open(hw_mgr_intf->hw_mgr_priv, NULL);
 	if (rc < 0) {
 		CAM_ERR(CAM_ICP, "FW download failed");
 		goto end;
diff --git a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
index 455943b..ce7a8b3 100644
--- a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
+++ b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
@@ -60,10 +60,12 @@
  * hfi_read_message() - function for hfi read
  * @pmsg: buffer to place read message for hfi queue
  * @q_id: queue id
+ * @words_read: total number of words read from the queue
+ *              returned as output to the caller
  *
- * Returns size read in words/failure(negative value)
+ * Returns success (zero) or failure (non-zero)
  */
-int64_t hfi_read_message(uint32_t *pmsg, uint8_t q_id);
+int hfi_read_message(uint32_t *pmsg, uint8_t q_id, uint32_t *words_read);
 
 /**
  * hfi_init() - function initialize hfi after firmware download
@@ -124,4 +126,10 @@
  */
 int hfi_enable_ipe_bps_pc(bool enable);
 
+/**
+ * hfi_cmd_ubwc_config() - UBWC configuration to firmware
+ * @ubwc_cfg: UBWC configuration parameters
+ */
+int hfi_cmd_ubwc_config(uint32_t *ubwc_cfg);
+
 #endif /* _HFI_INTF_H_ */
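With the new signature, callers check the return code and take the word count from the output parameter instead of the previously overloaded return value. A sketch of the caller side, mirroring the ICP hw-mgr changes later in this patch; the early return is illustrative:

	uint32_t words_read = 0;
	int rc;

	rc = hfi_read_message(icp_hw_mgr.msg_buf, Q_MSG, &words_read);
	if (rc) {
		CAM_DBG(CAM_ICP, "Unable to read msg q");
		return rc;
	}
	/* words_read now holds the number of 32-bit words placed in msg_buf */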
diff --git a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_sys_defs.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_sys_defs.h
index 65dc4b3..aaa18bb 100644
--- a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_sys_defs.h
+++ b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_sys_defs.h
@@ -156,6 +156,7 @@
 #define HFI_PROPERTY_ICP_COMMON_START  (HFI_DOMAIN_BASE_ICP + 0x0)
 
 #define HFI_PROP_SYS_DEBUG_CFG         (HFI_PROPERTY_ICP_COMMON_START + 0x1)
+#define HFI_PROP_SYS_UBWC_CFG          (HFI_PROPERTY_ICP_COMMON_START + 0x2)
 #define HFI_PROP_SYS_IMAGE_VER         (HFI_PROPERTY_ICP_COMMON_START + 0x3)
 #define HFI_PROP_SYS_SUPPORTED         (HFI_PROPERTY_ICP_COMMON_START + 0x4)
 #define HFI_PROP_SYS_IPEBPS_PC         (HFI_PROPERTY_ICP_COMMON_START + 0x5)
@@ -201,6 +202,8 @@
 #define HFI_DEBUG_MODE_QUEUE     0x00000001
 #define HFI_DEBUG_MODE_QDSS      0x00000002
 
+#define HFI_DEV_VERSION_MAX      0x5
+
 /**
  * start of sys command packet types
  * These commands are used to get system level information
@@ -257,6 +260,17 @@
 } __packed;
 
 /**
+ * struct hfi_cmd_ubwc_cfg
+ * Payload structure to configure HFI_PROP_SYS_UBWC_CFG
+ * @ubwc_fetch_cfg: UBWC configuration for fetch
+ * @ubwc_write_cfg: UBWC configuration for write
+ */
+struct hfi_cmd_ubwc_cfg {
+	uint32_t ubwc_fetch_cfg;
+	uint32_t ubwc_write_cfg;
+};
+
+/**
  * struct hfi_cmd_sys_init
  * command to initialization of system session
  * @size: packet size in bytes
@@ -371,14 +385,30 @@
 } __packed;
 
 /**
+ * struct hfi_msg_init_done_data
+ * @api_ver:    Firmware API version
+ * @dev_ver:    Device version
+ * @num_icp_hw: Number of ICP hardware information
+ * @dev_hw_ver: Supported hardware version information
+ * @reserved:   Reserved field
+ */
+struct hfi_msg_init_done_data {
+	uint32_t api_ver;
+	uint32_t dev_ver;
+	uint32_t num_icp_hw;
+	uint32_t dev_hw_ver[HFI_DEV_VERSION_MAX];
+	uint32_t reserved;
+};
+
+/**
  * struct hfi_msg_init_done
  * system init done message from firmware. Many system level properties
  * are returned with the packet
- * @size: packet size in bytes
- * @pkt_type: opcode of a packet
- * @err_type: error code associated with response
- * @num_prop: number of default capability info
- * @prop_data: array of property ids and corresponding structure pairs
+ * @size:      Packet size in bytes
+ * @pkt_type:  Opcode of a packet
+ * @err_type:  Error code associated with response
+ * @num_prop:  Number of default capability info
+ * @prop_data: Array of property ids and corresponding structure pairs
  */
 struct hfi_msg_init_done {
 	uint32_t size;
diff --git a/drivers/media/platform/msm/camera/cam_icp/hfi.c b/drivers/media/platform/msm/camera/cam_icp/hfi.c
index afdd571..e51d350 100644
--- a/drivers/media/platform/msm/camera/cam_icp/hfi.c
+++ b/drivers/media/platform/msm/camera/cam_icp/hfi.c
@@ -27,6 +27,7 @@
 #include "hfi_intf.h"
 #include "cam_icp_hw_mgr_intf.h"
 #include "cam_debug_util.h"
+#include "cam_soc_util.h"
 
 #define HFI_VERSION_INFO_MAJOR_VAL  1
 #define HFI_VERSION_INFO_MINOR_VAL  1
@@ -39,9 +40,6 @@
 #define HFI_VERSION_INFO_STEP_BMSK   0xFF
 #define HFI_VERSION_INFO_STEP_SHFT  0
 
-#define SOC_VERSION_HW1             0x10000
-#define SOC_VERSION_HW2             0x20000
-
 static struct hfi_info *g_hfi;
 unsigned int g_icp_mmu_hdl;
 static DEFINE_MUTEX(hfi_cmd_q_mutex);
@@ -119,13 +117,14 @@
 	return rc;
 }
 
-int64_t hfi_read_message(uint32_t *pmsg, uint8_t q_id)
+int hfi_read_message(uint32_t *pmsg, uint8_t q_id,
+	uint32_t *words_read)
 {
 	struct hfi_qtbl *q_tbl_ptr;
 	struct hfi_q_hdr *q;
 	uint32_t new_read_idx, size_in_words, word_diff, temp;
 	uint32_t *read_q, *read_ptr, *write_ptr;
-	int64_t rc = 0;
+	int rc = 0;
 
 	if (!pmsg) {
 		CAM_ERR(CAM_HFI, "Invalid msg");
@@ -169,7 +168,6 @@
 
 	read_ptr = (uint32_t *)(read_q + q->qhdr_read_idx);
 	write_ptr = (uint32_t *)(read_q + q->qhdr_write_idx);
-	size_in_words = (*read_ptr) >> BYTE_WORD_SHIFT;
 
 	if (write_ptr > read_ptr)
 		size_in_words = write_ptr - read_ptr;
@@ -205,12 +203,39 @@
 	}
 
 	q->qhdr_read_idx = new_read_idx;
-	rc = size_in_words;
+	*words_read = size_in_words;
 err:
 	mutex_unlock(&hfi_msg_q_mutex);
 	return rc;
 }
 
+int hfi_cmd_ubwc_config(uint32_t *ubwc_cfg)
+{
+	uint8_t *prop;
+	struct hfi_cmd_prop *dbg_prop;
+	uint32_t size = 0;
+
+	size = sizeof(struct hfi_cmd_prop) +
+		sizeof(struct hfi_cmd_ubwc_cfg);
+
+	prop = kzalloc(size, GFP_KERNEL);
+	if (!prop)
+		return -ENOMEM;
+
+	dbg_prop = (struct hfi_cmd_prop *)prop;
+	dbg_prop->size = size;
+	dbg_prop->pkt_type = HFI_CMD_SYS_SET_PROPERTY;
+	dbg_prop->num_prop = 1;
+	dbg_prop->prop_data[0] = HFI_PROP_SYS_UBWC_CFG;
+	dbg_prop->prop_data[1] = ubwc_cfg[0];
+	dbg_prop->prop_data[2] = ubwc_cfg[1];
+
+	hfi_write_cmd(prop);
+	kfree(prop);
+
+	return 0;
+}
+
 int hfi_enable_ipe_bps_pc(bool enable)
 {
 	uint8_t *prop;
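The two words placed in prop_data[1] and prop_data[2] above line up with struct hfi_cmd_ubwc_cfg (ubwc_fetch_cfg, ubwc_write_cfg) declared in hfi_sys_defs.h, so a caller only needs the DT-derived array. A sketch mirroring the CAM_ICP_A5_CMD_UBWC_CFG path added later in this patch; the error string is illustrative:

	/* a5_soc->ubwc_cfg[] is filled from the "ubwc-cfg" DT property */
	int rc = hfi_cmd_ubwc_config(a5_soc->ubwc_cfg);
	if (rc)
		CAM_ERR(CAM_ICP, "sending HFI_PROP_SYS_UBWC_CFG failed");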
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
index e200f6f..635d0df 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
@@ -266,8 +266,8 @@
 
 	cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
 	cpas_vote.ahb_vote.vote.level = CAM_SVS_VOTE;
-	cpas_vote.axi_vote.compressed_bw = ICP_TURBO_VOTE;
-	cpas_vote.axi_vote.uncompressed_bw = ICP_TURBO_VOTE;
+	cpas_vote.axi_vote.compressed_bw = CAM_ICP_A5_BW_BYTES_VOTE;
+	cpas_vote.axi_vote.uncompressed_bw = CAM_ICP_A5_BW_BYTES_VOTE;
 
 	rc = cam_cpas_start(core_info->cpas_handle,
 		&cpas_vote.ahb_vote, &cpas_vote.axi_vote);
@@ -367,6 +367,7 @@
 	struct cam_hw_soc_info *soc_info = NULL;
 	struct cam_a5_device_core_info *core_info = NULL;
 	struct cam_a5_device_hw_info *hw_info = NULL;
+	struct a5_soc_info *a5_soc = NULL;
 	int rc = 0;
 
 	if (!device_priv) {
@@ -456,6 +457,14 @@
 			core_info->cpas_start = false;
 		}
 		break;
+	case CAM_ICP_A5_CMD_UBWC_CFG:
+		a5_soc = soc_info->soc_private;
+		if (!a5_soc) {
+			CAM_ERR(CAM_ICP, "A5 private soc info is NULL");
+			return -EINVAL;
+		}
+		rc = hfi_cmd_ubwc_config(a5_soc->ubwc_cfg);
+		break;
 	default:
 		break;
 	}
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_dev.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_dev.c
index 99e2e79..14c3c9c 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_dev.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_dev.c
@@ -50,6 +50,40 @@
 };
 EXPORT_SYMBOL(cam_a5_hw_info);
 
+static bool cam_a5_cpas_cb(uint32_t client_handle, void *userdata,
+	struct cam_cpas_irq_data *irq_data)
+{
+	bool error_handled = false;
+
+	if (!irq_data)
+		return error_handled;
+
+	switch (irq_data->irq_type) {
+	case CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR:
+		CAM_ERR_RATE_LIMIT(CAM_ICP,
+			"IPE/BPS UBWC Decode error type=%d status=%x thr_err=%d, fcl_err=%d, len_md_err=%d, format_err=%d",
+			irq_data->irq_type,
+			irq_data->u.dec_err.decerr_status.value,
+			irq_data->u.dec_err.decerr_status.thr_err,
+			irq_data->u.dec_err.decerr_status.fcl_err,
+			irq_data->u.dec_err.decerr_status.len_md_err,
+			irq_data->u.dec_err.decerr_status.format_err);
+		error_handled = true;
+		break;
+	case CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR:
+		CAM_ERR_RATE_LIMIT(CAM_ICP,
+			"IPE/BPS UBWC Encode error type=%d status=%x",
+			irq_data->irq_type,
+			irq_data->u.enc_err.encerr_status.value);
+		error_handled = true;
+		break;
+	default:
+		break;
+	}
+
+	return error_handled;
+}
+
 int cam_a5_register_cpas(struct cam_hw_soc_info *soc_info,
 			struct cam_a5_device_core_info *core_info,
 			uint32_t hw_idx)
@@ -59,7 +93,7 @@
 
 	cpas_register_params.dev = &soc_info->pdev->dev;
 	memcpy(cpas_register_params.identifier, "icp", sizeof("icp"));
-	cpas_register_params.cam_cpas_client_cb = NULL;
+	cpas_register_params.cam_cpas_client_cb = cam_a5_cpas_cb;
 	cpas_register_params.cell_index = hw_idx;
 	cpas_register_params.userdata = NULL;
 
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.c
index f252931..3177513 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.c
@@ -22,11 +22,12 @@
 
 static int cam_a5_get_dt_properties(struct cam_hw_soc_info *soc_info)
 {
-	int rc = 0;
+	int rc = 0, i;
 	const char *fw_name;
 	struct a5_soc_info *camp_a5_soc_info;
 	struct device_node *of_node = NULL;
 	struct platform_device *pdev = NULL;
+	int num_ubwc_cfg;
 
 	pdev = soc_info->pdev;
 	of_node = pdev->dev.of_node;
@@ -41,9 +42,28 @@
 	fw_name = camp_a5_soc_info->fw_name;
 
 	rc = of_property_read_string(of_node, "fw_name", &fw_name);
-	if (rc < 0)
+	if (rc < 0) {
 		CAM_ERR(CAM_ICP, "fw_name read failed");
+		goto end;
+	}
 
+	num_ubwc_cfg = of_property_count_u32_elems(of_node, "ubwc-cfg");
+	if ((num_ubwc_cfg < 0) || (num_ubwc_cfg > ICP_UBWC_MAX)) {
+		CAM_ERR(CAM_ICP, "wrong ubwc_cfg: %d", num_ubwc_cfg);
+		rc = num_ubwc_cfg;
+		goto end;
+	}
+
+	for (i = 0; i < num_ubwc_cfg; i++) {
+		rc = of_property_read_u32_index(of_node, "ubwc-cfg",
+			i, &camp_a5_soc_info->ubwc_cfg[i]);
+		if (rc < 0) {
+			CAM_ERR(CAM_ICP, "unable to read ubwc cfg values");
+			break;
+		}
+	}
+
+end:
 	return rc;
 }
 
@@ -81,7 +101,7 @@
 	int rc = 0;
 
 	rc = cam_soc_util_enable_platform_resource(soc_info, true,
-		CAM_TURBO_VOTE, true);
+		CAM_SVS_VOTE, true);
 	if (rc)
 		CAM_ERR(CAM_ICP, "enable platform failed");
 
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.h
index 916143d..3593cfb 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.h
@@ -15,8 +15,11 @@
 
 #include "cam_soc_util.h"
 
+#define ICP_UBWC_MAX 2
+
 struct a5_soc_info {
 	char *fw_name;
+	uint32_t ubwc_cfg[ICP_UBWC_MAX];
 };
 
 int cam_a5_init_soc_resources(struct cam_hw_soc_info *soc_info,
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c
index 2477e7d..400e1e7 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c
@@ -65,7 +65,7 @@
 	int rc = 0;
 
 	rc = cam_soc_util_enable_platform_resource(soc_info, true,
-		CAM_TURBO_VOTE, false);
+		CAM_SVS_VOTE, false);
 	if (rc)
 		CAM_ERR(CAM_ICP, "enable platform failed");
 
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index 2a7d961..93926a78 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -24,6 +24,7 @@
 #include <linux/debugfs.h>
 #include <media/cam_defs.h>
 #include <media/cam_icp.h>
+#include <media/cam_cpas.h>
 
 #include "cam_sync_api.h"
 #include "cam_packet_util.h"
@@ -47,12 +48,33 @@
 #include "cam_debug_util.h"
 #include "cam_soc_util.h"
 #include "cam_trace.h"
+#include "cam_cpas_api.h"
 
 #define ICP_WORKQ_TASK_CMD_TYPE 1
 #define ICP_WORKQ_TASK_MSG_TYPE 2
 
 static struct cam_icp_hw_mgr icp_hw_mgr;
 
+static int cam_icp_send_ubwc_cfg(struct cam_icp_hw_mgr *hw_mgr)
+{
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	int rc;
+
+	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+	if (!a5_dev_intf) {
+		CAM_ERR(CAM_ICP, "a5_dev_intf is NULL");
+		return -EINVAL;
+	}
+
+	rc = a5_dev_intf->hw_ops.process_cmd(
+		a5_dev_intf->hw_priv,
+		CAM_ICP_A5_CMD_UBWC_CFG, NULL, 0);
+	if (rc)
+		CAM_ERR(CAM_ICP, "CAM_ICP_A5_CMD_UBWC_CFG is failed");
+
+	return rc;
+}
+
 static void cam_icp_hw_mgr_clk_info_update(struct cam_icp_hw_mgr *hw_mgr,
 	struct cam_icp_hw_ctx_data *ctx_data)
 {
@@ -73,13 +95,13 @@
 
 	for (i = 0; i < ICP_CLK_HW_MAX; i++) {
 		hw_mgr->clk_info[i].base_clk = 0;
-		hw_mgr->clk_info[i].curr_clk = ICP_TURBO_VOTE;
+		hw_mgr->clk_info[i].curr_clk = ICP_CLK_SVS_HZ;
 		hw_mgr->clk_info[i].threshold = ICP_OVER_CLK_THRESHOLD;
 		hw_mgr->clk_info[i].over_clked = 0;
 		hw_mgr->clk_info[i].uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
 		hw_mgr->clk_info[i].compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
 	}
-	hw_mgr->icp_default_clk = ICP_SVS_VOTE;
+	hw_mgr->icp_default_clk = ICP_CLK_SVS_HZ;
 }
 
 static int cam_icp_get_actual_clk_rate_idx(
@@ -209,14 +231,14 @@
 	int i;
 
 	for (i = 0; i < ICP_CLK_HW_MAX; i++) {
-		hw_mgr->clk_info[i].base_clk = ICP_TURBO_VOTE;
-		hw_mgr->clk_info[i].curr_clk = ICP_TURBO_VOTE;
+		hw_mgr->clk_info[i].base_clk = ICP_CLK_SVS_HZ;
+		hw_mgr->clk_info[i].curr_clk = ICP_CLK_SVS_HZ;
 		hw_mgr->clk_info[i].threshold = ICP_OVER_CLK_THRESHOLD;
 		hw_mgr->clk_info[i].over_clked = 0;
 		hw_mgr->clk_info[i].uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
 		hw_mgr->clk_info[i].compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
 	}
-	hw_mgr->icp_default_clk = ICP_SVS_VOTE;
+	hw_mgr->icp_default_clk = ICP_CLK_SVS_HZ;
 
 	return 0;
 }
@@ -444,7 +466,7 @@
 
 static bool cam_icp_debug_clk_update(struct cam_icp_clk_info *hw_mgr_clk_info)
 {
-	if (icp_hw_mgr.icp_debug_clk < ICP_TURBO_VOTE &&
+	if (icp_hw_mgr.icp_debug_clk < ICP_CLK_TURBO_HZ &&
 		icp_hw_mgr.icp_debug_clk &&
 		icp_hw_mgr.icp_debug_clk != hw_mgr_clk_info->curr_clk) {
 		mutex_lock(&icp_hw_mgr.hw_mgr_mutex);
@@ -1128,11 +1150,12 @@
 {
 	uint32_t *msg_ptr = NULL, *pkt_ptr = NULL;
 	struct hfi_msg_debug *dbg_msg;
-	int64_t read_len, size_processed = 0;
+	uint32_t read_len, size_processed = 0;
 	char *dbg_buf;
+	int rc = 0;
 
-	read_len = hfi_read_message(icp_hw_mgr.dbg_buf, Q_DBG);
-	if (read_len < 0)
+	rc = hfi_read_message(icp_hw_mgr.dbg_buf, Q_DBG, &read_len);
+	if (rc)
 		return;
 
 	msg_ptr = (uint32_t *)icp_hw_mgr.dbg_buf;
@@ -1157,7 +1180,8 @@
 
 static int cam_icp_process_msg_pkt_type(
 	struct cam_icp_hw_mgr *hw_mgr,
-	uint32_t *msg_ptr)
+	uint32_t *msg_ptr,
+	uint32_t *msg_processed_len)
 {
 	int rc = 0;
 	int size_processed = 0;
@@ -1208,19 +1232,17 @@
 		break;
 	}
 
-	if (rc)
-		return rc;
-
-	return size_processed;
+	*msg_processed_len = size_processed;
+	return rc;
 }
 
 static int32_t cam_icp_mgr_process_msg(void *priv, void *data)
 {
-	int64_t read_len, msg_processed_len;
-	int rc = 0;
+	uint32_t read_len, msg_processed_len;
 	uint32_t *msg_ptr = NULL;
 	struct hfi_msg_work_data *task_data;
 	struct cam_icp_hw_mgr *hw_mgr;
+	int rc = 0;
 
 	if (!data || !priv) {
 		CAM_ERR(CAM_ICP, "Invalid data");
@@ -1230,23 +1252,24 @@
 	task_data = data;
 	hw_mgr = priv;
 
-	read_len = hfi_read_message(icp_hw_mgr.msg_buf, Q_MSG);
-	if (read_len < 0) {
-		rc = read_len;
+	rc = hfi_read_message(icp_hw_mgr.msg_buf, Q_MSG, &read_len);
+	if (rc) {
 		CAM_DBG(CAM_ICP, "Unable to read msg q");
 	} else {
+		read_len = read_len << BYTE_WORD_SHIFT;
 		msg_ptr = (uint32_t *)icp_hw_mgr.msg_buf;
 		while (true) {
-			msg_processed_len = cam_icp_process_msg_pkt_type(
-			hw_mgr, msg_ptr);
-			if (msg_processed_len < 0) {
-				rc = msg_processed_len;
+			rc = cam_icp_process_msg_pkt_type(hw_mgr, msg_ptr,
+				&msg_processed_len);
+			if (rc)
 				return rc;
-			}
 
 			read_len -= msg_processed_len;
-			if (read_len > 0)
-				msg_ptr += msg_processed_len;
+			if (read_len > 0) {
+				msg_ptr += (msg_processed_len >>
+				BYTE_WORD_SHIFT);
+				msg_processed_len = 0;
+			}
 			else
 				break;
 		}
@@ -1679,15 +1702,6 @@
 		return -EINVAL;
 	}
 
-	irq_cb.icp_hw_mgr_cb = NULL;
-	irq_cb.data = NULL;
-	rc = a5_dev_intf->hw_ops.process_cmd(
-		a5_dev_intf->hw_priv,
-		CAM_ICP_A5_SET_IRQ_CB,
-		&irq_cb, sizeof(irq_cb));
-	if (rc)
-		CAM_ERR(CAM_ICP, "deregister irq call back failed");
-
 	fw_buf_info.kva = 0;
 	fw_buf_info.iova = 0;
 	fw_buf_info.len = 0;
@@ -1706,6 +1720,16 @@
 	mutex_lock(&hw_mgr->hw_mgr_mutex);
 	cam_hfi_deinit();
 	cam_icp_mgr_device_deinit(hw_mgr);
+
+	irq_cb.icp_hw_mgr_cb = NULL;
+	irq_cb.data = NULL;
+	rc = a5_dev_intf->hw_ops.process_cmd(
+		a5_dev_intf->hw_priv,
+		CAM_ICP_A5_SET_IRQ_CB,
+		&irq_cb, sizeof(irq_cb));
+	if (rc)
+		CAM_ERR(CAM_ICP, "deregister irq call back failed");
+
 	cam_icp_free_hfi_mem();
 	hw_mgr->fw_download = false;
 	hw_mgr->secure_mode = CAM_SECURE_MODE_NON_SECURE;
@@ -1887,7 +1911,7 @@
 	return rc;
 }
 
-static int cam_icp_mgr_download_fw(void *hw_mgr_priv, void *download_fw_args)
+static int cam_icp_mgr_hw_open(void *hw_mgr_priv, void *download_fw_args)
 {
 	struct cam_hw_intf *a5_dev_intf = NULL;
 	struct cam_hw_info *a5_dev = NULL;
@@ -2145,7 +2169,7 @@
 {
 	int i, j, k, rc = 0;
 	struct cam_buf_io_cfg *io_cfg_ptr = NULL;
-	int32_t sync_in_obj[CAM_MAX_OUT_RES];
+	int32_t sync_in_obj[CAM_MAX_IN_RES];
 	int32_t merged_sync_in_obj;
 
 	io_cfg_ptr = (struct cam_buf_io_cfg *) ((uint32_t *) &packet->payload +
@@ -2423,9 +2447,16 @@
 	}
 
 	ctx_data = release_hw->ctxt_to_hw_map;
+	if (!ctx_data) {
+		CAM_ERR(CAM_ICP, "NULL ctx");
+		return -EINVAL;
+	}
+
 	ctx_id = ctx_data->ctx_id;
-	if (ctx_id < 0 || ctx_id >= CAM_ICP_CTX_MAX)
+	if (ctx_id < 0 || ctx_id >= CAM_ICP_CTX_MAX) {
 		CAM_ERR(CAM_ICP, "Invalid ctx id: %d", ctx_id);
+		return -EINVAL;
+	}
 
 	mutex_lock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
 	if (!hw_mgr->ctx_data[ctx_id].in_use) {
@@ -2721,12 +2752,16 @@
 		rc = cam_icp_clk_info_init(hw_mgr, ctx_data);
 		if (rc)
 			goto get_io_buf_failed;
-		rc = cam_icp_mgr_download_fw(hw_mgr, ctx_data);
+		rc = cam_icp_mgr_hw_open(hw_mgr, ctx_data);
 		if (rc)
 			goto get_io_buf_failed;
 		rc = cam_icp_mgr_ipe_bps_resume(hw_mgr, ctx_data);
 		if (rc)
 			goto ipe_bps_resume_failed;
+
+		rc = cam_icp_send_ubwc_cfg(hw_mgr);
+		if (rc)
+			goto ubwc_cfg_failed;
 		mutex_lock(&hw_mgr->hw_mgr_mutex);
 	}
 	mutex_unlock(&hw_mgr->hw_mgr_mutex);
@@ -2784,6 +2819,7 @@
 	cam_icp_mgr_destroy_handle(ctx_data);
 create_handle_failed:
 send_ping_failed:
+ubwc_cfg_failed:
 	cam_icp_mgr_ipe_bps_power_collapse(hw_mgr, ctx_data, 0);
 ipe_bps_resume_failed:
 	if (!hw_mgr->ctxt_cnt)
@@ -2859,6 +2895,9 @@
 		goto num_ipe_failed;
 	}
 
+	if (!icp_hw_mgr.ipe1_enable)
+		num_dev = 1;
+
 	icp_hw_mgr.devices[CAM_ICP_DEV_IPE] = kzalloc(
 		sizeof(struct cam_hw_intf *) * num_dev, GFP_KERNEL);
 	if (!icp_hw_mgr.devices[CAM_ICP_DEV_IPE]) {
@@ -2937,9 +2976,10 @@
 		if (!child_dev_intf) {
 			CAM_ERR(CAM_ICP, "no child device");
 			of_node_put(child_node);
+			if (!icp_hw_mgr.ipe1_enable)
+				continue;
 			goto compat_hw_name_failed;
 		}
-
 		icp_hw_mgr.devices[child_dev_intf->hw_type]
 			[child_dev_intf->hw_idx] = child_dev_intf;
 
@@ -3015,6 +3055,8 @@
 {
 	int i, rc = 0;
 	struct cam_hw_mgr_intf *hw_mgr_intf;
+	struct cam_cpas_query_cap query;
+	uint32_t cam_caps;
 
 	hw_mgr_intf = (struct cam_hw_mgr_intf *)hw_mgr_hdl;
 	if (!of_node || !hw_mgr_intf) {
@@ -3029,7 +3071,7 @@
 	hw_mgr_intf->hw_release = cam_icp_mgr_release_hw;
 	hw_mgr_intf->hw_prepare_update = cam_icp_mgr_prepare_hw_update;
 	hw_mgr_intf->hw_config = cam_icp_mgr_config_hw;
-	hw_mgr_intf->download_fw = cam_icp_mgr_download_fw;
+	hw_mgr_intf->hw_open = cam_icp_mgr_hw_open;
 	hw_mgr_intf->hw_close = cam_icp_mgr_hw_close;
 
 	icp_hw_mgr.secure_mode = CAM_SECURE_MODE_NON_SECURE;
@@ -3039,6 +3081,15 @@
 	for (i = 0; i < CAM_ICP_CTX_MAX; i++)
 		mutex_init(&icp_hw_mgr.ctx_data[i].ctx_mutex);
 
+	cam_cpas_get_hw_info(&query.camera_family,
+		&query.camera_version, &query.cpas_version, &cam_caps);
+	if (cam_caps & CPAS_IPE0_BIT)
+		icp_hw_mgr.ipe0_enable = true;
+	if (cam_caps & CPAS_IPE1_BIT)
+		icp_hw_mgr.ipe1_enable = true;
+	if (cam_caps & CPAS_BPS_BIT)
+		icp_hw_mgr.bps_enable = true;
+
 	rc = cam_icp_mgr_init_devs(of_node);
 	if (rc)
 		goto dev_init_failed;
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
index a5eb122..321f10c 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
@@ -32,6 +32,7 @@
 #define CAM_FRAME_CMD_MAX       20
 
 #define CAM_MAX_OUT_RES         6
+#define CAM_MAX_IN_RES          8
 
 #define ICP_WORKQ_NUM_TASK      100
 #define ICP_WORKQ_TASK_CMD_TYPE 1
@@ -53,6 +54,10 @@
 
 #define ICP_OVER_CLK_THRESHOLD  15
 
+#define CPAS_IPE0_BIT           0x1000
+#define CPAS_IPE1_BIT           0x2000
+#define CPAS_BPS_BIT            0x400
+
 /**
  * struct icp_hfi_mem_info
  * @qtbl: Memory info of queue table
@@ -235,6 +240,9 @@
  * @a5_jtag_debug: entry to enable A5 JTAG debugging
  * @a5_debug_q : entry to enable FW debug message
  * @a5_dbg_lvl : debug level set to FW.
+ * @ipe0_enable: Flag indicating whether IPE0 hardware is available
+ * @ipe1_enable: Flag indicating whether IPE1 hardware is available
+ * @bps_enable: Flag indicating whether BPS hardware is available
  */
 struct cam_icp_hw_mgr {
 	struct mutex hw_mgr_mutex;
@@ -268,8 +276,12 @@
 	bool a5_jtag_debug;
 	bool a5_debug_q;
 	u64 a5_dbg_lvl;
+	bool ipe0_enable;
+	bool ipe1_enable;
+	bool bps_enable;
 };
 
 static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args);
-static int cam_icp_mgr_download_fw(void *hw_mgr_priv, void *download_fw_args);
+static int cam_icp_mgr_hw_open(void *hw_mgr_priv, void *download_fw_args);
+
 #endif /* CAM_ICP_HW_MGR_H */
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h
index 2686877..dad7736 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h
@@ -31,6 +31,7 @@
 	CAM_ICP_A5_CMD_VOTE_CPAS,
 	CAM_ICP_A5_CMD_CPAS_START,
 	CAM_ICP_A5_CMD_CPAS_STOP,
+	CAM_ICP_A5_CMD_UBWC_CFG,
 	CAM_ICP_A5_CMD_MAX,
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
index 4f6fce8..d2e04ef 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
@@ -18,10 +18,15 @@
 #include <linux/of.h>
 #include "cam_cpas_api.h"
 
-#define ICP_TURBO_VOTE           600000000
-#define ICP_SVS_VOTE             400000000
+#define ICP_CLK_TURBO_HZ         600000000
+#define ICP_CLK_SVS_HZ           400000000
+
+#define CAM_ICP_A5_BW_BYTES_VOTE 100000000
+
 #define CAM_ICP_CTX_MAX          36
 
+#define CPAS_IPE1_BIT            0x2000
+
 int cam_icp_hw_mgr_init(struct device_node *of_node,
 	uint64_t *hw_mgr_hdl);
 
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_dev.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_dev.c
index cbd9d84..cc2b1b1 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_dev.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_dev.c
@@ -72,14 +72,25 @@
 	struct cam_ipe_device_core_info   *core_info = NULL;
 	struct cam_ipe_device_hw_info     *hw_info = NULL;
 	int                                rc = 0;
+	struct cam_cpas_query_cap query;
+	uint32_t cam_caps;
+	uint32_t hw_idx;
+
+	of_property_read_u32(pdev->dev.of_node,
+		"cell-index", &hw_idx);
+
+	cam_cpas_get_hw_info(&query.camera_family,
+		&query.camera_version, &query.cpas_version, &cam_caps);
+	if ((!(cam_caps & CPAS_IPE1_BIT)) && (hw_idx)) {
+		CAM_ERR(CAM_ICP, "IPE1 hw idx = %d\n", hw_idx);
+		return -EINVAL;
+	}
 
 	ipe_dev_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
 	if (!ipe_dev_intf)
 		return -ENOMEM;
 
-	of_property_read_u32(pdev->dev.of_node,
-		"cell-index", &ipe_dev_intf->hw_idx);
-
+	ipe_dev_intf->hw_idx = hw_idx;
 	ipe_dev = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
 	if (!ipe_dev) {
 		kfree(ipe_dev_intf);
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c
index 49176b5..71af1a2 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c
@@ -116,7 +116,7 @@
 	int rc = 0;
 
 	rc = cam_soc_util_enable_platform_resource(soc_info, true,
-		CAM_TURBO_VOTE, false);
+		CAM_SVS_VOTE, false);
 	if (rc) {
 		CAM_ERR(CAM_ICP, "enable platform failed");
 		return rc;
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
index 14d32ad..ae6b149 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
@@ -24,6 +24,8 @@
 #include "cam_trace.h"
 #include "cam_debug_util.h"
 
+static const char isp_dev_name[] = "isp";
+
 static int __cam_isp_ctx_enqueue_request_in_order(
 	struct cam_context *ctx, struct cam_ctx_request *req)
 {
@@ -425,6 +427,13 @@
 	return rc;
 }
 
+static int __cam_isp_ctx_reg_upd_in_hw_error(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+	return 0;
+}
+
 static int __cam_isp_ctx_sof_in_activated_state(
 	struct cam_isp_context *ctx_isp, void *evt_data)
 {
@@ -687,8 +696,13 @@
 	void *evt_data)
 {
 	int                              rc = 0;
-	struct cam_ctx_request          *req;
+	uint32_t                         i = 0;
+	bool                             found = 0;
+	struct cam_ctx_request          *req = NULL;
+	struct cam_ctx_request          *req_temp;
+	struct cam_isp_ctx_req          *req_isp = NULL;
 	struct cam_req_mgr_error_notify  notify;
+	uint64_t                         error_request_id;
 
 	struct cam_context *ctx = ctx_isp->base;
 	struct cam_isp_hw_error_event_data  *error_event_data =
@@ -699,7 +713,7 @@
 	CAM_DBG(CAM_ISP, "Enter error_type = %d", error_type);
 	if ((error_type == CAM_ISP_HW_ERROR_OVERFLOW) ||
 		(error_type == CAM_ISP_HW_ERROR_BUSIF_OVERFLOW))
-		notify.error = CRM_KMD_ERR_FATAL;
+		notify.error = CRM_KMD_ERR_OVERFLOW;
 
 	/*
 	 * Need to check the active req
@@ -710,31 +724,92 @@
 	if (list_empty(&ctx->active_req_list)) {
 		CAM_ERR_RATE_LIMIT(CAM_ISP,
 			"handling error with no active request");
-		rc = -EINVAL;
-		goto end;
+	} else {
+		list_for_each_entry_safe(req, req_temp,
+			&ctx->active_req_list, list) {
+			req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+			if (!req_isp->bubble_report) {
+				for (i = 0; i < req_isp->num_fence_map_out;
+					i++) {
+					CAM_ERR(CAM_ISP, "req %llu, Sync fd %x",
+						req->request_id,
+						req_isp->fence_map_out[i].
+						sync_id);
+					if (req_isp->fence_map_out[i].sync_id
+						!= -1) {
+						rc = cam_sync_signal(
+						req_isp->fence_map_out[i].
+						sync_id,
+						CAM_SYNC_STATE_SIGNALED_ERROR);
+						req_isp->fence_map_out[i].
+						sync_id = -1;
+					}
+				}
+				list_del_init(&req->list);
+				list_add_tail(&req->list, &ctx->free_req_list);
+				ctx_isp->active_req_cnt--;
+			} else {
+				found = 1;
+				break;
+			}
+		}
 	}
 
-	req = list_first_entry(&ctx->active_req_list,
-				struct cam_ctx_request, list);
+	if (found) {
+		list_for_each_entry_safe_reverse(req, req_temp,
+			&ctx->active_req_list, list) {
+			req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+			list_del_init(&req->list);
+			list_add(&req->list, &ctx->pending_req_list);
+			ctx_isp->active_req_cnt--;
+		}
+	}
+
+	do {
+		if (list_empty(&ctx->pending_req_list)) {
+			error_request_id = ctx_isp->last_applied_req_id + 1;
+			req_isp = NULL;
+			break;
+		}
+		req = list_first_entry(&ctx->pending_req_list,
+			struct cam_ctx_request, list);
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		error_request_id = ctx_isp->last_applied_req_id;
+
+		if (req_isp->bubble_report)
+			break;
+
+		for (i = 0; i < req_isp->num_fence_map_out; i++) {
+			if (req_isp->fence_map_out[i].sync_id != -1)
+				rc = cam_sync_signal(
+					req_isp->fence_map_out[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+			req_isp->fence_map_out[i].sync_id = -1;
+		}
+		list_del_init(&req->list);
+		list_add_tail(&req->list, &ctx->free_req_list);
+
+	} while (req->request_id < ctx_isp->last_applied_req_id);
+
 
 	if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_err) {
 		notify.link_hdl = ctx->link_hdl;
 		notify.dev_hdl = ctx->dev_hdl;
-		notify.req_id = req->request_id;
+		notify.req_id = error_request_id;
+
+		if (req_isp && req_isp->bubble_report)
+			notify.error = CRM_KMD_ERR_BUBBLE;
+
+		CAM_WARN(CAM_ISP, "Notify CRM: req %lld, frame %lld\n",
+			error_request_id, ctx_isp->frame_id);
 
 		ctx->ctx_crm_intf->notify_err(&notify);
-		CAM_ERR_RATE_LIMIT(CAM_ISP, "Notify CRM about ERROR frame %lld",
-			ctx_isp->frame_id);
+		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HW_ERROR;
 	} else {
 		CAM_ERR_RATE_LIMIT(CAM_ISP, "Can not notify ERRROR to CRM");
 		rc = -EFAULT;
 	}
 
-	list_del_init(&req->list);
-	list_add(&req->list, &ctx->pending_req_list);
-	/* might need to check if active list is empty */
-
-end:
 	CAM_DBG(CAM_ISP, "Exit");
 	return rc;
 }
@@ -744,7 +819,7 @@
 	/* SOF */
 	{
 		.irq_ops = {
-			NULL,
+			__cam_isp_ctx_handle_error,
 			__cam_isp_ctx_sof_in_activated_state,
 			__cam_isp_ctx_reg_upd_in_sof,
 			__cam_isp_ctx_notify_sof_in_actived_state,
@@ -777,7 +852,7 @@
 	/* BUBBLE */
 	{
 		.irq_ops = {
-			NULL,
+			__cam_isp_ctx_handle_error,
 			__cam_isp_ctx_sof_in_activated_state,
 			NULL,
 			__cam_isp_ctx_notify_sof_in_actived_state,
@@ -788,7 +863,7 @@
 	/* Bubble Applied */
 	{
 		.irq_ops = {
-			NULL,
+			__cam_isp_ctx_handle_error,
 			__cam_isp_ctx_sof_in_activated_state,
 			__cam_isp_ctx_reg_upd_in_activated_state,
 			__cam_isp_ctx_epoch_in_bubble_applied,
@@ -796,6 +871,17 @@
 			__cam_isp_ctx_buf_done_in_bubble_applied,
 		},
 	},
+	/* HW ERROR */
+	{
+		.irq_ops = {
+			NULL,
+			__cam_isp_ctx_sof_in_activated_state,
+			__cam_isp_ctx_reg_upd_in_hw_error,
+			NULL,
+			NULL,
+			NULL,
+		},
+	},
 	/* HALT */
 	{
 	},
@@ -876,7 +962,9 @@
 	} else {
 		spin_lock_bh(&ctx->lock);
 		ctx_isp->substate_activated = next_state;
-		CAM_DBG(CAM_ISP, "new state %d", next_state);
+		ctx_isp->last_applied_req_id = apply->request_id;
+		CAM_DBG(CAM_ISP, "new substate state %d, applied req %lld",
+			next_state, ctx_isp->last_applied_req_id);
 		spin_unlock_bh(&ctx->lock);
 	}
 end:
@@ -1611,7 +1699,7 @@
 	req->request_id = packet->header.request_id;
 	req->status = 1;
 
-	if (ctx->state == CAM_CTX_ACTIVATED && ctx->ctx_crm_intf->add_req) {
+	if (ctx->state >= CAM_CTX_READY && ctx->ctx_crm_intf->add_req) {
 		add_req.link_hdl = ctx->link_hdl;
 		add_req.dev_hdl  = ctx->dev_hdl;
 		add_req.req_id   = req->request_id;
@@ -2003,6 +2091,24 @@
 	return rc;
 }
 
+static int __cam_isp_ctx_unlink_in_activated(struct cam_context *ctx,
+	struct cam_req_mgr_core_dev_link_setup *unlink)
+{
+	int rc = 0;
+
+	CAM_WARN(CAM_ISP,
+		"Received unlink in activated state. It's unexpected");
+	rc = __cam_isp_ctx_stop_dev_in_activated_unlock(ctx);
+	if (rc)
+		CAM_WARN(CAM_ISP, "Stop device failed rc=%d", rc);
+
+	rc = __cam_isp_ctx_unlink_in_ready(ctx, unlink);
+	if (rc)
+		CAM_ERR(CAM_ISP, "Unlink failed rc=%d", rc);
+
+	return rc;
+}
+
 static int __cam_isp_ctx_apply_req(struct cam_context *ctx,
 	struct cam_req_mgr_apply_request *apply)
 {
@@ -2114,6 +2220,7 @@
 			.config_dev = __cam_isp_ctx_config_dev_in_top_state,
 		},
 		.crm_ops = {
+			.unlink = __cam_isp_ctx_unlink_in_activated,
 			.apply_req = __cam_isp_ctx_apply_req,
 			.flush_req = __cam_isp_ctx_flush_req_in_top_state,
 		},
@@ -2154,8 +2261,8 @@
 	}
 
 	/* camera context setup */
-	rc = cam_context_init(ctx_base, crm_node_intf, hw_intf, ctx->req_base,
-		CAM_CTX_REQ_MAX);
+	rc = cam_context_init(ctx_base, isp_dev_name, crm_node_intf, hw_intf,
+		ctx->req_base, CAM_CTX_REQ_MAX);
 	if (rc) {
 		CAM_ERR(CAM_ISP, "Camera Context Base init failed");
 		goto err;
@@ -2182,4 +2289,3 @@
 	memset(ctx, 0, sizeof(*ctx));
 	return rc;
 }
-
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
index 621d652..347290c 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
@@ -31,7 +31,7 @@
  * Maxiimum configuration entry size  - This is based on the
  * worst case DUAL IFE use case plus some margin.
  */
-#define CAM_ISP_CTX_CFG_MAX                     20
+#define CAM_ISP_CTX_CFG_MAX                     22
 
 /* forward declaration */
 struct cam_isp_context;
@@ -50,6 +50,7 @@
 	CAM_ISP_CTX_ACTIVATED_EPOCH,
 	CAM_ISP_CTX_ACTIVATED_BUBBLE,
 	CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED,
+	CAM_ISP_CTX_ACTIVATED_HW_ERROR,
 	CAM_ISP_CTX_ACTIVATED_HALT,
 	CAM_ISP_CTX_ACTIVATED_MAX,
 };
@@ -111,6 +112,7 @@
  * @reported_req_id:       Last reported request id
  * @subscribe_event:       The irq event mask that CRM subscribes to, IFE will
  *                         invoke CRM cb at those event.
+ * @last_applied_req_id:   Last applied request id
  *
  */
 struct cam_isp_context {
@@ -129,6 +131,7 @@
 	int32_t                          active_req_cnt;
 	int64_t                          reported_req_id;
 	uint32_t                         subscribe_event;
+	int64_t                          last_applied_req_id;
 };
 
 /**
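For reference, a minimal standalone sketch of the substate-indexed IRQ dispatch pattern that the irq_ops tables above (including the new HW ERROR row) rely on. All names below are hypothetical illustration, not the driver's types; a NULL slot simply means the event is ignored while that substate is active.

/* Hypothetical, self-contained illustration (not driver code). */
#include <stdio.h>

enum substate { ST_SOF, ST_HW_ERROR, ST_MAX };
enum isp_evt  { EVT_ERROR, EVT_SOF, EVT_REG_UPD, EVT_MAX };

struct demo_ctx { enum substate substate; };

static int handle_error(struct demo_ctx *c)
{
	c->substate = ST_HW_ERROR;	/* park the context until recovery */
	return 0;
}

static int reg_upd_in_hw_error(struct demo_ctx *c)
{
	c->substate = ST_SOF;		/* reg update after recovery resumes */
	return 0;
}

/* one row per substate, one column per event */
static int (*const irq_ops[ST_MAX][EVT_MAX])(struct demo_ctx *) = {
	[ST_SOF]      = { [EVT_ERROR]   = handle_error },
	[ST_HW_ERROR] = { [EVT_REG_UPD] = reg_upd_in_hw_error },
};

static int dispatch(struct demo_ctx *c, enum isp_evt evt)
{
	int (*fn)(struct demo_ctx *) = irq_ops[c->substate][evt];

	return fn ? fn(c) : 0;		/* NULL slot: event ignored */
}

int main(void)
{
	struct demo_ctx c = { .substate = ST_SOF };

	dispatch(&c, EVT_ERROR);	/* SOF + error    -> HW_ERROR */
	dispatch(&c, EVT_REG_UPD);	/* HW_ERROR + RUP -> SOF      */
	printf("final substate: %d\n", (int)c.substate);
	return 0;
}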
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/Makefile
index 7e3c353..1f7dc76 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/Makefile
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/Makefile
@@ -8,6 +8,7 @@
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
 
 obj-$(CONFIG_SPECTRA_CAMERA) += hw_utils/ isp_hw/
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_isp_hw_mgr.o cam_ife_hw_mgr.o
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index f7b40a4..d5a5347 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -14,6 +14,7 @@
 #include <linux/string.h>
 #include <linux/uaccess.h>
 #include <linux/debugfs.h>
+#include <soc/qcom/scm.h>
 #include <uapi/media/cam_isp.h>
 #include "cam_smmu_api.h"
 #include "cam_req_mgr_workq.h"
@@ -26,11 +27,60 @@
 #include "cam_cdm_intf_api.h"
 #include "cam_packet_util.h"
 #include "cam_debug_util.h"
+#include "cam_cpas_api.h"
 
 #define CAM_IFE_HW_ENTRIES_MAX  20
 
+#define TZ_SVC_SMMU_PROGRAM 0x15
+#define TZ_SAFE_SYSCALL_ID  0x3
+#define CAM_IFE_SAFE_DISABLE 0
+#define CAM_IFE_SAFE_ENABLE 1
+#define SMMU_SE_IFE 0
+
+#define CAM_ISP_PACKET_META_MAX                     \
+	(CAM_ISP_PACKET_META_GENERIC_BLOB_COMMON + 1)
+
+#define CAM_ISP_GENERIC_BLOB_TYPE_MAX               \
+	(CAM_ISP_GENERIC_BLOB_TYPE_HFR_CONFIG + 1)
+
+static uint32_t blob_type_hw_cmd_map[CAM_ISP_GENERIC_BLOB_TYPE_MAX] = {
+	CAM_ISP_HW_CMD_GET_HFR_UPDATE,
+};
+
 static struct cam_ife_hw_mgr g_ife_hw_mgr;
 
+static int cam_ife_notify_safe_lut_scm(bool safe_trigger)
+{
+	uint32_t camera_hw_version, rc = 0;
+	struct scm_desc desc = {0};
+
+	rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
+	if (!rc) {
+		switch (camera_hw_version) {
+		case CAM_CPAS_TITAN_170_V100:
+		case CAM_CPAS_TITAN_170_V110:
+		case CAM_CPAS_TITAN_175_V100:
+
+			desc.arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL);
+			desc.args[0] = SMMU_SE_IFE;
+			desc.args[1] = safe_trigger;
+
+			CAM_DBG(CAM_ISP, "Safe scm call %d", safe_trigger);
+			if (scm_call2(SCM_SIP_FNID(TZ_SVC_SMMU_PROGRAM,
+					TZ_SAFE_SYSCALL_ID), &desc)) {
+				CAM_ERR(CAM_ISP,
+					"scm call to Enable Safe failed");
+				rc = -EINVAL;
+			}
+			break;
+		default:
+			break;
+		}
+	}
+
+	return rc;
+}
+
 static int cam_ife_mgr_get_hw_caps(void *hw_mgr_priv,
 	void *hw_caps_args)
 {
@@ -88,6 +138,39 @@
 	return rc;
 }
 
+static int cam_ife_hw_mgr_reset_csid_res(
+	struct cam_ife_hw_mgr_res   *isp_hw_res)
+{
+	int i;
+	int rc = 0;
+	struct cam_hw_intf      *hw_intf;
+	struct cam_csid_reset_cfg_args  csid_reset_args;
+
+	csid_reset_args.reset_type = CAM_IFE_CSID_RESET_PATH;
+
+	for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+		if (!isp_hw_res->hw_res[i])
+			continue;
+		csid_reset_args.node_res = isp_hw_res->hw_res[i];
+		hw_intf = isp_hw_res->hw_res[i]->hw_intf;
+		CAM_DBG(CAM_ISP, "Resetting csid hardware %d",
+			hw_intf->hw_idx);
+		if (hw_intf->hw_ops.reset) {
+			rc = hw_intf->hw_ops.reset(hw_intf->hw_priv,
+				&csid_reset_args,
+				sizeof(struct cam_csid_reset_cfg_args));
+			if (rc <= 0)
+				goto err;
+		}
+	}
+
+	return 0;
+err:
+	CAM_ERR(CAM_ISP, "RESET HW res failed: (type:%d, id:%d)",
+		isp_hw_res->res_type, isp_hw_res->res_id);
+	return rc;
+}
+
 static int cam_ife_hw_mgr_init_hw_res(
 	struct cam_ife_hw_mgr_res   *isp_hw_res)
 {
@@ -1271,16 +1354,17 @@
 
 	cdm_acquire.id = CAM_CDM_VIRTUAL;
 	cdm_acquire.cam_cdm_callback = cam_ife_cam_cdm_callback;
-	if (!cam_cdm_acquire(&cdm_acquire)) {
-		CAM_DBG(CAM_ISP, "Successfully acquired the CDM HW hdl=%x",
-			cdm_acquire.handle);
-		ife_ctx->cdm_handle = cdm_acquire.handle;
-		ife_ctx->cdm_ops = cdm_acquire.ops;
-	} else {
+	rc = cam_cdm_acquire(&cdm_acquire);
+	if (rc) {
 		CAM_ERR(CAM_ISP, "Failed to acquire the CDM HW");
-		goto err;
+		goto free_ctx;
 	}
 
+	CAM_DBG(CAM_ISP, "Successfully acquired the CDM HW hdl=%x",
+		cdm_acquire.handle);
+	ife_ctx->cdm_handle = cdm_acquire.handle;
+	ife_ctx->cdm_ops = cdm_acquire.ops;
+
 	isp_resource = (struct cam_isp_resource *)acquire_args->acquire_info;
 
 	/* acquire HW resources */
@@ -1325,7 +1409,7 @@
 	rc = cam_ife_mgr_process_base_info(ife_ctx);
 	if (rc) {
 		CAM_ERR(CAM_ISP, "Process base info failed");
-		return -EINVAL;
+		goto free_res;
 	}
 
 	acquire_args->ctxt_to_hw_map = ife_ctx;
@@ -1338,6 +1422,8 @@
 	return 0;
 free_res:
 	cam_ife_hw_mgr_release_hw_for_ctx(ife_ctx);
+	cam_cdm_release(ife_ctx->cdm_handle);
+free_ctx:
 	cam_ife_hw_mgr_put_ctx(&ife_hw_mgr->free_ctx_list, &ife_ctx);
 err:
 	CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc);
@@ -1371,6 +1457,8 @@
 		CAM_ERR(CAM_ISP, "Invalid context parameters");
 		return -EPERM;
 	}
+	if (atomic_read(&ctx->overflow_pending))
+		return -EINVAL;
 
 	CAM_DBG(CAM_ISP, "Enter ctx id:%d num_hw_upd_entries %d",
 		ctx->ctx_index, cfg->num_hw_update_entries);
@@ -1402,8 +1490,7 @@
 	return rc;
 }
 
-static int cam_ife_mgr_stop_hw_in_overflow(void *hw_mgr_priv,
-		void *stop_hw_args)
+static int cam_ife_mgr_stop_hw_in_overflow(void *stop_hw_args)
 {
 	int                               rc        = 0;
 	struct cam_hw_stop_args          *stop_args = stop_hw_args;
@@ -1411,7 +1498,7 @@
 	struct cam_ife_hw_mgr_ctx        *ctx;
 	uint32_t                          i, master_base_idx = 0;
 
-	if (!hw_mgr_priv || !stop_hw_args) {
+	if (!stop_hw_args) {
 		CAM_ERR(CAM_ISP, "Invalid arguments");
 		return -EINVAL;
 	}
@@ -1424,7 +1511,6 @@
 	CAM_DBG(CAM_ISP, "Enter...ctx id:%d",
 		ctx->ctx_index);
 
-	/* stop resource will remove the irq mask from the hardware */
 	if (!ctx->num_base) {
 		CAM_ERR(CAM_ISP, "Number of bases are zero");
 		return -EINVAL;
@@ -1438,17 +1524,13 @@
 		}
 	}
 
-	/*
-	 * if Context does not have PIX resources and has only RDI resource
-	 * then take the first base index.
-	 */
-
 	if (i == ctx->num_base)
 		master_base_idx = ctx->base[0].idx;
 
+
 	/* stop the master CIDs first */
 	cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
-			master_base_idx, CAM_CSID_HALT_IMMEDIATELY);
+		master_base_idx, CAM_CSID_HALT_IMMEDIATELY);
 
 	/* stop rest of the CIDs  */
 	for (i = 0; i < ctx->num_base; i++) {
@@ -1460,7 +1542,7 @@
 
 	/* stop the master CSID path first */
 	cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid,
-			master_base_idx, CAM_CSID_HALT_IMMEDIATELY);
+		master_base_idx, CAM_CSID_HALT_IMMEDIATELY);
 
 	/* Stop rest of the CSID paths  */
 	for (i = 0; i < ctx->num_base; i++) {
@@ -1480,8 +1562,9 @@
 	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++)
 		cam_ife_hw_mgr_stop_hw_res(&ctx->res_list_ife_out[i]);
 
-	/* update vote bandwidth should be done at the HW layer */
 
+	/* Stop tasklet for context */
+	cam_tasklet_stop(ctx->common.tasklet_info);
 	CAM_DBG(CAM_ISP, "Exit...ctx id:%d rc :%d",
 		ctx->ctx_index, rc);
 
@@ -1597,43 +1680,41 @@
 
 	CAM_DBG(CAM_ISP, "Exit...ctx id:%d rc :%d", ctx->ctx_index, rc);
 
+	mutex_lock(&g_ife_hw_mgr.ctx_mutex);
+	if (!atomic_dec_return(&g_ife_hw_mgr.active_ctx_cnt)) {
+		rc = cam_ife_notify_safe_lut_scm(CAM_IFE_SAFE_DISABLE);
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"SAFE SCM call failed:Check TZ/HYP dependency");
+			rc = 0;
+		}
+	}
+	mutex_unlock(&g_ife_hw_mgr.ctx_mutex);
+
 	return rc;
 }
 
-static int cam_ife_mgr_reset_hw(struct cam_ife_hw_mgr *hw_mgr,
+static int cam_ife_mgr_reset_vfe_hw(struct cam_ife_hw_mgr *hw_mgr,
 			uint32_t hw_idx)
 {
 	uint32_t i = 0;
-	struct cam_hw_intf             *csid_hw_intf;
 	struct cam_hw_intf             *vfe_hw_intf;
-	struct cam_csid_reset_cfg_args  csid_reset_args;
+	uint32_t vfe_reset_type;
 
 	if (!hw_mgr) {
 		CAM_DBG(CAM_ISP, "Invalid arguments");
 		return -EINVAL;
 	}
-
-	/* Reset IFE CSID HW */
-	csid_reset_args.reset_type = CAM_IFE_CSID_RESET_GLOBAL;
-
-	for (i = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) {
-		if (hw_idx != hw_mgr->csid_devices[i]->hw_idx)
-			continue;
-
-		csid_hw_intf = hw_mgr->csid_devices[i];
-		csid_hw_intf->hw_ops.reset(csid_hw_intf->hw_priv,
-			&csid_reset_args,
-			sizeof(struct cam_csid_reset_cfg_args));
-		break;
-	}
-
 	/* Reset VFE HW*/
+	vfe_reset_type = CAM_VFE_HW_RESET_HW;
+
 	for (i = 0; i < CAM_VFE_HW_NUM_MAX; i++) {
 		if (hw_idx != hw_mgr->ife_devices[i]->hw_idx)
 			continue;
 		CAM_DBG(CAM_ISP, "VFE (id = %d) reset", hw_idx);
 		vfe_hw_intf = hw_mgr->ife_devices[i];
-		vfe_hw_intf->hw_ops.reset(vfe_hw_intf->hw_priv, NULL, 0);
+		vfe_hw_intf->hw_ops.reset(vfe_hw_intf->hw_priv,
+			&vfe_reset_type, sizeof(vfe_reset_type));
 		break;
 	}
 
@@ -1641,8 +1722,7 @@
 	return 0;
 }
 
-static int cam_ife_mgr_restart_hw(void *hw_mgr_priv,
-		void *start_hw_args)
+static int cam_ife_mgr_restart_hw(void *start_hw_args)
 {
 	int                               rc = -1;
 	struct cam_hw_start_args         *start_args = start_hw_args;
@@ -1650,7 +1730,7 @@
 	struct cam_ife_hw_mgr_res        *hw_mgr_res;
 	uint32_t                          i;
 
-	if (!hw_mgr_priv || !start_hw_args) {
+	if (!start_hw_args) {
 		CAM_ERR(CAM_ISP, "Invalid arguments");
 		return -EINVAL;
 	}
@@ -1661,9 +1741,10 @@
 		return -EPERM;
 	}
 
-	CAM_DBG(CAM_ISP, "Enter... ctx id:%d", ctx->ctx_index);
-
 	CAM_DBG(CAM_ISP, "START IFE OUT ... in ctx id:%d", ctx->ctx_index);
+
+	cam_tasklet_start(ctx->common.tasklet_info);
+
 	/* start the IFE out devices */
 	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
 		rc = cam_ife_hw_mgr_start_hw_res(&ctx->res_list_ife_out[i]);
@@ -1696,22 +1777,12 @@
 	}
 
 	CAM_DBG(CAM_ISP, "START CID SRC ... in ctx id:%d", ctx->ctx_index);
-	/* Start the IFE CID HW devices */
-	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
-		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
-		if (rc) {
-			CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)",
-				 hw_mgr_res->res_id);
-			goto err;
-		}
-	}
-
 	/* Start IFE root node: do nothing */
 	CAM_DBG(CAM_ISP, "Exit...(success)");
 	return 0;
 
 err:
-	cam_ife_mgr_stop_hw(hw_mgr_priv, start_hw_args);
+	cam_ife_mgr_stop_hw_in_overflow(start_hw_args);
 	CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc);
 	return rc;
 }
@@ -1805,6 +1876,17 @@
 		}
 	}
 
+	mutex_lock(&g_ife_hw_mgr.ctx_mutex);
+	if (!atomic_fetch_inc(&g_ife_hw_mgr.active_ctx_cnt)) {
+		rc = cam_ife_notify_safe_lut_scm(CAM_IFE_SAFE_ENABLE);
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"SAFE SCM call failed:Check TZ/HYP dependency");
+			rc = -1;
+		}
+	}
+	mutex_unlock(&g_ife_hw_mgr.ctx_mutex);
+
 	CAM_DBG(CAM_ISP, "start cdm interface");
 	rc = cam_cdm_stream_on(ctx->cdm_handle);
 	if (rc) {
@@ -1864,7 +1946,7 @@
 		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
 		if (rc) {
 			CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)",
-				 hw_mgr_res->res_id);
+				hw_mgr_res->res_id);
 			goto err;
 		}
 	}
@@ -1929,6 +2011,149 @@
 	return rc;
 }
 
+static int cam_isp_blob_hfr_update(
+	uint32_t                               blob_type,
+	struct cam_isp_generic_blob_info      *blob_info,
+	struct cam_isp_resource_hfr_config    *hfr_config,
+	struct cam_hw_prepare_update_args     *prepare)
+{
+	struct cam_isp_port_hfr_config        *port_hfr_config;
+	struct cam_kmd_buf_info               *kmd_buf_info;
+	struct cam_ife_hw_mgr_ctx             *ctx = NULL;
+	struct cam_ife_hw_mgr_res             *hw_mgr_res;
+	uint32_t                               res_id_out, i;
+	uint32_t                               total_used_bytes = 0;
+	uint32_t                               kmd_buf_remain_size;
+	uint32_t                              *cmd_buf_addr;
+	uint32_t                               bytes_used = 0;
+	int                                    num_ent, rc = 0;
+
+	ctx = prepare->ctxt_to_hw_map;
+	CAM_DBG(CAM_ISP, "num_ports= %d",
+		hfr_config->num_ports);
+
+	/* Max one hw entries required for hfr config update */
+	if (prepare->num_hw_update_entries + 1 >=
+			prepare->max_hw_update_entries) {
+		CAM_ERR(CAM_ISP, "Insufficient  HW entries :%d %d",
+			prepare->num_hw_update_entries,
+			prepare->max_hw_update_entries);
+		return -EINVAL;
+	}
+
+	kmd_buf_info = blob_info->kmd_buf_info;
+	for (i = 0; i < hfr_config->num_ports; i++) {
+		port_hfr_config = &hfr_config->port_hfr_config[i];
+		res_id_out = port_hfr_config->resource_type & 0xFF;
+
+		CAM_DBG(CAM_ISP, "hfr config idx %d, type=%d", i,
+			res_id_out);
+
+		if (res_id_out >= CAM_IFE_HW_OUT_RES_MAX) {
+			CAM_ERR(CAM_ISP, "invalid out restype:%x",
+				port_hfr_config->resource_type);
+			return -EINVAL;
+		}
+
+		if ((kmd_buf_info->used_bytes
+			+ total_used_bytes) < kmd_buf_info->size) {
+			kmd_buf_remain_size = kmd_buf_info->size -
+			(kmd_buf_info->used_bytes +
+			total_used_bytes);
+		} else {
+			CAM_ERR(CAM_ISP,
+			"no free kmd memory for base %d",
+			blob_info->base_info->idx);
+			rc = -ENOMEM;
+			return rc;
+		}
+
+		cmd_buf_addr = kmd_buf_info->cpu_addr +
+			kmd_buf_info->used_bytes/4 +
+			total_used_bytes/4;
+		hw_mgr_res = &ctx->res_list_ife_out[res_id_out];
+
+		rc = cam_isp_add_cmd_buf_update(
+			hw_mgr_res, blob_type,
+			blob_type_hw_cmd_map[blob_type],
+			blob_info->base_info->idx,
+			(void *)cmd_buf_addr,
+			kmd_buf_remain_size,
+			(void *)port_hfr_config,
+			&bytes_used);
+		if (rc < 0) {
+			CAM_ERR(CAM_ISP,
+				"Failed cmd_update, base_idx=%d, rc=%d",
+				blob_info->base_info->idx, bytes_used);
+			return rc;
+		}
+
+		total_used_bytes += bytes_used;
+	}
+
+	if (total_used_bytes) {
+		/* Update the HW entries */
+		num_ent = prepare->num_hw_update_entries;
+		prepare->hw_update_entries[num_ent].handle =
+			kmd_buf_info->handle;
+		prepare->hw_update_entries[num_ent].len = total_used_bytes;
+		prepare->hw_update_entries[num_ent].offset =
+			kmd_buf_info->offset;
+		num_ent++;
+
+		kmd_buf_info->used_bytes += total_used_bytes;
+		kmd_buf_info->offset     += total_used_bytes;
+		prepare->num_hw_update_entries = num_ent;
+	}
+
+	return rc;
+}
+
+static int cam_isp_packet_generic_blob_handler(void *user_data,
+	uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data)
+{
+	int rc = 0;
+	struct cam_isp_generic_blob_info  *blob_info = user_data;
+	struct cam_hw_prepare_update_args *prepare = NULL;
+
+	if (!blob_data || (blob_size == 0) || !blob_info) {
+		CAM_ERR(CAM_ISP, "Invalid info blob %pK %d prepare %pK",
+			blob_data, blob_size, prepare);
+		return -EINVAL;
+	}
+
+	if (blob_type >= CAM_ISP_GENERIC_BLOB_TYPE_MAX) {
+		CAM_ERR(CAM_ISP, "Invalid Blob Type %d Max %d", blob_type,
+			CAM_ISP_GENERIC_BLOB_TYPE_MAX);
+		return -EINVAL;
+	}
+
+	prepare = blob_info->prepare;
+	if (!prepare) {
+		CAM_ERR(CAM_ISP, "Failed. prepare is NULL, blob_type %d",
+			blob_type);
+		return -EINVAL;
+	}
+
+	switch (blob_type) {
+	case CAM_ISP_GENERIC_BLOB_TYPE_HFR_CONFIG: {
+		struct cam_isp_resource_hfr_config    *hfr_config =
+			(struct cam_isp_resource_hfr_config *)blob_data;
+
+		rc = cam_isp_blob_hfr_update(blob_type, blob_info,
+			hfr_config, prepare);
+		if (rc)
+			CAM_ERR(CAM_ISP, "HFR Update Failed");
+	}
+		break;
+	default:
+		CAM_WARN(CAM_ISP, "Invalid blob type %d", blob_type);
+		break;
+	}
+
+	return rc;
+}
+
 static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
 	void *prepare_hw_update_args)
 {
@@ -1940,7 +2165,6 @@
 	struct cam_kmd_buf_info           kmd_buf;
 	uint32_t                          i;
 	bool                              fill_fence = true;
-	struct cam_isp_generic_blob_info  blob_info;
 
 	if (!hw_mgr_priv || !prepare_hw_update_args) {
 		CAM_ERR(CAM_ISP, "Invalid args");
@@ -1969,14 +2193,6 @@
 		return rc;
 	}
 
-	memset(&blob_info, 0x0, sizeof(struct cam_isp_generic_blob_info));
-	rc = cam_isp_process_generic_cmd_buffer(prepare, &blob_info);
-	if (rc) {
-		CAM_ERR(CAM_ISP, "Failed in generic blob cmd buffer, rc=%d",
-			rc);
-		goto end;
-	}
-
 	prepare->num_hw_update_entries = 0;
 	prepare->num_in_map_entries = 0;
 	prepare->num_out_map_entries = 0;
@@ -1997,26 +2213,14 @@
 
 		/* get command buffers */
 		if (ctx->base[i].split_id != CAM_ISP_HW_SPLIT_MAX) {
-			rc = cam_isp_add_command_buffers(prepare,
-			ctx->base[i].split_id, ctx->base[i].idx,
-			ctx->res_list_ife_out, CAM_IFE_HW_OUT_RES_MAX);
-				if (rc) {
-					CAM_ERR(CAM_ISP,
-						"Failed in add cmdbuf, i=%d, split_id=%d, rc=%d",
-						i, ctx->base[i].split_id, rc);
-					goto end;
-				}
-		}
-
-		if (blob_info.hfr_config) {
-			rc = cam_isp_add_hfr_config_hw_update(
-				blob_info.hfr_config, prepare,
-				ctx->base[i].idx, &kmd_buf,
+			rc = cam_isp_add_command_buffers(prepare, &kmd_buf,
+				&ctx->base[i],
+				cam_isp_packet_generic_blob_handler,
 				ctx->res_list_ife_out, CAM_IFE_HW_OUT_RES_MAX);
 			if (rc) {
 				CAM_ERR(CAM_ISP,
-					"Failed in hfr config, i=%d, rc=%d",
-					i, rc);
+					"Failed in add cmdbuf, i=%d, split_id=%d, rc=%d",
+					i, ctx->base[i].split_id, rc);
 				goto end;
 			}
 		}
@@ -2047,7 +2251,7 @@
 	 * of op_code has some difference from KMD.
 	 */
 	if (((prepare->packet->header.op_code + 1) & 0xF) ==
-					CAM_ISP_PACKET_INIT_DEV)
+		CAM_ISP_PACKET_INIT_DEV)
 		goto end;
 
 	/* add reg update commands */
@@ -2074,7 +2278,6 @@
 	}
 
 end:
-	kfree(blob_info.hfr_config);
 	return rc;
 }
 
@@ -2167,11 +2370,12 @@
 static int cam_ife_mgr_process_recovery_cb(void *priv, void *data)
 {
 	int32_t rc = 0;
-	struct cam_hw_event_recovery_data   *recovery_data = priv;
-	struct cam_hw_start_args     start_args;
-	struct cam_ife_hw_mgr   *ife_hw_mgr = NULL;
-	uint32_t   hw_mgr_priv;
-	uint32_t i = 0;
+	struct cam_hw_event_recovery_data   *recovery_data = data;
+	struct cam_hw_start_args             start_args;
+	struct cam_hw_stop_args              stop_args;
+	struct cam_ife_hw_mgr               *ife_hw_mgr = priv;
+	struct cam_ife_hw_mgr_res           *hw_mgr_res;
+	uint32_t                             i = 0;
 
 	uint32_t error_type = recovery_data->error_type;
 	struct cam_ife_hw_mgr_ctx        *ctx = NULL;
@@ -2188,20 +2392,57 @@
 			kfree(recovery_data);
 			return 0;
 		}
+		/* stop resources here */
+		CAM_DBG(CAM_ISP, "STOP: Number of affected context: %d",
+			recovery_data->no_of_context);
+		for (i = 0; i < recovery_data->no_of_context; i++) {
+			stop_args.ctxt_to_hw_map =
+				recovery_data->affected_ctx[i];
+			rc = cam_ife_mgr_stop_hw_in_overflow(&stop_args);
+			if (rc) {
+				CAM_ERR(CAM_ISP, "CTX stop failed(%d)", rc);
+				return rc;
+			}
+		}
 
-		ctx = recovery_data->affected_ctx[0];
-		ife_hw_mgr = ctx->hw_mgr;
+		CAM_DBG(CAM_ISP, "RESET: CSID PATH");
+		for (i = 0; i < recovery_data->no_of_context; i++) {
+			ctx = recovery_data->affected_ctx[i];
+			list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid,
+				list) {
+				rc = cam_ife_hw_mgr_reset_csid_res(hw_mgr_res);
+				if (rc) {
+					CAM_ERR(CAM_ISP, "Failed RESET (%d)",
+						hw_mgr_res->res_id);
+					return rc;
+				}
+			}
+		}
+
+		CAM_DBG(CAM_ISP, "RESET: Calling VFE reset");
 
 		for (i = 0; i < CAM_VFE_HW_NUM_MAX; i++) {
 			if (recovery_data->affected_core[i])
-				rc = cam_ife_mgr_reset_hw(ife_hw_mgr, i);
+				cam_ife_mgr_reset_vfe_hw(ife_hw_mgr, i);
 		}
 
+		CAM_DBG(CAM_ISP, "START: Number of affected context: %d",
+			recovery_data->no_of_context);
+
 		for (i = 0; i < recovery_data->no_of_context; i++) {
-			start_args.ctxt_to_hw_map =
-				recovery_data->affected_ctx[i];
-			rc = cam_ife_mgr_restart_hw(&hw_mgr_priv, &start_args);
+			ctx =  recovery_data->affected_ctx[i];
+			start_args.ctxt_to_hw_map = ctx;
+
+			atomic_set(&ctx->overflow_pending, 0);
+
+			rc = cam_ife_mgr_restart_hw(&start_args);
+			if (rc) {
+				CAM_ERR(CAM_ISP, "CTX start failed(%d)", rc);
+				return rc;
+			}
+			CAM_DBG(CAM_ISP, "Started resources rc (%d)", rc);
 		}
+		CAM_DBG(CAM_ISP, "Recovery Done rc (%d)", rc);
 
 		break;
 
@@ -2227,8 +2468,6 @@
 	struct crm_workq_task        *task = NULL;
 	struct cam_hw_event_recovery_data  *recovery_data = NULL;
 
-	return 0;
-
 	recovery_data = kzalloc(sizeof(struct cam_hw_event_recovery_data),
 		GFP_ATOMIC);
 	if (!recovery_data)
@@ -2247,7 +2486,9 @@
 	}
 
 	task->process_cb = &cam_ife_mgr_process_recovery_cb;
-	rc = cam_req_mgr_workq_enqueue_task(task, recovery_data,
+	task->payload = recovery_data;
+	rc = cam_req_mgr_workq_enqueue_task(task,
+		recovery_data->affected_ctx[0]->hw_mgr,
 		CRM_TASK_PRIORITY_0);
 
 	return rc;
@@ -2260,9 +2501,9 @@
  *      affected_core[]
  *  b. Return 0 i.e.SUCCESS
  */
-static int cam_ife_hw_mgr_match_hw_idx(
+static int cam_ife_hw_mgr_is_ctx_affected(
 	struct cam_ife_hw_mgr_ctx   *ife_hwr_mgr_ctx,
-	uint32_t *affected_core)
+	uint32_t *affected_core, uint32_t size)
 {
 
 	int32_t rc = -EPERM;
@@ -2272,22 +2513,25 @@
 
 	CAM_DBG(CAM_ISP, "Enter:max_idx = %d", max_idx);
 
-	while (i < max_idx) {
+	if ((max_idx >= CAM_IFE_HW_NUM_MAX) ||
+		(size > CAM_IFE_HW_NUM_MAX)) {
+		CAM_ERR(CAM_ISP, "invalid parameter = %d", max_idx);
+		return rc;
+	}
+
+	for (i = 0; i < max_idx; i++) {
 		if (affected_core[ife_hwr_mgr_ctx->base[i].idx])
 			rc = 0;
 		else {
 			ctx_affected_core_idx[j] = ife_hwr_mgr_ctx->base[i].idx;
 			j = j + 1;
 		}
-
-		i = i + 1;
 	}
 
 	if (rc == 0) {
 		while (j) {
 			if (affected_core[ctx_affected_core_idx[j-1]] != 1)
 				affected_core[ctx_affected_core_idx[j-1]] = 1;
-
 			j = j - 1;
 		}
 	}
@@ -2303,7 +2547,7 @@
  *  d. For any dual VFE context, if copanion VFE is also serving
  *     other context it should also notify the CRM with fatal error
  */
-static int  cam_ife_hw_mgr_handle_overflow(
+static int  cam_ife_hw_mgr_process_overflow(
 	struct cam_ife_hw_mgr_ctx   *curr_ife_hwr_mgr_ctx,
 	struct cam_isp_hw_error_event_data *error_event_data,
 	uint32_t curr_core_idx,
@@ -2313,12 +2557,10 @@
 	struct cam_ife_hw_mgr_ctx   *ife_hwr_mgr_ctx = NULL;
 	cam_hw_event_cb_func	         ife_hwr_irq_err_cb;
 	struct cam_ife_hw_mgr		*ife_hwr_mgr = NULL;
-	uint32_t                            hw_mgr_priv = 1;
 	struct cam_hw_stop_args          stop_args;
 	uint32_t i = 0;
 
 	CAM_DBG(CAM_ISP, "Enter");
-	return 0;
 
 	if (!recovery_data) {
 		CAM_ERR(CAM_ISP, "recovery_data parameter is NULL",
@@ -2339,9 +2581,12 @@
 		 * with this context
 		 */
 		CAM_DBG(CAM_ISP, "Calling match Hw idx");
-		if (cam_ife_hw_mgr_match_hw_idx(ife_hwr_mgr_ctx, affected_core))
+		if (cam_ife_hw_mgr_is_ctx_affected(ife_hwr_mgr_ctx,
+			affected_core, CAM_IFE_HW_NUM_MAX))
 			continue;
 
+		atomic_set(&ife_hwr_mgr_ctx->overflow_pending, 1);
+
 		ife_hwr_irq_err_cb =
 		ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_ERROR];
 
@@ -2355,16 +2600,13 @@
 				ife_hwr_mgr_ctx;
 
 		/*
-		 * Stop the hw resources associated with this context
-		 * and call the error callback. In the call back function
-		 * corresponding ISP context will update CRM about fatal Error
+		 * In the call back function corresponding ISP context
+		 * will update CRM about fatal Error
 		 */
-		if (!cam_ife_mgr_stop_hw_in_overflow(&hw_mgr_priv,
-			&stop_args)) {
-			CAM_DBG(CAM_ISP, "Calling Error handler CB");
-			ife_hwr_irq_err_cb(ife_hwr_mgr_ctx->common.cb_priv,
-				CAM_ISP_HW_EVENT_ERROR, error_event_data);
-		}
+
+		ife_hwr_irq_err_cb(ife_hwr_mgr_ctx->common.cb_priv,
+			CAM_ISP_HW_EVENT_ERROR, error_event_data);
+
 	}
 	/* fill the affected_core in recovery data */
 	for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
@@ -2376,11 +2618,85 @@
 	return 0;
 }
 
+static int cam_ife_hw_mgr_get_err_type(
+	void                              *handler_priv,
+	void                              *payload)
+{
+	struct cam_isp_resource_node         *hw_res_l = NULL;
+	struct cam_isp_resource_node         *hw_res_r = NULL;
+	struct cam_ife_hw_mgr_ctx            *ife_hwr_mgr_ctx;
+	struct cam_vfe_top_irq_evt_payload   *evt_payload;
+	struct cam_ife_hw_mgr_res            *isp_ife_camif_res = NULL;
+	uint32_t  status = 0;
+	uint32_t  core_idx;
+
+	ife_hwr_mgr_ctx = handler_priv;
+	evt_payload = payload;
+
+	if (!evt_payload) {
+		CAM_ERR(CAM_ISP, "No payload");
+		return IRQ_HANDLED;
+	}
+
+	core_idx = evt_payload->core_index;
+	evt_payload->evt_id = CAM_ISP_HW_EVENT_ERROR;
+
+	list_for_each_entry(isp_ife_camif_res,
+		&ife_hwr_mgr_ctx->res_list_ife_src, list) {
+
+		if ((isp_ife_camif_res->res_type ==
+			CAM_IFE_HW_MGR_RES_UNINIT) ||
+			(isp_ife_camif_res->res_id != CAM_ISP_HW_VFE_IN_CAMIF))
+			continue;
+
+		hw_res_l = isp_ife_camif_res->hw_res[CAM_ISP_HW_SPLIT_LEFT];
+		hw_res_r = isp_ife_camif_res->hw_res[CAM_ISP_HW_SPLIT_RIGHT];
+
+		CAM_DBG(CAM_ISP, "is_dual_vfe ? = %d\n",
+			isp_ife_camif_res->is_dual_vfe);
+
+		/* ERROR check for Left VFE */
+		if (!hw_res_l) {
+			CAM_DBG(CAM_ISP, "VFE(L) Device is NULL");
+			break;
+		}
+
+		CAM_DBG(CAM_ISP, "core id= %d, HW id %d", core_idx,
+			hw_res_l->hw_intf->hw_idx);
+
+		if (core_idx == hw_res_l->hw_intf->hw_idx) {
+			status = hw_res_l->bottom_half_handler(
+				hw_res_l, evt_payload);
+		}
+
+		if (status)
+			break;
+
+		/* ERROR check for Right  VFE */
+		if (!hw_res_r) {
+			CAM_DBG(CAM_ISP, "VFE(R) Device is NULL");
+			continue;
+		}
+		CAM_DBG(CAM_ISP, "core id= %d, HW id %d", core_idx,
+			hw_res_r->hw_intf->hw_idx);
+
+		if (core_idx == hw_res_r->hw_intf->hw_idx) {
+			status = hw_res_r->bottom_half_handler(
+				hw_res_r, evt_payload);
+		}
+
+		if (status)
+			break;
+	}
+	CAM_DBG(CAM_ISP, "Exit (status = %d)!", status);
+	return status;
+}
+
 static int  cam_ife_hw_mgr_handle_camif_error(
 	void                              *handler_priv,
 	void                              *payload)
 {
-	int32_t  rc = 0;
+	int32_t  error_status = CAM_ISP_HW_ERROR_NONE;
 	uint32_t core_idx;
 	struct cam_ife_hw_mgr_ctx               *ife_hwr_mgr_ctx;
 	struct cam_vfe_top_irq_evt_payload      *evt_payload;
@@ -2391,17 +2707,22 @@
 	evt_payload = payload;
 	core_idx = evt_payload->core_index;
 
-	rc = evt_payload->error_type;
-	CAM_DBG(CAM_ISP, "Enter: error_type (%d)", evt_payload->error_type);
-	switch (evt_payload->error_type) {
+	error_status = cam_ife_hw_mgr_get_err_type(ife_hwr_mgr_ctx,
+		evt_payload);
+
+	if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+		return error_status;
+
+	switch (error_status) {
 	case CAM_ISP_HW_ERROR_OVERFLOW:
 	case CAM_ISP_HW_ERROR_P2I_ERROR:
 	case CAM_ISP_HW_ERROR_VIOLATION:
+		CAM_DBG(CAM_ISP, "Enter: error_type (%d)", error_status);
 
 		error_event_data.error_type =
 				CAM_ISP_HW_ERROR_OVERFLOW;
 
-		cam_ife_hw_mgr_handle_overflow(ife_hwr_mgr_ctx,
+		cam_ife_hw_mgr_process_overflow(ife_hwr_mgr_ctx,
 				&error_event_data,
 				core_idx,
 				&recovery_data);
@@ -2411,12 +2732,10 @@
 		cam_ife_hw_mgr_do_error_recovery(&recovery_data);
 		break;
 	default:
-		CAM_DBG(CAM_ISP, "None error. Error type (%d)",
-			evt_payload->error_type);
+		CAM_DBG(CAM_ISP, "None error (%d)", error_status);
 	}
 
-	CAM_DBG(CAM_ISP, "Exit (%d)", rc);
-	return rc;
+	return error_status;
 }
 
 /*
@@ -2481,6 +2800,8 @@
 				rup_status = hw_res->bottom_half_handler(
 					hw_res, evt_payload);
 			}
+			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+				break;
 
 			if (!rup_status) {
 				ife_hwr_irq_rup_cb(
@@ -2512,6 +2833,8 @@
 				rup_status = hw_res->bottom_half_handler(
 					hw_res, evt_payload);
 
+			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+				break;
 			if (!rup_status) {
 				/* Send the Reg update hw event */
 				ife_hwr_irq_rup_cb(
@@ -2633,6 +2956,9 @@
 			if (core_idx == hw_res_l->hw_intf->hw_idx) {
 				epoch_status = hw_res_l->bottom_half_handler(
 					hw_res_l, evt_payload);
+				if (atomic_read(
+					&ife_hwr_mgr_ctx->overflow_pending))
+					break;
 				if (!epoch_status)
 					ife_hwr_irq_epoch_cb(
 						ife_hwr_mgr_ctx->common.cb_priv,
@@ -2680,6 +3006,8 @@
 					core_index1,
 					evt_payload->evt_id);
 
+			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+				break;
 			if (!rc)
 				ife_hwr_irq_epoch_cb(
 					ife_hwr_mgr_ctx->common.cb_priv,
@@ -2740,6 +3068,8 @@
 		if (core_idx == hw_res_l->hw_intf->hw_idx) {
 			sof_status = hw_res_l->bottom_half_handler(hw_res_l,
 				evt_payload);
+			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+				break;
 			if (!sof_status) {
 				cam_ife_mgr_cmd_get_sof_timestamp(
 					ife_hwr_mgr_ctx,
@@ -2795,12 +3125,20 @@
 		core_index0 = hw_res_l->hw_intf->hw_idx;
 		core_index1 = hw_res_r->hw_intf->hw_idx;
 
+		if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+			break;
+
 		rc = cam_ife_hw_mgr_check_irq_for_dual_vfe(ife_hwr_mgr_ctx,
 			core_index0, core_index1, evt_payload->evt_id);
 
-		if (!rc)
+		if (!rc) {
+			cam_ife_mgr_cmd_get_sof_timestamp(
+					ife_hwr_mgr_ctx,
+					&sof_done_event_data.timestamp);
+
 			ife_hwr_irq_sof_cb(ife_hwr_mgr_ctx->common.cb_priv,
 				CAM_ISP_HW_EVENT_SOF, &sof_done_event_data);
+		}
 
 		break;
 
@@ -2948,6 +3286,9 @@
 			if (core_idx == hw_res_l->hw_intf->hw_idx) {
 				eof_status = hw_res_l->bottom_half_handler(
 					hw_res_l, evt_payload);
+				if (atomic_read(
+					&ife_hwr_mgr_ctx->overflow_pending))
+					break;
 				if (!eof_status)
 					ife_hwr_irq_eof_cb(
 						ife_hwr_mgr_ctx->common.cb_priv,
@@ -2992,6 +3333,9 @@
 					core_index1,
 					evt_payload->evt_id);
 
+			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+				break;
+
 			if (!rc)
 				ife_hwr_irq_eof_cb(
 					ife_hwr_mgr_ctx->common.cb_priv,
@@ -3036,6 +3380,8 @@
 	ife_hwr_irq_wm_done_cb =
 		ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_DONE];
 
+	evt_payload->evt_id = CAM_ISP_HW_EVENT_DONE;
+
 	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
 		isp_ife_out_res = &ife_hwr_mgr_ctx->res_list_ife_out[i];
 
@@ -3092,6 +3438,8 @@
 			buf_done_event_data.resource_handle[0] =
 				isp_ife_out_res->res_id;
 
+			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+				break;
 			/* Report for Successful buf_done event if any */
 			if (buf_done_event_data.num_handles > 0 &&
 				ife_hwr_irq_wm_done_cb) {
@@ -3129,7 +3477,7 @@
 	 * the affected context and any successful buf_done event is not
 	 * reported.
 	 */
-	rc = cam_ife_hw_mgr_handle_overflow(ife_hwr_mgr_ctx,
+	rc = cam_ife_hw_mgr_process_overflow(ife_hwr_mgr_ctx,
 		&error_event_data, evt_payload->core_index,
 		&recovery_data);
 
@@ -3168,8 +3516,6 @@
 		evt_payload->irq_reg_val[5]);
 	CAM_DBG(CAM_ISP, "bus_irq_dual_comp_owrt: = %x",
 		evt_payload->irq_reg_val[6]);
-
-	CAM_DBG(CAM_ISP, "Calling Buf_done");
 	/* WM Done */
 	return cam_ife_hw_mgr_handle_buf_done_for_hw_res(ife_hwr_mgr_ctx,
 		evt_payload_priv);
@@ -3200,14 +3546,25 @@
 	 * for this context it needs to be handled remaining
 	 * interrupts are ignored.
 	 */
-	rc = cam_ife_hw_mgr_handle_camif_error(ife_hwr_mgr_ctx,
-		evt_payload_priv);
+	if (g_ife_hw_mgr.debug_cfg.enable_recovery) {
+		CAM_DBG(CAM_ISP, "IFE Mgr recovery is enabled");
+		rc = cam_ife_hw_mgr_handle_camif_error(ife_hwr_mgr_ctx,
+			evt_payload_priv);
+	} else {
+		CAM_DBG(CAM_ISP, "recovery is not enabled");
+		rc = 0;
+	}
+
 	if (rc) {
 		CAM_ERR(CAM_ISP, "Encountered Error (%d), ignoring other irqs",
 			 rc);
 		return IRQ_HANDLED;
 	}
 
+	CAM_DBG(CAM_ISP, "Calling EOF");
+	cam_ife_hw_mgr_handle_eof_for_camif_hw_res(ife_hwr_mgr_ctx,
+		evt_payload_priv);
+
 	CAM_DBG(CAM_ISP, "Calling SOF");
 	/* SOF IRQ */
 	cam_ife_hw_mgr_handle_sof(ife_hwr_mgr_ctx,
@@ -3222,8 +3579,6 @@
 	/* EPOCH IRQ */
 	cam_ife_hw_mgr_handle_epoch_for_camif_hw_res(ife_hwr_mgr_ctx,
 		evt_payload_priv);
-	cam_ife_hw_mgr_handle_eof_for_camif_hw_res(ife_hwr_mgr_ctx,
-		evt_payload_priv);
 
 	return IRQ_HANDLED;
 }
@@ -3298,6 +3653,15 @@
 		goto err;
 	}
 
+	if (!debugfs_create_u32("enable_recovery",
+		0644,
+		g_ife_hw_mgr.debug_cfg.dentry,
+		&g_ife_hw_mgr.debug_cfg.enable_recovery)) {
+		CAM_ERR(CAM_ISP, "failed to create enable_recovery");
+		goto err;
+	}
+	g_ife_hw_mgr.debug_cfg.enable_recovery = 0;
+
 	return 0;
 
 err:
@@ -3404,6 +3768,7 @@
 		g_ife_hw_mgr.mgr_common.cmd_iommu_hdl_secure = -1;
 	}
 
+	atomic_set(&g_ife_hw_mgr.active_ctx_cnt, 0);
 	for (i = 0; i < CAM_CTX_MAX; i++) {
 		memset(&g_ife_hw_mgr.ctx_pool[i], 0,
 			sizeof(g_ife_hw_mgr.ctx_pool[i]));
@@ -3496,4 +3861,3 @@
 	g_ife_hw_mgr.mgr_common.img_iommu_hdl = -1;
 	return rc;
 }
-
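For reference, a minimal userspace sketch of the first-enable/last-disable gating that start/stop now apply around cam_ife_notify_safe_lut_scm(): the expensive toggle runs only on the 0->1 and 1->0 transitions of the active-context count, and the mutex keeps the check-and-toggle atomic across contexts. The helper names and the printf stand-in for the SCM call are assumptions for illustration only.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int active_ctx_cnt;
static pthread_mutex_t ctx_mutex = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for the SAFE SCM notification; prints instead of trapping to TZ */
static int notify_safe(int enable)
{
	printf("safe lut %s\n", enable ? "enable" : "disable");
	return 0;
}

static void ctx_start(void)
{
	pthread_mutex_lock(&ctx_mutex);
	if (atomic_fetch_add(&active_ctx_cnt, 1) == 0)	/* 0 -> 1 transition */
		notify_safe(1);
	pthread_mutex_unlock(&ctx_mutex);
}

static void ctx_stop(void)
{
	pthread_mutex_lock(&ctx_mutex);
	if (atomic_fetch_sub(&active_ctx_cnt, 1) == 1)	/* 1 -> 0 transition */
		notify_safe(0);
	pthread_mutex_unlock(&ctx_mutex);
}

int main(void)
{
	ctx_start();	/* first context: enables */
	ctx_start();	/* second context: no toggle */
	ctx_stop();	/* still one active: no toggle */
	ctx_stop();	/* last context gone: disables */
	return 0;
}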
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
index 2e66210..4d26138 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
@@ -69,10 +69,10 @@
 
 
 /**
- * struct ctx_base_info - base hardware information for the context
+ * struct ctx_base_info - Base hardware information for the context
  *
  * @idx:                 Base resource index
- * @split_id:            split info for the base resource
+ * @split_id:            Split info for the base resource
  *
  */
 struct ctx_base_info {
@@ -85,11 +85,13 @@
  *
  * @dentry:              Debugfs entry
  * @csid_debug:          csid debug information
+ * @enable_recovery:     Enable recovery
  *
  */
 struct cam_ife_hw_mgr_debug {
-	struct dentry                  *dentry;
-	uint64_t                        csid_debug;
+	struct dentry  *dentry;
+	uint64_t       csid_debug;
+	uint32_t       enable_recovery;
 };
 
 /**
@@ -171,6 +173,7 @@
  * @ife_csid_dev_caps      csid device capability stored per core
  * @ife_dev_caps           ife device capability per core
  * @work q                 work queue for IFE hw manager
+ * @debug_cfg              debug configuration
  */
 struct cam_ife_hw_mgr {
 	struct cam_isp_hw_mgr          mgr_common;
@@ -179,6 +182,7 @@
 	struct cam_soc_reg_map        *cdm_reg_map[CAM_IFE_HW_NUM_MAX];
 
 	struct mutex                   ctx_mutex;
+	atomic_t                       active_ctx_cnt;
 	struct list_head               free_ctx_list;
 	struct list_head               used_ctx_list;
 	struct cam_ife_hw_mgr_ctx      ctx_pool[CAM_CTX_MAX];
@@ -186,8 +190,8 @@
 	struct cam_ife_csid_hw_caps    ife_csid_dev_caps[
 						CAM_IFE_CSID_HW_NUM_MAX];
 	struct cam_vfe_hw_get_hw_cap   ife_dev_caps[CAM_IFE_HW_NUM_MAX];
-	struct cam_req_mgr_core_workq  *workq;
-	struct cam_ife_hw_mgr_debug     debug_cfg;
+	struct cam_req_mgr_core_workq *workq;
+	struct cam_ife_hw_mgr_debug    debug_cfg;
 };
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
index 8514ab3..876a540 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
@@ -13,6 +13,7 @@
 #include <uapi/media/cam_defs.h>
 #include <uapi/media/cam_isp.h>
 #include "cam_mem_mgr.h"
+#include "cam_isp_hw.h"
 #include "cam_vfe_hw_intf.h"
 #include "cam_isp_packet_parser.h"
 #include "cam_debug_util.h"
@@ -26,7 +27,7 @@
 	int rc = -EINVAL;
 	struct cam_ife_hw_mgr_res       *hw_mgr_res;
 	struct cam_isp_resource_node    *res;
-	struct cam_isp_hw_get_cdm_args   get_base;
+	struct cam_isp_hw_get_cmd_update get_base;
 	struct cam_hw_update_entry      *hw_entry;
 	uint32_t                         num_ent, i;
 
@@ -53,24 +54,25 @@
 				continue;
 
 			get_base.res  = res;
-			get_base.cmd_buf_addr = kmd_buf_info->cpu_addr +
+			get_base.cmd_type = CAM_ISP_HW_CMD_GET_CHANGE_BASE;
+			get_base.cmd.cmd_buf_addr = kmd_buf_info->cpu_addr +
 				kmd_buf_info->used_bytes/4;
-			get_base.size  = kmd_buf_info->size -
+			get_base.cmd.size  = kmd_buf_info->size -
 					kmd_buf_info->used_bytes;
 
 			rc = res->hw_intf->hw_ops.process_cmd(
 				res->hw_intf->hw_priv,
-				CAM_VFE_HW_CMD_GET_CHANGE_BASE, &get_base,
-				sizeof(struct cam_isp_hw_get_cdm_args));
+				CAM_ISP_HW_CMD_GET_CHANGE_BASE, &get_base,
+				sizeof(struct cam_isp_hw_get_cmd_update));
 			if (rc)
 				return rc;
 
 			hw_entry[num_ent].handle = kmd_buf_info->handle;
-			hw_entry[num_ent].len    = get_base.used_bytes;
+			hw_entry[num_ent].len    = get_base.cmd.used_bytes;
 			hw_entry[num_ent].offset = kmd_buf_info->offset;
 
-			kmd_buf_info->used_bytes += get_base.used_bytes;
-			kmd_buf_info->offset     += get_base.used_bytes;
+			kmd_buf_info->used_bytes += get_base.cmd.used_bytes;
+			kmd_buf_info->offset     += get_base.cmd.used_bytes;
 			num_ent++;
 			prepare->num_hw_update_entries = num_ent;
 
@@ -125,7 +127,7 @@
 			dual_isp_update_args.dual_cfg = dual_config;
 			rc = res->hw_intf->hw_ops.process_cmd(
 				res->hw_intf->hw_priv,
-				CAM_VFE_HW_CMD_STRIPE_UPDATE,
+				CAM_ISP_HW_CMD_STRIPE_UPDATE,
 				&dual_isp_update_args,
 				sizeof(struct cam_isp_hw_dual_isp_update_args));
 			if (rc)
@@ -136,20 +138,83 @@
 	return rc;
 }
 
+int cam_isp_add_cmd_buf_update(
+	struct cam_ife_hw_mgr_res            *hw_mgr_res,
+	uint32_t                              cmd_type,
+	uint32_t                              hw_cmd_type,
+	uint32_t                              base_idx,
+	uint32_t                             *cmd_buf_addr,
+	uint32_t                              kmd_buf_remain_size,
+	void                                 *cmd_update_data,
+	uint32_t                             *bytes_used)
+{
+	int rc = 0;
+	struct cam_isp_resource_node       *res;
+	struct cam_isp_hw_get_cmd_update    cmd_update;
+	uint32_t                            i;
+	uint32_t                            total_used_bytes = 0;
+
+	if (hw_mgr_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT) {
+		CAM_ERR(CAM_ISP, "io res id:%d not valid",
+			hw_mgr_res->res_type);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+		if (!hw_mgr_res->hw_res[i])
+			continue;
+
+		if (hw_mgr_res->hw_res[i]->hw_intf->hw_idx != base_idx)
+			continue;
+
+		res = hw_mgr_res->hw_res[i];
+		cmd_update.res = res;
+		cmd_update.cmd_type = hw_cmd_type;
+		cmd_update.cmd.cmd_buf_addr = cmd_buf_addr;
+		cmd_update.cmd.size = kmd_buf_remain_size;
+		cmd_update.data = cmd_update_data;
+
+		CAM_DBG(CAM_ISP, "cmd buffer 0x%pK, size %d",
+			cmd_update.cmd.cmd_buf_addr,
+			cmd_update.cmd.size);
+		rc = res->hw_intf->hw_ops.process_cmd(
+			res->hw_intf->hw_priv,
+			cmd_update.cmd_type, &cmd_update,
+			sizeof(struct cam_isp_hw_get_cmd_update));
+
+		if (rc) {
+			CAM_ERR(CAM_ISP, "get buf cmd error:%d",
+				res->res_id);
+			rc = -ENOMEM;
+			return rc;
+		}
+
+		total_used_bytes += cmd_update.cmd.used_bytes;
+	}
+	*bytes_used = total_used_bytes;
+	CAM_DBG(CAM_ISP, "total_used_bytes %u", total_used_bytes);
+	return rc;
+}
+
 int cam_isp_add_command_buffers(
 	struct cam_hw_prepare_update_args  *prepare,
-	enum cam_isp_hw_split_id            split_id,
-	uint32_t                            base_idx,
+	struct cam_kmd_buf_info            *kmd_buf_info,
+	struct ctx_base_info               *base_info,
+	cam_packet_generic_blob_handler     blob_handler_cb,
 	struct cam_ife_hw_mgr_res          *res_list_isp_out,
 	uint32_t                            size_isp_out)
 {
 	int rc = 0;
-	uint32_t  cmd_meta_data, num_ent, i;
-	struct cam_cmd_buf_desc       *cmd_desc = NULL;
-	struct cam_hw_update_entry    *hw_entry;
+	uint32_t                           cmd_meta_data, num_ent, i;
+	uint32_t                           base_idx;
+	enum cam_isp_hw_split_id           split_id;
+	struct cam_cmd_buf_desc           *cmd_desc = NULL;
+	struct cam_hw_update_entry        *hw_entry;
 
 	hw_entry = prepare->hw_update_entries;
-	num_ent = prepare->num_hw_update_entries;
+	split_id = base_info->split_id;
+	base_idx = base_info->idx;
+
 	/*
 	 * set the cmd_desc to point the first command descriptor in the
 	 * packet
@@ -162,6 +227,7 @@
 		split_id, prepare->packet->num_cmd_buf);
 
 	for (i = 0; i < prepare->packet->num_cmd_buf; i++) {
+		num_ent = prepare->num_hw_update_entries;
 		if (!cmd_desc[i].length)
 			continue;
 
@@ -232,238 +298,75 @@
 			if (rc)
 				return rc;
 			break;
-		case CAM_ISP_PACKET_META_GENERIC_BLOB:
+		case CAM_ISP_PACKET_META_GENERIC_BLOB_LEFT:
+			if (split_id == CAM_ISP_HW_SPLIT_LEFT) {
+				struct cam_isp_generic_blob_info   blob_info;
+
+				prepare->num_hw_update_entries = num_ent;
+				blob_info.prepare = prepare;
+				blob_info.base_info = base_info;
+				blob_info.kmd_buf_info = kmd_buf_info;
+
+				rc = cam_packet_util_process_generic_cmd_buffer(
+					&cmd_desc[i],
+					blob_handler_cb,
+					&blob_info);
+				if (rc) {
+					CAM_ERR(CAM_ISP,
+						"Failed in processing blobs %d",
+						rc);
+					return rc;
+				}
+				num_ent = prepare->num_hw_update_entries;
+			}
+			break;
+		case CAM_ISP_PACKET_META_GENERIC_BLOB_RIGHT:
+			if (split_id == CAM_ISP_HW_SPLIT_RIGHT) {
+				struct cam_isp_generic_blob_info   blob_info;
+
+				prepare->num_hw_update_entries = num_ent;
+				blob_info.prepare = prepare;
+				blob_info.base_info = base_info;
+				blob_info.kmd_buf_info = kmd_buf_info;
+
+				rc = cam_packet_util_process_generic_cmd_buffer(
+					&cmd_desc[i],
+					blob_handler_cb,
+					&blob_info);
+				if (rc) {
+					CAM_ERR(CAM_ISP,
+						"Failed in processing blobs %d",
+						rc);
+					return rc;
+				}
+				num_ent = prepare->num_hw_update_entries;
+			}
+			break;
+		case CAM_ISP_PACKET_META_GENERIC_BLOB_COMMON: {
+			struct cam_isp_generic_blob_info   blob_info;
+
+			prepare->num_hw_update_entries = num_ent;
+			blob_info.prepare = prepare;
+			blob_info.base_info = base_info;
+			blob_info.kmd_buf_info = kmd_buf_info;
+
+			rc = cam_packet_util_process_generic_cmd_buffer(
+				&cmd_desc[i],
+				blob_handler_cb,
+				&blob_info);
+			if (rc) {
+				CAM_ERR(CAM_ISP,
+					"Failed in processing blobs %d", rc);
+				return rc;
+			}
+			num_ent = prepare->num_hw_update_entries;
+		}
 			break;
 		default:
 			CAM_ERR(CAM_ISP, "invalid cdm command meta data %d",
 				cmd_meta_data);
 			return -EINVAL;
 		}
-	}
-
-	prepare->num_hw_update_entries = num_ent;
-
-	return rc;
-}
-
-static int cam_isp_handle_hfr_config(
-	struct cam_isp_generic_blob_info *blob_info,
-	struct cam_isp_resource_hfr_config *hfr_config, uint32_t blob_size)
-{
-	uint32_t cal_blob_size =
-		sizeof(struct cam_isp_resource_hfr_config) +
-		(sizeof(struct cam_isp_port_hfr_config) *
-		(hfr_config->num_io_configs - 1));
-
-	if (cal_blob_size != blob_size) {
-		CAM_ERR(CAM_ISP, "Invalid blob size %d %d",
-			cal_blob_size, blob_size);
-		return -EINVAL;
-	}
-
-	CAM_DBG(CAM_ISP, "HFR num_io_config = %d", hfr_config->num_io_configs);
-
-	if (blob_info->hfr_config) {
-		CAM_WARN(CAM_ISP,
-			"Ignoring previous hfr_config, prev=%d, curr=%d",
-			blob_info->hfr_config->num_io_configs,
-			hfr_config->num_io_configs);
-		kfree(blob_info->hfr_config);
-	}
-
-	blob_info->hfr_config = kzalloc(blob_size, GFP_ATOMIC);
-	if (!blob_info->hfr_config)
-		return -ENOMEM;
-
-	memcpy(blob_info->hfr_config, hfr_config, blob_size);
-
-	return 0;
-}
-
-static int cam_isp_packet_generic_blob_handler(void *user_data,
-	uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data)
-{
-	int rc = 0;
-
-	if (!blob_data || (blob_size == 0)) {
-		CAM_ERR(CAM_ISP, "Invalid blob info %pK %d", blob_data,
-			blob_size);
-		return -EINVAL;
-	}
-
-	switch (blob_type) {
-	case CAM_ISP_GENERIC_BLOB_TYPE_HFR_CONFIG:
-		rc = cam_isp_handle_hfr_config(user_data,
-			(struct cam_isp_resource_hfr_config *)blob_data,
-			blob_size);
-		if (rc)
-			CAM_ERR(CAM_ISP, "Failed in handling hfr config %d",
-				rc);
-
-		break;
-	default:
-		CAM_WARN(CAM_ISP, "Invalid blob type %d", blob_type);
-		break;
-	}
-
-	return rc;
-}
-
-int cam_isp_process_generic_cmd_buffer(
-	struct cam_hw_prepare_update_args *prepare,
-	struct cam_isp_generic_blob_info  *blob_info)
-{
-	int i, rc = 0;
-	struct cam_cmd_buf_desc *cmd_desc = NULL;
-
-	/*
-	 * set the cmd_desc to point the first command descriptor in the
-	 * packet
-	 */
-	cmd_desc = (struct cam_cmd_buf_desc *)
-			((uint8_t *)&prepare->packet->payload +
-			prepare->packet->cmd_buf_offset);
-
-	for (i = 0; i < prepare->packet->num_cmd_buf; i++) {
-		if (!cmd_desc[i].length)
-			continue;
-
-		if (cmd_desc[i].meta_data != CAM_ISP_PACKET_META_GENERIC_BLOB)
-			continue;
-
-		rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]);
-		if (rc)
-			return rc;
-
-		rc = cam_packet_util_process_generic_cmd_buffer(&cmd_desc[i],
-			cam_isp_packet_generic_blob_handler, blob_info);
-		if (rc)
-			CAM_ERR(CAM_ISP, "Failed in processing blobs %d", rc);
-
-		break;
-	}
-
-	return rc;
-}
-
-int cam_isp_add_hfr_config_hw_update(
-	struct cam_isp_resource_hfr_config   *hfr_config,
-	struct cam_hw_prepare_update_args    *prepare,
-	uint32_t                              base_idx,
-	struct cam_kmd_buf_info              *kmd_buf_info,
-	struct cam_ife_hw_mgr_res            *res_list_isp_out,
-	uint32_t                              size_isp_out)
-{
-	int rc = 0;
-	struct cam_isp_resource_node       *res;
-	struct cam_ife_hw_mgr_res          *hw_mgr_res;
-	struct cam_isp_hw_get_hfr_update    update_hfr;
-	struct cam_isp_port_hfr_config     *io_hfr_config;
-	uint32_t                            kmd_buf_remain_size;
-	uint32_t                            i, j;
-	uint32_t                            res_id_out;
-	uint32_t                            hfr_cfg_used_bytes, num_ent;
-
-	hfr_cfg_used_bytes = 0;
-
-	/* Max one hw entries required for hfr config update */
-	if (prepare->num_hw_update_entries + 1 >=
-			prepare->max_hw_update_entries) {
-		CAM_ERR(CAM_ISP, "Insufficient  HW entries :%d %d",
-			prepare->num_hw_update_entries,
-			prepare->max_hw_update_entries);
-		return -EINVAL;
-	}
-
-	CAM_DBG(CAM_ISP, "num_io_configs= %d", hfr_config->num_io_configs);
-
-	for (i = 0; i < hfr_config->num_io_configs; i++) {
-		io_hfr_config = &hfr_config->io_hfr_config[i];
-		res_id_out = io_hfr_config->resource_type & 0xFF;
-
-		CAM_DBG(CAM_ISP, "hfr config idx %d, type=%d", i, res_id_out);
-
-		if (res_id_out >= size_isp_out) {
-			CAM_ERR(CAM_ISP, "invalid out restype:%x",
-				io_hfr_config->resource_type);
-			return -EINVAL;
-		}
-
-		hw_mgr_res = &res_list_isp_out[res_id_out];
-		if (hw_mgr_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT) {
-			CAM_ERR(CAM_ISP, "io res id:%d not valid",
-				io_hfr_config->resource_type);
-			return -EINVAL;
-		}
-
-		for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) {
-			if (!hw_mgr_res->hw_res[j])
-				continue;
-
-			if (hw_mgr_res->hw_res[j]->hw_intf->hw_idx != base_idx)
-				continue;
-
-			res = hw_mgr_res->hw_res[j];
-			if (res->res_id !=
-				io_hfr_config->resource_type) {
-				CAM_ERR(CAM_ISP,
-					"wm err res id:%d io res id:%d",
-					res->res_id,
-					io_hfr_config->resource_type);
-				return -EINVAL;
-			}
-
-			if ((kmd_buf_info->used_bytes + hfr_cfg_used_bytes) <
-				kmd_buf_info->size) {
-				kmd_buf_remain_size = kmd_buf_info->size -
-					(kmd_buf_info->used_bytes +
-					hfr_cfg_used_bytes);
-			} else {
-				CAM_ERR(CAM_ISP,
-					"no free kmd memory for base %d",
-					base_idx);
-				rc = -ENOMEM;
-				return rc;
-			}
-
-			update_hfr.cdm.res = res;
-			update_hfr.cdm.cmd_buf_addr = kmd_buf_info->cpu_addr +
-				kmd_buf_info->used_bytes/4 +
-					hfr_cfg_used_bytes/4;
-			update_hfr.cdm.size = kmd_buf_remain_size;
-			update_hfr.io_hfr_cfg    = io_hfr_config;
-
-			CAM_DBG(CAM_ISP, "cmd buffer 0x%pK, size %d",
-				update_hfr.cdm.cmd_buf_addr,
-				update_hfr.cdm.size);
-			rc = res->hw_intf->hw_ops.process_cmd(
-				res->hw_intf->hw_priv,
-				CAM_VFE_HW_CMD_GET_HFR_UPDATE, &update_hfr,
-				sizeof(struct cam_isp_hw_get_hfr_update));
-
-			if (rc) {
-				CAM_ERR(CAM_ISP, "get buf cmd error:%d",
-					res->res_id);
-				rc = -ENOMEM;
-				return rc;
-			}
-			hfr_cfg_used_bytes += update_hfr.cdm.used_bytes;
-		}
-	}
-
-	CAM_DBG(CAM_ISP, "hfr_cfg_used_bytes %d", hfr_cfg_used_bytes);
-	if (hfr_cfg_used_bytes) {
-		/* Update the HW entries */
-		num_ent = prepare->num_hw_update_entries;
-		prepare->hw_update_entries[num_ent].handle =
-					kmd_buf_info->handle;
-		prepare->hw_update_entries[num_ent].len = hfr_cfg_used_bytes;
-		prepare->hw_update_entries[num_ent].offset =
-			kmd_buf_info->offset;
-		num_ent++;
-
-		kmd_buf_info->used_bytes += hfr_cfg_used_bytes;
-		kmd_buf_info->offset     += hfr_cfg_used_bytes;
 		prepare->num_hw_update_entries = num_ent;
 	}
 
@@ -485,7 +388,8 @@
 	struct cam_buf_io_cfg              *io_cfg;
 	struct cam_isp_resource_node       *res;
 	struct cam_ife_hw_mgr_res          *hw_mgr_res;
-	struct cam_isp_hw_get_buf_update    update_buf;
+	struct cam_isp_hw_get_cmd_update    update_buf;
+	struct cam_isp_hw_get_wm_update     wm_update;
 	uint32_t                            kmd_buf_remain_size;
 	uint32_t                            i, j, num_out_buf, num_in_buf;
 	uint32_t                            res_id_out, res_id_in, plane_id;
@@ -606,7 +510,7 @@
 
 				hdl = io_cfg[i].mem_handle[plane_id];
 				if (res->process_cmd(res,
-						CAM_VFE_HW_CMD_GET_SECURE_MODE,
+						CAM_ISP_HW_CMD_GET_SECURE_MODE,
 						&mode,
 						sizeof(bool)))
 					return -EINVAL;
@@ -670,22 +574,24 @@
 				rc = -ENOMEM;
 				return rc;
 			}
-			update_buf.cdm.res = res;
-			update_buf.cdm.cmd_buf_addr = kmd_buf_info->cpu_addr +
+			update_buf.res = res;
+			update_buf.cmd_type = CAM_ISP_HW_CMD_GET_BUF_UPDATE;
+			update_buf.cmd.cmd_buf_addr = kmd_buf_info->cpu_addr +
 				kmd_buf_info->used_bytes/4 +
 					io_cfg_used_bytes/4;
-			update_buf.cdm.size = kmd_buf_remain_size;
-			update_buf.image_buf = io_addr;
-			update_buf.num_buf   = plane_id;
-			update_buf.io_cfg    = &io_cfg[i];
+			wm_update.image_buf = io_addr;
+			wm_update.num_buf   = plane_id;
+			wm_update.io_cfg    = &io_cfg[i];
+			update_buf.cmd.size = kmd_buf_remain_size;
+			update_buf.wm_update = &wm_update;
 
 			CAM_DBG(CAM_ISP, "cmd buffer 0x%pK, size %d",
-				update_buf.cdm.cmd_buf_addr,
-				update_buf.cdm.size);
+				update_buf.cmd.cmd_buf_addr,
+				update_buf.cmd.size);
 			rc = res->hw_intf->hw_ops.process_cmd(
 				res->hw_intf->hw_priv,
-				CAM_VFE_HW_CMD_GET_BUF_UPDATE, &update_buf,
-				sizeof(struct cam_isp_hw_get_buf_update));
+				CAM_ISP_HW_CMD_GET_BUF_UPDATE, &update_buf,
+				sizeof(struct cam_isp_hw_get_cmd_update));
 
 			if (rc) {
 				CAM_ERR(CAM_ISP, "get buf cmd error:%d",
@@ -693,7 +599,7 @@
 				rc = -ENOMEM;
 				return rc;
 			}
-			io_cfg_used_bytes += update_buf.cdm.used_bytes;
+			io_cfg_used_bytes += update_buf.cmd.used_bytes;
 		}
 	}
 
@@ -733,7 +639,7 @@
 	struct cam_isp_resource_node         *res;
 	struct cam_ife_hw_mgr_res            *hw_mgr_res;
 	struct cam_hw_update_entry           *hw_entry;
-	struct cam_isp_hw_get_cdm_args        get_regup;
+	struct cam_isp_hw_get_cmd_update      get_regup;
 	uint32_t kmd_buf_remain_size, num_ent, i, reg_update_size;
 
 	hw_entry = prepare->hw_update_entries;
@@ -773,22 +679,23 @@
 				return rc;
 			}
 
-			get_regup.cmd_buf_addr = kmd_buf_info->cpu_addr +
+			get_regup.cmd.cmd_buf_addr = kmd_buf_info->cpu_addr +
 				kmd_buf_info->used_bytes/4 +
 				reg_update_size/4;
-			get_regup.size = kmd_buf_remain_size;
+			get_regup.cmd.size = kmd_buf_remain_size;
+			get_regup.cmd_type = CAM_ISP_HW_CMD_GET_REG_UPDATE;
 			get_regup.res = res;
 
 			rc = res->hw_intf->hw_ops.process_cmd(
 				res->hw_intf->hw_priv,
-				CAM_VFE_HW_CMD_GET_REG_UPDATE, &get_regup,
-				sizeof(struct cam_isp_hw_get_cdm_args));
+				CAM_ISP_HW_CMD_GET_REG_UPDATE, &get_regup,
+				sizeof(struct cam_isp_hw_get_cmd_update));
 			if (rc)
 				return rc;
 
 			CAM_DBG(CAM_ISP, "Reg update added for res %d hw_id %d",
 				res->res_id, res->hw_intf->hw_idx);
-			reg_update_size += get_regup.used_bytes;
+			reg_update_size += get_regup.cmd.used_bytes;
 		}
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c
index 4a7eff8..8863275 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c
@@ -261,6 +261,15 @@
 	*tasklet_info = NULL;
 }
 
+static void cam_tasklet_flush(void  *tasklet_info)
+{
+	unsigned long data;
+	struct cam_tasklet_info *tasklet = tasklet_info;
+
+	data = (unsigned long)tasklet;
+	cam_tasklet_action(data);
+}
+
 int cam_tasklet_start(void  *tasklet_info)
 {
 	struct cam_tasklet_info       *tasklet = tasklet_info;
@@ -290,6 +299,7 @@
 {
 	struct cam_tasklet_info  *tasklet = tasklet_info;
 
+	cam_tasklet_flush(tasklet);
 	atomic_set(&tasklet->tasklet_active, 0);
 	tasklet_disable(&tasklet->tasklet);
 }
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h
index cce0071..e3f2ce2 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h
@@ -20,26 +20,32 @@
 #include "cam_hw_intf.h"
 #include "cam_packet_util.h"
 
-/**
- * struct cam_isp_generic_blob_info  Generic blob information
+/*
+ * struct cam_isp_generic_blob_info
  *
- * @hfr_config             Initial configuration required to enable HFR
- *
+ * @prepare:            Payload for prepare command
+ * @base_info:          Base hardware information for the context
+ * @kmd_buf_info:       Kmd buffer to store the custom cmd data
  */
 struct cam_isp_generic_blob_info {
-	struct cam_isp_resource_hfr_config *hfr_config;
+	struct cam_hw_prepare_update_args     *prepare;
+	struct ctx_base_info                  *base_info;
+	struct cam_kmd_buf_info               *kmd_buf_info;
 };
 
-/**
+/*
+ * cam_isp_add_change_base()
+ *
  * @brief                  Add change base in the hw entries list
 *                         process the isp source list and get the change base from
  *                         ISP HW instance
  *
- * @prepare:               Contain the  packet and HW update variables
+ * @prepare:               Contains the packet and HW update variables
  * @res_list_isp_src:      Resource list for IFE/VFE source
  * @base_idx:              Base or dev index of the IFE/VFE HW instance for
 *                         which change base needs to be added
  * @kmd_buf_info:          Kmd buffer to store the change base command
+ *
  * @return:                0 for success
  *                         -EINVAL for Fail
  */
@@ -49,26 +55,63 @@
 	uint32_t                               base_idx,
 	struct cam_kmd_buf_info               *kmd_buf_info);
 
-/**
+/*
+ * cam_isp_add_cmd_buf_update()
+ *
+ * @brief                  Add command buffer in the HW entries list for given
+ *                         Blob Data.
+ *
+ * @hw_mgr_res:            HW resource to get the update from
+ * @cmd_type:              Cmd type to get update for
+ * @hw_cmd_type:           HW Cmd type corresponding to cmd_type
+ * @base_idx:              Base hardware index
+ * @cmd_buf_addr:          Cpu buf addr of kmd scratch buffer
+ * @kmd_buf_remain_size:   Remaining size left for cmd buffer update
+ * @cmd_update_data:       Data needed by HW to process the cmd and provide
+ *                         cmd buffer
+ * @bytes_used:            Address of the field to be populated with
+ *                         total bytes used as output to caller
+ *
+ * @return:                Negative for Failure
+ *                         otherwise returns bytes used
+ */
+int cam_isp_add_cmd_buf_update(
+	struct cam_ife_hw_mgr_res            *hw_mgr_res,
+	uint32_t                              cmd_type,
+	uint32_t                              hw_cmd_type,
+	uint32_t                              base_idx,
+	uint32_t                             *cmd_buf_addr,
+	uint32_t                              kmd_buf_remain_size,
+	void                                 *cmd_update_data,
+	uint32_t                             *bytes_used);
+
+/*
+ * cam_isp_add_command_buffers()
+ *
  * @brief                  Add command buffer in the HW entries list for given
  *                         left or right VFE/IFE instance.
  *
- * @prepare:               Contain the  packet and HW update variables
- * @split_id:              Left or right command buffers to be extracted
- * @base_idx:              Base or dev index of the IFE/VFE HW instance
+ * @prepare:               Contains the packet and HW update variables
+ * @kmd_buf_info:          KMD buffer to store the custom cmd data
+ * @base_info:             base hardware information
+ * @blob_handler_cb:       Callback function for generic blob/meta handling
  * @res_list_isp_out:      IFE /VFE out resource list
  * @size_isp_out:          Size of the res_list_isp_out array
+ *
  * @return:                0 for success
- *                         -EINVAL for Fail
+ *                         Negative for Failure
  */
 int cam_isp_add_command_buffers(
 	struct cam_hw_prepare_update_args  *prepare,
-	enum cam_isp_hw_split_id            split_id,
-	uint32_t                            base_idx,
+	struct cam_kmd_buf_info            *kmd_buf_info,
+	struct ctx_base_info               *base_info,
+	cam_packet_generic_blob_handler     blob_handler_cb,
 	struct cam_ife_hw_mgr_res          *res_list_isp_out,
 	uint32_t                            size_isp_out);
 
-/**
+/*
+ * cam_isp_add_io_buffers()
+ *
  * @brief                  Add io buffer configurations in the HW entries list
 *                         process the io configurations based on the base
  *                         index and update the HW entries list
@@ -96,8 +139,9 @@
 	uint32_t                              size_isp_out,
 	bool                                  fill_fence);
 
-
-/**
+/*
+ * cam_isp_add_reg_update()
+ *
  * @brief                  Add reg update in the hw entries list
 *                         process the isp source list and get the reg update from
  *                         ISP HW instance
@@ -115,40 +159,4 @@
 	uint32_t                              base_idx,
 	struct cam_kmd_buf_info              *kmd_buf_info);
 
-/**
- * @brief                  Add HFR configurations in the HW entries list
- *                         processe the hfr configurations based on the base
- *                         index and update the HW entries list
- *
- * @hfr_config:            HFR resource configuration info
- * @prepare:               Contain the  packet and HW update variables
- * @base_idx:              Base or dev index of the IFE/VFE HW instance
- * @kmd_buf_info:          Kmd buffer to store the change base command
- * @res_list_isp_out:      IFE /VFE out resource list
- * @size_isp_out:          Size of the res_list_isp_out array
- *
- * @return:                0 for success
- *                         -EINVAL for Fail
- */
-int cam_isp_add_hfr_config_hw_update(
-	struct cam_isp_resource_hfr_config   *hfr_config,
-	struct cam_hw_prepare_update_args    *prepare,
-	uint32_t                              base_idx,
-	struct cam_kmd_buf_info              *kmd_buf_info,
-	struct cam_ife_hw_mgr_res            *res_list_isp_out,
-	uint32_t                              size_isp_out);
-
-/**
- * @brief                  Processing Generic command buffer.
- *
- * @prepare:               Contain the  packet and HW update variables
- * @blob_info:             Information from generic blob command buffer
- *
- * @return:                0 for success
- *                         -EINVAL for Fail
- */
-int cam_isp_process_generic_cmd_buffer(
-	struct cam_hw_prepare_update_args *prepare,
-	struct cam_isp_generic_blob_info  *blob_info);
-
 #endif /*_CAM_ISP_HW_PARSER_H */
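
The cam_isp_add_cmd_buf_update() declaration above uses a return convention worth calling out: a negative value means failure, anything else is the number of bytes consumed from the KMD scratch buffer (the same count is also reported through bytes_used, so callers can advance their write cursor). A minimal standalone sketch of a caller written against that convention; the helper below is a hypothetical stand-in, not the driver function:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in: returns bytes used on success, -errno on failure. */
static int add_cmd_buf_update(uint32_t *cmd_buf_addr,
	uint32_t kmd_buf_remain_size, uint32_t *bytes_used)
{
	uint32_t needed = 2 * sizeof(uint32_t);	/* two command dwords */

	if (kmd_buf_remain_size < needed)
		return -ENOMEM;

	cmd_buf_addr[0] = 0x1;	/* placeholder payload */
	cmd_buf_addr[1] = 0x2;
	*bytes_used = needed;
	return (int)needed;
}

int main(void)
{
	uint32_t kmd_buf[64];
	uint32_t used = 0, cmd_buf_used_bytes = 0;
	int rc;

	rc = add_cmd_buf_update(kmd_buf, sizeof(kmd_buf), &used);
	if (rc < 0) {
		fprintf(stderr, "cmd buf update failed: %d\n", rc);
		return 1;
	}
	cmd_buf_used_bytes += used;	/* advance the scratch-buffer cursor */
	printf("used %u bytes of scratch buffer\n", cmd_buf_used_bytes);
	return 0;
}
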
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
index c6d5601..44dc5c4 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
@@ -239,7 +239,8 @@
 	int                         i;
 	int                         rc = 0;
 	uint32_t                    irq_mask;
-	unsigned long               flags;
+	unsigned long               flags = 0;
+	bool                        need_lock;
 
 	if (!controller || !handler_priv || !evt_bit_mask_arr) {
 		CAM_ERR(CAM_ISP,
@@ -301,7 +302,9 @@
 	if (controller->hdl_idx > 0x3FFFFFFF)
 		controller->hdl_idx = 1;
 
-	write_lock_irqsave(&controller->rw_lock, flags);
+	need_lock = !in_irq();
+	if (need_lock)
+		write_lock_irqsave(&controller->rw_lock, flags);
 	for (i = 0; i < controller->num_registers; i++) {
 		controller->irq_register_arr[i].top_half_enable_mask[priority]
 			|= evt_bit_mask_arr[i];
@@ -313,7 +316,8 @@
 		cam_io_w_mb(irq_mask, controller->mem_base +
 			controller->irq_register_arr[i].mask_reg_offset);
 	}
-	write_unlock_irqrestore(&controller->rw_lock, flags);
+	if (need_lock)
+		write_unlock_irqrestore(&controller->rw_lock, flags);
 
 	list_add_tail(&evt_handler->list_node,
 		&controller->evt_handler_list_head);
@@ -334,11 +338,12 @@
 	struct cam_irq_controller  *controller  = irq_controller;
 	struct cam_irq_evt_handler *evt_handler = NULL;
 	struct cam_irq_evt_handler *evt_handler_temp;
-	unsigned long               flags;
+	unsigned long               flags = 0;
 	unsigned int                i;
 	uint32_t                    irq_mask;
 	uint32_t                    found = 0;
 	int                         rc = -EINVAL;
+	bool                        need_lock;
 
 	if (!controller)
 		return rc;
@@ -356,7 +361,9 @@
 	if (!found)
 		return rc;
 
-	write_lock_irqsave(&controller->rw_lock, flags);
+	need_lock = !in_irq();
+	if (need_lock)
+		write_lock_irqsave(&controller->rw_lock, flags);
 	for (i = 0; i < controller->num_registers; i++) {
 		controller->irq_register_arr[i].
 		top_half_enable_mask[evt_handler->priority] |=
@@ -370,7 +377,8 @@
 		cam_io_w_mb(irq_mask, controller->mem_base +
 		controller->irq_register_arr[i].mask_reg_offset);
 	}
-	write_unlock_irqrestore(&controller->rw_lock, flags);
+	if (need_lock)
+		write_unlock_irqrestore(&controller->rw_lock, flags);
 
 	return rc;
 }
@@ -380,11 +388,12 @@
 	struct cam_irq_controller  *controller  = irq_controller;
 	struct cam_irq_evt_handler *evt_handler = NULL;
 	struct cam_irq_evt_handler *evt_handler_temp;
-	unsigned long               flags;
+	unsigned long               flags = 0;
 	unsigned int                i;
 	uint32_t                    irq_mask;
 	uint32_t                    found = 0;
 	int                         rc = -EINVAL;
+	bool                        need_lock;
 
 	if (!controller)
 		return rc;
@@ -402,7 +411,9 @@
 	if (!found)
 		return rc;
 
-	write_lock_irqsave(&controller->rw_lock, flags);
+	need_lock = !in_irq();
+	if (need_lock)
+		write_lock_irqsave(&controller->rw_lock, flags);
 	for (i = 0; i < controller->num_registers; i++) {
 		controller->irq_register_arr[i].
 		top_half_enable_mask[evt_handler->priority] &=
@@ -429,7 +440,8 @@
 				controller->mem_base +
 				controller->global_clear_offset);
 	}
-	write_unlock_irqrestore(&controller->rw_lock, flags);
+	if (need_lock)
+		write_unlock_irqrestore(&controller->rw_lock, flags);
 
 	return rc;
 }
@@ -443,8 +455,9 @@
 	uint32_t                    i;
 	uint32_t                    found = 0;
 	uint32_t                    irq_mask;
-	unsigned long               flags;
+	unsigned long               flags = 0;
 	int                         rc = -EINVAL;
+	bool                        need_lock;
 
 	list_for_each_entry_safe(evt_handler, evt_handler_temp,
 		&controller->evt_handler_list_head, list_node) {
@@ -458,8 +471,11 @@
 		}
 	}
 
+	need_lock = !in_irq();
+
 	if (found) {
-		write_lock_irqsave(&controller->rw_lock, flags);
+		if (need_lock)
+			write_lock_irqsave(&controller->rw_lock, flags);
 		for (i = 0; i < controller->num_registers; i++) {
 			controller->irq_register_arr[i].
 				top_half_enable_mask[evt_handler->priority] &=
@@ -485,7 +501,8 @@
 					controller->mem_base +
 					controller->global_clear_offset);
 		}
-		write_unlock_irqrestore(&controller->rw_lock, flags);
+		if (need_lock)
+			write_unlock_irqrestore(&controller->rw_lock, flags);
 
 		kfree(evt_handler->evt_bit_mask_arr);
 		kfree(evt_handler);
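
The controller changes above take the rw_lock only when in_irq() is false, presumably because subscribe/enable/disable/unsubscribe can now be reached from the IRQ top half where the lock is already held and re-acquiring it would self-deadlock. A minimal userspace analog of that conditional-locking guard, with a plain flag standing in for in_irq() and a pthread rwlock standing in for the kernel lock (all names here are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct irq_controller {
	pthread_rwlock_t rw_lock;
	unsigned int     enable_mask;
};

/* Stand-in for the kernel's in_irq(): true while a handler is running. */
static bool in_irq_context;

static void enable_irq_bits(struct irq_controller *ctrl, unsigned int bits)
{
	bool need_lock = !in_irq_context;

	/*
	 * When called from the interrupt path the lock is assumed to be
	 * held already, so taking it again would self-deadlock; only the
	 * non-IRQ path acquires it here.
	 */
	if (need_lock)
		pthread_rwlock_wrlock(&ctrl->rw_lock);

	ctrl->enable_mask |= bits;

	if (need_lock)
		pthread_rwlock_unlock(&ctrl->rw_lock);
}

int main(void)
{
	struct irq_controller ctrl = { .enable_mask = 0 };

	pthread_rwlock_init(&ctrl.rw_lock, NULL);
	enable_irq_bits(&ctrl, 0x3);
	printf("enable_mask = 0x%x\n", ctrl.enable_mask);
	pthread_rwlock_destroy(&ctrl.rw_lock);
	return 0;
}
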
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
index cd92035..9a368cf 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
@@ -32,6 +32,15 @@
 #define CAM_IFE_CSID_TIMEOUT_SLEEP_US                  1000
 #define CAM_IFE_CSID_TIMEOUT_ALL_US                    1000000
 
+/*
+ * Constant Factors needed to change QTimer ticks to nanoseconds
+ * QTimer Freq = 19.2 MHz
+ * Time(us) = ticks/19.2
+ * Time(ns) = ticks/19.2 * 1000
+ */
+#define CAM_IFE_CSID_QTIMER_MUL_FACTOR                 10000
+#define CAM_IFE_CSID_QTIMER_DIV_FACTOR                 192
+
 static int cam_ife_csid_is_ipp_format_supported(
 	uint32_t in_format)
 {
@@ -1972,6 +1981,11 @@
 		time_stamp->time_stamp_val |= time_32;
 	}
 
+	time_stamp->time_stamp_val = mul_u64_u32_div(
+		time_stamp->time_stamp_val,
+		CAM_IFE_CSID_QTIMER_MUL_FACTOR,
+		CAM_IFE_CSID_QTIMER_DIV_FACTOR);
+
 	return 0;
 }
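
As a sanity check on the QTimer factors above: with a 19.2 MHz QTimer, ticks convert to nanoseconds as ticks * 1000 / 19.2, which is exactly ticks * 10000 / 192. The driver uses the kernel helper mul_u64_u32_div() to avoid 64-bit overflow; the standalone sketch below performs the same arithmetic with a 128-bit intermediate (a GCC/Clang extension) purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Same constants as the CSID change above. */
#define QTIMER_MUL_FACTOR	10000ULL
#define QTIMER_DIV_FACTOR	192ULL

/* Userspace stand-in for the kernel's mul_u64_u32_div(). */
static uint64_t qtimer_ticks_to_ns(uint64_t ticks)
{
	/* 19.2 MHz: 1 tick = 1000/19.2 ns = 10000/192 ns */
	return (uint64_t)((unsigned __int128)ticks *
		QTIMER_MUL_FACTOR / QTIMER_DIV_FACTOR);
}

int main(void)
{
	uint64_t ticks = 19200000;	/* one second worth of QTimer ticks */

	printf("%llu ticks -> %llu ns\n",
		(unsigned long long)ticks,
		(unsigned long long)qtimer_ticks_to_ns(ticks));
	return 0;
}
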
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
index c036bca..e11ff63 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
@@ -143,7 +143,7 @@
 	}
 
 	rc = cam_soc_util_enable_platform_resource(soc_info, true,
-		CAM_TURBO_VOTE, true);
+		CAM_SVS_VOTE, true);
 	if (rc) {
 		CAM_ERR(CAM_ISP, "enable platform failed");
 		goto stop_cpas;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
index 3a0c6a7..c81e6db 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
@@ -15,6 +15,7 @@
 
 #include <linux/completion.h>
 #include "cam_hw.h"
+#include <uapi/media/cam_isp.h>
 #include "cam_soc_util.h"
 #include "cam_irq_controller.h"
 #include <uapi/media/cam_isp.h>
@@ -82,6 +83,16 @@
 	CAM_ISP_RESOURCE_MAX,
 };
 
+enum cam_isp_hw_cmd_type {
+	CAM_ISP_HW_CMD_GET_CHANGE_BASE,
+	CAM_ISP_HW_CMD_GET_BUF_UPDATE,
+	CAM_ISP_HW_CMD_GET_REG_UPDATE,
+	CAM_ISP_HW_CMD_GET_HFR_UPDATE,
+	CAM_ISP_HW_CMD_GET_SECURE_MODE,
+	CAM_ISP_HW_CMD_STRIPE_UPDATE,
+	CAM_ISP_HW_CMD_MAX,
+};
+
 /*
  * struct cam_isp_resource_node:
  *
@@ -99,6 +110,8 @@
  * @tasklet_info:                 Tasklet structure that will be used to
  *                                schedule IRQ events related to this resource
  * @irq_handle:                   handle returned on subscribing for IRQ event
+ * @init:                         function pointer to init the HW resource
+ * @deinit:                       function pointer to deinit the HW resource
  * @start:                        function pointer to start the HW resource
  * @stop:                         function pointer to stop the HW resource
  * @process_cmd:                  function pointer for processing commands
@@ -117,6 +130,10 @@
 	void                          *tasklet_info;
 	int                            irq_handle;
 
+	int (*init)(struct cam_isp_resource_node *rsrc_node,
+		void *init_args, uint32_t arg_size);
+	int (*deinit)(struct cam_isp_resource_node *rsrc_node,
+		void *deinit_args, uint32_t arg_size);
 	int (*start)(struct cam_isp_resource_node *rsrc_node);
 	int (*stop)(struct cam_isp_resource_node *rsrc_node);
 	int (*process_cmd)(struct cam_isp_resource_node *rsrc_node,
@@ -126,54 +143,56 @@
 };
 
 /*
- * struct cam_isp_hw_get_cdm_args:
+ * struct cam_isp_hw_cmd_buf_update:
  *
- * @Brief:           Contain the command buffer information
- *                   to store the CDM commands.
+ * @Brief:           Contain the new created command buffer information
  *
- * @res:             Resource node
  * @cmd_buf_addr:    Command buffer to store the change base command
  * @size:            Size of the buffer in bytes
  * @used_bytes:      Consumed bytes in the command buffer
  *
  */
-struct cam_isp_hw_get_cdm_args {
-	struct cam_isp_resource_node   *res;
+struct cam_isp_hw_cmd_buf_update {
 	uint32_t                       *cmd_buf_addr;
 	uint32_t                        size;
 	uint32_t                        used_bytes;
 };
 
 /*
- * struct cam_isp_hw_get_buf_update:
+ * struct cam_isp_hw_get_wm_update:
  *
- * @Brief:         Get cdm commands for buffer updates.
+ * @Brief:         Get cmd buffer for WM updates.
  *
- * @ cdm:          Command buffer information
  * @ image_buf:    image buffer address array
  * @ num_buf:      Number of buffers in the image_buf array
  * @ io_cfg:       IO buffer config information sent from UMD
  *
  */
-struct cam_isp_hw_get_buf_update {
-	struct cam_isp_hw_get_cdm_args  cdm;
+struct cam_isp_hw_get_wm_update {
 	uint64_t                       *image_buf;
 	uint32_t                        num_buf;
 	struct cam_buf_io_cfg          *io_cfg;
 };
 
 /*
- * struct cam_isp_hw_get_hfr_update:
+ * struct cam_isp_hw_get_cmd_update:
  *
- * @Brief:         Get cdm commands for HFR updates.
+ * @Brief:         Get cmd buffer update for different CMD types
  *
- * @ cdm:          Command buffer information
- * @ io_hfr_cfg:   IO buffer config information sent from UMD
+ * @res:           Resource node
+ * @cmd_type:      Command type for which to get update
+ * @cmd:           Command buffer information
  *
  */
-struct cam_isp_hw_get_hfr_update {
-	struct cam_isp_hw_get_cdm_args  cdm;
-	struct cam_isp_port_hfr_config *io_hfr_cfg;
+struct cam_isp_hw_get_cmd_update {
+	struct cam_isp_resource_node     *res;
+	enum cam_isp_hw_cmd_type          cmd_type;
+	struct cam_isp_hw_cmd_buf_update  cmd;
+	union {
+		void                                 *data;
+		struct cam_isp_hw_get_wm_update      *wm_update;
+		struct cam_isp_port_hfr_config       *hfr_update;
+	};
 };
 
 /*
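
The cam_isp_hw_get_cmd_update definition above collapses the old per-command argument structs into one request that carries a cmd_type tag, a shared cmd buffer descriptor, and a union payload, so a single process_cmd() entry point can service every GET_* command. A stripped-down sketch of that tagged-union pattern (types, field names, and the handler below are simplified illustrations, not the driver's definitions):

#include <stdint.h>
#include <stdio.h>

enum cmd_type { CMD_GET_BUF_UPDATE, CMD_GET_HFR_UPDATE };

struct cmd_buf_update {		/* where generated commands land */
	uint32_t *cmd_buf_addr;
	uint32_t  size;
	uint32_t  used_bytes;
};

struct wm_update  { uint64_t image_buf; };
struct hfr_update { uint32_t framedrop_pattern; };

struct get_cmd_update {		/* one request struct for all GET_* cmds */
	enum cmd_type         cmd_type;
	struct cmd_buf_update cmd;
	union {
		void              *data;
		struct wm_update  *wm_update;
		struct hfr_update *hfr_update;
	};
};

static int process_cmd(struct get_cmd_update *u)
{
	switch (u->cmd_type) {
	case CMD_GET_BUF_UPDATE:
		printf("buf update: image_buf=0x%llx\n",
			(unsigned long long)u->wm_update->image_buf);
		u->cmd.used_bytes = 8;	/* pretend two dwords were written */
		return 0;
	case CMD_GET_HFR_UPDATE:
		printf("hfr update: pattern=%u\n",
			u->hfr_update->framedrop_pattern);
		u->cmd.used_bytes = 4;
		return 0;
	}
	return -1;
}

int main(void)
{
	uint32_t scratch[16];
	struct wm_update wm = { .image_buf = 0x1000 };
	struct get_cmd_update u = {
		.cmd_type  = CMD_GET_BUF_UPDATE,
		.cmd       = { scratch, sizeof(scratch), 0 },
		.wm_update = &wm,
	};

	process_cmd(&u);
	printf("used_bytes=%u\n", u.cmd.used_bytes);
	return 0;
}

The union keeps the request a fixed size while each command attaches its own payload, which is why the bus and top layers below can route every GET_* command through the same process_cmd() signature.
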
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
index 96263de..b7ec511 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
@@ -44,16 +44,6 @@
 	CAM_ISP_HW_VFE_CORE_MAX,
 };
 
-enum cam_vfe_hw_cmd_type {
-	CAM_VFE_HW_CMD_GET_CHANGE_BASE,
-	CAM_VFE_HW_CMD_GET_BUF_UPDATE,
-	CAM_VFE_HW_CMD_GET_REG_UPDATE,
-	CAM_VFE_HW_CMD_GET_HFR_UPDATE,
-	CAM_VFE_HW_CMD_GET_SECURE_MODE,
-	CAM_VFE_HW_CMD_STRIPE_UPDATE,
-	CAM_VFE_HW_CMD_MAX,
-};
-
 enum cam_vfe_hw_irq_status {
 	CAM_VFE_IRQ_STATUS_ERR_COMP             = -3,
 	CAM_VFE_IRQ_STATUS_COMP_OWRT            = -2,
@@ -80,6 +70,12 @@
 	CAM_IFE_BUS_IRQ_REGISTERS_MAX,
 };
 
+enum cam_vfe_reset_type {
+	CAM_VFE_HW_RESET_HW_AND_REG,
+	CAM_VFE_HW_RESET_HW,
+	CAM_VFE_HW_RESET_MAX,
+};
+
 /*
  * struct cam_vfe_hw_get_hw_cap:
  *
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
index 8e83cb0..d1e1605 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
@@ -25,7 +25,6 @@
 #include "cam_debug_util.h"
 
 static const char drv_name[] = "vfe";
-
 static uint32_t irq_reg_offset[CAM_IFE_IRQ_REGISTERS_MAX] = {
 	0x0000006C,
 	0x00000070,
@@ -34,7 +33,12 @@
 
 static uint32_t camif_irq_reg_mask[CAM_IFE_IRQ_REGISTERS_MAX] = {
 	0x0003FD1F,
-	0x0FFF7EBC,
+	0x00000000,
+};
+
+static uint32_t camif_irq_err_reg_mask[CAM_IFE_IRQ_REGISTERS_MAX] = {
+	0x00000000,
+	0x0FFF7E80,
 };
 
 static uint32_t rdi_irq_reg_mask[CAM_IFE_IRQ_REGISTERS_MAX] = {
@@ -83,6 +87,7 @@
 	}
 
 	spin_lock_irqsave(&vfe_core_info->spin_lock, flags);
+	(*evt_payload)->error_type = 0;
 	list_add_tail(&(*evt_payload)->list, &vfe_core_info->free_payload_list);
 	spin_unlock_irqrestore(&vfe_core_info->spin_lock, flags);
 
@@ -125,9 +130,6 @@
 	CAM_DBG(CAM_ISP, "IRQ status_0 = 0x%x", th_payload->evt_status_arr[0]);
 
 	if (th_payload->evt_status_arr[0] & (1<<31)) {
-		CAM_DBG(CAM_ISP, "Calling Complete for RESET CMD");
-		complete(handler_priv->reset_complete);
-
 		/*
 		 * Clear All IRQs to avoid spurious IRQs immediately
 		 * after Reset Done.
@@ -135,6 +137,9 @@
 		cam_io_w(0xFFFFFFFF, handler_priv->mem_base + 0x64);
 		cam_io_w(0xFFFFFFFF, handler_priv->mem_base + 0x68);
 		cam_io_w(0x1, handler_priv->mem_base + 0x58);
+		CAM_DBG(CAM_ISP, "Calling Complete for RESET CMD");
+		complete(handler_priv->reset_complete);
 
 		rc = 0;
 	}
@@ -143,12 +148,69 @@
 	return rc;
 }
 
+static int cam_vfe_irq_err_top_half(uint32_t    evt_id,
+	struct cam_irq_th_payload   *th_payload)
+{
+	int32_t                              rc;
+	int                                  i;
+	struct cam_vfe_irq_handler_priv     *handler_priv;
+	struct cam_vfe_top_irq_evt_payload  *evt_payload;
+	struct cam_vfe_hw_core_info         *core_info;
+
+	CAM_DBG(CAM_ISP, "IRQ status_0 = %x, IRQ status_1 = %x",
+		th_payload->evt_status_arr[0], th_payload->evt_status_arr[1]);
+
+	handler_priv = th_payload->handler_priv;
+	core_info =  handler_priv->core_info;
+	/*
+	 *  need to handle overflow condition here, otherwise irq storm
+	 *  will block everything
+	 */
+
+	if (th_payload->evt_status_arr[1]) {
+		CAM_ERR(CAM_ISP, "IRQ status_1: %x, Masking all interrupts",
+			th_payload->evt_status_arr[1]);
+		cam_irq_controller_disable_irq(core_info->vfe_irq_controller,
+			core_info->irq_err_handle);
+	}
+
+	rc  = cam_vfe_get_evt_payload(handler_priv->core_info, &evt_payload);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"No tasklet_cmd is free in queue\n");
+		return rc;
+	}
+
+	cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+	evt_payload->core_index = handler_priv->core_index;
+	evt_payload->core_info  = handler_priv->core_info;
+	evt_payload->evt_id  = evt_id;
+
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+	for (; i < CAM_IFE_IRQ_REGISTERS_MAX; i++) {
+		evt_payload->irq_reg_val[i] = cam_io_r(handler_priv->mem_base +
+			irq_reg_offset[i]);
+	}
+
+	CAM_DBG(CAM_ISP, "Violation status = %x", evt_payload->irq_reg_val[2]);
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	return rc;
+}
+
 int cam_vfe_init_hw(void *hw_priv, void *init_hw_args, uint32_t arg_size)
 {
 	struct cam_hw_info                *vfe_hw = hw_priv;
 	struct cam_hw_soc_info            *soc_info = NULL;
 	struct cam_vfe_hw_core_info       *core_info = NULL;
+	struct cam_isp_resource_node      *isp_res = NULL;
 	int rc = 0;
+	uint32_t                           reset_core_args =
+					CAM_VFE_HW_RESET_HW_AND_REG;
 
 	CAM_DBG(CAM_ISP, "Enter");
 	if (!hw_priv) {
@@ -177,23 +239,35 @@
 		goto decrement_open_cnt;
 	}
 
+	isp_res   = (struct cam_isp_resource_node *)init_hw_args;
+	if (isp_res && isp_res->init) {
+		rc = isp_res->init(isp_res, NULL, 0);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "init Failed rc=%d", rc);
+			goto disable_soc;
+		}
+	}
+
 	CAM_DBG(CAM_ISP, "Enable soc done");
 
 	/* Do HW Reset */
-	rc = cam_vfe_reset(hw_priv, NULL, 0);
+	rc = cam_vfe_reset(hw_priv, &reset_core_args, sizeof(uint32_t));
 	if (rc) {
 		CAM_ERR(CAM_ISP, "Reset Failed rc=%d", rc);
-		goto disable_soc;
+		goto deinint_vfe_res;
 	}
 
 	rc = core_info->vfe_bus->hw_ops.init(core_info->vfe_bus->bus_priv,
 		NULL, 0);
 	if (rc) {
 		CAM_ERR(CAM_ISP, "Bus HW init Failed rc=%d", rc);
-		goto disable_soc;
+		goto deinint_vfe_res;
 	}
 
 	return 0;
+deinint_vfe_res:
+	if (isp_res && isp_res->deinit)
+		isp_res->deinit(isp_res, NULL, 0);
 disable_soc:
 	cam_vfe_disable_soc_resources(soc_info);
 decrement_open_cnt:
@@ -207,6 +281,8 @@
 {
 	struct cam_hw_info                *vfe_hw = hw_priv;
 	struct cam_hw_soc_info            *soc_info = NULL;
+	struct cam_vfe_hw_core_info       *core_info = NULL;
+	struct cam_isp_resource_node      *isp_res = NULL;
 	int rc = 0;
 
 	CAM_DBG(CAM_ISP, "Enter");
@@ -230,6 +306,19 @@
 	mutex_unlock(&vfe_hw->hw_mutex);
 
 	soc_info = &vfe_hw->soc_info;
+	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
+
+	rc = core_info->vfe_bus->hw_ops.deinit(core_info->vfe_bus->bus_priv,
+		NULL, 0);
+	if (rc)
+		CAM_ERR(CAM_ISP, "Bus HW deinit Failed rc=%d", rc);
+
+	isp_res   = (struct cam_isp_resource_node *)deinit_hw_args;
+	if (isp_res && isp_res->deinit) {
+		rc = isp_res->deinit(isp_res, NULL, 0);
+		if (rc)
+			CAM_ERR(CAM_ISP, "deinit failed");
+	}
 
 	/* Turn OFF Regulators, Clocks and other SOC resources */
 	CAM_DBG(CAM_ISP, "Disable SOC resource");
@@ -278,7 +367,8 @@
 	reinit_completion(&vfe_hw->hw_complete);
 
 	CAM_DBG(CAM_ISP, "calling RESET");
-	core_info->vfe_top->hw_ops.reset(core_info->vfe_top->top_priv, NULL, 0);
+	core_info->vfe_top->hw_ops.reset(core_info->vfe_top->top_priv,
+		reset_core_args, arg_size);
 	CAM_DBG(CAM_ISP, "waiting for vfe reset complete");
 	/* Wait for Completion or Timeout of 500ms */
 	rc = wait_for_completion_timeout(&vfe_hw->hw_complete, 500);
@@ -305,20 +395,37 @@
 	time_stamp->mono_time.tv_usec   = ts.tv_nsec/1000;
 }
 
-
-int cam_vfe_irq_top_half(uint32_t    evt_id,
+static int cam_vfe_irq_top_half(uint32_t    evt_id,
 	struct cam_irq_th_payload   *th_payload)
 {
 	int32_t                              rc;
 	int                                  i;
 	struct cam_vfe_irq_handler_priv     *handler_priv;
 	struct cam_vfe_top_irq_evt_payload  *evt_payload;
+	struct cam_vfe_hw_core_info         *core_info;
 
 	handler_priv = th_payload->handler_priv;
 
 	CAM_DBG(CAM_ISP, "IRQ status_0 = %x", th_payload->evt_status_arr[0]);
 	CAM_DBG(CAM_ISP, "IRQ status_1 = %x", th_payload->evt_status_arr[1]);
 
+	/*
+	 *  need to handle non-recoverable condition here, otherwise irq storm
+	 *  will block everything.
+	 */
+	if (th_payload->evt_status_arr[0] & 0x3FC00) {
+		CAM_ERR(CAM_ISP,
+			"Encountered Error Irq_status0=0x%x Status1=0x%x",
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1]);
+		CAM_ERR(CAM_ISP,
+			"Stopping further IRQ processing from this HW index=%d",
+			handler_priv->core_index);
+		cam_io_w(0, handler_priv->mem_base + 0x60);
+		cam_io_w(0, handler_priv->mem_base + 0x5C);
+		return 0;
+	}
+
 	rc  = cam_vfe_get_evt_payload(handler_priv->core_info, &evt_payload);
 	if (rc) {
 		CAM_ERR_RATE_LIMIT(CAM_ISP,
@@ -326,6 +433,7 @@
 		return rc;
 	}
 
+	core_info =  handler_priv->core_info;
 	cam_isp_hw_get_timestamp(&evt_payload->ts);
 
 	evt_payload->core_index = handler_priv->core_index;
@@ -341,22 +449,6 @@
 	}
 	CAM_DBG(CAM_ISP, "Violation status = %x", evt_payload->irq_reg_val[2]);
 
-	/*
-	 *  need to handle overflow condition here, otherwise irq storm
-	 *  will block everything.
-	 */
-	if (evt_payload->irq_reg_val[1]) {
-		CAM_ERR(CAM_ISP,
-			"Encountered Error Irq_status1=0x%x. Stopping further IRQ processing from this HW",
-			evt_payload->irq_reg_val[1]);
-		CAM_ERR(CAM_ISP, "Violation status = %x",
-			evt_payload->irq_reg_val[2]);
-		cam_io_w(0, handler_priv->mem_base + 0x60);
-		cam_io_w(0, handler_priv->mem_base + 0x5C);
-
-		evt_payload->error_type = CAM_ISP_HW_ERROR_OVERFLOW;
-	}
-
 	th_payload->evt_payload_priv = evt_payload;
 
 	CAM_DBG(CAM_ISP, "Exit");
@@ -447,6 +539,7 @@
 
 	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
 	isp_res = (struct cam_isp_resource_node  *)start_args;
+	core_info->tasklet_info = isp_res->tasklet_info;
 
 	mutex_lock(&vfe_hw->hw_mutex);
 	if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_IN) {
@@ -465,6 +558,12 @@
 				cam_vfe_irq_top_half, cam_ife_mgr_do_tasklet,
 				isp_res->tasklet_info, cam_tasklet_enqueue_cmd);
 
+		core_info->irq_err_handle = cam_irq_controller_subscribe_irq(
+			core_info->vfe_irq_controller, CAM_IRQ_PRIORITY_0,
+			camif_irq_err_reg_mask, &core_info->irq_payload,
+			cam_vfe_irq_err_top_half, cam_ife_mgr_do_tasklet,
+			core_info->tasklet_info, cam_tasklet_enqueue_cmd);
+
 		if (isp_res->irq_handle > 0)
 			rc = core_info->vfe_top->hw_ops.start(
 				core_info->vfe_top->top_priv, isp_res,
@@ -477,7 +576,6 @@
 	} else {
 		CAM_ERR(CAM_ISP, "Invalid res type:%d", isp_res->res_type);
 	}
-
 	mutex_unlock(&vfe_hw->hw_mutex);
 
 	return rc;
@@ -506,6 +604,10 @@
 		rc = core_info->vfe_top->hw_ops.stop(
 			core_info->vfe_top->top_priv, isp_res,
 			sizeof(struct cam_isp_resource_node));
+
+		cam_irq_controller_unsubscribe_irq(
+			core_info->vfe_irq_controller,
+			core_info->irq_err_handle);
+
 	} else if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_OUT) {
 		rc = core_info->vfe_bus->hw_ops.stop(isp_res, NULL, 0);
 	} else {
@@ -546,16 +648,16 @@
 	hw_info = core_info->vfe_hw_info;
 
 	switch (cmd_type) {
-	case CAM_VFE_HW_CMD_GET_CHANGE_BASE:
-	case CAM_VFE_HW_CMD_GET_REG_UPDATE:
+	case CAM_ISP_HW_CMD_GET_CHANGE_BASE:
+	case CAM_ISP_HW_CMD_GET_REG_UPDATE:
 		rc = core_info->vfe_top->hw_ops.process_cmd(
 			core_info->vfe_top->top_priv, cmd_type, cmd_args,
 			arg_size);
 
 		break;
-	case CAM_VFE_HW_CMD_GET_BUF_UPDATE:
-	case CAM_VFE_HW_CMD_GET_HFR_UPDATE:
-	case CAM_VFE_HW_CMD_STRIPE_UPDATE:
+	case CAM_ISP_HW_CMD_GET_BUF_UPDATE:
+	case CAM_ISP_HW_CMD_GET_HFR_UPDATE:
+	case CAM_ISP_HW_CMD_STRIPE_UPDATE:
 		rc = core_info->vfe_bus->hw_ops.process_cmd(
 			core_info->vfe_bus->bus_priv, cmd_type, cmd_args,
 			arg_size);
@@ -671,4 +773,3 @@
 
 	return rc;
 }
-
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h
index ee29e1cf..0674a6ad 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h
@@ -50,12 +50,13 @@
 	void                               *vfe_irq_controller;
 	struct cam_vfe_top                 *vfe_top;
 	struct cam_vfe_bus                 *vfe_bus;
-
+	void                               *tasklet_info;
 	struct cam_vfe_top_irq_evt_payload  evt_payload[CAM_VFE_EVT_MAX];
 	struct list_head                    free_payload_list;
 	struct cam_vfe_irq_handler_priv     irq_payload;
 	uint32_t                            cpas_handle;
 	int                                 irq_handle;
+	int                                 irq_err_handle;
 	spinlock_t                          spin_lock;
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
index b5ca432..0f93664 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
@@ -15,6 +15,30 @@
 #include "cam_vfe_soc.h"
 #include "cam_debug_util.h"
 
+static bool cam_vfe_cpas_cb(uint32_t client_handle, void *userdata,
+	struct cam_cpas_irq_data *irq_data)
+{
+	bool error_handled = false;
+
+	if (!irq_data)
+		return error_handled;
+
+	switch (irq_data->irq_type) {
+	case CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR:
+	case CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR:
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"IFE UBWC Encode error type=%d status=%x",
+			irq_data->irq_type,
+			irq_data->u.enc_err.encerr_status.value);
+		error_handled = true;
+		break;
+	default:
+		break;
+	}
+
+	return error_handled;
+}
+
 static int cam_vfe_get_dt_properties(struct cam_hw_soc_info *soc_info)
 {
 	int rc = 0;
@@ -76,6 +100,12 @@
 		goto free_soc_private;
 	}
 
+	rc = cam_soc_util_get_option_clk_by_name(soc_info,
+		CAM_VFE_DSP_CLK_NAME, &soc_private->dsp_clk,
+		&soc_private->dsp_clk_index, &soc_private->dsp_clk_rate);
+	if (rc)
+		CAM_WARN(CAM_ISP, "option clk get failed");
+
 	rc = cam_vfe_request_platform_resource(soc_info, vfe_irq_handler,
 		irq_data);
 	if (rc < 0) {
@@ -89,6 +119,8 @@
 		CAM_HW_IDENTIFIER_LENGTH);
 	cpas_register_param.cell_index = soc_info->index;
 	cpas_register_param.dev = soc_info->dev;
+	cpas_register_param.cam_cpas_client_cb = cam_vfe_cpas_cb;
+	cpas_register_param.userdata = soc_info;
 	rc = cam_cpas_register_client(&cpas_register_param);
 	if (rc) {
 		CAM_ERR(CAM_ISP, "CPAS registration failed rc=%d", rc);
@@ -132,6 +164,11 @@
 		CAM_ERR(CAM_ISP,
 			"Error! Release platform resources failed rc=%d", rc);
 
+	rc = cam_soc_util_clk_put(&soc_private->dsp_clk);
+	if (rc < 0)
+		CAM_ERR(CAM_ISP,
+			"Error Put dsp clk failed rc=%d", rc);
+
 	kfree(soc_private);
 
 	return rc;
@@ -179,6 +216,54 @@
 	return rc;
 }
 
+int cam_vfe_soc_enable_clk(struct cam_hw_soc_info *soc_info,
+	const char *clk_name)
+{
+	int  rc = 0;
+	struct cam_vfe_soc_private       *soc_private;
+
+	if (!soc_info) {
+		CAM_ERR(CAM_ISP, "Error Invalid params");
+		rc = -EINVAL;
+		return rc;
+	}
+	soc_private = soc_info->soc_private;
+
+	if (strcmp(clk_name, CAM_VFE_DSP_CLK_NAME) == 0) {
+		rc = cam_soc_util_clk_enable(soc_private->dsp_clk,
+			CAM_VFE_DSP_CLK_NAME, soc_private->dsp_clk_rate);
+		if (rc)
+			CAM_ERR(CAM_ISP,
+			"Error enable dsp clk failed rc=%d", rc);
+	}
+
+	return rc;
+}
+
+int cam_vfe_soc_disable_clk(struct cam_hw_soc_info *soc_info,
+	const char *clk_name)
+{
+	int  rc = 0;
+	struct cam_vfe_soc_private       *soc_private;
+
+	if (!soc_info) {
+		CAM_ERR(CAM_ISP, "Error Invalid params");
+		rc = -EINVAL;
+		return rc;
+	}
+	soc_private = soc_info->soc_private;
+
+	if (strcmp(clk_name, CAM_VFE_DSP_CLK_NAME) == 0) {
+		rc = cam_soc_util_clk_disable(soc_private->dsp_clk,
+			CAM_VFE_DSP_CLK_NAME);
+		if (rc)
+			CAM_ERR(CAM_ISP,
+			"Error disable dsp clk failed rc=%d", rc);
+	}
+
+	return rc;
+}
+
 
 int cam_vfe_disable_soc_resources(struct cam_hw_soc_info *soc_info)
 {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.h
index 094c977..7a4dbea 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.h
@@ -16,6 +16,8 @@
 #include "cam_soc_util.h"
 #include "cam_isp_hw.h"
 
+#define CAM_VFE_DSP_CLK_NAME "ife_dsp_clk"
+
 /*
  * struct cam_vfe_soc_private:
  *
@@ -26,7 +28,10 @@
  *                           with CPAS.
  */
 struct cam_vfe_soc_private {
-	uint32_t cpas_handle;
+	uint32_t    cpas_handle;
+	struct clk *dsp_clk;
+	int32_t     dsp_clk_index;
+	int32_t     dsp_clk_rate;
 };
 
 /*
@@ -80,4 +85,32 @@
  */
 int cam_vfe_disable_soc_resources(struct cam_hw_soc_info *soc_info);
 
+/*
+ * cam_vfe_soc_enable_clk()
+ *
+ * @brief:                   Enable clock with given name
+ *
+ * @soc_info:                Device soc information
+ * @clk_name:                Name of clock to enable
+ *
+ * @Return:                  0: Success
+ *                           Non-zero: Failure
+ */
+int cam_vfe_soc_enable_clk(struct cam_hw_soc_info *soc_info,
+	const char *clk_name);
+
+/*
+ * cam_vfe_soc_disable_clk()
+ *
+ * @brief:                   Disable clock with given name
+ *
+ * @soc_info:                Device soc information
+ * @clk_name:                Name of clock to disable
+ *
+ * @Return:                  0: Success
+ *                           Non-zero: Failure
+ */
+int cam_vfe_soc_disable_clk(struct cam_hw_soc_info *soc_info,
+	const char *clk_name);
+
 #endif /* _CAM_VFE_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
index 5773bbe..a4ba2e1 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
@@ -67,12 +67,18 @@
 	.extern_reg_update_mask          = 1,
 	.pixel_pattern_shift             = 0,
 	.pixel_pattern_mask              = 0x7,
+	.dsp_mode_shift                  = 23,
+	.dsp_mode_mask                   = 0x1,
+	.dsp_en_shift                    = 3,
+	.dsp_en_mask                     = 0x1,
 	.reg_update_cmd_data             = 0x1,
 	.epoch_line_cfg                  = 0x00140014,
 	.sof_irq_mask                    = 0x00000001,
 	.epoch0_irq_mask                 = 0x00000004,
 	.reg_update_irq_mask             = 0x00000010,
 	.eof_irq_mask                    = 0x00000002,
+	.error_irq_mask0                 = 0x0003FC00,
+	.error_irq_mask1                 = 0x0FFF7E80,
 };
 
 struct cam_vfe_top_ver2_reg_offset_module_ctrl lens_170_reg = {
@@ -193,6 +199,7 @@
 	.meta_offset      = 0x0000253C,
 	.meta_stride      = 0x00002540,
 	.mode_cfg         = 0x00002544,
+	.bw_limit         = 0x000025A0,
 };
 
 static struct cam_vfe_bus_ver2_reg_offset_ubwc_client ubwc_regs_client_4 = {
@@ -203,6 +210,7 @@
 	.meta_offset      = 0x0000263C,
 	.meta_stride      = 0x00002640,
 	.mode_cfg         = 0x00002644,
+	.bw_limit         = 0x000026A0,
 };
 
 static struct cam_vfe_bus_ver2_hw_info vfe170_bus_hw_info = {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
index bf49ddc..e94bb62 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
@@ -57,6 +57,12 @@
 		buf_array[index++] = val;                  \
 	} while (0)
 
+static uint32_t bus_error_irq_mask[3] = {
+	0x7800,
+	0x0000,
+	0x00C0,
+};
+
 enum cam_vfe_bus_packer_format {
 	PACKER_FMT_PLAIN_128                   = 0x0,
 	PACKER_FMT_PLAIN_8                     = 0x1,
@@ -197,6 +203,7 @@
 	struct list_head                    used_comp_grp;
 
 	uint32_t                            irq_handle;
+	uint32_t                            error_irq_handle;
 };
 
 static int cam_vfe_bus_process_cmd(
@@ -209,7 +216,7 @@
 {
 	if (list_empty(&common_data->free_payload_list)) {
 		*evt_payload = NULL;
-		CAM_ERR(CAM_ISP, "No free payload");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No free payload");
 		return -ENODEV;
 	}
 
@@ -256,7 +263,7 @@
 		CAM_ERR(CAM_ISP, "No payload to put");
 		return -EINVAL;
 	}
-
+	(*evt_payload)->error_type = 0;
 	ife_irq_regs = (*evt_payload)->irq_reg_val;
 	status_reg0 = ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS0];
 	status_reg1 = ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS1];
@@ -463,6 +470,7 @@
 		case CAM_FORMAT_UBWC_NV12_4R:
 		case CAM_FORMAT_UBWC_TP10:
 		case CAM_FORMAT_UBWC_P010:
+		case CAM_FORMAT_PLAIN16_10:
 			return 2;
 		default:
 			break;
@@ -738,10 +746,12 @@
 }
 
 static enum cam_vfe_bus_packer_format
-	cam_vfe_bus_get_packer_fmt(uint32_t out_fmt)
+	cam_vfe_bus_get_packer_fmt(uint32_t out_fmt, int wm_index)
 {
 	switch (out_fmt) {
 	case CAM_FORMAT_NV21:
+		if (wm_index == 4 || wm_index == 6)
+			return PACKER_FMT_PLAIN_8_LSB_MSB_10_ODD_EVEN;
 	case CAM_FORMAT_NV12:
 	case CAM_FORMAT_UBWC_NV12:
 	case CAM_FORMAT_UBWC_NV12_4R:
@@ -817,7 +827,8 @@
 	rsrc_data->irq_enabled = subscribe_irq;
 	rsrc_data->ctx = ctx;
 	rsrc_data->format = out_port_info->format;
-	rsrc_data->pack_fmt = cam_vfe_bus_get_packer_fmt(rsrc_data->format);
+	rsrc_data->pack_fmt = cam_vfe_bus_get_packer_fmt(rsrc_data->format,
+		wm_idx);
 
 	rsrc_data->width = out_port_info->width;
 	rsrc_data->height = out_port_info->height;
@@ -951,6 +962,19 @@
 				return -EINVAL;
 			}
 			break;
+		case CAM_FORMAT_PLAIN16_10:
+			switch (plane) {
+			case PLANE_C:
+				rsrc_data->height /= 2;
+				break;
+			case PLANE_Y:
+				break;
+			default:
+				CAM_ERR(CAM_ISP, "Invalid plane %d", plane);
+				return -EINVAL;
+			}
+			rsrc_data->width *= 2;
+			break;
 		default:
 			CAM_ERR(CAM_ISP, "Invalid format %d",
 				rsrc_data->format);
@@ -968,16 +992,30 @@
 		rsrc_data->width = rsrc_data->width * 2;
 		rsrc_data->stride = rsrc_data->width;
 		rsrc_data->en_cfg = 0x1;
+
+		/* LSB aligned */
+		rsrc_data->pack_fmt |= 0x10;
 	}  else {
 		/* Write master 5-6 DS ports, 10 PDAF */
+		uint32_t align_width;
 		rsrc_data->width = rsrc_data->width * 4;
 		rsrc_data->height = rsrc_data->height / 2;
 		rsrc_data->en_cfg = 0x1;
+		CAM_DBG(CAM_ISP, "before width %d", rsrc_data->width);
+		align_width = ALIGNUP(rsrc_data->width, 16);
+		if (align_width != rsrc_data->width) {
+			CAM_WARN(CAM_ISP,
+				"Override width %u with expected %u",
+				rsrc_data->width, align_width);
+			rsrc_data->width = align_width;
+		}
 	}
 
 	*client_done_mask = (1 << wm_idx);
 	*wm_res = wm_res_local;
 
+	CAM_DBG(CAM_ISP, "WM %d: processed width %d, processed height %d",
+		rsrc_data->index, rsrc_data->width, rsrc_data->height);
 	return 0;
 }
 
@@ -1108,6 +1146,8 @@
 		common_data->mem_base + common_data->common_reg->sw_reset);
 
 	wm_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+	rsrc_data->init_cfg_done = false;
+	rsrc_data->hfr_cfg_done = false;
 
 	return rc;
 }
@@ -2115,6 +2155,7 @@
 
 	if (vfe_out->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE ||
 		vfe_out->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CAM_DBG(CAM_ISP, "vfe_out res_state is %d", vfe_out->res_state);
 		return rc;
 	}
 
@@ -2258,38 +2299,57 @@
 		bus_priv->common_data.bus_irq_controller);
 }
 
-static int cam_vfe_bus_update_buf(void *priv, void *cmd_args,
+static int cam_vfe_bus_error_irq_top_half(uint32_t evt_id,
+	struct cam_irq_th_payload *th_payload)
+{
+	int i = 0;
+	struct cam_vfe_bus_ver2_priv  *bus_priv = th_payload->handler_priv;
+
+	CAM_ERR_RATE_LIMIT(CAM_ISP, "Bus Err IRQ");
+	for (i = 0; i < th_payload->num_registers; i++) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ_Status%d: 0x%x", i,
+			th_payload->evt_status_arr[i]);
+	}
+	cam_irq_controller_disable_irq(bus_priv->common_data.bus_irq_controller,
+		bus_priv->error_irq_handle);
+
+	/* Returning an error prevents the bottom half from being enqueued */
+	return -EFAULT;
+}
+
+static int cam_vfe_bus_update_wm(void *priv, void *cmd_args,
 	uint32_t arg_size)
 {
 	struct cam_vfe_bus_ver2_priv             *bus_priv;
-	struct cam_isp_hw_get_buf_update         *update_buf;
+	struct cam_isp_hw_get_cmd_update         *update_buf;
 	struct cam_buf_io_cfg                    *io_cfg;
 	struct cam_vfe_bus_ver2_vfe_out_data     *vfe_out_data = NULL;
 	struct cam_vfe_bus_ver2_wm_resource_data *wm_data = NULL;
 	uint32_t *reg_val_pair;
 	uint32_t  i, j, size = 0;
-	uint32_t  frame_inc = 0;
+	uint32_t  frame_inc = 0, ubwc_bw_limit = 0, camera_hw_version, val;
+	int rc = 0;
 
 	bus_priv = (struct cam_vfe_bus_ver2_priv  *) priv;
-	update_buf =  (struct cam_isp_hw_get_buf_update *) cmd_args;
+	update_buf =  (struct cam_isp_hw_get_cmd_update *) cmd_args;
 
 	vfe_out_data = (struct cam_vfe_bus_ver2_vfe_out_data *)
-		update_buf->cdm.res->res_priv;
+		update_buf->res->res_priv;
 
 	if (!vfe_out_data || !vfe_out_data->cdm_util_ops) {
 		CAM_ERR(CAM_ISP, "Failed! Invalid data");
 		return -EINVAL;
 	}
 
-	if (update_buf->num_buf != vfe_out_data->num_wm) {
+	if (update_buf->wm_update->num_buf != vfe_out_data->num_wm) {
 		CAM_ERR(CAM_ISP,
 			"Failed! Invalid number buffers:%d required:%d",
-			update_buf->num_buf, vfe_out_data->num_wm);
+			update_buf->wm_update->num_buf, vfe_out_data->num_wm);
 		return -EINVAL;
 	}
 
 	reg_val_pair = &vfe_out_data->common_data->io_buf_update[0];
-	io_cfg = update_buf->io_cfg;
+	io_cfg = update_buf->wm_update->io_cfg;
 
 	for (i = 0, j = 0; i < vfe_out_data->num_wm; i++) {
 		if (j >= (MAX_REG_VAL_PAIR_SIZE - MAX_BUF_UPDATE_REG_NUM * 2)) {
@@ -2306,17 +2366,27 @@
 			wm_data->hw_regs->buffer_width_cfg,
 			wm_data->width);
 		CAM_DBG(CAM_ISP, "WM %d image width 0x%x",
-			wm_data->index, wm_data->width);
+			wm_data->index, reg_val_pair[j-1]);
 
 		/* For initial configuration program all bus registers */
-		if ((wm_data->stride != io_cfg->planes[i].plane_stride ||
+		val = io_cfg->planes[i].plane_stride;
+		CAM_DBG(CAM_ISP, "before stride %d", val);
+		val = ALIGNUP(val, 16);
+		if (val != io_cfg->planes[i].plane_stride &&
+			val != wm_data->stride)
+			CAM_WARN(CAM_ISP,
+				"Warning stride %u expected %u",
+				io_cfg->planes[i].plane_stride,
+				val);
+
+		if ((wm_data->stride != val ||
 			!wm_data->init_cfg_done) && (wm_data->index >= 3)) {
 			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
 				wm_data->hw_regs->stride,
 				io_cfg->planes[i].plane_stride);
-			wm_data->stride = io_cfg->planes[i].plane_stride;
+			wm_data->stride = val;
 			CAM_DBG(CAM_ISP, "WM %d image stride 0x%x",
-				wm_data->index, wm_data->stride);
+				wm_data->index, reg_val_pair[j-1]);
 		}
 
 		if (wm_data->framedrop_pattern != io_cfg->framedrop_pattern ||
@@ -2326,8 +2396,7 @@
 				io_cfg->framedrop_pattern);
 			wm_data->framedrop_pattern = io_cfg->framedrop_pattern;
 			CAM_DBG(CAM_ISP, "WM %d framedrop pattern 0x%x",
-				wm_data->index,
-				wm_data->framedrop_pattern);
+				wm_data->index, reg_val_pair[j-1]);
 		}
 
 
@@ -2338,8 +2407,7 @@
 				io_cfg->framedrop_period);
 			wm_data->framedrop_period = io_cfg->framedrop_period;
 			CAM_DBG(CAM_ISP, "WM %d framedrop period 0x%x",
-				wm_data->index,
-				wm_data->framedrop_period);
+				wm_data->index, reg_val_pair[j-1]);
 		}
 
 		if (wm_data->irq_subsample_period != io_cfg->subsample_period
@@ -2350,8 +2418,7 @@
 			wm_data->irq_subsample_period =
 				io_cfg->subsample_period;
 			CAM_DBG(CAM_ISP, "WM %d irq subsample period 0x%x",
-				wm_data->index,
-				wm_data->irq_subsample_period);
+				wm_data->index, reg_val_pair[j-1]);
 		}
 
 		if (wm_data->irq_subsample_pattern != io_cfg->subsample_pattern
@@ -2362,8 +2429,7 @@
 			wm_data->irq_subsample_pattern =
 				io_cfg->subsample_pattern;
 			CAM_DBG(CAM_ISP, "WM %d irq subsample pattern 0x%x",
-				wm_data->index,
-				wm_data->irq_subsample_pattern);
+				wm_data->index, reg_val_pair[j-1]);
 		}
 
 		if (wm_data->en_ubwc) {
@@ -2381,7 +2447,7 @@
 				wm_data->packer_cfg =
 					io_cfg->planes[i].packer_config;
 				CAM_DBG(CAM_ISP, "WM %d packer cfg 0x%x",
-					wm_data->index, wm_data->packer_cfg);
+					wm_data->index, reg_val_pair[j-1]);
 			}
 
 			if (wm_data->is_dual) {
@@ -2397,21 +2463,21 @@
 				wm_data->tile_cfg =
 					io_cfg->planes[i].tile_config;
 				CAM_DBG(CAM_ISP, "WM %d tile cfg 0x%x",
-					wm_data->index, wm_data->tile_cfg);
+					wm_data->index, reg_val_pair[j-1]);
 			}
 
 			if (wm_data->is_dual) {
 				if ((wm_data->h_init != wm_data->offset) ||
 					!wm_data->init_cfg_done) {
-				/*
-				 * For dual ife h init value need to take from
-				 * offset.Striping config update offset value
-				 */
+					/*
+					 * For dual ife h init value need to
+					 * take from offset.  Striping config
+					 * update offset value.
+					 */
 					CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair,
 						j,
 						wm_data->hw_regs->ubwc_regs->
-						h_init,
-						wm_data->offset);
+						h_init, wm_data->offset);
 					wm_data->h_init = wm_data->offset;
 				}
 			} else if (wm_data->h_init !=
@@ -2422,7 +2488,7 @@
 					io_cfg->planes[i].h_init);
 				wm_data->h_init = io_cfg->planes[i].h_init;
 				CAM_DBG(CAM_ISP, "WM %d h_init 0x%x",
-					wm_data->index, wm_data->h_init);
+					wm_data->index, reg_val_pair[j-1]);
 			}
 
 			if (wm_data->v_init != io_cfg->planes[i].v_init ||
@@ -2432,7 +2498,7 @@
 					io_cfg->planes[i].v_init);
 				wm_data->v_init = io_cfg->planes[i].v_init;
 				CAM_DBG(CAM_ISP, "WM %d v_init 0x%x",
-					wm_data->index, wm_data->v_init);
+					wm_data->index, reg_val_pair[j-1]);
 			}
 
 			if (wm_data->ubwc_meta_stride !=
@@ -2445,8 +2511,7 @@
 				wm_data->ubwc_meta_stride =
 					io_cfg->planes[i].meta_stride;
 				CAM_DBG(CAM_ISP, "WM %d meta stride 0x%x",
-					wm_data->index,
-					wm_data->ubwc_meta_stride);
+					wm_data->index, reg_val_pair[j-1]);
 			}
 
 			if (wm_data->ubwc_mode_cfg !=
@@ -2458,7 +2523,7 @@
 				wm_data->ubwc_mode_cfg =
 					io_cfg->planes[i].mode_config;
 				CAM_DBG(CAM_ISP, "WM %d ubwc mode cfg 0x%x",
-					wm_data->index, wm_data->ubwc_mode_cfg);
+					wm_data->index, reg_val_pair[j-1]);
 			}
 
 			if (wm_data->ubwc_meta_offset !=
@@ -2471,30 +2536,54 @@
 				wm_data->ubwc_meta_offset =
 					io_cfg->planes[i].meta_offset;
 				CAM_DBG(CAM_ISP, "WM %d ubwc meta offset 0x%x",
-					wm_data->index,
-					wm_data->ubwc_meta_offset);
+					wm_data->index, reg_val_pair[j-1]);
 			}
 
 			/* UBWC meta address */
 			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
 				wm_data->hw_regs->ubwc_regs->meta_addr,
-				update_buf->image_buf[i]);
+				update_buf->wm_update->image_buf[i]);
 			CAM_DBG(CAM_ISP, "WM %d ubwc meta addr 0x%llx",
-				wm_data->index, update_buf->image_buf[i]);
+				wm_data->index,
+				update_buf->wm_update->image_buf[i]);
+
+			/* Enable UBWC bandwidth limit if required */
+			rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
+			if (!rc &&
+				camera_hw_version == CAM_CPAS_TITAN_170_V110) {
+				switch (wm_data->format) {
+				case CAM_FORMAT_UBWC_TP10:
+					ubwc_bw_limit = 0x8 | BIT(0);
+					break;
+				case CAM_FORMAT_UBWC_NV12_4R:
+					ubwc_bw_limit = 0xB | BIT(0);
+					break;
+				default:
+					ubwc_bw_limit = 0;
+					break;
+				}
+			}
+
+			if (ubwc_bw_limit) {
+				CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+					wm_data->hw_regs->ubwc_regs->bw_limit,
+					ubwc_bw_limit);
+				CAM_DBG(CAM_ISP, "WM %d ubwc bw limit 0x%x",
+					wm_data->index, ubwc_bw_limit);
+			}
 		}
 
 		/* WM Image address */
 		if (wm_data->en_ubwc)
 			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
 				wm_data->hw_regs->image_addr,
-				(update_buf->image_buf[i] +
+				(update_buf->wm_update->image_buf[i] +
 				io_cfg->planes[i].meta_size));
 		else
 			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
-			wm_data->hw_regs->image_addr,
-			update_buf->image_buf[i] +
-			wm_data->offset);
-
+				wm_data->hw_regs->image_addr,
+				update_buf->wm_update->image_buf[i] +
+				wm_data->offset);
 		CAM_DBG(CAM_ISP, "WM %d image address 0x%x",
 			wm_data->index, reg_val_pair[j-1]);
 
@@ -2503,7 +2592,7 @@
 		CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
 			wm_data->hw_regs->frame_inc, frame_inc);
 		CAM_DBG(CAM_ISP, "WM %d frame_inc %d",
-			wm_data->index, frame_inc);
+			wm_data->index, reg_val_pair[j-1]);
 
 
 		/* enable the WM */
@@ -2519,18 +2608,18 @@
 	size = vfe_out_data->cdm_util_ops->cdm_required_size_reg_random(j/2);
 
 	/* cdm util returns dwords, need to convert to bytes */
-	if ((size * 4) > update_buf->cdm.size) {
+	if ((size * 4) > update_buf->cmd.size) {
 		CAM_ERR(CAM_ISP,
 			"Failed! Buf size:%d insufficient, expected size:%d",
-			update_buf->cdm.size, size);
+			update_buf->cmd.size, size);
 		return -ENOMEM;
 	}
 
 	vfe_out_data->cdm_util_ops->cdm_write_regrandom(
-		update_buf->cdm.cmd_buf_addr, j/2, reg_val_pair);
+		update_buf->cmd.cmd_buf_addr, j/2, reg_val_pair);
 
 	/* cdm util returns dwords, need to convert to bytes */
-	update_buf->cdm.used_bytes = size * 4;
+	update_buf->cmd.used_bytes = size * 4;
 
 	return 0;
 }
@@ -2539,7 +2628,7 @@
 	uint32_t arg_size)
 {
 	struct cam_vfe_bus_ver2_priv             *bus_priv;
-	struct cam_isp_hw_get_hfr_update         *update_hfr;
+	struct cam_isp_hw_get_cmd_update         *update_hfr;
 	struct cam_vfe_bus_ver2_vfe_out_data     *vfe_out_data = NULL;
 	struct cam_vfe_bus_ver2_wm_resource_data *wm_data = NULL;
 	struct cam_isp_port_hfr_config           *hfr_cfg = NULL;
@@ -2547,10 +2636,10 @@
 	uint32_t  i, j, size = 0;
 
 	bus_priv = (struct cam_vfe_bus_ver2_priv  *) priv;
-	update_hfr =  (struct cam_isp_hw_get_hfr_update *) cmd_args;
+	update_hfr =  (struct cam_isp_hw_get_cmd_update *) cmd_args;
 
 	vfe_out_data = (struct cam_vfe_bus_ver2_vfe_out_data *)
-		update_hfr->cdm.res->res_priv;
+		update_hfr->res->res_priv;
 
 	if (!vfe_out_data || !vfe_out_data->cdm_util_ops) {
 		CAM_ERR(CAM_ISP, "Failed! Invalid data");
@@ -2558,7 +2647,7 @@
 	}
 
 	reg_val_pair = &vfe_out_data->common_data->io_buf_update[0];
-	hfr_cfg = update_hfr->io_hfr_cfg;
+	hfr_cfg = update_hfr->hfr_update;
 
 	for (i = 0, j = 0; i < vfe_out_data->num_wm; i++) {
 		if (j >= (MAX_REG_VAL_PAIR_SIZE - MAX_BUF_UPDATE_REG_NUM * 2)) {
@@ -2621,18 +2710,18 @@
 	size = vfe_out_data->cdm_util_ops->cdm_required_size_reg_random(j/2);
 
 	/* cdm util returns dwords, need to convert to bytes */
-	if ((size * 4) > update_hfr->cdm.size) {
+	if ((size * 4) > update_hfr->cmd.size) {
 		CAM_ERR(CAM_ISP,
 			"Failed! Buf size:%d insufficient, expected size:%d",
-			update_hfr->cdm.size, size);
+			update_hfr->cmd.size, size);
 		return -ENOMEM;
 	}
 
 	vfe_out_data->cdm_util_ops->cdm_write_regrandom(
-		update_hfr->cdm.cmd_buf_addr, j/2, reg_val_pair);
+		update_hfr->cmd.cmd_buf_addr, j/2, reg_val_pair);
 
 	/* cdm util returns dwords, need to convert to bytes */
-	update_hfr->cdm.used_bytes = size * 4;
+	update_hfr->cmd.used_bytes = size * 4;
 
 	return 0;
 }
@@ -2721,6 +2810,21 @@
 		return -EFAULT;
 	}
 
+	bus_priv->error_irq_handle = cam_irq_controller_subscribe_irq(
+		bus_priv->common_data.bus_irq_controller,
+		CAM_IRQ_PRIORITY_0,
+		bus_error_irq_mask,
+		bus_priv,
+		cam_vfe_bus_error_irq_top_half,
+		NULL,
+		NULL,
+		NULL);
+
+	if (bus_priv->error_irq_handle <= 0) {
+		CAM_ERR(CAM_ISP, "Failed to subscribe BUS error IRQ");
+		return -EFAULT;
+	}
+
 	/* BUS_WR_INPUT_IF_ADDR_SYNC_FRAME_HEADER */
 	cam_io_w_mb(0x0, bus_priv->common_data.mem_base +
 		bus_priv->common_data.common_reg->addr_sync_frame_hdr);
@@ -2745,17 +2849,28 @@
 	struct cam_vfe_bus_ver2_priv    *bus_priv = hw_priv;
 	int                              rc;
 
-	if (!bus_priv || (bus_priv->irq_handle <= 0)) {
+	if (!bus_priv || (bus_priv->irq_handle <= 0) ||
+		(bus_priv->error_irq_handle <= 0)) {
 		CAM_ERR(CAM_ISP, "Error: Invalid args");
 		return -EINVAL;
 	}
 
 	rc = cam_irq_controller_unsubscribe_irq(
+		bus_priv->common_data.bus_irq_controller,
+		bus_priv->error_irq_handle);
+	if (rc)
+		CAM_ERR(CAM_ISP, "Failed to unsubscribe error irq rc=%d", rc);
+
+	bus_priv->error_irq_handle = 0;
+
+	rc = cam_irq_controller_unsubscribe_irq(
 		bus_priv->common_data.vfe_irq_controller,
 		bus_priv->irq_handle);
 	if (rc)
 		CAM_ERR(CAM_ISP, "Failed to unsubscribe irq rc=%d", rc);
 
+	bus_priv->irq_handle = 0;
+
 	return rc;
 }
 
@@ -2777,16 +2892,16 @@
 	}
 
 	switch (cmd_type) {
-	case CAM_VFE_HW_CMD_GET_BUF_UPDATE:
-		rc = cam_vfe_bus_update_buf(priv, cmd_args, arg_size);
+	case CAM_ISP_HW_CMD_GET_BUF_UPDATE:
+		rc = cam_vfe_bus_update_wm(priv, cmd_args, arg_size);
 		break;
-	case CAM_VFE_HW_CMD_GET_HFR_UPDATE:
+	case CAM_ISP_HW_CMD_GET_HFR_UPDATE:
 		rc = cam_vfe_bus_update_hfr(priv, cmd_args, arg_size);
 		break;
-	case CAM_VFE_HW_CMD_GET_SECURE_MODE:
+	case CAM_ISP_HW_CMD_GET_SECURE_MODE:
 		rc = cam_vfe_bus_get_secure_mode(priv, cmd_args, arg_size);
 		break;
-	case CAM_VFE_HW_CMD_STRIPE_UPDATE:
+	case CAM_ISP_HW_CMD_STRIPE_UPDATE:
 		rc = cam_vfe_bus_update_stripe_cfg(priv, cmd_args, arg_size);
 		break;
 	default:
@@ -3006,4 +3121,3 @@
 
 	return rc;
 }
-
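
Several of the bus hunks above round the UMD-supplied plane stride and the DS-port width up to a multiple of 16 with ALIGNUP before programming the write master, warning when the original value was not already aligned. ALIGNUP itself is defined elsewhere in the driver; the sketch below assumes the usual round-up-to-multiple definition and shows the check-and-override flow in isolation:

#include <stdint.h>
#include <stdio.h>

/* Assumed round-up-to-multiple helper; the driver defines its own ALIGNUP. */
#define ALIGNUP(value, align)	((((value) + (align) - 1) / (align)) * (align))

static uint32_t fixup_stride(uint32_t plane_stride)
{
	uint32_t aligned = ALIGNUP(plane_stride, 16);

	if (aligned != plane_stride)
		printf("Warning stride %u expected %u\n",
			plane_stride, aligned);

	return aligned;	/* value actually programmed into the WM */
}

int main(void)
{
	printf("stride 730 -> %u\n", fixup_stride(730));	/* rounded to 736 */
	printf("stride 768 -> %u\n", fixup_stride(768));	/* unchanged */
	return 0;
}
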
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h
index ed7d5fe..5a12f74 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h
@@ -98,6 +98,7 @@
 	uint32_t meta_offset;
 	uint32_t meta_stride;
 	uint32_t mode_cfg;
+	uint32_t bw_limit;
 };
 
 /*
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile
index 0a94746..ac8b497 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile
@@ -4,6 +4,7 @@
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/include
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
index e81a9f2..9848454 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
@@ -16,6 +16,7 @@
 #include "cam_isp_hw_mgr_intf.h"
 #include "cam_isp_hw.h"
 #include "cam_vfe_hw_intf.h"
+#include "cam_vfe_soc.h"
 #include "cam_vfe_top.h"
 #include "cam_vfe_top_ver2.h"
 #include "cam_vfe_camif_ver2.h"
@@ -28,8 +29,10 @@
 	struct cam_vfe_camif_ver2_reg               *camif_reg;
 	struct cam_vfe_top_ver2_reg_offset_common   *common_reg;
 	struct cam_vfe_camif_reg_data               *reg_data;
+	struct cam_hw_soc_info                      *soc_info;
 
 	enum cam_isp_hw_sync_mode          sync_mode;
+	uint32_t                           dsp_mode;
 	uint32_t                           pix_pattern;
 	uint32_t                           first_pixel;
 	uint32_t                           first_line;
@@ -66,11 +69,11 @@
 {
 	uint32_t                          size = 0;
 	uint32_t                          reg_val_pair[2];
-	struct cam_isp_hw_get_cdm_args   *cdm_args = cmd_args;
+	struct cam_isp_hw_get_cmd_update *cdm_args = cmd_args;
 	struct cam_cdm_utils_ops         *cdm_util_ops = NULL;
 	struct cam_vfe_mux_camif_data    *rsrc_data = NULL;
 
-	if (arg_size != sizeof(struct cam_isp_hw_get_cdm_args)) {
+	if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) {
 		CAM_ERR(CAM_ISP, "Invalid cmd size");
 		return -EINVAL;
 	}
@@ -89,9 +92,9 @@
 
 	size = cdm_util_ops->cdm_required_size_reg_random(1);
 	/* since cdm returns dwords, we need to convert it into bytes */
-	if ((size * 4) > cdm_args->size) {
+	if ((size * 4) > cdm_args->cmd.size) {
 		CAM_ERR(CAM_ISP, "buf size:%d is not sufficient, expected: %d",
-			cdm_args->size, size);
+			cdm_args->cmd.size, size);
 		return -EINVAL;
 	}
 
@@ -101,10 +104,10 @@
 	CAM_DBG(CAM_ISP, "CAMIF reg_update_cmd %x offset %x",
 		reg_val_pair[1], reg_val_pair[0]);
 
-	cdm_util_ops->cdm_write_regrandom(cdm_args->cmd_buf_addr,
+	cdm_util_ops->cdm_write_regrandom(cdm_args->cmd.cmd_buf_addr,
 		1, reg_val_pair);
 
-	cdm_args->used_bytes = size * 4;
+	cdm_args->cmd.used_bytes = size * 4;
 
 	return 0;
 }
@@ -113,8 +116,8 @@
 	struct cam_isp_resource_node  *camif_res,
 	void                          *acquire_param)
 {
-	struct cam_vfe_mux_camif_data      *camif_data;
-	struct cam_vfe_acquire_args        *acquire_data;
+	struct cam_vfe_mux_camif_data    *camif_data;
+	struct cam_vfe_acquire_args      *acquire_data;
 
 	int rc = 0;
 
@@ -128,6 +131,7 @@
 
 	camif_data->sync_mode   = acquire_data->vfe_in.sync_mode;
 	camif_data->pix_pattern = acquire_data->vfe_in.in_port->test_pattern;
+	camif_data->dsp_mode    = acquire_data->vfe_in.in_port->dsp_mode;
 	camif_data->first_pixel = acquire_data->vfe_in.in_port->left_start;
 	camif_data->last_pixel  = acquire_data->vfe_in.in_port->left_stop;
 	camif_data->first_line  = acquire_data->vfe_in.in_port->line_start;
@@ -136,6 +140,61 @@
 	return rc;
 }
 
+static int cam_vfe_camif_resource_init(
+	struct cam_isp_resource_node        *camif_res,
+	void *init_args, uint32_t arg_size)
+{
+	struct cam_vfe_mux_camif_data    *camif_data;
+	struct cam_hw_soc_info           *soc_info;
+	int rc = 0;
+
+	if (!camif_res) {
+		CAM_ERR(CAM_ISP, "Error Invalid input arguments");
+		return -EINVAL;
+	}
+
+	camif_data   = (struct cam_vfe_mux_camif_data *)camif_res->res_priv;
+
+	soc_info = camif_data->soc_info;
+
+	if ((camif_data->dsp_mode >= CAM_ISP_DSP_MODE_ONE_WAY) &&
+		(camif_data->dsp_mode <= CAM_ISP_DSP_MODE_ROUND)) {
+		rc = cam_vfe_soc_enable_clk(soc_info, CAM_VFE_DSP_CLK_NAME);
+		if (rc)
+			CAM_ERR(CAM_ISP, "failed to enable dsp clk");
+	}
+
+	return rc;
+}
+
+static int cam_vfe_camif_resource_deinit(
+	struct cam_isp_resource_node        *camif_res,
+	void *init_args, uint32_t arg_size)
+{
+	struct cam_vfe_mux_camif_data    *camif_data;
+	struct cam_hw_soc_info           *soc_info;
+	int rc = 0;
+
+	if (!camif_res) {
+		CAM_ERR(CAM_ISP, "Error Invalid input arguments");
+		return -EINVAL;
+	}
+
+	camif_data   = (struct cam_vfe_mux_camif_data *)camif_res->res_priv;
+
+	soc_info = camif_data->soc_info;
+
+	if ((camif_data->dsp_mode >= CAM_ISP_DSP_MODE_ONE_WAY) &&
+		(camif_data->dsp_mode <= CAM_ISP_DSP_MODE_ROUND)) {
+		rc = cam_vfe_soc_disable_clk(soc_info, CAM_VFE_DSP_CLK_NAME);
+		if (rc)
+			CAM_ERR(CAM_ISP, "failed to disable dsp clk");
+	}
+
+	return rc;
+}
+
 static int cam_vfe_camif_resource_start(
 	struct cam_isp_resource_node        *camif_res)
 {
@@ -161,6 +220,15 @@
 	if (rsrc_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
 		val |= (1 << rsrc_data->reg_data->extern_reg_update_shift);
 
+	if ((rsrc_data->dsp_mode >= CAM_ISP_DSP_MODE_ONE_WAY) &&
+		(rsrc_data->dsp_mode <= CAM_ISP_DSP_MODE_ROUND)) {
+		/* DSP mode reg val is CAM_ISP_DSP_MODE - 1 */
+		val |= (((rsrc_data->dsp_mode - 1) &
+			rsrc_data->reg_data->dsp_mode_mask) <<
+			rsrc_data->reg_data->dsp_mode_shift);
+		val |= (0x1 << rsrc_data->reg_data->dsp_en_shift);
+	}
+
 	cam_io_w_mb(val, rsrc_data->mem_base + rsrc_data->common_reg->core_cfg);
 
 	CAM_DBG(CAM_ISP, "hw id:%d core_cfg val:%d", camif_res->hw_intf->hw_idx,
@@ -194,6 +262,7 @@
 	struct cam_vfe_mux_camif_data       *camif_priv;
 	struct cam_vfe_camif_ver2_reg       *camif_reg;
 	int rc = 0;
+	uint32_t val = 0;
 
 	if (!camif_res) {
 		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
@@ -207,6 +276,15 @@
 	camif_priv = (struct cam_vfe_mux_camif_data *)camif_res->res_priv;
 	camif_reg = camif_priv->camif_reg;
 
+	if ((camif_priv->dsp_mode >= CAM_ISP_DSP_MODE_ONE_WAY) &&
+		(camif_priv->dsp_mode <= CAM_ISP_DSP_MODE_ROUND)) {
+		val = cam_io_r_mb(camif_priv->mem_base +
+				camif_priv->common_reg->core_cfg);
+		val &= (~(1 << camif_priv->reg_data->dsp_en_shift));
+		cam_io_w_mb(val, camif_priv->mem_base +
+			camif_priv->common_reg->core_cfg);
+	}
+
 	if (camif_res->res_state == CAM_ISP_RESOURCE_STATE_STREAMING)
 		camif_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
 
@@ -224,7 +302,7 @@
 	}
 
 	switch (cmd_type) {
-	case CAM_VFE_HW_CMD_GET_REG_UPDATE:
+	case CAM_ISP_HW_CMD_GET_REG_UPDATE:
 		rc = cam_vfe_camif_get_reg_update(rsrc_node, cmd_args,
 			arg_size);
 		break;
@@ -251,6 +329,7 @@
 	struct cam_vfe_mux_camif_data        *camif_priv;
 	struct cam_vfe_top_irq_evt_payload   *payload;
 	uint32_t                              irq_status0;
+	uint32_t                              irq_status1;
 
 	if (!handler_priv || !evt_payload_priv)
 		return ret;
@@ -259,6 +338,7 @@
 	camif_priv = camif_node->res_priv;
 	payload = evt_payload_priv;
 	irq_status0 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
+	irq_status1 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS1];
 
 	CAM_DBG(CAM_ISP, "event ID:%d", payload->evt_id);
 	CAM_DBG(CAM_ISP, "irq_status_0 = %x", irq_status0);
@@ -289,6 +369,15 @@
 			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
 		}
 		break;
+	case CAM_ISP_HW_EVENT_ERROR:
+		if (irq_status1 & camif_priv->reg_data->error_irq_mask1) {
+			CAM_DBG(CAM_ISP, "Received ERROR\n");
+			ret = CAM_ISP_HW_ERROR_OVERFLOW;
+			cam_vfe_put_evt_payload(payload->core_info, &payload);
+		} else {
+			ret = CAM_ISP_HW_ERROR_NONE;
+		}
+		break;
 	default:
 		break;
 	}
@@ -315,14 +404,17 @@
 
 	camif_node->res_priv = camif_priv;
 
-	camif_priv->mem_base   = soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base;
-	camif_priv->camif_reg  = camif_info->camif_reg;
-	camif_priv->common_reg = camif_info->common_reg;
-	camif_priv->reg_data   = camif_info->reg_data;
-	camif_priv->hw_intf    = hw_intf;
+	camif_priv->mem_base    = soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base;
+	camif_priv->camif_reg   = camif_info->camif_reg;
+	camif_priv->common_reg  = camif_info->common_reg;
+	camif_priv->reg_data    = camif_info->reg_data;
+	camif_priv->hw_intf     = hw_intf;
+	camif_priv->soc_info    = soc_info;
 
-	camif_node->start = cam_vfe_camif_resource_start;
-	camif_node->stop  = cam_vfe_camif_resource_stop;
+	camif_node->init    = cam_vfe_camif_resource_init;
+	camif_node->deinit  = cam_vfe_camif_resource_deinit;
+	camif_node->start   = cam_vfe_camif_resource_start;
+	camif_node->stop    = cam_vfe_camif_resource_stop;
 	camif_node->process_cmd = cam_vfe_camif_process_cmd;
 	camif_node->top_half_handler = cam_vfe_camif_handle_irq_top_half;
 	camif_node->bottom_half_handler = cam_vfe_camif_handle_irq_bottom_half;
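
For reference, a minimal standalone sketch of how the CAMIF start path above folds the DSP mode into core_cfg; the shift and mask values here are illustrative placeholders, not the actual VFE register layout.

	/* Standalone sketch; shift/mask values are assumed, not the real register map. */
	#include <stdint.h>
	#include <stdio.h>

	#define DSP_MODE_ONE_WAY  1U
	#define DSP_MODE_ROUND    2U

	static uint32_t camif_core_cfg(uint32_t cfg, uint32_t dsp_mode,
				       uint32_t mode_shift, uint32_t mode_mask,
				       uint32_t en_shift)
	{
		if (dsp_mode >= DSP_MODE_ONE_WAY && dsp_mode <= DSP_MODE_ROUND) {
			/* Register field encodes (mode - 1), mirroring the driver. */
			cfg |= ((dsp_mode - 1) & mode_mask) << mode_shift;
			cfg |= 1U << en_shift;
		}
		return cfg;
	}

	int main(void)
	{
		/* Example: mode field at bits [26:25], enable bit at 23 (assumed). */
		printf("core_cfg = 0x%x\n",
		       (unsigned int)camif_core_cfg(0, DSP_MODE_ROUND, 25, 0x3, 23));
		return 0;
	}
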
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h
index 847b7d5..4a73bd7 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h
@@ -50,12 +50,19 @@
 	uint32_t     pixel_pattern_shift;
 	uint32_t     pixel_pattern_mask;
 
+	uint32_t     dsp_mode_shift;
+	uint32_t     dsp_mode_mask;
+	uint32_t     dsp_en_shift;
+	uint32_t     dsp_en_mask;
+
 	uint32_t     reg_update_cmd_data;
 	uint32_t     epoch_line_cfg;
 	uint32_t     sof_irq_mask;
 	uint32_t     epoch0_irq_mask;
 	uint32_t     reg_update_irq_mask;
 	uint32_t     eof_irq_mask;
+	uint32_t     error_irq_mask0;
+	uint32_t     error_irq_mask1;
 };
 
 struct cam_vfe_camif_ver2_hw_info {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
index 797873c..28e99f2 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
@@ -35,11 +35,11 @@
 {
 	uint32_t                          size = 0;
 	uint32_t                          reg_val_pair[2];
-	struct cam_isp_hw_get_cdm_args   *cdm_args = cmd_args;
+	struct cam_isp_hw_get_cmd_update *cdm_args = cmd_args;
 	struct cam_cdm_utils_ops         *cdm_util_ops = NULL;
 	struct cam_vfe_mux_rdi_data      *rsrc_data = NULL;
 
-	if (arg_size != sizeof(struct cam_isp_hw_get_cdm_args)) {
+	if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) {
 		CAM_ERR(CAM_ISP, "Error - Invalid cmd size");
 		return -EINVAL;
 	}
@@ -57,10 +57,10 @@
 
 	size = cdm_util_ops->cdm_required_size_reg_random(1);
 	/* since cdm returns dwords, we need to convert it into bytes */
-	if ((size * 4) > cdm_args->size) {
+	if ((size * 4) > cdm_args->cmd.size) {
 		CAM_ERR(CAM_ISP,
 			"Error - buf size:%d is not sufficient, expected: %d",
-			cdm_args->size, size * 4);
+			cdm_args->cmd.size, size * 4);
 		return -EINVAL;
 	}
 
@@ -70,9 +70,9 @@
 	CAM_DBG(CAM_ISP, "RDI%d reg_update_cmd %x",
 		rdi_res->res_id - CAM_ISP_HW_VFE_IN_RDI0, reg_val_pair[1]);
 
-	cdm_util_ops->cdm_write_regrandom(cdm_args->cmd_buf_addr,
+	cdm_util_ops->cdm_write_regrandom(cdm_args->cmd.cmd_buf_addr,
 		1, reg_val_pair);
-	cdm_args->used_bytes = size * 4;
+	cdm_args->cmd.used_bytes = size * 4;
 
 	return 0;
 }
@@ -158,7 +158,7 @@
 	}
 
 	switch (cmd_type) {
-	case CAM_VFE_HW_CMD_GET_REG_UPDATE:
+	case CAM_ISP_HW_CMD_GET_REG_UPDATE:
 		rc = cam_vfe_rdi_get_reg_update(rsrc_node, cmd_args,
 			arg_size);
 		break;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
index f87953d..7baac45 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
@@ -18,6 +18,9 @@
 #include "cam_vfe_top_ver2.h"
 #include "cam_debug_util.h"
 
+#define CAM_VFE_HW_RESET_HW_AND_REG_VAL   0x00003F9F
+#define CAM_VFE_HW_RESET_HW_VAL           0x00003F87
+
 struct cam_vfe_top_ver2_common_data {
 	struct cam_hw_soc_info                     *soc_info;
 	struct cam_hw_intf                         *hw_intf;
@@ -35,10 +38,10 @@
 {
 	uint32_t                          size = 0;
 	uint32_t                          mem_base = 0;
-	struct cam_isp_hw_get_cdm_args   *cdm_args  = cmd_args;
+	struct cam_isp_hw_get_cmd_update *cdm_args  = cmd_args;
 	struct cam_cdm_utils_ops         *cdm_util_ops = NULL;
 
-	if (arg_size != sizeof(struct cam_isp_hw_get_cdm_args)) {
+	if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) {
 		CAM_ERR(CAM_ISP, "Error! Invalid cmd size");
 		return -EINVAL;
 	}
@@ -59,9 +62,9 @@
 
 	size = cdm_util_ops->cdm_required_size_changebase();
 	/* since cdm returns dwords, we need to convert it into bytes */
-	if ((size * 4) > cdm_args->size) {
+	if ((size * 4) > cdm_args->cmd.size) {
 		CAM_ERR(CAM_ISP, "buf size:%d is not sufficient, expected: %d",
-			cdm_args->size, size);
+			cdm_args->cmd.size, size);
 		return -EINVAL;
 	}
 
@@ -70,8 +73,9 @@
 	CAM_DBG(CAM_ISP, "core %d mem_base 0x%x",
 		top_priv->common_data.soc_info->index, mem_base);
 
-	cdm_util_ops->cdm_write_changebase(cdm_args->cmd_buf_addr, mem_base);
-	cdm_args->used_bytes = (size * 4);
+	cdm_util_ops->cdm_write_changebase(
+		cdm_args->cmd.cmd_buf_addr, mem_base);
+	cdm_args->cmd.used_bytes = (size * 4);
 
 	return 0;
 }
@@ -80,11 +84,11 @@
 	struct cam_vfe_top_ver2_priv *top_priv,
 	void *cmd_args, uint32_t arg_size)
 {
-	struct cam_isp_hw_get_cdm_args   *cdm_args = cmd_args;
+	struct cam_isp_hw_get_cmd_update  *cmd_update = cmd_args;
 
-	if (cdm_args->res->process_cmd)
-		return cdm_args->res->process_cmd(cdm_args->res,
-			CAM_VFE_HW_CMD_GET_REG_UPDATE, cmd_args, arg_size);
+	if (cmd_update->res->process_cmd)
+		return cmd_update->res->process_cmd(cmd_update->res,
+			CAM_ISP_HW_CMD_GET_REG_UPDATE, cmd_args, arg_size);
 
 	return -EINVAL;
 }
@@ -107,12 +111,24 @@
 	struct cam_vfe_top_ver2_priv   *top_priv = device_priv;
 	struct cam_hw_soc_info         *soc_info = NULL;
 	struct cam_vfe_top_ver2_reg_offset_common *reg_common = NULL;
+	uint32_t *reset_reg_args = reset_core_args;
+	uint32_t reset_reg_val;
 
-	if (!top_priv) {
+	if (!top_priv || !reset_reg_args) {
 		CAM_ERR(CAM_ISP, "Invalid arguments");
 		return -EINVAL;
 	}
 
+	switch (*reset_reg_args) {
+	case CAM_VFE_HW_RESET_HW_AND_REG:
+		reset_reg_val = CAM_VFE_HW_RESET_HW_AND_REG_VAL;
+		break;
+	default:
+		reset_reg_val = CAM_VFE_HW_RESET_HW_VAL;
+		break;
+	}
+
+	CAM_DBG(CAM_ISP, "reset reg value: %x", reset_reg_val);
 	soc_info = top_priv->common_data.soc_info;
 	reg_common = top_priv->common_data.common_reg;
 
@@ -121,7 +137,7 @@
 		CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX) + 0x5C);
 
 	/* Reset HW */
-	cam_io_w_mb(0x00003F9F,
+	cam_io_w_mb(reset_reg_val,
 		CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX) +
 		reg_common->global_reset_cmd);
 
@@ -280,10 +296,10 @@
 	top_priv = (struct cam_vfe_top_ver2_priv *)device_priv;
 
 	switch (cmd_type) {
-	case CAM_VFE_HW_CMD_GET_CHANGE_BASE:
+	case CAM_ISP_HW_CMD_GET_CHANGE_BASE:
 		rc = cam_vfe_top_mux_get_base(top_priv, cmd_args, arg_size);
 		break;
-	case CAM_VFE_HW_CMD_GET_REG_UPDATE:
+	case CAM_ISP_HW_CMD_GET_REG_UPDATE:
 		rc = cam_vfe_top_mux_get_reg_update(top_priv, cmd_args,
 			arg_size);
 		break;
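
A small sketch of the reset-value selection added to the VFE top reset path above; the numeric value of CAM_VFE_HW_RESET_HW_AND_REG is an assumption here, only the two register values come from the patch.

	/* Standalone sketch; the reset-type enum value is assumed. */
	#include <stdint.h>
	#include <stdio.h>

	#define CAM_VFE_HW_RESET_HW_AND_REG      1U
	#define CAM_VFE_HW_RESET_HW_AND_REG_VAL  0x00003F9F
	#define CAM_VFE_HW_RESET_HW_VAL          0x00003F87

	static uint32_t vfe_reset_val(uint32_t reset_type)
	{
		switch (reset_type) {
		case CAM_VFE_HW_RESET_HW_AND_REG:
			return CAM_VFE_HW_RESET_HW_AND_REG_VAL;	/* HW + register reset */
		default:
			return CAM_VFE_HW_RESET_HW_VAL;		/* HW-only reset */
		}
	}

	int main(void)
	{
		printf("0x%x 0x%x\n",
		       (unsigned int)vfe_reset_val(CAM_VFE_HW_RESET_HW_AND_REG),
		       (unsigned int)vfe_reset_val(0));
		return 0;
	}
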
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c
index a299179..6fcd7f6 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c
+++ b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c
@@ -21,6 +21,8 @@
 #include "cam_context_utils.h"
 #include "cam_debug_util.h"
 
+static const char jpeg_dev_name[] = "jpeg";
+
 static int __cam_jpeg_ctx_acquire_dev_in_available(struct cam_context *ctx,
 	struct cam_acquire_dev_cmd *cmd)
 {
@@ -109,8 +111,8 @@
 	for (i = 0; i < CAM_CTX_REQ_MAX; i++)
 		ctx->req_base[i].req_priv = ctx;
 
-	rc = cam_context_init(ctx_base, NULL, hw_intf, ctx->req_base,
-		CAM_CTX_REQ_MAX);
+	rc = cam_context_init(ctx_base, jpeg_dev_name, NULL, hw_intf,
+		ctx->req_base, CAM_CTX_REQ_MAX);
 	if (rc) {
 		CAM_ERR(CAM_JPEG, "Camera Context Base init failed");
 		goto err;
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
index 35c2717..df95100 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
@@ -312,8 +312,6 @@
 	struct cam_jpeg_set_irq_cb irq_cb;
 	struct cam_jpeg_hw_cfg_req *p_cfg_req = NULL;
 	struct cam_hw_done_event_data buf_data;
-	uint32_t size = 0;
-	uint32_t mem_cam_base = 0;
 
 	if (!hw_mgr || !task_data) {
 		CAM_ERR(CAM_JPEG, "Invalid arguments %pK %pK",
@@ -427,35 +425,11 @@
 	cdm_cmd->cookie = 0;
 	cdm_cmd->cmd_arrary_count = 0;
 
-	/* if for backward compat */
-	if (config_args->hw_update_entries[CAM_JPEG_CHBASE].handle) {
-		rc = cam_jpeg_insert_cdm_change_base(config_args,
-			ctx_data, hw_mgr);
-		if (rc) {
-			CAM_ERR(CAM_JPEG, "insert change base failed %d", rc);
-			goto end_callcb;
-		}
-	} else {
-		mem_cam_base = hw_mgr->cdm_reg_map[dev_type][0]->
-			mem_cam_base;
-		size = hw_mgr->cdm_info[dev_type][0].cdm_ops->
-			cdm_required_size_changebase();
-		hw_mgr->cdm_info[dev_type][0].cdm_ops->
-			cdm_write_changebase(ctx_data->cmd_chbase_buf_addr,
-			hw_mgr->cdm_reg_map[dev_type][0]->mem_cam_base);
-		ctx_data->cdm_cmd_chbase->cmd_arrary_count = 1;
-		ctx_data->cdm_cmd_chbase->type =
-			CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA;
-		ctx_data->cdm_cmd_chbase->flag = false;
-		ctx_data->cdm_cmd_chbase->userdata = NULL;
-		ctx_data->cdm_cmd_chbase->cookie = 0;
-		ctx_data->cdm_cmd_chbase->cmd[0].bl_addr.kernel_iova =
-			ctx_data->cmd_chbase_buf_addr;
-		ctx_data->cdm_cmd_chbase->cmd[0].offset = 0;
-		ctx_data->cdm_cmd_chbase->cmd[0].len = size;
-		cam_cdm_submit_bls(hw_mgr->cdm_info[dev_type][0].
-			cdm_handle,
-			ctx_data->cdm_cmd_chbase);
+	rc = cam_jpeg_insert_cdm_change_base(config_args,
+		ctx_data, hw_mgr);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "insert change base failed %d", rc);
+		goto end_callcb;
 	}
 
 	CAM_DBG(CAM_JPEG, "num hw up %d", config_args->num_hw_update_entries);
@@ -659,13 +633,10 @@
 		return -EINVAL;
 	}
 
-	/* if for backward compat */
-	if (packet->kmd_cmd_buf_index != -1) {
-		rc = cam_packet_util_validate_packet(packet);
-		if (rc) {
-			CAM_ERR(CAM_JPEG, "invalid packet %d", rc);
-			return rc;
-		}
+	rc = cam_packet_util_validate_packet(packet);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "invalid packet %d", rc);
+		return rc;
 	}
 
 	if ((packet->num_cmd_buf > 5) || !packet->num_patches ||
@@ -715,16 +686,12 @@
 			i, io_cfg_ptr[i].direction, io_cfg_ptr[i].fence);
 	}
 
+
 	j = prepare_args->num_hw_update_entries;
-	/* if-else for backward compat */
-	if (packet->kmd_cmd_buf_index != -1) {
-		rc = cam_packet_util_get_kmd_buffer(packet, &kmd_buf);
-		if (rc) {
-			CAM_ERR(CAM_JPEG, "get kmd buf failed %d", rc);
-			return rc;
-		}
-	} else {
-		memset(&kmd_buf, 0x0, sizeof(kmd_buf));
+	rc = cam_packet_util_get_kmd_buffer(packet, &kmd_buf);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "get kmd buf failed %d", rc);
+		return rc;
 	}
 	/* fill kmd buf info into 1st hw update entry */
 	prepare_args->hw_update_entries[j].len =
@@ -859,6 +826,11 @@
 	ctx_data->jpeg_dev_acquire_info = jpeg_dev_acquire_info;
 	mutex_unlock(&ctx_data->ctx_mutex);
 
+	if (ctx_data->jpeg_dev_acquire_info.dev_type >=
+		CAM_JPEG_RES_TYPE_MAX) {
+		rc = -EINVAL;
+		goto acq_cdm_hdl_failed;
+	}
 	dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
 	if (!hw_mgr->cdm_info[dev_type][0].ref_cnt) {
 
diff --git a/drivers/media/platform/msm/camera/cam_lrme/Makefile b/drivers/media/platform/msm/camera/cam_lrme/Makefile
new file mode 100644
index 0000000..fba4529
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/Makefile
@@ -0,0 +1,14 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw
+ccflags-y += -Idrivers/media/platform/msm/camera
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += lrme_hw_mgr/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_lrme_dev.o cam_lrme_context.o
diff --git a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c
new file mode 100644
index 0000000..0aa5ade
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c
@@ -0,0 +1,241 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "cam_debug_util.h"
+#include "cam_lrme_context.h"
+
+static int __cam_lrme_ctx_acquire_dev_in_available(struct cam_context *ctx,
+	struct cam_acquire_dev_cmd *cmd)
+{
+	int rc = 0;
+	uint64_t ctxt_to_hw_map = (uint64_t)ctx->ctxt_to_hw_map;
+	struct cam_lrme_context *lrme_ctx = ctx->ctx_priv;
+
+	CAM_DBG(CAM_LRME, "Enter");
+
+	rc = cam_context_acquire_dev_to_hw(ctx, cmd);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to acquire");
+		return rc;
+	}
+
+	ctxt_to_hw_map |= (lrme_ctx->index << CAM_LRME_CTX_INDEX_SHIFT);
+	ctx->ctxt_to_hw_map = (void *)ctxt_to_hw_map;
+
+	ctx->state = CAM_CTX_ACQUIRED;
+
+	return rc;
+}
+
+static int __cam_lrme_ctx_release_dev_in_acquired(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc = 0;
+
+	CAM_DBG(CAM_LRME, "Enter");
+
+	rc = cam_context_release_dev_to_hw(ctx, cmd);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to release");
+		return rc;
+	}
+
+	ctx->state = CAM_CTX_AVAILABLE;
+
+	return rc;
+}
+
+static int __cam_lrme_ctx_start_dev_in_acquired(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc = 0;
+
+	CAM_DBG(CAM_LRME, "Enter");
+
+	rc = cam_context_start_dev_to_hw(ctx, cmd);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to start");
+		return rc;
+	}
+
+	ctx->state = CAM_CTX_ACTIVATED;
+
+	return rc;
+}
+
+static int __cam_lrme_ctx_config_dev_in_activated(struct cam_context *ctx,
+	struct cam_config_dev_cmd *cmd)
+{
+	int rc;
+
+	CAM_DBG(CAM_LRME, "Enter");
+
+	rc = cam_context_prepare_dev_to_hw(ctx, cmd);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to config");
+		return rc;
+	}
+
+	return rc;
+}
+
+static int __cam_lrme_ctx_stop_dev_in_activated(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc = 0;
+
+	CAM_DBG(CAM_LRME, "Enter");
+
+	rc = cam_context_stop_dev_to_hw(ctx);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to stop dev");
+		return rc;
+	}
+
+	ctx->state = CAM_CTX_ACQUIRED;
+
+	return rc;
+}
+
+static int __cam_lrme_ctx_release_dev_in_activated(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc = 0;
+
+	CAM_DBG(CAM_LRME, "Enter");
+
+	rc = __cam_lrme_ctx_stop_dev_in_activated(ctx, NULL);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to stop");
+		return rc;
+	}
+
+	rc = cam_context_release_dev_to_hw(ctx, cmd);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to release");
+		return rc;
+	}
+
+	ctx->state = CAM_CTX_AVAILABLE;
+
+	return rc;
+}
+
+static int __cam_lrme_ctx_handle_irq_in_activated(void *context,
+	uint32_t evt_id, void *evt_data)
+{
+	int rc;
+
+	CAM_DBG(CAM_LRME, "Enter");
+
+	rc = cam_context_buf_done_from_hw(context, evt_data, evt_id);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed in buf done, rc=%d", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+/* top state machine */
+static struct cam_ctx_ops
+	cam_lrme_ctx_state_machine[CAM_CTX_STATE_MAX] = {
+	/* Uninit */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Available */
+	{
+		.ioctl_ops = {
+			.acquire_dev = __cam_lrme_ctx_acquire_dev_in_available,
+		},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Acquired */
+	{
+		.ioctl_ops = {
+			.release_dev = __cam_lrme_ctx_release_dev_in_acquired,
+			.start_dev = __cam_lrme_ctx_start_dev_in_acquired,
+		},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Ready */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Activate */
+	{
+		.ioctl_ops = {
+			.config_dev = __cam_lrme_ctx_config_dev_in_activated,
+			.release_dev = __cam_lrme_ctx_release_dev_in_activated,
+			.stop_dev = __cam_lrme_ctx_stop_dev_in_activated,
+		},
+		.crm_ops = {},
+		.irq_ops = __cam_lrme_ctx_handle_irq_in_activated,
+	},
+};
+
+int cam_lrme_context_init(struct cam_lrme_context *lrme_ctx,
+	struct cam_context *base_ctx,
+	struct cam_hw_mgr_intf *hw_intf,
+	uint64_t index)
+{
+	int rc = 0;
+
+	CAM_DBG(CAM_LRME, "Enter");
+
+	if (!base_ctx || !lrme_ctx) {
+		CAM_ERR(CAM_LRME, "Invalid input");
+		return -EINVAL;
+	}
+
+	memset(lrme_ctx, 0, sizeof(*lrme_ctx));
+
+	rc = cam_context_init(base_ctx, "lrme", NULL, hw_intf,
+		lrme_ctx->req_base, CAM_CTX_REQ_MAX);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to init context");
+		return rc;
+	}
+	lrme_ctx->base = base_ctx;
+	lrme_ctx->index = index;
+	base_ctx->ctx_priv = lrme_ctx;
+	base_ctx->state_machine = cam_lrme_ctx_state_machine;
+
+	return rc;
+}
+
+int cam_lrme_context_deinit(struct cam_lrme_context *lrme_ctx)
+{
+	int rc = 0;
+
+	CAM_DBG(CAM_LRME, "Enter");
+
+	if (!lrme_ctx) {
+		CAM_ERR(CAM_LRME, "No ctx to deinit");
+		return -EINVAL;
+	}
+
+	rc = cam_context_deinit(lrme_ctx->base);
+
+	memset(lrme_ctx, 0, sizeof(*lrme_ctx));
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.h b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.h
new file mode 100644
index 0000000..882f7ac
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_LRME_CONTEXT_H_
+#define _CAM_LRME_CONTEXT_H_
+
+#include "cam_context.h"
+#include "cam_context_utils.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_req_mgr_interface.h"
+#include "cam_sync_api.h"
+
+#define CAM_LRME_CTX_INDEX_SHIFT 32
+
+/**
+ * struct cam_lrme_context
+ *
+ * @base      : Base context pointer for this LRME context
+ * @req_base  : List of base request for this LRME context
+ */
+struct cam_lrme_context {
+	struct cam_context         *base;
+	struct cam_ctx_request      req_base[CAM_CTX_REQ_MAX];
+	uint64_t index;
+};
+
+int cam_lrme_context_init(struct cam_lrme_context *lrme_ctx,
+	struct cam_context *base_ctx, struct cam_hw_mgr_intf *hw_intf,
+	uint64_t index);
+int cam_lrme_context_deinit(struct cam_lrme_context *lrme_ctx);
+
+#endif /* _CAM_LRME_CONTEXT_H_ */
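
The LRME code packs several identifiers into the opaque ctxt_to_hw_map handle: the context index is shifted up by CAM_LRME_CTX_INDEX_SHIFT (32) and the device index sits in the low bits. The exact field widths used by the DECODE macros are not shown in this patch, so the sketch below assumes a 4-bit device field and a 32-bit context shift.

	/* Standalone sketch; the 4-bit device field and bit-32 context field are assumed. */
	#include <stdint.h>
	#include <stdio.h>

	#define CTX_INDEX_SHIFT   32
	#define DEVICE_INDEX_MASK 0xFULL

	static uint64_t pack_handle(uint64_t dev_index, uint64_t ctx_index)
	{
		return (dev_index & DEVICE_INDEX_MASK) | (ctx_index << CTX_INDEX_SHIFT);
	}

	int main(void)
	{
		uint64_t h = pack_handle(2, 5);

		printf("dev=%llu ctx=%llu\n",
		       (unsigned long long)(h & DEVICE_INDEX_MASK),
		       (unsigned long long)(h >> CTX_INDEX_SHIFT));
		return 0;
	}
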
diff --git a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_dev.c b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_dev.c
new file mode 100644
index 0000000..5be16ef
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_dev.c
@@ -0,0 +1,233 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "cam_subdev.h"
+#include "cam_node.h"
+#include "cam_lrme_context.h"
+#include "cam_lrme_hw_mgr.h"
+#include "cam_lrme_hw_mgr_intf.h"
+
+#define CAM_LRME_DEV_NAME "cam-lrme"
+
+/**
+ * struct cam_lrme_dev
+ *
+ * @sd       : Subdev information
+ * @ctx      : List of base contexts
+ * @lrme_ctx : List of LRME contexts
+ * @lock     : Mutex for LRME subdev
+ * @open_cnt : Open count of LRME subdev
+ */
+struct cam_lrme_dev {
+	struct cam_subdev        sd;
+	struct cam_context       ctx[CAM_CTX_MAX];
+	struct cam_lrme_context  lrme_ctx[CAM_CTX_MAX];
+	struct mutex             lock;
+	uint32_t                 open_cnt;
+};
+
+static struct cam_lrme_dev *g_lrme_dev;
+
+static int cam_lrme_dev_buf_done_cb(void *ctxt_to_hw_map, uint32_t evt_id,
+	void *evt_data)
+{
+	uint64_t index;
+	struct cam_context *ctx;
+	int rc;
+
+	index = CAM_LRME_DECODE_CTX_INDEX(ctxt_to_hw_map);
+	CAM_DBG(CAM_LRME, "ctx index %llu, evt_id %u\n", index, evt_id);
+	ctx = &g_lrme_dev->ctx[index];
+	rc = ctx->irq_cb_intf(ctx, evt_id, evt_data);
+	if (rc)
+		CAM_ERR(CAM_LRME, "irq callback failed");
+
+	return rc;
+}
+
+static int cam_lrme_dev_open(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct cam_lrme_dev *lrme_dev = g_lrme_dev;
+
+	if (!lrme_dev) {
+		CAM_ERR(CAM_LRME,
+			"LRME Dev not initialized, dev=%pK", lrme_dev);
+		return -ENODEV;
+	}
+
+	mutex_lock(&lrme_dev->lock);
+	lrme_dev->open_cnt++;
+	mutex_unlock(&lrme_dev->lock);
+
+	return 0;
+}
+
+static int cam_lrme_dev_close(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct cam_lrme_dev *lrme_dev = g_lrme_dev;
+	struct cam_node *node = v4l2_get_subdevdata(sd);
+
+	if (!lrme_dev) {
+		CAM_ERR(CAM_LRME, "Invalid args");
+		return -ENODEV;
+	}
+
+	mutex_lock(&lrme_dev->lock);
+	lrme_dev->open_cnt--;
+	mutex_unlock(&lrme_dev->lock);
+
+	if (!node) {
+		CAM_ERR(CAM_LRME, "Node is NULL");
+		return -EINVAL;
+	}
+
+	if (lrme_dev->open_cnt == 0)
+		cam_node_shutdown(node);
+
+	return 0;
+}
+
+static const struct v4l2_subdev_internal_ops cam_lrme_subdev_internal_ops = {
+	.open = cam_lrme_dev_open,
+	.close = cam_lrme_dev_close,
+};
+
+static int cam_lrme_dev_probe(struct platform_device *pdev)
+{
+	int rc;
+	int i;
+	struct cam_hw_mgr_intf hw_mgr_intf;
+	struct cam_node *node;
+
+	g_lrme_dev = kzalloc(sizeof(struct cam_lrme_dev), GFP_KERNEL);
+	if (!g_lrme_dev) {
+		CAM_ERR(CAM_LRME, "No memory");
+		return -ENOMEM;
+	}
+	g_lrme_dev->sd.internal_ops = &cam_lrme_subdev_internal_ops;
+
+	mutex_init(&g_lrme_dev->lock);
+
+	rc = cam_subdev_probe(&g_lrme_dev->sd, pdev, CAM_LRME_DEV_NAME,
+		CAM_LRME_DEVICE_TYPE);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "LRME cam_subdev_probe failed");
+		goto free_mem;
+	}
+	node = (struct cam_node *)g_lrme_dev->sd.token;
+
+	rc = cam_lrme_hw_mgr_init(&hw_mgr_intf, cam_lrme_dev_buf_done_cb);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Can not initialized LRME HW manager");
+		goto unregister;
+	}
+
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		rc = cam_lrme_context_init(&g_lrme_dev->lrme_ctx[i],
+				&g_lrme_dev->ctx[i],
+				&node->hw_mgr_intf, i);
+		if (rc) {
+			CAM_ERR(CAM_LRME, "LRME context init failed");
+			goto deinit_ctx;
+		}
+	}
+
+	rc = cam_node_init(node, &hw_mgr_intf, g_lrme_dev->ctx, CAM_CTX_MAX,
+		CAM_LRME_DEV_NAME);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "LRME node init failed");
+		goto deinit_ctx;
+	}
+
+	CAM_DBG(CAM_LRME, "%s probe complete", g_lrme_dev->sd.name);
+
+	return 0;
+
+deinit_ctx:
+	for (--i; i >= 0; i--) {
+		if (cam_lrme_context_deinit(&g_lrme_dev->lrme_ctx[i]))
+			CAM_ERR(CAM_LRME, "LRME context %d deinit failed", i);
+	}
+unregister:
+	if (cam_subdev_remove(&g_lrme_dev->sd))
+		CAM_ERR(CAM_LRME, "Failed in subdev remove");
+free_mem:
+	kfree(g_lrme_dev);
+
+	return rc;
+}
+
+static int cam_lrme_dev_remove(struct platform_device *pdev)
+{
+	int i;
+	int rc = 0;
+
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		rc = cam_lrme_context_deinit(&g_lrme_dev->lrme_ctx[i]);
+		if (rc)
+			CAM_ERR(CAM_LRME, "LRME context %d deinit failed", i);
+	}
+
+	rc = cam_lrme_hw_mgr_deinit();
+	if (rc)
+		CAM_ERR(CAM_LRME, "Failed in hw mgr deinit, rc=%d", rc);
+
+	rc = cam_subdev_remove(&g_lrme_dev->sd);
+	if (rc)
+		CAM_ERR(CAM_LRME, "Unregister failed");
+
+	mutex_destroy(&g_lrme_dev->lock);
+	kfree(g_lrme_dev);
+	g_lrme_dev = NULL;
+
+	return rc;
+}
+
+static const struct of_device_id cam_lrme_dt_match[] = {
+	{
+		.compatible = "qcom,cam-lrme"
+	},
+	{}
+};
+
+static struct platform_driver cam_lrme_driver = {
+	.probe = cam_lrme_dev_probe,
+	.remove = cam_lrme_dev_remove,
+	.driver = {
+		.name = "cam_lrme",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_lrme_dt_match,
+	},
+};
+
+static int __init cam_lrme_dev_init_module(void)
+{
+	return platform_driver_register(&cam_lrme_driver);
+}
+
+static void __exit cam_lrme_dev_exit_module(void)
+{
+	platform_driver_unregister(&cam_lrme_driver);
+}
+
+module_init(cam_lrme_dev_init_module);
+module_exit(cam_lrme_dev_exit_module);
+MODULE_DESCRIPTION("MSM LRME driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/Makefile b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/Makefile
new file mode 100644
index 0000000..e4c8e0d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/Makefile
@@ -0,0 +1,14 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw
+ccflags-y += -Idrivers/media/platform/msm/camera
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += lrme_hw/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_lrme_hw_mgr.o
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
new file mode 100644
index 0000000..448086d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
@@ -0,0 +1,1034 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <media/cam_cpas.h>
+#include <media/cam_req_mgr.h>
+
+#include "cam_io_util.h"
+#include "cam_soc_util.h"
+#include "cam_mem_mgr_api.h"
+#include "cam_smmu_api.h"
+#include "cam_packet_util.h"
+#include "cam_lrme_context.h"
+#include "cam_lrme_hw_intf.h"
+#include "cam_lrme_hw_core.h"
+#include "cam_lrme_hw_soc.h"
+#include "cam_lrme_hw_mgr_intf.h"
+#include "cam_lrme_hw_mgr.h"
+
+static struct cam_lrme_hw_mgr g_lrme_hw_mgr;
+
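+/*
+ * Device reservation policy (descriptive note): prefer a device with no
+ * attached contexts; otherwise fall back to the device currently carrying
+ * the fewest contexts, and bump its context count under hw_mgr_mutex.
+ */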
+static int cam_lrme_mgr_util_reserve_device(struct cam_lrme_hw_mgr *hw_mgr,
+	struct cam_lrme_acquire_args *lrme_acquire_args)
+{
+	int i, index = 0;
+	uint32_t min_ctx = UINT_MAX;
+	struct cam_lrme_device *hw_device = NULL;
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	if (!hw_mgr->device_count) {
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		CAM_ERR(CAM_LRME, "No device is registered");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < hw_mgr->device_count && i < CAM_LRME_HW_MAX; i++) {
+		hw_device = &hw_mgr->hw_device[i];
+		if (!hw_device->num_context) {
+			index = i;
+			break;
+		}
+		if (hw_device->num_context < min_ctx) {
+			min_ctx = hw_device->num_context;
+			index = i;
+		}
+	}
+
+	hw_device = &hw_mgr->hw_device[index];
+	hw_device->num_context++;
+
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	CAM_DBG(CAM_LRME, "reserve device index %d", index);
+
+	return index;
+}
+
+static int cam_lrme_mgr_util_get_device(struct cam_lrme_hw_mgr *hw_mgr,
+	uint32_t device_index, struct cam_lrme_device **hw_device)
+{
+	if (!hw_mgr) {
+		CAM_ERR(CAM_LRME, "invalid params hw_mgr %pK", hw_mgr);
+		return -EINVAL;
+	}
+
+	if (device_index >= CAM_LRME_HW_MAX) {
+		CAM_ERR(CAM_LRME, "Wrong device index %d", device_index);
+		return -EINVAL;
+	}
+
+	*hw_device = &hw_mgr->hw_device[device_index];
+
+	return 0;
+}
+
+static int cam_lrme_mgr_util_packet_validate(struct cam_packet *packet)
+{
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	int i, rc;
+
+	if (!packet) {
+		CAM_ERR(CAM_LRME, "Invalid args");
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_LRME, "Packet request=%d, op_code=0x%x, size=%d, flags=%d",
+		packet->header.request_id, packet->header.op_code,
+		packet->header.size, packet->header.flags);
+	CAM_DBG(CAM_LRME,
+		"Packet cmdbuf(offset=%d, num=%d) io(offset=%d, num=%d)",
+		packet->cmd_buf_offset, packet->num_cmd_buf,
+		packet->io_configs_offset, packet->num_io_configs);
+	CAM_DBG(CAM_LRME,
+		"Packet Patch(offset=%d, num=%d) kmd(offset=%d, num=%d)",
+		packet->patch_offset, packet->num_patches,
+		packet->kmd_cmd_buf_offset, packet->kmd_cmd_buf_index);
+
+	if (cam_packet_util_validate_packet(packet)) {
+		CAM_ERR(CAM_LRME, "invalid packet:%d %d %d %d %d",
+			packet->kmd_cmd_buf_index,
+			packet->num_cmd_buf, packet->cmd_buf_offset,
+			packet->io_configs_offset, packet->header.size);
+		return -EINVAL;
+	}
+
+	if (!packet->num_io_configs) {
+		CAM_ERR(CAM_LRME, "no io configs");
+		return -EINVAL;
+	}
+
+	cmd_desc = (struct cam_cmd_buf_desc *)((uint8_t *)&packet->payload +
+		packet->cmd_buf_offset);
+
+	for (i = 0; i < packet->num_cmd_buf; i++) {
+		if (!cmd_desc[i].length)
+			continue;
+
+		CAM_DBG(CAM_LRME,
+			"CmdBuf[%d] hdl=%d, offset=%d, size=%d, len=%d, type=%d, meta_data=%d",
+			i,
+			cmd_desc[i].mem_handle, cmd_desc[i].offset,
+			cmd_desc[i].size, cmd_desc[i].length, cmd_desc[i].type,
+			cmd_desc[i].meta_data);
+
+		rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]);
+		if (rc) {
+			CAM_ERR(CAM_LRME, "Invalid cmd buffer %d", i);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
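+/*
+ * Descriptive note: walks the packet's io_configs, maps each plane's memory
+ * handle through the IOMMU handle, applies the per-plane offset, and records
+ * the result as an input or output buffer plus a sync fence map entry.
+ */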
+static int cam_lrme_mgr_util_prepare_io_buffer(int32_t iommu_hdl,
+	struct cam_hw_prepare_update_args *prepare,
+	struct cam_lrme_hw_io_buffer *input_buf,
+	struct cam_lrme_hw_io_buffer *output_buf, uint32_t io_buf_size)
+{
+	int rc = -EINVAL;
+	uint32_t num_in_buf, num_out_buf, i, j, plane;
+	struct cam_buf_io_cfg *io_cfg;
+	uint64_t io_addr[CAM_PACKET_MAX_PLANES];
+	size_t size;
+
+	num_in_buf = 0;
+	num_out_buf = 0;
+	io_cfg = (struct cam_buf_io_cfg *)((uint8_t *)
+		 &prepare->packet->payload +
+		 prepare->packet->io_configs_offset);
+
+	for (i = 0; i < prepare->packet->num_io_configs; i++) {
+		CAM_DBG(CAM_LRME,
+			"IOConfig[%d] : handle[%d] Dir[%d] Res[%d] Fence[%d], Format[%d]",
+			i, io_cfg[i].mem_handle[0], io_cfg[i].direction,
+			io_cfg[i].resource_type,
+			io_cfg[i].fence, io_cfg[i].format);
+
+		if ((num_in_buf >= io_buf_size) ||
+			(num_out_buf >= io_buf_size)) {
+			CAM_ERR(CAM_LRME, "Invalid number of buffers %d %d %d",
+				num_in_buf, num_out_buf, io_buf_size);
+			return -EINVAL;
+		}
+
+		memset(io_addr, 0, sizeof(io_addr));
+		for (plane = 0; plane < CAM_PACKET_MAX_PLANES; plane++) {
+			if (!io_cfg[i].mem_handle[plane])
+				break;
+
+			rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[plane],
+				iommu_hdl, &io_addr[plane], &size);
+			if (rc) {
+				CAM_ERR(CAM_LRME, "Cannot get io buf for %d %d",
+					plane, rc);
+				return -ENOMEM;
+			}
+
+			io_addr[plane] += io_cfg[i].offsets[plane];
+
+			if (io_addr[plane] >> 32) {
+				CAM_ERR(CAM_LRME, "Invalid io addr for %d %d",
+					plane, rc);
+				return -ENOMEM;
+			}
+
+			CAM_DBG(CAM_LRME, "IO Address[%d][%d] : %llu",
+				io_cfg[i].direction, plane, io_addr[plane]);
+		}
+
+		switch (io_cfg[i].direction) {
+		case CAM_BUF_INPUT: {
+			prepare->in_map_entries[num_in_buf].resource_handle =
+				io_cfg[i].resource_type;
+			prepare->in_map_entries[num_in_buf].sync_id =
+				io_cfg[i].fence;
+
+			input_buf[num_in_buf].valid = true;
+			for (j = 0; j < plane; j++)
+				input_buf[num_in_buf].io_addr[j] = io_addr[j];
+			input_buf[num_in_buf].num_plane = plane;
+			input_buf[num_in_buf].io_cfg = &io_cfg[i];
+
+			num_in_buf++;
+			break;
+		}
+		case CAM_BUF_OUTPUT: {
+			prepare->out_map_entries[num_out_buf].resource_handle =
+				io_cfg[i].resource_type;
+			prepare->out_map_entries[num_out_buf].sync_id =
+				io_cfg[i].fence;
+
+			output_buf[num_out_buf].valid = true;
+			for (j = 0; j < plane; j++)
+				output_buf[num_out_buf].io_addr[j] = io_addr[j];
+			output_buf[num_out_buf].num_plane = plane;
+			output_buf[num_out_buf].io_cfg = &io_cfg[i];
+
+			num_out_buf++;
+			break;
+		}
+		default:
+			CAM_ERR(CAM_LRME, "Unsupported io direction %d",
+				io_cfg[i].direction);
+			return -EINVAL;
+		}
+	}
+	prepare->num_in_map_entries = num_in_buf;
+	prepare->num_out_map_entries = num_out_buf;
+
+	return 0;
+}
+
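+/*
+ * HW update entry layout (descriptive note): entry 0, when config_buf_size is
+ * non-zero, carries the KMD buffer region written via
+ * CAM_LRME_HW_CMD_PREPARE_HW_UPDATE; the remaining entries are the packet's
+ * non-empty command buffers.
+ */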
+static int cam_lrme_mgr_util_prepare_hw_update_entries(
+	struct cam_lrme_hw_mgr *hw_mgr,
+	struct cam_hw_prepare_update_args *prepare,
+	struct cam_lrme_hw_cmd_config_args *config_args,
+	struct cam_kmd_buf_info *kmd_buf_info)
+{
+	int i, rc = 0;
+	struct cam_lrme_device *hw_device = NULL;
+	uint32_t *kmd_buf_addr;
+	uint32_t num_entry;
+	uint32_t kmd_buf_max_size;
+	uint32_t kmd_buf_used_bytes = 0;
+	struct cam_hw_update_entry *hw_entry;
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+
+	hw_device = config_args->hw_device;
+	if (!hw_device) {
+		CAM_ERR(CAM_LRME, "Invalid hw_device");
+		return -EINVAL;
+	}
+
+	kmd_buf_addr = (uint32_t *)((uint8_t *)kmd_buf_info->cpu_addr +
+		kmd_buf_info->used_bytes);
+	kmd_buf_max_size = kmd_buf_info->size - kmd_buf_info->used_bytes;
+
+	config_args->cmd_buf_addr = kmd_buf_addr;
+	config_args->size = kmd_buf_max_size;
+	config_args->config_buf_size = 0;
+
+	if (hw_device->hw_intf.hw_ops.process_cmd) {
+		rc = hw_device->hw_intf.hw_ops.process_cmd(
+			hw_device->hw_intf.hw_priv,
+			CAM_LRME_HW_CMD_PREPARE_HW_UPDATE,
+			config_args,
+			sizeof(struct cam_lrme_hw_cmd_config_args));
+		if (rc) {
+			CAM_ERR(CAM_LRME,
+				"Failed in CMD_PREPARE_HW_UPDATE %d", rc);
+			return rc;
+		}
+	} else {
+		CAM_ERR(CAM_LRME, "Can't find handle function");
+		return -EINVAL;
+	}
+
+	kmd_buf_used_bytes += config_args->config_buf_size;
+
+	if (!kmd_buf_used_bytes || (kmd_buf_used_bytes > kmd_buf_max_size)) {
+		CAM_ERR(CAM_LRME, "Invalid kmd used bytes %d (%d)",
+			kmd_buf_used_bytes, kmd_buf_max_size);
+		return -ENOMEM;
+	}
+
+	hw_entry = prepare->hw_update_entries;
+	num_entry = 0;
+
+	if (config_args->config_buf_size) {
+		if ((num_entry + 1) >= prepare->max_hw_update_entries) {
+			CAM_ERR(CAM_LRME, "Insufficient  HW entries :%d %d",
+				num_entry, prepare->max_hw_update_entries);
+			return -EINVAL;
+		}
+
+		hw_entry[num_entry].handle = kmd_buf_info->handle;
+		hw_entry[num_entry].len = config_args->config_buf_size;
+		hw_entry[num_entry].offset = kmd_buf_info->offset;
+
+		kmd_buf_info->used_bytes += config_args->config_buf_size;
+		kmd_buf_info->offset += config_args->config_buf_size;
+		num_entry++;
+	}
+
+	cmd_desc = (struct cam_cmd_buf_desc *)((uint8_t *)
+		&prepare->packet->payload + prepare->packet->cmd_buf_offset);
+
+	for (i = 0; i < prepare->packet->num_cmd_buf; i++) {
+		if (!cmd_desc[i].length)
+			continue;
+
+		if ((num_entry + 1) >= prepare->max_hw_update_entries) {
+			CAM_ERR(CAM_LRME, "Exceed max num of entry");
+			return -EINVAL;
+		}
+
+		hw_entry[num_entry].handle = cmd_desc[i].mem_handle;
+		hw_entry[num_entry].len = cmd_desc[i].length;
+		hw_entry[num_entry].offset = cmd_desc[i].offset;
+		num_entry++;
+	}
+	prepare->num_hw_update_entries = num_entry;
+
+	CAM_DBG(CAM_LRME, "FinalConfig : hw_entries=%d, Sync(in=%d, out=%d)",
+		prepare->num_hw_update_entries, prepare->num_in_map_entries,
+		prepare->num_out_map_entries);
+
+	return rc;
+}
+
+static void cam_lrme_mgr_util_put_frame_req(
+	struct list_head *src_list,
+	struct list_head *list,
+	spinlock_t *lock)
+{
+	spin_lock(lock);
+	list_add_tail(list, src_list);
+	spin_unlock(lock);
+}
+
+static int cam_lrme_mgr_util_get_frame_req(
+	struct list_head *src_list,
+	struct cam_lrme_frame_request **frame_req,
+	spinlock_t *lock)
+{
+	int rc = 0;
+	struct cam_lrme_frame_request *req_ptr = NULL;
+
+	spin_lock(lock);
+	if (!list_empty(src_list)) {
+		req_ptr = list_first_entry(src_list,
+			struct cam_lrme_frame_request, frame_list);
+		list_del_init(&req_ptr->frame_list);
+	} else {
+		rc = -ENOENT;
+	}
+	*frame_req = req_ptr;
+	spin_unlock(lock);
+
+	return rc;
+}
+
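+/*
+ * Submission order (descriptive note): drain the high-priority pending list
+ * first, then the normal list; if the HW submit fails (or reports -EBUSY) the
+ * request is put back at the head of the list it came from.
+ */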
+static int cam_lrme_mgr_util_submit_req(void *priv, void *data)
+{
+	struct cam_lrme_device *hw_device;
+	struct cam_lrme_hw_mgr *hw_mgr;
+	struct cam_lrme_frame_request *frame_req = NULL;
+	struct cam_lrme_hw_submit_args submit_args;
+	struct cam_lrme_mgr_work_data *work_data;
+	int rc;
+	int req_prio = 0;
+
+	if (!priv) {
+		CAM_ERR(CAM_LRME, "worker doesn't have private data");
+		return -EINVAL;
+	}
+
+	hw_mgr = (struct cam_lrme_hw_mgr *)priv;
+	work_data = (struct cam_lrme_mgr_work_data *)data;
+	hw_device = work_data->hw_device;
+
+	rc = cam_lrme_mgr_util_get_frame_req(&hw_device->
+		frame_pending_list_high, &frame_req, &hw_device->high_req_lock);
+
+	if (!frame_req) {
+		rc = cam_lrme_mgr_util_get_frame_req(&hw_device->
+				frame_pending_list_normal, &frame_req,
+				&hw_device->normal_req_lock);
+		if (frame_req)
+			req_prio = 1;
+	}
+
+	if (!frame_req) {
+		CAM_DBG(CAM_LRME, "No pending request");
+		return 0;
+	}
+
+	if (hw_device->hw_intf.hw_ops.process_cmd) {
+		submit_args.hw_update_entries = frame_req->hw_update_entries;
+		submit_args.num_hw_update_entries =
+			frame_req->num_hw_update_entries;
+		submit_args.frame_req = frame_req;
+
+		rc = hw_device->hw_intf.hw_ops.process_cmd(
+			hw_device->hw_intf.hw_priv,
+			CAM_LRME_HW_CMD_SUBMIT,
+			&submit_args, sizeof(struct cam_lrme_hw_submit_args));
+
+		if (rc == -EBUSY)
+			CAM_DBG(CAM_LRME, "device busy");
+		else if (rc)
+			CAM_ERR(CAM_LRME, "submit request failed rc %d", rc);
+		if (rc) {
+			req_prio == 0 ? spin_lock(&hw_device->high_req_lock) :
+				spin_lock(&hw_device->normal_req_lock);
+			list_add(&frame_req->frame_list,
+				(req_prio == 0 ?
+				 &hw_device->frame_pending_list_high :
+				 &hw_device->frame_pending_list_normal));
+			req_prio == 0 ? spin_unlock(&hw_device->high_req_lock) :
+				spin_unlock(&hw_device->normal_req_lock);
+		}
+		if (rc == -EBUSY)
+			rc = 0;
+	} else {
+		req_prio == 0 ? spin_lock(&hw_device->high_req_lock) :
+			spin_lock(&hw_device->normal_req_lock);
+		list_add(&frame_req->frame_list,
+			(req_prio == 0 ?
+			 &hw_device->frame_pending_list_high :
+			 &hw_device->frame_pending_list_normal));
+		req_prio == 0 ? spin_unlock(&hw_device->high_req_lock) :
+			spin_unlock(&hw_device->normal_req_lock);
+		rc = -EINVAL;
+	}
+
+	CAM_DBG(CAM_LRME, "End of submit, rc %d", rc);
+
+	return rc;
+}
+
+static int cam_lrme_mgr_util_schedule_frame_req(
+	struct cam_lrme_hw_mgr *hw_mgr, struct cam_lrme_device *hw_device)
+{
+	int rc = 0;
+	struct crm_workq_task *task;
+	struct cam_lrme_mgr_work_data *work_data;
+
+	task = cam_req_mgr_workq_get_task(hw_device->work);
+	if (!task) {
+		CAM_ERR(CAM_LRME, "Can not get task for worker");
+		return -ENOMEM;
+	}
+
+	work_data = (struct cam_lrme_mgr_work_data *)task->payload;
+	work_data->hw_device = hw_device;
+
+	task->process_cb = cam_lrme_mgr_util_submit_req;
+	CAM_DBG(CAM_LRME, "enqueue submit task");
+	rc = cam_req_mgr_workq_enqueue_task(task, hw_mgr, CRM_TASK_PRIORITY_0);
+
+	return rc;
+}
+
+static int cam_lrme_mgr_util_release(struct cam_lrme_hw_mgr *hw_mgr,
+	uint32_t device_index)
+{
+	int rc = 0;
+	struct cam_lrme_device *hw_device;
+
+	rc = cam_lrme_mgr_util_get_device(hw_mgr, device_index, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Error in getting device %d", rc);
+		return rc;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	hw_device->num_context--;
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	return rc;
+}
+
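+/*
+ * HW callback handling (descriptive note): CB_PUT_FRAME recycles the frame
+ * request to the free list, CB_COMP_REG_UPDATE is informational, and
+ * BUF_DONE/ERROR complete the frame via event_cb before scheduling the next
+ * pending request on this device.
+ */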
+static int cam_lrme_mgr_cb(void *data,
+	struct cam_lrme_hw_cb_args *cb_args)
+{
+	struct cam_lrme_hw_mgr *hw_mgr = &g_lrme_hw_mgr;
+	int rc = 0;
+	bool frame_abort = true;
+	struct cam_lrme_frame_request *frame_req;
+	struct cam_lrme_device *hw_device;
+
+	if (!data || !cb_args) {
+		CAM_ERR(CAM_LRME, "Invalid input args");
+		return -EINVAL;
+	}
+
+	hw_device = (struct cam_lrme_device *)data;
+	frame_req = cb_args->frame_req;
+
+	if (cb_args->cb_type & CAM_LRME_CB_PUT_FRAME) {
+		memset(frame_req, 0x0, sizeof(*frame_req));
+		INIT_LIST_HEAD(&frame_req->frame_list);
+		cam_lrme_mgr_util_put_frame_req(&hw_mgr->frame_free_list,
+				&frame_req->frame_list,
+				&hw_mgr->free_req_lock);
+		cb_args->cb_type &= ~CAM_LRME_CB_PUT_FRAME;
+		frame_req = NULL;
+	}
+
+	if (cb_args->cb_type & CAM_LRME_CB_COMP_REG_UPDATE) {
+		cb_args->cb_type &= ~CAM_LRME_CB_COMP_REG_UPDATE;
+		CAM_DBG(CAM_LRME, "Reg update");
+	}
+
+	if (!frame_req)
+		return rc;
+
+	if (cb_args->cb_type & CAM_LRME_CB_BUF_DONE) {
+		cb_args->cb_type &= ~CAM_LRME_CB_BUF_DONE;
+		frame_abort = false;
+	} else if (cb_args->cb_type & CAM_LRME_CB_ERROR) {
+		cb_args->cb_type &= ~CAM_LRME_CB_ERROR;
+		frame_abort = true;
+	} else {
+		CAM_ERR(CAM_LRME, "Wrong cb type %d, req %lld",
+			cb_args->cb_type, frame_req->req_id);
+		return -EINVAL;
+	}
+
+	if (hw_mgr->event_cb) {
+		struct cam_hw_done_event_data buf_data;
+
+		buf_data.request_id = frame_req->req_id;
+		CAM_DBG(CAM_LRME, "frame req %llu, frame_abort %d",
+			frame_req->req_id, frame_abort);
+		rc = hw_mgr->event_cb(frame_req->ctxt_to_hw_map,
+			frame_abort, &buf_data);
+	} else {
+		CAM_ERR(CAM_LRME, "No cb function");
+	}
+	memset(frame_req, 0x0, sizeof(*frame_req));
+	INIT_LIST_HEAD(&frame_req->frame_list);
+	cam_lrme_mgr_util_put_frame_req(&hw_mgr->frame_free_list,
+				&frame_req->frame_list,
+				&hw_mgr->free_req_lock);
+
+	rc = cam_lrme_mgr_util_schedule_frame_req(hw_mgr, hw_device);
+
+	return rc;
+}
+
+static int cam_lrme_mgr_get_caps(void *hw_mgr_priv, void *hw_get_caps_args)
+{
+	int rc = 0;
+	struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_query_cap_cmd *args = hw_get_caps_args;
+
+	if (sizeof(struct cam_lrme_query_cap_cmd) != args->size) {
+		CAM_ERR(CAM_LRME,
+			"sizeof(struct cam_query_cap_cmd) = %lu, args->size = %d",
+			sizeof(struct cam_query_cap_cmd), args->size);
+		return -EFAULT;
+	}
+
+	if (copy_to_user((void __user *)args->caps_handle, &(hw_mgr->lrme_caps),
+		sizeof(struct cam_lrme_query_cap_cmd))) {
+		CAM_ERR(CAM_LRME, "copy to user failed");
+		return -EFAULT;
+	}
+
+	return rc;
+}
+
+static int cam_lrme_mgr_hw_acquire(void *hw_mgr_priv, void *hw_acquire_args)
+{
+	struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_acquire_args *args =
+		(struct cam_hw_acquire_args *)hw_acquire_args;
+	struct cam_lrme_acquire_args lrme_acquire_args;
+	uint64_t device_index;
+
+	if (!hw_mgr_priv || !args) {
+		CAM_ERR(CAM_LRME,
+		"Invalid input params hw_mgr_priv %pK, acquire_args %pK",
+		hw_mgr_priv, args);
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&lrme_acquire_args,
+		(void __user *)args->acquire_info,
+		sizeof(struct cam_lrme_acquire_args))) {
+		CAM_ERR(CAM_LRME, "Failed to copy acquire args from user");
+		return -EFAULT;
+	}
+
+	device_index = cam_lrme_mgr_util_reserve_device(hw_mgr,
+		&lrme_acquire_args);
+	CAM_DBG(CAM_LRME, "Get device id %llu", device_index);
+
+	if (device_index >= hw_mgr->device_count) {
+		CAM_ERR(CAM_LRME, "Get wrong device id %llu", device_index);
+		return -EINVAL;
+	}
+
+	/* device_index occupies the lower 4 bits of ctxt_to_hw_map */
+	args->ctxt_to_hw_map = (void *)device_index;
+
+	return 0;
+}
+
+static int cam_lrme_mgr_hw_release(void *hw_mgr_priv, void *hw_release_args)
+{
+	int rc = 0;
+	struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_release_args *args =
+		(struct cam_hw_release_args *)hw_release_args;
+	uint64_t device_index;
+
+	if (!hw_mgr_priv || !hw_release_args) {
+		CAM_ERR(CAM_LRME, "Invalid arguments %pK, %pK",
+			hw_mgr_priv, hw_release_args);
+		return -EINVAL;
+	}
+
+	device_index = CAM_LRME_DECODE_DEVICE_INDEX(args->ctxt_to_hw_map);
+	if (device_index >= hw_mgr->device_count) {
+		CAM_ERR(CAM_LRME, "Invalid device index %llu", device_index);
+		return -EPERM;
+	}
+
+	rc = cam_lrme_mgr_util_release(hw_mgr, device_index);
+	if (rc)
+		CAM_ERR(CAM_LRME, "Failed in release device, rc=%d", rc);
+
+	return rc;
+}
+
+static int cam_lrme_mgr_hw_start(void *hw_mgr_priv, void *hw_start_args)
+{
+	int rc = 0;
+	struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_start_args *args =
+		(struct cam_hw_start_args *)hw_start_args;
+	struct cam_lrme_device *hw_device;
+	uint32_t device_index;
+
+	if (!hw_mgr || !args) {
+		CAM_ERR(CAM_LRME, "Invald input params");
+		return -EINVAL;
+	}
+
+	device_index = CAM_LRME_DECODE_DEVICE_INDEX(args->ctxt_to_hw_map);
+	if (device_index >= hw_mgr->device_count) {
+		CAM_ERR(CAM_LRME, "Invalid device index %d", device_index);
+		return -EPERM;
+	}
+
+	CAM_DBG(CAM_LRME, "Start device index %d", device_index);
+
+	rc = cam_lrme_mgr_util_get_device(hw_mgr, device_index, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to get hw device");
+		return rc;
+	}
+
+	if (hw_device->hw_intf.hw_ops.start) {
+		rc = hw_device->hw_intf.hw_ops.start(
+			hw_device->hw_intf.hw_priv, NULL, 0);
+	} else {
+		CAM_ERR(CAM_LRME, "Invald start function");
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+static int cam_lrme_mgr_hw_stop(void *hw_mgr_priv, void *stop_args)
+{
+	int rc = 0;
+	struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_stop_args *args =
+		(struct cam_hw_stop_args *)stop_args;
+	struct cam_lrme_device *hw_device;
+	uint32_t device_index;
+
+	if (!hw_mgr_priv || !stop_args) {
+		CAM_ERR(CAM_LRME, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	device_index = CAM_LRME_DECODE_DEVICE_INDEX(args->ctxt_to_hw_map);
+	if (device_index >= hw_mgr->device_count) {
+		CAM_ERR(CAM_LRME, "Invalid device index %d", device_index);
+		return -EPERM;
+	}
+
+	CAM_DBG(CAM_LRME, "Stop device index %d", device_index);
+
+	rc = cam_lrme_mgr_util_get_device(hw_mgr, device_index, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to get hw device");
+		return rc;
+	}
+
+	if (hw_device->hw_intf.hw_ops.stop) {
+		rc = hw_device->hw_intf.hw_ops.stop(
+			hw_device->hw_intf.hw_priv, NULL, 0);
+		if (rc) {
+			CAM_ERR(CAM_LRME, "Failed in HW stop %d", rc);
+			goto end;
+		}
+	}
+
+end:
+	return rc;
+}
+
+static int cam_lrme_mgr_hw_prepare_update(void *hw_mgr_priv,
+	void *hw_prepare_update_args)
+{
+	int rc = 0, i;
+	struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_prepare_update_args *args =
+		(struct cam_hw_prepare_update_args *)hw_prepare_update_args;
+	struct cam_lrme_device *hw_device;
+	struct cam_kmd_buf_info kmd_buf;
+	struct cam_lrme_hw_cmd_config_args config_args;
+	struct cam_lrme_frame_request *frame_req = NULL;
+	uint32_t device_index;
+
+	if (!hw_mgr_priv || !hw_prepare_update_args) {
+		CAM_ERR(CAM_LRME, "Invalid args %pK %pK",
+			hw_mgr_priv, hw_prepare_update_args);
+		return -EINVAL;
+	}
+
+	device_index = CAM_LRME_DECODE_DEVICE_INDEX(args->ctxt_to_hw_map);
+	if (device_index >= hw_mgr->device_count) {
+		CAM_ERR(CAM_LRME, "Invalid device index %d", device_index);
+		return -EPERM;
+	}
+
+	rc = cam_lrme_mgr_util_get_device(hw_mgr, device_index, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Error in getting device %d", rc);
+		goto error;
+	}
+
+	rc = cam_lrme_mgr_util_packet_validate(args->packet);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Error in packet validation %d", rc);
+		goto error;
+	}
+
+	rc = cam_packet_util_get_kmd_buffer(args->packet, &kmd_buf);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Error in get kmd buf buffer %d", rc);
+		goto error;
+	}
+
+	CAM_DBG(CAM_LRME,
+		"KMD Buf : hdl=%d, cpu_addr=%pK, offset=%d, size=%d, used=%d",
+		kmd_buf.handle, kmd_buf.cpu_addr, kmd_buf.offset,
+		kmd_buf.size, kmd_buf.used_bytes);
+
+	rc = cam_packet_util_process_patches(args->packet,
+		hw_mgr->device_iommu.non_secure, hw_mgr->device_iommu.secure);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Patch packet failed, rc=%d", rc);
+		return rc;
+	}
+
+	memset(&config_args, 0, sizeof(config_args));
+	config_args.hw_device = hw_device;
+
+	rc = cam_lrme_mgr_util_prepare_io_buffer(
+		hw_mgr->device_iommu.non_secure, args,
+		config_args.input_buf, config_args.output_buf,
+		CAM_LRME_MAX_IO_BUFFER);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Error in prepare IO Buf %d", rc);
+		goto error;
+	}
+	/* Check port number */
+	if (args->num_in_map_entries == 0 || args->num_out_map_entries == 0) {
+		CAM_ERR(CAM_LRME, "Invalid port number: in %d, out %d",
+			args->num_in_map_entries, args->num_out_map_entries);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	rc = cam_lrme_mgr_util_prepare_hw_update_entries(hw_mgr, args,
+		&config_args, &kmd_buf);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Error in hw update entries %d", rc);
+		goto error;
+	}
+
+	rc = cam_lrme_mgr_util_get_frame_req(&hw_mgr->frame_free_list,
+		&frame_req, &hw_mgr->free_req_lock);
+	if (rc || !frame_req) {
+		CAM_ERR(CAM_LRME, "Cannot get free frame request");
+		rc = rc ? rc : -ENOMEM;
+		goto error;
+	}
+
+	frame_req->ctxt_to_hw_map = args->ctxt_to_hw_map;
+	frame_req->req_id = args->packet->header.request_id;
+	frame_req->hw_device = hw_device;
+	frame_req->num_hw_update_entries = args->num_hw_update_entries;
+	for (i = 0; i < args->num_hw_update_entries; i++)
+		frame_req->hw_update_entries[i] = args->hw_update_entries[i];
+
+	args->priv = frame_req;
+
+	CAM_DBG(CAM_LRME, "FramePrepare : Frame[%lld]", frame_req->req_id);
+
+	return 0;
+
+error:
+	return rc;
+}
+
+static int cam_lrme_mgr_hw_config(void *hw_mgr_priv,
+	void *hw_config_args)
+{
+	int rc = 0;
+	struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_config_args *args =
+		(struct cam_hw_config_args *)hw_config_args;
+	struct cam_lrme_frame_request *frame_req;
+	struct cam_lrme_device *hw_device = NULL;
+	enum cam_lrme_hw_mgr_ctx_priority priority;
+
+	if (!hw_mgr_priv || !hw_config_args) {
+		CAM_ERR(CAM_LRME, "Invalid arguments, hw_mgr %pK, config %pK",
+			hw_mgr_priv, hw_config_args);
+		return -EINVAL;
+	}
+
+	if (!args->num_hw_update_entries) {
+		CAM_ERR(CAM_LRME, "No hw update entries");
+		return -EINVAL;
+	}
+
+	frame_req = (struct cam_lrme_frame_request *)args->priv;
+	if (!frame_req) {
+		CAM_ERR(CAM_LRME, "No frame request");
+		return -EINVAL;
+	}
+
+	hw_device = frame_req->hw_device;
+	if (!hw_device)
+		return -EINVAL;
+
+	priority = CAM_LRME_DECODE_PRIORITY(args->ctxt_to_hw_map);
+	if (priority == CAM_LRME_PRIORITY_HIGH) {
+		cam_lrme_mgr_util_put_frame_req(
+			&hw_device->frame_pending_list_high,
+			&frame_req->frame_list, &hw_device->high_req_lock);
+	} else {
+		cam_lrme_mgr_util_put_frame_req(
+			&hw_device->frame_pending_list_normal,
+			&frame_req->frame_list, &hw_device->normal_req_lock);
+	}
+
+	CAM_DBG(CAM_LRME, "schedule req %llu", frame_req->req_id);
+	rc = cam_lrme_mgr_util_schedule_frame_req(hw_mgr, hw_device);
+
+	return rc;
+}
+
+int cam_lrme_mgr_register_device(
+	struct cam_hw_intf *lrme_hw_intf,
+	struct cam_iommu_handle *device_iommu,
+	struct cam_iommu_handle *cdm_iommu)
+{
+	struct cam_lrme_device *hw_device;
+	char buf[128];
+	int i, rc;
+
+	hw_device = &g_lrme_hw_mgr.hw_device[lrme_hw_intf->hw_idx];
+
+	g_lrme_hw_mgr.device_iommu = *device_iommu;
+	g_lrme_hw_mgr.cdm_iommu = *cdm_iommu;
+
+	memcpy(&hw_device->hw_intf, lrme_hw_intf, sizeof(struct cam_hw_intf));
+
+	spin_lock_init(&hw_device->high_req_lock);
+	spin_lock_init(&hw_device->normal_req_lock);
+	INIT_LIST_HEAD(&hw_device->frame_pending_list_high);
+	INIT_LIST_HEAD(&hw_device->frame_pending_list_normal);
+
+	rc = snprintf(buf, sizeof(buf), "cam_lrme_device_submit_worker%d",
+		lrme_hw_intf->hw_idx);
+	CAM_DBG(CAM_LRME, "Create submit workq for %s", buf);
+	rc = cam_req_mgr_workq_create(buf,
+		CAM_LRME_WORKQ_NUM_TASK,
+		&hw_device->work, CRM_WORKQ_USAGE_NON_IRQ);
+	if (rc) {
+		CAM_ERR(CAM_LRME,
+			"Unable to create a worker, rc=%d", rc);
+		return rc;
+	}
+
+	for (i = 0; i < CAM_LRME_WORKQ_NUM_TASK; i++)
+		hw_device->work->task.pool[i].payload =
+			&hw_device->work_data[i];
+
+	if (hw_device->hw_intf.hw_ops.process_cmd) {
+		struct cam_lrme_hw_cmd_set_cb cb_args;
+
+		cb_args.cam_lrme_hw_mgr_cb = cam_lrme_mgr_cb;
+		cb_args.data = hw_device;
+
+		rc = hw_device->hw_intf.hw_ops.process_cmd(
+			hw_device->hw_intf.hw_priv,
+			CAM_LRME_HW_CMD_REGISTER_CB,
+			&cb_args, sizeof(cb_args));
+		if (rc) {
+			CAM_ERR(CAM_LRME, "Register cb failed");
+			goto destroy_workqueue;
+		}
+		CAM_DBG(CAM_LRME, "cb registered");
+	}
+
+	if (hw_device->hw_intf.hw_ops.get_hw_caps) {
+		rc = hw_device->hw_intf.hw_ops.get_hw_caps(
+			hw_device->hw_intf.hw_priv, &hw_device->hw_caps,
+			sizeof(hw_device->hw_caps));
+		if (rc)
+			CAM_ERR(CAM_LRME, "Get caps failed");
+	} else {
+		CAM_ERR(CAM_LRME, "No get_hw_caps function");
+		rc = -EINVAL;
+		goto destroy_workqueue;
+	}
+	g_lrme_hw_mgr.lrme_caps.dev_caps[lrme_hw_intf->hw_idx] =
+		hw_device->hw_caps;
+	g_lrme_hw_mgr.device_count++;
+	g_lrme_hw_mgr.lrme_caps.device_iommu = g_lrme_hw_mgr.device_iommu;
+	g_lrme_hw_mgr.lrme_caps.cdm_iommu = g_lrme_hw_mgr.cdm_iommu;
+	g_lrme_hw_mgr.lrme_caps.num_devices = g_lrme_hw_mgr.device_count;
+
+	hw_device->valid = true;
+
+	CAM_DBG(CAM_LRME, "device registration done");
+	return 0;
+
+destroy_workqueue:
+	cam_req_mgr_workq_destroy(&hw_device->work);
+
+	return rc;
+}
+
+int cam_lrme_mgr_deregister_device(int device_index)
+{
+	struct cam_lrme_device *hw_device;
+
+	hw_device = &g_lrme_hw_mgr.hw_device[device_index];
+	cam_req_mgr_workq_destroy(&hw_device->work);
+	memset(hw_device, 0x0, sizeof(struct cam_lrme_device));
+	g_lrme_hw_mgr.device_count--;
+
+	return 0;
+}
+
+int cam_lrme_hw_mgr_deinit(void)
+{
+	mutex_destroy(&g_lrme_hw_mgr.hw_mgr_mutex);
+	memset(&g_lrme_hw_mgr, 0x0, sizeof(g_lrme_hw_mgr));
+
+	return 0;
+}
+
+int cam_lrme_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf,
+	cam_hw_event_cb_func cam_lrme_dev_buf_done_cb)
+{
+	int i, rc = 0;
+	struct cam_lrme_frame_request *frame_req;
+
+	if (!hw_mgr_intf)
+		return -EINVAL;
+
+	CAM_DBG(CAM_LRME, "device count %d", g_lrme_hw_mgr.device_count);
+	if (g_lrme_hw_mgr.device_count > CAM_LRME_HW_MAX) {
+		CAM_ERR(CAM_LRME, "Invalid count of devices");
+		return -EINVAL;
+	}
+
+	memset(hw_mgr_intf, 0, sizeof(*hw_mgr_intf));
+
+	mutex_init(&g_lrme_hw_mgr.hw_mgr_mutex);
+	spin_lock_init(&g_lrme_hw_mgr.free_req_lock);
+	INIT_LIST_HEAD(&g_lrme_hw_mgr.frame_free_list);
+
+	/* Init hw mgr frame requests and add to free list */
+	for (i = 0; i < CAM_CTX_REQ_MAX * CAM_CTX_MAX; i++) {
+		frame_req = &g_lrme_hw_mgr.frame_req[i];
+
+		memset(frame_req, 0x0, sizeof(*frame_req));
+		INIT_LIST_HEAD(&frame_req->frame_list);
+
+		list_add_tail(&frame_req->frame_list,
+			&g_lrme_hw_mgr.frame_free_list);
+	}
+
+	hw_mgr_intf->hw_mgr_priv = &g_lrme_hw_mgr;
+	hw_mgr_intf->hw_get_caps = cam_lrme_mgr_get_caps;
+	hw_mgr_intf->hw_acquire = cam_lrme_mgr_hw_acquire;
+	hw_mgr_intf->hw_release = cam_lrme_mgr_hw_release;
+	hw_mgr_intf->hw_start = cam_lrme_mgr_hw_start;
+	hw_mgr_intf->hw_stop = cam_lrme_mgr_hw_stop;
+	hw_mgr_intf->hw_prepare_update = cam_lrme_mgr_hw_prepare_update;
+	hw_mgr_intf->hw_config = cam_lrme_mgr_hw_config;
+	hw_mgr_intf->hw_read = NULL;
+	hw_mgr_intf->hw_write = NULL;
+	hw_mgr_intf->hw_close = NULL;
+
+	g_lrme_hw_mgr.event_cb = cam_lrme_dev_buf_done_cb;
+
+	CAM_DBG(CAM_LRME, "Hw mgr init done");
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.h b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.h
new file mode 100644
index 0000000..f7ce4d2
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.h
@@ -0,0 +1,120 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_LRME_HW_MGR_H_
+#define _CAM_LRME_HW_MGR_H_
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include <media/cam_lrme.h>
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_lrme_hw_intf.h"
+#include "cam_context.h"
+
+#define CAM_LRME_HW_MAX 1
+#define CAM_LRME_WORKQ_NUM_TASK 10
+
+#define CAM_LRME_DECODE_DEVICE_INDEX(ctxt_to_hw_map) \
+	((uint64_t)ctxt_to_hw_map & 0xF)
+
+#define CAM_LRME_DECODE_PRIORITY(ctxt_to_hw_map) \
+	(((uint64_t)ctxt_to_hw_map & 0xF0) >> 4)
+
+#define CAM_LRME_DECODE_CTX_INDEX(ctxt_to_hw_map) \
+	((uint64_t)ctxt_to_hw_map >> CAM_LRME_CTX_INDEX_SHIFT)
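+
+/*
+ * Illustrative layout of ctxt_to_hw_map as implied by the decode macros
+ * above: bits [3:0] carry the device index, bits [7:4] the priority, and
+ * the bits above CAM_LRME_CTX_INDEX_SHIFT the context index. For example,
+ * assuming CAM_LRME_CTX_INDEX_SHIFT is 8 (defined elsewhere), a value of
+ * 0x213 would decode to device 3, priority 1 and context index 2.
+ */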
+
+/**
+ * enum cam_lrme_hw_mgr_ctx_priority
+ *
+ * CAM_LRME_PRIORITY_HIGH   : High priority client
+ * CAM_LRME_PRIORITY_NORMAL : Normal priority client
+ */
+enum cam_lrme_hw_mgr_ctx_priority {
+	CAM_LRME_PRIORITY_HIGH,
+	CAM_LRME_PRIORITY_NORMAL,
+};
+
+/**
+ * struct cam_lrme_mgr_work_data : HW Mgr work data
+ *
+ * hw_device : Pointer to the hw device
+ */
+struct cam_lrme_mgr_work_data {
+	struct cam_lrme_device *hw_device;
+};
+
+/**
+ * struct cam_lrme_device     : LRME HW device
+ *
+ * @hw_caps                   : HW device's capabilities
+ * @hw_intf                   : HW device's interface information
+ * @num_context               : Number of contexts using this device
+ * @valid                     : Whether this device is valid
+ * @work                      : HW device's work queue
+ * @work_data                 : HW device's work data
+ * @frame_pending_list_high   : High priority request queue
+ * @frame_pending_list_normal : Normal priority request queue
+ * @high_req_lock             : Spinlock of high priority queue
+ * @normal_req_lock           : Spinlock of normal priority queue
+ */
+struct cam_lrme_device {
+	struct cam_lrme_dev_cap        hw_caps;
+	struct cam_hw_intf             hw_intf;
+	uint32_t                       num_context;
+	bool                           valid;
+	struct cam_req_mgr_core_workq *work;
+	struct cam_lrme_mgr_work_data  work_data[CAM_LRME_WORKQ_NUM_TASK];
+	struct list_head               frame_pending_list_high;
+	struct list_head               frame_pending_list_normal;
+	spinlock_t                     high_req_lock;
+	spinlock_t                     normal_req_lock;
+};
+
+/**
+ * struct cam_lrme_hw_mgr : LRME HW manager
+ *
+ * @device_count    : Number of HW devices
+ * @frame_free_list : List of free frame request
+ * @hw_mgr_mutex    : Mutex to protect HW manager data
+ * @free_req_lock   : Spinlock to protect frame_free_list
+ * @hw_device       : List of HW devices
+ * @device_iommu    : Device iommu
+ * @cdm_iommu       : cdm iommu
+ * @frame_req       : List of frame request to use
+ * @lrme_caps       : LRME capabilities
+ * @event_cb        : IRQ callback function
+ */
+struct cam_lrme_hw_mgr {
+	uint32_t                      device_count;
+	struct list_head              frame_free_list;
+	struct mutex                  hw_mgr_mutex;
+	spinlock_t                    free_req_lock;
+	struct cam_lrme_device        hw_device[CAM_LRME_HW_MAX];
+	struct cam_iommu_handle       device_iommu;
+	struct cam_iommu_handle       cdm_iommu;
+	struct cam_lrme_frame_request frame_req[CAM_CTX_REQ_MAX * CAM_CTX_MAX];
+	struct cam_lrme_query_cap_cmd lrme_caps;
+	cam_hw_event_cb_func          event_cb;
+};
+
+int cam_lrme_mgr_register_device(struct cam_hw_intf *lrme_hw_intf,
+	struct cam_iommu_handle *device_iommu,
+	struct cam_iommu_handle *cdm_iommu);
+int cam_lrme_mgr_deregister_device(int device_index);
+
+#endif /* _CAM_LRME_HW_MGR_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr_intf.h
new file mode 100644
index 0000000..8bb609c
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr_intf.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_LRME_HW_MGR_INTF_H_
+#define _CAM_LRME_HW_MGR_INTF_H_
+
+#include <linux/of.h>
+
+#include "cam_debug_util.h"
+#include "cam_hw_mgr_intf.h"
+
+int cam_lrme_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf,
+	cam_hw_event_cb_func cam_lrme_dev_buf_done_cb);
+int cam_lrme_hw_mgr_deinit(void);
+
+#endif /* _CAM_LRME_HW_MGR_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/Makefile b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/Makefile
new file mode 100644
index 0000000..c65d862
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/Makefile
@@ -0,0 +1,13 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw
+ccflags-y += -Idrivers/media/platform/msm/camera
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_lrme_hw_dev.o cam_lrme_hw_core.o cam_lrme_hw_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c
new file mode 100644
index 0000000..0318739
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c
@@ -0,0 +1,1022 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_lrme_hw_core.h"
+#include "cam_lrme_hw_soc.h"
+#include "cam_smmu_api.h"
+
+static void cam_lrme_cdm_write_reg_val_pair(uint32_t *buffer,
+	uint32_t *index, uint32_t reg_offset, uint32_t reg_value)
+{
+	buffer[(*index)++] = reg_offset;
+	buffer[(*index)++] = reg_value;
+}
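+
+/*
+ * Note on usage (sketch): each call appends one offset/value pair, so the
+ * index counts dwords rather than pairs. The config path below therefore
+ * hands (num_cmd / 2) pairs to cdm_write_regrandom(). For example:
+ *
+ *	uint32_t pairs[CAM_LRME_MAX_REG_PAIR_NUM];
+ *	uint32_t num_cmd = 0;
+ *
+ *	cam_lrme_cdm_write_reg_val_pair(pairs, &num_cmd, offset, value);
+ *	// num_cmd is now 2: one register write queued
+ */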
+
+static void cam_lrme_hw_util_fill_fe_reg(struct cam_lrme_hw_io_buffer *io_buf,
+	uint32_t index, uint32_t *reg_val_pair, uint32_t *num_cmd,
+	struct cam_lrme_hw_info *hw_info)
+{
+	uint32_t reg_val;
+
+	/* 1. config buffer size */
+	reg_val = io_buf->io_cfg->planes[0].width;
+	reg_val |= (io_buf->io_cfg->planes[0].height << 16);
+	cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+		hw_info->bus_rd_reg.bus_client_reg[index].rd_buffer_size,
+		reg_val);
+
+	CAM_DBG(CAM_LRME,
+		"width %d", io_buf->io_cfg->planes[0].width);
+	CAM_DBG(CAM_LRME,
+		"height %d", io_buf->io_cfg->planes[0].height);
+
+	/* 2. config image address */
+	cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+		hw_info->bus_rd_reg.bus_client_reg[index].addr_image,
+		io_buf->io_addr[0]);
+
+	CAM_DBG(CAM_LRME, "io addr %llu", io_buf->io_addr[0]);
+
+	/* 3. config stride */
+	reg_val = io_buf->io_cfg->planes[0].plane_stride;
+	cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+		hw_info->bus_rd_reg.bus_client_reg[index].rd_stride,
+		reg_val);
+
+	CAM_DBG(CAM_LRME, "plane_stride %d",
+		io_buf->io_cfg->planes[0].plane_stride);
+
+	/* 4. enable client */
+	cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+		hw_info->bus_rd_reg.bus_client_reg[index].core_cfg, 0x1);
+
+	/* 5. unpack_cfg */
+	cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+		hw_info->bus_rd_reg.bus_client_reg[index].unpack_cfg_0, 0x0);
+}
+
+static void cam_lrme_hw_util_fill_we_reg(struct cam_lrme_hw_io_buffer *io_buf,
+	uint32_t index, uint32_t *reg_val_pair, uint32_t *num_cmd,
+	struct cam_lrme_hw_info *hw_info)
+{
+	/* config client mode */
+	cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+		hw_info->bus_wr_reg.bus_client_reg[index].cfg,
+		0x1);
+
+	/* image address */
+	cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+		hw_info->bus_wr_reg.bus_client_reg[index].addr_image,
+		io_buf->io_addr[0]);
+	CAM_DBG(CAM_LRME, "io addr %llu", io_buf->io_addr[0]);
+
+	/* buffer width and height */
+	cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+		hw_info->bus_wr_reg.bus_client_reg[index].buffer_width_cfg,
+		io_buf->io_cfg->planes[0].width);
+	CAM_DBG(CAM_LRME, "width %d", io_buf->io_cfg->planes[0].width);
+
+	cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+		hw_info->bus_wr_reg.bus_client_reg[index].buffer_height_cfg,
+		io_buf->io_cfg->planes[0].height);
+	CAM_DBG(CAM_LRME, "height %d", io_buf->io_cfg->planes[0].height);
+
+	/* packer cfg */
+	cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+		hw_info->bus_wr_reg.bus_client_reg[index].packer_cfg,
+		(index == 0) ? 0x1 : 0x5);
+
+	/* client stride */
+	cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+		hw_info->bus_wr_reg.bus_client_reg[index].wr_stride,
+		io_buf->io_cfg->planes[0].meta_stride);
+	CAM_DBG(CAM_LRME, "plane_stride %d",
+		io_buf->io_cfg->planes[0].plane_stride);
+}
+
+
+static int cam_lrme_hw_util_process_config_hw(struct cam_hw_info *lrme_hw,
+	struct cam_lrme_hw_cmd_config_args *config_args)
+{
+	int i;
+	struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info;
+	struct cam_lrme_cdm_info *hw_cdm_info;
+	uint32_t *cmd_buf_addr = config_args->cmd_buf_addr;
+	uint32_t reg_val_pair[CAM_LRME_MAX_REG_PAIR_NUM];
+	struct cam_lrme_hw_io_buffer *io_buf;
+	struct cam_lrme_hw_info *hw_info =
+		((struct cam_lrme_core *)lrme_hw->core_info)->hw_info;
+	uint32_t num_cmd = 0;
+	uint32_t size;
+	uint32_t mem_base, available_size = config_args->size;
+	uint32_t output_res_mask = 0, input_res_mask = 0;
+
+
+	if (!cmd_buf_addr) {
+		CAM_ERR(CAM_LRME, "Invalid input args");
+		return -EINVAL;
+	}
+
+	hw_cdm_info =
+		((struct cam_lrme_core *)lrme_hw->core_info)->hw_cdm_info;
+
+	for (i = 0; i < CAM_LRME_MAX_IO_BUFFER; i++) {
+		io_buf = &config_args->input_buf[i];
+
+		if (io_buf->valid == false)
+			break;
+
+		if (io_buf->io_cfg->direction != CAM_BUF_INPUT) {
+			CAM_ERR(CAM_LRME, "Incorrect direction %d %d",
+				io_buf->io_cfg->direction, CAM_BUF_INPUT);
+			return -EINVAL;
+		}
+		CAM_DBG(CAM_LRME,
+			"resource_type %d", io_buf->io_cfg->resource_type);
+
+		switch (io_buf->io_cfg->resource_type) {
+		case CAM_LRME_IO_TYPE_TAR:
+			cam_lrme_hw_util_fill_fe_reg(io_buf, 0, reg_val_pair,
+				&num_cmd, hw_info);
+
+			input_res_mask |= CAM_LRME_INPUT_PORT_TYPE_TAR;
+			break;
+		case CAM_LRME_IO_TYPE_REF:
+			cam_lrme_hw_util_fill_fe_reg(io_buf, 1, reg_val_pair,
+				&num_cmd, hw_info);
+
+			input_res_mask |= CAM_LRME_INPUT_PORT_TYPE_REF;
+			break;
+		default:
+			CAM_ERR(CAM_LRME, "wrong resource_type %d",
+				io_buf->io_cfg->resource_type);
+			return -EINVAL;
+		}
+	}
+
+	for (i = 0; i < CAM_LRME_BUS_RD_MAX_CLIENTS; i++)
+		if (!((input_res_mask >> i) & 0x1))
+			cam_lrme_cdm_write_reg_val_pair(reg_val_pair, &num_cmd,
+				hw_info->bus_rd_reg.bus_client_reg[i].core_cfg,
+				0x0);
+
+	for (i = 0; i < CAM_LRME_MAX_IO_BUFFER; i++) {
+		io_buf = &config_args->output_buf[i];
+
+		if (io_buf->valid == false)
+			break;
+
+		if (io_buf->io_cfg->direction != CAM_BUF_OUTPUT) {
+			CAM_ERR(CAM_LRME, "Incorrect direction %d %d",
+				io_buf->io_cfg->direction, CAM_BUF_OUTPUT);
+			return -EINVAL;
+		}
+
+		CAM_DBG(CAM_LRME, "resource_type %d",
+			io_buf->io_cfg->resource_type);
+		switch (io_buf->io_cfg->resource_type) {
+		case CAM_LRME_IO_TYPE_DS2:
+			cam_lrme_hw_util_fill_we_reg(io_buf, 0, reg_val_pair,
+				&num_cmd, hw_info);
+
+			output_res_mask |= CAM_LRME_OUTPUT_PORT_TYPE_DS2;
+			break;
+		case CAM_LRME_IO_TYPE_RES:
+			cam_lrme_hw_util_fill_we_reg(io_buf, 1, reg_val_pair,
+				&num_cmd, hw_info);
+
+			output_res_mask |= CAM_LRME_OUTPUT_PORT_TYPE_RES;
+			break;
+
+		default:
+			CAM_ERR(CAM_LRME, "wrong resource_type %d",
+				io_buf->io_cfg->resource_type);
+			return -EINVAL;
+		}
+	}
+
+	for (i = 0; i < CAM_LRME_BUS_WR_MAX_CLIENTS; i++)
+		if (!((output_res_mask >> i) & 0x1))
+			cam_lrme_cdm_write_reg_val_pair(reg_val_pair, &num_cmd,
+				hw_info->bus_wr_reg.bus_client_reg[i].cfg, 0x0);
+
+	if (output_res_mask) {
+		/* write composite mask */
+		cam_lrme_cdm_write_reg_val_pair(reg_val_pair, &num_cmd,
+			hw_info->bus_wr_reg.common_reg.composite_mask_0,
+			output_res_mask);
+	}
+
+	size = hw_cdm_info->cdm_ops->cdm_required_size_changebase();
+	if ((size * 4) > available_size) {
+		CAM_ERR(CAM_LRME, "buf size:%d is not sufficient, expected: %d",
+			available_size, size);
+		return -EINVAL;
+	}
+
+	mem_base = CAM_SOC_GET_REG_MAP_CAM_BASE(soc_info, CAM_LRME_BASE_IDX);
+
+	hw_cdm_info->cdm_ops->cdm_write_changebase(cmd_buf_addr, mem_base);
+	cmd_buf_addr += size;
+	available_size -= (size * 4);
+
+	size = hw_cdm_info->cdm_ops->cdm_required_size_reg_random(
+		num_cmd / 2);
+
+	if ((size * 4) > available_size) {
+		CAM_ERR(CAM_LRME, "buf size:%d is not sufficient, expected: %d",
+			available_size, size);
+		return -ENOMEM;
+	}
+
+	hw_cdm_info->cdm_ops->cdm_write_regrandom(cmd_buf_addr, num_cmd / 2,
+		reg_val_pair);
+	cmd_buf_addr += size;
+	available_size -= (size * 4);
+
+	config_args->config_buf_size =
+		config_args->size - available_size;
+
+	return 0;
+}
+
+static int cam_lrme_hw_util_submit_go(struct cam_hw_info *lrme_hw)
+{
+	struct cam_lrme_core *lrme_core;
+	struct cam_hw_soc_info *soc_info;
+	struct cam_lrme_hw_info   *hw_info;
+
+	lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+	hw_info = lrme_core->hw_info;
+	soc_info = &lrme_hw->soc_info;
+
+	cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base +
+		hw_info->bus_rd_reg.common_reg.cmd);
+
+	return 0;
+}
+
+static int cam_lrme_hw_util_reset(struct cam_hw_info *lrme_hw,
+	uint32_t reset_type)
+{
+	struct cam_lrme_core *lrme_core;
+	struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info;
+	struct cam_lrme_hw_info *hw_info;
+	long time_left;
+
+	lrme_core = lrme_hw->core_info;
+	hw_info = lrme_core->hw_info;
+
+	switch (reset_type) {
+	case CAM_LRME_HW_RESET_TYPE_HW_RESET:
+		reinit_completion(&lrme_core->reset_complete);
+		cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base +
+			hw_info->titan_reg.top_rst_cmd);
+		time_left = wait_for_completion_timeout(
+			&lrme_core->reset_complete,
+			msecs_to_jiffies(CAM_LRME_HW_RESET_TIMEOUT));
+		if (time_left <= 0) {
+			CAM_ERR(CAM_LRME,
+				"HW reset wait failed time_left=%ld",
+				time_left);
+			return -ETIMEDOUT;
+		}
+		break;
+	case CAM_LRME_HW_RESET_TYPE_SW_RESET:
+		cam_io_w_mb(0x3, soc_info->reg_map[0].mem_base +
+			hw_info->bus_wr_reg.common_reg.sw_reset);
+		cam_io_w_mb(0x3, soc_info->reg_map[0].mem_base +
+			hw_info->bus_rd_reg.common_reg.sw_reset);
+		reinit_completion(&lrme_core->reset_complete);
+		cam_io_w_mb(0x2, soc_info->reg_map[0].mem_base +
+			hw_info->titan_reg.top_rst_cmd);
+		time_left = wait_for_completion_timeout(
+			&lrme_core->reset_complete,
+			msecs_to_jiffies(CAM_LRME_HW_RESET_TIMEOUT));
+		if (time_left <= 0) {
+			CAM_ERR(CAM_LRME,
+				"SW reset wait failed time_left=%ld",
+				time_left);
+			return -ETIMEDOUT;
+		}
+		break;
+	}
+
+	return 0;
+}
+
+int cam_lrme_hw_util_get_caps(struct cam_hw_info *lrme_hw,
+	struct cam_lrme_dev_cap *hw_caps)
+{
+	struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info;
+	struct cam_lrme_hw_info *hw_info =
+		((struct cam_lrme_core *)lrme_hw->core_info)->hw_info;
+	uint32_t reg_value;
+
+	if (!hw_info) {
+		CAM_ERR(CAM_LRME, "Invalid hw info data");
+		return -EINVAL;
+	}
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		hw_info->clc_reg.clc_hw_version);
+	hw_caps->clc_hw_version.gen =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1C);
+	hw_caps->clc_hw_version.rev =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+	hw_caps->clc_hw_version.step =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		hw_info->bus_rd_reg.common_reg.hw_version);
+	hw_caps->bus_rd_hw_version.gen =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1C);
+	hw_caps->bus_rd_hw_version.rev =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+	hw_caps->bus_rd_hw_version.step =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		hw_info->bus_wr_reg.common_reg.hw_version);
+	hw_caps->bus_wr_hw_version.gen =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1C);
+	hw_caps->bus_wr_hw_version.rev =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+	hw_caps->bus_wr_hw_version.step =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		hw_info->titan_reg.top_hw_version);
+	hw_caps->top_hw_version.gen =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1C);
+	hw_caps->top_hw_version.rev =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+	hw_caps->top_hw_version.step =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		hw_info->titan_reg.top_titan_version);
+	hw_caps->top_titan_version.gen =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1C);
+	hw_caps->top_titan_version.rev =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+	hw_caps->top_titan_version.step =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+	return 0;
+}
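+
+/*
+ * Example (illustrative, assuming CAM_BITS_MASK_SHIFT(val, mask, shift)
+ * expands to ((val & mask) >> shift)): a version register readout of
+ * 0x10020003 decodes to gen = 1, rev = 2, step = 3 with the masks used
+ * above.
+ */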
+
+static int cam_lrme_hw_util_submit_req(struct cam_lrme_core *lrme_core,
+	struct cam_lrme_frame_request *frame_req)
+{
+	struct cam_lrme_cdm_info *hw_cdm_info =
+		lrme_core->hw_cdm_info;
+	struct cam_cdm_bl_request *cdm_cmd = hw_cdm_info->cdm_cmd;
+	struct cam_hw_update_entry *cmd;
+	int i, rc = 0;
+
+	if (frame_req->num_hw_update_entries > 0) {
+		cdm_cmd->cmd_arrary_count = frame_req->num_hw_update_entries;
+		cdm_cmd->type = CAM_CDM_BL_CMD_TYPE_MEM_HANDLE;
+		cdm_cmd->flag = false;
+		cdm_cmd->userdata = NULL;
+		cdm_cmd->cookie = 0;
+
+		for (i = 0; i < frame_req->num_hw_update_entries; i++) {
+			cmd = (frame_req->hw_update_entries + i);
+			cdm_cmd->cmd[i].bl_addr.mem_handle = cmd->handle;
+			cdm_cmd->cmd[i].offset = cmd->offset;
+			cdm_cmd->cmd[i].len = cmd->len;
+		}
+
+		rc = cam_cdm_submit_bls(hw_cdm_info->cdm_handle, cdm_cmd);
+		if (rc) {
+			CAM_ERR(CAM_LRME, "Failed to submit cdm commands");
+			return -EINVAL;
+		}
+	} else {
+		CAM_ERR(CAM_LRME, "No hw update entry");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static int cam_lrme_hw_util_process_err(struct cam_hw_info *lrme_hw)
+{
+	struct cam_lrme_core *lrme_core = lrme_hw->core_info;
+	struct cam_lrme_frame_request *req_proc, *req_submit;
+	struct cam_lrme_hw_cb_args cb_args;
+	int rc;
+
+	req_proc = lrme_core->req_proc;
+	req_submit = lrme_core->req_submit;
+	cb_args.cb_type = CAM_LRME_CB_ERROR;
+
+	if ((lrme_core->state != CAM_LRME_CORE_STATE_PROCESSING) &&
+		(lrme_core->state != CAM_LRME_CORE_STATE_REQ_PENDING) &&
+		(lrme_core->state != CAM_LRME_CORE_STATE_REQ_PROC_PEND)) {
+		CAM_ERR(CAM_LRME, "Get error irq in wrong state %d",
+			lrme_core->state);
+	}
+
+	CAM_ERR_RATE_LIMIT(CAM_LRME, "Start recovery");
+	lrme_core->state = CAM_LRME_CORE_STATE_RECOVERY;
+	rc = cam_lrme_hw_util_reset(lrme_hw, CAM_LRME_HW_RESET_TYPE_HW_RESET);
+	if (rc)
+		CAM_ERR(CAM_LRME, "Failed to reset");
+
+	lrme_core->req_proc = NULL;
+	lrme_core->req_submit = NULL;
+	if (!rc)
+		lrme_core->state = CAM_LRME_CORE_STATE_IDLE;
+
+	cb_args.frame_req = req_proc;
+	lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb(lrme_core->hw_mgr_cb.data,
+		&cb_args);
+
+	cb_args.frame_req = req_submit;
+	lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb(lrme_core->hw_mgr_cb.data,
+		&cb_args);
+
+	return rc;
+}
+
+static int cam_lrme_hw_util_process_reg_update(
+	struct cam_hw_info *lrme_hw, struct cam_lrme_hw_cb_args *cb_args)
+{
+	struct cam_lrme_core *lrme_core = lrme_hw->core_info;
+	int rc = 0;
+
+	cb_args->cb_type |= CAM_LRME_CB_COMP_REG_UPDATE;
+	if (lrme_core->state == CAM_LRME_CORE_STATE_REQ_PENDING) {
+		lrme_core->state = CAM_LRME_CORE_STATE_PROCESSING;
+	} else {
+		CAM_ERR(CAM_LRME, "Reg update in wrong state %d",
+			lrme_core->state);
+		rc = cam_lrme_hw_util_process_err(lrme_hw);
+		if (rc)
+			CAM_ERR(CAM_LRME, "Failed to reset");
+		return -EINVAL;
+	}
+
+	lrme_core->req_proc = lrme_core->req_submit;
+	lrme_core->req_submit = NULL;
+
+	return 0;
+}
+
+static int cam_lrme_hw_util_process_idle(
+	struct cam_hw_info *lrme_hw, struct cam_lrme_hw_cb_args *cb_args)
+{
+	struct cam_lrme_core *lrme_core = lrme_hw->core_info;
+	int rc = 0;
+
+	cb_args->cb_type |= CAM_LRME_CB_BUF_DONE;
+	switch (lrme_core->state) {
+	case CAM_LRME_CORE_STATE_REQ_PROC_PEND:
+		cam_lrme_hw_util_submit_go(lrme_hw);
+		lrme_core->state = CAM_LRME_CORE_STATE_REQ_PENDING;
+		break;
+
+	case CAM_LRME_CORE_STATE_PROCESSING:
+		lrme_core->state = CAM_LRME_CORE_STATE_IDLE;
+		break;
+
+	default:
+		CAM_ERR(CAM_LRME, "Idle in wrong state %d",
+			lrme_core->state);
+		rc = cam_lrme_hw_util_process_err(lrme_hw);
+		return rc;
+	}
+	cb_args->frame_req = lrme_core->req_proc;
+	lrme_core->req_proc = NULL;
+
+	return 0;
+}
+
+void cam_lrme_set_irq(struct cam_hw_info *lrme_hw,
+	enum cam_lrme_irq_set set)
+{
+	struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info;
+	struct cam_lrme_core *lrme_core = lrme_hw->core_info;
+	struct cam_lrme_hw_info *hw_info = lrme_core->hw_info;
+
+	switch (set) {
+	case CAM_LRME_IRQ_ENABLE:
+		cam_io_w_mb(0xFFFF,
+			soc_info->reg_map[0].mem_base +
+			hw_info->titan_reg.top_irq_mask);
+		cam_io_w_mb(0xFFFF,
+			soc_info->reg_map[0].mem_base +
+			hw_info->bus_wr_reg.common_reg.irq_mask_0);
+		cam_io_w_mb(0xFFFF,
+			soc_info->reg_map[0].mem_base +
+			hw_info->bus_wr_reg.common_reg.irq_mask_1);
+		cam_io_w_mb(0xFFFF,
+			soc_info->reg_map[0].mem_base +
+			hw_info->bus_rd_reg.common_reg.irq_mask);
+		break;
+
+	case CAM_LRME_IRQ_DISABLE:
+		cam_io_w_mb(0x0,
+			soc_info->reg_map[0].mem_base +
+			hw_info->titan_reg.top_irq_mask);
+		cam_io_w_mb(0x0,
+			soc_info->reg_map[0].mem_base +
+			hw_info->bus_wr_reg.common_reg.irq_mask_0);
+		cam_io_w_mb(0x0,
+			soc_info->reg_map[0].mem_base +
+			hw_info->bus_wr_reg.common_reg.irq_mask_1);
+		cam_io_w_mb(0x0,
+			soc_info->reg_map[0].mem_base +
+			hw_info->bus_rd_reg.common_reg.irq_mask);
+		break;
+	}
+}
+
+
+int cam_lrme_hw_process_irq(void *priv, void *data)
+{
+	struct cam_lrme_hw_work_data *work_data;
+	struct cam_hw_info *lrme_hw;
+	struct cam_lrme_core *lrme_core;
+	int rc = 0;
+	uint32_t top_irq_status, fe_irq_status;
+	uint32_t *we_irq_status;
+	struct cam_lrme_hw_cb_args cb_args;
+
+	if (!data || !priv) {
+		CAM_ERR(CAM_LRME, "Invalid data %pK %pK", data, priv);
+		return -EINVAL;
+	}
+
+	memset(&cb_args, 0, sizeof(struct cam_lrme_hw_cb_args));
+	lrme_hw = (struct cam_hw_info *)priv;
+	work_data = (struct cam_lrme_hw_work_data *)data;
+	lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+	top_irq_status = work_data->top_irq_status;
+	fe_irq_status = work_data->fe_irq_status;
+	we_irq_status = work_data->we_irq_status;
+
+	CAM_DBG(CAM_LRME,
+		"top status %x, fe status %x, we status0 %x, we status1 %x",
+		top_irq_status, fe_irq_status, we_irq_status[0],
+		we_irq_status[1]);
+	CAM_DBG(CAM_LRME, "Current state %d", lrme_core->state);
+
+	mutex_lock(&lrme_hw->hw_mutex);
+
+	if (top_irq_status & (1 << 3)) {
+		CAM_DBG(CAM_LRME, "Error");
+		rc = cam_lrme_hw_util_process_err(lrme_hw);
+		if (rc)
+			CAM_ERR(CAM_LRME, "Process error failed");
+		goto end;
+	}
+
+	if (we_irq_status[0] & (1 << 1)) {
+		CAM_DBG(CAM_LRME, "reg update");
+		rc = cam_lrme_hw_util_process_reg_update(lrme_hw, &cb_args);
+		if (rc) {
+			CAM_ERR(CAM_LRME, "Process reg_update failed");
+			goto end;
+		}
+	}
+
+	if (top_irq_status & (1 << 4)) {
+		CAM_DBG(CAM_LRME, "IDLE");
+
+		rc = cam_lrme_hw_util_process_idle(lrme_hw, &cb_args);
+		if (rc) {
+			CAM_ERR(CAM_LRME, "Process idle failed");
+			goto end;
+		}
+	}
+
+	if (lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb) {
+		lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb(
+			lrme_core->hw_mgr_cb.data, &cb_args);
+	} else {
+		CAM_ERR(CAM_LRME, "No hw mgr cb");
+		rc = -EINVAL;
+	}
+
+end:
+	mutex_unlock(&lrme_hw->hw_mutex);
+	return rc;
+}
+
+int cam_lrme_hw_start(void *hw_priv, void *hw_start_args, uint32_t arg_size)
+{
+	struct cam_hw_info *lrme_hw = (struct cam_hw_info *)hw_priv;
+	int rc = 0;
+	struct cam_lrme_core *lrme_core;
+
+	if (!lrme_hw) {
+		CAM_ERR(CAM_LRME,
+			"Invalid input params, lrme_hw %pK",
+			lrme_hw);
+		return -EINVAL;
+	}
+
+	lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+
+	mutex_lock(&lrme_hw->hw_mutex);
+
+	if (lrme_hw->open_count > 0) {
+		CAM_DBG(CAM_LRME, "This device is activated before");
+		goto unlock;
+	}
+
+	rc = cam_lrme_soc_enable_resources(lrme_hw);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to enable soc resources");
+		goto unlock;
+	}
+
+	rc = cam_lrme_hw_util_reset(lrme_hw, CAM_LRME_HW_RESET_TYPE_HW_RESET);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to reset hw");
+		goto disable_soc;
+	}
+
+	if (lrme_core->hw_cdm_info) {
+		struct cam_lrme_cdm_info *hw_cdm_info =
+			lrme_core->hw_cdm_info;
+
+		rc = cam_cdm_stream_on(hw_cdm_info->cdm_handle);
+		if (rc) {
+			CAM_ERR(CAM_LRME, "Failed to stream on cdm");
+			goto disable_soc;
+		}
+	}
+
+	lrme_hw->hw_state = CAM_HW_STATE_POWER_UP;
+	lrme_hw->open_count++;
+	lrme_core->state = CAM_LRME_CORE_STATE_IDLE;
+
+	mutex_unlock(&lrme_hw->hw_mutex);
+	return rc;
+
+disable_soc:
+	if (cam_lrme_soc_disable_resources(lrme_hw))
+		CAM_ERR(CAM_LRME, "Error in disable soc resources");
+unlock:
+	mutex_unlock(&lrme_hw->hw_mutex);
+	return rc;
+}
+
+int cam_lrme_hw_stop(void *hw_priv, void *hw_stop_args, uint32_t arg_size)
+{
+	struct cam_hw_info *lrme_hw = (struct cam_hw_info *)hw_priv;
+	int rc = 0;
+	struct cam_lrme_core *lrme_core;
+
+	if (!lrme_hw) {
+		CAM_ERR(CAM_LRME, "Invalid argument");
+		return -EINVAL;
+	}
+
+	lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+
+	mutex_lock(&lrme_hw->hw_mutex);
+
+	if (lrme_hw->open_count == 0) {
+		mutex_unlock(&lrme_hw->hw_mutex);
+		CAM_ERR(CAM_LRME, "Error Unbalanced stop");
+		return -EINVAL;
+	}
+	lrme_hw->open_count--;
+
+	if (lrme_hw->open_count)
+		goto unlock;
+
+	lrme_core->req_proc = NULL;
+	lrme_core->req_submit = NULL;
+
+	if (lrme_core->hw_cdm_info) {
+		struct cam_lrme_cdm_info *hw_cdm_info =
+			lrme_core->hw_cdm_info;
+
+		rc = cam_cdm_stream_off(hw_cdm_info->cdm_handle);
+		if (rc) {
+			CAM_ERR(CAM_LRME,
+				"Failed in CDM StreamOff, handle=0x%x, rc=%d",
+				hw_cdm_info->cdm_handle, rc);
+			goto unlock;
+		}
+	}
+
+	rc = cam_lrme_soc_disable_resources(lrme_hw);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed in Disable SOC, rc=%d", rc);
+		goto unlock;
+	}
+
+	lrme_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	if (lrme_core->state == CAM_LRME_CORE_STATE_IDLE) {
+		lrme_core->state = CAM_LRME_CORE_STATE_INIT;
+	} else {
+		CAM_ERR(CAM_LRME, "HW in wrong state %d", lrme_core->state);
+		rc = -EINVAL;
+	}
+
+unlock:
+	mutex_unlock(&lrme_hw->hw_mutex);
+	return rc;
+}
+
+int cam_lrme_hw_submit_req(void *hw_priv, void *hw_submit_args,
+	uint32_t arg_size)
+{
+	struct cam_hw_info *lrme_hw = (struct cam_hw_info *)hw_priv;
+	struct cam_lrme_core *lrme_core;
+	struct cam_lrme_hw_submit_args *args =
+		(struct cam_lrme_hw_submit_args *)hw_submit_args;
+	int rc = 0;
+	struct cam_lrme_frame_request *frame_req;
+
+
+	if (!hw_priv || !hw_submit_args) {
+		CAM_ERR(CAM_LRME, "Invalid input");
+		return -EINVAL;
+	}
+
+	if (sizeof(struct cam_lrme_hw_submit_args) != arg_size) {
+		CAM_ERR(CAM_LRME,
+			"size of args %lu, arg_size %d",
+			sizeof(struct cam_lrme_hw_submit_args), arg_size);
+		return -EINVAL;
+	}
+
+	frame_req = args->frame_req;
+
+	mutex_lock(&lrme_hw->hw_mutex);
+
+	if (lrme_hw->open_count == 0) {
+		CAM_ERR(CAM_LRME, "HW is not open");
+		mutex_unlock(&lrme_hw->hw_mutex);
+		return -EINVAL;
+	}
+
+	lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+	if (lrme_core->state != CAM_LRME_CORE_STATE_IDLE &&
+		lrme_core->state != CAM_LRME_CORE_STATE_PROCESSING) {
+		mutex_unlock(&lrme_hw->hw_mutex);
+		CAM_DBG(CAM_LRME, "device busy, can not submit, state %d",
+			lrme_core->state);
+		return -EBUSY;
+	}
+
+	if (lrme_core->req_submit != NULL) {
+		CAM_ERR(CAM_LRME, "req_submit is not NULL");
+		rc = -EBUSY;
+		goto error;
+	}
+
+	rc = cam_lrme_hw_util_submit_req(lrme_core, frame_req);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Submit req failed");
+		goto error;
+	}
+
+	switch (lrme_core->state) {
+	case CAM_LRME_CORE_STATE_PROCESSING:
+		lrme_core->state = CAM_LRME_CORE_STATE_REQ_PROC_PEND;
+		break;
+
+	case CAM_LRME_CORE_STATE_IDLE:
+		cam_lrme_hw_util_submit_go(lrme_hw);
+		lrme_core->state = CAM_LRME_CORE_STATE_REQ_PENDING;
+		break;
+
+	default:
+		CAM_ERR(CAM_LRME, "Wrong hw state");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	lrme_core->req_submit = frame_req;
+	mutex_unlock(&lrme_hw->hw_mutex);
+	CAM_DBG(CAM_LRME, "Release lock, submit done for req %llu",
+		frame_req->req_id);
+
+	return 0;
+
+error:
+	mutex_unlock(&lrme_hw->hw_mutex);
+
+	return rc;
+
+}
+
+int cam_lrme_hw_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size)
+{
+	struct cam_hw_info *lrme_hw = hw_priv;
+	struct cam_lrme_core *lrme_core;
+	struct cam_lrme_hw_reset_args *lrme_reset_args = reset_core_args;
+	int rc;
+
+	if (!hw_priv) {
+		CAM_ERR(CAM_LRME, "Invalid input args");
+		return -EINVAL;
+	}
+
+	if (!reset_core_args ||
+		sizeof(struct cam_lrme_hw_reset_args) != arg_size) {
+		CAM_ERR(CAM_LRME, "Invalid reset args");
+		return -EINVAL;
+	}
+
+	lrme_core = lrme_hw->core_info;
+
+	mutex_lock(&lrme_hw->hw_mutex);
+	if (lrme_core->state == CAM_LRME_CORE_STATE_RECOVERY) {
+		mutex_unlock(&lrme_hw->hw_mutex);
+		CAM_ERR(CAM_LRME, "Reset not allowed in %d state",
+			lrme_core->state);
+		return -EINVAL;
+	}
+
+	lrme_core->state = CAM_LRME_CORE_STATE_RECOVERY;
+
+	rc = cam_lrme_hw_util_reset(lrme_hw, lrme_reset_args->reset_type);
+	if (rc) {
+		mutex_unlock(&lrme_hw->hw_mutex);
+		CAM_ERR(CAM_FD, "Failed to reset");
+		return rc;
+	}
+
+	lrme_core->state = CAM_LRME_CORE_STATE_IDLE;
+
+	mutex_unlock(&lrme_hw->hw_mutex);
+
+	return 0;
+}
+
+int cam_lrme_hw_get_caps(void *hw_priv, void *get_hw_cap_args,
+	uint32_t arg_size)
+{
+	struct cam_hw_info *lrme_hw;
+	struct cam_lrme_core *lrme_core;
+	struct cam_lrme_dev_cap *lrme_hw_caps =
+		(struct cam_lrme_dev_cap *)get_hw_cap_args;
+
+	if (!hw_priv || !get_hw_cap_args) {
+		CAM_ERR(CAM_LRME, "Invalid input pointers %pK %pK",
+			hw_priv, get_hw_cap_args);
+		return -EINVAL;
+	}
+
+	lrme_hw = (struct cam_hw_info *)hw_priv;
+	lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+	*lrme_hw_caps = lrme_core->hw_caps;
+
+	return 0;
+}
+
+irqreturn_t cam_lrme_hw_irq(int irq_num, void *data)
+{
+	struct cam_hw_info *lrme_hw;
+	struct cam_lrme_core *lrme_core;
+	struct cam_hw_soc_info *soc_info;
+	struct cam_lrme_hw_info   *hw_info;
+	struct crm_workq_task *task;
+	struct cam_lrme_hw_work_data *work_data;
+	uint32_t top_irq_status, fe_irq_status, we_irq_status0, we_irq_status1;
+	int rc;
+
+	if (!data) {
+		CAM_ERR(CAM_LRME, "Invalid data in IRQ callback");
+		return IRQ_NONE;
+	}
+
+	lrme_hw = (struct cam_hw_info *)data;
+	lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+	soc_info = &lrme_hw->soc_info;
+	hw_info = lrme_core->hw_info;
+
+	top_irq_status = cam_io_r_mb(
+		soc_info->reg_map[0].mem_base +
+		hw_info->titan_reg.top_irq_status);
+	CAM_DBG(CAM_LRME, "top_irq_status %x", top_irq_status);
+	cam_io_w_mb(top_irq_status,
+		soc_info->reg_map[0].mem_base +
+		hw_info->titan_reg.top_irq_clear);
+	top_irq_status &= CAM_LRME_TOP_IRQ_MASK;
+
+	fe_irq_status = cam_io_r_mb(
+		soc_info->reg_map[0].mem_base +
+		hw_info->bus_rd_reg.common_reg.irq_status);
+	CAM_DBG(CAM_LRME, "fe_irq_status %x", fe_irq_status);
+	cam_io_w_mb(fe_irq_status,
+		soc_info->reg_map[0].mem_base +
+		hw_info->bus_rd_reg.common_reg.irq_clear);
+	fe_irq_status &= CAM_LRME_FE_IRQ_MASK;
+
+	we_irq_status0 = cam_io_r_mb(
+		soc_info->reg_map[0].mem_base +
+		hw_info->bus_wr_reg.common_reg.irq_status_0);
+	CAM_DBG(CAM_LRME, "we_irq_status[0] %x", we_irq_status0);
+	cam_io_w_mb(we_irq_status0,
+		soc_info->reg_map[0].mem_base +
+		hw_info->bus_wr_reg.common_reg.irq_clear_0);
+	we_irq_status0 &= CAM_LRME_WE_IRQ_MASK_0;
+
+	we_irq_status1 = cam_io_r_mb(
+		soc_info->reg_map[0].mem_base +
+		hw_info->bus_wr_reg.common_reg.irq_status_1);
+	CAM_DBG(CAM_LRME, "we_irq_status[1] %x", we_irq_status1);
+	cam_io_w_mb(we_irq_status1,
+		soc_info->reg_map[0].mem_base +
+		hw_info->bus_wr_reg.common_reg.irq_clear_1);
+	we_irq_status1 &= CAM_LRME_WE_IRQ_MASK_1;
+
+	cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base +
+		hw_info->titan_reg.top_irq_cmd);
+	cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base +
+		hw_info->bus_wr_reg.common_reg.irq_cmd);
+	cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base +
+		hw_info->bus_rd_reg.common_reg.irq_cmd);
+
+	if (top_irq_status & 0x1) {
+		complete(&lrme_core->reset_complete);
+		top_irq_status &= (~0x1);
+	}
+
+	if (top_irq_status || fe_irq_status ||
+		we_irq_status0 || we_irq_status1) {
+		task = cam_req_mgr_workq_get_task(lrme_core->work);
+		if (!task) {
+			CAM_ERR(CAM_LRME, "no empty task available");
+			return IRQ_HANDLED;
+		}
+		work_data = (struct cam_lrme_hw_work_data *)task->payload;
+		work_data->top_irq_status = top_irq_status;
+		work_data->fe_irq_status = fe_irq_status;
+		work_data->we_irq_status[0] = we_irq_status0;
+		work_data->we_irq_status[1] = we_irq_status1;
+		task->process_cb = cam_lrme_hw_process_irq;
+		rc = cam_req_mgr_workq_enqueue_task(task, data,
+			CRM_TASK_PRIORITY_0);
+		if (rc)
+			CAM_ERR(CAM_LRME,
+				"Failed in enqueue work task, rc=%d", rc);
+	}
+
+	return IRQ_HANDLED;
+}
+
+int cam_lrme_hw_process_cmd(void *hw_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_hw_info *lrme_hw = (struct cam_hw_info *)hw_priv;
+	int rc = 0;
+
+	switch (cmd_type) {
+	case CAM_LRME_HW_CMD_PREPARE_HW_UPDATE: {
+		struct cam_lrme_hw_cmd_config_args *config_args;
+
+		config_args = (struct cam_lrme_hw_cmd_config_args *)cmd_args;
+		rc = cam_lrme_hw_util_process_config_hw(lrme_hw, config_args);
+		break;
+	}
+
+	case CAM_LRME_HW_CMD_REGISTER_CB: {
+		struct cam_lrme_hw_cmd_set_cb *cb_args;
+		struct cam_lrme_device *hw_device;
+		struct cam_lrme_core *lrme_core =
+			(struct cam_lrme_core *)lrme_hw->core_info;
+		cb_args = (struct cam_lrme_hw_cmd_set_cb *)cmd_args;
+		lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb =
+			cb_args->cam_lrme_hw_mgr_cb;
+		lrme_core->hw_mgr_cb.data = cb_args->data;
+		hw_device = cb_args->data;
+		rc = 0;
+		break;
+	}
+
+	case CAM_LRME_HW_CMD_SUBMIT: {
+		struct cam_lrme_hw_submit_args *submit_args;
+
+		submit_args = (struct cam_lrme_hw_submit_args *)cmd_args;
+		rc = cam_lrme_hw_submit_req(hw_priv,
+			submit_args, arg_size);
+		break;
+	}
+
+	default:
+		break;
+	}
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h
new file mode 100644
index 0000000..bf2f370
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h
@@ -0,0 +1,457 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_LRME_HW_CORE_H_
+#define _CAM_LRME_HW_CORE_H_
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <media/cam_defs.h>
+#include <media/cam_lrme.h>
+
+#include "cam_common_util.h"
+#include "cam_debug_util.h"
+#include "cam_io_util.h"
+#include "cam_cpas_api.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_lrme_hw_intf.h"
+#include "cam_lrme_hw_soc.h"
+#include "cam_req_mgr_workq.h"
+
+#define CAM_LRME_HW_RESET_TIMEOUT 3000
+
+#define CAM_LRME_BUS_RD_MAX_CLIENTS 2
+#define CAM_LRME_BUS_WR_MAX_CLIENTS 2
+
+#define CAM_LRME_HW_WORKQ_NUM_TASK 30
+
+#define CAM_LRME_TOP_IRQ_MASK          0x19
+#define CAM_LRME_WE_IRQ_MASK_0         0x2
+#define CAM_LRME_WE_IRQ_MASK_1         0x0
+#define CAM_LRME_FE_IRQ_MASK           0x0
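+
+/*
+ * The mask values above mirror how the irq handler in cam_lrme_hw_core.c
+ * uses the status bits: the TOP mask (0x19) keeps bit 0 (reset complete),
+ * bit 3 (error) and bit 4 (idle); WE mask 0 (0x2) keeps bit 1 (reg update);
+ * the FE and WE-1 masks currently pass nothing through.
+ */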
+
+#define CAM_LRME_MAX_REG_PAIR_NUM 60
+
+/**
+ * enum cam_lrme_irq_set
+ *
+ * @CAM_LRME_IRQ_ENABLE  : Enable irqs
+ * @CAM_LRME_IRQ_DISABLE : Disable irqs
+ */
+enum cam_lrme_irq_set {
+	CAM_LRME_IRQ_ENABLE,
+	CAM_LRME_IRQ_DISABLE,
+};
+
+/**
+ * struct cam_lrme_cdm_info : information used to submit cdm command
+ *
+ * @cdm_handle      : CDM handle for this device
+ * @cdm_ops         : CDM ops
+ * @cdm_cmd         : CDM command pointer
+ */
+struct cam_lrme_cdm_info {
+	uint32_t                   cdm_handle;
+	struct cam_cdm_utils_ops  *cdm_ops;
+	struct cam_cdm_bl_request *cdm_cmd;
+};
+
+/**
+ * struct cam_lrme_hw_work_data : Work data for HW work queue
+ *
+ * @top_irq_status : Top registers irq status
+ * @fe_irq_status  : FE engine irq status
+ * @we_irq_status  : WE engine irq status
+ */
+struct cam_lrme_hw_work_data {
+	uint32_t                          top_irq_status;
+	uint32_t                          fe_irq_status;
+	uint32_t                          we_irq_status[2];
+};
+
+/**
+ *  enum cam_lrme_core_state : LRME core states
+ *
+ * @CAM_LRME_CORE_STATE_UNINIT        : LRME is in uninit state
+ * @CAM_LRME_CORE_STATE_INIT          : LRME is in init state after probe
+ * @CAM_LRME_CORE_STATE_IDLE          : LRME is in idle state. Hardware is in
+ *                                      this state when no frame is processing
+ *                                      or waiting for this core.
+ * @CAM_LRME_CORE_STATE_REQ_PENDING   : LRME is in pending state. One frame is
+ *                                      waiting for processing
+ * @CAM_LRME_CORE_STATE_PROCESSING    : LRME is in processing state. HW manager
+ *                                      can submit one more frame to HW
+ * @CAM_LRME_CORE_STATE_REQ_PROC_PEND : Indicate two frames are inside HW.
+ * @CAM_LRME_CORE_STATE_RECOVERY      : Indicate core is in the process of recovery
+ * @CAM_LRME_CORE_STATE_MAX           : upper limit of states
+ */
+enum cam_lrme_core_state {
+	CAM_LRME_CORE_STATE_UNINIT,
+	CAM_LRME_CORE_STATE_INIT,
+	CAM_LRME_CORE_STATE_IDLE,
+	CAM_LRME_CORE_STATE_REQ_PENDING,
+	CAM_LRME_CORE_STATE_PROCESSING,
+	CAM_LRME_CORE_STATE_REQ_PROC_PEND,
+	CAM_LRME_CORE_STATE_RECOVERY,
+	CAM_LRME_CORE_STATE_MAX,
+};
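+
+/*
+ * Typical state flow, as implemented in cam_lrme_hw_core.c: INIT -> IDLE on
+ * cam_lrme_hw_start(); IDLE -> REQ_PENDING when a request is submitted and
+ * the GO command is issued; REQ_PENDING -> PROCESSING on the reg-update irq;
+ * a second submit while PROCESSING moves to REQ_PROC_PEND; the idle irq
+ * either kicks the pending frame (back to REQ_PENDING) or returns to IDLE.
+ */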
+
+/**
+ *  struct cam_lrme_core : LRME HW core information
+ *
+ * @hw_info        : Pointer to base HW information structure
+ * @device_iommu   : Device iommu handle
+ * @cdm_iommu      : CDM iommu handle
+ * @hw_caps        : Hardware capabilities
+ * @state          : Hardware state
+ * @reset_complete : Reset completion
+ * @work           : Hardware workqueue to handle irq events
+ * @work_data      : Work data used by hardware workqueue
+ * @hw_mgr_cb      : Hw manager callback
+ * @req_proc       : Pointer to the processing frame request
+ * @req_submit     : Pointer to the frame request waiting for processing
+ * @hw_cdm_info    : CDM information used by this device
+ * @hw_idx         : Hardware index
+ */
+struct cam_lrme_core {
+	struct cam_lrme_hw_info          *hw_info;
+	struct cam_iommu_handle           device_iommu;
+	struct cam_iommu_handle           cdm_iommu;
+	struct cam_lrme_dev_cap           hw_caps;
+	enum cam_lrme_core_state          state;
+	struct completion                 reset_complete;
+	struct cam_req_mgr_core_workq    *work;
+	struct cam_lrme_hw_work_data      work_data[CAM_LRME_HW_WORKQ_NUM_TASK];
+	struct cam_lrme_hw_cmd_set_cb     hw_mgr_cb;
+	struct cam_lrme_frame_request    *req_proc;
+	struct cam_lrme_frame_request    *req_submit;
+	struct cam_lrme_cdm_info         *hw_cdm_info;
+	uint32_t                          hw_idx;
+};
+
+/**
+ * struct cam_lrme_bus_rd_reg_common : Offsets of FE common registers
+ *
+ * @hw_version    : Offset of hw_version register
+ * @hw_capability : Offset of hw_capability register
+ * @sw_reset      : Offset of sw_reset register
+ * @cgc_override  : Offset of cgc_override register
+ * @irq_mask      : Offset of irq_mask register
+ * @irq_clear     : Offset of irq_clear register
+ * @irq_cmd       : Offset of irq_cmd register
+ * @irq_status    : Offset of irq_status register
+ * @cmd           : Offset of cmd register
+ * @irq_set       : Offset of irq_set register
+ * @misr_reset    : Offset of misr_reset register
+ * @security_cfg  : Offset of security_cfg register
+ * @pwr_iso_cfg   : Offset of pwr_iso_cfg register
+ * @pwr_iso_seed  : Offset of pwr_iso_seed register
+ * @test_bus_ctrl : Offset of test_bus_ctrl register
+ * @spare         : Offset of spare register
+ */
+struct cam_lrme_bus_rd_reg_common {
+	uint32_t hw_version;
+	uint32_t hw_capability;
+	uint32_t sw_reset;
+	uint32_t cgc_override;
+	uint32_t irq_mask;
+	uint32_t irq_clear;
+	uint32_t irq_cmd;
+	uint32_t irq_status;
+	uint32_t cmd;
+	uint32_t irq_set;
+	uint32_t misr_reset;
+	uint32_t security_cfg;
+	uint32_t pwr_iso_cfg;
+	uint32_t pwr_iso_seed;
+	uint32_t test_bus_ctrl;
+	uint32_t spare;
+};
+
+/**
+ * struct cam_lrme_bus_wr_reg_common : Offset of WE common registers
+ *
+ * @hw_version        : Offset of hw_version register
+ * @hw_capability     : Offset of hw_capability register
+ * @sw_reset          : Offset of sw_reset register
+ * @cgc_override      : Offset of cgc_override register
+ * @misr_reset        : Offset of misr_reset register
+ * @pwr_iso_cfg       : Offset of pwr_iso_cfg register
+ * @test_bus_ctrl     : Offset of test_bus_ctrl register
+ * @composite_mask_0  : Offset of composite_mask_0 register
+ * @irq_mask_0        : Offset of irq_mask_0 register
+ * @irq_mask_1        : Offset of irq_mask_1 register
+ * @irq_clear_0       : Offset of irq_clear_0 register
+ * @irq_clear_1       : Offset of irq_clear_1 register
+ * @irq_status_0      : Offset of irq_status_0 register
+ * @irq_status_1      : Offset of irq_status_1 register
+ * @irq_cmd           : Offset of irq_cmd register
+ * @irq_set_0         : Offset of irq_set_0 register
+ * @irq_set_1         : Offset of irq_set_1 register
+ * @addr_fifo_status  : Offset of addr_fifo_status register
+ * @frame_header_cfg0 : Offset of frame_header_cfg0 register
+ * @frame_header_cfg1 : Offset of frame_header_cfg1 register
+ * @spare             : Offset of spare register
+ */
+struct cam_lrme_bus_wr_reg_common {
+	uint32_t hw_version;
+	uint32_t hw_capability;
+	uint32_t sw_reset;
+	uint32_t cgc_override;
+	uint32_t misr_reset;
+	uint32_t pwr_iso_cfg;
+	uint32_t test_bus_ctrl;
+	uint32_t composite_mask_0;
+	uint32_t irq_mask_0;
+	uint32_t irq_mask_1;
+	uint32_t irq_clear_0;
+	uint32_t irq_clear_1;
+	uint32_t irq_status_0;
+	uint32_t irq_status_1;
+	uint32_t irq_cmd;
+	uint32_t irq_set_0;
+	uint32_t irq_set_1;
+	uint32_t addr_fifo_status;
+	uint32_t frame_header_cfg0;
+	uint32_t frame_header_cfg1;
+	uint32_t spare;
+};
+
+/**
+ * struct cam_lrme_bus_rd_bus_client : Offset of FE registers
+ *
+ * @core_cfg                : Offset of core_cfg register
+ * @ccif_meta_data          : Offset of ccif_meta_data register
+ * @addr_image              : Offset of addr_image register
+ * @rd_buffer_size          : Offset of rd_buffer_size register
+ * @rd_stride               : Offset of rd_stride register
+ * @unpack_cfg_0            : Offset of unpack_cfg_0 register
+ * @latency_buff_allocation : Offset of latency_buff_allocation register
+ * @burst_limit_cfg         : Offset of burst_limit_cfg register
+ * @misr_cfg_0              : Offset of misr_cfg_0 register
+ * @misr_cfg_1              : Offset of misr_cfg_1 register
+ * @misr_rd_val             : Offset of misr_rd_val register
+ * @debug_status_cfg        : Offset of debug_status_cfg register
+ * @debug_status_0          : Offset of debug_status_0 register
+ * @debug_status_1          : Offset of debug_status_1 register
+ */
+struct cam_lrme_bus_rd_bus_client {
+	uint32_t core_cfg;
+	uint32_t ccif_meta_data;
+	uint32_t addr_image;
+	uint32_t rd_buffer_size;
+	uint32_t rd_stride;
+	uint32_t unpack_cfg_0;
+	uint32_t latency_buff_allocation;
+	uint32_t burst_limit_cfg;
+	uint32_t misr_cfg_0;
+	uint32_t misr_cfg_1;
+	uint32_t misr_rd_val;
+	uint32_t debug_status_cfg;
+	uint32_t debug_status_0;
+	uint32_t debug_status_1;
+};
+
+/**
+ * struct cam_lrme_bus_wr_bus_client : Offset of WE registers
+ *
+ * @status_0                  : Offset of status_0 register
+ * @status_1                  : Offset of status_1 register
+ * @cfg                       : Offset of cfg register
+ * @addr_frame_header         : Offset of addr_frame_header register
+ * @frame_header_cfg          : Offset of frame_header_cfg register
+ * @addr_image                : Offset of addr_image register
+ * @addr_image_offset         : Offset of addr_image_offset register
+ * @buffer_width_cfg          : Offset of buffer_width_cfg register
+ * @buffer_height_cfg         : Offset of buffer_height_cfg register
+ * @packer_cfg                : Offset of packer_cfg register
+ * @wr_stride                 : Offset of wr_stride register
+ * @irq_subsample_cfg_period  : Offset of irq_subsample_cfg_period register
+ * @irq_subsample_cfg_pattern : Offset of irq_subsample_cfg_pattern register
+ * @burst_limit_cfg           : Offset of burst_limit_cfg register
+ * @misr_cfg                  : Offset of misr_cfg register
+ * @misr_rd_word_sel          : Offset of misr_rd_word_sel register
+ * @misr_val                  : Offset of misr_val register
+ * @debug_status_cfg          : Offset of debug_status_cfg register
+ * @debug_status_0            : Offset of debug_status_0 register
+ * @debug_status_1            : Offset of debug_status_1 register
+ */
+struct cam_lrme_bus_wr_bus_client {
+	uint32_t status_0;
+	uint32_t status_1;
+	uint32_t cfg;
+	uint32_t addr_frame_header;
+	uint32_t frame_header_cfg;
+	uint32_t addr_image;
+	uint32_t addr_image_offset;
+	uint32_t buffer_width_cfg;
+	uint32_t buffer_height_cfg;
+	uint32_t packer_cfg;
+	uint32_t wr_stride;
+	uint32_t irq_subsample_cfg_period;
+	uint32_t irq_subsample_cfg_pattern;
+	uint32_t burst_limit_cfg;
+	uint32_t misr_cfg;
+	uint32_t misr_rd_word_sel;
+	uint32_t misr_val;
+	uint32_t debug_status_cfg;
+	uint32_t debug_status_0;
+	uint32_t debug_status_1;
+};
+
+/**
+ * struct cam_lrme_bus_rd_hw_info : FE registers information
+ *
+ * @common_reg     : FE common register
+ * @bus_client_reg : List of FE bus registers information
+ */
+struct cam_lrme_bus_rd_hw_info {
+	struct cam_lrme_bus_rd_reg_common common_reg;
+	struct cam_lrme_bus_rd_bus_client
+		bus_client_reg[CAM_LRME_BUS_RD_MAX_CLIENTS];
+};
+
+/**
+ * struct cam_lrme_bus_wr_hw_info : WE engine registers information
+ *
+ * @common_reg     : WE common register
+ * @bus_client_reg : List of WE bus registers information
+ */
+struct cam_lrme_bus_wr_hw_info {
+	struct cam_lrme_bus_wr_reg_common common_reg;
+	struct cam_lrme_bus_wr_bus_client
+		bus_client_reg[CAM_LRME_BUS_WR_MAX_CLIENTS];
+};
+
+/**
+ * struct cam_lrme_clc_reg : Offset of clc registers
+ *
+ * @clc_hw_version                 : Offset of clc_hw_version register
+ * @clc_hw_status                  : Offset of clc_hw_status register
+ * @clc_hw_status_dbg              : Offset of clc_hw_status_dbg register
+ * @clc_module_cfg                 : Offset of clc_module_cfg register
+ * @clc_moduleformat               : Offset of clc_moduleformat register
+ * @clc_rangestep                  : Offset of clc_rangestep register
+ * @clc_offset                     : Offset of clc_offset register
+ * @clc_maxallowedsad              : Offset of clc_maxallowedsad register
+ * @clc_minallowedtarmad           : Offset of clc_minallowedtarmad register
+ * @clc_meaningfulsaddiff          : Offset of clc_meaningfulsaddiff register
+ * @clc_minsaddiffdenom            : Offset of clc_minsaddiffdenom register
+ * @clc_robustnessmeasuredistmap_0 : Offset of measuredistmap_0 register
+ * @clc_robustnessmeasuredistmap_1 : Offset of measuredistmap_1 register
+ * @clc_robustnessmeasuredistmap_2 : Offset of measuredistmap_2 register
+ * @clc_robustnessmeasuredistmap_3 : Offset of measuredistmap_3 register
+ * @clc_robustnessmeasuredistmap_4 : Offset of measuredistmap_4 register
+ * @clc_robustnessmeasuredistmap_5 : Offset of measuredistmap_5 register
+ * @clc_robustnessmeasuredistmap_6 : Offset of measuredistmap_6 register
+ * @clc_robustnessmeasuredistmap_7 : Offset of measuredistmap_7 register
+ * @clc_ds_crop_horizontal         : Offset of clc_ds_crop_horizontal register
+ * @clc_ds_crop_vertical           : Offset of clc_ds_crop_vertical register
+ * @clc_tar_pd_unpacker            : Offset of clc_tar_pd_unpacker register
+ * @clc_ref_pd_unpacker            : Offset of clc_ref_pd_unpacker register
+ * @clc_sw_override                : Offset of clc_sw_override register
+ * @clc_tar_height                 : Offset of clc_tar_height register
+ * @clc_ref_height                 : Offset of clc_ref_height register
+ * @clc_test_bus_ctrl              : Offset of clc_test_bus_ctrl register
+ * @clc_spare                      : Offset of clc_spare register
+ */
+struct cam_lrme_clc_reg {
+	uint32_t clc_hw_version;
+	uint32_t clc_hw_status;
+	uint32_t clc_hw_status_dbg;
+	uint32_t clc_module_cfg;
+	uint32_t clc_moduleformat;
+	uint32_t clc_rangestep;
+	uint32_t clc_offset;
+	uint32_t clc_maxallowedsad;
+	uint32_t clc_minallowedtarmad;
+	uint32_t clc_meaningfulsaddiff;
+	uint32_t clc_minsaddiffdenom;
+	uint32_t clc_robustnessmeasuredistmap_0;
+	uint32_t clc_robustnessmeasuredistmap_1;
+	uint32_t clc_robustnessmeasuredistmap_2;
+	uint32_t clc_robustnessmeasuredistmap_3;
+	uint32_t clc_robustnessmeasuredistmap_4;
+	uint32_t clc_robustnessmeasuredistmap_5;
+	uint32_t clc_robustnessmeasuredistmap_6;
+	uint32_t clc_robustnessmeasuredistmap_7;
+	uint32_t clc_ds_crop_horizontal;
+	uint32_t clc_ds_crop_vertical;
+	uint32_t clc_tar_pd_unpacker;
+	uint32_t clc_ref_pd_unpacker;
+	uint32_t clc_sw_override;
+	uint32_t clc_tar_height;
+	uint32_t clc_ref_height;
+	uint32_t clc_test_bus_ctrl;
+	uint32_t clc_spare;
+};
+
+/**
+ * struct cam_lrme_titan_reg : Offset of LRME top registers
+ *
+ * @top_hw_version           : Offset of top_hw_version register
+ * @top_titan_version        : Offset of top_titan_version register
+ * @top_rst_cmd              : Offset of top_rst_cmd register
+ * @top_core_clk_cfg         : Offset of top_core_clk_cfg register
+ * @top_irq_status           : Offset of top_irq_status register
+ * @top_irq_mask             : Offset of top_irq_mask register
+ * @top_irq_clear            : Offset of top_irq_clear register
+ * @top_irq_set              : Offset of top_irq_set register
+ * @top_irq_cmd              : Offset of top_irq_cmd register
+ * @top_violation_status     : Offset of top_violation_status register
+ * @top_spare                : Offset of top_spare register
+ */
+struct cam_lrme_titan_reg {
+	uint32_t top_hw_version;
+	uint32_t top_titan_version;
+	uint32_t top_rst_cmd;
+	uint32_t top_core_clk_cfg;
+	uint32_t top_irq_status;
+	uint32_t top_irq_mask;
+	uint32_t top_irq_clear;
+	uint32_t top_irq_set;
+	uint32_t top_irq_cmd;
+	uint32_t top_violation_status;
+	uint32_t top_spare;
+};
+
+/**
+ * struct cam_lrme_hw_info : LRME registers information
+ *
+ * @clc_reg    : LRME CLC registers
+ * @bus_rd_reg : LRME FE registers
+ * @bus_wr_reg : LRME WE registers
+ * @titan_reg  : LRME top registers
+ */
+struct cam_lrme_hw_info {
+	struct cam_lrme_clc_reg clc_reg;
+	struct cam_lrme_bus_rd_hw_info bus_rd_reg;
+	struct cam_lrme_bus_wr_hw_info bus_wr_reg;
+	struct cam_lrme_titan_reg titan_reg;
+};
+
+int cam_lrme_hw_process_irq(void *priv, void *data);
+int cam_lrme_hw_submit_req(void *hw_priv, void *hw_submit_args,
+	uint32_t arg_size);
+int cam_lrme_hw_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size);
+int cam_lrme_hw_stop(void *hw_priv, void *stop_args, uint32_t arg_size);
+int cam_lrme_hw_get_caps(void *hw_priv, void *get_hw_cap_args,
+	uint32_t arg_size);
+irqreturn_t cam_lrme_hw_irq(int irq_num, void *data);
+int cam_lrme_hw_process_cmd(void *hw_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size);
+int cam_lrme_hw_util_get_caps(struct cam_hw_info *lrme_hw,
+	struct cam_lrme_dev_cap *hw_caps);
+int cam_lrme_hw_start(void *hw_priv, void *hw_init_args, uint32_t arg_size);
+int cam_lrme_hw_flush(void *hw_priv, void *hw_flush_args, uint32_t arg_size);
+void cam_lrme_set_irq(struct cam_hw_info *lrme_hw, enum cam_lrme_irq_set set);
+
+#endif /* _CAM_LRME_HW_CORE_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c
new file mode 100644
index 0000000..2e63752
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c
@@ -0,0 +1,320 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <media/cam_req_mgr.h>
+
+#include "cam_subdev.h"
+#include "cam_lrme_hw_intf.h"
+#include "cam_lrme_hw_core.h"
+#include "cam_lrme_hw_soc.h"
+#include "cam_lrme_hw_reg.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_lrme_hw_mgr.h"
+#include "cam_mem_mgr_api.h"
+#include "cam_smmu_api.h"
+
+#define CAM_LRME_HW_WORKQ_NUM_TASK 30
+
+static int cam_lrme_hw_dev_util_cdm_acquire(struct cam_lrme_core *lrme_core,
+	struct cam_hw_info *lrme_hw)
+{
+	int rc, i;
+	struct cam_cdm_bl_request *cdm_cmd;
+	struct cam_cdm_acquire_data cdm_acquire;
+	struct cam_lrme_cdm_info *hw_cdm_info;
+
+	hw_cdm_info = kzalloc(sizeof(struct cam_lrme_cdm_info),
+		GFP_KERNEL);
+	if (!hw_cdm_info) {
+		CAM_ERR(CAM_LRME, "No memory for hw_cdm_info");
+		return -ENOMEM;
+	}
+
+	cdm_cmd = kzalloc((sizeof(struct cam_cdm_bl_request) +
+		((CAM_LRME_MAX_HW_ENTRIES - 1) *
+		sizeof(struct cam_cdm_bl_cmd))), GFP_KERNEL);
+	if (!cdm_cmd) {
+		CAM_ERR(CAM_LRME, "No memory for cdm_cmd");
+		kfree(hw_cdm_info);
+		return -ENOMEM;
+	}
+
+	memset(&cdm_acquire, 0, sizeof(cdm_acquire));
+	strlcpy(cdm_acquire.identifier, "lrmecdm",
+		sizeof(cdm_acquire.identifier));
+	cdm_acquire.cell_index = lrme_hw->soc_info.index;
+	cdm_acquire.handle = 0;
+	cdm_acquire.userdata = hw_cdm_info;
+	cdm_acquire.cam_cdm_callback = NULL;
+	cdm_acquire.id = CAM_CDM_VIRTUAL;
+	cdm_acquire.base_array_cnt = lrme_hw->soc_info.num_reg_map;
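+	/* Provide the CDM with every mapped LRME register region */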
+	for (i = 0; i < lrme_hw->soc_info.num_reg_map; i++)
+		cdm_acquire.base_array[i] = &lrme_hw->soc_info.reg_map[i];
+
+	rc = cam_cdm_acquire(&cdm_acquire);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Can't acquire cdm");
+		goto error;
+	}
+
+	hw_cdm_info->cdm_cmd = cdm_cmd;
+	hw_cdm_info->cdm_ops = cdm_acquire.ops;
+	hw_cdm_info->cdm_handle = cdm_acquire.handle;
+
+	lrme_core->hw_cdm_info = hw_cdm_info;
+	CAM_DBG(CAM_LRME, "cdm acquire done");
+
+	return 0;
+error:
+	kfree(cdm_cmd);
+	kfree(hw_cdm_info);
+	return rc;
+}
+
+static int cam_lrme_hw_dev_probe(struct platform_device *pdev)
+{
+	struct cam_hw_info *lrme_hw;
+	struct cam_hw_intf lrme_hw_intf;
+	struct cam_lrme_core *lrme_core;
+	const struct of_device_id *match_dev = NULL;
+	struct cam_lrme_hw_info *hw_info;
+	int rc, i;
+
+	lrme_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!lrme_hw) {
+		CAM_ERR(CAM_LRME, "No memory to create lrme_hw");
+		return -ENOMEM;
+	}
+
+	lrme_core = kzalloc(sizeof(struct cam_lrme_core), GFP_KERNEL);
+	if (!lrme_core) {
+		CAM_ERR(CAM_LRME, "No memory to create lrme_core");
+		kfree(lrme_hw);
+		return -ENOMEM;
+	}
+
+	lrme_hw->core_info = lrme_core;
+	lrme_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	lrme_hw->soc_info.pdev = pdev;
+	lrme_hw->soc_info.dev = &pdev->dev;
+	lrme_hw->soc_info.dev_name = pdev->name;
+	lrme_hw->open_count = 0;
+	lrme_core->state = CAM_LRME_CORE_STATE_INIT;
+
+	mutex_init(&lrme_hw->hw_mutex);
+	spin_lock_init(&lrme_hw->hw_lock);
+	init_completion(&lrme_hw->hw_complete);
+	init_completion(&lrme_core->reset_complete);
+
+	rc = cam_req_mgr_workq_create("cam_lrme_hw_worker",
+		CAM_LRME_HW_WORKQ_NUM_TASK,
+		&lrme_core->work, CRM_WORKQ_USAGE_IRQ);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Unable to create a workq, rc=%d", rc);
+		goto free_memory;
+	}
+
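+	/* Assign a preallocated work-data payload to each workq task */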
+	for (i = 0; i < CAM_LRME_HW_WORKQ_NUM_TASK; i++)
+		lrme_core->work->task.pool[i].payload =
+			&lrme_core->work_data[i];
+
+	match_dev = of_match_device(pdev->dev.driver->of_match_table,
+		&pdev->dev);
+	if (!match_dev || !match_dev->data) {
+		CAM_ERR(CAM_LRME, "No of_match data, %pK", match_dev);
+		rc = -EINVAL;
+		goto destroy_workqueue;
+	}
+	hw_info = (struct cam_lrme_hw_info *)match_dev->data;
+	lrme_core->hw_info = hw_info;
+
+	rc = cam_lrme_soc_init_resources(&lrme_hw->soc_info,
+		cam_lrme_hw_irq, lrme_hw);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to init soc, rc=%d", rc);
+		goto destroy_workqueue;
+	}
+
+	rc = cam_lrme_hw_dev_util_cdm_acquire(lrme_core, lrme_hw);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to acquire cdm");
+		goto deinit_platform_res;
+	}
+
+	rc = cam_smmu_get_handle("lrme", &lrme_core->device_iommu.non_secure);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Get iommu handle failed");
+		goto release_cdm;
+	}
+
+	rc = cam_smmu_ops(lrme_core->device_iommu.non_secure, CAM_SMMU_ATTACH);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "LRME attach iommu handle failed, rc=%d", rc);
+		goto destroy_smmu;
+	}
+
+	rc = cam_lrme_hw_start(lrme_hw, NULL, 0);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to init hw, rc=%d", rc);
+		goto detach_smmu;
+	}
+
+	rc = cam_lrme_hw_util_get_caps(lrme_hw, &lrme_core->hw_caps);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to get hw caps, rc=%d", rc);
+		if (cam_lrme_hw_stop(lrme_hw, NULL, 0))
+			CAM_ERR(CAM_LRME, "Failed in hw deinit");
+		goto detach_smmu;
+	}
+
+	rc = cam_lrme_hw_stop(lrme_hw, NULL, 0);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to deinit hw, rc=%d", rc);
+		goto detach_smmu;
+	}
+
+	lrme_core->hw_idx = lrme_hw->soc_info.index;
+	lrme_hw_intf.hw_priv = lrme_hw;
+	lrme_hw_intf.hw_idx = lrme_hw->soc_info.index;
+	lrme_hw_intf.hw_ops.get_hw_caps = cam_lrme_hw_get_caps;
+	lrme_hw_intf.hw_ops.init = NULL;
+	lrme_hw_intf.hw_ops.deinit = NULL;
+	lrme_hw_intf.hw_ops.reset = cam_lrme_hw_reset;
+	lrme_hw_intf.hw_ops.reserve = NULL;
+	lrme_hw_intf.hw_ops.release = NULL;
+	lrme_hw_intf.hw_ops.start = cam_lrme_hw_start;
+	lrme_hw_intf.hw_ops.stop = cam_lrme_hw_stop;
+	lrme_hw_intf.hw_ops.read = NULL;
+	lrme_hw_intf.hw_ops.write = NULL;
+	lrme_hw_intf.hw_ops.process_cmd = cam_lrme_hw_process_cmd;
+	lrme_hw_intf.hw_type = CAM_HW_LRME;
+
+	rc = cam_cdm_get_iommu_handle("lrmecdm", &lrme_core->cdm_iommu);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to acquire the CDM iommu handles");
+		goto detach_smmu;
+	}
+
+	rc = cam_lrme_mgr_register_device(&lrme_hw_intf,
+		&lrme_core->device_iommu,
+		&lrme_core->cdm_iommu);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to register device");
+		goto detach_smmu;
+	}
+
+	platform_set_drvdata(pdev, lrme_hw);
+	CAM_DBG(CAM_LRME, "LRME-%d probe successful", lrme_hw_intf.hw_idx);
+
+	return rc;
+
+detach_smmu:
+	cam_smmu_ops(lrme_core->device_iommu.non_secure, CAM_SMMU_DETACH);
+destroy_smmu:
+	cam_smmu_destroy_handle(lrme_core->device_iommu.non_secure);
+release_cdm:
+	cam_cdm_release(lrme_core->hw_cdm_info->cdm_handle);
+	kfree(lrme_core->hw_cdm_info->cdm_cmd);
+	kfree(lrme_core->hw_cdm_info);
+deinit_platform_res:
+	if (cam_lrme_soc_deinit_resources(&lrme_hw->soc_info))
+		CAM_ERR(CAM_LRME, "Failed in soc deinit");
+destroy_workqueue:
+	cam_req_mgr_workq_destroy(&lrme_core->work);
+free_memory:
+	mutex_destroy(&lrme_hw->hw_mutex);
+	kfree(lrme_hw);
+	kfree(lrme_core);
+
+	return rc;
+}
+
+static int cam_lrme_hw_dev_remove(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct cam_hw_info *lrme_hw;
+	struct cam_lrme_core *lrme_core;
+
+	lrme_hw = platform_get_drvdata(pdev);
+	if (!lrme_hw) {
+		CAM_ERR(CAM_LRME, "Invalid lrme_hw from platform data");
+		return -ENODEV;
+	}
+
+	lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+	if (!lrme_core) {
+		CAM_ERR(CAM_LRME, "Invalid lrme_core in lrme_hw");
+		rc = -EINVAL;
+		goto deinit_platform_res;
+	}
+
+	cam_smmu_ops(lrme_core->device_iommu.non_secure, CAM_SMMU_DETACH);
+	cam_smmu_destroy_handle(lrme_core->device_iommu.non_secure);
+	cam_cdm_release(lrme_core->hw_cdm_info->cdm_handle);
+	cam_lrme_mgr_deregister_device(lrme_core->hw_idx);
+
+	kfree(lrme_core->hw_cdm_info->cdm_cmd);
+	kfree(lrme_core->hw_cdm_info);
+	kfree(lrme_core);
+
+deinit_platform_res:
+	rc = cam_lrme_soc_deinit_resources(&lrme_hw->soc_info);
+	if (rc)
+		CAM_ERR(CAM_LRME, "Error in LRME soc deinit, rc=%d", rc);
+
+	mutex_destroy(&lrme_hw->hw_mutex);
+	kfree(lrme_hw);
+
+	return rc;
+}
+
+static const struct of_device_id cam_lrme_hw_dt_match[] = {
+	{
+		.compatible = "qcom,lrme",
+		.data = &cam_lrme10_hw_info,
+	},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, cam_lrme_hw_dt_match);
+
+static struct platform_driver cam_lrme_hw_driver = {
+	.probe = cam_lrme_hw_dev_probe,
+	.remove = cam_lrme_hw_dev_remove,
+	.driver = {
+		.name = "cam_lrme_hw",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_lrme_hw_dt_match,
+	},
+};
+
+static int __init cam_lrme_hw_init_module(void)
+{
+	return platform_driver_register(&cam_lrme_hw_driver);
+}
+
+static void __exit cam_lrme_hw_exit_module(void)
+{
+	platform_driver_unregister(&cam_lrme_hw_driver);
+}
+
+module_init(cam_lrme_hw_init_module);
+module_exit(cam_lrme_hw_exit_module);
+MODULE_DESCRIPTION("CAM LRME HW driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h
new file mode 100644
index 0000000..d16b174
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h
@@ -0,0 +1,200 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_LRME_HW_INTF_H_
+#define _CAM_LRME_HW_INTF_H_
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <media/cam_cpas.h>
+#include <media/cam_req_mgr.h>
+#include <media/cam_lrme.h>
+
+#include "cam_io_util.h"
+#include "cam_soc_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_subdev.h"
+#include "cam_cpas_api.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_debug_util.h"
+
+
+#define CAM_LRME_MAX_IO_BUFFER 2
+#define CAM_LRME_MAX_HW_ENTRIES 5
+
+#define CAM_LRME_BASE_IDX 0
+
+/**
+ *  enum cam_lrme_hw_type : Enum for LRME HW type
+ *
+ * @CAM_HW_LRME : LRME HW type
+ */
+enum cam_lrme_hw_type {
+	CAM_HW_LRME,
+};
+
+/**
+ * enum cam_lrme_cb_type : HW manager callback type
+ *
+ * @CAM_LRME_CB_BUF_DONE        : Indicate buf done has been generated
+ * @CAM_LRME_CB_COMP_REG_UPDATE : Indicate receiving WE comp reg update
+ * @CAM_LRME_CB_PUT_FRAME       : Request HW manager to put back the frame
+ * @CAM_LRME_CB_ERROR           : Indicate error irq has been generated
+ */
+enum cam_lrme_cb_type {
+	CAM_LRME_CB_BUF_DONE = 1,
+	CAM_LRME_CB_COMP_REG_UPDATE = 1 << 1,
+	CAM_LRME_CB_PUT_FRAME = 1 << 2,
+	CAM_LRME_CB_ERROR = 1 << 3,
+};
+
+/**
+ * enum cam_lrme_hw_cmd_type : HW CMD type
+ *
+ * @CAM_LRME_HW_CMD_PREPARE_HW_UPDATE : Prepare HW update
+ * @CAM_LRME_HW_CMD_REGISTER_CB       : Register HW manager callback
+ * @CAM_LRME_HW_CMD_SUBMIT            : Submit frame to HW
+ */
+enum cam_lrme_hw_cmd_type {
+	CAM_LRME_HW_CMD_PREPARE_HW_UPDATE,
+	CAM_LRME_HW_CMD_REGISTER_CB,
+	CAM_LRME_HW_CMD_SUBMIT,
+};
+
+/**
+ * enum cam_lrme_hw_reset_type : Type of reset
+ *
+ * @CAM_LRME_HW_RESET_TYPE_HW_RESET : HW reset
+ * @CAM_LRME_HW_RESET_TYPE_SW_RESET : SW reset
+ */
+enum cam_lrme_hw_reset_type {
+	CAM_LRME_HW_RESET_TYPE_HW_RESET,
+	CAM_LRME_HW_RESET_TYPE_SW_RESET,
+};
+
+/**
+ * struct cam_lrme_frame_request : LRME frame request
+ *
+ * @frame_list            : List head
+ * @req_id                : Request ID
+ * @ctxt_to_hw_map        : Information about context id, priority and device id
+ * @hw_device             : Pointer to HW device
+ * @hw_update_entries     : List of hw_update_entries
+ * @num_hw_update_entries : number of hw_update_entries
+ */
+struct cam_lrme_frame_request {
+	struct list_head           frame_list;
+	uint64_t                   req_id;
+	void                      *ctxt_to_hw_map;
+	struct cam_lrme_device    *hw_device;
+	struct cam_hw_update_entry hw_update_entries[CAM_LRME_MAX_HW_ENTRIES];
+	uint32_t                   num_hw_update_entries;
+};
+
+/**
+ * struct cam_lrme_hw_io_buffer : IO buffer information
+ *
+ * @valid     : Indicate whether this IO config is valid
+ * @io_cfg    : Pointer to IO configuration
+ * @num_buf   : Number of buffers
+ * @num_plane : Number of planes
+ * @io_addr   : List of IO address
+ */
+struct cam_lrme_hw_io_buffer {
+	bool                   valid;
+	struct cam_buf_io_cfg *io_cfg;
+	uint32_t               num_buf;
+	uint32_t               num_plane;
+	uint64_t               io_addr[CAM_PACKET_MAX_PLANES];
+};
+
+/**
+ * struct cam_lrme_hw_cmd_config_args : Args for prepare HW update
+ *
+ * @hw_device       : Pointer to HW device
+ * @input_buf       : List of input buffers
+ * @output_buf      : List of output buffers
+ * @cmd_buf_addr    : Pointer to available KMD buffer
+ * @size            : Available KMD buffer size
+ * @config_buf_size : Size used to prepare update
+ */
+struct cam_lrme_hw_cmd_config_args {
+	struct cam_lrme_device *hw_device;
+	struct cam_lrme_hw_io_buffer input_buf[CAM_LRME_MAX_IO_BUFFER];
+	struct cam_lrme_hw_io_buffer output_buf[CAM_LRME_MAX_IO_BUFFER];
+	uint32_t *cmd_buf_addr;
+	uint32_t size;
+	uint32_t config_buf_size;
+};
+
+/**
+ * struct cam_lrme_hw_flush_args : Args for flush HW
+ *
+ * @ctxt_to_hw_map : Identity of context
+ * @req_to_flush   : Pointer to the frame to flush in case of a
+ *                   single-frame flush
+ * @flush_type     : Flush type
+ */
+struct cam_lrme_hw_flush_args {
+	void                          *ctxt_to_hw_map;
+	struct cam_lrme_frame_request *req_to_flush;
+	uint32_t                       flush_type;
+};
+
+/**
+ * struct cam_lrme_hw_reset_args : Args for reset HW
+ *
+ * @reset_type : Enum cam_lrme_hw_reset_type
+ */
+struct cam_lrme_hw_reset_args {
+	uint32_t reset_type;
+};
+
+/**
+ * struct cam_lrme_hw_cb_args : HW manager callback args
+ *
+ * @cb_type   : Callback event type
+ * @frame_req : Pointer to the frame associated with the cb
+ */
+struct cam_lrme_hw_cb_args {
+	uint32_t                       cb_type;
+	struct cam_lrme_frame_request *frame_req;
+};
+
+/**
+ * struct cam_lrme_hw_cmd_set_cb : Args for set callback function
+ *
+ * @cam_lrme_hw_mgr_cb : Callback function pointer
+ * @data               : Data sent along with callback function
+ */
+struct cam_lrme_hw_cmd_set_cb {
+	int (*cam_lrme_hw_mgr_cb)(void *data,
+		struct cam_lrme_hw_cb_args *args);
+	void *data;
+};
+
+/**
+ * struct cam_lrme_hw_submit_args : Args for submit request
+ *
+ * @hw_update_entries     : List of hw update entries used to program registers
+ * @num_hw_update_entries : Number of hw update entries
+ * @frame_req             : Pointer to the frame request
+ */
+struct cam_lrme_hw_submit_args {
+	struct cam_hw_update_entry    *hw_update_entries;
+	uint32_t                       num_hw_update_entries;
+	struct cam_lrme_frame_request *frame_req;
+};
+
+#endif /* _CAM_LRME_HW_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_reg.h b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_reg.h
new file mode 100644
index 0000000..39cfde7
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_reg.h
@@ -0,0 +1,193 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_LRME_HW_REG_H_
+#define _CAM_LRME_HW_REG_H_
+
+#include "cam_lrme_hw_core.h"
+
+static struct cam_lrme_hw_info cam_lrme10_hw_info = {
+	.clc_reg = {
+		.clc_hw_version                 = 0x00000000,
+		.clc_hw_status                  = 0x00000004,
+		.clc_hw_status_dbg              = 0x00000008,
+		.clc_module_cfg                 = 0x00000060,
+		.clc_moduleformat               = 0x000000A8,
+		.clc_rangestep                  = 0x00000068,
+		.clc_offset                     = 0x0000006C,
+		.clc_maxallowedsad              = 0x00000070,
+		.clc_minallowedtarmad           = 0x00000074,
+		.clc_meaningfulsaddiff          = 0x00000078,
+		.clc_minsaddiffdenom            = 0x0000007C,
+		.clc_robustnessmeasuredistmap_0 = 0x00000080,
+		.clc_robustnessmeasuredistmap_1 = 0x00000084,
+		.clc_robustnessmeasuredistmap_2 = 0x00000088,
+		.clc_robustnessmeasuredistmap_3 = 0x0000008C,
+		.clc_robustnessmeasuredistmap_4 = 0x00000090,
+		.clc_robustnessmeasuredistmap_5 = 0x00000094,
+		.clc_robustnessmeasuredistmap_6 = 0x00000098,
+		.clc_robustnessmeasuredistmap_7 = 0x0000009C,
+		.clc_ds_crop_horizontal         = 0x000000A0,
+		.clc_ds_crop_vertical           = 0x000000A4,
+		.clc_tar_pd_unpacker            = 0x000000AC,
+		.clc_ref_pd_unpacker            = 0x000000B0,
+		.clc_sw_override                = 0x000000B4,
+		.clc_tar_height                 = 0x000000B8,
+		.clc_ref_height                 = 0x000000BC,
+		.clc_test_bus_ctrl              = 0x000001F8,
+		.clc_spare                      = 0x000001FC,
+	},
+	.bus_rd_reg = {
+		.common_reg = {
+			.hw_version     = 0x00000200,
+			.hw_capability  = 0x00000204,
+			.sw_reset       = 0x00000208,
+			.cgc_override   = 0x0000020C,
+			.irq_mask       = 0x00000210,
+			.irq_clear      = 0x00000214,
+			.irq_cmd        = 0x00000218,
+			.irq_status     = 0x0000021C,
+			.cmd            = 0x00000220,
+			.irq_set        = 0x00000224,
+			.misr_reset     = 0x0000022C,
+			.security_cfg   = 0x00000230,
+			.pwr_iso_cfg    = 0x00000234,
+			.pwr_iso_seed   = 0x00000238,
+			.test_bus_ctrl  = 0x00000248,
+			.spare          = 0x0000024C,
+		},
+		.bus_client_reg = {
+			/* bus client 0 */
+			{
+				.core_cfg                = 0x00000250,
+				.ccif_meta_data          = 0x00000254,
+				.addr_image              = 0x00000258,
+				.rd_buffer_size          = 0x0000025C,
+				.rd_stride               = 0x00000260,
+				.unpack_cfg_0            = 0x00000264,
+				.latency_buff_allocation = 0x00000278,
+				.burst_limit_cfg         = 0x00000280,
+				.misr_cfg_0              = 0x00000284,
+				.misr_cfg_1              = 0x00000288,
+				.misr_rd_val             = 0x0000028C,
+				.debug_status_cfg        = 0x00000290,
+				.debug_status_0          = 0x00000294,
+				.debug_status_1          = 0x00000298,
+			},
+			/* bus client 1 */
+			{
+				.core_cfg                = 0x000002F0,
+				.ccif_meta_data          = 0x000002F4,
+				.addr_image              = 0x000002F8,
+				.rd_buffer_size          = 0x000002FC,
+				.rd_stride               = 0x00000300,
+				.unpack_cfg_0            = 0x00000304,
+				.latency_buff_allocation = 0x00000318,
+				.burst_limit_cfg         = 0x00000320,
+				.misr_cfg_0              = 0x00000324,
+				.misr_cfg_1              = 0x00000328,
+				.misr_rd_val             = 0x0000032C,
+				.debug_status_cfg        = 0x00000330,
+				.debug_status_0          = 0x00000334,
+				.debug_status_1          = 0x00000338,
+			},
+		},
+	},
+	.bus_wr_reg = {
+		.common_reg = {
+			.hw_version        = 0x00000500,
+			.hw_capability     = 0x00000504,
+			.sw_reset          = 0x00000508,
+			.cgc_override      = 0x0000050C,
+			.misr_reset        = 0x000005C8,
+			.pwr_iso_cfg       = 0x000005CC,
+			.test_bus_ctrl     = 0x0000061C,
+			.composite_mask_0  = 0x00000510,
+			.irq_mask_0        = 0x00000544,
+			.irq_mask_1        = 0x00000548,
+			.irq_clear_0       = 0x00000550,
+			.irq_clear_1       = 0x00000554,
+			.irq_status_0      = 0x0000055C,
+			.irq_status_1      = 0x00000560,
+			.irq_cmd           = 0x00000568,
+			.irq_set_0         = 0x000005BC,
+			.irq_set_1         = 0x000005C0,
+			.addr_fifo_status  = 0x000005A8,
+			.frame_header_cfg0 = 0x000005AC,
+			.frame_header_cfg1 = 0x000005B0,
+			.spare             = 0x00000620,
+		},
+		.bus_client_reg = {
+			/* bus client 0 */
+			{
+				.status_0                  = 0x00000700,
+				.status_1                  = 0x00000704,
+				.cfg                       = 0x00000708,
+				.addr_frame_header         = 0x0000070C,
+				.frame_header_cfg          = 0x00000710,
+				.addr_image                = 0x00000714,
+				.addr_image_offset         = 0x00000718,
+				.buffer_width_cfg          = 0x0000071C,
+				.buffer_height_cfg         = 0x00000720,
+				.packer_cfg                = 0x00000724,
+				.wr_stride                 = 0x00000728,
+				.irq_subsample_cfg_period  = 0x00000748,
+				.irq_subsample_cfg_pattern = 0x0000074C,
+				.burst_limit_cfg           = 0x0000075C,
+				.misr_cfg                  = 0x00000760,
+				.misr_rd_word_sel          = 0x00000764,
+				.misr_val                  = 0x00000768,
+				.debug_status_cfg          = 0x0000076C,
+				.debug_status_0            = 0x00000770,
+				.debug_status_1            = 0x00000774,
+			},
+			/* bus client 1 */
+			{
+				.status_0                  = 0x00000800,
+				.status_1                  = 0x00000804,
+				.cfg                       = 0x00000808,
+				.addr_frame_header         = 0x0000080C,
+				.frame_header_cfg          = 0x00000810,
+				.addr_image                = 0x00000814,
+				.addr_image_offset         = 0x00000818,
+				.buffer_width_cfg          = 0x0000081C,
+				.buffer_height_cfg         = 0x00000820,
+				.packer_cfg                = 0x00000824,
+				.wr_stride                 = 0x00000828,
+				.irq_subsample_cfg_period  = 0x00000848,
+				.irq_subsample_cfg_pattern = 0x0000084C,
+				.burst_limit_cfg           = 0x0000085C,
+				.misr_cfg                  = 0x00000860,
+				.misr_rd_word_sel          = 0x00000864,
+				.misr_val                  = 0x00000868,
+				.debug_status_cfg          = 0x0000086C,
+				.debug_status_0            = 0x00000870,
+				.debug_status_1            = 0x00000874,
+			},
+		},
+	},
+	.titan_reg = {
+		.top_hw_version        = 0x00000900,
+		.top_titan_version     = 0x00000904,
+		.top_rst_cmd           = 0x00000908,
+		.top_core_clk_cfg      = 0x00000920,
+		.top_irq_status        = 0x0000090C,
+		.top_irq_mask          = 0x00000910,
+		.top_irq_clear         = 0x00000914,
+		.top_irq_set           = 0x00000918,
+		.top_irq_cmd           = 0x0000091C,
+		.top_violation_status  = 0x00000924,
+		.top_spare             = 0x000009FC,
+	},
+};
+
+#endif /* _CAM_LRME_HW_REG_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.c
new file mode 100644
index 0000000..75de0dd
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.c
@@ -0,0 +1,158 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "cam_lrme_hw_core.h"
+#include "cam_lrme_hw_soc.h"
+
+
+int cam_lrme_soc_enable_resources(struct cam_hw_info *lrme_hw)
+{
+	struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info;
+	struct cam_lrme_soc_private *soc_private =
+		(struct cam_lrme_soc_private *)soc_info->soc_private;
+	struct cam_ahb_vote ahb_vote;
+	struct cam_axi_vote axi_vote;
+	int rc = 0;
+
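+	/* Vote AHB at SVS and default AXI bandwidth before enabling clocks */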
+	ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	ahb_vote.vote.level = CAM_SVS_VOTE;
+	axi_vote.compressed_bw = 7200000;
+	axi_vote.uncompressed_bw = 7200000;
+	rc = cam_cpas_start(soc_private->cpas_handle, &ahb_vote, &axi_vote);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to start cpas, rc %d", rc);
+		return -EFAULT;
+	}
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true, CAM_SVS_VOTE,
+		true);
+	if (rc) {
+		CAM_ERR(CAM_LRME,
+			"Failed to enable platform resource, rc %d", rc);
+		goto stop_cpas;
+	}
+
+	cam_lrme_set_irq(lrme_hw, CAM_LRME_IRQ_ENABLE);
+
+	return rc;
+
+stop_cpas:
+	if (cam_cpas_stop(soc_private->cpas_handle))
+		CAM_ERR(CAM_LRME, "Failed to stop cpas");
+
+	return rc;
+}
+
+int cam_lrme_soc_disable_resources(struct cam_hw_info *lrme_hw)
+{
+	struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info;
+	struct cam_lrme_soc_private *soc_private;
+	int rc = 0;
+
+	soc_private = soc_info->soc_private;
+
+	cam_lrme_set_irq(lrme_hw, CAM_LRME_IRQ_DISABLE);
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to disable platform resource");
+		return rc;
+	}
+	rc = cam_cpas_stop(soc_private->cpas_handle);
+	if (rc)
+		CAM_ERR(CAM_LRME, "Failed to stop cpas");
+
+	return rc;
+}
+
+int cam_lrme_soc_init_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t irq_handler, void *private_data)
+{
+	struct cam_lrme_soc_private *soc_private;
+	struct cam_cpas_register_params cpas_register_param;
+	int rc;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed in get_dt_properties, rc=%d", rc);
+		return rc;
+	}
+
+	rc = cam_soc_util_request_platform_resource(soc_info, irq_handler,
+		private_data);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed in request_platform_resource rc=%d",
+			rc);
+		return rc;
+	}
+
+	soc_private = kzalloc(sizeof(struct cam_lrme_soc_private), GFP_KERNEL);
+	if (!soc_private) {
+		rc = -ENOMEM;
+		goto release_res;
+	}
+	soc_info->soc_private = soc_private;
+
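+	/* Register with CPAS so this core can vote for bus bandwidth */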
+	memset(&cpas_register_param, 0, sizeof(cpas_register_param));
+	strlcpy(cpas_register_param.identifier,
+		"lrmecpas", CAM_HW_IDENTIFIER_LENGTH);
+	cpas_register_param.cell_index = soc_info->index;
+	cpas_register_param.dev = &soc_info->pdev->dev;
+	cpas_register_param.userdata = private_data;
+	cpas_register_param.cam_cpas_client_cb = NULL;
+
+	rc = cam_cpas_register_client(&cpas_register_param);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "CPAS registration failed");
+		goto free_soc_private;
+	}
+	soc_private->cpas_handle = cpas_register_param.client_handle;
+	CAM_DBG(CAM_LRME, "CPAS handle=%d", soc_private->cpas_handle);
+
+	return rc;
+
+free_soc_private:
+	kfree(soc_info->soc_private);
+	soc_info->soc_private = NULL;
+release_res:
+	cam_soc_util_release_platform_resource(soc_info);
+
+	return rc;
+}
+
+int cam_lrme_soc_deinit_resources(struct cam_hw_soc_info *soc_info)
+{
+	struct cam_lrme_soc_private *soc_private =
+		(struct cam_lrme_soc_private *)soc_info->soc_private;
+	int rc;
+
+	rc = cam_cpas_unregister_client(soc_private->cpas_handle);
+	if (rc)
+		CAM_ERR(CAM_LRME, "Unregister cpas failed, handle=%d, rc=%d",
+			soc_private->cpas_handle, rc);
+
+	rc = cam_soc_util_release_platform_resource(soc_info);
+	if (rc)
+		CAM_ERR(CAM_LRME, "release platform failed, rc=%d", rc);
+
+	kfree(soc_info->soc_private);
+	soc_info->soc_private = NULL;
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.h b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.h
new file mode 100644
index 0000000..44e8486
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.h
@@ -0,0 +1,28 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_LRME_HW_SOC_H_
+#define _CAM_LRME_HW_SOC_H_
+
+#include "cam_soc_util.h"
+
+struct cam_lrme_soc_private {
+	uint32_t cpas_handle;
+};
+
+int cam_lrme_soc_enable_resources(struct cam_hw_info *lrme_hw);
+int cam_lrme_soc_disable_resources(struct cam_hw_info *lrme_hw);
+int cam_lrme_soc_init_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t irq_handler, void *private_data);
+int cam_lrme_soc_deinit_resources(struct cam_hw_soc_info *soc_info);
+
+#endif /* _CAM_LRME_HW_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
index 008bba9..6ad0934 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
@@ -14,6 +14,7 @@
 #include <linux/types.h>
 #include <linux/mutex.h>
 #include <linux/msm_ion.h>
+#include <linux/slab.h>
 #include <asm/cacheflush.h>
 
 #include "cam_req_mgr_util.h"
@@ -299,7 +300,40 @@
 }
 EXPORT_SYMBOL(cam_mem_mgr_cache_ops);
 
-static int cam_mem_util_get_ion_buffer(size_t len,
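+/* Allocate an ION buffer and export it as a dma_buf for kernel use */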
+static int cam_mem_util_get_dma_buf(size_t len,
+	size_t align,
+	unsigned int heap_id_mask,
+	unsigned int flags,
+	struct ion_handle **hdl,
+	struct dma_buf **buf)
+{
+	int rc = 0;
+
+	if (!hdl || !buf) {
+		CAM_ERR(CAM_CRM, "Invalid params");
+		return -EINVAL;
+	}
+
+	*hdl = ion_alloc(tbl.client, len, align, heap_id_mask, flags);
+	if (IS_ERR_OR_NULL(*hdl))
+		return -ENOMEM;
+
+	*buf = ion_share_dma_buf(tbl.client, *hdl);
+	if (IS_ERR_OR_NULL(*buf)) {
+		CAM_ERR(CAM_CRM, "get dma buf fail");
+		rc = -EINVAL;
+		goto get_buf_fail;
+	}
+
+	return rc;
+
+get_buf_fail:
+	ion_free(tbl.client, *hdl);
+	return rc;
+
+}
+
+static int cam_mem_util_get_dma_buf_fd(size_t len,
 	size_t align,
 	unsigned int heap_id_mask,
 	unsigned int flags,
@@ -308,13 +342,18 @@
 {
 	int rc = 0;
 
+	if (!hdl || !fd) {
+		CAM_ERR(CAM_CRM, "Invalid params");
+		return -EINVAL;
+	}
+
 	*hdl = ion_alloc(tbl.client, len, align, heap_id_mask, flags);
 	if (IS_ERR_OR_NULL(*hdl))
 		return -ENOMEM;
 
 	*fd = ion_share_dma_buf_fd(tbl.client, *hdl);
 	if (*fd < 0) {
-		CAM_ERR(CAM_CRM, "dma buf get fd fail");
+		CAM_ERR(CAM_CRM, "get fd fail");
 		rc = -EINVAL;
 		goto get_fd_fail;
 	}
@@ -346,7 +385,7 @@
 	else
 		ion_flag &= ~ION_FLAG_CACHED;
 
-	rc = cam_mem_util_get_ion_buffer(cmd->len,
+	rc = cam_mem_util_get_dma_buf_fd(cmd->len,
 		cmd->align,
 		heap_id,
 		ion_flag,
@@ -441,7 +480,7 @@
 		}
 	} else {
 		for (i = 0; i < num_hdls; i++) {
-			rc = cam_smmu_map_iova(mmu_hdls[i],
+			rc = cam_smmu_map_user_iova(mmu_hdls[i],
 				fd,
 				dir,
 				(dma_addr_t *)hw_vaddr,
@@ -462,7 +501,7 @@
 			cam_smmu_unmap_stage2_iova(mmu_hdls[i], fd);
 	else
 		for (--i; i > 0; i--)
-			cam_smmu_unmap_iova(mmu_hdls[i],
+			cam_smmu_unmap_user_iova(mmu_hdls[i],
 				fd,
 				CAM_SMMU_REGION_IO);
 	return rc;
@@ -530,6 +569,7 @@
 
 	mutex_lock(&tbl.bufq[idx].q_lock);
 	tbl.bufq[idx].fd = ion_fd;
+	tbl.bufq[idx].dma_buf = NULL;
 	tbl.bufq[idx].flags = cmd->flags;
 	tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, ion_fd);
 	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
@@ -615,6 +655,7 @@
 
 	mutex_lock(&tbl.bufq[idx].q_lock);
 	tbl.bufq[idx].fd = cmd->fd;
+	tbl.bufq[idx].dma_buf = NULL;
 	tbl.bufq[idx].flags = cmd->flags;
 	tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, cmd->fd);
 	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
@@ -645,7 +686,8 @@
 }
 
 static int cam_mem_util_unmap_hw_va(int32_t idx,
-	enum cam_smmu_region_id region)
+	enum cam_smmu_region_id region,
+	enum cam_smmu_mapping_client client)
 {
 	int i;
 	uint32_t flags;
@@ -672,15 +714,27 @@
 		}
 	} else {
 		for (i = 0; i < num_hdls; i++) {
-			rc = cam_smmu_unmap_iova(mmu_hdls[i],
-				fd,
-				region);
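+			/* User buffers unmap by fd, kernel ones by dma_buf */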
+			if (client == CAM_SMMU_MAPPING_USER) {
+				rc = cam_smmu_unmap_user_iova(mmu_hdls[i],
+					fd, region);
+			} else if (client == CAM_SMMU_MAPPING_KERNEL) {
+				rc = cam_smmu_unmap_kernel_iova(mmu_hdls[i],
+					tbl.bufq[idx].dma_buf, region);
+			} else {
+				CAM_ERR(CAM_CRM,
+					"invalid caller for unmapping : %d",
+					client);
+				rc = -EINVAL;
+			}
 			if (rc < 0)
 				goto unmap_end;
 		}
 	}
 
+	return rc;
+
 unmap_end:
+	CAM_ERR(CAM_CRM, "unmapping failed");
 	return rc;
 }
 
@@ -693,7 +747,7 @@
 	else if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE)
 		region = CAM_SMMU_REGION_IO;
 
-	cam_mem_util_unmap_hw_va(idx, region);
+	cam_mem_util_unmap_hw_va(idx, region, CAM_SMMU_MAPPING_USER);
 }
 
 static int cam_mem_mgr_cleanup_table(void)
@@ -748,7 +802,8 @@
 	mutex_destroy(&tbl.m_lock);
 }
 
-static int cam_mem_util_unmap(int32_t idx)
+static int cam_mem_util_unmap(int32_t idx,
+	enum cam_smmu_mapping_client client)
 {
 	int rc = 0;
 	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;
@@ -775,7 +830,7 @@
 	if ((tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
 		(tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) ||
 		(tbl.bufq[idx].flags & CAM_MEM_FLAG_PROTECTED_MODE))
-		rc = cam_mem_util_unmap_hw_va(idx, region);
+		rc = cam_mem_util_unmap_hw_va(idx, region, client);
 
 
 	mutex_lock(&tbl.bufq[idx].q_lock);
@@ -786,9 +841,10 @@
 		sizeof(int32_t) * CAM_MEM_MMU_MAX_HANDLE);
 
 	CAM_DBG(CAM_CRM,
-		"Ion handle at idx = %d freeing = %pK, fd = %d, imported %d",
+		"Ion handle at idx = %d freeing = %pK, fd = %d, imported %d dma_buf %pK",
 		idx, tbl.bufq[idx].i_hdl, tbl.bufq[idx].fd,
-		tbl.bufq[idx].is_imported);
+		tbl.bufq[idx].is_imported,
+		tbl.bufq[idx].dma_buf);
 
 	if (tbl.bufq[idx].i_hdl) {
 		ion_free(tbl.client, tbl.bufq[idx].i_hdl);
@@ -796,6 +852,7 @@
 	}
 
 	tbl.bufq[idx].fd = -1;
+	tbl.bufq[idx].dma_buf = NULL;
 	tbl.bufq[idx].is_imported = false;
 	tbl.bufq[idx].len = 0;
 	tbl.bufq[idx].num_hdl = 0;
@@ -833,7 +890,7 @@
 	}
 
 	CAM_DBG(CAM_CRM, "Releasing hdl = %u", cmd->buf_handle);
-	rc = cam_mem_util_unmap(idx);
+	rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_USER);
 
 	return rc;
 }
@@ -842,17 +899,19 @@
 	struct cam_mem_mgr_memory_desc *out)
 {
 	struct ion_handle *hdl;
-	int ion_fd;
+	struct dma_buf *buf = NULL;
+	int ion_fd = -1;
 	int rc = 0;
 	uint32_t heap_id;
 	int32_t ion_flag = 0;
 	uint64_t kvaddr;
 	dma_addr_t iova = 0;
 	size_t request_len = 0;
-	int32_t idx;
 	uint32_t mem_handle;
+	int32_t idx;
 	int32_t smmu_hdl = 0;
 	int32_t num_hdl = 0;
+
 	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;
 
 	if (!inp || !out) {
@@ -874,18 +933,18 @@
 
 	heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID);
 
-	rc = cam_mem_util_get_ion_buffer(inp->size,
+	rc = cam_mem_util_get_dma_buf(inp->size,
 		inp->align,
 		heap_id,
 		ion_flag,
 		&hdl,
-		&ion_fd);
+		&buf);
 
 	if (rc) {
 		CAM_ERR(CAM_CRM, "ION alloc failed for shared buffer");
 		goto ion_fail;
 	} else {
-		CAM_DBG(CAM_CRM, "Got ION fd = %d, hdl = %pK", ion_fd, hdl);
+		CAM_DBG(CAM_CRM, "Got dma_buf = %pK, hdl = %pK", buf, hdl);
 	}
 
 	rc = cam_mem_util_map_cpu_va(hdl, &kvaddr, &request_len);
@@ -908,8 +967,8 @@
 			region = CAM_SMMU_REGION_IO;
 	}
 
-	rc = cam_smmu_map_iova(inp->smmu_hdl,
-		ion_fd,
+	rc = cam_smmu_map_kernel_iova(inp->smmu_hdl,
+		buf,
 		CAM_SMMU_MAP_RW,
 		&iova,
 		&request_len,
@@ -931,7 +990,8 @@
 
 	mutex_lock(&tbl.bufq[idx].q_lock);
 	mem_handle = GET_MEM_HANDLE(idx, ion_fd);
-	tbl.bufq[idx].fd = ion_fd;
+	tbl.bufq[idx].dma_buf = buf;
+	tbl.bufq[idx].fd = -1;
 	tbl.bufq[idx].flags = inp->flags;
 	tbl.bufq[idx].buf_handle = mem_handle;
 	tbl.bufq[idx].kmdvaddr = kvaddr;
@@ -955,9 +1015,8 @@
 
 	return rc;
 slot_fail:
-	cam_smmu_unmap_iova(inp->smmu_hdl,
-		ion_fd,
-		region);
+	cam_smmu_unmap_kernel_iova(inp->smmu_hdl,
+		buf, region);
 smmu_fail:
 	ion_unmap_kernel(tbl.client, hdl);
 map_fail:
@@ -995,7 +1054,7 @@
 	}
 
 	CAM_DBG(CAM_CRM, "Releasing hdl = %X", inp->mem_handle);
-	rc = cam_mem_util_unmap(idx);
+	rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_KERNEL);
 
 	return rc;
 }
@@ -1006,13 +1065,14 @@
 	struct cam_mem_mgr_memory_desc *out)
 {
 	struct ion_handle *hdl;
-	int ion_fd;
+	struct dma_buf *buf = NULL;
 	int rc = 0;
+	int ion_fd = -1;
 	uint32_t heap_id;
 	dma_addr_t iova = 0;
 	size_t request_len = 0;
-	int32_t idx;
 	uint32_t mem_handle;
+	int32_t idx;
 	int32_t smmu_hdl = 0;
 	int32_t num_hdl = 0;
 
@@ -1032,24 +1092,25 @@
 	}
 
 	heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID);
-	rc = cam_mem_util_get_ion_buffer(inp->size,
+	rc = cam_mem_util_get_dma_buf(inp->size,
 		inp->align,
 		heap_id,
 		0,
 		&hdl,
-		&ion_fd);
+		&buf);
 
 	if (rc) {
 		CAM_ERR(CAM_CRM, "ION alloc failed for sec heap buffer");
 		goto ion_fail;
 	} else {
-		CAM_DBG(CAM_CRM, "Got ION fd = %d, hdl = %pK", ion_fd, hdl);
+		CAM_DBG(CAM_CRM, "Got dma_buf = %pK, hdl = %pK", buf, hdl);
 	}
 
 	rc = cam_smmu_reserve_sec_heap(inp->smmu_hdl,
-		ion_fd,
+		buf,
 		&iova,
 		&request_len);
+
 	if (rc) {
 		CAM_ERR(CAM_CRM, "Reserving secondary heap failed");
 		goto smmu_fail;
@@ -1066,7 +1127,8 @@
 
 	mutex_lock(&tbl.bufq[idx].q_lock);
 	mem_handle = GET_MEM_HANDLE(idx, ion_fd);
-	tbl.bufq[idx].fd = ion_fd;
+	tbl.bufq[idx].fd = -1;
+	tbl.bufq[idx].dma_buf = buf;
 	tbl.bufq[idx].flags = inp->flags;
 	tbl.bufq[idx].buf_handle = mem_handle;
 	tbl.bufq[idx].kmdvaddr = 0;
@@ -1154,7 +1216,7 @@
 	}
 
 	CAM_DBG(CAM_CRM, "Releasing hdl = %X", inp->mem_handle);
-	rc = cam_mem_util_unmap(idx);
+	rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_KERNEL);
 	if (rc)
 		CAM_ERR(CAM_CRM, "unmapping secondary heap failed");
 
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.h
index 06588c4..83727d2 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.h
@@ -14,15 +14,23 @@
 #define _CAM_MEM_MGR_H_
 
 #include <linux/mutex.h>
+#include <linux/dma-buf.h>
 #include <media/cam_req_mgr.h>
 #include "cam_mem_mgr_api.h"
 
 #define CAM_MEM_BUFQ_MAX 1024
 
+/*Enum for possible SMMU operations */
+enum cam_smmu_mapping_client {
+	CAM_SMMU_MAPPING_USER,
+	CAM_SMMU_MAPPING_KERNEL,
+};
+
 /**
  * struct cam_mem_buf_queue
  *
  * @i_hdl:       ion handle for the buffer
+ * @dma_buf:     pointer to the allocated dma_buf in the table
  * @q_lock:      mutex lock for buffer
  * @hdls:        list of mapped handles
  * @num_hdl:     number of handles
@@ -38,6 +46,7 @@
  */
 struct cam_mem_buf_queue {
 	struct ion_handle *i_hdl;
+	struct dma_buf *dma_buf;
 	struct mutex q_lock;
 	int32_t hdls[CAM_MEM_MMU_MAX_HANDLE];
 	int32_t num_hdl;
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
index a6b097d..f38af7d 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
@@ -564,6 +564,7 @@
 			  * hence try again in next sof
 			  */
 			slot->status = CRM_SLOT_STATUS_REQ_PENDING;
+			spin_lock_bh(&link->link_state_spin_lock);
 			if (link->state == CAM_CRM_LINK_STATE_ERR) {
 				/*
 				 * During error recovery all tables should be
@@ -576,6 +577,7 @@
 					in_q->slot[in_q->rd_idx].status);
 				rc = -EPERM;
 			}
+			spin_unlock_bh(&link->link_state_spin_lock);
 			return rc;
 		}
 	}
@@ -592,13 +594,14 @@
 	} else {
 		link->trigger_mask |= trigger;
 
+		spin_lock_bh(&link->link_state_spin_lock);
 		if (link->state == CAM_CRM_LINK_STATE_ERR) {
 			CAM_WARN(CAM_CRM, "Err recovery done idx %d",
 				in_q->rd_idx);
-			mutex_lock(&link->lock);
 			link->state = CAM_CRM_LINK_STATE_READY;
-			mutex_unlock(&link->lock);
 		}
+		spin_unlock_bh(&link->link_state_spin_lock);
+
 		if (link->trigger_mask == link->subscribe_event) {
 			slot->status = CRM_SLOT_STATUS_REQ_APPLIED;
 			link->trigger_mask = 0;
@@ -867,8 +870,6 @@
 	struct cam_req_mgr_connected_device    *dev;
 	struct cam_req_mgr_core_dev_link_setup  link_data;
 
-	mutex_lock(&link->lock);
-
 	link_data.link_enable = 0;
 	link_data.link_hdl = link->link_hdl;
 	link_data.crm_cb = NULL;
@@ -895,7 +896,6 @@
 	link->num_devs = 0;
 	link->max_delay = 0;
 
-	mutex_unlock(&link->lock);
 }
 
 /**
@@ -938,6 +938,7 @@
 		return NULL;
 	}
 	mutex_init(&link->lock);
+	spin_lock_init(&link->link_state_spin_lock);
 
 	mutex_lock(&link->lock);
 	link->state = CAM_CRM_LINK_STATE_AVAILABLE;
@@ -1348,9 +1349,9 @@
 			__cam_req_mgr_tbl_set_all_skip_cnt(&link->req.l_tbl);
 			in_q->rd_idx = idx;
 			in_q->slot[idx].status = CRM_SLOT_STATUS_REQ_ADDED;
-			mutex_lock(&link->lock);
+			spin_lock_bh(&link->link_state_spin_lock);
 			link->state = CAM_CRM_LINK_STATE_ERR;
-			mutex_unlock(&link->lock);
+			spin_unlock_bh(&link->link_state_spin_lock);
 		}
 	}
 	mutex_unlock(&link->req.lock);
@@ -1401,11 +1402,14 @@
 	CAM_DBG(CAM_CRM, "link_hdl %x curent idx %d req_status %d",
 		link->link_hdl, in_q->rd_idx, in_q->slot[in_q->rd_idx].status);
 
+	spin_lock_bh(&link->link_state_spin_lock);
 	if (link->state == CAM_CRM_LINK_STATE_ERR)
 		CAM_WARN(CAM_CRM, "Error recovery idx %d status %d",
 			in_q->rd_idx,
 			in_q->slot[in_q->rd_idx].status);
 
+	spin_unlock_bh(&link->link_state_spin_lock);
+
 	if (in_q->slot[in_q->rd_idx].status == CRM_SLOT_STATUS_REQ_APPLIED) {
 		/*
 		 * Do NOT reset req q slot data here, it can not be done
@@ -1446,8 +1450,7 @@
 
 	if (!add_req) {
 		CAM_ERR(CAM_CRM, "sof_data is NULL");
-		rc = -EINVAL;
-		goto end;
+		return -EINVAL;
 	}
 
 	CAM_DBG(CAM_CRM, "E: dev %x dev req %lld",
@@ -1457,9 +1460,18 @@
 
 	if (!link) {
 		CAM_DBG(CAM_CRM, "link ptr NULL %x", add_req->link_hdl);
-		rc = -EINVAL;
+		return -EINVAL;
+	}
+
+	mutex_lock(&link->lock);
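+	/* Only accept new requests while the link is in READY state */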
+	spin_lock_bh(&link->link_state_spin_lock);
+	if (link->state != CAM_CRM_LINK_STATE_READY) {
+		CAM_WARN(CAM_CRM, "invalid link state:%d", link->state);
+		rc = -EPERM;
+		spin_unlock_bh(&link->link_state_spin_lock);
 		goto end;
 	}
+	spin_unlock_bh(&link->link_state_spin_lock);
 
 	/* Validate if req id is present in input queue */
 	idx = __cam_req_mgr_find_slot_for_req(link->req.in_q, add_req->req_id);
@@ -1490,6 +1502,7 @@
 		add_req->dev_hdl, add_req->req_id);
 
 end:
+	mutex_unlock(&link->lock);
 	return rc;
 }
 
@@ -1525,6 +1538,15 @@
 		goto end;
 	}
 
+	spin_lock_bh(&link->link_state_spin_lock);
+	if (link->state != CAM_CRM_LINK_STATE_READY) {
+		CAM_WARN(CAM_CRM, "invalid link state:%d", link->state);
+		spin_unlock_bh(&link->link_state_spin_lock);
+		rc = -EPERM;
+		goto end;
+	}
+	spin_unlock_bh(&link->link_state_spin_lock);
+
 	crm_timer_reset(link->watchdog);
 	task = cam_req_mgr_workq_get_task(link->workq);
 	if (!task) {
@@ -1579,6 +1601,15 @@
 		goto end;
 	}
 
+	spin_lock_bh(&link->link_state_spin_lock);
+	if (link->state != CAM_CRM_LINK_STATE_READY) {
+		CAM_WARN(CAM_CRM, "invalid link state:%d", link->state);
+		spin_unlock_bh(&link->link_state_spin_lock);
+		rc = -EPERM;
+		goto end;
+	}
+	spin_unlock_bh(&link->link_state_spin_lock);
+
 	crm_timer_reset(link->watchdog);
 	task = cam_req_mgr_workq_get_task(link->workq);
 	if (!task) {
@@ -1639,7 +1670,6 @@
 	if (rc < 0)
 		return rc;
 
-	mutex_lock(&link->lock);
 	max_delay = CAM_PIPELINE_DELAY_0;
 	for (i = 0; i < link_info->num_devices; i++) {
 		dev = &link->l_dev[i];
@@ -1742,7 +1772,6 @@
 	/* At start, expect max pd devices, all are in skip state */
 	__cam_req_mgr_tbl_set_all_skip_cnt(&link->req.l_tbl);
 
-	mutex_unlock(&link->lock);
 	return 0;
 
 error:
@@ -1882,11 +1911,9 @@
 	if (link->link_hdl < 0) {
 		CAM_ERR(CAM_CRM,
 			"Insufficient memory to create new device handle");
-		mutex_unlock(&link->lock);
 		rc = link->link_hdl;
 		goto link_hdl_fail;
 	}
-	mutex_unlock(&link->lock);
 	link_info->link_hdl = link->link_hdl;
 
 	/* Allocate memory to hold data of all linked devs */
@@ -1903,9 +1930,9 @@
 	if (rc < 0)
 		goto setup_failed;
 
-	mutex_lock(&link->lock);
+	spin_lock_bh(&link->link_state_spin_lock);
 	link->state = CAM_CRM_LINK_STATE_READY;
-	mutex_unlock(&link->lock);
+	spin_unlock_bh(&link->link_state_spin_lock);
 
 	/* Create worker for current link */
 	snprintf(buf, sizeof(buf), "%x-%x",
@@ -1936,6 +1963,7 @@
 		goto setup_failed;
 	}
 
+	mutex_unlock(&link->lock);
 	mutex_unlock(&g_crm_core_dev->crm_lock);
 	return rc;
 setup_failed:
@@ -1944,6 +1972,7 @@
 	cam_destroy_device_hdl(link->link_hdl);
 	link_info->link_hdl = 0;
 link_hdl_fail:
+	mutex_unlock(&link->lock);
 	__cam_req_mgr_unreserve_link(cam_session, &link);
 	mutex_unlock(&g_crm_core_dev->crm_lock);
 	return rc;
@@ -1979,6 +2008,11 @@
 		mutex_unlock(&g_crm_core_dev->crm_lock);
 		return -EINVAL;
 	}
+
+	mutex_lock(&link->lock);
+	spin_lock_bh(&link->link_state_spin_lock);
+	link->state = CAM_CRM_LINK_STATE_IDLE;
+	spin_unlock_bh(&link->link_state_spin_lock);
 	__cam_req_mgr_print_req_tbl(&link->req);
 
 	/* Destroy workq payload data */
@@ -2004,6 +2038,7 @@
 	}
 
 	/* Free curent link and put back into session's free pool of links */
+	mutex_unlock(&link->lock);
 	__cam_req_mgr_unreserve_link(cam_session, &link);
 	mutex_unlock(&g_crm_core_dev->crm_lock);
 
@@ -2127,10 +2162,10 @@
 	flush->link_hdl = flush_info->link_hdl;
 	flush->flush_type = flush_info->flush_type;
 	task->process_cb = &cam_req_mgr_process_flush_req;
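+	/* Init the completion before enqueue so the signal is not missed */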
+	init_completion(&link->workq_comp);
 	rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
 
 	/* Blocking call */
-	init_completion(&link->workq_comp);
 	rc = wait_for_completion_timeout(
 		&link->workq_comp,
 		msecs_to_jiffies(CAM_REQ_MGR_SCHED_REQ_TIMEOUT));
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
index db34157..e17047d6 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
@@ -12,6 +12,7 @@
 #ifndef _CAM_REQ_MGR_CORE_H_
 #define _CAM_REQ_MGR_CORE_H_
 
+#include <linux/spinlock.h>
 #include "cam_req_mgr_interface.h"
 #include "cam_req_mgr_core_defs.h"
 #include "cam_req_mgr_timer.h"
@@ -259,27 +260,28 @@
 /**
  * struct cam_req_mgr_core_link
  * -  Link Properties
- * @link_hdl       : Link identifier
- * @num_devs       : num of connected devices to this link
- * @max_delay      : Max of pipeline delay of all connected devs
- * @workq          : Pointer to handle workq related jobs
- * @pd_mask        : each set bit indicates the device with pd equal to bit
- *                   position is available.
+ * @link_hdl             : Link identifier
+ * @num_devs             : num of connected devices to this link
+ * @max_delay            : Max of pipeline delay of all connected devs
+ * @workq                : Pointer to handle workq related jobs
+ * @pd_mask              : each set bit indicates the device with pd equal to
+ *                          bit position is available.
  * - List of connected devices
- * @l_dev          : List of connected devices to this link
+ * @l_dev                : List of connected devices to this link
  * - Request handling data struct
- * @req            : req data holder.
+ * @req                  : req data holder.
  * - Timer
- * @watchdog       : watchdog timer to recover from sof freeze
+ * @watchdog             : watchdog timer to recover from sof freeze
  * - Link private data
- * @workq_comp     : conditional variable to block user thread for workq to
- *                   finish schedule request processing
- * @state          : link state machine
- * @parent         : pvt data - link's parent is session
- * @lock           : mutex lock to guard link data operations
- * @subscribe_event: irqs that link subscribes, IFE should send notification
- * to CRM at those hw events.
- * @trigger_mask   : mask on which irq the req is already applied
+ * @workq_comp           : conditional variable to block user thread for workq
+ *                          to finish schedule request processing
+ * @state                : link state machine
+ * @parent               : pvt data - link's parent is session
+ * @lock                 : mutex lock to guard link data operations
+ * @link_state_spin_lock : spin lock to protect link state variable
+ * @subscribe_event      : irqs that link subscribes, IFE should send
+ *                         notification to CRM at those hw events.
+ * @trigger_mask         : mask on which irq the req is already applied
  */
 struct cam_req_mgr_core_link {
 	int32_t                              link_hdl;
@@ -294,6 +296,7 @@
 	enum cam_req_mgr_link_state          state;
 	void                                *parent;
 	struct mutex                         lock;
+	spinlock_t                           link_state_spin_lock;
 	uint32_t                             subscribe_event;
 	uint32_t                             trigger_mask;
 };
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/Makefile
index 94a591c..65c2327 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/Makefile
@@ -1,3 +1,4 @@
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_res_mgr/
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_utils/
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_cci/
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_io/
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/Makefile
index 4e8ea8b..c0bebfd 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/Makefile
@@ -1,6 +1,7 @@
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
index 85db1b1..abfc190 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
@@ -15,6 +15,129 @@
 #include "cam_actuator_core.h"
 #include "cam_sensor_util.h"
 #include "cam_trace.h"
+#include "cam_res_mgr_api.h"
+
+int32_t cam_actuator_construct_default_power_setting(
+	struct cam_sensor_power_ctrl_t *power_info)
+{
+	int rc = 0;
+
+	power_info->power_setting_size = 1;
+	power_info->power_setting =
+		(struct cam_sensor_power_setting *)
+		kzalloc(sizeof(struct cam_sensor_power_setting),
+			GFP_KERNEL);
+	if (!power_info->power_setting)
+		return -ENOMEM;
+
+	power_info->power_setting[0].seq_type = SENSOR_VAF;
+	power_info->power_setting[0].seq_val = CAM_VAF;
+	power_info->power_setting[0].config_val = 1;
+
+	power_info->power_down_setting_size = 1;
+	power_info->power_down_setting =
+		(struct cam_sensor_power_setting *)
+		kzalloc(sizeof(struct cam_sensor_power_setting),
+			GFP_KERNEL);
+	if (!power_info->power_down_setting) {
+		rc = -ENOMEM;
+		goto free_power_settings;
+	}
+
+	power_info->power_down_setting[0].seq_type = SENSOR_VAF;
+	power_info->power_down_setting[0].seq_val = CAM_VAF;
+	power_info->power_down_setting[0].config_val = 0;
+
+	return rc;
+
+free_power_settings:
+	kfree(power_info->power_setting);
+	return rc;
+}
+
+static int32_t cam_actuator_power_up(struct cam_actuator_ctrl_t *a_ctrl)
+{
+	int rc = 0;
+	struct cam_hw_soc_info  *soc_info =
+		&a_ctrl->soc_info;
+	struct cam_actuator_soc_private  *soc_private;
+	struct cam_sensor_power_ctrl_t *power_info;
+
+	soc_private =
+		(struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private;
+	power_info = &soc_private->power_info;
+
+	/* Parse and fill vreg params for power up settings */
+	rc = msm_camera_fill_vreg_params(
+		&a_ctrl->soc_info,
+		power_info->power_setting,
+		power_info->power_setting_size);
+	if (rc) {
+		CAM_ERR(CAM_ACTUATOR,
+			"failed to fill vreg params for power up rc:%d", rc);
+		return rc;
+	}
+
+	/* Parse and fill vreg params for power down settings*/
+	rc = msm_camera_fill_vreg_params(
+		&a_ctrl->soc_info,
+		power_info->power_down_setting,
+		power_info->power_down_setting_size);
+	if (rc) {
+		CAM_ERR(CAM_ACTUATOR,
+			"failed to fill vreg params power down rc:%d", rc);
+		return rc;
+	}
+
+	power_info->dev = soc_info->dev;
+
+	rc = cam_sensor_core_power_up(power_info, soc_info);
+	if (rc) {
+		CAM_ERR(CAM_ACTUATOR, "failed in actuator power up rc %d", rc);
+		return rc;
+	}
+
+	/* VREG needs some delay to power up */
+	usleep_range(2000, 2050);
+
+	rc = camera_io_init(&a_ctrl->io_master_info);
+	if (rc < 0)
+		CAM_ERR(CAM_ACTUATOR, "cci_init failed: rc: %d", rc);
+
+	return rc;
+}
+
+static int32_t cam_actuator_power_down(struct cam_actuator_ctrl_t *a_ctrl)
+{
+	int32_t rc = 0;
+	struct cam_sensor_power_ctrl_t *power_info;
+	struct cam_hw_soc_info *soc_info = &a_ctrl->soc_info;
+	struct cam_actuator_soc_private  *soc_private;
+
+	if (!a_ctrl) {
+		CAM_ERR(CAM_ACTUATOR, "failed: a_ctrl %pK", a_ctrl);
+		return -EINVAL;
+	}
+
+	soc_private =
+		(struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private;
+	power_info = &soc_private->power_info;
+	soc_info = &a_ctrl->soc_info;
+
+	if (!power_info) {
+		CAM_ERR(CAM_ACTUATOR, "failed: power_info %pK", power_info);
+		return -EINVAL;
+	}
+	rc = msm_camera_power_down(power_info, soc_info);
+	if (rc) {
+		CAM_ERR(CAM_ACTUATOR, "actuator power down failed: %d", rc);
+		return rc;
+	}
+
+	camera_io_release(&a_ctrl->io_master_info);
+
+	return rc;
+}
 
 static int32_t cam_actuator_i2c_modes_util(
 	struct camera_io_master *io_master_info,
@@ -324,6 +447,19 @@
 				rc);
 			return rc;
 		}
+
+		rc = cam_actuator_apply_settings(a_ctrl,
+			&a_ctrl->i2c_data.init_settings);
+		if (rc < 0)
+			CAM_ERR(CAM_ACTUATOR, "Cannot apply Init settings");
+
+		/* Delete the request even if the apply is failed */
+		rc = delete_request(&a_ctrl->i2c_data.init_settings);
+		if (rc < 0) {
+			CAM_WARN(CAM_ACTUATOR,
+				"Fail in deleting the Init settings");
+			rc = 0;
+		}
 	} else if ((csl_packet->header.op_code & 0xFFFFFF) ==
 		CAM_ACTUATOR_PACKET_AUTO_MOVE_LENS) {
 		a_ctrl->setting_apply_state =
@@ -383,92 +519,6 @@
 	return rc;
 }
 
-static int32_t cam_actuator_vreg_control(
-	struct cam_actuator_ctrl_t *a_ctrl,
-	int config)
-{
-	int rc = 0, cnt;
-	struct cam_hw_soc_info  *soc_info;
-
-	soc_info = &a_ctrl->soc_info;
-	cnt = soc_info->num_rgltr;
-
-	if (!cnt)
-		return 0;
-
-	if (cnt >= CAM_SOC_MAX_REGULATOR) {
-		CAM_ERR(CAM_ACTUATOR, "Regulators more than supported %d", cnt);
-		return -EINVAL;
-	}
-
-	if (config) {
-		rc = cam_soc_util_request_platform_resource(soc_info,
-			NULL, NULL);
-		rc = cam_soc_util_enable_platform_resource(soc_info, false, 0,
-			false);
-	} else {
-		rc = cam_soc_util_disable_platform_resource(soc_info, false,
-			false);
-		rc = cam_soc_util_release_platform_resource(soc_info);
-	}
-
-	return rc;
-}
-
-static int32_t cam_actuator_power_up(struct cam_actuator_ctrl_t *a_ctrl)
-{
-	int rc = 0;
-	struct cam_hw_soc_info  *soc_info =
-		&a_ctrl->soc_info;
-	struct msm_camera_gpio_num_info *gpio_num_info = NULL;
-
-	rc = cam_actuator_vreg_control(a_ctrl, 1);
-	if (rc < 0) {
-		CAM_ERR(CAM_ACTUATOR, "Actuator Reg Failed %d", rc);
-		return rc;
-	}
-
-	gpio_num_info = a_ctrl->gpio_num_info;
-
-	if (soc_info->gpio_data &&
-		gpio_num_info &&
-		gpio_num_info->valid[SENSOR_VAF] == 1) {
-		gpio_set_value_cansleep(
-			gpio_num_info->gpio_num[SENSOR_VAF],
-			1);
-	}
-
-	/* VREG needs some delay to power up */
-	usleep_range(2000, 2050);
-
-	return rc;
-}
-
-static int32_t cam_actuator_power_down(struct cam_actuator_ctrl_t *a_ctrl)
-{
-	int32_t rc = 0;
-	struct cam_hw_soc_info *soc_info =
-		&a_ctrl->soc_info;
-	struct msm_camera_gpio_num_info *gpio_num_info = NULL;
-
-	gpio_num_info = a_ctrl->gpio_num_info;
-
-	if (soc_info->gpio_data &&
-		gpio_num_info &&
-		gpio_num_info->valid[SENSOR_VAF] == 1) {
-
-		gpio_set_value_cansleep(
-			gpio_num_info->gpio_num[SENSOR_VAF],
-			GPIOF_OUT_INIT_LOW);
-	}
-
-	rc = cam_actuator_vreg_control(a_ctrl, 0);
-	if (rc < 0)
-		CAM_ERR(CAM_ACTUATOR, "Disable Regulator Failed: %d", rc);
-
-	return rc;
-}
-
 void cam_actuator_shutdown(struct cam_actuator_ctrl_t *a_ctrl)
 {
 	int rc;
@@ -476,17 +526,12 @@
 	if (a_ctrl->cam_act_state == CAM_ACTUATOR_INIT)
 		return;
 
-	if (a_ctrl->cam_act_state == CAM_ACTUATOR_START) {
-		rc = camera_io_release(&a_ctrl->io_master_info);
-		if (rc < 0)
-			CAM_ERR(CAM_ACTUATOR, "Failed in releasing CCI");
+	if ((a_ctrl->cam_act_state == CAM_ACTUATOR_START) ||
+		(a_ctrl->cam_act_state == CAM_ACTUATOR_ACQUIRE)) {
 		rc = cam_actuator_power_down(a_ctrl);
 		if (rc < 0)
 			CAM_ERR(CAM_ACTUATOR, "Actuator Power down failed");
-		a_ctrl->cam_act_state = CAM_ACTUATOR_ACQUIRE;
-	}
 
-	if (a_ctrl->cam_act_state == CAM_ACTUATOR_ACQUIRE) {
 		rc = cam_destroy_device_hdl(a_ctrl->bridge_intf.device_hdl);
 		if (rc < 0)
 			CAM_ERR(CAM_ACTUATOR, "destroying  dhdl failed");
@@ -508,7 +553,7 @@
 		return -EINVAL;
 	}
 
-	pr_debug("Opcode to Actuator: %d", cmd->op_code);
+	CAM_DBG(CAM_ACTUATOR, "Opcode to Actuator: %d", cmd->op_code);
 
 	mutex_lock(&(a_ctrl->actuator_mutex));
 	switch (cmd->op_code) {
@@ -549,10 +594,31 @@
 			rc = -EFAULT;
 			goto release_mutex;
 		}
+
+		rc = cam_actuator_power_up(a_ctrl);
+		if (rc < 0) {
+			CAM_ERR(CAM_ACTUATOR, " Actuator Power up failed");
+			goto release_mutex;
+		}
+
 		a_ctrl->cam_act_state = CAM_ACTUATOR_ACQUIRE;
 	}
 		break;
 	case CAM_RELEASE_DEV: {
+		if (a_ctrl->cam_act_state != CAM_ACTUATOR_ACQUIRE) {
+			rc = -EINVAL;
+			CAM_WARN(CAM_ACTUATOR,
+			"Not in right state to release : %d",
+			a_ctrl->cam_act_state);
+			goto release_mutex;
+		}
+
+		rc = cam_actuator_power_down(a_ctrl);
+		if (rc < 0) {
+			CAM_ERR(CAM_ACTUATOR, "Actuator Power down failed");
+			goto release_mutex;
+		}
+
 		if (a_ctrl->bridge_intf.device_hdl == -1) {
 			CAM_ERR(CAM_ACTUATOR, "link hdl: %d device hdl: %d",
 				a_ctrl->bridge_intf.device_hdl,
@@ -582,28 +648,11 @@
 	}
 		break;
 	case CAM_START_DEV: {
-		rc = cam_actuator_power_up(a_ctrl);
-		if (rc < 0) {
-			CAM_ERR(CAM_ACTUATOR, " Actuator Power up failed");
-			goto release_mutex;
-		}
-		rc = camera_io_init(&a_ctrl->io_master_info);
-		if (rc < 0) {
-			CAM_ERR(CAM_ACTUATOR, "cci_init failed");
-			cam_actuator_power_down(a_ctrl);
-		}
-
-		rc = cam_actuator_apply_settings(a_ctrl,
-			&a_ctrl->i2c_data.init_settings);
-		if (rc < 0)
-			CAM_ERR(CAM_ACTUATOR, "Cannot apply Init settings");
-
-		/* Delete the request even if the apply is failed */
-		rc = delete_request(&a_ctrl->i2c_data.init_settings);
-		if (rc < 0) {
-			CAM_ERR(CAM_ACTUATOR,
-				"Fail in deleting the Init settings");
+		if (a_ctrl->cam_act_state != CAM_ACTUATOR_ACQUIRE) {
 			rc = -EINVAL;
+			CAM_WARN(CAM_ACTUATOR,
+			"Not in right state to start : %d",
+			a_ctrl->cam_act_state);
 			goto release_mutex;
 		}
 		a_ctrl->cam_act_state = CAM_ACTUATOR_START;
@@ -613,14 +662,14 @@
 		struct i2c_settings_array *i2c_set = NULL;
 		int i;
 
-		rc = camera_io_release(&a_ctrl->io_master_info);
-		if (rc < 0)
-			CAM_ERR(CAM_ACTUATOR, "Failed in releasing CCI");
-		rc = cam_actuator_power_down(a_ctrl);
-		if (rc < 0) {
-			CAM_ERR(CAM_ACTUATOR, "Actuator Power down failed");
+		if (a_ctrl->cam_act_state != CAM_ACTUATOR_START) {
+			rc = -EINVAL;
+			CAM_WARN(CAM_ACTUATOR,
+			"Not in right state to stop : %d",
+			a_ctrl->cam_act_state);
 			goto release_mutex;
 		}
+
 		for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
 			i2c_set = &(a_ctrl->i2c_data.per_frame[i]);
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.h
index f24070e..c28d79d 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.h
@@ -16,6 +16,16 @@
 #include "cam_actuator_dev.h"
 
 /**
+ * @power_info: power setting info to control the power
+ *
+ * This API constructs the default actuator power setting.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int32_t cam_actuator_construct_default_power_setting(
+	struct cam_sensor_power_ctrl_t *power_info);
+
+/**
  * @apply: Req mgr structure for applying request
  *
  * This API applies the request that is mentioned
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
index 465f5e2..c5c9b0a 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
@@ -140,9 +140,11 @@
 static int32_t cam_actuator_driver_i2c_probe(struct i2c_client *client,
 	const struct i2c_device_id *id)
 {
-	int32_t rc = 0, i = 0;
-	struct cam_actuator_ctrl_t *a_ctrl;
-	struct cam_hw_soc_info *soc_info = NULL;
+	int32_t                         rc = 0;
+	int32_t                         i = 0;
+	struct cam_actuator_ctrl_t      *a_ctrl;
+	struct cam_hw_soc_info          *soc_info = NULL;
+	struct cam_actuator_soc_private *soc_private = NULL;
 
 	if (client == NULL || id == NULL) {
 		CAM_ERR(CAM_ACTUATOR, "Invalid Args client: %pK id: %pK",
@@ -164,6 +166,14 @@
 
 	i2c_set_clientdata(client, a_ctrl);
 
+	soc_private = kzalloc(sizeof(struct cam_actuator_soc_private),
+		GFP_KERNEL);
+	if (!soc_private) {
+		rc = -ENOMEM;
+		goto free_ctrl;
+	}
+	a_ctrl->soc_info.soc_private = soc_private;
+
 	a_ctrl->io_master_info.client = client;
 	soc_info = &a_ctrl->soc_info;
 	soc_info->dev = &client->dev;
@@ -178,7 +188,11 @@
 
 	rc = cam_actuator_init_subdev(a_ctrl);
 	if (rc)
-		goto free_ctrl;
+		goto free_soc;
+
+	if (soc_private->i2c_info.slave_addr != 0)
+		a_ctrl->io_master_info.client->addr =
+			soc_private->i2c_info.slave_addr;
 
 	a_ctrl->i2c_data.per_frame =
 		(struct i2c_settings_array *)
@@ -194,14 +208,6 @@
 	for (i = 0; i < MAX_PER_FRAME_ARRAY; i++)
 		INIT_LIST_HEAD(&(a_ctrl->i2c_data.per_frame[i].list_head));
 
-	rc = cam_soc_util_request_platform_resource(&a_ctrl->soc_info,
-		NULL, NULL);
-	if (rc < 0) {
-		CAM_ERR(CAM_ACTUATOR,
-			"Requesting Platform Resources failed rc %d", rc);
-		goto free_mem;
-	}
-
 	a_ctrl->bridge_intf.device_hdl = -1;
 	a_ctrl->bridge_intf.ops.get_dev_info =
 		cam_actuator_publish_dev_info;
@@ -212,6 +218,14 @@
 
 	v4l2_set_subdevdata(&(a_ctrl->v4l2_dev_str.sd), a_ctrl);
 
+	rc = cam_actuator_construct_default_power_setting(
+		&soc_private->power_info);
+	if (rc < 0) {
+		CAM_ERR(CAM_ACTUATOR,
+			"Construct default actuator power setting failed.");
+		goto free_mem;
+	}
+
 	a_ctrl->cam_act_state = CAM_ACTUATOR_INIT;
 
 	return rc;
@@ -219,6 +233,8 @@
 	kfree(a_ctrl->i2c_data.per_frame);
 unreg_subdev:
 	cam_unregister_subdev(&(a_ctrl->v4l2_dev_str));
+free_soc:
+	kfree(soc_private);
 free_ctrl:
 	kfree(a_ctrl);
 	return rc;
@@ -226,8 +242,10 @@
 
 static int32_t cam_actuator_platform_remove(struct platform_device *pdev)
 {
-	struct cam_actuator_ctrl_t  *a_ctrl;
 	int32_t rc = 0;
+	struct cam_actuator_ctrl_t      *a_ctrl;
+	struct cam_actuator_soc_private *soc_private;
+	struct cam_sensor_power_ctrl_t  *power_info;
 
 	a_ctrl = platform_get_drvdata(pdev);
 	if (!a_ctrl) {
@@ -235,8 +253,15 @@
 		return 0;
 	}
 
+	soc_private =
+		(struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private;
+	power_info = &soc_private->power_info;
+
 	kfree(a_ctrl->io_master_info.cci_client);
 	a_ctrl->io_master_info.cci_client = NULL;
+	kfree(power_info->power_setting);
+	kfree(power_info->power_down_setting);
+	kfree(a_ctrl->soc_info.soc_private);
 	kfree(a_ctrl->i2c_data.per_frame);
 	a_ctrl->i2c_data.per_frame = NULL;
 	devm_kfree(&pdev->dev, a_ctrl);
@@ -246,17 +271,29 @@
 
 static int32_t cam_actuator_driver_i2c_remove(struct i2c_client *client)
 {
-	struct cam_actuator_ctrl_t  *a_ctrl = i2c_get_clientdata(client);
 	int32_t rc = 0;
+	struct cam_actuator_ctrl_t      *a_ctrl =
+		i2c_get_clientdata(client);
+	struct cam_actuator_soc_private *soc_private;
+	struct cam_sensor_power_ctrl_t  *power_info;
 
 	/* Handle I2C Devices */
 	if (!a_ctrl) {
 		CAM_ERR(CAM_ACTUATOR, "Actuator device is NULL");
 		return -EINVAL;
 	}
+
+	soc_private =
+		(struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private;
+	power_info = &soc_private->power_info;
+
 	/*Free Allocated Mem */
 	kfree(a_ctrl->i2c_data.per_frame);
 	a_ctrl->i2c_data.per_frame = NULL;
+	kfree(power_info->power_setting);
+	kfree(power_info->power_down_setting);
+	kfree(a_ctrl->soc_info.soc_private);
+	a_ctrl->soc_info.soc_private = NULL;
 	kfree(a_ctrl);
 	return rc;
 }
@@ -269,8 +306,10 @@
 static int32_t cam_actuator_driver_platform_probe(
 	struct platform_device *pdev)
 {
-	int32_t rc = 0, i = 0;
-	struct cam_actuator_ctrl_t *a_ctrl = NULL;
+	int32_t                         rc = 0;
+	int32_t                         i = 0;
+	struct cam_actuator_ctrl_t      *a_ctrl = NULL;
+	struct cam_actuator_soc_private *soc_private = NULL;
 
 	/* Create sensor control structure */
 	a_ctrl = devm_kzalloc(&pdev->dev,
@@ -287,15 +326,28 @@
 
 	a_ctrl->io_master_info.cci_client = kzalloc(sizeof(
 		struct cam_sensor_cci_client), GFP_KERNEL);
-	if (!(a_ctrl->io_master_info.cci_client))
-		return -ENOMEM;
+	if (!(a_ctrl->io_master_info.cci_client)) {
+		rc = -ENOMEM;
+		goto free_ctrl;
+	}
+
+	soc_private = kzalloc(sizeof(struct cam_actuator_soc_private),
+		GFP_KERNEL);
+	if (!soc_private) {
+		rc = -ENOMEM;
+		goto free_cci_client;
+	}
+	a_ctrl->soc_info.soc_private = soc_private;
+	soc_private->power_info.dev = &pdev->dev;
 
 	a_ctrl->i2c_data.per_frame =
 		(struct i2c_settings_array *)
 		kzalloc(sizeof(struct i2c_settings_array) *
 		MAX_PER_FRAME_ARRAY, GFP_KERNEL);
-	if (a_ctrl->i2c_data.per_frame == NULL)
-		return -ENOMEM;
+	if (a_ctrl->i2c_data.per_frame == NULL) {
+		rc = -ENOMEM;
+		goto free_soc;
+	}
 
 	INIT_LIST_HEAD(&(a_ctrl->i2c_data.init_settings.list_head));
 
@@ -305,7 +357,7 @@
 	rc = cam_actuator_parse_dt(a_ctrl, &(pdev->dev));
 	if (rc < 0) {
 		CAM_ERR(CAM_ACTUATOR, "Paring actuator dt failed rc %d", rc);
-		goto free_ctrl;
+		goto free_mem;
 	}
 
 	/* Fill platform device id*/
@@ -315,14 +367,6 @@
 	if (rc)
 		goto free_mem;
 
-	rc = cam_soc_util_request_platform_resource(&a_ctrl->soc_info,
-			NULL, NULL);
-	if (rc < 0) {
-		CAM_ERR(CAM_ACTUATOR,
-			"Requesting Platform Resources failed rc %d", rc);
-		goto unreg_subdev;
-	}
-
 	a_ctrl->bridge_intf.device_hdl = -1;
 	a_ctrl->bridge_intf.ops.get_dev_info =
 		cam_actuator_publish_dev_info;
@@ -336,11 +380,23 @@
 	platform_set_drvdata(pdev, a_ctrl);
 	v4l2_set_subdevdata(&a_ctrl->v4l2_dev_str.sd, a_ctrl);
 
+	rc = cam_actuator_construct_default_power_setting(
+		&soc_private->power_info);
+	if (rc < 0) {
+		CAM_ERR(CAM_ACTUATOR,
+			"Construct default actuator power setting failed.");
+		goto unreg_subdev;
+	}
+
 	return rc;
 unreg_subdev:
 	cam_unregister_subdev(&(a_ctrl->v4l2_dev_str));
 free_mem:
 	kfree(a_ctrl->i2c_data.per_frame);
+free_soc:
+	kfree(soc_private);
+free_cci_client:
+	kfree(a_ctrl->io_master_info.cci_client);
 free_ctrl:
 	devm_kfree(&pdev->dev, a_ctrl);
 	return rc;
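
The platform and I2C probe paths above now unwind partial allocations through ordered labels (free_mem, free_soc, free_cci_client, free_ctrl) instead of returning early and leaking what was already allocated. A standalone sketch of the same idiom, reduced to plain malloc/free:

#include <stdlib.h>

struct dev_priv { void *a; void *b; void *c; };

static int dev_probe(struct dev_priv *p)
{
	p->a = malloc(16);
	if (!p->a)
		goto err;
	p->b = malloc(16);
	if (!p->b)
		goto free_a;
	p->c = malloc(16);
	if (!p->c)
		goto free_b;
	return 0;                 /* success: caller owns a, b and c */

free_b:
	free(p->b);
free_a:
	free(p->a);
err:
	return -1;                /* every partial allocation released */
}

int main(void)
{
	struct dev_priv p = { 0 };

	return dev_probe(&p) ? 1 : 0;
}
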
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
index bd5d50f..8b8b1ef 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
@@ -53,11 +53,25 @@
 	ACT_APPLY_SETTINGS_LATER,
 };
 
-enum cam_actator_state {
+enum cam_actuator_state {
 	CAM_ACTUATOR_INIT,
 	CAM_ACTUATOR_ACQUIRE,
 	CAM_ACTUATOR_START,
-	CAM_ACTUATOR_RELEASE,
+};
+
+/**
+ * struct cam_actuator_i2c_info_t - I2C info
+ * @slave_addr      :   slave address
+ * @i2c_freq_mode   :   i2c frequency mode
+ */
+struct cam_actuator_i2c_info_t {
+	uint16_t slave_addr;
+	uint8_t i2c_freq_mode;
+};
+
+struct cam_actuator_soc_private {
+	struct cam_actuator_i2c_info_t i2c_info;
+	struct cam_sensor_power_ctrl_t power_info;
 };
 
 /**
@@ -102,8 +116,7 @@
 	struct mutex actuator_mutex;
 	uint32_t id;
 	enum cam_actuator_apply_state_t setting_apply_state;
-	enum cam_actator_state cam_act_state;
-	struct msm_camera_gpio_num_info *gpio_num_info;
+	enum cam_actuator_state cam_act_state;
 	uint8_t cam_pinctrl_status;
 	struct cam_subdev v4l2_dev_str;
 	struct i2c_data_settings i2c_data;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c
index f47ec2f..55b7c72 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c
@@ -22,9 +22,12 @@
 int32_t cam_actuator_parse_dt(struct cam_actuator_ctrl_t *a_ctrl,
 	struct device *dev)
 {
-	int32_t                   rc = 0;
-	struct cam_hw_soc_info *soc_info = &a_ctrl->soc_info;
-	struct device_node *of_node = NULL;
+	int32_t                         rc = 0;
+	struct cam_hw_soc_info          *soc_info = &a_ctrl->soc_info;
+	struct cam_actuator_soc_private *soc_private =
+		(struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private;
+	struct cam_sensor_power_ctrl_t  *power_info = &soc_private->power_info;
+	struct device_node              *of_node = NULL;
 
 	/* Initialize mutex */
 	mutex_init(&(a_ctrl->actuator_mutex));
@@ -61,9 +64,8 @@
 	}
 
 	rc = cam_sensor_util_init_gpio_pin_tbl(soc_info,
-		&a_ctrl->gpio_num_info);
-
-	if ((rc < 0) || (!a_ctrl->gpio_num_info)) {
+		&power_info->gpio_num_info);
+	if ((rc < 0) || (!power_info->gpio_num_info)) {
 		CAM_ERR(CAM_ACTUATOR, "No/Error Actuator GPIOs");
 		return -EINVAL;
 	}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
index c62b251..d7a6504 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
@@ -441,7 +441,7 @@
 			if (cmd->reg_addr + 1 ==
 				(cmd+1)->reg_addr) {
 				len += data_len;
-				*pack += data_len;
+				(*pack)++;
 			} else {
 				break;
 			}
@@ -730,10 +730,30 @@
 					reg_addr++;
 			} else {
 				if ((i + 1) <= cci_dev->payload_size) {
-					data[i++] = (i2c_cmd->reg_data &
-						0xFF00) >> 8; /* MSB */
-					data[i++] = i2c_cmd->reg_data &
-						0x00FF; /* LSB */
+					switch (i2c_msg->data_type) {
+					case CAMERA_SENSOR_I2C_TYPE_DWORD:
+						data[i++] = (i2c_cmd->reg_data &
+							0xFF000000) >> 24;
+						/* fallthrough */
+					case CAMERA_SENSOR_I2C_TYPE_3B:
+						data[i++] = (i2c_cmd->reg_data &
+							0x00FF0000) >> 16;
+						/* fallthrough */
+					case CAMERA_SENSOR_I2C_TYPE_WORD:
+						data[i++] = (i2c_cmd->reg_data &
+							0x0000FF00) >> 8;
+						/* fallthrough */
+					case CAMERA_SENSOR_I2C_TYPE_BYTE:
+						data[i++] = i2c_cmd->reg_data &
+							0x000000FF;
+						break;
+					default:
+						CAM_ERR(CAM_CCI,
+							"invalid data type: %d",
+							i2c_msg->data_type);
+						return -EINVAL;
+					}
+
 					if (c_ctrl->cmd ==
 						MSM_CCI_I2C_WRITE_SEQ)
 						reg_addr++;
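
The rewritten write path above packs reg_data most-significant byte first and relies on case fall-through, so a DWORD emits four bytes, a 3-byte type three, and so on, instead of always assuming a 16-bit value. A standalone sketch of the same packing:

#include <stdint.h>
#include <stddef.h>

/* Packs 'val' MSB-first into buf; returns the number of bytes written. */
static size_t pack_reg_data(uint8_t *buf, uint32_t val, int num_bytes)
{
	size_t i = 0;

	switch (num_bytes) {
	case 4: buf[i++] = (val & 0xFF000000u) >> 24; /* fallthrough */
	case 3: buf[i++] = (val & 0x00FF0000u) >> 16; /* fallthrough */
	case 2: buf[i++] = (val & 0x0000FF00u) >> 8;  /* fallthrough */
	case 1: buf[i++] =  val & 0x000000FFu;        break;
	default: return 0;                            /* unsupported width */
	}
	return i;
}

int main(void)
{
	uint8_t buf[4];
	size_t n = pack_reg_data(buf, 0x11223344u, 4);

	return (n == 4 && buf[0] == 0x11 && buf[3] == 0x44) ? 0 : 1;
}
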
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
index 4c996e08..d0ee0f6 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
@@ -289,7 +289,7 @@
 irqreturn_t cam_cci_irq(int irq_num, void *data);
 
 #ifdef CONFIG_SPECTRA_CAMERA
-struct v4l2_subdev *cam_cci_get_subdev(void);
+extern struct v4l2_subdev *cam_cci_get_subdev(void);
 #else
 static inline struct v4l2_subdev *cam_cci_get_subdev(void)
 {
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
index 8de4472..cf7a65f 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
@@ -99,7 +99,7 @@
 
 	/* Enable Regulators and IRQ*/
 	rc = cam_soc_util_enable_platform_resource(soc_info, true,
-		CAM_TURBO_VOTE, true);
+		CAM_LOWSVS_VOTE, true);
 	if (rc < 0) {
 		CAM_DBG(CAM_CCI, "request platform resources failed");
 		goto platform_enable_failed;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
index 7cc26c1..cb44cb8 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
@@ -494,8 +494,8 @@
 	}
 		break;
 	case CAM_STOP_DEV: {
-		if (csiphy_dev->csiphy_state !=
-			CAM_CSIPHY_START) {
+		if ((csiphy_dev->csiphy_state != CAM_CSIPHY_START) ||
+			!csiphy_dev->start_dev_count) {
 			CAM_ERR(CAM_CSIPHY, "Not in right state to stop : %d",
 				csiphy_dev->csiphy_state);
 			goto release_mutex;
@@ -508,16 +508,13 @@
 		}
 
 		rc = cam_csiphy_disable_hw(csiphy_dev);
-		if (rc < 0) {
+		if (rc < 0)
 			CAM_ERR(CAM_CSIPHY, "Failed in csiphy release");
-			cam_cpas_stop(csiphy_dev->cpas_handle);
-			goto release_mutex;
-		}
+
 		rc = cam_cpas_stop(csiphy_dev->cpas_handle);
-		if (rc < 0) {
+		if (rc < 0)
 			CAM_ERR(CAM_CSIPHY, "de-voting CPAS: %d", rc);
-			goto release_mutex;
-		}
+
 		csiphy_dev->csiphy_state = CAM_CSIPHY_ACQUIRE;
 	}
 		break;
@@ -547,8 +544,7 @@
 		} else {
 			csiphy_dev->bridge_intf.device_hdl[1] = -1;
 			csiphy_dev->bridge_intf.link_hdl[1] = -1;
-			csiphy_dev->bridge_intf.
-				session_hdl[1] = -1;
+			csiphy_dev->bridge_intf.session_hdl[1] = -1;
 			csiphy_dev->is_acquired_dev_combo_mode = 0;
 		}
 
@@ -587,10 +583,10 @@
 		struct cam_ahb_vote ahb_vote;
 		struct cam_axi_vote axi_vote;
 
-		csiphy_dev->start_dev_count++;
-
-		if (csiphy_dev->csiphy_state == CAM_CSIPHY_START)
+		if (csiphy_dev->csiphy_state == CAM_CSIPHY_START) {
+			csiphy_dev->start_dev_count++;
 			goto release_mutex;
+		}
 
 		ahb_vote.type = CAM_VOTE_ABSOLUTE;
 		ahb_vote.vote.level = CAM_SVS_VOTE;
@@ -616,9 +612,11 @@
 
 		if (rc < 0) {
 			CAM_ERR(CAM_CSIPHY, "cam_csiphy_config_dev failed");
+			cam_csiphy_disable_hw(csiphy_dev);
 			cam_cpas_stop(csiphy_dev->cpas_handle);
 			goto release_mutex;
 		}
+		csiphy_dev->start_dev_count++;
 		csiphy_dev->csiphy_state = CAM_CSIPHY_START;
 	}
 		break;
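
With this change CAM_START_DEV only increments start_dev_count after cam_csiphy_config_dev() succeeds (and tears the hardware back down on failure), while CAM_STOP_DEV rejects a stop when the count is zero. A standalone sketch of that ordering, with illustrative names:

struct phy {
	int started;              /* hardware actually configured */
	unsigned int start_count; /* outstanding start references */
};

static int phy_start(struct phy *p, int (*hw_start)(void))
{
	if (p->started) {         /* already running: just add a reference */
		p->start_count++;
		return 0;
	}
	if (hw_start())
		return -1;        /* failure leaves the count untouched */
	p->start_count++;         /* counted only after success */
	p->started = 1;
	return 0;
}

static int phy_stop(struct phy *p, void (*hw_stop)(void))
{
	if (!p->started || !p->start_count)
		return -1;        /* stop without a matching start */
	if (--p->start_count == 0) {
		hw_stop();
		p->started = 0;
	}
	return 0;
}

static int fake_start(void) { return 0; }
static void fake_stop(void) { }

int main(void)
{
	struct phy p = { 0, 0 };

	phy_start(&p, fake_start);
	phy_start(&p, fake_start);  /* second user: reference only */
	phy_stop(&p, fake_stop);
	phy_stop(&p, fake_stop);    /* last user powers the PHY off */
	return p.start_count || p.started;
}
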
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c
index d2a8467..6db5a97 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c
@@ -90,7 +90,7 @@
 	}
 
 	rc = cam_soc_util_enable_platform_resource(soc_info, true,
-		CAM_TURBO_VOTE, ENABLE_IRQ);
+		CAM_SVS_VOTE, ENABLE_IRQ);
 	if (rc < 0) {
 		CAM_ERR(CAM_CSIPHY, "failed to enable platform resources %d",
 			rc);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
index bd9f0fe..72b1779 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
@@ -176,6 +176,8 @@
 		return rc;
 	}
 
+	power_info->dev = soc_info->dev;
+
 	rc = cam_sensor_core_power_up(power_info, soc_info);
 	if (rc) {
 		CAM_ERR(CAM_EEPROM, "failed in eeprom power up rc %d", rc);
@@ -289,6 +291,8 @@
 		CAM_ERR(CAM_EEPROM, "failed: eeprom power up rc %d", rc);
 		goto data_mem_free;
 	}
+
+	e_ctrl->cam_eeprom_state = CAM_EEPROM_CONFIG;
 	if (e_ctrl->eeprom_device_type == MSM_CAMERA_SPI_DEVICE) {
 		rc = cam_eeprom_match_id(e_ctrl);
 		if (rc) {
@@ -305,6 +309,8 @@
 	rc = cam_eeprom_power_down(e_ctrl);
 	if (rc)
 		CAM_ERR(CAM_EEPROM, "failed: eeprom power down rc %d", rc);
+
+	e_ctrl->cam_eeprom_state = CAM_EEPROM_ACQUIRE;
 	return rc;
 power_down:
 	cam_eeprom_power_down(e_ctrl);
@@ -313,6 +319,7 @@
 	kfree(e_ctrl->cal_data.map);
 	e_ctrl->cal_data.num_data = 0;
 	e_ctrl->cal_data.num_map = 0;
+	e_ctrl->cam_eeprom_state = CAM_EEPROM_ACQUIRE;
 	return rc;
 }
 
@@ -650,8 +657,15 @@
 	struct cam_packet              *csl_packet = NULL;
 	struct cam_eeprom_soc_private  *soc_private =
 		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info;
 
 	ioctl_ctrl = (struct cam_control *)arg;
+
+	if (ioctl_ctrl->handle_type != CAM_HANDLE_USER_POINTER) {
+		CAM_ERR(CAM_EEPROM, "Invalid Handle Type");
+		return -EINVAL;
+	}
+
 	if (copy_from_user(&dev_config, (void __user *) ioctl_ctrl->handle,
 		sizeof(dev_config)))
 		return -EFAULT;
@@ -662,6 +676,14 @@
 			"error in converting command Handle Error: %d", rc);
 		return rc;
 	}
+
+	if (dev_config.offset > pkt_len) {
+		CAM_ERR(CAM_EEPROM,
+			"Offset is out of bounds: off: %lld, len: %zu",
+			dev_config.offset, pkt_len);
+		return -EINVAL;
+	}
+
 	csl_packet = (struct cam_packet *)
 		(generic_pkt_addr + dev_config.offset);
 	switch (csl_packet->header.op_code & 0xFFFFFF) {
@@ -680,7 +702,7 @@
 			e_ctrl->cal_data.num_map = 0;
 			CAM_DBG(CAM_EEPROM,
 				"Returning the data using kernel probe");
-		break;
+			break;
 		}
 		rc = cam_eeprom_init_pkt_parser(e_ctrl, csl_packet);
 		if (rc) {
@@ -704,6 +726,7 @@
 			goto memdata_free;
 		}
 
+		e_ctrl->cam_eeprom_state = CAM_EEPROM_CONFIG;
 		rc = cam_eeprom_read_memory(e_ctrl, &e_ctrl->cal_data);
 		if (rc) {
 			CAM_ERR(CAM_EEPROM,
@@ -713,6 +736,7 @@
 
 		rc = cam_eeprom_get_cal_data(e_ctrl, csl_packet);
 		rc = cam_eeprom_power_down(e_ctrl);
+		e_ctrl->cam_eeprom_state = CAM_EEPROM_ACQUIRE;
 		kfree(e_ctrl->cal_data.mapdata);
 		kfree(e_ctrl->cal_data.map);
 		e_ctrl->cal_data.num_data = 0;
@@ -727,23 +751,26 @@
 memdata_free:
 	kfree(e_ctrl->cal_data.mapdata);
 error:
+	kfree(power_info->power_setting);
+	kfree(power_info->power_down_setting);
 	kfree(e_ctrl->cal_data.map);
 	e_ctrl->cal_data.num_data = 0;
 	e_ctrl->cal_data.num_map = 0;
+	e_ctrl->cam_eeprom_state = CAM_EEPROM_INIT;
 	return rc;
 }
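
The parse path above now rejects a dev_config.offset that lies beyond the mapped packet buffer before the cam_packet pointer is formed from it. A standalone sketch of that validation (a stricter variant would also reserve room for the packet header at the chosen offset):

#include <stdint.h>
#include <stddef.h>

/* Returns NULL when the requested offset cannot lie inside the mapping. */
static const void *packet_at_offset(const void *base, size_t mapped_len,
	uint64_t offset)
{
	if (offset > mapped_len)
		return NULL;
	return (const uint8_t *)base + offset;
}

int main(void)
{
	uint8_t buf[64];

	return packet_at_offset(buf, sizeof(buf), 128) == NULL ? 0 : 1;
}
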
 
 void cam_eeprom_shutdown(struct cam_eeprom_ctrl_t *e_ctrl)
 {
 	int rc;
+	struct cam_eeprom_soc_private  *soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info;
 
 	if (e_ctrl->cam_eeprom_state == CAM_EEPROM_INIT)
 		return;
 
-	if (e_ctrl->cam_eeprom_state == CAM_EEPROM_START) {
-		rc = camera_io_release(&e_ctrl->io_master_info);
-		if (rc < 0)
-			CAM_ERR(CAM_EEPROM, "Failed in releasing CCI");
+	if (e_ctrl->cam_eeprom_state == CAM_EEPROM_CONFIG) {
 		rc = cam_eeprom_power_down(e_ctrl);
 		if (rc < 0)
 			CAM_ERR(CAM_EEPROM, "EEPROM Power down failed");
@@ -754,9 +781,13 @@
 		rc = cam_destroy_device_hdl(e_ctrl->bridge_intf.device_hdl);
 		if (rc < 0)
 			CAM_ERR(CAM_EEPROM, "destroying the device hdl");
+
 		e_ctrl->bridge_intf.device_hdl = -1;
 		e_ctrl->bridge_intf.link_hdl = -1;
 		e_ctrl->bridge_intf.session_hdl = -1;
+
+		kfree(power_info->power_setting);
+		kfree(power_info->power_down_setting);
 	}
 
 	e_ctrl->cam_eeprom_state = CAM_EEPROM_INIT;
@@ -807,6 +838,14 @@
 		e_ctrl->cam_eeprom_state = CAM_EEPROM_ACQUIRE;
 		break;
 	case CAM_RELEASE_DEV:
+		if (e_ctrl->cam_eeprom_state != CAM_EEPROM_ACQUIRE) {
+			rc = -EINVAL;
+			CAM_WARN(CAM_EEPROM,
+			"Not in right state to release : %d",
+			e_ctrl->cam_eeprom_state);
+			goto release_mutex;
+		}
+
 		if (e_ctrl->bridge_intf.device_hdl == -1) {
 			CAM_ERR(CAM_EEPROM,
 				"Invalid Handles: link hdl: %d device hdl: %d",
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
index d667cf4..5eb29c3 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
@@ -201,13 +201,6 @@
 		goto free_soc;
 	}
 
-	soc_private = (struct cam_eeprom_soc_private *)(id->driver_data);
-	if (!soc_private) {
-		CAM_ERR(CAM_EEPROM, "board info NULL");
-		rc = -EINVAL;
-		goto ectrl_free;
-	}
-
 	rc = cam_eeprom_init_subdev(e_ctrl);
 	if (rc)
 		goto free_soc;
@@ -260,10 +253,9 @@
 		return -EINVAL;
 	}
 
-	if (soc_private) {
-		kfree(soc_private->power_info.gpio_num_info);
+	if (soc_private)
 		kfree(soc_private);
-	}
+
 	kfree(e_ctrl);
 
 	return 0;
@@ -451,6 +443,9 @@
 
 	platform_set_drvdata(pdev, e_ctrl);
 	v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, e_ctrl);
+
+	e_ctrl->cam_eeprom_state = CAM_EEPROM_INIT;
+
 	return rc;
 free_soc:
 	kfree(soc_private);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h
index fa4a3dd..4a2190d 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h
@@ -40,8 +40,7 @@
 enum cam_eeprom_state {
 	CAM_EEPROM_INIT,
 	CAM_EEPROM_ACQUIRE,
-	CAM_EEPROM_START,
-	CAM_EEPROM_RELEASE,
+	CAM_EEPROM_CONFIG,
 };
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/Makefile
index 9aab0e4..c7889a5 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/Makefile
@@ -1,5 +1,6 @@
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
index 8573f00..55da264 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
@@ -14,6 +14,7 @@
 
 #include "cam_sensor_cmn_header.h"
 #include "cam_flash_core.h"
+#include "cam_res_mgr_api.h"
 
 int cam_flash_prepare(struct cam_flash_ctrl *flash_ctrl,
 	enum cam_flash_state state)
@@ -25,7 +26,7 @@
 		return -EINVAL;
 	}
 
-	if ((state == CAM_FLASH_STATE_INIT) &&
+	if ((state == CAM_FLASH_STATE_START) &&
 		(flash_ctrl->is_regulator_enabled == false)) {
 		rc = qpnp_flash_led_prepare(flash_ctrl->switch_trigger,
 			ENABLE_REGULATOR, NULL);
@@ -35,7 +36,8 @@
 			return rc;
 		}
 		flash_ctrl->is_regulator_enabled = true;
-	} else if ((state == CAM_FLASH_STATE_RELEASE) &&
+		flash_ctrl->flash_state = CAM_FLASH_STATE_START;
+	} else if ((state == CAM_FLASH_STATE_STOP) &&
 		(flash_ctrl->is_regulator_enabled == true)) {
 		rc = qpnp_flash_led_prepare(flash_ctrl->switch_trigger,
 			DISABLE_REGULATOR, NULL);
@@ -45,7 +47,7 @@
 			return rc;
 		}
 		flash_ctrl->is_regulator_enabled = false;
-		flash_ctrl->flash_state = CAM_FLASH_STATE_RELEASE;
+		flash_ctrl->flash_state = CAM_FLASH_STATE_ACQUIRE;
 	} else {
 		CAM_ERR(CAM_FLASH, "Wrong Flash State : %d",
 			flash_ctrl->flash_state);
@@ -55,6 +57,74 @@
 	return rc;
 }
 
+static int cam_flash_flush_nrt(struct cam_flash_ctrl *fctrl)
+{
+	int j = 0;
+	struct cam_flash_frame_setting *nrt_settings;
+
+	if (!fctrl)
+		return -EINVAL;
+
+	nrt_settings = &fctrl->nrt_info;
+
+	if (nrt_settings->cmn_attr.cmd_type ==
+		CAMERA_SENSOR_FLASH_CMD_TYPE_INIT) {
+		fctrl->flash_init_setting.cmn_attr.is_settings_valid = false;
+	} else if ((nrt_settings->cmn_attr.cmd_type ==
+		CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET) ||
+		(nrt_settings->cmn_attr.cmd_type ==
+		CAMERA_SENSOR_FLASH_CMD_TYPE_RER)) {
+		fctrl->nrt_info.cmn_attr.is_settings_valid = false;
+		fctrl->nrt_info.cmn_attr.count = 0;
+		fctrl->nrt_info.num_iterations = 0;
+		fctrl->nrt_info.led_on_delay_ms = 0;
+		fctrl->nrt_info.led_off_delay_ms = 0;
+		for (j = 0; j < CAM_FLASH_MAX_LED_TRIGGERS; j++)
+			fctrl->nrt_info.led_current_ma[j] = 0;
+	}
+
+	return 0;
+}
+
+int cam_flash_flush_request(struct cam_req_mgr_flush_request *flush)
+{
+	int rc = 0;
+	int i = 0, j = 0;
+	struct cam_flash_ctrl *fctrl = NULL;
+	int frame_offset = 0;
+
+	fctrl = (struct cam_flash_ctrl *) cam_get_device_priv(flush->dev_hdl);
+	if (!fctrl) {
+		CAM_ERR(CAM_FLASH, "Device data is NULL");
+		return -EINVAL;
+	}
+
+	if (flush->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
+	/* flush all requests*/
+		for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
+			fctrl->per_frame[i].cmn_attr.request_id = 0;
+			fctrl->per_frame[i].cmn_attr.is_settings_valid = false;
+			fctrl->per_frame[i].cmn_attr.count = 0;
+			for (j = 0; j < CAM_FLASH_MAX_LED_TRIGGERS; j++)
+				fctrl->per_frame[i].led_current_ma[j] = 0;
+		}
+
+		rc = cam_flash_flush_nrt(fctrl);
+		if (rc)
+			CAM_ERR(CAM_FLASH, "NonRealTime flush error");
+	} else if (flush->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
+	/* flush request with req_id*/
+		frame_offset = flush->req_id % MAX_PER_FRAME_ARRAY;
+		fctrl->per_frame[frame_offset].cmn_attr.request_id = 0;
+		fctrl->per_frame[frame_offset].cmn_attr.is_settings_valid =
+			false;
+		fctrl->per_frame[frame_offset].cmn_attr.count = 0;
+		for (i = 0; i < CAM_FLASH_MAX_LED_TRIGGERS; i++)
+			fctrl->per_frame[frame_offset].led_current_ma[i] = 0;
+	}
+	return rc;
+}
+
 static int cam_flash_ops(struct cam_flash_ctrl *flash_ctrl,
 	struct cam_flash_frame_setting *flash_data, enum camera_flash_opcode op)
 {
@@ -83,7 +153,8 @@
 
 				CAM_DBG(CAM_FLASH,
 					"Led_Current[%d] = %d", i, curr);
-				led_trigger_event(flash_ctrl->torch_trigger[i],
+				cam_res_mgr_led_trigger_event(
+					flash_ctrl->torch_trigger[i],
 					curr);
 			}
 		}
@@ -100,7 +171,8 @@
 
 				CAM_DBG(CAM_FLASH, "LED flash_current[%d]: %d",
 					i, curr);
-				led_trigger_event(flash_ctrl->flash_trigger[i],
+				cam_res_mgr_led_trigger_event(
+					flash_ctrl->flash_trigger[i],
 					curr);
 			}
 		}
@@ -110,7 +182,9 @@
 	}
 
 	if (flash_ctrl->switch_trigger)
-		led_trigger_event(flash_ctrl->switch_trigger, LED_SWITCH_ON);
+		cam_res_mgr_led_trigger_event(
+			flash_ctrl->switch_trigger,
+			LED_SWITCH_ON);
 
 	return 0;
 }
@@ -126,18 +200,21 @@
 
 	for (i = 0; i < flash_ctrl->flash_num_sources; i++)
 		if (flash_ctrl->flash_trigger[i])
-			led_trigger_event(flash_ctrl->flash_trigger[i],
+			cam_res_mgr_led_trigger_event(
+				flash_ctrl->flash_trigger[i],
 				LED_OFF);
 
 	for (i = 0; i < flash_ctrl->torch_num_sources; i++)
 		if (flash_ctrl->torch_trigger[i])
-			led_trigger_event(flash_ctrl->torch_trigger[i],
+			cam_res_mgr_led_trigger_event(
+				flash_ctrl->torch_trigger[i],
 				LED_OFF);
 
 	if (flash_ctrl->switch_trigger)
-		led_trigger_event(flash_ctrl->switch_trigger,
+		cam_res_mgr_led_trigger_event(flash_ctrl->switch_trigger,
 			LED_SWITCH_OFF);
 
+	flash_ctrl->flash_state = CAM_FLASH_STATE_START;
 	return 0;
 }
 
@@ -154,7 +231,8 @@
 
 	for (i = 0; i < flash_ctrl->flash_num_sources; i++)
 		if (flash_ctrl->flash_trigger[i])
-			led_trigger_event(flash_ctrl->flash_trigger[i],
+			cam_res_mgr_led_trigger_event(
+				flash_ctrl->flash_trigger[i],
 				LED_OFF);
 
 	rc = cam_flash_ops(flash_ctrl, flash_data,
@@ -178,7 +256,8 @@
 
 	for (i = 0; i < flash_ctrl->torch_num_sources; i++)
 		if (flash_ctrl->torch_trigger[i])
-			led_trigger_event(flash_ctrl->torch_trigger[i],
+			cam_res_mgr_led_trigger_event(
+				flash_ctrl->torch_trigger[i],
 				LED_OFF);
 
 	rc = cam_flash_ops(flash_ctrl, flash_data,
@@ -237,72 +316,44 @@
 			flash_data = &fctrl->nrt_info;
 			if (flash_data->opcode ==
 				CAMERA_SENSOR_FLASH_OP_FIRELOW) {
-				if (!(fctrl->is_regulator_enabled)) {
-					rc = cam_flash_prepare(fctrl,
-						CAM_FLASH_STATE_INIT);
-					if (rc) {
-						CAM_ERR(CAM_FLASH,
-							"Reg Enable Failed %d",
-							rc);
-						goto nrt_del_req;
-					}
-					fctrl->flash_state =
-						CAM_FLASH_STATE_INIT;
-					rc = cam_flash_low(fctrl, flash_data);
-					if (rc) {
-						CAM_ERR(CAM_FLASH,
-							"Torch ON failed : %d",
-							rc);
-						goto nrt_del_req;
-					}
-					fctrl->flash_state =
-						CAM_FLASH_STATE_LOW;
-				}
-			} else if (flash_data->opcode ==
-				CAMERA_SENSOR_FLASH_OP_OFF) {
-				if (fctrl->flash_state !=
-					CAM_FLASH_STATE_INIT) {
-					rc = cam_flash_off(fctrl);
-					if (rc)
-						CAM_ERR(CAM_FLASH,
-							"LED off failed: %d",
-							rc);
-				}
-
-				rc = cam_flash_prepare(fctrl,
-					CAM_FLASH_STATE_RELEASE);
+				rc = cam_flash_low(fctrl, flash_data);
 				if (rc) {
 					CAM_ERR(CAM_FLASH,
-						"Regulator Disable failed %d",
+						"Torch ON failed : %d",
 						rc);
 					goto nrt_del_req;
 				}
-
 				fctrl->flash_state =
-					CAM_FLASH_STATE_RELEASE;
-				fctrl->is_regulator_enabled = false;
+						CAM_FLASH_STATE_LOW;
+			} else if (flash_data->opcode ==
+				CAMERA_SENSOR_FLASH_OP_OFF) {
+				if (fctrl->flash_state ==
+					CAM_FLASH_STATE_LOW) {
+					rc = cam_flash_off(fctrl);
+					if (rc)
+						CAM_ERR(CAM_FLASH,
+						"LED off failed: %d",
+						rc);
+				}
 			}
 		} else if (fctrl->nrt_info.cmn_attr.cmd_type ==
 			CAMERA_SENSOR_FLASH_CMD_TYPE_RER) {
 			flash_data = &fctrl->nrt_info;
 
-			if (fctrl->flash_state != CAM_FLASH_STATE_INIT) {
+			if (fctrl->flash_state != CAM_FLASH_STATE_START) {
 				rc = cam_flash_off(fctrl);
 				if (rc) {
 					CAM_ERR(CAM_FLASH,
 						"Flash off failed: %d",
 						rc);
-				} else {
-					fctrl->flash_state =
-						CAM_FLASH_STATE_INIT;
+					goto nrt_del_req;
 				}
 			}
-
 			num_iterations = flash_data->num_iterations;
 			for (i = 0; i < num_iterations; i++) {
 				/* Turn On Torch */
 				if (fctrl->flash_state ==
-					CAM_FLASH_STATE_INIT) {
+					CAM_FLASH_STATE_START) {
 					rc = cam_flash_low(fctrl, flash_data);
 					if (rc) {
 						CAM_ERR(CAM_FLASH,
@@ -311,11 +362,12 @@
 					}
 					fctrl->flash_state =
 						CAM_FLASH_STATE_LOW;
-				}
-				usleep_range(
-				flash_data->led_on_delay_ms * 1000,
-				flash_data->led_on_delay_ms * 1000 + 100);
 
+					usleep_range(
+					flash_data->led_on_delay_ms * 1000,
+					flash_data->led_on_delay_ms * 1000 +
+						100);
+				}
 				/* Turn Off Torch */
 				rc = cam_flash_off(fctrl);
 				if (rc) {
@@ -324,7 +376,7 @@
 						rc);
 					continue;
 				}
-				fctrl->flash_state = CAM_FLASH_STATE_INIT;
+				fctrl->flash_state = CAM_FLASH_STATE_START;
 				usleep_range(
 				flash_data->led_off_delay_ms * 1000,
 				flash_data->led_off_delay_ms * 1000 + 100);
@@ -338,7 +390,7 @@
 			(flash_data->cmn_attr.is_settings_valid) &&
 			(flash_data->cmn_attr.request_id == req_id)) {
 			/* Turn On Flash */
-			if (fctrl->flash_state == CAM_FLASH_STATE_INIT) {
+			if (fctrl->flash_state == CAM_FLASH_STATE_START) {
 				rc = cam_flash_high(fctrl, flash_data);
 				if (rc) {
 					CAM_ERR(CAM_FLASH,
@@ -353,7 +405,7 @@
 			(flash_data->cmn_attr.is_settings_valid) &&
 			(flash_data->cmn_attr.request_id == req_id)) {
 			/* Turn On Torch */
-			if (fctrl->flash_state == CAM_FLASH_STATE_INIT) {
+			if (fctrl->flash_state == CAM_FLASH_STATE_START) {
 				rc = cam_flash_low(fctrl, flash_data);
 				if (rc) {
 					CAM_ERR(CAM_FLASH,
@@ -366,15 +418,13 @@
 		} else if ((flash_data->opcode == CAMERA_SENSOR_FLASH_OP_OFF) &&
 			(flash_data->cmn_attr.is_settings_valid) &&
 			(flash_data->cmn_attr.request_id == req_id)) {
-			if ((fctrl->flash_state != CAM_FLASH_STATE_RELEASE) ||
-				(fctrl->flash_state != CAM_FLASH_STATE_INIT)) {
+			if ((fctrl->flash_state == CAM_FLASH_STATE_LOW) ||
+				(fctrl->flash_state == CAM_FLASH_STATE_HIGH)) {
 				rc = cam_flash_off(fctrl);
 				if (rc) {
 					CAM_ERR(CAM_FLASH,
 						"Flash off failed %d", rc);
-				} else {
-					fctrl->flash_state =
-						CAM_FLASH_STATE_INIT;
+					goto apply_setting_err;
 				}
 			}
 		} else {
@@ -684,77 +734,14 @@
 	return 0;
 }
 
-static int cam_flash_flush_nrt(struct cam_flash_ctrl *fctrl)
+
+int cam_flash_stop_dev(struct cam_flash_ctrl *fctrl)
 {
-	int j = 0;
-	struct cam_flash_frame_setting *nrt_settings;
+	int rc = 0, i, j;
 
-	if (!fctrl)
-		return -EINVAL;
-
-	nrt_settings = &fctrl->nrt_info;
-
-	if (nrt_settings->cmn_attr.cmd_type ==
-		CAMERA_SENSOR_FLASH_CMD_TYPE_INIT) {
-		fctrl->flash_init_setting.cmn_attr.is_settings_valid = false;
-	} else if ((nrt_settings->cmn_attr.cmd_type ==
-		CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET) ||
-		(nrt_settings->cmn_attr.cmd_type ==
-		CAMERA_SENSOR_FLASH_CMD_TYPE_RER)) {
-		fctrl->nrt_info.cmn_attr.is_settings_valid = false;
-		fctrl->nrt_info.cmn_attr.count = 0;
-		fctrl->nrt_info.num_iterations = 0;
-		fctrl->nrt_info.led_on_delay_ms = 0;
-		fctrl->nrt_info.led_off_delay_ms = 0;
-		for (j = 0; j < CAM_FLASH_MAX_LED_TRIGGERS; j++)
-			fctrl->nrt_info.led_current_ma[j] = 0;
-	}
-
-	return 0;
-}
-
-int cam_flash_flush_request(struct cam_req_mgr_flush_request *flush)
-{
-	int rc = 0;
-	int i = 0, j = 0;
-	struct cam_flash_ctrl *fctrl = NULL;
-	int frame_offset = 0;
-
-	fctrl = (struct cam_flash_ctrl *) cam_get_device_priv(flush->dev_hdl);
-	if (!fctrl) {
-		CAM_ERR(CAM_FLASH, "Device data is NULL");
-		return -EINVAL;
-	}
-
-	if (flush->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
-	/* flush all requests*/
-		for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
-			fctrl->per_frame[i].cmn_attr.request_id = 0;
-			fctrl->per_frame[i].cmn_attr.is_settings_valid = false;
-			fctrl->per_frame[i].cmn_attr.count = 0;
-			for (j = 0; j < CAM_FLASH_MAX_LED_TRIGGERS; j++)
-				fctrl->per_frame[i].led_current_ma[j] = 0;
-		}
-
-		rc = cam_flash_flush_nrt(fctrl);
-		if (rc)
-			CAM_ERR(CAM_FLASH, "NonRealTime flush error");
-	} else if (flush->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
-	/* flush request with req_id*/
-		frame_offset = flush->req_id % MAX_PER_FRAME_ARRAY;
-		fctrl->per_frame[frame_offset].cmn_attr.request_id = 0;
-		fctrl->per_frame[frame_offset].cmn_attr.is_settings_valid =
-			false;
-		fctrl->per_frame[frame_offset].cmn_attr.count = 0;
-		for (i = 0; i < CAM_FLASH_MAX_LED_TRIGGERS; i++)
-			fctrl->per_frame[frame_offset].led_current_ma[i] = 0;
-	}
-	return rc;
-}
-
-void cam_flash_shutdown(struct cam_flash_ctrl *fctrl)
-{
-	int rc, i, j;
+	if ((fctrl->flash_state == CAM_FLASH_STATE_LOW) ||
+		(fctrl->flash_state == CAM_FLASH_STATE_HIGH))
+		cam_flash_off(fctrl);
 
 	for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
 		fctrl->per_frame[i].cmn_attr.request_id = 0;
@@ -764,27 +751,63 @@
 			fctrl->per_frame[i].led_current_ma[j] = 0;
 	}
 
-	cam_flash_flush_nrt(fctrl);
+	rc = cam_flash_flush_nrt(fctrl);
+	if (rc) {
+		CAM_ERR(CAM_FLASH,
+			"NonRealTime Dev flush failed rc: %d", rc);
+		return rc;
+	}
 
-	if ((fctrl->flash_state != CAM_FLASH_STATE_RELEASE) &&
+	if ((fctrl->flash_state == CAM_FLASH_STATE_START) &&
 		(fctrl->is_regulator_enabled == true)) {
-		rc = cam_flash_prepare(fctrl, CAM_FLASH_STATE_RELEASE);
+		rc = cam_flash_prepare(fctrl, CAM_FLASH_STATE_STOP);
 		if (rc)
-			CAM_ERR(CAM_FLASH, "Disable Regulator Failed ret = %d",
+			CAM_ERR(CAM_FLASH, "Disable Regulator Failed rc: %d",
 				rc);
 	}
 
-	if (fctrl->bridge_intf.device_hdl != -1) {
-		rc = cam_destroy_device_hdl(fctrl->bridge_intf.
-			device_hdl);
+	return rc;
+}
+
+int cam_flash_release_dev(struct cam_flash_ctrl *fctrl)
+{
+	int rc = 0;
+
+	if (fctrl->bridge_intf.device_hdl != -1) {
+		rc = cam_destroy_device_hdl(fctrl->bridge_intf.device_hdl);
 		if (rc)
 			CAM_ERR(CAM_FLASH,
-				"Failed in destroying the device Handle rc= %d",
+				"Failed in destroying device handle rc = %d",
 				rc);
 		fctrl->bridge_intf.device_hdl = -1;
 		fctrl->bridge_intf.link_hdl = -1;
 		fctrl->bridge_intf.session_hdl = -1;
 	}
+
+	return rc;
+}
+
+void cam_flash_shutdown(struct cam_flash_ctrl *fctrl)
+{
+	int rc;
+
+	if (fctrl->flash_state == CAM_FLASH_STATE_INIT)
+		return;
+
+	if (fctrl->flash_state == CAM_FLASH_STATE_ACQUIRE) {
+		cam_flash_release_dev(fctrl);
+		return;
+	}
+
+	rc = cam_flash_stop_dev(fctrl);
+	if (rc)
+		CAM_ERR(CAM_FLASH, "Stop Failed rc: %d", rc);
+
+	rc = cam_flash_release_dev(fctrl);
+	if (rc)
+		CAM_ERR(CAM_FLASH, "Release failed rc: %d", rc);
+
+	fctrl->flash_state = CAM_FLASH_STATE_INIT;
 }
 
 int cam_flash_apply_request(struct cam_req_mgr_apply_request *apply)
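
The flash rework above replaces the INIT/RELEASE regulator handling with an explicit START/STOP bracket: cam_flash_prepare() enables the regulator when the device starts, disables it when it stops, and cam_flash_shutdown() is rebuilt on top of the new cam_flash_stop_dev()/cam_flash_release_dev() helpers. A standalone sketch, with illustrative names, of the bracketing those states imply:

enum flash_state { F_INIT, F_ACQUIRE, F_START, F_LOW, F_HIGH };

struct flash {
	enum flash_state state;
	int regulator_on;
};

static int flash_start(struct flash *f)
{
	if (f->state != F_ACQUIRE)
		return -1;          /* must be acquired first */
	f->regulator_on = 1;        /* regulator enabled exactly once */
	f->state = F_START;
	return 0;
}

static int flash_stop(struct flash *f)
{
	if (f->state == F_LOW || f->state == F_HIGH)
		f->state = F_START; /* LEDs were firing: switch them off first */
	if (f->state != F_START)
		return -1;          /* stop without a matching start */
	f->regulator_on = 0;        /* regulator disabled exactly once */
	f->state = F_ACQUIRE;
	return 0;
}

int main(void)
{
	struct flash f = { F_ACQUIRE, 0 };

	return (flash_start(&f) == 0 && flash_stop(&f) == 0 &&
		!f.regulator_on && f.state == F_ACQUIRE) ? 0 : 1;
}
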
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h
index f2a782b..d5ea04c 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h
@@ -30,4 +30,6 @@
 int cam_flash_prepare(struct cam_flash_ctrl *flash_ctrl,
 	enum cam_flash_state state);
 void cam_flash_shutdown(struct cam_flash_ctrl *flash_ctrl);
+int cam_flash_stop_dev(struct cam_flash_ctrl *flash_ctrl);
+int cam_flash_release_dev(struct cam_flash_ctrl *fctrl);
 #endif /*_CAM_FLASH_CORE_H_*/
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c
index 57f1f0f..2b371a3 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c
@@ -36,6 +36,13 @@
 		struct cam_create_dev_hdl bridge_params;
 
 		CAM_DBG(CAM_FLASH, "CAM_ACQUIRE_DEV");
+
+		if (fctrl->flash_state != CAM_FLASH_STATE_INIT) {
+			CAM_ERR(CAM_FLASH,
+				"Cannot apply Acquire dev: Prev state: %d",
+				fctrl->flash_state);
+		}
+
 		if (fctrl->bridge_intf.device_hdl != -1) {
 			CAM_ERR(CAM_FLASH, "Device is already acquired");
 			rc = -EINVAL;
@@ -70,12 +77,19 @@
 			rc = -EFAULT;
 			goto release_mutex;
 		}
-		break;
 		fctrl->flash_state = CAM_FLASH_STATE_ACQUIRE;
+		break;
 	}
 	case CAM_RELEASE_DEV: {
 		CAM_DBG(CAM_FLASH, "CAM_RELEASE_DEV");
-		if (fctrl->bridge_intf.device_hdl == -1) {
+		if (fctrl->flash_state != CAM_FLASH_STATE_ACQUIRE) {
+			CAM_WARN(CAM_FLASH,
+				"Cannot apply Release dev: Prev state:%d",
+				fctrl->flash_state);
+		}
+
+		if (fctrl->bridge_intf.device_hdl == -1 &&
+			fctrl->flash_state == CAM_FLASH_STATE_ACQUIRE) {
 			CAM_ERR(CAM_FLASH,
 				"Invalid Handle: Link Hdl: %d device hdl: %d",
 				fctrl->bridge_intf.device_hdl,
@@ -83,16 +97,13 @@
 			rc = -EINVAL;
 			goto release_mutex;
 		}
-		rc = cam_destroy_device_hdl(fctrl->bridge_intf.device_hdl);
+		rc = cam_flash_release_dev(fctrl);
 		if (rc)
 			CAM_ERR(CAM_FLASH,
 				"Failed in destroying the device Handle rc= %d",
 				rc);
-		fctrl->bridge_intf.device_hdl = -1;
-		fctrl->bridge_intf.link_hdl = -1;
-		fctrl->bridge_intf.session_hdl = -1;
+		fctrl->flash_state = CAM_FLASH_STATE_INIT;
 		break;
-		fctrl->flash_state = CAM_FLASH_STATE_RELEASE;
 	}
 	case CAM_QUERY_CAP: {
 		struct cam_flash_query_cap_info flash_cap = {0};
@@ -120,29 +131,38 @@
 	}
 	case CAM_START_DEV: {
 		CAM_DBG(CAM_FLASH, "CAM_START_DEV");
-		rc = cam_flash_prepare(fctrl, CAM_FLASH_STATE_INIT);
+		if (fctrl->flash_state != CAM_FLASH_STATE_ACQUIRE) {
+			CAM_WARN(CAM_FLASH,
+				"Cannot apply Start Dev: Prev state: %d",
+				fctrl->flash_state);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
+		rc = cam_flash_prepare(fctrl, CAM_FLASH_STATE_START);
 		if (rc) {
 			CAM_ERR(CAM_FLASH,
 				"Enable Regulator Failed rc = %d", rc);
 			goto release_mutex;
 		}
-		fctrl->flash_state = CAM_FLASH_STATE_INIT;
 		rc = cam_flash_apply_setting(fctrl, 0);
 		if (rc) {
 			CAM_ERR(CAM_FLASH, "cannot apply settings rc = %d", rc);
 			goto release_mutex;
 		}
-		fctrl->flash_state = CAM_FLASH_STATE_INIT;
+		fctrl->flash_state = CAM_FLASH_STATE_START;
 		break;
 	}
 	case CAM_STOP_DEV: {
-		CAM_DBG(CAM_FLASH, "CAM_STOP_DEV");
-		if (fctrl->flash_state != CAM_FLASH_STATE_INIT)
-			cam_flash_off(fctrl);
+		if (fctrl->flash_state != CAM_FLASH_STATE_START) {
+			CAM_WARN(CAM_FLASH,
+				"Cannot apply Stop dev: Prev state is: %d",
+				fctrl->flash_state);
+		}
 
-		rc = cam_flash_prepare(fctrl, CAM_FLASH_STATE_RELEASE);
+		rc = cam_flash_stop_dev(fctrl);
 		if (rc) {
-			CAM_ERR(CAM_FLASH, "Disable Regulator Failed ret = %d",
+			CAM_ERR(CAM_FLASH, "Stop Dev Failed rc = %d",
 				rc);
 			goto release_mutex;
 		}
@@ -344,6 +364,7 @@
 	mutex_init(&(flash_ctrl->flash_mutex));
 	mutex_init(&(flash_ctrl->flash_wq_mutex));
 
+	flash_ctrl->flash_state = CAM_FLASH_STATE_INIT;
 	CAM_DBG(CAM_FLASH, "Probe success");
 	return rc;
 free_resource:
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h
index 1583c27..bacf088 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h
@@ -50,9 +50,10 @@
 enum cam_flash_state {
 	CAM_FLASH_STATE_INIT,
 	CAM_FLASH_STATE_ACQUIRE,
+	CAM_FLASH_STATE_START,
 	CAM_FLASH_STATE_LOW,
 	CAM_FLASH_STATE_HIGH,
-	CAM_FLASH_STATE_RELEASE,
+	CAM_FLASH_STATE_STOP,
 };
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c
index a9ab169..a195762 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c
@@ -13,6 +13,7 @@
 #include <linux/of.h>
 #include <linux/of_gpio.h>
 #include "cam_flash_soc.h"
+#include "cam_res_mgr_api.h"
 
 static int32_t cam_get_source_node_info(
 	struct device_node *of_node,
@@ -38,7 +39,7 @@
 		} else {
 			CAM_DBG(CAM_FLASH, "switch trigger %s",
 				soc_private->switch_trigger_name);
-			led_trigger_register_simple(
+			cam_res_mgr_led_trigger_register(
 				soc_private->switch_trigger_name,
 				&fctrl->switch_trigger);
 		}
@@ -111,7 +112,7 @@
 			CAM_DBG(CAM_FLASH, "max_current[%d]: %d",
 				i, soc_private->flash_max_current[i]);
 
-			led_trigger_register_simple(
+			cam_res_mgr_led_trigger_register(
 				soc_private->flash_trigger_name[i],
 				&fctrl->flash_trigger[i]);
 		}
@@ -172,7 +173,7 @@
 			CAM_DBG(CAM_FLASH, "max_current[%d]: %d",
 				i, soc_private->torch_max_current[i]);
 
-			led_trigger_register_simple(
+			cam_res_mgr_led_trigger_register(
 				soc_private->torch_trigger_name[i],
 				&fctrl->torch_trigger[i]);
 		}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/Makefile
index ec1d2fd..9397c68 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/Makefile
@@ -1,6 +1,7 @@
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
index 2a877fd..d825f5e 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
@@ -17,6 +17,46 @@
 #include "cam_ois_soc.h"
 #include "cam_sensor_util.h"
 #include "cam_debug_util.h"
+#include "cam_res_mgr_api.h"
+
+int32_t cam_ois_construct_default_power_setting(
+	struct cam_sensor_power_ctrl_t *power_info)
+{
+	int rc = 0;
+
+	power_info->power_setting_size = 1;
+	power_info->power_setting =
+		(struct cam_sensor_power_setting *)
+		kzalloc(sizeof(struct cam_sensor_power_setting),
+			GFP_KERNEL);
+	if (!power_info->power_setting)
+		return -ENOMEM;
+
+	power_info->power_setting[0].seq_type = SENSOR_VAF;
+	power_info->power_setting[0].seq_val = CAM_VAF;
+	power_info->power_setting[0].config_val = 1;
+
+	power_info->power_down_setting_size = 1;
+	power_info->power_down_setting =
+		(struct cam_sensor_power_setting *)
+		kzalloc(sizeof(struct cam_sensor_power_setting),
+			GFP_KERNEL);
+	if (!power_info->power_down_setting) {
+		rc = -ENOMEM;
+		goto free_power_settings;
+	}
+
+	power_info->power_down_setting[0].seq_type = SENSOR_VAF;
+	power_info->power_down_setting[0].seq_val = CAM_VAF;
+	power_info->power_down_setting[0].config_val = 0;
+
+	return rc;
+
+free_power_settings:
+	kfree(power_info->power_setting);
+	return rc;
+}
+
 
 /**
  * cam_ois_get_dev_handle - get device handle
@@ -60,87 +100,88 @@
 	return 0;
 }
 
-static int cam_ois_vreg_control(struct cam_ois_ctrl_t *o_ctrl,
-	int config)
-{
-	int rc = 0, cnt;
-	struct cam_hw_soc_info  *soc_info;
-
-	soc_info = &o_ctrl->soc_info;
-	cnt = soc_info->num_rgltr;
-
-	if (!cnt)
-		return 0;
-
-	if (cnt >= CAM_SOC_MAX_REGULATOR) {
-		CAM_ERR(CAM_OIS, "Regulators more than supported %d", cnt);
-		return -EINVAL;
-	}
-
-	if (config) {
-		rc = cam_soc_util_request_platform_resource(soc_info,
-			NULL, NULL);
-		rc = cam_soc_util_enable_platform_resource(soc_info, false, 0,
-			false);
-	} else {
-		rc = cam_soc_util_disable_platform_resource(soc_info, false,
-			false);
-		rc = cam_soc_util_release_platform_resource(soc_info);
-	}
-
-	return rc;
-}
-
 static int cam_ois_power_up(struct cam_ois_ctrl_t *o_ctrl)
 {
-	int rc = 0;
-	struct cam_hw_soc_info  *soc_info =
+	int                             rc = 0;
+	struct cam_hw_soc_info          *soc_info =
 		&o_ctrl->soc_info;
-	struct msm_camera_gpio_num_info *gpio_num_info = NULL;
+	struct cam_ois_soc_private *soc_private;
+	struct cam_sensor_power_ctrl_t  *power_info;
 
-	rc = cam_ois_vreg_control(o_ctrl, 1);
-	if (rc < 0) {
-		CAM_ERR(CAM_OIS, "OIS Reg Failed %d", rc);
+	soc_private =
+		(struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private;
+	power_info = &soc_private->power_info;
+
+	/* Parse and fill vreg params for power up settings */
+	rc = msm_camera_fill_vreg_params(
+		&o_ctrl->soc_info,
+		power_info->power_setting,
+		power_info->power_setting_size);
+	if (rc) {
+		CAM_ERR(CAM_OIS,
+			"failed to fill vreg params for power up rc:%d", rc);
 		return rc;
 	}
 
-	gpio_num_info = o_ctrl->gpio_num_info;
+	/* Parse and fill vreg params for power down settings*/
+	rc = msm_camera_fill_vreg_params(
+		&o_ctrl->soc_info,
+		power_info->power_down_setting,
+		power_info->power_down_setting_size);
+	if (rc) {
+		CAM_ERR(CAM_OIS,
+			"failed to fill vreg params for power down rc:%d", rc);
+		return rc;
+	}
 
-	if (soc_info->gpio_data &&
-		gpio_num_info &&
-		gpio_num_info->valid[SENSOR_VAF] == 1) {
-		gpio_set_value_cansleep(
-			gpio_num_info->gpio_num[SENSOR_VAF],
-			1);
+	power_info->dev = soc_info->dev;
+
+	rc = cam_sensor_core_power_up(power_info, soc_info);
+	if (rc) {
+		CAM_ERR(CAM_OIS, "failed in ois power up rc %d", rc);
+		return rc;
 	}
 
 	/* VREG needs some delay to power up */
 	usleep_range(2000, 2050);
 
+	rc = camera_io_init(&o_ctrl->io_master_info);
+	if (rc)
+		CAM_ERR(CAM_OIS, "cci_init failed: rc: %d", rc);
+
 	return rc;
 }
 
 static int cam_ois_power_down(struct cam_ois_ctrl_t *o_ctrl)
 {
-	int32_t rc = 0;
-	struct cam_hw_soc_info *soc_info =
+	int32_t                         rc = 0;
+	struct cam_sensor_power_ctrl_t  *power_info;
+	struct cam_hw_soc_info          *soc_info =
 		&o_ctrl->soc_info;
-	struct msm_camera_gpio_num_info *gpio_num_info = NULL;
+	struct cam_ois_soc_private *soc_private;
 
-	gpio_num_info = o_ctrl->gpio_num_info;
-
-	if (soc_info->gpio_data &&
-		gpio_num_info &&
-		gpio_num_info->valid[SENSOR_VAF] == 1) {
-
-		gpio_set_value_cansleep(
-			gpio_num_info->gpio_num[SENSOR_VAF],
-			GPIOF_OUT_INIT_LOW);
+	if (!o_ctrl) {
+		CAM_ERR(CAM_OIS, "failed: o_ctrl %pK", o_ctrl);
+		return -EINVAL;
 	}
 
-	rc = cam_ois_vreg_control(o_ctrl, 0);
-	if (rc < 0)
-		CAM_ERR(CAM_OIS, "Disable regualtor Failed %d", rc);
+	soc_private =
+		(struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private;
+	power_info = &soc_private->power_info;
+	soc_info = &o_ctrl->soc_info;
+
+	if (!power_info) {
+		CAM_ERR(CAM_OIS, "failed: power_info %pK", power_info);
+		return -EINVAL;
+	}
+
+	rc = msm_camera_power_down(power_info, soc_info);
+	if (rc) {
+		CAM_ERR(CAM_OIS, "OIS core power down failed rc:%d", rc);
+		return rc;
+	}
+
+	camera_io_release(&o_ctrl->io_master_info);
 
 	return rc;
 }
@@ -436,6 +477,42 @@
 				return rc;
 			}
 		}
+
+		if (o_ctrl->ois_fw_flag) {
+			rc = cam_ois_fw_download(o_ctrl);
+			if (rc) {
+				CAM_ERR(CAM_OIS, "Failed OIS FW Download");
+				goto pwr_dwn;
+			}
+		}
+
+		rc = cam_ois_apply_settings(o_ctrl, &o_ctrl->i2c_init_data);
+		if (rc < 0) {
+			CAM_ERR(CAM_OIS, "Cannot apply Init settings");
+			goto pwr_dwn;
+		}
+
+		if (o_ctrl->is_ois_calib) {
+			rc = cam_ois_apply_settings(o_ctrl,
+				&o_ctrl->i2c_calib_data);
+			if (rc) {
+				CAM_ERR(CAM_OIS, "Cannot apply calib data");
+				goto pwr_dwn;
+			}
+		}
+
+		rc = delete_request(&o_ctrl->i2c_init_data);
+		if (rc < 0) {
+			CAM_WARN(CAM_OIS,
+				"Failed deleting Init data: rc: %d", rc);
+			rc = 0;
+		}
+		rc = delete_request(&o_ctrl->i2c_calib_data);
+		if (rc < 0) {
+			CAM_WARN(CAM_OIS,
+				"Failed deleting Calibration data: rc: %d", rc);
+			rc = 0;
+		}
 		break;
 	case CAM_OIS_PACKET_OPCODE_OIS_CONTROL:
 		offset = (uint32_t *)&csl_packet->payload;
@@ -452,13 +529,23 @@
 		}
 
 		rc = cam_ois_apply_settings(o_ctrl, i2c_reg_settings);
-		if (rc < 0)
+		if (rc < 0) {
 			CAM_ERR(CAM_OIS, "Cannot apply mode settings");
+			return rc;
+		}
+
+		rc = delete_request(i2c_reg_settings);
+		if (rc < 0)
+			CAM_ERR(CAM_OIS,
+				"Failed deleting Mode data: rc: %d", rc);
 		break;
 	default:
 		break;
 	}
 	return rc;
+pwr_dwn:
+	cam_ois_power_down(o_ctrl);
+	return rc;
 }
 
 void cam_ois_shutdown(struct cam_ois_ctrl_t *o_ctrl)
@@ -468,17 +555,12 @@
 	if (o_ctrl->cam_ois_state == CAM_OIS_INIT)
 		return;
 
-	if (o_ctrl->cam_ois_state == CAM_OIS_START) {
-		rc = camera_io_release(&o_ctrl->io_master_info);
-		if (rc < 0)
-			CAM_ERR(CAM_OIS, "Failed in releasing CCI");
+	if ((o_ctrl->cam_ois_state == CAM_OIS_START) ||
+		(o_ctrl->cam_ois_state == CAM_OIS_ACQUIRE)) {
 		rc = cam_ois_power_down(o_ctrl);
 		if (rc < 0)
 			CAM_ERR(CAM_OIS, "OIS Power down failed");
-		o_ctrl->cam_ois_state = CAM_OIS_ACQUIRE;
-	}
 
-	if (o_ctrl->cam_ois_state == CAM_OIS_ACQUIRE) {
 		rc = cam_destroy_device_hdl(o_ctrl->bridge_intf.device_hdl);
 		if (rc < 0)
 			CAM_ERR(CAM_OIS, "destroying the device hdl");
@@ -517,7 +599,7 @@
 			&ois_cap,
 			sizeof(struct cam_ois_query_cap_t))) {
 			CAM_ERR(CAM_OIS, "Failed Copy to User");
-			return -EFAULT;
+			rc = -EFAULT;
 			goto release_mutex;
 		}
 		CAM_DBG(CAM_OIS, "ois_cap: ID: %d", ois_cap.slot_info);
@@ -528,41 +610,22 @@
 			CAM_ERR(CAM_OIS, "Failed to acquire dev");
 			goto release_mutex;
 		}
-		o_ctrl->cam_ois_state = CAM_OIS_ACQUIRE;
-		break;
-	case CAM_START_DEV:
+
 		rc = cam_ois_power_up(o_ctrl);
 		if (rc) {
 			CAM_ERR(CAM_OIS, " OIS Power up failed");
 			goto release_mutex;
 		}
-		rc = camera_io_init(&o_ctrl->io_master_info);
-		if (rc) {
-			CAM_ERR(CAM_OIS, "cci_init failed");
-			goto pwr_dwn;
-		}
 
-		if (o_ctrl->ois_fw_flag) {
-			rc = cam_ois_fw_download(o_ctrl);
-			if (rc) {
-				CAM_ERR(CAM_OIS, "Failed OIS FW Download");
-				goto pwr_dwn;
-			}
-		}
-
-		rc = cam_ois_apply_settings(o_ctrl, &o_ctrl->i2c_init_data);
-		if (rc < 0) {
-			CAM_ERR(CAM_OIS, "Cannot apply Init settings");
-			goto pwr_dwn;
-		}
-
-		if (o_ctrl->is_ois_calib) {
-			rc = cam_ois_apply_settings(o_ctrl,
-				&o_ctrl->i2c_calib_data);
-			if (rc) {
-				CAM_ERR(CAM_OIS, "Cannot apply calib data");
-				goto pwr_dwn;
-			}
+		o_ctrl->cam_ois_state = CAM_OIS_ACQUIRE;
+		break;
+	case CAM_START_DEV:
+		if (o_ctrl->cam_ois_state != CAM_OIS_ACQUIRE) {
+			rc = -EINVAL;
+			CAM_WARN(CAM_OIS,
+			"Not in the right state to start: %d",
+			o_ctrl->cam_ois_state);
+			goto release_mutex;
 		}
 		o_ctrl->cam_ois_state = CAM_OIS_START;
 		break;
@@ -574,6 +637,20 @@
 		}
 		break;
 	case CAM_RELEASE_DEV:
+		if (o_ctrl->cam_ois_state != CAM_OIS_ACQUIRE) {
+			rc = -EINVAL;
+			CAM_WARN(CAM_OIS,
+			"Not in the right state to release: %d",
+			o_ctrl->cam_ois_state);
+			goto release_mutex;
+		}
+
+		rc = cam_ois_power_down(o_ctrl);
+		if (rc < 0) {
+			CAM_ERR(CAM_OIS, "OIS Power down failed");
+			goto release_mutex;
+		}
+
 		if (o_ctrl->bridge_intf.device_hdl == -1) {
 			CAM_ERR(CAM_OIS, "link hdl: %d device hdl: %d",
 				o_ctrl->bridge_intf.device_hdl,
@@ -590,13 +667,11 @@
 		o_ctrl->cam_ois_state = CAM_OIS_INIT;
 		break;
 	case CAM_STOP_DEV:
-		rc = camera_io_release(&o_ctrl->io_master_info);
-		if (rc < 0)
-			CAM_ERR(CAM_OIS, "Failed in releasing CCI");
-		rc = cam_ois_power_down(o_ctrl);
-		if (rc < 0) {
-			CAM_ERR(CAM_OIS, "OIS Power down failed");
-			goto release_mutex;
+		if (o_ctrl->cam_ois_state != CAM_OIS_START) {
+			rc = -EINVAL;
+			CAM_WARN(CAM_OIS,
+			"Not in the right state to stop: %d",
+			o_ctrl->cam_ois_state);
 		}
 		o_ctrl->cam_ois_state = CAM_OIS_ACQUIRE;
 		break;
@@ -604,10 +679,7 @@
 		CAM_ERR(CAM_OIS, "invalid opcode");
 		goto release_mutex;
 	}
-pwr_dwn:
-	cam_ois_power_down(o_ctrl);
 release_mutex:
 	mutex_unlock(&(o_ctrl->ois_mutex));
-
 	return rc;
 }
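
The cam_ois_core.c hunks above move OIS power-up/down out of CAM_START_DEV/CAM_STOP_DEV into CAM_ACQUIRE_DEV/CAM_RELEASE_DEV and gate each opcode on the current state. As a reading aid, the sketch below condenses the transitions those checks enforce; the helper function is hypothetical and not part of the patch, and it only assumes the existing enum cam_ois_state and the uapi opcodes already used above.

/* Hypothetical helper, illustration only: mirrors the state checks
 * added in cam_ois_driver_cmd() above.
 */
static bool cam_ois_example_opcode_allowed(enum cam_ois_state cur,
	unsigned int opcode)
{
	switch (opcode) {
	case CAM_START_DEV:
		/* power-up already happened during acquire */
		return cur == CAM_OIS_ACQUIRE;
	case CAM_STOP_DEV:
		return cur == CAM_OIS_START;
	case CAM_RELEASE_DEV:
		/* stop must have run first; release also powers down */
		return cur == CAM_OIS_ACQUIRE;
	default:
		/* CAM_ACQUIRE_DEV, CAM_QUERY_CAP, etc. are not gated here */
		return true;
	}
}
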
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.h
index 6f81d09..516ac88 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.h
@@ -14,6 +14,17 @@
 
 #include "cam_ois_dev.h"
 
+/**
+ * @power_info: power setting info to control the power
+ *
+ * This API constructs the default OIS power settings.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int32_t cam_ois_construct_default_power_setting(
+	struct cam_sensor_power_ctrl_t *power_info);
+
+
 int cam_ois_driver_cmd(struct cam_ois_ctrl_t *e_ctrl, void *arg);
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.c
index 2629180..9eca4c7 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.c
@@ -203,10 +203,21 @@
 	rc = cam_ois_init_subdev_param(o_ctrl);
 	if (rc)
 		goto octrl_free;
+
+	rc = cam_ois_construct_default_power_setting(
+		&soc_private->power_info);
+	if (rc < 0) {
+		CAM_ERR(CAM_OIS,
+			"Construction of default OIS power setting failed");
+		goto unreg_subdev;
+	}
+
 	o_ctrl->cam_ois_state = CAM_OIS_INIT;
 
 	return rc;
 
+unreg_subdev:
+	cam_unregister_subdev(&(o_ctrl->v4l2_dev_str));
 octrl_free:
 	kfree(o_ctrl);
 probe_failure:
@@ -215,13 +226,21 @@
 
 static int cam_ois_i2c_driver_remove(struct i2c_client *client)
 {
-	struct cam_ois_ctrl_t       *o_ctrl = i2c_get_clientdata(client);
+	struct cam_ois_ctrl_t          *o_ctrl = i2c_get_clientdata(client);
+	struct cam_ois_soc_private     *soc_private;
+	struct cam_sensor_power_ctrl_t *power_info;
 
 	if (!o_ctrl) {
 		CAM_ERR(CAM_OIS, "ois device is NULL");
 		return -EINVAL;
 	}
 
+	soc_private =
+		(struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private;
+	power_info = &soc_private->power_info;
+
+	kfree(power_info->power_setting);
+	kfree(power_info->power_down_setting);
 	kfree(o_ctrl->soc_info.soc_private);
 	kfree(o_ctrl);
 
@@ -280,9 +299,19 @@
 	}
 	o_ctrl->bridge_intf.device_hdl = -1;
 
+	rc = cam_ois_construct_default_power_setting(
+		&soc_private->power_info);
+	if (rc < 0) {
+		CAM_ERR(CAM_OIS,
+			"Construction of default OIS power setting failed");
+		goto unreg_subdev;
+	}
+
 	platform_set_drvdata(pdev, o_ctrl);
 	v4l2_set_subdevdata(&o_ctrl->v4l2_dev_str.sd, o_ctrl);
 
+	o_ctrl->cam_ois_state = CAM_OIS_INIT;
+
 	return rc;
 unreg_subdev:
 	cam_unregister_subdev(&(o_ctrl->v4l2_dev_str));
@@ -297,7 +326,9 @@
 
 static int cam_ois_platform_driver_remove(struct platform_device *pdev)
 {
-	struct cam_ois_ctrl_t  *o_ctrl;
+	struct cam_ois_ctrl_t          *o_ctrl;
+	struct cam_ois_soc_private     *soc_private;
+	struct cam_sensor_power_ctrl_t *power_info;
 
 	o_ctrl = platform_get_drvdata(pdev);
 	if (!o_ctrl) {
@@ -305,6 +336,12 @@
 		return -EINVAL;
 	}
 
+	soc_private =
+		(struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private;
+	power_info = &soc_private->power_info;
+
+	kfree(power_info->power_setting);
+	kfree(power_info->power_down_setting);
 	kfree(o_ctrl->soc_info.soc_private);
 	kfree(o_ctrl->io_master_info.cci_client);
 	kfree(o_ctrl);
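
The cam_ois_dev.c hunks above call cam_ois_construct_default_power_setting() at probe time and free both arrays on remove. A minimal sketch of that ownership pattern, assuming only the structures shown in this patch (the example_* function names are placeholders, not driver code):

/* Illustration only: the helper kzalloc's both arrays, so the caller
 * owns them and must free them on teardown, as the remove paths above do.
 */
static int example_ois_power_init(struct cam_ois_soc_private *soc_private)
{
	return cam_ois_construct_default_power_setting(
		&soc_private->power_info);
}

static void example_ois_power_cleanup(struct cam_ois_soc_private *soc_private)
{
	struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info;

	kfree(power_info->power_setting);
	kfree(power_info->power_down_setting);
	power_info->power_setting = NULL;
	power_info->power_down_setting = NULL;
}
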
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.h
index e341bb7..80f1e84 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.h
@@ -35,7 +35,6 @@
 	CAM_OIS_INIT,
 	CAM_OIS_ACQUIRE,
 	CAM_OIS_START,
-	CAM_OIS_RELEASE,
 };
 
 /**
@@ -94,7 +93,6 @@
  * @ois_mutex       :   ois mutex
  * @soc_info        :   ois soc related info
  * @io_master_info  :   Information about the communication master
- * @gpio_num_info   :   gpio info
  * @cci_i2c_master  :   I2C structure
  * @v4l2_dev_str    :   V4L2 device structure
  * @bridge_intf     :   bridge interface params
@@ -115,7 +113,6 @@
 	struct mutex ois_mutex;
 	struct cam_hw_soc_info soc_info;
 	struct camera_io_master io_master_info;
-	struct msm_camera_gpio_num_info *gpio_num_info;
 	enum cci_i2c_master_t cci_i2c_master;
 	struct cam_subdev v4l2_dev_str;
 	struct cam_ois_intf_params bridge_intf;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/Makefile
new file mode 100644
index 0000000..516faf5
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/Makefile
@@ -0,0 +1,9 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_res_mgr.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr.c
new file mode 100644
index 0000000..d588b24
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr.c
@@ -0,0 +1,710 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include "cam_debug_util.h"
+#include "cam_res_mgr_api.h"
+#include "cam_res_mgr_private.h"
+
+static struct cam_res_mgr *cam_res;
+
+static void cam_res_mgr_free_res(void)
+{
+	struct cam_dev_res *dev_res, *dev_temp;
+	struct cam_gpio_res *gpio_res, *gpio_temp;
+	struct cam_flash_res *flash_res, *flash_temp;
+
+	if (!cam_res)
+		return;
+
+	mutex_lock(&cam_res->gpio_res_lock);
+	list_for_each_entry_safe(gpio_res, gpio_temp,
+		&cam_res->gpio_res_list, list) {
+		list_for_each_entry_safe(dev_res, dev_temp,
+			&gpio_res->dev_list, list) {
+			list_del_init(&dev_res->list);
+			kfree(dev_res);
+		}
+		list_del_init(&gpio_res->list);
+		kfree(gpio_res);
+	}
+	mutex_unlock(&cam_res->gpio_res_lock);
+
+	mutex_lock(&cam_res->flash_res_lock);
+	list_for_each_entry_safe(flash_res, flash_temp,
+		&cam_res->flash_res_list, list) {
+		list_del_init(&flash_res->list);
+		kfree(flash_res);
+	}
+	mutex_unlock(&cam_res->flash_res_lock);
+}
+
+void cam_res_mgr_led_trigger_register(const char *name, struct led_trigger **tp)
+{
+	bool found = false;
+	struct cam_flash_res *flash_res;
+
+	if (!cam_res) {
+		/*
+		 * If this driver has not been probed, just register
+		 * the led trigger.
+		 */
+		led_trigger_register_simple(name, tp);
+		return;
+	}
+
+	mutex_lock(&cam_res->flash_res_lock);
+	list_for_each_entry(flash_res, &cam_res->flash_res_list, list) {
+		if (!strcmp(flash_res->name, name)) {
+			found = true;
+			break;
+		}
+	}
+	mutex_unlock(&cam_res->flash_res_lock);
+
+	if (found) {
+		*tp = flash_res->trigger;
+	} else {
+		flash_res = kzalloc(sizeof(struct cam_flash_res), GFP_KERNEL);
+		if (!flash_res) {
+			CAM_ERR(CAM_RES,
+				"Failed to allocate memory for flash_res: %s",
+				name);
+			*tp = NULL;
+			return;
+		}
+
+		led_trigger_register_simple(name, tp);
+		INIT_LIST_HEAD(&flash_res->list);
+		flash_res->trigger = *tp;
+		flash_res->name = name;
+
+		mutex_lock(&cam_res->flash_res_lock);
+		list_add_tail(&flash_res->list, &cam_res->flash_res_list);
+		mutex_unlock(&cam_res->flash_res_lock);
+	}
+}
+EXPORT_SYMBOL(cam_res_mgr_led_trigger_register);
+
+void cam_res_mgr_led_trigger_unregister(struct led_trigger *tp)
+{
+	bool found = false;
+	struct cam_flash_res *flash_res;
+
+	if (!cam_res) {
+		/*
+		 * If this driver has not been probed, just unregister
+		 * the led trigger.
+		 */
+		led_trigger_unregister_simple(tp);
+		return;
+	}
+
+	mutex_lock(&cam_res->flash_res_lock);
+	list_for_each_entry(flash_res, &cam_res->flash_res_list, list) {
+		if (flash_res->trigger == tp) {
+			found = true;
+			break;
+		}
+	}
+
+	if (found) {
+		led_trigger_unregister_simple(tp);
+		list_del_init(&flash_res->list);
+		kfree(flash_res);
+	}
+	mutex_unlock(&cam_res->flash_res_lock);
+}
+EXPORT_SYMBOL(cam_res_mgr_led_trigger_unregister);
+
+void cam_res_mgr_led_trigger_event(struct led_trigger *trig,
+	enum led_brightness brightness)
+{
+	bool found = false;
+	struct cam_flash_res *flash_res;
+
+	if (!cam_res) {
+		/*
+		 * If this driver has not been probed, just trigger
+		 * the led event.
+		 */
+		led_trigger_event(trig, brightness);
+		return;
+	}
+
+	mutex_lock(&cam_res->flash_res_lock);
+	list_for_each_entry(flash_res, &cam_res->flash_res_list, list) {
+		if (flash_res->trigger == trig) {
+			found = true;
+			break;
+		}
+	}
+	mutex_unlock(&cam_res->flash_res_lock);
+
+	if (found)
+		led_trigger_event(trig, brightness);
+}
+EXPORT_SYMBOL(cam_res_mgr_led_trigger_event);
+
+int cam_res_mgr_shared_pinctrl_init(void)
+{
+	struct cam_soc_pinctrl_info *pinctrl_info;
+
+	/*
+	 * We allow the cam_res is NULL or shared_gpio_enabled
+	 * is false, it means this driver no probed or doesn't
+	 * have shared gpio in this device.
+	 */
+	if (!cam_res || !cam_res->shared_gpio_enabled) {
+		CAM_DBG(CAM_RES, "Shared gpio not supported.");
+		return 0;
+	}
+
+	if (cam_res->pstatus != PINCTRL_STATUS_PUT) {
+		CAM_DBG(CAM_RES, "The shared pinctrl has already been acquired.");
+		return 0;
+	}
+
+	pinctrl_info = &cam_res->dt.pinctrl_info;
+
+	pinctrl_info->pinctrl =
+		devm_pinctrl_get(cam_res->dev);
+	if (IS_ERR_OR_NULL(pinctrl_info->pinctrl)) {
+		CAM_ERR(CAM_RES, "Pinctrl not available");
+		cam_res->shared_gpio_enabled = false;
+		return -EINVAL;
+	}
+
+	pinctrl_info->gpio_state_active =
+		pinctrl_lookup_state(pinctrl_info->pinctrl,
+			CAM_RES_MGR_DEFAULT);
+	if (IS_ERR_OR_NULL(pinctrl_info->gpio_state_active)) {
+		CAM_ERR(CAM_RES,
+			"Failed to get the active state pinctrl handle");
+		cam_res->shared_gpio_enabled = false;
+		return -EINVAL;
+	}
+
+	pinctrl_info->gpio_state_suspend =
+		pinctrl_lookup_state(pinctrl_info->pinctrl,
+			CAM_RES_MGR_SLEEP);
+	if (IS_ERR_OR_NULL(pinctrl_info->gpio_state_suspend)) {
+		CAM_ERR(CAM_RES,
+			"Failed to get the suspend state pinctrl handle");
+		cam_res->shared_gpio_enabled = false;
+		return -EINVAL;
+	}
+
+	mutex_lock(&cam_res->gpio_res_lock);
+	cam_res->pstatus = PINCTRL_STATUS_GOT;
+	mutex_unlock(&cam_res->gpio_res_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(cam_res_mgr_shared_pinctrl_init);
+
+static bool cam_res_mgr_shared_pinctrl_check_hold(void)
+{
+	int index = 0;
+	int dev_num = 0;
+	bool hold = false;
+	struct list_head *list;
+	struct cam_gpio_res *gpio_res;
+	struct cam_res_mgr_dt *dt = &cam_res->dt;
+
+	for (; index < dt->num_shared_gpio; index++) {
+		list_for_each_entry(gpio_res,
+			&cam_res->gpio_res_list, list) {
+
+			if (gpio_res->gpio ==
+				dt->shared_gpio[index]) {
+				list_for_each(list, &gpio_res->dev_list)
+					dev_num++;
+
+				if (dev_num >= 2) {
+					hold = true;
+					break;
+				}
+			}
+		}
+	}
+
+	return hold;
+}
+
+void cam_res_mgr_shared_pinctrl_put(void)
+{
+	struct cam_soc_pinctrl_info *pinctrl_info;
+
+	if (!cam_res || !cam_res->shared_gpio_enabled) {
+		CAM_DBG(CAM_RES, "Shared gpio not supported.");
+		return;
+	}
+
+	mutex_lock(&cam_res->gpio_res_lock);
+	if (cam_res->pstatus == PINCTRL_STATUS_PUT) {
+		CAM_DBG(CAM_RES, "The shared pinctrl has already been put");
+		mutex_unlock(&cam_res->gpio_res_lock);
+		return;
+	}
+
+	if (cam_res_mgr_shared_pinctrl_check_hold()) {
+		CAM_INFO(CAM_RES, "Deferring the shared pinctrl put; still held");
+		mutex_unlock(&cam_res->gpio_res_lock);
+		return;
+	}
+
+	pinctrl_info = &cam_res->dt.pinctrl_info;
+
+	devm_pinctrl_put(pinctrl_info->pinctrl);
+
+	cam_res->pstatus = PINCTRL_STATUS_PUT;
+	mutex_unlock(&cam_res->gpio_res_lock);
+}
+EXPORT_SYMBOL(cam_res_mgr_shared_pinctrl_put);
+
+int cam_res_mgr_shared_pinctrl_select_state(bool active)
+{
+	int rc = 0;
+	struct cam_soc_pinctrl_info *pinctrl_info;
+
+	if (!cam_res || !cam_res->shared_gpio_enabled) {
+		CAM_DBG(CAM_RES, "Shared gpio not supported.");
+		return 0;
+	}
+
+	mutex_lock(&cam_res->gpio_res_lock);
+	if (cam_res->pstatus == PINCTRL_STATUS_PUT) {
+		CAM_DBG(CAM_RES, "The shared pinctrl has already been put.");
+		mutex_unlock(&cam_res->gpio_res_lock);
+		return 0;
+	}
+
+	pinctrl_info = &cam_res->dt.pinctrl_info;
+
+	if (active && (cam_res->pstatus != PINCTRL_STATUS_ACTIVE)) {
+		rc = pinctrl_select_state(pinctrl_info->pinctrl,
+			pinctrl_info->gpio_state_active);
+		cam_res->pstatus = PINCTRL_STATUS_ACTIVE;
+	} else if (!active &&
+		!cam_res_mgr_shared_pinctrl_check_hold()) {
+		rc = pinctrl_select_state(pinctrl_info->pinctrl,
+			pinctrl_info->gpio_state_suspend);
+		cam_res->pstatus = PINCTRL_STATUS_SUSPEND;
+	}
+	mutex_unlock(&cam_res->gpio_res_lock);
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_res_mgr_shared_pinctrl_select_state);
+
+int cam_res_mgr_shared_pinctrl_post_init(void)
+{
+	int ret = 0;
+	struct cam_soc_pinctrl_info *pinctrl_info;
+
+	if (!cam_res || !cam_res->shared_gpio_enabled) {
+		CAM_DBG(CAM_RES, "Shared gpio not supported.");
+		return ret;
+	}
+
+	mutex_lock(&cam_res->gpio_res_lock);
+	if (cam_res->pstatus == PINCTRL_STATUS_PUT) {
+		CAM_DBG(CAM_RES, "The shared pinctrl has already been put.");
+		mutex_unlock(&cam_res->gpio_res_lock);
+		return ret;
+	}
+
+	pinctrl_info = &cam_res->dt.pinctrl_info;
+
+	/*
+	 * If there is no gpio resource in gpio_res_list, it means
+	 * this device doesn't have a shared gpio.
+	 */
+	if (list_empty(&cam_res->gpio_res_list)) {
+		ret = pinctrl_select_state(pinctrl_info->pinctrl,
+			pinctrl_info->gpio_state_suspend);
+		devm_pinctrl_put(pinctrl_info->pinctrl);
+		cam_res->pstatus = PINCTRL_STATUS_PUT;
+	}
+	mutex_unlock(&cam_res->gpio_res_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(cam_res_mgr_shared_pinctrl_post_init);
+
+static int cam_res_mgr_add_device(struct device *dev,
+	struct cam_gpio_res *gpio_res)
+{
+	struct cam_dev_res *dev_res = NULL;
+
+	dev_res = kzalloc(sizeof(struct cam_dev_res), GFP_KERNEL);
+	if (!dev_res)
+		return -ENOMEM;
+
+	dev_res->dev = dev;
+	INIT_LIST_HEAD(&dev_res->list);
+
+	list_add_tail(&dev_res->list, &gpio_res->dev_list);
+
+	return 0;
+}
+
+static bool cam_res_mgr_gpio_is_shared(uint gpio)
+{
+	int index = 0;
+	bool found = false;
+	struct cam_res_mgr_dt *dt = &cam_res->dt;
+
+	for (; index < dt->num_shared_gpio; index++) {
+		if (gpio == dt->shared_gpio[index]) {
+			found = true;
+			break;
+		}
+	}
+
+	return found;
+}
+
+int cam_res_mgr_gpio_request(struct device *dev, uint gpio,
+		unsigned long flags, const char *label)
+{
+	int rc = 0;
+	bool found = false;
+	struct cam_gpio_res *gpio_res = NULL;
+
+	if (cam_res && cam_res->shared_gpio_enabled) {
+		mutex_lock(&cam_res->gpio_res_lock);
+		list_for_each_entry(gpio_res, &cam_res->gpio_res_list, list) {
+			if (gpio == gpio_res->gpio) {
+				found = true;
+				break;
+			}
+		}
+		mutex_unlock(&cam_res->gpio_res_lock);
+	}
+
+	/*
+	 * found being false covers two situations:
+	 * 1. shared gpio is not enabled
+	 * 2. shared gpio is enabled, but this gpio is not
+	 *    in the gpio_res_list
+	 * In both situations the gpio must be requested.
+	 */
+	if (!found) {
+		rc = gpio_request_one(gpio, flags, label);
+		if (rc) {
+			CAM_ERR(CAM_RES, "gpio %d:%s request fails",
+				gpio, label);
+			return rc;
+		}
+	}
+
+	/*
+	 * If the gpio is in the shared list but was not found
+	 * in gpio_res_list, insert a new cam_gpio_res into
+	 * gpio_res_list.
+	 */
+	if (!found && cam_res
+		&& cam_res->shared_gpio_enabled &&
+		cam_res_mgr_gpio_is_shared(gpio)) {
+
+		gpio_res = kzalloc(sizeof(struct cam_gpio_res), GFP_KERNEL);
+		if (!gpio_res)
+			return -ENOMEM;
+
+		gpio_res->gpio = gpio;
+		gpio_res->power_on_count = 0;
+		INIT_LIST_HEAD(&gpio_res->list);
+		INIT_LIST_HEAD(&gpio_res->dev_list);
+
+		rc = cam_res_mgr_add_device(dev, gpio_res);
+		if (rc) {
+			kfree(gpio_res);
+			return rc;
+		}
+
+		mutex_lock(&cam_res->gpio_res_lock);
+		list_add_tail(&gpio_res->list, &cam_res->gpio_res_list);
+		mutex_unlock(&cam_res->gpio_res_lock);
+	}
+
+	if (found && cam_res
+		&& cam_res->shared_gpio_enabled) {
+		struct cam_dev_res *dev_res = NULL;
+
+		found = false;
+		mutex_lock(&cam_res->gpio_res_lock);
+		list_for_each_entry(dev_res, &gpio_res->dev_list, list) {
+			if (dev_res->dev == dev) {
+				found = true;
+				break;
+			}
+		}
+
+		if (!found)
+			rc = cam_res_mgr_add_device(dev, gpio_res);
+
+		mutex_unlock(&cam_res->gpio_res_lock);
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_res_mgr_gpio_request);
+
+static void cam_res_mgr_gpio_free(struct device *dev, uint gpio)
+{
+	bool found = false;
+	bool need_free = true;
+	int dev_num =  0;
+	struct cam_gpio_res *gpio_res = NULL;
+
+	if (cam_res && cam_res->shared_gpio_enabled) {
+		mutex_lock(&cam_res->gpio_res_lock);
+		list_for_each_entry(gpio_res, &cam_res->gpio_res_list, list) {
+			if (gpio == gpio_res->gpio) {
+				found = true;
+				break;
+			}
+		}
+		mutex_unlock(&cam_res->gpio_res_lock);
+	}
+
+	if (found && cam_res
+		&& cam_res->shared_gpio_enabled) {
+		struct list_head *list;
+		struct cam_dev_res *dev_res = NULL;
+
+		mutex_lock(&cam_res->gpio_res_lock);
+		/* Count the dev number in the dev_list */
+		list_for_each(list, &gpio_res->dev_list)
+			dev_num++;
+
+		/*
+		 * Free the gpio only when the last remaining device
+		 * in the dev_list releases it; otherwise keep the
+		 * gpio requested.
+		 */
+		if (dev_num == 1) {
+			dev_res = list_first_entry(&gpio_res->dev_list,
+				struct cam_dev_res, list);
+			list_del_init(&dev_res->list);
+			kfree(dev_res);
+
+			list_del_init(&gpio_res->list);
+			kfree(gpio_res);
+		} else {
+			list_for_each_entry(dev_res,
+				&gpio_res->dev_list, list) {
+				if (dev_res->dev == dev) {
+					list_del_init(&dev_res->list);
+					kfree(dev_res);
+					need_free = false;
+					break;
+				}
+			}
+		}
+		mutex_unlock(&cam_res->gpio_res_lock);
+	}
+
+	if (need_free)
+		gpio_free(gpio);
+}
+
+void cam_res_mgr_gpio_free_arry(struct device *dev,
+		const struct gpio *array, size_t num)
+{
+	while (num--)
+		cam_res_mgr_gpio_free(dev, (array[num]).gpio);
+}
+EXPORT_SYMBOL(cam_res_mgr_gpio_free_arry);
+
+int cam_res_mgr_gpio_set_value(unsigned int gpio, int value)
+{
+	int rc = 0;
+	bool found = false;
+	struct cam_gpio_res *gpio_res = NULL;
+
+	if (cam_res && cam_res->shared_gpio_enabled) {
+		mutex_lock(&cam_res->gpio_res_lock);
+		list_for_each_entry(gpio_res, &cam_res->gpio_res_list, list) {
+			if (gpio == gpio_res->gpio) {
+				found = true;
+				break;
+			}
+		}
+		mutex_unlock(&cam_res->gpio_res_lock);
+	}
+
+	/*
+	 * Set the value directly if the gpio can't be found in
+	 * gpio_res_list; otherwise apply reference counting for
+	 * the shared gpio.
+	 */
+	if (!found) {
+		gpio_set_value_cansleep(gpio, value);
+	} else {
+		if (value) {
+			gpio_res->power_on_count++;
+			if (gpio_res->power_on_count < 2) {
+				gpio_set_value_cansleep(gpio, value);
+				CAM_DBG(CAM_RES,
+					"Shared GPIO(%d) : HIGH", gpio);
+			}
+		} else {
+			gpio_res->power_on_count--;
+			if (gpio_res->power_on_count < 1) {
+				gpio_set_value_cansleep(gpio, value);
+				CAM_DBG(CAM_RES,
+					"Shared GPIO(%d) : LOW", gpio);
+			}
+		}
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_res_mgr_gpio_set_value);
+
+static int cam_res_mgr_parse_dt(struct device *dev)
+{
+	int rc = 0;
+	struct device_node *of_node = NULL;
+	struct cam_res_mgr_dt *dt = &cam_res->dt;
+
+	of_node = dev->of_node;
+
+	dt->num_shared_gpio = of_property_count_u32_elems(of_node,
+		"shared-gpios");
+
+	if (dt->num_shared_gpio > MAX_SHARED_GPIO_SIZE ||
+		dt->num_shared_gpio <= 0) {
+		/*
+		 * Not really an error; it means the dtsi does not
+		 * configure any shared gpio.
+		 */
+		CAM_DBG(CAM_RES, "Invalid GPIO number %d. No shared gpio.",
+			dt->num_shared_gpio);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32_array(of_node, "shared-gpios",
+		dt->shared_gpio, dt->num_shared_gpio);
+	if (rc) {
+		CAM_ERR(CAM_RES, "Get shared gpio array failed.");
+		return -EINVAL;
+	}
+
+	dt->pinctrl_info.pinctrl = devm_pinctrl_get(dev);
+	if (IS_ERR_OR_NULL(dt->pinctrl_info.pinctrl)) {
+		CAM_ERR(CAM_RES, "Pinctrl not available");
+		return -EINVAL;
+	}
+
+	/*
+	 * Check the pinctrl states to make sure shared gpio
+	 * support is enabled.
+	 */
+	dt->pinctrl_info.gpio_state_active =
+		pinctrl_lookup_state(dt->pinctrl_info.pinctrl,
+			CAM_RES_MGR_DEFAULT);
+	if (IS_ERR_OR_NULL(dt->pinctrl_info.gpio_state_active)) {
+		CAM_ERR(CAM_RES,
+			"Failed to get the active state pinctrl handle");
+		return -EINVAL;
+	}
+
+	dt->pinctrl_info.gpio_state_suspend =
+		pinctrl_lookup_state(dt->pinctrl_info.pinctrl,
+			CAM_RES_MGR_SLEEP);
+	if (IS_ERR_OR_NULL(dt->pinctrl_info.gpio_state_suspend)) {
+		CAM_ERR(CAM_RES,
+			"Failed to get the suspend state pinctrl handle");
+		return -EINVAL;
+	}
+
+	devm_pinctrl_put(dt->pinctrl_info.pinctrl);
+
+	return rc;
+}
+
+static int cam_res_mgr_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+
+	cam_res = kzalloc(sizeof(*cam_res), GFP_KERNEL);
+	if (!cam_res)
+		return -ENOMEM;
+
+	cam_res->dev = &pdev->dev;
+	mutex_init(&cam_res->flash_res_lock);
+	mutex_init(&cam_res->gpio_res_lock);
+
+	rc = cam_res_mgr_parse_dt(&pdev->dev);
+	if (rc) {
+		CAM_INFO(CAM_RES, "Disable shared gpio support.");
+		cam_res->shared_gpio_enabled = false;
+	} else {
+		CAM_INFO(CAM_RES, "Enable shared gpio support.");
+		cam_res->shared_gpio_enabled = true;
+	}
+
+	cam_res->pstatus = PINCTRL_STATUS_PUT;
+
+	INIT_LIST_HEAD(&cam_res->gpio_res_list);
+	INIT_LIST_HEAD(&cam_res->flash_res_list);
+
+	return 0;
+}
+
+static int cam_res_mgr_remove(struct platform_device *pdev)
+{
+	if (cam_res) {
+		cam_res_mgr_free_res();
+		kfree(cam_res);
+		cam_res = NULL;
+	}
+
+	return 0;
+}
+
+static const struct of_device_id cam_res_mgr_dt_match[] = {
+	{.compatible = "qcom,cam-res-mgr"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, cam_res_mgr_dt_match);
+
+static struct platform_driver cam_res_mgr_driver = {
+	.probe = cam_res_mgr_probe,
+	.remove = cam_res_mgr_remove,
+	.driver = {
+		.name = "cam_res_mgr",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_res_mgr_dt_match,
+	},
+};
+
+static int __init cam_res_mgr_init(void)
+{
+	return platform_driver_register(&cam_res_mgr_driver);
+}
+
+static void __exit cam_res_mgr_exit(void)
+{
+	platform_driver_unregister(&cam_res_mgr_driver);
+}
+
+module_init(cam_res_mgr_init);
+module_exit(cam_res_mgr_exit);
+MODULE_DESCRIPTION("Camera resource manager driver");
+MODULE_LICENSE("GPL v2");
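
cam_res_mgr.c caches led triggers by name so that multiple flash consumers resolve to a single struct led_trigger. A hedged sketch of that path, with all identifiers local to the example and no claim about the real flash driver's call sites:

/* Illustration only: the second registration with the same name is served
 * from flash_res_list, so both pointers refer to the same trigger.
 */
static void example_flash_trigger_sharing(void)
{
	struct led_trigger *torch_a;
	struct led_trigger *torch_b;

	cam_res_mgr_led_trigger_register("torch-trigger", &torch_a);
	cam_res_mgr_led_trigger_register("torch-trigger", &torch_b);
	/* torch_b == torch_a after the cache hit */

	cam_res_mgr_led_trigger_event(torch_a, LED_FULL);
	cam_res_mgr_led_trigger_event(torch_a, LED_OFF);

	cam_res_mgr_led_trigger_unregister(torch_a);
}
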
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr_api.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr_api.h
new file mode 100644
index 0000000..1c4c6c8
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr_api.h
@@ -0,0 +1,137 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CAM_RES_MGR_API_H__
+#define __CAM_RES_MGR_API_H__
+
+#include <linux/leds.h>
+
+/**
+ * @brief: Register the led trigger
+ *
+ *  The newly registered led trigger is added to flash_res_list.
+ *
+ * @name  : Pointer to the led trigger name
+ * @tp    : Save the returned led trigger
+ *
+ * @return None
+ */
+void cam_res_mgr_led_trigger_register(const char *name,
+	struct led_trigger **tp);
+
+/**
+ * @brief: Unregister the led trigger
+ *
+ *  Free the flash_res if this led trigger isn't used by any other device.
+ *
+ * @tp : Pointer to the led trigger
+ *
+ * @return None
+ */
+void cam_res_mgr_led_trigger_unregister(struct led_trigger *tp);
+
+/**
+ * @brief: Trigger the event to led core
+ *
+ * @trig       : Pointer to the led trigger
+ * @brightness : The brightness need to fire
+ *
+ * @return None
+ */
+void cam_res_mgr_led_trigger_event(struct led_trigger *trig,
+	enum led_brightness brightness);
+
+/**
+ * @brief: Get the corresponding pinctrl of dev
+ *
+ *  Init the shared pinctrl if shared pinctrl is enabled.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_res_mgr_shared_pinctrl_init(void);
+
+/**
+ * @brief: Put the pinctrl
+ *
+ *  Put the shared pinctrl.
+ *
+ * @return None
+ */
+void cam_res_mgr_shared_pinctrl_put(void);
+
+/**
+ * @brief: Select the corresponding state
+ *
+ *  The active state can be selected directly; selecting the suspend
+ *  state is held off while the shared gpios are still held by another
+ *  device.
+ *
+ * @active   : The flag to indicate whether to activate or suspend
+ *             the shared pinctrl.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_res_mgr_shared_pinctrl_select_state(bool active);
+
+/**
+ * @brief: Post init shared pinctrl
+ *
+ *  Post init checks whether the device really uses a shared gpio;
+ *  if not, the pinctrl is suspended and put.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_res_mgr_shared_pinctrl_post_init(void);
+
+/**
+ * @brief: Request a gpio
+ *
+ *  Allocates a gpio_res for a new shared gpio, otherwise finds the
+ *  corresponding gpio_res.
+ *
+ * @dev   : Pointer to the device
+ * @gpio  : The GPIO number
+ * @flags : GPIO configuration as specified by GPIOF_*
+ * @label : A literal description string of this GPIO
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_res_mgr_gpio_request(struct device *dev, unsigned int gpio,
+		unsigned long flags, const char *label);
+
+/**
+ * @brief: Free an array of GPIOs
+ *
+ *  Free the GPIOs and release the corresponding gpio_res entries.
+ *
+ * @dev   : Pointer to the device
+ * @array : Array of GPIOs to free
+ * @num   : The number of GPIOs in the array
+ *
+ * @return None
+ */
+void cam_res_mgr_gpio_free_arry(struct device *dev,
+	const struct gpio *array, size_t num);
+
+/**
+ * @brief: Set GPIO power level
+ *
+ *  Add ref count support for shared GPIOs.
+ *
+ * @gpio   : The GPIO number
+ * @value  : The value to set
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_res_mgr_gpio_set_value(unsigned int gpio, int value);
+
+#endif /* __CAM_RES_MGR_API_H__ */
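
To show how a consumer is expected to use the API declared above, here is a sketch of two hypothetical sensor-module devices sharing one GPIO that is assumed to be listed in the shared-gpios DT property; the GPIO number, label and device pointers are placeholders.

#include <linux/gpio.h>
#include "cam_res_mgr_api.h"

/* Illustration only: GPIO 40 is assumed to be shared, so set_value is
 * reference counted and the pin is only freed with the last holder.
 */
static void example_shared_gpio_usage(struct device *dev_a,
	struct device *dev_b)
{
	struct gpio tbl = {
		.gpio  = 40,
		.flags = GPIOF_OUT_INIT_LOW,
		.label = "CAM_VAF_EN",
	};

	cam_res_mgr_gpio_request(dev_a, 40, GPIOF_OUT_INIT_LOW, "CAM_VAF_EN");
	cam_res_mgr_gpio_request(dev_b, 40, GPIOF_OUT_INIT_LOW, "CAM_VAF_EN");

	cam_res_mgr_gpio_set_value(40, 1); /* count 0 -> 1: pin driven high */
	cam_res_mgr_gpio_set_value(40, 1); /* count 1 -> 2: no pin access   */
	cam_res_mgr_gpio_set_value(40, 0); /* count 2 -> 1: pin stays high  */
	cam_res_mgr_gpio_set_value(40, 0); /* count 1 -> 0: pin driven low  */

	cam_res_mgr_gpio_free_arry(dev_a, &tbl, 1); /* dev_a drops its reference */
	cam_res_mgr_gpio_free_arry(dev_b, &tbl, 1); /* last holder: gpio is freed */
}
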
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr_private.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr_private.h
new file mode 100644
index 0000000..4d46c8e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr_private.h
@@ -0,0 +1,113 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CAM_RES_MGR_PRIVATE_H__
+#define __CAM_RES_MGR_PRIVATE_H__
+
+#include <linux/list.h>
+#include <linux/leds.h>
+#include "cam_soc_util.h"
+
+#define MAX_SHARED_GPIO_SIZE 16
+
+/* pinctrl states name */
+#define CAM_RES_MGR_SLEEP	"cam_res_mgr_suspend"
+#define CAM_RES_MGR_DEFAULT	"cam_res_mgr_default"
+
+/**
+ * enum pinctrl_status - Enum for pinctrl status
+ */
+enum pinctrl_status {
+	PINCTRL_STATUS_GOT = 0,
+	PINCTRL_STATUS_ACTIVE,
+	PINCTRL_STATUS_SUSPEND,
+	PINCTRL_STATUS_PUT,
+};
+
+/**
+ * struct cam_dev_res
+ *
+ * @list : List member used to append this node to a dev list
+ * @dev  : Device pointer associated with device
+ */
+struct cam_dev_res {
+	struct list_head list;
+	struct device    *dev;
+};
+
+/**
+ * struct cam_gpio_res
+ *
+ * @list           : List member used to append this node to a gpio list
+ * @dev_list       : List of the devices which requested this gpio
+ * @gpio           : Gpio value
+ * @power_on_count : Reference count of power-on requests for this gpio
+ */
+struct cam_gpio_res {
+	struct list_head list;
+	struct list_head dev_list;
+	unsigned int     gpio;
+	int              power_on_count;
+};
+
+/**
+ * struct cam_flash_res
+ *
+ * @list           : List member used to append this node to a linked list
+ * @name           : Pointer to the flash trigger's name.
+ * @trigger        : Pointer to the flash trigger
+ */
+struct cam_flash_res {
+	struct list_head   list;
+	const char         *name;
+	struct led_trigger *trigger;
+};
+
+/**
+ * struct cam_res_mgr_dt
+ *
+ * @shared_gpio     : Shared gpios list in the device tree
+ * @num_shared_gpio : The number of shared gpio
+ * @pinctrl_info    : Pinctrl information
+ */
+struct cam_res_mgr_dt {
+	uint                        shared_gpio[MAX_SHARED_GPIO_SIZE];
+	int                         num_shared_gpio;
+	struct cam_soc_pinctrl_info pinctrl_info;
+};
+
+/**
+ * struct cam_res_mgr
+ *
+ * @dev                 : Pointer to the device
+ * @dt                  : Device tree resource
+ * @shared_gpio_enabled : The flag to indicate whether shared gpio is supported
+ * @pstatus             : Shared pinctrl status
+ * @gpio_res_list       : List head of the gpio resource
+ * @flash_res_list      : List head of the flash resource
+ * @gpio_res_lock       : GPIO resource lock
+ * @flash_res_lock      : Flash resource lock
+ */
+struct cam_res_mgr {
+	struct device         *dev;
+	struct cam_res_mgr_dt dt;
+
+	bool                  shared_gpio_enabled;
+	enum pinctrl_status   pstatus;
+
+	struct list_head      gpio_res_list;
+	struct list_head      flash_res_list;
+	struct mutex          gpio_res_lock;
+	struct mutex          flash_res_lock;
+};
+
+#endif /* __CAM_RES_MGR_PRIVATE_H__ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
index c3ca7d3..97158e4 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
@@ -182,12 +182,7 @@
 				return rc;
 			}
 		}
-
-		i2c_reg_settings->request_id =
-			csl_packet->header.request_id;
-		i2c_reg_settings->is_settings_valid = 1;
-		cam_sensor_update_req_mgr(s_ctrl, csl_packet);
-		break;
+	break;
 	}
 	case CAM_SENSOR_PACKET_OPCODE_SENSOR_NOP: {
 		cam_sensor_update_req_mgr(s_ctrl, csl_packet);
@@ -207,6 +202,14 @@
 		CAM_ERR(CAM_SENSOR, "Fail parsing I2C Pkt: %d", rc);
 		return rc;
 	}
+
+	if ((csl_packet->header.op_code & 0xFFFFFF) ==
+		CAM_SENSOR_PACKET_OPCODE_SENSOR_UPDATE) {
+		i2c_reg_settings->request_id =
+			csl_packet->header.request_id;
+		cam_sensor_update_req_mgr(s_ctrl, csl_packet);
+	}
+
 	return rc;
 }
 
@@ -473,12 +476,9 @@
 
 	cam_sensor_release_resource(s_ctrl);
 
-	if (s_ctrl->sensor_state == CAM_SENSOR_START) {
+	if ((s_ctrl->sensor_state == CAM_SENSOR_START) ||
+		(s_ctrl->sensor_state == CAM_SENSOR_ACQUIRE)) {
 		cam_sensor_power_down(s_ctrl);
-		s_ctrl->sensor_state = CAM_SENSOR_ACQUIRE;
-	}
-
-	if (s_ctrl->sensor_state == CAM_SENSOR_ACQUIRE) {
 		rc = cam_destroy_device_hdl(s_ctrl->bridge_intf.device_hdl);
 		if (rc < 0)
 			CAM_ERR(CAM_SENSOR, " failed destroying dhdl");
@@ -686,9 +686,31 @@
 			rc = -EFAULT;
 			goto release_mutex;
 		}
+
+		rc = cam_sensor_power_up(s_ctrl);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR, "Sensor Power up failed");
+			goto release_mutex;
+		}
+
+		s_ctrl->sensor_state = CAM_SENSOR_ACQUIRE;
 	}
 		break;
 	case CAM_RELEASE_DEV: {
+		if (s_ctrl->sensor_state != CAM_SENSOR_ACQUIRE) {
+			rc = -EINVAL;
+			CAM_WARN(CAM_SENSOR,
+			"Not in the right state to release: %d",
+			s_ctrl->sensor_state);
+			goto release_mutex;
+		}
+
+		rc = cam_sensor_power_down(s_ctrl);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR, "Sensor Power Down failed");
+			goto release_mutex;
+		}
+
 		cam_sensor_release_resource(s_ctrl);
 		if (s_ctrl->bridge_intf.device_hdl == -1) {
 			CAM_ERR(CAM_SENSOR,
@@ -705,6 +727,8 @@
 		s_ctrl->bridge_intf.device_hdl = -1;
 		s_ctrl->bridge_intf.link_hdl = -1;
 		s_ctrl->bridge_intf.session_hdl = -1;
+
+		s_ctrl->sensor_state = CAM_SENSOR_INIT;
 	}
 		break;
 	case CAM_QUERY_CAP: {
@@ -720,6 +744,14 @@
 		break;
 	}
 	case CAM_START_DEV: {
+		if (s_ctrl->sensor_state != CAM_SENSOR_ACQUIRE) {
+			rc = -EINVAL;
+			CAM_WARN(CAM_SENSOR,
+			"Not in the right state to start: %d",
+			s_ctrl->sensor_state);
+			goto release_mutex;
+		}
+
 		if (s_ctrl->i2c_data.streamon_settings.is_settings_valid &&
 			(s_ctrl->i2c_data.streamon_settings.request_id == 0)) {
 			rc = cam_sensor_apply_settings(s_ctrl, 0,
@@ -734,6 +766,14 @@
 	}
 		break;
 	case CAM_STOP_DEV: {
+		if (s_ctrl->sensor_state != CAM_SENSOR_START) {
+			rc = -EINVAL;
+			CAM_WARN(CAM_SENSOR,
+			"Not in the right state to stop: %d",
+			s_ctrl->sensor_state);
+			goto release_mutex;
+		}
+
 		if (s_ctrl->i2c_data.streamoff_settings.is_settings_valid &&
 			(s_ctrl->i2c_data.streamoff_settings.request_id == 0)) {
 			rc = cam_sensor_apply_settings(s_ctrl, 0,
@@ -743,11 +783,7 @@
 				"cannot apply streamoff settings");
 			}
 		}
-		rc = cam_sensor_power_down(s_ctrl);
-		if (rc < 0) {
-			CAM_ERR(CAM_SENSOR, "Sensor Power Down failed");
-			goto release_mutex;
-		}
+		s_ctrl->sensor_state = CAM_SENSOR_ACQUIRE;
 	}
 		break;
 	case CAM_CONFIG_DEV: {
@@ -758,11 +794,7 @@
 		}
 		if (s_ctrl->i2c_data.init_settings.is_settings_valid &&
 			(s_ctrl->i2c_data.init_settings.request_id == 0)) {
-			rc = cam_sensor_power_up(s_ctrl);
-			if (rc < 0) {
-				CAM_ERR(CAM_SENSOR, "Sensor Power up failed");
-				goto release_mutex;
-			}
+
 			rc = cam_sensor_apply_settings(s_ctrl, 0,
 				CAM_SENSOR_PACKET_OPCODE_SENSOR_INITIAL_CONFIG);
 			if (rc < 0) {
@@ -889,13 +921,9 @@
 		return rc;
 	}
 
-	if (s_ctrl->io_master_info.master_type == CCI_MASTER) {
-		rc = camera_io_init(&(s_ctrl->io_master_info));
-		if (rc < 0) {
-			CAM_ERR(CAM_SENSOR, "cci_init failed");
-			return -EINVAL;
-		}
-	}
+	rc = camera_io_init(&(s_ctrl->io_master_info));
+	if (rc < 0)
+		CAM_ERR(CAM_SENSOR, "cci_init failed: rc: %d", rc);
 
 	return rc;
 }
@@ -924,8 +952,7 @@
 		return rc;
 	}
 
-	if (s_ctrl->io_master_info.master_type == CCI_MASTER)
-		camera_io_release(&(s_ctrl->io_master_info));
+	camera_io_release(&(s_ctrl->io_master_info));
 
 	return rc;
 }
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/Makefile
index bf61fb3..98ee3ae 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/Makefile
@@ -2,6 +2,7 @@
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
 
 obj-$(CONFIG_SPECTRA_CAMERA) +=  cam_sensor_util.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
index 85d7b74..b3de092 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
@@ -13,6 +13,7 @@
 #include <linux/kernel.h>
 #include "cam_sensor_util.h"
 #include <cam_mem_mgr.h>
+#include "cam_res_mgr_api.h"
 
 #define CAM_SENSOR_PINCTRL_STATE_SLEEP "cam_suspend"
 #define CAM_SENSOR_PINCTRL_STATE_DEFAULT "cam_default"
@@ -614,7 +615,8 @@
 
 	if (gpio_en) {
 		for (i = 0; i < size; i++) {
-			rc = gpio_request_one(gpio_tbl[i].gpio,
+			rc = cam_res_mgr_gpio_request(soc_info->dev,
+					gpio_tbl[i].gpio,
 					gpio_tbl[i].flags, gpio_tbl[i].label);
 			if (rc) {
 				/*
@@ -627,7 +629,7 @@
 			}
 		}
 	} else {
-		gpio_free_array(gpio_tbl, size);
+		cam_res_mgr_gpio_free_arry(soc_info->dev, gpio_tbl, size);
 	}
 
 	return rc;
@@ -1157,7 +1159,7 @@
 
 	sensor_pctrl->pinctrl = devm_pinctrl_get(dev);
 	if (IS_ERR_OR_NULL(sensor_pctrl->pinctrl)) {
-		CAM_ERR(CAM_SENSOR, "Getting pinctrl handle failed");
+		CAM_DBG(CAM_SENSOR, "Getting pinctrl handle failed");
 		return -EINVAL;
 	}
 	sensor_pctrl->gpio_state_active =
@@ -1176,8 +1178,16 @@
 			"Failed to get the suspend state pinctrl handle");
 		return -EINVAL;
 	}
+
+	if (cam_res_mgr_shared_pinctrl_init()) {
+		CAM_ERR(CAM_SENSOR,
+			"Failed to init shared pinctrl");
+		return -EINVAL;
+	}
+
 	return 0;
 }
+
 int msm_cam_sensor_handle_reg_gpio(int seq_type,
 	struct msm_camera_gpio_num_info *gpio_num_info, int val)
 {
@@ -1195,7 +1205,7 @@
 	if (gpio_num_info->valid[gpio_offset] == 1) {
 		CAM_DBG(CAM_SENSOR, "VALID GPIO offset: %d, seqtype: %d",
 			 gpio_offset, seq_type);
-		gpio_set_value_cansleep(
+		cam_res_mgr_gpio_set_value(
 			gpio_num_info->gpio_num
 			[gpio_offset], val);
 	}
@@ -1227,7 +1237,8 @@
 
 	ret = msm_camera_pinctrl_init(&(ctrl->pinctrl_info), ctrl->dev);
 	if (ret < 0) {
-		CAM_ERR(CAM_SENSOR, "Initialization of pinctrl failed");
+		/* Some sensor subdevs have no pinctrl. */
+		CAM_DBG(CAM_SENSOR, "Initialization of pinctrl failed");
 		ctrl->cam_pinctrl_status = 0;
 	} else {
 		ctrl->cam_pinctrl_status = 1;
@@ -1238,14 +1249,25 @@
 		no_gpio = rc;
 
 	if (ctrl->cam_pinctrl_status) {
-		ret = pinctrl_select_state(ctrl->pinctrl_info.pinctrl,
+		ret = pinctrl_select_state(
+			ctrl->pinctrl_info.pinctrl,
 			ctrl->pinctrl_info.gpio_state_active);
 		if (ret)
 			CAM_ERR(CAM_SENSOR, "cannot set pin to active state");
+
+		ret = cam_res_mgr_shared_pinctrl_select_state(true);
+		if (ret)
+			CAM_ERR(CAM_SENSOR,
+				"Cannot set shared pin to active state");
+
+		ret = cam_res_mgr_shared_pinctrl_post_init();
+		if (ret)
+			CAM_ERR(CAM_SENSOR,
+				"Failed to post init shared pinctrl");
 	}
 
 	for (index = 0; index < ctrl->power_setting_size; index++) {
-		CAM_DBG(CAM_SENSOR, "index: %d",  index);
+		CAM_DBG(CAM_SENSOR, "index: %d", index);
 		power_setting = &ctrl->power_setting[index];
 		CAM_DBG(CAM_SENSOR, "seq_type %d", power_setting->seq_type);
 
@@ -1423,7 +1445,7 @@
 			if (!gpio_num_info->valid
 				[power_setting->seq_type])
 				continue;
-			gpio_set_value_cansleep(
+			cam_res_mgr_gpio_set_value(
 				gpio_num_info->gpio_num
 				[power_setting->seq_type], GPIOF_OUT_INIT_LOW);
 			break;
@@ -1471,11 +1493,14 @@
 		}
 	}
 	if (ctrl->cam_pinctrl_status) {
-		ret = pinctrl_select_state(ctrl->pinctrl_info.pinctrl,
+		ret = pinctrl_select_state(
+				ctrl->pinctrl_info.pinctrl,
 				ctrl->pinctrl_info.gpio_state_suspend);
 		if (ret)
 			CAM_ERR(CAM_SENSOR, "cannot set pin to suspend state");
-		devm_pinctrl_put(ctrl->pinctrl_info.pinctrl);
+		cam_res_mgr_shared_pinctrl_select_state(false);
+		pinctrl_put(ctrl->pinctrl_info.pinctrl);
+		cam_res_mgr_shared_pinctrl_put();
 	}
 	ctrl->cam_pinctrl_status = 0;
 
@@ -1599,7 +1624,7 @@
 			if (!gpio_num_info->valid[pd->seq_type])
 				continue;
 
-			gpio_set_value_cansleep(
+			cam_res_mgr_gpio_set_value(
 				gpio_num_info->gpio_num
 				[pd->seq_type],
 				(int) pd->config_val);
@@ -1662,11 +1687,15 @@
 	}
 
 	if (ctrl->cam_pinctrl_status) {
-		ret = pinctrl_select_state(ctrl->pinctrl_info.pinctrl,
+		ret = pinctrl_select_state(
+				ctrl->pinctrl_info.pinctrl,
 				ctrl->pinctrl_info.gpio_state_suspend);
 		if (ret)
 			CAM_ERR(CAM_SENSOR, "cannot set pin to suspend state");
-		devm_pinctrl_put(ctrl->pinctrl_info.pinctrl);
+
+		cam_res_mgr_shared_pinctrl_select_state(false);
+		pinctrl_put(ctrl->pinctrl_info.pinctrl);
+		cam_res_mgr_shared_pinctrl_put();
 	}
 
 	ctrl->cam_pinctrl_status = 0;
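
The cam_sensor_util.c hunks above interleave the shared pinctrl with the per-sensor pinctrl during power sequencing. A condensed, illustrative ordering follows; error handling and the regulator/clock/gpio steps are omitted, and the example function is not part of the patch.

/* Sketch only: the real sequencing lives in the power up/down paths of
 * this file, and cam_res_mgr_shared_pinctrl_init() is already called
 * from msm_camera_pinctrl_init().
 */
static void example_pinctrl_sequence(struct cam_sensor_power_ctrl_t *ctrl)
{
	/* power up: private pinctrl goes active, then the shared one */
	msm_camera_pinctrl_init(&ctrl->pinctrl_info, ctrl->dev);
	pinctrl_select_state(ctrl->pinctrl_info.pinctrl,
		ctrl->pinctrl_info.gpio_state_active);
	cam_res_mgr_shared_pinctrl_select_state(true);
	cam_res_mgr_shared_pinctrl_post_init();

	/* ... regulators, clocks and gpios are sequenced here ... */

	/* power down: suspend both pinctrls, then release them */
	pinctrl_select_state(ctrl->pinctrl_info.pinctrl,
		ctrl->pinctrl_info.gpio_state_suspend);
	cam_res_mgr_shared_pinctrl_select_state(false);
	pinctrl_put(ctrl->pinctrl_info.pinctrl);
	cam_res_mgr_shared_pinctrl_put();
}
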
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
index 607087f..7824102 100644
--- a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
@@ -73,7 +73,7 @@
 
 enum cam_smmu_buf_state {
 	CAM_SMMU_BUFF_EXIST,
-	CAM_SMMU_BUFF_NOT_EXIST
+	CAM_SMMU_BUFF_NOT_EXIST,
 };
 
 enum cam_smmu_init_dir {
@@ -120,6 +120,7 @@
 	struct secheap_buf_info secheap_buf;
 
 	struct list_head smmu_buf_list;
+	struct list_head smmu_buf_kernel_list;
 	struct mutex lock;
 	int handle;
 	enum cam_smmu_ops_param state;
@@ -189,6 +190,9 @@
 static struct cam_dma_buff_info *cam_smmu_find_mapping_by_ion_index(int idx,
 	int ion_fd);
 
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_dma_buf(int idx,
+	struct dma_buf *buf);
+
 static struct cam_sec_buff_info *cam_smmu_find_mapping_by_sec_buf_idx(int idx,
 	int ion_fd);
 
@@ -208,7 +212,11 @@
 
 static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
 	enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
-	size_t *len_ptr,
+	size_t *len_ptr, enum cam_smmu_region_id region_id);
+
+static int cam_smmu_map_kernel_buffer_and_add_to_list(int idx,
+	struct dma_buf *buf, enum dma_data_direction dma_dir,
+	dma_addr_t *paddr_ptr, size_t *len_ptr,
 	enum cam_smmu_region_id region_id);
 
 static int cam_smmu_alloc_scratch_buffer_add_to_list(int idx,
@@ -224,9 +232,13 @@
 	struct cam_dma_buff_info *mapping_info,
 	int idx);
 
-static void cam_smmu_clean_buffer_list(int idx);
+static void cam_smmu_clean_user_buffer_list(int idx);
 
-static void cam_smmu_print_list(int idx);
+static void cam_smmu_clean_kernel_buffer_list(int idx);
+
+static void cam_smmu_print_user_list(int idx);
+
+static void cam_smmu_print_kernel_list(int idx);
 
 static void cam_smmu_print_table(void);
 
@@ -271,7 +283,7 @@
 	kfree(payload);
 }
 
-static void cam_smmu_print_list(int idx)
+static void cam_smmu_print_user_list(int idx)
 {
 	struct cam_dma_buff_info *mapping;
 
@@ -286,6 +298,21 @@
 	}
 }
 
+static void cam_smmu_print_kernel_list(int idx)
+{
+	struct cam_dma_buff_info *mapping;
+
+	CAM_ERR(CAM_SMMU, "index = %d", idx);
+	list_for_each_entry(mapping,
+		&iommu_cb_set.cb_info[idx].smmu_buf_kernel_list, list) {
+		CAM_ERR(CAM_SMMU,
+			"dma_buf = %pK, paddr= 0x%pK, len = %u, region = %d",
+			 mapping->buf, (void *)mapping->paddr,
+			 (unsigned int)mapping->len,
+			 mapping->region_id);
+	}
+}
+
 static void cam_smmu_print_table(void)
 {
 	int i;
@@ -489,6 +516,7 @@
 	for (i = 0; i < iommu_cb_set.cb_num; i++) {
 		iommu_cb_set.cb_info[i].handle = HANDLE_INIT;
 		INIT_LIST_HEAD(&iommu_cb_set.cb_info[i].smmu_buf_list);
+		INIT_LIST_HEAD(&iommu_cb_set.cb_info[i].smmu_buf_kernel_list);
 		iommu_cb_set.cb_info[i].state = CAM_SMMU_DETACH;
 		iommu_cb_set.cb_info[i].dev = NULL;
 		iommu_cb_set.cb_info[i].cb_count = 0;
@@ -719,7 +747,13 @@
 {
 	struct cam_dma_buff_info *mapping;
 
-	list_for_each_entry(mapping, &iommu_cb_set.cb_info[idx].smmu_buf_list,
+	if (ion_fd < 0) {
+		CAM_ERR(CAM_SMMU, "Invalid fd %d", ion_fd);
+		return NULL;
+	}
+
+	list_for_each_entry(mapping,
+			&iommu_cb_set.cb_info[idx].smmu_buf_list,
 			list) {
 		if (mapping->ion_fd == ion_fd) {
 			CAM_DBG(CAM_SMMU, "find ion_fd %d", ion_fd);
@@ -727,8 +761,31 @@
 		}
 	}
 
-	CAM_ERR(CAM_SMMU, "Error: Cannot find fd %d by index %d",
-		ion_fd, idx);
+	CAM_ERR(CAM_SMMU, "Error: Cannot find entry by index %d", idx);
+
+	return NULL;
+}
+
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_dma_buf(int idx,
+	struct dma_buf *buf)
+{
+	struct cam_dma_buff_info *mapping;
+
+	if (!buf) {
+		CAM_ERR(CAM_SMMU, "Invalid dma_buf");
+		return NULL;
+	}
+
+	list_for_each_entry(mapping,
+			&iommu_cb_set.cb_info[idx].smmu_buf_kernel_list,
+			list) {
+		if (mapping->buf == buf) {
+			CAM_DBG(CAM_SMMU, "find dma_buf %pK", buf);
+			return mapping;
+		}
+	}
+
+	CAM_ERR(CAM_SMMU, "Error: Cannot find entry by index %d", idx);
 
 	return NULL;
 }
@@ -750,7 +807,7 @@
 	return NULL;
 }
 
-static void cam_smmu_clean_buffer_list(int idx)
+static void cam_smmu_clean_user_buffer_list(int idx)
 {
 	int ret;
 	struct cam_dma_buff_info *mapping_info, *temp;
@@ -787,6 +844,40 @@
 	}
 }
 
+static void cam_smmu_clean_kernel_buffer_list(int idx)
+{
+	int ret;
+	struct cam_dma_buff_info *mapping_info, *temp;
+
+	list_for_each_entry_safe(mapping_info, temp,
+			&iommu_cb_set.cb_info[idx].smmu_buf_kernel_list, list) {
+		CAM_DBG(CAM_SMMU,
+			"Free mapping address %pK, i = %d, dma_buf = %pK",
+			(void *)mapping_info->paddr, idx,
+			mapping_info->buf);
+
+		/* Clean up regular mapped buffers */
+		ret = cam_smmu_unmap_buf_and_remove_from_list(
+				mapping_info,
+				idx);
+
+		if (ret < 0) {
+			CAM_ERR(CAM_SMMU,
+				"Buffer delete in kernel list failed: idx = %d",
+				idx);
+			CAM_ERR(CAM_SMMU,
+				"Buffer delete failed: addr = %lx, dma_buf = %pK",
+				(unsigned long)mapping_info->paddr,
+				mapping_info->buf);
+			/*
+			 * Ignore this error and continue to delete other
+			 * buffers in the list
+			 */
+			continue;
+		}
+	}
+}
+
 static int cam_smmu_attach(int idx)
 {
 	int ret;
@@ -1157,7 +1248,7 @@
 EXPORT_SYMBOL(cam_smmu_get_region_info);
 
 int cam_smmu_reserve_sec_heap(int32_t smmu_hdl,
-	int ion_fd,
+	struct dma_buf *buf,
 	dma_addr_t *iova,
 	size_t *request_len)
 {
@@ -1190,21 +1281,20 @@
 		return rc;
 	}
 
-	secheap_buf = &iommu_cb_set.cb_info[idx].secheap_buf;
-	secheap_buf->buf = dma_buf_get(ion_fd);
-	if (IS_ERR_OR_NULL(secheap_buf->buf)) {
-		rc = PTR_ERR(secheap_buf->buf);
-		CAM_ERR(CAM_SMMU, "Error: dma get buf failed. fd = %d", ion_fd);
-		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	if (IS_ERR_OR_NULL(buf)) {
+		rc = PTR_ERR(buf);
+		CAM_ERR(CAM_SMMU,
+			"Error: dma get buf failed. rc = %d", rc);
 		goto err_out;
 	}
 
+	secheap_buf = &iommu_cb_set.cb_info[idx].secheap_buf;
+	secheap_buf->buf = buf;
 	secheap_buf->attach = dma_buf_attach(secheap_buf->buf,
 		iommu_cb_set.cb_info[idx].dev);
 	if (IS_ERR_OR_NULL(secheap_buf->attach)) {
 		rc = PTR_ERR(secheap_buf->attach);
 		CAM_ERR(CAM_SMMU, "Error: dma buf attach failed");
-		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
 		goto err_put;
 	}
 
@@ -1213,7 +1303,6 @@
 	if (IS_ERR_OR_NULL(secheap_buf->table)) {
 		rc = PTR_ERR(secheap_buf->table);
 		CAM_ERR(CAM_SMMU, "Error: dma buf map attachment failed");
-		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
 		goto err_detach;
 	}
 
@@ -1226,7 +1315,6 @@
 		IOMMU_READ | IOMMU_WRITE);
 	if (size != sec_heap_iova_len) {
 		CAM_ERR(CAM_SMMU, "IOMMU mapping failed");
-		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
 		goto err_unmap_sg;
 	}
 
@@ -1247,6 +1335,7 @@
 err_put:
 	dma_buf_put(secheap_buf->buf);
 err_out:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
 	return rc;
 }
 EXPORT_SYMBOL(cam_smmu_reserve_sec_heap);
@@ -1303,25 +1392,28 @@
 }
 EXPORT_SYMBOL(cam_smmu_release_sec_heap);
 
-static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
-	 enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
-	 size_t *len_ptr,
-	 enum cam_smmu_region_id region_id)
+static int cam_smmu_map_buffer_validate(struct dma_buf *buf,
+	int idx, enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
+	size_t *len_ptr, enum cam_smmu_region_id region_id,
+	struct cam_dma_buff_info **mapping_info)
 {
-	int rc = -1;
-	struct cam_dma_buff_info *mapping_info;
-	struct dma_buf *buf = NULL;
 	struct dma_buf_attachment *attach = NULL;
 	struct sg_table *table = NULL;
 	struct iommu_domain *domain;
 	size_t size = 0;
 	uint32_t iova = 0;
+	int rc = 0;
 
-	/* allocate memory for each buffer information */
-	buf = dma_buf_get(ion_fd);
 	if (IS_ERR_OR_NULL(buf)) {
 		rc = PTR_ERR(buf);
-		CAM_ERR(CAM_SMMU, "Error: dma get buf failed. fd = %d", ion_fd);
+		CAM_ERR(CAM_SMMU,
+			"Error: dma get buf failed. rc = %d", rc);
+		goto err_out;
+	}
+
+	if (!mapping_info) {
+		rc = -EINVAL;
+		CAM_ERR(CAM_SMMU, "Error: mapping_info is invalid");
 		goto err_out;
 	}
 
@@ -1356,18 +1448,13 @@
 			goto err_unmap_sg;
 		}
 
-		size = iommu_map_sg(domain,
-			iova,
-			table->sgl,
-			table->nents,
-			IOMMU_READ | IOMMU_WRITE);
+		size = iommu_map_sg(domain, iova, table->sgl, table->nents,
+				IOMMU_READ | IOMMU_WRITE);
 
 		if (size < 0) {
 			CAM_ERR(CAM_SMMU, "IOMMU mapping failed");
 			rc = cam_smmu_free_iova(iova,
-				size,
-				iommu_cb_set.cb_info[idx].handle);
-
+				size, iommu_cb_set.cb_info[idx].handle);
 			if (rc)
 				CAM_ERR(CAM_SMMU, "IOVA free failed");
 			rc = -ENOMEM;
@@ -1379,7 +1466,7 @@
 		}
 	} else if (region_id == CAM_SMMU_REGION_IO) {
 		rc = msm_dma_map_sg_lazy(iommu_cb_set.cb_info[idx].dev,
-			table->sgl, table->nents, dma_dir, buf);
+		table->sgl, table->nents, dma_dir, buf);
 
 		if (rc != table->nents) {
 			CAM_ERR(CAM_SMMU, "Error: msm_dma_map_sg_lazy failed");
@@ -1411,33 +1498,31 @@
 	}
 
 	/* fill up mapping_info */
-	mapping_info = kzalloc(sizeof(struct cam_dma_buff_info), GFP_KERNEL);
-	if (!mapping_info) {
+	*mapping_info = kzalloc(sizeof(struct cam_dma_buff_info), GFP_KERNEL);
+	if (!(*mapping_info)) {
 		rc = -ENOSPC;
 		goto err_alloc;
 	}
-	mapping_info->ion_fd = ion_fd;
-	mapping_info->buf = buf;
-	mapping_info->attach = attach;
-	mapping_info->table = table;
-	mapping_info->paddr = *paddr_ptr;
-	mapping_info->len = *len_ptr;
-	mapping_info->dir = dma_dir;
-	mapping_info->ref_count = 1;
-	mapping_info->region_id = region_id;
+
+	(*mapping_info)->buf = buf;
+	(*mapping_info)->attach = attach;
+	(*mapping_info)->table = table;
+	(*mapping_info)->paddr = *paddr_ptr;
+	(*mapping_info)->len = *len_ptr;
+	(*mapping_info)->dir = dma_dir;
+	(*mapping_info)->ref_count = 1;
+	(*mapping_info)->region_id = region_id;
 
 	if (!*paddr_ptr || !*len_ptr) {
 		CAM_ERR(CAM_SMMU, "Error: Space Allocation failed");
-		kfree(mapping_info);
+		kfree(*mapping_info);
 		rc = -ENOSPC;
 		goto err_alloc;
 	}
-	CAM_DBG(CAM_SMMU, "ion_fd = %d, dev = %pK, paddr= %pK, len = %u",
-		ion_fd, (void *)iommu_cb_set.cb_info[idx].dev,
+	CAM_DBG(CAM_SMMU, "dma_buf = %pK, dev = %pK, paddr= %pK, len = %u",
+		buf, (void *)iommu_cb_set.cb_info[idx].dev,
 		(void *)*paddr_ptr, (unsigned int)*len_ptr);
 
-	/* add to the list */
-	list_add(&mapping_info->list, &iommu_cb_set.cb_info[idx].smmu_buf_list);
 	return 0;
 
 err_alloc:
@@ -1466,6 +1551,60 @@
 	return rc;
 }
 
+
+static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
+	 enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
+	 size_t *len_ptr, enum cam_smmu_region_id region_id)
+{
+	int rc = -1;
+	struct cam_dma_buff_info *mapping_info = NULL;
+	struct dma_buf *buf = NULL;
+
+	/* returns the dma_buf structure related to an fd */
+	buf = dma_buf_get(ion_fd);
+
+	rc = cam_smmu_map_buffer_validate(buf, idx, dma_dir, paddr_ptr, len_ptr,
+		region_id, &mapping_info);
+
+	if (rc) {
+		CAM_ERR(CAM_SMMU, "buffer validation failure");
+		return rc;
+	}
+
+	mapping_info->ion_fd = ion_fd;
+	/* add to the list */
+	list_add(&mapping_info->list,
+		&iommu_cb_set.cb_info[idx].smmu_buf_list);
+
+	return 0;
+}
+
+static int cam_smmu_map_kernel_buffer_and_add_to_list(int idx,
+	struct dma_buf *buf, enum dma_data_direction dma_dir,
+	dma_addr_t *paddr_ptr, size_t *len_ptr,
+	enum cam_smmu_region_id region_id)
+{
+	int rc = -1;
+	struct cam_dma_buff_info *mapping_info = NULL;
+
+	rc = cam_smmu_map_buffer_validate(buf, idx, dma_dir, paddr_ptr, len_ptr,
+		region_id, &mapping_info);
+
+	if (rc) {
+		CAM_ERR(CAM_SMMU, "buffer validation failure");
+		return rc;
+	}
+
+	mapping_info->ion_fd = -1;
+
+	/* add to the list */
+	list_add(&mapping_info->list,
+		&iommu_cb_set.cb_info[idx].smmu_buf_kernel_list);
+
+	return 0;
+}
+
+
 static int cam_smmu_unmap_buf_and_remove_from_list(
 	struct cam_dma_buff_info *mapping_info,
 	int idx)
@@ -1532,8 +1671,7 @@
 }
 
 static enum cam_smmu_buf_state cam_smmu_check_fd_in_list(int idx,
-	int ion_fd, dma_addr_t *paddr_ptr,
-	size_t *len_ptr)
+	int ion_fd, dma_addr_t *paddr_ptr, size_t *len_ptr)
 {
 	struct cam_dma_buff_info *mapping;
 
@@ -1549,6 +1687,23 @@
 	return CAM_SMMU_BUFF_NOT_EXIST;
 }
 
+static enum cam_smmu_buf_state cam_smmu_check_dma_buf_in_list(int idx,
+	struct dma_buf *buf, dma_addr_t *paddr_ptr, size_t *len_ptr)
+{
+	struct cam_dma_buff_info *mapping;
+
+	list_for_each_entry(mapping,
+		&iommu_cb_set.cb_info[idx].smmu_buf_kernel_list, list) {
+		if (mapping->buf == buf) {
+			*paddr_ptr = mapping->paddr;
+			*len_ptr = mapping->len;
+			return CAM_SMMU_BUFF_EXIST;
+		}
+	}
+
+	return CAM_SMMU_BUFF_NOT_EXIST;
+}
+
 static enum cam_smmu_buf_state cam_smmu_check_secure_fd_in_list(int idx,
 					int ion_fd, dma_addr_t *paddr_ptr,
 					size_t *len_ptr)
@@ -2146,13 +2301,13 @@
 }
 EXPORT_SYMBOL(cam_smmu_unmap_stage2_iova);
 
-int cam_smmu_map_iova(int handle, int ion_fd,
-	enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr,
-	size_t *len_ptr, enum cam_smmu_region_id region_id)
+static int cam_smmu_map_iova_validate_params(int handle,
+	enum cam_smmu_map_dir dir,
+	dma_addr_t *paddr_ptr, size_t *len_ptr,
+	enum cam_smmu_region_id region_id)
 {
-	int idx, rc;
+	int idx, rc = 0;
 	enum dma_data_direction dma_dir;
-	enum cam_smmu_buf_state buf_state;
 
 	if (!paddr_ptr || !len_ptr) {
 		CAM_ERR(CAM_SMMU, "Input pointers are invalid");
@@ -2182,13 +2337,34 @@
 		return -EINVAL;
 	}
 
+	return rc;
+}
+
+int cam_smmu_map_user_iova(int handle, int ion_fd,
+	enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr,
+	size_t *len_ptr, enum cam_smmu_region_id region_id)
+{
+	int idx, rc = 0;
+	enum cam_smmu_buf_state buf_state;
+	enum dma_data_direction dma_dir;
+
+	rc = cam_smmu_map_iova_validate_params(handle, dir, paddr_ptr,
+		len_ptr, region_id);
+	if (rc) {
+		CAM_ERR(CAM_SMMU, "initial checks failed, unable to proceed");
+		return rc;
+	}
+
+	dma_dir = cam_smmu_translate_dir(dir);
+	idx = GET_SMMU_TABLE_IDX(handle);
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
 	if (iommu_cb_set.cb_info[idx].is_secure) {
 		CAM_ERR(CAM_SMMU,
 			"Error: can't map non-secure mem to secure cb");
-		return -EINVAL;
+		rc = -EINVAL;
+		goto get_addr_end;
 	}
 
-	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
 	if (iommu_cb_set.cb_info[idx].handle != handle) {
 		CAM_ERR(CAM_SMMU, "hdl is not valid, table_hdl = %x, hdl = %x",
 			iommu_cb_set.cb_info[idx].handle, handle);
@@ -2199,18 +2375,19 @@
 	if (iommu_cb_set.cb_info[idx].state != CAM_SMMU_ATTACH) {
 		CAM_ERR(CAM_SMMU,
 			"Err:Dev %s should call SMMU attach before map buffer",
-				iommu_cb_set.cb_info[idx].name);
+			iommu_cb_set.cb_info[idx].name);
 		rc = -EINVAL;
 		goto get_addr_end;
 	}
 
-	buf_state = cam_smmu_check_fd_in_list(idx, ion_fd, paddr_ptr,
-		len_ptr);
+	buf_state = cam_smmu_check_fd_in_list(idx, ion_fd, paddr_ptr, len_ptr);
 	if (buf_state == CAM_SMMU_BUFF_EXIST) {
-		CAM_ERR(CAM_SMMU, "ion_fd:%d already in the list", ion_fd);
+		CAM_ERR(CAM_SMMU,
+			"ion_fd: %d already in the list", ion_fd);
 		rc = -EALREADY;
 		goto get_addr_end;
 	}
+
 	rc = cam_smmu_map_buffer_and_add_to_list(idx, ion_fd, dma_dir,
 			paddr_ptr, len_ptr, region_id);
 	if (rc < 0)
@@ -2220,8 +2397,67 @@
 	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
 	return rc;
 }
-EXPORT_SYMBOL(cam_smmu_map_iova);
+EXPORT_SYMBOL(cam_smmu_map_user_iova);
 
+int cam_smmu_map_kernel_iova(int handle, struct dma_buf *buf,
+	enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr,
+	size_t *len_ptr, enum cam_smmu_region_id region_id)
+{
+	int idx, rc = 0;
+	enum cam_smmu_buf_state buf_state;
+	enum dma_data_direction dma_dir;
+
+	rc = cam_smmu_map_iova_validate_params(handle, dir, paddr_ptr,
+		len_ptr, region_id);
+	if (rc) {
+		CAM_ERR(CAM_SMMU, "initial checks failed, unable to proceed");
+		return rc;
+	}
+
+	dma_dir = cam_smmu_translate_dir(dir);
+	idx = GET_SMMU_TABLE_IDX(handle);
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].is_secure) {
+		CAM_ERR(CAM_SMMU,
+			"Error: can't map non-secure mem to secure cb");
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU, "hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	if (iommu_cb_set.cb_info[idx].state != CAM_SMMU_ATTACH) {
+		CAM_ERR(CAM_SMMU,
+			"Err:Dev %s should call SMMU attach before map buffer",
+			iommu_cb_set.cb_info[idx].name);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	buf_state = cam_smmu_check_dma_buf_in_list(idx, buf,
+		paddr_ptr, len_ptr);
+	if (buf_state == CAM_SMMU_BUFF_EXIST) {
+		CAM_ERR(CAM_SMMU,
+			"dma_buf :%pK already in the list", buf);
+		rc = -EALREADY;
+		goto get_addr_end;
+	}
+
+	rc = cam_smmu_map_kernel_buffer_and_add_to_list(idx, buf, dma_dir,
+			paddr_ptr, len_ptr, region_id);
+	if (rc < 0)
+		CAM_ERR(CAM_SMMU, "mapping or add list fail");
+
+get_addr_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_map_kernel_iova);
 
 int cam_smmu_get_iova(int handle, int ion_fd,
 	dma_addr_t *paddr_ptr, size_t *len_ptr)
@@ -2339,12 +2575,9 @@
 }
 EXPORT_SYMBOL(cam_smmu_get_stage2_iova);
 
-int cam_smmu_unmap_iova(int handle,
-	int ion_fd,
-	enum cam_smmu_region_id region_id)
+static int cam_smmu_unmap_validate_params(int handle)
 {
-	int idx, rc;
-	struct cam_dma_buff_info *mapping_info;
+	int idx;
 
 	if (handle == HANDLE_INIT) {
 		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
@@ -2360,13 +2593,30 @@
 		return -EINVAL;
 	}
 
+	return 0;
+}
+
+int cam_smmu_unmap_user_iova(int handle,
+	int ion_fd, enum cam_smmu_region_id region_id)
+{
+	int idx, rc;
+	struct cam_dma_buff_info *mapping_info;
+
+	rc = cam_smmu_unmap_validate_params(handle);
+	if (rc) {
+		CAM_ERR(CAM_SMMU, "unmap util validation failure");
+		return rc;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
 	if (iommu_cb_set.cb_info[idx].is_secure) {
 		CAM_ERR(CAM_SMMU,
 			"Error: can't unmap non-secure mem from secure cb");
-		return -EINVAL;
+		rc = -EINVAL;
+		goto unmap_end;
 	}
 
-	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
 	if (iommu_cb_set.cb_info[idx].handle != handle) {
 		CAM_ERR(CAM_SMMU,
 			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
@@ -2375,10 +2625,12 @@
 		goto unmap_end;
 	}
 
-	/* Based on ion fd and index, we can find mapping info of buffer */
+	/* Based on ion_fd & index, we can find mapping info of buffer */
 	mapping_info = cam_smmu_find_mapping_by_ion_index(idx, ion_fd);
+
 	if (!mapping_info) {
-		CAM_ERR(CAM_SMMU, "Error: Invalid params idx = %d, fd = %d",
+		CAM_ERR(CAM_SMMU,
+			"Error: Invalid params idx = %d, fd = %d",
 			idx, ion_fd);
 		rc = -EINVAL;
 		goto unmap_end;
@@ -2394,7 +2646,60 @@
 	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
 	return rc;
 }
-EXPORT_SYMBOL(cam_smmu_unmap_iova);
+EXPORT_SYMBOL(cam_smmu_unmap_user_iova);
+
+int cam_smmu_unmap_kernel_iova(int handle,
+	struct dma_buf *buf, enum cam_smmu_region_id region_id)
+{
+	int idx, rc;
+	struct cam_dma_buff_info *mapping_info;
+
+	rc = cam_smmu_unmap_validate_params(handle);
+	if (rc) {
+		CAM_ERR(CAM_SMMU, "unmap util validation failure");
+		return rc;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].is_secure) {
+		CAM_ERR(CAM_SMMU,
+			"Error: can't unmap non-secure mem from secure cb");
+		rc = -EINVAL;
+		goto unmap_end;
+	}
+
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto unmap_end;
+	}
+
+	/* Based on dma_buf & index, we can find mapping info of buffer */
+	mapping_info = cam_smmu_find_mapping_by_dma_buf(idx, buf);
+
+	if (!mapping_info) {
+		CAM_ERR(CAM_SMMU,
+			"Error: Invalid params idx = %d, dma_buf = %pK",
+			idx, buf);
+		rc = -EINVAL;
+		goto unmap_end;
+	}
+
+	/* Unmapping one buffer from device */
+	CAM_DBG(CAM_SMMU, "SMMU: removing buffer idx = %d", idx);
+	rc = cam_smmu_unmap_buf_and_remove_from_list(mapping_info, idx);
+	if (rc < 0)
+		CAM_ERR(CAM_SMMU, "Error: unmap or remove list fail");
+
+unmap_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_unmap_kernel_iova);
+
 
 int cam_smmu_put_iova(int handle, int ion_fd)
 {
@@ -2467,10 +2772,18 @@
 	}
 
 	if (!list_empty_careful(&iommu_cb_set.cb_info[idx].smmu_buf_list)) {
-		CAM_ERR(CAM_SMMU, "Client %s buffer list is not clean",
+		CAM_ERR(CAM_SMMU, "UMD %s buffer list is not clean",
 			iommu_cb_set.cb_info[idx].name);
-		cam_smmu_print_list(idx);
-		cam_smmu_clean_buffer_list(idx);
+		cam_smmu_print_user_list(idx);
+		cam_smmu_clean_user_buffer_list(idx);
+	}
+
+	if (!list_empty_careful(
+		&iommu_cb_set.cb_info[idx].smmu_buf_kernel_list)) {
+		CAM_ERR(CAM_SMMU, "KMD %s buffer list is not clean",
+			iommu_cb_set.cb_info[idx].name);
+		cam_smmu_print_kernel_list(idx);
+		cam_smmu_clean_kernel_buffer_list(idx);
 	}
 
 	if (&iommu_cb_set.cb_info[idx].is_secure) {
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
index 15e1fa4..b062258 100644
--- a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
@@ -86,7 +86,7 @@
 int cam_smmu_ops(int handle, enum cam_smmu_ops_param op);
 
 /**
- * @brief       : Maps IOVA for calling driver
+ * @brief       : Maps user space IOVA for calling driver
  *
  * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
  * @param ion_fd: ION handle identifying the memory buffer.
@@ -96,25 +96,54 @@
  *                returned if region_id is CAM_SMMU_REGION_IO. If region_id is
  *                CAM_SMMU_REGION_SHARED, dma_addr is used as an input parameter
  *                which specifies the cpu virtual address to map.
- * @len         : Length of buffer mapped returned by CAM SMMU driver.
+ * @len_ptr     : Length of the mapped buffer returned by CAM SMMU driver.
  * @return Status of operation. Negative in case of error. Zero otherwise.
  */
-int cam_smmu_map_iova(int handle,
+int cam_smmu_map_user_iova(int handle,
 	int ion_fd, enum cam_smmu_map_dir dir,
 	dma_addr_t *dma_addr, size_t *len_ptr,
 	enum cam_smmu_region_id region_id);
 
 /**
- * @brief       : Unmaps IOVA for calling driver
+ * @brief        : Maps kernel space IOVA for calling driver
+ *
+ * @param handle : Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param buf    : dma_buf allocated for kernel usage in mem_mgr
+ * @dir          : Mapping direction which translates to DMA_BIDIRECTIONAL,
+ *                 DMA_TO_DEVICE or DMA_FROM_DEVICE
+ * @dma_addr     : Pointer to physical address where mapped address will be
+ *                 returned if region_id is CAM_SMMU_REGION_IO. If region_id is
+ *                 CAM_SMMU_REGION_SHARED, dma_addr is used as an input
+ *                 parameter which specifies the cpu virtual address to map.
+ * @len_ptr      : Length of the mapped buffer returned by CAM SMMU driver.
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_map_kernel_iova(int handle,
+	struct dma_buf *buf, enum cam_smmu_map_dir dir,
+	dma_addr_t *dma_addr, size_t *len_ptr,
+	enum cam_smmu_region_id region_id);
+
+/**
+ * @brief       : Unmaps user space IOVA for calling driver
  *
  * @param handle: Handle to identify the CAMSMMU client (VFE, CPP, FD etc.)
  * @param ion_fd: ION handle identifying the memory buffer.
  *
  * @return Status of operation. Negative in case of error. Zero otherwise.
  */
-int cam_smmu_unmap_iova(int handle,
-	int ion_fd,
-	enum cam_smmu_region_id region_id);
+int cam_smmu_unmap_user_iova(int handle,
+	int ion_fd, enum cam_smmu_region_id region_id);
+
+/**
+ * @brief       : Unmaps kernel space IOVA for calling driver
+ *
+ * @param handle: Handle to identify the CAMSMMU client (VFE, CPP, FD etc.)
+ * @param buf   : dma_buf allocated for the kernel
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_unmap_kernel_iova(int handle,
+	struct dma_buf *buf, enum cam_smmu_region_id region_id);
 
 /**
  * @brief          : Allocates a scratch buffer
@@ -296,14 +325,14 @@
  * @brief Reserves secondary heap
  *
  * @param smmu_hdl: SMMU handle identifying the context bank
- * @param ion_fd: ION fd backing the secondary heap in DDR
  * @param iova: IOVA of secondary heap after reservation has completed
+ * @param buf: Allocated dma_buf for secondary heap
  * @param request_len: Length of secondary heap after reservation has completed
  *
  * @return Status of operation. Negative in case of error. Zero otherwise.
  */
 int cam_smmu_reserve_sec_heap(int32_t smmu_hdl,
-	int ion_fd,
+	struct dma_buf *buf,
 	dma_addr_t *iova,
 	size_t *request_len);
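For reference, a minimal caller of the kernel-buffer APIs declared above could look like the sketch below. Only the signatures introduced in this patch are relied on; CAM_SMMU_MAP_RW is an assumed direction enumerator, and CAM_SMMU_REGION_IO is the region id described in the map documentation.

	/* Sketch only: map a kernel-owned dma_buf, use it, then unmap it. */
	static int example_map_kernel_buf(int smmu_hdl, struct dma_buf *buf)
	{
		dma_addr_t iova = 0;
		size_t len = 0;
		int rc;

		/* CAM_SMMU_MAP_RW is an assumed enumerator name */
		rc = cam_smmu_map_kernel_iova(smmu_hdl, buf, CAM_SMMU_MAP_RW,
			&iova, &len, CAM_SMMU_REGION_IO);
		if (rc)
			return rc;

		/* ... program iova/len into hardware ... */

		return cam_smmu_unmap_kernel_iova(smmu_hdl, buf,
			CAM_SMMU_REGION_IO);
	}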
 
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
index ee3e3b4..2422016 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
@@ -25,22 +25,25 @@
 {
 	int rc;
 	long idx;
+	bool bit;
 
 	do {
 		idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
-			if (idx >= CAM_SYNC_MAX_OBJS)
-				return -ENOMEM;
-	} while (!spin_trylock_bh(&sync_dev->row_spinlocks[idx]));
+		if (idx >= CAM_SYNC_MAX_OBJS)
+			return -ENOMEM;
+		bit = test_and_set_bit(idx, sync_dev->bitmap);
+	} while (bit);
 
+	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
 	rc = cam_sync_init_object(sync_dev->sync_table, idx, name);
 	if (rc) {
 		CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
 			idx);
+		clear_bit(idx, sync_dev->bitmap);
 		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
 		return -EINVAL;
 	}
 
-	set_bit(idx, sync_dev->bitmap);
 	*sync_obj = idx;
 	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
 
@@ -299,6 +302,7 @@
 {
 	int rc;
 	long idx = 0;
+	bool bit;
 
 	if (!sync_obj || !merged_obj) {
 		CAM_ERR(CAM_SYNC, "Invalid pointer(s)");
@@ -312,30 +316,28 @@
 		return -EINVAL;
 	}
 
-	rc = cam_sync_util_find_and_set_empty_row(sync_dev, &idx);
-	if (rc < 0) {
-		CAM_ERR(CAM_SYNC,
-			"Error: Unable to find empty row, table full");
-		return -EINVAL;
-	}
+	do {
+		idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
+		if (idx >= CAM_SYNC_MAX_OBJS)
+			return -ENOMEM;
+		bit = test_and_set_bit(idx, sync_dev->bitmap);
+	} while (bit);
 
-	if (idx <= 0 || idx >= CAM_SYNC_MAX_OBJS) {
-		CAM_ERR(CAM_SYNC,
-			"Error: Invalid empty row index returned = %ld", idx);
-		return -EINVAL;
-	}
+	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
 
 	rc = cam_sync_init_group_object(sync_dev->sync_table,
 		idx, sync_obj,
 		num_objs);
-
 	if (rc < 0) {
 		CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
 			idx);
+		clear_bit(idx, sync_dev->bitmap);
+		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
 		return -EINVAL;
 	}
 
 	*merged_obj = idx;
+	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
 
 	return 0;
 }
@@ -347,15 +349,19 @@
 	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
 		return -EINVAL;
 
+	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
 	row = sync_dev->sync_table + sync_obj;
 	if (row->state == CAM_SYNC_STATE_INVALID) {
 		CAM_ERR(CAM_SYNC,
 			"Error: accessing an uninitialized sync obj: idx = %d",
 			sync_obj);
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
 		return -EINVAL;
 	}
 
 	cam_sync_deinit_object(sync_dev->sync_table, sync_obj);
+	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+
 	return 0;
 }
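The sync object allocation above now reserves a table slot atomically before any locking: find_first_zero_bit() proposes a free index and test_and_set_bit() claims it, so two callers can never initialize the same row, and the bit is cleared again if row initialization fails. The reservation loop in isolation (a sketch, not part of the patch):

	static long example_reserve_row(unsigned long *bitmap,
		unsigned int max_objs)
	{
		long idx;

		do {
			idx = find_first_zero_bit(bitmap, max_objs);
			if (idx >= max_objs)
				return -ENOMEM;	/* table full */
		} while (test_and_set_bit(idx, bitmap));

		return idx;	/* caller clears the bit if init fails */
	}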
 
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
index 4268f0b..f66b882 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
@@ -131,7 +131,6 @@
 	struct sync_table_row *row = table + idx;
 	struct sync_table_row *child_row = NULL;
 
-	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
 	INIT_LIST_HEAD(&row->parents_list);
 
 	INIT_LIST_HEAD(&row->children_list);
@@ -147,7 +146,6 @@
 		if (!child_info) {
 			cam_sync_util_cleanup_children_list(
 				&row->children_list);
-			spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
 			return -ENOMEM;
 		}
 
@@ -166,7 +164,6 @@
 			cam_sync_util_cleanup_children_list(
 				&row->children_list);
 			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
-			spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
 			return -ENOMEM;
 		}
 		parent_info->sync_id = idx;
@@ -182,7 +179,6 @@
 		sync_objs, num_objs);
 	if (remaining < 0) {
 		CAM_ERR(CAM_SYNC, "Failed getting remaining count");
-		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
 		return -ENODEV;
 	}
 
@@ -195,7 +191,6 @@
 	if (row->state != CAM_SYNC_STATE_ACTIVE)
 		complete_all(&row->signaled);
 
-	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
 	return 0;
 }
 
@@ -210,7 +205,6 @@
 	if (!table || idx <= 0 || idx >= CAM_SYNC_MAX_OBJS)
 		return -EINVAL;
 
-	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
 	clear_bit(idx, sync_dev->bitmap);
 	list_for_each_entry_safe(child_info, temp_child,
 				&row->children_list, list) {
@@ -238,7 +232,6 @@
 
 	row->state = CAM_SYNC_STATE_INVALID;
 	memset(row, 0, sizeof(*row));
-	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
 
 	return 0;
 }
@@ -249,11 +242,14 @@
 		struct sync_callback_info,
 		cb_dispatch_work);
 
+	spin_lock_bh(&sync_dev->row_spinlocks[cb_info->sync_obj]);
+	list_del_init(&cb_info->list);
+	spin_unlock_bh(&sync_dev->row_spinlocks[cb_info->sync_obj]);
+
 	cb_info->callback_func(cb_info->sync_obj,
 		cb_info->status,
 		cb_info->cb_data);
 
-	list_del_init(&cb_info->list);
 	kfree(cb_info);
 }
 
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.h
index 9745d45..c0160c4 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.h
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.h
@@ -34,6 +34,7 @@
 #define CAM_HFI        (1 << 18)
 #define CAM_CTXT       (1 << 19)
 #define CAM_OIS        (1 << 20)
+#define CAM_RES        (1 << 21)
 
 #define STR_BUFFER_MAX_LENGTH  1024
 
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
index 743dfda..611c4e9 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
@@ -196,6 +196,92 @@
 	return rc;
 }
 
+int cam_soc_util_clk_put(struct clk **clk)
+{
+	if (!(*clk)) {
+		CAM_ERR(CAM_UTIL, "Invalid params clk");
+		return -EINVAL;
+	}
+
+	clk_put(*clk);
+	*clk = NULL;
+
+	return 0;
+}
+
+static struct clk *cam_soc_util_option_clk_get(struct device_node *np,
+	int index)
+{
+	struct of_phandle_args clkspec;
+	struct clk *clk;
+	int rc;
+
+	if (index < 0)
+		return ERR_PTR(-EINVAL);
+
+	rc = of_parse_phandle_with_args(np, "clocks-option", "#clock-cells",
+		index, &clkspec);
+	if (rc)
+		return ERR_PTR(rc);
+
+	clk = of_clk_get_from_provider(&clkspec);
+	of_node_put(clkspec.np);
+
+	return clk;
+}
+
+int cam_soc_util_get_option_clk_by_name(struct cam_hw_soc_info *soc_info,
+	const char *clk_name, struct clk **clk, int32_t *clk_index,
+	int32_t *clk_rate)
+{
+	int index = 0;
+	int rc = 0;
+	struct device_node *of_node = NULL;
+
+	if (!soc_info || !clk_name || !clk) {
+		CAM_ERR(CAM_UTIL,
+			"Invalid params soc_info %pK clk_name %s clk %pK",
+			soc_info, clk_name, clk);
+		return -EINVAL;
+	}
+
+	of_node = soc_info->dev->of_node;
+
+	index = of_property_match_string(of_node, "clock-names-option",
+		clk_name);
+
+	*clk = cam_soc_util_option_clk_get(of_node, index);
+	if (IS_ERR(*clk)) {
+		CAM_ERR(CAM_UTIL, "No clk named %s found. Dev %s", clk_name,
+			soc_info->dev_name);
+		*clk_index = -1;
+		return -EFAULT;
+	}
+	*clk_index = index;
+
+	rc = of_property_read_u32_index(of_node, "clock-rates-option",
+		index, clk_rate);
+	if (rc) {
+		CAM_ERR(CAM_UTIL,
+			"Error reading clock-rates-option clk_name %s index %d",
+			clk_name, index);
+		cam_soc_util_clk_put(clk);
+		*clk_rate = 0;
+		return rc;
+	}
+
+	/*
+	 * Option clocks are assumed to be available to a single device here.
+	 * Hence use INIT_RATE instead of NO_SET_RATE.
+	 */
+	*clk_rate = (*clk_rate == 0) ? (int32_t)INIT_RATE : *clk_rate;
+
+	CAM_DBG(CAM_UTIL, "clk_name %s index %d clk_rate %d",
+		clk_name, *clk_index, *clk_rate);
+
+	return 0;
+}
+
 int cam_soc_util_clk_enable(struct clk *clk, const char *clk_name,
 	int32_t clk_rate)
 {
@@ -407,7 +493,7 @@
 
 			soc_info->clk_rate[level][j] =
 				(soc_info->clk_rate[level][j] == 0) ?
-				(long)NO_SET_RATE :
+				(int32_t)NO_SET_RATE :
 				soc_info->clk_rate[level][j];
 
 			CAM_DBG(CAM_UTIL, "soc_info->clk_rate[%d][%d] = %d",
@@ -766,7 +852,8 @@
 
 	count = of_property_count_strings(of_node, "reg-names");
 	if (count <= 0) {
-		CAM_ERR(CAM_UTIL, "no reg-names found");
+		CAM_WARN(CAM_UTIL, "no reg-names found for: %s",
+			soc_info->dev_name);
 		count = 0;
 	}
 	soc_info->num_mem_block = count;
@@ -802,7 +889,8 @@
 	rc = of_property_read_string_index(of_node, "interrupt-names", 0,
 		&soc_info->irq_name);
 	if (rc) {
-		CAM_WARN(CAM_UTIL, "No interrupt line present");
+		CAM_WARN(CAM_UTIL, "No interrupt line present for: %s",
+			soc_info->dev_name);
 		rc = 0;
 	} else {
 		soc_info->irq_line =
@@ -1267,14 +1355,14 @@
 	if (!soc_info)
 		return -EINVAL;
 
+	if (disble_irq)
+		rc |= cam_soc_util_irq_disable(soc_info);
+
 	if (disable_clocks)
 		cam_soc_util_clk_disable_default(soc_info);
 
 	cam_soc_util_regulator_disable_default(soc_info);
 
-	if (disble_irq)
-		rc |= cam_soc_util_irq_disable(soc_info);
-
 	if (soc_info->pinctrl_info.pinctrl &&
 		soc_info->pinctrl_info.gpio_state_suspend)
 		rc = pinctrl_select_state(soc_info->pinctrl_info.pinctrl,
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
index 8bd8275..5123ec4 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
@@ -377,6 +377,35 @@
 	int32_t clk_rate);
 
 /**
+ * cam_soc_util_get_option_clk_by_name()
+ *
+ * @brief:              Get reference to optional clk using name
+ *
+ * @soc_info:           Device soc information
+ * @clk_name:           Name of clock to find reference for
+ * @clk:                Clock reference pointer to be filled if Success
+ * @clk_index:          Clk index in the option clk array to be returned
+ * @clk_rate:           Clk rate in the option clk array
+ *
+ * @return:             0: Success
+ *                      Negative: Failure
+ */
+int cam_soc_util_get_option_clk_by_name(struct cam_hw_soc_info *soc_info,
+	const char *clk_name, struct clk **clk, int32_t *clk_index,
+	int32_t *clk_rate);
+
+/**
+ * cam_soc_util_clk_put()
+ *
+ * @brief:              Put clock specified in params
+ *
+ * @clk:                Reference to the Clock that needs to be put
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_clk_put(struct clk **clk);
+
+/**
  * cam_soc_util_clk_enable()
  *
  * @brief:              Enable clock specified in params
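The optional-clock helpers declared above pair a lookup against the clocks-option, clock-names-option and clock-rates-option device tree properties with an explicit put. A minimal caller might look like the following sketch; the clock name "cam_icp_clk" is purely illustrative and the matching disable call is left out.

	static int example_use_option_clk(struct cam_hw_soc_info *soc_info)
	{
		struct clk *clk = NULL;
		int32_t clk_index = -1;
		int32_t clk_rate = 0;
		int rc;

		/* "cam_icp_clk" is a hypothetical clock-names-option entry */
		rc = cam_soc_util_get_option_clk_by_name(soc_info,
			"cam_icp_clk", &clk, &clk_index, &clk_rate);
		if (rc)
			return rc;

		rc = cam_soc_util_clk_enable(clk, "cam_icp_clk", clk_rate);

		/* ... use the clock, then disable it before the put ... */

		cam_soc_util_clk_put(&clk);
		return rc;
	}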
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
index 39793b6..0a01b6f 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
@@ -877,6 +877,7 @@
 		mpq_demux->sdmx_eos = 0;
 		mpq_demux->sdmx_log_level = SDMX_LOG_NO_PRINT;
 		mpq_demux->ts_packet_timestamp_source = 0;
+		mpq_demux->disable_cache_ops = 1;
 
 		if (mpq_demux->demux.feednum > MPQ_MAX_DMX_FILES) {
 			MPQ_DVB_ERR_PRINT(
@@ -6349,7 +6350,8 @@
 			continue;
 
 		/* Invalidate output buffer before processing the results */
-		mpq_sdmx_invalidate_buffer(mpq_feed);
+		if (!mpq_demux->disable_cache_ops)
+			mpq_sdmx_invalidate_buffer(mpq_feed);
 
 		if (sts->error_indicators & SDMX_FILTER_ERR_MD_BUF_FULL)
 			MPQ_DVB_ERR_PRINT(
@@ -6571,13 +6573,15 @@
 	 * We must flush the buffer before SDMX starts reading from it
 	 * so that it gets a valid data in memory.
 	 */
-	ret = msm_ion_do_cache_op(mpq_demux->ion_client,
-		ion_handle, rbuf->data,
-		rbuf->size, ION_IOC_CLEAN_CACHES);
-	if (ret)
-		MPQ_DVB_ERR_PRINT(
-			"%s: msm_ion_do_cache_op failed, ret = %d\n",
-			__func__, ret);
+	if (!mpq_demux->disable_cache_ops) {
+		ret = msm_ion_do_cache_op(mpq_demux->ion_client,
+					  ion_handle, rbuf->data,
+					  rbuf->size, ION_IOC_CLEAN_CACHES);
+		if (ret)
+			MPQ_DVB_ERR_PRINT(
+				"%s: msm_ion_do_cache_op failed, ret = %d\n",
+				__func__, ret);
+	}
 
 	return mpq_sdmx_process(mpq_demux, &buf_desc, count,
 				read_offset, mpq_demux->demux.ts_packet_size);
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h
index 0c20a89..a187707 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h
@@ -543,6 +543,8 @@
 
 	ktime_t last_notification_time;
 	int ts_packet_timestamp_source;
+	/* Disable cache operations on qseecom heap since not supported */
+	int disable_cache_ops;
 };
 
 /**
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
index ae01baf..dc041a7 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
@@ -139,6 +139,43 @@
 	return clk_forced_on;
 }
 
+void sde_mdp_halt_vbif_xin(struct sde_mdp_vbif_halt_params *params)
+{
+	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+	u32 reg_val;
+	bool forced_on;
+
+	if (!mdata || !params || !params->reg_off_mdp_clk_ctrl) {
+		SDEROT_ERR("null input parameter\n");
+		return;
+	}
+
+	if (params->xin_id > MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN1) {
+		SDEROT_ERR("xin_id:%d exceed max limit\n", params->xin_id);
+		return;
+	}
+
+	forced_on = force_on_xin_clk(params->bit_off_mdp_clk_ctrl,
+		params->reg_off_mdp_clk_ctrl, true);
+
+	SDEROT_EVTLOG(forced_on, params->xin_id);
+
+	reg_val = SDE_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL0);
+	SDE_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
+		reg_val | BIT(params->xin_id));
+
+	/* this is a polling operation */
+	sde_mdp_wait_for_xin_halt(params->xin_id);
+
+	reg_val = SDE_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL0);
+	SDE_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
+		reg_val & ~BIT(params->xin_id));
+
+	if (forced_on)
+		force_on_xin_clk(params->bit_off_mdp_clk_ctrl,
+			params->reg_off_mdp_clk_ctrl, false);
+}
+
 u32 sde_mdp_get_ot_limit(u32 width, u32 height, u32 pixfmt, u32 fps, u32 is_rd)
 {
 	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
index 400f53b..c85d255 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
@@ -63,6 +63,18 @@
 	u32 rotsts_busy_mask;
 };
 
+/*
+ * struct sde_mdp_vbif_halt_params: parameters to issue a halt request to vbif
+ * @xin_id: xin port number of vbif
+ * @reg_off_mdp_clk_ctrl: reg offset for vbif clock control
+ * @bit_off_mdp_clk_ctrl: bit offset for vbif clock control
+ */
+struct sde_mdp_vbif_halt_params {
+	u32 xin_id;
+	u32 reg_off_mdp_clk_ctrl;
+	u32 bit_off_mdp_clk_ctrl;
+};
+
 enum sde_bus_vote_type {
 	VOTE_INDEX_DISABLE,
 	VOTE_INDEX_19_MHZ,
@@ -276,6 +288,8 @@
 
 void sde_mdp_set_ot_limit(struct sde_mdp_set_ot_params *params);
 
+void sde_mdp_halt_vbif_xin(struct sde_mdp_vbif_halt_params *params);
+
 int sde_mdp_init_vbif(void);
 
 #define SDE_VBIF_WRITE(mdata, offset, value) \
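sde_mdp_halt_vbif_xin() is meant to be called with a stack-allocated parameter block filled from the caller's clock-control register layout, as the rotator r3 change later in this patch does for its SSPP and writeback clients. A minimal sketch with placeholder register/bit offsets:

	static void example_halt_xin(u32 xin_id, u32 clk_ctrl_reg,
		u32 clk_ctrl_bit)
	{
		struct sde_mdp_vbif_halt_params halt_params;

		memset(&halt_params, 0, sizeof(halt_params));
		halt_params.xin_id = xin_id;
		halt_params.reg_off_mdp_clk_ctrl = clk_ctrl_reg;
		halt_params.bit_off_mdp_clk_ctrl = clk_ctrl_bit;
		sde_mdp_halt_vbif_xin(&halt_params);
	}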
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index d19e475..c7d1074 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -3182,6 +3182,26 @@
 	devm_kfree(dev, mgr);
 }
 
+void sde_rotator_core_dump(struct sde_rot_mgr *mgr)
+{
+	if (!mgr) {
+		SDEROT_ERR("null parameters\n");
+		return;
+	}
+
+	sde_rotator_resource_ctrl(mgr, true);
+	/* dump first snapshot */
+	if (mgr->ops_hw_dump_status)
+		mgr->ops_hw_dump_status(mgr->hw_data);
+
+	SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus", "vbif_dbg_bus");
+
+	/* dump second snapshot for comparison */
+	if (mgr->ops_hw_dump_status)
+		mgr->ops_hw_dump_status(mgr->hw_data);
+	sde_rotator_resource_ctrl(mgr, false);
+}
+
 static void sde_rotator_suspend_cancel_rot_work(struct sde_rot_mgr *mgr)
 {
 	struct sde_rot_file_private *priv, *priv_next;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
index 3edb2d0..e23ed7a 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
@@ -478,6 +478,7 @@
 	int (*ops_hw_get_downscale_caps)(struct sde_rot_mgr *mgr, char *caps,
 			int len);
 	int (*ops_hw_get_maxlinewidth)(struct sde_rot_mgr *mgr);
+	void (*ops_hw_dump_status)(struct sde_rot_mgr *mgr);
 
 	void *hw_data;
 };
@@ -570,6 +571,12 @@
 void sde_rotator_core_destroy(struct sde_rot_mgr *mgr);
 
 /*
+ * sde_rotator_core_dump - perform register dump
+ * @mgr: Pointer to rotator manager
+ */
+void sde_rotator_core_dump(struct sde_rot_mgr *mgr);
+
+/*
  * sde_rotator_session_open - open a new rotator per file session
  * @mgr: Pointer to rotator manager
  * @pprivate: Pointer to pointer of the newly initialized per file session
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
index 46f64d2..b9158e1 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
@@ -638,18 +638,6 @@
 }
 
 /*
- * sde_rot_dump_panic - Issue evtlog dump and generic panic
- */
-void sde_rot_dump_panic(bool do_panic)
-{
-	sde_rot_evtlog_dump_all();
-	sde_rot_dump_reg_all();
-
-	if (do_panic)
-		panic("sde_rotator");
-}
-
-/*
  * sde_rot_evtlog_tout_handler - log dump timeout handler
  * @queue: boolean indicate putting log dump into queue
  * @name: function name having timeout
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h
index 2fc8e3f..fa53083 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h
@@ -42,7 +42,6 @@
 		SDE_ROT_EVTLOG_TOUT_DATA_LIMITER)
 
 void sde_rot_evtlog(const char *name, int line, int flag, ...);
-void sde_rot_dump_panic(bool do_panic);
 void sde_rot_evtlog_tout_handler(bool queue, const char *name, ...);
 
 struct sde_rotator_device;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
index 52aadfa..13c5098 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
@@ -1731,6 +1731,7 @@
 
 		sde_rotator_req_finish(rot_dev->mgr, ctx->private, req);
 		sde_rotator_retire_request(request);
+		cmd->priv_handle = NULL;
 	} else if (cmd_type == SDE_ROTATOR_INLINE_CMD_ABORT) {
 		if (!cmd->priv_handle) {
 			ret = -EINVAL;
@@ -1739,8 +1740,9 @@
 		}
 
 		request = cmd->priv_handle;
-		sde_rotator_abort_inline_request(rot_dev->mgr,
-				ctx->private, request->req);
+		if (!sde_rotator_is_request_retired(request))
+			sde_rotator_abort_inline_request(rot_dev->mgr,
+					ctx->private, request->req);
 	}
 
 	sde_rot_mgr_unlock(rot_dev->mgr);
@@ -1762,7 +1764,22 @@
 
 void sde_rotator_inline_reg_dump(struct platform_device *pdev)
 {
-	sde_rot_dump_panic(false);
+	struct sde_rotator_device *rot_dev;
+
+	if (!pdev) {
+		SDEROT_ERR("invalid platform device\n");
+		return;
+	}
+
+	rot_dev = (struct sde_rotator_device *) platform_get_drvdata(pdev);
+	if (!rot_dev || !rot_dev->mgr) {
+		SDEROT_ERR("invalid rotator device\n");
+		return;
+	}
+
+	sde_rot_mgr_lock(rot_dev->mgr);
+	sde_rotator_core_dump(rot_dev->mgr);
+	sde_rot_mgr_unlock(rot_dev->mgr);
 }
 EXPORT_SYMBOL(sde_rotator_inline_reg_dump);
 
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index c5da084..c3849a8 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -55,7 +55,7 @@
  * When in sbuf mode, select a much longer wait, to allow the other driver
  * to detect timeouts and abort if necessary.
  */
-#define KOFF_TIMEOUT_SBUF	(2000)
+#define KOFF_TIMEOUT_SBUF	(10000)
 
 /* default stream buffer headroom in lines */
 #define DEFAULT_SBUF_HEADROOM	20
@@ -133,6 +133,9 @@
 #define SDE_ROTREG_READ(base, off) \
 	readl_relaxed(base + (off))
 
+#define SDE_ROTTOP_IN_OFFLINE_MODE(_rottop_op_mode_) \
+	(((_rottop_op_mode_) & ROTTOP_OP_MODE_ROT_OUT_MASK) == 0)
+
 static const u32 sde_hw_rotator_v3_inpixfmts[] = {
 	SDE_PIX_FMT_XRGB_8888,
 	SDE_PIX_FMT_ARGB_8888,
@@ -537,6 +540,8 @@
 		SDE_ROT_REGDUMP_READ },
 	{ "SDEROT_VBIF_NRT", SDE_ROT_VBIF_NRT_OFFSET, 0x590,
 		SDE_ROT_REGDUMP_VBIF },
+	{ "SDEROT_REGDMA_RESET", ROTTOP_SW_RESET_OVERRIDE, 0,
+		SDE_ROT_REGDUMP_WRITE },
 };
 
 struct sde_rot_cdp_params {
@@ -672,10 +677,29 @@
 	}
 }
 
+static void sde_hw_rotator_halt_vbif_xin_client(void)
+{
+	struct sde_mdp_vbif_halt_params halt_params;
+
+	memset(&halt_params, 0, sizeof(struct sde_mdp_vbif_halt_params));
+	halt_params.xin_id = XIN_SSPP;
+	halt_params.reg_off_mdp_clk_ctrl = MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
+	halt_params.bit_off_mdp_clk_ctrl =
+		MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN0;
+	sde_mdp_halt_vbif_xin(&halt_params);
+
+	memset(&halt_params, 0, sizeof(struct sde_mdp_vbif_halt_params));
+	halt_params.xin_id = XIN_WRITEBACK;
+	halt_params.reg_off_mdp_clk_ctrl = MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
+	halt_params.bit_off_mdp_clk_ctrl =
+		MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN1;
+	sde_mdp_halt_vbif_xin(&halt_params);
+}
+
 /**
  * sde_hw_rotator_reset - Reset rotator hardware
  * @rot: pointer to hw rotator
- * @ctx: pointer to current rotator context during the hw hang
+ * @ctx: pointer to current rotator context during the hw hang (optional)
  */
 static int sde_hw_rotator_reset(struct sde_hw_rotator *rot,
 		struct sde_hw_rotator_context *ctx)
@@ -689,13 +713,8 @@
 	int i, j;
 	unsigned long flags;
 
-	if (!rot || !ctx) {
-		SDEROT_ERR("NULL rotator context\n");
-		return -EINVAL;
-	}
-
-	if (ctx->q_id >= ROT_QUEUE_MAX) {
-		SDEROT_ERR("context q_id out of range: %d\n", ctx->q_id);
+	if (!rot) {
+		SDEROT_ERR("NULL rotator\n");
 		return -EINVAL;
 	}
 
@@ -704,6 +723,18 @@
 	usleep_range(MS_TO_US(10), MS_TO_US(20));
 	SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_SW_RESET_OVERRIDE, 0);
 
+	/* halt vbif xin client to ensure no pending transaction */
+	sde_hw_rotator_halt_vbif_xin_client();
+
+	/* if no ctx is specified, skip ctx wake up */
+	if (!ctx)
+		return 0;
+
+	if (ctx->q_id >= ROT_QUEUE_MAX) {
+		SDEROT_ERR("context q_id out of range: %d\n", ctx->q_id);
+		return -EINVAL;
+	}
+
 	spin_lock_irqsave(&rot->rotisr_lock, flags);
 
 	/* update timestamp register with current context */
@@ -759,10 +790,11 @@
 }
 
 /**
- * sde_hw_rotator_dump_status - Dump hw rotator status on error
+ * _sde_hw_rotator_dump_status - Dump hw rotator status on error
  * @rot: Pointer to hw rotator
  */
-static void sde_hw_rotator_dump_status(struct sde_hw_rotator *rot, u32 *ubwcerr)
+static void _sde_hw_rotator_dump_status(struct sde_hw_rotator *rot,
+		u32 *ubwcerr)
 {
 	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
 	u32 reg = 0;
@@ -794,6 +826,11 @@
 		SDE_ROTREG_READ(rot->mdss_base,
 			REGDMA_CSR_REGDMA_FSM_STATE));
 
+	SDEROT_ERR("rottop: op_mode = %x, status = %x, clk_status = %x\n",
+		SDE_ROTREG_READ(rot->mdss_base, ROTTOP_OP_MODE),
+		SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS),
+		SDE_ROTREG_READ(rot->mdss_base, ROTTOP_CLK_STATUS));
+
 	reg = SDE_ROTREG_READ(rot->mdss_base, ROT_SSPP_UBWC_ERROR_STATUS);
 	if (ubwcerr)
 		*ubwcerr = reg;
@@ -805,12 +842,35 @@
 		SDE_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL1),
 		SDE_VBIF_READ(mdata, MMSS_VBIF_AXI_HALT_CTRL1));
 
-	SDEROT_ERR(
-		"sbuf_status_plane0 = %x, sbuf_status_plane1 = %x\n",
-		SDE_ROTREG_READ(rot->mdss_base,
-			ROT_WB_SBUF_STATUS_PLANE0),
-		SDE_ROTREG_READ(rot->mdss_base,
-			ROT_WB_SBUF_STATUS_PLANE1));
+	SDEROT_ERR("sspp unpack wr: plane0 = %x, plane1 = %x, plane2 = %x\n",
+			SDE_ROTREG_READ(rot->mdss_base,
+				ROT_SSPP_FETCH_SMP_WR_PLANE0),
+			SDE_ROTREG_READ(rot->mdss_base,
+				ROT_SSPP_FETCH_SMP_WR_PLANE1),
+			SDE_ROTREG_READ(rot->mdss_base,
+				ROT_SSPP_FETCH_SMP_WR_PLANE2));
+	SDEROT_ERR("sspp unpack rd: plane0 = %x, plane1 = %x, plane2 = %x\n",
+			SDE_ROTREG_READ(rot->mdss_base,
+					ROT_SSPP_SMP_UNPACK_RD_PLANE0),
+			SDE_ROTREG_READ(rot->mdss_base,
+					ROT_SSPP_SMP_UNPACK_RD_PLANE1),
+			SDE_ROTREG_READ(rot->mdss_base,
+					ROT_SSPP_SMP_UNPACK_RD_PLANE2));
+	SDEROT_ERR("sspp: unpack_ln = %x, unpack_blk = %x, fill_lvl = %x\n",
+			SDE_ROTREG_READ(rot->mdss_base,
+				ROT_SSPP_UNPACK_LINE_COUNT),
+			SDE_ROTREG_READ(rot->mdss_base,
+				ROT_SSPP_UNPACK_BLK_COUNT),
+			SDE_ROTREG_READ(rot->mdss_base,
+				ROT_SSPP_FILL_LEVELS));
+
+	SDEROT_ERR("wb: sbuf0 = %x, sbuf1 = %x, sys_cache = %x\n",
+			SDE_ROTREG_READ(rot->mdss_base,
+				ROT_WB_SBUF_STATUS_PLANE0),
+			SDE_ROTREG_READ(rot->mdss_base,
+				ROT_WB_SBUF_STATUS_PLANE1),
+			SDE_ROTREG_READ(rot->mdss_base,
+				ROT_WB_SYS_CACHE_MODE));
 }
 
 /**
@@ -1931,7 +1991,7 @@
 			else if (status & REGDMA_INVALID_CMD)
 				SDEROT_ERR("REGDMA invalid command\n");
 
-			sde_hw_rotator_dump_status(rot, &ubwcerr);
+			_sde_hw_rotator_dump_status(rot, &ubwcerr);
 
 			if (ubwcerr || abort) {
 				/*
@@ -1976,12 +2036,12 @@
 		if (last_isr & REGDMA_INT_ERR_MASK) {
 			SDEROT_ERR("Rotator error, ts:0x%X/0x%X status:%x\n",
 				ctx->timestamp, swts, last_isr);
-			sde_hw_rotator_dump_status(rot, NULL);
+			_sde_hw_rotator_dump_status(rot, NULL);
 			status = ROT_ERROR_BIT;
 		} else if (pending) {
 			SDEROT_ERR("Rotator timeout, ts:0x%X/0x%X status:%x\n",
 				ctx->timestamp, swts, last_isr);
-			sde_hw_rotator_dump_status(rot, NULL);
+			_sde_hw_rotator_dump_status(rot, NULL);
 			status = ROT_ERROR_BIT;
 		} else {
 			status = 0;
@@ -2143,7 +2203,7 @@
 {
 	struct sde_hw_rotator *rot;
 	u32 l_ts, h_ts, swts, hwts;
-	u32 rotsts, regdmasts;
+	u32 rotsts, regdmasts, rotopmode;
 
 	/*
 	 * Check last HW timestamp with SW timestamp before power off event.
@@ -2168,19 +2228,37 @@
 		regdmasts = SDE_ROTREG_READ(rot->mdss_base,
 				REGDMA_CSR_REGDMA_BLOCK_STATUS);
 		rotsts = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
+		rotopmode = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_OP_MODE);
 
 		SDEROT_DBG(
-			"swts:0x%x, hwts:0x%x, regdma-sts:0x%x, rottop-sts:0x%x\n",
-				swts, hwts, regdmasts, rotsts);
-		SDEROT_EVTLOG(swts, hwts, regdmasts, rotsts);
+			"swts:0x%x, hwts:0x%x, regdma-sts:0x%x, rottop-sts:0x%x, rottop-opmode:0x%x\n",
+				swts, hwts, regdmasts, rotsts, rotopmode);
+		SDEROT_EVTLOG(swts, hwts, regdmasts, rotsts, rotopmode);
 
 		if ((swts != hwts) && ((regdmasts & REGDMA_BUSY) ||
 					(rotsts & ROT_STATUS_MASK))) {
 			SDEROT_ERR(
 				"Mismatch SWTS with HWTS: swts:0x%x, hwts:0x%x, regdma-sts:0x%x, rottop-sts:0x%x\n",
 				swts, hwts, regdmasts, rotsts);
+			_sde_hw_rotator_dump_status(rot, NULL);
 			SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus",
 					"vbif_dbg_bus", "panic");
+		} else if (!SDE_ROTTOP_IN_OFFLINE_MODE(rotopmode) &&
+				((regdmasts & REGDMA_BUSY) ||
+						(rotsts & ROT_BUSY_BIT))) {
+			/*
+			 * rotator can get stuck in inline mode if mdp detaches
+			 */
+			SDEROT_WARN(
+				"Inline Rot busy: regdma-sts:0x%x, rottop-sts:0x%x, rottop-opmode:0x%x\n",
+				regdmasts, rotsts, rotopmode);
+			sde_hw_rotator_reset(rot, NULL);
+		} else if ((regdmasts & REGDMA_BUSY) ||
+				(rotsts & ROT_BUSY_BIT)) {
+			_sde_hw_rotator_dump_status(rot, NULL);
+			SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus",
+					"vbif_dbg_bus", "panic");
+			sde_hw_rotator_reset(rot, NULL);
 		}
 
 		/* Turn off rotator clock after checking rotator registers */
@@ -2484,7 +2562,7 @@
 				if (status & BIT(0)) {
 					SDEROT_ERR("rotator busy 0x%x\n",
 							status);
-					sde_hw_rotator_dump_status(rot, NULL);
+					_sde_hw_rotator_dump_status(rot, NULL);
 					SDEROT_EVTLOG_TOUT_HANDLER("rot",
 							"vbif_dbg_bus",
 							"panic");
@@ -3472,6 +3550,21 @@
 }
 
 /*
+ * sde_hw_rotator_dump_status - dump status to debug output
+ * @mgr: Pointer to rotator manager
+ * return: none
+ */
+static void sde_hw_rotator_dump_status(struct sde_rot_mgr *mgr)
+{
+	if (!mgr || !mgr->hw_data) {
+		SDEROT_ERR("null parameters\n");
+		return;
+	}
+
+	_sde_hw_rotator_dump_status(mgr->hw_data, NULL);
+}
+
+/*
  * sde_hw_rotator_parse_dt - parse r3 specific device tree settings
  * @hw_data: Pointer to rotator hw
  * @dev: Pointer to platform device
@@ -3600,6 +3693,7 @@
 	mgr->ops_hw_post_pmevent = sde_hw_rotator_post_pmevent;
 	mgr->ops_hw_get_downscale_caps = sde_hw_rotator_get_downscale_caps;
 	mgr->ops_hw_get_maxlinewidth = sde_hw_rotator_get_maxlinewidth;
+	mgr->ops_hw_dump_status = sde_hw_rotator_dump_status;
 
 	ret = sde_hw_rotator_parse_dt(mgr->hw_data, mgr->pdev);
 	if (ret)
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
index 2afd032..aaaa28c 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
@@ -50,6 +50,8 @@
 #define ROTTOP_START_CTRL_TRIG_SEL_REGDMA       2
 #define ROTTOP_START_CTRL_TRIG_SEL_MDP          3
 
+#define ROTTOP_OP_MODE_ROT_OUT_MASK             (0x3 << 4)
+
 /* SDE_ROT_SSPP:
  * OFFSET=0x0A8900
  */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
index e75f36e..b817ff0 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
@@ -36,6 +36,14 @@
 #define SMMU_SDE_ROT_SEC	"qcom,smmu_sde_rot_sec"
 #define SMMU_SDE_ROT_UNSEC	"qcom,smmu_sde_rot_unsec"
 
+#ifndef SZ_4G
+#define SZ_4G	(((size_t) SZ_1G) * 4)
+#endif
+
+#ifndef SZ_2G
+#define SZ_2G	(((size_t) SZ_1G) * 2)
+#endif
+
 struct sde_smmu_domain {
 	char *ctx_name;
 	int domain;
@@ -487,9 +495,9 @@
 }
 
 static struct sde_smmu_domain sde_rot_unsec = {
-	"rot_0", SDE_IOMMU_DOMAIN_ROT_UNSECURE, SZ_128K, (SZ_1G - SZ_128K)};
+	"rot_0", SDE_IOMMU_DOMAIN_ROT_UNSECURE, SZ_2G, (SZ_4G - SZ_2G)};
 static struct sde_smmu_domain sde_rot_sec = {
-	"rot_1", SDE_IOMMU_DOMAIN_ROT_SECURE, SZ_1G, SZ_2G};
+	"rot_1", SDE_IOMMU_DOMAIN_ROT_SECURE, SZ_2G, (SZ_4G - SZ_2G)};
 
 static const struct of_device_id sde_smmu_dt_match[] = {
 	{ .compatible = SMMU_SDE_ROT_UNSEC, .data = &sde_rot_unsec},
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index 89e83b8..b6f206e 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -1632,7 +1632,11 @@
 			&pkt->rg_property_data[1];
 
 		hfi->input_color_primaries = hal->input_color_primaries;
-		hfi->custom_matrix_enabled = hal->custom_matrix_enabled;
+		if (hal->custom_matrix_enabled)
+			/* Bit Mask to enable all custom values */
+			hfi->custom_matrix_enabled = 0x7;
+		else
+			hfi->custom_matrix_enabled = 0x0;
 		memcpy(hfi->csc_matrix, hal->csc_matrix,
 				sizeof(hfi->csc_matrix));
 		memcpy(hfi->csc_bias, hal->csc_bias, sizeof(hfi->csc_bias));
diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c
index 52b9b32..03dfde6 100644
--- a/drivers/media/platform/msm/vidc/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c
@@ -348,11 +348,10 @@
 		info->response_type = HAL_RESPONSE_UNUSED;
 		break;
 	default:
-		/* All other errors are not expected and treated as sys error */
 		dprintk(VIDC_ERR,
-			"%s: data1 %#x, data2 %#x, treat as sys error\n",
-			__func__, pkt->event_data1, pkt->event_data2);
-		info->response_type = HAL_SYS_ERROR;
+			"%s: session %x data1 %#x, data2 %#x\n", __func__,
+			pkt->session_id, pkt->event_data1, pkt->event_data2);
+		info->response_type = HAL_SESSION_ERROR;
 		break;
 	}
 
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index 286a67e..9238176 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -808,7 +808,7 @@
 
 int msm_vdec_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
 {
-	int rc = 0;
+	int rc = 0, temp;
 	struct hal_nal_stream_format_supported stream_format;
 	struct hal_enable_picture enable_picture;
 	struct hal_enable hal_property;
@@ -1033,6 +1033,31 @@
 					rc);
 			break;
 		case V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_SECONDARY:
+			temp_ctrl = TRY_GET_CTRL(
+				V4L2_CID_MPEG_VIDC_VIDEO_DPB_COLOR_FORMAT);
+			switch (temp_ctrl->val) {
+			case V4L2_MPEG_VIDC_VIDEO_DPB_COLOR_FMT_UBWC:
+				temp = V4L2_PIX_FMT_NV12_UBWC;
+				break;
+			case V4L2_MPEG_VIDC_VIDEO_DPB_COLOR_FMT_TP10_UBWC:
+				temp = V4L2_PIX_FMT_NV12_TP10_UBWC;
+				break;
+			case V4L2_MPEG_VIDC_VIDEO_DPB_COLOR_FMT_NONE:
+			default:
+				dprintk(VIDC_DBG,
+					"set default dpb color format as NV12_UBWC\n");
+				temp = V4L2_PIX_FMT_NV12_UBWC;
+				break;
+			}
+			rc = msm_comm_set_color_format(inst,
+				HAL_BUFFER_OUTPUT, temp);
+			if (rc) {
+				dprintk(VIDC_ERR,
+					"%s Failed setting output color format: %#x\n",
+					__func__, rc);
+				break;
+			}
+
 			multi_stream.buffer_type = HAL_BUFFER_OUTPUT2;
 			multi_stream.enable = true;
 			pdata = &multi_stream;
@@ -1258,6 +1283,14 @@
 				}
 				rc = msm_vidc_update_host_buff_counts(inst);
 				inst->clk_data.dpb_fourcc = fourcc;
+				control.id =
+				V4L2_CID_MPEG_VIDC_VIDEO_DPB_COLOR_FORMAT;
+				control.value = ext_control[i].value;
+				rc = msm_comm_s_ctrl(inst, &control);
+				if (rc)
+					dprintk(VIDC_ERR,
+						"%s: set control dpb color format %d failed\n",
+						__func__, control.value);
 				break;
 			default:
 				dprintk(VIDC_ERR,
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 85b4724..dd62fb7 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -675,7 +675,7 @@
 		.name = "Extradata Type",
 		.type = V4L2_CTRL_TYPE_MENU,
 		.minimum = V4L2_MPEG_VIDC_EXTRADATA_NONE,
-		.maximum = V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO,
+		.maximum = V4L2_MPEG_VIDC_EXTRADATA_ROI_QP,
 		.default_value = V4L2_MPEG_VIDC_EXTRADATA_NONE,
 		.menu_skip_mask = ~(
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_NONE) |
@@ -695,8 +695,7 @@
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_LTR) |
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_METADATA_MBI) |
 			(1 << V4L2_MPEG_VIDC_EXTRADATA_YUV_STATS)|
-			(1 << V4L2_MPEG_VIDC_EXTRADATA_ROI_QP) |
-			(1 << V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO)
+			(1 << V4L2_MPEG_VIDC_EXTRADATA_ROI_QP)
 			),
 		.qmenu = mpeg_video_vidc_extradata,
 	},
@@ -2759,6 +2758,19 @@
 			inst->bufq[fmt->type].plane_sizes[i] =
 				f->fmt.pix_mp.plane_fmt[i].sizeimage;
 		}
+		/*
+		 * Input extradata buffer size may change upon updating
+		 * CAPTURE plane buffer size.
+		 */
+
+		extra_idx = EXTRADATA_IDX(inst->bufq[OUTPUT_PORT].num_planes);
+		if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
+			buff_req_buffer = get_buff_req_buffer(inst,
+					HAL_BUFFER_EXTRADATA_INPUT);
+			inst->bufq[OUTPUT_PORT].plane_sizes[extra_idx] =
+				buff_req_buffer ?
+				buff_req_buffer->buffer_size : 0;
+		}
 	} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
 		struct hal_frame_size frame_sz;
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 907e01f..dabe667 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -605,8 +605,10 @@
 	mutex_lock(&q->lock);
 	rc = vb2_streamon(&q->vb2_bufq, i);
 	mutex_unlock(&q->lock);
-	if (rc)
+	if (rc) {
 		dprintk(VIDC_ERR, "streamon failed on port: %d\n", i);
+		msm_comm_kill_session(inst);
+	}
 	return rc;
 }
 EXPORT_SYMBOL(msm_vidc_streamon);
@@ -1011,10 +1013,9 @@
 	}
 
 fail_start:
-	if (rc) {
-		dprintk(VIDC_ERR, "%s: kill session %pK\n", __func__, inst);
-		msm_comm_kill_session(inst);
-	}
+	if (rc)
+		dprintk(VIDC_ERR, "%s: inst %pK session %x failed to start\n",
+			__func__, inst, hash32_ptr(inst->session));
 	return rc;
 }
 
@@ -1781,12 +1782,6 @@
 		dprintk(VIDC_ERR,
 			"Failed to release mark_data buffers\n");
 
-	/*
-	 * At this point all buffes should be with driver
-	 * irrespective of scenario
-	 */
-	msm_comm_validate_output_buffers(inst);
-
 	msm_comm_release_eos_buffers(inst);
 
 	if (msm_comm_release_output_buffers(inst, true))
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index e258f1f..32b548a 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -307,7 +307,7 @@
 			q = &inst->bufq[CAPTURE_PORT].vb2_bufq;
 			for (i = 0; i < q->num_buffers; i++) {
 				vb = q->bufs[i];
-				if (vb->state != VB2_BUF_STATE_ACTIVE &&
+				if (vb && vb->state != VB2_BUF_STATE_ACTIVE &&
 						vb->planes[0].bytesused)
 					fw_out_qsize++;
 			}
@@ -315,7 +315,7 @@
 			q = &inst->bufq[OUTPUT_PORT].vb2_bufq;
 			for (i = 0; i < q->num_buffers; i++) {
 				vb = q->bufs[i];
-				if (vb->state != VB2_BUF_STATE_ACTIVE)
+				if (vb && vb->state != VB2_BUF_STATE_ACTIVE)
 					fw_out_qsize++;
 			}
 		}
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index a9dbd34..9dce3f9 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -2108,7 +2108,8 @@
 	}
 
 	hdev = inst->core->device;
-	dprintk(VIDC_WARN, "Session error received for session %pK\n", inst);
+	dprintk(VIDC_ERR, "Session error received for inst %pK session %x\n",
+		inst, hash32_ptr(inst->session));
 
 	if (response->status == VIDC_ERR_MAX_CLIENTS) {
 		dprintk(VIDC_WARN, "Too many clients, rejecting %pK", inst);
@@ -2131,6 +2132,8 @@
 		event = V4L2_EVENT_MSM_VIDC_SYS_ERROR;
 	}
 
+	/* change state before sending error to client */
+	change_inst_state(inst, MSM_VIDC_CORE_INVALID);
 	msm_vidc_queue_v4l2_event(inst, event);
 	put_inst(inst);
 }
@@ -2202,6 +2205,8 @@
 		if (!core->trigger_ssr)
 			msm_comm_print_inst_info(inst);
 	}
+	/* handle the hw error before core is released to get full debug info */
+	msm_vidc_handle_hw_error(core);
 	dprintk(VIDC_DBG, "Calling core_release\n");
 	rc = call_hfi_op(hdev, core_release, hdev->hfi_device_data);
 	if (rc) {
@@ -2212,10 +2217,7 @@
 	core->state = VIDC_CORE_UNINIT;
 	mutex_unlock(&core->lock);
 
-	dprintk(VIDC_ERR,
-		"SYS_ERROR can potentially crash the system\n");
-
-	msm_vidc_handle_hw_error(core);
+	dprintk(VIDC_WARN, "SYS_ERROR handled.\n");
 }
 
 void msm_comm_session_clean(struct msm_vidc_inst *inst)
@@ -2382,8 +2384,8 @@
 	empty_buf_done = (struct vidc_hal_ebd *)&response->input_done;
 	/* If this is internal EOS buffer, handle it in driver */
 	if (is_eos_buffer(inst, empty_buf_done->packet_buffer)) {
-		dprintk(VIDC_DBG, "Received EOS buffer %pK\n",
-			(void *)(u64)empty_buf_done->packet_buffer);
+		dprintk(VIDC_DBG, "Received EOS buffer 0x%x\n",
+			empty_buf_done->packet_buffer);
 		goto exit;
 	}
 
@@ -2795,7 +2797,8 @@
 	hdev = inst->core->device;
 	abort_completion = SESSION_MSG_INDEX(HAL_SESSION_ABORT_DONE);
 
-	dprintk(VIDC_WARN, "%s: inst %pK\n", __func__, inst);
+	dprintk(VIDC_WARN, "%s: inst %pK session %x\n", __func__,
+		inst, hash32_ptr(inst->session));
 	rc = call_hfi_op(hdev, session_abort, (void *)inst->session);
 	if (rc) {
 		dprintk(VIDC_ERR,
@@ -2807,8 +2810,8 @@
 			msecs_to_jiffies(
 				inst->core->resources.msm_vidc_hw_rsp_timeout));
 	if (!rc) {
-		dprintk(VIDC_ERR, "%s: inst %pK abort timed out\n",
-				__func__, inst);
+		dprintk(VIDC_ERR, "%s: inst %pK session %x abort timed out\n",
+				__func__, inst, hash32_ptr(inst->session));
 		msm_comm_generate_sys_error(inst);
 		rc = -EBUSY;
 	} else {
@@ -3700,8 +3703,8 @@
 	if (inst->state == MSM_VIDC_CORE_INVALID) {
 		dprintk(VIDC_ERR, "%s: inst %pK is in invalid\n",
 			__func__, inst);
-		mutex_unlock(&inst->sync_lock);
-		return -EINVAL;
+		rc = -EINVAL;
+		goto exit;
 	}
 
 	flipped_state = get_flipped_state(inst->state, state);
@@ -3782,6 +3785,8 @@
 		rc = -EINVAL;
 		break;
 	}
+
+exit:
 	mutex_unlock(&inst->sync_lock);
 
 	if (rc) {
@@ -3820,8 +3825,8 @@
 		data.timestamp = LLONG_MAX;
 		data.extradata_addr = data.device_addr;
 		data.extradata_size = 0;
-		dprintk(VIDC_DBG, "Queueing EOS buffer %pK\n",
-				(void *)(u64)data.device_addr);
+		dprintk(VIDC_DBG, "Queueing EOS buffer 0x%x\n",
+				data.device_addr);
 		hdev = inst->core->device;
 
 		rc = call_hfi_op(hdev, session_etb, inst->session,
@@ -4175,6 +4180,19 @@
 	output_count = (batch_mode ? &count_single_batch : &count_buffers)
 		(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
 
+	if (!batch_mode && mbuf) {
+		/*
+		 * don't queue output_mplane buffers if the buffer queued
+		 * by the client is of capture_mplane type, and vice versa.
+		 */
+		if (mbuf->vvb.vb2_buf.type ==
+				V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+			output_count = 0;
+		else if (mbuf->vvb.vb2_buf.type ==
+				V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+			capture_count = 0;
+	}
+
 	/*
 	 * Somewhat complicated logic to prevent queuing the buffer to hardware.
 	 * Don't queue if:
@@ -5027,6 +5045,9 @@
 	enum vidc_ports ports[] = {OUTPUT_PORT, CAPTURE_PORT};
 	int c = 0;
 
+	/* before flush ensure venus released all buffers */
+	msm_comm_try_state(inst, MSM_VIDC_RELEASE_RESOURCES_DONE);
+
 	for (c = 0; c < ARRAY_SIZE(ports); ++c) {
 		enum vidc_ports port = ports[c];
 
@@ -5089,6 +5110,9 @@
 		return 0;
 	}
 
+	/* enable in flush */
+	inst->in_flush = true;
+
 	mutex_lock(&inst->registeredbufs.lock);
 	list_for_each_entry_safe(mbuf, next, &inst->registeredbufs.list, list) {
 		/* don't flush input buffers if input flush is not requested */
@@ -5129,9 +5153,6 @@
 	}
 	mutex_unlock(&inst->registeredbufs.lock);
 
-	/* enable in flush */
-	inst->in_flush = true;
-
 	hdev = inst->core->device;
 	if (ip_flush) {
 		dprintk(VIDC_DBG, "Send flush on all ports to firmware\n");
@@ -5439,7 +5460,7 @@
 	int rc = 0;
 	struct hfi_device *hdev;
 	struct msm_vidc_core *core;
-	u32 output_height, output_width;
+	u32 output_height, output_width, input_height, input_width;
 	u32 rotation;
 
 	if (!inst || !inst->core || !inst->core->device) {
@@ -5462,6 +5483,22 @@
 		return -ENOTSUPP;
 	}
 
+	output_height = inst->prop.height[CAPTURE_PORT];
+	output_width = inst->prop.width[CAPTURE_PORT];
+	input_height = inst->prop.height[OUTPUT_PORT];
+	input_width = inst->prop.width[OUTPUT_PORT];
+
+	if (input_width % 2 != 0 || input_height % 2 != 0 ||
+			output_width % 2 != 0 || output_height % 2 != 0) {
+		dprintk(VIDC_ERR,
+			"Height and Width should be even numbers for NV12\n");
+		dprintk(VIDC_ERR,
+			"Input WxH = (%u)x(%u), Output WxH = (%u)x(%u)\n",
+			input_width, input_height,
+			output_width, output_height);
+		rc = -ENOTSUPP;
+	}
+
 	rotation =  msm_comm_g_ctrl_for_id(inst,
 					V4L2_CID_MPEG_VIDC_VIDEO_ROTATION);
 
@@ -5559,8 +5596,8 @@
 		return 0;
 	}
 
-	dprintk(VIDC_WARN, "%s: inst %pK, state %d\n", __func__,
-			inst, inst->state);
+	dprintk(VIDC_WARN, "%s: inst %pK, session %x state %d\n", __func__,
+			inst, hash32_ptr(inst->session), inst->state);
 	/*
 	 * We're internally forcibly killing the session, if fw is aware of
 	 * the session send session_abort to firmware to clean up and release
@@ -5571,8 +5608,9 @@
 			inst->state == MSM_VIDC_CORE_INVALID) {
 		rc = msm_comm_session_abort(inst);
 		if (rc) {
-			dprintk(VIDC_WARN, "%s: inst %pK abort failed\n",
-				__func__, inst);
+			dprintk(VIDC_ERR,
+				"%s: inst %pK session %x abort failed\n",
+				__func__, inst, hash32_ptr(inst->session));
 			change_inst_state(inst, MSM_VIDC_CORE_INVALID);
 		}
 	}
@@ -5580,7 +5618,8 @@
 	change_inst_state(inst, MSM_VIDC_CLOSE_DONE);
 	msm_comm_session_clean(inst);
 
-	dprintk(VIDC_WARN, "%s: inst %pK handled\n", __func__, inst);
+	dprintk(VIDC_WARN, "%s: inst %pK session %x handled\n", __func__,
+		inst, hash32_ptr(inst->session));
 	return rc;
 }
 
@@ -5868,8 +5907,8 @@
 		goto exit;
 	}
 
-	fps = USEC_PER_SEC;
-	do_div(fps, us_per_frame);
+	fps = us_per_frame > USEC_PER_SEC ?
+		0 : USEC_PER_SEC / (u32)us_per_frame;
 
 	if (fps % 15 == 14 || fps % 24 == 23)
 		fps = fps + 1;
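
The reworked frame-rate computation above drops do_div() in favour of a guarded 32-bit division: an interval longer than one second would truncate to 0 fps anyway, and once us_per_frame is known to be at most USEC_PER_SEC it fits in a u32. A small user-space sketch of that logic, including the existing 29.97/23.976 round-up (illustrative only; the zero-interval check is assumed to happen earlier in the driver):

#include <stdint.h>
#include <stdio.h>

#define USEC_PER_SEC 1000000u

static uint32_t interval_to_fps(uint64_t us_per_frame)
{
	uint32_t fps;

	if (us_per_frame == 0 || us_per_frame > USEC_PER_SEC)
		return 0;	/* slower than 1 fps, or invalid */

	fps = USEC_PER_SEC / (uint32_t)us_per_frame;

	/* nudge 29.97/23.976-style rates up to the nominal value */
	if (fps % 15 == 14 || fps % 24 == 23)
		fps = fps + 1;

	return fps;
}

int main(void)
{
	printf("%u\n", interval_to_fps(33367));		/* ~29.97 -> 30 */
	printf("%u\n", interval_to_fps(2000000));	/* 0 */
	return 0;
}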
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_platform.c b/drivers/media/platform/msm/vidc/msm_vidc_platform.c
index 1818788..5e5d030 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_platform.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_platform.c
@@ -115,6 +115,10 @@
 		.value = 1,
 	},
 	{
+		.key = "qcom,domain-attr-cache-pagetables",
+		.value = 1,
+	},
+	{
 		.key = "qcom,max-secure-instances",
 		.value = 5,
 	},
@@ -140,11 +144,11 @@
 	},
 	{
 		.key = "qcom,power-collapse-delay",
-		.value = 500,
+		.value = 1500,
 	},
 	{
 		.key = "qcom,hw-resp-timeout",
-		.value = 250,
+		.value = 1000,
 	},
 	{
 		.key = "qcom,debug-timeout",
@@ -162,6 +166,10 @@
 		.value = 1,
 	},
 	{
+		.key = "qcom,domain-attr-non-fatal-faults",
+		.value = 1,
+	},
+	{
 		.key = "qcom,max-secure-instances",
 		.value = 5,
 	},
@@ -205,6 +213,10 @@
 		.value = 1,
 	},
 	{
+		.key = "qcom,domain-attr-non-fatal-faults",
+		.value = 1,
+	},
+	{
 		.key = "qcom,max-secure-instances",
 		.value = 5,
 	},
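
The entries above extend the driver's string-key/integer-value platform tables, which msm_vidc_res_parse.c later resolves with find_key_value(). A toy lookup in the same shape (the real helper takes the platform data object, so the signature below is an assumption):

#include <stdio.h>
#include <string.h>

struct platform_kv {
	const char *key;
	int value;
};

static const struct platform_kv tbl[] = {
	{ "qcom,power-collapse-delay",		1500 },
	{ "qcom,hw-resp-timeout",		1000 },
	{ "qcom,domain-attr-cache-pagetables",	1 },
};

/* Return the value for key, or 0 when the key is absent. */
static int find_value(const struct platform_kv *t, size_t n, const char *key)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (!strcmp(t[i].key, key))
			return t[i].value;
	return 0;
}

int main(void)
{
	printf("%d\n", find_value(tbl, sizeof(tbl) / sizeof(tbl[0]),
				  "qcom,hw-resp-timeout"));	/* 1000 */
	return 0;
}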
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
index a0214a2..b1a240d 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
@@ -776,6 +776,8 @@
 			"qcom,hw-resp-timeout");
 	res->non_fatal_pagefaults = find_key_value(platform_data,
 			"qcom,domain-attr-non-fatal-faults");
+	res->cache_pagetables = find_key_value(platform_data,
+			"qcom,domain-attr-cache-pagetables");
 
 	res->csc_coeff_data = &platform_data->csc_data;
 
@@ -901,14 +903,14 @@
 	return VMID_INVAL;
 }
 
-static int msm_vidc_setup_context_bank(struct context_bank_info *cb,
-		struct device *dev)
+static int msm_vidc_setup_context_bank(struct msm_vidc_platform_resources *res,
+		struct context_bank_info *cb, struct device *dev)
 {
 	int rc = 0;
 	int secure_vmid = VMID_INVAL;
 	struct bus_type *bus;
 
-	if (!dev || !cb) {
+	if (!dev || !cb || !res) {
 		dprintk(VIDC_ERR,
 			"%s: Invalid Input params\n", __func__);
 		return -EINVAL;
@@ -942,6 +944,19 @@
 		}
 	}
 
+	if (res->cache_pagetables) {
+		int cache_pagetables = 1;
+
+		rc = iommu_domain_set_attr(cb->mapping->domain,
+			DOMAIN_ATTR_USE_UPSTREAM_HINT, &cache_pagetables);
+		if (rc) {
+			WARN_ONCE(rc,
+				"%s: failed to set cache pagetables attribute, %d\n",
+				__func__, rc);
+			rc = 0;
+		}
+	}
+
 	rc = arm_iommu_attach_device(cb->dev, cb->mapping);
 	if (rc) {
 		dprintk(VIDC_ERR, "%s - Couldn't arm_iommu_attach_device\n",
@@ -1055,7 +1070,7 @@
 		cb->name, cb->addr_range.start,
 		cb->addr_range.size, cb->buffer_type);
 
-	rc = msm_vidc_setup_context_bank(cb, dev);
+	rc = msm_vidc_setup_context_bank(&core->resources, cb, dev);
 	if (rc) {
 		dprintk(VIDC_ERR, "Cannot setup context bank %d\n", rc);
 		goto err_setup_cb;
@@ -1167,7 +1182,7 @@
 			goto err_setup_cb;
 		}
 
-		rc = msm_vidc_setup_context_bank(cb, cb->dev);
+		rc = msm_vidc_setup_context_bank(res, cb, cb->dev);
 		if (rc) {
 			dprintk(VIDC_ERR, "Cannot setup context bank %d\n", rc);
 			goto err_setup_cb;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_resources.h b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
index 99b4e30..23e33fe 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_resources.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
@@ -185,6 +185,7 @@
 	int msm_vidc_firmware_unload_delay;
 	uint32_t msm_vidc_pwr_collapse_delay;
 	bool non_fatal_pagefaults;
+	bool cache_pagetables;
 	struct msm_vidc_codec_data *codec_data;
 	int codec_data_count;
 	struct msm_vidc_csc_coeff *csc_coeff_data;
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 60169e9..7e7ed47 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -503,6 +503,11 @@
 
 	if (queue->qhdr_read_idx == queue->qhdr_write_idx) {
 		queue->qhdr_rx_req = receive_request;
+		/*
+		 * mb() to ensure qhdr is updated in main memory
+		 * so that venus reads the updated header values
+		 */
+		mb();
 		*pb_tx_req_is_set = 0;
 		dprintk(VIDC_DBG,
 			"%s queue is empty, rx_req = %u, tx_req = %u, read_idx = %u\n",
@@ -550,6 +555,11 @@
 		queue->qhdr_rx_req = 0;
 	else
 		queue->qhdr_rx_req = receive_request;
+	/*
+	 * mb() to ensure qhdr is updated in main memory
+	 * so that venus reads the updated header values
+	 */
+	mb();
 
 	*pb_tx_req_is_set = (queue->qhdr_tx_req == 1) ? 1 : 0;
 
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c
index c9bf58c..04b8b87 100644
--- a/drivers/media/platform/mtk-vpu/mtk_vpu.c
+++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c
@@ -23,6 +23,7 @@
 #include <linux/of_reserved_mem.h>
 #include <linux/sched.h>
 #include <linux/sizes.h>
+#include <linux/dma-mapping.h>
 
 #include "mtk_vpu.h"
 
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index edbf682..30180af 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -456,10 +456,11 @@
 }
 EXPORT_SYMBOL(mmc_clk_update_freq);
 
-void mmc_recovery_fallback_lower_speed(struct mmc_host *host)
+int mmc_recovery_fallback_lower_speed(struct mmc_host *host)
 {
+	int err = 0;
 	if (!host->card)
-		return;
+		return -EINVAL;
 
 	if (host->sdr104_wa && mmc_card_sd(host->card) &&
 	    (host->ios.timing == MMC_TIMING_UHS_SDR104) &&
@@ -467,9 +468,14 @@
 		pr_err("%s: %s: blocked SDR104, lower the bus-speed (SDR50 / DDR50)\n",
 			mmc_hostname(host), __func__);
 		mmc_host_clear_sdr104(host);
-		mmc_hw_reset(host);
+		err = mmc_hw_reset(host);
 		host->card->sdr104_blocked = true;
 	}
+	if (err)
+		pr_err("%s: %s: Fallback to lower speed mode failed with err=%d\n",
+			mmc_hostname(host), __func__, err);
+
+	return err;
 }
 
 static int mmc_devfreq_set_target(struct device *dev,
@@ -537,7 +543,7 @@
 	if (err && err != -EAGAIN) {
 		pr_err("%s: clock scale to %lu failed with error %d\n",
 			mmc_hostname(host), *freq, err);
-		mmc_recovery_fallback_lower_speed(host);
+		err = mmc_recovery_fallback_lower_speed(host);
 	} else {
 		pr_debug("%s: clock change to %lu finished successfully (%s)\n",
 			mmc_hostname(host), *freq, current->comm);
@@ -1203,6 +1209,46 @@
 	return 0;
 }
 
+static int mmc_cmdq_check_retune(struct mmc_host *host)
+{
+	bool cmdq_mode;
+	int err = 0;
+
+	if (!host->need_retune || host->doing_retune || !host->card ||
+			mmc_card_hs400es(host->card) ||
+			(host->ios.clock <= MMC_HIGH_DDR_MAX_DTR))
+		return 0;
+
+	cmdq_mode = mmc_card_cmdq(host->card);
+	if (cmdq_mode) {
+		err = mmc_cmdq_halt(host, true);
+		if (err) {
+			pr_err("%s: %s: failed halting queue (%d)\n",
+				mmc_hostname(host), __func__, err);
+			host->cmdq_ops->dumpstate(host);
+			goto halt_failed;
+		}
+	}
+
+	mmc_retune_hold(host);
+	err = mmc_retune(host);
+	mmc_retune_release(host);
+
+	if (cmdq_mode) {
+		if (mmc_cmdq_halt(host, false)) {
+			pr_err("%s: %s: cmdq unhalt failed\n",
+			mmc_hostname(host), __func__);
+			host->cmdq_ops->dumpstate(host);
+		}
+	}
+
+halt_failed:
+	pr_debug("%s: %s: Retuning done err: %d\n",
+				mmc_hostname(host), __func__, err);
+
+	return err;
+}
+
 static void mmc_start_cmdq_request(struct mmc_host *host,
 				   struct mmc_request *mrq)
 {
@@ -1227,6 +1273,7 @@
 	}
 
 	mmc_host_clk_hold(host);
+	mmc_cmdq_check_retune(host);
 	if (likely(host->cmdq_ops->request))
 		host->cmdq_ops->request(host, mrq);
 	else
@@ -4306,8 +4353,7 @@
 
 	if (ret) {
 		if (host->ops->get_cd && host->ops->get_cd(host)) {
-			mmc_recovery_fallback_lower_speed(host);
-			ret = 0;
+			ret = mmc_recovery_fallback_lower_speed(host);
 		} else {
 			mmc_card_set_removed(host->card);
 			if (host->card->sdr104_blocked) {
@@ -4360,6 +4406,18 @@
 }
 EXPORT_SYMBOL(mmc_detect_card_removed);
 
+/*
+ * This should be called to make sure that the detect work (mmc_rescan)
+ * is completed. Drivers may use this function from async schedule/probe
+ * contexts to make sure that boot device detection has completed by the
+ * time the async_schedule work finishes.
+ */
+void mmc_flush_detect_work(struct mmc_host *host)
+{
+	flush_delayed_work(&host->detect);
+}
+EXPORT_SYMBOL(mmc_flush_detect_work);
+
 void mmc_rescan(struct work_struct *work)
 {
 	unsigned long flags;
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 64c8743..8a503b2 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -434,7 +434,8 @@
 	else
 		return 0;
 
-	if (!host->need_retune || host->doing_retune || !host->card)
+	if (!host->need_retune || host->doing_retune || !host->card ||
+			mmc_card_hs400es(host->card))
 		return 0;
 
 	host->need_retune = 0;
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 32da1fd..e817a02 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -1615,23 +1615,31 @@
 
 	pctrl_data->pins_drv_type_400KHz = pinctrl_lookup_state(
 			pctrl_data->pctrl, "ds_400KHz");
-	if (IS_ERR(pctrl_data->pins_drv_type_400KHz))
+	if (IS_ERR(pctrl_data->pins_drv_type_400KHz)) {
 		dev_dbg(dev, "Could not get 400K pinstates, err:%d\n", ret);
+		pctrl_data->pins_drv_type_400KHz = NULL;
+	}
 
 	pctrl_data->pins_drv_type_50MHz = pinctrl_lookup_state(
 			pctrl_data->pctrl, "ds_50MHz");
-	if (IS_ERR(pctrl_data->pins_drv_type_50MHz))
+	if (IS_ERR(pctrl_data->pins_drv_type_50MHz)) {
 		dev_dbg(dev, "Could not get 50M pinstates, err:%d\n", ret);
+		pctrl_data->pins_drv_type_50MHz = NULL;
+	}
 
 	pctrl_data->pins_drv_type_100MHz = pinctrl_lookup_state(
 			pctrl_data->pctrl, "ds_100MHz");
-	if (IS_ERR(pctrl_data->pins_drv_type_100MHz))
+	if (IS_ERR(pctrl_data->pins_drv_type_100MHz)) {
 		dev_dbg(dev, "Could not get 100M pinstates, err:%d\n", ret);
+		pctrl_data->pins_drv_type_100MHz = NULL;
+	}
 
 	pctrl_data->pins_drv_type_200MHz = pinctrl_lookup_state(
 			pctrl_data->pctrl, "ds_200MHz");
-	if (IS_ERR(pctrl_data->pins_drv_type_200MHz))
+	if (IS_ERR(pctrl_data->pins_drv_type_200MHz)) {
 		dev_dbg(dev, "Could not get 200M pinstates, err:%d\n", ret);
+		pctrl_data->pins_drv_type_200MHz = NULL;
+	}
 
 	pdata->pctrl_data = pctrl_data;
 out:
@@ -4940,6 +4948,9 @@
 		       mmc_hostname(host->mmc), __func__, ret);
 		device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
 	}
+	if (sdhci_msm_is_bootdevice(&pdev->dev))
+		mmc_flush_detect_work(host->mmc);
+
 	/* Successful initialization */
 	goto out;
 
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index 68ef0a4..b0c8085 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -342,7 +342,7 @@
 
 	/* enter the selected mode */
 	mod_reg_val = readl(priv->base + SUN4I_REG_MSEL_ADDR);
-	if (priv->can.ctrlmode & CAN_CTRLMODE_PRESUME_ACK)
+	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
 		mod_reg_val |= SUN4I_MSEL_LOOPBACK_MODE;
 	else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
 		mod_reg_val |= SUN4I_MSEL_LISTEN_ONLY_MODE;
@@ -811,7 +811,6 @@
 	priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING |
 				       CAN_CTRLMODE_LISTENONLY |
 				       CAN_CTRLMODE_LOOPBACK |
-				       CAN_CTRLMODE_PRESUME_ACK |
 				       CAN_CTRLMODE_3_SAMPLES;
 	priv->base = addr;
 	priv->clk = clk;
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index be928ce..9fdb0f0 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -333,7 +333,7 @@
 		}
 
 		cf->can_id = id & ESD_IDMASK;
-		cf->can_dlc = get_can_dlc(msg->msg.rx.dlc);
+		cf->can_dlc = get_can_dlc(msg->msg.rx.dlc & ~ESD_RTR);
 
 		if (id & ESD_EXTID)
 			cf->can_id |= CAN_EFF_FLAG;
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 05369dc..eea9aea 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -375,6 +375,8 @@
 
 		gs_free_tx_context(txc);
 
+		atomic_dec(&dev->active_tx_urbs);
+
 		netif_wake_queue(netdev);
 	}
 
@@ -463,14 +465,6 @@
 			  urb->transfer_buffer_length,
 			  urb->transfer_buffer,
 			  urb->transfer_dma);
-
-	atomic_dec(&dev->active_tx_urbs);
-
-	if (!netif_device_present(netdev))
-		return;
-
-	if (netif_queue_stopped(netdev))
-		netif_wake_queue(netdev);
 }
 
 static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index d51e0c4..4224e06 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -137,6 +137,7 @@
 #define CMD_RESET_ERROR_COUNTER		49
 #define CMD_TX_ACKNOWLEDGE		50
 #define CMD_CAN_ERROR_EVENT		51
+#define CMD_FLUSH_QUEUE_REPLY		68
 
 #define CMD_LEAF_USB_THROTTLE		77
 #define CMD_LEAF_LOG_MESSAGE		106
@@ -1301,6 +1302,11 @@
 			goto warn;
 		break;
 
+	case CMD_FLUSH_QUEUE_REPLY:
+		if (dev->family != KVASER_LEAF)
+			goto warn;
+		break;
+
 	default:
 warn:		dev_warn(dev->udev->dev.parent,
 			 "Unhandled message (%d)\n", msg->id);
@@ -1609,7 +1615,8 @@
 	if (err)
 		netdev_warn(netdev, "Cannot flush queue, error %d\n", err);
 
-	if (kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel))
+	err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel);
+	if (err)
 		netdev_warn(netdev, "Cannot reset card, error %d\n", err);
 
 	err = kvaser_usb_stop_chip(priv);
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index e2a459e..51030c3 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -976,7 +976,7 @@
 			 u64 *cookie)
 {
 	const u8 *buf = params->buf;
-	size_t len = params->len;
+	size_t len = params->len, total;
 	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
 	int rc;
 	bool tx_status = false;
@@ -1001,7 +1001,11 @@
 	if (len < sizeof(struct ieee80211_hdr_3addr))
 		return -EINVAL;
 
-	cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL);
+	total = sizeof(*cmd) + len;
+	if (total < len)
+		return -EINVAL;
+
+	cmd = kmalloc(total, GFP_KERNEL);
 	if (!cmd) {
 		rc = -ENOMEM;
 		goto out;
@@ -1011,7 +1015,7 @@
 	cmd->len = cpu_to_le16(len);
 	memcpy(cmd->payload, buf, len);
 
-	rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, cmd, sizeof(*cmd) + len,
+	rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, cmd, total,
 		      WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
 	if (rc == 0)
 		tx_status = !evt.evt.status;
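
The total variable introduced above exists to catch unsigned wrap-around when the command header size and a caller-supplied length are summed before kmalloc(). A stand-alone sketch of that guard, using a hypothetical header type:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct tx_cmd_hdr { uint16_t len; };	/* hypothetical header */

static void *alloc_cmd(size_t payload_len)
{
	size_t total = sizeof(struct tx_cmd_hdr) + payload_len;

	if (total < payload_len)	/* sum wrapped around */
		return NULL;

	return malloc(total);
}

int main(void)
{
	void *p = alloc_cmd(SIZE_MAX);	/* wraps, so it is rejected */

	printf("%s\n", p ? "allocated" : "rejected");
	free(p);
	return 0;
}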
diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c
index 7a33792..77d1902 100644
--- a/drivers/net/wireless/ath/wil6210/fw_inc.c
+++ b/drivers/net/wireless/ath/wil6210/fw_inc.c
@@ -26,14 +26,17 @@
 					     prefix_type, rowsize,	\
 					     groupsize, buf, len, ascii)
 
-#define FW_ADDR_CHECK(ioaddr, val, msg) do { \
-		ioaddr = wmi_buffer(wil, val); \
-		if (!ioaddr) { \
-			wil_err_fw(wil, "bad " msg ": 0x%08x\n", \
-				   le32_to_cpu(val)); \
-			return -EINVAL; \
-		} \
-	} while (0)
+static bool wil_fw_addr_check(struct wil6210_priv *wil,
+			      void __iomem **ioaddr, __le32 val,
+			      u32 size, const char *msg)
+{
+	*ioaddr = wmi_buffer_block(wil, val, size);
+	if (!(*ioaddr)) {
+		wil_err_fw(wil, "bad %s: 0x%08x\n", msg, le32_to_cpu(val));
+		return false;
+	}
+	return true;
+}
 
 /**
  * wil_fw_verify - verify firmware file validity
@@ -160,7 +163,8 @@
 		return -EINVAL;
 	}
 
-	FW_ADDR_CHECK(dst, d->addr, "address");
+	if (!wil_fw_addr_check(wil, &dst, d->addr, s, "address"))
+		return -EINVAL;
 	wil_dbg_fw(wil, "write [0x%08x] <== %zu bytes\n", le32_to_cpu(d->addr),
 		   s);
 	wil_memcpy_toio_32(dst, d->data, s);
@@ -192,7 +196,8 @@
 		return -EINVAL;
 	}
 
-	FW_ADDR_CHECK(dst, d->addr, "address");
+	if (!wil_fw_addr_check(wil, &dst, d->addr, s, "address"))
+		return -EINVAL;
 
 	v = le32_to_cpu(d->value);
 	wil_dbg_fw(wil, "fill [0x%08x] <== 0x%08x, %zu bytes\n",
@@ -248,7 +253,8 @@
 		u32 v = le32_to_cpu(block[i].value);
 		u32 x, y;
 
-		FW_ADDR_CHECK(dst, block[i].addr, "address");
+		if (!wil_fw_addr_check(wil, &dst, block[i].addr, 0, "address"))
+			return -EINVAL;
 
 		x = readl(dst);
 		y = (x & m) | (v & ~m);
@@ -314,10 +320,15 @@
 	wil_dbg_fw(wil, "gw write record [%3d] blocks, cmd 0x%08x\n",
 		   n, gw_cmd);
 
-	FW_ADDR_CHECK(gwa_addr, d->gateway_addr_addr, "gateway_addr_addr");
-	FW_ADDR_CHECK(gwa_val, d->gateway_value_addr, "gateway_value_addr");
-	FW_ADDR_CHECK(gwa_cmd, d->gateway_cmd_addr, "gateway_cmd_addr");
-	FW_ADDR_CHECK(gwa_ctl, d->gateway_ctrl_address, "gateway_ctrl_address");
+	if (!wil_fw_addr_check(wil, &gwa_addr, d->gateway_addr_addr, 0,
+			       "gateway_addr_addr") ||
+	    !wil_fw_addr_check(wil, &gwa_val, d->gateway_value_addr, 0,
+			       "gateway_value_addr") ||
+	    !wil_fw_addr_check(wil, &gwa_cmd, d->gateway_cmd_addr, 0,
+			       "gateway_cmd_addr") ||
+	    !wil_fw_addr_check(wil, &gwa_ctl, d->gateway_ctrl_address, 0,
+			       "gateway_ctrl_address"))
+		return -EINVAL;
 
 	wil_dbg_fw(wil, "gw addresses: addr 0x%08x val 0x%08x"
 		   " cmd 0x%08x ctl 0x%08x\n",
@@ -373,12 +384,19 @@
 	wil_dbg_fw(wil, "gw4 write record [%3d] blocks, cmd 0x%08x\n",
 		   n, gw_cmd);
 
-	FW_ADDR_CHECK(gwa_addr, d->gateway_addr_addr, "gateway_addr_addr");
+	if (!wil_fw_addr_check(wil, &gwa_addr, d->gateway_addr_addr, 0,
+			       "gateway_addr_addr"))
+		return -EINVAL;
 	for (k = 0; k < ARRAY_SIZE(block->value); k++)
-		FW_ADDR_CHECK(gwa_val[k], d->gateway_value_addr[k],
-			      "gateway_value_addr");
-	FW_ADDR_CHECK(gwa_cmd, d->gateway_cmd_addr, "gateway_cmd_addr");
-	FW_ADDR_CHECK(gwa_ctl, d->gateway_ctrl_address, "gateway_ctrl_address");
+		if (!wil_fw_addr_check(wil, &gwa_val[k],
+				       d->gateway_value_addr[k],
+				       0, "gateway_value_addr"))
+			return -EINVAL;
+	if (!wil_fw_addr_check(wil, &gwa_cmd, d->gateway_cmd_addr, 0,
+			       "gateway_cmd_addr") ||
+	    !wil_fw_addr_check(wil, &gwa_ctl, d->gateway_ctrl_address, 0,
+			       "gateway_ctrl_address"))
+		return -EINVAL;
 
 	wil_dbg_fw(wil, "gw4 addresses: addr 0x%08x cmd 0x%08x ctl 0x%08x\n",
 		   le32_to_cpu(d->gateway_addr_addr),
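
The conversion above turns FW_ADDR_CHECK(), a statement macro that hid a return -EINVAL inside do { } while (0), into wil_fw_addr_check(), which reports failure through its return value and threads the new size argument to wmi_buffer_block(). A minimal sketch of the same shape, with a hypothetical stand-in for the remap/range check:

#include <stdbool.h>
#include <stdio.h>

static unsigned char bar[0x1000];	/* pretend device BAR */

/* Stand-in for wmi_buffer_block(): remap only if the block fits. */
static void *remap(unsigned int addr, unsigned int size)
{
	if (addr + size > sizeof(bar) || addr + size < addr)
		return NULL;
	return &bar[addr];
}

/* Failure is reported to the caller instead of returning on its behalf,
 * so the error path stays visible at the call site. */
static bool addr_check(void **io, unsigned int addr, unsigned int size,
		       const char *what)
{
	*io = remap(addr, size);
	if (!*io) {
		fprintf(stderr, "bad %s: 0x%08x\n", what, addr);
		return false;
	}
	return true;
}

int main(void)
{
	void *dst;

	if (!addr_check(&dst, 0x2000, 16, "address"))
		return 1;	/* caller decides how to fail */
	return 0;
}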
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 59def4f..5cf3417 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -358,6 +358,25 @@
 	wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx);
 }
 
+static bool wil_validate_mbox_regs(struct wil6210_priv *wil)
+{
+	size_t min_size = sizeof(struct wil6210_mbox_hdr) +
+		sizeof(struct wmi_cmd_hdr);
+
+	if (wil->mbox_ctl.rx.entry_size < min_size) {
+		wil_err(wil, "rx mbox entry too small (%d)\n",
+			wil->mbox_ctl.rx.entry_size);
+		return false;
+	}
+	if (wil->mbox_ctl.tx.entry_size < min_size) {
+		wil_err(wil, "tx mbox entry too small (%d)\n",
+			wil->mbox_ctl.tx.entry_size);
+		return false;
+	}
+
+	return true;
+}
+
 static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
 {
 	struct wil6210_priv *wil = cookie;
@@ -393,7 +412,8 @@
 	if (isr & ISR_MISC_FW_READY) {
 		wil_dbg_irq(wil, "IRQ: FW ready\n");
 		wil_cache_mbox_regs(wil);
-		set_bit(wil_status_mbox_ready, wil->status);
+		if (wil_validate_mbox_regs(wil))
+			set_bit(wil_status_mbox_ready, wil->status);
 		/**
 		 * Actual FW ready indicated by the
 		 * WMI_FW_READY_EVENTID
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index c4faa2c..cadb36a 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -777,11 +777,11 @@
 void wil_refresh_fw_capabilities(struct wil6210_priv *wil)
 {
 	struct wiphy *wiphy = wil_to_wiphy(wil);
+	int features;
 
 	wil->keep_radio_on_during_sleep =
-		wil->platform_ops.keep_radio_on_during_sleep &&
-		wil->platform_ops.keep_radio_on_during_sleep(
-			wil->platform_handle) &&
+		test_bit(WIL_PLATFORM_CAPA_RADIO_ON_IN_SUSPEND,
+			 wil->platform_capa) &&
 		test_bit(WMI_FW_CAPABILITY_D3_SUSPEND, wil->fw_capabilities);
 
 	wil_info(wil, "keep_radio_on_during_sleep (%d)\n",
@@ -791,6 +791,16 @@
 		wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
 	else
 		wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
+
+	if (wil->platform_ops.set_features) {
+		features = (test_bit(WMI_FW_CAPABILITY_REF_CLOCK_CONTROL,
+				     wil->fw_capabilities) &&
+			    test_bit(WIL_PLATFORM_CAPA_EXT_CLK,
+				     wil->platform_capa)) ?
+			BIT(WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL) : 0;
+
+		wil->platform_ops.set_features(wil->platform_handle, features);
+	}
 }
 
 void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
@@ -986,6 +996,7 @@
 int wil_reset(struct wil6210_priv *wil, bool load_fw)
 {
 	int rc;
+	unsigned long status_flags = BIT(wil_status_resetting);
 
 	wil_dbg_misc(wil, "reset\n");
 
@@ -1006,9 +1017,18 @@
 	if (wil->hw_version == HW_VER_UNKNOWN)
 		return -ENODEV;
 
-	wil_dbg_misc(wil, "Prevent DS in BL & mark FW to set T_POWER_ON=0\n");
-	wil_s(wil, RGF_USER_USAGE_8, BIT_USER_PREVENT_DEEP_SLEEP |
-	      BIT_USER_SUPPORT_T_POWER_ON_0);
+	wil_dbg_misc(wil, "Prevent DS in BL\n");
+	wil_s(wil, RGF_USER_USAGE_8, BIT_USER_PREVENT_DEEP_SLEEP);
+
+	if (test_bit(WIL_PLATFORM_CAPA_T_PWR_ON_0, wil->platform_capa)) {
+		wil_dbg_misc(wil, "Notify FW to set T_POWER_ON=0\n");
+		wil_s(wil, RGF_USER_USAGE_8, BIT_USER_SUPPORT_T_POWER_ON_0);
+	}
+
+	if (test_bit(WIL_PLATFORM_CAPA_EXT_CLK, wil->platform_capa)) {
+		wil_dbg_misc(wil, "Notify FW on ext clock configuration\n");
+		wil_s(wil, RGF_USER_USAGE_8, BIT_USER_EXT_CLK);
+	}
 
 	if (wil->platform_ops.notify) {
 		rc = wil->platform_ops.notify(wil->platform_handle,
@@ -1019,6 +1039,14 @@
 	}
 
 	set_bit(wil_status_resetting, wil->status);
+	if (test_bit(wil_status_collecting_dumps, wil->status)) {
+		/* Device is collecting a crash dump, cancel the reset.
+		 * The reset will take place once dump collection completes.
+		 */
+		wil_dbg_misc(wil, "reject reset while collecting crash dump\n");
+		rc = -EBUSY;
+		goto out;
+	}
 
 	cancel_work_sync(&wil->disconnect_worker);
 	wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
@@ -1033,7 +1061,11 @@
 
 	/* prevent NAPI from being scheduled and prevent wmi commands */
 	mutex_lock(&wil->wmi_mutex);
-	bitmap_zero(wil->status, wil_status_last);
+	if (test_bit(wil_status_suspending, wil->status))
+		status_flags |= BIT(wil_status_suspending);
+	bitmap_and(wil->status, wil->status, &status_flags,
+		   wil_status_last);
+	wil_dbg_misc(wil, "wil->status (0x%lx)\n", *wil->status);
 	mutex_unlock(&wil->wmi_mutex);
 
 	wil_mask_irq(wil);
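
The bitmap_and() above replaces bitmap_zero() so that wil_status_resetting, plus wil_status_suspending when a suspend is already in flight, survive the reset instead of being wiped with every other status bit. A user-space sketch of that whitelist masking, with illustrative bit positions:

#include <stdio.h>

#define BIT(n)	(1UL << (n))

enum { ST_FW_READY, ST_RESETTING, ST_SUSPENDING };

int main(void)
{
	unsigned long status = BIT(ST_FW_READY) | BIT(ST_SUSPENDING);
	unsigned long keep = BIT(ST_RESETTING);	/* always preserved */

	status |= BIT(ST_RESETTING);		/* set_bit(resetting) */
	if (status & BIT(ST_SUSPENDING))	/* keep an in-flight suspend */
		keep |= BIT(ST_SUSPENDING);

	status &= keep;				/* bitmap_and() equivalent */
	printf("status = 0x%lx\n", status);	/* fw_ready cleared */
	return 0;
}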
@@ -1051,14 +1083,14 @@
 	wil_rx_fini(wil);
 	if (rc) {
 		wil_bl_crash_info(wil, true);
-		return rc;
+		goto out;
 	}
 
 	rc = wil_get_bl_info(wil);
 	if (rc == -EAGAIN && !load_fw) /* ignore RF error if not going up */
 		rc = 0;
 	if (rc)
-		return rc;
+		goto out;
 
 	wil_set_oob_mode(wil, oob_mode);
 	if (load_fw) {
@@ -1070,10 +1102,10 @@
 		/* Loading f/w from the file */
 		rc = wil_request_firmware(wil, wil->wil_fw_name, true);
 		if (rc)
-			return rc;
+			goto out;
 		rc = wil_request_firmware(wil, WIL_BOARD_FILE_NAME, true);
 		if (rc)
-			return rc;
+			goto out;
 
 		wil_pre_fw_config(wil);
 		wil_release_cpu(wil);
@@ -1085,6 +1117,8 @@
 	reinit_completion(&wil->wmi_call);
 	reinit_completion(&wil->halp.comp);
 
+	clear_bit(wil_status_resetting, wil->status);
+
 	if (load_fw) {
 		wil_configure_interrupt_moderation(wil);
 		wil_unmask_irq(wil);
@@ -1121,6 +1155,10 @@
 	}
 
 	return rc;
+
+out:
+	clear_bit(wil_status_resetting, wil->status);
+	return rc;
 }
 
 void wil_fw_error_recovery(struct wil6210_priv *wil)
@@ -1228,9 +1266,7 @@
 	wil_abort_scan(wil, false);
 	mutex_unlock(&wil->p2p_wdev_mutex);
 
-	wil_reset(wil, false);
-
-	return 0;
+	return wil_reset(wil, false);
 }
 
 int wil_down(struct wil6210_priv *wil)
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 89e3fbf..370068a 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -23,9 +23,9 @@
 #include <linux/rtnetlink.h>
 #include <linux/pm_runtime.h>
 
-static bool use_msi;
+static bool use_msi = true;
 module_param(use_msi, bool, 0444);
-MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - false");
+MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true");
 
 static bool ftm_mode;
 module_param(ftm_mode, bool, 0444);
@@ -45,9 +45,11 @@
 	u32 jtag_id = wil_r(wil, RGF_USER_JTAG_DEV_ID);
 	u8 chip_revision = (wil_r(wil, RGF_USER_REVISION_ID) &
 			    RGF_USER_REVISION_ID_MASK);
+	int platform_capa;
 
 	bitmap_zero(wil->hw_capabilities, hw_capability_last);
 	bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
+	bitmap_zero(wil->platform_capa, WIL_PLATFORM_CAPA_MAX);
 	wil->wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_DEFAULT :
 			   WIL_FW_NAME_DEFAULT;
 	wil->chip_revision = chip_revision;
@@ -83,6 +85,14 @@
 
 	wil_info(wil, "Board hardware is %s\n", wil->hw_name);
 
+	/* Get platform capabilities */
+	if (wil->platform_ops.get_capa) {
+		platform_capa =
+			wil->platform_ops.get_capa(wil->platform_handle);
+		memcpy(wil->platform_capa, &platform_capa,
+		       min(sizeof(wil->platform_capa), sizeof(platform_capa)));
+	}
+
 	/* extract FW capabilities from file without loading the FW */
 	wil_request_firmware(wil, wil->wil_fw_name, false);
 	wil_refresh_fw_capabilities(wil);
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index 2ef2f34..153c1cf 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -141,6 +141,13 @@
 
 	/* Prevent handling of new tx and wmi commands */
 	set_bit(wil_status_suspending, wil->status);
+	if (test_bit(wil_status_collecting_dumps, wil->status)) {
+		/* Device is collecting a crash dump, cancel the suspend */
+		wil_dbg_pm(wil, "reject suspend while collecting crash dump\n");
+		clear_bit(wil_status_suspending, wil->status);
+		wil->suspend_stats.rejected_by_host++;
+		return -EBUSY;
+	}
 	wil_update_net_queues_bh(wil, NULL, true);
 
 	if (!wil_is_tx_idle(wil)) {
@@ -251,6 +258,15 @@
 
 	wil_dbg_pm(wil, "suspend radio off\n");
 
+	set_bit(wil_status_suspending, wil->status);
+		/* Device is collecting a crash dump, cancel the suspend */
+		/* Device collects crash dump, cancel the suspend */
+		wil_dbg_pm(wil, "reject suspend while collecting crash dump\n");
+		clear_bit(wil_status_suspending, wil->status);
+		wil->suspend_stats.rejected_by_host++;
+		return -EBUSY;
+	}
+
 	/* if netif up, hardware is alive, shut it down */
 	if (ndev->flags & IFF_UP) {
 		rc = wil_down(wil);
@@ -275,6 +291,7 @@
 	set_bit(wil_status_suspended, wil->status);
 
 out:
+	clear_bit(wil_status_suspending, wil->status);
 	wil_dbg_pm(wil, "suspend radio off: %d\n", rc);
 
 	return rc;
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 8616f86..52321f4 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -165,6 +165,7 @@
 #define RGF_USER_USAGE_8		(0x880020)
 	#define BIT_USER_PREVENT_DEEP_SLEEP	BIT(0)
 	#define BIT_USER_SUPPORT_T_POWER_ON_0	BIT(1)
+	#define BIT_USER_EXT_CLK		BIT(2)
 #define RGF_USER_HW_MACHINE_STATE	(0x8801dc)
 	#define HW_MACHINE_BOOT_DONE	(0x3fffffd)
 #define RGF_USER_USER_CPU_0		(0x8801e0)
@@ -445,6 +446,7 @@
 	wil_status_suspending, /* suspend in progress */
 	wil_status_suspended, /* suspend completed, device is suspended */
 	wil_status_resuming, /* resume in progress */
+	wil_status_collecting_dumps, /* crashdump collection in progress */
 	wil_status_last /* keep last */
 };
 
@@ -648,6 +650,7 @@
 	const char *wil_fw_name;
 	DECLARE_BITMAP(hw_capabilities, hw_capability_last);
 	DECLARE_BITMAP(fw_capabilities, WMI_FW_CAPABILITY_MAX);
+	DECLARE_BITMAP(platform_capa, WIL_PLATFORM_CAPA_MAX);
 	u8 n_mids; /* number of additional MIDs as reported by FW */
 	u32 recovery_count; /* num of FW recovery attempts in a short time */
 	u32 recovery_state; /* FW recovery state machine */
@@ -886,6 +889,7 @@
 int wil_find_cid(struct wil6210_priv *wil, const u8 *mac);
 void wil_set_ethtoolops(struct net_device *ndev);
 
+void __iomem *wmi_buffer_block(struct wil6210_priv *wil, __le32 ptr, u32 size);
 void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr);
 void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr);
 int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr,
diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
index e53cf0c..1ed3306 100644
--- a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
+++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
@@ -72,6 +72,15 @@
 		return -EINVAL;
 	}
 
+	set_bit(wil_status_collecting_dumps, wil->status);
+	if (test_bit(wil_status_suspending, wil->status) ||
+	    test_bit(wil_status_suspended, wil->status) ||
+	    test_bit(wil_status_resetting, wil->status)) {
+		wil_err(wil, "cannot collect fw dump during suspend/reset\n");
+		clear_bit(wil_status_collecting_dumps, wil->status);
+		return -EINVAL;
+	}
+
 	/* copy to crash dump area */
 	for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
 		map = &fw_mapping[i];
@@ -91,6 +100,8 @@
 				     (const void __iomem * __force)data, len);
 	}
 
+	clear_bit(wil_status_collecting_dumps, wil->status);
+
 	return 0;
 }
 
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h
index 621005b..620a1b3 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.h
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.h
@@ -27,6 +27,18 @@
 	WIL_PLATFORM_EVT_POST_SUSPEND = 4,
 };
 
+enum wil_platform_features {
+	WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL = 0,
+	WIL_PLATFORM_FEATURE_MAX,
+};
+
+enum wil_platform_capa {
+	WIL_PLATFORM_CAPA_RADIO_ON_IN_SUSPEND = 0,
+	WIL_PLATFORM_CAPA_T_PWR_ON_0 = 1,
+	WIL_PLATFORM_CAPA_EXT_CLK = 2,
+	WIL_PLATFORM_CAPA_MAX,
+};
+
 /**
  * struct wil_platform_ops - wil platform module calls from this
  * driver to platform driver
@@ -37,7 +49,8 @@
 	int (*resume)(void *handle, bool device_powered_on);
 	void (*uninit)(void *handle);
 	int (*notify)(void *handle, enum wil_platform_event evt);
-	bool (*keep_radio_on_during_sleep)(void *handle);
+	int (*get_capa)(void *handle);
+	void (*set_features)(void *handle, int features);
 };
 
 /**
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 43cdaef..205c3ab 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -141,13 +141,15 @@
 /**
  * Check address validity for WMI buffer; remap if needed
  * @ptr - internal (linker) fw/ucode address
+ * @size - if non-zero, validate that the block does not
+ *  exceed the device memory (BAR)
  *
  * Valid buffer should be DWORD aligned
  *
  * return address for accessing buffer from the host;
  * if buffer is not valid, return NULL.
  */
-void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
+void __iomem *wmi_buffer_block(struct wil6210_priv *wil, __le32 ptr_, u32 size)
 {
 	u32 off;
 	u32 ptr = le32_to_cpu(ptr_);
@@ -162,10 +164,17 @@
 	off = HOSTADDR(ptr);
 	if (off > wil->bar_size - 4)
 		return NULL;
+	if (size && ((off + size > wil->bar_size) || (off + size < off)))
+		return NULL;
 
 	return wil->csr + off;
 }
 
+void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
+{
+	return wmi_buffer_block(wil, ptr_, 0);
+}
+
 /**
  * Check address validity
  */
@@ -223,7 +232,7 @@
 	uint retry;
 	int rc = 0;
 
-	if (sizeof(cmd) + len > r->entry_size) {
+	if (len > r->entry_size - sizeof(cmd)) {
 		wil_err(wil, "WMI size too large: %d bytes, max is %d\n",
 			(int)(sizeof(cmd) + len), r->entry_size);
 		return -ERANGE;
@@ -1412,8 +1421,14 @@
 	};
 	int rc;
 	u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len;
-	struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL);
+	struct wmi_set_appie_cmd *cmd;
 
+	if (len < ie_len) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	cmd = kzalloc(len, GFP_KERNEL);
 	if (!cmd) {
 		rc = -ENOMEM;
 		goto out;
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 5263ee7..fcefdd1 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -71,6 +71,7 @@
 	WMI_FW_CAPABILITY_RSSI_REPORTING		= 12,
 	WMI_FW_CAPABILITY_SET_SILENT_RSSI_TABLE		= 13,
 	WMI_FW_CAPABILITY_LO_POWER_CALIB_FROM_OTP	= 14,
+	WMI_FW_CAPABILITY_REF_CLOCK_CONTROL		= 18,
 	WMI_FW_CAPABILITY_MAX,
 };
 
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
index 79c081f..6afcf86 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
@@ -429,7 +429,8 @@
 	if (code != BRCMF_E_IF && !fweh->evt_handler[code])
 		return;
 
-	if (datalen > BRCMF_DCMD_MAXLEN)
+	if (datalen > BRCMF_DCMD_MAXLEN ||
+	    datalen + sizeof(*event_packet) > packet_len)
 		return;
 
 	if (in_interrupt())
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
index b3aab2f..ef68546 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
@@ -14764,8 +14764,8 @@
 }
 
 static void
-wlc_phy_set_rfseq_nphy(struct brcms_phy *pi, u8 cmd, u8 *events, u8 *dlys,
-		       u8 len)
+wlc_phy_set_rfseq_nphy(struct brcms_phy *pi, u8 cmd, const u8 *events,
+		       const u8 *dlys, u8 len)
 {
 	u32 t1_offset, t2_offset;
 	u8 ctr;
@@ -15240,16 +15240,16 @@
 static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
 {
 	u16 currband;
-	s8 lna1G_gain_db_rev7[] = { 9, 14, 19, 24 };
-	s8 *lna1_gain_db = NULL;
-	s8 *lna1_gain_db_2 = NULL;
-	s8 *lna2_gain_db = NULL;
-	s8 tiaA_gain_db_rev7[] = { -9, -6, -3, 0, 3, 3, 3, 3, 3, 3 };
-	s8 *tia_gain_db;
-	s8 tiaA_gainbits_rev7[] = { 0, 1, 2, 3, 4, 4, 4, 4, 4, 4 };
-	s8 *tia_gainbits;
-	u16 rfseqA_init_gain_rev7[] = { 0x624f, 0x624f };
-	u16 *rfseq_init_gain;
+	static const s8 lna1G_gain_db_rev7[] = { 9, 14, 19, 24 };
+	const s8 *lna1_gain_db = NULL;
+	const s8 *lna1_gain_db_2 = NULL;
+	const s8 *lna2_gain_db = NULL;
+	static const s8 tiaA_gain_db_rev7[] = { -9, -6, -3, 0, 3, 3, 3, 3, 3, 3 };
+	const s8 *tia_gain_db;
+	static const s8 tiaA_gainbits_rev7[] = { 0, 1, 2, 3, 4, 4, 4, 4, 4, 4 };
+	const s8 *tia_gainbits;
+	static const u16 rfseqA_init_gain_rev7[] = { 0x624f, 0x624f };
+	const u16 *rfseq_init_gain;
 	u16 init_gaincode;
 	u16 clip1hi_gaincode;
 	u16 clip1md_gaincode = 0;
@@ -15310,10 +15310,9 @@
 
 			if ((freq <= 5080) || (freq == 5825)) {
 
-				s8 lna1A_gain_db_rev7[] = { 11, 16, 20, 24 };
-				s8 lna1A_gain_db_2_rev7[] = {
-					11, 17, 22, 25};
-				s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
+				static const s8 lna1A_gain_db_rev7[] = { 11, 16, 20, 24 };
+				static const s8 lna1A_gain_db_2_rev7[] = { 11, 17, 22, 25};
+				static const s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
 
 				crsminu_th = 0x3e;
 				lna1_gain_db = lna1A_gain_db_rev7;
@@ -15321,10 +15320,9 @@
 				lna2_gain_db = lna2A_gain_db_rev7;
 			} else if ((freq >= 5500) && (freq <= 5700)) {
 
-				s8 lna1A_gain_db_rev7[] = { 11, 17, 21, 25 };
-				s8 lna1A_gain_db_2_rev7[] = {
-					12, 18, 22, 26};
-				s8 lna2A_gain_db_rev7[] = { 1, 8, 12, 16 };
+				static const s8 lna1A_gain_db_rev7[] = { 11, 17, 21, 25 };
+				static const s8 lna1A_gain_db_2_rev7[] = { 12, 18, 22, 26};
+				static const s8 lna2A_gain_db_rev7[] = { 1, 8, 12, 16 };
 
 				crsminu_th = 0x45;
 				clip1md_gaincode_B = 0x14;
@@ -15335,10 +15333,9 @@
 				lna2_gain_db = lna2A_gain_db_rev7;
 			} else {
 
-				s8 lna1A_gain_db_rev7[] = { 12, 18, 22, 26 };
-				s8 lna1A_gain_db_2_rev7[] = {
-					12, 18, 22, 26};
-				s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
+				static const s8 lna1A_gain_db_rev7[] = { 12, 18, 22, 26 };
+				static const s8 lna1A_gain_db_2_rev7[] = { 12, 18, 22, 26};
+				static const s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
 
 				crsminu_th = 0x41;
 				lna1_gain_db = lna1A_gain_db_rev7;
@@ -15450,65 +15447,65 @@
 		NPHY_RFSEQ_CMD_CLR_HIQ_DIS,
 		NPHY_RFSEQ_CMD_SET_HPF_BW
 	};
-	u8 rfseq_updategainu_dlys[] = { 10, 30, 1 };
-	s8 lna1G_gain_db[] = { 7, 11, 16, 23 };
-	s8 lna1G_gain_db_rev4[] = { 8, 12, 17, 25 };
-	s8 lna1G_gain_db_rev5[] = { 9, 13, 18, 26 };
-	s8 lna1G_gain_db_rev6[] = { 8, 13, 18, 25 };
-	s8 lna1G_gain_db_rev6_224B0[] = { 10, 14, 19, 27 };
-	s8 lna1A_gain_db[] = { 7, 11, 17, 23 };
-	s8 lna1A_gain_db_rev4[] = { 8, 12, 18, 23 };
-	s8 lna1A_gain_db_rev5[] = { 6, 10, 16, 21 };
-	s8 lna1A_gain_db_rev6[] = { 6, 10, 16, 21 };
-	s8 *lna1_gain_db = NULL;
-	s8 lna2G_gain_db[] = { -5, 6, 10, 14 };
-	s8 lna2G_gain_db_rev5[] = { -3, 7, 11, 16 };
-	s8 lna2G_gain_db_rev6[] = { -5, 6, 10, 14 };
-	s8 lna2G_gain_db_rev6_224B0[] = { -5, 6, 10, 15 };
-	s8 lna2A_gain_db[] = { -6, 2, 6, 10 };
-	s8 lna2A_gain_db_rev4[] = { -5, 2, 6, 10 };
-	s8 lna2A_gain_db_rev5[] = { -7, 0, 4, 8 };
-	s8 lna2A_gain_db_rev6[] = { -7, 0, 4, 8 };
-	s8 *lna2_gain_db = NULL;
-	s8 tiaG_gain_db[] = {
+	static const u8 rfseq_updategainu_dlys[] = { 10, 30, 1 };
+	static const s8 lna1G_gain_db[] = { 7, 11, 16, 23 };
+	static const s8 lna1G_gain_db_rev4[] = { 8, 12, 17, 25 };
+	static const s8 lna1G_gain_db_rev5[] = { 9, 13, 18, 26 };
+	static const s8 lna1G_gain_db_rev6[] = { 8, 13, 18, 25 };
+	static const s8 lna1G_gain_db_rev6_224B0[] = { 10, 14, 19, 27 };
+	static const s8 lna1A_gain_db[] = { 7, 11, 17, 23 };
+	static const s8 lna1A_gain_db_rev4[] = { 8, 12, 18, 23 };
+	static const s8 lna1A_gain_db_rev5[] = { 6, 10, 16, 21 };
+	static const s8 lna1A_gain_db_rev6[] = { 6, 10, 16, 21 };
+	const s8 *lna1_gain_db = NULL;
+	static const s8 lna2G_gain_db[] = { -5, 6, 10, 14 };
+	static const s8 lna2G_gain_db_rev5[] = { -3, 7, 11, 16 };
+	static const s8 lna2G_gain_db_rev6[] = { -5, 6, 10, 14 };
+	static const s8 lna2G_gain_db_rev6_224B0[] = { -5, 6, 10, 15 };
+	static const s8 lna2A_gain_db[] = { -6, 2, 6, 10 };
+	static const s8 lna2A_gain_db_rev4[] = { -5, 2, 6, 10 };
+	static const s8 lna2A_gain_db_rev5[] = { -7, 0, 4, 8 };
+	static const s8 lna2A_gain_db_rev6[] = { -7, 0, 4, 8 };
+	const s8 *lna2_gain_db = NULL;
+	static const s8 tiaG_gain_db[] = {
 		0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A };
-	s8 tiaA_gain_db[] = {
+	static const s8 tiaA_gain_db[] = {
 		0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13 };
-	s8 tiaA_gain_db_rev4[] = {
+	static const s8 tiaA_gain_db_rev4[] = {
 		0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
-	s8 tiaA_gain_db_rev5[] = {
+	static const s8 tiaA_gain_db_rev5[] = {
 		0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
-	s8 tiaA_gain_db_rev6[] = {
+	static const s8 tiaA_gain_db_rev6[] = {
 		0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
-	s8 *tia_gain_db;
-	s8 tiaG_gainbits[] = {
+	const s8 *tia_gain_db;
+	static const s8 tiaG_gainbits[] = {
 		0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 };
-	s8 tiaA_gainbits[] = {
+	static const s8 tiaA_gainbits[] = {
 		0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06 };
-	s8 tiaA_gainbits_rev4[] = {
+	static const s8 tiaA_gainbits_rev4[] = {
 		0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
-	s8 tiaA_gainbits_rev5[] = {
+	static const s8 tiaA_gainbits_rev5[] = {
 		0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
-	s8 tiaA_gainbits_rev6[] = {
+	static const s8 tiaA_gainbits_rev6[] = {
 		0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
-	s8 *tia_gainbits;
-	s8 lpf_gain_db[] = { 0x00, 0x06, 0x0c, 0x12, 0x12, 0x12 };
-	s8 lpf_gainbits[] = { 0x00, 0x01, 0x02, 0x03, 0x03, 0x03 };
-	u16 rfseqG_init_gain[] = { 0x613f, 0x613f, 0x613f, 0x613f };
-	u16 rfseqG_init_gain_rev4[] = { 0x513f, 0x513f, 0x513f, 0x513f };
-	u16 rfseqG_init_gain_rev5[] = { 0x413f, 0x413f, 0x413f, 0x413f };
-	u16 rfseqG_init_gain_rev5_elna[] = {
+	const s8 *tia_gainbits;
+	static const s8 lpf_gain_db[] = { 0x00, 0x06, 0x0c, 0x12, 0x12, 0x12 };
+	static const s8 lpf_gainbits[] = { 0x00, 0x01, 0x02, 0x03, 0x03, 0x03 };
+	static const u16 rfseqG_init_gain[] = { 0x613f, 0x613f, 0x613f, 0x613f };
+	static const u16 rfseqG_init_gain_rev4[] = { 0x513f, 0x513f, 0x513f, 0x513f };
+	static const u16 rfseqG_init_gain_rev5[] = { 0x413f, 0x413f, 0x413f, 0x413f };
+	static const u16 rfseqG_init_gain_rev5_elna[] = {
 		0x013f, 0x013f, 0x013f, 0x013f };
-	u16 rfseqG_init_gain_rev6[] = { 0x513f, 0x513f };
-	u16 rfseqG_init_gain_rev6_224B0[] = { 0x413f, 0x413f };
-	u16 rfseqG_init_gain_rev6_elna[] = { 0x113f, 0x113f };
-	u16 rfseqA_init_gain[] = { 0x516f, 0x516f, 0x516f, 0x516f };
-	u16 rfseqA_init_gain_rev4[] = { 0x614f, 0x614f, 0x614f, 0x614f };
-	u16 rfseqA_init_gain_rev4_elna[] = {
+	static const u16 rfseqG_init_gain_rev6[] = { 0x513f, 0x513f };
+	static const u16 rfseqG_init_gain_rev6_224B0[] = { 0x413f, 0x413f };
+	static const u16 rfseqG_init_gain_rev6_elna[] = { 0x113f, 0x113f };
+	static const u16 rfseqA_init_gain[] = { 0x516f, 0x516f, 0x516f, 0x516f };
+	static const u16 rfseqA_init_gain_rev4[] = { 0x614f, 0x614f, 0x614f, 0x614f };
+	static const u16 rfseqA_init_gain_rev4_elna[] = {
 		0x314f, 0x314f, 0x314f, 0x314f };
-	u16 rfseqA_init_gain_rev5[] = { 0x714f, 0x714f, 0x714f, 0x714f };
-	u16 rfseqA_init_gain_rev6[] = { 0x714f, 0x714f };
-	u16 *rfseq_init_gain;
+	static const u16 rfseqA_init_gain_rev5[] = { 0x714f, 0x714f, 0x714f, 0x714f };
+	static const u16 rfseqA_init_gain_rev6[] = { 0x714f, 0x714f };
+	const u16 *rfseq_init_gain;
 	u16 initG_gaincode = 0x627e;
 	u16 initG_gaincode_rev4 = 0x527e;
 	u16 initG_gaincode_rev5 = 0x427e;
@@ -15538,10 +15535,10 @@
 	u16 clip1mdA_gaincode_rev6 = 0x2084;
 	u16 clip1md_gaincode = 0;
 	u16 clip1loG_gaincode = 0x0074;
-	u16 clip1loG_gaincode_rev5[] = {
+	static const u16 clip1loG_gaincode_rev5[] = {
 		0x0062, 0x0064, 0x006a, 0x106a, 0x106c, 0x1074, 0x107c, 0x207c
 	};
-	u16 clip1loG_gaincode_rev6[] = {
+	static const u16 clip1loG_gaincode_rev6[] = {
 		0x106a, 0x106c, 0x1074, 0x107c, 0x007e, 0x107e, 0x207e, 0x307e
 	};
 	u16 clip1loG_gaincode_rev6_224B0 = 0x1074;
@@ -16066,7 +16063,7 @@
 
 static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
 {
-	u8 rfseq_rx2tx_events[] = {
+	static const u8 rfseq_rx2tx_events[] = {
 		NPHY_RFSEQ_CMD_NOP,
 		NPHY_RFSEQ_CMD_RXG_FBW,
 		NPHY_RFSEQ_CMD_TR_SWITCH,
@@ -16076,7 +16073,7 @@
 		NPHY_RFSEQ_CMD_EXT_PA
 	};
 	u8 rfseq_rx2tx_dlys[] = { 8, 6, 6, 2, 4, 60, 1 };
-	u8 rfseq_tx2rx_events[] = {
+	static const u8 rfseq_tx2rx_events[] = {
 		NPHY_RFSEQ_CMD_NOP,
 		NPHY_RFSEQ_CMD_EXT_PA,
 		NPHY_RFSEQ_CMD_TX_GAIN,
@@ -16085,8 +16082,8 @@
 		NPHY_RFSEQ_CMD_RXG_FBW,
 		NPHY_RFSEQ_CMD_CLR_HIQ_DIS
 	};
-	u8 rfseq_tx2rx_dlys[] = { 8, 6, 2, 4, 4, 6, 1 };
-	u8 rfseq_tx2rx_events_rev3[] = {
+	static const u8 rfseq_tx2rx_dlys[] = { 8, 6, 2, 4, 4, 6, 1 };
+	static const u8 rfseq_tx2rx_events_rev3[] = {
 		NPHY_REV3_RFSEQ_CMD_EXT_PA,
 		NPHY_REV3_RFSEQ_CMD_INT_PA_PU,
 		NPHY_REV3_RFSEQ_CMD_TX_GAIN,
@@ -16096,7 +16093,7 @@
 		NPHY_REV3_RFSEQ_CMD_CLR_HIQ_DIS,
 		NPHY_REV3_RFSEQ_CMD_END
 	};
-	u8 rfseq_tx2rx_dlys_rev3[] = { 8, 4, 2, 2, 4, 4, 6, 1 };
+	static const u8 rfseq_tx2rx_dlys_rev3[] = { 8, 4, 2, 2, 4, 4, 6, 1 };
 	u8 rfseq_rx2tx_events_rev3[] = {
 		NPHY_REV3_RFSEQ_CMD_NOP,
 		NPHY_REV3_RFSEQ_CMD_RXG_FBW,
@@ -16110,7 +16107,7 @@
 	};
 	u8 rfseq_rx2tx_dlys_rev3[] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 };
 
-	u8 rfseq_rx2tx_events_rev3_ipa[] = {
+	static const u8 rfseq_rx2tx_events_rev3_ipa[] = {
 		NPHY_REV3_RFSEQ_CMD_NOP,
 		NPHY_REV3_RFSEQ_CMD_RXG_FBW,
 		NPHY_REV3_RFSEQ_CMD_TR_SWITCH,
@@ -16121,15 +16118,15 @@
 		NPHY_REV3_RFSEQ_CMD_INT_PA_PU,
 		NPHY_REV3_RFSEQ_CMD_END
 	};
-	u8 rfseq_rx2tx_dlys_rev3_ipa[] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
-	u16 rfseq_rx2tx_dacbufpu_rev7[] = { 0x10f, 0x10f };
+	static const u8 rfseq_rx2tx_dlys_rev3_ipa[] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
+	static const u16 rfseq_rx2tx_dacbufpu_rev7[] = { 0x10f, 0x10f };
 
 	s16 alpha0, alpha1, alpha2;
 	s16 beta0, beta1, beta2;
 	u32 leg_data_weights, ht_data_weights, nss1_data_weights,
 	    stbc_data_weights;
 	u8 chan_freq_range = 0;
-	u16 dac_control = 0x0002;
+	static const u16 dac_control = 0x0002;
 	u16 aux_adc_vmid_rev7_core0[] = { 0x8e, 0x96, 0x96, 0x96 };
 	u16 aux_adc_vmid_rev7_core1[] = { 0x8f, 0x9f, 0x9f, 0x96 };
 	u16 aux_adc_vmid_rev4[] = { 0xa2, 0xb4, 0xb4, 0x89 };
@@ -16139,8 +16136,8 @@
 	u16 aux_adc_gain_rev4[] = { 0x02, 0x02, 0x02, 0x00 };
 	u16 aux_adc_gain_rev3[] = { 0x02, 0x02, 0x02, 0x00 };
 	u16 *aux_adc_gain;
-	u16 sk_adc_vmid[] = { 0xb4, 0xb4, 0xb4, 0x24 };
-	u16 sk_adc_gain[] = { 0x02, 0x02, 0x02, 0x02 };
+	static const u16 sk_adc_vmid[] = { 0xb4, 0xb4, 0xb4, 0x24 };
+	static const u16 sk_adc_gain[] = { 0x02, 0x02, 0x02, 0x02 };
 	s32 min_nvar_val = 0x18d;
 	s32 min_nvar_offset_6mbps = 20;
 	u8 pdetrange;
@@ -16151,9 +16148,9 @@
 	u16 rfseq_rx2tx_lpf_h_hpc_rev7 = 0x77;
 	u16 rfseq_tx2rx_lpf_h_hpc_rev7 = 0x77;
 	u16 rfseq_pktgn_lpf_h_hpc_rev7 = 0x77;
-	u16 rfseq_htpktgn_lpf_hpc_rev7[] = { 0x77, 0x11, 0x11 };
-	u16 rfseq_pktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
-	u16 rfseq_cckpktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
+	static const u16 rfseq_htpktgn_lpf_hpc_rev7[] = { 0x77, 0x11, 0x11 };
+	static const u16 rfseq_pktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
+	static const u16 rfseq_cckpktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
 	u16 ipalvlshift_3p3_war_en = 0;
 	u16 rccal_bcap_val, rccal_scap_val;
 	u16 rccal_tx20_11b_bcap = 0;
@@ -24291,13 +24288,13 @@
 	u16 bbmult;
 	u16 tblentry;
 
-	struct nphy_txiqcal_ladder ladder_lo[] = {
+	static const struct nphy_txiqcal_ladder ladder_lo[] = {
 		{3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0},
 		{25, 0}, {25, 1}, {25, 2}, {25, 3}, {25, 4}, {25, 5},
 		{25, 6}, {25, 7}, {35, 7}, {50, 7}, {71, 7}, {100, 7}
 	};
 
-	struct nphy_txiqcal_ladder ladder_iq[] = {
+	static const struct nphy_txiqcal_ladder ladder_iq[] = {
 		{3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0},
 		{25, 0}, {35, 0}, {50, 0}, {71, 0}, {100, 0}, {100, 1},
 		{100, 2}, {100, 3}, {100, 4}, {100, 5}, {100, 6}, {100, 7}
@@ -25773,67 +25770,67 @@
 	u16 cal_gain[2];
 	struct nphy_iqcal_params cal_params[2];
 	u32 tbl_len;
-	void *tbl_ptr;
+	const void *tbl_ptr;
 	bool ladder_updated[2];
 	u8 mphase_cal_lastphase = 0;
 	int bcmerror = 0;
 	bool phyhang_avoid_state = false;
 
-	u16 tbl_tx_iqlo_cal_loft_ladder_20[] = {
+	static const u16 tbl_tx_iqlo_cal_loft_ladder_20[] = {
 		0x0300, 0x0500, 0x0700, 0x0900, 0x0d00, 0x1100, 0x1900, 0x1901,
 		0x1902,
 		0x1903, 0x1904, 0x1905, 0x1906, 0x1907, 0x2407, 0x3207, 0x4607,
 		0x6407
 	};
 
-	u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = {
+	static const u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = {
 		0x0200, 0x0300, 0x0600, 0x0900, 0x0d00, 0x1100, 0x1900, 0x2400,
 		0x3200,
 		0x4600, 0x6400, 0x6401, 0x6402, 0x6403, 0x6404, 0x6405, 0x6406,
 		0x6407
 	};
 
-	u16 tbl_tx_iqlo_cal_loft_ladder_40[] = {
+	static const u16 tbl_tx_iqlo_cal_loft_ladder_40[] = {
 		0x0200, 0x0300, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1201,
 		0x1202,
 		0x1203, 0x1204, 0x1205, 0x1206, 0x1207, 0x1907, 0x2307, 0x3207,
 		0x4707
 	};
 
-	u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = {
+	static const u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = {
 		0x0100, 0x0200, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1900,
 		0x2300,
 		0x3200, 0x4700, 0x4701, 0x4702, 0x4703, 0x4704, 0x4705, 0x4706,
 		0x4707
 	};
 
-	u16 tbl_tx_iqlo_cal_startcoefs[] = {
+	static const u16 tbl_tx_iqlo_cal_startcoefs[] = {
 		0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
 		0x0000
 	};
 
-	u16 tbl_tx_iqlo_cal_cmds_fullcal[] = {
+	static const u16 tbl_tx_iqlo_cal_cmds_fullcal[] = {
 		0x8123, 0x8264, 0x8086, 0x8245, 0x8056,
 		0x9123, 0x9264, 0x9086, 0x9245, 0x9056
 	};
 
-	u16 tbl_tx_iqlo_cal_cmds_recal[] = {
+	static const u16 tbl_tx_iqlo_cal_cmds_recal[] = {
 		0x8101, 0x8253, 0x8053, 0x8234, 0x8034,
 		0x9101, 0x9253, 0x9053, 0x9234, 0x9034
 	};
 
-	u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[] = {
+	static const u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[] = {
 		0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
 		0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
 		0x0000
 	};
 
-	u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
+	static const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
 		0x8434, 0x8334, 0x8084, 0x8267, 0x8056, 0x8234,
 		0x9434, 0x9334, 0x9084, 0x9267, 0x9056, 0x9234
 	};
 
-	u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = {
+	static const u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = {
 		0x8423, 0x8323, 0x8073, 0x8256, 0x8045, 0x8223,
 		0x9423, 0x9323, 0x9073, 0x9256, 0x9045, 0x9223
 	};
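
The phy_n.c hunks above mark the calibration and gain tables static const, so each table lives in read-only data and is shared by all calls instead of being rebuilt on the stack every time the function runs. A minimal illustration of the difference (not one of the driver's tables):

#include <stdio.h>

static int gain_for_index(unsigned int idx)
{
	/* static const: stored once in .rodata, no per-call stack copy */
	static const signed char lna_gain_db[] = { 9, 14, 19, 24 };

	if (idx >= sizeof(lna_gain_db) / sizeof(lna_gain_db[0]))
		return -1;
	return lna_gain_db[idx];
}

int main(void)
{
	printf("%d\n", gain_for_index(2));	/* 19 */
	return 0;
}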
diff --git a/drivers/net/wireless/cnss_genl/cnss_nl.c b/drivers/net/wireless/cnss_genl/cnss_nl.c
index fafd9ce..29dd4c9 100644
--- a/drivers/net/wireless/cnss_genl/cnss_nl.c
+++ b/drivers/net/wireless/cnss_genl/cnss_nl.c
@@ -64,6 +64,8 @@
 	[CLD80211_ATTR_VENDOR_DATA] = { .type = NLA_NESTED },
 	[CLD80211_ATTR_DATA] = { .type = NLA_BINARY,
 				 .len = CLD80211_MAX_NL_DATA },
+	[CLD80211_ATTR_META_DATA] = { .type = NLA_BINARY,
+				 .len = CLD80211_MAX_NL_DATA },
 };
 
 static int cld80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
diff --git a/drivers/net/wireless/cnss_utils/cnss_utils.c b/drivers/net/wireless/cnss_utils/cnss_utils.c
index d73846e..4955130 100644
--- a/drivers/net/wireless/cnss_utils/cnss_utils.c
+++ b/drivers/net/wireless/cnss_utils/cnss_utils.c
@@ -34,6 +34,11 @@
 	u32 no_of_mac_addr_set;
 };
 
+enum mac_type {
+	CNSS_MAC_PROVISIONED,
+	CNSS_MAC_DERIVED,
+};
+
 static struct cnss_utils_priv {
 	struct cnss_unsafe_channel_list unsafe_channel_list;
 	struct cnss_dfs_nol_info dfs_nol_info;
@@ -42,8 +47,8 @@
 	/* generic spin-lock for dfs_nol info */
 	spinlock_t dfs_nol_info_lock;
 	int driver_load_cnt;
-	bool is_wlan_mac_set;
 	struct cnss_wlan_mac_addr wlan_mac_addr;
+	struct cnss_wlan_mac_addr wlan_der_mac_addr;
 	enum cnss_utils_cc_src cc_source;
 } *cnss_utils_priv;
 
@@ -189,7 +194,8 @@
 }
 EXPORT_SYMBOL(cnss_utils_get_driver_load_cnt);
 
-int cnss_utils_set_wlan_mac_address(const u8 *in, const uint32_t len)
+static int set_wlan_mac_address(const u8 *mac_list, const uint32_t len,
+				enum mac_type type)
 {
 	struct cnss_utils_priv *priv = cnss_utils_priv;
 	u32 no_of_mac_addr;
@@ -200,11 +206,6 @@
 	if (!priv)
 		return -EINVAL;
 
-	if (priv->is_wlan_mac_set) {
-		pr_debug("WLAN MAC address is already set\n");
-		return 0;
-	}
-
 	if (len == 0 || (len % ETH_ALEN) != 0) {
 		pr_err("Invalid length %d\n", len);
 		return -EINVAL;
@@ -217,24 +218,45 @@
 		return -EINVAL;
 	}
 
-	priv->is_wlan_mac_set = true;
-	addr = &priv->wlan_mac_addr;
+	if (type == CNSS_MAC_PROVISIONED)
+		addr = &priv->wlan_mac_addr;
+	else
+		addr = &priv->wlan_der_mac_addr;
+
+	if (addr->no_of_mac_addr_set) {
+		pr_err("WLAN MAC address is already set, num %d type %d\n",
+		       addr->no_of_mac_addr_set, type);
+		return 0;
+	}
+
 	addr->no_of_mac_addr_set = no_of_mac_addr;
 	temp = &addr->mac_addr[0][0];
 
 	for (iter = 0; iter < no_of_mac_addr;
-	     ++iter, temp += ETH_ALEN, in += ETH_ALEN) {
-		ether_addr_copy(temp, in);
+	     ++iter, temp += ETH_ALEN, mac_list += ETH_ALEN) {
+		ether_addr_copy(temp, mac_list);
 		pr_debug("MAC_ADDR:%02x:%02x:%02x:%02x:%02x:%02x\n",
 			 temp[0], temp[1], temp[2],
 			 temp[3], temp[4], temp[5]);
 	}
-
 	return 0;
 }
+
+int cnss_utils_set_wlan_mac_address(const u8 *mac_list, const uint32_t len)
+{
+	return set_wlan_mac_address(mac_list, len, CNSS_MAC_PROVISIONED);
+}
 EXPORT_SYMBOL(cnss_utils_set_wlan_mac_address);
 
-u8 *cnss_utils_get_wlan_mac_address(struct device *dev, uint32_t *num)
+int cnss_utils_set_wlan_derived_mac_address(
+				const u8 *mac_list, const uint32_t len)
+{
+	return set_wlan_mac_address(mac_list, len, CNSS_MAC_DERIVED);
+}
+EXPORT_SYMBOL(cnss_utils_set_wlan_derived_mac_address);
+
+static u8 *get_wlan_mac_address(struct device *dev,
+				u32 *num, enum mac_type type)
 {
 	struct cnss_utils_priv *priv = cnss_utils_priv;
 	struct cnss_wlan_mac_addr *addr = NULL;
@@ -242,20 +264,36 @@
 	if (!priv)
 		goto out;
 
-	if (!priv->is_wlan_mac_set) {
-		pr_debug("WLAN MAC address is not set\n");
+	if (type == CNSS_MAC_PROVISIONED)
+		addr = &priv->wlan_mac_addr;
+	else
+		addr = &priv->wlan_der_mac_addr;
+
+	if (!addr->no_of_mac_addr_set) {
+		pr_err("WLAN MAC address is not set, type %d\n", type);
 		goto out;
 	}
-
-	addr = &priv->wlan_mac_addr;
 	*num = addr->no_of_mac_addr_set;
 	return &addr->mac_addr[0][0];
+
 out:
 	*num = 0;
 	return NULL;
 }
+
+u8 *cnss_utils_get_wlan_mac_address(struct device *dev, uint32_t *num)
+{
+	return get_wlan_mac_address(dev, num, CNSS_MAC_PROVISIONED);
+}
 EXPORT_SYMBOL(cnss_utils_get_wlan_mac_address);
 
+u8 *cnss_utils_get_wlan_derived_mac_address(
+			struct device *dev, uint32_t *num)
+{
+	return get_wlan_mac_address(dev, num, CNSS_MAC_DERIVED);
+}
+EXPORT_SYMBOL(cnss_utils_get_wlan_derived_mac_address);
+
 void cnss_utils_set_cc_source(struct device *dev,
 			      enum cnss_utils_cc_src cc_source)
 {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 2cbef96..1281ebe 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -1128,7 +1128,7 @@
 	}
 	if (0 == tmp) {
 		read_addr = REG_DBI_RDATA + addr % 4;
-		ret = rtl_read_byte(rtlpriv, read_addr);
+		ret = rtl_read_word(rtlpriv, read_addr);
 	}
 	return ret;
 }
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index e63f1a0..c8f8813 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -818,6 +818,9 @@
 	struct irq_desc *desc = irq_data_to_desc(d);
 	struct irq_data *parent_data = irq_get_irq_data(desc->parent_irq);
 
+	if (!parent_data)
+		return;
+
 	if (parent_data->chip->irq_ack)
 		parent_data->chip->irq_ack(parent_data);
 }
@@ -827,6 +830,9 @@
 	struct irq_desc *desc = irq_data_to_desc(d);
 	struct irq_data *parent_data = irq_get_irq_data(desc->parent_irq);
 
+	if (!parent_data)
+		return;
+
 	if (parent_data->chip->irq_eoi)
 		parent_data->chip->irq_eoi(parent_data);
 }
@@ -852,6 +858,9 @@
 	struct irq_desc *desc = irq_data_to_desc(d);
 	struct irq_data *parent_data = irq_get_irq_data(desc->parent_irq);
 
+	if (!parent_data)
+		return 0;
+
 	if (parent_data->chip->irq_set_vcpu_affinity)
 		return parent_data->chip->irq_set_vcpu_affinity(parent_data,
 				vcpu_info);
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm670.c b/drivers/pinctrl/qcom/pinctrl-sdm670.c
index 8749653..6145c75 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm670.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm670.c
@@ -1596,7 +1596,7 @@
 	{24, 517},
 	{26, 518},
 	{30, 519},
-	{31, 639},
+	{31, 632},
 	{32, 521},
 	{34, 522},
 	{36, 523},
@@ -1604,12 +1604,12 @@
 	{38, 525},
 	{39, 526},
 	{40, 527},
-	{41, 637},
+	{41, 630},
 	{43, 529},
 	{44, 530},
 	{46, 531},
 	{48, 532},
-	{49, 640},
+	{49, 633},
 	{52, 534},
 	{53, 535},
 	{54, 536},
@@ -1625,7 +1625,7 @@
 	{85, 555},
 	{86, 556},
 	{88, 557},
-	{89, 638},
+	{89, 631},
 	{91, 559},
 	{92, 560},
 	{95, 561},
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
index 7aa7ffd..a249567 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
@@ -86,7 +86,9 @@
 	__stringify(ADD_VLAN_IFACE),
 	__stringify(DEL_VLAN_IFACE),
 	__stringify(ADD_L2TP_VLAN_MAPPING),
-	__stringify(DEL_L2TP_VLAN_MAPPING)
+	__stringify(DEL_L2TP_VLAN_MAPPING),
+	__stringify(IPA_PER_CLIENT_STATS_CONNECT_EVENT),
+	__stringify(IPA_PER_CLIENT_STATS_DISCONNECT_EVENT),
 };
 
 const char *ipa_hdr_l2_type_name[] = {
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c
index dd59140..5228b2d 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1436,6 +1436,66 @@
 			start_ipv6_filter_idx),
 	},
 	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x17,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			rule_id_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x17,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			rule_id_len),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= QMI_IPA_MAX_FILTERS_V01,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x17,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			rule_id),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x18,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			dst_pipe_id_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x18,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			dst_pipe_id_len),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= QMI_IPA_MAX_CLIENT_DST_PIPES_V01,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x18,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			dst_pipe_id),
+	},
+	{
 		.data_type	= QMI_EOTI,
 		.is_array	= NO_ARRAY,
 		.tlv_type	= QMI_COMMON_TLV_TYPE,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index fb7b3a7..d3c2ca3 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -1016,6 +1016,49 @@
 			break;
 		}
 		break;
+
+	case IPA_IOC_ADD_RT_RULE_EXT:
+		if (copy_from_user(header,
+				(const void __user *)arg,
+				sizeof(struct ipa_ioc_add_rt_rule_ext))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_add_rt_rule_ext *)header)->num_rules;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_add_rt_rule_ext) +
+		   pre_entry * sizeof(struct ipa_rt_rule_add_ext);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* add check in case user-space module compromised */
+		if (unlikely(
+			((struct ipa_ioc_add_rt_rule_ext *)param)->num_rules
+			!= pre_entry)) {
+			IPAERR(" prevent memory corruption(%d not match %d)\n",
+				((struct ipa_ioc_add_rt_rule_ext *)param)->
+				num_rules,
+				pre_entry);
+			retval = -EINVAL;
+			break;
+		}
+		if (ipa3_add_rt_rule_ext(
+			(struct ipa_ioc_add_rt_rule_ext *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
 	case IPA_IOC_ADD_RT_RULE_AFTER:
 		if (copy_from_user(header, (const void __user *)arg,
 			sizeof(struct ipa_ioc_add_rt_rule_after))) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index c740660..5da83e5 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -39,16 +39,6 @@
 	const struct file_operations fops;
 };
 
-const char *ipa3_excp_name[] = {
-	__stringify_1(IPA_A5_MUX_HDR_EXCP_RSVD0),
-	__stringify_1(IPA_A5_MUX_HDR_EXCP_RSVD1),
-	__stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_IHL),
-	__stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_REPLICATED),
-	__stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_TAG),
-	__stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_SW_FLT),
-	__stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_NAT),
-	__stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_IP),
-};
 
 const char *ipa3_event_name[] = {
 	__stringify(WLAN_CLIENT_CONNECT),
@@ -80,7 +70,9 @@
 	__stringify(ADD_VLAN_IFACE),
 	__stringify(DEL_VLAN_IFACE),
 	__stringify(ADD_L2TP_VLAN_MAPPING),
-	__stringify(DEL_L2TP_VLAN_MAPPING)
+	__stringify(DEL_L2TP_VLAN_MAPPING),
+	__stringify(IPA_PER_CLIENT_STATS_CONNECT_EVENT),
+	__stringify(IPA_PER_CLIENT_STATS_DISCONNECT_EVENT),
 };
 
 const char *ipa3_hdr_l2_type_name[] = {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hw_defs.h b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_defs.h
deleted file mode 100644
index dff3a3f..0000000
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hw_defs.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef _IPA_HW_DEFS_H
-#define _IPA_HW_DEFS_H
-#include <linux/bitops.h>
-
-/* This header defines various HW related data types */
-
-
-#define IPA_A5_MUX_HDR_EXCP_FLAG_IP		BIT(7)
-#define IPA_A5_MUX_HDR_EXCP_FLAG_NAT		BIT(6)
-#define IPA_A5_MUX_HDR_EXCP_FLAG_SW_FLT	BIT(5)
-#define IPA_A5_MUX_HDR_EXCP_FLAG_TAG		BIT(4)
-#define IPA_A5_MUX_HDR_EXCP_FLAG_REPLICATED	BIT(3)
-#define IPA_A5_MUX_HDR_EXCP_FLAG_IHL		BIT(2)
-
-/**
- * struct ipa3_a5_mux_hdr - A5 MUX header definition
- * @interface_id: interface ID
- * @src_pipe_index: source pipe index
- * @flags: flags
- * @metadata: metadata
- *
- * A5 MUX header is in BE, A5 runs in LE. This struct definition
- * allows A5 SW to correctly parse the header
- */
-struct ipa3_a5_mux_hdr {
-	u16 interface_id;
-	u8 src_pipe_index;
-	u8 flags;
-	u32 metadata;
-};
-
-#endif /* _IPA_HW_DEFS_H */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 32e8d32..ad925c5 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -27,7 +27,6 @@
 #include <linux/iommu.h>
 #include <linux/platform_device.h>
 #include <linux/firmware.h>
-#include "ipa_hw_defs.h"
 #include "ipa_qmi_service.h"
 #include "../ipa_api.h"
 #include "ipahal/ipahal_reg.h"
@@ -434,6 +433,7 @@
 	int id;
 	u16 prio;
 	u16 rule_id;
+	u16 rule_id_valid;
 };
 
 /**
@@ -1761,6 +1761,8 @@
  */
 int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules);
 
+int ipa3_add_rt_rule_ext(struct ipa_ioc_add_rt_rule_ext *rules);
+
 int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules);
 
 int ipa3_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
index 9f27c4f..c2daa05 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
@@ -30,7 +30,6 @@
 #define IPA_NAT_MAX_NUM_OF_INIT_CMD_DESC 3
 #define IPA_IPV6CT_MAX_NUM_OF_INIT_CMD_DESC 2
 #define IPA_MAX_NUM_OF_TABLE_DMA_CMD_DESC 4
-#define IPA_MAX_NUM_OF_DEL_TABLE_CMD_DESC 2
 
 enum ipa_nat_ipv6ct_table_type {
 	IPA_NAT_BASE_TBL = 0,
@@ -657,6 +656,153 @@
 	IPADBG("return\n");
 }
 
+static void ipa3_nat_create_modify_pdn_cmd(
+	struct ipahal_imm_cmd_dma_shared_mem *mem_cmd, bool zero_mem)
+{
+	size_t pdn_entry_size, mem_size;
+
+	IPADBG("\n");
+
+	ipahal_nat_entry_size(IPAHAL_NAT_IPV4_PDN, &pdn_entry_size);
+	mem_size = pdn_entry_size * IPA_MAX_PDN_NUM;
+
+	if (zero_mem)
+		memset(ipa3_ctx->nat_mem.pdn_mem.base, 0, mem_size);
+
+	/* Copy the PDN config table to SRAM */
+	mem_cmd->is_read = false;
+	mem_cmd->skip_pipeline_clear = false;
+	mem_cmd->pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	mem_cmd->size = mem_size;
+	mem_cmd->system_addr = ipa3_ctx->nat_mem.pdn_mem.phys_base;
+	mem_cmd->local_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(pdn_config_ofst);
+
+	IPADBG("return\n");
+}
+
+static int ipa3_nat_send_init_cmd(struct ipahal_imm_cmd_ip_v4_nat_init *cmd,
+	bool zero_pdn_table)
+{
+	struct ipa3_desc desc[IPA_NAT_MAX_NUM_OF_INIT_CMD_DESC];
+	struct ipahal_imm_cmd_pyld *cmd_pyld[IPA_NAT_MAX_NUM_OF_INIT_CMD_DESC];
+	int i, num_cmd = 0, result;
+
+	IPADBG("\n");
+
+	/* NO-OP IC for ensuring that IPA pipeline is empty */
+	cmd_pyld[num_cmd] =
+		ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
+	if (!cmd_pyld[num_cmd]) {
+		IPAERR("failed to construct NOP imm cmd\n");
+		return -ENOMEM;
+	}
+
+	ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+	++num_cmd;
+
+	cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_IP_V4_NAT_INIT, cmd, false);
+	if (!cmd_pyld[num_cmd]) {
+		IPAERR_RL("fail to construct NAT init imm cmd\n");
+		result = -EPERM;
+		goto destroy_imm_cmd;
+	}
+
+	ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+	++num_cmd;
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+		struct ipahal_imm_cmd_dma_shared_mem mem_cmd = { 0 };
+
+		if (num_cmd >= IPA_NAT_MAX_NUM_OF_INIT_CMD_DESC) {
+			IPAERR("number of commands is out of range\n");
+			result = -ENOBUFS;
+			goto destroy_imm_cmd;
+		}
+
+		/* Copy the PDN config table to SRAM */
+		ipa3_nat_create_modify_pdn_cmd(&mem_cmd, zero_pdn_table);
+		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR(
+				"fail construct dma_shared_mem cmd: for pdn table");
+			result = -ENOMEM;
+			goto destroy_imm_cmd;
+		}
+		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+		++num_cmd;
+		IPADBG("added PDN table copy cmd\n");
+	}
+
+	result = ipa3_send_cmd(num_cmd, desc);
+	if (result) {
+		IPAERR("fail to send NAT init immediate command\n");
+		goto destroy_imm_cmd;
+	}
+
+	IPADBG("return\n");
+
+destroy_imm_cmd:
+	for (i = 0; i < num_cmd; ++i)
+		ipahal_destroy_imm_cmd(cmd_pyld[i]);
+
+	return result;
+}
+
+static int ipa3_ipv6ct_send_init_cmd(struct ipahal_imm_cmd_ip_v6_ct_init *cmd)
+{
+	struct ipa3_desc desc[IPA_IPV6CT_MAX_NUM_OF_INIT_CMD_DESC];
+	struct ipahal_imm_cmd_pyld
+		*cmd_pyld[IPA_IPV6CT_MAX_NUM_OF_INIT_CMD_DESC];
+	int i, num_cmd = 0, result;
+
+	IPADBG("\n");
+
+	/* NO-OP IC for ensuring that IPA pipeline is empty */
+	cmd_pyld[num_cmd] =
+		ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
+	if (!cmd_pyld[num_cmd]) {
+		IPAERR("failed to construct NOP imm cmd\n");
+		return -ENOMEM;
+	}
+
+	ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+	++num_cmd;
+
+	if (num_cmd >= IPA_IPV6CT_MAX_NUM_OF_INIT_CMD_DESC) {
+		IPAERR("number of commands is out of range\n");
+		result = -ENOBUFS;
+		goto destroy_imm_cmd;
+	}
+
+	cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_IP_V6_CT_INIT, cmd, false);
+	if (!cmd_pyld[num_cmd]) {
+		IPAERR_RL("fail to construct IPv6CT init imm cmd\n");
+		result = -EPERM;
+		goto destroy_imm_cmd;
+	}
+
+	ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+	++num_cmd;
+
+	result = ipa3_send_cmd(num_cmd, desc);
+	if (result) {
+		IPAERR("Fail to send IPv6CT init immediate command\n");
+		goto destroy_imm_cmd;
+	}
+
+	IPADBG("return\n");
+
+destroy_imm_cmd:
+	for (i = 0; i < num_cmd; ++i)
+		ipahal_destroy_imm_cmd(cmd_pyld[i]);
+
+	return result;
+}
+
 /* IOCTL function handlers */
 /**
  * ipa3_nat_init_cmd() - Post IP_V4_NAT_INIT command to IPA HW
@@ -668,11 +814,7 @@
  */
 int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
 {
-	struct ipa3_desc desc[IPA_NAT_MAX_NUM_OF_INIT_CMD_DESC];
 	struct ipahal_imm_cmd_ip_v4_nat_init cmd;
-	int i, num_cmd = 0;
-	struct ipahal_imm_cmd_pyld *cmd_pyld[IPA_NAT_MAX_NUM_OF_INIT_CMD_DESC];
-	struct ipahal_imm_cmd_dma_shared_mem mem_cmd = { 0 };
 	int result;
 
 	IPADBG("\n");
@@ -733,18 +875,6 @@
 		return result;
 	}
 
-	/* NO-OP IC for ensuring that IPA pipeline is empty */
-	cmd_pyld[num_cmd] =
-		ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
-	if (!cmd_pyld[num_cmd]) {
-		IPAERR("failed to construct NOP imm cmd\n");
-		result = -ENOMEM;
-		goto bail;
-	}
-
-	ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
-	++num_cmd;
-
 	if (ipa3_ctx->nat_mem.dev.is_sys_mem) {
 		IPADBG("using system memory for nat table\n");
 		/*
@@ -757,26 +887,10 @@
 		IPADBG("using shared(local) memory for nat table\n");
 		ipa3_nat_create_init_cmd(init, true, IPA_RAM_NAT_OFST, &cmd);
 	}
-	cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
-		IPA_IMM_CMD_IP_V4_NAT_INIT, &cmd, false);
-	if (!cmd_pyld[num_cmd]) {
-		IPAERR_RL("Fail to construct ip_v4_nat_init imm cmd\n");
-		result = -EPERM;
-		goto destroy_imm_cmd;
-	}
-
-	ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
-	++num_cmd;
 
 	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
 		struct ipa_pdn_entry *pdn_entries;
 
-		if (num_cmd >= IPA_NAT_MAX_NUM_OF_INIT_CMD_DESC) {
-			IPAERR("number of commands is out of range\n");
-			result = -ENOBUFS;
-			goto destroy_imm_cmd;
-		}
-
 		/* store ip in pdn entries cache array */
 		pdn_entries = ipa3_ctx->nat_mem.pdn_mem.base;
 		pdn_entries[0].public_ip = init->ip_addr;
@@ -785,33 +899,13 @@
 		pdn_entries[0].resrvd = 0;
 
 		IPADBG("Public ip address:0x%x\n", init->ip_addr);
-
-		/* Copy the PDN config table to SRAM */
-		mem_cmd.is_read = false;
-		mem_cmd.skip_pipeline_clear = false;
-		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
-		mem_cmd.size = sizeof(struct ipa_pdn_entry) * IPA_MAX_PDN_NUM;
-		mem_cmd.system_addr = ipa3_ctx->nat_mem.pdn_mem.phys_base;
-		mem_cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
-			IPA_MEM_PART(pdn_config_ofst);
-		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
-			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
-		if (!cmd_pyld[num_cmd]) {
-			IPAERR(
-			"fail construct dma_shared_mem cmd: for pdn table");
-			result = -ENOMEM;
-			goto destroy_imm_cmd;
-		}
-		ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
-		++num_cmd;
-		IPADBG("added PDN table copy cmd\n");
 	}
 
-	IPADBG("posting v4 init command\n");
-	if (ipa3_send_cmd(num_cmd, desc)) {
-		IPAERR("Fail to send immediate command\n");
-		result = -EPERM;
-		goto destroy_imm_cmd;
+	IPADBG("posting NAT init command\n");
+	result = ipa3_nat_send_init_cmd(&cmd, false);
+	if (result) {
+		IPAERR("Fail to send NAT init immediate command\n");
+		return result;
 	}
 
 	ipa3_nat_ipv6ct_init_device_structure(
@@ -837,11 +931,7 @@
 
 	ipa3_ctx->nat_mem.dev.is_hw_init = true;
 	IPADBG("return\n");
-destroy_imm_cmd:
-	for (i = 0; i < num_cmd; ++i)
-		ipahal_destroy_imm_cmd(cmd_pyld[i]);
-bail:
-	return result;
+	return 0;
 }
 
 /**
@@ -854,11 +944,7 @@
  */
 int ipa3_ipv6ct_init_cmd(struct ipa_ioc_ipv6ct_init *init)
 {
-	struct ipa3_desc desc[IPA_IPV6CT_MAX_NUM_OF_INIT_CMD_DESC];
 	struct ipahal_imm_cmd_ip_v6_ct_init cmd;
-	int i, num_cmd = 0;
-	struct ipahal_imm_cmd_pyld
-		*cmd_pyld[IPA_IPV6CT_MAX_NUM_OF_INIT_CMD_DESC];
 	int result;
 
 	IPADBG("\n");
@@ -904,18 +990,6 @@
 		return result;
 	}
 
-	/* NO-OP IC for ensuring that IPA pipeline is empty */
-	cmd_pyld[num_cmd] =
-		ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
-	if (!cmd_pyld[num_cmd]) {
-		IPAERR("failed to construct NOP imm cmd\n");
-		result = -ENOMEM;
-		goto bail;
-	}
-
-	ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
-	++num_cmd;
-
 	if (ipa3_ctx->ipv6ct_mem.dev.is_sys_mem) {
 		IPADBG("using system memory for nat table\n");
 		/*
@@ -946,28 +1020,11 @@
 			ipa3_ctx->ipv6ct_mem.dev.name);
 	}
 
-	if (num_cmd >= IPA_IPV6CT_MAX_NUM_OF_INIT_CMD_DESC) {
-		IPAERR("number of commands is out of range\n");
-		result = -ENOBUFS;
-		goto destroy_imm_cmd;
-	}
-
-	cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
-		IPA_IMM_CMD_IP_V6_CT_INIT, &cmd, false);
-	if (!cmd_pyld[num_cmd]) {
-		IPAERR_RL("Fail to construct ip_v6_ct_init imm cmd\n");
-		result = -EPERM;
-		goto destroy_imm_cmd;
-	}
-
-	ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
-	++num_cmd;
-
 	IPADBG("posting ip_v6_ct_init imm command\n");
-	if (ipa3_send_cmd(num_cmd, desc)) {
-		IPAERR("Fail to send immediate command\n");
-		result = -EPERM;
-		goto destroy_imm_cmd;
+	result = ipa3_ipv6ct_send_init_cmd(&cmd);
+	if (result) {
+		IPAERR("fail to send IPv6CT init immediate command\n");
+		return result;
 	}
 
 	ipa3_nat_ipv6ct_init_device_structure(
@@ -979,11 +1036,7 @@
 
 	ipa3_ctx->ipv6ct_mem.dev.is_hw_init = true;
 	IPADBG("return\n");
-destroy_imm_cmd:
-	for (i = 0; i < num_cmd; ++i)
-		ipahal_destroy_imm_cmd(cmd_pyld[i]);
-bail:
-	return result;
+	return 0;
 }
 
 /**
@@ -1036,13 +1089,7 @@
 		mdfy_pdn->dst_metadata, mdfy_pdn->src_metadata);
 
 	/* Copy the PDN config table to SRAM */
-	mem_cmd.is_read = false;
-	mem_cmd.skip_pipeline_clear = false;
-	mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
-	mem_cmd.size = sizeof(struct ipa_pdn_entry) * IPA_MAX_PDN_NUM;
-	mem_cmd.system_addr = nat_ctx->pdn_mem.phys_base;
-	mem_cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
-		IPA_MEM_PART(pdn_config_ofst);
+	ipa3_nat_create_modify_pdn_cmd(&mem_cmd, false);
 	cmd_pyld = ipahal_construct_imm_cmd(
 		IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
 	if (!cmd_pyld) {
@@ -1054,10 +1101,9 @@
 	ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
 
 	IPADBG("sending PDN table copy cmd\n");
-	if (ipa3_send_cmd(1, &desc)) {
-		IPAERR("Fail to send immediate command\n");
-		result = -EPERM;
-	}
+	result = ipa3_send_cmd(1, &desc);
+	if (result)
+		IPAERR("Fail to send PDN table copy immediate command\n");
 
 	ipahal_destroy_imm_cmd(cmd_pyld);
 
@@ -1233,7 +1279,7 @@
 		++num_cmd;
 	}
 	result = ipa3_send_cmd(num_cmd, desc);
-	if (result == -EPERM)
+	if (result)
 		IPAERR("Fail to send table_dma immediate command\n");
 
 	IPADBG("return\n");
@@ -1292,104 +1338,26 @@
 	IPADBG("return\n");
 }
 
-/**
- * ipa3_nat_free_mem() - free the NAT memory
- *
- * Called by NAT client driver to free the NAT memory
- */
-static int ipa3_nat_free_mem(void)
-{
-	struct ipahal_imm_cmd_dma_shared_mem mem_cmd = { 0 };
-	struct ipa3_desc desc;
-	struct ipahal_imm_cmd_pyld *cmd_pyld;
-	int result = 0;
-
-	IPADBG("\n");
-	mutex_lock(&ipa3_ctx->nat_mem.dev.lock);
-
-	ipa3_nat_ipv6ct_free_mem(&ipa3_ctx->nat_mem.dev);
-
-	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
-		size_t pdn_entry_size;
-
-		ipahal_nat_entry_size(IPAHAL_NAT_IPV4_PDN, &pdn_entry_size);
-
-		/* zero the PDN table and copy the PDN config table to SRAM */
-		IPADBG("zeroing the PDN config table\n");
-		memset(ipa3_ctx->nat_mem.pdn_mem.base, 0,
-			pdn_entry_size * IPA_MAX_PDN_NUM);
-		mem_cmd.is_read = false;
-		mem_cmd.skip_pipeline_clear = false;
-		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
-		mem_cmd.size = pdn_entry_size * IPA_MAX_PDN_NUM;
-		mem_cmd.system_addr = ipa3_ctx->nat_mem.pdn_mem.phys_base;
-		mem_cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
-			IPA_MEM_PART(pdn_config_ofst);
-		cmd_pyld = ipahal_construct_imm_cmd(
-			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
-		if (!cmd_pyld) {
-			IPAERR(
-				"fail construct dma_shared_mem cmd: for pdn table");
-			result = -ENOMEM;
-			goto lbl_free_pdn;
-		}
-		ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
-
-		IPADBG("sending PDN table copy cmd\n");
-		if (ipa3_send_cmd(1, &desc)) {
-			IPAERR("Fail to send immediate command\n");
-			result = -ENOMEM;
-		}
-
-		ipahal_destroy_imm_cmd(cmd_pyld);
-lbl_free_pdn:
-		IPADBG("freeing the PDN memory\n");
-		dma_free_coherent(ipa3_ctx->pdev,
-			ipa3_ctx->nat_mem.pdn_mem.size,
-			ipa3_ctx->nat_mem.pdn_mem.base,
-			ipa3_ctx->nat_mem.pdn_mem.phys_base);
-	}
-
-	mutex_unlock(&ipa3_ctx->nat_mem.dev.lock);
-	IPADBG("return\n");
-	return result;
-}
-
-static int ipa3_nat_ipv6ct_send_del_table_cmd(
+static int ipa3_nat_ipv6ct_create_del_table_cmd(
 	uint8_t tbl_index,
 	u32 base_addr,
-	bool mem_type_shared,
 	struct ipa3_nat_ipv6ct_common_mem *dev,
-	enum ipahal_imm_cmd_name cmd_name,
-	struct ipahal_imm_cmd_nat_ipv6ct_init_common *table_init_cmd,
-	const void *cmd)
+	struct ipahal_imm_cmd_nat_ipv6ct_init_common *table_init_cmd)
 {
-	struct ipahal_imm_cmd_pyld *nop_cmd_pyld = NULL, *cmd_pyld = NULL;
-	struct ipa3_desc desc[IPA_MAX_NUM_OF_DEL_TABLE_CMD_DESC];
-	int result = 0;
+	bool mem_type_shared = true;
 
 	IPADBG("\n");
 
-	if (!dev->is_hw_init) {
-		IPADBG("attempt to delete %s before HW int\n", dev->name);
-		/* Deletion of partly initialized table is not an error */
-		return 0;
-	}
-
 	if (tbl_index >= 1) {
 		IPAERR_RL("Unsupported table index %d\n", tbl_index);
 		return -EPERM;
 	}
 
-	/* NO-OP IC for ensuring that IPA pipeline is empty */
-	nop_cmd_pyld =
-		ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
-	if (!nop_cmd_pyld) {
-		IPAERR("Failed to construct NOP imm cmd\n");
-		result = -ENOMEM;
-		goto bail;
+	if (dev->tmp_mem != NULL) {
+		IPADBG("using temp memory during %s del\n", dev->name);
+		mem_type_shared = false;
+		base_addr = dev->tmp_mem->dma_handle;
 	}
-	ipa3_init_imm_cmd_desc(&desc[0], nop_cmd_pyld);
 
 	table_init_cmd->table_index = tbl_index;
 	table_init_cmd->base_table_addr = base_addr;
@@ -1398,29 +1366,73 @@
 	table_init_cmd->expansion_table_addr_shared = mem_type_shared;
 	table_init_cmd->size_base_table = 0;
 	table_init_cmd->size_expansion_table = 0;
-	cmd_pyld = ipahal_construct_imm_cmd(cmd_name, &cmd, false);
-	if (!cmd_pyld) {
-		IPAERR_RL("Fail to construct table init imm cmd for %s\n",
-			dev->name);
-		result = -EPERM;
-		goto destroy_nop_imm_cmd;
-	}
-	ipa3_init_imm_cmd_desc(&desc[1], cmd_pyld);
+	IPADBG("return\n");
 
-	if (ipa3_send_cmd(IPA_MAX_NUM_OF_DEL_TABLE_CMD_DESC, desc)) {
-		IPAERR("Fail to send immediate command\n");
-		result = -EPERM;
-		goto destroy_imm_cmd;
+	return 0;
+}
+
+static int ipa3_nat_send_del_table_cmd(uint8_t tbl_index)
+{
+	struct ipahal_imm_cmd_ip_v4_nat_init cmd;
+	int result;
+
+	IPADBG("\n");
+
+	result = ipa3_nat_ipv6ct_create_del_table_cmd(
+		tbl_index,
+		IPA_NAT_PHYS_MEM_OFFSET,
+		&ipa3_ctx->nat_mem.dev,
+		&cmd.table_init);
+	if (result) {
+		IPAERR(
+			"Fail to create immediate command to delete NAT table\n");
+		return result;
+	}
+
+	cmd.index_table_addr = cmd.table_init.base_table_addr;
+	cmd.index_table_addr_shared = cmd.table_init.base_table_addr_shared;
+	cmd.index_table_expansion_addr = cmd.index_table_addr;
+	cmd.index_table_expansion_addr_shared = cmd.index_table_addr_shared;
+	cmd.public_addr_info = 0;
+
+	IPADBG("posting NAT delete command\n");
+	result = ipa3_nat_send_init_cmd(&cmd, true);
+	if (result) {
+		IPAERR("Fail to send NAT delete immediate command\n");
+		return result;
 	}
 
 	IPADBG("return\n");
+	return 0;
+}
 
-destroy_imm_cmd:
-	ipahal_destroy_imm_cmd(cmd_pyld);
-destroy_nop_imm_cmd:
-	ipahal_destroy_imm_cmd(nop_cmd_pyld);
-bail:
-	return result;
+static int ipa3_ipv6ct_send_del_table_cmd(uint8_t tbl_index)
+{
+	struct ipahal_imm_cmd_ip_v6_ct_init cmd;
+	int result;
+
+	IPADBG("\n");
+
+	result = ipa3_nat_ipv6ct_create_del_table_cmd(
+		tbl_index,
+		IPA_IPV6CT_PHYS_MEM_OFFSET,
+		&ipa3_ctx->ipv6ct_mem.dev,
+		&cmd.table_init);
+	if (result) {
+		IPAERR(
+			"Fail to create immediate command to delete IPv6CT table\n");
+		return result;
+	}
+
+	IPADBG("posting IPv6CT delete command\n");
+	result = ipa3_ipv6ct_send_init_cmd(&cmd);
+	if (result) {
+		IPAERR("Fail to send IPv6CT delete immediate command\n");
+		return result;
+	}
+
+	IPADBG("return\n");
+	return 0;
 }
 
 /**
@@ -1456,10 +1468,7 @@
  */
 int ipa3_del_nat_table(struct ipa_ioc_nat_ipv6ct_table_del *del)
 {
-	struct ipahal_imm_cmd_ip_v4_nat_init cmd;
-	bool mem_type_shared = true;
-	u32 base_addr = IPA_NAT_PHYS_MEM_OFFSET;
-	int result;
+	int result = 0;
 
 	IPADBG("\n");
 	if (!ipa3_ctx->nat_mem.dev.is_dev_init) {
@@ -1467,36 +1476,35 @@
 		return -EPERM;
 	}
 
-	if (ipa3_ctx->nat_mem.dev.tmp_mem != NULL) {
-		IPADBG("using temp memory during nat del\n");
-		mem_type_shared = false;
-		base_addr = ipa3_ctx->nat_mem.dev.tmp_mem->dma_handle;
+	mutex_lock(&ipa3_ctx->nat_mem.dev.lock);
+
+	if (ipa3_ctx->nat_mem.dev.is_hw_init) {
+		result = ipa3_nat_send_del_table_cmd(del->table_index);
+		if (result) {
+			IPAERR(
+				"Fail to send immediate command to delete NAT table\n");
+			goto bail;
+		}
 	}
 
-	cmd.index_table_addr = base_addr;
-	cmd.index_table_addr_shared = mem_type_shared;
-	cmd.index_table_expansion_addr = base_addr;
-	cmd.index_table_expansion_addr_shared = mem_type_shared;
-	cmd.public_addr_info = 0;
-
-	result = ipa3_nat_ipv6ct_send_del_table_cmd(
-		del->table_index,
-		base_addr,
-		mem_type_shared,
-		&ipa3_ctx->nat_mem.dev,
-		IPA_IMM_CMD_IP_V4_NAT_INIT,
-		&cmd.table_init,
-		&cmd);
-	if (result)
-		goto bail;
-
 	ipa3_ctx->nat_mem.public_ip_addr = 0;
 	ipa3_ctx->nat_mem.index_table_addr = 0;
 	ipa3_ctx->nat_mem.index_table_expansion_addr = 0;
 
-	result = ipa3_nat_free_mem();
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0 &&
+		ipa3_ctx->nat_mem.dev.is_mem_allocated) {
+		IPADBG("freeing the PDN memory\n");
+		dma_free_coherent(ipa3_ctx->pdev,
+			ipa3_ctx->nat_mem.pdn_mem.size,
+			ipa3_ctx->nat_mem.pdn_mem.base,
+			ipa3_ctx->nat_mem.pdn_mem.phys_base);
+	}
+
+	ipa3_nat_ipv6ct_free_mem(&ipa3_ctx->nat_mem.dev);
 	IPADBG("return\n");
+
 bail:
+	mutex_unlock(&ipa3_ctx->nat_mem.dev.lock);
 	return result;
 }
 
@@ -1510,10 +1518,7 @@
  */
 int ipa3_del_ipv6ct_table(struct ipa_ioc_nat_ipv6ct_table_del *del)
 {
-	struct ipahal_imm_cmd_ip_v6_ct_init cmd;
-	bool mem_type_shared = true;
-	u32 base_addr = IPA_IPV6CT_PHYS_MEM_OFFSET;
-	int result;
+	int result = 0;
 
 	IPADBG("\n");
 
@@ -1527,28 +1532,22 @@
 		return -EPERM;
 	}
 
-	if (ipa3_ctx->ipv6ct_mem.dev.tmp_mem != NULL) {
-		IPADBG("using temp memory during IPv6CT del\n");
-		mem_type_shared = false;
-		base_addr = ipa3_ctx->ipv6ct_mem.dev.tmp_mem->dma_handle;
+	mutex_lock(&ipa3_ctx->ipv6ct_mem.dev.lock);
+
+	if (ipa3_ctx->ipv6ct_mem.dev.is_hw_init) {
+		result = ipa3_ipv6ct_send_del_table_cmd(del->table_index);
+		if (result) {
+			IPAERR(
+				"Fail to send immediate command to delete IPv6CT table\n");
+			goto bail;
+		}
 	}
 
-	result = ipa3_nat_ipv6ct_send_del_table_cmd(
-		del->table_index,
-		base_addr,
-		mem_type_shared,
-		&ipa3_ctx->ipv6ct_mem.dev,
-		IPA_IMM_CMD_IP_V6_CT_INIT,
-		&cmd.table_init,
-		&cmd);
-	if (result)
-		goto bail;
-
-	mutex_lock(&ipa3_ctx->ipv6ct_mem.dev.lock);
 	ipa3_nat_ipv6ct_free_mem(&ipa3_ctx->ipv6ct_mem.dev);
-	mutex_unlock(&ipa3_ctx->ipv6ct_mem.dev.lock);
 	IPADBG("return\n");
+
 bail:
+	mutex_unlock(&ipa3_ctx->ipv6ct_mem.dev.lock);
 	return result;
 }
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index e3a3821..1c8715a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -758,6 +758,57 @@
 		resp.resp.error, "ipa_install_filter");
 }
 
+/* sending ul-filter-install-request to modem*/
+int ipa3_qmi_ul_filter_request_send(
+	struct ipa_configure_ul_firewall_rules_req_msg_v01 *req)
+{
+	struct ipa_configure_ul_firewall_rules_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+
+	IPAWANDBG("IPACM pass %u rules to Q6\n",
+		req->firewall_rules_list_len);
+
+	mutex_lock(&ipa3_qmi_lock);
+	if (ipa3_qmi_ctx != NULL) {
+		/* cache the qmi_filter_request */
+		memcpy(
+		&(ipa3_qmi_ctx->ipa_configure_ul_firewall_rules_req_msg_cache[
+		ipa3_qmi_ctx->num_ipa_configure_ul_firewall_rules_req_msg]),
+		req,
+		sizeof(struct
+		ipa_configure_ul_firewall_rules_req_msg_v01));
+		ipa3_qmi_ctx->num_ipa_configure_ul_firewall_rules_req_msg++;
+		ipa3_qmi_ctx->num_ipa_configure_ul_firewall_rules_req_msg %=
+			MAX_NUM_QMI_RULE_CACHE;
+	}
+	mutex_unlock(&ipa3_qmi_lock);
+
+	req_desc.max_msg_len =
+		QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_V01;
+	req_desc.ei_array =
+		ipa3_configure_ul_firewall_rules_req_msg_data_v01_ei;
+
+	memset(&resp, 0,
+		sizeof(struct ipa_configure_ul_firewall_rules_resp_msg_v01));
+	resp_desc.max_msg_len =
+		QMI_IPA_INSTALL_UL_FIREWALL_RULES_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_INSTALL_UL_FIREWALL_RULES_RESP_V01;
+	resp_desc.ei_array =
+		ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei;
+
+	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc,
+		req,
+		sizeof(
+		struct ipa_configure_ul_firewall_rules_req_msg_v01),
+		&resp_desc, &resp, sizeof(resp),
+		QMI_SEND_REQ_TIMEOUT_MS);
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_V01, resp.resp.result,
+		resp.resp.error, "ipa_received_ul_firewall_filter");
+}
+
 int ipa3_qmi_enable_force_clear_datapath_send(
 	struct ipa_enable_force_clear_datapath_req_msg_v01 *req)
 {
@@ -967,6 +1018,7 @@
 			       void *ind_cb_priv)
 {
 	struct ipa_data_usage_quota_reached_ind_msg_v01 qmi_ind;
+	struct ipa_configure_ul_firewall_rules_ind_msg_v01 qmi_ul_firewall_ind;
 	struct msg_desc qmi_ind_desc;
 	int rc = 0;
 
@@ -995,6 +1047,36 @@
 		ipa3_broadcast_quota_reach_ind(qmi_ind.apn.mux_id,
 			IPA_UPSTEAM_MODEM);
 	}
+
+	if (msg_id == QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_V01) {
+		memset(&qmi_ul_firewall_ind, 0, sizeof(
+			struct ipa_configure_ul_firewall_rules_ind_msg_v01));
+		qmi_ind_desc.max_msg_len =
+			QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_MAX_MSG_LEN_V01;
+		qmi_ind_desc.msg_id = QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_V01;
+		qmi_ind_desc.ei_array =
+			ipa3_configure_ul_firewall_rules_ind_msg_data_v01_ei;
+
+		rc = qmi_kernel_decode(
+			&qmi_ind_desc, &qmi_ul_firewall_ind, msg, msg_len);
+		if (rc < 0) {
+			IPAWANERR("Error decoding msg_id %d\n", msg_id);
+			return;
+		}
+
+		IPAWANDBG("UL firewall rules install indication on Q6");
+		if (qmi_ul_firewall_ind.result.is_success ==
+				QMI_IPA_UL_FIREWALL_STATUS_SUCCESS_V01) {
+			IPAWANDBG(" : Success\n");
+			IPAWANDBG("Mux ID : %d\n",
+				qmi_ul_firewall_ind.result.mux_id);
+		} else if (qmi_ul_firewall_ind.result.is_success ==
+				QMI_IPA_UL_FIREWALL_STATUS_FAILURE_V01){
+			IPAWANERR(": Failure\n");
+		} else {
+			IPAWANERR(": Unexpected Result");
+		}
+	}
 }
 
 static void ipa3_q6_clnt_svc_arrive(struct work_struct *work)
@@ -1446,6 +1528,74 @@
 		resp.resp.error, "ipa_stop_data_usage_quota_req_msg_v01");
 }
 
+int ipa3_qmi_enable_per_client_stats(
+	struct ipa_enable_per_client_stats_req_msg_v01 *req,
+	struct ipa_enable_per_client_stats_resp_msg_v01 *resp)
+{
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+
+	req_desc.max_msg_len =
+		QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id =
+		QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_V01;
+	req_desc.ei_array =
+		ipa3_enable_per_client_stats_req_msg_data_v01_ei;
+
+	resp_desc.max_msg_len =
+		QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id =
+		QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_V01;
+	resp_desc.ei_array =
+		ipa3_enable_per_client_stats_resp_msg_data_v01_ei;
+
+	IPAWANDBG("Sending QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_V01\n");
+
+	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
+		sizeof(struct ipa_enable_per_client_stats_req_msg_v01),
+		&resp_desc, resp,
+		sizeof(struct ipa_enable_per_client_stats_resp_msg_v01),
+		QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+	IPAWANDBG("QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_V01 received\n");
+
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_V01, resp->resp.result,
+		resp->resp.error, "ipa3_qmi_enable_per_client_stats");
+}
+
+int ipa3_qmi_get_per_client_packet_stats(
+	struct ipa_get_stats_per_client_req_msg_v01 *req,
+	struct ipa_get_stats_per_client_resp_msg_v01 *resp)
+{
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+
+	req_desc.max_msg_len = QMI_IPA_GET_STATS_PER_CLIENT_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_GET_STATS_PER_CLIENT_REQ_V01;
+	req_desc.ei_array = ipa3_get_stats_per_client_req_msg_data_v01_ei;
+
+	resp_desc.max_msg_len =
+		QMI_IPA_GET_STATS_PER_CLIENT_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_GET_STATS_PER_CLIENT_RESP_V01;
+	resp_desc.ei_array = ipa3_get_stats_per_client_resp_msg_data_v01_ei;
+
+	IPAWANDBG("Sending QMI_IPA_GET_STATS_PER_CLIENT_REQ_V01\n");
+
+	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
+			sizeof(struct ipa_get_stats_per_client_req_msg_v01),
+			&resp_desc, resp,
+			sizeof(struct ipa_get_stats_per_client_resp_msg_v01),
+			QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+	IPAWANDBG("QMI_IPA_GET_STATS_PER_CLIENT_RESP_V01 received\n");
+
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_GET_STATS_PER_CLIENT_REQ_V01, resp->resp.result,
+		resp->resp.error,
+		"struct ipa_get_stats_per_client_req_msg_v01");
+}
+
 void ipa3_qmi_init(void)
 {
 	mutex_init(&ipa3_qmi_lock);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
index d3a4ba0..3351a33 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
@@ -32,54 +32,62 @@
 
 #define IPAWANDBG(fmt, args...) \
 	do { \
-		pr_debug(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \
+		pr_debug(DEV_NAME " %s:%d " fmt, __func__,\
+				__LINE__, ## args); \
 		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
-			DEV_NAME " %s:%d " fmt, ## args); \
+				DEV_NAME " %s:%d " fmt, ## args); \
 		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
-			DEV_NAME " %s:%d " fmt, ## args); \
+				DEV_NAME " %s:%d " fmt, ## args); \
 	} while (0)
 
 
 #define IPAWANDBG_LOW(fmt, args...) \
 	do { \
-		pr_debug(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \
+		pr_debug(DEV_NAME " %s:%d " fmt, __func__,\
+				__LINE__, ## args); \
 		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
-			DEV_NAME " %s:%d " fmt, ## args); \
+				DEV_NAME " %s:%d " fmt, ## args); \
 	} while (0)
 
 #define IPAWANERR(fmt, args...) \
 	do { \
-		pr_err(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \
+		pr_err(DEV_NAME " %s:%d " fmt, __func__,\
+				__LINE__, ## args); \
 		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
-			DEV_NAME " %s:%d " fmt, ## args); \
+				DEV_NAME " %s:%d " fmt, ## args); \
 		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
-			DEV_NAME " %s:%d " fmt, ## args); \
+				DEV_NAME " %s:%d " fmt, ## args); \
 	} while (0)
 
 #define IPAWANINFO(fmt, args...) \
 	do { \
-		pr_info(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \
+		pr_info(DEV_NAME " %s:%d " fmt, __func__,\
+				__LINE__, ## args); \
 		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
-			DEV_NAME " %s:%d " fmt, ## args); \
+				DEV_NAME " %s:%d " fmt, ## args); \
 		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
-			DEV_NAME " %s:%d " fmt, ## args); \
+				DEV_NAME " %s:%d " fmt, ## args); \
 	} while (0)
 
 extern struct ipa3_qmi_context *ipa3_qmi_ctx;
 
 struct ipa3_qmi_context {
-struct ipa_ioc_ext_intf_prop q6_ul_filter_rule[MAX_NUM_Q6_RULE];
-u32 q6_ul_filter_rule_hdl[MAX_NUM_Q6_RULE];
-int num_ipa_install_fltr_rule_req_msg;
-struct ipa_install_fltr_rule_req_msg_v01
+	struct ipa_ioc_ext_intf_prop q6_ul_filter_rule[MAX_NUM_Q6_RULE];
+	u32 q6_ul_filter_rule_hdl[MAX_NUM_Q6_RULE];
+	int num_ipa_install_fltr_rule_req_msg;
+	struct ipa_install_fltr_rule_req_msg_v01
 		ipa_install_fltr_rule_req_msg_cache[MAX_NUM_QMI_RULE_CACHE];
-int num_ipa_install_fltr_rule_req_ex_msg;
-struct ipa_install_fltr_rule_req_ex_msg_v01
+	int num_ipa_install_fltr_rule_req_ex_msg;
+	struct ipa_install_fltr_rule_req_ex_msg_v01
 		ipa_install_fltr_rule_req_ex_msg_cache[MAX_NUM_QMI_RULE_CACHE];
-int num_ipa_fltr_installed_notif_req_msg;
-struct ipa_fltr_installed_notif_req_msg_v01
+	int num_ipa_fltr_installed_notif_req_msg;
+	struct ipa_fltr_installed_notif_req_msg_v01
 		ipa_fltr_installed_notif_req_msg_cache[MAX_NUM_QMI_RULE_CACHE];
-bool modem_cfg_emb_pipe_flt;
+	int num_ipa_configure_ul_firewall_rules_req_msg;
+	struct ipa_configure_ul_firewall_rules_req_msg_v01
+		ipa_configure_ul_firewall_rules_req_msg_cache
+			[MAX_NUM_QMI_RULE_CACHE];
+	bool modem_cfg_emb_pipe_flt;
 };
 
 struct ipa3_rmnet_mux_val {
@@ -95,16 +103,24 @@
 extern struct elem_info ipa3_init_modem_driver_resp_msg_data_v01_ei[];
 extern struct elem_info ipa3_indication_reg_req_msg_data_v01_ei[];
 extern struct elem_info ipa3_indication_reg_resp_msg_data_v01_ei[];
-extern struct elem_info ipa3_master_driver_init_complt_ind_msg_data_v01_ei[];
+
+extern struct elem_info
+	ipa3_master_driver_init_complt_ind_msg_data_v01_ei[];
 extern struct elem_info ipa3_install_fltr_rule_req_msg_data_v01_ei[];
 extern struct elem_info ipa3_install_fltr_rule_resp_msg_data_v01_ei[];
 extern struct elem_info ipa3_fltr_installed_notif_req_msg_data_v01_ei[];
-extern struct elem_info ipa3_fltr_installed_notif_resp_msg_data_v01_ei[];
-extern struct elem_info ipa3_enable_force_clear_datapath_req_msg_data_v01_ei[];
-extern struct elem_info ipa3_enable_force_clear_datapath_resp_msg_data_v01_ei[];
-extern struct elem_info ipa3_disable_force_clear_datapath_req_msg_data_v01_ei[];
+
+extern struct elem_info
+	ipa3_fltr_installed_notif_resp_msg_data_v01_ei[];
+extern struct elem_info
+	ipa3_enable_force_clear_datapath_req_msg_data_v01_ei[];
+extern struct elem_info
+	ipa3_enable_force_clear_datapath_resp_msg_data_v01_ei[];
+extern struct elem_info
+	ipa3_disable_force_clear_datapath_req_msg_data_v01_ei[];
 extern struct elem_info
 	ipa3_disable_force_clear_datapath_resp_msg_data_v01_ei[];
+
 extern struct elem_info ipa3_config_req_msg_data_v01_ei[];
 extern struct elem_info ipa3_config_resp_msg_data_v01_ei[];
 extern struct elem_info ipa3_get_data_stats_req_msg_data_v01_ei[];
@@ -112,14 +128,44 @@
 extern struct elem_info ipa3_get_apn_data_stats_req_msg_data_v01_ei[];
 extern struct elem_info ipa3_get_apn_data_stats_resp_msg_data_v01_ei[];
 extern struct elem_info ipa3_set_data_usage_quota_req_msg_data_v01_ei[];
-extern struct elem_info ipa3_set_data_usage_quota_resp_msg_data_v01_ei[];
-extern struct elem_info ipa3_data_usage_quota_reached_ind_msg_data_v01_ei[];
-extern struct elem_info ipa3_stop_data_usage_quota_req_msg_data_v01_ei[];
-extern struct elem_info ipa3_stop_data_usage_quota_resp_msg_data_v01_ei[];
-extern struct elem_info ipa3_init_modem_driver_cmplt_req_msg_data_v01_ei[];
-extern struct elem_info ipa3_init_modem_driver_cmplt_resp_msg_data_v01_ei[];
-extern struct elem_info ipa3_install_fltr_rule_req_ex_msg_data_v01_ei[];
-extern struct elem_info ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei[];
+
+extern struct elem_info
+	ipa3_set_data_usage_quota_resp_msg_data_v01_ei[];
+extern struct elem_info
+	ipa3_data_usage_quota_reached_ind_msg_data_v01_ei[];
+extern struct elem_info
+	ipa3_stop_data_usage_quota_req_msg_data_v01_ei[];
+extern struct elem_info
+	ipa3_stop_data_usage_quota_resp_msg_data_v01_ei[];
+extern struct elem_info
+	ipa3_init_modem_driver_cmplt_req_msg_data_v01_ei[];
+extern struct elem_info
+	ipa3_init_modem_driver_cmplt_resp_msg_data_v01_ei[];
+extern struct elem_info
+	ipa3_install_fltr_rule_req_ex_msg_data_v01_ei[];
+extern struct elem_info
+	ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei[];
+extern struct elem_info
+	ipa3_ul_firewall_rule_type_data_v01_ei[];
+extern struct elem_info
+	ipa3_ul_firewall_config_result_type_data_v01_ei[];
+extern struct elem_info
+	ipa3_per_client_stats_info_type_data_v01_ei[];
+extern struct elem_info
+	ipa3_enable_per_client_stats_req_msg_data_v01_ei[];
+extern struct elem_info
+	ipa3_enable_per_client_stats_resp_msg_data_v01_ei[];
+extern struct elem_info
+	ipa3_get_stats_per_client_req_msg_data_v01_ei[];
+
+extern struct elem_info
+	ipa3_get_stats_per_client_resp_msg_data_v01_ei[];
+extern struct elem_info
+	ipa3_configure_ul_firewall_rules_req_msg_data_v01_ei[];
+extern struct elem_info
+	ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei[];
+extern struct elem_info
+	ipa3_configure_ul_firewall_rules_ind_msg_data_v01_ei[];
 
 /**
  * struct ipa3_rmnet_context - IPA rmnet context
@@ -148,6 +194,9 @@
 int ipa3_qmi_filter_request_ex_send(
 	struct ipa_install_fltr_rule_req_ex_msg_v01 *req);
 
+int ipa3_qmi_ul_filter_request_send(
+	struct ipa_configure_ul_firewall_rules_req_msg_v01 *req);
+
 /* sending filter-installed-notify-request to modem*/
 int ipa3_qmi_filter_notify_send(struct ipa_fltr_installed_notif_req_msg_v01
 		*req);
@@ -194,6 +243,16 @@
 	struct wan_ioctl_query_tether_stats_all *data);
 
 int rmnet_ipa3_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data);
+int rmnet_ipa3_set_lan_client_info(struct wan_ioctl_lan_client_info *data);
+
+int rmnet_ipa3_clear_lan_client_info(struct wan_ioctl_lan_client_info *data);
+
+int rmnet_ipa3_send_lan_client_msg(struct wan_ioctl_send_lan_client_msg *data);
+
+int rmnet_ipa3_enable_per_client_stats(bool *data);
+
+int rmnet_ipa3_query_per_client_stats(
+	struct wan_ioctl_query_per_client_stats *data);
 
 int ipa3_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req,
 	struct ipa_get_data_stats_resp_msg_v01 *resp);
@@ -210,6 +269,13 @@
 int ipa3_wwan_set_modem_perf_profile(int throughput);
 
 int ipa3_wwan_set_modem_state(struct wan_ioctl_notify_wan_state *state);
+int ipa3_qmi_enable_per_client_stats(
+	struct ipa_enable_per_client_stats_req_msg_v01 *req,
+	struct ipa_enable_per_client_stats_resp_msg_v01 *resp);
+
+int ipa3_qmi_get_per_client_packet_stats(
+	struct ipa_get_stats_per_client_req_msg_v01 *req,
+	struct ipa_get_stats_per_client_resp_msg_v01 *resp);
 
 void ipa3_qmi_init(void);
 
@@ -231,6 +297,12 @@
 	return -EPERM;
 }
 
+static inline int ipa3_qmi_ul_filter_request_send(
+	struct ipa_configure_ul_firewall_rules_req_msg_v01 *req)
+{
+	return -EPERM;
+}
+
 static inline int ipa3_qmi_filter_request_ex_send(
 	struct ipa_install_fltr_rule_req_ex_msg_v01 *req)
 {
@@ -328,16 +400,32 @@
 static inline void ipa3_q6_handshake_complete(bool ssr_bootup) { }
 
 static inline int ipa3_wwan_set_modem_perf_profile(int throughput)
 {
 	return -EPERM;
 }
+
+static inline int ipa3_qmi_enable_per_client_stats(
+	struct ipa_enable_per_client_stats_req_msg_v01 *req,
+	struct ipa_enable_per_client_stats_resp_msg_v01 *resp)
+{
+	return -EPERM;
+}
+
+static inline int ipa3_qmi_get_per_client_packet_stats(
+	struct ipa_get_stats_per_client_req_msg_v01 *req,
+	struct ipa_get_stats_per_client_resp_msg_v01 *resp)
+{
+	return -EPERM;
+}
 
 static inline void ipa3_qmi_init(void)
 {
+
 }
 
 static inline void ipa3_qmi_cleanup(void)
 {
+
 }
 
 #endif /* CONFIG_RMNET_IPA3 */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c
index d2d4158..703acd7 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c
@@ -16,6 +16,8 @@
 
 #include <soc/qcom/msm_qmi_interface.h>
 
+#include "ipa_qmi_service.h"
+
 /* Type Definitions  */
 static struct elem_info ipa3_hdr_tbl_info_type_data_v01_ei[] = {
 	{
@@ -1756,6 +1758,36 @@
 			rule_id),
 	},
 	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x18,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			dst_pipe_id_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x18,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			dst_pipe_id_len),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= QMI_IPA_MAX_CLIENT_DST_PIPES_V01,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x18,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			dst_pipe_id),
+	},
+	{
 		.data_type	= QMI_EOTI,
 		.is_array	= NO_ARRAY,
 		.tlv_type	= QMI_COMMON_TLV_TYPE,
@@ -2923,3 +2955,432 @@
 		.tlv_type	= QMI_COMMON_TLV_TYPE,
 	},
 };
+
+struct elem_info ipa3_per_client_stats_info_type_data_v01_ei[] = {
+	{
+			.data_type	= QMI_UNSIGNED_4_BYTE,
+			.elem_len	= 1,
+			.elem_size	= sizeof(uint32_t),
+			.is_array	= NO_ARRAY,
+			.tlv_type	= QMI_COMMON_TLV_TYPE,
+			.offset		= offsetof(
+				struct ipa_per_client_stats_info_type_v01,
+				client_id),
+	},
+	{
+			.data_type	= QMI_UNSIGNED_4_BYTE,
+			.elem_len	= 1,
+			.elem_size	= sizeof(uint32_t),
+			.is_array	= NO_ARRAY,
+			.tlv_type	= QMI_COMMON_TLV_TYPE,
+			.offset		= offsetof(
+				struct ipa_per_client_stats_info_type_v01,
+				src_pipe_id),
+	},
+	{
+			.data_type	= QMI_UNSIGNED_8_BYTE,
+			.elem_len	= 1,
+			.elem_size	= sizeof(uint64_t),
+			.is_array	= NO_ARRAY,
+			.tlv_type	= QMI_COMMON_TLV_TYPE,
+			.offset		= offsetof(
+				struct ipa_per_client_stats_info_type_v01,
+				num_ul_ipv4_bytes),
+
+	},
+	{
+			.data_type	= QMI_UNSIGNED_8_BYTE,
+			.elem_len	= 1,
+			.elem_size	= sizeof(uint64_t),
+			.is_array	= NO_ARRAY,
+			.tlv_type	= QMI_COMMON_TLV_TYPE,
+			.offset		= offsetof(
+				struct ipa_per_client_stats_info_type_v01,
+				num_ul_ipv6_bytes),
+
+	},
+	{
+			.data_type	= QMI_UNSIGNED_8_BYTE,
+			.elem_len	= 1,
+			.elem_size	= sizeof(uint64_t),
+			.is_array	= NO_ARRAY,
+			.tlv_type	= QMI_COMMON_TLV_TYPE,
+			.offset		= offsetof(
+				struct ipa_per_client_stats_info_type_v01,
+				num_dl_ipv4_bytes),
+
+	},
+	{
+			.data_type	= QMI_UNSIGNED_8_BYTE,
+			.elem_len	= 1,
+			.elem_size	= sizeof(uint64_t),
+			.is_array	= NO_ARRAY,
+			.tlv_type	= QMI_COMMON_TLV_TYPE,
+			.offset		= offsetof(
+				struct ipa_per_client_stats_info_type_v01,
+				num_dl_ipv6_bytes),
+
+	},
+	{
+			.data_type	= QMI_UNSIGNED_4_BYTE,
+			.elem_len	= 1,
+			.elem_size	= sizeof(uint32_t),
+			.is_array	= NO_ARRAY,
+			.tlv_type	= QMI_COMMON_TLV_TYPE,
+			.offset		= offsetof(
+				struct ipa_per_client_stats_info_type_v01,
+				num_ul_ipv4_pkts),
+
+	},
+	{
+			.data_type	= QMI_UNSIGNED_4_BYTE,
+			.elem_len	= 1,
+			.elem_size	= sizeof(uint32_t),
+			.is_array	= NO_ARRAY,
+			.tlv_type	= QMI_COMMON_TLV_TYPE,
+			.offset		= offsetof(
+				struct ipa_per_client_stats_info_type_v01,
+				num_ul_ipv6_pkts),
+
+	},
+	{
+			.data_type	= QMI_UNSIGNED_4_BYTE,
+			.elem_len	= 1,
+			.elem_size	= sizeof(uint32_t),
+			.is_array	= NO_ARRAY,
+			.tlv_type	= QMI_COMMON_TLV_TYPE,
+			.offset		= offsetof(
+				struct ipa_per_client_stats_info_type_v01,
+				num_dl_ipv4_pkts),
+
+	},
+	{
+			.data_type	= QMI_UNSIGNED_4_BYTE,
+			.elem_len	= 1,
+			.elem_size	= sizeof(uint32_t),
+			.is_array	= NO_ARRAY,
+			.tlv_type	= QMI_COMMON_TLV_TYPE,
+			.offset		= offsetof(
+				struct ipa_per_client_stats_info_type_v01,
+				num_dl_ipv6_pkts),
+
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_ul_firewall_rule_type_data_v01_ei[] = {
+	{
+			.data_type	= QMI_UNSIGNED_4_BYTE,
+			.elem_len	= 1,
+			.elem_size	= sizeof(uint32_t),
+			.is_array	= NO_ARRAY,
+			.tlv_type	= QMI_COMMON_TLV_TYPE,
+			.offset		= offsetof(
+				struct ipa_ul_firewall_rule_type_v01,
+				ip_type),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_filter_rule_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_ul_firewall_rule_type_v01,
+					filter_rule),
+		.ei_array	= ipa3_filter_rule_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_ul_firewall_config_result_type_data_v01_ei[] = {
+	{
+			.data_type	= QMI_UNSIGNED_4_BYTE,
+			.elem_len	= 1,
+			.elem_size	= sizeof(uint32_t),
+			.is_array	= NO_ARRAY,
+			.tlv_type	= QMI_COMMON_TLV_TYPE,
+			.offset		= offsetof(
+				struct ipa_ul_firewall_config_result_type_v01,
+				is_success),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ul_firewall_config_result_type_v01,
+			mux_id),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_enable_per_client_stats_req_msg_data_v01_ei[] = {
+	{
+				.data_type	= QMI_UNSIGNED_1_BYTE,
+				.elem_len	= 1,
+				.elem_size	= sizeof(uint8_t),
+				.is_array	= NO_ARRAY,
+				.tlv_type	= 0x01,
+				.offset		= offsetof(struct
+				ipa_enable_per_client_stats_req_msg_v01,
+				enable_per_client_stats),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_enable_per_client_stats_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_enable_per_client_stats_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_get_stats_per_client_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(
+			struct ipa_get_stats_per_client_req_msg_v01,
+			client_id),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_get_stats_per_client_req_msg_v01,
+			src_pipe_id),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_stats_per_client_req_msg_v01,
+			reset_stats_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_stats_per_client_req_msg_v01,
+			reset_stats),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_get_stats_per_client_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_get_stats_per_client_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_stats_per_client_resp_msg_v01,
+			per_client_stats_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_stats_per_client_resp_msg_v01,
+			per_client_stats_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_PER_CLIENTS_V01,
+		.elem_size	=
+			sizeof(struct ipa_per_client_stats_info_type_v01),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_stats_per_client_resp_msg_v01,
+			per_client_stats_list),
+		.ei_array	=
+			ipa3_per_client_stats_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_configure_ul_firewall_rules_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1,
+		.offset		= offsetof(
+			struct ipa_configure_ul_firewall_rules_req_msg_v01,
+			firewall_rules_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_UL_FIREWALL_RULES_V01,
+		.elem_size	= sizeof(struct ipa_ul_firewall_rule_type_v01),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x1,
+		.offset		= offsetof(
+			struct ipa_configure_ul_firewall_rules_req_msg_v01,
+			firewall_rules_list),
+		.ei_array	=
+			ipa3_ul_firewall_rule_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x2,
+		.offset		= offsetof(
+			struct ipa_configure_ul_firewall_rules_req_msg_v01,
+			mux_id),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_configure_ul_firewall_rules_req_msg_v01,
+			disable_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_configure_ul_firewall_rules_req_msg_v01,
+			disable),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_configure_ul_firewall_rules_req_msg_v01,
+			are_blacklist_filters_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_configure_ul_firewall_rules_req_msg_v01,
+			are_blacklist_filters),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_configure_ul_firewall_rules_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_configure_ul_firewall_rules_ind_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(
+			struct ipa_ul_firewall_config_result_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(
+			struct ipa_configure_ul_firewall_rules_ind_msg_v01,
+			result),
+		.ei_array	=
+		ipa3_ul_firewall_config_result_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
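
Each elem_info array above is the QMI encode/decode description for one message: an optional TLV is expressed as a QMI_OPT_FLAG entry followed by a value entry with the same tlv_type, and a variable-length array pairs a QMI_DATA_LEN entry with a VAR_LEN_ARRAY entry that points at the element's own ei_array. The following minimal sketch only illustrates that pattern; the message struct and field names are hypothetical and not part of this patch.

	struct example_resp_msg_v01 {		/* hypothetical message */
		struct qmi_response_type_v01 resp;
		uint8_t foo_valid;		/* must precede foo */
		uint32_t foo;
	};

	static struct elem_info example_resp_msg_data_v01_ei[] = {
		{
			.data_type	= QMI_STRUCT,
			.elem_len	= 1,
			.elem_size	= sizeof(struct qmi_response_type_v01),
			.is_array	= NO_ARRAY,
			.tlv_type	= 0x02,
			.offset		= offsetof(
				struct example_resp_msg_v01, resp),
			.ei_array	= get_qmi_response_type_v01_ei(),
		},
		{
			.data_type	= QMI_OPT_FLAG,
			.elem_len	= 1,
			.elem_size	= sizeof(uint8_t),
			.is_array	= NO_ARRAY,
			.tlv_type	= 0x10,
			.offset		= offsetof(
				struct example_resp_msg_v01, foo_valid),
		},
		{
			.data_type	= QMI_UNSIGNED_4_BYTE,
			.elem_len	= 1,
			.elem_size	= sizeof(uint32_t),
			.is_array	= NO_ARRAY,
			.tlv_type	= 0x10,
			.offset		= offsetof(
				struct example_resp_msg_v01, foo),
		},
		{
			.data_type	= QMI_EOTI,
			.is_array	= NO_ARRAY,
			.tlv_type	= QMI_COMMON_TLV_TYPE,
		},
	};
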
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index edba283..2536bf4 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -918,7 +918,8 @@
 static int __ipa_create_rt_entry(struct ipa3_rt_entry **entry,
 		const struct ipa_rt_rule *rule,
 		struct ipa3_rt_tbl *tbl, struct ipa3_hdr_entry *hdr,
-		struct ipa3_hdr_proc_ctx_entry *proc_ctx)
+		struct ipa3_hdr_proc_ctx_entry *proc_ctx,
+		u16 rule_id)
 {
 	int id;
 
@@ -933,11 +934,16 @@
 	(*(entry))->tbl = tbl;
 	(*(entry))->hdr = hdr;
 	(*(entry))->proc_ctx = proc_ctx;
-	id = ipa3_alloc_rule_id(tbl->rule_ids);
-	if (id < 0) {
-		IPAERR("failed to allocate rule id\n");
-		WARN_ON(1);
-		goto alloc_rule_id_fail;
+	if (rule_id) {
+		id = rule_id;
+		(*(entry))->rule_id_valid = 1;
+	} else {
+		id = ipa3_alloc_rule_id(tbl->rule_ids);
+		if (id < 0) {
+			IPAERR("failed to allocate rule id\n");
+			WARN_ON(1);
+			goto alloc_rule_id_fail;
+		}
 	}
 	(*(entry))->rule_id = id;
 
@@ -984,7 +990,8 @@
 }
 
 static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
-		const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl)
+		const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl,
+		u16 rule_id)
 {
 	struct ipa3_rt_tbl *tbl;
 	struct ipa3_rt_entry *entry;
@@ -1012,7 +1019,8 @@
 		goto error;
 	}
 
-	if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx))
+	if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx,
+		rule_id))
 		goto error;
 
 	if (at_rear)
@@ -1043,7 +1051,7 @@
 	if (__ipa_rt_validate_hndls(rule, &hdr, &proc_ctx))
 		goto error;
 
-	if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx))
+	if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx, 0))
 		goto error;
 
 	list_add(&entry->link, &((*add_after_entry)->link));
@@ -1088,8 +1096,54 @@
 		if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name,
 					&rules->rules[i].rule,
 					rules->rules[i].at_rear,
-					&rules->rules[i].rt_rule_hdl)) {
-			IPAERR_RL("failed to add rt rule %d\n", i);
+					&rules->rules[i].rt_rule_hdl,
+					0)) {
+			IPAERR("failed to add rt rule %d\n", i);
+			rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
+		} else {
+			rules->rules[i].status = 0;
+		}
+	}
+
+	if (rules->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_rt(rules->ip)) {
+			ret = -EPERM;
+			goto bail;
+		}
+
+	ret = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return ret;
+}
+
+/**
+ * ipa3_add_rt_rule_ext() - Add the specified routing rules to SW with rule id
+ * and optionally commit to IPA HW
+ * @rules:	[inout] set of routing rules to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_rt_rule_ext(struct ipa_ioc_add_rt_rule_ext *rules)
+{
+	int i;
+	int ret;
+
+	if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < rules->num_rules; i++) {
+		if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name,
+					&rules->rules[i].rule,
+					rules->rules[i].at_rear,
+					&rules->rules[i].rt_rule_hdl,
+					rules->rules[i].rule_id)) {
+			IPAERR("failed to add rt rule %d\n", i);
 			rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
 		} else {
 			rules->rules[i].status = 0;
@@ -1237,7 +1291,9 @@
 	IPADBG("del rt rule tbl_idx=%d rule_cnt=%d rule_id=%d\n ref_cnt=%u",
 		entry->tbl->idx, entry->tbl->rule_cnt,
 		entry->rule_id, entry->tbl->ref_cnt);
-	idr_remove(entry->tbl->rule_ids, entry->rule_id);
+	/* if rule id was allocated from idr, remove it */
+	if (!entry->rule_id_valid)
+		idr_remove(entry->tbl->rule_ids, entry->rule_id);
 	if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) {
 		if (__ipa_del_rt_tbl(entry->tbl))
 			IPAERR_RL("fail to del RT tbl\n");
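
The rule_id plumbing above lets a caller install a routing rule whose id was reserved elsewhere: passing 0 keeps the old idr allocation, and rule deletion skips idr_remove() when rule_id_valid is set. The sketch below shows one way a kernel caller could use ipa3_add_rt_rule_ext(); it assumes it sits next to the IPA v3 code above (so the internal declarations are in scope), that ipa_ioc_add_rt_rule_ext ends in a flexible rules[] array and carries a commit flag like ipa_ioc_add_rt_rule, and that the fixed id does not collide with idr-allocated ids. The table name is made up.

	static int example_add_rule_with_fixed_id(u16 fixed_rule_id)
	{
		struct ipa_ioc_add_rt_rule_ext *req;
		int rc;

		req = kzalloc(sizeof(*req) + sizeof(req->rules[0]),
			GFP_KERNEL);
		if (!req)
			return -ENOMEM;

		req->ip = IPA_IP_v4;
		strlcpy(req->rt_tbl_name, "example_tbl",
			sizeof(req->rt_tbl_name));
		req->num_rules = 1;
		req->commit = 1;
		req->rules[0].at_rear = 1;
		req->rules[0].rule_id = fixed_rule_id;	/* 0 => idr alloc */
		/* req->rules[0].rule.dst / .attrib would be filled here */

		rc = ipa3_add_rt_rule_ext(req);
		if (!rc && req->rules[0].status)
			rc = -EPERM;

		kfree(req);
		return rc;
	}
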
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index e93210d..66d4b10 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -68,6 +68,9 @@
 
 #define IPA_WWAN_CONS_DESC_FIFO_SZ 256
 
+static void rmnet_ipa_free_msg(void *buff, u32 len, u32 type);
+static void rmnet_ipa_get_stats_and_update(void);
+
 static int ipa3_wwan_add_ul_flt_rule_to_ipa(void);
 static int ipa3_wwan_del_ul_flt_rule_to_ipa(void);
 static void ipa3_wwan_msg_free_cb(void*, u32, u32);
@@ -145,6 +148,10 @@
 	u32 pm_hdl;
 	u32 q6_pm_hdl;
 	u32 q6_teth_pm_hdl;
+	struct mutex per_client_stats_guard;
+	struct ipa_tether_device_info
+		tether_device
+		[IPACM_MAX_CLIENT_DEVICE_TYPES];
 };
 
 static struct rmnet_ipa3_context *rmnet_ipa3_ctx;
@@ -1189,7 +1196,11 @@
 
 static void ipa3_wwan_tx_timeout(struct net_device *dev)
 {
-	IPAWANERR("[%s] ipa3_wwan_tx_timeout(), data stall in UL\n", dev->name);
+	struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev);
+
+	if (atomic_read(&wwan_ptr->outstanding_pkts) != 0)
+		IPAWANERR("[%s] data stall in UL, %d outstanding\n",
+			dev->name, atomic_read(&wwan_ptr->outstanding_pkts));
 }
 
 /**
@@ -1946,12 +1957,6 @@
 	struct ipa_rm_perf_profile profile;
 	int ret;
 
-	ret = ipa_pm_set_perf_profile(rmnet_ipa3_ctx->q6_pm_hdl, throughput);
-	if (ret)
-		return ret;
-	return ipa_pm_set_perf_profile(rmnet_ipa3_ctx->q6_teth_pm_hdl,
-		throughput);
-
 	if (ipa3_ctx->use_ipa_pm) {
 		ret = ipa_pm_set_perf_profile(rmnet_ipa3_ctx->q6_pm_hdl,
 			throughput);
@@ -2741,9 +2746,11 @@
 	}
 
 	if (type != IPA_TETHERING_STATS_UPDATE_STATS &&
-		type != IPA_TETHERING_STATS_UPDATE_NETWORK_STATS) {
+			type != IPA_TETHERING_STATS_UPDATE_NETWORK_STATS &&
+			type != IPA_PER_CLIENT_STATS_CONNECT_EVENT &&
+			type != IPA_PER_CLIENT_STATS_DISCONNECT_EVENT) {
 		IPAWANERR("Wrong type given. buff %p type %d\n",
-			  buff, type);
+				buff, type);
 	}
 	kfree(buff);
 }
@@ -3501,8 +3508,488 @@
 	}
 }
 
+static inline bool rmnet_ipa3_check_any_client_inited(
+	enum ipacm_per_client_device_type device_type)
+{
+	int i = 0;
+
+	for (; i < IPA_MAX_NUM_HW_PATH_CLIENTS; i++) {
+		if (rmnet_ipa3_ctx->tether_device[device_type].
+		lan_client[i].client_idx != -1 &&
+		rmnet_ipa3_ctx->tether_device[device_type].
+		lan_client[i].inited) {
+			IPAWANERR("Found client index: %d which is inited\n",
+				 i);
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static inline int rmnet_ipa3_get_lan_client_info(
+	enum ipacm_per_client_device_type device_type, uint8_t mac[])
+{
+	int i = 0;
+
+	IPAWANDBG("Client MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+		mac[0], mac[1], mac[2],
+		mac[3], mac[4], mac[5]);
+
+	for (; i < IPA_MAX_NUM_HW_PATH_CLIENTS; i++) {
+		if (memcmp(
+		rmnet_ipa3_ctx->tether_device[device_type].
+		lan_client[i].mac,
+		mac,
+		IPA_MAC_ADDR_SIZE) == 0) {
+			IPAWANDBG("Matched client index: %d\n", i);
+			return i;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static inline int rmnet_ipa3_delete_lan_client_info(
+	enum ipacm_per_client_device_type device_type, int lan_clnt_idx)
+{
+	struct ipa_lan_client *lan_client = NULL;
+	int i;
+
+	/* Check if the request is to clean up all clients. */
+	if (lan_clnt_idx == 0xffffffff) {
+		/* Reset the complete device info. */
+		memset(&rmnet_ipa3_ctx->tether_device[device_type], 0,
+				sizeof(struct ipa_tether_device_info));
+		rmnet_ipa3_ctx->tether_device[device_type].ul_src_pipe = -1;
+		for (i = 0; i < IPA_MAX_NUM_HW_PATH_CLIENTS; i++)
+			rmnet_ipa3_ctx->tether_device[device_type].
+				lan_client[i].client_idx = -1;
+	} else {
+		lan_client =
+			&rmnet_ipa3_ctx->tether_device[device_type].
+			lan_client[lan_clnt_idx];
+		/* Reset the client info before sending the message. */
+		memset(lan_client, 0, sizeof(struct ipa_lan_client));
+		lan_client->client_idx = -1;
+	}
+	return 0;
+}
+
+/* rmnet_ipa3_set_lan_client_info() -
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_SET_LAN_CLIENT_INFO.
+ * It is used to store LAN client information which
+ * is used to fetch the packet stats for a client.
+ *
+ * Return codes:
+ * 0: Success
+ * -EINVAL: Invalid args provided
+ */
+int rmnet_ipa3_set_lan_client_info(
+	struct wan_ioctl_lan_client_info *data)
+{
+	struct ipa_lan_client *lan_client = NULL;
+
+	IPAWANDBG("Client MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+		data->mac[0], data->mac[1], data->mac[2],
+		data->mac[3], data->mac[4], data->mac[5]);
+
+	/* Check if Device type is valid. */
+	if (data->device_type >= IPACM_MAX_CLIENT_DEVICE_TYPES ||
+		data->device_type < 0) {
+		IPAWANERR("Invalid Device type: %d\n", data->device_type);
+		return -EINVAL;
+	}
+
+	/* Check if Client index is valid. */
+	if (data->client_idx >= IPA_MAX_NUM_HW_PATH_CLIENTS ||
+		data->client_idx < 0) {
+		IPAWANERR("Invalid Client Index: %d\n", data->client_idx);
+		return -EINVAL;
+	}
+
+	mutex_lock(&rmnet_ipa3_ctx->per_client_stats_guard);
+	if (data->client_init) {
+		/* check if the client is already inited. */
+		if (rmnet_ipa3_ctx->tether_device[data->device_type]
+			.lan_client[data->client_idx].inited) {
+			IPAWANERR("Client already inited: %d:%d\n",
+				data->device_type, data->client_idx);
+			mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+			return -EINVAL;
+		}
+	}
+
+	lan_client =
+	&rmnet_ipa3_ctx->tether_device[data->device_type].
+	lan_client[data->client_idx];
+
+	memcpy(lan_client->mac, data->mac, IPA_MAC_ADDR_SIZE);
+
+	lan_client->client_idx = data->client_idx;
+
+	/* Update the Source pipe. */
+	rmnet_ipa3_ctx->tether_device[data->device_type].ul_src_pipe =
+			ipa3_get_ep_mapping(data->ul_src_pipe);
+
+	/* Update the header length if not set. */
+	if (!rmnet_ipa3_ctx->tether_device[data->device_type].hdr_len)
+		rmnet_ipa3_ctx->tether_device[data->device_type].hdr_len =
+			data->hdr_len;
+
+	lan_client->inited = true;
+
+	rmnet_ipa3_ctx->tether_device[data->device_type].num_clients++;
+
+	IPAWANDBG("Set the lan client info: %d, %d, %d\n",
+		lan_client->client_idx,
+		rmnet_ipa3_ctx->tether_device[data->device_type].ul_src_pipe,
+		rmnet_ipa3_ctx->tether_device[data->device_type].num_clients);
+
+	mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+
+	return 0;
+}
+
+/* rmnet_ipa3_delete_lan_client_info() -
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_DELETE_LAN_CLIENT_INFO.
+ * It is used to delete LAN client information which
+ * is used to fetch the packet stats for a client.
+ *
+ * Return codes:
+ * 0: Success
+ * -EINVAL: Invalid args provided
+ */
+int rmnet_ipa3_clear_lan_client_info(
+	struct wan_ioctl_lan_client_info *data)
+{
+	struct ipa_lan_client *lan_client = NULL;
+
+	IPAWANDBG("Client MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+		data->mac[0], data->mac[1], data->mac[2],
+		data->mac[3], data->mac[4], data->mac[5]);
+
+	/* Check if Device type is valid. */
+	if (data->device_type >= IPACM_MAX_CLIENT_DEVICE_TYPES ||
+		data->device_type < 0) {
+		IPAWANERR("Invalid Device type: %d\n", data->device_type);
+		return -EINVAL;
+	}
+
+	/* Check if Client index is valid. */
+	if (data->client_idx >= IPA_MAX_NUM_HW_PATH_CLIENTS ||
+		data->client_idx < 0) {
+		IPAWANERR("Invalid Client Index: %d\n", data->client_idx);
+		return -EINVAL;
+	}
+
+	mutex_lock(&rmnet_ipa3_ctx->per_client_stats_guard);
+	lan_client =
+	&rmnet_ipa3_ctx->tether_device[data->device_type].
+	lan_client[data->client_idx];
+
+	if (!data->client_init) {
+		/* check if the client is already de-inited. */
+		if (!lan_client->inited) {
+			IPAWANERR("Client already de-inited: %d:%d\n",
+				data->device_type, data->client_idx);
+			mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+			return -EINVAL;
+		}
+	}
+
+	lan_client->inited = false;
+	mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+
+	return 0;
+}
+
+/* rmnet_ipa3_send_lan_client_msg() -
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_SEND_LAN_CLIENT_MSG.
+ * It is used to send LAN client information to IPACM.
+ *
+ * Return codes:
+ * 0: Success
+ * -EINVAL: Invalid args provided
+ */
+int rmnet_ipa3_send_lan_client_msg(
+	struct wan_ioctl_send_lan_client_msg *data)
+{
+	struct ipa_msg_meta msg_meta;
+	int rc;
+	struct ipa_lan_client_msg *lan_client;
+
+	/* Notify IPACM to reset the client index. */
+	lan_client = kzalloc(sizeof(struct ipa_lan_client_msg),
+		       GFP_KERNEL);
+	if (!lan_client) {
+		IPAWANERR("Can't allocate memory for lan_client msg\n");
+		return -ENOMEM;
+	}
+	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+	memcpy(lan_client, &data->lan_client,
+		sizeof(struct ipa_lan_client_msg));
+	msg_meta.msg_type = data->client_event;
+	msg_meta.msg_len = sizeof(struct ipa_lan_client_msg);
+
+	rc = ipa_send_msg(&msg_meta, lan_client, rmnet_ipa_free_msg);
+	if (rc) {
+		IPAWANERR("ipa_send_msg failed: %d\n", rc);
+		kfree(lan_client);
+		return rc;
+	}
+	return 0;
+}
+
+/* rmnet_ipa3_enable_per_client_stats() -
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_ENABLE_PER_CLIENT_STATS.
+ * It is used to indicate Q6 to start capturing per client stats.
+ *
+ * Return codes:
+ * 0: Success
+ * -EINVAL: Invalid args provided
+ */
+int rmnet_ipa3_enable_per_client_stats(
+	bool *data)
+{
+	struct ipa_enable_per_client_stats_req_msg_v01 *req;
+	struct ipa_enable_per_client_stats_resp_msg_v01 *resp;
+	int rc;
+
+	req =
+	kzalloc(sizeof(struct ipa_enable_per_client_stats_req_msg_v01),
+			GFP_KERNEL);
+	if (!req) {
+		IPAWANERR("Can't allocate memory for stats message\n");
+		return -ENOMEM;
+	}
+	resp =
+	kzalloc(sizeof(struct ipa_enable_per_client_stats_resp_msg_v01),
+			GFP_KERNEL);
+	if (!resp) {
+		IPAWANERR("Can't allocate memory for stats message\n");
+		kfree(req);
+		return -ENOMEM;
+	}
+	memset(req, 0,
+		sizeof(struct ipa_enable_per_client_stats_req_msg_v01));
+	memset(resp, 0,
+		sizeof(struct ipa_enable_per_client_stats_resp_msg_v01));
+
+	if (*data)
+		req->enable_per_client_stats = 1;
+	else
+		req->enable_per_client_stats = 0;
+
+	rc = ipa3_qmi_enable_per_client_stats(req, resp);
+	if (rc) {
+		IPAWANERR("can't enable per client stats\n");
+		kfree(req);
+		kfree(resp);
+		return rc;
+	}
+
+	kfree(req);
+	kfree(resp);
+	return 0;
+}
+
+int rmnet_ipa3_query_per_client_stats(
+	struct wan_ioctl_query_per_client_stats *data)
+{
+	struct ipa_get_stats_per_client_req_msg_v01 *req;
+	struct ipa_get_stats_per_client_resp_msg_v01 *resp;
+	int rc, lan_clnt_idx, lan_clnt_idx1, i;
+	struct ipa_lan_client *lan_client = NULL;
+
+	IPAWANDBG("Client MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+		data->client_info[0].mac[0],
+		data->client_info[0].mac[1],
+		data->client_info[0].mac[2],
+		data->client_info[0].mac[3],
+		data->client_info[0].mac[4],
+		data->client_info[0].mac[5]);
+
+	/* Check if Device type is valid. */
+	if (data->device_type >= IPACM_MAX_CLIENT_DEVICE_TYPES ||
+		data->device_type < 0) {
+		IPAWANERR("Invalid Device type: %d\n", data->device_type);
+		return -EINVAL;
+	}
+
+	/* Check if num_clients is valid. */
+	if (data->num_clients != IPA_MAX_NUM_HW_PATH_CLIENTS &&
+		data->num_clients != 1) {
+		IPAWANERR("Invalid number of clients: %d\n", data->num_clients);
+		return -EINVAL;
+	}
+
+	mutex_lock(&rmnet_ipa3_ctx->per_client_stats_guard);
+
+	if (data->num_clients == 1) {
+		/* Check if the client info is valid.*/
+		lan_clnt_idx1 = rmnet_ipa3_get_lan_client_info(
+			data->device_type,
+			data->client_info[0].mac);
+		if (lan_clnt_idx1 < 0) {
+			IPAWANERR("Client info not available, returning\n");
+			mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+			return -EINVAL;
+		}
+		lan_client =
+			&rmnet_ipa3_ctx->tether_device[data->device_type].
+			lan_client[lan_clnt_idx1];
+		/*
+		 * If the disconnect flag is set, make sure the client
+		 * info has already been cleared before proceeding.
+		 */
+		if (data->disconnect_clnt &&
+			lan_client->inited) {
+			IPAWANERR("Client still inited, try again\n");
+			mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+			return -EAGAIN;
+		}
+
+	} else {
+		/*
+		 * Max number of clients: if the disconnect flag is set,
+		 * make sure all clients' info has already been cleared.
+		 */
+		if (data->disconnect_clnt &&
+			rmnet_ipa3_check_any_client_inited(data->device_type)) {
+			IPAWANERR("Clients still inited, try again\n");
+			mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+			return -EAGAIN;
+		}
+		lan_clnt_idx1 = 0xffffffff;
+	}
+
+	req = kzalloc(sizeof(struct ipa_get_stats_per_client_req_msg_v01),
+			GFP_KERNEL);
+	if (!req) {
+		IPAWANERR("Can't allocate memory for stats message\n");
+		mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+		return -ENOMEM;
+	}
+	resp = kzalloc(sizeof(struct ipa_get_stats_per_client_resp_msg_v01),
+			GFP_KERNEL);
+	if (!resp) {
+		IPAWANERR("Can't allocate memory for stats message\n");
+		mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+		kfree(req);
+		return -ENOMEM;
+	}
+	memset(req, 0, sizeof(struct ipa_get_stats_per_client_req_msg_v01));
+	memset(resp, 0, sizeof(struct ipa_get_stats_per_client_resp_msg_v01));
+
+	if (data->reset_stats) {
+		req->reset_stats_valid = true;
+		req->reset_stats = true;
+		IPAWANDBG("fetch and reset the client stats\n");
+	}
+
+	req->client_id = lan_clnt_idx1;
+	req->src_pipe_id =
+		rmnet_ipa3_ctx->tether_device[data->device_type].ul_src_pipe;
+
+	IPAWANDBG("fetch the client stats for %d, %d\n", req->client_id,
+		req->src_pipe_id);
+
+	rc = ipa3_qmi_get_per_client_packet_stats(req, resp);
+	if (rc) {
+		IPAWANERR("can't get per client stats\n");
+		mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+		kfree(req);
+		kfree(resp);
+		return rc;
+	}
+
+	if (resp->per_client_stats_list_valid) {
+		for (i = 0; i < resp->per_client_stats_list_len
+				&& i < IPA_MAX_NUM_HW_PATH_CLIENTS; i++) {
+			/* Subtract the header bytes from the DL bytes. */
+			data->client_info[i].ipv4_rx_bytes =
+			(resp->per_client_stats_list[i].num_dl_ipv4_bytes) -
+			(rmnet_ipa3_ctx->
+			tether_device[data->device_type].hdr_len *
+			resp->per_client_stats_list[i].num_dl_ipv4_pkts);
+			/* UL header bytes are subtracted by Q6. */
+			data->client_info[i].ipv4_tx_bytes =
+			resp->per_client_stats_list[i].num_ul_ipv4_bytes;
+			/* Subtract the header bytes from the DL bytes. */
+			data->client_info[i].ipv6_rx_bytes =
+			(resp->per_client_stats_list[i].num_dl_ipv6_bytes) -
+			(rmnet_ipa3_ctx->
+			tether_device[data->device_type].hdr_len *
+			resp->per_client_stats_list[i].num_dl_ipv6_pkts);
+			/* UL header bytes are subtracted by Q6. */
+			data->client_info[i].ipv6_tx_bytes =
+			resp->per_client_stats_list[i].num_ul_ipv6_bytes;
+
+			IPAWANDBG("tx_b_v4(%lu)v6(%lu)rx_b_v4(%lu) v6(%lu)\n",
+			(unsigned long int) data->client_info[i].ipv4_tx_bytes,
+			(unsigned long int) data->client_info[i].ipv6_tx_bytes,
+			(unsigned long int) data->client_info[i].ipv4_rx_bytes,
+			(unsigned long int) data->client_info[i].ipv6_rx_bytes);
+
+			/* Get the lan client index. */
+			lan_clnt_idx = resp->per_client_stats_list[i].client_id;
+			/* Check if lan_clnt_idx is valid. */
+			if (lan_clnt_idx < 0 ||
+				lan_clnt_idx >= IPA_MAX_NUM_HW_PATH_CLIENTS) {
+				IPAWANERR("Lan client index not valid.\n");
+				mutex_unlock(
+				&rmnet_ipa3_ctx->per_client_stats_guard);
+				kfree(req);
+				kfree(resp);
+				ipa_assert();
+				return -EINVAL;
+			}
+			memcpy(data->client_info[i].mac,
+				rmnet_ipa3_ctx->
+				tether_device[data->device_type].
+				lan_client[lan_clnt_idx].mac,
+				IPA_MAC_ADDR_SIZE);
+		}
+	}
+
+	if (data->disconnect_clnt) {
+		rmnet_ipa3_delete_lan_client_info(data->device_type,
+		lan_clnt_idx1);
+	}
+
+	mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+	kfree(req);
+	kfree(resp);
+	return 0;
+}
+
 static int __init ipa3_wwan_init(void)
 {
+	int i, j;
+
 	rmnet_ipa3_ctx = kzalloc(sizeof(*rmnet_ipa3_ctx), GFP_KERNEL);
 	if (!rmnet_ipa3_ctx) {
 		IPAWANERR("no memory\n");
@@ -3514,6 +4001,14 @@
 
 	mutex_init(&rmnet_ipa3_ctx->pipe_handle_guard);
 	mutex_init(&rmnet_ipa3_ctx->add_mux_channel_lock);
+	mutex_init(&rmnet_ipa3_ctx->per_client_stats_guard);
+	/* Reset the Lan Stats. */
+	for (i = 0; i < IPACM_MAX_CLIENT_DEVICE_TYPES; i++) {
+		rmnet_ipa3_ctx->tether_device[i].ul_src_pipe = -1;
+		for (j = 0; j < IPA_MAX_NUM_HW_PATH_CLIENTS; j++)
+			rmnet_ipa3_ctx->tether_device[i].
+				lan_client[j].client_idx = -1;
+	}
 	rmnet_ipa3_ctx->ipa3_to_apps_hdl = -1;
 	rmnet_ipa3_ctx->apps_to_ipa3_hdl = -1;
 
@@ -3536,6 +4031,7 @@
 	ipa3_qmi_cleanup();
 	mutex_destroy(&rmnet_ipa3_ctx->pipe_handle_guard);
 	mutex_destroy(&rmnet_ipa3_ctx->add_mux_channel_lock);
+	mutex_destroy(&rmnet_ipa3_ctx->per_client_stats_guard);
 	ret = subsys_notif_unregister_notifier(
 		rmnet_ipa3_ctx->subsys_notify_handle, &ipa3_ssr_notifier);
 	if (ret)
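
The per-client stats code added above is driven from userspace through the WAN ioctls handled in rmnet_ipa_fd_ioctl.c below. The rough flow sketch that follows is illustrative only: the device node path and the uapi header location are assumptions, error handling is trimmed, and the struct fields mirror the ones used by rmnet_ipa3_set_lan_client_info() and rmnet_ipa3_query_per_client_stats() above.

	#include <fcntl.h>
	#include <stdbool.h>
	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/rmnet_ipa_fd_ioctl.h>	/* uapi header, path assumed */

	static void example_query_one_client(const uint8_t mac[6],
					     int device_type)
	{
		struct wan_ioctl_lan_client_info info = { 0 };
		struct wan_ioctl_query_per_client_stats stats = { 0 };
		bool enable = true;
		int fd = open("/dev/wwan_ioctl", O_RDWR);  /* node assumed */

		if (fd < 0)
			return;

		/* 1. Register the client so the driver can map MAC -> index. */
		info.device_type = device_type;
		info.client_idx = 0;
		info.client_init = 1;
		memcpy(info.mac, mac, 6);
		ioctl(fd, WAN_IOC_SET_LAN_CLIENT_INFO, &info);

		/* 2. Ask Q6 to start accounting per-client byte counts. */
		ioctl(fd, WAN_IOC_ENABLE_PER_CLIENT_STATS, &enable);

		/* 3. Fetch (and optionally reset) the counters for this MAC. */
		stats.device_type = device_type;
		stats.num_clients = 1;
		memcpy(stats.client_info[0].mac, mac, 6);
		ioctl(fd, WAN_IOC_QUERY_PER_CLIENT_STATS, &stats);

		close(fd);
	}
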
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
index 2e43abf..0f85e12 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
@@ -53,6 +53,15 @@
 #define WAN_IOC_NOTIFY_WAN_STATE32 _IOWR(WAN_IOC_MAGIC, \
 		WAN_IOCTL_NOTIFY_WAN_STATE, \
 		compat_uptr_t)
+#define WAN_IOCTL_ENABLE_PER_CLIENT_STATS32 _IOWR(WAN_IOC_MAGIC, \
+			WAN_IOCTL_ENABLE_PER_CLIENT_STATS, \
+			compat_uptr_t)
+#define WAN_IOCTL_QUERY_PER_CLIENT_STATS32 _IOWR(WAN_IOC_MAGIC, \
+			WAN_IOCTL_QUERY_PER_CLIENT_STATS, \
+			compat_uptr_t)
+#define WAN_IOCTL_SET_LAN_CLIENT_INFO32 _IOWR(WAN_IOC_MAGIC, \
+			WAN_IOCTL_SET_LAN_CLIENT_INFO, \
+			compat_uptr_t)
 #endif
 
 static unsigned int dev_num = 1;
@@ -128,6 +137,33 @@
 		}
 		break;
 
+	case WAN_IOC_ADD_UL_FLT_RULE:
+		IPAWANDBG("device %s got WAN_IOC_ADD_UL_FLT_RULE :>>>\n",
+		DRIVER_NAME);
+		pyld_sz =
+		sizeof(struct ipa_configure_ul_firewall_rules_req_msg_v01);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_qmi_ul_filter_request_send(
+			(struct ipa_configure_ul_firewall_rules_req_msg_v01 *)
+			param)) {
+			IPAWANDBG("IPACM->Q6 add ul filter rule failed\n");
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
 	case WAN_IOC_ADD_FLT_RULE_INDEX:
 		IPAWANDBG("device %s got WAN_IOC_ADD_FLT_RULE_INDEX :>>>\n",
 		DRIVER_NAME);
@@ -339,7 +375,115 @@
 			retval = -EFAULT;
 			break;
 		}
+
 		break;
+	case WAN_IOC_ENABLE_PER_CLIENT_STATS:
+		IPAWANDBG_LOW("got WAN_IOC_ENABLE_PER_CLIENT_STATS :>>>\n");
+		pyld_sz = sizeof(bool);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (rmnet_ipa3_enable_per_client_stats(
+			(bool *)param)) {
+			IPAWANERR("WAN_IOC_ENABLE_PER_CLIENT_STATS failed\n");
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case WAN_IOC_QUERY_PER_CLIENT_STATS:
+		IPAWANDBG_LOW("got WAN_IOC_QUERY_PER_CLIENT_STATS :>>>\n");
+		pyld_sz = sizeof(struct wan_ioctl_query_per_client_stats);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+
+		retval = rmnet_ipa3_query_per_client_stats(
+			(struct wan_ioctl_query_per_client_stats *)param);
+		if (retval) {
+			IPAWANERR("WAN_IOC_QUERY_PER_CLIENT_STATS failed\n");
+			break;
+		}
+
+		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case WAN_IOC_SET_LAN_CLIENT_INFO:
+		IPAWANDBG_LOW("got WAN_IOC_SET_LAN_CLIENT_INFO :>>>\n");
+		pyld_sz = sizeof(struct wan_ioctl_lan_client_info);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (rmnet_ipa3_set_lan_client_info(
+			(struct wan_ioctl_lan_client_info *)param)) {
+			IPAWANERR("WAN_IOC_SET_LAN_CLIENT_INFO failed\n");
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case WAN_IOC_CLEAR_LAN_CLIENT_INFO:
+		IPAWANDBG_LOW("got WAN_IOC_CLEAR_LAN_CLIENT_INFO :>>>\n");
+		pyld_sz = sizeof(struct wan_ioctl_lan_client_info);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (rmnet_ipa3_clear_lan_client_info(
+			(struct wan_ioctl_lan_client_info *)param)) {
+			IPAWANERR("WAN_IOC_CLEAR_LAN_CLIENT_INFO failed\n");
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case WAN_IOC_SEND_LAN_CLIENT_MSG:
+		IPAWANDBG_LOW("got WAN_IOC_SEND_LAN_CLIENT_MSG :>>>\n");
+		pyld_sz = sizeof(struct wan_ioctl_send_lan_client_msg);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (rmnet_ipa3_send_lan_client_msg(
+			(struct wan_ioctl_send_lan_client_msg *)
+			param)) {
+			IPAWANERR("IOC_SEND_LAN_CLIENT_MSG failed\n");
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
 	default:
 		retval = -ENOTTY;
 	}
diff --git a/drivers/platform/msm/msm_11ad/msm_11ad.c b/drivers/platform/msm/msm_11ad/msm_11ad.c
index e76ff14..d55e655 100644
--- a/drivers/platform/msm/msm_11ad/msm_11ad.c
+++ b/drivers/platform/msm/msm_11ad/msm_11ad.c
@@ -126,6 +126,7 @@
 	struct cpumask boost_cpu;
 
 	bool keep_radio_on_during_sleep;
+	int features;
 };
 
 static LIST_HEAD(dev_list);
@@ -1444,9 +1445,19 @@
 		break;
 	case WIL_PLATFORM_EVT_PRE_RESET:
 		/*
-		 * TODO: Enable rf_clk3 clock before resetting the device to
-		 * ensure stable ref clock during the device reset
+		 * Enable rf_clk3 clock before resetting the device to ensure
+		 * stable ref clock during the device reset
 		 */
+		if (ctx->features &
+		    BIT(WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL)) {
+			rc = msm_11ad_enable_clk(ctx, &ctx->rf_clk3);
+			if (rc) {
+				dev_err(ctx->dev,
+					"failed to enable clk, rc %d\n", rc);
+				break;
+			}
+		}
+
 		/* Re-enable L1 in case it was enabled in enumeration */
 		if (ctx->l1_enabled_in_enum) {
 			rc = msm_11ad_ctrl_aspm_l1(ctx, true);
@@ -1457,9 +1468,12 @@
 		break;
 	case WIL_PLATFORM_EVT_FW_RDY:
 		/*
-		 * TODO: Disable rf_clk3 clock after the device is up to allow
+		 * Disable rf_clk3 clock after the device is up to allow
 		 * the device to control it via its GPIO for power saving
 		 */
+		if (ctx->features &
+		    BIT(WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL))
+			msm_11ad_disable_clk(ctx, &ctx->rf_clk3);
 		break;
 	default:
 		pr_debug("%s: Unhandled event %d\n", __func__, evt);
@@ -1469,14 +1483,28 @@
 	return rc;
 }
 
-static bool ops_keep_radio_on_during_sleep(void *handle)
+static int ops_get_capa(void *handle)
 {
 	struct msm11ad_ctx *ctx = (struct msm11ad_ctx *)handle;
+	int capa;
 
 	pr_debug("%s: keep radio on during sleep is %s\n", __func__,
 		 ctx->keep_radio_on_during_sleep ? "allowed" : "not allowed");
 
-	return ctx->keep_radio_on_during_sleep;
+	capa = (ctx->keep_radio_on_during_sleep ?
+			BIT(WIL_PLATFORM_CAPA_RADIO_ON_IN_SUSPEND) : 0) |
+		BIT(WIL_PLATFORM_CAPA_T_PWR_ON_0) |
+		BIT(WIL_PLATFORM_CAPA_EXT_CLK);
+
+	return capa;
+}
+
+static void ops_set_features(void *handle, int features)
+{
+	struct msm11ad_ctx *ctx = (struct msm11ad_ctx *)handle;
+
+	pr_debug("%s: features 0x%x\n", __func__, features);
+	ctx->features = features;
 }
 
 void *msm_11ad_dev_init(struct device *dev, struct wil_platform_ops *ops,
@@ -1518,7 +1546,8 @@
 	ops->resume = ops_resume;
 	ops->uninit = ops_uninit;
 	ops->notify = ops_notify;
-	ops->keep_radio_on_during_sleep = ops_keep_radio_on_during_sleep;
+	ops->get_capa = ops_get_capa;
+	ops->set_features = ops_set_features;
 
 	return ctx;
 }
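
The keep_radio_on_during_sleep hook is replaced above by a capability/feature handshake. A sketch of how the consumer side might use it follows; it assumes the wil6210 driver is the caller, that it obtained the handle/ops pair from msm_11ad_dev_init(), and that it sits alongside driver code so the kernel macros and wil_platform definitions are in scope. The policy shown is illustrative only.

	static void example_negotiate_platform(void *handle,
					       struct wil_platform_ops *ops)
	{
		int capa = 0, features = 0;

		if (ops->get_capa)
			capa = ops->get_capa(handle);

		/*
		 * If the platform exposes an external reference clock, let
		 * the firmware control it and tell the platform driver, so
		 * that it gates rf_clk3 only around device reset (see the
		 * WIL_PLATFORM_EVT_PRE_RESET/FW_RDY handling above).
		 */
		if (capa & BIT(WIL_PLATFORM_CAPA_EXT_CLK))
			features |= BIT(WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL);

		if (ops->set_features)
			ops->set_features(handle, features);
	}
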
diff --git a/drivers/platform/msm/msm_ext_display.c b/drivers/platform/msm/msm_ext_display.c
index 73bf935..24b8e2c 100644
--- a/drivers/platform/msm/msm_ext_display.c
+++ b/drivers/platform/msm/msm_ext_display.c
@@ -147,6 +147,12 @@
 	int ret = 0;
 	int state;
 
+	if (!ext_disp->ops) {
+		pr_err("codec not registered, skip notification\n");
+		ret = -EPERM;
+		goto end;
+	}
+
 	state = ext_disp->audio_sdev.state;
 	ret = extcon_set_state_sync(&ext_disp->audio_sdev,
 			ext_disp->current_disp, !!new_state);
@@ -155,7 +161,7 @@
 			ext_disp->audio_sdev.state == state ?
 			"is same" : "switched to",
 			ext_disp->audio_sdev.state);
-
+end:
 	return ret;
 }
 
@@ -218,15 +224,10 @@
 		goto end;
 	}
 
-	if (!ext_disp->ops) {
-		pr_err("codec ops not registered\n");
-		ret = -EINVAL;
-		goto end;
-	}
-
 	if (state == EXT_DISPLAY_CABLE_CONNECT) {
 		/* connect codec with interface */
-		*ext_disp->ops = data->codec_ops;
+		if (ext_disp->ops)
+			*ext_disp->ops = data->codec_ops;
 
 		/* update pdev for interface to use */
 		ext_disp->ext_disp_data.intf_pdev = data->pdev;
@@ -285,6 +286,28 @@
 	return ret;
 }
 
+static void msm_ext_disp_ready_for_display(struct msm_ext_disp *ext_disp)
+{
+	int ret;
+	struct msm_ext_disp_init_data *data = NULL;
+
+	if (!ext_disp) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	ret = msm_ext_disp_get_intf_data(ext_disp,
+		ext_disp->current_disp, &data);
+	if (ret) {
+		pr_err("%s not found\n",
+			msm_ext_disp_name(ext_disp->current_disp));
+		return;
+	}
+
+	*ext_disp->ops = data->codec_ops;
+	data->codec_ops.ready(ext_disp->pdev);
+}
+
 int msm_hdmi_register_audio_codec(struct platform_device *pdev,
 		struct msm_ext_disp_audio_codec_ops *ops)
 {
@@ -334,6 +357,8 @@
 
 end:
 	mutex_unlock(&ext_disp->lock);
+	if (ext_disp->current_disp != EXT_DISPLAY_TYPE_MAX)
+		msm_ext_disp_ready_for_display(ext_disp);
 
 	return ret;
 }
@@ -341,6 +366,8 @@
 
 static int msm_ext_disp_validate_intf(struct msm_ext_disp_init_data *init_data)
 {
+	struct msm_ext_disp_audio_codec_ops *ops;
+
 	if (!init_data) {
 		pr_err("Invalid init_data\n");
 		return -EINVAL;
@@ -351,9 +378,15 @@
 		return -EINVAL;
 	}
 
-	if (!init_data->codec_ops.get_audio_edid_blk ||
-			!init_data->codec_ops.cable_status ||
-			!init_data->codec_ops.audio_info_setup) {
+	ops = &init_data->codec_ops;
+
+	if (!ops->audio_info_setup   ||
+	    !ops->get_audio_edid_blk ||
+	    !ops->cable_status       ||
+	    !ops->get_intf_id        ||
+	    !ops->teardown_done      ||
+	    !ops->acknowledge        ||
+	    !ops->ready) {
 		pr_err("Invalid codec operation pointers\n");
 		return -EINVAL;
 	}
diff --git a/drivers/platform/msm/qcom-geni-se.c b/drivers/platform/msm/qcom-geni-se.c
index 423c8f1..94736d4 100644
--- a/drivers/platform/msm/qcom-geni-se.c
+++ b/drivers/platform/msm/qcom-geni-se.c
@@ -19,6 +19,7 @@
 #include <linux/io.h>
 #include <linux/list.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/msm-bus.h>
 #include <linux/msm-bus-board.h>
 #include <linux/of.h>
@@ -575,13 +576,6 @@
 }
 EXPORT_SYMBOL(se_config_packing);
 
-static void se_geni_clks_off(struct se_geni_rsc *rsc)
-{
-	clk_disable_unprepare(rsc->se_clk);
-	clk_disable_unprepare(rsc->s_ahb_clk);
-	clk_disable_unprepare(rsc->m_ahb_clk);
-}
-
 static bool geni_se_check_bus_bw(struct geni_se_device *geni_se_dev)
 {
 	int i;
@@ -641,6 +635,37 @@
 }
 
 /**
+ * se_geni_clks_off() - Turn off clocks associated with the serial
+ *                      engine
+ * @rsc:	Handle to resources associated with the serial engine.
+ *
+ * Return:	0 on success, standard Linux error codes on failure/error.
+ */
+int se_geni_clks_off(struct se_geni_rsc *rsc)
+{
+	int ret = 0;
+	struct geni_se_device *geni_se_dev;
+
+	if (unlikely(!rsc || !rsc->wrapper_dev))
+		return -EINVAL;
+
+	geni_se_dev = dev_get_drvdata(rsc->wrapper_dev);
+	if (unlikely(!geni_se_dev || !geni_se_dev->bus_bw))
+		return -ENODEV;
+
+	clk_disable_unprepare(rsc->se_clk);
+	clk_disable_unprepare(rsc->s_ahb_clk);
+	clk_disable_unprepare(rsc->m_ahb_clk);
+
+	ret = geni_se_rmv_ab_ib(geni_se_dev, rsc);
+	if (ret)
+		GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
+			"%s: Error %d during bus_bw_update\n", __func__, ret);
+	return ret;
+}
+EXPORT_SYMBOL(se_geni_clks_off);
+
+/**
  * se_geni_resources_off() - Turn off resources associated with the serial
  *                           engine
  * @rsc:	Handle to resources associated with the serial engine.
@@ -665,37 +690,14 @@
 			"%s: Error %d pinctrl_select_state\n", __func__, ret);
 		return ret;
 	}
-	se_geni_clks_off(rsc);
-	ret = geni_se_rmv_ab_ib(geni_se_dev, rsc);
+	ret = se_geni_clks_off(rsc);
 	if (ret)
 		GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
-			"%s: Error %d during bus_bw_update\n", __func__, ret);
+			"%s: Error %d turning off clocks\n", __func__, ret);
 	return ret;
 }
 EXPORT_SYMBOL(se_geni_resources_off);
 
-static int se_geni_clks_on(struct se_geni_rsc *rsc)
-{
-	int ret;
-
-	ret = clk_prepare_enable(rsc->m_ahb_clk);
-	if (ret)
-		return ret;
-
-	ret = clk_prepare_enable(rsc->s_ahb_clk);
-	if (ret) {
-		clk_disable_unprepare(rsc->m_ahb_clk);
-		return ret;
-	}
-
-	ret = clk_prepare_enable(rsc->se_clk);
-	if (ret) {
-		clk_disable_unprepare(rsc->s_ahb_clk);
-		clk_disable_unprepare(rsc->m_ahb_clk);
-	}
-	return ret;
-}
-
 static int geni_se_add_ab_ib(struct geni_se_device *geni_se_dev,
 			     struct se_geni_rsc *rsc)
 {
@@ -733,13 +735,13 @@
 }
 
 /**
- * se_geni_resources_on() - Turn on resources associated with the serial
- *                          engine
+ * se_geni_clks_on() - Turn on clocks associated with the serial
+ *                     engine
  * @rsc:	Handle to resources associated with the serial engine.
  *
  * Return:	0 on success, standard Linux error codes on failure/error.
  */
-int se_geni_resources_on(struct se_geni_rsc *rsc)
+int se_geni_clks_on(struct se_geni_rsc *rsc)
 {
 	int ret = 0;
 	struct geni_se_device *geni_se_dev;
@@ -758,11 +760,52 @@
 		return ret;
 	}
 
+	ret = clk_prepare_enable(rsc->m_ahb_clk);
+	if (ret)
+		goto clks_on_err1;
+
+	ret = clk_prepare_enable(rsc->s_ahb_clk);
+	if (ret)
+		goto clks_on_err2;
+
+	ret = clk_prepare_enable(rsc->se_clk);
+	if (ret)
+		goto clks_on_err3;
+	return 0;
+
+clks_on_err3:
+	clk_disable_unprepare(rsc->s_ahb_clk);
+clks_on_err2:
+	clk_disable_unprepare(rsc->m_ahb_clk);
+clks_on_err1:
+	geni_se_rmv_ab_ib(geni_se_dev, rsc);
+	return ret;
+}
+EXPORT_SYMBOL(se_geni_clks_on);
+
+/**
+ * se_geni_resources_on() - Turn on resources associated with the serial
+ *                          engine
+ * @rsc:	Handle to resources associated with the serial engine.
+ *
+ * Return:	0 on success, standard Linux error codes on failure/error.
+ */
+int se_geni_resources_on(struct se_geni_rsc *rsc)
+{
+	int ret = 0;
+	struct geni_se_device *geni_se_dev;
+
+	if (unlikely(!rsc || !rsc->wrapper_dev))
+		return -EINVAL;
+
+	geni_se_dev = dev_get_drvdata(rsc->wrapper_dev);
+	if (unlikely(!geni_se_dev))
+		return -EPROBE_DEFER;
+
 	ret = se_geni_clks_on(rsc);
 	if (ret) {
 		GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
 			"%s: Error %d during clks_on\n", __func__, ret);
-		geni_se_rmv_ab_ib(geni_se_dev, rsc);
 		return ret;
 	}
 
@@ -771,7 +814,6 @@
 		GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
 			"%s: Error %d pinctrl_select_state\n", __func__, ret);
 		se_geni_clks_off(rsc);
-		geni_se_rmv_ab_ib(geni_se_dev, rsc);
 	}
 	return ret;
 }
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index b929d8b..785cf23 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -319,6 +319,7 @@
 	POWER_SUPPLY_ATTR(pd_voltage_max),
 	POWER_SUPPLY_ATTR(pd_voltage_min),
 	POWER_SUPPLY_ATTR(sdp_current_max),
+	POWER_SUPPLY_ATTR(connector_type),
 	/* Local extensions of type int64_t */
 	POWER_SUPPLY_ATTR(charge_counter_ext),
 	/* Properties of type `const char *' */
diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h
index 7c10e63..9179325 100644
--- a/drivers/power/supply/qcom/fg-core.h
+++ b/drivers/power/supply/qcom/fg-core.h
@@ -57,6 +57,8 @@
 /* Battery missing irq votable reasons */
 #define BATT_MISS_IRQ_VOTER	"fg_batt_miss_irq"
 
+#define ESR_FCC_VOTER		"fg_esr_fcc"
+
 #define DEBUG_PRINT_BUFFER_SIZE		64
 /* 3 byte address + 1 space character */
 #define ADDR_LEN			4
@@ -403,6 +405,7 @@
 	struct votable		*awake_votable;
 	struct votable		*delta_bsoc_irq_en_votable;
 	struct votable		*batt_miss_irq_en_votable;
+	struct votable		*pl_disable_votable;
 	struct fg_sram_param	*sp;
 	struct fg_dma_address	*addr_map;
 	struct fg_alg_flag	*alg_flags;
@@ -460,6 +463,7 @@
 	struct work_struct	status_change_work;
 	struct delayed_work	ttf_work;
 	struct delayed_work	sram_dump_work;
+	struct delayed_work	pl_enable_work;
 };
 
 /* Debugfs data structures are below */
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index 2a47442..df3e25f 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -762,7 +762,19 @@
 	if (rc < 0)
 		return rc;
 
-	*msoc = DIV_ROUND_CLOSEST(*msoc * FULL_CAPACITY, FULL_SOC_RAW);
+	/*
+	 * To have better endpoints for 0 and 100, it is good to tune the
+	 * calculation discarding values 0 and 255 while rounding off. Rest
+	 * of the values 1-254 will be scaled to 1-99. DIV_ROUND_UP will not
+	 * be suitable here as it rounds up any value higher than 252 to 100.
+	 */
+	if (*msoc == FULL_SOC_RAW)
+		*msoc = 100;
+	else if (*msoc == 0)
+		*msoc = 0;
+	else
+		*msoc = DIV_ROUND_CLOSEST((*msoc - 1) * (FULL_CAPACITY - 2),
+				FULL_SOC_RAW - 2) + 1;
 	return 0;
 }
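
A quick worked example of the rescaling above (taking FULL_SOC_RAW = 255 and FULL_CAPACITY = 100, as the comment implies): raw 1 maps to DIV_ROUND_CLOSEST(0 * 98, 253) + 1 = 1, raw 128 maps to DIV_ROUND_CLOSEST(127 * 98, 253) + 1 = 50, and raw 254 maps to DIV_ROUND_CLOSEST(253 * 98, 253) + 1 = 99, so only a raw reading of 255 reports 100 and only a raw reading of 0 reports 0.
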
 
@@ -833,7 +845,7 @@
 {
 	int debug_batt_id[2], rc;
 
-	if (!chip->batt_id_ohms)
+	if (chip->batt_id_ohms < 0)
 		return false;
 
 	rc = fg_get_debug_batt_id(chip, debug_batt_id);
@@ -2721,6 +2733,49 @@
 	return true;
 }
 
+static void fg_update_batt_profile(struct fg_chip *chip)
+{
+	int rc, offset;
+	u8 val;
+
+	rc = fg_sram_read(chip, PROFILE_INTEGRITY_WORD,
+			SW_CONFIG_OFFSET, &val, 1, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in reading SW_CONFIG_OFFSET, rc=%d\n", rc);
+		return;
+	}
+
+	/*
+	 * If RCONN has not been updated, there is no need to update the
+	 * battery profile. Otherwise, update the battery profile so that
+	 * the profile modified by the bootloader or HLOS matches the
+	 * profile read from the device tree.
+	 */
+	if (!(val & RCONN_CONFIG_BIT))
+		return;
+
+	rc = fg_sram_read(chip, ESR_RSLOW_CHG_WORD,
+			ESR_RSLOW_CHG_OFFSET, &val, 1, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in reading ESR_RSLOW_CHG_OFFSET, rc=%d\n", rc);
+		return;
+	}
+	offset = (ESR_RSLOW_CHG_WORD - PROFILE_LOAD_WORD) * 4
+			+ ESR_RSLOW_CHG_OFFSET;
+	chip->batt_profile[offset] = val;
+
+	rc = fg_sram_read(chip, ESR_RSLOW_DISCHG_WORD,
+			ESR_RSLOW_DISCHG_OFFSET, &val, 1, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in reading ESR_RSLOW_DISCHG_OFFSET, rc=%d\n", rc);
+		return;
+	}
+	offset = (ESR_RSLOW_DISCHG_WORD - PROFILE_LOAD_WORD) * 4
+			+ ESR_RSLOW_DISCHG_OFFSET;
+	chip->batt_profile[offset] = val;
+}
+
 static void clear_battery_profile(struct fg_chip *chip)
 {
 	u8 val = 0;
@@ -2778,6 +2833,16 @@
 	return rc;
 }
 
+static void pl_enable_work(struct work_struct *work)
+{
+	struct fg_chip *chip = container_of(work,
+				struct fg_chip,
+				pl_enable_work.work);
+
+	vote(chip->pl_disable_votable, ESR_FCC_VOTER, false, 0);
+	vote(chip->awake_votable, ESR_FCC_VOTER, false, 0);
+}
+
 static void profile_load_work(struct work_struct *work)
 {
 	struct fg_chip *chip = container_of(work,
@@ -2804,6 +2869,8 @@
 	if (!chip->profile_available)
 		goto out;
 
+	fg_update_batt_profile(chip);
+
 	if (!is_profile_load_required(chip))
 		goto done;
 
@@ -2865,12 +2932,18 @@
 				rc);
 	}
 
+	rc = fg_rconn_config(chip);
+	if (rc < 0)
+		pr_err("Error in configuring Rconn, rc=%d\n", rc);
+
 	batt_psy_initialized(chip);
 	fg_notify_charger(chip);
 	chip->profile_loaded = true;
 	fg_dbg(chip, FG_STATUS, "profile loaded successfully");
 out:
 	chip->soc_reporting_ready = true;
+	vote(chip->awake_votable, ESR_FCC_VOTER, true, 0);
+	schedule_delayed_work(&chip->pl_enable_work, msecs_to_jiffies(5000));
 	vote(chip->awake_votable, PROFILE_LOAD, false, 0);
 }
 
@@ -4059,12 +4132,6 @@
 		return rc;
 	}
 
-	rc = fg_rconn_config(chip);
-	if (rc < 0) {
-		pr_err("Error in configuring Rconn, rc=%d\n", rc);
-		return rc;
-	}
-
 	fg_encode(chip->sp, FG_SRAM_ESR_TIGHT_FILTER,
 		chip->dt.esr_tight_flt_upct, buf);
 	rc = fg_sram_write(chip, chip->sp[FG_SRAM_ESR_TIGHT_FILTER].addr_word,
@@ -4237,6 +4304,9 @@
 		chip->profile_available = false;
 		chip->profile_loaded = false;
 		chip->soc_reporting_ready = false;
+		chip->batt_id_ohms = -EINVAL;
+		cancel_delayed_work_sync(&chip->pl_enable_work);
+		vote(chip->pl_disable_votable, ESR_FCC_VOTER, true, 0);
 		return IRQ_HANDLED;
 	}
 
@@ -5060,6 +5130,7 @@
 	chip->prev_charge_status = -EINVAL;
 	chip->ki_coeff_full_soc = -EINVAL;
 	chip->online_status = -EINVAL;
+	chip->batt_id_ohms = -EINVAL;
 	chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
 	if (!chip->regmap) {
 		dev_err(chip->dev, "Parent regmap is unavailable\n");
@@ -5091,6 +5162,12 @@
 		}
 	}
 
+	chip->pl_disable_votable = find_votable("PL_DISABLE");
+	if (chip->pl_disable_votable == NULL) {
+		rc = -EPROBE_DEFER;
+		goto exit;
+	}
+
 	chip->awake_votable = create_votable("FG_WS", VOTE_SET_ANY, fg_awake_cb,
 					chip);
 	if (IS_ERR(chip->awake_votable)) {
@@ -5135,6 +5212,7 @@
 	init_completion(&chip->soc_ready);
 	init_completion(&chip->mem_grant);
 	INIT_DELAYED_WORK(&chip->profile_load_work, profile_load_work);
+	INIT_DELAYED_WORK(&chip->pl_enable_work, pl_enable_work);
 	INIT_WORK(&chip->status_change_work, status_change_work);
 	INIT_DELAYED_WORK(&chip->ttf_work, ttf_work);
 	INIT_DELAYED_WORK(&chip->sram_dump_work, sram_dump_work);
@@ -5207,8 +5285,8 @@
 		rc = fg_get_battery_temp(chip, &batt_temp);
 
 	if (!rc) {
-		pr_info("battery SOC:%d voltage: %duV temp: %d id: %dKOhms\n",
-			msoc, volt_uv, batt_temp, chip->batt_id_ohms / 1000);
+		pr_info("battery SOC:%d voltage: %duV temp: %d\n",
+				msoc, volt_uv, batt_temp);
 		rc = fg_esr_filter_config(chip, batt_temp);
 		if (rc < 0)
 			pr_err("Error in configuring ESR filter rc:%d\n", rc);
diff --git a/drivers/power/supply/qcom/qpnp-fg.c b/drivers/power/supply/qcom/qpnp-fg.c
index 17b9c1d3..a12b0ad 100644
--- a/drivers/power/supply/qcom/qpnp-fg.c
+++ b/drivers/power/supply/qcom/qpnp-fg.c
@@ -549,6 +549,7 @@
 	struct fg_chip *chip;
 	struct fg_log_buffer *log; /* log buffer */
 	u8 *data;	/* fg data that is read */
+	struct mutex memif_dfs_lock; /* Prevent thread concurrency */
 };
 
 struct fg_dbgfs {
@@ -5725,6 +5726,7 @@
 	trans->addr = dbgfs_data.addr;
 	trans->chip = dbgfs_data.chip;
 	trans->offset = trans->addr;
+	mutex_init(&trans->memif_dfs_lock);
 
 	file->private_data = trans;
 	return 0;
@@ -5736,6 +5738,7 @@
 
 	if (trans && trans->log && trans->data) {
 		file->private_data = NULL;
+		mutex_destroy(&trans->memif_dfs_lock);
 		kfree(trans->log);
 		kfree(trans->data);
 		kfree(trans);
@@ -5893,10 +5896,13 @@
 	size_t ret;
 	size_t len;
 
+	mutex_lock(&trans->memif_dfs_lock);
 	/* Is the the log buffer empty */
 	if (log->rpos >= log->wpos) {
-		if (get_log_data(trans) <= 0)
-			return 0;
+		if (get_log_data(trans) <= 0) {
+			len = 0;
+			goto unlock_mutex;
+		}
 	}
 
 	len = min(count, log->wpos - log->rpos);
@@ -5904,7 +5910,8 @@
 	ret = copy_to_user(buf, &log->data[log->rpos], len);
 	if (ret == len) {
 		pr_err("error copy sram register values to user\n");
-		return -EFAULT;
+		len = -EFAULT;
+		goto unlock_mutex;
 	}
 
 	/* 'ret' is the number of bytes not copied */
@@ -5912,6 +5919,9 @@
 
 	*ppos += len;
 	log->rpos += len;
+
+unlock_mutex:
+	mutex_unlock(&trans->memif_dfs_lock);
 	return len;
 }
 
@@ -5932,15 +5942,20 @@
 	int cnt = 0;
 	u8  *values;
 	size_t ret = 0;
+	char *kbuf;
+	u32 offset;
 
 	struct fg_trans *trans = file->private_data;
-	u32 offset = trans->offset;
+
+	mutex_lock(&trans->memif_dfs_lock);
+	offset = trans->offset;
 
 	/* Make a copy of the user data */
-	char *kbuf = kmalloc(count + 1, GFP_KERNEL);
-
-	if (!kbuf)
-		return -ENOMEM;
+	kbuf = kmalloc(count + 1, GFP_KERNEL);
+	if (!kbuf) {
+		ret = -ENOMEM;
+		goto unlock_mutex;
+	}
 
 	ret = copy_from_user(kbuf, buf, count);
 	if (ret == count) {
@@ -5991,6 +6006,8 @@
 
 free_buf:
 	kfree(kbuf);
+unlock_mutex:
+	mutex_unlock(&trans->memif_dfs_lock);
 	return ret;
 }
 
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index 1ab0357..ea78ddd3 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -313,8 +313,6 @@
 	chip->dt.auto_recharge_soc = of_property_read_bool(node,
 						"qcom,auto-recharge-soc");
 
-	chg->micro_usb_mode = of_property_read_bool(node, "qcom,micro-usb");
-
 	chg->dcp_icl_ua = chip->dt.usb_icl_ua;
 
 	chg->suspend_input_on_debug_batt = of_property_read_bool(node,
@@ -356,6 +354,7 @@
 	POWER_SUPPLY_PROP_PD_VOLTAGE_MAX,
 	POWER_SUPPLY_PROP_PD_VOLTAGE_MIN,
 	POWER_SUPPLY_PROP_SDP_CURRENT_MAX,
+	POWER_SUPPLY_PROP_CONNECTOR_TYPE,
 };
 
 static int smb2_usb_get_prop(struct power_supply *psy,
@@ -378,9 +377,9 @@
 		if (!val->intval)
 			break;
 
-		if ((chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT ||
-			chg->micro_usb_mode) &&
-			chg->real_charger_type == POWER_SUPPLY_TYPE_USB)
+		if (((chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT)
+		   || (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB))
+		   && (chg->real_charger_type == POWER_SUPPLY_TYPE_USB))
 			val->intval = 0;
 		else
 			val->intval = 1;
@@ -409,7 +408,7 @@
 			val->intval = chg->real_charger_type;
 		break;
 	case POWER_SUPPLY_PROP_TYPEC_MODE:
-		if (chg->micro_usb_mode)
+		if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
 			val->intval = POWER_SUPPLY_TYPEC_NONE;
 		else if (chip->bad_part)
 			val->intval = POWER_SUPPLY_TYPEC_SOURCE_DEFAULT;
@@ -417,13 +416,13 @@
 			val->intval = chg->typec_mode;
 		break;
 	case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
-		if (chg->micro_usb_mode)
+		if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
 			val->intval = POWER_SUPPLY_TYPEC_PR_NONE;
 		else
 			rc = smblib_get_prop_typec_power_role(chg, val);
 		break;
 	case POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION:
-		if (chg->micro_usb_mode)
+		if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
 			val->intval = 0;
 		else
 			rc = smblib_get_prop_typec_cc_orientation(chg, val);
@@ -471,6 +470,9 @@
 		val->intval = get_client_vote(chg->usb_icl_votable,
 					      USB_PSY_VOTER);
 		break;
+	case POWER_SUPPLY_PROP_CONNECTOR_TYPE:
+		val->intval = chg->connector_type;
+		break;
 	default:
 		pr_err("get prop %d is not supported in usb\n", psp);
 		rc = -EINVAL;
@@ -609,9 +611,9 @@
 		if (!val->intval)
 			break;
 
-		if ((chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT ||
-			chg->micro_usb_mode) &&
-			chg->real_charger_type == POWER_SUPPLY_TYPE_USB)
+		if (((chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT)
+		   || (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB))
+			&& (chg->real_charger_type == POWER_SUPPLY_TYPE_USB))
 			val->intval = 1;
 		else
 			val->intval = 0;
@@ -1268,7 +1270,7 @@
 	struct regulator_config cfg = {};
 	int rc = 0;
 
-	if (chg->micro_usb_mode)
+	if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
 		return 0;
 
 	chg->vconn_vreg = devm_kzalloc(chg->dev, sizeof(*chg->vconn_vreg),
@@ -1563,9 +1565,9 @@
 	vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
 			true, 0);
 	vote(chg->pd_disallowed_votable_indirect, MICRO_USB_VOTER,
-			chg->micro_usb_mode, 0);
+		(chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB), 0);
 	vote(chg->hvdcp_enable_votable, MICRO_USB_VOTER,
-			chg->micro_usb_mode, 0);
+		(chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB), 0);
 
 	/*
 	 * AICL configuration:
@@ -1595,7 +1597,17 @@
 		return rc;
 	}
 
-	if (chg->micro_usb_mode)
+	/* Check USB connector type (typeC/microUSB) */
+	rc = smblib_read(chg, RID_CC_CONTROL_7_0_REG, &val);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't read RID_CC_CONTROL_7_0 rc=%d\n",
+			rc);
+		return rc;
+	}
+	chg->connector_type = (val & EN_MICRO_USB_MODE_BIT) ?
+					POWER_SUPPLY_CONNECTOR_MICRO_USB
+					: POWER_SUPPLY_CONNECTOR_TYPEC;
+	if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
 		rc = smb2_disable_typec(chg);
 	else
 		rc = smb2_configure_typec(chg);
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index ddc8701..1cd3652 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -979,8 +979,8 @@
 	u8 load_cfg;
 	bool override;
 
-	if ((chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT
-		|| chg->micro_usb_mode)
+	if (((chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT)
+		|| (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB))
 		&& (chg->usb_psy_desc.type == POWER_SUPPLY_TYPE_USB)) {
 		rc = get_sdp_current(chg, icl_ua);
 		if (rc < 0) {
@@ -3404,7 +3404,7 @@
 			smblib_err(chg, "Couldn't disable DPDM rc=%d\n", rc);
 	}
 
-	if (chg->micro_usb_mode)
+	if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
 		smblib_micro_usb_plugin(chg, vbus_rising);
 
 	power_supply_changed(chg->usb_psy);
@@ -3723,7 +3723,7 @@
 	switch (apsd_result->bit) {
 	case SDP_CHARGER_BIT:
 	case CDP_CHARGER_BIT:
-		if (chg->micro_usb_mode)
+		if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
 			extcon_set_cable_state_(chg->extcon, EXTCON_USB,
 					true);
 		/* if not DCP then no hvdcp timeout happens. Enable pd here */
@@ -3765,7 +3765,8 @@
 	}
 	smblib_dbg(chg, PR_REGISTER, "APSD_STATUS = 0x%02x\n", stat);
 
-	if (chg->micro_usb_mode && (stat & APSD_DTC_STATUS_DONE_BIT)
+	if ((chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
+			&& (stat & APSD_DTC_STATUS_DONE_BIT)
 			&& !chg->uusb_apsd_rerun_done) {
 		/*
 		 * Force re-run APSD to handle slow insertion related
@@ -4262,7 +4263,7 @@
 	struct smb_irq_data *irq_data = data;
 	struct smb_charger *chg = irq_data->parent_data;
 
-	if (chg->micro_usb_mode) {
+	if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB) {
 		cancel_delayed_work_sync(&chg->uusb_otg_work);
 		vote(chg->awake_votable, OTG_DELAY_VOTER, true, 0);
 		smblib_dbg(chg, PR_INTERRUPT, "Scheduling OTG work\n");
@@ -4674,7 +4675,7 @@
 	int rc, i;
 	u8 stat;
 
-	if (chg->micro_usb_mode)
+	if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
 		return;
 
 	smblib_err(chg, "over-current detected on VCONN\n");
@@ -5054,7 +5055,7 @@
 			return rc;
 		}
 
-		rc = qcom_step_chg_init(chg->step_chg_enabled,
+		rc = qcom_step_chg_init(chg->dev, chg->step_chg_enabled,
 						chg->sw_jeita_enabled);
 		if (rc < 0) {
 			smblib_err(chg, "Couldn't init qcom_step_chg_init rc=%d\n",
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index 1046b27..351a0e9 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -324,7 +324,7 @@
 	bool			sw_jeita_enabled;
 	bool			is_hdc;
 	bool			chg_done;
-	bool			micro_usb_mode;
+	int			connector_type;
 	bool			otg_en;
 	bool			vconn_en;
 	bool			suspend_input_on_debug_batt;
diff --git a/drivers/power/supply/qcom/step-chg-jeita.c b/drivers/power/supply/qcom/step-chg-jeita.c
index c759314..a75cbbb 100644
--- a/drivers/power/supply/qcom/step-chg-jeita.c
+++ b/drivers/power/supply/qcom/step-chg-jeita.c
@@ -13,6 +13,8 @@
 
 #include <linux/delay.h>
 #include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_batterydata.h>
 #include <linux/power_supply.h>
 #include <linux/slab.h>
 #include <linux/pmic-voter.h>
@@ -56,19 +58,32 @@
 };
 
 struct step_chg_info {
+	struct device		*dev;
 	ktime_t			step_last_update_time;
 	ktime_t			jeita_last_update_time;
 	bool			step_chg_enable;
 	bool			sw_jeita_enable;
+	bool			config_is_read;
+	bool			step_chg_cfg_valid;
+	bool			sw_jeita_cfg_valid;
+	bool			soc_based_step_chg;
+	bool			batt_missing;
 	int			jeita_fcc_index;
 	int			jeita_fv_index;
 	int			step_index;
+	int			get_config_retry_count;
+
+	struct step_chg_cfg	*step_chg_config;
+	struct jeita_fcc_cfg	*jeita_fcc_config;
+	struct jeita_fv_cfg	*jeita_fv_config;
 
 	struct votable		*fcc_votable;
 	struct votable		*fv_votable;
 	struct wakeup_source	*step_chg_ws;
 	struct power_supply	*batt_psy;
+	struct power_supply	*bms_psy;
 	struct delayed_work	status_change_work;
+	struct delayed_work	get_config_work;
 	struct notifier_block	nb;
 };
 
@@ -76,69 +91,10 @@
 
 #define STEP_CHG_HYSTERISIS_DELAY_US		5000000 /* 5 secs */
 
-/*
- * Step Charging Configuration
- * Update the table based on the battery profile
- * Supports VBATT and SOC based source
- * range data must be in increasing ranges and shouldn't overlap
- */
-static struct step_chg_cfg step_chg_config = {
-	.psy_prop	= POWER_SUPPLY_PROP_VOLTAGE_NOW,
-	.prop_name	= "VBATT",
-	.hysteresis	= 100000, /* 100mV */
-	.fcc_cfg	= {
-		/* VBAT_LOW	VBAT_HIGH	FCC */
-		{3600000,	4000000,	3000000},
-		{4001000,	4200000,	2800000},
-		{4201000,	4400000,	2000000},
-	},
-	/*
-	 *	SOC STEP-CHG configuration example.
-	 *
-	 *	.psy_prop = POWER_SUPPLY_PROP_CAPACITY,
-	 *	.prop_name = "SOC",
-	 *	.fcc_cfg	= {
-	 *		//SOC_LOW	SOC_HIGH	FCC
-	 *		{20,		70,		3000000},
-	 *		{70,		90,		2750000},
-	 *		{90,		100,		2500000},
-	 *	},
-	 */
-};
-
-/*
- * Jeita Charging Configuration
- * Update the table based on the battery profile
- * Please ensure that the TEMP ranges are programmed in the hw so that
- * an interrupt is issued and a consequent psy changed will cause us to
- * react immediately.
- * range data must be in increasing ranges and shouldn't overlap.
- * Gaps are okay
- */
-static struct jeita_fcc_cfg jeita_fcc_config = {
-	.psy_prop	= POWER_SUPPLY_PROP_TEMP,
-	.prop_name	= "BATT_TEMP",
-	.hysteresis	= 10, /* 1degC hysteresis */
-	.fcc_cfg	= {
-		/* TEMP_LOW	TEMP_HIGH	FCC */
-		{0,		100,		600000},
-		{101,		200,		2000000},
-		{201,		450,		3450000},
-		{451,		550,		600000},
-	},
-};
-
-static struct jeita_fv_cfg jeita_fv_config = {
-	.psy_prop	= POWER_SUPPLY_PROP_TEMP,
-	.prop_name	= "BATT_TEMP",
-	.hysteresis	= 10, /* 1degC hysteresis */
-	.fv_cfg		= {
-		/* TEMP_LOW	TEMP_HIGH	FCC */
-		{0,		100,		4200000},
-		{101,		450,		4350000},
-		{451,		550,		4200000},
-	},
-};
+#define BATT_HOT_DECIDEGREE_MAX			600
+#define GET_CONFIG_DELAY_MS		2000
+#define GET_CONFIG_RETRY_COUNT		50
+#define WAIT_BATT_ID_READY_MS		200
 
 static bool is_batt_available(struct step_chg_info *chip)
 {
@@ -151,6 +107,240 @@
 	return true;
 }
 
+static bool is_bms_available(struct step_chg_info *chip)
+{
+	if (!chip->bms_psy)
+		chip->bms_psy = power_supply_get_by_name("bms");
+
+	if (!chip->bms_psy)
+		return false;
+
+	return true;
+}
+
+static int read_range_data_from_node(struct device_node *node,
+		const char *prop_str, struct range_data *ranges,
+		u32 max_threshold, u32 max_value)
+{
+	int rc = 0, i, length, per_tuple_length, tuples;
+
+	rc = of_property_count_elems_of_size(node, prop_str, sizeof(u32));
+	if (rc < 0) {
+		pr_err("Count %s failed, rc=%d\n", prop_str, rc);
+		return rc;
+	}
+
+	length = rc;
+	per_tuple_length = sizeof(struct range_data) / sizeof(u32);
+	if (length % per_tuple_length) {
+		pr_err("%s length (%d) should be a multiple of %d\n",
+				prop_str, length, per_tuple_length);
+		return -EINVAL;
+	}
+	tuples = length / per_tuple_length;
+
+	if (tuples > MAX_STEP_CHG_ENTRIES) {
+		pr_err("too many entries(%d), only %d allowed\n",
+				tuples, MAX_STEP_CHG_ENTRIES);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32_array(node, prop_str,
+			(u32 *)ranges, length);
+	if (rc) {
+		pr_err("Read %s failed, rc=%d\n", prop_str, rc);
+		return rc;
+	}
+
+	for (i = 0; i < tuples; i++) {
+		if (ranges[i].low_threshold >
+				ranges[i].high_threshold) {
+			pr_err("%s thresholds should be in ascending ranges\n",
+						prop_str);
+			rc = -EINVAL;
+			goto clean;
+		}
+
+		if (i != 0) {
+			if (ranges[i - 1].high_threshold >
+					ranges[i].low_threshold) {
+				pr_err("%s thresholds should be in ascending ranges\n",
+							prop_str);
+				rc = -EINVAL;
+				goto clean;
+			}
+		}
+
+		if (ranges[i].low_threshold > max_threshold)
+			ranges[i].low_threshold = max_threshold;
+		if (ranges[i].high_threshold > max_threshold)
+			ranges[i].high_threshold = max_threshold;
+		if (ranges[i].value > max_value)
+			ranges[i].value = max_value;
+	}
+
+	return rc;
+clean:
+	memset(ranges, 0, tuples * sizeof(struct range_data));
+	return rc;
+}
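
/*
 * Illustration (not part of this patch): read_range_data_from_node()
 * consumes the flat u32 arrays three cells at a time as
 * {low_threshold, high_threshold, value} and requires the ranges to be
 * ascending and non-overlapping. So a battery-profile node carrying
 *
 *	qcom,step-chg-ranges = <3600000 4000000 3000000>,
 *				<4001000 4200000 2800000>,
 *				<4201000 4400000 2000000>;
 *
 * (the values used by the static table this patch removes) would land
 * in fcc_cfg as the table below; struct range_data layout is assumed
 * to be exactly what the parser implies.
 */
static const struct range_data example_step_fcc_cfg[MAX_STEP_CHG_ENTRIES] = {
	/* VBAT_LOW(uV)	VBAT_HIGH(uV)	FCC(uA) */
	{3600000,	4000000,	3000000},
	{4001000,	4200000,	2800000},
	{4201000,	4400000,	2000000},
};
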
+
+static int get_step_chg_jeita_setting_from_profile(struct step_chg_info *chip)
+{
+	struct device_node *batt_node, *profile_node;
+	u32 max_fv_uv, max_fcc_ma;
+	const char *batt_type_str;
+	const __be32 *handle;
+	int batt_id_ohms, rc;
+	union power_supply_propval prop = {0, };
+
+	handle = of_get_property(chip->dev->of_node,
+			"qcom,battery-data", NULL);
+	if (!handle) {
+		pr_debug("no battery-data phandle, skipping sw-jeita/step charging settings from profile\n");
+		return 0;
+	}
+
+	batt_node = of_find_node_by_phandle(be32_to_cpup(handle));
+	if (!batt_node) {
+		pr_err("Get battery data node failed\n");
+		return -EINVAL;
+	}
+
+	if (!is_bms_available(chip))
+		return -ENODEV;
+
+	power_supply_get_property(chip->bms_psy,
+			POWER_SUPPLY_PROP_RESISTANCE_ID, &prop);
+	batt_id_ohms = prop.intval;
+
+	/* bms_psy has not yet read the batt_id */
+	if (batt_id_ohms < 0)
+		return -EBUSY;
+
+	profile_node = of_batterydata_get_best_profile(batt_node,
+					batt_id_ohms / 1000, NULL);
+	if (IS_ERR(profile_node))
+		return PTR_ERR(profile_node);
+
+	if (!profile_node) {
+		pr_err("Couldn't find profile\n");
+		return -ENODATA;
+	}
+
+	rc = of_property_read_string(profile_node, "qcom,battery-type",
+					&batt_type_str);
+	if (rc < 0) {
+		pr_err("battery type unavailable, rc:%d\n", rc);
+		return rc;
+	}
+	pr_debug("battery: %s detected, getting sw-jeita/step charging settings\n",
+					batt_type_str);
+
+	rc = of_property_read_u32(profile_node, "qcom,max-voltage-uv",
+					&max_fv_uv);
+	if (rc < 0) {
+		pr_err("max-voltage-uv reading failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(profile_node, "qcom,fastchg-current-ma",
+					&max_fcc_ma);
+	if (rc < 0) {
+		pr_err("max-fastchg-current-ma reading failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	chip->soc_based_step_chg =
+		of_property_read_bool(profile_node, "qcom,soc-based-step-chg");
+	if (chip->soc_based_step_chg) {
+		chip->step_chg_config->psy_prop = POWER_SUPPLY_PROP_CAPACITY;
+		chip->step_chg_config->prop_name = "SOC";
+		chip->step_chg_config->hysteresis = 0;
+	}
+
+	chip->step_chg_cfg_valid = true;
+	rc = read_range_data_from_node(profile_node,
+			"qcom,step-chg-ranges",
+			chip->step_chg_config->fcc_cfg,
+			chip->soc_based_step_chg ? 100 : max_fv_uv,
+			max_fcc_ma * 1000);
+	if (rc < 0) {
+		pr_debug("Read qcom,step-chg-ranges failed from battery profile, rc=%d\n",
+					rc);
+		chip->step_chg_cfg_valid = false;
+	}
+
+	chip->sw_jeita_cfg_valid = true;
+	rc = read_range_data_from_node(profile_node,
+			"qcom,jeita-fcc-ranges",
+			chip->jeita_fcc_config->fcc_cfg,
+			BATT_HOT_DECIDEGREE_MAX, max_fcc_ma * 1000);
+	if (rc < 0) {
+		pr_debug("Read qcom,jeita-fcc-ranges failed from battery profile, rc=%d\n",
+					rc);
+		chip->sw_jeita_cfg_valid = false;
+	}
+
+	rc = read_range_data_from_node(profile_node,
+			"qcom,jeita-fv-ranges",
+			chip->jeita_fv_config->fv_cfg,
+			BATT_HOT_DECIDEGREE_MAX, max_fv_uv);
+	if (rc < 0) {
+		pr_debug("Read qcom,jeita-fv-ranges failed from battery profile, rc=%d\n",
+					rc);
+		chip->sw_jeita_cfg_valid = false;
+	}
+
+	return rc;
+}
+
+static void get_config_work(struct work_struct *work)
+{
+	struct step_chg_info *chip = container_of(work,
+			struct step_chg_info, get_config_work.work);
+	int i, rc;
+
+	chip->config_is_read = false;
+	rc = get_step_chg_jeita_setting_from_profile(chip);
+
+	if (rc < 0) {
+		if (rc == -ENODEV || rc == -EBUSY) {
+			if (chip->get_config_retry_count++
+					< GET_CONFIG_RETRY_COUNT) {
+				pr_debug("bms_psy is not ready, retry: %d\n",
+						chip->get_config_retry_count);
+				goto reschedule;
+			}
+		}
+	}
+
+	chip->config_is_read = true;
+
+	for (i = 0; i < MAX_STEP_CHG_ENTRIES; i++)
+		pr_debug("step-chg-cfg: %duV(SoC) ~ %duV(SoC), %duA\n",
+			chip->step_chg_config->fcc_cfg[i].low_threshold,
+			chip->step_chg_config->fcc_cfg[i].high_threshold,
+			chip->step_chg_config->fcc_cfg[i].value);
+	for (i = 0; i < MAX_STEP_CHG_ENTRIES; i++)
+		pr_debug("jeita-fcc-cfg: %ddecidegree ~ %ddecidegree, %duA\n",
+			chip->jeita_fcc_config->fcc_cfg[i].low_threshold,
+			chip->jeita_fcc_config->fcc_cfg[i].high_threshold,
+			chip->jeita_fcc_config->fcc_cfg[i].value);
+	for (i = 0; i < MAX_STEP_CHG_ENTRIES; i++)
+		pr_debug("jeita-fv-cfg: %ddecidegree ~ %ddecidegree, %duV\n",
+			chip->jeita_fv_config->fv_cfg[i].low_threshold,
+			chip->jeita_fv_config->fv_cfg[i].high_threshold,
+			chip->jeita_fv_config->fv_cfg[i].value);
+
+	return;
+
+reschedule:
+	schedule_delayed_work(&chip->get_config_work,
+			msecs_to_jiffies(GET_CONFIG_DELAY_MS));
+
+}
+
 static int get_val(struct range_data *range, int hysteresis, int current_index,
 		int threshold,
 		int *new_index, int *val)
@@ -220,21 +410,22 @@
 	else
 		chip->step_chg_enable = pval.intval;
 
-	if (!chip->step_chg_enable) {
+	if (!chip->step_chg_enable || !chip->step_chg_cfg_valid) {
 		if (chip->fcc_votable)
 			vote(chip->fcc_votable, STEP_CHG_VOTER, false, 0);
 		goto update_time;
 	}
 
 	rc = power_supply_get_property(chip->batt_psy,
-				step_chg_config.psy_prop, &pval);
+			chip->step_chg_config->psy_prop, &pval);
 	if (rc < 0) {
 		pr_err("Couldn't read %s property rc=%d\n",
-				step_chg_config.prop_name, rc);
+			chip->step_chg_config->prop_name, rc);
 		return rc;
 	}
 
-	rc = get_val(step_chg_config.fcc_cfg, step_chg_config.hysteresis,
+	rc = get_val(chip->step_chg_config->fcc_cfg,
+			chip->step_chg_config->hysteresis,
 			chip->step_index,
 			pval.intval,
 			&chip->step_index,
@@ -254,7 +445,7 @@
 	vote(chip->fcc_votable, STEP_CHG_VOTER, true, fcc_ua);
 
 	pr_debug("%s = %d Step-FCC = %duA\n",
-		step_chg_config.prop_name, pval.intval, fcc_ua);
+		chip->step_chg_config->prop_name, pval.intval, fcc_ua);
 
 update_time:
 	chip->step_last_update_time = ktime_get();
@@ -278,7 +469,7 @@
 	else
 		chip->sw_jeita_enable = pval.intval;
 
-	if (!chip->sw_jeita_enable) {
+	if (!chip->sw_jeita_enable || !chip->sw_jeita_cfg_valid) {
 		if (chip->fcc_votable)
 			vote(chip->fcc_votable, JEITA_VOTER, false, 0);
 		if (chip->fv_votable)
@@ -291,14 +482,15 @@
 		goto reschedule;
 
 	rc = power_supply_get_property(chip->batt_psy,
-				jeita_fcc_config.psy_prop, &pval);
+			chip->jeita_fcc_config->psy_prop, &pval);
 	if (rc < 0) {
 		pr_err("Couldn't read %s property rc=%d\n",
-				step_chg_config.prop_name, rc);
+				chip->jeita_fcc_config->prop_name, rc);
 		return rc;
 	}
 
-	rc = get_val(jeita_fcc_config.fcc_cfg, jeita_fcc_config.hysteresis,
+	rc = get_val(chip->jeita_fcc_config->fcc_cfg,
+			chip->jeita_fcc_config->hysteresis,
 			chip->jeita_fcc_index,
 			pval.intval,
 			&chip->jeita_fcc_index,
@@ -318,7 +510,8 @@
 
 	vote(chip->fcc_votable, JEITA_VOTER, true, fcc_ua);
 
-	rc = get_val(jeita_fv_config.fv_cfg, jeita_fv_config.hysteresis,
+	rc = get_val(chip->jeita_fv_config->fv_cfg,
+			chip->jeita_fv_config->hysteresis,
 			chip->jeita_fv_index,
 			pval.intval,
 			&chip->jeita_fv_index,
@@ -337,7 +530,7 @@
 	vote(chip->fv_votable, JEITA_VOTER, true, fv_uv);
 
 	pr_debug("%s = %d FCC = %duA FV = %duV\n",
-		step_chg_config.prop_name, pval.intval, fcc_ua, fv_uv);
+		chip->jeita_fcc_config->prop_name, pval.intval, fcc_ua, fv_uv);
 
 update_time:
 	chip->jeita_last_update_time = ktime_get();
@@ -348,6 +541,39 @@
 	return (STEP_CHG_HYSTERISIS_DELAY_US - elapsed_us + 1000);
 }
 
+static int handle_battery_insertion(struct step_chg_info *chip)
+{
+	int rc;
+	union power_supply_propval pval = {0, };
+
+	rc = power_supply_get_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_PRESENT, &pval);
+	if (rc < 0) {
+		pr_err("Get battery present status failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (chip->batt_missing != (!pval.intval)) {
+		chip->batt_missing = !pval.intval;
+		pr_debug("battery %s detected\n",
+				chip->batt_missing ? "removal" : "insertion");
+		if (chip->batt_missing) {
+			chip->step_chg_cfg_valid = false;
+			chip->sw_jeita_cfg_valid = false;
+			chip->get_config_retry_count = 0;
+		} else {
+			/*
+			 * Get config for the newly inserted battery, delay
+			 * to make sure BMS has read out the batt_id.
+			 */
+			schedule_delayed_work(&chip->get_config_work,
+				msecs_to_jiffies(WAIT_BATT_ID_READY_MS));
+		}
+	}
+
+	return rc;
+}
+
 static void status_change_work(struct work_struct *work)
 {
 	struct step_chg_info *chip = container_of(work,
@@ -360,6 +586,7 @@
 	if (!is_batt_available(chip))
 		return;
 
+	handle_battery_insertion(chip);
 	/* skip elapsed_us debounce for handling battery temperature */
 	rc = handle_jeita(chip);
 	if (rc > 0)
@@ -395,6 +622,13 @@
 		schedule_delayed_work(&chip->status_change_work, 0);
 	}
 
+	if ((strcmp(psy->desc->name, "bms") == 0)) {
+		if (chip->bms_psy == NULL)
+			chip->bms_psy = psy;
+		if (!chip->config_is_read)
+			schedule_delayed_work(&chip->get_config_work, 0);
+	}
+
 	return NOTIFY_OK;
 }
 
@@ -412,7 +646,8 @@
 	return 0;
 }
 
-int qcom_step_chg_init(bool step_chg_enable, bool sw_jeita_enable)
+int qcom_step_chg_init(struct device *dev,
+		bool step_chg_enable, bool sw_jeita_enable)
 {
 	int rc;
 	struct step_chg_info *chip;
@@ -422,48 +657,46 @@
 		return -EINVAL;
 	}
 
-	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
 	if (!chip)
 		return -ENOMEM;
 
 	chip->step_chg_ws = wakeup_source_register("qcom-step-chg");
-	if (!chip->step_chg_ws) {
-		rc = -EINVAL;
-		goto cleanup;
-	}
+	if (!chip->step_chg_ws)
+		return -EINVAL;
 
+	chip->dev = dev;
 	chip->step_chg_enable = step_chg_enable;
 	chip->sw_jeita_enable = sw_jeita_enable;
-
 	chip->step_index = -EINVAL;
 	chip->jeita_fcc_index = -EINVAL;
 	chip->jeita_fv_index = -EINVAL;
 
-	if (step_chg_enable && (!step_chg_config.psy_prop ||
-				!step_chg_config.prop_name)) {
-		/* fail if step-chg configuration is invalid */
-		pr_err("Step-chg configuration not defined - fail\n");
-		rc = -ENODATA;
-		goto release_wakeup_source;
-	}
+	chip->step_chg_config = devm_kzalloc(dev,
+			sizeof(struct step_chg_cfg), GFP_KERNEL);
+	if (!chip->step_chg_config)
+		return -ENOMEM;
 
-	if (sw_jeita_enable && (!jeita_fcc_config.psy_prop ||
-				!jeita_fcc_config.prop_name)) {
-		/* fail if step-chg configuration is invalid */
-		pr_err("Jeita TEMP configuration not defined - fail\n");
-		rc = -ENODATA;
-		goto release_wakeup_source;
-	}
+	chip->step_chg_config->psy_prop = POWER_SUPPLY_PROP_VOLTAGE_NOW;
+	chip->step_chg_config->prop_name = "VBATT";
+	chip->step_chg_config->hysteresis = 100000;
 
-	if (sw_jeita_enable && (!jeita_fv_config.psy_prop ||
-				!jeita_fv_config.prop_name)) {
-		/* fail if step-chg configuration is invalid */
-		pr_err("Jeita TEMP configuration not defined - fail\n");
-		rc = -ENODATA;
-		goto release_wakeup_source;
-	}
+	chip->jeita_fcc_config = devm_kzalloc(dev,
+			sizeof(struct jeita_fcc_cfg), GFP_KERNEL);
+	chip->jeita_fv_config = devm_kzalloc(dev,
+			sizeof(struct jeita_fv_cfg), GFP_KERNEL);
+	if (!chip->jeita_fcc_config || !chip->jeita_fv_config)
+		return -ENOMEM;
+
+	chip->jeita_fcc_config->psy_prop = POWER_SUPPLY_PROP_TEMP;
+	chip->jeita_fcc_config->prop_name = "BATT_TEMP";
+	chip->jeita_fcc_config->hysteresis = 10;
+	chip->jeita_fv_config->psy_prop = POWER_SUPPLY_PROP_TEMP;
+	chip->jeita_fv_config->prop_name = "BATT_TEMP";
+	chip->jeita_fv_config->hysteresis = 10;
 
 	INIT_DELAYED_WORK(&chip->status_change_work, status_change_work);
+	INIT_DELAYED_WORK(&chip->get_config_work, get_config_work);
 
 	rc = step_chg_register_notifier(chip);
 	if (rc < 0) {
@@ -471,18 +704,15 @@
 		goto release_wakeup_source;
 	}
 
-	the_chip = chip;
+	schedule_delayed_work(&chip->get_config_work,
+			msecs_to_jiffies(GET_CONFIG_DELAY_MS));
 
-	if (step_chg_enable)
-		pr_info("Step charging enabled. Using %s source\n",
-				step_chg_config.prop_name);
+	the_chip = chip;
 
 	return 0;
 
 release_wakeup_source:
 	wakeup_source_unregister(chip->step_chg_ws);
-cleanup:
-	kfree(chip);
 	return rc;
 }
 
@@ -494,8 +724,8 @@
 		return;
 
 	cancel_delayed_work_sync(&chip->status_change_work);
+	cancel_delayed_work_sync(&chip->get_config_work);
 	power_supply_unreg_notifier(&chip->nb);
 	wakeup_source_unregister(chip->step_chg_ws);
 	the_chip = NULL;
-	kfree(chip);
 }
diff --git a/drivers/power/supply/qcom/step-chg-jeita.h b/drivers/power/supply/qcom/step-chg-jeita.h
index 5bb2b99..2404b86 100644
--- a/drivers/power/supply/qcom/step-chg-jeita.h
+++ b/drivers/power/supply/qcom/step-chg-jeita.h
@@ -12,6 +12,7 @@
 
 #ifndef __STEP_CHG_H__
 #define __STEP_CHG_H__
-int qcom_step_chg_init(bool step_chg_enable, bool sw_jeita_enable);
+int qcom_step_chg_init(struct device *dev,
+		bool step_chg_enable, bool sw_jeita_enable);
 void qcom_step_chg_deinit(void);
 #endif /* __STEP_CHG_H__ */
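
For reference, the flattened <low high value> tuples parsed by read_range_data_from_node() above can be exercised outside the kernel. The following standalone sketch is illustrative only: it assumes struct range_data is the three-u32 <low_threshold, high_threshold, value> tuple implied by the parsing code, and reuses the SOC-based table from the example that this patch removes.

	#include <stdio.h>
	#include <stdint.h>

	#define MAX_STEP_CHG_ENTRIES	8

	struct range_data {
		uint32_t low_threshold;
		uint32_t high_threshold;
		uint32_t value;
	};

	/* Same ordering/overlap checks and clamping as read_range_data_from_node() */
	static int validate_ranges(struct range_data *r, int tuples,
				   uint32_t max_threshold, uint32_t max_value)
	{
		int i;

		for (i = 0; i < tuples; i++) {
			if (r[i].low_threshold > r[i].high_threshold)
				return -1;
			if (i && r[i - 1].high_threshold > r[i].low_threshold)
				return -1;
			if (r[i].low_threshold > max_threshold)
				r[i].low_threshold = max_threshold;
			if (r[i].high_threshold > max_threshold)
				r[i].high_threshold = max_threshold;
			if (r[i].value > max_value)
				r[i].value = max_value;
		}
		return 0;
	}

	int main(void)
	{
		/* SOC-based example: 20-70% -> 3 A, 70-90% -> 2.75 A, 90-100% -> 2.5 A */
		struct range_data cfg[MAX_STEP_CHG_ENTRIES] = {
			{20, 70, 3000000}, {70, 90, 2750000}, {90, 100, 2500000},
		};

		if (validate_ranges(cfg, 3, 100, 3000000))
			puts("invalid step charging table");
		else
			printf("first step: %u%%-%u%% -> %u uA\n",
			       cfg[0].low_threshold, cfg[0].high_threshold,
			       cfg[0].value);
		return 0;
	}
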
diff --git a/drivers/regulator/cpr3-regulator.h b/drivers/regulator/cpr3-regulator.h
index a315e46..778f482 100644
--- a/drivers/regulator/cpr3-regulator.h
+++ b/drivers/regulator/cpr3-regulator.h
@@ -949,6 +949,8 @@
 void cprh_adjust_voltages_for_mem_acc(struct cpr3_regulator *vreg);
 int cpr3_adjust_target_quotients(struct cpr3_regulator *vreg,
 			int *fuse_volt_adjust);
+int cpr3_parse_fuse_combo_map(struct cpr3_regulator *vreg, u64 *fuse_val,
+			int fuse_count);
 
 #else
 
@@ -1133,6 +1135,12 @@
 	return 0;
 }
 
+static int cpr3_parse_fuse_combo_map(struct cpr3_regulator *vreg, u64 *fuse_val,
+			int fuse_count)
+{
+	return -EPERM;
+}
+
 #endif /* CONFIG_REGULATOR_CPR3 */
 
 #endif /* __REGULATOR_CPR_REGULATOR_H__ */
diff --git a/drivers/regulator/cpr3-util.c b/drivers/regulator/cpr3-util.c
index 3035155..39ee3c5 100644
--- a/drivers/regulator/cpr3-util.c
+++ b/drivers/regulator/cpr3-util.c
@@ -562,32 +562,41 @@
 		return -EINVAL;
 	}
 
-	rc = of_property_read_u32(node, "qcom,cpr-fuse-combos",
-				&max_fuse_combos);
-	if (rc) {
-		cpr3_err(vreg, "error reading property qcom,cpr-fuse-combos, rc=%d\n",
-			rc);
-		return rc;
-	}
-
 	/*
-	 * Sanity check against arbitrarily large value to avoid excessive
-	 * memory allocation.
+	 * Check if CPR3 regulator's fuse_combos_supported element is already
+	 * populated by fuse-combo-map logic. If not populated, then parse the
+	 * qcom,cpr-fuse-combos property.
 	 */
-	if (max_fuse_combos > 100 || max_fuse_combos == 0) {
-		cpr3_err(vreg, "qcom,cpr-fuse-combos is invalid: %u\n",
-			max_fuse_combos);
-		return -EINVAL;
-	}
+	if (vreg->fuse_combos_supported) {
+		max_fuse_combos = vreg->fuse_combos_supported;
+	} else {
+		rc = of_property_read_u32(node, "qcom,cpr-fuse-combos",
+					&max_fuse_combos);
+		if (rc) {
+			cpr3_err(vreg, "error reading property qcom,cpr-fuse-combos, rc=%d\n",
+				rc);
+			return rc;
+		}
 
-	if (vreg->fuse_combo >= max_fuse_combos) {
-		cpr3_err(vreg, "device tree config supports fuse combos 0-%u but the hardware has combo %d\n",
-			max_fuse_combos - 1, vreg->fuse_combo);
-		BUG_ON(1);
-		return -EINVAL;
-	}
+		/*
+		 * Sanity check against arbitrarily large value to avoid
+		 * excessive memory allocation.
+		 */
+		if (max_fuse_combos > 100 || max_fuse_combos == 0) {
+			cpr3_err(vreg, "qcom,cpr-fuse-combos is invalid: %u\n",
+				max_fuse_combos);
+			return -EINVAL;
+		}
 
-	vreg->fuse_combos_supported = max_fuse_combos;
+		if (vreg->fuse_combo >= max_fuse_combos) {
+			cpr3_err(vreg, "device tree config supports fuse combos 0-%u but the hardware has combo %d\n",
+				max_fuse_combos - 1, vreg->fuse_combo);
+			WARN_ON(1);
+			return -EINVAL;
+		}
+
+		vreg->fuse_combos_supported = max_fuse_combos;
+	}
 
 	of_property_read_u32(node, "qcom,cpr-speed-bins", &max_speed_bins);
 
@@ -2414,3 +2423,76 @@
 	kfree(ro_scale);
 	return rc;
 }
+
+/**
+ * cpr3_parse_fuse_combo_map() - parse fuse combo map data for a CPR3 regulator
+ *		from device tree.
+ * @vreg:		Pointer to the CPR3 regulator
+ * @fuse_val:		Array of selection fuse parameter values
+ * @fuse_count:		Number of selection fuse parameters used in fuse combo
+ *			map
+ *
+ * This function reads the qcom,cpr-fuse-combo-map device tree property and
+ * populates the fuse_combo element of CPR3 regulator with the row number of
+ * fuse combo map data that matches with the data in fuse_val input array.
+ *
+ * Return: 0 on success, -ENODEV if qcom,cpr-fuse-combo-map property is not
+ *		specified in device node, other errno on failure
+ */
+int cpr3_parse_fuse_combo_map(struct cpr3_regulator *vreg, u64 *fuse_val,
+			int fuse_count)
+{
+	struct device_node *node = vreg->of_node;
+	int i, j, len, num_fuse_combos, row_size, rc = 0;
+	u32 *tmp;
+
+	if (!of_find_property(node, "qcom,cpr-fuse-combo-map", &len)) {
+		/* property not specified */
+		return -ENODEV;
+	}
+
+	row_size = fuse_count * 2;
+	if (len == 0 || len % (sizeof(u32) * row_size)) {
+		cpr3_err(vreg, "qcom,cpr-fuse-combo-map length=%d is invalid\n",
+			len);
+		return -EINVAL;
+	}
+
+	num_fuse_combos = len / (sizeof(u32) * row_size);
+	vreg->fuse_combos_supported = num_fuse_combos;
+
+	tmp = kzalloc(len, GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(node, "qcom,cpr-fuse-combo-map",
+			tmp, num_fuse_combos * row_size);
+	if (rc) {
+		cpr3_err(vreg, "could not read qcom,cpr-fuse-combo-map, rc=%d\n",
+			rc);
+		goto done;
+	}
+
+	for (i = 0; i < num_fuse_combos; i++) {
+		for (j = 0; j < fuse_count; j++) {
+			if (tmp[i * row_size + j * 2] > fuse_val[j]
+			      || tmp[i * row_size + j * 2 + 1] < fuse_val[j])
+				break;
+		}
+		if (j == fuse_count) {
+			vreg->fuse_combo = i;
+			break;
+		}
+	}
+
+	if (i >= num_fuse_combos) {
+		cpr3_err(vreg, "No matching CPR fuse combo found!\n");
+		WARN_ON(1);
+		rc = -EINVAL;
+		goto done;
+	}
+
+done:
+	kfree(tmp);
+	return rc;
+}
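
The qcom,cpr-fuse-combo-map format handled by cpr3_parse_fuse_combo_map() stores one [min, max] pair per selection fuse in each row, and the first row whose every pair brackets the corresponding fuse value wins. The short standalone sketch below is illustrative only; the two-fuse map contents are invented.

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		enum { FUSE_COUNT = 2 };			/* e.g. speed bin + CPR rev */
		const uint64_t fuse_val[FUSE_COUNT] = { 2, 5 };	/* values read from fuses */
		/* each row: <fuse0_min fuse0_max  fuse1_min fuse1_max> */
		const uint32_t map[][FUSE_COUNT * 2] = {
			{ 0, 1,  0, 7 },	/* combo 0: speed bins 0-1, any rev  */
			{ 2, 3,  0, 3 },	/* combo 1: speed bins 2-3, revs 0-3 */
			{ 2, 3,  4, 7 },	/* combo 2: speed bins 2-3, revs 4-7 */
		};
		const int rows = sizeof(map) / sizeof(map[0]);
		int i, j, combo = -1;

		for (i = 0; i < rows; i++) {
			for (j = 0; j < FUSE_COUNT; j++)
				if (map[i][j * 2] > fuse_val[j] ||
				    map[i][j * 2 + 1] < fuse_val[j])
					break;
			if (j == FUSE_COUNT) {
				combo = i;
				break;
			}
		}

		printf("matched fuse combo: %d\n", combo);	/* prints 2 here */
		return 0;
	}
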
diff --git a/drivers/regulator/cpr4-apss-regulator.c b/drivers/regulator/cpr4-apss-regulator.c
index 66f9f66..a9602cb 100644
--- a/drivers/regulator/cpr4-apss-regulator.c
+++ b/drivers/regulator/cpr4-apss-regulator.c
@@ -51,6 +51,8 @@
  * @speed_bin:		Application processor speed bin fuse parameter value for
  *			the given chip
  * @cpr_fusing_rev:	CPR fusing revision fuse parameter value
+ * @foundry_id:		Foundry identifier fuse parameter value for the given
+ *			chip
  * @boost_cfg:		CPR boost configuration fuse parameter value
  * @boost_voltage:	CPR boost voltage fuse parameter value (raw, not
  *			converted to a voltage)
@@ -66,6 +68,7 @@
 	u64	quot_offset[MSM8953_APSS_FUSE_CORNERS];
 	u64	speed_bin;
 	u64	cpr_fusing_rev;
+	u64	foundry_id;
 	u64	boost_cfg;
 	u64	boost_voltage;
 	u64	misc;
@@ -149,6 +152,11 @@
 	{},
 };
 
+static const struct cpr3_fuse_param msm8953_apss_foundry_id_param[] = {
+	{37, 40, 42},
+	{},
+};
+
 static const struct cpr3_fuse_param msm8953_cpr_boost_fuse_cfg_param[] = {
 	{36, 43, 45},
 	{},
@@ -171,6 +179,15 @@
 };
 
 /*
+ * The maximum number of fuse combinations possible for the selected fuse
+ * parameters in fuse combo map logic.
+ * Here, possible speed-bin values = 8, fuse revision values = 8, and foundry
+ * identifier values = 8. Total number of combinations = 512 (i.e., 8 * 8 * 8)
+ */
+#define CPR4_MSM8953_APSS_FUSE_COMBO_MAP_MAX_COUNT	512
+
+
+/*
  * The number of possible values for misc fuse is
  * 2^(#bits defined for misc fuse)
  */
@@ -260,6 +277,14 @@
 	}
 	cpr3_info(vreg, "CPR fusing revision = %llu\n", fuse->cpr_fusing_rev);
 
+	rc = cpr3_read_fuse_param(base, msm8953_apss_foundry_id_param,
+				&fuse->foundry_id);
+	if (rc) {
+		cpr3_err(vreg, "Unable to read foundry id fuse, rc=%d\n", rc);
+		return rc;
+	}
+	cpr3_info(vreg, "foundry id = %llu\n", fuse->foundry_id);
+
 	rc = cpr3_read_fuse_param(base, msm8953_misc_fuse_volt_adj_param,
 				&fuse->misc);
 	if (rc) {
@@ -1145,6 +1170,58 @@
 	return rc;
 }
 
+/*
+ * Constants which define the selection fuse parameters used in fuse combo map
+ * logic.
+ */
+enum cpr4_msm8953_apss_fuse_combo_parameters {
+	MSM8953_APSS_SPEED_BIN = 0,
+	MSM8953_APSS_CPR_FUSE_REV,
+	MSM8953_APSS_FOUNDRY_ID,
+	MSM8953_APSS_FUSE_COMBO_PARAM_COUNT,
+};
+
+/**
+ * cpr4_parse_fuse_combo_map() - parse APSS fuse combo map data from device tree
+ *		properties of the CPR3 regulator's device node
+ * @vreg:		Pointer to the CPR3 regulator
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr4_parse_fuse_combo_map(struct cpr3_regulator *vreg)
+{
+	struct cpr4_msm8953_apss_fuses *fuse = vreg->platform_fuses;
+	u64 *fuse_val;
+	int rc;
+
+	fuse_val = kcalloc(MSM8953_APSS_FUSE_COMBO_PARAM_COUNT,
+			sizeof(*fuse_val), GFP_KERNEL);
+	if (!fuse_val)
+		return -ENOMEM;
+
+	fuse_val[MSM8953_APSS_SPEED_BIN] = fuse->speed_bin;
+	fuse_val[MSM8953_APSS_CPR_FUSE_REV] = fuse->cpr_fusing_rev;
+	fuse_val[MSM8953_APSS_FOUNDRY_ID] = fuse->foundry_id;
+	rc = cpr3_parse_fuse_combo_map(vreg, fuse_val,
+			MSM8953_APSS_FUSE_COMBO_PARAM_COUNT);
+	if (rc == -ENODEV) {
+		cpr3_debug(vreg, "using legacy fuse combo logic, rc=%d\n",
+			rc);
+		rc = 0;
+	} else if (rc < 0) {
+		cpr3_err(vreg, "error reading fuse combo map data, rc=%d\n",
+			rc);
+	} else if (vreg->fuse_combo >=
+			CPR4_MSM8953_APSS_FUSE_COMBO_MAP_MAX_COUNT) {
+		cpr3_err(vreg, "invalid CPR fuse combo = %d found\n",
+			vreg->fuse_combo);
+		rc = -EINVAL;
+	}
+
+	kfree(fuse_val);
+	return rc;
+}
+
 /**
  * cpr4_apss_init_regulator() - perform all steps necessary to initialize the
  *		configuration data for a CPR3 regulator
@@ -1165,6 +1242,13 @@
 
 	fuse = vreg->platform_fuses;
 
+	rc = cpr4_parse_fuse_combo_map(vreg);
+	if (rc) {
+		cpr3_err(vreg, "error while parsing fuse combo map, rc=%d\n",
+			rc);
+		return rc;
+	}
+
 	rc = cpr4_apss_parse_corner_data(vreg);
 	if (rc) {
 		cpr3_err(vreg, "unable to read CPR corner data from device tree, rc=%d\n",
@@ -1246,7 +1330,7 @@
 static int cpr4_apss_init_aging(struct cpr3_controller *ctrl)
 {
 	struct cpr4_msm8953_apss_fuses *fuse = NULL;
-	struct cpr3_regulator *vreg;
+	struct cpr3_regulator *vreg = NULL;
 	u32 aging_ro_scale;
 	int i, j, rc;
 
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index 13d53a3..589167e 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -491,7 +491,10 @@
 		.name = "fan53555",
 		.driver_data = FAN53555_VENDOR_FAIRCHILD
 	}, {
-		.name = "syr82x",
+		.name = "syr827",
+		.driver_data = FAN53555_VENDOR_SILERGY
+	}, {
+		.name = "syr828",
 		.driver_data = FAN53555_VENDOR_SILERGY
 	}, {
 		.name = "hl7509",
diff --git a/drivers/regulator/mem-acc-regulator.c b/drivers/regulator/mem-acc-regulator.c
index 4c03dec..e22a259 100644
--- a/drivers/regulator/mem-acc-regulator.c
+++ b/drivers/regulator/mem-acc-regulator.c
@@ -108,6 +108,8 @@
 	u32			*phys_reg_addr_list;
 	void __iomem		**remap_reg_addr_list;
 	struct corner_acc_reg_config	*corner_acc_reg_config;
+	u32			*override_acc_range_fuse_list;
+	int			override_acc_range_fuse_num;
 };
 
 static DEFINE_MUTEX(mem_acc_memory_mutex);
@@ -549,9 +551,8 @@
 	return 0;
 }
 
-static int override_mem_acc_custom_data(struct platform_device *pdev,
-				 struct mem_acc_regulator *mem_acc_vreg,
-				 int mem_type)
+static int override_mem_acc_custom_data(struct mem_acc_regulator *mem_acc_vreg,
+		 int mem_type)
 {
 	char *custom_apc_data_str;
 	int len, rc = 0, i;
@@ -647,27 +648,48 @@
 
 }
 
-static int mem_acc_find_override_map_match(struct platform_device *pdev,
-				 struct mem_acc_regulator *mem_acc_vreg)
+static void mem_acc_read_efuse_param(struct mem_acc_regulator *mem_acc_vreg,
+		u32 *fuse_sel, int *val)
 {
-	struct device_node *of_node = pdev->dev.of_node;
+	u64 fuse_bits;
+
+	fuse_bits = mem_acc_read_efuse_row(mem_acc_vreg, fuse_sel[0],
+					   fuse_sel[3]);
+	/*
+	 * fuse_sel[1] = LSB position in row (shift)
+	 * fuse_sel[2] = num of bits (mask)
+	 */
+	*val = (fuse_bits >> fuse_sel[1]) & ((1 << fuse_sel[2]) - 1);
+}
+
+#define FUSE_TUPLE_SIZE 4
+static int mem_acc_parse_override_fuse_version_map(
+			 struct mem_acc_regulator *mem_acc_vreg)
+{
+	struct device_node *of_node = mem_acc_vreg->dev->of_node;
 	int i, rc, tuple_size;
 	int len = 0;
 	u32 *tmp;
-	char *prop_str = "qcom,override-fuse-version-map";
+	u32 fuse_sel[4];
+	char *prop_str;
 
-	/* Specify default no match case. */
-	mem_acc_vreg->override_map_match = FUSE_MAP_NO_MATCH;
-	mem_acc_vreg->override_map_count = 0;
-
-	if (!of_find_property(of_node, prop_str, &len)) {
-		/* No mapping present. */
-		return 0;
+	prop_str = "qcom,override-acc-fuse-sel";
+	rc = of_property_read_u32_array(of_node, prop_str, fuse_sel,
+					FUSE_TUPLE_SIZE);
+	if (rc < 0) {
+		pr_err("Read failed - %s rc=%d\n", prop_str, rc);
+		return rc;
 	}
 
+	mem_acc_read_efuse_param(mem_acc_vreg, fuse_sel,
+				 &mem_acc_vreg->override_fuse_value);
+
+	prop_str = "qcom,override-fuse-version-map";
+	if (!of_find_property(of_node, prop_str, &len))
+		return -EINVAL;
+
 	tuple_size = 1;
 	mem_acc_vreg->override_map_count = len / (sizeof(u32) * tuple_size);
-
 	if (len == 0 || len % (sizeof(u32) * tuple_size)) {
 		pr_err("%s length=%d is invalid\n", prop_str, len);
 		return -EINVAL;
@@ -695,8 +717,9 @@
 	}
 
 	if (mem_acc_vreg->override_map_match != FUSE_MAP_NO_MATCH)
-		pr_debug("%s tuple match found: %d\n", prop_str,
-				mem_acc_vreg->override_map_match);
+		pr_info("override_fuse_val=%d, %s tuple match found: %d\n",
+			mem_acc_vreg->override_fuse_value, prop_str,
+			mem_acc_vreg->override_map_match);
 	else
 		pr_err("%s tuple match not found\n", prop_str);
 
@@ -705,6 +728,121 @@
 	return rc;
 }
 
+static int mem_acc_parse_override_fuse_version_range(
+			 struct mem_acc_regulator *mem_acc_vreg)
+{
+	struct device_node *of_node = mem_acc_vreg->dev->of_node;
+	int i, j, rc, size, row_size;
+	int num_fuse_sel, len = 0;
+	u32 *tmp = NULL;
+	char *prop_str;
+	u32 *fuse_val, *fuse_sel;
+	char *buf = NULL;
+	int pos = 0, buflen;
+
+	prop_str = "qcom,override-acc-range-fuse-list";
+	if (!of_find_property(of_node, prop_str, &len)) {
+		pr_err("%s property is missing\n", prop_str);
+		return -EINVAL;
+	}
+
+	size = len / sizeof(u32);
+	if (len == 0 || (size % FUSE_TUPLE_SIZE)) {
+		pr_err("%s property length (%d) is invalid\n", prop_str, len);
+		return -EINVAL;
+	}
+
+	num_fuse_sel = size / FUSE_TUPLE_SIZE;
+	fuse_val = devm_kcalloc(mem_acc_vreg->dev, num_fuse_sel,
+				sizeof(*fuse_val), GFP_KERNEL);
+	if (!fuse_val)
+		return -ENOMEM;
+	mem_acc_vreg->override_acc_range_fuse_list = fuse_val;
+	mem_acc_vreg->override_acc_range_fuse_num = num_fuse_sel;
+
+	fuse_sel = kzalloc(len, GFP_KERNEL);
+	if (!fuse_sel) {
+		rc = -ENOMEM;
+		goto done;
+	}
+
+	rc = of_property_read_u32_array(of_node, prop_str, fuse_sel,
+					size);
+	if (rc) {
+		pr_err("%s read failed, rc=%d\n", prop_str, rc);
+		goto done;
+	}
+
+	for (i = 0; i < num_fuse_sel; i++) {
+		mem_acc_read_efuse_param(mem_acc_vreg, &fuse_sel[i * 4],
+					 &fuse_val[i]);
+	}
+
+	prop_str = "qcom,override-fuse-range-map";
+	if (!of_find_property(of_node, prop_str, &len))
+		goto done;
+
+	row_size = num_fuse_sel * 2;
+	mem_acc_vreg->override_map_count = len / (sizeof(u32) * row_size);
+
+	if (len == 0 || len % (sizeof(u32) * row_size)) {
+		pr_err("%s length=%d is invalid\n", prop_str, len);
+		rc = -EINVAL;
+		goto done;
+	}
+
+	tmp = kzalloc(len, GFP_KERNEL);
+	if (!tmp) {
+		rc = -ENOMEM;
+		goto done;
+	}
+
+	rc = of_property_read_u32_array(of_node, prop_str, tmp,
+				mem_acc_vreg->override_map_count * row_size);
+	if (rc) {
+		pr_err("could not read %s rc=%d\n", prop_str, rc);
+		goto done;
+	}
+
+	for (i = 0; i < mem_acc_vreg->override_map_count; i++) {
+		for (j = 0; j < num_fuse_sel; j++) {
+			if (tmp[i * row_size + j * 2] > fuse_val[j]
+				|| tmp[i * row_size + j * 2 + 1] < fuse_val[j])
+				break;
+		}
+
+		if (j == num_fuse_sel) {
+			mem_acc_vreg->override_map_match = i;
+			break;
+		}
+	}
+
+	/*
+	 * Log the selection fuse values since they are useful for
+	 * baseline MEM ACC logging.
+	 */
+	buflen = num_fuse_sel * sizeof("fuse_selxxxx = XXXX ");
+	buf = kzalloc(buflen, GFP_KERNEL);
+	if (!buf)
+		goto done;
+
+	for (j = 0; j < num_fuse_sel; j++)
+		pos += scnprintf(buf + pos, buflen - pos, "fuse_sel%d = %d ",
+				 j, fuse_val[j]);
+	buf[pos] = '\0';
+	if (mem_acc_vreg->override_map_match != FUSE_MAP_NO_MATCH)
+		pr_info("%s %s tuple match found: %d\n", buf, prop_str,
+			mem_acc_vreg->override_map_match);
+	else
+		pr_err("%s %s tuple match not found\n", buf, prop_str);
+
+done:
+	kfree(fuse_sel);
+	kfree(tmp);
+	kfree(buf);
+	return rc;
+}
+
 #define MAX_CHARS_PER_INT	20
 
 static int mem_acc_reg_addr_val_dump(struct mem_acc_regulator *mem_acc_vreg,
@@ -789,6 +927,150 @@
 	return rc;
 }
 
+static int mem_acc_override_reg_addr_val_init(
+			struct mem_acc_regulator *mem_acc_vreg)
+{
+	struct device_node *of_node = mem_acc_vreg->dev->of_node;
+	struct corner_acc_reg_config *corner_acc_reg_config;
+	struct acc_reg_value *override_reg_config_list;
+	int i, tuple_count, tuple_match, len = 0, rc = 0;
+	u32 list_size, override_max_reg_config_len;
+	char prop_str[40];
+	struct property *prop;
+	int num_corners = mem_acc_vreg->num_corners;
+
+	if (!mem_acc_vreg->corner_acc_reg_config)
+		return 0;
+
+	if (mem_acc_vreg->override_map_count) {
+		if (mem_acc_vreg->override_map_match == FUSE_MAP_NO_MATCH)
+			return 0;
+		tuple_count = mem_acc_vreg->override_map_count;
+		tuple_match = mem_acc_vreg->override_map_match;
+	} else {
+		tuple_count = 1;
+		tuple_match = 0;
+	}
+
+	corner_acc_reg_config = mem_acc_vreg->corner_acc_reg_config;
+	for (i = 1; i <= num_corners; i++) {
+		snprintf(prop_str, sizeof(prop_str),
+			 "qcom,override-corner%d-addr-val-map", i);
+		prop = of_find_property(of_node, prop_str, &len);
+		list_size = len / (tuple_count * sizeof(u32));
+		if (!prop) {
+			pr_debug("%s property not specified\n", prop_str);
+			continue;
+		}
+
+		if ((!list_size) || list_size < (num_corners * 2)) {
+			pr_err("qcom,override-corner%d-addr-val-map property is missing or has an invalid length: len=%d\n",
+			i, len);
+			return -EINVAL;
+		}
+
+		override_max_reg_config_len = list_size / (num_corners * 2);
+		override_reg_config_list =
+				corner_acc_reg_config[i].reg_config_list;
+
+		if (corner_acc_reg_config[i].max_reg_config_len
+					!= override_max_reg_config_len) {
+			/* Free already allocated memory */
+			devm_kfree(mem_acc_vreg->dev, override_reg_config_list);
+
+			/* Allocate memory for the new requirement */
+			override_reg_config_list =
+				devm_kcalloc(mem_acc_vreg->dev,
+				override_max_reg_config_len * num_corners,
+				sizeof(*override_reg_config_list), GFP_KERNEL);
+			if (!override_reg_config_list)
+				return -ENOMEM;
+
+			corner_acc_reg_config[i].max_reg_config_len =
+						override_max_reg_config_len;
+			corner_acc_reg_config[i].reg_config_list =
+						override_reg_config_list;
+		}
+
+		rc = mem_acc_get_reg_addr_val(of_node, prop_str,
+					override_reg_config_list, tuple_match,
+					list_size, mem_acc_vreg->num_acc_reg);
+		if (rc) {
+			pr_err("Failed to read %s property: rc=%d\n",
+				prop_str, rc);
+			return rc;
+		}
+
+		rc = mem_acc_reg_addr_val_dump(mem_acc_vreg,
+						&corner_acc_reg_config[i], i);
+		if (rc) {
+			pr_err("could not dump acc address-value dump for corner=%d: rc=%d\n",
+				i, rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int mem_acc_parse_override_config(struct mem_acc_regulator *mem_acc_vreg)
+{
+	struct device_node *of_node = mem_acc_vreg->dev->of_node;
+	int i, rc = 0;
+
+	/* Specify default no match case. */
+	mem_acc_vreg->override_map_match = FUSE_MAP_NO_MATCH;
+	mem_acc_vreg->override_map_count = 0;
+
+	if (of_find_property(of_node, "qcom,override-fuse-range-map",
+			     NULL)) {
+		rc = mem_acc_parse_override_fuse_version_range(mem_acc_vreg);
+		if (rc) {
+			pr_err("parsing qcom,override-fuse-range-map property failed, rc=%d\n",
+				rc);
+			return rc;
+		}
+	} else if (of_find_property(of_node, "qcom,override-fuse-version-map",
+				    NULL)) {
+		rc = mem_acc_parse_override_fuse_version_map(mem_acc_vreg);
+		if (rc) {
+			pr_err("parsing qcom,override-fuse-version-map property failed, rc=%d\n",
+				rc);
+			return rc;
+		}
+	} else {
+		/* No override fuse configuration defined in device node */
+		return 0;
+	}
+
+	if (mem_acc_vreg->override_map_match == FUSE_MAP_NO_MATCH)
+		return 0;
+
+	rc = mem_acc_override_corner_map(mem_acc_vreg);
+	if (rc) {
+		pr_err("Unable to override corner map rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = mem_acc_override_reg_addr_val_init(mem_acc_vreg);
+	if (rc) {
+		pr_err("Unable to override reg_config_list init rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	for (i = 0; i < MEMORY_MAX; i++) {
+		rc = override_mem_acc_custom_data(mem_acc_vreg, i);
+		if (rc) {
+			pr_err("Unable to override custom data for mem_type=%d rc=%d\n",
+				i, rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
 static int mem_acc_init_reg_config(struct mem_acc_regulator *mem_acc_vreg)
 {
 	struct device_node *of_node = mem_acc_vreg->dev->of_node;
@@ -965,92 +1247,6 @@
 	return rc;
 }
 
-static int mem_acc_override_reg_addr_val_init(
-			struct mem_acc_regulator *mem_acc_vreg)
-{
-	struct device_node *of_node = mem_acc_vreg->dev->of_node;
-	struct corner_acc_reg_config *corner_acc_reg_config;
-	struct acc_reg_value *override_reg_config_list;
-	int i, tuple_count, tuple_match, len = 0, rc = 0;
-	u32 list_size, override_max_reg_config_len;
-	char prop_str[40];
-	struct property *prop;
-	int num_corners = mem_acc_vreg->num_corners;
-
-	if (!mem_acc_vreg->corner_acc_reg_config)
-		return 0;
-
-	if (mem_acc_vreg->override_map_count) {
-		if (mem_acc_vreg->override_map_match ==	FUSE_MAP_NO_MATCH)
-			return 0;
-		tuple_count = mem_acc_vreg->override_map_count;
-		tuple_match = mem_acc_vreg->override_map_match;
-	} else {
-		tuple_count = 1;
-		tuple_match = 0;
-	}
-
-	corner_acc_reg_config = mem_acc_vreg->corner_acc_reg_config;
-	for (i = 1; i <= num_corners; i++) {
-		snprintf(prop_str, sizeof(prop_str),
-				"qcom,override-corner%d-addr-val-map", i);
-		prop = of_find_property(of_node, prop_str, &len);
-		list_size = len / (tuple_count * sizeof(u32));
-		if (!prop) {
-			pr_debug("%s property not specified\n", prop_str);
-			continue;
-		}
-
-		if ((!list_size) || list_size < (num_corners * 2)) {
-			pr_err("qcom,override-corner%d-addr-val-map property is missed or invalid length: len=%d\n",
-			i, len);
-			return -EINVAL;
-		}
-
-		override_max_reg_config_len = list_size / (num_corners * 2);
-		override_reg_config_list =
-				corner_acc_reg_config[i].reg_config_list;
-
-		if (corner_acc_reg_config[i].max_reg_config_len
-					!= override_max_reg_config_len) {
-			/* Free already allocate memory */
-			devm_kfree(mem_acc_vreg->dev, override_reg_config_list);
-
-			/* Allocated memory for new requirement */
-			override_reg_config_list =
-				devm_kcalloc(mem_acc_vreg->dev,
-				override_max_reg_config_len * num_corners,
-				sizeof(*override_reg_config_list), GFP_KERNEL);
-			if (!override_reg_config_list)
-				return -ENOMEM;
-
-			corner_acc_reg_config[i].max_reg_config_len =
-						override_max_reg_config_len;
-			corner_acc_reg_config[i].reg_config_list =
-						override_reg_config_list;
-		}
-
-		rc = mem_acc_get_reg_addr_val(of_node, prop_str,
-					override_reg_config_list, tuple_match,
-					list_size, mem_acc_vreg->num_acc_reg);
-		if (rc) {
-			pr_err("Failed to read %s property: rc=%d\n",
-				prop_str, rc);
-			return rc;
-		}
-
-		rc = mem_acc_reg_addr_val_dump(mem_acc_vreg,
-						&corner_acc_reg_config[i], i);
-		if (rc) {
-			pr_err("could not dump acc address-value dump for corner=%d: rc=%d\n",
-				i, rc);
-			return rc;
-		}
-	}
-
-	return rc;
-}
-
 #define MEM_TYPE_STRING_LEN	20
 static int mem_acc_init(struct platform_device *pdev,
 		struct mem_acc_regulator *mem_acc_vreg)
@@ -1058,8 +1254,6 @@
 	struct device_node *of_node = pdev->dev.of_node;
 	struct resource *res;
 	int len, rc, i, j;
-	u32 fuse_sel[4];
-	u64 fuse_bits;
 	bool acc_type_present = false;
 	char tmps[MEM_TYPE_STRING_LEN];
 
@@ -1201,59 +1395,12 @@
 		}
 	}
 
-	if (of_find_property(mem_acc_vreg->dev->of_node,
-				"qcom,override-acc-fuse-sel", NULL)) {
-		rc = of_property_read_u32_array(mem_acc_vreg->dev->of_node,
-			"qcom,override-acc-fuse-sel", fuse_sel, 4);
-		if (rc < 0) {
-			pr_err("Read failed - qcom,override-acc-fuse-sel rc=%d\n",
-					rc);
-			return rc;
-		}
-
-		fuse_bits = mem_acc_read_efuse_row(mem_acc_vreg, fuse_sel[0],
-								fuse_sel[3]);
-		/*
-		 * fuse_sel[1] = LSB position in row (shift)
-		 * fuse_sel[2] = num of bits (mask)
-		 */
-		mem_acc_vreg->override_fuse_value = (fuse_bits >> fuse_sel[1]) &
-						((1 << fuse_sel[2]) - 1);
-
-		rc = mem_acc_find_override_map_match(pdev, mem_acc_vreg);
-		if (rc) {
-			pr_err("Unable to find fuse map match rc=%d\n", rc);
-			return rc;
-		}
-
-		pr_debug("override_fuse_val=%d override_map_match=%d\n",
-					mem_acc_vreg->override_fuse_value,
-					mem_acc_vreg->override_map_match);
-
-		rc = mem_acc_override_corner_map(mem_acc_vreg);
-		if (rc) {
-			pr_err("Unable to override corner map rc=%d\n", rc);
-			return rc;
-		}
-
-		rc = mem_acc_override_reg_addr_val_init(mem_acc_vreg);
-		if (rc) {
-			pr_err("Unable to override reg_config_list init rc=%d\n",
-				rc);
-			return rc;
-		}
-
-		for (i = 0; i < MEMORY_MAX; i++) {
-			rc = override_mem_acc_custom_data(pdev,
-							mem_acc_vreg, i);
-			if (rc) {
-				pr_err("Unable to override custom data for mem_type=%d rc=%d\n",
-					i, rc);
-				return rc;
-			}
-		}
+	rc = mem_acc_parse_override_config(mem_acc_vreg);
+	if (rc) {
+		pr_err("Unable to parse mem acc override configuration, rc=%d\n",
+			rc);
+		return rc;
 	}
-
 	if (acc_type_present) {
 		mem_acc_vreg->mem_acc_type_data = devm_kzalloc(
 			mem_acc_vreg->dev, mem_acc_vreg->num_corners *
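
The override fuse tuples parsed above follow the <row, LSB, bit-count, read-type> layout described in mem_acc_read_efuse_param(). The standalone sketch below is illustrative only; the row number and raw fuse data are invented, and it shows the same shift-and-mask extraction.

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* <row, LSB, bit-count, read-type>; the read-type element selects
		 * how the row is read and is not needed for the extraction itself. */
		const uint32_t fuse_sel[4] = { 65, 24, 3, 0 };
		const uint64_t fuse_bits = 0x0000000005000000ULL;	/* pretend row data */
		uint32_t val;

		val = (uint32_t)(fuse_bits >> fuse_sel[1]) & ((1u << fuse_sel[2]) - 1);
		printf("override fuse value = %u\n", val);	/* prints 5 */
		return 0;
	}
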
diff --git a/drivers/regulator/qpnp-labibb-regulator.c b/drivers/regulator/qpnp-labibb-regulator.c
index 6b1e480..d672d5f 100644
--- a/drivers/regulator/qpnp-labibb-regulator.c
+++ b/drivers/regulator/qpnp-labibb-regulator.c
@@ -65,6 +65,7 @@
 #define REG_LAB_PRECHARGE_CTL		0x5E
 #define REG_LAB_SOFT_START_CTL		0x5F
 #define REG_LAB_SPARE_CTL		0x60
+#define REG_LAB_MISC_CTL		0x60 /* PMI8998/PM660A */
 #define REG_LAB_PFM_CTL			0x62
 
 /* LAB registers for PM660A */
@@ -137,6 +138,9 @@
 #define LAB_SPARE_TOUCH_WAKE_BIT	BIT(3)
 #define LAB_SPARE_DISABLE_SCP_BIT	BIT(0)
 
+/* REG_LAB_MISC_CTL */
+#define LAB_AUTO_GM_BIT			BIT(4)
+
 /* REG_LAB_PFM_CTL */
 #define LAB_PFM_EN_BIT			BIT(7)
 
@@ -1854,7 +1858,7 @@
 static int qpnp_labibb_ttw_enter_ibb_common(struct qpnp_labibb *labibb)
 {
 	int rc = 0;
-	u8 val;
+	u8 val, mask;
 
 	val = 0;
 	rc = qpnp_labibb_write(labibb, labibb->ibb_base + REG_IBB_PD_CTL,
@@ -1874,10 +1878,16 @@
 		return rc;
 	}
 
-	val = IBB_WAIT_MBG_OK;
+	if (labibb->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE) {
+		val = 0;
+		mask = IBB_DIS_DLY_MASK;
+	} else {
+		val = IBB_WAIT_MBG_OK;
+		mask = IBB_DIS_DLY_MASK | IBB_WAIT_MBG_OK;
+	}
+
 	rc = qpnp_labibb_sec_masked_write(labibb, labibb->ibb_base,
-				REG_IBB_PWRUP_PWRDN_CTL_2,
-				IBB_DIS_DLY_MASK | IBB_WAIT_MBG_OK, val);
+				REG_IBB_PWRUP_PWRDN_CTL_2, mask, val);
 	if (rc < 0) {
 		pr_err("write to register %x failed rc = %d\n",
 			REG_IBB_PWRUP_PWRDN_CTL_2, rc);
@@ -1953,7 +1963,7 @@
 static int qpnp_labibb_regulator_ttw_mode_enter(struct qpnp_labibb *labibb)
 {
 	int rc = 0;
-	u8 val;
+	u8 val, reg;
 
 	/* Save the IBB settings before they get modified for TTW mode */
 	if (!labibb->ibb_settings_saved) {
@@ -2015,10 +2025,17 @@
 		}
 
 		val = LAB_SPARE_DISABLE_SCP_BIT;
+
 		if (labibb->pmic_rev_id->pmic_subtype != PMI8950_SUBTYPE)
 			val |= LAB_SPARE_TOUCH_WAKE_BIT;
-		rc = qpnp_labibb_write(labibb, labibb->lab_base +
-				REG_LAB_SPARE_CTL, &val, 1);
+
+		if (labibb->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE) {
+			reg = REG_LAB_MISC_CTL;
+			val |= LAB_AUTO_GM_BIT;
+		} else {
+			reg = REG_LAB_SPARE_CTL;
+		}
+		rc = qpnp_labibb_write(labibb, labibb->lab_base + reg, &val, 1);
 		if (rc < 0) {
 			pr_err("qpnp_labibb_write register %x failed rc = %d\n",
 				REG_LAB_SPARE_CTL, rc);
@@ -2048,7 +2065,15 @@
 	case PMI8950_SUBTYPE:
 		rc = qpnp_labibb_ttw_enter_ibb_pmi8950(labibb);
 		break;
+	case PMI8998_SUBTYPE:
+		rc = labibb->lab_ver_ops->ps_ctl(labibb, 70, true);
+		if (rc < 0)
+			break;
+
+		rc = qpnp_ibb_ps_config(labibb, true);
+		break;
 	}
+
 	if (rc < 0) {
 		pr_err("Failed to configure TTW-enter for IBB rc=%d\n", rc);
 		return rc;
@@ -2081,7 +2106,7 @@
 static int qpnp_labibb_regulator_ttw_mode_exit(struct qpnp_labibb *labibb)
 {
 	int rc = 0;
-	u8 val;
+	u8 val, reg;
 
 	if (!labibb->ibb_settings_saved) {
 		pr_err("IBB settings are not saved!\n");
@@ -2115,8 +2140,14 @@
 		}
 
 		val = 0;
-		rc = qpnp_labibb_write(labibb, labibb->lab_base +
-					REG_LAB_SPARE_CTL, &val, 1);
+		if (labibb->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE) {
+			reg = REG_LAB_MISC_CTL;
+			val |= LAB_AUTO_GM_BIT;
+		} else {
+			reg = REG_LAB_SPARE_CTL;
+		}
+
+		rc = qpnp_labibb_write(labibb, labibb->lab_base + reg, &val, 1);
 		if (rc < 0) {
 			pr_err("qpnp_labibb_write register %x failed rc = %d\n",
 					REG_LAB_SPARE_CTL, rc);
@@ -3692,6 +3723,9 @@
 	case PMI8950_SUBTYPE:
 		/* TTW supported for all revisions */
 		break;
+	case PMI8998_SUBTYPE:
+		/* TTW supported for all revisions */
+		break;
 	default:
 		pr_info("TTW mode not supported for PMIC-subtype = %d\n",
 					labibb->pmic_rev_id->pmic_subtype);
diff --git a/drivers/regulator/rpmh-regulator.c b/drivers/regulator/rpmh-regulator.c
index 562b05a..1de08d4 100644
--- a/drivers/regulator/rpmh-regulator.c
+++ b/drivers/regulator/rpmh-regulator.c
@@ -36,10 +36,13 @@
  * %RPMH_REGULATOR_TYPE_ARC:	RPMh ARC accelerator which supports voting on
  *				the CPR managed voltage level of LDO and SMPS
  *				type PMIC regulators.
+ * %RPMH_REGULATOR_TYPE_XOB:	RPMh XOB accelerator which supports voting on
+ *				the enable state of PMIC regulators.
  */
 enum rpmh_regulator_type {
 	RPMH_REGULATOR_TYPE_VRM,
 	RPMH_REGULATOR_TYPE_ARC,
+	RPMH_REGULATOR_TYPE_XOB,
 };
 
 /**
@@ -52,6 +55,7 @@
  *					for enable voting.  Instead, ARC level
  *					0 corresponds to "disabled" for a given
  *					ARC regulator resource if supported.
+ * %RPMH_REGULATOR_REG_XOB_ENABLE:	XOB enable voting register index
  * %RPMH_REGULATOR_REG_ENABLE:		Common enable index used in callback
  *					functions for both ARC and VRM.
  * %RPMH_REGULATOR_REG_VRM_MODE:	VRM regulator mode voting register index
@@ -61,6 +65,8 @@
  *					register indices
  * %RPMH_REGULATOR_REG_ARC_MAX:		Exclusive upper limit of ARC register
  *					indices
+ * %RPMH_REGULATOR_REG_XOB_MAX:		Exclusive upper limit of XOB register
+ *					indices
  * %RPMH_REGULATOR_REG_VRM_MAX:		Exclusive upper limit of VRM register
  *					indices
  * %RPMH_REGULATOR_REG_MAX:		Combined exclusive upper limit of ARC
@@ -73,11 +79,13 @@
 	RPMH_REGULATOR_REG_ARC_LEVEL		= 0,
 	RPMH_REGULATOR_REG_VRM_ENABLE		= 1,
 	RPMH_REGULATOR_REG_ARC_PSEUDO_ENABLE	= RPMH_REGULATOR_REG_VRM_ENABLE,
+	RPMH_REGULATOR_REG_XOB_ENABLE		= RPMH_REGULATOR_REG_VRM_ENABLE,
 	RPMH_REGULATOR_REG_ENABLE		= RPMH_REGULATOR_REG_VRM_ENABLE,
 	RPMH_REGULATOR_REG_VRM_MODE		= 2,
 	RPMH_REGULATOR_REG_VRM_HEADROOM		= 3,
 	RPMH_REGULATOR_REG_ARC_REAL_MAX		= 1,
 	RPMH_REGULATOR_REG_ARC_MAX		= 2,
+	RPMH_REGULATOR_REG_XOB_MAX		= 2,
 	RPMH_REGULATOR_REG_VRM_MAX		= 4,
 	RPMH_REGULATOR_REG_MAX			= 4,
 };
@@ -104,6 +112,9 @@
 #define RPMH_VRM_MODE_MIN		0
 #define RPMH_VRM_MODE_MAX		7
 
+/* XOB voting registers are found in the VRM hardware module */
+#define CMD_DB_HW_XOB			CMD_DB_HW_VRM
+
 /*
  * Mapping from RPMh VRM accelerator modes to regulator framework modes
  * Assumes that SMPS PFM mode == LDO LPM mode and SMPS PWM mode == LDO HPM mode
@@ -297,6 +308,10 @@
 	[RPMH_REGULATOR_REG_ARC_LEVEL]		= "hlvl",
 };
 
+static const char *const rpmh_regulator_xob_param_names[] = {
+	[RPMH_REGULATOR_REG_XOB_ENABLE]		= "en",
+};
+
 /**
  * rpmh_regulator_req() - print the rpmh regulator request to the kernel log
  * @vreg:		Pointer to the RPMh regulator
@@ -323,12 +338,22 @@
 	u32 valid;
 	bool first;
 
-	max_reg_index = aggr_vreg->regulator_type == RPMH_REGULATOR_TYPE_VRM
-					? RPMH_REGULATOR_REG_VRM_MAX
-					: RPMH_REGULATOR_REG_ARC_REAL_MAX;
-	param_name = aggr_vreg->regulator_type == RPMH_REGULATOR_TYPE_VRM
-					? rpmh_regulator_vrm_param_names
-					: rpmh_regulator_arc_param_names;
+	switch (aggr_vreg->regulator_type) {
+	case RPMH_REGULATOR_TYPE_VRM:
+		max_reg_index = RPMH_REGULATOR_REG_VRM_MAX;
+		param_name = rpmh_regulator_vrm_param_names;
+		break;
+	case RPMH_REGULATOR_TYPE_ARC:
+		max_reg_index = RPMH_REGULATOR_REG_ARC_REAL_MAX;
+		param_name = rpmh_regulator_arc_param_names;
+		break;
+	case RPMH_REGULATOR_TYPE_XOB:
+		max_reg_index = RPMH_REGULATOR_REG_XOB_MAX;
+		param_name = rpmh_regulator_xob_param_names;
+		break;
+	default:
+		return;
+	}
 
 	pos += scnprintf(buf + pos, buflen - pos,
 			"%s (%s), addr=0x%05X: s=%s; sent: ",
@@ -438,9 +463,20 @@
 	enum rpmh_state state;
 	u32 sent_mask;
 
-	max_reg_index = aggr_vreg->regulator_type == RPMH_REGULATOR_TYPE_VRM
-						? RPMH_REGULATOR_REG_VRM_MAX
-						: RPMH_REGULATOR_REG_ARC_MAX;
+	switch (aggr_vreg->regulator_type) {
+	case RPMH_REGULATOR_TYPE_VRM:
+		max_reg_index = RPMH_REGULATOR_REG_VRM_MAX;
+		break;
+	case RPMH_REGULATOR_TYPE_ARC:
+		max_reg_index = RPMH_REGULATOR_REG_ARC_MAX;
+		break;
+	case RPMH_REGULATOR_TYPE_XOB:
+		max_reg_index = RPMH_REGULATOR_REG_XOB_MAX;
+		break;
+	default:
+		return -EINVAL;
+	}
+
 	/*
 	 * Perform max aggregration of each register value across all regulators
 	 * which use this RPMh resource.
@@ -1005,9 +1041,16 @@
 	.list_voltage		= rpmh_regulator_arc_list_voltage,
 };
 
+static const struct regulator_ops rpmh_regulator_xob_ops = {
+	.enable			= rpmh_regulator_enable,
+	.disable		= rpmh_regulator_disable,
+	.is_enabled		= rpmh_regulator_is_enabled,
+};
+
 static const struct regulator_ops *rpmh_regulator_ops[] = {
 	[RPMH_REGULATOR_TYPE_VRM]	= &rpmh_regulator_vrm_ops,
 	[RPMH_REGULATOR_TYPE_ARC]	= &rpmh_regulator_arc_ops,
+	[RPMH_REGULATOR_TYPE_XOB]	= &rpmh_regulator_xob_ops,
 };
 
 /**
@@ -1322,6 +1365,13 @@
 		rc = of_property_read_u32(vreg->of_node, prop, &temp);
 		if (!rc)
 			vreg->rdesc.min_dropout_uV = temp;
+	} else if (type == RPMH_REGULATOR_TYPE_XOB) {
+		prop = "qcom,init-enable";
+		rc = of_property_read_u32(vreg->of_node, prop, &temp);
+		if (!rc)
+			rpmh_regulator_set_reg(vreg,
+						RPMH_REGULATOR_REG_XOB_ENABLE,
+						!!temp);
 	}
 
 	return 0;
@@ -1408,6 +1458,10 @@
 		init_data->constraints.valid_ops_mask
 			|= REGULATOR_CHANGE_VOLTAGE;
 
+	if (type == RPMH_REGULATOR_TYPE_XOB
+	    && init_data->constraints.min_uV == init_data->constraints.max_uV)
+		vreg->rdesc.fixed_uV = init_data->constraints.min_uV;
+
 	if (vreg->aggr_vreg->mode_count) {
 		init_data->constraints.valid_ops_mask
 			|= REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_DRMS;
@@ -1434,8 +1488,19 @@
 		init_data->constraints.valid_ops_mask
 			|= REGULATOR_CHANGE_STATUS;
 
-	vreg->rdesc.n_voltages = type == RPMH_REGULATOR_TYPE_ARC ?
-					vreg->aggr_vreg->level_count : 2;
+	switch (type) {
+	case RPMH_REGULATOR_TYPE_VRM:
+		vreg->rdesc.n_voltages = 2;
+		break;
+	case RPMH_REGULATOR_TYPE_ARC:
+		vreg->rdesc.n_voltages = vreg->aggr_vreg->level_count;
+		break;
+	case RPMH_REGULATOR_TYPE_XOB:
+		vreg->rdesc.n_voltages = 1;
+		break;
+	default:
+		return -EINVAL;
+	}
 
 	rc = of_property_read_u32(vreg->of_node, "qcom,set", &set);
 	if (rc) {
@@ -1493,6 +1558,10 @@
 		.compatible = "qcom,rpmh-arc-regulator",
 		.data = (void *)(uintptr_t)RPMH_REGULATOR_TYPE_ARC,
 	},
+	{
+		.compatible = "qcom,rpmh-xob-regulator",
+		.data = (void *)(uintptr_t)RPMH_REGULATOR_TYPE_XOB,
+	},
 	{}
 };
 
@@ -1570,11 +1639,15 @@
 	if ((aggr_vreg->regulator_type == RPMH_REGULATOR_TYPE_ARC
 			&& sid != CMD_DB_HW_ARC)
 	    || (aggr_vreg->regulator_type == RPMH_REGULATOR_TYPE_VRM
-			&& sid != CMD_DB_HW_VRM)) {
+			&& sid != CMD_DB_HW_VRM)
+	    || (aggr_vreg->regulator_type == RPMH_REGULATOR_TYPE_XOB
+			&& sid != CMD_DB_HW_XOB)) {
 		aggr_vreg_err(aggr_vreg, "RPMh slave ID mismatch; config=%d (%s) != cmd-db=%d\n",
 			aggr_vreg->regulator_type,
 			aggr_vreg->regulator_type == RPMH_REGULATOR_TYPE_ARC
-				? "ARC" : "VRM",
+				? "ARC" : (aggr_vreg->regulator_type
+						== RPMH_REGULATOR_TYPE_VRM
+					  ? "VRM" : "XOB"),
 			sid);
 		return -EINVAL;
 	}
@@ -1643,7 +1716,10 @@
 	aggr_vreg_debug(aggr_vreg, "successfully probed; addr=0x%05X, type=%s\n",
 			aggr_vreg->addr,
 			aggr_vreg->regulator_type == RPMH_REGULATOR_TYPE_ARC
-				? "ARC" : "VRM");
+				? "ARC"
+				: (aggr_vreg->regulator_type
+						== RPMH_REGULATOR_TYPE_VRM
+					? "VRM" : "XOB"));
 
 	return rc;
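
From a consumer's point of view, an XOB-managed supply behaves like a fixed regulator that can only be switched on and off. The sketch below is illustrative only and not part of this patch; the supply name is hypothetical and it uses the standard regulator consumer API.

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/regulator/consumer.h>

	/* "vdd_xob_example" is a hypothetical supply name mapped in DT to an RPMh
	 * XOB resource; XOB supports only enable-state voting, so no voltage or
	 * mode requests are made here. */
	static int example_enable_xob_supply(struct device *dev)
	{
		struct regulator *vreg;
		int rc;

		vreg = devm_regulator_get(dev, "vdd_xob_example");
		if (IS_ERR(vreg))
			return PTR_ERR(vreg);

		rc = regulator_enable(vreg);
		if (rc)
			dev_err(dev, "unable to enable XOB supply, rc=%d\n", rc);

		return rc;
	}
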
 
diff --git a/drivers/rtc/qpnp-rtc.c b/drivers/rtc/qpnp-rtc.c
index a2c004e..4152086 100644
--- a/drivers/rtc/qpnp-rtc.c
+++ b/drivers/rtc/qpnp-rtc.c
@@ -599,9 +599,6 @@
 		goto fail_rtc_enable;
 	}
 
-	/* Init power_on_alarm after adding rtc device */
-	power_on_alarm_init();
-
 	/* Request the alarm IRQ */
 	rc = request_any_context_irq(rtc_dd->rtc_alarm_irq,
 				 qpnp_alarm_trigger, IRQF_TRIGGER_RISING,
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index bcc8f3d..b3f9243 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -358,6 +358,8 @@
 
 	adapter->next_port_scan = jiffies;
 
+	adapter->erp_action.adapter = adapter;
+
 	if (zfcp_qdio_setup(adapter))
 		goto failed;
 
@@ -514,6 +516,9 @@
 	port->dev.groups = zfcp_port_attr_groups;
 	port->dev.release = zfcp_port_release;
 
+	port->erp_action.adapter = adapter;
+	port->erp_action.port = port;
+
 	if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
 		kfree(port);
 		goto err_out;
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 7ccfce5..3b23d675 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -193,9 +193,8 @@
 		atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE,
 				&zfcp_sdev->status);
 		erp_action = &zfcp_sdev->erp_action;
-		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
-		erp_action->port = port;
-		erp_action->sdev = sdev;
+		WARN_ON_ONCE(erp_action->port != port);
+		WARN_ON_ONCE(erp_action->sdev != sdev);
 		if (!(atomic_read(&zfcp_sdev->status) &
 		      ZFCP_STATUS_COMMON_RUNNING))
 			act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
@@ -208,8 +207,8 @@
 		zfcp_erp_action_dismiss_port(port);
 		atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
 		erp_action = &port->erp_action;
-		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
-		erp_action->port = port;
+		WARN_ON_ONCE(erp_action->port != port);
+		WARN_ON_ONCE(erp_action->sdev != NULL);
 		if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING))
 			act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
 		break;
@@ -219,7 +218,8 @@
 		zfcp_erp_action_dismiss_adapter(adapter);
 		atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
 		erp_action = &adapter->erp_action;
-		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
+		WARN_ON_ONCE(erp_action->port != NULL);
+		WARN_ON_ONCE(erp_action->sdev != NULL);
 		if (!(atomic_read(&adapter->status) &
 		      ZFCP_STATUS_COMMON_RUNNING))
 			act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
@@ -229,7 +229,11 @@
 		return NULL;
 	}
 
-	erp_action->adapter = adapter;
+	WARN_ON_ONCE(erp_action->adapter != adapter);
+	memset(&erp_action->list, 0, sizeof(erp_action->list));
+	memset(&erp_action->timer, 0, sizeof(erp_action->timer));
+	erp_action->step = ZFCP_ERP_STEP_UNINITIALIZED;
+	erp_action->fsf_req_id = 0;
 	erp_action->action = need;
 	erp_action->status = act_status;
 
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 9bd9b9a..a9b8104 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -115,10 +115,15 @@
 	struct zfcp_unit *unit;
 	int npiv = adapter->connection_features & FSF_FEATURE_NPIV_MODE;
 
+	zfcp_sdev->erp_action.adapter = adapter;
+	zfcp_sdev->erp_action.sdev = sdev;
+
 	port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
 	if (!port)
 		return -ENXIO;
 
+	zfcp_sdev->erp_action.port = port;
+
 	unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev));
 	if (unit)
 		put_device(&unit->dev);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 0114e2a..9965135 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -841,7 +841,7 @@
 
 	val = 0;
 	list_for_each_entry(srp, &sfp->rq_list, entry) {
-		if (val > SG_MAX_QUEUE)
+		if (val >= SG_MAX_QUEUE)
 			break;
 		rinfo[val].req_state = srp->done + 1;
 		rinfo[val].problem =
diff --git a/drivers/scsi/ufs/ufs-qcom-ice.c b/drivers/scsi/ufs/ufs-qcom-ice.c
index 0c86263..84765b1 100644
--- a/drivers/scsi/ufs/ufs-qcom-ice.c
+++ b/drivers/scsi/ufs/ufs-qcom-ice.c
@@ -170,17 +170,15 @@
 static void ufs_qcom_ice_cfg_work(struct work_struct *work)
 {
 	unsigned long flags;
-	struct ice_data_setting ice_set;
 	struct ufs_qcom_host *qcom_host =
 		container_of(work, struct ufs_qcom_host, ice_cfg_work);
-	struct request *req_pending = NULL;
 
 	if (!qcom_host->ice.vops->config_start)
 		return;
 
 	spin_lock_irqsave(&qcom_host->ice_work_lock, flags);
-	req_pending = qcom_host->req_pending;
-	if (!req_pending) {
+	if (!qcom_host->req_pending) {
+		qcom_host->work_pending = false;
 		spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
 		return;
 	}
@@ -189,24 +187,15 @@
 	/*
 	 * config_start is called again as previous attempt returned -EAGAIN,
 	 * this call shall now take care of the necessary key setup.
-	 * 'ice_set' will not actually be used, instead the next call to
-	 * config_start() for this request, in the normal call flow, will
-	 * succeed as the key has now been setup.
 	 */
 	qcom_host->ice.vops->config_start(qcom_host->ice.pdev,
-		qcom_host->req_pending, &ice_set, false);
+		qcom_host->req_pending, NULL, false);
 
 	spin_lock_irqsave(&qcom_host->ice_work_lock, flags);
 	qcom_host->req_pending = NULL;
+	qcom_host->work_pending = false;
 	spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
 
-	/*
-	 * Resume with requests processing. We assume config_start has been
-	 * successful, but even if it wasn't we still must resume in order to
-	 * allow for the request to be retried.
-	 */
-	ufshcd_scsi_unblock_requests(qcom_host->hba);
-
 }
 
 /**
@@ -285,18 +274,14 @@
 			 * requires a non-atomic context, this means we should
 			 * call the function again from the worker thread to do
 			 * the configuration. For this request the error will
-			 * propagate so it will be re-queued and until the
-			 * configuration is is completed we block further
-			 * request processing.
+			 * propagate so it will be re-queued.
 			 */
 			if (err == -EAGAIN) {
 				dev_dbg(qcom_host->hba->dev,
 					"%s: scheduling task for ice setup\n",
 					__func__);
 
-				if (!qcom_host->req_pending) {
-					ufshcd_scsi_block_requests(
-						qcom_host->hba);
+				if (!qcom_host->work_pending) {
 					qcom_host->req_pending = cmd->request;
 
 					if (!schedule_work(
@@ -307,10 +292,9 @@
 						&qcom_host->ice_work_lock,
 						flags);
 
-						ufshcd_scsi_unblock_requests(
-							qcom_host->hba);
 						return err;
 					}
+					qcom_host->work_pending = true;
 				}
 
 			} else {
@@ -409,9 +393,7 @@
 			 * requires a non-atomic context, this means we should
 			 * call the function again from the worker thread to do
 			 * the configuration. For this request the error will
-			 * propagate so it will be re-queued and until the
-			 * configuration is is completed we block further
-			 * request processing.
+			 * propagate so it will be re-queued.
 			 */
 			if (err == -EAGAIN) {
 
@@ -419,9 +401,8 @@
 					"%s: scheduling task for ice setup\n",
 					__func__);
 
-				if (!qcom_host->req_pending) {
-					ufshcd_scsi_block_requests(
-						qcom_host->hba);
+				if (!qcom_host->work_pending) {
+
 					qcom_host->req_pending = cmd->request;
 					if (!schedule_work(
 						&qcom_host->ice_cfg_work)) {
@@ -431,10 +412,9 @@
 						&qcom_host->ice_work_lock,
 						flags);
 
-						ufshcd_scsi_unblock_requests(
-							qcom_host->hba);
 						return err;
 					}
+					qcom_host->work_pending = true;
 				}
 
 			} else {
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index 0ab656e..9da3d19 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -375,6 +375,7 @@
 	struct work_struct ice_cfg_work;
 	struct request *req_pending;
 	struct ufs_vreg *vddp_ref_clk;
+	bool work_pending;
 };
 
 static inline u32
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index b98d2ae..4a43695 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -549,19 +549,19 @@
 
 #ifdef CONFIG_TRACEPOINTS
 static inline void ufshcd_add_command_trace(struct ufs_hba *hba,
-			struct ufshcd_cmd_log_entry *entry, u8 opcode)
+			struct ufshcd_cmd_log_entry *entry)
 {
 	if (trace_ufshcd_command_enabled()) {
 		u32 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
 
 		trace_ufshcd_command(dev_name(hba->dev), entry->str, entry->tag,
 				     entry->doorbell, entry->transfer_len, intr,
-				     entry->lba, opcode);
+				     entry->lba, entry->cmd_id);
 	}
 }
 #else
 static inline void ufshcd_add_command_trace(struct ufs_hba *hba,
-			struct ufshcd_cmd_log_entry *entry, u8 opcode)
+			struct ufshcd_cmd_log_entry *entry)
 {
 }
 #endif
@@ -582,7 +582,7 @@
 
 static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
 			     unsigned int tag, u8 cmd_id, u8 idn, u8 lun,
-			     sector_t lba, int transfer_len, u8 opcode)
+			     sector_t lba, int transfer_len)
 {
 	struct ufshcd_cmd_log_entry *entry;
 
@@ -606,19 +606,18 @@
 	hba->cmd_log.pos =
 			(hba->cmd_log.pos + 1) % UFSHCD_MAX_CMD_LOGGING;
 
-	ufshcd_add_command_trace(hba, entry, opcode);
+	ufshcd_add_command_trace(hba, entry);
 }
 
 static void ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
 	unsigned int tag, u8 cmd_id, u8 idn)
 {
-	__ufshcd_cmd_log(hba, str, cmd_type, tag, cmd_id, idn,
-			 0xff, (sector_t)-1, -1, -1);
+	__ufshcd_cmd_log(hba, str, cmd_type, tag, cmd_id, idn, 0, 0, 0);
 }
 
 static void ufshcd_dme_cmd_log(struct ufs_hba *hba, char *str, u8 cmd_id)
 {
-	ufshcd_cmd_log(hba, str, "dme", 0xff, cmd_id, 0xff);
+	ufshcd_cmd_log(hba, str, "dme", 0, cmd_id, 0);
 }
 
 static void ufshcd_print_cmd_log(struct ufs_hba *hba)
@@ -653,7 +652,7 @@
 
 static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
 			     unsigned int tag, u8 cmd_id, u8 idn, u8 lun,
-			     sector_t lba, int transfer_len, u8 opcode)
+			     sector_t lba, int transfer_len)
 {
 	struct ufshcd_cmd_log_entry entry;
 
@@ -663,7 +662,7 @@
 	entry.doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 	entry.tag = tag;
 
-	ufshcd_add_command_trace(hba, &entry, opcode);
+	ufshcd_add_command_trace(hba, &entry);
 }
 
 static void ufshcd_dme_cmd_log(struct ufs_hba *hba, char *str, u8 cmd_id)
@@ -683,8 +682,8 @@
 	char *cmd_type = NULL;
 	u8 opcode = 0;
 	u8 cmd_id = 0, idn = 0;
-	sector_t lba = -1;
-	int transfer_len = -1;
+	sector_t lba = 0;
+	int transfer_len = 0;
 
 	lrbp = &hba->lrb[tag];
 
@@ -718,7 +717,7 @@
 	}
 
 	__ufshcd_cmd_log(hba, (char *) str, cmd_type, tag, cmd_id, idn,
-			 lrbp->lun, lba, transfer_len, opcode);
+			 lrbp->lun, lba, transfer_len);
 }
 #else
 static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
@@ -2366,7 +2365,8 @@
 	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 	/* Make sure that doorbell is committed immediately */
 	wmb();
-	ufshcd_cond_add_cmd_trace(hba, task_tag, "send");
+	ufshcd_cond_add_cmd_trace(hba, task_tag,
+			hba->lrb[task_tag].cmd ? "scsi_send" : "dev_cmd_send");
 	ufshcd_update_tag_stats(hba, task_tag);
 	return ret;
 }
@@ -2483,7 +2483,7 @@
 
 	hba->active_uic_cmd = uic_cmd;
 
-	ufshcd_dme_cmd_log(hba, "send", hba->active_uic_cmd->command);
+	ufshcd_dme_cmd_log(hba, "dme_send", hba->active_uic_cmd->command);
 	/* Write Args */
 	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
 	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
@@ -2517,7 +2517,7 @@
 	if (ret)
 		ufsdbg_set_err_state(hba);
 
-	ufshcd_dme_cmd_log(hba, "cmp1", hba->active_uic_cmd->command);
+	ufshcd_dme_cmd_log(hba, "dme_cmpl_1", hba->active_uic_cmd->command);
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	hba->active_uic_cmd = NULL;
@@ -4450,7 +4450,7 @@
 			cmd->command, status);
 		ret = (status != PWR_OK) ? status : -1;
 	}
-	ufshcd_dme_cmd_log(hba, "cmp2", hba->active_uic_cmd->command);
+	ufshcd_dme_cmd_log(hba, "dme_cmpl_2", hba->active_uic_cmd->command);
 
 out:
 	if (ret) {
@@ -5695,7 +5695,7 @@
 		lrbp = &hba->lrb[index];
 		cmd = lrbp->cmd;
 		if (cmd) {
-			ufshcd_cond_add_cmd_trace(hba, index, "failed");
+			ufshcd_cond_add_cmd_trace(hba, index, "scsi_failed");
 			ufshcd_update_error_stats(hba,
 					UFS_ERR_INT_FATAL_ERRORS);
 			scsi_dma_unmap(cmd);
@@ -5725,7 +5725,7 @@
 		} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
 			if (hba->dev_cmd.complete) {
 				ufshcd_cond_add_cmd_trace(hba, index,
-							"dev_failed");
+							"dev_cmd_failed");
 				ufshcd_outstanding_req_clear(hba, index);
 				complete(hba->dev_cmd.complete);
 			}
@@ -5753,7 +5753,7 @@
 		lrbp = &hba->lrb[index];
 		cmd = lrbp->cmd;
 		if (cmd) {
-			ufshcd_cond_add_cmd_trace(hba, index, "complete");
+			ufshcd_cond_add_cmd_trace(hba, index, "scsi_cmpl");
 			ufshcd_update_tag_stats_completion(hba, cmd);
 			result = ufshcd_transfer_rsp_status(hba, lrbp);
 			scsi_dma_unmap(cmd);
@@ -5799,7 +5799,7 @@
 		} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
 			if (hba->dev_cmd.complete) {
 				ufshcd_cond_add_cmd_trace(hba, index,
-						"dcmp");
+						"dev_cmd_cmpl");
 				complete(hba->dev_cmd.complete);
 			}
 		}
diff --git a/drivers/slimbus/slimbus.c b/drivers/slimbus/slimbus.c
index 4c9210a..f34467b 100644
--- a/drivers/slimbus/slimbus.c
+++ b/drivers/slimbus/slimbus.c
@@ -333,6 +333,20 @@
 	}
 }
 
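+/* Worker: invoke the client driver's reset_device() callback, if any. */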
+static void slim_device_reset(struct work_struct *work)
+{
+	struct slim_driver *sbdrv;
+	struct slim_device *sbdev =
+			container_of(work, struct slim_device, device_reset);
+
+	if (!sbdev->dev.driver)
+		return;
+
+	sbdrv = to_slim_driver(sbdev->dev.driver);
+	if (sbdrv && sbdrv->reset_device)
+		sbdrv->reset_device(sbdev);
+}
+
 /*
  * slim_add_device: Add a new device without register board info.
  * @ctrl: Controller to which this device is to be added to.
@@ -353,6 +367,7 @@
 	INIT_LIST_HEAD(&sbdev->mark_suspend);
 	INIT_LIST_HEAD(&sbdev->mark_removal);
 	INIT_WORK(&sbdev->wd, slim_report);
+	INIT_WORK(&sbdev->device_reset, slim_device_reset);
 	mutex_lock(&ctrl->m_ctrl);
 	list_add_tail(&sbdev->dev_list, &ctrl->devs);
 	mutex_unlock(&ctrl->m_ctrl);
@@ -684,16 +699,9 @@
 	mutex_unlock(&ctrl->sched.m_reconf);
 	mutex_lock(&ctrl->m_ctrl);
 	list_for_each_safe(pos, next, &ctrl->devs) {
-		struct slim_driver *sbdrv;
-
 		sbdev = list_entry(pos, struct slim_device, dev_list);
-		mutex_unlock(&ctrl->m_ctrl);
-		if (sbdev && sbdev->dev.driver) {
-			sbdrv = to_slim_driver(sbdev->dev.driver);
-			if (sbdrv->reset_device)
-				sbdrv->reset_device(sbdev);
-		}
-		mutex_lock(&ctrl->m_ctrl);
+		if (sbdev)
+			queue_work(ctrl->wq, &sbdev->device_reset);
 	}
 	mutex_unlock(&ctrl->m_ctrl);
 }
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 62306bad..18aaacc 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -686,3 +686,11 @@
 	  and ETM registers are saved and restored across power collapse.
 	  If unsure, say 'N' here to avoid potential power, performance and
 	  memory penalty.
+
+config QCOM_QDSS_BRIDGE
+	bool "Configure bridge driver for QTI/Qualcomm Technologies, Inc. MDM"
+	depends on MSM_MHI
+	help
+	  This driver routes diag traffic from the modem side over the QDSS
+	  sub-system to USB on the APSS side. It acts as a bridge between the
+	  MHI and USB interfaces. If unsure, say N.
\ No newline at end of file
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 9a4e010..bb08357 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -79,3 +79,4 @@
 obj-$(CONFIG_QMP_DEBUGFS_CLIENT) += qmp-debugfs-client.o
 obj-$(CONFIG_MSM_REMOTEQDSS) += remoteqdss.o
 obj-$(CONFIG_QSEE_IPC_IRQ_BRIDGE) += qsee_ipc_irq_bridge.o
+obj-$(CONFIG_QCOM_QDSS_BRIDGE) += qdss_bridge.o
\ No newline at end of file
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index 6019e4b..ebed4d2 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -4070,6 +4070,7 @@
 		kfree(xprt_ptr);
 		return -ENOMEM;
 	}
+	cfg->tx_task = xprt_ptr->tx_task;
 	ret = glink_core_init_xprt_qos_cfg(xprt_ptr, cfg);
 	if (ret < 0) {
 		kfree(xprt_ptr);
diff --git a/drivers/soc/qcom/glink_core_if.h b/drivers/soc/qcom/glink_core_if.h
index 1411330..704171f 100644
--- a/drivers/soc/qcom/glink_core_if.h
+++ b/drivers/soc/qcom/glink_core_if.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,7 @@
 
 #include <linux/of.h>
 #include <linux/types.h>
+#include <linux/sched.h>
 #include "glink_private.h"
 
 /* Local Channel state */
@@ -105,6 +106,7 @@
  * @versions_entries:	Number of entries in @versions.
  * @max_cid:		Maximum number of channel identifiers supported.
  * @max_iid:		Maximum number of intent identifiers supported.
+ * @tx_task:		Task structure for tx thread.
  * @mtu:		MTU supported by this transport.
  * @num_flows:		Number of traffic flows/priority buckets.
  * @flow_info:		Information about each flow/priority.
@@ -117,6 +119,7 @@
 	size_t versions_entries;
 	uint32_t max_cid;
 	uint32_t max_iid;
+	struct task_struct *tx_task;
 
 	size_t mtu;
 	uint32_t num_flows;
diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c
index 384347d..ea7374f 100644
--- a/drivers/soc/qcom/glink_smem_native_xprt.c
+++ b/drivers/soc/qcom/glink_smem_native_xprt.c
@@ -33,6 +33,7 @@
 #include <linux/spinlock.h>
 #include <linux/srcu.h>
 #include <linux/wait.h>
+#include <linux/cpumask.h>
 #include <soc/qcom/smem.h>
 #include <soc/qcom/tracer_pkt.h>
 #include "glink_core_if.h"
@@ -226,6 +227,7 @@
 	spinlock_t rt_vote_lock;
 	uint32_t rt_votes;
 	uint32_t num_pw_states;
+	uint32_t readback;
 	unsigned long *ramp_time_us;
 	struct mailbox_config_info *mailbox;
 };
@@ -270,6 +272,7 @@
 	 * Any data associated with this event must be visable to the remote
 	 * before the interrupt is triggered
 	 */
+	einfo->readback = einfo->tx_ch_desc->write_index;
 	wmb();
 	writel_relaxed(einfo->out_irq_mask, einfo->out_irq_reg);
 	if (einfo->remote_proc_id != SMEM_SPSS)
@@ -2321,17 +2324,40 @@
 	return -ENODEV;
 }
 
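+/* Pin the transport IRQ and the RX/TX worker threads to the CPUs in @arr. */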
+static void glink_set_affinity(struct edge_info *einfo, u32 *arr, size_t size)
+{
+	struct cpumask cpumask;
+	pid_t pid;
+	int i;
+
+	cpumask_clear(&cpumask);
+	for (i = 0; i < size; i++) {
+		if (arr[i] < num_possible_cpus())
+			cpumask_set_cpu(arr[i], &cpumask);
+	}
+	if (irq_set_affinity(einfo->irq_line, &cpumask))
+		pr_err("%s: Failed to set irq affinity\n", __func__);
+
+	if (sched_setaffinity(einfo->task->pid, &cpumask))
+		pr_err("%s: Failed to set rx cpu affinity\n", __func__);
+
+	pid = einfo->xprt_cfg.tx_task->pid;
+	if (sched_setaffinity(pid, &cpumask))
+		pr_err("%s: Failed to set tx cpu affinity\n", __func__);
+}
+
 static int glink_smem_native_probe(struct platform_device *pdev)
 {
 	struct device_node *node;
 	struct device_node *phandle_node;
 	struct edge_info *einfo;
-	int rc;
+	int rc, cpu_size;
 	char *key;
 	const char *subsys_name;
 	uint32_t irq_line;
 	uint32_t irq_mask;
 	struct resource *r;
+	u32 *cpu_array;
 
 	node = pdev->dev.of_node;
 
@@ -2478,6 +2504,20 @@
 		pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
 								irq_line);
 
+	key = "cpu-affinity";
+	cpu_size = of_property_count_u32_elems(node, key);
+	if (cpu_size > 0) {
+		cpu_array = kmalloc_array(cpu_size, sizeof(u32), GFP_KERNEL);
+		if (!cpu_array) {
+			rc = -ENOMEM;
+			goto request_irq_fail;
+		}
+		rc = of_property_read_u32_array(node, key, cpu_array, cpu_size);
+		if (!rc)
+			glink_set_affinity(einfo, cpu_array, cpu_size);
+		kfree(cpu_array);
+	}
+
 	register_debugfs_info(einfo);
 	/* fake an interrupt on this edge to see if the remote side is up */
 	irq_handler(0, einfo);
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 1794278..e391cd1 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -2329,7 +2329,7 @@
 	if (!test_bit(ICNSS_WLFW_EXISTS, &priv->state))
 		goto out;
 
-	if (test_bit(ICNSS_PD_RESTART, &priv->state)) {
+	if (test_bit(ICNSS_PD_RESTART, &priv->state) && event_data->crashed) {
 		icnss_pr_err("PD Down while recovery inprogress, crashed: %d, state: 0x%lx\n",
 			     event_data->crashed, priv->state);
 		ICNSS_ASSERT(0);
@@ -2476,7 +2476,7 @@
 	icnss_pr_vdbg("Modem-Notify: event %lu\n", code);
 
 	if (code == SUBSYS_AFTER_SHUTDOWN &&
-			notif->crashed != CRASH_STATUS_WDOG_BITE) {
+	    notif->crashed == CRASH_STATUS_ERR_FATAL) {
 		ret = icnss_assign_msa_perm_all(priv,
 						ICNSS_MSA_PERM_HLOS_ALL);
 		if (!ret) {
@@ -2494,8 +2494,17 @@
 	if (code != SUBSYS_BEFORE_SHUTDOWN)
 		return NOTIFY_OK;
 
-	if (test_bit(ICNSS_PDR_REGISTERED, &priv->state))
+	if (test_bit(ICNSS_PDR_REGISTERED, &priv->state)) {
+		set_bit(ICNSS_FW_DOWN, &priv->state);
+		icnss_ignore_qmi_timeout(true);
+
+		fw_down_data.crashed = !!notif->crashed;
+		if (test_bit(ICNSS_FW_READY, &priv->state))
+			icnss_call_driver_uevent(priv,
+						 ICNSS_UEVENT_FW_DOWN,
+						 &fw_down_data);
 		return NOTIFY_OK;
+	}
 
 	icnss_pr_info("Modem went down, state: 0x%lx, crashed: %d\n",
 		      priv->state, notif->crashed);
@@ -2629,14 +2638,18 @@
 	icnss_pr_info("PD service down, pd_state: %d, state: 0x%lx: cause: %s\n",
 		      *state, priv->state, icnss_pdr_cause[cause]);
 event_post:
-	set_bit(ICNSS_FW_DOWN, &priv->state);
-	icnss_ignore_qmi_timeout(true);
-	clear_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state);
+	if (!test_bit(ICNSS_FW_DOWN, &priv->state)) {
+		set_bit(ICNSS_FW_DOWN, &priv->state);
+		icnss_ignore_qmi_timeout(true);
 
-	fw_down_data.crashed = event_data->crashed;
-	if (test_bit(ICNSS_FW_READY, &priv->state))
-		icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_DOWN,
-					 &fw_down_data);
+		fw_down_data.crashed = event_data->crashed;
+		if (test_bit(ICNSS_FW_READY, &priv->state))
+			icnss_call_driver_uevent(priv,
+						 ICNSS_UEVENT_FW_DOWN,
+						 &fw_down_data);
+	}
+
+	clear_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state);
 	icnss_driver_event_post(ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
 				ICNSS_EVENT_SYNC, event_data);
 done:
diff --git a/drivers/soc/qcom/llcc-sdm670.c b/drivers/soc/qcom/llcc-sdm670.c
index 494b93b..aaed9ee 100644
--- a/drivers/soc/qcom/llcc-sdm670.c
+++ b/drivers/soc/qcom/llcc-sdm670.c
@@ -63,7 +63,7 @@
 	SCT_ENTRY("audio", 6, 6, 512, 1, 0, 0xF, 0x0, 0, 0, 1, 1, 0),
 	SCT_ENTRY("modem",  8, 8, 512, 1, 0, 0xF,  0x0, 0, 0, 1, 1, 0),
 	SCT_ENTRY("gpu", 12, 12, 384, 1, 1, 0x0, 0x0, 0, 0, 1, 1, 0),
-	SCT_ENTRY("mmuhwt", 13, 13, 512, 1, 0, 0x0, 0x8, 0, 0, 1, 0, 1),
+	SCT_ENTRY("mmuhwt", 13, 13, 512, 1, 0, 0xF, 0x0, 0, 0, 1, 0, 1),
 	SCT_ENTRY("audiohw", 22, 22, 512, 1, 1, 0xF, 0x0, 0, 0, 1, 1, 0),
 };
 
diff --git a/drivers/soc/qcom/lpm-stats.c b/drivers/soc/qcom/lpm-stats.c
index ee68433..4a41eee 100644
--- a/drivers/soc/qcom/lpm-stats.c
+++ b/drivers/soc/qcom/lpm-stats.c
@@ -21,6 +21,7 @@
 #include <linux/debugfs.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/smp.h>
 #include <linux/suspend.h>
 #include <soc/qcom/spm.h>
 #include <soc/qcom/pm.h>
@@ -45,7 +46,7 @@
 	int64_t max_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
 	int success_count;
 	int failed_count;
-	int64_t total_time;
+	uint64_t total_time;
 	uint64_t enter_time;
 };
 
@@ -104,7 +105,7 @@
 	int i = 0;
 	int64_t bucket_time = 0;
 	char seqs[MAX_STR_LEN] = {0};
-	int64_t s = stats->total_time;
+	uint64_t s = stats->total_time;
 	uint32_t ns = do_div(s, NSEC_PER_SEC);
 
 	snprintf(seqs, MAX_STR_LEN,
@@ -255,6 +256,15 @@
 	return count;
 }
 
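+/* Runs on each CPU via on_each_cpu() to clear that CPU's idle-state stats. */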
+static void reset_cpu_stats(void *info)
+{
+	struct lpm_stats *stats = &(*this_cpu_ptr(&(cpu_stats)));
+	int i;
+
+	for (i = 0; i < stats->num_levels; i++)
+		level_stats_reset(&stats->time_stats[i]);
+}
+
 static ssize_t lpm_stats_file_write(struct file *file,
 	const char __user *buffer, size_t count, loff_t *off)
 {
@@ -276,6 +286,12 @@
 		return -EINVAL;
 
 	level_stats_reset_all(stats);
+	/*
+	 * Wake up each CPU and reset its stats from that CPU so the
+	 * timestamps used for accounting are more accurate.
+	 */
+	on_each_cpu(reset_cpu_stats, NULL, 1);
 
 	return count;
 }
diff --git a/drivers/soc/qcom/memory_dump_v2.c b/drivers/soc/qcom/memory_dump_v2.c
index 5ed66bf..5873f5c 100644
--- a/drivers/soc/qcom/memory_dump_v2.c
+++ b/drivers/soc/qcom/memory_dump_v2.c
@@ -38,7 +38,18 @@
 	struct msm_dump_table *table;
 };
 
+struct dump_vaddr_entry {
+	uint32_t id;
+	void *dump_vaddr;
+};
+
+struct msm_mem_dump_vaddr_tbl {
+	uint8_t num_node;
+	struct dump_vaddr_entry *entries;
+};
+
 static struct msm_memory_dump memdump;
+static struct msm_mem_dump_vaddr_tbl vaddr_tbl;
 
 uint32_t msm_dump_table_version(void)
 {
@@ -113,6 +124,28 @@
 }
 EXPORT_SYMBOL(msm_dump_data_register);
 
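+/*
+ * Look up the kernel virtual address registered for a dump id in the
+ * vaddr table; returns NULL if the id is unknown or the table is absent.
+ */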
+void *get_msm_dump_ptr(enum msm_dump_data_ids id)
+{
+	int i;
+
+	if (!vaddr_tbl.entries)
+		return NULL;
+
+	if (id > MSM_DUMP_DATA_MAX)
+		return NULL;
+
+	for (i = 0; i < vaddr_tbl.num_node; i++) {
+		if (vaddr_tbl.entries[i].id == id)
+			break;
+	}
+
+	if (i == vaddr_tbl.num_node)
+		return NULL;
+
+	return (void *)vaddr_tbl.entries[i].dump_vaddr;
+}
+EXPORT_SYMBOL(get_msm_dump_ptr);
+
 static int __init init_memory_dump(void)
 {
 	struct msm_dump_table *table;
@@ -209,6 +242,14 @@
 	struct msm_dump_entry dump_entry;
 	int ret;
 	u32 size, id;
+	int i = 0;
+
+	vaddr_tbl.num_node = of_get_child_count(node);
+	vaddr_tbl.entries = devm_kcalloc(&pdev->dev, vaddr_tbl.num_node,
+				 sizeof(struct dump_vaddr_entry),
+				 GFP_KERNEL);
+	if (!vaddr_tbl.entries)
+		dev_err(&pdev->dev, "Unable to allocate mem for ptr addr\n");
 
 	for_each_available_child_of_node(node, child_node) {
 		ret = of_property_read_u32(child_node, "qcom,dump-size", &size);
@@ -254,6 +295,10 @@
 			dma_free_coherent(&pdev->dev, size, dump_vaddr,
 					dump_addr);
 			devm_kfree(&pdev->dev, dump_data);
+		} else if (vaddr_tbl.entries) {
+			vaddr_tbl.entries[i].id = id;
+			vaddr_tbl.entries[i].dump_vaddr = dump_vaddr;
+			i++;
 		}
 	}
 	return 0;
diff --git a/drivers/soc/qcom/pil-q6v5.c b/drivers/soc/qcom/pil-q6v5.c
index 6d6b9f7..d23b050 100644
--- a/drivers/soc/qcom/pil-q6v5.c
+++ b/drivers/soc/qcom/pil-q6v5.c
@@ -87,7 +87,7 @@
 #define MSS_STATUS			(0x40)
 #define QDSP6SS_SLEEP			(0x3C)
 #define SLEEP_CHECK_MAX_LOOPS		(200)
-#define BOOT_FSM_TIMEOUT		(100)
+#define BOOT_FSM_TIMEOUT		(10000)
 
 #define QDSP6SS_ACC_OVERRIDE_VAL	0x20
 
@@ -411,7 +411,7 @@
 
 	/* Wait for boot FSM to complete */
 	ret = readl_poll_timeout(drv->rmb_base + MSS_STATUS, val,
-			(val & BIT(1)) != 0, 10, BOOT_FSM_TIMEOUT);
+			(val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
 
 	if (ret) {
 		dev_err(drv->desc.dev, "Boot FSM failed to complete.\n");
diff --git a/drivers/soc/qcom/qdss_bridge.c b/drivers/soc/qcom/qdss_bridge.c
new file mode 100644
index 0000000..8668155
--- /dev/null
+++ b/drivers/soc/qcom/qdss_bridge.c
@@ -0,0 +1,463 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define KMSG_COMPONENT "QDSS diag bridge"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/moduleparam.h>
+#include <linux/msm_mhi.h>
+#include <linux/usb/usb_qdss.h>
+#include "qdss_bridge.h"
+
+#define MODULE_NAME "qdss_bridge"
+
+#define QDSS_BUF_SIZE		(16*1024)
+#define MHI_CLIENT_QDSS_IN	9
+
+/* Max number of objects needed */
+static int poolsize = 32;
+module_param(poolsize, int, 0644);
+
+/* Size of single buffer */
+static int itemsize = QDSS_BUF_SIZE;
+module_param(itemsize, int, 0644);
+
+static int qdss_destroy_buf_tbl(struct qdss_bridge_drvdata *drvdata)
+{
+	struct list_head *start, *temp;
+	struct qdss_buf_tbl_lst *entry = NULL;
+
+	list_for_each_safe(start, temp, &drvdata->buf_tbl) {
+		entry = list_entry(start, struct qdss_buf_tbl_lst, link);
+		list_del(&entry->link);
+		kfree(entry->buf);
+		kfree(entry->usb_req);
+		kfree(entry);
+	}
+
+	return 0;
+}
+
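+/* Pre-allocate 'poolsize' buffers and matching USB requests for MHI reads. */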
+static int qdss_create_buf_tbl(struct qdss_bridge_drvdata *drvdata)
+{
+	struct qdss_buf_tbl_lst *entry;
+	void *buf;
+	struct qdss_request *usb_req;
+	int i;
+
+	for (i = 0; i < poolsize; i++) {
+		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+		if (!entry)
+			goto err;
+
+		buf = kzalloc(QDSS_BUF_SIZE, GFP_KERNEL);
+		usb_req = kzalloc(sizeof(*usb_req), GFP_KERNEL);
+
+		entry->buf = buf;
+		entry->usb_req = usb_req;
+		atomic_set(&entry->available, 1);
+		list_add_tail(&entry->link, &drvdata->buf_tbl);
+
+		if (!buf || !usb_req)
+			goto err;
+	}
+
+	return 0;
+err:
+	qdss_destroy_buf_tbl(drvdata);
+	return -ENOMEM;
+}
+
+struct qdss_buf_tbl_lst *qdss_get_buf_tbl_entry(
+					struct qdss_bridge_drvdata *drvdata,
+					void *buf)
+{
+	struct qdss_buf_tbl_lst *entry;
+
+	list_for_each_entry(entry, &drvdata->buf_tbl, link) {
+		if (atomic_read(&entry->available))
+			continue;
+		if (entry->buf == buf)
+			return entry;
+	}
+
+	return NULL;
+}
+
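+/* Claim a free buffer table entry; returns NULL when all buffers are in use. */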
+struct qdss_buf_tbl_lst *qdss_get_entry(struct qdss_bridge_drvdata *drvdata)
+{
+	struct qdss_buf_tbl_lst *item;
+
+	list_for_each_entry(item, &drvdata->buf_tbl, link)
+		if (atomic_cmpxchg(&item->available, 1, 0) == 1)
+			return item;
+
+	return NULL;
+}
+
+static void qdss_buf_tbl_remove(struct qdss_bridge_drvdata *drvdata,
+				void *buf)
+{
+	struct qdss_buf_tbl_lst *entry = NULL;
+
+	list_for_each_entry(entry, &drvdata->buf_tbl, link) {
+		if (entry->buf != buf)
+			continue;
+		atomic_set(&entry->available, 1);
+		return;
+	}
+
+	pr_err_ratelimited("Failed to find buffer for removal\n");
+}
+
+static void mhi_ch_close(struct qdss_bridge_drvdata *drvdata)
+{
+	flush_workqueue(drvdata->mhi_wq);
+	qdss_destroy_buf_tbl(drvdata);
+	mhi_close_channel(drvdata->hdl);
+}
+
+static void mhi_close_work_fn(struct work_struct *work)
+{
+	struct qdss_bridge_drvdata *drvdata =
+				container_of(work,
+					     struct qdss_bridge_drvdata,
+					     close_work);
+
+	usb_qdss_close(drvdata->usb_ch);
+	mhi_ch_close(drvdata);
+}
+
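+/* Queue every available buffer to MHI to receive inbound QDSS data. */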
+static void mhi_read_work_fn(struct work_struct *work)
+{
+	int err = 0;
+	enum MHI_FLAGS mhi_flags = MHI_EOT;
+	struct qdss_buf_tbl_lst *entry;
+
+	struct qdss_bridge_drvdata *drvdata =
+				container_of(work,
+					     struct qdss_bridge_drvdata,
+					     read_work);
+
+	do {
+		if (!drvdata->opened)
+			break;
+		entry = qdss_get_entry(drvdata);
+		if (!entry)
+			break;
+
+		err = mhi_queue_xfer(drvdata->hdl, entry->buf, QDSS_BUF_SIZE,
+				      mhi_flags);
+		if (err) {
+			pr_err_ratelimited("Unable to read from MHI buffer err:%d\n",
+					   err);
+			goto fail;
+		}
+	} while (entry);
+
+	return;
+fail:
+	qdss_buf_tbl_remove(drvdata, entry->buf);
+	queue_work(drvdata->mhi_wq, &drvdata->read_work);
+}
+
+static int mhi_queue_read(struct qdss_bridge_drvdata *drvdata)
+{
+	queue_work(drvdata->mhi_wq, &(drvdata->read_work));
+	return 0;
+}
+
+static int usb_write(struct qdss_bridge_drvdata *drvdata,
+			     struct mhi_result *result)
+{
+	int ret = 0;
+	struct qdss_buf_tbl_lst *entry;
+
+	entry = qdss_get_buf_tbl_entry(drvdata, result->buf_addr);
+	if (!entry)
+		return -EINVAL;
+
+	entry->usb_req->buf = result->buf_addr;
+	entry->usb_req->length = result->bytes_xferd;
+	ret = usb_qdss_data_write(drvdata->usb_ch, entry->usb_req);
+
+	return ret;
+}
+
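+/* Drain completed MHI transfers and forward each buffer to the USB channel. */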
+static void mhi_read_done_work_fn(struct work_struct *work)
+{
+	unsigned char *buf = NULL;
+	struct mhi_result result;
+	int err = 0;
+	struct qdss_bridge_drvdata *drvdata =
+				container_of(work,
+					     struct qdss_bridge_drvdata,
+					     read_done_work);
+
+	do {
+		err = mhi_poll_inbound(drvdata->hdl, &result);
+		if (err) {
+			pr_debug("MHI poll failed err:%d\n", err);
+			break;
+		}
+		buf = result.buf_addr;
+		if (!buf)
+			break;
+		err = usb_write(drvdata, &result);
+		if (err)
+			qdss_buf_tbl_remove(drvdata, buf);
+	} while (1);
+}
+
+static void usb_write_done(struct qdss_bridge_drvdata *drvdata,
+				   struct qdss_request *d_req)
+{
+	if (d_req->status) {
+		pr_err_ratelimited("USB write failed err:%d\n", d_req->status);
+		mhi_queue_read(drvdata);
+		return;
+	}
+	qdss_buf_tbl_remove(drvdata, d_req->buf);
+	mhi_queue_read(drvdata);
+}
+
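+/* USB QDSS channel callback: handles connect, disconnect and write-done. */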
+static void usb_notifier(void *priv, unsigned int event,
+			struct qdss_request *d_req, struct usb_qdss_ch *ch)
+{
+	struct qdss_bridge_drvdata *drvdata = priv;
+
+	if (!drvdata)
+		return;
+
+	switch (event) {
+	case USB_QDSS_CONNECT:
+		usb_qdss_alloc_req(drvdata->usb_ch, poolsize, 0);
+		mhi_queue_read(drvdata);
+		break;
+
+	case USB_QDSS_DISCONNECT:
+		/* Leave MHI/USB open. Only close on MHI disconnect. */
+		break;
+
+	case USB_QDSS_DATA_WRITE_DONE:
+		usb_write_done(drvdata, d_req);
+		break;
+
+	default:
+		break;
+	}
+}
+
+static int mhi_ch_open(struct qdss_bridge_drvdata *drvdata)
+{
+	int ret;
+
+	if (drvdata->opened)
+		return 0;
+
+	ret = mhi_open_channel(drvdata->hdl);
+	if (ret) {
+		pr_err("Unable to open MHI channel\n");
+		return ret;
+	}
+
+	ret = mhi_get_free_desc(drvdata->hdl);
+	if (ret <= 0)
+		return -EIO;
+
+	drvdata->opened = 1;
+	return 0;
+}
+
+static void qdss_bridge_open_work_fn(struct work_struct *work)
+{
+	struct qdss_bridge_drvdata *drvdata =
+				container_of(work,
+					     struct qdss_bridge_drvdata,
+					     open_work);
+	int ret;
+
+	ret = mhi_ch_open(drvdata);
+	if (ret)
+		goto err_open;
+
+	ret = qdss_create_buf_tbl(drvdata);
+	if (ret)
+		goto err;
+
+	drvdata->usb_ch = usb_qdss_open("qdss_mdm", drvdata, usb_notifier);
+	if (IS_ERR_OR_NULL(drvdata->usb_ch)) {
+		ret = PTR_ERR(drvdata->usb_ch);
+		goto err;
+	}
+
+	return;
+err:
+	mhi_ch_close(drvdata);
+err_open:
+	pr_err("Open work failed with err:%d\n", ret);
+}
+
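+/*
+ * MHI callback: open the bridge when MHI is enabled, drain completed
+ * transfers on MHI_CB_XFER and tear down when MHI is disabled.
+ */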
+static void mhi_notifier(struct mhi_cb_info *cb_info)
+{
+	struct mhi_result *result;
+	struct qdss_bridge_drvdata *drvdata;
+
+	if (!cb_info)
+		return;
+
+	result = cb_info->result;
+	if (!result) {
+		pr_err_ratelimited("Failed to obtain MHI result\n");
+		return;
+	}
+
+	drvdata = (struct qdss_bridge_drvdata *)cb_info->result->user_data;
+	if (!drvdata) {
+		pr_err_ratelimited("MHI returned invalid drvdata\n");
+		return;
+	}
+
+	switch (cb_info->cb_reason) {
+	case MHI_CB_MHI_ENABLED:
+		queue_work(drvdata->mhi_wq, &drvdata->open_work);
+		break;
+
+	case MHI_CB_XFER:
+		if (!drvdata->opened)
+			break;
+
+		queue_work(drvdata->mhi_wq, &drvdata->read_done_work);
+		break;
+
+	case MHI_CB_MHI_DISABLED:
+		if (!drvdata->opened)
+			break;
+
+		drvdata->opened = 0;
+		queue_work(drvdata->mhi_wq, &drvdata->close_work);
+		break;
+
+	default:
+		pr_err_ratelimited("MHI returned invalid cb reason 0x%x\n",
+		       cb_info->cb_reason);
+		break;
+	}
+}
+
+static int qdss_mhi_register_ch(struct qdss_bridge_drvdata *drvdata)
+{
+	struct mhi_client_info_t *client_info;
+	int ret;
+	struct mhi_client_info_t *mhi_info;
+
+	client_info = devm_kzalloc(drvdata->dev, sizeof(*client_info),
+				   GFP_KERNEL);
+	if (!client_info)
+		return -ENOMEM;
+
+	client_info->mhi_client_cb = mhi_notifier;
+	drvdata->client_info = client_info;
+
+	mhi_info = client_info;
+	mhi_info->chan = MHI_CLIENT_QDSS_IN;
+	mhi_info->dev = drvdata->dev;
+	mhi_info->node_name = "qcom,mhi";
+	mhi_info->user_data = drvdata;
+
+	ret = mhi_register_channel(&drvdata->hdl, mhi_info);
+	return ret;
+}
+
+int qdss_mhi_init(struct qdss_bridge_drvdata *drvdata)
+{
+	int ret;
+
+	drvdata->mhi_wq = create_singlethread_workqueue(MODULE_NAME);
+	if (!drvdata->mhi_wq)
+		return -ENOMEM;
+
+	INIT_WORK(&(drvdata->read_work), mhi_read_work_fn);
+	INIT_WORK(&(drvdata->read_done_work), mhi_read_done_work_fn);
+	INIT_WORK(&(drvdata->open_work), qdss_bridge_open_work_fn);
+	INIT_WORK(&(drvdata->close_work), mhi_close_work_fn);
+	INIT_LIST_HEAD(&drvdata->buf_tbl);
+	drvdata->opened = 0;
+
+	ret = qdss_mhi_register_ch(drvdata);
+	if (ret) {
+		destroy_workqueue(drvdata->mhi_wq);
+		pr_err("Unable to register MHI read channel err:%d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int qdss_mhi_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct device *dev = &pdev->dev;
+	struct qdss_bridge_drvdata *drvdata;
+
+	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata) {
+		ret = -ENOMEM;
+		return ret;
+	}
+
+	drvdata->dev = &pdev->dev;
+	platform_set_drvdata(pdev, drvdata);
+
+	ret = qdss_mhi_init(drvdata);
+	if (ret)
+		goto err;
+
+	return 0;
+err:
+	pr_err("Device probe failed err:%d\n", ret);
+	return ret;
+}
+
+static const struct of_device_id qdss_mhi_table[] = {
+	{.compatible = "qcom,qdss-mhi"},
+	{},
+};
+
+static struct platform_driver qdss_mhi_driver = {
+	.probe = qdss_mhi_probe,
+	.driver = {
+		.name = MODULE_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = qdss_mhi_table,
+	},
+};
+
+static int __init qdss_bridge_init(void)
+{
+	return platform_driver_register(&qdss_mhi_driver);
+}
+
+static void __exit qdss_bridge_exit(void)
+{
+	platform_driver_unregister(&qdss_mhi_driver);
+}
+
+module_init(qdss_bridge_init);
+module_exit(qdss_bridge_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QDSS Bridge driver");
diff --git a/drivers/soc/qcom/qdss_bridge.h b/drivers/soc/qcom/qdss_bridge.h
new file mode 100644
index 0000000..97b9c40
--- /dev/null
+++ b/drivers/soc/qcom/qdss_bridge.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QDSS_BRIDGE_H
+#define _QDSS_BRIDGE_H
+
+struct qdss_buf_tbl_lst {
+	struct list_head link;
+	unsigned char *buf;
+	struct qdss_request *usb_req;
+	atomic_t available;
+};
+
+struct qdss_bridge_drvdata {
+	struct device *dev;
+	bool opened;
+	struct work_struct read_work;
+	struct work_struct read_done_work;
+	struct work_struct open_work;
+	struct work_struct close_work;
+	struct workqueue_struct *mhi_wq;
+	struct mhi_client_handle *hdl;
+	struct mhi_client_info_t *client_info;
+	struct list_head buf_tbl;
+	struct usb_qdss_ch *usb_ch;
+};
+
+#endif
diff --git a/drivers/soc/qcom/service-locator.c b/drivers/soc/qcom/service-locator.c
index 57f38d3..9dfe281 100644
--- a/drivers/soc/qcom/service-locator.c
+++ b/drivers/soc/qcom/service-locator.c
@@ -149,11 +149,10 @@
 
 	do {
 		pr_debug("Notified about a Receive event\n");
-		ret = qmi_recv_msg(service_locator.clnt_handle);
-		if (ret < 0)
-			pr_err("Error receiving message rc:%d. Retrying...\n",
-								ret);
-	} while (ret == 0);
+	} while ((ret = qmi_recv_msg(service_locator.clnt_handle)) == 0);
+
+	if (ret != -ENOMSG)
+		pr_err("Error receiving message rc:%d\n", ret);
 
 }
 
@@ -190,7 +189,7 @@
 	 */
 	rc = qmi_send_req_wait(service_locator.clnt_handle, req_desc, req,
 		sizeof(*req), resp_desc, resp, sizeof(*resp),
-		msecs_to_jiffies(QMI_SERVREG_LOC_SERVER_TIMEOUT));
+		QMI_SERVREG_LOC_SERVER_TIMEOUT);
 	if (rc < 0) {
 		pr_err("QMI send req failed for client %s, ret - %d\n",
 			pd->client_name, rc);
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index c35119c..9af39e1 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -66,6 +66,7 @@
 	HW_PLATFORM_RCM	= 21,
 	HW_PLATFORM_STP = 23,
 	HW_PLATFORM_SBC = 24,
+	HW_PLATFORM_HDK = 31,
 	HW_PLATFORM_INVALID
 };
 
@@ -86,6 +87,7 @@
 	[HW_PLATFORM_DTV] = "DTV",
 	[HW_PLATFORM_STP] = "STP",
 	[HW_PLATFORM_SBC] = "SBC",
+	[HW_PLATFORM_HDK] = "HDK",
 };
 
 enum {
@@ -578,6 +580,13 @@
 	/* SDA670 ID */
 	[337] = {MSM_CPU_SDA670, "SDA670"},
 
+	/* 8953 ID */
+	[293] = {MSM_CPU_8953, "MSM8953"},
+	[304] = {MSM_CPU_8953, "APQ8053"},
+
+	/* SDM450 ID */
+	[338] = {MSM_CPU_SDM450, "SDM450"},
+
 	/* Uninitialized IDs are not known to run Linux.
 	 * MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are
 	 * considered as unknown CPU.
@@ -1452,6 +1461,14 @@
 		dummy_socinfo.id = 334;
 		strlcpy(dummy_socinfo.build_id, "sdxpoorwills - ",
 			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msm8953()) {
+		dummy_socinfo.id = 293;
+		strlcpy(dummy_socinfo.build_id, "msm8953 - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_sdm450()) {
+		dummy_socinfo.id = 338;
+		strlcpy(dummy_socinfo.build_id, "sdm450 - ",
+			sizeof(dummy_socinfo.build_id));
 	}
 
 	strlcat(dummy_socinfo.build_id, "Dummy socinfo",
diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c
index 3ea4be6..d65756c 100644
--- a/drivers/soc/qcom/subsys-pil-tz.c
+++ b/drivers/soc/qcom/subsys-pil-tz.c
@@ -1127,23 +1127,55 @@
 		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 						"sp2soc_irq_status");
 		d->irq_status = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(d->irq_status)) {
+			dev_err(&pdev->dev, "Invalid resource for sp2soc_irq_status\n");
+			rc = PTR_ERR(d->irq_status);
+			goto err_ramdump;
+		}
+
 		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 						"sp2soc_irq_clr");
 		d->irq_clear = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(d->irq_clear)) {
+			dev_err(&pdev->dev, "Invalid resource for sp2soc_irq_clr\n");
+			rc = PTR_ERR(d->irq_clear);
+			goto err_ramdump;
+		}
+
 		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 						"sp2soc_irq_mask");
 		d->irq_mask = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(d->irq_mask)) {
+			dev_err(&pdev->dev, "Invalid resource for sp2soc_irq_mask\n");
+			rc = PTR_ERR(d->irq_mask);
+			goto err_ramdump;
+		}
+
 		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 						"rmb_err");
 		d->err_status = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(d->err_status)) {
+			dev_err(&pdev->dev, "Invalid resource for rmb_err\n");
+			rc = PTR_ERR(d->err_status);
+			goto err_ramdump;
+		}
+
 		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 						"rmb_err_spare2");
 		d->err_status_spare = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(d->err_status_spare)) {
+			dev_err(&pdev->dev, "Invalid resource for rmb_err_spare2\n");
+			rc = PTR_ERR(d->err_status_spare);
+			goto err_ramdump;
+		}
+
 		rc = of_property_read_u32_array(pdev->dev.of_node,
 		       "qcom,spss-scsr-bits", d->bits_arr, sizeof(d->bits_arr)/
 							sizeof(d->bits_arr[0]));
-		if (rc)
+		if (rc) {
 			dev_err(&pdev->dev, "Failed to read qcom,spss-scsr-bits");
+			goto err_ramdump;
+		}
 		mask_scsr_irqs(d);
 
 	} else {
@@ -1186,6 +1218,7 @@
 	destroy_ramdump_device(d->ramdump_dev);
 err_ramdump:
 	pil_desc_release(&d->desc);
+	platform_set_drvdata(pdev, NULL);
 
 	return rc;
 }
diff --git a/drivers/soc/qcom/system_pm.c b/drivers/soc/qcom/system_pm.c
index 2ecbf15..3d978f7 100644
--- a/drivers/soc/qcom/system_pm.c
+++ b/drivers/soc/qcom/system_pm.c
@@ -13,6 +13,7 @@
 
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
+#include <asm/arch_timer.h>
 
 #include <soc/qcom/rpmh.h>
 #include <soc/qcom/system_pm.h>
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index 14f9dea..7d629b4 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -1215,7 +1215,7 @@
 			goto qspi_probe_err;
 		}
 	} else {
-		goto qspi_probe_err;
+		goto qspi_resource_err;
 	}
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi");
@@ -1237,7 +1237,7 @@
 		qspi->base[CHIP_SELECT]  = devm_ioremap_resource(dev, res);
 		if (IS_ERR(qspi->base[CHIP_SELECT])) {
 			ret = PTR_ERR(qspi->base[CHIP_SELECT]);
-			goto qspi_probe_err;
+			goto qspi_resource_err;
 		}
 	}
 
@@ -1245,7 +1245,7 @@
 				GFP_KERNEL);
 	if (!qspi->dev_ids) {
 		ret = -ENOMEM;
-		goto qspi_probe_err;
+		goto qspi_resource_err;
 	}
 
 	for (val = 0; val < num_irqs; val++) {
@@ -1334,8 +1334,9 @@
 	bcm_qspi_hw_uninit(qspi);
 	clk_disable_unprepare(qspi->clk);
 qspi_probe_err:
-	spi_master_put(master);
 	kfree(qspi->dev_ids);
+qspi_resource_err:
+	spi_master_put(master);
 	return ret;
 }
 /* probe function to be called by SoC specific platform driver probe */
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 0948c22..720ac31 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -215,7 +215,6 @@
 
 	buffer->dev = dev;
 	buffer->size = len;
-	buffer->flags = flags;
 	INIT_LIST_HEAD(&buffer->vmas);
 
 	table = heap->ops->map_dma(heap, buffer);
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index b264ec2..7c58e19 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -23,6 +23,7 @@
 #include <linux/err.h>
 #include <linux/dma-mapping.h>
 #include <linux/msm_ion.h>
+#include <linux/of.h>
 
 #include <asm/cacheflush.h>
 #include <soc/qcom/secure_buffer.h>
@@ -59,6 +60,18 @@
 	return 0;
 }
 
+static bool ion_cma_has_kernel_mapping(struct ion_heap *heap)
+{
+	struct device *dev = heap->priv;
+	struct device_node *mem_region;
+
+	mem_region = of_parse_phandle(dev->of_node, "memory-region", 0);
+	if (IS_ERR(mem_region))
+		return false;
+
+	return !of_property_read_bool(mem_region, "no-map");
+}
+
 /* ION CMA heap operations functions */
 static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
 			    unsigned long len, unsigned long align,
@@ -73,6 +86,12 @@
 	if (!info)
 		return ION_CMA_ALLOCATE_FAILED;
 
+	/* Override flags if cached mappings are not supported */
+	if (!ion_cma_has_kernel_mapping(heap)) {
+		flags &= ~((unsigned long)ION_FLAG_CACHED);
+		buffer->flags = flags;
+	}
+
 	if (!ION_IS_CACHED(flags))
 		info->cpu_addr = dma_alloc_writecombine(dev, len,
 							&info->handle,
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 09f7f20..f25bade 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -32,6 +32,7 @@
 #include <linux/cpu_cooling.h>
 #include <linux/sched.h>
 #include <linux/of_device.h>
+#include <linux/suspend.h>
 
 #include <trace/events/thermal.h>
 
@@ -117,10 +118,12 @@
 static DEFINE_IDR(cpufreq_idr);
 static DEFINE_MUTEX(cooling_cpufreq_lock);
 
+static atomic_t in_suspend;
 static unsigned int cpufreq_dev_count;
 static int8_t cpuhp_registered;
 static struct work_struct cpuhp_register_work;
 static struct cpumask cpus_pending_online;
+static struct cpumask cpus_isolated_by_thermal;
 static DEFINE_MUTEX(core_isolate_lock);
 
 static DEFINE_MUTEX(cooling_list_lock);
@@ -218,6 +221,51 @@
 }
 EXPORT_SYMBOL_GPL(cpufreq_cooling_get_level);
 
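+/*
+ * PM notifier: flag suspend so the hotplug-online handler bails out early,
+ * and on resume re-isolate CPUs whose cooling device is still at max_level.
+ */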
+static int cpufreq_cooling_pm_notify(struct notifier_block *nb,
+				unsigned long mode, void *_unused)
+{
+	struct cpufreq_cooling_device *cpufreq_dev;
+	unsigned int cpu;
+
+	switch (mode) {
+	case PM_HIBERNATION_PREPARE:
+	case PM_RESTORE_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		atomic_set(&in_suspend, 1);
+		break;
+	case PM_POST_HIBERNATION:
+	case PM_POST_RESTORE:
+	case PM_POST_SUSPEND:
+		mutex_lock(&cooling_list_lock);
+		mutex_lock(&core_isolate_lock);
+		list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
+			if (cpufreq_dev->cpufreq_state ==
+				cpufreq_dev->max_level) {
+				cpu = cpumask_any(&cpufreq_dev->allowed_cpus);
+				if (cpu_online(cpu) &&
+					!cpumask_test_and_set_cpu(cpu,
+					&cpus_isolated_by_thermal)) {
+					if (sched_isolate_cpu(cpu))
+						cpumask_clear_cpu(cpu,
+						&cpus_isolated_by_thermal);
+				}
+			}
+		}
+		mutex_unlock(&core_isolate_lock);
+		mutex_unlock(&cooling_list_lock);
+
+		atomic_set(&in_suspend, 0);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static struct notifier_block cpufreq_cooling_pm_nb = {
+	.notifier_call = cpufreq_cooling_pm_notify,
+};
+
 static int cpufreq_hp_offline(unsigned int offline_cpu)
 {
 	struct cpufreq_cooling_device *cpufreq_dev;
@@ -228,7 +276,9 @@
 			continue;
 
 		mutex_lock(&core_isolate_lock);
-		if (cpufreq_dev->cpufreq_state == cpufreq_dev->max_level)
+		if ((cpufreq_dev->cpufreq_state == cpufreq_dev->max_level) &&
+			(cpumask_test_and_clear_cpu(offline_cpu,
+			&cpus_isolated_by_thermal)))
 			sched_unisolate_cpu_unlocked(offline_cpu);
 		mutex_unlock(&core_isolate_lock);
 		break;
@@ -243,6 +293,9 @@
 	struct cpufreq_cooling_device *cpufreq_dev;
 	int ret = 0;
 
+	if (atomic_read(&in_suspend))
+		return 0;
+
 	mutex_lock(&cooling_list_lock);
 	list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
 		if (!cpumask_test_cpu(online_cpu, &cpufreq_dev->allowed_cpus))
@@ -677,8 +730,13 @@
 	cpufreq_device->cpufreq_state = state;
 	/* If state is the last, isolate the CPU */
 	if (state == cpufreq_device->max_level) {
-		if (cpu_online(cpu))
-			sched_isolate_cpu(cpu);
+		if (cpu_online(cpu) &&
+			(!cpumask_test_and_set_cpu(cpu,
+			&cpus_isolated_by_thermal))) {
+			if (sched_isolate_cpu(cpu))
+				cpumask_clear_cpu(cpu,
+					&cpus_isolated_by_thermal);
+		}
 		mutex_unlock(&core_isolate_lock);
 		return ret;
 	} else if ((prev_state == cpufreq_device->max_level)
@@ -695,8 +753,10 @@
 			if (ret)
 				pr_err("CPU:%d online error:%d\n", cpu, ret);
 			goto update_frequency;
-		} else
+		} else if (cpumask_test_and_clear_cpu(cpu,
+			&cpus_isolated_by_thermal)) {
 			sched_unisolate_cpu(cpu);
+		}
 	}
 	mutex_unlock(&core_isolate_lock);
 update_frequency:
@@ -1105,12 +1165,14 @@
 	mutex_unlock(&cooling_list_lock);
 
 	/* Register the notifier for first cpufreq cooling device */
-	if (!cpufreq_dev_count++)
+	if (!cpufreq_dev_count++ && !cpufreq_dev->plat_ops)
 		cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
 					  CPUFREQ_POLICY_NOTIFIER);
 	if (!cpuhp_registered) {
 		cpuhp_registered = 1;
+		register_pm_notifier(&cpufreq_cooling_pm_nb);
 		cpumask_clear(&cpus_pending_online);
+		cpumask_clear(&cpus_isolated_by_thermal);
 		INIT_WORK(&cpuhp_register_work, register_cdev);
 		queue_work(system_wq, &cpuhp_register_work);
 	}
@@ -1285,9 +1347,13 @@
 
 	/* Unregister the notifier for the last cpufreq cooling device */
 	mutex_lock(&cooling_cpufreq_lock);
-	if (!--cpufreq_dev_count)
-		cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
-					    CPUFREQ_POLICY_NOTIFIER);
+	if (!--cpufreq_dev_count) {
+		unregister_pm_notifier(&cpufreq_cooling_pm_nb);
+		if (!cpufreq_dev->plat_ops)
+			cpufreq_unregister_notifier(
+				&thermal_cpufreq_notifier_block,
+				CPUFREQ_POLICY_NOTIFIER);
+	}
 
 	mutex_lock(&cooling_list_lock);
 	list_del(&cpufreq_dev->node);
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index 9c819fb..b142869 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -185,6 +185,8 @@
 static int msm_geni_serial_poll_bit(struct uart_port *uport,
 				int offset, int bit_field, bool set);
 static void msm_geni_serial_stop_rx(struct uart_port *uport);
+static int msm_geni_serial_runtime_resume(struct device *dev);
+static int msm_geni_serial_runtime_suspend(struct device *dev);
 
 static atomic_t uart_line_id = ATOMIC_INIT(0);
 
@@ -246,7 +248,7 @@
 {
 	int usage_count = atomic_read(&uport->dev->power.usage_count);
 
-	return (pm_runtime_suspended(uport->dev) || !usage_count);
+	return (pm_runtime_status_suspended(uport->dev) || !usage_count);
 }
 
 static bool check_transfers_inflight(struct uart_port *uport)
@@ -311,26 +313,24 @@
 static int vote_clock_on(struct uart_port *uport)
 {
 	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
+	int usage_count = atomic_read(&uport->dev->power.usage_count);
 	int ret = 0;
 
-	if (!pm_runtime_enabled(uport->dev)) {
-		dev_err(uport->dev, "RPM not available.Can't enable clocks\n");
-		return -EPERM;
-	}
 	ret = msm_geni_serial_power_on(uport);
 	if (ret) {
 		dev_err(uport->dev, "Failed to vote clock on\n");
 		return ret;
 	}
 	port->ioctl_count++;
-	IPC_LOG_MSG(port->ipc_log_pwr, "%s%s ioctl %d\n", __func__,
-					current->comm, port->ioctl_count);
+	IPC_LOG_MSG(port->ipc_log_pwr, "%s%s ioctl %d usage_count %d\n",
+		__func__, current->comm, port->ioctl_count, usage_count);
 	return 0;
 }
 
 static int vote_clock_off(struct uart_port *uport)
 {
 	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
+	int usage_count = atomic_read(&uport->dev->power.usage_count);
 
 	if (!pm_runtime_enabled(uport->dev)) {
 		dev_err(uport->dev, "RPM not available.Can't enable clocks\n");
@@ -347,8 +347,8 @@
 	wait_for_transfers_inflight(uport);
 	port->ioctl_count--;
 	msm_geni_serial_power_off(uport);
-	IPC_LOG_MSG(port->ipc_log_pwr, "%s%s ioctl %d\n", __func__,
-				current->comm, port->ioctl_count);
+	IPC_LOG_MSG(port->ipc_log_pwr, "%s%s ioctl %d usage_count %d\n",
+		__func__, current->comm, port->ioctl_count, usage_count);
 	return 0;
 };
 
@@ -472,13 +472,37 @@
 	int ret = 0;
 	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
 
-	ret = pm_runtime_get_sync(uport->dev);
-	if (ret < 0) {
-		IPC_LOG_MSG(port->ipc_log_pwr, "%s Err\n", __func__);
-		WARN_ON_ONCE(1);
-		pm_runtime_put_noidle(uport->dev);
-		pm_runtime_set_suspended(uport->dev);
-		return ret;
+	if (!pm_runtime_enabled(uport->dev)) {
+		if (pm_runtime_status_suspended(uport->dev)) {
+			struct uart_state *state = uport->state;
+			struct tty_port *tport = &state->port;
+			int lock = mutex_trylock(&tport->mutex);
+
+			IPC_LOG_MSG(port->ipc_log_pwr,
+					"%s:Manual resume\n", __func__);
+			pm_runtime_disable(uport->dev);
+			ret = msm_geni_serial_runtime_resume(uport->dev);
+			if (ret) {
+				IPC_LOG_MSG(port->ipc_log_pwr,
+					"%s:Manual RPM CB failed %d\n",
+								__func__, ret);
+			} else {
+				pm_runtime_get_noresume(uport->dev);
+				pm_runtime_set_active(uport->dev);
+			}
+			pm_runtime_enable(uport->dev);
+			if (lock)
+				mutex_unlock(&tport->mutex);
+		}
+	} else {
+		ret = pm_runtime_get_sync(uport->dev);
+		if (ret < 0) {
+			IPC_LOG_MSG(port->ipc_log_pwr, "%s Err\n", __func__);
+			WARN_ON_ONCE(1);
+			pm_runtime_put_noidle(uport->dev);
+			pm_runtime_set_suspended(uport->dev);
+			return ret;
+		}
 	}
 	return 0;
 }
@@ -834,8 +858,11 @@
 		goto exit_start_tx;
 	}
 
-	if (!uart_console(uport))
+	if (!uart_console(uport)) {
+		IPC_LOG_MSG(msm_port->ipc_log_misc,
+				"%s.Power on.\n", __func__);
 		pm_runtime_get(uport->dev);
+	}
 
 	if (msm_port->xfer_mode == FIFO_MODE) {
 		geni_status = geni_read_reg_nolog(uport->membase,
@@ -1315,7 +1342,7 @@
 	spin_lock_irqsave(&uport->lock, flags);
 	if (uart_console(uport) && uport->suspended)
 		goto exit_geni_serial_isr;
-	if (!uart_console(uport) && pm_runtime_suspended(uport->dev)) {
+	if (!uart_console(uport) && pm_runtime_status_suspended(uport->dev)) {
 		dev_err(uport->dev, "%s.Device is suspended.\n", __func__);
 		IPC_LOG_MSG(msm_port->ipc_log_misc,
 				"%s.Device is suspended.\n", __func__);
@@ -1503,6 +1530,17 @@
 	spin_unlock_irqrestore(&uport->lock, flags);
 
 	if (!uart_console(uport)) {
+		if (msm_port->ioctl_count) {
+			int i;
+
+			for (i = 0; i < msm_port->ioctl_count; i++) {
+				IPC_LOG_MSG(msm_port->ipc_log_pwr,
+				"%s IOCTL vote present. Forcing off\n",
+								__func__);
+				msm_geni_serial_power_off(uport);
+			}
+			msm_port->ioctl_count = 0;
+		}
 		msm_geni_serial_power_off(uport);
 		if (msm_port->wakeup_irq > 0) {
 			irq_set_irq_wake(msm_port->wakeup_irq, 0);
@@ -1851,11 +1889,8 @@
 	unsigned int is_tx_empty = 1;
 	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
 
-	if (!uart_console(uport) && device_pending_suspend(uport)) {
-		IPC_LOG_MSG(port->ipc_log_pwr,
-			"%s Device suspended,vote clocks on.\n", __func__);
+	if (!uart_console(uport) && device_pending_suspend(uport))
 		return 0;
-	}
 
 	if (port->xfer_mode == SE_DMA)
 		tx_fifo_status = port->tx_dma ? 1 : 0;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index f16491c..ea20b2c 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1773,6 +1773,9 @@
 	{ USB_DEVICE(0xfff0, 0x0100), /* DATECS FP-2000 */
 	.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
 	},
+	{ USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
+	.driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
+	},
 
 	{ USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
 	.driver_info = CLEAR_HALT_CONDITIONS,
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 6e7bafe..701d9f7 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -955,10 +955,12 @@
 	for (i = 0; i < num; i++) {
 		buffer += length;
 		cap = (struct usb_dev_cap_header *)buffer;
-		length = cap->bLength;
 
-		if (total_len < length)
+		if (total_len < sizeof(*cap) || total_len < cap->bLength) {
+			dev->bos->desc->bNumDeviceCaps = i;
 			break;
+		}
+		length = cap->bLength;
 		total_len -= length;
 
 		if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 860108c..c8075eb 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1577,11 +1577,7 @@
 			totlen += isopkt[u].length;
 		}
 		u *= sizeof(struct usb_iso_packet_descriptor);
-		if (totlen <= uurb->buffer_length)
-			uurb->buffer_length = totlen;
-		else
-			WARN_ONCE(1, "uurb->buffer_length is too short %d vs %d",
-				  totlen, uurb->buffer_length);
+		uurb->buffer_length = totlen;
 		break;
 
 	default:
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 1c9fbcb..70c90e4 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2715,13 +2715,16 @@
 	if (!(portstatus & USB_PORT_STAT_CONNECTION))
 		return -ENOTCONN;
 
-	/* bomb out completely if the connection bounced.  A USB 3.0
-	 * connection may bounce if multiple warm resets were issued,
+	/* Retry if connect change is set but status is still connected.
+	 * A USB 3.0 connection may bounce if multiple warm resets were issued,
 	 * but the device may have successfully re-connected. Ignore it.
 	 */
 	if (!hub_is_superspeed(hub->hdev) &&
-			(portchange & USB_PORT_STAT_C_CONNECTION))
-		return -ENOTCONN;
+	    (portchange & USB_PORT_STAT_C_CONNECTION)) {
+		usb_clear_port_feature(hub->hdev, port1,
+				       USB_PORT_FEAT_C_CONNECTION);
+		return -EAGAIN;
+	}
 
 	if (!(portstatus & USB_PORT_STAT_ENABLE))
 		return -EBUSY;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 82806e3..a6aaf2f 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -221,6 +221,10 @@
 	/* Corsair Strafe RGB */
 	{ USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },
 
+	/* MIDI keyboard WORLDE MINI */
+	{ USB_DEVICE(0x1c75, 0x0204), .driver_info =
+			USB_QUIRK_CONFIG_INTF_STRINGS },
+
 	/* Acer C120 LED Projector */
 	{ USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },
 
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 5ca987a..a5e050a 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -690,8 +690,6 @@
 {
 	dwc3_event_buffers_cleanup(dwc);
 
-	usb_phy_shutdown(dwc->usb2_phy);
-	usb_phy_shutdown(dwc->usb3_phy);
 	phy_exit(dwc->usb2_generic_phy);
 	phy_exit(dwc->usb3_generic_phy);
 
@@ -1220,7 +1218,8 @@
 				 &dwc->fladj);
 	dwc->disable_clk_gating = device_property_read_bool(dev,
 				"snps,disable-clk-gating");
-
+	dwc->enable_bus_suspend = device_property_read_bool(dev,
+					"snps,bus-suspend-enable");
 	if (dwc->enable_bus_suspend) {
 		pm_runtime_set_autosuspend_delay(dev, 500);
 		pm_runtime_use_autosuspend(dev);
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 63d0a3e..f511055 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -568,7 +568,6 @@
  * @started_list: list of started requests on this endpoint
  * @lock: spinlock for endpoint request queue traversal
  * @regs: pointer to first endpoint register
- * @trb_dma_pool: dma pool used to get aligned trb memory pool
  * @trb_pool: array of transaction buffers
  * @trb_pool_dma: dma address of @trb_pool
  * @num_trbs: num of trbs in the trb dma pool
@@ -600,7 +599,6 @@
 	spinlock_t		lock;
 	void __iomem		*regs;
 
-	struct dma_pool		*trb_dma_pool;
 	struct dwc3_trb		*trb_pool;
 	dma_addr_t		trb_pool_dma;
 	u32			num_trbs;
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index d0fc511..b6ad39b 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -266,6 +266,7 @@
 	struct pm_qos_request pm_qos_req_dma;
 	struct delayed_work perf_vote_work;
 	struct delayed_work sdp_check;
+	struct mutex suspend_resume_mutex;
 };
 
 #define USB_HSPHY_3P3_VOL_MIN		3050000 /* uV */
@@ -1051,25 +1052,17 @@
 	int num_trbs = (dep->direction) ? (2 * (req->num_bufs) + 2)
 					: (req->num_bufs + 2);
 
-	dep->trb_dma_pool = dma_pool_create(ep->name, dwc->sysdev,
-					num_trbs * sizeof(struct dwc3_trb),
-					num_trbs * sizeof(struct dwc3_trb), 0);
-	if (!dep->trb_dma_pool) {
+	dep->trb_pool = dma_zalloc_coherent(dwc->sysdev,
+				num_trbs * sizeof(struct dwc3_trb),
+				&dep->trb_pool_dma, GFP_KERNEL);
+
+	if (!dep->trb_pool) {
 		dev_err(dep->dwc->dev, "failed to alloc trb dma pool for %s\n",
 				dep->name);
 		return -ENOMEM;
 	}
 
 	dep->num_trbs = num_trbs;
-
-	dep->trb_pool = dma_pool_alloc(dep->trb_dma_pool,
-					   GFP_KERNEL, &dep->trb_pool_dma);
-	if (!dep->trb_pool) {
-		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
-				dep->name);
-		return -ENOMEM;
-	}
-
 	/* IN direction */
 	if (dep->direction) {
 		for (i = 0; i < num_trbs ; i++) {
@@ -1159,18 +1152,19 @@
 static void gsi_free_trbs(struct usb_ep *ep)
 {
 	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	struct dwc3 *dwc = dep->dwc;
 
 	if (dep->endpoint.ep_type == EP_TYPE_NORMAL)
 		return;
 
 	/*  Free TRBs and TRB pool for EP */
-	if (dep->trb_dma_pool) {
-		dma_pool_free(dep->trb_dma_pool, dep->trb_pool,
-						dep->trb_pool_dma);
-		dma_pool_destroy(dep->trb_dma_pool);
+	if (dep->trb_pool_dma) {
+		dma_free_coherent(dwc->sysdev,
+			dep->num_trbs * sizeof(struct dwc3_trb),
+			dep->trb_pool,
+			dep->trb_pool_dma);
 		dep->trb_pool = NULL;
 		dep->trb_pool_dma = 0;
-		dep->trb_dma_pool = NULL;
 	}
 }
 /*
@@ -1852,7 +1846,7 @@
 				break;
 			evt->dwc	= dwc;
 			evt->length	= DWC3_EVENT_BUFFERS_SIZE;
-			evt->buf	= dma_alloc_coherent(dwc->dev,
+			evt->buf	= dma_alloc_coherent(dwc->sysdev,
 						DWC3_EVENT_BUFFERS_SIZE,
 						&evt->dma, GFP_KERNEL);
 			if (!evt->buf) {
@@ -1923,7 +1917,7 @@
 		for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
 			evt = mdwc->gsi_ev_buff[i];
 			if (evt)
-				dma_free_coherent(dwc->dev, evt->length,
+				dma_free_coherent(dwc->sysdev, evt->length,
 							evt->buf, evt->dma);
 		}
 		break;
@@ -2157,8 +2151,10 @@
 	struct dwc3_event_buffer *evt;
 	struct usb_irq *uirq;
 
+	mutex_lock(&mdwc->suspend_resume_mutex);
 	if (atomic_read(&dwc->in_lpm)) {
 		dev_dbg(mdwc->dev, "%s: Already suspended\n", __func__);
+		mutex_unlock(&mdwc->suspend_resume_mutex);
 		return 0;
 	}
 
@@ -2171,6 +2167,7 @@
 			dev_dbg(mdwc->dev,
 				"%s: %d device events pending, abort suspend\n",
 				__func__, evt->count / 4);
+			mutex_unlock(&mdwc->suspend_resume_mutex);
 			return -EBUSY;
 		}
 	}
@@ -2189,6 +2186,7 @@
 		dev_dbg(mdwc->dev,
 			"%s: cable disconnected while not in idle otg state\n",
 			__func__);
+		mutex_unlock(&mdwc->suspend_resume_mutex);
 		return -EBUSY;
 	}
 
@@ -2202,12 +2200,15 @@
 		pr_err("%s(): Trying to go in LPM with state:%d\n",
 					__func__, dwc->gadget.state);
 		pr_err("%s(): LPM is not performed.\n", __func__);
+		mutex_unlock(&mdwc->suspend_resume_mutex);
 		return -EBUSY;
 	}
 
 	ret = dwc3_msm_prepare_suspend(mdwc);
-	if (ret)
+	if (ret) {
+		mutex_unlock(&mdwc->suspend_resume_mutex);
 		return ret;
+	}
 
 	/* Disable core irq */
 	if (dwc->irq)
@@ -2315,6 +2316,7 @@
 	}
 
 	dev_info(mdwc->dev, "DWC3 in low power mode\n");
+	mutex_unlock(&mdwc->suspend_resume_mutex);
 	return 0;
 }
 
@@ -2327,8 +2329,10 @@
 
 	dev_dbg(mdwc->dev, "%s: exiting lpm\n", __func__);
 
+	mutex_lock(&mdwc->suspend_resume_mutex);
 	if (!atomic_read(&dwc->in_lpm)) {
 		dev_dbg(mdwc->dev, "%s: Already resumed\n", __func__);
+		mutex_unlock(&mdwc->suspend_resume_mutex);
 		return 0;
 	}
 
@@ -2479,6 +2483,8 @@
 			msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
 
 	dbg_event(0xFF, "Ctl Res", atomic_read(&dwc->in_lpm));
+	mutex_unlock(&mdwc->suspend_resume_mutex);
+
 	return 0;
 }
 
@@ -2983,8 +2989,8 @@
 	return ret;
 }
 
-#define SMMU_BASE	0x10000000 /* Device address range base */
-#define SMMU_SIZE	0x40000000 /* Device address range size */
+#define SMMU_BASE	0x60000000 /* Device address range base */
+#define SMMU_SIZE	0x90000000 /* Device address range size */
 
 static int dwc3_msm_init_iommu(struct dwc3_msm *mdwc)
 {
@@ -3422,6 +3428,7 @@
 			POWER_SUPPLY_PROP_PRESENT, &pval);
 	}
 
+	mutex_init(&mdwc->suspend_resume_mutex);
 	/* Update initial VBUS/ID state from extcon */
 	if (mdwc->extcon_vbus && extcon_get_state(mdwc->extcon_vbus,
 							EXTCON_USB))
@@ -3791,8 +3798,7 @@
 		dwc3_usb3_phy_suspend(dwc, false);
 		mdwc->in_host_mode = false;
 
-		pm_runtime_mark_last_busy(mdwc->dev);
-		pm_runtime_put_sync_autosuspend(mdwc->dev);
+		pm_runtime_put_sync_suspend(mdwc->dev);
 		dbg_event(0xFF, "StopHost psync",
 			atomic_read(&mdwc->dev->power.usage_count));
 	}
@@ -3897,6 +3903,8 @@
 	if (dwc->maximum_speed == usb_speed)
 		goto err;
 
+	dbg_event(0xFF, "fw_restarthost", 0);
+	flush_delayed_work(&mdwc->sm_work);
 	dbg_event(0xFF, "stop_host_mode", dwc->maximum_speed);
 	ret = dwc3_otg_start_host(mdwc, 0);
 	if (ret)
@@ -4062,7 +4070,7 @@
 			 * which was incremented upon cable connect in
 			 * OTG_STATE_B_IDLE state
 			 */
-			pm_runtime_put_sync(mdwc->dev);
+			pm_runtime_put_sync_suspend(mdwc->dev);
 			dbg_event(0xFF, "!BSV psync",
 				atomic_read(&mdwc->dev->power.usage_count));
 			work = 1;
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 1c33051..4e7de00 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -599,22 +599,30 @@
 		return -EINVAL;
 
 	case USB_STATE_ADDRESS:
-		/* Read ep0IN related TXFIFO size */
-		dwc->last_fifo_depth = (dwc3_readl(dwc->regs,
-					DWC3_GTXFIFOSIZ(0)) & 0xFFFF);
-		/* Clear existing allocated TXFIFO for all IN eps except ep0 */
-		for (num = 0; num < dwc->num_in_eps; num++) {
-			dep = dwc->eps[(num << 1) | 1];
-			if (num) {
-				dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), 0);
-				dep->fifo_depth = 0;
-			} else {
-				dep->fifo_depth = dwc->last_fifo_depth;
-			}
+		/*
+		 * If tx-fifo-resize flag is not set for the controller, then
+		 * do not clear existing allocated TXFIFO since we do not
+		 * allocate it again in dwc3_gadget_resize_tx_fifos
+		 */
+		if (dwc->needs_fifo_resize) {
+			/* Read ep0IN related TXFIFO size */
+			dwc->last_fifo_depth = (dwc3_readl(dwc->regs,
+						DWC3_GTXFIFOSIZ(0)) & 0xFFFF);
+			/* Clear existing TXFIFO for all IN eps except ep0 */
+			for (num = 0; num < dwc->num_in_eps; num++) {
+				dep = dwc->eps[(num << 1) | 1];
+				if (num) {
+					dwc3_writel(dwc->regs,
+						DWC3_GTXFIFOSIZ(num), 0);
+					dep->fifo_depth = 0;
+				} else {
+					dep->fifo_depth = dwc->last_fifo_depth;
+				}
 
-			dev_dbg(dwc->dev, "%s(): %s dep->fifo_depth:%x\n",
+				dev_dbg(dwc->dev, "%s(): %s fifo_depth:%x\n",
 					__func__, dep->name, dep->fifo_depth);
-			dbg_event(0xFF, "fifo_reset", dep->number);
+				dbg_event(0xFF, "fifo_reset", dep->number);
+			}
 		}
 
 		ret = dwc3_ep0_delegate_req(dwc, ctrl);
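A worked example of the TXFIFO depth arithmetic used by the resize path referenced here and computed in the gadget.c hunk that follows. The input values (max_packet, mdwidth, mult, last_fifo_depth) are assumptions picked for illustration, and the sketch omits the base offset the patch additionally reads from GTXFIFOSIZ(0).

/* Sketch: compute the fifo depth and the packed GTXFIFOSIZ-style value. */
#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int max_packet = 1024;		/* wMaxPacketSize, assumed */
	unsigned int mdwidth = 8;		/* bus width in bytes, assumed */
	unsigned int mult = 3;			/* burst multiplier, assumed */
	unsigned int last_fifo_depth = 0x21;	/* running allocation offset, assumed */

	unsigned int tmp = (max_packet + mdwidth) * mult + mdwidth;	/* 3104 */
	unsigned int depth = DIV_ROUND_UP(tmp, mdwidth);		/* 388 */

	/* the register packs the start offset into the upper 16 bits */
	uint32_t reg = (last_fifo_depth << 16) | depth;

	printf("depth=%u reg=0x%08x next_offset=0x%x\n",
	       depth, (unsigned int)reg, last_fifo_depth + depth);
	return 0;
}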
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 4f4f7e8..c12fbf3 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -230,7 +230,8 @@
 	tmp = ((max_packet + mdwidth) * mult) + mdwidth;
 	fifo_size = DIV_ROUND_UP(tmp, mdwidth);
 	dep->fifo_depth = fifo_size;
-	fifo_size |= (dwc->last_fifo_depth << 16);
+	fifo_size |= (dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(0)) & 0xffff0000)
+						+ (dwc->last_fifo_depth << 16);
 	dwc->last_fifo_depth += (fifo_size & 0xffff);
 
 	dev_dbg(dwc->dev, "%s ep_num:%d last_fifo_depth:%04x fifo_depth:%d\n",
@@ -2332,6 +2333,10 @@
 	dwc->gadget_driver = NULL;
 	spin_unlock_irqrestore(&dwc->lock, flags);
 
+	dbg_event(0xFF, "fwq_started", 0);
+	flush_workqueue(dwc->dwc_wq);
+	dbg_event(0xFF, "fwq_completed", 0);
+
 	return 0;
 }
 
@@ -2826,43 +2831,55 @@
 
 static void dwc3_disconnect_gadget(struct dwc3 *dwc)
 {
+	struct usb_gadget_driver *gadget_driver;
+
 	if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
+		gadget_driver = dwc->gadget_driver;
 		spin_unlock(&dwc->lock);
 		dbg_event(0xFF, "DISCONNECT", 0);
-		dwc->gadget_driver->disconnect(&dwc->gadget);
+		gadget_driver->disconnect(&dwc->gadget);
 		spin_lock(&dwc->lock);
 	}
 }
 
 static void dwc3_suspend_gadget(struct dwc3 *dwc)
 {
+	struct usb_gadget_driver *gadget_driver;
+
 	if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
+		gadget_driver = dwc->gadget_driver;
 		spin_unlock(&dwc->lock);
 		dbg_event(0xFF, "SUSPEND", 0);
-		dwc->gadget_driver->suspend(&dwc->gadget);
+		gadget_driver->suspend(&dwc->gadget);
 		spin_lock(&dwc->lock);
 	}
 }
 
 static void dwc3_resume_gadget(struct dwc3 *dwc)
 {
+	struct usb_gadget_driver *gadget_driver;
+
 	if (dwc->gadget_driver && dwc->gadget_driver->resume) {
+		gadget_driver = dwc->gadget_driver;
 		spin_unlock(&dwc->lock);
 		dbg_event(0xFF, "RESUME", 0);
-		dwc->gadget_driver->resume(&dwc->gadget);
+		gadget_driver->resume(&dwc->gadget);
 		spin_lock(&dwc->lock);
 	}
 }
 
 static void dwc3_reset_gadget(struct dwc3 *dwc)
 {
+	struct usb_gadget_driver *gadget_driver;
+
 	if (!dwc->gadget_driver)
 		return;
 
 	if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
+		gadget_driver = dwc->gadget_driver;
 		spin_unlock(&dwc->lock);
 		dbg_event(0xFF, "UDC RESET", 0);
-		usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver);
+		usb_gadget_udc_reset(&dwc->gadget, gadget_driver);
 		spin_lock(&dwc->lock);
 	}
 }
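A userspace sketch of the "snapshot the callback pointer before dropping the lock" pattern used in the gadget.c hunks above. Only the ordering matters: the pointer is read while the lock is held, the lock is dropped for the (possibly sleeping) callback, and the cached copy is used so a concurrent unbind clearing the pointer under the lock cannot cause a NULL dereference. The types and names below are hypothetical.

#include <pthread.h>
#include <stddef.h>

struct gadget_ops {
	void (*disconnect)(void *ctx);
};

struct dev {
	pthread_mutex_t lock;
	const struct gadget_ops *driver;	/* cleared on unbind under lock */
	void *ctx;
};

static void dev_notify_disconnect(struct dev *d)
{
	const struct gadget_ops *drv;

	pthread_mutex_lock(&d->lock);
	drv = d->driver;			/* snapshot under the lock */
	if (drv && drv->disconnect) {
		pthread_mutex_unlock(&d->lock);	/* callback may sleep */
		drv->disconnect(d->ctx);	/* use the cached pointer */
		pthread_mutex_lock(&d->lock);
	}
	pthread_mutex_unlock(&d->lock);
}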
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 885ed26..50e4e44 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -84,6 +84,7 @@
 	struct usb_composite_driver composite;
 	struct usb_composite_dev cdev;
 	bool use_os_desc;
+	bool unbinding;
 	char b_vendor_code;
 	char qw_sign[OS_STRING_QW_SIGN_LEN];
 #ifdef CONFIG_USB_CONFIGFS_UEVENT
@@ -281,9 +282,12 @@
 	if (!gi->composite.gadget_driver.udc_name)
 		return -ENODEV;
 
+	gi->unbinding = true;
 	ret = usb_gadget_unregister_driver(&gi->composite.gadget_driver);
 	if (ret)
 		return ret;
+
+	gi->unbinding = false;
 	kfree(gi->composite.gadget_driver.udc_name);
 	gi->composite.gadget_driver.udc_name = NULL;
 	return 0;
@@ -1555,7 +1559,8 @@
 	acc_disconnect();
 #endif
 	gi->connected = 0;
-	schedule_work(&gi->work);
+	if (!gi->unbinding)
+		schedule_work(&gi->work);
 	composite_disconnect(gadget);
 }
 #endif
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 667e596..5434902 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -366,7 +366,7 @@
 
 	slot_id = 0;
 	for (i = 0; i < MAX_HC_SLOTS; i++) {
-		if (!xhci->devs[i])
+		if (!xhci->devs[i] || !xhci->devs[i]->udev)
 			continue;
 		speed = xhci->devs[i]->udev->speed;
 		if (((speed >= USB_SPEED_SUPER) == (hcd->speed >= HCD_USB3))
@@ -415,25 +415,25 @@
 						     GFP_NOWAIT);
 			if (!command) {
 				spin_unlock_irqrestore(&xhci->lock, flags);
-				xhci_free_command(xhci, cmd);
-				return -ENOMEM;
-
+				ret = -ENOMEM;
+				goto cmd_cleanup;
 			}
 
 			ret = xhci_queue_stop_endpoint(xhci, command, slot_id,
-					i, suspend);
+						       i, suspend);
 			if (ret) {
 				spin_unlock_irqrestore(&xhci->lock, flags);
 				xhci_free_command(xhci, command);
-				goto err_cmd_queue;
+				goto cmd_cleanup;
 			}
 		}
 	}
 	ret = xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend);
 	if (ret) {
 		spin_unlock_irqrestore(&xhci->lock, flags);
-		goto err_cmd_queue;
+		goto cmd_cleanup;
 	}
+
 	xhci_ring_cmd_db(xhci);
 	spin_unlock_irqrestore(&xhci->lock, flags);
 
@@ -445,7 +445,7 @@
 		ret = -ETIME;
 	}
 
-err_cmd_queue:
+cmd_cleanup:
 	xhci_free_command(xhci, cmd);
 	return ret;
 }
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 1660c7c..1332057 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -4871,7 +4871,8 @@
 		 */
 		hcd->has_tt = 1;
 	} else {
-		if (xhci->sbrn == 0x31) {
+		/* Some 3.1 hosts return sbrn 0x30, can't rely on sbrn alone */
+		if (xhci->sbrn == 0x31 || xhci->usb3_rhub.min_rev >= 1) {
 			xhci_info(xhci, "Host supports USB 3.1 Enhanced SuperSpeed\n");
 			hcd->speed = HCD_USB31;
 			hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index a6b6b1c..aac28d9 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -890,7 +890,7 @@
 	 */
 	if (int_usb & MUSB_INTR_RESET) {
 		handled = IRQ_HANDLED;
-		if (devctl & MUSB_DEVCTL_HM) {
+		if (is_host_active(musb)) {
 			/*
 			 * When BABBLE happens what we can depends on which
 			 * platform MUSB is running, because some platforms
@@ -900,9 +900,7 @@
 			 * drop the session.
 			 */
 			dev_err(musb->controller, "Babble\n");
-
-			if (is_host_active(musb))
-				musb_recover_from_babble(musb);
+			musb_recover_from_babble(musb);
 		} else {
 			musb_dbg(musb, "BUS RESET as %s",
 				usb_otg_state_string(musb->xceiv->otg->state));
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
index 1408245..3e1f3da 100644
--- a/drivers/usb/musb/sunxi.c
+++ b/drivers/usb/musb/sunxi.c
@@ -313,6 +313,8 @@
 	if (test_bit(SUNXI_MUSB_FL_HAS_SRAM, &glue->flags))
 		sunxi_sram_release(musb->controller->parent);
 
+	devm_usb_put_phy(glue->dev, glue->xceiv);
+
 	return 0;
 }
 
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index aabfb41..44ab6d6 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -368,6 +368,7 @@
 
 	enum usbpd_state	current_state;
 	bool			hard_reset_recvd;
+	ktime_t			hard_reset_recvd_time;
 	struct list_head	rx_q;
 	spinlock_t		rx_lock;
 	struct rx_msg		*rx_ext_msg;
@@ -614,6 +615,9 @@
 	int ret;
 	u16 hdr;
 
+	if (pd->hard_reset_recvd)
+		return -EBUSY;
+
 	hdr = PD_MSG_HDR(msg_type, pd->current_dr, pd->current_pr,
 			pd->tx_msgid, num_data, pd->spec_rev);
 
@@ -805,11 +809,13 @@
 		return;
 	}
 
-	usbpd_dbg(&pd->dev, "hard reset received\n");
+	pd->hard_reset_recvd = true;
+	pd->hard_reset_recvd_time = ktime_get();
+
+	usbpd_err(&pd->dev, "hard reset received\n");
 
 	/* Force CC logic to source/sink to keep Rp/Rd unchanged */
 	set_power_role(pd, pd->current_pr);
-	pd->hard_reset_recvd = true;
 	power_supply_set_property(pd->usb_psy,
 			POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
 
@@ -1074,6 +1080,9 @@
 	unsigned long flags;
 	int ret;
 
+	if (pd->hard_reset_recvd) /* let usbpd_sm handle it */
+		return;
+
 	usbpd_dbg(&pd->dev, "%s -> %s\n",
 			usbpd_state_strings[pd->current_state],
 			usbpd_state_strings[next_state]);
@@ -2044,8 +2053,13 @@
 		if (pd->current_pr == PR_SINK) {
 			usbpd_set_state(pd, PE_SNK_TRANSITION_TO_DEFAULT);
 		} else {
+			s64 delta = ktime_ms_delta(ktime_get(),
+					pd->hard_reset_recvd_time);
 			pd->current_state = PE_SRC_TRANSITION_TO_DEFAULT;
-			kick_sm(pd, PS_HARD_RESET_TIME);
+			if (delta >= PS_HARD_RESET_TIME)
+				kick_sm(pd, 0);
+			else
+				kick_sm(pd, PS_HARD_RESET_TIME - (int)delta);
 		}
 
 		goto sm_done;
@@ -2302,8 +2316,11 @@
 					&val);
 
 			/* save the PDOs so userspace can further evaluate */
-			memcpy(&pd->received_pdos, rx_msg->payload,
+			memset(&pd->received_pdos, 0,
 					sizeof(pd->received_pdos));
+			memcpy(&pd->received_pdos, rx_msg->payload,
+					min_t(size_t, rx_msg->data_len,
+						sizeof(pd->received_pdos)));
 			pd->src_cap_id++;
 
 			usbpd_set_state(pd, PE_SNK_EVALUATE_CAPABILITY);
@@ -2411,8 +2428,11 @@
 	case PE_SNK_READY:
 		if (IS_DATA(rx_msg, MSG_SOURCE_CAPABILITIES)) {
 			/* save the PDOs so userspace can further evaluate */
-			memcpy(&pd->received_pdos, rx_msg->payload,
+			memset(&pd->received_pdos, 0,
 					sizeof(pd->received_pdos));
+			memcpy(&pd->received_pdos, rx_msg->payload,
+					min_t(size_t, rx_msg->data_len,
+						sizeof(pd->received_pdos)));
 			pd->src_cap_id++;
 
 			usbpd_set_state(pd, PE_SNK_EVALUATE_CAPABILITY);
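A sketch of the bounded copy the two hunks above use when caching received PDOs: zero the destination first, then copy at most min(message length, buffer size) bytes, so a short or oversized Source_Capabilities message can neither leave stale PDOs behind nor overflow the array. The array size and names here are illustrative, not taken from the PD code.

#include <stdint.h>
#include <string.h>

#define MAX_PDOS	7

static uint32_t received_pdos[MAX_PDOS];

static void cache_pdos(const void *payload, size_t data_len)
{
	size_t n = data_len < sizeof(received_pdos) ?
			data_len : sizeof(received_pdos);

	memset(received_pdos, 0, sizeof(received_pdos));	/* drop stale PDOs */
	memcpy(received_pdos, payload, n);			/* bounded copy */
}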
diff --git a/drivers/usb/pd/qpnp-pdphy.c b/drivers/usb/pd/qpnp-pdphy.c
index 6395ca2..8a4f3d4 100644
--- a/drivers/usb/pd/qpnp-pdphy.c
+++ b/drivers/usb/pd/qpnp-pdphy.c
@@ -582,6 +582,10 @@
 {
 	struct usb_pdphy *pdphy = data;
 
+	/* TX already aborted by received signal */
+	if (pdphy->tx_status != -EINPROGRESS)
+		return IRQ_HANDLED;
+
 	if (irq == pdphy->msg_tx_irq) {
 		pdphy->msg_tx_cnt++;
 		pdphy->tx_status = 0;
@@ -635,6 +639,10 @@
 	if (pdphy->signal_cb)
 		pdphy->signal_cb(pdphy->usbpd, frame_type);
 
+	if (pdphy->tx_status == -EINPROGRESS) {
+		pdphy->tx_status = -EBUSY;
+		wake_up(&pdphy->tx_waitq);
+	}
 done:
 	return IRQ_HANDLED;
 }
diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c
index 6d97dec..bc27c31 100644
--- a/drivers/usb/phy/phy-msm-qusb-v2.c
+++ b/drivers/usb/phy/phy-msm-qusb-v2.c
@@ -63,6 +63,11 @@
 #define LINESTATE_DP			BIT(0)
 #define LINESTATE_DM			BIT(1)
 
+#define BIAS_CTRL_2_OVERRIDE_VAL	0x28
+
+/* PERIPH_SS_PHY_REFGEN_NORTH_BG_CTRL register bits */
+#define BANDGAP_BYPASS			BIT(0)
+
 unsigned int phy_tune1;
 module_param(phy_tune1, uint, 0644);
 MODULE_PARM_DESC(phy_tune1, "QUSB PHY v2 TUNE1");
@@ -74,6 +79,7 @@
 	INTR_CTRL,
 	PLL_CORE_INPUT_OVERRIDE,
 	TEST1,
+	BIAS_CTRL_2,
 	USB2_PHY_REG_MAX,
 };
 
@@ -82,6 +88,7 @@
 	struct mutex		lock;
 	void __iomem		*base;
 	void __iomem		*efuse_reg;
+	void __iomem		*refgen_north_bg_reg;
 
 	struct clk		*ref_clk_src;
 	struct clk		*ref_clk;
@@ -466,6 +473,11 @@
 				qphy->base + qphy->phy_reg[PORT_TUNE1]);
 	}
 
+	if (qphy->refgen_north_bg_reg)
+		if (readl_relaxed(qphy->refgen_north_bg_reg) & BANDGAP_BYPASS)
+			writel_relaxed(BIAS_CTRL_2_OVERRIDE_VAL,
+				qphy->base + qphy->phy_reg[BIAS_CTRL_2]);
+
 	/* ensure above writes are completed before re-enabling PHY */
 	wmb();
 
@@ -775,6 +787,12 @@
 		}
 	}
 
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					"refgen_north_bg_reg_addr");
+	if (res)
+		qphy->refgen_north_bg_reg = devm_ioremap(dev, res->start,
+						resource_size(res));
+
 	/* ref_clk_src is needed irrespective of SE_CLK or DIFF_CLK usage */
 	qphy->ref_clk_src = devm_clk_get(dev, "ref_clk_src");
 	if (IS_ERR(qphy->ref_clk_src)) {
@@ -892,7 +910,7 @@
 		if (qphy->phy_reg) {
 			qphy->qusb_phy_reg_offset_cnt =
 				size / sizeof(*qphy->phy_reg);
-			if (qphy->qusb_phy_reg_offset_cnt > USB2_PHY_REG_MAX) {
+			if (qphy->qusb_phy_reg_offset_cnt != USB2_PHY_REG_MAX) {
 				dev_err(dev, "invalid reg offset count\n");
 				return -EINVAL;
 			}
diff --git a/drivers/usb/serial/metro-usb.c b/drivers/usb/serial/metro-usb.c
index 39e6830..45182c6 100644
--- a/drivers/usb/serial/metro-usb.c
+++ b/drivers/usb/serial/metro-usb.c
@@ -45,6 +45,7 @@
 static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_BI) },
 	{ USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_UNI) },
+	{ USB_DEVICE_INTERFACE_CLASS(0x0c2e, 0x0730, 0xff) },	/* MS7820 */
 	{ }, /* Terminating entry. */
 };
 MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 2ef2b61..79b8ab4 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -1030,6 +1030,7 @@
 	mutex_unlock(&priv->lock);
 
 	if (use_ptemod) {
+		map->pages_vm_start = vma->vm_start;
 		err = apply_to_page_range(vma->vm_mm, vma->vm_start,
 					  vma->vm_end - vma->vm_start,
 					  find_grant_ptes, map);
@@ -1067,7 +1068,6 @@
 					    set_grant_ptes_as_special, NULL);
 		}
 #endif
-		map->pages_vm_start = vma->vm_start;
 	}
 
 	return 0;
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 03951f9..3e1c136 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1900,6 +1900,7 @@
 retry:
 	spin_lock(&ci->i_ceph_lock);
 	if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
+		spin_unlock(&ci->i_ceph_lock);
 		dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
 		goto out;
 	}
@@ -1917,8 +1918,10 @@
 			mutex_lock(&session->s_mutex);
 			goto retry;
 		}
-		if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
+		if (cap->session->s_state < CEPH_MDS_SESSION_OPEN) {
+			spin_unlock(&ci->i_ceph_lock);
 			goto out;
+		}
 
 		flushing = __mark_caps_flushing(inode, session, true,
 						&flush_tid, &oldest_flush_tid);
diff --git a/fs/crypto/Makefile b/fs/crypto/Makefile
index f17684c..facf63c 100644
--- a/fs/crypto/Makefile
+++ b/fs/crypto/Makefile
@@ -1,3 +1,4 @@
 obj-$(CONFIG_FS_ENCRYPTION)	+= fscrypto.o
 
+ccflags-y += -Ifs/ext4
 fscrypto-y := crypto.o fname.o policy.o keyinfo.o
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 61cfcce..5c24071 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -28,6 +28,7 @@
 #include <linux/dcache.h>
 #include <linux/namei.h>
 #include <linux/fscrypto.h>
+#include "ext4_ice.h"
 
 static unsigned int num_prealloc_crypto_pages = 32;
 static unsigned int num_prealloc_crypto_ctxs = 128;
@@ -406,6 +407,9 @@
 
 	bio_for_each_segment_all(bv, bio, i) {
 		struct page *page = bv->bv_page;
+	if (ext4_is_ice_enabled())
+		SetPageUptodate(page);
+	else {
 		int ret = fscrypt_decrypt_page(page);
 
 		if (ret) {
@@ -414,6 +418,7 @@
 		} else {
 			SetPageUptodate(page);
 		}
+	}
 		unlock_page(page);
 	}
 	fscrypt_release_ctx(ctx);
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index bb46063..106e55c 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -11,6 +11,7 @@
 #include <keys/user-type.h>
 #include <linux/scatterlist.h>
 #include <linux/fscrypto.h>
+#include "ext4_ice.h"
 
 static void derive_crypt_complete(struct crypto_async_request *req, int rc)
 {
@@ -108,6 +109,11 @@
 		goto out;
 	}
 	ukp = user_key_payload(keyring_key);
+	if (!ukp) {
+		/* key was revoked before we acquired its semaphore */
+		res = -EKEYREVOKED;
+		goto out;
+	}
 	if (ukp->datalen != sizeof(struct fscrypt_key)) {
 		res = -EINVAL;
 		goto out;
@@ -130,13 +136,17 @@
 }
 
 static int determine_cipher_type(struct fscrypt_info *ci, struct inode *inode,
-				 const char **cipher_str_ret, int *keysize_ret)
+		const char **cipher_str_ret, int *keysize_ret, int *fname)
 {
 	if (S_ISREG(inode->i_mode)) {
 		if (ci->ci_data_mode == FS_ENCRYPTION_MODE_AES_256_XTS) {
 			*cipher_str_ret = "xts(aes)";
 			*keysize_ret = FS_AES_256_XTS_KEY_SIZE;
 			return 0;
+		} else if (ci->ci_data_mode == FS_ENCRYPTION_MODE_PRIVATE) {
+			*cipher_str_ret = "bugon";
+			*keysize_ret = FS_AES_256_XTS_KEY_SIZE;
+			return 0;
 		}
 		pr_warn_once("fscrypto: unsupported contents encryption mode "
 			     "%d for inode %lu\n",
@@ -148,6 +158,7 @@
 		if (ci->ci_filename_mode == FS_ENCRYPTION_MODE_AES_256_CTS) {
 			*cipher_str_ret = "cts(cbc(aes))";
 			*keysize_ret = FS_AES_256_CTS_KEY_SIZE;
+			*fname = 1;
 			return 0;
 		}
 		pr_warn_once("fscrypto: unsupported filenames encryption mode "
@@ -167,9 +178,26 @@
 		return;
 
 	crypto_free_skcipher(ci->ci_ctfm);
+	memzero_explicit(ci->ci_raw_key,
+		sizeof(ci->ci_raw_key));
 	kmem_cache_free(fscrypt_info_cachep, ci);
 }
 
+static int fs_data_encryption_mode(void)
+{
+	return ext4_is_ice_enabled() ? FS_ENCRYPTION_MODE_PRIVATE :
+		FS_ENCRYPTION_MODE_AES_256_XTS;
+}
+
+int fs_using_hardware_encryption(struct inode *inode)
+{
+	struct fscrypt_info *ci = inode->i_crypt_info;
+
+	return S_ISREG(inode->i_mode) && ci &&
+		ci->ci_data_mode == FS_ENCRYPTION_MODE_PRIVATE;
+}
+EXPORT_SYMBOL(fs_using_hardware_encryption);
+
 int fscrypt_get_encryption_info(struct inode *inode)
 {
 	struct fscrypt_info *crypt_info;
@@ -177,8 +205,8 @@
 	struct crypto_skcipher *ctfm;
 	const char *cipher_str;
 	int keysize;
-	u8 *raw_key = NULL;
 	int res;
+	int fname = 0;
 
 	if (inode->i_crypt_info)
 		return 0;
@@ -195,7 +223,7 @@
 		if (!fscrypt_dummy_context_enabled(inode))
 			return res;
 		ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
-		ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
+		ctx.contents_encryption_mode = fs_data_encryption_mode();
 		ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
 		ctx.flags = 0;
 	} else if (res != sizeof(ctx)) {
@@ -219,7 +247,8 @@
 	memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
 				sizeof(crypt_info->ci_master_key));
 
-	res = determine_cipher_type(crypt_info, inode, &cipher_str, &keysize);
+	res = determine_cipher_type(crypt_info, inode, &cipher_str, &keysize,
+				&fname);
 	if (res)
 		goto out;
 
@@ -228,24 +257,21 @@
 	 * crypto API as part of key derivation.
 	 */
 	res = -ENOMEM;
-	raw_key = kmalloc(FS_MAX_KEY_SIZE, GFP_NOFS);
-	if (!raw_key)
-		goto out;
 
 	if (fscrypt_dummy_context_enabled(inode)) {
-		memset(raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE);
+		memset(crypt_info->ci_raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE);
 		goto got_key;
 	}
 
-	res = validate_user_key(crypt_info, &ctx, raw_key,
+	res = validate_user_key(crypt_info, &ctx, crypt_info->ci_raw_key,
 			FS_KEY_DESC_PREFIX, FS_KEY_DESC_PREFIX_SIZE);
 	if (res && inode->i_sb->s_cop->key_prefix) {
 		u8 *prefix = NULL;
 		int prefix_size, res2;
 
 		prefix_size = inode->i_sb->s_cop->key_prefix(inode, &prefix);
-		res2 = validate_user_key(crypt_info, &ctx, raw_key,
-							prefix, prefix_size);
+		res2 = validate_user_key(crypt_info, &ctx,
+				crypt_info->ci_raw_key,	prefix, prefix_size);
 		if (res2) {
 			if (res2 == -ENOKEY)
 				res = -ENOKEY;
@@ -255,28 +281,33 @@
 		goto out;
 	}
 got_key:
-	ctfm = crypto_alloc_skcipher(cipher_str, 0, 0);
-	if (!ctfm || IS_ERR(ctfm)) {
-		res = ctfm ? PTR_ERR(ctfm) : -ENOMEM;
-		printk(KERN_DEBUG
-		       "%s: error %d (inode %u) allocating crypto tfm\n",
-		       __func__, res, (unsigned) inode->i_ino);
+	if (crypt_info->ci_data_mode != FS_ENCRYPTION_MODE_PRIVATE || fname) {
+		ctfm = crypto_alloc_skcipher(cipher_str, 0, 0);
+		if (!ctfm || IS_ERR(ctfm)) {
+			res = ctfm ? PTR_ERR(ctfm) : -ENOMEM;
+			pr_err("%s: error %d inode %u allocating crypto tfm\n",
+				__func__, res, (unsigned int) inode->i_ino);
+			goto out;
+		}
+		crypt_info->ci_ctfm = ctfm;
+		crypto_skcipher_clear_flags(ctfm, ~0);
+		crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_REQ_WEAK_KEY);
+		res = crypto_skcipher_setkey(ctfm, crypt_info->ci_raw_key,
+						keysize);
+		if (res)
+			goto out;
+	} else if (!ext4_is_ice_enabled()) {
+		pr_warn("%s: ICE support not available\n",
+			__func__);
+		res = -EINVAL;
 		goto out;
 	}
-	crypt_info->ci_ctfm = ctfm;
-	crypto_skcipher_clear_flags(ctfm, ~0);
-	crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_REQ_WEAK_KEY);
-	res = crypto_skcipher_setkey(ctfm, raw_key, keysize);
-	if (res)
-		goto out;
-
 	if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) == NULL)
 		crypt_info = NULL;
 out:
 	if (res == -ENOKEY)
 		res = 0;
 	put_crypt_info(crypt_info);
-	kzfree(raw_key);
 	return res;
 }
 EXPORT_SYMBOL(fscrypt_get_encryption_info);
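A minimal sketch of the revoked-key check added in the keyinfo.c hunk above: the payload pointer fetched under the key's semaphore may legitimately be NULL if the key was revoked in the meantime, so the caller must test it and map that case to -EKEYREVOKED rather than dereference it. The key/payload types below are stand-ins, and EKEYREVOKED is guarded in case the libc lacks it.

#include <pthread.h>
#include <string.h>
#include <errno.h>

#ifndef EKEYREVOKED
#define EKEYREVOKED	128	/* Linux value; assumption for other libcs */
#endif

struct payload {
	size_t datalen;
	unsigned char data[64];
};

struct key {
	pthread_rwlock_t sem;
	struct payload *payload;	/* set to NULL when the key is revoked */
};

static int copy_key_material(struct key *k, unsigned char *dst, size_t dstlen)
{
	int ret = 0;

	pthread_rwlock_rdlock(&k->sem);
	if (!k->payload) {		/* revoked while we waited for the lock */
		ret = -EKEYREVOKED;
		goto out;
	}
	if (k->payload->datalen > dstlen) {
		ret = -EINVAL;
		goto out;
	}
	memcpy(dst, k->payload->data, k->payload->datalen);
out:
	pthread_rwlock_unlock(&k->sem);
	return ret;
}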
diff --git a/fs/direct-io.c b/fs/direct-io.c
index c6220a2..bf03a92 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -411,6 +411,7 @@
 	if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty)
 		bio_set_pages_dirty(bio);
 
+	bio->bi_dio_inode = dio->inode;
 	dio->bio_bdev = bio->bi_bdev;
 
 	if (sdio->submit_io) {
@@ -424,6 +425,18 @@
 	sdio->logical_offset_in_bio = 0;
 }
 
+struct inode *dio_bio_get_inode(struct bio *bio)
+{
+	struct inode *inode = NULL;
+
+	if (bio == NULL)
+		return NULL;
+
+	inode = bio->bi_dio_inode;
+
+	return inode;
+}
+EXPORT_SYMBOL(dio_bio_get_inode);
 /*
  * Release any resources in case of a failure
  */
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 599a292..a896e46 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -84,11 +84,16 @@
 static inline struct ecryptfs_auth_tok *
 ecryptfs_get_encrypted_key_payload_data(struct key *key)
 {
-	if (key->type == &key_type_encrypted)
-		return (struct ecryptfs_auth_tok *)
-			(&((struct encrypted_key_payload *)key->payload.data[0])->payload_data);
-	else
+	struct encrypted_key_payload *payload;
+
+	if (key->type != &key_type_encrypted)
 		return NULL;
+
+	payload = key->payload.data[0];
+	if (!payload)
+		return ERR_PTR(-EKEYREVOKED);
+
+	return (struct ecryptfs_auth_tok *)payload->payload_data;
 }
 
 static inline struct key *ecryptfs_get_encrypted_key(char *sig)
@@ -114,12 +119,17 @@
 ecryptfs_get_key_payload_data(struct key *key)
 {
 	struct ecryptfs_auth_tok *auth_tok;
+	const struct user_key_payload *ukp;
 
 	auth_tok = ecryptfs_get_encrypted_key_payload_data(key);
-	if (!auth_tok)
-		return (struct ecryptfs_auth_tok *)user_key_payload(key)->data;
-	else
+	if (auth_tok)
 		return auth_tok;
+
+	ukp = user_key_payload(key);
+	if (!ukp)
+		return ERR_PTR(-EKEYREVOKED);
+
+	return (struct ecryptfs_auth_tok *)ukp->data;
 }
 
 #define ECRYPTFS_MAX_KEYSET_SIZE 1024
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 3cf1546..fa218cd 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -459,7 +459,8 @@
  * @auth_tok_key: key containing the authentication token
  * @auth_tok: authentication token
  *
- * Returns zero on valid auth tok; -EINVAL otherwise
+ * Returns zero on valid auth tok; -EINVAL if the payload is invalid; or
+ * -EKEYREVOKED if the key was revoked before we acquired its semaphore.
  */
 static int
 ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key,
@@ -468,6 +469,12 @@
 	int rc = 0;
 
 	(*auth_tok) = ecryptfs_get_key_payload_data(auth_tok_key);
+	if (IS_ERR(*auth_tok)) {
+		rc = PTR_ERR(*auth_tok);
+		*auth_tok = NULL;
+		goto out;
+	}
+
 	if (ecryptfs_verify_version((*auth_tok)->version)) {
 		printk(KERN_ERR "Data structure version mismatch. Userspace "
 		       "tools must match eCryptfs kernel module with major "
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index e38039f..e9232a0 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -109,10 +109,16 @@
 	  decrypted pages in the page cache.
 
 config EXT4_FS_ENCRYPTION
-	bool
-	default y
+	bool "Ext4 FS Encryption"
+	default n
 	depends on EXT4_ENCRYPTION
 
+config EXT4_FS_ICE_ENCRYPTION
+	bool "Ext4 Encryption with ICE support"
+	default n
+	depends on EXT4_FS_ENCRYPTION
+	depends on PFK
+
 config EXT4_DEBUG
 	bool "EXT4 debugging support"
 	depends on EXT4_FS
diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
index 354103f..d9e563a 100644
--- a/fs/ext4/Makefile
+++ b/fs/ext4/Makefile
@@ -1,6 +1,7 @@
 #
 # Makefile for the linux ext4-filesystem routines.
 #
+ccflags-y += -Ifs/crypto
 
 obj-$(CONFIG_EXT4_FS) += ext4.o
 
@@ -12,3 +13,4 @@
 
 ext4-$(CONFIG_EXT4_FS_POSIX_ACL)	+= acl.o
 ext4-$(CONFIG_EXT4_FS_SECURITY)		+= xattr_security.o
+ext4-$(CONFIG_EXT4_FS_ICE_ENCRYPTION)   += ext4_ice.o
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 20ee0e4..9b67de7 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2352,6 +2352,7 @@
 #define fscrypt_fname_free_buffer	fscrypt_notsupp_fname_free_buffer
 #define fscrypt_fname_disk_to_usr	fscrypt_notsupp_fname_disk_to_usr
 #define fscrypt_fname_usr_to_disk	fscrypt_notsupp_fname_usr_to_disk
+#define fs_using_hardware_encryption fs_notsupp_using_hardware_encryption
 #endif
 
 /* dir.c */
diff --git a/fs/ext4/ext4_ice.c b/fs/ext4/ext4_ice.c
new file mode 100644
index 0000000..25f79ae
--- /dev/null
+++ b/fs/ext4/ext4_ice.c
@@ -0,0 +1,107 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "ext4_ice.h"
+
+/*
+ * Retrieves encryption key from the inode
+ */
+char *ext4_get_ice_encryption_key(const struct inode *inode)
+{
+	struct fscrypt_info *ci = NULL;
+
+	if (!inode)
+		return NULL;
+
+	ci = inode->i_crypt_info;
+	if (!ci)
+		return NULL;
+
+	return &(ci->ci_raw_key[0]);
+}
+
+/*
+ * Retrieves encryption salt from the inode
+ */
+char *ext4_get_ice_encryption_salt(const struct inode *inode)
+{
+	struct fscrypt_info *ci = NULL;
+
+	if (!inode)
+		return NULL;
+
+	ci = inode->i_crypt_info;
+	if (!ci)
+		return NULL;
+
+	return &(ci->ci_raw_key[ext4_get_ice_encryption_key_size(inode)]);
+}
+
+/*
+ * returns true if the cipher mode in inode is AES XTS
+ */
+int ext4_is_aes_xts_cipher(const struct inode *inode)
+{
+	struct fscrypt_info *ci = NULL;
+
+	ci = inode->i_crypt_info;
+	if (!ci)
+		return 0;
+
+	return (ci->ci_data_mode == FS_ENCRYPTION_MODE_PRIVATE);
+}
+
+/*
+ * returns true if encryption info in both inodes is equal
+ */
+int ext4_is_ice_encryption_info_equal(const struct inode *inode1,
+	const struct inode *inode2)
+{
+	char *key1 = NULL;
+	char *key2 = NULL;
+	char *salt1 = NULL;
+	char *salt2 = NULL;
+
+	if (!inode1 || !inode2)
+		return 0;
+
+	if (inode1 == inode2)
+		return 1;
+
+	/* both do not belong to ice, so we don't care, they are equal for us */
+	if (!ext4_should_be_processed_by_ice(inode1) &&
+		!ext4_should_be_processed_by_ice(inode2))
+		return 1;
+
+	/* one belongs to ice, the other does not -> not equal */
+	if (ext4_should_be_processed_by_ice(inode1) ^
+		ext4_should_be_processed_by_ice(inode2))
+		return 0;
+
+	key1 = ext4_get_ice_encryption_key(inode1);
+	key2 = ext4_get_ice_encryption_key(inode2);
+	salt1 = ext4_get_ice_encryption_salt(inode1);
+	salt2 = ext4_get_ice_encryption_salt(inode2);
+
+	/* key and salt should not be null by this point */
+	if (!key1 || !key2 || !salt1 || !salt2 ||
+		(ext4_get_ice_encryption_key_size(inode1) !=
+		 ext4_get_ice_encryption_key_size(inode2)) ||
+		(ext4_get_ice_encryption_salt_size(inode1) !=
+		 ext4_get_ice_encryption_salt_size(inode2)))
+		return 0;
+
+	return ((memcmp(key1, key2,
+			ext4_get_ice_encryption_key_size(inode1)) == 0) &&
+		(memcmp(salt1, salt2,
+			ext4_get_ice_encryption_salt_size(inode1)) == 0));
+}
diff --git a/fs/ext4/ext4_ice.h b/fs/ext4/ext4_ice.h
new file mode 100644
index 0000000..04e09bf
--- /dev/null
+++ b/fs/ext4/ext4_ice.h
@@ -0,0 +1,104 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _EXT4_ICE_H
+#define _EXT4_ICE_H
+
+#include "ext4.h"
+#include <linux/fscrypto.h>
+
+#ifdef CONFIG_EXT4_FS_ICE_ENCRYPTION
+static inline int ext4_should_be_processed_by_ice(const struct inode *inode)
+{
+	if (!ext4_encrypted_inode((struct inode *)inode))
+		return 0;
+
+	return fs_using_hardware_encryption((struct inode *)inode);
+}
+
+static inline int ext4_is_ice_enabled(void)
+{
+	return 1;
+}
+
+int ext4_is_aes_xts_cipher(const struct inode *inode);
+
+char *ext4_get_ice_encryption_key(const struct inode *inode);
+char *ext4_get_ice_encryption_salt(const struct inode *inode);
+
+int ext4_is_ice_encryption_info_equal(const struct inode *inode1,
+	const struct inode *inode2);
+
+static inline size_t ext4_get_ice_encryption_key_size(
+	const struct inode *inode)
+{
+	return FS_AES_256_XTS_KEY_SIZE / 2;
+}
+
+static inline size_t ext4_get_ice_encryption_salt_size(
+	const struct inode *inode)
+{
+	return FS_AES_256_XTS_KEY_SIZE / 2;
+}
+
+#else
+static inline int ext4_should_be_processed_by_ice(const struct inode *inode)
+{
+	return 0;
+}
+static inline int ext4_is_ice_enabled(void)
+{
+	return 0;
+}
+
+static inline char *ext4_get_ice_encryption_key(const struct inode *inode)
+{
+	return NULL;
+}
+
+static inline char *ext4_get_ice_encryption_salt(const struct inode *inode)
+{
+	return NULL;
+}
+
+static inline size_t ext4_get_ice_encryption_key_size(
+	const struct inode *inode)
+{
+	return 0;
+}
+
+static inline size_t ext4_get_ice_encryption_salt_size(
+	const struct inode *inode)
+{
+	return 0;
+}
+
+static inline int ext4_is_xts_cipher(const struct inode *inode)
+{
+	return 0;
+}
+
+static inline int ext4_is_ice_encryption_info_equal(
+	const struct inode *inode1,
+	const struct inode *inode2)
+{
+	return 0;
+}
+
+static inline int ext4_is_aes_xts_cipher(const struct inode *inode)
+{
+	return 0;
+}
+
+#endif
+
+#endif	/* _EXT4_ICE_H */
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 496c9b5..dcb9669 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -42,6 +42,7 @@
 #include "xattr.h"
 #include "acl.h"
 #include "truncate.h"
+#include "ext4_ice.h"
 
 #include <trace/events/ext4.h>
 #include <trace/events/android_fs.h>
@@ -1152,7 +1153,8 @@
 			ll_rw_block(REQ_OP_READ, 0, 1, &bh);
 			*wait_bh++ = bh;
 			decrypt = ext4_encrypted_inode(inode) &&
-				S_ISREG(inode->i_mode);
+				S_ISREG(inode->i_mode) &&
+				!ext4_is_ice_enabled();
 		}
 	}
 	/*
@@ -3509,7 +3511,8 @@
 		get_block_func = ext4_dio_get_block_unwritten_async;
 		dio_flags = DIO_LOCKING;
 	}
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#if defined(CONFIG_EXT4_FS_ENCRYPTION) && \
+!defined(CONFIG_EXT4_FS_ICE_ENCRYPTION)
 	BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode));
 #endif
 	if (IS_DAX(inode)) {
@@ -3623,7 +3626,8 @@
 	ssize_t ret;
 	int rw = iov_iter_rw(iter);
 
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#if defined(CONFIG_EXT4_FS_ENCRYPTION) && \
+!defined(CONFIG_EXT4_FS_ICE_ENCRYPTION)
 	if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode))
 		return 0;
 #endif
@@ -3820,7 +3824,8 @@
 		if (!buffer_uptodate(bh))
 			goto unlock;
 		if (S_ISREG(inode->i_mode) &&
-		    ext4_encrypted_inode(inode)) {
+		    ext4_encrypted_inode(inode) &&
+		    !fs_using_hardware_encryption(inode)) {
 			/* We expect the key to be set. */
 			BUG_ON(!fscrypt_has_encryption_key(inode));
 			BUG_ON(blocksize != PAGE_SIZE);
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index cec9280..1ddceb6 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -773,10 +773,6 @@
 	case EXT4_IOC_SET_ENCRYPTION_POLICY: {
 #ifdef CONFIG_EXT4_FS_ENCRYPTION
 		struct fscrypt_policy policy;
-
-		if (!ext4_has_feature_encrypt(sb))
-			return -EOPNOTSUPP;
-
 		if (copy_from_user(&policy,
 				   (struct fscrypt_policy __user *)arg,
 				   sizeof(policy)))
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 0094923..d8a0770 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -29,6 +29,7 @@
 #include "ext4_jbd2.h"
 #include "xattr.h"
 #include "acl.h"
+#include "ext4_ice.h"
 
 static struct kmem_cache *io_end_cachep;
 
@@ -470,6 +471,7 @@
 		gfp_t gfp_flags = GFP_NOFS;
 
 	retry_encrypt:
+		if (!fs_using_hardware_encryption(inode))
 		data_page = fscrypt_encrypt_page(inode, page, gfp_flags);
 		if (IS_ERR(data_page)) {
 			ret = PTR_ERR(data_page);
diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
index 5d5ddaa..37e0c31d 100644
--- a/fs/fscache/object-list.c
+++ b/fs/fscache/object-list.c
@@ -330,6 +330,13 @@
 	rcu_read_lock();
 
 	confkey = user_key_payload(key);
+	if (!confkey) {
+		/* key was revoked */
+		rcu_read_unlock();
+		key_put(key);
+		goto no_config;
+	}
+
 	buf = confkey->data;
 
 	for (len = confkey->datalen - 1; len >= 0; len--) {
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index c7c3c96..1693308 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1361,7 +1361,8 @@
 			*/
 			over = !dir_emit(ctx, dirent->name, dirent->namelen,
 				       dirent->ino, dirent->type);
-			ctx->pos = dirent->off;
+			if (!over)
+				ctx->pos = dirent->off;
 		}
 
 		buf += reclen;
diff --git a/fs/namei.c b/fs/namei.c
index e10895c..10d4276 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2903,6 +2903,11 @@
 	if (error)
 		return error;
 	error = dir->i_op->create(dir, dentry, mode, want_excl);
+	if (error)
+		return error;
+	error = security_inode_post_create(dir, dentry, mode);
+	if (error)
+		return error;
 	if (!error)
 		fsnotify_create(dir, dentry);
 	return error;
@@ -3712,6 +3717,13 @@
 		return error;
 
 	error = dir->i_op->mknod(dir, dentry, mode, dev);
+	if (error)
+		return error;
+
+	error = security_inode_post_create(dir, dentry, mode);
+	if (error)
+		return error;
+
 	if (!error)
 		fsnotify_create(dir, dentry);
 	return error;
diff --git a/fs/timerfd.c b/fs/timerfd.c
index 7ec77f8..ab8dd15 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -50,8 +50,7 @@
 static inline bool isalarm(struct timerfd_ctx *ctx)
 {
 	return ctx->clockid == CLOCK_REALTIME_ALARM ||
-		ctx->clockid == CLOCK_BOOTTIME_ALARM ||
-		ctx->clockid == CLOCK_POWEROFF_ALARM;
+		ctx->clockid == CLOCK_BOOTTIME_ALARM;
 }
 
 /*
@@ -143,8 +142,7 @@
 {
 	spin_lock(&ctx->cancel_lock);
 	if ((ctx->clockid == CLOCK_REALTIME ||
-	     ctx->clockid == CLOCK_REALTIME_ALARM ||
-	     ctx->clockid == CLOCK_POWEROFF_ALARM) &&
+	     ctx->clockid == CLOCK_REALTIME_ALARM) &&
 	    (flags & TFD_TIMER_ABSTIME) && (flags & TFD_TIMER_CANCEL_ON_SET)) {
 		if (!ctx->might_cancel) {
 			ctx->might_cancel = true;
@@ -176,7 +174,6 @@
 	enum hrtimer_mode htmode;
 	ktime_t texp;
 	int clockid = ctx->clockid;
-	enum alarmtimer_type type;
 
 	htmode = (flags & TFD_TIMER_ABSTIME) ?
 		HRTIMER_MODE_ABS: HRTIMER_MODE_REL;
@@ -187,8 +184,10 @@
 	ctx->tintv = timespec_to_ktime(ktmr->it_interval);
 
 	if (isalarm(ctx)) {
-		type = clock2alarm(ctx->clockid);
-		alarm_init(&ctx->t.alarm, type, timerfd_alarmproc);
+		alarm_init(&ctx->t.alarm,
+			   ctx->clockid == CLOCK_REALTIME_ALARM ?
+			   ALARM_REALTIME : ALARM_BOOTTIME,
+			   timerfd_alarmproc);
 	} else {
 		hrtimer_init(&ctx->t.tmr, clockid, htmode);
 		hrtimer_set_expires(&ctx->t.tmr, texp);
@@ -388,7 +387,6 @@
 {
 	int ufd;
 	struct timerfd_ctx *ctx;
-	enum alarmtimer_type type;
 
 	/* Check the TFD_* constants for consistency.  */
 	BUILD_BUG_ON(TFD_CLOEXEC != O_CLOEXEC);
@@ -399,8 +397,7 @@
 	     clockid != CLOCK_REALTIME &&
 	     clockid != CLOCK_REALTIME_ALARM &&
 	     clockid != CLOCK_BOOTTIME &&
-	     clockid != CLOCK_BOOTTIME_ALARM &&
-	     clockid != CLOCK_POWEROFF_ALARM))
+	     clockid != CLOCK_BOOTTIME_ALARM))
 		return -EINVAL;
 
 	if (!capable(CAP_WAKE_ALARM) &&
@@ -416,12 +413,13 @@
 	spin_lock_init(&ctx->cancel_lock);
 	ctx->clockid = clockid;
 
-	if (isalarm(ctx)) {
-		type = clock2alarm(ctx->clockid);
-		alarm_init(&ctx->t.alarm, type, timerfd_alarmproc);
-	} else {
+	if (isalarm(ctx))
+		alarm_init(&ctx->t.alarm,
+			   ctx->clockid == CLOCK_REALTIME_ALARM ?
+			   ALARM_REALTIME : ALARM_BOOTTIME,
+			   timerfd_alarmproc);
+	else
 		hrtimer_init(&ctx->t.tmr, clockid, HRTIMER_MODE_ABS);
-	}
 
 	ctx->moffs = ktime_mono_to_real((ktime_t){ .tv64 = 0 });
 
@@ -498,10 +496,6 @@
 	ret = timerfd_setup(ctx, flags, new);
 
 	spin_unlock_irq(&ctx->wqh.lock);
-
-	if (ctx->clockid == CLOCK_POWEROFF_ALARM)
-		set_power_on_alarm();
-
 	fdput(f);
 	return ret;
 }
diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c
index 33db69b..eed8f58 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.c
+++ b/fs/xfs/libxfs/xfs_ag_resv.c
@@ -157,7 +157,8 @@
 	trace_xfs_ag_resv_free(pag, type, 0);
 
 	resv = xfs_perag_resv(pag, type);
-	pag->pag_mount->m_ag_max_usable += resv->ar_asked;
+	if (pag->pag_agno == 0)
+		pag->pag_mount->m_ag_max_usable += resv->ar_asked;
 	/*
 	 * AGFL blocks are always considered "free", so whatever
 	 * was reserved at mount time must be given back at umount.
@@ -217,7 +218,14 @@
 		return error;
 	}
 
-	mp->m_ag_max_usable -= ask;
+	/*
+	 * Reduce the maximum per-AG allocation length by however much we're
+	 * trying to reserve for an AG.  Since this is a filesystem-wide
+	 * counter, we only make the adjustment for AG 0.  This assumes that
+	 * there aren't any AGs hungrier for per-AG reservation than AG 0.
+	 */
+	if (pag->pag_agno == 0)
+		mp->m_ag_max_usable -= ask;
 
 	resv = xfs_perag_resv(pag, type);
 	resv->ar_asked = ask;
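A sketch of the "adjust the filesystem-wide counter only for AG 0" pattern in this hunk: the reserve and free paths must stay symmetric, otherwise m_ag_max_usable drifts a little further on every mount/umount cycle. The structure and field names below are simplified stand-ins for the XFS ones.

#include <stdint.h>

struct mount { uint64_t max_usable; };
struct pergroup { uint32_t agno; uint64_t asked; struct mount *mp; };

static void group_resv_init(struct pergroup *pag, uint64_t ask)
{
	pag->asked = ask;
	if (pag->agno == 0)		/* global counter: adjust once, not per AG */
		pag->mp->max_usable -= ask;
}

static void group_resv_free(struct pergroup *pag)
{
	if (pag->agno == 0)		/* must mirror the init-time adjustment */
		pag->mp->max_usable += pag->asked;
	pag->asked = 0;
}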
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 9f06a21..c3702cd 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -1579,6 +1579,10 @@
 
 				bp = xfs_btree_get_bufs(args->mp, args->tp,
 					args->agno, fbno, 0);
+				if (!bp) {
+					error = -EFSCORRUPTED;
+					goto error0;
+				}
 				xfs_trans_binval(args->tp, bp);
 			}
 			args->len = 1;
@@ -2136,6 +2140,10 @@
 		if (error)
 			goto out_agbp_relse;
 		bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0);
+		if (!bp) {
+			error = -EFSCORRUPTED;
+			goto out_agbp_relse;
+		}
 		xfs_trans_binval(tp, bp);
 	}
 
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index d2f4ab1..7eb9970 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -4057,6 +4057,17 @@
 	}
 }
 
+/* trim extent to within eof */
+void
+xfs_trim_extent_eof(
+	struct xfs_bmbt_irec	*irec,
+	struct xfs_inode	*ip)
+
+{
+	xfs_trim_extent(irec, 0, XFS_B_TO_FSB(ip->i_mount,
+					      i_size_read(VFS_I(ip))));
+}
+
 /*
  * Trim the returned map to the required bounds
  */
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index db53ac7f..f1446d1 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -196,6 +196,7 @@
 
 void	xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
 		xfs_filblks_t len);
+void	xfs_trim_extent_eof(struct xfs_bmbt_irec *, struct xfs_inode *);
 int	xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
 void	xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
 void	xfs_bmap_add_free(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h
index 083cdd6..ce6958b 100644
--- a/fs/xfs/libxfs/xfs_log_format.h
+++ b/fs/xfs/libxfs/xfs_log_format.h
@@ -270,6 +270,7 @@
 	__uint32_t		ilf_fields;	/* flags for fields logged */
 	__uint16_t		ilf_asize;	/* size of attr d/ext/root */
 	__uint16_t		ilf_dsize;	/* size of data/ext/root */
+	__uint32_t		ilf_pad;	/* pad for 64 bit boundary */
 	__uint64_t		ilf_ino;	/* inode number */
 	union {
 		__uint32_t	ilfu_rdev;	/* rdev value for dev inode*/
@@ -280,7 +281,12 @@
 	__int32_t		ilf_boffset;	/* off of inode in buffer */
 } xfs_inode_log_format_t;
 
-typedef struct xfs_inode_log_format_32 {
+/*
+ * Old 32 bit systems will log in this format without the 64 bit
+ * alignment padding. Recovery will detect this and convert it to the
+ * correct format.
+ */
+struct xfs_inode_log_format_32 {
 	__uint16_t		ilf_type;	/* inode log item type */
 	__uint16_t		ilf_size;	/* size of this item */
 	__uint32_t		ilf_fields;	/* flags for fields logged */
@@ -294,24 +300,7 @@
 	__int64_t		ilf_blkno;	/* blkno of inode buffer */
 	__int32_t		ilf_len;	/* len of inode buffer */
 	__int32_t		ilf_boffset;	/* off of inode in buffer */
-} __attribute__((packed)) xfs_inode_log_format_32_t;
-
-typedef struct xfs_inode_log_format_64 {
-	__uint16_t		ilf_type;	/* inode log item type */
-	__uint16_t		ilf_size;	/* size of this item */
-	__uint32_t		ilf_fields;	/* flags for fields logged */
-	__uint16_t		ilf_asize;	/* size of attr d/ext/root */
-	__uint16_t		ilf_dsize;	/* size of data/ext/root */
-	__uint32_t		ilf_pad;	/* pad for 64 bit boundary */
-	__uint64_t		ilf_ino;	/* inode number */
-	union {
-		__uint32_t	ilfu_rdev;	/* rdev value for dev inode*/
-		uuid_t		ilfu_uuid;	/* mount point value */
-	} ilf_u;
-	__int64_t		ilf_blkno;	/* blkno of inode buffer */
-	__int32_t		ilf_len;	/* len of inode buffer */
-	__int32_t		ilf_boffset;	/* off of inode in buffer */
-} xfs_inode_log_format_64_t;
+} __attribute__((packed));
 
 
 /*
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index b2d55a3..710a131 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -247,6 +247,8 @@
 int
 xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 {
+	umode_t mode;
+	bool set_mode = false;
 	int error = 0;
 
 	if (!acl)
@@ -257,18 +259,27 @@
 		return error;
 
 	if (type == ACL_TYPE_ACCESS) {
-		umode_t mode;
 		struct posix_acl *old_acl = acl;
 		error = posix_acl_update_mode(inode, &mode, &acl);
 		if (!acl)
 			posix_acl_release(old_acl);
 		if (error)
 			return error;
-		error = xfs_set_mode(inode, mode);
-		if (error)
-			return error;
+		set_mode = true;
 	}
 
  set_acl:
-	return __xfs_set_acl(inode, acl, type);
+	error =  __xfs_set_acl(inode, acl, type);
+	if (error)
+		return error;
+
+	/*
+	 * We set the mode after successfully updating the ACL xattr because the
+	 * xattr update can fail at ENOSPC and we don't want to change the mode
+	 * if the ACL update hasn't been applied.
+	 */
+	if (set_mode)
+		error = xfs_set_mode(inode, mode);
+
+	return error;
 }
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index d23889e..d31cd1e 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -335,7 +335,8 @@
 		error = xfs_reflink_end_cow(ip, offset, size);
 		break;
 	case XFS_IO_UNWRITTEN:
-		error = xfs_iomap_write_unwritten(ip, offset, size);
+		/* writeback should never update isize */
+		error = xfs_iomap_write_unwritten(ip, offset, size, false);
 		break;
 	default:
 		ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
@@ -437,6 +438,19 @@
 {
 	offset >>= inode->i_blkbits;
 
+	/*
+	 * We have to make sure the cached mapping is within EOF to protect
+	 * against eofblocks trimming on file release leaving us with a stale
+	 * mapping. Otherwise, a page for a subsequent file extending buffered
+	 * write could get picked up by this writeback cycle and written to the
+	 * wrong blocks.
+	 *
+	 * Note that what we really want here is a generic mapping invalidation
+	 * mechanism to protect us from arbitrary extent modifying contexts, not
+	 * just eofblocks.
+	 */
+	xfs_trim_extent_eof(imap, XFS_I(inode));
+
 	return offset >= imap->br_startoff &&
 		offset < imap->br_startoff + imap->br_blockcount;
 }
@@ -725,6 +739,14 @@
 {
 	trace_xfs_invalidatepage(page->mapping->host, page, offset,
 				 length);
+
+	/*
+	 * If we are invalidating the entire page, clear the dirty state from it
+	 * so that we can check for attempts to release dirty cached pages in
+	 * xfs_vm_releasepage().
+	 */
+	if (offset == 0 && length >= PAGE_SIZE)
+		cancel_dirty_page(page);
 	block_invalidatepage(page, offset, length);
 }
 
@@ -1180,25 +1202,27 @@
 	 * mm accommodates an old ext3 case where clean pages might not have had
 	 * the dirty bit cleared. Thus, it can send actual dirty pages to
 	 * ->releasepage() via shrink_active_list(). Conversely,
-	 * block_invalidatepage() can send pages that are still marked dirty
-	 * but otherwise have invalidated buffers.
+	 * block_invalidatepage() can send pages that are still marked dirty but
+	 * otherwise have invalidated buffers.
 	 *
 	 * We want to release the latter to avoid unnecessary buildup of the
-	 * LRU, skip the former and warn if we've left any lingering
-	 * delalloc/unwritten buffers on clean pages. Skip pages with delalloc
-	 * or unwritten buffers and warn if the page is not dirty. Otherwise
-	 * try to release the buffers.
+	 * LRU, so xfs_vm_invalidatepage() clears the page dirty flag on pages
+	 * that are entirely invalidated and need to be released.  Hence the
+	 * only time we should get dirty pages here is through
+	 * shrink_active_list() and so we can simply skip those now.
+	 *
+	 * warn if we've left any lingering delalloc/unwritten buffers on clean
+	 * or invalidated pages we are about to release.
 	 */
+	if (PageDirty(page))
+		return 0;
+
 	xfs_count_page_state(page, &delalloc, &unwritten);
 
-	if (delalloc) {
-		WARN_ON_ONCE(!PageDirty(page));
+	if (WARN_ON_ONCE(delalloc))
 		return 0;
-	}
-	if (unwritten) {
-		WARN_ON_ONCE(!PageDirty(page));
+	if (WARN_ON_ONCE(unwritten))
 		return 0;
-	}
 
 	return try_to_free_buffers(page);
 }
@@ -1532,6 +1556,21 @@
 		return 0;
 	}
 
+	if (flags & XFS_DIO_FLAG_COW)
+		error = xfs_reflink_end_cow(ip, offset, size);
+
+	/*
+	 * Unwritten conversion updates the in-core isize after extent
+	 * conversion but before updating the on-disk size. Updating isize any
+	 * earlier allows a racing dio read to find unwritten extents before
+	 * they are converted.
+	 */
+	if (flags & XFS_DIO_FLAG_UNWRITTEN) {
+		trace_xfs_end_io_direct_write_unwritten(ip, offset, size);
+
+		return xfs_iomap_write_unwritten(ip, offset, size, true);
+	}
+
 	/*
 	 * We need to update the in-core inode size here so that we don't end up
 	 * with the on-disk inode size being outside the in-core inode size. We
@@ -1548,13 +1587,6 @@
 		i_size_write(inode, offset + size);
 	spin_unlock(&ip->i_flags_lock);
 
-	if (flags & XFS_DIO_FLAG_COW)
-		error = xfs_reflink_end_cow(ip, offset, size);
-	if (flags & XFS_DIO_FLAG_UNWRITTEN) {
-		trace_xfs_end_io_direct_write_unwritten(ip, offset, size);
-
-		error = xfs_iomap_write_unwritten(ip, offset, size);
-	}
 	if (flags & XFS_DIO_FLAG_APPEND) {
 		trace_xfs_end_io_direct_write_append(ip, offset, size);
 
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
index be0b79d..c664300 100644
--- a/fs/xfs/xfs_attr_inactive.c
+++ b/fs/xfs/xfs_attr_inactive.c
@@ -302,6 +302,8 @@
 						 &bp, XFS_ATTR_FORK);
 			if (error)
 				return error;
+			node = bp->b_addr;
+			btree = dp->d_ops->node_tree_p(node);
 			child_fsb = be32_to_cpu(btree[i + 1].before);
 			xfs_trans_brelse(*trans, bp);
 		}
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 5ffefac..cb62871 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -84,6 +84,7 @@
 		GFP_NOFS, true);
 }
 
+#ifdef CONFIG_XFS_RT
 int
 xfs_bmap_rtalloc(
 	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
@@ -195,6 +196,7 @@
 	}
 	return 0;
 }
+#endif /* CONFIG_XFS_RT */
 
 /*
  * Check if the endoff is outside the last extent. If so the caller will grow
@@ -1445,7 +1447,19 @@
 		return error;
 
 	/*
-	 * The extent shiting code works on extent granularity. So, if
+	 * Clean out anything hanging around in the cow fork now that
+	 * we've flushed all the dirty data out to disk to avoid having
+	 * CoW extents at the wrong offsets.
+	 */
+	if (xfs_is_reflink_inode(ip)) {
+		error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
+				true);
+		if (error)
+			return error;
+	}
+
+	/*
+	 * The extent shifting code works on extent granularity. So, if
 	 * stop_fsb is not the starting block of extent, we need to split
 	 * the extent at stop_fsb.
 	 */
@@ -2094,11 +2108,31 @@
 		ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
 		tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
 		tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK;
+	}
+
+	/* Swap the cow forks. */
+	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
+		xfs_extnum_t	extnum;
+
+		ASSERT(ip->i_cformat == XFS_DINODE_FMT_EXTENTS);
+		ASSERT(tip->i_cformat == XFS_DINODE_FMT_EXTENTS);
+
+		extnum = ip->i_cnextents;
+		ip->i_cnextents = tip->i_cnextents;
+		tip->i_cnextents = extnum;
+
 		cowfp = ip->i_cowfp;
 		ip->i_cowfp = tip->i_cowfp;
 		tip->i_cowfp = cowfp;
-		xfs_inode_set_cowblocks_tag(ip);
-		xfs_inode_set_cowblocks_tag(tip);
+
+		if (ip->i_cowfp && ip->i_cnextents)
+			xfs_inode_set_cowblocks_tag(ip);
+		else
+			xfs_inode_clear_cowblocks_tag(ip);
+		if (tip->i_cowfp && tip->i_cnextents)
+			xfs_inode_set_cowblocks_tag(tip);
+		else
+			xfs_inode_clear_cowblocks_tag(tip);
 	}
 
 	xfs_trans_log_inode(tp, ip,  src_log_flags);
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
index f100539..ce330f0 100644
--- a/fs/xfs/xfs_bmap_util.h
+++ b/fs/xfs/xfs_bmap_util.h
@@ -28,7 +28,20 @@
 struct xfs_trans;
 struct xfs_bmalloca;
 
+#ifdef CONFIG_XFS_RT
 int	xfs_bmap_rtalloc(struct xfs_bmalloca *ap);
+#else /* !CONFIG_XFS_RT */
+/*
+ * Attempts to allocate RT extents when RT is disabled indicate corruption and
+ * should trigger a shutdown.
+ */
+static inline int
+xfs_bmap_rtalloc(struct xfs_bmalloca *ap)
+{
+	return -EFSCORRUPTED;
+}
+#endif /* CONFIG_XFS_RT */
+
 int	xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff,
 		     int whichfork, int *eof);
 int	xfs_bmap_punch_delalloc_range(struct xfs_inode *ip,
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index ed7ee4e..bcf7297 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -167,7 +167,7 @@
 {
 	struct xfs_mount *mp = bp->b_target->bt_mount;
 
-	xfs_alert(mp, "Metadata %s detected at %pF, %s block 0x%llx",
+	xfs_alert(mp, "Metadata %s detected at %pS, %s block 0x%llx",
 		  bp->b_error == -EFSBADCRC ? "CRC error" : "corruption",
 		  __return_address, bp->b_ops->name, bp->b_bn);
 
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 586b398..362c6b4 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -92,7 +92,7 @@
 	xfs_off_t		count,
 	bool			*did_zero)
 {
-	return iomap_zero_range(VFS_I(ip), pos, count, NULL, &xfs_iomap_ops);
+	return iomap_zero_range(VFS_I(ip), pos, count, did_zero, &xfs_iomap_ops);
 }
 
 int
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 9e795ab..fe9a9a1 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1632,10 +1632,12 @@
 		goto out;
 
 	/*
-	 * Clear the reflink flag if we truncated everything.
+	 * Clear the reflink flag if there are no data fork blocks and
+	 * there are no extents staged in the cow fork.
 	 */
-	if (ip->i_d.di_nblocks == 0 && xfs_is_reflink_inode(ip)) {
-		ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
+	if (xfs_is_reflink_inode(ip) && ip->i_cnextents == 0) {
+		if (ip->i_d.di_nblocks == 0)
+			ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
 		xfs_inode_clear_cowblocks_tag(ip);
 	}
 
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 9491574..d0a3c4b 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -364,6 +364,9 @@
 	to->di_dmstate = from->di_dmstate;
 	to->di_flags = from->di_flags;
 
+	/* log a dummy value to ensure log structure is fully initialised */
+	to->di_next_unlinked = NULLAGINO;
+
 	if (from->di_version == 3) {
 		to->di_changecount = inode->i_version;
 		to->di_crtime.t_sec = from->di_crtime.t_sec;
@@ -404,6 +407,11 @@
  * the second with the on-disk inode structure, and a possible third and/or
  * fourth with the inode data/extents/b-tree root and inode attributes
  * data/extents/b-tree root.
+ *
+ * Note: Always use the 64 bit inode log format structure so we don't
+ * leave an uninitialised hole in the format item on 64 bit systems. Log
+ * recovery on 32 bit systems handles this just fine, so there's no reason
+ * for not using and initialising the properly padded structure all the time.
  */
 STATIC void
 xfs_inode_item_format(
@@ -412,8 +420,8 @@
 {
 	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
 	struct xfs_inode	*ip = iip->ili_inode;
-	struct xfs_inode_log_format *ilf;
 	struct xfs_log_iovec	*vecp = NULL;
+	struct xfs_inode_log_format *ilf;
 
 	ASSERT(ip->i_d.di_version > 1);
 
@@ -425,7 +433,17 @@
 	ilf->ilf_boffset = ip->i_imap.im_boffset;
 	ilf->ilf_fields = XFS_ILOG_CORE;
 	ilf->ilf_size = 2; /* format + core */
-	xlog_finish_iovec(lv, vecp, sizeof(struct xfs_inode_log_format));
+
+	/*
+	 * make sure we don't leak uninitialised data into the log in the case
+	 * when we don't log every field in the inode.
+	 */
+	ilf->ilf_dsize = 0;
+	ilf->ilf_asize = 0;
+	ilf->ilf_pad = 0;
+	memset(&ilf->ilf_u.ilfu_uuid, 0, sizeof(ilf->ilf_u.ilfu_uuid));
+
+	xlog_finish_iovec(lv, vecp, sizeof(*ilf));
 
 	xfs_inode_item_format_core(ip, lv, &vecp);
 	xfs_inode_item_format_data_fork(iip, ilf, lv, &vecp);
@@ -745,7 +763,7 @@
 		 */
 		iip = INODE_ITEM(blip);
 		if ((iip->ili_logged && blip->li_lsn == iip->ili_flush_lsn) ||
-		    lip->li_flags & XFS_LI_FAILED)
+		    (blip->li_flags & XFS_LI_FAILED))
 			need_ail++;
 
 		blip = next;
@@ -855,48 +873,30 @@
 }
 
 /*
- * convert an xfs_inode_log_format struct from either 32 or 64 bit versions
- * (which can have different field alignments) to the native version
+ * convert an xfs_inode_log_format struct from the old 32 bit version
+ * (which can have different field alignments) to the native 64 bit version
  */
 int
 xfs_inode_item_format_convert(
-	xfs_log_iovec_t		*buf,
-	xfs_inode_log_format_t	*in_f)
+	struct xfs_log_iovec		*buf,
+	struct xfs_inode_log_format	*in_f)
 {
-	if (buf->i_len == sizeof(xfs_inode_log_format_32_t)) {
-		xfs_inode_log_format_32_t *in_f32 = buf->i_addr;
+	struct xfs_inode_log_format_32	*in_f32 = buf->i_addr;
 
-		in_f->ilf_type = in_f32->ilf_type;
-		in_f->ilf_size = in_f32->ilf_size;
-		in_f->ilf_fields = in_f32->ilf_fields;
-		in_f->ilf_asize = in_f32->ilf_asize;
-		in_f->ilf_dsize = in_f32->ilf_dsize;
-		in_f->ilf_ino = in_f32->ilf_ino;
-		/* copy biggest field of ilf_u */
-		memcpy(in_f->ilf_u.ilfu_uuid.__u_bits,
-		       in_f32->ilf_u.ilfu_uuid.__u_bits,
-		       sizeof(uuid_t));
-		in_f->ilf_blkno = in_f32->ilf_blkno;
-		in_f->ilf_len = in_f32->ilf_len;
-		in_f->ilf_boffset = in_f32->ilf_boffset;
-		return 0;
-	} else if (buf->i_len == sizeof(xfs_inode_log_format_64_t)){
-		xfs_inode_log_format_64_t *in_f64 = buf->i_addr;
+	if (buf->i_len != sizeof(*in_f32))
+		return -EFSCORRUPTED;
 
-		in_f->ilf_type = in_f64->ilf_type;
-		in_f->ilf_size = in_f64->ilf_size;
-		in_f->ilf_fields = in_f64->ilf_fields;
-		in_f->ilf_asize = in_f64->ilf_asize;
-		in_f->ilf_dsize = in_f64->ilf_dsize;
-		in_f->ilf_ino = in_f64->ilf_ino;
-		/* copy biggest field of ilf_u */
-		memcpy(in_f->ilf_u.ilfu_uuid.__u_bits,
-		       in_f64->ilf_u.ilfu_uuid.__u_bits,
-		       sizeof(uuid_t));
-		in_f->ilf_blkno = in_f64->ilf_blkno;
-		in_f->ilf_len = in_f64->ilf_len;
-		in_f->ilf_boffset = in_f64->ilf_boffset;
-		return 0;
-	}
-	return -EFSCORRUPTED;
+	in_f->ilf_type = in_f32->ilf_type;
+	in_f->ilf_size = in_f32->ilf_size;
+	in_f->ilf_fields = in_f32->ilf_fields;
+	in_f->ilf_asize = in_f32->ilf_asize;
+	in_f->ilf_dsize = in_f32->ilf_dsize;
+	in_f->ilf_ino = in_f32->ilf_ino;
+	/* copy biggest field of ilf_u */
+	memcpy(in_f->ilf_u.ilfu_uuid.__u_bits,
+	       in_f32->ilf_u.ilfu_uuid.__u_bits, sizeof(uuid_t));
+	in_f->ilf_blkno = in_f32->ilf_blkno;
+	in_f->ilf_len = in_f32->ilf_len;
+	in_f->ilf_boffset = in_f32->ilf_boffset;
+	return 0;
 }
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 65740d1..f286f63 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -836,7 +836,8 @@
 xfs_iomap_write_unwritten(
 	xfs_inode_t	*ip,
 	xfs_off_t	offset,
-	xfs_off_t	count)
+	xfs_off_t	count,
+	bool		update_isize)
 {
 	xfs_mount_t	*mp = ip->i_mount;
 	xfs_fileoff_t	offset_fsb;
@@ -847,6 +848,7 @@
 	xfs_trans_t	*tp;
 	xfs_bmbt_irec_t imap;
 	struct xfs_defer_ops dfops;
+	struct inode	*inode = VFS_I(ip);
 	xfs_fsize_t	i_size;
 	uint		resblks;
 	int		error;
@@ -906,7 +908,8 @@
 		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
 		if (i_size > offset + count)
 			i_size = offset + count;
-
+		if (update_isize && i_size > i_size_read(inode))
+			i_size_write(inode, i_size);
 		i_size = xfs_new_eof(ip, i_size);
 		if (i_size) {
 			ip->i_d.di_size = i_size;
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index 6d45cf0..d71703a 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -27,7 +27,7 @@
 			struct xfs_bmbt_irec *, int);
 int xfs_iomap_write_allocate(struct xfs_inode *, int, xfs_off_t,
 			struct xfs_bmbt_irec *);
-int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t);
+int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t, bool);
 
 void xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *,
 		struct xfs_bmbt_irec *);
diff --git a/fs/xfs/xfs_ondisk.h b/fs/xfs/xfs_ondisk.h
index 0c381d7..0492436 100644
--- a/fs/xfs/xfs_ondisk.h
+++ b/fs/xfs/xfs_ondisk.h
@@ -134,7 +134,7 @@
 	XFS_CHECK_STRUCT_SIZE(struct xfs_icreate_log,		28);
 	XFS_CHECK_STRUCT_SIZE(struct xfs_ictimestamp,		8);
 	XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format_32,	52);
-	XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format_64,	56);
+	XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format,	56);
 	XFS_CHECK_STRUCT_SIZE(struct xfs_qoff_logformat,	20);
 	XFS_CHECK_STRUCT_SIZE(struct xfs_trans_header,		16);
 }
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
index 93a7aaf..cecd375 100644
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c
@@ -279,7 +279,7 @@
 					(end - 1) >> PAGE_SHIFT);
 		WARN_ON_ONCE(error);
 
-		error = xfs_iomap_write_unwritten(ip, start, length);
+		error = xfs_iomap_write_unwritten(ip, start, length, false);
 		if (error)
 			goto out_drop_iolock;
 	}
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 0015c19..17d3c96 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -767,7 +767,13 @@
 
 	/* If there is a hole at end_fsb - 1 go to the previous extent */
 	if (eof || got.br_startoff > end_fsb) {
-		ASSERT(idx > 0);
+		/*
+		 * In case of racing, overlapping AIO writes, no COW extents
+		 * might be left by the time I/O completes for the loser of
+		 * the race.  In that case we are done.
+		 */
+		if (idx <= 0)
+			goto out_cancel;
 		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, --idx), &got);
 	}
 
@@ -841,6 +847,7 @@
 
 out_defer:
 	xfs_defer_cancel(&dfops);
+out_cancel:
 	xfs_trans_cancel(tp);
 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 out:
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 453a63f..50810be 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -491,7 +491,9 @@
 # define DP_TEST_PHY_PATTERN_SYMBOL_ERR_MEASUREMENT_CNT 0x2
 # define DP_TEST_PHY_PATTERN_PRBS7			0x3
 # define DP_TEST_PHY_PATTERN_80_BIT_CUSTOM_PATTERN	0x4
-# define DP_TEST_PHY_PATTERN_HBR2_CTS_EYE_PATTERN	0x5
+# define DP_TEST_PHY_PATTERN_CP2520_PATTERN_1		0x5
+# define DP_TEST_PHY_PATTERN_CP2520_PATTERN_2		0x6
+# define DP_TEST_PHY_PATTERN_CP2520_PATTERN_3		0x7
 
 #define DP_TEST_RESPONSE		    0x260
 # define DP_TEST_ACK			    (1 << 0)
diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h
index 8a30cb5..9d80312 100644
--- a/include/linux/alarmtimer.h
+++ b/include/linux/alarmtimer.h
@@ -5,12 +5,10 @@
 #include <linux/hrtimer.h>
 #include <linux/timerqueue.h>
 #include <linux/rtc.h>
-#include <linux/types.h>
 
 enum alarmtimer_type {
 	ALARM_REALTIME,
 	ALARM_BOOTTIME,
-	ALARM_POWEROFF_REALTIME,
 
 	ALARM_NUMTYPE,
 };
@@ -50,9 +48,6 @@
 void alarm_restart(struct alarm *alarm);
 int alarm_try_to_cancel(struct alarm *alarm);
 int alarm_cancel(struct alarm *alarm);
-void set_power_on_alarm(void);
-void power_on_alarm_init(void);
-enum alarmtimer_type clock2alarm(clockid_t clockid);
 
 u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval);
 u64 alarm_forward_now(struct alarm *alarm, ktime_t interval);
@@ -60,8 +55,5 @@
 
 /* Provide way to access the rtc device being used by alarmtimers */
 struct rtc_device *alarmtimer_get_rtcdev(void);
-#ifdef CONFIG_RTC_DRV_QPNP
-extern bool poweron_alarm;
-#endif
 
 #endif
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 2b8b6e0..8a7a15c 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -81,6 +81,12 @@
 	struct bio_set		*bi_pool;
 
 	/*
+	 * When using direct I/O (O_DIRECT), we can't get the inode from a bio
+	 * by walking bio->bi_io_vec->bv_page->mapping->host
+	 * since the page is anon.
+	 */
+	struct inode		*bi_dio_inode;
+	/*
 	 * We can inline a number of vecs at the end of the bio, to avoid
 	 * double allocations for a small number of bio_vecs. This member
 	 * MUST obviously be kept at the very end of the bio.
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 7a2ae2f..d921206 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -59,6 +59,7 @@
 	CPUHP_AP_OFFLINE,
 	CPUHP_AP_SCHED_STARTING,
 	CPUHP_AP_RCUTREE_DYING,
+	CPUHP_AP_KMAP_DYING,
 	CPUHP_AP_IRQ_GIC_STARTING,
 	CPUHP_AP_IRQ_GICV3_STARTING,
 	CPUHP_AP_IRQ_HIP04_STARTING,
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 7f7e9a7..8dce6fd 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -20,6 +20,7 @@
 #include <asm/errno.h>
 
 #ifdef CONFIG_IOMMU_DMA
+#include <linux/dma-mapping.h>
 #include <linux/iommu.h>
 #include <linux/msi.h>
 
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 18bd249..4f6ec47 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2925,6 +2925,8 @@
 		wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
 }
 
+struct inode *dio_bio_get_inode(struct bio *bio);
+
 extern void inode_set_flags(struct inode *inode, unsigned int flags,
 			    unsigned int mask);
 
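A minimal sketch tying together the new bi_dio_inode field (blk_types.h hunk above) and the dio_bio_get_inode() accessor declared here; the resolver function and its fallback through the page mapping are illustrative assumptions, not part of this patch.

    #include <linux/bio.h>
    #include <linux/fs.h>

    /*
     * Illustrative only: resolve the inode a bio belongs to.  For O_DIRECT
     * the pages are anonymous, so the inode stashed in bio->bi_dio_inode
     * (via dio_bio_get_inode()) is the only way back; for buffered I/O the
     * page mapping still works.
     */
    static struct inode *example_bio_to_inode(struct bio *bio)
    {
    	struct inode *inode = dio_bio_get_inode(bio);

    	if (inode)
    		return inode;

    	if (bio->bi_io_vec && bio->bi_io_vec->bv_page &&
    	    bio->bi_io_vec->bv_page->mapping)
    		return bio->bi_io_vec->bv_page->mapping->host;

    	return NULL;
    }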
diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h
index f6dfc29..9b57c19 100644
--- a/include/linux/fscrypto.h
+++ b/include/linux/fscrypto.h
@@ -34,6 +34,7 @@
 #define FS_ENCRYPTION_MODE_AES_256_GCM		2
 #define FS_ENCRYPTION_MODE_AES_256_CBC		3
 #define FS_ENCRYPTION_MODE_AES_256_CTS		4
+#define FS_ENCRYPTION_MODE_PRIVATE          127
 
 /**
  * Encryption context for inode
@@ -80,6 +81,7 @@
 	u8 ci_flags;
 	struct crypto_skcipher *ci_ctfm;
 	u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE];
+	u8 ci_raw_key[FS_MAX_KEY_SIZE];
 };
 
 #define FS_CTX_REQUIRES_FREE_ENCRYPT_FL		0x00000001
@@ -176,7 +178,8 @@
 
 static inline bool fscrypt_valid_contents_enc_mode(u32 mode)
 {
-	return (mode == FS_ENCRYPTION_MODE_AES_256_XTS);
+	return (mode == FS_ENCRYPTION_MODE_AES_256_XTS ||
+		mode == FS_ENCRYPTION_MODE_PRIVATE);
 }
 
 static inline bool fscrypt_valid_filenames_enc_mode(u32 mode)
@@ -257,6 +260,7 @@
 /* keyinfo.c */
 extern int fscrypt_get_encryption_info(struct inode *);
 extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *);
+extern int fs_using_hardware_encryption(struct inode *inode);
 
 /* fname.c */
 extern int fscrypt_setup_filename(struct inode *, const struct qstr *,
@@ -354,6 +358,11 @@
 	return;
 }
 
+static inline int fs_notsupp_using_hardware_encryption(struct inode *inode)
+{
+	return -EOPNOTSUPP;
+}
+
  /* fname.c */
 static inline int fscrypt_notsupp_setup_filename(struct inode *dir,
 			const struct qstr *iname,
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 61aff32..657b565 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -41,6 +41,7 @@
 
 #ifdef CONFIG_ARCH_WANT_KMAP_ATOMIC_FLUSH
 void kmap_atomic_flush_unused(void);
+int kmap_remove_unused_cpu(unsigned int cpu);
 #else
 static inline void kmap_atomic_flush_unused(void) { }
 #endif
@@ -91,6 +92,10 @@
 
 #endif /* CONFIG_HIGHMEM */
 
+#if !defined(CONFIG_HIGHMEM) || !defined(CONFIG_ARCH_WANT_KMAP_ATOMIC_FLUSH)
+static inline int kmap_remove_unused_cpu(unsigned int cpu) { return 0; }
+#endif
+
 #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
 
 DECLARE_PER_CPU(int, __kmap_atomic_idx);
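The new CPUHP_AP_KMAP_DYING state (cpuhotplug.h hunk above) pairs with kmap_remove_unused_cpu() as a hotplug teardown callback. A minimal sketch of how that wiring might look; the registration site and the state name string are assumptions, not shown by this patch.

    #include <linux/cpu.h>
    #include <linux/cpuhotplug.h>
    #include <linux/highmem.h>
    #include <linux/init.h>

    /*
     * Illustrative only: flush a dying CPU's unused atomic kmap slots by
     * running kmap_remove_unused_cpu() on that CPU as it goes down.  There
     * is no startup work, so the startup callback is NULL.
     */
    static int __init example_kmap_hotplug_init(void)
    {
    	return cpuhp_setup_state_nocalls(CPUHP_AP_KMAP_DYING,
    					 "mm/kmap:dying", NULL,
    					 kmap_remove_unused_cpu);
    }
    early_initcall(example_kmap_hotplug_init);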
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index d596a07..8cc99de 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1521,11 +1521,11 @@
 
 	cur_write_sz = hv_get_bytes_to_write(rbi);
 
-	if (cur_write_sz < pending_sz)
+	if (cur_write_sz <= pending_sz)
 		return;
 
 	cached_write_sz = hv_get_cached_bytes_to_write(rbi);
-	if (cached_write_sz < pending_sz)
+	if (cached_write_sz <= pending_sz)
 		vmbus_setevent(channel);
 
 	return;
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 3d9e6b8..f25acfc 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -19,12 +19,12 @@
 #ifndef __LINUX_IOMMU_H
 #define __LINUX_IOMMU_H
 
+#include <linux/scatterlist.h>
+#include <linux/device.h>
+#include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/of.h>
-#include <linux/types.h>
-#include <linux/scatterlist.h>
-#include <trace/events/iommu.h>
 
 #define IOMMU_READ	(1 << 0)
 #define IOMMU_WRITE	(1 << 1)
@@ -88,6 +88,8 @@
 #define IOMMU_DOMAIN_DMA	(__IOMMU_DOMAIN_PAGING |	\
 				 __IOMMU_DOMAIN_DMA_API)
 
+
+#define IOMMU_DOMAIN_NAME_LEN 32
 struct iommu_domain {
 	unsigned type;
 	const struct iommu_ops *ops;
@@ -96,6 +98,7 @@
 	void *handler_token;
 	struct iommu_domain_geometry geometry;
 	void *iova_cookie;
+	char name[IOMMU_DOMAIN_NAME_LEN];
 };
 
 enum iommu_cap {
@@ -144,7 +147,7 @@
 	DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT,
 	DOMAIN_ATTR_CB_STALL_DISABLE,
 	DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR,
-	DOMAIN_ATTR_QCOM_MMU500_ERRATA_MIN_ALIGN,
+	DOMAIN_ATTR_MMU500_ERRATA_MIN_ALIGN,
 	DOMAIN_ATTR_MAX,
 };
 
@@ -241,10 +244,6 @@
 	/* Get the number of windows per domain */
 	u32 (*domain_get_windows)(struct iommu_domain *domain);
 	void (*trigger_fault)(struct iommu_domain *domain, unsigned long flags);
-	unsigned long (*reg_read)(struct iommu_domain *domain,
-				  unsigned long offset);
-	void (*reg_write)(struct iommu_domain *domain, unsigned long val,
-			  unsigned long offset);
 	void (*tlbi_domain)(struct iommu_domain *domain);
 	int (*enable_config_clocks)(struct iommu_domain *domain);
 	void (*disable_config_clocks)(struct iommu_domain *domain);
@@ -281,6 +280,9 @@
 		     phys_addr_t paddr, size_t size, int prot);
 extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 		       size_t size);
+extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+				struct scatterlist *sg, unsigned int nents,
+				int prot);
 extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
 				struct scatterlist *sg,unsigned int nents,
 				int prot);
@@ -340,58 +342,9 @@
 
 extern uint64_t iommu_iova_to_pte(struct iommu_domain *domain,
 	    dma_addr_t iova);
-/**
- * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
- * @domain: the iommu domain where the fault has happened
- * @dev: the device where the fault has happened
- * @iova: the faulting address
- * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
- *
- * This function should be called by the low-level IOMMU implementations
- * whenever IOMMU faults happen, to allow high-level users, that are
- * interested in such events, to know about them.
- *
- * This event may be useful for several possible use cases:
- * - mere logging of the event
- * - dynamic TLB/PTE loading
- * - if restarting of the faulting device is required
- *
- * Returns 0 on success and an appropriate error code otherwise (if dynamic
- * PTE/TLB loading will one day be supported, implementations will be able
- * to tell whether it succeeded or not according to this return value).
- *
- * Specifically, -ENOSYS is returned if a fault handler isn't installed
- * (though fault handlers can also return -ENOSYS, in case they want to
- * elicit the default behavior of the IOMMU drivers).
 
- * Client fault handler returns -EBUSY to signal to the IOMMU driver
- * that the client will take responsibility for any further fault
- * handling, including clearing fault status registers or retrying
- * the faulting transaction.
- */
-static inline int report_iommu_fault(struct iommu_domain *domain,
-		struct device *dev, unsigned long iova, int flags)
-{
-	int ret = -ENOSYS;
-
-	/*
-	 * if upper layers showed interest and installed a fault handler,
-	 * invoke it.
-	 */
-	if (domain->handler)
-		ret = domain->handler(domain, dev, iova, flags,
-						domain->handler_token);
-
-	trace_io_page_fault(dev, iova, flags);
-	return ret;
-}
-
-static inline size_t iommu_map_sg(struct iommu_domain *domain,
-				  unsigned long iova, struct scatterlist *sg,
-				  unsigned int nents, int prot)
-{
-	return domain->ops->map_sg(domain, iova, sg, nents, prot);
-}
+extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
+			      unsigned long iova, int flags);
 
 extern void iommu_trigger_fault(struct iommu_domain *domain,
 				unsigned long flags);
diff --git a/include/linux/key.h b/include/linux/key.h
index 6a54472..ed9b44f 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -126,6 +126,11 @@
 	return (unsigned long) key_ref & 1UL;
 }
 
+enum key_state {
+	KEY_IS_UNINSTANTIATED,
+	KEY_IS_POSITIVE,		/* Positively instantiated */
+};
+
 /*****************************************************************************/
 /*
  * authentication token / access credential / keyring
@@ -157,6 +162,7 @@
 						 * - may not match RCU dereferenced payload
 						 * - payload should contain own length
 						 */
+	short			state;		/* Key state (+) or rejection error (-) */
 
 #ifdef KEY_DEBUGGING
 	unsigned		magic;
@@ -165,18 +171,16 @@
 #endif
 
 	unsigned long		flags;		/* status flags (change with bitops) */
-#define KEY_FLAG_INSTANTIATED	0	/* set if key has been instantiated */
-#define KEY_FLAG_DEAD		1	/* set if key type has been deleted */
-#define KEY_FLAG_REVOKED	2	/* set if key had been revoked */
-#define KEY_FLAG_IN_QUOTA	3	/* set if key consumes quota */
-#define KEY_FLAG_USER_CONSTRUCT	4	/* set if key is being constructed in userspace */
-#define KEY_FLAG_NEGATIVE	5	/* set if key is negative */
-#define KEY_FLAG_ROOT_CAN_CLEAR	6	/* set if key can be cleared by root without permission */
-#define KEY_FLAG_INVALIDATED	7	/* set if key has been invalidated */
-#define KEY_FLAG_BUILTIN	8	/* set if key is built in to the kernel */
-#define KEY_FLAG_ROOT_CAN_INVAL	9	/* set if key can be invalidated by root without permission */
-#define KEY_FLAG_KEEP		10	/* set if key should not be removed */
-#define KEY_FLAG_UID_KEYRING	11	/* set if key is a user or user session keyring */
+#define KEY_FLAG_DEAD		0	/* set if key type has been deleted */
+#define KEY_FLAG_REVOKED	1	/* set if key had been revoked */
+#define KEY_FLAG_IN_QUOTA	2	/* set if key consumes quota */
+#define KEY_FLAG_USER_CONSTRUCT	3	/* set if key is being constructed in userspace */
+#define KEY_FLAG_ROOT_CAN_CLEAR	4	/* set if key can be cleared by root without permission */
+#define KEY_FLAG_INVALIDATED	5	/* set if key has been invalidated */
+#define KEY_FLAG_BUILTIN	6	/* set if key is built in to the kernel */
+#define KEY_FLAG_ROOT_CAN_INVAL	7	/* set if key can be invalidated by root without permission */
+#define KEY_FLAG_KEEP		8	/* set if key should not be removed */
+#define KEY_FLAG_UID_KEYRING	9	/* set if key is a user or user session keyring */
 
 	/* the key type and key description string
 	 * - the desc is used to match a key against search criteria
@@ -202,7 +206,6 @@
 			struct list_head name_link;
 			struct assoc_array keys;
 		};
-		int reject_error;
 	};
 
 	/* This is set on a keyring to restrict the addition of a link to a key
@@ -343,17 +346,27 @@
 #define	KEY_NEED_SETATTR 0x20	/* Require permission to change attributes */
 #define	KEY_NEED_ALL	0x3f	/* All the above permissions */
 
+static inline short key_read_state(const struct key *key)
+{
+	/* Barrier versus mark_key_instantiated(). */
+	return smp_load_acquire(&key->state);
+}
+
 /**
- * key_is_instantiated - Determine if a key has been positively instantiated
+ * key_is_positive - Determine if a key has been positively instantiated
  * @key: The key to check.
  *
  * Return true if the specified key has been positively instantiated, false
  * otherwise.
  */
-static inline bool key_is_instantiated(const struct key *key)
+static inline bool key_is_positive(const struct key *key)
 {
-	return test_bit(KEY_FLAG_INSTANTIATED, &key->flags) &&
-		!test_bit(KEY_FLAG_NEGATIVE, &key->flags);
+	return key_read_state(key) == KEY_IS_POSITIVE;
+}
+
+static inline bool key_is_negative(const struct key *key)
+{
+	return key_read_state(key) < 0;
 }
 
 #define rcu_dereference_key(KEY)					\
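A short, hypothetical read-side sketch of the reworked key state: the separate ->state field (read through key_read_state()) replaces the old KEY_FLAG_INSTANTIATED/KEY_FLAG_NEGATIVE bits and the reject_error member. The classification helper below is illustrative, not part of this patch.

    #include <linux/errno.h>
    #include <linux/key.h>

    /*
     * Illustrative only: a negative state carries the rejection error that
     * used to live in key->reject_error; KEY_IS_POSITIVE replaces the old
     * "instantiated and not negative" flag test.
     */
    static int example_key_status(const struct key *key)
    {
    	short state = key_read_state(key);

    	if (state == KEY_IS_UNINSTANTIATED)
    		return -EINPROGRESS;	/* still under construction */
    	if (state < 0)
    		return state;		/* negatively instantiated */
    	return 0;			/* positively instantiated */
    }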
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 8f5af30..580cc10 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -1419,6 +1419,8 @@
 					size_t *len);
 	int (*inode_create)(struct inode *dir, struct dentry *dentry,
 				umode_t mode);
+	int (*inode_post_create)(struct inode *dir, struct dentry *dentry,
+				umode_t mode);
 	int (*inode_link)(struct dentry *old_dentry, struct inode *dir,
 				struct dentry *new_dentry);
 	int (*inode_unlink)(struct inode *dir, struct dentry *dentry);
@@ -1706,6 +1708,7 @@
 	struct list_head inode_free_security;
 	struct list_head inode_init_security;
 	struct list_head inode_create;
+	struct list_head inode_post_create;
 	struct list_head inode_link;
 	struct list_head inode_unlink;
 	struct list_head inode_symlink;
diff --git a/include/linux/mbus.h b/include/linux/mbus.h
index 2931aa4..f70420e 100644
--- a/include/linux/mbus.h
+++ b/include/linux/mbus.h
@@ -31,8 +31,8 @@
 	struct mbus_dram_window {
 		u8	cs_index;
 		u8	mbus_attr;
-		u32	base;
-		u32	size;
+		u64	base;
+		u64	size;
 	} cs[4];
 };
 
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index b718105..c5a4a25 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -180,6 +180,7 @@
 extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
 extern int mmc_set_auto_bkops(struct mmc_card *card, bool enable);
 extern int mmc_suspend_clk_scaling(struct mmc_host *host);
+extern void mmc_flush_detect_work(struct mmc_host *host);
 
 #define MMC_ERASE_ARG		0x00000000
 #define MMC_SECURE_ERASE_ARG	0x80000000
@@ -233,7 +234,7 @@
 	bool lock_needed);
 extern void mmc_cmdq_clk_scaling_stop_busy(struct mmc_host *host,
 	bool lock_needed, bool is_cmdq_dcmd);
-extern void mmc_recovery_fallback_lower_speed(struct mmc_host *host);
+extern int mmc_recovery_fallback_lower_speed(struct mmc_host *host);
 
 /**
  *	mmc_claim_host - exclusively claim a host
diff --git a/include/linux/msm_drm_notify.h b/include/linux/msm_drm_notify.h
new file mode 100644
index 0000000..924ba85
--- /dev/null
+++ b/include/linux/msm_drm_notify.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MSM_DRM_NOTIFY_H_
+#define _MSM_DRM_NOTIFY_H_
+
+#include <linux/notifier.h>
+
+/* A hardware display blank change occurred */
+#define MSM_DRM_EVENT_BLANK			0x01
+/* A hardware display blank early change occurred */
+#define MSM_DRM_EARLY_EVENT_BLANK		0x02
+
+enum {
+	/* panel: power on */
+	MSM_DRM_BLANK_UNBLANK,
+	/* panel: power off */
+	MSM_DRM_BLANK_POWERDOWN,
+};
+
+enum msm_drm_display_id {
+	/* primary display */
+	MSM_DRM_PRIMARY_DISPLAY,
+	/* external display */
+	MSM_DRM_EXTERNAL_DISPLAY,
+	MSM_DRM_DISPLAY_MAX
+};
+
+struct msm_drm_notifier {
+	enum msm_drm_display_id id;
+	void *data;
+};
+
+int msm_drm_register_client(struct notifier_block *nb);
+int msm_drm_unregister_client(struct notifier_block *nb);
+#endif
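A hedged usage sketch for the notifier interface declared in this new header. The assumption that the notifier's data field points at an int carrying an MSM_DRM_BLANK_* value (mirroring the fbdev notifier convention) is mine; the header itself leaves the payload opaque.

    #include <linux/msm_drm_notify.h>
    #include <linux/notifier.h>
    #include <linux/printk.h>

    /* Illustrative client: log blank/unblank transitions of the primary display. */
    static int example_drm_notifier_cb(struct notifier_block *nb,
    				   unsigned long event, void *data)
    {
    	struct msm_drm_notifier *evdata = data;
    	int *blank;

    	if (event != MSM_DRM_EVENT_BLANK || !evdata || !evdata->data ||
    	    evdata->id != MSM_DRM_PRIMARY_DISPLAY)
    		return NOTIFY_DONE;

    	blank = evdata->data;	/* assumed to hold an MSM_DRM_BLANK_* value */
    	pr_info("primary display %s\n",
    		*blank == MSM_DRM_BLANK_UNBLANK ? "unblanked" : "blanked");
    	return NOTIFY_OK;
    }

    static struct notifier_block example_drm_nb = {
    	.notifier_call = example_drm_notifier_cb,
    };

    /*
     * A driver would typically call msm_drm_register_client(&example_drm_nb)
     * in probe() and msm_drm_unregister_client(&example_drm_nb) in remove().
     */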
diff --git a/include/linux/msm_ext_display.h b/include/linux/msm_ext_display.h
index 08e0def..e34f468 100644
--- a/include/linux/msm_ext_display.h
+++ b/include/linux/msm_ext_display.h
@@ -117,6 +117,7 @@
  *  @get_intf_id: id of connected interface
  *  @teardown_done: audio session teardown done by qdsp
  *  @acknowledge: acknowledge audio status received by user modules
+ *  @ready: notify audio when codec driver is ready.
  */
 struct msm_ext_disp_audio_codec_ops {
 	int (*audio_info_setup)(struct platform_device *pdev,
@@ -127,6 +128,7 @@
 	int (*get_intf_id)(struct platform_device *pdev);
 	void (*teardown_done)(struct platform_device *pdev);
 	int (*acknowledge)(struct platform_device *pdev, u32 ack);
+	int (*ready)(struct platform_device *pdev);
 };
 
 /**
diff --git a/include/linux/pfk.h b/include/linux/pfk.h
new file mode 100644
index 0000000..82ee741
--- /dev/null
+++ b/include/linux/pfk.h
@@ -0,0 +1,57 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef PFK_H_
+#define PFK_H_
+
+#include <linux/bio.h>
+
+struct ice_crypto_setting;
+
+#ifdef CONFIG_PFK
+
+int pfk_load_key_start(const struct bio *bio,
+		struct ice_crypto_setting *ice_setting, bool *is_pfe, bool);
+int pfk_load_key_end(const struct bio *bio, bool *is_pfe);
+int pfk_remove_key(const unsigned char *key, size_t key_size);
+bool pfk_allow_merge_bio(const struct bio *bio1, const struct bio *bio2);
+void pfk_clear_on_reset(void);
+
+#else
+static inline int pfk_load_key_start(const struct bio *bio,
+	struct ice_crypto_setting *ice_setting, bool *is_pfe, bool async)
+{
+	return -ENODEV;
+}
+
+static inline int pfk_load_key_end(const struct bio *bio, bool *is_pfe)
+{
+	return -ENODEV;
+}
+
+static inline int pfk_remove_key(const unsigned char *key, size_t key_size)
+{
+	return -ENODEV;
+}
+
+static inline bool pfk_allow_merge_bio(const struct bio *bio1,
+		const struct bio *bio2)
+{
+	return true;
+}
+
+static inline void pfk_clear_on_reset(void)
+{}
+
+#endif /* CONFIG_PFK */
+
+#endif /* PFK_H */
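A minimal sketch of how a storage driver might bracket bio submission with the PFK hooks declared above; the surrounding driver context, the synchronous (async = false) call and the error policy are illustrative assumptions. Note that the !CONFIG_PFK stubs return -ENODEV with is_pfe left false, so the same call sequence degrades to a plain, unencrypted submission.

    #include <linux/bio.h>
    #include <linux/pfk.h>

    /*
     * Illustrative only: program the per-file ICE key before issuing the
     * bio and release it afterwards.  If the bio is not per-file encrypted
     * (is_pfe stays false) any error from PFK is ignored.
     */
    static int example_submit_with_pfk(const struct bio *bio,
    				   struct ice_crypto_setting *ice_setting)
    {
    	bool is_pfe = false;
    	int ret;

    	ret = pfk_load_key_start(bio, ice_setting, &is_pfe, false);
    	if (ret && is_pfe)
    		return ret;	/* per-file encrypted but key load failed */

    	/* ... hand the bio and ice_setting to the hardware here ... */

    	if (is_pfe)
    		pfk_load_key_end(bio, &is_pfe);

    	return 0;
    }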
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index b6c8c92..ba99b33 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -112,6 +112,11 @@
 	POWER_SUPPLY_PL_USBMID_USBMID,
 };
 
+enum {
+	POWER_SUPPLY_CONNECTOR_TYPEC,
+	POWER_SUPPLY_CONNECTOR_MICRO_USB,
+};
+
 enum power_supply_property {
 	/* Properties of type `int' */
 	POWER_SUPPLY_PROP_STATUS = 0,
@@ -257,6 +262,7 @@
 	POWER_SUPPLY_PROP_PD_VOLTAGE_MAX,
 	POWER_SUPPLY_PROP_PD_VOLTAGE_MIN,
 	POWER_SUPPLY_PROP_SDP_CURRENT_MAX,
+	POWER_SUPPLY_PROP_CONNECTOR_TYPE,
 	/* Local extensions of type int64_t */
 	POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
 	/* Properties of type `const char *' */
diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
index 77a46bd..fc02ece 100644
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/qcom-geni-se.h
@@ -547,6 +547,24 @@
 		       bool msb_to_lsb);
 
 /**
+ * se_geni_clks_off() - Turn off clocks associated with the serial
+ *                      engine
+ * @rsc:	Handle to resources associated with the serial engine.
+ *
+ * Return:	0 on success, standard Linux error codes on failure/error.
+ */
+int se_geni_clks_off(struct se_geni_rsc *rsc);
+
+/**
+ * se_geni_clks_on() - Turn on clocks associated with the serial
+ *                     engine
+ * @rsc:	Handle to resources associated with the serial engine.
+ *
+ * Return:	0 on success, standard Linux error codes on failure/error.
+ */
+int se_geni_clks_on(struct se_geni_rsc *rsc);
+
+/**
  * se_geni_resources_off() - Turn off resources associated with the serial
  *                           engine
  * @rsc:	Handle to resources associated with the serial engine.
@@ -842,6 +860,16 @@
 {
 }
 
+static inline int se_geni_clks_on(struct se_geni_rsc *rsc)
+{
+	return -ENXIO;
+}
+
+static inline int se_geni_clks_off(struct se_geni_rsc *rsc)
+{
+	return -ENXIO;
+}
+
 static inline int se_geni_resources_on(struct se_geni_rsc *rsc)
 {
 	return -ENXIO;
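A short sketch pairing the new se_geni_clks_on()/se_geni_clks_off() helpers around a brief register access; the access itself is a placeholder. When the GENI driver is not built in, both stubs return -ENXIO, so the error paths below still behave sensibly.

    #include <linux/qcom-geni-se.h>

    /*
     * Illustrative only: enable just the serial-engine clocks (without the
     * full resource vote) for a short register access, then drop them.
     */
    static int example_geni_reg_access(struct se_geni_rsc *rsc)
    {
    	int ret;

    	ret = se_geni_clks_on(rsc);
    	if (ret)
    		return ret;

    	/* ... touch GENI registers / do the short transfer here ... */

    	return se_geni_clks_off(rsc);
    }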
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b7ff73d..0d4035a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -184,6 +184,8 @@
 				     unsigned int *big_max_nr);
 extern unsigned int sched_get_cpu_util(int cpu);
 extern u64 sched_get_cpu_last_busy_time(int cpu);
+extern u32 sched_get_wake_up_idle(struct task_struct *p);
+extern int sched_set_wake_up_idle(struct task_struct *p, int wake_up_idle);
 #else
 static inline void sched_update_nr_prod(int cpu, long delta, bool inc)
 {
@@ -201,6 +203,15 @@
 {
 	return 0;
 }
+static inline u32 sched_get_wake_up_idle(struct task_struct *p)
+{
+	return 0;
+}
+static inline int sched_set_wake_up_idle(struct task_struct *p,
+					 int wake_up_idle)
+{
+	return 0;
+}
 #endif
 
 extern void calc_global_load(unsigned long ticks);
@@ -2699,6 +2710,7 @@
 
 extern int set_cpus_allowed_ptr(struct task_struct *p,
 				const struct cpumask *new_mask);
+extern bool cpupri_check_rt(void);
 #else
 static inline void do_set_cpus_allowed(struct task_struct *p,
 				      const struct cpumask *new_mask)
@@ -2711,6 +2723,10 @@
 		return -EINVAL;
 	return 0;
 }
+static inline bool cpupri_check_rt(void)
+{
+	return false;
+}
 #endif
 
 struct sched_load {
@@ -2719,9 +2735,6 @@
 	unsigned long predicted_load;
 };
 
-extern int sched_set_wake_up_idle(struct task_struct *p, int wake_up_idle);
-extern u32 sched_get_wake_up_idle(struct task_struct *p);
-
 struct cpu_cycle_counter_cb {
 	u64 (*get_cpu_cycle_counter)(int cpu);
 };
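A small, hypothetical sketch of the re-exported wake-up-idle hint; marking a latency-tolerant worker thread is an assumed use case. With the scheduler extension compiled out, both calls above fall back to stubs returning 0, so the sketch becomes a no-op there.

    #include <linux/printk.h>
    #include <linux/sched.h>

    /*
     * Illustrative only: allow wakeups of @p to be placed on idle CPUs and
     * read the setting back for logging.
     */
    static void example_mark_wake_up_idle(struct task_struct *p)
    {
    	if (sched_set_wake_up_idle(p, 1) == 0)
    		pr_debug("%s: wake_up_idle=%u\n", p->comm,
    			 sched_get_wake_up_idle(p));
    }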
diff --git a/include/linux/sde_rsc.h b/include/linux/sde_rsc.h
index 19e76db..cda2654 100644
--- a/include/linux/sde_rsc.h
+++ b/include/linux/sde_rsc.h
@@ -259,7 +259,8 @@
 
 static inline int sde_rsc_client_state_update(struct sde_rsc_client *client,
 	enum sde_rsc_state state,
-	struct sde_rsc_cmd_config *config, int crtc_id)
+	struct sde_rsc_cmd_config *config, int crtc_id,
+	int *wait_vblank_crtc_id)
 {
 	return 0;
 }
diff --git a/include/linux/security.h b/include/linux/security.h
index c2125e9..02e05de 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -30,6 +30,7 @@
 #include <linux/string.h>
 #include <linux/mm.h>
 #include <linux/fs.h>
+#include <linux/bio.h>
 
 struct linux_binprm;
 struct cred;
@@ -256,6 +257,8 @@
 				     const struct qstr *qstr, const char **name,
 				     void **value, size_t *len);
 int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode);
+int security_inode_post_create(struct inode *dir, struct dentry *dentry,
+					umode_t mode);
 int security_inode_link(struct dentry *old_dentry, struct inode *dir,
 			 struct dentry *new_dentry);
 int security_inode_unlink(struct inode *dir, struct dentry *dentry);
@@ -304,6 +307,7 @@
 				 struct fown_struct *fown, int sig);
 int security_file_receive(struct file *file);
 int security_file_open(struct file *file, const struct cred *cred);
+
 int security_task_create(unsigned long clone_flags);
 void security_task_free(struct task_struct *task);
 int security_cred_alloc_blank(struct cred *cred, gfp_t gfp);
@@ -637,6 +641,13 @@
 	return 0;
 }
 
+static inline int security_inode_post_create(struct inode *dir,
+					struct dentry *dentry,
+					umode_t mode)
+{
+	return 0;
+}
+
 static inline int security_inode_link(struct dentry *old_dentry,
 				       struct inode *dir,
 				       struct dentry *new_dentry)
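A hedged illustration of where the new security_inode_post_create() hook would sit relative to the existing pre-create hook; the create helper below is hypothetical and stands in for a VFS/filesystem create path that this patch does not show.

    #include <linux/fs.h>
    #include <linux/security.h>

    /*
     * Illustrative only: after the inode has actually been created, give
     * LSMs a second pass over it (for example to attach a crypto context),
     * mirroring the pre-create security_inode_create() check.
     */
    static int example_create_with_post_hook(struct inode *dir,
    					 struct dentry *dentry, umode_t mode)
    {
    	int err;

    	err = security_inode_create(dir, dentry, mode);
    	if (err)
    		return err;

    	/* ... filesystem-specific inode creation happens here ... */

    	return security_inode_post_create(dir, dentry, mode);
    }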
diff --git a/include/linux/slimbus/slimbus.h b/include/linux/slimbus/slimbus.h
index f1b1a7f..53af941 100644
--- a/include/linux/slimbus/slimbus.h
+++ b/include/linux/slimbus/slimbus.h
@@ -684,6 +684,7 @@
  *	first time it has reported present.
  *  @dev_list: List of devices on a controller
  *  @wd: Work structure associated with workqueue for presence notification
+ *  @device_reset: Work structure for device reset notification
  *  @sldev_reconf: Mutex to protect the pending data-channel lists.
  *  @pending_msgsl: Message bandwidth reservation request by this client in
  *	slots that's pending reconfiguration.
@@ -706,6 +707,7 @@
 	bool			notified;
 	struct list_head	dev_list;
 	struct work_struct	wd;
+	struct work_struct	device_reset;
 	struct mutex		sldev_reconf;
 	u32			pending_msgsl;
 	u32			cur_msgsl;
diff --git a/include/net/cnss_nl.h b/include/net/cnss_nl.h
index 86c2fcc..b8a7cfd 100644
--- a/include/net/cnss_nl.h
+++ b/include/net/cnss_nl.h
@@ -23,12 +23,16 @@
  * @CLD80211_ATTR_VENDOR_DATA: Embed all other attributes in this nested
  *	attribute.
  * @CLD80211_ATTR_DATA: Embed complete data in this attribute
+ * @CLD80211_ATTR_META_DATA: Embed meta data for the above data. This helps
+ * the wlan driver peek into the request message packet without opening up
+ * the definition of the complete request message.
  *
  * Any new message in future can be added as another attribute
  */
 enum cld80211_attr {
 	CLD80211_ATTR_VENDOR_DATA = 1,
 	CLD80211_ATTR_DATA,
+	CLD80211_ATTR_META_DATA,
 	/* add new attributes above here */
 
 	__CLD80211_ATTR_AFTER_LAST,
diff --git a/include/net/cnss_utils.h b/include/net/cnss_utils.h
index 6ff0fd0..77d14d1 100644
--- a/include/net/cnss_utils.h
+++ b/include/net/cnss_utils.h
@@ -33,6 +33,9 @@
 extern void cnss_utils_increment_driver_load_cnt(struct device *dev);
 extern int cnss_utils_set_wlan_mac_address(const u8 *in, uint32_t len);
 extern u8 *cnss_utils_get_wlan_mac_address(struct device *dev, uint32_t *num);
+extern int cnss_utils_set_wlan_derived_mac_address(const u8 *in, uint32_t len);
+extern u8 *cnss_utils_get_wlan_derived_mac_address(struct device *dev,
+							uint32_t *num);
 extern void cnss_utils_set_cc_source(struct device *dev,
 				     enum cnss_utils_cc_src cc_source);
 extern enum cnss_utils_cc_src cnss_utils_get_cc_source(struct device *dev);
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 835c30e..9b6e6a4 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -155,6 +155,7 @@
 		int		header_len;
 		int		trailer_len;
 		u32		extra_flags;
+		u32		output_mark;
 	} props;
 
 	struct xfrm_lifetime_cfg lft;
@@ -284,10 +285,12 @@
 	struct dst_entry	*(*dst_lookup)(struct net *net,
 					       int tos, int oif,
 					       const xfrm_address_t *saddr,
-					       const xfrm_address_t *daddr);
+					       const xfrm_address_t *daddr,
+					       u32 mark);
 	int			(*get_saddr)(struct net *net, int oif,
 					     xfrm_address_t *saddr,
-					     xfrm_address_t *daddr);
+					     xfrm_address_t *daddr,
+					     u32 mark);
 	void			(*decode_session)(struct sk_buff *skb,
 						  struct flowi *fl,
 						  int reverse);
diff --git a/include/soc/qcom/memory_dump.h b/include/soc/qcom/memory_dump.h
index e67ee0e..b4733d7 100644
--- a/include/soc/qcom/memory_dump.h
+++ b/include/soc/qcom/memory_dump.h
@@ -83,6 +83,7 @@
 	MSM_DUMP_DATA_RPM = 0xEA,
 	MSM_DUMP_DATA_SCANDUMP = 0xEB,
 	MSM_DUMP_DATA_RPMH = 0xEC,
+	MSM_DUMP_DATA_FCM = 0xEE,
 	MSM_DUMP_DATA_POWER_REGS = 0xED,
 	MSM_DUMP_DATA_TMC_ETF = 0xF0,
 	MSM_DUMP_DATA_TMC_REG = 0x100,
@@ -122,12 +123,19 @@
 #ifdef CONFIG_QCOM_MEMORY_DUMP_V2
 extern int msm_dump_data_register(enum msm_dump_table_ids id,
 				  struct msm_dump_entry *entry);
+
+extern void *get_msm_dump_ptr(enum msm_dump_data_ids id);
 #else
 static inline int msm_dump_data_register(enum msm_dump_table_ids id,
 					 struct msm_dump_entry *entry)
 {
 	return -EINVAL;
 }
+
+static inline void *get_msm_dump_ptr(enum msm_dump_data_ids id)
+{
+	return NULL;
+}
 #endif
 
 #endif
diff --git a/include/soc/qcom/socinfo.h b/include/soc/qcom/socinfo.h
index 71bd075..9e91e4b 100644
--- a/include/soc/qcom/socinfo.h
+++ b/include/soc/qcom/socinfo.h
@@ -106,6 +106,10 @@
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,qcs605")
 #define early_machine_is_sda670()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sda670")
+#define early_machine_is_msm8953()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8953")
+#define early_machine_is_sdm450()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm450")
 #else
 #define of_board_is_sim()		0
 #define of_board_is_rumi()		0
@@ -148,6 +152,8 @@
 #define early_machine_is_sdm670()	0
 #define early_machine_is_qcs605()	0
 #define early_machine_is_sda670()	0
+#define early_machine_is_msm8953()	0
+#define early_machine_is_sdm450()	0
 #endif
 
 #define PLATFORM_SUBTYPE_MDM	1
@@ -212,6 +218,8 @@
 	MSM_CPU_SDM670,
 	MSM_CPU_QCS605,
 	MSM_CPU_SDA670,
+	MSM_CPU_8953,
+	MSM_CPU_SDM450,
 };
 
 struct msm_soc_info {
diff --git a/include/trace/events/iommu.h b/include/trace/events/iommu.h
index 9652037..255e228 100644
--- a/include/trace/events/iommu.h
+++ b/include/trace/events/iommu.h
@@ -12,8 +12,10 @@
 
 #include <linux/tracepoint.h>
 #include <linux/pci.h>
+#include <linux/iommu.h>
 
 struct device;
+struct iommu_domain;
 
 DECLARE_EVENT_CLASS(iommu_group_event,
 
@@ -85,47 +87,84 @@
 
 TRACE_EVENT(map,
 
-	TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
+	TP_PROTO(struct iommu_domain *domain, unsigned long iova,
+		 phys_addr_t paddr, size_t size, int prot),
 
-	TP_ARGS(iova, paddr, size),
+	TP_ARGS(domain, iova, paddr, size, prot),
 
 	TP_STRUCT__entry(
+		__string(name, domain->name)
 		__field(u64, iova)
 		__field(u64, paddr)
 		__field(size_t, size)
+		__field(int, prot)
 	),
 
 	TP_fast_assign(
+		__assign_str(name, domain->name);
 		__entry->iova = iova;
 		__entry->paddr = paddr;
 		__entry->size = size;
+		__entry->prot = prot;
 	),
 
-	TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=%zu",
-			__entry->iova, __entry->paddr, __entry->size
+	TP_printk("IOMMU:%s iova=0x%016llx paddr=0x%016llx size=0x%zx prot=0x%x",
+			__get_str(name), __entry->iova, __entry->paddr,
+			__entry->size, __entry->prot
 	)
 );
 
 TRACE_EVENT(unmap,
 
-	TP_PROTO(unsigned long iova, size_t size, size_t unmapped_size),
+	TP_PROTO(struct iommu_domain *domain, unsigned long iova, size_t size,
+			size_t unmapped_size),
 
-	TP_ARGS(iova, size, unmapped_size),
+	TP_ARGS(domain, iova, size, unmapped_size),
 
 	TP_STRUCT__entry(
+		__string(name, domain->name)
 		__field(u64, iova)
 		__field(size_t, size)
 		__field(size_t, unmapped_size)
 	),
 
 	TP_fast_assign(
+		__assign_str(name, domain->name);
 		__entry->iova = iova;
 		__entry->size = size;
 		__entry->unmapped_size = unmapped_size;
 	),
 
-	TP_printk("IOMMU: iova=0x%016llx size=%zu unmapped_size=%zu",
-			__entry->iova, __entry->size, __entry->unmapped_size
+	TP_printk("IOMMU:%s iova=0x%016llx size=0x%zx unmapped_size=0x%zx",
+			__get_str(name), __entry->iova, __entry->size,
+			__entry->unmapped_size
+	)
+);
+
+TRACE_EVENT(map_sg,
+
+	TP_PROTO(struct iommu_domain *domain, unsigned long iova, size_t size,
+		int prot),
+
+	TP_ARGS(domain, iova, size, prot),
+
+	TP_STRUCT__entry(
+		__string(name, domain->name)
+		__field(u64, iova)
+		__field(size_t, size)
+		__field(int, prot)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, domain->name);
+		__entry->iova = iova;
+		__entry->size = size;
+		__entry->prot = prot;
+	),
+
+	TP_printk("IOMMU:%s iova=0x%016llx size=0x%zx prot=0x%x",
+			__get_str(name), __entry->iova, __entry->size,
+			__entry->prot
 	)
 );
 
@@ -217,6 +256,24 @@
 
 	TP_ARGS(dev, time)
 );
+
+TRACE_EVENT(smmu_init,
+
+	TP_PROTO(u64 time),
+
+	TP_ARGS(time),
+
+	TP_STRUCT__entry(
+		__field(u64, time)
+	),
+
+	TP_fast_assign(
+		__entry->time = time;
+	),
+
+	TP_printk("ARM SMMU init latency: %lld us", __entry->time)
+);
+
 #endif /* _TRACE_IOMMU_H */
 
 /* This part must be outside protection */
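A minimal sketch of emitting the reworked tracepoints with their new iommu_domain argument; the wrapper function is hypothetical and only demonstrates the updated signatures (domain first, plus prot for map/map_sg).

    #include <linux/iommu.h>
    #include <trace/events/iommu.h>

    /*
     * Illustrative only: record a map, an sg map and a full unmap against
     * the same domain so its name shows up in the trace output.
     */
    static void example_trace_iommu_ops(struct iommu_domain *domain,
    				    unsigned long iova, phys_addr_t paddr,
    				    size_t size, int prot)
    {
    	trace_map(domain, iova, paddr, size, prot);
    	trace_map_sg(domain, iova, size, prot);
    	trace_unmap(domain, iova, size, size);
    }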
diff --git a/include/trace/events/ufs.h b/include/trace/events/ufs.h
index 6dc4735..aaa5cf7 100644
--- a/include/trace/events/ufs.h
+++ b/include/trace/events/ufs.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2015, 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -225,10 +225,10 @@
 	),
 
 	TP_printk(
-		"%s: %s: tag: %u, DB: 0x%x, size: %d, IS: %u, LBA: %llu, opcode: 0x%x",
-		__get_str(str), __get_str(dev_name), __entry->tag,
-		__entry->doorbell, __entry->transfer_len,
-		__entry->intr, __entry->lba, (u32)__entry->opcode
+		"%s: %14s: tag: %-2u cmd: 0x%-2x lba: %-9llu size: %-7d DB: 0x%-8x IS: 0x%x",
+		__get_str(dev_name), __get_str(str), __entry->tag,
+		(u32)__entry->opcode, __entry->lba, __entry->transfer_len,
+		__entry->doorbell, __entry->intr
 	)
 );
 
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index d5438d3..6f33a4a 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -61,6 +61,44 @@
 	__s64 tv_nsec;         /* nanoseconds */
 };
 
+/*
+ * HDR Metadata
+ * These are defined as per EDID spec and shall be used by the sink
+ * to set the HDR metadata for playback from userspace.
+ */
+
+#define HDR_PRIMARIES_COUNT   3
+
+#define DRM_MSM_EXT_HDR_METADATA
+struct drm_msm_ext_hdr_metadata {
+	__u32 hdr_state;        /* HDR state */
+	__u32 eotf;             /* electro optical transfer function */
+	__u32 hdr_supported;    /* HDR supported */
+	__u32 display_primaries_x[HDR_PRIMARIES_COUNT]; /* Primaries x */
+	__u32 display_primaries_y[HDR_PRIMARIES_COUNT]; /* Primaries y */
+	__u32 white_point_x;    /* white_point_x */
+	__u32 white_point_y;    /* white_point_y */
+	__u32 max_luminance;    /* Max luminance */
+	__u32 min_luminance;    /* Min Luminance */
+	__u32 max_content_light_level; /* max content light level */
+	__u32 max_average_light_level; /* max average light level */
+};
+
+/**
+ * HDR sink properties
+ * These are defined as per EDID spec and shall be used by the userspace
+ * to determine the HDR properties to be set to the sink.
+ */
+#define DRM_MSM_EXT_HDR_PROPERTIES
+struct drm_msm_ext_hdr_properties {
+	__u8 hdr_metadata_type_one;   /* static metadata type one */
+	__u32 hdr_supported;          /* HDR supported */
+	__u32 hdr_eotf;               /* electro optical transfer function */
+	__u32 hdr_max_luminance;      /* Max luminance */
+	__u32 hdr_avg_luminance;      /* Avg luminance */
+	__u32 hdr_min_luminance;      /* Min Luminance */
+};
+
 #define MSM_PARAM_GPU_ID     0x01
 #define MSM_PARAM_GMEM_SIZE  0x02
 #define MSM_PARAM_CHIP_ID    0x03
diff --git a/include/uapi/linux/esoc_ctrl.h b/include/uapi/linux/esoc_ctrl.h
index e2bd12b..45f3222 100644
--- a/include/uapi/linux/esoc_ctrl.h
+++ b/include/uapi/linux/esoc_ctrl.h
@@ -18,6 +18,7 @@
 #define HSIC		"HSIC"
 #define HSICPCIe	"HSIC+PCIe"
 #define PCIe		"PCIe"
+#define ESOC_REQ_SEND_SHUTDOWN	ESOC_REQ_SEND_SHUTDOWN
 
 enum esoc_evt {
 	ESOC_RUN_STATE = 0x1,
@@ -58,6 +59,7 @@
 	ESOC_REQ_IMG = 1,
 	ESOC_REQ_DEBUG,
 	ESOC_REQ_SHUTDOWN,
+	ESOC_REQ_SEND_SHUTDOWN,
 };
 
 #ifdef __KERNEL__
diff --git a/include/uapi/linux/ipa_qmi_service_v01.h b/include/uapi/linux/ipa_qmi_service_v01.h
index 1ba819b..1917c0d 100644
--- a/include/uapi/linux/ipa_qmi_service_v01.h
+++ b/include/uapi/linux/ipa_qmi_service_v01.h
@@ -47,6 +47,12 @@
 #define QMI_IPA_MAX_FILTERS_EX_V01 128
 #define QMI_IPA_MAX_PIPES_V01 20
 #define QMI_IPA_MAX_APN_V01 8
+#define QMI_IPA_MAX_PER_CLIENTS_V01 64
+/* Currently the maximum we can use is only 1, but for scalability
+ * the maximum value is defined as 8.
+ */
+#define QMI_IPA_MAX_CLIENT_DST_PIPES_V01 8
+#define QMI_IPA_MAX_UL_FIREWALL_RULES_V01 64
 
 #define IPA_INT_MAX	((int)(~0U>>1))
 #define IPA_INT_MIN	(-IPA_INT_MAX - 1)
@@ -989,6 +995,16 @@
 	 *	failure, the Rule Ids in this list must be set to a reserved
 	 *	index (255).
 	 */
+
+	/* Optional */
+	/*	List of destination pipe IDs. */
+	uint8_t dst_pipe_id_valid;
+	/* Must be set to true if dst_pipe_id is being passed. */
+	uint32_t dst_pipe_id_len;
+	/* Must be set to # of elements in dst_pipe_id. */
+	uint32_t dst_pipe_id[QMI_IPA_MAX_CLIENT_DST_PIPES_V01];
+	/* Provides the list of destination pipe IDs for a source pipe. */
+
 };  /* Message */
 
 /* Response Message; This is the message that is exchanged between the
@@ -1626,6 +1642,273 @@
 	 */
 };  /* Message */
 
+/*
+ * Request Message; Requests the modem IPA driver to enable or
+ * disable collection of per client statistics.
+ */
+struct ipa_enable_per_client_stats_req_msg_v01 {
+
+	/* Mandatory */
+	/* Collect statistics per client; */
+	uint8_t enable_per_client_stats;
+	/*
+	 * Indicates whether to start or stop collecting
+	 * per client statistics.
+	 */
+};  /* Message */
+
+/*
+ * Response Message; Requests the modem IPA driver to enable or disable
+ * collection of per client statistics.
+ */
+struct ipa_enable_per_client_stats_resp_msg_v01 {
+
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type. */
+};  /* Message */
+
+struct ipa_per_client_stats_info_type_v01 {
+
+	uint32_t client_id;
+	/*
+	 * Id of the client on APPS processor side for which Modem processor
+	 * needs to send uplink/downlink statistics.
+	 */
+
+	uint32_t src_pipe_id;
+	/*
+	 * IPA consumer pipe on which client on APPS side sent uplink
+	 * data to modem.
+	 */
+
+	uint64_t num_ul_ipv4_bytes;
+	/*
+	 * Accumulated number of uplink IPv4 bytes for a client.
+	 */
+
+	uint64_t num_ul_ipv6_bytes;
+	/*
+	 * Accumulated number of uplink IPv6 bytes for a client.
+	 */
+
+	uint64_t num_dl_ipv4_bytes;
+	/*
+	 * Accumulated number of downlink IPv4 bytes for a client.
+	 */
+
+	uint64_t num_dl_ipv6_bytes;
+	/*
+	 * Accumulated number of downlink IPv6 bytes for a client.
+	 */
+
+
+	uint32_t num_ul_ipv4_pkts;
+	/*
+	 * Accumulated number of uplink IPv4 packets for a client.
+	 */
+
+	uint32_t num_ul_ipv6_pkts;
+	/*
+	 * Accumulated number of uplink IPv6 packets for a client.
+	 */
+
+	uint32_t num_dl_ipv4_pkts;
+	/*
+	 * Accumulated number of downlink IPv4 packets for a client.
+	 */
+
+	uint32_t num_dl_ipv6_pkts;
+	/*
+	 * Accumulated number of downlink IPv6 packets for a client.
+	 */
+};  /* Type */
+
+/*
+ * Request Message; Requests the modem IPA driver to provide statistics
+ * for a given client.
+ */
+struct ipa_get_stats_per_client_req_msg_v01 {
+
+	/* Mandatory */
+	/*  Client id */
+	uint32_t client_id;
+	/*
+	 * Id of the client on APPS processor side for which Modem processor
+	 * needs to send uplink/downlink statistics. If the client id is specified
+	 * as 0xffffffff, then Q6 will send the stats for all the clients of
+	 * the specified source pipe.
+	 */
+
+	/* Mandatory */
+	/*  Source pipe id */
+	uint32_t src_pipe_id;
+	/*
+	 * IPA consumer pipe on which client on APPS side sent uplink
+	 * data to modem. In future, this implementation can be extended
+	 * to provide 0xffffffff as the source pipe id, where Q6 will send
+	 * the stats of all the clients across all different tethered-pipes.
+	 */
+
+	/* Optional */
+	/*  Reset client statistics. */
+	uint8_t reset_stats_valid;
+	/* Must be set to true if reset_stats is being passed. */
+	uint8_t reset_stats;
+	/*
+	 * Option to reset the statistics currently collected by modem for this
+	 * particular client.
+	 */
+};  /* Message */
+
+/*
+ * Response Message; Requests the modem IPA driver to provide statistics
+ * for a given client.
+ */
+struct ipa_get_stats_per_client_resp_msg_v01 {
+
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type. */
+
+	/* Optional */
+	/*  Per clients Statistics List */
+	uint8_t per_client_stats_list_valid;
+	/* Must be set to true if per_client_stats_list is being passed. */
+	uint32_t per_client_stats_list_len;
+	/* Must be set to # of elements in per_client_stats_list. */
+	struct ipa_per_client_stats_info_type_v01
+		per_client_stats_list[QMI_IPA_MAX_PER_CLIENTS_V01];
+	/*
+	 * List of all per client statistics that are retrieved.
+	 */
+};  /* Message */
+
+struct ipa_ul_firewall_rule_type_v01 {
+
+	enum ipa_ip_type_enum_v01 ip_type;
+	/*
+	 * IP type for which this rule is applicable.
+	 * The driver must identify the filter table (v6 or v4), and this
+	 * field is essential for that. Values:
+	 * - QMI_IPA_IP_TYPE_INVALID (0) --  Invalid IP type identifier
+	 * - QMI_IPA_IP_TYPE_V4 (1) --  IPv4 type
+	 * - QMI_IPA_IP_TYPE_V6 (2) --  IPv6 type
+	 */
+
+	struct ipa_filter_rule_type_v01 filter_rule;
+	/*
+	 * Rules in the filter specification. These rules are the
+	 * ones that are matched against fields in the packet.
+	 * Currently we only send IPv6 whitelist rules to Q6.
+	 */
+};  /* Type */
+
+/*
+ * Request Message; Requests the remote IPA driver to install uplink
+ * firewall rules.
+ */
+struct ipa_configure_ul_firewall_rules_req_msg_v01 {
+
+	/* Optional */
+	/*  Uplink Firewall Specification  */
+	uint32_t firewall_rules_list_len;
+	/* Must be set to # of elements in firewall_rules_list. */
+	struct ipa_ul_firewall_rule_type_v01
+		firewall_rules_list[QMI_IPA_MAX_UL_FIREWALL_RULES_V01];
+	/*
+	 * List of uplink firewall specifications of filters that must be
+	 * installed.
+	 */
+
+	uint32_t mux_id;
+	/*
+	 * QMAP Mux ID. As a part of the QMAP protocol,
+	 * several data calls may be multiplexed over the same physical
+	 * transport channel. This identifier is used to identify one
+	 * such data call. The maximum value for this identifier is 255.
+	 */
+
+	/* Optional */
+	uint8_t disable_valid;
+	/* Must be set to true if disable is being passed. */
+	uint8_t disable;
+	/*
+	 * Indicates whether uplink firewall needs to be enabled or disabled.
+	 */
+
+	/* Optional */
+	uint8_t are_blacklist_filters_valid;
+	/* Must be set to true if are_blacklist_filters is being passed. */
+	uint8_t are_blacklist_filters;
+	/*
+	 * Indicates whether the filters received as part of this message are
+	 * blacklist filters, i.e. uplink packets matching these rules are dropped.
+	 */
+};  /* Message */
+
+/*
+ * Response Message; Requests the remote IPA driver to install
+ * uplink firewall rules.
+ */
+struct ipa_configure_ul_firewall_rules_resp_msg_v01 {
+
+	/* Mandatory */
+	/* Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/*
+	 * Standard response type. Contains the following data members:
+	 * qmi_result_type -- QMI_RESULT_SUCCESS or QMI_RESULT_FAILURE
+	 * qmi_error_type  -- Error code. Possible error code values are
+	 * described in the error codes section of each message definition.
+	 */
+};  /* Message */
+
+enum ipa_ul_firewall_status_enum_v01 {
+	IPA_UL_FIREWALL_STATUS_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+	/* To force a 32 bit signed enum.  Do not change or use*/
+	QMI_IPA_UL_FIREWALL_STATUS_SUCCESS_V01 = 0,
+	/* Indicates that the uplink firewall rules
+	 * are configured successfully.
+	 */
+	QMI_IPA_UL_FIREWALL_STATUS_FAILURE_V01 = 1,
+	/* Indicates that the uplink firewall rules
+	 * are not configured successfully.
+	 */
+	IPA_UL_FIREWALL_STATUS_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+	/* To force a 32 bit signed enum.  Do not change or use*/
+};
+
+struct ipa_ul_firewall_config_result_type_v01 {
+
+	enum ipa_ul_firewall_status_enum_v01 is_success;
+	/*
+	 * Indicates whether the uplink firewall rules are configured
+	 * successfully.
+	 */
+
+	uint32_t mux_id;
+	/*
+	 * QMAP Mux ID. As a part of the QMAP protocol,
+	 * several data calls may be multiplexed over the same physical
+	 * transport channel. This identifier is used to identify one
+	 * such data call. The maximum value for this identifier is 255.
+	 */
+};
+
+/*
+ * Indication Message; Reports the result of installing the uplink
+ * firewall rules.
+ */
+struct ipa_configure_ul_firewall_rules_ind_msg_v01 {
+
+	 struct ipa_ul_firewall_config_result_type_v01 result;
+};  /* Message */
+
+
 /*Service Message Definition*/
 #define QMI_IPA_INDICATION_REGISTER_REQ_V01 0x0020
 #define QMI_IPA_INDICATION_REGISTER_RESP_V01 0x0020
@@ -1659,6 +1942,13 @@
 #define QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_V01 0x0035
 #define QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_V01 0x0037
 #define QMI_IPA_INSTALL_FILTER_RULE_EX_RESP_V01 0x0037
+#define QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_V01 0x0038
+#define QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_V01 0x0038
+#define QMI_IPA_GET_STATS_PER_CLIENT_REQ_V01 0x0039
+#define QMI_IPA_GET_STATS_PER_CLIENT_RESP_V01 0x0039
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_V01 0x003A
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_RESP_V01 0x003A
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_V01 0x003A
 
 /* add for max length*/
 #define QMI_IPA_INIT_MODEM_DRIVER_REQ_MAX_MSG_LEN_V01 134
@@ -1667,7 +1957,7 @@
 #define QMI_IPA_INDICATION_REGISTER_RESP_MAX_MSG_LEN_V01 7
 #define QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01 22369
 #define QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01 783
-#define QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01 834
+#define QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01 870
 #define QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01 7
 #define QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_MAX_MSG_LEN_V01 7
 #define QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_MAX_MSG_LEN_V01 15
@@ -1700,6 +1990,15 @@
 #define QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_MAX_MSG_LEN_V01 22685
 #define QMI_IPA_INSTALL_FILTER_RULE_EX_RESP_MAX_MSG_LEN_V01 523
 
+#define QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_MAX_MSG_LEN_V01 4
+#define QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_MAX_MSG_LEN_V01 7
+
+#define QMI_IPA_GET_STATS_PER_CLIENT_REQ_MAX_MSG_LEN_V01 18
+#define QMI_IPA_GET_STATS_PER_CLIENT_RESP_MAX_MSG_LEN_V01 3595
+
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_MAX_MSG_LEN_V01 9875
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_RESP_MAX_MSG_LEN_V01 7
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_MAX_MSG_LEN_V01 11
 /* Service Object Accessor */
 
 #endif/* IPA_QMI_SERVICE_V01_H */
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index 97eca0a..d3b9a33 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -127,6 +127,17 @@
 #define IPA_WAN_MSG_IPv6_ADDR_GW_LEN 4
 
 /**
+ * max number of lan clients supported per device type
+ * for LAN stats via HW.
+ */
+#define IPA_MAX_NUM_HW_PATH_CLIENTS 16
+
+/**
+ * max number of destination pipes possible for a client.
+ */
+#define QMI_IPA_MAX_CLIENT_DST_PIPES 4
+
+/**
  * the attributes of the rule (routing or filtering)
  */
 #define IPA_FLT_TOS			(1ul << 0)
@@ -470,6 +481,7 @@
 	IPA_TETHERING_STATS_EVENT_MAX,
 };
 
+
 enum ipa_quota_event {
 	IPA_QUOTA_REACH = IPA_TETHERING_STATS_EVENT_MAX,
 	IPA_QUOTA_EVENT_MAX,
@@ -489,7 +501,13 @@
 	IPA_VLAN_L2TP_EVENT_MAX,
 };
 
-#define IPA_EVENT_MAX_NUM (IPA_VLAN_L2TP_EVENT_MAX)
+enum ipa_per_client_stats_event {
+	IPA_PER_CLIENT_STATS_CONNECT_EVENT = IPA_VLAN_L2TP_EVENT_MAX,
+	IPA_PER_CLIENT_STATS_DISCONNECT_EVENT,
+	IPA_PER_CLIENT_STATS_EVENT_MAX
+};
+
+#define IPA_EVENT_MAX_NUM (IPA_PER_CLIENT_STATS_EVENT_MAX)
 #define IPA_EVENT_MAX ((int)IPA_EVENT_MAX_NUM)
 
 /**
@@ -1175,6 +1193,48 @@
 };
 
 /**
+ * struct ipa_rt_rule_add_ext - routing rule descriptor includes in
+ * and out parameters
+ * @rule: actual rule to be added
+ * @at_rear:	add at back of routing table, it is NOT possible to add rules at
+ *		the rear of the "default" routing tables
+ * @rt_rule_hdl: output parameter, handle to rule, valid when status is 0
+ * @status:	output parameter, status of routing rule add operation,
+ *		0 for success,
+ *		-1 for failure
+ * @rule_id: rule_id to be assigned to the routing rule. In case client
+ *  specifies rule_id as 0 the driver will assign a new rule_id
+ */
+struct ipa_rt_rule_add_ext {
+	struct ipa_rt_rule rule;
+	uint8_t at_rear;
+	uint32_t rt_rule_hdl;
+	int status;
+	uint16_t rule_id;
+};
+
+/**
+ * struct ipa_ioc_add_rt_rule - routing rule addition parameters (supports
+ * multiple rules and commit with rule_id);
+ *
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @rt_tbl_name: name of routing table resource
+ * @num_rules: number of routing rules that follow
+ * @ipa_rt_rule_add_ext rules: all rules need to go back to back here,
+ *  no pointers
+ */
+struct ipa_ioc_add_rt_rule_ext {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	char rt_tbl_name[IPA_RESOURCE_NAME_MAX];
+	uint8_t num_rules;
+	struct ipa_rt_rule_add_ext rules[0];
+};
+
+
+/**
  * struct ipa_ioc_del_rt_rule - routing rule deletion parameters (supports
  * multiple headers and commit)
  * @commit: should rules be removed from IPA HW also?
@@ -1733,6 +1793,52 @@
 	IPACM_CLIENT_WLAN,
 	IPACM_CLIENT_MAX
 };
+
+enum ipacm_per_client_device_type {
+	IPACM_CLIENT_DEVICE_TYPE_USB = 0,
+	IPACM_CLIENT_DEVICE_TYPE_WLAN = 1,
+	IPACM_CLIENT_DEVICE_TYPE_ETH = 2
+};
+
+/**
+ * max number of device types supported.
+ */
+#define IPACM_MAX_CLIENT_DEVICE_TYPES 3
+
+/**
+ * @lanIface - Name of the lan interface
+ * @mac: Mac address of the client.
+ */
+struct ipa_lan_client_msg {
+	char lanIface[IPA_RESOURCE_NAME_MAX];
+	uint8_t mac[IPA_MAC_ADDR_SIZE];
+};
+
+/**
+ * struct ipa_lan_client - lan client data
+ * @mac: MAC Address of the client.
+ * @client_idx: Client Index.
+ * @inited: Bool to indicate whether client info is set.
+ */
+struct ipa_lan_client {
+	uint8_t mac[IPA_MAC_ADDR_SIZE];
+	int8_t client_idx;
+	uint8_t inited;
+};
+
+/**
+ * struct ipa_tether_device_info - tether device info indicated from IPACM
+ * @ul_src_pipe: Source pipe of the lan client.
+ * @hdr_len: Header length of the client.
+ * @num_clients: Number of clients connected.
+ */
+struct ipa_tether_device_info {
+	int32_t ul_src_pipe;
+	uint8_t hdr_len;
+	uint32_t num_clients;
+	struct ipa_lan_client lan_client[IPA_MAX_NUM_HW_PATH_CLIENTS];
+};
+
 /**
  *   actual IOCTLs supported by IPA driver
  */
@@ -1745,6 +1851,9 @@
 #define IPA_IOC_ADD_RT_RULE _IOWR(IPA_IOC_MAGIC, \
 					IPA_IOCTL_ADD_RT_RULE, \
 					struct ipa_ioc_add_rt_rule *)
+#define IPA_IOC_ADD_RT_RULE_EXT _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_RT_RULE_EXT, \
+					struct ipa_ioc_add_rt_rule_ext *)
 #define IPA_IOC_ADD_RT_RULE_AFTER _IOWR(IPA_IOC_MAGIC, \
 					IPA_IOCTL_ADD_RT_RULE_AFTER, \
 					struct ipa_ioc_add_rt_rule_after *)
diff --git a/include/uapi/linux/rmnet_ipa_fd_ioctl.h b/include/uapi/linux/rmnet_ipa_fd_ioctl.h
index 04aaaad..2992e2c 100644
--- a/include/uapi/linux/rmnet_ipa_fd_ioctl.h
+++ b/include/uapi/linux/rmnet_ipa_fd_ioctl.h
@@ -34,6 +34,12 @@
 #define WAN_IOCTL_ADD_FLT_RULE_EX        9
 #define WAN_IOCTL_QUERY_TETHER_STATS_ALL  10
 #define WAN_IOCTL_NOTIFY_WAN_STATE  11
+#define WAN_IOCTL_ADD_UL_FLT_RULE          12
+#define WAN_IOCTL_ENABLE_PER_CLIENT_STATS    13
+#define WAN_IOCTL_QUERY_PER_CLIENT_STATS     14
+#define WAN_IOCTL_SET_LAN_CLIENT_INFO        15
+#define WAN_IOCTL_CLEAR_LAN_CLIENT_INFO      16
+#define WAN_IOCTL_SEND_LAN_CLIENT_MSG        17
 
 /* User space may not have this defined. */
 #ifndef IFNAMSIZ
@@ -130,6 +136,56 @@
 struct wan_ioctl_notify_wan_state {
 	uint8_t up;
 };
+struct wan_ioctl_send_lan_client_msg {
+	/* Lan client info. */
+	struct ipa_lan_client_msg lan_client;
+	/* Event to indicate whether client is
+	 * connected or disconnected.
+	 */
+	enum ipa_per_client_stats_event client_event;
+};
+
+struct wan_ioctl_lan_client_info {
+	/* Device type of the client. */
+	enum ipacm_per_client_device_type device_type;
+	/* MAC Address of the client. */
+	uint8_t mac[IPA_MAC_ADDR_SIZE];
+	/* Init client. */
+	uint8_t client_init;
+	/* Client Index */
+	int8_t client_idx;
+	/* Header length of the client. */
+	uint8_t hdr_len;
+	/* Source pipe of the lan client. */
+	enum ipa_client_type ul_src_pipe;
+};
+
+struct wan_ioctl_per_client_info {
+	/* MAC Address of the client. */
+	uint8_t mac[IPA_MAC_ADDR_SIZE];
+	/* Ipv4 UL traffic bytes. */
+	uint64_t ipv4_tx_bytes;
+	/* Ipv4 DL traffic bytes. */
+	uint64_t ipv4_rx_bytes;
+	/* Ipv6 UL traffic bytes. */
+	uint64_t ipv6_tx_bytes;
+	/* Ipv6 DL traffic bytes. */
+	uint64_t ipv6_rx_bytes;
+};
+
+struct wan_ioctl_query_per_client_stats {
+	/* Device type of the client. */
+	enum ipacm_per_client_device_type device_type;
+	/* Indicate whether to reset the stats (use 1) or not */
+	uint8_t reset_stats;
+	/* Indicates whether client is disconnected. */
+	uint8_t disconnect_clnt;
+	/* Number of clients. */
+	uint8_t num_clients;
+	/* Client information. */
+	struct wan_ioctl_per_client_info
+		client_info[IPA_MAX_NUM_HW_PATH_CLIENTS];
+};
 
 #define WAN_IOC_ADD_FLT_RULE _IOWR(WAN_IOC_MAGIC, \
 		WAN_IOCTL_ADD_FLT_RULE, \
@@ -179,4 +235,27 @@
 		WAN_IOCTL_NOTIFY_WAN_STATE, \
 		struct wan_ioctl_notify_wan_state *)
 
+#define WAN_IOC_ADD_UL_FLT_RULE _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_ADD_UL_FLT_RULE, \
+		struct ipa_configure_ul_firewall_rules_req_msg_v01 *)
+
+#define WAN_IOC_ENABLE_PER_CLIENT_STATS _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_ENABLE_PER_CLIENT_STATS, \
+		bool *)
+
+#define WAN_IOC_QUERY_PER_CLIENT_STATS _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_QUERY_PER_CLIENT_STATS, \
+		struct wan_ioctl_query_per_client_stats *)
+
+#define WAN_IOC_SET_LAN_CLIENT_INFO _IOWR(WAN_IOC_MAGIC, \
+			WAN_IOCTL_SET_LAN_CLIENT_INFO, \
+			struct wan_ioctl_lan_client_info *)
+
+#define WAN_IOC_SEND_LAN_CLIENT_MSG _IOWR(WAN_IOC_MAGIC, \
+				WAN_IOCTL_SEND_LAN_CLIENT_MSG, \
+				struct wan_ioctl_send_lan_client_msg *)
+
+#define WAN_IOC_CLEAR_LAN_CLIENT_INFO _IOWR(WAN_IOC_MAGIC, \
+			WAN_IOCTL_CLEAR_LAN_CLIENT_INFO, \
+			struct wan_ioctl_lan_client_info *)
 #endif /* _RMNET_IPA_FD_IOCTL_H */
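For reference, a minimal userspace sketch of how the new per-client stats interface above might be driven. The device node path, the installed header paths, and whether the caller or the driver fills num_clients are assumptions, not part of this change:

/* Hedged sketch: query per-client byte counters via the new ioctl.
 * "/dev/wwan_ioctl" and the header include paths are assumptions.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/msm_ipa.h>		/* assumed install path */
#include <linux/rmnet_ipa_fd_ioctl.h>	/* assumed install path */

static int query_client_stats(int fd, const uint8_t mac[IPA_MAC_ADDR_SIZE])
{
	struct wan_ioctl_query_per_client_stats stats;

	memset(&stats, 0, sizeof(stats));
	stats.device_type = IPACM_CLIENT_DEVICE_TYPE_WLAN;
	stats.reset_stats = 0;		/* keep counters running */
	stats.num_clients = 1;		/* assumed to be caller-provided */
	memcpy(stats.client_info[0].mac, mac, IPA_MAC_ADDR_SIZE);

	if (ioctl(fd, WAN_IOC_QUERY_PER_CLIENT_STATS, &stats) < 0)
		return -1;

	printf("ul bytes: %llu, dl bytes: %llu\n",
	       (unsigned long long)(stats.client_info[0].ipv4_tx_bytes +
				    stats.client_info[0].ipv6_tx_bytes),
	       (unsigned long long)(stats.client_info[0].ipv4_rx_bytes +
				    stats.client_info[0].ipv6_rx_bytes));
	return 0;
}

int main(void)
{
	uint8_t mac[IPA_MAC_ADDR_SIZE] = { 0 };
	int fd = open("/dev/wwan_ioctl", O_RDWR);	/* node name assumed */

	if (fd < 0)
		return 1;
	query_client_stats(fd, mac);
	close(fd);
	return 0;
}
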
diff --git a/include/uapi/linux/spi/spidev.h b/include/uapi/linux/spi/spidev.h
index dd5f21e..856de39 100644
--- a/include/uapi/linux/spi/spidev.h
+++ b/include/uapi/linux/spi/spidev.h
@@ -23,6 +23,7 @@
 #define SPIDEV_H
 
 #include <linux/types.h>
+#include <linux/ioctl.h>
 
 /* User space versions of kernel symbols for SPI clocking modes,
  * matching <linux/spi/spi.h>
diff --git a/include/uapi/linux/time.h b/include/uapi/linux/time.h
index 7fe799e..e75e1b6 100644
--- a/include/uapi/linux/time.h
+++ b/include/uapi/linux/time.h
@@ -56,7 +56,6 @@
 #define CLOCK_BOOTTIME_ALARM		9
 #define CLOCK_SGI_CYCLE			10	/* Hardware specific */
 #define CLOCK_TAI			11
-#define CLOCK_POWEROFF_ALARM		12
 
 #define MAX_CLOCKS			16
 #define CLOCKS_MASK			(CLOCK_REALTIME | CLOCK_MONOTONIC)
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index 1fc62b2..7d75e56 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -303,6 +303,8 @@
 	XFRMA_PROTO,		/* __u8 */
 	XFRMA_ADDRESS_FILTER,	/* struct xfrm_address_filter */
 	XFRMA_PAD,
+	XFRMA_OFFLOAD_DEV,	/* struct xfrm_state_offload */
+	XFRMA_OUTPUT_MARK,	/* __u32 */
 	__XFRMA_MAX
 
 #define XFRMA_MAX (__XFRMA_MAX - 1)
diff --git a/include/uapi/media/Kbuild b/include/uapi/media/Kbuild
index e72a1f0..1e087a1 100644
--- a/include/uapi/media/Kbuild
+++ b/include/uapi/media/Kbuild
@@ -14,3 +14,4 @@
 header-y += msm_sde_rotator.h
 header-y += radio-iris.h
 header-y += radio-iris-commands.h
+header-y += cam_lrme.h
diff --git a/include/uapi/media/cam_isp.h b/include/uapi/media/cam_isp.h
index 05c9283..4a63292 100644
--- a/include/uapi/media/cam_isp.h
+++ b/include/uapi/media/cam_isp.h
@@ -74,8 +74,9 @@
 #define CAM_ISP_PACKET_META_CLOCK               7
 #define CAM_ISP_PACKET_META_CSID                8
 #define CAM_ISP_PACKET_META_DUAL_CONFIG         9
-#define CAM_ISP_PACKET_META_GENERIC_BLOB        10
-#define CAM_ISP_PACKET_META_MAX                 11
+#define CAM_ISP_PACKET_META_GENERIC_BLOB_LEFT   10
+#define CAM_ISP_PACKET_META_GENERIC_BLOB_RIGHT  11
+#define CAM_ISP_PACKET_META_GENERIC_BLOB_COMMON 12
 
 /* DSP mode */
 #define CAM_ISP_DSP_MODE_NONE                   0
@@ -252,14 +253,14 @@
 /**
  * struct cam_isp_resource_hfr_config - Resource HFR configuration
  *
- * @num_io_configs:             Number of ports
+ * @num_ports:                  Number of ports
  * @reserved:                   Reserved for alignment
- * @io_hfr_config:              HFR configuration for each IO port
+ * @port_hfr_config:            HFR configuration for each IO port
  */
 struct cam_isp_resource_hfr_config {
-	uint32_t                       num_io_configs;
+	uint32_t                       num_ports;
 	uint32_t                       reserved;
-	struct cam_isp_port_hfr_config io_hfr_config[1];
+	struct cam_isp_port_hfr_config port_hfr_config[1];
 };
 
 /**
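Since port_hfr_config is declared as a one-element trailing array, a caller sizes the whole blob for the real port count. A small sketch under that assumption; the allocation style and the <media/cam_isp.h> include path are assumptions, not taken from this change:

/* Hedged sketch: allocate a cam_isp_resource_hfr_config blob for n
 * ports, using the renamed num_ports/port_hfr_config fields.
 */
#include <stdint.h>
#include <stdlib.h>
#include <media/cam_isp.h>	/* assumed install path */

static struct cam_isp_resource_hfr_config *alloc_hfr_config(uint32_t n)
{
	size_t sz = sizeof(struct cam_isp_resource_hfr_config) +
		    (n - 1) * sizeof(struct cam_isp_port_hfr_config);
	struct cam_isp_resource_hfr_config *cfg = calloc(1, sz);

	if (cfg)
		cfg->num_ports = n;	/* was num_io_configs */
	return cfg;
}
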
diff --git a/include/uapi/media/cam_lrme.h b/include/uapi/media/cam_lrme.h
new file mode 100644
index 0000000..97d9578
--- /dev/null
+++ b/include/uapi/media/cam_lrme.h
@@ -0,0 +1,65 @@
+#ifndef __UAPI_CAM_LRME_H__
+#define __UAPI_CAM_LRME_H__
+
+#include "cam_defs.h"
+
+/* LRME Resource Types */
+
+enum CAM_LRME_IO_TYPE {
+	CAM_LRME_IO_TYPE_TAR,
+	CAM_LRME_IO_TYPE_REF,
+	CAM_LRME_IO_TYPE_RES,
+	CAM_LRME_IO_TYPE_DS2,
+};
+
+#define CAM_LRME_INPUT_PORT_TYPE_TAR (1 << 0)
+#define CAM_LRME_INPUT_PORT_TYPE_REF (1 << 1)
+
+#define CAM_LRME_OUTPUT_PORT_TYPE_DS2 (1 << 0)
+#define CAM_LRME_OUTPUT_PORT_TYPE_RES (1 << 1)
+
+#define CAM_LRME_DEV_MAX 1
+
+
+struct cam_lrme_hw_version {
+	uint32_t gen;
+	uint32_t rev;
+	uint32_t step;
+};
+
+struct cam_lrme_dev_cap {
+	struct cam_lrme_hw_version clc_hw_version;
+	struct cam_lrme_hw_version bus_rd_hw_version;
+	struct cam_lrme_hw_version bus_wr_hw_version;
+	struct cam_lrme_hw_version top_hw_version;
+	struct cam_lrme_hw_version top_titan_version;
+};
+
+/**
+ * struct cam_lrme_query_cap_cmd - LRME query device capability payload
+ *
+ * @device_iommu: LRME IOMMU handles for secure/non-secure modes
+ * @cdm_iommu: CDM IOMMU handles for secure/non-secure modes
+ * @num_devices: Number of hardware devices
+ * @dev_caps: Returned device capability array
+ */
+struct cam_lrme_query_cap_cmd {
+	struct cam_iommu_handle device_iommu;
+	struct cam_iommu_handle cdm_iommu;
+	uint32_t num_devices;
+	struct cam_lrme_dev_cap dev_caps[CAM_LRME_DEV_MAX];
+};
+
+struct cam_lrme_soc_info {
+	uint64_t clock_rate;
+	uint64_t bandwidth;
+	uint64_t reserved[4];
+};
+
+struct cam_lrme_acquire_args {
+	struct cam_lrme_soc_info lrme_soc_info;
+};
+
+#endif /* __UAPI_CAM_LRME_H__ */
+
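A short sketch of how userspace might read back the capability data defined in the new header above. How the struct is actually filled (the camera request manager query path) is outside this header and is assumed here:

/* Hedged sketch: print the hardware versions reported in a filled
 * cam_lrme_query_cap_cmd.  Filling it is assumed and not shown.
 */
#include <stdio.h>
#include <media/cam_lrme.h>	/* assumed install path */

static void print_lrme_caps(const struct cam_lrme_query_cap_cmd *cap)
{
	uint32_t i;

	for (i = 0; i < cap->num_devices && i < CAM_LRME_DEV_MAX; i++) {
		const struct cam_lrme_hw_version *v =
			&cap->dev_caps[i].top_hw_version;

		printf("lrme%u: top hw %u.%u.%u\n", i, v->gen, v->rev, v->step);
	}
}
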
diff --git a/init/Kconfig b/init/Kconfig
index 9782dfc..7b3006a 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1051,23 +1051,6 @@
 config PAGE_COUNTER
        bool
 
-config CGROUP_SCHEDTUNE
-	bool "CFS tasks boosting cgroup subsystem (EXPERIMENTAL)"
-	depends on SCHED_TUNE
-	help
-	  This option provides the "schedtune" controller which improves the
-	  flexibility of the task boosting mechanism by introducing the support
-	  to define "per task" boost values.
-
-	  This new controller:
-	  1. allows only a two layers hierarchy, where the root defines the
-	     system-wide boost value and its direct childrens define each one a
-	     different "class of tasks" to be boosted with a different value
-	  2. supports up to 16 different task classes, each one which could be
-	     configured with a different boost value
-
-	  Say N if unsure.
-
 config MEMCG
 	bool "Memory controller"
 	select PAGE_COUNTER
@@ -1276,13 +1259,6 @@
 
 endif # CGROUPS
 
-config SCHED_WALT
-	bool "WALT"
-	depends on SMP && FAIR_GROUP_SCHED
-	help
-	  Use Window-Assisted Load Tracking (WALT) as an alternative or
-	  additional load tracking scheme in lieu of or along with PELT.
-
 config SCHED_CORE_CTL
 	bool "QTI Core Control"
 	depends on SMP
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 0d10ef5..915e750 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -25,6 +25,7 @@
 #include <linux/smpboot.h>
 #include <linux/relay.h>
 #include <linux/slab.h>
+#include <linux/highmem.h>
 
 #include <trace/events/power.h>
 #define CREATE_TRACE_POINTS
@@ -1433,6 +1434,11 @@
 		.startup.single		= NULL,
 		.teardown.single	= rcutree_dying_cpu,
 	},
+	[CPUHP_AP_KMAP_DYING] = {
+		.name			= "KMAP:dying",
+		.startup.single		= NULL,
+		.teardown.single	= kmap_remove_unused_cpu,
+	},
 	/* Entry state on starting. Interrupts enabled from here on. Transient
 	 * state for synchronsization */
 	[CPUHP_AP_ONLINE] = {
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 009f788..5183134 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -267,7 +267,8 @@
 	.release        = single_release,
 };
 
-static inline void pm_qos_set_value_for_cpus(struct pm_qos_constraints *c)
+static inline void pm_qos_set_value_for_cpus(struct pm_qos_constraints *c,
+		struct cpumask *cpus)
 {
 	struct pm_qos_request *req = NULL;
 	int cpu;
@@ -294,8 +295,11 @@
 		}
 	}
 
-	for_each_possible_cpu(cpu)
+	for_each_possible_cpu(cpu) {
+		if (c->target_per_cpu[cpu] != qos_val[cpu])
+			cpumask_set_cpu(cpu, cpus);
 		c->target_per_cpu[cpu] = qos_val[cpu];
+	}
 }
 
 /**
@@ -316,6 +320,7 @@
 	unsigned long flags;
 	int prev_value, curr_value, new_value;
 	struct plist_node *node = &req->node;
+	struct cpumask cpus;
 	int ret;
 
 	spin_lock_irqsave(&pm_qos_lock, flags);
@@ -346,18 +351,24 @@
 	}
 
 	curr_value = pm_qos_get_value(c);
+	cpumask_clear(&cpus);
 	pm_qos_set_value(c, curr_value);
-	pm_qos_set_value_for_cpus(c);
+	pm_qos_set_value_for_cpus(c, &cpus);
 
 	spin_unlock_irqrestore(&pm_qos_lock, flags);
 
 	trace_pm_qos_update_target(action, prev_value, curr_value);
-	if (prev_value != curr_value) {
+
+	/*
+	 * If any bits are set in the cpumask, call the notifier chain
+	 * to propagate the new qos restriction for those cores.
+	 */
+
+	if (!cpumask_empty(&cpus)) {
 		ret = 1;
 		if (c->notifiers)
 			blocking_notifier_call_chain(c->notifiers,
-						     (unsigned long)curr_value,
-						     NULL);
+				     (unsigned long)curr_value, &cpus);
 	} else {
 		ret = 0;
 	}
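With this change the PM QoS notifier chain fires only when at least one per-CPU target actually moves, and the affected mask is handed to listeners through the notifier data pointer. A hedged sketch of a listener that consumes it; the function and notifier-block names are illustrative, not from this patch:

/* Hedged sketch: a PM QoS listener that reacts only to the CPUs whose
 * per-CPU target changed, as passed via the notifier data argument.
 */
#include <linux/notifier.h>
#include <linux/cpumask.h>
#include <linux/printk.h>

static int example_qos_notify(struct notifier_block *nb,
			      unsigned long new_value, void *data)
{
	struct cpumask *cpus = data;	/* CPUs with a new target value */
	int cpu;

	if (!cpus)
		return NOTIFY_DONE;

	for_each_cpu(cpu, cpus)
		pr_debug("pm_qos: cpu%d target now %lu\n", cpu, new_value);

	return NOTIFY_OK;
}

static struct notifier_block example_qos_nb = {
	.notifier_call = example_qos_notify,
};
/* Registration (e.g. pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY,
 * &example_qos_nb)) happens elsewhere; shown here only as a sketch.
 */
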
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index f6cce95..4b87c4e 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -29,4 +29,3 @@
 obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
 obj-$(CONFIG_SCHED_CORE_CTL) += core_ctl.o
 obj-$(CONFIG_CPU_FREQ_GOV_SCHED) += cpufreq_sched.o
-obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index ba5e3e2..87bea1e 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -279,3 +279,14 @@
 	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
 		free_cpumask_var(cp->pri_to_cpu[i].mask);
 }
+
+/*
+ * cpupri_check_rt - check if the current CPU has an RT task
+ * Should be called from an rcu-sched read-side section.
+ */
+bool cpupri_check_rt(void)
+{
+	int cpu = raw_smp_processor_id();
+
+	return cpu_rq(cpu)->rd->cpupri.cpu_to_pri[cpu] > CPUPRI_NORMAL;
+}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3d933a0..0782ea74 100755
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2769,6 +2769,7 @@
 
 	return !!enabled;
 }
+EXPORT_SYMBOL(sched_get_wake_up_idle);
 
 int sched_set_wake_up_idle(struct task_struct *p, int wake_up_idle)
 {
@@ -2781,6 +2782,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(sched_set_wake_up_idle);
 
 /* Precomputed fixed inverse multiplies for multiplication by y^n */
 static const u32 runnable_avg_yN_inv[] = {
@@ -5593,13 +5595,6 @@
 	return DIV_ROUND_UP(util << SCHED_CAPACITY_SHIFT, capacity);
 }
 
-static inline bool bias_to_waker_cpu_enabled(struct task_struct *wakee,
-		struct task_struct *waker)
-{
-	return task_util(waker) > sched_big_waker_task_load &&
-		task_util(wakee) < sched_small_wakee_task_load;
-}
-
 static inline bool
 bias_to_waker_cpu(struct task_struct *p, int cpu, struct cpumask *rtg_target)
 {
@@ -6954,7 +6949,6 @@
 	struct related_thread_group *grp;
 	cpumask_t search_cpus;
 	int prev_cpu = task_cpu(p);
-	struct task_struct *curr = cpu_rq(cpu)->curr;
 #ifdef CONFIG_SCHED_CORE_ROTATE
 	bool do_rotate = false;
 	bool avoid_prev_cpu = false;
@@ -6975,14 +6969,14 @@
 
 	curr_util = boosted_task_util(cpu_rq(cpu)->curr);
 
-	need_idle = wake_to_idle(p);
-
+	need_idle = wake_to_idle(p) || schedtune_prefer_idle(p);
+	if (need_idle)
+		sync = 0;
 	grp = task_related_thread_group(p);
 	if (grp && grp->preferred_cluster)
 		rtg_target = &grp->preferred_cluster->cpus;
 
-	if (sync && bias_to_waker_cpu_enabled(p, curr) &&
-		bias_to_waker_cpu(p, cpu, rtg_target)) {
+	if (sync && bias_to_waker_cpu(p, cpu, rtg_target)) {
 		trace_sched_task_util_bias_to_waker(p, prev_cpu,
 					task_util(p), cpu, cpu, 0, need_idle);
 		return cpu;
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 1bf8e63..b2b26e5 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1824,11 +1824,9 @@
 			cpumask_andnot(&backup_search_cpu, &backup_search_cpu,
 				       &search_cpu);
 
-#ifdef CONFIG_SCHED_CORE_ROTATE
 			cpu = find_first_cpu_bit(task, &search_cpu, sg_target,
 						 &avoid_prev_cpu, &do_rotate,
 						 &first_cpu_bit_env);
-#endif
 		} else {
 			cpumask_copy(&search_cpu, lowest_mask);
 			cpumask_clear(&backup_search_cpu);
@@ -1912,6 +1910,7 @@
 		} else if (!cpumask_empty(&backup_search_cpu)) {
 			cpumask_copy(&search_cpu, &backup_search_cpu);
 			cpumask_clear(&backup_search_cpu);
+			cpu = -1;
 			goto retry;
 		}
 	}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b5a271b..494ab14 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1810,7 +1810,7 @@
 cpu_util_freq_pelt(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
-	unsigned long util = rq->cfs.avg.util_avg;
+	u64 util = rq->cfs.avg.util_avg;
 	unsigned long capacity = capacity_orig_of(cpu);
 
 	util *= (100 + per_cpu(sched_load_boost, cpu));
@@ -2853,4 +2853,6 @@
 find_first_cpu_bit(struct task_struct *p, const cpumask_t *search_cpus,
 		   struct sched_group *sg_target, bool *avoid_prev_cpu,
 		   bool *do_rotate, struct find_first_cpu_bit_env *env);
+#else
+#define find_first_cpu_bit(...) -1
 #endif
diff --git a/kernel/sched/sched_avg.c b/kernel/sched/sched_avg.c
index 46480a7..10d7f1b 100644
--- a/kernel/sched/sched_avg.c
+++ b/kernel/sched/sched_avg.c
@@ -206,7 +206,7 @@
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 
 	util = (util >= capacity) ? capacity : util;
-	busy = (util * 100) / capacity;
+	busy = div64_ul((util * 100), capacity);
 	return busy;
 }
 
diff --git a/kernel/sched/tune.h b/kernel/sched/tune.h
index 4f64417..d1b4c72 100644
--- a/kernel/sched/tune.h
+++ b/kernel/sched/tune.h
@@ -28,6 +28,7 @@
 
 #define schedtune_cpu_boost(cpu)  get_sysctl_sched_cfs_boost()
 #define schedtune_task_boost(tsk) get_sysctl_sched_cfs_boost()
+#define schedtune_prefer_idle(tsk) 0
 
 #define schedtune_exit_task(task) do { } while (0)
 
@@ -44,6 +45,7 @@
 
 #define schedtune_cpu_boost(cpu)  0
 #define schedtune_task_boost(tsk) 0
+#define schedtune_prefer_idle(tsk) 0
 
 #define schedtune_exit_task(task) do { } while (0)
 
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index 1f5639c..32c7f32 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -162,13 +162,6 @@
  */
 __read_mostly unsigned int sysctl_sched_freq_reporting_policy;
 
-
-#define SCHED_BIG_WAKER_TASK_LOAD_PCT 25UL
-#define SCHED_SMALL_WAKEE_TASK_LOAD_PCT 10UL
-
-__read_mostly unsigned int sched_big_waker_task_load;
-__read_mostly unsigned int sched_small_wakee_task_load;
-
 static int __init set_sched_ravg_window(char *str)
 {
 	unsigned int window_size;
@@ -3121,8 +3114,7 @@
 	walt_cpu_util_freq_divisor =
 	    (sched_ravg_window >> SCHED_CAPACITY_SHIFT) * 100;
 
-	sched_big_waker_task_load =
-		(SCHED_BIG_WAKER_TASK_LOAD_PCT << SCHED_CAPACITY_SHIFT) / 100;
-	sched_small_wakee_task_load =
-		(SCHED_SMALL_WAKEE_TASK_LOAD_PCT << SCHED_CAPACITY_SHIFT) / 100;
+	sched_init_task_load_windows =
+		div64_u64((u64)sysctl_sched_init_task_load_pct *
+			  (u64)sched_ravg_window, 100);
 }
diff --git a/kernel/softirq.c b/kernel/softirq.c
index bde8e33..6833ffa 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -245,6 +245,8 @@
 static inline void lockdep_softirq_end(bool in_hardirq) { }
 #endif
 
+#define long_softirq_pending()	(local_softirq_pending() & LONG_SOFTIRQ_MASK)
+#define defer_for_rt()		(long_softirq_pending() && cpupri_check_rt())
 asmlinkage __visible void __softirq_entry __do_softirq(void)
 {
 	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
@@ -308,6 +310,7 @@
 	pending = local_softirq_pending();
 	if (pending) {
 		if (time_before(jiffies, end) && !need_resched() &&
+		    !defer_for_rt() &&
 		    --max_restart)
 			goto restart;
 
@@ -363,7 +366,7 @@
 	if (ksoftirqd_running())
 		return;
 
-	if (!force_irqthreads) {
+	if (!force_irqthreads && !defer_for_rt()) {
 #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
 		/*
 		 * We can safely execute softirq on the current stack if
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index b9b881eb..7251e3c 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -11,5 +11,3 @@
 obj-$(CONFIG_TICK_ONESHOT)			+= tick-oneshot.o tick-sched.o
 obj-$(CONFIG_DEBUG_FS)				+= timekeeping_debug.o
 obj-$(CONFIG_TEST_UDELAY)			+= test_udelay.o
-
-ccflags-y += -Idrivers/cpuidle
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 842928a..b2df539 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -26,11 +26,6 @@
 #include <linux/workqueue.h>
 #include <linux/freezer.h>
 
-#ifdef CONFIG_MSM_PM
-#include "lpm-levels.h"
-#endif
-#include <linux/workqueue.h>
-
 /**
  * struct alarm_base - Alarm timer bases
  * @lock:		Lock for syncrhonized access to the base
@@ -50,116 +45,12 @@
 static DEFINE_SPINLOCK(freezer_delta_lock);
 
 static struct wakeup_source *ws;
-static struct delayed_work work;
-static struct workqueue_struct *power_off_alarm_workqueue;
 
 #ifdef CONFIG_RTC_CLASS
 /* rtc timer and device for setting alarm wakeups at suspend */
 static struct rtc_timer		rtctimer;
 static struct rtc_device	*rtcdev;
 static DEFINE_SPINLOCK(rtcdev_lock);
-static struct mutex power_on_alarm_lock;
-static struct alarm init_alarm;
-
-/**
- * power_on_alarm_init - Init power on alarm value
- *
- * Read rtc alarm value after device booting up and add this alarm
- * into alarm queue.
- */
-void power_on_alarm_init(void)
-{
-	struct rtc_wkalrm rtc_alarm;
-	struct rtc_time rt;
-	unsigned long alarm_time;
-	struct rtc_device *rtc;
-	ktime_t alarm_ktime;
-
-	rtc = alarmtimer_get_rtcdev();
-
-	if (!rtc)
-		return;
-
-	rtc_read_alarm(rtc, &rtc_alarm);
-	rt = rtc_alarm.time;
-
-	rtc_tm_to_time(&rt, &alarm_time);
-
-	if (alarm_time) {
-		alarm_ktime = ktime_set(alarm_time, 0);
-		alarm_init(&init_alarm, ALARM_POWEROFF_REALTIME, NULL);
-		alarm_start(&init_alarm, alarm_ktime);
-	}
-}
-
-/**
- * set_power_on_alarm - set power on alarm value into rtc register
- *
- * Get the soonest power off alarm timer and set the alarm value into rtc
- * register.
- */
-void set_power_on_alarm(void)
-{
-	int rc;
-	struct timespec wall_time, alarm_ts;
-	long alarm_secs = 0l;
-	long rtc_secs, alarm_time, alarm_delta;
-	struct rtc_time rtc_time;
-	struct rtc_wkalrm alarm;
-	struct rtc_device *rtc;
-	struct timerqueue_node *next;
-	unsigned long flags;
-	struct alarm_base *base = &alarm_bases[ALARM_POWEROFF_REALTIME];
-
-	rc = mutex_lock_interruptible(&power_on_alarm_lock);
-	if (rc != 0)
-		return;
-
-	spin_lock_irqsave(&base->lock, flags);
-	next = timerqueue_getnext(&base->timerqueue);
-	spin_unlock_irqrestore(&base->lock, flags);
-
-	if (next) {
-		alarm_ts = ktime_to_timespec(next->expires);
-		alarm_secs = alarm_ts.tv_sec;
-	}
-
-	if (!alarm_secs)
-		goto disable_alarm;
-
-	getnstimeofday(&wall_time);
-
-	/*
-	 * alarm_secs have to be bigger than "wall_time +1".
-	 * It is to make sure that alarm time will be always
-	 * bigger than wall time.
-	 */
-	if (alarm_secs <= wall_time.tv_sec + 1)
-		goto disable_alarm;
-
-	rtc = alarmtimer_get_rtcdev();
-	if (!rtc)
-		goto exit;
-
-	rtc_read_time(rtc, &rtc_time);
-	rtc_tm_to_time(&rtc_time, &rtc_secs);
-	alarm_delta = wall_time.tv_sec - rtc_secs;
-	alarm_time = alarm_secs - alarm_delta;
-
-	rtc_time_to_tm(alarm_time, &alarm.time);
-	alarm.enabled = 1;
-	rc = rtc_set_alarm(rtcdev, &alarm);
-	if (rc)
-		goto disable_alarm;
-
-	mutex_unlock(&power_on_alarm_lock);
-	return;
-
-disable_alarm:
-	rtc_alarm_irq_enable(rtcdev, 0);
-exit:
-	mutex_unlock(&power_on_alarm_lock);
-}
 
 static void alarmtimer_triggered_func(void *p)
 {
@@ -231,8 +122,6 @@
 
 static inline void alarmtimer_rtc_timer_init(void)
 {
-	mutex_init(&power_on_alarm_lock);
-
 	rtc_timer_init(&rtctimer, NULL, NULL);
 }
 
@@ -259,14 +148,8 @@
 static inline int alarmtimer_rtc_interface_setup(void) { return 0; }
 static inline void alarmtimer_rtc_interface_remove(void) { }
 static inline void alarmtimer_rtc_timer_init(void) { }
-void set_power_on_alarm(void) { }
 #endif
 
-static void alarm_work_func(struct work_struct *unused)
-{
-	set_power_on_alarm();
-}
-
 /**
  * alarmtimer_enqueue - Adds an alarm timer to an alarm_base timerqueue
  * @base: pointer to the base where the timer is being run
@@ -336,10 +219,6 @@
 	}
 	spin_unlock_irqrestore(&base->lock, flags);
 
-	/* set next power off alarm */
-	if (alarm->type == ALARM_POWEROFF_REALTIME)
-		queue_delayed_work(power_off_alarm_workqueue, &work, 0);
-
 	return ret;
 
 }
@@ -362,70 +241,6 @@
  * set an rtc timer to fire that far into the future, which
  * will wake us from suspend.
  */
-#if defined(CONFIG_RTC_DRV_QPNP) && defined(CONFIG_MSM_PM)
-static int alarmtimer_suspend(struct device *dev)
-{
-	struct rtc_time tm;
-	ktime_t min, now;
-	unsigned long flags;
-	struct rtc_device *rtc;
-	int i;
-	int ret = 0;
-
-	spin_lock_irqsave(&freezer_delta_lock, flags);
-	min = freezer_delta;
-	freezer_delta = ktime_set(0, 0);
-	spin_unlock_irqrestore(&freezer_delta_lock, flags);
-
-	rtc = alarmtimer_get_rtcdev();
-	/* If we have no rtcdev, just return */
-	if (!rtc)
-		return 0;
-
-	/* Find the soonest timer to expire*/
-	for (i = 0; i < ALARM_NUMTYPE; i++) {
-		struct alarm_base *base = &alarm_bases[i];
-		struct timerqueue_node *next;
-		ktime_t delta;
-
-		spin_lock_irqsave(&base->lock, flags);
-		next = timerqueue_getnext(&base->timerqueue);
-		spin_unlock_irqrestore(&base->lock, flags);
-		if (!next)
-			continue;
-		delta = ktime_sub(next->expires, base->gettime());
-		if (!min.tv64 || (delta.tv64 < min.tv64))
-			min = delta;
-	}
-	if (min.tv64 == 0)
-		return 0;
-
-	if (ktime_to_ns(min) < 2 * NSEC_PER_SEC) {
-		__pm_wakeup_event(ws, 2 * MSEC_PER_SEC);
-		return -EBUSY;
-	}
-
-	/* Setup a timer to fire that far in the future */
-	rtc_timer_cancel(rtc, &rtctimer);
-	rtc_read_time(rtc, &tm);
-	now = rtc_tm_to_ktime(tm);
-	now = ktime_add(now, min);
-	if (poweron_alarm) {
-		struct rtc_time tm_val;
-		unsigned long secs;
-
-		tm_val = rtc_ktime_to_tm(min);
-		rtc_tm_to_time(&tm_val, &secs);
-		lpm_suspend_wake_time(secs);
-	} else {
-		/* Set alarm, if in the past reject suspend briefly to handle */
-		ret = rtc_timer_start(rtc, &rtctimer, now, ktime_set(0, 0));
-		if (ret < 0)
-			__pm_wakeup_event(ws, MSEC_PER_SEC);
-	}
-	return ret;
-}
-#else
 static int alarmtimer_suspend(struct device *dev)
 {
 	struct rtc_time tm;
@@ -435,8 +250,6 @@
 	int i;
 	int ret;
 
-	cancel_delayed_work_sync(&work);
-
 	spin_lock_irqsave(&freezer_delta_lock, flags);
 	min = freezer_delta;
 	freezer_delta = ktime_set(0, 0);
@@ -482,7 +295,7 @@
 		__pm_wakeup_event(ws, MSEC_PER_SEC);
 	return ret;
 }
-#endif
+
 static int alarmtimer_resume(struct device *dev)
 {
 	struct rtc_device *rtc;
@@ -490,8 +303,6 @@
 	rtc = alarmtimer_get_rtcdev();
 	if (rtc)
 		rtc_timer_cancel(rtc, &rtctimer);
-
-	queue_delayed_work(power_off_alarm_workqueue, &work, 0);
 	return 0;
 }
 
@@ -672,14 +483,12 @@
  * clock2alarm - helper that converts from clockid to alarmtypes
  * @clockid: clockid.
  */
-enum alarmtimer_type clock2alarm(clockid_t clockid)
+static enum alarmtimer_type clock2alarm(clockid_t clockid)
 {
 	if (clockid == CLOCK_REALTIME_ALARM)
 		return ALARM_REALTIME;
 	if (clockid == CLOCK_BOOTTIME_ALARM)
 		return ALARM_BOOTTIME;
-	if (clockid == CLOCK_POWEROFF_ALARM)
-		return ALARM_POWEROFF_REALTIME;
 	return -1;
 }
 
@@ -1073,13 +882,10 @@
 
 	posix_timers_register_clock(CLOCK_REALTIME_ALARM, &alarm_clock);
 	posix_timers_register_clock(CLOCK_BOOTTIME_ALARM, &alarm_clock);
-	posix_timers_register_clock(CLOCK_POWEROFF_ALARM, &alarm_clock);
 
 	/* Initialize alarm bases */
 	alarm_bases[ALARM_REALTIME].base_clockid = CLOCK_REALTIME;
 	alarm_bases[ALARM_REALTIME].gettime = &ktime_get_real;
-	alarm_bases[ALARM_POWEROFF_REALTIME].base_clockid = CLOCK_REALTIME;
-	alarm_bases[ALARM_POWEROFF_REALTIME].gettime = &ktime_get_real;
 	alarm_bases[ALARM_BOOTTIME].base_clockid = CLOCK_BOOTTIME;
 	alarm_bases[ALARM_BOOTTIME].gettime = &ktime_get_boottime;
 	for (i = 0; i < ALARM_NUMTYPE; i++) {
@@ -1101,24 +907,8 @@
 		goto out_drv;
 	}
 	ws = wakeup_source_register("alarmtimer");
-	if (!ws) {
-		error = -ENOMEM;
-		goto out_ws;
-	}
-
-	INIT_DELAYED_WORK(&work, alarm_work_func);
-	power_off_alarm_workqueue =
-		create_singlethread_workqueue("power_off_alarm");
-	if (!power_off_alarm_workqueue) {
-		error = -ENOMEM;
-		goto out_wq;
-	}
-
 	return 0;
-out_wq:
-	wakeup_source_unregister(ws);
-out_ws:
-	platform_device_unregister(pdev);
+
 out_drv:
 	platform_driver_unregister(&alarmtimer_driver);
 out_if:
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index ccf6499..4c0b001 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1385,8 +1385,8 @@
 
 	pos = find_next_bit(base->pending_map, start, offset);
 	pos_down = pos < start ? pos + LVL_SIZE - start : -1;
-	if (((pos_up + base->clk) << LVL_SHIFT(lvl)) >
-		((pos_down + base->clk) << LVL_SHIFT(lvl)))
+	if (((pos_up + (u64)base->clk) << LVL_SHIFT(lvl)) >
+		((pos_down + (u64)base->clk) << LVL_SHIFT(lvl)))
 		return pos_down;
 	return pos_up;
 }
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0e5e54f..3630826 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -70,6 +70,7 @@
 	 * attach_mutex to avoid changing binding state while
 	 * worker_attach_to_pool() is in progress.
 	 */
+	POOL_MANAGER_ACTIVE	= 1 << 0,	/* being managed */
 	POOL_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
 
 	/* worker flags */
@@ -167,7 +168,6 @@
 						/* L: hash of busy workers */
 
 	/* see manage_workers() for details on the two manager mutexes */
-	struct mutex		manager_arb;	/* manager arbitration */
 	struct worker		*manager;	/* L: purely informational */
 	struct mutex		attach_mutex;	/* attach/detach exclusion */
 	struct list_head	workers;	/* A: attached workers */
@@ -299,6 +299,7 @@
 
 static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
 static DEFINE_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */
+static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
 
 static LIST_HEAD(workqueues);		/* PR: list of all workqueues */
 static bool workqueue_freezing;		/* PL: have wqs started freezing? */
@@ -801,7 +802,7 @@
 /* Do we have too many workers and should some go away? */
 static bool too_many_workers(struct worker_pool *pool)
 {
-	bool managing = mutex_is_locked(&pool->manager_arb);
+	bool managing = pool->flags & POOL_MANAGER_ACTIVE;
 	int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
 	int nr_busy = pool->nr_workers - nr_idle;
 
@@ -1985,24 +1986,17 @@
 {
 	struct worker_pool *pool = worker->pool;
 
-	/*
-	 * Anyone who successfully grabs manager_arb wins the arbitration
-	 * and becomes the manager.  mutex_trylock() on pool->manager_arb
-	 * failure while holding pool->lock reliably indicates that someone
-	 * else is managing the pool and the worker which failed trylock
-	 * can proceed to executing work items.  This means that anyone
-	 * grabbing manager_arb is responsible for actually performing
-	 * manager duties.  If manager_arb is grabbed and released without
-	 * actual management, the pool may stall indefinitely.
-	 */
-	if (!mutex_trylock(&pool->manager_arb))
+	if (pool->flags & POOL_MANAGER_ACTIVE)
 		return false;
+
+	pool->flags |= POOL_MANAGER_ACTIVE;
 	pool->manager = worker;
 
 	maybe_create_worker(pool);
 
 	pool->manager = NULL;
-	mutex_unlock(&pool->manager_arb);
+	pool->flags &= ~POOL_MANAGER_ACTIVE;
+	wake_up(&wq_manager_wait);
 	return true;
 }
 
@@ -3210,7 +3204,6 @@
 	setup_timer(&pool->mayday_timer, pool_mayday_timeout,
 		    (unsigned long)pool);
 
-	mutex_init(&pool->manager_arb);
 	mutex_init(&pool->attach_mutex);
 	INIT_LIST_HEAD(&pool->workers);
 
@@ -3280,13 +3273,15 @@
 	hash_del(&pool->hash_node);
 
 	/*
-	 * Become the manager and destroy all workers.  Grabbing
-	 * manager_arb prevents @pool's workers from blocking on
-	 * attach_mutex.
+	 * Become the manager and destroy all workers.  This prevents
+	 * @pool's workers from blocking on attach_mutex.  We're the last
+	 * manager and @pool gets freed with the flag set.
 	 */
-	mutex_lock(&pool->manager_arb);
-
 	spin_lock_irq(&pool->lock);
+	wait_event_lock_irq(wq_manager_wait,
+			    !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
+	pool->flags |= POOL_MANAGER_ACTIVE;
+
 	while ((worker = first_idle_worker(pool)))
 		destroy_worker(worker);
 	WARN_ON(pool->nr_workers || pool->nr_idle);
@@ -3300,8 +3295,6 @@
 	if (pool->detach_completion)
 		wait_for_completion(pool->detach_completion);
 
-	mutex_unlock(&pool->manager_arb);
-
 	/* shut down the timers */
 	del_timer_sync(&pool->idle_timer);
 	del_timer_sync(&pool->mayday_timer);
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 59fd7c0..5cd0935 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -598,21 +598,31 @@
 		if ((edit->segment_cache[ASSOC_ARRAY_FAN_OUT] ^ base_seg) == 0)
 			goto all_leaves_cluster_together;
 
-		/* Otherwise we can just insert a new node ahead of the old
-		 * one.
+		/* Otherwise all the old leaves cluster in the same slot, but
+		 * the new leaf wants to go into a different slot - so we
+		 * create a new node (n0) to hold the new leaf and a pointer to
+		 * a new node (n1) holding all the old leaves.
+		 *
+		 * This can be done by falling through to the node splitting
+		 * path.
 		 */
-		goto present_leaves_cluster_but_not_new_leaf;
+		pr_devel("present leaves cluster but not new leaf\n");
 	}
 
 split_node:
 	pr_devel("split node\n");
 
-	/* We need to split the current node; we know that the node doesn't
-	 * simply contain a full set of leaves that cluster together (it
-	 * contains meta pointers and/or non-clustering leaves).
+	/* We need to split the current node.  The node must contain anything
+	 * from a single leaf (in the one leaf case, this leaf will cluster
+	 * with the new leaf) and the rest meta-pointers, to all leaves, some
+	 * of which may cluster.
+	 *
+	 * It won't contain the case in which all the current leaves plus the
+	 * new leaves want to cluster in the same slot.
 	 *
 	 * We need to expel at least two leaves out of a set consisting of the
-	 * leaves in the node and the new leaf.
+	 * leaves in the node and the new leaf.  The current meta pointers can
+	 * just be copied as they shouldn't cluster with any of the leaves.
 	 *
 	 * We need a new node (n0) to replace the current one and a new node to
 	 * take the expelled nodes (n1).
@@ -717,33 +727,6 @@
 	pr_devel("<--%s() = ok [split node]\n", __func__);
 	return true;
 
-present_leaves_cluster_but_not_new_leaf:
-	/* All the old leaves cluster in the same slot, but the new leaf wants
-	 * to go into a different slot, so we create a new node to hold the new
-	 * leaf and a pointer to a new node holding all the old leaves.
-	 */
-	pr_devel("present leaves cluster but not new leaf\n");
-
-	new_n0->back_pointer = node->back_pointer;
-	new_n0->parent_slot = node->parent_slot;
-	new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
-	new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
-	new_n1->parent_slot = edit->segment_cache[0];
-	new_n1->nr_leaves_on_branch = node->nr_leaves_on_branch;
-	edit->adjust_count_on = new_n0;
-
-	for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
-		new_n1->slots[i] = node->slots[i];
-
-	new_n0->slots[edit->segment_cache[0]] = assoc_array_node_to_ptr(new_n0);
-	edit->leaf_p = &new_n0->slots[edit->segment_cache[ASSOC_ARRAY_FAN_OUT]];
-
-	edit->set[0].ptr = &assoc_array_ptr_to_node(node->back_pointer)->slots[node->parent_slot];
-	edit->set[0].to = assoc_array_node_to_ptr(new_n0);
-	edit->excised_meta[0] = assoc_array_node_to_ptr(node);
-	pr_devel("<--%s() = ok [insert node before]\n", __func__);
-	return true;
-
 all_leaves_cluster_together:
 	/* All the leaves, new and old, want to cluster together in this node
 	 * in the same slot, so we have to replace this node with a shortcut to
diff --git a/lib/digsig.c b/lib/digsig.c
index 55b8b2f..a876156 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -87,6 +87,12 @@
 	down_read(&key->sem);
 	ukp = user_key_payload(key);
 
+	if (!ukp) {
+		/* key was revoked before we acquired its semaphore */
+		err = -EKEYREVOKED;
+		goto err1;
+	}
+
 	if (ukp->datalen < sizeof(*pkh))
 		goto err1;
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 7aab8f6..18de74e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4174,6 +4174,9 @@
 int (*athrs_fast_nat_recv)(struct sk_buff *skb) __rcu __read_mostly;
 EXPORT_SYMBOL(athrs_fast_nat_recv);
 
+int (*embms_tm_multicast_recv)(struct sk_buff *skb) __rcu __read_mostly;
+EXPORT_SYMBOL(embms_tm_multicast_recv);
+
 static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
 {
 	struct packet_type *ptype, *pt_prev;
@@ -4183,6 +4186,7 @@
 	int ret = NET_RX_DROP;
 	__be16 type;
 	int (*fast_recv)(struct sk_buff *skb);
+	int (*embms_recv)(struct sk_buff *skb);
 
 	net_timestamp_check(!netdev_tstamp_prequeue, skb);
 
@@ -4250,6 +4254,10 @@
 		}
 	}
 
+	embms_recv = rcu_dereference(embms_tm_multicast_recv);
+	if (embms_recv)
+		embms_recv(skb);
+
 #ifdef CONFIG_NET_CLS_ACT
 	skb->tc_verd = 0;
 ncls:
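The new embms_tm_multicast_recv hook mirrors the existing athrs_fast_nat_recv pattern: the core rx path calls whatever handler a module has published under RCU. A hedged sketch of how a module might register and unregister a handler; the registration code is not part of this hunk and the handler name is illustrative:

/* Hedged sketch: publish/unpublish a receive handler on the new
 * embms_tm_multicast_recv hook.
 */
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>

extern int (*embms_tm_multicast_recv)(struct sk_buff *skb) __rcu;

static int example_embms_recv(struct sk_buff *skb)
{
	/* inspect/forward multicast traffic here */
	return 0;
}

static void example_embms_hook_register(void)
{
	rcu_assign_pointer(embms_tm_multicast_recv, example_embms_recv);
}

static void example_embms_hook_unregister(void)
{
	RCU_INIT_POINTER(embms_tm_multicast_recv, NULL);
	synchronize_net();	/* wait out readers in the rx path */
}
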
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 93f7a5a..b91cecc 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -941,7 +941,11 @@
 		if (!mod_timer(&neigh->timer, next))
 			neigh_hold(neigh);
 	}
-	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE | NUD_STALE)) {
+
+	if (neigh_probe_enable) {
+		if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE | NUD_STALE))
+			neigh_probe(neigh);
+	} else if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
 		neigh_probe(neigh);
 	} else {
 out:
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index 8737412..e1d4d89 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -224,7 +224,7 @@
 static void dns_resolver_describe(const struct key *key, struct seq_file *m)
 {
 	seq_puts(m, key->description);
-	if (key_is_instantiated(key)) {
+	if (key_is_positive(key)) {
 		int err = PTR_ERR(key->payload.data[dns_key_error]);
 
 		if (err)
diff --git a/net/embms_kernel/Makefile b/net/embms_kernel/Makefile
new file mode 100644
index 0000000..c21480e
--- /dev/null
+++ b/net/embms_kernel/Makefile
@@ -0,0 +1,22 @@
+#
+# Makefile for Embms Kernel module.
+#
+
+KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build
+
+obj-m += embms_kernel.o
+
+ccflags-y += -D__CHECK_ENDIAN__
+
+CDEFINES += -D__CHECK_ENDIAN__
+
+KBUILD_CPPFLAGS += $(CDEFINES)
+
+all:
+	$(MAKE) -C $(KERNEL_SRC) M=$(shell pwd) modules
+modules_install:
+	$(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(shell pwd) modules_install
+
+clean:
+	$(MAKE) -C $(KERNEL_SRC) M=$(PWD) clean
+
diff --git a/net/embms_kernel/embms_kernel.c b/net/embms_kernel/embms_kernel.c
new file mode 100644
index 0000000..3bbe51b
--- /dev/null
+++ b/net/embms_kernel/embms_kernel.c
@@ -0,0 +1,1031 @@
+/*************************************************************************
+ * -----------------------------------------------------------------------
+ * Copyright (c) 2013-2015, 2017, The Linux Foundation. All rights reserved.
+
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ * -----------------------------------------------------------------------
+
+ * DESCRIPTION
+ * Main file for the eMBMS Tunneling Module in the kernel.
+ *************************************************************************
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <net/ip.h>
+#include <linux/uaccess.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/etherdevice.h>
+
+#include <linux/inetdevice.h>
+#include <linux/netfilter.h>
+#include <net/arp.h>
+#include <net/neighbour.h>
+
+#include <linux/skbuff.h>
+#include <linux/list.h>
+#include <linux/in.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <linux/miscdevice.h>
+#include "embms_kernel.h"
+
+struct embms_info_internal embms_conf;
+
+/* Global structures used for tunneling. These include
+ * the iphdr and udphdr that are prepended to skbs for
+ * tunneling, plus the net_device and other tunneling-related
+ * structs and params.
+ */
+
+unsigned char hdr_buff[sizeof(struct iphdr) + sizeof(struct udphdr)];
+struct iphdr *iph_global;
+struct udphdr *udph_global;
+struct net_device *dev_global;
+
+static struct tmgi_to_clnt_info tmgi_to_clnt_map_tbl;
+
+/* handle_multicast_stream - packet forwarding
+ * function for multicast stream
+ * Main use case is for EMBMS Over Softap feature
+ */
+
+static int handle_multicast_stream(struct sk_buff *skb)
+{
+	struct iphdr *iph;
+	struct udphdr *udph;
+	unsigned char *tmp_ptr = NULL;
+	struct sk_buff *skb_new = NULL;
+	struct sk_buff *skb_cpy = NULL;
+	struct clnt_info *temp_client = NULL;
+	struct tmgi_to_clnt_info *temp_tmgi = NULL;
+	struct list_head *tmgi_entry_ptr, *prev_tmgi_entry_ptr;
+	struct list_head *clnt_ptr, *prev_clnt_ptr;
+	int hdr_size = sizeof(*udph) + sizeof(*iph) + ETH_HLEN;
+
+	/* only IP packets */
+	if (htons(ETH_P_IP) != skb->protocol) {
+		embms_error("Not an IP packet\n");
+		return 0;
+	}
+
+	if (embms_conf.embms_tunneling_status == TUNNELING_OFF) {
+		embms_debug("Tunneling Disabled. Can't process packets\n");
+		return 0;
+	}
+
+	if (unlikely(memcmp(skb->dev->name, embms_conf.embms_iface,
+			    strlen(embms_conf.embms_iface)) != 0)) {
+		embms_error("Packet received on %s iface. NOT an EMBMS Iface\n",
+			    skb->dev->name);
+		return 0;
+	}
+
+	/* Check if dst ip of packet is same as multicast ip of any tmgi*/
+
+	iph = (struct iphdr *)skb->data;
+	udph = (struct udphdr *)(skb->data + sizeof(struct iphdr));
+
+	spin_lock_bh(&embms_conf.lock);
+
+	list_for_each_safe(tmgi_entry_ptr, prev_tmgi_entry_ptr,
+			   &tmgi_to_clnt_map_tbl.tmgi_list_ptr) {
+		temp_tmgi = list_entry(tmgi_entry_ptr,
+				       struct tmgi_to_clnt_info,
+				       tmgi_list_ptr);
+
+		if ((temp_tmgi->tmgi_multicast_addr == iph->daddr) &&
+		    (temp_tmgi->tmgi_port == udph->dest))
+			break;
+	}
+
+	if (tmgi_entry_ptr == &tmgi_to_clnt_map_tbl.tmgi_list_ptr) {
+		embms_error("handle_multicast_stream:");
+		embms_error("could not find matchin tmgi entry\n");
+		spin_unlock_bh(&embms_conf.lock);
+		return 0;
+	}
+
+	/* Found a matching tmgi entry. Realloc headroom to
+	 * accommodate new Ethernet, IP and UDP header
+	 */
+
+	skb_new = skb_realloc_headroom(skb, hdr_size);
+	if (unlikely(!skb_new)) {
+		embms_error("Can't allocate headroom\n");
+		spin_unlock_bh(&embms_conf.lock);
+		return 0;
+	}
+
+	/* push skb->data and copy IP and UDP headers*/
+
+	tmp_ptr = skb_push(skb_new,
+			   sizeof(struct udphdr) + sizeof(struct iphdr));
+
+	iph = (struct iphdr *)tmp_ptr;
+	udph = (struct udphdr *)(tmp_ptr + sizeof(struct iphdr));
+
+	memcpy(tmp_ptr, hdr_buff, hdr_size - ETH_HLEN);
+	udph->len = htons(skb_new->len - sizeof(struct iphdr));
+	iph->tot_len = htons(skb_new->len);
+
+	list_for_each_safe(clnt_ptr, prev_clnt_ptr,
+			   &temp_tmgi->client_list_head) {
+		temp_client = list_entry(clnt_ptr,
+					 struct clnt_info,
+					 client_list_ptr);
+
+		/* Make a copy of skb_new with the new IP and UDP header.
+		 * We can't use skb_new or a clone of it here, since we need
+		 * to rewrite the dst ip and dst port for every client, which
+		 * is not possible on shared data, as would be the case with
+		 * a clone of skb_new.
+		 */
+
+		skb_cpy = skb_copy(skb_new, GFP_ATOMIC);
+		if (unlikely(!skb_cpy)) {
+			embms_error("Can't copy skb\n");
+			kfree_skb(skb_new);
+			return 0;
+		}
+
+		iph = (struct iphdr *)skb_cpy->data;
+		udph = (struct udphdr *)(skb_cpy->data + sizeof(struct iphdr));
+
+		iph->id = htons(atomic_inc_return(&embms_conf.ip_ident));
+
+		/* Calculate checksum for new IP and UDP header*/
+
+		udph->dest = temp_client->port;
+		skb_cpy->csum = csum_partial((char *)udph,
+					     ntohs(udph->len),
+					     skb_cpy->csum);
+
+		iph->daddr = temp_client->addr;
+		ip_send_check(iph);
+
+		udph->check = 0;
+		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
+						ntohs(udph->len),
+						IPPROTO_UDP,
+						skb_cpy->csum);
+
+		if (udph->check == 0)
+			udph->check = CSUM_MANGLED_0;
+
+		if (unlikely(!dev_global)) {
+			embms_error("Global device NULL\n");
+			kfree_skb(skb_cpy);
+			kfree_skb(skb_new);
+			return 0;
+		}
+
+		/* update device info and add MAC header*/
+
+		skb_cpy->dev = dev_global;
+
+		skb_cpy->dev->header_ops->create(skb_cpy, skb_cpy->dev,
+						ETH_P_IP, temp_client->dmac,
+						NULL, skb_cpy->len);
+		dev_queue_xmit(skb_cpy);
+	}
+
+	spin_unlock_bh(&embms_conf.lock);
+	kfree_skb(skb_new);
+	return 1;
+}
+
+static int check_embms_device(atomic_t *use_count)
+{
+	int ret;
+
+	if (atomic_inc_return(use_count) == 1) {
+		ret = 0;
+	} else {
+		atomic_dec(use_count);
+		ret = -EBUSY;
+	}
+	return ret;
+}
+
+static int embms_device_open(struct inode *inode, struct file *file)
+{
+	/*Check if the device is busy*/
+	if (check_embms_device(&embms_conf.device_under_use)) {
+		embms_error("embms_tm_open : EMBMS device busy\n");
+		return -EBUSY;
+	}
+
+	try_module_get(THIS_MODULE);
+	return SUCCESS;
+}
+
+static int embms_device_release(struct inode *inode, struct file *file)
+{
+	/* Reduce device use count before leaving*/
+	embms_debug("Releasing EMBMS device..\n");
+	atomic_dec(&embms_conf.device_under_use);
+	embms_conf.embms_tunneling_status = TUNNELING_OFF;
+	module_put(THIS_MODULE);
+	return SUCCESS;
+}
+
+static struct tmgi_to_clnt_info *check_for_tmgi_entry(u32 addr,
+						      u16 port)
+{
+	struct list_head *tmgi_ptr, *prev_tmgi_ptr;
+	struct tmgi_to_clnt_info *temp_tmgi = NULL;
+
+	embms_debug("check_for_tmgi_entry: mcast addr :%pI4, port %u\n",
+		    &addr, ntohs(port));
+
+	list_for_each_safe(tmgi_ptr,
+			   prev_tmgi_ptr,
+			   &tmgi_to_clnt_map_tbl.tmgi_list_ptr) {
+		temp_tmgi = list_entry(tmgi_ptr,
+				       struct tmgi_to_clnt_info,
+				       tmgi_list_ptr);
+
+		if ((temp_tmgi->tmgi_multicast_addr == addr) &&
+		    (temp_tmgi->tmgi_port == port)) {
+			embms_debug("check_for_tmgi_entry:TMGI entry found\n");
+			return temp_tmgi;
+		}
+	}
+	return NULL;
+}
+
+static struct clnt_info *chk_clnt_entry(struct tmgi_to_clnt_info *tmgi,
+					struct tmgi_to_clnt_info_update *clnt)
+{
+	struct list_head *clnt_ptr, *prev_clnt_ptr;
+	struct clnt_info *temp_client = NULL;
+
+	embms_debug("check_for_client_entry: clnt addr :%pI4, port %u\n",
+		    &clnt->client_addr, ntohs(clnt->client_port));
+
+	list_for_each_safe(clnt_ptr,
+			   prev_clnt_ptr,
+			   &tmgi->client_list_head) {
+		temp_client = list_entry(clnt_ptr,
+					 struct clnt_info,
+					 client_list_ptr);
+		if ((temp_client->addr == clnt->client_addr) &&
+		    (temp_client->port == clnt->client_port)) {
+			embms_debug("Clnt entry present\n");
+			return temp_client;
+		}
+	}
+	return NULL;
+}
+
+static int add_new_tmgi_entry(struct tmgi_to_clnt_info_update *info_update,
+			      struct clnt_info *clnt)
+{
+	struct tmgi_to_clnt_info *new_tmgi = NULL;
+
+	embms_debug("add_new_tmgi_entry:Enter\n");
+
+	new_tmgi = kzalloc(sizeof(*new_tmgi),
+			   GFP_ATOMIC);
+	if (!new_tmgi) {
+		embms_error("add_new_tmgi_entry: mem alloc failed\n");
+		return -ENOMEM;
+	}
+
+	memset(new_tmgi, 0, sizeof(struct tmgi_to_clnt_info));
+
+	new_tmgi->tmgi_multicast_addr = info_update->multicast_addr;
+	new_tmgi->tmgi_port = info_update->multicast_port;
+
+	embms_debug("add_new_tmgi_entry:");
+	embms_debug("New tmgi multicast addr :%pI4 , port %u\n",
+		    &info_update->multicast_addr,
+		    ntohs(info_update->multicast_port));
+
+	embms_debug("add_new_tmgi_entry:Adding client entry\n");
+
+	spin_lock_bh(&embms_conf.lock);
+
+	INIT_LIST_HEAD(&new_tmgi->client_list_head);
+	list_add(&clnt->client_list_ptr,
+		 &new_tmgi->client_list_head);
+	new_tmgi->no_of_clients++;
+
+	/* Once above steps are done successfully,
+	 * we add tmgi entry to our local table
+	 */
+
+	list_add(&new_tmgi->tmgi_list_ptr,
+		 &tmgi_to_clnt_map_tbl.tmgi_list_ptr);
+	embms_conf.no_of_tmgi_sessions++;
+
+	spin_unlock_bh(&embms_conf.lock);
+
+	return SUCCESS;
+}
+
+static void print_tmgi_to_client_table(void)
+{
+	int i, j;
+	struct clnt_info *temp_client = NULL;
+	struct tmgi_to_clnt_info *temp_tmgi = NULL;
+	struct list_head *tmgi_entry_ptr, *prev_tmgi_entry_ptr;
+	struct list_head *clnt_ptr, *prev_clnt_ptr;
+
+	embms_debug("====================================================\n");
+	embms_debug("Printing TMGI to Client Table :\n");
+	embms_debug("No of Active TMGIs : %d\n",
+		    embms_conf.no_of_tmgi_sessions);
+	embms_debug("====================================================\n\n");
+
+	if (embms_conf.no_of_tmgi_sessions > 0) {
+		i = 1;
+		list_for_each_safe(tmgi_entry_ptr, prev_tmgi_entry_ptr,
+				   &tmgi_to_clnt_map_tbl.tmgi_list_ptr) {
+			temp_tmgi = list_entry(tmgi_entry_ptr,
+					       struct tmgi_to_clnt_info,
+					       tmgi_list_ptr);
+
+			embms_debug("TMGI entry %d :\n", i);
+			embms_debug("TMGI multicast addr : %pI4 , port %u\n\n",
+				    &temp_tmgi->tmgi_multicast_addr,
+				    ntohs(temp_tmgi->tmgi_port));
+			embms_debug("No of clients : %d\n",
+				    temp_tmgi->no_of_clients);
+			j = 1;
+
+			list_for_each_safe(clnt_ptr, prev_clnt_ptr,
+					   &temp_tmgi->client_list_head) {
+				temp_client = list_entry(clnt_ptr,
+							 struct clnt_info,
+							 client_list_ptr);
+				embms_debug("Client entry %d :\n", j);
+				embms_debug("client addr : %pI4 , port %u\n\n",
+					    &temp_client->addr,
+					    ntohs(temp_client->port));
+				j++;
+			}
+			i++;
+			embms_debug("===========================================\n\n");
+		}
+	} else {
+		embms_debug("No TMGI entries to Display\n");
+	}
+	embms_debug("==================================================================\n\n");
+}
+
+/**
+ * delete_tmgi_entry_from_table() - deletes tmgi from global tmgi-client table
+ * @buffer:	Buffer containing TMGI info for deletion.
+ *
+ * This function completely removes the TMGI from
+ * global TMGI-client table, along with the client list
+ * so that no packets for this TMGI are processed
+ *
+ * Return: Success on deleting TMGI entry, error otherwise.
+ */
+
+int delete_tmgi_entry_from_table(char *buffer)
+{
+	struct tmgi_to_clnt_info_update *info_update;
+	struct clnt_info *temp_client = NULL;
+	struct tmgi_to_clnt_info *temp_tmgi = NULL;
+	struct list_head *clnt_ptr, *prev_clnt_ptr;
+
+	embms_debug("delete_tmgi_entry_from_table: Enter\n");
+
+	info_update = (struct tmgi_to_clnt_info_update *)buffer;
+
+	if (!info_update) {
+		embms_error("delete_tmgi_entry_from_table:");
+		embms_error("NULL arguments passed\n");
+		return -EBADPARAM;
+	}
+
+	/* This function is used to delete a specific TMGI entry
+	 * when that particular TMGI goes down.
+	 * Search for the TMGI entry in our local table.
+	 */
+	if (embms_conf.no_of_tmgi_sessions == 0) {
+		embms_error("TMGI count 0. Nothing to delete\n");
+		return SUCCESS;
+	}
+
+	temp_tmgi = check_for_tmgi_entry(info_update->multicast_addr,
+					 info_update->multicast_port);
+
+	if (!temp_tmgi) {
+		/* TMGI entry was not found in our local table*/
+		embms_error("delete_client_entry_from_table :");
+		embms_error("Desired TMGI entry not found\n");
+		return -EBADPARAM;
+	}
+
+	spin_lock_bh(&embms_conf.lock);
+
+	/* We need to free memory allocated to client entries
+	 * for a particular TMGI entry
+	 */
+
+	list_for_each_safe(clnt_ptr, prev_clnt_ptr,
+			   &temp_tmgi->client_list_head) {
+		temp_client = list_entry(clnt_ptr,
+					 struct clnt_info,
+					 client_list_ptr);
+		embms_debug("delete_tmgi_entry_from_table :");
+		embms_debug("Client addr to delete :%pI4 , port %u\n",
+			    &temp_client->addr, ntohs(temp_client->port));
+		list_del(&temp_client->client_list_ptr);
+		temp_tmgi->no_of_clients--;
+		kfree(temp_client);
+	}
+
+	/* Free memory allocated to tmgi entry*/
+
+	list_del(&temp_tmgi->tmgi_list_ptr);
+	kfree(temp_tmgi);
+	embms_conf.no_of_tmgi_sessions--;
+
+	spin_unlock_bh(&embms_conf.lock);
+
+	embms_debug("delete_tmgi_entry_from_table : TMGI Entry deleted.\n");
+
+	return SUCCESS;
+}
+
+/**
+ * delete_client_entry_from_all_tmgi() - deletes client from all tmgi lists
+ * @buffer:	Buffer containing client info for deletion.
+ *
+ * This function completely removes a client from
+ * all TMGIs in the global TMGI-client table. It also deletes
+ * TMGI entries that are left with no clients.
+ *
+ * Return: Success on deleting client entry, error otherwise.
+ */
+int delete_client_entry_from_all_tmgi(char *buffer)
+{
+	struct tmgi_to_clnt_info_update *info_update;
+	struct clnt_info *temp_client = NULL;
+	struct tmgi_to_clnt_info *tmgi = NULL;
+	struct list_head *tmgi_entry_ptr, *prev_tmgi_entry_ptr;
+
+	/* We use this function when we want to delete any
+	 * client entry from all TMGI entries. This scenario
+	 * happens when any client disconnects and hence
+	 * we need to clean up all related client entries
+	 * in our mapping table.
+	 */
+
+	embms_debug("del_clnt_from_all_tmgi: Enter\n");
+
+	info_update = (struct tmgi_to_clnt_info_update *)buffer;
+
+	if (!info_update) {
+		embms_error("del_clnt_from_all_tmgi:");
+		embms_error("NULL arguments passed\n");
+		return -EBADPARAM;
+	}
+
+	/* We start checking from first TMGI entry and if client
+	 * entry is found in client entries of any TMGI, we clean
+	 * up that client entry from that TMGI entry
+	 */
+	if (embms_conf.no_of_tmgi_sessions == 0)
+		return SUCCESS;
+
+	list_for_each_safe(tmgi_entry_ptr, prev_tmgi_entry_ptr,
+			   &tmgi_to_clnt_map_tbl.tmgi_list_ptr) {
+		tmgi = list_entry(tmgi_entry_ptr,
+				  struct tmgi_to_clnt_info,
+				  tmgi_list_ptr);
+
+		temp_client = chk_clnt_entry(tmgi, info_update);
+		if (!temp_client)
+			continue;
+
+		spin_lock_bh(&embms_conf.lock);
+
+		list_del(&temp_client->client_list_ptr);
+		tmgi->no_of_clients--;
+		kfree(temp_client);
+
+		spin_unlock_bh(&embms_conf.lock);
+
+		temp_client = NULL;
+
+		if (tmgi->no_of_clients == 0) {
+			/* Deleted clnt was the only clnt for
+			 * that TMGI we need to delete TMGI
+			 * entry from table
+			 */
+			embms_debug("del_clnt_from_all_tmgi:");
+			embms_debug("Deleted client was ");
+			embms_debug("last client for tmgi\n");
+			embms_debug("del_clnt_from_all_tmgi:");
+			embms_debug("Delting tmgi as it has ");
+			embms_debug("zero clients.TMGI IP ");
+			embms_debug(":%pI4 , port %u\n",
+				    &tmgi->tmgi_multicast_addr,
+				    ntohs(tmgi->tmgi_port));
+
+			spin_lock_bh(&embms_conf.lock);
+
+			list_del(&tmgi->tmgi_list_ptr);
+			embms_conf.no_of_tmgi_sessions--;
+			kfree(tmgi);
+
+			spin_unlock_bh(&embms_conf.lock);
+
+			embms_debug("del_clnt_from_all_tmgi:");
+			embms_debug("TMGI entry deleted\n");
+		}
+	}
+
+	embms_debug("del_clnt_from_all_tmgi Successful\n");
+	return SUCCESS;
+}
+
+/**
+ * add_client_entry_to_table() - add client entry to specified TMGI
+ * @buffer:	Buffer containing client info for addition.
+ *
+ * This function adds a client to the specified TMGI in
+ * the global TMGI-client table. If TMGI entry is not
+ * present, it adds a new TMGI entry and adds client
+ * entry to it.
+ *
+ * Return: Success on adding client entry, error otherwise.
+ */
+int add_client_entry_to_table(char *buffer)
+{
+	int ret;
+	struct tmgi_to_clnt_info_update *info_update;
+	struct clnt_info *new_client = NULL;
+	struct tmgi_to_clnt_info *tmgi = NULL;
+	struct neighbour *neigh_entry;
+
+	embms_debug("add_client_entry_to_table: Enter\n");
+
+	info_update = (struct tmgi_to_clnt_info_update *)buffer;
+
+	if (!info_update) {
+		embms_error("add_client_entry_to_table:");
+		embms_error("NULL arguments passed\n");
+		return -EBADPARAM;
+	}
+
+	new_client = kzalloc(sizeof(*new_client), GFP_ATOMIC);
+	if (!new_client) {
+		embms_error("add_client_entry_to_table:");
+		embms_error("Cannot allocate memory\n");
+		return -ENOMEM;
+	}
+
+	new_client->addr = info_update->client_addr;
+	new_client->port = info_update->client_port;
+
+	neigh_entry = __ipv4_neigh_lookup(dev_global,
+					  (u32)(new_client->addr));
+	if (!neigh_entry) {
+		embms_error("add_client_entry_to_table :");
+		embms_error("Can't find neighbour entry\n");
+		kfree(new_client);
+		return -EBADPARAM;
+	}
+
+	ether_addr_copy(new_client->dmac, neigh_entry->ha);
+
+	embms_debug("DMAC of client : %pM\n", new_client->dmac);
+
+	embms_debug("add_client_entry_to_table:");
+	embms_debug("New client addr :%pI4 , port %u\n",
+		    &info_update->client_addr,
+		    ntohs(info_update->client_port));
+
+	if (embms_conf.no_of_tmgi_sessions == 0) {
+		/* TMGI Client mapping table is empty.
+		 * First client entry is being added
+		 */
+
+		embms_debug("tmgi_to_clnt_map_tbl is empty\n");
+
+		ret = add_new_tmgi_entry(info_update, new_client);
+		if (ret != SUCCESS) {
+			kfree(new_client);
+			new_client = NULL;
+		}
+
+		goto exit_add;
+	}
+
+	/* In this case, table already has some entries
+	 * and we need to search for the specific tmgi entry
+	 * for which client entry is to be added
+	 */
+
+	tmgi = check_for_tmgi_entry(info_update->multicast_addr,
+				    info_update->multicast_port);
+	if (tmgi) {
+		if (chk_clnt_entry(tmgi, info_update)) {
+			kfree(new_client);
+			return -ENOEFFECT;
+		}
+
+		/* Adding client to the client list
+		 * for the specified TMGI
+		 */
+
+		spin_lock_bh(&embms_conf.lock);
+
+		list_add(&new_client->client_list_ptr,
+			 &tmgi->client_list_head);
+		tmgi->no_of_clients++;
+
+		spin_unlock_bh(&embms_conf.lock);
+
+		ret = SUCCESS;
+	} else {
+		/* TMGI specified in the message was not found in
+		 * mapping table. Hence, we need to add a new entry
+		 * for this TMGI and add the specified client to the client
+		 * list
+		 */
+
+		embms_debug("TMGI entry not present. Adding tmgi entry\n");
+
+		ret = add_new_tmgi_entry(info_update, new_client);
+		if (ret != SUCCESS) {
+			kfree(new_client);
+			new_client = NULL;
+		}
+	}
+
+exit_add:
+	return ret;
+}
+
+/**
+ * delete_client_entry_from_table() - delete client entry from specified TMGI
+ * @buffer:	Buffer containing client info for deletion.
+ *
+ * This function deletes a client from the specified TMGI in
+ * the global TMGI-client table. If this was the last client
+ * entry, it also deletes the TMGI entry.
+ *
+ * Return: Success on deleting client entry, error otherwise.
+ */
+int delete_client_entry_from_table(char *buffer)
+{
+	struct tmgi_to_clnt_info_update *info_update;
+	struct clnt_info *temp_client = NULL;
+	struct tmgi_to_clnt_info *temp_tmgi = NULL;
+
+	embms_debug("delete_client_entry_from_table: Enter\n");
+
+	info_update = (struct tmgi_to_clnt_info_update *)buffer;
+
+	if (!info_update) {
+		embms_error("delete_client_entry_from_table:");
+		embms_error("NULL arguments passed\n");
+		return -EBADPARAM;
+	}
+
+	/* Search for the TMGI entry*/
+	if (embms_conf.no_of_tmgi_sessions == 0)
+		return SUCCESS;
+
+	temp_tmgi = check_for_tmgi_entry(info_update->multicast_addr,
+					 info_update->multicast_port);
+
+	if (!temp_tmgi) {
+		embms_error("delete_client_entry_from_table:TMGI not found\n");
+		return -EBADPARAM;
+	}
+	/* Delete client entry for a specific tmgi*/
+
+	embms_debug("delete_client_entry_from_table:clnt addr :%pI4,port %u\n",
+		    &info_update->client_addr,
+		    ntohs(info_update->client_port));
+
+	temp_client = chk_clnt_entry(temp_tmgi, info_update);
+
+	if (!temp_client) {
+		/* Specified client entry was not found in client list
+		 * of specified TMGI
+		 */
+		embms_error("delete_client_entry_from_table:Clnt not found\n");
+		return -EBADPARAM;
+	}
+
+	spin_lock_bh(&embms_conf.lock);
+
+	list_del(&temp_client->client_list_ptr);
+	temp_tmgi->no_of_clients--;
+
+	spin_unlock_bh(&embms_conf.lock);
+
+	kfree(temp_client);
+	temp_client = NULL;
+
+	embms_debug("delete_client_entry_from_table:Client entry deleted\n");
+
+	if (temp_tmgi->no_of_clients == 0) {
+		/* If deleted client was the only client for that TMGI
+		 * we need to delete TMGI entry from table
+		 */
+		embms_debug("delete_client_entry_from_table:");
+		embms_debug("Deleted client was the last client for tmgi\n");
+		embms_debug("delete_client_entry_from_table:");
+		embms_debug("Deleting tmgi since it has zero clients\n");
+
+		spin_lock_bh(&embms_conf.lock);
+
+		list_del(&temp_tmgi->tmgi_list_ptr);
+		embms_conf.no_of_tmgi_sessions--;
+		kfree(temp_tmgi);
+
+		spin_unlock_bh(&embms_conf.lock);
+
+		embms_debug("delete_client_entry_from_table: TMGI deleted\n");
+	}
+
+	if (embms_conf.no_of_tmgi_sessions == 0)
+		embms_conf.embms_tunneling_status = TUNNELING_OFF;
+
+	return SUCCESS;
+}
+
+/**
+ * embms_device_ioctl() - handle IOCTL calls to device
+ * @file:	File descriptor of file opened from userspace process
+ * @ioctl_num:	IOCTL to use
+ * @ioctl_param:	IOCTL parameters/arguments
+ *
+ * This function is called whenever a process tries to do
+ * an ioctl on our device file. As per the IOCTL number,
+ * it calls various functions to manipulate global
+ * TMGI-client table
+ *
+ * Return: Success if the function call returns SUCCESS, error otherwise.
+ */
+
+long embms_device_ioctl(struct file *file, unsigned int ioctl_num,
+			unsigned long ioctl_param)
+{
+	int ret;
+	char buffer[BUF_LEN];
+	struct in_device *iface_dev;
+	struct in_ifaddr *iface_info;
+	struct tmgi_to_clnt_info_update *info_update;
+	char __user *argp = (char __user *)ioctl_param;
+
+	memset(buffer, 0, BUF_LEN);
+
+	/* Switch according to the ioctl called*/
+	switch (ioctl_num) {
+	case ADD_EMBMS_TUNNEL:
+		if (copy_from_user(buffer, argp,
+				   sizeof(struct tmgi_to_clnt_info_update)))
+			return -EFAULT;
+
+		ret = add_client_entry_to_table(buffer);
+		print_tmgi_to_client_table();
+		break;
+
+	case DEL_EMBMS_TUNNEL:
+		if (copy_from_user(buffer, argp,
+				   sizeof(struct tmgi_to_clnt_info_update)))
+			return -EFAULT;
+
+		ret = delete_client_entry_from_table(buffer);
+		print_tmgi_to_client_table();
+		break;
+
+	case TMGI_DEACTIVATE:
+		if (copy_from_user(buffer, argp,
+				   sizeof(struct tmgi_to_clnt_info_update)))
+			return -EFAULT;
+
+		ret = delete_tmgi_entry_from_table(buffer);
+		print_tmgi_to_client_table();
+		break;
+
+	case CLIENT_DEACTIVATE:
+		if (copy_from_user(buffer, argp,
+				   sizeof(struct tmgi_to_clnt_info_update)))
+			return -EFAULT;
+
+		ret = delete_client_entry_from_all_tmgi(buffer);
+		print_tmgi_to_client_table();
+		break;
+
+	case GET_EMBMS_TUNNELING_STATUS:
+		/* This ioctl is both input (ioctl_param) and
+		 * output (the return value of this function)
+		 */
+		embms_debug("Sending tunneling status : %d\n",
+			    embms_conf.embms_tunneling_status);
+		ret = embms_conf.embms_tunneling_status;
+		break;
+
+	case START_EMBMS_TUNNEL:
+
+		if (copy_from_user(buffer, argp,
+				   sizeof(struct tmgi_to_clnt_info_update)))
+			return -EFAULT;
+
+		info_update = (struct tmgi_to_clnt_info_update *)buffer;
+		embms_conf.embms_data_port = info_update->data_port;
+		udph_global->source = embms_conf.embms_data_port;
+
+		memset(embms_conf.embms_iface, 0, EMBMS_MAX_IFACE_NAME);
+		memcpy(embms_conf.embms_iface, info_update->iface_name,
+		       EMBMS_MAX_IFACE_NAME);
+
+		embms_conf.embms_tunneling_status = TUNNELING_ON;
+		embms_debug("Starting Tunneling. Embms_data_port  = %d\n",
+			    ntohs(embms_conf.embms_data_port));
+		embms_debug("Embms Data Iface = %s\n", embms_conf.embms_iface);
+		ret = SUCCESS;
+
+		/*Initialise dev_global to bridge device*/
+		dev_global = __dev_get_by_name(&init_net, BRIDGE_IFACE);
+		if (!dev_global) {
+			embms_error("Error in getting device info\n");
+			ret = FAILURE;
+		} else {
+			iface_dev = (struct in_device *)dev_global->ip_ptr;
+			iface_info = iface_dev->ifa_list;
+			while (iface_info) {
+				if (memcmp(iface_info->ifa_label,
+					   BRIDGE_IFACE,
+					   strlen(BRIDGE_IFACE)) == 0)
+					break;
+
+				iface_info = iface_info->ifa_next;
+			}
+			if (iface_info) {
+				embms_debug("IP address of %s iface is %pI4\n",
+					    BRIDGE_IFACE,
+					    &iface_info->ifa_address);
+				/*Populate source addr for header*/
+				iph_global->saddr = iface_info->ifa_address;
+				ret = SUCCESS;
+			} else {
+				embms_debug("Could not find iface address\n");
+				ret = FAILURE;
+			}
+		}
+
+		break;
+
+	case STOP_EMBMS_TUNNEL:
+
+		embms_conf.embms_tunneling_status = TUNNELING_OFF;
+		embms_debug("Stopped Tunneling..\n");
+		ret = SUCCESS;
+		break;
+
+	default:
+		ret = -ENOTTY;
+		break;
+	}
+
+	return ret;
+}
+
+/* Module Declarations
+ * This structure will hold the functions to be called
+ * when a process does something to the device we
+ * created. Since a pointer to this structure is kept in
+ * the devices table, it can't be local to
+ * init_module. NULL is for unimplemented functions.
+ */
+static const struct file_operations embms_device_fops = {
+	.owner = THIS_MODULE,
+	.open = embms_device_open,
+	.release = embms_device_release,
+	.read = NULL,
+	.write = NULL,
+	.unlocked_ioctl = embms_device_ioctl,
+};
+
+static int embms_ioctl_init(void)
+{
+	int ret;
+	struct device *dev;
+
+	ret = alloc_chrdev_region(&device, 0, dev_num, EMBMS_DEVICE_NAME);
+	if (ret) {
+		embms_error("device_alloc err\n");
+		goto dev_alloc_err;
+	}
+
+	embms_class = class_create(THIS_MODULE, EMBMS_DEVICE_NAME);
+	if (IS_ERR(embms_class)) {
+		embms_error("class_create err\n");
+		goto class_err;
+	}
+
+	dev = device_create(embms_class, NULL, device,
+			    &embms_conf, EMBMS_DEVICE_NAME);
+	if (IS_ERR(dev)) {
+		embms_error("device_create err\n");
+		goto device_err;
+	}
+
+	cdev_init(&embms_device, &embms_device_fops);
+	ret = cdev_add(&embms_device, device, dev_num);
+	if (ret) {
+		embms_error("cdev_add err\n");
+		goto cdev_add_err;
+	}
+
+	embms_debug("ioctl init OK!!\n");
+	return 0;
+
+cdev_add_err:
+	device_destroy(embms_class, device);
+device_err:
+	class_destroy(embms_class);
+class_err:
+	unregister_chrdev_region(device, dev_num);
+dev_alloc_err:
+	return -ENODEV;
+}
+
+static void embms_ioctl_deinit(void)
+{
+	cdev_del(&embms_device);
+	device_destroy(embms_class, device);
+	class_destroy(embms_class);
+	unregister_chrdev_region(device, dev_num);
+}
+
+/* Initialize the module - register the character device */
+static int __init start_embms(void)
+{
+	int ret = 0;
+
+	iph_global = (struct iphdr *)hdr_buff;
+	udph_global = (struct udphdr *)(hdr_buff + sizeof(struct iphdr));
+
+	embms_conf.embms_tunneling_status = TUNNELING_OFF;
+	embms_conf.no_of_tmgi_sessions = 0;
+	embms_conf.embms_data_port = 0;
+	atomic_set(&embms_conf.device_under_use, 0);
+	atomic_set(&embms_conf.ip_ident, 0);
+	spin_lock_init(&embms_conf.lock);
+
+	embms_debug("Registering embms device\n");
+
+	ret = embms_ioctl_init();
+	if (ret) {
+		embms_error("embms device failed to register");
+		goto fail_init;
+	}
+
+	INIT_LIST_HEAD(&tmgi_to_clnt_map_tbl.tmgi_list_ptr);
+
+	memset(hdr_buff, 0, sizeof(struct udphdr) + sizeof(struct iphdr));
+	udph_global->check = UDP_CHECKSUM;
+	iph_global->version = IP_VERSION;
+	iph_global->ihl = IP_IHL;
+	iph_global->tos = IP_TOS;
+	iph_global->frag_off = IP_FRAG_OFFSET;
+	iph_global->ttl = IP_TTL;
+	iph_global->protocol = IPPROTO_UDP;
+
+	dev_global = NULL;
+
+	if (!embms_tm_multicast_recv)
+		RCU_INIT_POINTER(embms_tm_multicast_recv,
+				 handle_multicast_stream);
+
+	return ret;
+
+fail_init:
+	embms_ioctl_deinit();
+	return ret;
+}
+
+/* Cleanup - unregister the character device */
+
+static void __exit stop_embms(void)
+{
+	embms_ioctl_deinit();
+
+	if (rcu_dereference(embms_tm_multicast_recv))
+		RCU_INIT_POINTER(embms_tm_multicast_recv, NULL);
+
+	embms_debug("unregister_chrdev done\n");
+}
+
+module_init(start_embms);
+module_exit(stop_embms);
+MODULE_LICENSE("GPL v2");
diff --git a/net/embms_kernel/embms_kernel.h b/net/embms_kernel/embms_kernel.h
new file mode 100644
index 0000000..c8248ce
--- /dev/null
+++ b/net/embms_kernel/embms_kernel.h
@@ -0,0 +1,233 @@
+/******************************************************************
+ * Copyright (c) 2013-2015,2017, The Linux Foundation. All rights reserved.
+
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *---------------------------------------------------------------
+
+ * DESCRIPTION
+ * Header file for the eMBMS Tunneling Module in the kernel.
+ *******************************************************************
+ */
+
+#ifndef EMBMS_H
+#define EMBMS_H
+
+#include <linux/ioctl.h>
+#include <stdbool.h>
+#include <linux/if_addr.h>
+#include <linux/list.h>
+#include <linux/ip.h>
+#include <linux/miscdevice.h>
+#include <linux/spinlock.h>
+#include <linux/cdev.h>
+
+#define EMBMS_MAX_IFACE_NAME    20
+
+/* Defining IP and UDP header related macros*/
+
+#define UDP_CHECKSUM                0
+#define IP_VERSION                  4
+#define IP_IHL                      5
+#define IP_TOS                      0
+#define IP_ID                       1
+#define IP_FRAG_OFFSET              htons(0x4000)
+#define IP_TTL                      64
+#define BRIDGE_IFACE                "bridge0"
+
+#define BUF_LEN 1024
+#define TUNNELING_ON 1
+#define TUNNELING_OFF 0
+
+/* Definitions required for IOCTL */
+static unsigned int dev_num = 1;
+/* Embms device used for communication*/
+struct cdev embms_device;
+static struct class *embms_class;
+static dev_t device;
+#define EMBMS_IOC_MAGIC 0x64
+
+#define embms_debug pr_debug
+#define embms_error pr_debug
+
+/* The name of the device file*/
+#define EMBMS_DEVICE_NAME "embms_tm_device"
+
+extern int (*embms_tm_multicast_recv)(struct sk_buff *skb);
+
+/**
+ * enum embms_action_type - Describes action to perform
+ * @ADD_CLIENT_ENTRY: add client entry to TMGI
+ * @DELETE_CLIENT_ENTRY: delete client entry from TMGI
+ * @TMGI_DEACTIVATE: Delete TMGI entry
+ * @CLIENT_ACTIVATE_ALL_TMGI: Add client to all TMGI
+ * @CLIENT_DEACTIVATE_ALL_TMGI: Delete client from all TMGI
+ * @SESSION_DEACTIVATE: Stop session
+ * @SOCK_INFO: Socket information like V4 addr, port etc
+ *
+ * This enum defines the types of action which are
+ * supported by this module.
+ */
+
+enum {
+	ADD_CLIENT_ENTRY = 0,
+	DELETE_CLIENT_ENTRY,
+	TMGI_DEACTIVATE,
+	CLIENT_ACTIVATE_ALL_TMGI,
+	CLIENT_DEACTIVATE_ALL_TMGI,
+	SESSION_DEACTIVATE,
+	SOCK_INFO
+} embms_action_type;
+
+/**
+ * struct tmgi_to_clnt_info_update - information for addition/deletion
+ * @multicast_addr: TMGI multicast IP to receive data
+ * @multicast_port: TMGI multicast port to receive data
+ * @client_addr: Client IPV4 address for sending data
+ * @client_port: Client port for sending data
+ * @data_port: port used to send data to client
+ * @action_type: Action to be performed
+ * @iface_name: iface to listen to for data
+ *
+ * This structure contains information as to what action
+ * needs to be performed on TMGI-client table. It is
+ * sent as a parameter during an IOCTL call
+ */
+
+struct tmgi_to_clnt_info_update {
+	u32 multicast_addr;
+	u16 multicast_port;
+	u32 client_addr;
+	u16 client_port;
+	u16 data_port;
+	u32 action_type;
+	char iface_name[EMBMS_MAX_IFACE_NAME];
+};
+
+/**
+ * struct clnt_info - contains client information
+ * @addr: Client IPV4 address for sending packets
+ * @port: Client port for sending packets
+ * @dmac: Client DMAC address
+ * @client_list_ptr : list ptr used to maintain client list
+ *
+ * This structure maintains complete client information
+ * to be used when sending packets to client
+ */
+
+struct clnt_info {
+	u32 addr;
+	u16 port;
+	u8 dmac[ETH_ALEN];
+	struct list_head client_list_ptr;
+};
+
+/**
+ * struct tmgi_to_clnt_info - contains TMGI information
+ * @tmgi_multicast_addr: TMGI IPV4 address to listen for packets
+ * @tmgi_port: TMGI port to listen for packets
+ * @no_of_clients: No of clients for a TMGI
+ * @client_list_head : list head for client list
+ * @tmgi_list_ptr : list ptr to maintain tmgi list
+ *
+ * This structure maintains complete TMGI information,
+ * including the list of clients to which data is sent
+ */
+
+struct tmgi_to_clnt_info {
+	u32 tmgi_multicast_addr;
+	u16 tmgi_port;
+	u16 no_of_clients;
+	struct list_head client_list_head;
+	struct list_head tmgi_list_ptr;
+};
+
+/**
+ * struct embms_info_internal - stores module specific params
+ * @device_under_use: Used to prevent concurrent access to the same device
+ * @embms_data_port: Source Data port used for tunnelled packets
+ * @embms_iface: Iface to receive embms traffic
+ * @embms_tunneling_status : Current EMBMS Status
+ * @no_of_tmgi_sessions : Number of current active TMGI sessions
+ * @lock : Lock for concurrency scenarios
+ * @ip_ident : IP identification number to be used for sent packets
+ *
+ * This structure holds module-specific information which is
+ * used throughout the module to maintain consistency
+ */
+
+struct embms_info_internal {
+	atomic_t device_under_use;
+	int embms_data_port;
+	char embms_iface[EMBMS_MAX_IFACE_NAME];
+	int embms_tunneling_status;
+	int no_of_tmgi_sessions;
+	/*lock to prevent concurrent access*/
+	spinlock_t lock;
+	atomic_t ip_ident;
+};
+
+/* This ioctl is used to add a new client entry to tunneling module.
+ * Entry params are populated in the struct used for ioctl
+ */
+
+#define ADD_EMBMS_TUNNEL _IOW(EMBMS_IOC_MAGIC, 0, \
+		struct tmgi_to_clnt_info_update)
+
+/* This ioctl is used to delete a client entry for a particular
+ * TMGI from tunneling module.
+ * Entry params are populated in the struct used for ioctl
+ */
+
+#define DEL_EMBMS_TUNNEL _IOW(EMBMS_IOC_MAGIC, 1, \
+		struct tmgi_to_clnt_info_update)
+
+/* This ioctl is used to delete a TMGI entry completely
+ * from tunneling module.
+ * Entry params are populated in the struct used for ioctl
+ */
+
+#define TMGI_DEACTIVATE _IOW(EMBMS_IOC_MAGIC, 2, \
+		struct tmgi_to_clnt_info_update)
+
+/* This ioctl is used to delete client entry completely
+ * from tunneling module.
+ * Entry params are populated in the struct used for ioctl
+ */
+
+#define CLIENT_DEACTIVATE _IOW(EMBMS_IOC_MAGIC, 3, \
+		struct tmgi_to_clnt_info_update)
+
+/* Gets the ON/OFF status of Tunneling module*/
+
+#define GET_EMBMS_TUNNELING_STATUS _IO(EMBMS_IOC_MAGIC, 4)
+
+/* Used to start tunneling. Argument is the port
+ * number to be used to send
+ * data to clients
+ */
+
+#define START_EMBMS_TUNNEL _IOW(EMBMS_IOC_MAGIC, 5, \
+		struct tmgi_to_clnt_info_update)
+
+/* Used to stop tunneling */
+
+#define STOP_EMBMS_TUNNEL _IO(EMBMS_IOC_MAGIC, 6)
+
+/* Return values indicating error status*/
+#define SUCCESS               0         /* Successful operation*/
+#define FAILURE               -1         /* Unsuccessful operation*/
+
+/* Error Condition Values*/
+#define ENOMEM                -2        /* Out of memory*/
+#define EBADPARAM             -3        /* Incorrect parameters passed*/
+#define ENOEFFECT             -4        /* No Effect*/
+
+#endif
+
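For orientation, here is a minimal userspace sketch (not part of the patch) of how the ioctls documented above are meant to be driven. The /dev/embms_tm_device path follows from EMBMS_DEVICE_NAME, but the interface name, addresses and ports are illustrative assumptions; since this header pulls in kernel-only includes, a real userspace build is assumed to use a sanitized copy of these definitions.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <arpa/inet.h>
#include "embms_kernel.h"	/* assumed to be exported to userspace */

int main(void)
{
	struct tmgi_to_clnt_info_update req;
	int fd = open("/dev/embms_tm_device", O_RDWR);	/* EMBMS_DEVICE_NAME */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Start tunneling on the eMBMS data interface (example values) */
	memset(&req, 0, sizeof(req));
	req.data_port = htons(2323);
	strncpy(req.iface_name, "rmnet_data0", EMBMS_MAX_IFACE_NAME - 1);
	if (ioctl(fd, START_EMBMS_TUNNEL, &req) != 0)
		fprintf(stderr, "START_EMBMS_TUNNEL failed\n");

	/* Map one client to a TMGI multicast stream (example values) */
	memset(&req, 0, sizeof(req));
	req.multicast_addr = inet_addr("224.0.23.60");
	req.multicast_port = htons(5000);
	req.client_addr = inet_addr("192.168.1.10");
	req.client_port = htons(6000);
	if (ioctl(fd, ADD_EMBMS_TUNNEL, &req) != 0)
		fprintf(stderr, "ADD_EMBMS_TUNNEL failed\n");

	/* Status comes back as the ioctl return value (TUNNELING_ON/OFF) */
	printf("tunneling status: %d\n",
	       ioctl(fd, GET_EMBMS_TUNNELING_STATUS, 0));

	close(fd);
	return 0;
}

Note that GET_EMBMS_TUNNELING_STATUS reports the state through the ioctl return value rather than through the argument, matching the handler in embms_device_ioctl() above.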
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 6a7ff69..7f9a8df 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -22,7 +22,8 @@
 static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
 					    int tos, int oif,
 					    const xfrm_address_t *saddr,
-					    const xfrm_address_t *daddr)
+					    const xfrm_address_t *daddr,
+					    u32 mark)
 {
 	struct rtable *rt;
 
@@ -30,6 +31,7 @@
 	fl4->daddr = daddr->a4;
 	fl4->flowi4_tos = tos;
 	fl4->flowi4_oif = l3mdev_master_ifindex_by_index(net, oif);
+	fl4->flowi4_mark = mark;
 	if (saddr)
 		fl4->saddr = saddr->a4;
 
@@ -44,20 +46,22 @@
 
 static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos, int oif,
 					  const xfrm_address_t *saddr,
-					  const xfrm_address_t *daddr)
+					  const xfrm_address_t *daddr,
+					  u32 mark)
 {
 	struct flowi4 fl4;
 
-	return __xfrm4_dst_lookup(net, &fl4, tos, oif, saddr, daddr);
+	return __xfrm4_dst_lookup(net, &fl4, tos, oif, saddr, daddr, mark);
 }
 
 static int xfrm4_get_saddr(struct net *net, int oif,
-			   xfrm_address_t *saddr, xfrm_address_t *daddr)
+			   xfrm_address_t *saddr, xfrm_address_t *daddr,
+			   u32 mark)
 {
 	struct dst_entry *dst;
 	struct flowi4 fl4;
 
-	dst = __xfrm4_dst_lookup(net, &fl4, 0, oif, NULL, daddr);
+	dst = __xfrm4_dst_lookup(net, &fl4, 0, oif, NULL, daddr, mark);
 	if (IS_ERR(dst))
 		return -EHOSTUNREACH;
 
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index e0f71c0..4003b28 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -29,7 +29,8 @@
 
 static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif,
 					  const xfrm_address_t *saddr,
-					  const xfrm_address_t *daddr)
+					  const xfrm_address_t *daddr,
+					  u32 mark)
 {
 	struct flowi6 fl6;
 	struct dst_entry *dst;
@@ -38,6 +39,7 @@
 	memset(&fl6, 0, sizeof(fl6));
 	fl6.flowi6_oif = l3mdev_master_ifindex_by_index(net, oif);
 	fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
+	fl6.flowi6_mark = mark;
 	memcpy(&fl6.daddr, daddr, sizeof(fl6.daddr));
 	if (saddr)
 		memcpy(&fl6.saddr, saddr, sizeof(fl6.saddr));
@@ -54,12 +56,13 @@
 }
 
 static int xfrm6_get_saddr(struct net *net, int oif,
-			   xfrm_address_t *saddr, xfrm_address_t *daddr)
+			   xfrm_address_t *saddr, xfrm_address_t *daddr,
+			   u32 mark)
 {
 	struct dst_entry *dst;
 	struct net_device *dev;
 
-	dst = xfrm6_dst_lookup(net, 0, oif, NULL, daddr);
+	dst = xfrm6_dst_lookup(net, 0, oif, NULL, daddr, mark);
 	if (IS_ERR(dst))
 		return -EHOSTUNREACH;
 
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index 1972a14..b97caa1 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -19,6 +19,7 @@
 #include <linux/tcp.h>
 #include <linux/netfilter.h>
 #include <linux/slab.h>
+#include <linux/list.h>
 
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_expect.h>
@@ -32,6 +33,18 @@
 static unsigned int dcc_timeout __read_mostly = 300;
 /* This is slow, but it's simple. --RR */
 static char *irc_buffer;
+struct irc_client_info {
+	char *nickname;
+	bool conn_to_server;
+	int nickname_len;
+	__be32 server_ip;
+	__be32 client_ip;
+	struct list_head ptr;
+};
+
+static struct irc_client_info client_list;
+
+static unsigned int no_of_clients;
 static DEFINE_SPINLOCK(irc_buffer_lock);
 
 unsigned int (*nf_nat_irc_hook)(struct sk_buff *skb,
@@ -61,7 +74,7 @@
 };
 
 #define MINMATCHLEN	5
-
+#define MINLENNICK	1
 /* tries to get the ip_addr and port out of a dcc command
  * return value: -1 on failure, 0 on success
  *	data		pointer to first byte of DCC command data
@@ -71,6 +84,23 @@
  *	ad_beg_p	returns pointer to first byte of addr data
  *	ad_end_p	returns pointer to last byte of addr data
  */
+static struct irc_client_info *
+search_client_by_ip(struct nf_conntrack_tuple *tuple)
+{
+	struct irc_client_info *temp, *ret = NULL;
+	struct list_head *obj_ptr, *prev_obj_ptr;
+
+	list_for_each_safe(obj_ptr, prev_obj_ptr, &client_list.ptr) {
+		temp = list_entry(obj_ptr, struct irc_client_info, ptr);
+		if ((temp->client_ip == tuple->src.u3.ip) &&
+		    (temp->server_ip == tuple->dst.u3.ip))
+			ret = temp;
+	}
+	return ret;
+}
+
 static int parse_dcc(char *data, const char *data_end, __be32 *ip,
 		     u_int16_t *port, char **ad_beg_p, char **ad_end_p)
 {
@@ -105,6 +135,106 @@
 	return 0;
 }
 
+static bool mangle_ip(struct nf_conn *ct,
+		      int dir, char *nick_start)
+{
+	char *nick_end;
+	struct nf_conntrack_tuple *tuple;
+	struct irc_client_info *temp;
+	struct list_head *obj_ptr, *prev_obj_ptr;
+
+	tuple = &ct->tuplehash[dir].tuple;
+	nick_end = nick_start;
+	while (*nick_end != ' ')
+		nick_end++;
+	list_for_each_safe(obj_ptr, prev_obj_ptr,
+			   &client_list.ptr) {
+		temp = list_entry(obj_ptr,
+				  struct irc_client_info, ptr);
+		/* If it is an internal client,
+		 * do not mangle the DCC server IP.
+		 */
+		if ((temp->server_ip == tuple->dst.u3.ip) &&
+		    (temp->nickname_len == (nick_end - nick_start))) {
+			if (memcmp(nick_start, temp->nickname,
+				   temp->nickname_len) == 0)
+				return false;
+		}
+	}
+	return true;
+}
+
+static int handle_nickname(struct nf_conn *ct,
+			   int dir, char *nick_start)
+{
+	char *nick_end;
+	struct nf_conntrack_tuple *tuple;
+	struct irc_client_info *temp;
+	int i, j;
+	bool add_entry = true;
+
+	nick_end = nick_start;
+	i = 0;
+	while (*nick_end != '\n') {
+		nick_end++;
+		i++;
+	}
+	tuple = &ct->tuplehash[dir].tuple;
+	/*Check if the entry is already
+	 * present for that client
+	 */
+	temp = search_client_by_ip(tuple);
+	if (temp) {
+		add_entry = false;
+		/* Update the nickname if the client is not already
+		 * connected to the server. If the client is
+		 * connected, wait for the server to confirm
+		 * that the nickname is valid.
+		 */
+		if (!temp->conn_to_server) {
+			kfree(temp->nickname);
+			temp->nickname =
+				kmalloc(i, GFP_ATOMIC);
+			if (temp->nickname) {
+				temp->nickname_len = i;
+				memcpy(temp->nickname,
+				       nick_start, temp->nickname_len);
+			} else {
+				list_del(&temp->ptr);
+				no_of_clients--;
+				kfree(temp);
+			}
+		}
+	}
+	/*Add client entry if not already present*/
+	if (add_entry) {
+		j = sizeof(struct irc_client_info);
+		temp = kmalloc(j, GFP_ATOMIC);
+		if (temp) {
+			no_of_clients++;
+			tuple = &ct->tuplehash[dir].tuple;
+			temp->nickname_len = i;
+			temp->nickname =
+				kmalloc(temp->nickname_len, GFP_ATOMIC);
+			if (!temp->nickname) {
+				kfree(temp);
+				return NF_DROP;
+			}
+			memcpy(temp->nickname, nick_start,
+			       temp->nickname_len);
+			memcpy(&temp->client_ip,
+			       &tuple->src.u3.ip, sizeof(__be32));
+			memcpy(&temp->server_ip,
+			       &tuple->dst.u3.ip, sizeof(__be32));
+			temp->conn_to_server = false;
+			list_add(&temp->ptr,
+				 &client_list.ptr);
+		} else {
+			return NF_DROP;
+		}
+	}
+	return NF_ACCEPT;
+}
 static int help(struct sk_buff *skb, unsigned int protoff,
 		struct nf_conn *ct, enum ip_conntrack_info ctinfo)
 {
@@ -113,7 +243,7 @@
 	const struct tcphdr *th;
 	struct tcphdr _tcph;
 	const char *data_limit;
-	char *data, *ib_ptr;
+	char *data, *ib_ptr, *for_print, *nick_end;
 	int dir = CTINFO2DIR(ctinfo);
 	struct nf_conntrack_expect *exp;
 	struct nf_conntrack_tuple *tuple;
@@ -123,10 +253,8 @@
 	int i, ret = NF_ACCEPT;
 	char *addr_beg_p, *addr_end_p;
 	typeof(nf_nat_irc_hook) nf_nat_irc;
-
-	/* If packet is coming from IRC server */
-	if (dir == IP_CT_DIR_REPLY)
-		return NF_ACCEPT;
+	struct irc_client_info *temp;
+	bool mangle = true;
 
 	/* Until there's been traffic both ways, don't look in packets. */
 	if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
@@ -150,80 +278,223 @@
 	data = ib_ptr;
 	data_limit = ib_ptr + skb->len - dataoff;
 
-	/* strlen("\1DCC SENT t AAAAAAAA P\1\n")=24
-	 * 5+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=14 */
-	while (data < data_limit - (19 + MINMATCHLEN)) {
-		if (memcmp(data, "\1DCC ", 5)) {
-			data++;
-			continue;
+	/* If the packet is coming from the IRC server,
+	 * parse it for the different types of
+	 * messages (MOTD, NICK, etc.) and process
+	 * them accordingly.
+	 */
+	if (dir == IP_CT_DIR_REPLY) {
+		/* Scan the server reply for " MOTD "
+		 * (6 bytes, hence the loop bound below).
+		 */
+
+		while (data < data_limit - 6) {
+			if (memcmp(data, " MOTD ", 6)) {
+				data++;
+				continue;
+			}
+			/* MOTD message signifies successful
+			 * registration with server
+			 */
+			tuple = &ct->tuplehash[!dir].tuple;
+			temp = search_client_by_ip(tuple);
+			if (temp && !temp->conn_to_server)
+				temp->conn_to_server = true;
+			ret = NF_ACCEPT;
+			goto out;
 		}
-		data += 5;
-		/* we have at least (19+MINMATCHLEN)-5 bytes valid data left */
 
-		iph = ip_hdr(skb);
-		pr_debug("DCC found in master %pI4:%u %pI4:%u\n",
-			 &iph->saddr, ntohs(th->source),
-			 &iph->daddr, ntohs(th->dest));
-
-		for (i = 0; i < ARRAY_SIZE(dccprotos); i++) {
-			if (memcmp(data, dccprotos[i], strlen(dccprotos[i]))) {
-				/* no match */
+		/* strlen("NICK :") = 6, plus a nickname of at least
+		 * MINLENNICK (1) characters. Parse the server reply
+		 * to get the nickname of the client.
+		 */
+		data = ib_ptr;
+		data_limit = ib_ptr + skb->len - dataoff;
+		while (data < data_limit - (6 + MINLENNICK)) {
+			if (memcmp(data, "NICK :", 6)) {
+				data++;
 				continue;
 			}
-			data += strlen(dccprotos[i]);
-			pr_debug("DCC %s detected\n", dccprotos[i]);
-
-			/* we have at least
-			 * (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid
-			 * data left (== 14/13 bytes) */
-			if (parse_dcc(data, data_limit, &dcc_ip,
-				       &dcc_port, &addr_beg_p, &addr_end_p)) {
-				pr_debug("unable to parse dcc command\n");
-				continue;
-			}
-
-			pr_debug("DCC bound ip/port: %pI4:%u\n",
-				 &dcc_ip, dcc_port);
-
-			/* dcc_ip can be the internal OR external (NAT'ed) IP */
-			tuple = &ct->tuplehash[dir].tuple;
-			if (tuple->src.u3.ip != dcc_ip &&
-			    tuple->dst.u3.ip != dcc_ip) {
-				net_warn_ratelimited("Forged DCC command from %pI4: %pI4:%u\n",
-						     &tuple->src.u3.ip,
-						     &dcc_ip, dcc_port);
-				continue;
-			}
-
-			exp = nf_ct_expect_alloc(ct);
-			if (exp == NULL) {
-				nf_ct_helper_log(skb, ct,
-						 "cannot alloc expectation");
-				ret = NF_DROP;
-				goto out;
+			data += 6;
+			nick_end = data;
+			i = 0;
+			while ((*nick_end != 0x0d) &&
+			       (*(nick_end + 1) != '\n')) {
+				nick_end++;
+				i++;
 			}
 			tuple = &ct->tuplehash[!dir].tuple;
-			port = htons(dcc_port);
-			nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
-					  tuple->src.l3num,
-					  NULL, &tuple->dst.u3,
-					  IPPROTO_TCP, NULL, &port);
-
-			nf_nat_irc = rcu_dereference(nf_nat_irc_hook);
-			if (nf_nat_irc && ct->status & IPS_NAT_MASK)
-				ret = nf_nat_irc(skb, ctinfo, protoff,
-						 addr_beg_p - ib_ptr,
-						 addr_end_p - addr_beg_p,
-						 exp);
-			else if (nf_ct_expect_related(exp) != 0) {
-				nf_ct_helper_log(skb, ct,
-						 "cannot add expectation");
-				ret = NF_DROP;
+			temp = search_client_by_ip(tuple);
+			if (temp && temp->nickname) {
+				kfree(temp->nickname);
+				temp->nickname = kmalloc(i, GFP_ATOMIC);
+				if (temp->nickname) {
+					temp->nickname_len = i;
+					memcpy(temp->nickname, data,
+					       temp->nickname_len);
+					temp->conn_to_server = true;
+				} else {
+					list_del(&temp->ptr);
+					no_of_clients--;
+					kfree(temp);
+					ret = NF_ACCEPT;
+				}
 			}
-			nf_ct_expect_put(exp);
+			/*NICK during registration*/
+			ret = NF_ACCEPT;
 			goto out;
 		}
 	}
+
+	else {
+		/* Parse the NICK command from the client to create an
+		 * entry: strlen("NICK ") = 5, plus a nickname of at
+		 * least MINLENNICK (1) characters.
+		 */
+		data = ib_ptr;
+		data_limit = ib_ptr + skb->len - dataoff;
+		while (data < data_limit - (5 + MINLENNICK)) {
+			if (memcmp(data, "NICK ", 5)) {
+				data++;
+				continue;
+			}
+			data += 5;
+			ret = handle_nickname(ct, dir, data);
+			goto out;
+		}
+
+		data = ib_ptr;
+		while (data < data_limit - 6) {
+			if (memcmp(data, "QUIT :", 6)) {
+				data++;
+				continue;
+			}
+			/* Parsing QUIT to free the list entry
+			 */
+			tuple = &ct->tuplehash[dir].tuple;
+			temp = search_client_by_ip(tuple);
+			if (temp) {
+				list_del(&temp->ptr);
+				no_of_clients--;
+				kfree(temp->nickname);
+				kfree(temp);
+			}
+			ret = NF_ACCEPT;
+			goto out;
+		}
+		/* strlen("\1DCC SENT t AAAAAAAA P\1\n")=24
+		 * 5+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=14
+		 */
+		data = ib_ptr;
+		while (data < data_limit - (19 + MINMATCHLEN)) {
+			if (memcmp(data, "\1DCC ", 5)) {
+				data++;
+				continue;
+			}
+			data += 5;
+			/* we have at least (19+MINMATCHLEN)-5
+			 * bytes of valid data left
+			 */
+			iph = ip_hdr(skb);
+			pr_debug("DCC found in master %pI4:%u %pI4:%u\n",
+				 &iph->saddr, ntohs(th->source),
+				 &iph->daddr, ntohs(th->dest));
+
+			for (i = 0; i < ARRAY_SIZE(dccprotos); i++) {
+				if (memcmp(data, dccprotos[i],
+					   strlen(dccprotos[i]))) {
+					/* no match */
+					continue;
+				}
+				data += strlen(dccprotos[i]);
+				pr_debug("DCC %s detected\n", dccprotos[i]);
+
+				/* we have at least
+				 * (19+MINMATCHLEN)-5-dccprotos[i].matchlen
+				 *bytes valid data left (== 14/13 bytes)
+				 */
+				if (parse_dcc(data, data_limit, &dcc_ip,
+					      &dcc_port, &addr_beg_p,
+					      &addr_end_p)) {
+					pr_debug("unable to parse dcc command\n");
+					continue;
+				}
+
+				pr_debug("DCC bound ip/port: %pI4:%u\n",
+					 &dcc_ip, dcc_port);
+
+				/* dcc_ip can be the internal OR
+				 * external (NAT'ed) IP
+				 */
+				tuple = &ct->tuplehash[dir].tuple;
+				if (tuple->src.u3.ip != dcc_ip &&
+				    tuple->dst.u3.ip != dcc_ip) {
+					net_warn_ratelimited("Forged DCC command from %pI4: %pI4:%u\n",
+							     &tuple->src.u3.ip,
+							     &dcc_ip, dcc_port);
+					continue;
+				}
+
+				exp = nf_ct_expect_alloc(ct);
+				if (!exp) {
+					nf_ct_helper_log(skb, ct,
+							 "cannot alloc expectation");
+					ret = NF_DROP;
+					goto out;
+				}
+				tuple = &ct->tuplehash[!dir].tuple;
+				port = htons(dcc_port);
+				nf_ct_expect_init(exp,
+						  NF_CT_EXPECT_CLASS_DEFAULT,
+						  tuple->src.l3num,
+						  NULL, &tuple->dst.u3,
+						  IPPROTO_TCP, NULL, &port);
+
+				nf_nat_irc = rcu_dereference(nf_nat_irc_hook);
+
+				tuple = &ct->tuplehash[dir].tuple;
+				for_print = ib_ptr;
+				/* strlen("PRIVMSG xxxx :\1DCC
+				 * SENT t AAAAAAAA P\1\n") = 26:
+				 * 8 + strlen(xxxx) (min length 1) + 7 +
+				 * MINMATCHLEN + strlen("t AAAAAAAA P\1\n") = 14.
+				 * Parse the DCC command to get the client name
+				 * and check whether it is an internal client.
+				 */
+				while (for_print <
+				       data_limit - (25 + MINMATCHLEN)) {
+					if (memcmp(for_print, "PRIVMSG ", 8)) {
+						for_print++;
+						continue;
+					}
+					for_print += 8;
+					mangle = mangle_ip(ct,
+							   dir, for_print);
+					break;
+				}
+				if (mangle &&
+				    nf_nat_irc &&
+				    ct->status & IPS_NAT_MASK)
+					ret = nf_nat_irc(skb, ctinfo,
+							 protoff,
+							 addr_beg_p - ib_ptr,
+							 addr_end_p
+							 - addr_beg_p,
+							 exp);
+
+				else if (mangle &&
+					 nf_ct_expect_related(exp)
+					 != 0) {
+					nf_ct_helper_log(skb, ct,
+							 "cannot add expectation");
+					ret = NF_DROP;
+				}
+				nf_ct_expect_put(exp);
+				goto out;
+			}
+		}
+	}
  out:
 	spin_unlock_bh(&irc_buffer_lock);
 	return ret;
@@ -266,7 +537,8 @@
 		kfree(irc_buffer);
 		return ret;
 	}
-
+	no_of_clients = 0;
+	INIT_LIST_HEAD(&client_list.ptr);
 	return 0;
 }
 
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 3a8dc39..f132ef9 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -49,13 +49,28 @@
 MODULE_PARM_DESC(sip_direct_signalling, "expect incoming calls from registrar "
 					"only (default 1)");
 
-static int sip_direct_media __read_mostly = 1;
-module_param(sip_direct_media, int, 0600);
-MODULE_PARM_DESC(sip_direct_media, "Expect Media streams between signalling "
-				   "endpoints only (default 1)");
-
 const struct nf_nat_sip_hooks *nf_nat_sip_hooks;
 EXPORT_SYMBOL_GPL(nf_nat_sip_hooks);
+static struct ctl_table_header *sip_sysctl_header;
+static unsigned int nf_ct_disable_sip_alg;
+static int sip_direct_media = 1;
+static struct ctl_table sip_sysctl_tbl[] = {
+	{
+		.procname     = "nf_conntrack_disable_sip_alg",
+		.data         = &nf_ct_disable_sip_alg,
+		.maxlen       = sizeof(unsigned int),
+		.mode         = 0644,
+		.proc_handler = proc_dointvec,
+	},
+	{
+		.procname     = "nf_conntrack_sip_direct_media",
+		.data         = &sip_direct_media,
+		.maxlen       = sizeof(int),
+		.mode         = 0644,
+		.proc_handler = proc_dointvec,
+	},
+	{}
+};
 
 static int string_len(const struct nf_conn *ct, const char *dptr,
 		      const char *limit, int *shift)
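Because the table is registered under "net/netfilter", the new toggle appears as /proc/sys/net/netfilter/nf_conntrack_disable_sip_alg (alongside nf_conntrack_sip_direct_media); when it is non-zero, the SIP message handler in the next hunk returns NF_ACCEPT without ALG processing. A minimal sketch of flipping it from C, assuming only the standard procfs sysctl interface:

#include <stdio.h>

/* Write "1" to the sysctl added above to bypass SIP ALG processing
 * at runtime. Returns 0 on success, -1 on failure.
 */
static int disable_sip_alg(void)
{
	FILE *f = fopen("/proc/sys/net/netfilter/nf_conntrack_disable_sip_alg",
			"w");

	if (!f)
		return -1;
	fputs("1\n", f);
	return fclose(f);
}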
@@ -1467,6 +1482,8 @@
 	const struct nf_nat_sip_hooks *hooks;
 	int ret;
 
+	if (nf_ct_disable_sip_alg)
+		return NF_ACCEPT;
 	if (strncasecmp(*dptr, "SIP/2.0 ", strlen("SIP/2.0 ")) != 0)
 		ret = process_sip_request(skb, protoff, dataoff, dptr, datalen);
 	else
@@ -1626,6 +1643,16 @@
 {
 	int i, ret;
 
+	sip_sysctl_header = register_net_sysctl(&init_net, "net/netfilter",
+						sip_sysctl_tbl);
+	if (!sip_sysctl_header)
+		pr_debug("nf_ct_sip:Unable to register SIP systbl\n");
+
+	if (nf_ct_disable_sip_alg)
+		pr_debug("nf_ct_sip: SIP ALG disabled\n");
+	else
+		pr_debug("nf_ct_sip: SIP ALG enabled\n");
+
 	if (ports_c == 0)
 		ports[ports_c++] = SIP_PORT;
 
diff --git a/net/rmnet_data/rmnet_data_config.h b/net/rmnet_data/rmnet_data_config.h
index 5ce4600..aa8a0b5 100644
--- a/net/rmnet_data/rmnet_data_config.h
+++ b/net/rmnet_data/rmnet_data_config.h
@@ -34,12 +34,14 @@
  *            parmeter depends on the rmnet_mode
  */
 struct rmnet_logical_ep_conf_s {
+	struct net_device *egress_dev;
+	struct timespec last_flush_time;
+	long curr_time_limit;
+	unsigned int flush_byte_count;
+	unsigned int curr_byte_threshold;
 	u8 refcount;
 	u8 rmnet_mode;
 	u8 mux_id;
-	struct timespec flush_time;
-	unsigned int flush_byte_count;
-	struct net_device *egress_dev;
 };
 
 /**
diff --git a/net/rmnet_data/rmnet_data_handlers.c b/net/rmnet_data/rmnet_data_handlers.c
index 68a4376..a5b22c4 100644
--- a/net/rmnet_data/rmnet_data_handlers.c
+++ b/net/rmnet_data/rmnet_data_handlers.c
@@ -41,26 +41,30 @@
 MODULE_PARM_DESC(dump_pkt_tx, "Dump packets exiting egress handler");
 #endif /* CONFIG_RMNET_DATA_DEBUG_PKT */
 
-/* Time in nano seconds. This number must be less that a second. */
-long gro_flush_time __read_mostly = 10000L;
-module_param(gro_flush_time, long, 0644);
-MODULE_PARM_DESC(gro_flush_time, "Flush GRO when spaced more than this");
+static bool gro_flush_logic_on __read_mostly = 1;
+module_param(gro_flush_logic_on, bool, 0644);
+MODULE_PARM_DESC(gro_flush_logic_on, "If off let GRO determine flushing");
 
-unsigned int gro_min_byte_thresh __read_mostly = 7500;
-module_param(gro_min_byte_thresh, uint, 0644);
-MODULE_PARM_DESC(gro_min_byte_thresh, "Min byte thresh to change flush time");
-
-unsigned int dynamic_gro_on __read_mostly = 1;
-module_param(dynamic_gro_on, uint, 0644);
+static bool dynamic_gro_on __read_mostly = 1;
+module_param(dynamic_gro_on, bool, 0644);
 MODULE_PARM_DESC(dynamic_gro_on, "Toggle to turn on dynamic gro logic");
 
+/* Time in nano seconds. This number must be less that a second. */
+static long lower_flush_time __read_mostly = 10000L;
+module_param(lower_flush_time, long, 0644);
+MODULE_PARM_DESC(lower_flush_time, "Min time value for flushing GRO");
+
+static unsigned int lower_byte_limit __read_mostly = 7500;
+module_param(lower_byte_limit, uint, 0644);
+MODULE_PARM_DESC(lower_byte_limit, "Min byte count for flushing GRO");
+
 unsigned int upper_flush_time __read_mostly = 15000;
 module_param(upper_flush_time, uint, 0644);
-MODULE_PARM_DESC(upper_flush_time, "Upper limit on flush time");
+MODULE_PARM_DESC(upper_flush_time, "Max time value for flushing GRO");
 
 unsigned int upper_byte_limit __read_mostly = 10500;
 module_param(upper_byte_limit, uint, 0644);
-MODULE_PARM_DESC(upper_byte_limit, "Upper byte limit");
+MODULE_PARM_DESC(upper_byte_limit, "Max byte count for flushing GRO");
 
 #define RMNET_DATA_IP_VERSION_4 0x40
 #define RMNET_DATA_IP_VERSION_6 0x60
@@ -258,62 +262,64 @@
 {
 	struct timespec curr_time, diff;
 
-	if (!gro_flush_time)
+	if (!gro_flush_logic_on)
 		return;
 
-	if (unlikely(ep->flush_time.tv_sec == 0)) {
-		getnstimeofday(&ep->flush_time);
+	if (unlikely(ep->last_flush_time.tv_sec == 0)) {
+		getnstimeofday(&ep->last_flush_time);
 		ep->flush_byte_count = 0;
+		ep->curr_time_limit = lower_flush_time;
+		ep->curr_byte_threshold = lower_byte_limit;
 	} else {
 		getnstimeofday(&(curr_time));
-		diff = timespec_sub(curr_time, ep->flush_time);
+		diff = timespec_sub(curr_time, ep->last_flush_time);
 		ep->flush_byte_count += skb_size;
 
 		if (dynamic_gro_on) {
 			if ((!(diff.tv_sec > 0) || diff.tv_nsec <=
-					gro_flush_time) &&
+					ep->curr_time_limit) &&
 					ep->flush_byte_count >=
-					gro_min_byte_thresh) {
+					ep->curr_byte_threshold) {
 				/* Processed many bytes in a small time window.
 				 * No longer need to flush so often and we can
 				 * increase our byte limit
 				 */
-				gro_flush_time = upper_flush_time;
-				gro_min_byte_thresh = upper_byte_limit;
+				ep->curr_time_limit = upper_flush_time;
+				ep->curr_byte_threshold = upper_byte_limit;
 			} else if ((diff.tv_sec > 0 ||
-					diff.tv_nsec > gro_flush_time) &&
+					diff.tv_nsec > ep->curr_time_limit) &&
 					ep->flush_byte_count <
-					gro_min_byte_thresh) {
+					ep->curr_byte_threshold) {
 				/* We have not hit our time limit and we are not
 				 * receive many bytes. Demote ourselves to the
 				 * lowest limits and flush
 				 */
 				napi_gro_flush(napi, false);
-				getnstimeofday(&ep->flush_time);
+				ep->last_flush_time = curr_time;
 				ep->flush_byte_count = 0;
-				gro_flush_time = 10000L;
-				gro_min_byte_thresh = 7500L;
+				ep->curr_time_limit = lower_flush_time;
+				ep->curr_byte_threshold = lower_byte_limit;
 			} else if ((diff.tv_sec > 0 ||
-					diff.tv_nsec > gro_flush_time) &&
+					diff.tv_nsec > ep->curr_time_limit) &&
 					ep->flush_byte_count >=
-					gro_min_byte_thresh) {
+					ep->curr_byte_threshold) {
 				/* Above byte and time limt, therefore we can
 				 * move/maintain our limits to be the max
 				 * and flush
 				 */
 				napi_gro_flush(napi, false);
-				getnstimeofday(&ep->flush_time);
+				ep->last_flush_time = curr_time;
 				ep->flush_byte_count = 0;
-				gro_flush_time = upper_flush_time;
-				gro_min_byte_thresh = upper_byte_limit;
+				ep->curr_time_limit = upper_flush_time;
+				ep->curr_byte_threshold = upper_byte_limit;
 			}
 			/* else, below time limit and below
 			 * byte thresh, so change nothing
 			 */
 		} else if (diff.tv_sec > 0 ||
-				diff.tv_nsec >= gro_flush_time) {
+				diff.tv_nsec >= lower_flush_time) {
 			napi_gro_flush(napi, false);
-			getnstimeofday(&ep->flush_time);
+			ep->last_flush_time = curr_time;
 			ep->flush_byte_count = 0;
 		}
 	}
@@ -567,7 +573,8 @@
 			 skb_is_nonlinear(skb);
 
 	if ((!(config->egress_data_format &
-	    RMNET_EGRESS_FORMAT_AGGREGATION)) || non_linear_skb)
+	    RMNET_EGRESS_FORMAT_AGGREGATION)) || csum_required ||
+	    non_linear_skb)
 		map_header = rmnet_map_add_map_header
 		(skb, additional_header_length, RMNET_MAP_NO_PAD_BYTES);
 	else
@@ -589,8 +596,14 @@
 
 	skb->protocol = htons(ETH_P_MAP);
 
-	if ((config->egress_data_format & RMNET_EGRESS_FORMAT_AGGREGATION) &&
-	    !non_linear_skb) {
+	if (config->egress_data_format & RMNET_EGRESS_FORMAT_AGGREGATION) {
+		if (rmnet_ul_aggregation_skip(skb, required_headroom))
+			return RMNET_MAP_SUCCESS;
+
+		if (non_linear_skb)
+			if (unlikely(__skb_linearize(skb)))
+				return RMNET_MAP_SUCCESS;
+
 		rmnet_map_aggregate(skb, config);
 		return RMNET_MAP_CONSUMED;
 	}
diff --git a/net/rmnet_data/rmnet_map.h b/net/rmnet_data/rmnet_map.h
index f597f1b..3bab6d9 100644
--- a/net/rmnet_data/rmnet_map.h
+++ b/net/rmnet_data/rmnet_map.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -146,5 +146,5 @@
 int rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
 				     struct net_device *orig_dev,
 				     u32 egress_data_format);
-
+int rmnet_ul_aggregation_skip(struct sk_buff *skb, int offset);
 #endif /* _RMNET_MAP_H_ */
diff --git a/net/rmnet_data/rmnet_map_data.c b/net/rmnet_data/rmnet_map_data.c
index d7e420b..1c0f1060 100644
--- a/net/rmnet_data/rmnet_map_data.c
+++ b/net/rmnet_data/rmnet_map_data.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -234,16 +234,9 @@
 
 	if (!skb || !config)
 		return;
-	size = config->egress_agg_size - skb->len;
-
-	if (size < 2000) {
-		LOGL("Invalid length %d", size);
-		return;
-	}
 
 new_packet:
 	spin_lock_irqsave(&config->agg_lock, flags);
-
 	memcpy(&last, &config->agg_last, sizeof(struct timespec));
 	getnstimeofday(&config->agg_last);
 
@@ -265,6 +258,7 @@
 			return;
 		}
 
+		size = config->egress_agg_size - skb->len;
 		config->agg_skb = skb_copy_expand(skb, 0, size, GFP_ATOMIC);
 		if (!config->agg_skb) {
 			config->agg_skb = 0;
@@ -748,3 +742,31 @@
 done:
 	return ret;
 }
+
+int rmnet_ul_aggregation_skip(struct sk_buff *skb, int offset)
+{
+	unsigned char *packet_start = skb->data + offset;
+	int is_icmp = 0;
+
+	if ((skb->data[offset]) >> 4 == 0x04) {
+		struct iphdr *ip4h = (struct iphdr *)(packet_start);
+
+		if (ip4h->protocol == IPPROTO_ICMP)
+			is_icmp = 1;
+	} else if ((skb->data[offset]) >> 4 == 0x06) {
+		struct ipv6hdr *ip6h = (struct ipv6hdr *)(packet_start);
+
+		if (ip6h->nexthdr == IPPROTO_ICMPV6) {
+			is_icmp = 1;
+		} else if (ip6h->nexthdr == NEXTHDR_FRAGMENT) {
+			struct frag_hdr *frag;
+
+			frag = (struct frag_hdr *)(packet_start
+						   + sizeof(struct ipv6hdr));
+			if (frag->nexthdr == IPPROTO_ICMPV6)
+				is_icmp = 1;
+		}
+	}
+
+	return is_icmp;
+}
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 3dd7b21..d8387b1 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -9700,6 +9700,9 @@
 		if (err)
 			return err;
 
+		if (!setup.chandef.chan)
+			return -EINVAL;
+
 		err = validate_beacon_tx_rate(rdev, setup.chandef.chan->band,
 					      &setup.beacon_rate);
 		if (err)
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index bb7f5be..d414049 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -545,11 +545,6 @@
 		return -EOPNOTSUPP;
 
 	if (wdev->current_bss) {
-		if (!prev_bssid)
-			return -EALREADY;
-		if (prev_bssid &&
-		    !ether_addr_equal(prev_bssid, wdev->current_bss->pub.bssid))
-			return -ENOTCONN;
 		cfg80211_unhold_bss(wdev->current_bss);
 		cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
 		wdev->current_bss = NULL;
@@ -1085,11 +1080,35 @@
 
 	ASSERT_WDEV_LOCK(wdev);
 
-	if (WARN_ON(wdev->connect_keys)) {
-		kzfree(wdev->connect_keys);
-		wdev->connect_keys = NULL;
+	/*
+	 * If we have an ssid_len, we're trying to connect or are
+	 * already connected, so reject a new SSID unless it's the
+	 * same (which is the case for re-association.)
+	 */
+	if (wdev->ssid_len &&
+	    (wdev->ssid_len != connect->ssid_len ||
+	     memcmp(wdev->ssid, connect->ssid, wdev->ssid_len)))
+		return -EALREADY;
+
+	/*
+	 * If connected, reject (re-)association unless prev_bssid
+	 * matches the current BSSID.
+	 */
+	if (wdev->current_bss) {
+		if (!prev_bssid)
+			return -EALREADY;
+		if (!ether_addr_equal(prev_bssid, wdev->current_bss->pub.bssid))
+			return -ENOTCONN;
 	}
 
+	/*
+	 * Reject if we're in the process of connecting with WEP,
+	 * this case isn't very interesting and trying to handle
+	 * it would make the code much more complex.
+	 */
+	if (wdev->connect_keys)
+		return -EINPROGRESS;
+
 	cfg80211_oper_and_ht_capa(&connect->ht_capa_mask,
 				  rdev->wiphy.ht_capa_mod_mask);
 
@@ -1140,7 +1159,12 @@
 
 	if (err) {
 		wdev->connect_keys = NULL;
-		wdev->ssid_len = 0;
+		/*
+		 * This could be reassoc getting refused, don't clear
+		 * ssid_len in that case.
+		 */
+		if (!wdev->current_bss)
+			wdev->ssid_len = 0;
 		return err;
 	}
 
@@ -1165,5 +1189,13 @@
 	else if (wdev->current_bss)
 		err = rdev_disconnect(rdev, dev, reason);
 
+	/*
+	 * Clear ssid_len unless we actually were fully connected,
+	 * in which case cfg80211_disconnected() will take care of
+	 * this later.
+	 */
+	if (!wdev->current_bss)
+		wdev->ssid_len = 0;
+
 	return err;
 }
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 637387b..d864a6d 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -66,6 +66,9 @@
 			goto error_nolock;
 		}
 
+		if (x->props.output_mark)
+			skb->mark = x->props.output_mark;
+
 		err = x->outer_mode->output(x, skb);
 		if (err) {
 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEMODEERROR);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 8ce5711..77fbfbd 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -125,7 +125,7 @@
 						  int tos, int oif,
 						  const xfrm_address_t *saddr,
 						  const xfrm_address_t *daddr,
-						  int family)
+						  int family, u32 mark)
 {
 	struct xfrm_policy_afinfo *afinfo;
 	struct dst_entry *dst;
@@ -134,7 +134,7 @@
 	if (unlikely(afinfo == NULL))
 		return ERR_PTR(-EAFNOSUPPORT);
 
-	dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr);
+	dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);
 
 	xfrm_policy_put_afinfo(afinfo);
 
@@ -145,7 +145,7 @@
 						int tos, int oif,
 						xfrm_address_t *prev_saddr,
 						xfrm_address_t *prev_daddr,
-						int family)
+						int family, u32 mark)
 {
 	struct net *net = xs_net(x);
 	xfrm_address_t *saddr = &x->props.saddr;
@@ -161,7 +161,7 @@
 		daddr = x->coaddr;
 	}
 
-	dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family);
+	dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);
 
 	if (!IS_ERR(dst)) {
 		if (prev_saddr != saddr)
@@ -1427,14 +1427,14 @@
 
 static int
 xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
-	       xfrm_address_t *remote, unsigned short family)
+	       xfrm_address_t *remote, unsigned short family, u32 mark)
 {
 	int err;
 	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
 
 	if (unlikely(afinfo == NULL))
 		return -EINVAL;
-	err = afinfo->get_saddr(net, oif, local, remote);
+	err = afinfo->get_saddr(net, oif, local, remote, mark);
 	xfrm_policy_put_afinfo(afinfo);
 	return err;
 }
@@ -1465,7 +1465,7 @@
 			if (xfrm_addr_any(local, tmpl->encap_family)) {
 				error = xfrm_get_saddr(net, fl->flowi_oif,
 						       &tmp, remote,
-						       tmpl->encap_family);
+						       tmpl->encap_family, 0);
 				if (error)
 					goto fail;
 				local = &tmp;
@@ -1744,7 +1744,8 @@
 		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
 			family = xfrm[i]->props.family;
 			dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
-					      &saddr, &daddr, family);
+					      &saddr, &daddr, family,
+					      xfrm[i]->props.output_mark);
 			err = PTR_ERR(dst);
 			if (IS_ERR(dst))
 				goto put_states;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index b2bba35..2cade02 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -584,6 +584,9 @@
 
 	xfrm_mark_get(attrs, &x->mark);
 
+	if (attrs[XFRMA_OUTPUT_MARK])
+		x->props.output_mark = nla_get_u32(attrs[XFRMA_OUTPUT_MARK]);
+
 	err = __xfrm_init_state(x, false);
 	if (err)
 		goto error;
@@ -871,6 +874,11 @@
 		goto out;
 	if (x->security)
 		ret = copy_sec_ctx(x->security, skb);
+	if (x->props.output_mark) {
+		ret = nla_put_u32(skb, XFRMA_OUTPUT_MARK, x->props.output_mark);
+		if (ret)
+			goto out;
+	}
 out:
 	return ret;
 }
@@ -1656,32 +1664,34 @@
 
 static int xfrm_dump_policy_done(struct netlink_callback *cb)
 {
-	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
+	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
 	struct net *net = sock_net(cb->skb->sk);
 
 	xfrm_policy_walk_done(walk, net);
 	return 0;
 }
 
+static int xfrm_dump_policy_start(struct netlink_callback *cb)
+{
+	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
+
+	BUILD_BUG_ON(sizeof(*walk) > sizeof(cb->args));
+
+	xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
+	return 0;
+}
+
 static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	struct net *net = sock_net(skb->sk);
-	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
+	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
 	struct xfrm_dump_info info;
 
-	BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) >
-		     sizeof(cb->args) - sizeof(cb->args[0]));
-
 	info.in_skb = cb->skb;
 	info.out_skb = skb;
 	info.nlmsg_seq = cb->nlh->nlmsg_seq;
 	info.nlmsg_flags = NLM_F_MULTI;
 
-	if (!cb->args[0]) {
-		cb->args[0] = 1;
-		xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
-	}
-
 	(void) xfrm_policy_walk(net, walk, dump_one_policy, &info);
 
 	return skb->len;
@@ -2419,6 +2429,7 @@
 	[XFRMA_SA_EXTRA_FLAGS]	= { .type = NLA_U32 },
 	[XFRMA_PROTO]		= { .type = NLA_U8 },
 	[XFRMA_ADDRESS_FILTER]	= { .len = sizeof(struct xfrm_address_filter) },
+	[XFRMA_OUTPUT_MARK]	= { .type = NLA_U32 },
 };
 
 static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
@@ -2428,6 +2439,7 @@
 
 static const struct xfrm_link {
 	int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
+	int (*start)(struct netlink_callback *);
 	int (*dump)(struct sk_buff *, struct netlink_callback *);
 	int (*done)(struct netlink_callback *);
 	const struct nla_policy *nla_pol;
@@ -2441,6 +2453,7 @@
 	[XFRM_MSG_NEWPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_add_policy    },
 	[XFRM_MSG_DELPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_get_policy    },
 	[XFRM_MSG_GETPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
+						   .start = xfrm_dump_policy_start,
 						   .dump = xfrm_dump_policy,
 						   .done = xfrm_dump_policy_done },
 	[XFRM_MSG_ALLOCSPI    - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
@@ -2492,6 +2505,7 @@
 
 		{
 			struct netlink_dump_control c = {
+				.start = link->start,
 				.dump = link->dump,
 				.done = link->done,
 			};
@@ -2635,6 +2649,8 @@
 		l += nla_total_size(sizeof(*x->coaddr));
 	if (x->props.extra_flags)
 		l += nla_total_size(sizeof(x->props.extra_flags));
+	if (x->props.output_mark)
+		l += nla_total_size(sizeof(x->props.output_mark));
 
 	/* Must count x->lastused as it may become non-zero behind our back. */
 	l += nla_total_size_64bit(sizeof(u64));
diff --git a/security/Kconfig b/security/Kconfig
index 5693989..4415de2 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -6,6 +6,11 @@
 
 source security/keys/Kconfig
 
+if ARCH_QCOM
+source security/pfe/Kconfig
+endif
+
+
 config SECURITY_DMESG_RESTRICT
 	bool "Restrict unprivileged access to the kernel syslog"
 	default n
diff --git a/security/Makefile b/security/Makefile
index f2d71cd..79166ba 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -9,6 +9,7 @@
 subdir-$(CONFIG_SECURITY_APPARMOR)	+= apparmor
 subdir-$(CONFIG_SECURITY_YAMA)		+= yama
 subdir-$(CONFIG_SECURITY_LOADPIN)	+= loadpin
+subdir-$(CONFIG_ARCH_QCOM)	+= pfe
 
 # always enable default capabilities
 obj-y					+= commoncap.o
@@ -24,6 +25,7 @@
 obj-$(CONFIG_SECURITY_APPARMOR)		+= apparmor/
 obj-$(CONFIG_SECURITY_YAMA)		+= yama/
 obj-$(CONFIG_SECURITY_LOADPIN)		+= loadpin/
+obj-$(CONFIG_ARCH_QCOM)				+= pfe/
 obj-$(CONFIG_CGROUP_DEVICE)		+= device_cgroup.o
 
 # Object integrity file lists
diff --git a/security/keys/big_key.c b/security/keys/big_key.c
index 47c6dca..e628817 100644
--- a/security/keys/big_key.c
+++ b/security/keys/big_key.c
@@ -245,7 +245,7 @@
 
 	/* clear the quota */
 	key_payload_reserve(key, 0);
-	if (key_is_instantiated(key) &&
+	if (key_is_positive(key) &&
 	    (size_t)key->payload.data[big_key_len] > BIG_KEY_FILE_THRESHOLD)
 		vfs_truncate(path, 0);
 }
@@ -277,7 +277,7 @@
 
 	seq_puts(m, key->description);
 
-	if (key_is_instantiated(key))
+	if (key_is_positive(key))
 		seq_printf(m, ": %zu [%s]",
 			   datalen,
 			   datalen > BIG_KEY_FILE_THRESHOLD ? "file" : "buff");
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index 8d9330a..a871159 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -315,6 +315,13 @@
 
 	down_read(&ukey->sem);
 	upayload = user_key_payload(ukey);
+	if (!upayload) {
+		/* key was revoked before we acquired its semaphore */
+		up_read(&ukey->sem);
+		key_put(ukey);
+		ukey = ERR_PTR(-EKEYREVOKED);
+		goto error;
+	}
 	*master_key = upayload->data;
 	*master_keylen = upayload->datalen;
 error:
@@ -867,7 +874,7 @@
 	size_t datalen = prep->datalen;
 	int ret = 0;
 
-	if (test_bit(KEY_FLAG_NEGATIVE, &key->flags))
+	if (key_is_negative(key))
 		return -ENOKEY;
 	if (datalen <= 0 || datalen > 32767 || !prep->data)
 		return -EINVAL;
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 9cb4fe4..1659094 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -129,15 +129,15 @@
 	while (!list_empty(keys)) {
 		struct key *key =
 			list_entry(keys->next, struct key, graveyard_link);
+		short state = key->state;
+
 		list_del(&key->graveyard_link);
 
 		kdebug("- %u", key->serial);
 		key_check(key);
 
 		/* Throw away the key data if the key is instantiated */
-		if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags) &&
-		    !test_bit(KEY_FLAG_NEGATIVE, &key->flags) &&
-		    key->type->destroy)
+		if (state == KEY_IS_POSITIVE && key->type->destroy)
 			key->type->destroy(key);
 
 		security_key_free(key);
@@ -151,7 +151,7 @@
 		}
 
 		atomic_dec(&key->user->nkeys);
-		if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
+		if (state != KEY_IS_UNINSTANTIATED)
 			atomic_dec(&key->user->nikeys);
 
 		key_user_put(key->user);
diff --git a/security/keys/key.c b/security/keys/key.c
index 135e1eb..7dc5906 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -401,6 +401,18 @@
 EXPORT_SYMBOL(key_payload_reserve);
 
 /*
+ * Change the key state to being instantiated.
+ */
+static void mark_key_instantiated(struct key *key, int reject_error)
+{
+	/* Commit the payload before setting the state; barrier versus
+	 * key_read_state().
+	 */
+	smp_store_release(&key->state,
+			  (reject_error < 0) ? reject_error : KEY_IS_POSITIVE);
+}
+
+/*
  * Instantiate a key and link it into the target keyring atomically.  Must be
  * called with the target keyring's semaphore writelocked.  The target key's
  * semaphore need not be locked as instantiation is serialised by
@@ -423,14 +435,14 @@
 	mutex_lock(&key_construction_mutex);
 
 	/* can't instantiate twice */
-	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
+	if (key->state == KEY_IS_UNINSTANTIATED) {
 		/* instantiate the key */
 		ret = key->type->instantiate(key, prep);
 
 		if (ret == 0) {
 			/* mark the key as being instantiated */
 			atomic_inc(&key->user->nikeys);
-			set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
+			mark_key_instantiated(key, 0);
 
 			if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
 				awaken = 1;
@@ -572,13 +584,10 @@
 	mutex_lock(&key_construction_mutex);
 
 	/* can't instantiate twice */
-	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
+	if (key->state == KEY_IS_UNINSTANTIATED) {
 		/* mark the key as being negatively instantiated */
 		atomic_inc(&key->user->nikeys);
-		key->reject_error = -error;
-		smp_wmb();
-		set_bit(KEY_FLAG_NEGATIVE, &key->flags);
-		set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
+		mark_key_instantiated(key, -error);
 		now = current_kernel_time();
 		key->expiry = now.tv_sec + timeout;
 		key_schedule_gc(key->expiry + key_gc_delay);
@@ -750,8 +759,8 @@
 
 	ret = key->type->update(key, prep);
 	if (ret == 0)
-		/* updating a negative key instantiates it */
-		clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
+		/* Updating a negative key positively instantiates it */
+		mark_key_instantiated(key, 0);
 
 	up_write(&key->sem);
 
@@ -935,6 +944,16 @@
 	 */
 	__key_link_end(keyring, &index_key, edit);
 
+	key = key_ref_to_ptr(key_ref);
+	if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) {
+		ret = wait_for_key_construction(key, true);
+		if (ret < 0) {
+			key_ref_put(key_ref);
+			key_ref = ERR_PTR(ret);
+			goto error_free_prep;
+		}
+	}
+
 	key_ref = __key_update(key_ref, &prep);
 	goto error_free_prep;
 }
@@ -985,8 +1004,8 @@
 
 	ret = key->type->update(key, &prep);
 	if (ret == 0)
-		/* updating a negative key instantiates it */
-		clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
+		/* Updating a negative key positively instantiates it */
+		mark_key_instantiated(key, 0);
 
 	up_write(&key->sem);
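The smp_store_release() in mark_key_instantiated() needs an acquire load on the reader side; key_read_state() and the key_is_positive()/key_is_negative() helpers used throughout the remaining hunks are defined in include/linux/key.h by this series. A minimal sketch of the assumed reader side, for orientation only:

/* Assumed reader-side helpers (the real definitions live in
 * include/linux/key.h for this series). The acquire load pairs with the
 * release store in mark_key_instantiated() so that a reader observing
 * KEY_IS_POSITIVE also observes the instantiated payload.
 */
static inline short key_read_state(const struct key *key)
{
	return smp_load_acquire(&key->state);
}

static inline bool key_is_positive(const struct key *key)
{
	return key_read_state(key) == KEY_IS_POSITIVE;
}

static inline bool key_is_negative(const struct key *key)
{
	return key_read_state(key) < 0;
}

With the state collapsed into a single word, a reader sees either KEY_IS_UNINSTANTIATED, KEY_IS_POSITIVE, or the negative instantiation error directly, which is what the "state < 0" checks in the keyring, proc and request_key hunks below rely on.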
 
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 1302cb3..797edcf 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -766,10 +766,9 @@
 
 	key = key_ref_to_ptr(key_ref);
 
-	if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
-		ret = -ENOKEY;
-		goto error2;
-	}
+	ret = key_read_state(key);
+	if (ret < 0)
+		goto error2; /* Negatively instantiated */
 
 	/* see if we can read it directly */
 	ret = key_permission(key_ref, KEY_NEED_READ);
@@ -901,7 +900,7 @@
 		atomic_dec(&key->user->nkeys);
 		atomic_inc(&newowner->nkeys);
 
-		if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
+		if (key->state != KEY_IS_UNINSTANTIATED) {
 			atomic_dec(&key->user->nikeys);
 			atomic_inc(&newowner->nikeys);
 		}
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index a86d0ae..32969f6 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -407,7 +407,7 @@
 	else
 		seq_puts(m, "[anon]");
 
-	if (key_is_instantiated(keyring)) {
+	if (key_is_positive(keyring)) {
 		if (keyring->keys.nr_leaves_on_tree != 0)
 			seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree);
 		else
@@ -546,7 +546,8 @@
 {
 	struct keyring_search_context *ctx = iterator_data;
 	const struct key *key = keyring_ptr_to_key(object);
-	unsigned long kflags = key->flags;
+	unsigned long kflags = READ_ONCE(key->flags);
+	short state = READ_ONCE(key->state);
 
 	kenter("{%d}", key->serial);
 
@@ -590,9 +591,8 @@
 
 	if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
 		/* we set a different error code if we pass a negative key */
-		if (kflags & (1 << KEY_FLAG_NEGATIVE)) {
-			smp_rmb();
-			ctx->result = ERR_PTR(key->reject_error);
+		if (state < 0) {
+			ctx->result = ERR_PTR(state);
 			kleave(" = %d [neg]", ctx->skipped_ret);
 			goto skipped;
 		}
diff --git a/security/keys/proc.c b/security/keys/proc.c
index b9f531c..0361286 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -182,6 +182,7 @@
 	unsigned long timo;
 	key_ref_t key_ref, skey_ref;
 	char xbuf[16];
+	short state;
 	int rc;
 
 	struct keyring_search_context ctx = {
@@ -240,17 +241,19 @@
 			sprintf(xbuf, "%luw", timo / (60*60*24*7));
 	}
 
+	state = key_read_state(key);
+
 #define showflag(KEY, LETTER, FLAG) \
 	(test_bit(FLAG,	&(KEY)->flags) ? LETTER : '-')
 
 	seq_printf(m, "%08x %c%c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ",
 		   key->serial,
-		   showflag(key, 'I', KEY_FLAG_INSTANTIATED),
+		   state != KEY_IS_UNINSTANTIATED ? 'I' : '-',
 		   showflag(key, 'R', KEY_FLAG_REVOKED),
 		   showflag(key, 'D', KEY_FLAG_DEAD),
 		   showflag(key, 'Q', KEY_FLAG_IN_QUOTA),
 		   showflag(key, 'U', KEY_FLAG_USER_CONSTRUCT),
-		   showflag(key, 'N', KEY_FLAG_NEGATIVE),
+		   state < 0 ? 'N' : '-',
 		   showflag(key, 'i', KEY_FLAG_INVALIDATED),
 		   atomic_read(&key->usage),
 		   xbuf,
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index ce45c78..2d35d71 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -729,7 +729,7 @@
 
 	ret = -EIO;
 	if (!(lflags & KEY_LOOKUP_PARTIAL) &&
-	    !test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
+	    key_read_state(key) == KEY_IS_UNINSTANTIATED)
 		goto invalid_key;
 
 	/* check the permissions */
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 43affcf..5030fcf 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -594,10 +594,9 @@
 			  intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
 	if (ret)
 		return -ERESTARTSYS;
-	if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
-		smp_rmb();
-		return key->reject_error;
-	}
+	ret = key_read_state(key);
+	if (ret < 0)
+		return ret;
 	return key_validate(key);
 }
 EXPORT_SYMBOL(wait_for_key_construction);
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 9db8b4a..ba74a0b 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -73,7 +73,7 @@
 
 	seq_puts(m, "key:");
 	seq_puts(m, key->description);
-	if (key_is_instantiated(key))
+	if (key_is_positive(key))
 		seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len);
 }
 
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index 90d6175..f4db42e 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -1067,7 +1067,7 @@
 	char *datablob;
 	int ret = 0;
 
-	if (test_bit(KEY_FLAG_NEGATIVE, &key->flags))
+	if (key_is_negative(key))
 		return -ENOKEY;
 	p = key->payload.data[0];
 	if (!p->migratable)
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index 66b1840..3dc2607 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -106,7 +106,7 @@
 
 	/* attach the new data, displacing the old */
 	key->expiry = prep->expiry;
-	if (!test_bit(KEY_FLAG_NEGATIVE, &key->flags))
+	if (key_is_positive(key))
 		zap = rcu_dereference_key(key);
 	rcu_assign_keypointer(key, prep->payload.data[0]);
 	prep->payload.data[0] = NULL;
@@ -154,7 +154,7 @@
 void user_describe(const struct key *key, struct seq_file *m)
 {
 	seq_puts(m, key->description);
-	if (key_is_instantiated(key))
+	if (key_is_positive(key))
 		seq_printf(m, ": %u", key->datalen);
 }
 
diff --git a/security/pfe/Kconfig b/security/pfe/Kconfig
new file mode 100644
index 0000000..0cd9e81
--- /dev/null
+++ b/security/pfe/Kconfig
@@ -0,0 +1,28 @@
+menu "Qualcomm Technologies, Inc Per File Encryption security device drivers"
+	depends on ARCH_QCOM
+
+config PFT
+	bool "Per-File-Tagger driver"
+	depends on SECURITY
+	default n
+	help
+		This driver is used for tagging enterprise files.
+		It is part of the Per-File-Encryption (PFE) feature.
+		The driver tags files when they are created by a
+		registered application.
+		Tagged files are encrypted using the dm-req-crypt driver.
+
+config PFK
+	bool "Per-File-Key driver"
+	depends on SECURITY
+	depends on SECURITY_SELINUX
+	default n
+	help
+		This driver is used for storing eCryptfs information
+		in the file's inode.
+		It is part of the eCryptfs hardware enhanced solution
+		provided by Qualcomm Technologies, Inc.
+		The information is used when the file is later encrypted
+		using the ICE or dm crypto engine.
+
+endmenu
diff --git a/security/pfe/Makefile b/security/pfe/Makefile
new file mode 100644
index 0000000..242a216
--- /dev/null
+++ b/security/pfe/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the MSM specific security device drivers.
+#
+
+ccflags-y += -Isecurity/selinux -Isecurity/selinux/include
+ccflags-y += -Ifs/ext4
+ccflags-y += -Ifs/crypto
+
+obj-$(CONFIG_PFT) += pft.o
+obj-$(CONFIG_PFK) += pfk.o pfk_kc.o pfk_ice.o pfk_ext4.o
diff --git a/security/pfe/pfk.c b/security/pfe/pfk.c
new file mode 100644
index 0000000..615353e
--- /dev/null
+++ b/security/pfe/pfk.c
@@ -0,0 +1,483 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Per-File-Key (PFK).
+ *
+ * This driver is responsible for overall management of various
+ * Per File Encryption variants that work on top of or as part of different
+ * file systems.
+ *
+ * The driver has the following purposes:
+ * 1) Define priorities between PFEs if more than one is enabled
+ * 2) Extract key information from the inode
+ * 3) Load and manage the various keys in the ICE HW engine
+ * 4) Be invoked from the various layers in FS/BLOCK/STORAGE DRIVER
+ *    that need to take decisions on HW encryption management of the data.
+ *    Some examples:
+ *	BLOCK LAYER: when it decides whether 2 chunks can be merged into
+ *	one encryption / decryption request sent to the HW
+ *
+ *	UFS DRIVER: when it needs to configure the ICE HW with a particular
+ *	key slot to be used for encryption / decryption
+ *
+ * PFE variants can differ on particular way of storing the cryptographic info
+ * inside inode, actions to be taken upon file operations, etc., but the common
+ * properties are described above
+ *
+ */
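+
+/*
+ * Illustrative call pattern (a sketch only; the real callers are the block
+ * layer and the UFS/ICE driver, and the local variable names below are
+ * hypothetical):
+ *
+ *	struct ice_crypto_setting setting;
+ *	bool is_pfe;
+ *
+ *	if (!pfk_load_key_start(bio, &setting, &is_pfe, false)) {
+ *		(program the ICE HW using setting.key_index / algo_mode)
+ *		pfk_load_key_end(bio, &is_pfe);
+ *	}
+ */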
+
+
+/* Uncomment the line below to enable debug messages */
+/* #define DEBUG 1 */
+#define pr_fmt(fmt)	"pfk [%s]: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/printk.h>
+#include <linux/bio.h>
+#include <linux/security.h>
+#include <crypto/ice.h>
+
+#include <linux/pfk.h>
+
+#include "pfk_kc.h"
+#include "objsec.h"
+#include "pfk_ice.h"
+#include "pfk_ext4.h"
+#include "pfk_internal.h"
+#include "ext4.h"
+
+static bool pfk_ready;
+
+
+/* might be replaced by a table when more than one cipher is supported */
+#define PFK_SUPPORTED_KEY_SIZE 32
+#define PFK_SUPPORTED_SALT_SIZE 32
+
+/* Various PFE types and function tables to support each one of them */
+enum pfe_type {EXT4_CRYPT_PFE, INVALID_PFE};
+
+typedef int (*pfk_parse_inode_type)(const struct bio *bio,
+	const struct inode *inode,
+	struct pfk_key_info *key_info,
+	enum ice_cryto_algo_mode *algo,
+	bool *is_pfe);
+
+typedef bool (*pfk_allow_merge_bio_type)(const struct bio *bio1,
+	const struct bio *bio2, const struct inode *inode1,
+	const struct inode *inode2);
+
+static const pfk_parse_inode_type pfk_parse_inode_ftable[] = {
+	/* EXT4_CRYPT_PFE */ &pfk_ext4_parse_inode,
+};
+
+static const pfk_allow_merge_bio_type pfk_allow_merge_bio_ftable[] = {
+	/* EXT4_CRYPT_PFE */ &pfk_ext4_allow_merge_bio,
+};
+
+static void __exit pfk_exit(void)
+{
+	pfk_ready = false;
+	pfk_ext4_deinit();
+	pfk_kc_deinit();
+}
+
+static int __init pfk_init(void)
+{
+
+	int ret = 0;
+
+	ret = pfk_ext4_init();
+	if (ret != 0)
+		goto fail;
+
+	ret = pfk_kc_init();
+	if (ret != 0) {
+		pr_err("could not init pfk key cache, error %d\n", ret);
+		pfk_ext4_deinit();
+		goto fail;
+	}
+
+	pfk_ready = true;
+	pr_info("Driver initialized successfully\n");
+
+	return 0;
+
+fail:
+	pr_err("Failed to init driver\n");
+	return -ENODEV;
+}
+
+/*
+ * If more than one type is supported simultaneously, this function will also
+ * set the priority between them
+ */
+static enum pfe_type pfk_get_pfe_type(const struct inode *inode)
+{
+	if (!inode)
+		return INVALID_PFE;
+
+	if (pfk_is_ext4_type(inode))
+		return EXT4_CRYPT_PFE;
+
+	return INVALID_PFE;
+}
+
+/**
+ * inode_to_filename() - get the filename from inode pointer.
+ * @inode: inode pointer
+ *
+ * It is used for debug prints.
+ *
+ * Return: filename string or "unknown".
+ */
+char *inode_to_filename(const struct inode *inode)
+{
+	struct dentry *dentry = NULL;
+	char *filename = NULL;
+
+	if (hlist_empty(&inode->i_dentry))
+		return "unknown";
+
+	dentry = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
+	filename = dentry->d_iname;
+
+	return filename;
+}
+
+/**
+ * pfk_is_ready() - driver is initialized and ready.
+ *
+ * Return: true if the driver is ready.
+ */
+static inline bool pfk_is_ready(void)
+{
+	return pfk_ready;
+}
+
+/**
+ * pfk_bio_get_inode() - get the inode from a bio.
+ * @bio: Pointer to BIO structure.
+ *
+ * Walk the bio struct links to get the inode.
+ * Please note that in general a bio may consist of several pages from
+ * several files, but in our case we always assume that all pages come
+ * from the same file, since our logic ensures it. That is why we only
+ * walk through the first page to look for the inode.
+ *
+ * Return: pointer to the inode struct if successful, or NULL otherwise.
+ *
+ */
+static struct inode *pfk_bio_get_inode(const struct bio *bio)
+{
+	struct address_space *mapping;
+
+	if (!bio)
+		return NULL;
+	if (!bio->bi_io_vec)
+		return NULL;
+	if (!bio->bi_io_vec->bv_page)
+		return NULL;
+	if (!bio_has_data((struct bio *)bio))
+		return NULL;
+
+	if (PageAnon(bio->bi_io_vec->bv_page)) {
+		struct inode *inode;
+
+		/* Using direct-io (O_DIRECT) without page cache */
+		inode = dio_bio_get_inode((struct bio *)bio);
+		pr_debug("inode on direct-io, inode = 0x%pK.\n", inode);
+
+		return inode;
+	}
+
+	mapping = page_mapping(bio->bi_io_vec->bv_page);
+	if (!mapping)
+		return NULL;
+
+	if (!mapping->host)
+		return NULL;
+
+	return bio->bi_io_vec->bv_page->mapping->host;
+}
+
+/**
+ * pfk_key_size_to_key_type() - translate key size to key size enum
+ * @key_size: key size in bytes
+ * @key_size_type: pointer to store the output enum (can be null)
+ *
+ * Return 0 in case of success, error otherwise (i.e. unsupported key size)
+ */
+int pfk_key_size_to_key_type(size_t key_size,
+	enum ice_crpto_key_size *key_size_type)
+{
+	/*
+	 * currently only a 32-byte (256-bit) key size is supported;
+	 * in the future, a table with supported key sizes might
+	 * be introduced
+	 */
+
+	if (key_size != PFK_SUPPORTED_KEY_SIZE) {
+		pr_err("not supported key size %zu\n", key_size);
+		return -EINVAL;
+	}
+
+	if (key_size_type)
+		*key_size_type = ICE_CRYPTO_KEY_SIZE_256;
+
+	return 0;
+}
+
+/*
+ * Retrieves filesystem type from inode's superblock
+ */
+bool pfe_is_inode_filesystem_type(const struct inode *inode,
+	const char *fs_type)
+{
+	if (!inode || !fs_type)
+		return false;
+
+	if (!inode->i_sb)
+		return false;
+
+	if (!inode->i_sb->s_type)
+		return false;
+
+	return (strcmp(inode->i_sb->s_type->name, fs_type) == 0);
+}
+
+
+/**
+ * pfk_load_key_start() - loads the PFE encryption key to the ICE.
+ *			  Can also be invoked from a non-PFE context; in
+ *			  that case it is not relevant and the is_pfe flag
+ *			  is set to false.
+ *
+ * @bio: Pointer to the BIO structure
+ * @ice_setting: Pointer to the ice setting structure that will be filled with
+ * the ice configuration values, including the index to which the key was
+ * loaded
+ * @is_pfe: will be false if the inode is not relevant to PFE, in which case
+ * it should be treated as non-PFE by the block layer
+ *
+ * Returns the index where the key is stored in the encryption hw and
+ * additional information that will be used later for configuration of the
+ * encryption hw.
+ *
+ * Must be followed by pfk_load_key_end when the key is no longer used by ice.
+ *
+ */
+int pfk_load_key_start(const struct bio *bio,
+		struct ice_crypto_setting *ice_setting, bool *is_pfe,
+		bool async)
+{
+	int ret = 0;
+	struct pfk_key_info key_info = {NULL, NULL, 0, 0};
+	enum ice_cryto_algo_mode algo_mode = ICE_CRYPTO_ALGO_MODE_AES_XTS;
+	enum ice_crpto_key_size key_size_type = 0;
+	u32 key_index = 0;
+	struct inode *inode = NULL;
+	enum pfe_type which_pfe = INVALID_PFE;
+
+	if (!is_pfe) {
+		pr_err("is_pfe is NULL\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * only a few errors below can indicate that
+	 * this function was not invoked within PFE context,
+	 * otherwise we will consider it PFE
+	 */
+	*is_pfe = true;
+
+	if (!pfk_is_ready())
+		return -ENODEV;
+
+	if (!ice_setting) {
+		pr_err("ice setting is NULL\n");
+		return -EINVAL;
+	}
+	inode = pfk_bio_get_inode(bio);
+	if (!inode) {
+		*is_pfe = false;
+		return -EINVAL;
+	}
+	which_pfe = pfk_get_pfe_type(inode);
+	if (which_pfe == INVALID_PFE) {
+		*is_pfe = false;
+		return -EPERM;
+	}
+
+	pr_debug("parsing file %s with PFE %d\n",
+		inode_to_filename(inode), which_pfe);
+	ret = (*(pfk_parse_inode_ftable[which_pfe]))
+			(bio, inode, &key_info, &algo_mode, is_pfe);
+	if (ret != 0)
+		return ret;
+	ret = pfk_key_size_to_key_type(key_info.key_size, &key_size_type);
+	if (ret != 0)
+		return ret;
+	ret = pfk_kc_load_key_start(key_info.key, key_info.key_size,
+			key_info.salt, key_info.salt_size, &key_index, async);
+	if (ret) {
+		if (ret != -EBUSY && ret != -EAGAIN)
+			pr_err("start: could not load key into pfk key cache, error %d\n",
+					ret);
+
+		return ret;
+	}
+
+	ice_setting->key_size = key_size_type;
+	ice_setting->algo_mode = algo_mode;
+	/* hardcoded for now */
+	ice_setting->key_mode = ICE_CRYPTO_USE_LUT_SW_KEY;
+	ice_setting->key_index = key_index;
+
+	pr_debug("loaded key for file %s key_index %d\n",
+		inode_to_filename(inode), key_index);
+
+	return 0;
+}
+
+/**
+ * pfk_load_key_end() - marks the PFE key as no longer used by ICE
+ *			Can also be invoked from non
+ *			PFE context, in this case it is not
+ *			relevant and is_pfe flag is
+ *			set to false
+ *
+ * @bio: Pointer to the BIO structure
+ * @is_pfe: Pointer to is_pfe flag, which will be true if function was invoked
+ *			from PFE context
+ */
+int pfk_load_key_end(const struct bio *bio, bool *is_pfe)
+{
+	int ret = 0;
+	struct pfk_key_info key_info = {0};
+	enum pfe_type which_pfe = INVALID_PFE;
+	struct inode *inode = NULL;
+
+	if (!is_pfe) {
+		pr_err("is_pfe is NULL\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * only a few errors below can indicate that
+	 * this function was not invoked within PFE context,
+	 * otherwise we will consider it PFE
+	 */
+	*is_pfe = true;
+
+	if (!pfk_is_ready())
+		return -ENODEV;
+
+	inode = pfk_bio_get_inode(bio);
+	if (!inode) {
+		*is_pfe = false;
+		return -EINVAL;
+	}
+
+	which_pfe = pfk_get_pfe_type(inode);
+	if (which_pfe == INVALID_PFE) {
+		*is_pfe = false;
+		return -EPERM;
+	}
+
+	ret = (*(pfk_parse_inode_ftable[which_pfe]))
+			(bio, inode, &key_info, NULL, is_pfe);
+	if (ret != 0)
+		return ret;
+
+	pfk_kc_load_key_end(key_info.key, key_info.key_size,
+		key_info.salt, key_info.salt_size);
+
+	pr_debug("finished using key for file %s\n",
+		inode_to_filename(inode));
+
+	return 0;
+}
+
+/**
+ * pfk_allow_merge_bio() - Check if 2 BIOs can be merged.
+ * @bio1:	Pointer to first BIO structure.
+ * @bio2:	Pointer to second BIO structure.
+ *
+ * Prevent merging of BIOs from encrypted and non-encrypted
+ * files, or from files encrypted with different keys.
+ * Also prevent non-encrypted and encrypted data from the same file
+ * from being merged (the ecryptfs header, if stored inside the file,
+ * should be non-encrypted).
+ * This API is called by the file system block layer.
+ *
+ * Return: true if the BIOs are allowed to be merged, false
+ * otherwise.
+ */
+bool pfk_allow_merge_bio(const struct bio *bio1, const struct bio *bio2)
+{
+	struct inode *inode1 = NULL;
+	struct inode *inode2 = NULL;
+	enum pfe_type which_pfe1 = INVALID_PFE;
+	enum pfe_type which_pfe2 = INVALID_PFE;
+
+	if (!pfk_is_ready())
+		return false;
+
+	if (!bio1 || !bio2)
+		return false;
+
+	if (bio1 == bio2)
+		return true;
+
+	inode1 = pfk_bio_get_inode(bio1);
+	inode2 = pfk_bio_get_inode(bio2);
+
+	which_pfe1 = pfk_get_pfe_type(inode1);
+	which_pfe2 = pfk_get_pfe_type(inode2);
+
+	/* nodes with different encryption, do not merge */
+	if (which_pfe1 != which_pfe2)
+		return false;
+
+	/* both nodes do not have encryption, allow merge */
+	if (which_pfe1 == INVALID_PFE)
+		return true;
+
+	return (*(pfk_allow_merge_bio_ftable[which_pfe1]))(bio1, bio2,
+		inode1, inode2);
+}
+
+/**
+ * Flush key table on storage core reset. During core reset key configuration
+ * is lost in ICE. We need to flush the cache, so that the keys will be
+ * reconfigured again for every subsequent transaction
+ */
+void pfk_clear_on_reset(void)
+{
+	if (!pfk_is_ready())
+		return;
+
+	pfk_kc_clear_on_reset();
+}
+
+module_init(pfk_init);
+module_exit(pfk_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Per-File-Key driver");
diff --git a/security/pfe/pfk_ext4.c b/security/pfe/pfk_ext4.c
new file mode 100644
index 0000000..7ce70bc
--- /dev/null
+++ b/security/pfe/pfk_ext4.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Per-File-Key (PFK) - EXT4
+ *
+ * This driver is used for working with EXT4 crypt extension
+ *
+ * The key information is stored in the inode by EXT4 when the file is first
+ * opened and will later be accessed by the Block Device Driver to actually
+ * load the key to the encryption hw.
+ *
+ * PFK exposes APIs for loading and removing keys from the encryption hw
+ * and also an API to determine whether 2 adjacent blocks can be aggregated
+ * by the Block Layer into one request to the encryption hw.
+ *
+ */
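+
+/*
+ * Rough flow (a sketch; the exact entry points depend on the block/ICE
+ * driver integration):
+ *
+ *	ext4 opens an encrypted file and sets up its ICE encryption info
+ *	-> the storage path calls pfk_load_key_start() for a bio
+ *	-> pfk_ext4_parse_inode() pulls key/salt via
+ *	   ext4_get_ice_encryption_key()/ext4_get_ice_encryption_salt()
+ *	-> the key is loaded into an ICE key slot through the PFK key cache
+ */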
+
+
+/* Uncomment the line below to enable debug messages */
+/* #define DEBUG 1 */
+#define pr_fmt(fmt)	"pfk_ext4 [%s]: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/printk.h>
+
+#include "ext4_ice.h"
+#include "pfk_ext4.h"
+
+static bool pfk_ext4_ready;
+
+/*
+ * pfk_ext4_deinit() - Deinit function, should be invoked by upper PFK layer
+ */
+void pfk_ext4_deinit(void)
+{
+	pfk_ext4_ready = false;
+}
+
+/*
+ * pfk_ext4_init() - Init function, should be invoked by upper PFK layer
+ */
+int __init pfk_ext4_init(void)
+{
+	pfk_ext4_ready = true;
+	pr_info("PFK EXT4 initialized successfully\n");
+
+	return 0;
+}
+
+/**
+ * pfk_ext4_is_ready() - driver is initialized and ready.
+ *
+ * Return: true if the driver is ready.
+ */
+static inline bool pfk_ext4_is_ready(void)
+{
+	return pfk_ext4_ready;
+}
+
+/**
+ * pfk_ext4_dump_inode() - dumps all interesting info about inode to the screen
+ *
+ *
+ */
+/*
+ * static void pfk_ext4_dump_inode(const struct inode* inode)
+ * {
+ *	struct ext4_crypt_info *ci = ext4_encryption_info((struct inode*)inode);
+ *
+ *	pr_debug("dumping inode with address 0x%p\n", inode);
+ *	pr_debug("S_ISREG is %d\n", S_ISREG(inode->i_mode));
+ *	pr_debug("EXT4_INODE_ENCRYPT flag is %d\n",
+ *		ext4_test_inode_flag((struct inode*)inode, EXT4_INODE_ENCRYPT));
+ *	if (ci) {
+ *		pr_debug("crypt_info address 0x%p\n", ci);
+ *		pr_debug("ci->ci_data_mode %d\n", ci->ci_data_mode);
+ *	} else {
+ *		pr_debug("crypt_info is NULL\n");
+ *	}
+ * }
+ */
+
+/**
+ * pfk_is_ext4_type() - return true if inode belongs to ICE EXT4 PFE
+ * @inode: inode pointer
+ */
+bool pfk_is_ext4_type(const struct inode *inode)
+{
+	if (!pfe_is_inode_filesystem_type(inode, "ext4"))
+		return false;
+
+	return ext4_should_be_processed_by_ice(inode);
+}
+
+/**
+ * pfk_ext4_parse_cipher() - parse cipher from inode to enum
+ * @inode: inode
+ * @algo: pointer to store the output enum (can be null)
+ *
+ * return 0 in case of success, error otherwise (i.e not supported cipher)
+ */
+static int pfk_ext4_parse_cipher(const struct inode *inode,
+	enum ice_cryto_algo_mode *algo)
+{
+	/*
+	 * currently only AES XTS algo is supported
+	 * in the future, table with supported ciphers might
+	 * be introduced
+	 */
+
+	if (!inode)
+		return -EINVAL;
+
+	if (!ext4_is_aes_xts_cipher(inode)) {
+		pr_err("ext4 algorithm is not supported by pfk\n");
+		return -EINVAL;
+	}
+
+	if (algo)
+		*algo = ICE_CRYPTO_ALGO_MODE_AES_XTS;
+
+	return 0;
+}
+
+
+int pfk_ext4_parse_inode(const struct bio *bio,
+	const struct inode *inode,
+	struct pfk_key_info *key_info,
+	enum ice_cryto_algo_mode *algo,
+	bool *is_pfe)
+{
+	int ret = 0;
+
+	if (!is_pfe)
+		return -EINVAL;
+
+	/*
+	 * only a few errors below can indicate that
+	 * this function was not invoked within PFE context,
+	 * otherwise we will consider it PFE
+	 */
+	*is_pfe = true;
+
+	if (!pfk_ext4_is_ready())
+		return -ENODEV;
+
+	if (!inode)
+		return -EINVAL;
+
+	if (!key_info)
+		return -EINVAL;
+
+	key_info->key = ext4_get_ice_encryption_key(inode);
+	if (!key_info->key) {
+		pr_err("could not parse key from ext4\n");
+		return -EINVAL;
+	}
+
+	key_info->key_size = ext4_get_ice_encryption_key_size(inode);
+	if (!key_info->key_size) {
+		pr_err("could not parse key size from ext4\n");
+		return -EINVAL;
+	}
+
+	key_info->salt = ext4_get_ice_encryption_salt(inode);
+	if (!key_info->salt) {
+		pr_err("could not parse salt from ext4\n");
+		return -EINVAL;
+	}
+
+	key_info->salt_size = ext4_get_ice_encryption_salt_size(inode);
+	if (!key_info->salt_size) {
+		pr_err("could not parse salt size from ext4\n");
+		return -EINVAL;
+	}
+
+	ret = pfk_ext4_parse_cipher(inode, algo);
+	if (ret != 0) {
+		pr_err("not supported cipher\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+bool pfk_ext4_allow_merge_bio(const struct bio *bio1,
+	const struct bio *bio2, const struct inode *inode1,
+	const struct inode *inode2)
+{
+	/* if there is no ext4 pfk, don't disallow merging blocks */
+	if (!pfk_ext4_is_ready())
+		return true;
+
+	if (!inode1 || !inode2)
+		return false;
+
+	return ext4_is_ice_encryption_info_equal(inode1, inode2);
+}
+
diff --git a/security/pfe/pfk_ext4.h b/security/pfe/pfk_ext4.h
new file mode 100644
index 0000000..1f33632
--- /dev/null
+++ b/security/pfe/pfk_ext4.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _PFK_EXT4_H_
+#define _PFK_EXT4_H_
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <crypto/ice.h>
+#include "pfk_internal.h"
+
+bool pfk_is_ext4_type(const struct inode *inode);
+
+int pfk_ext4_parse_inode(const struct bio *bio,
+	const struct inode *inode,
+	struct pfk_key_info *key_info,
+	enum ice_cryto_algo_mode *algo,
+	bool *is_pfe);
+
+bool pfk_ext4_allow_merge_bio(const struct bio *bio1,
+	const struct bio *bio2, const struct inode *inode1,
+	const struct inode *inode2);
+
+int __init pfk_ext4_init(void);
+
+void pfk_ext4_deinit(void);
+
+#endif /* _PFK_EXT4_H_ */
diff --git a/security/pfe/pfk_ice.c b/security/pfe/pfk_ice.c
new file mode 100644
index 0000000..f0bbf9c
--- /dev/null
+++ b/security/pfe/pfk_ice.c
@@ -0,0 +1,188 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/async.h>
+#include <linux/mm.h>
+#include <linux/of.h>
+#include <soc/qcom/scm.h>
+#include <linux/device-mapper.h>
+#include <soc/qcom/qseecomi.h>
+#include <crypto/ice.h>
+#include "pfk_ice.h"
+
+
+/**********************************/
+/** global definitions		 **/
+/**********************************/
+
+#define TZ_ES_SET_ICE_KEY 0x2
+#define TZ_ES_INVALIDATE_ICE_KEY 0x3
+
+/* index 0 and 1 is reserved for FDE */
+#define MIN_ICE_KEY_INDEX 2
+
+#define MAX_ICE_KEY_INDEX 31
+
+
+#define TZ_ES_SET_ICE_KEY_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_ES, TZ_ES_SET_ICE_KEY)
+
+
+#define TZ_ES_INVALIDATE_ICE_KEY_ID \
+		TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, \
+			TZ_SVC_ES, TZ_ES_INVALIDATE_ICE_KEY)
+
+
+#define TZ_ES_SET_ICE_KEY_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_5( \
+		TZ_SYSCALL_PARAM_TYPE_VAL, \
+		TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL, \
+		TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1( \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define ICE_KEY_SIZE 32
+#define ICE_SALT_SIZE 32
+
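+/*
+ * Staging buffers for the key/salt that are passed to TZ by physical
+ * address; they are flushed from the CPU cache before each SCM call.
+ */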
+static uint8_t ice_key[ICE_KEY_SIZE];
+static uint8_t ice_salt[ICE_SALT_SIZE];
+
+int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt,
+			char *storage_type)
+{
+	struct scm_desc desc = {0};
+	int ret, ret1;
+	char *tzbuf_key = (char *)ice_key;
+	char *tzbuf_salt = (char *)ice_salt;
+	char *s_type = storage_type;
+
+	uint32_t smc_id = 0;
+	u32 tzbuflen_key = sizeof(ice_key);
+	u32 tzbuflen_salt = sizeof(ice_salt);
+
+	if (index < MIN_ICE_KEY_INDEX || index > MAX_ICE_KEY_INDEX) {
+		pr_err("%s Invalid index %d\n", __func__, index);
+		return -EINVAL;
+	}
+	if (!key || !salt) {
+		pr_err("%s Invalid key/salt\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!tzbuf_key || !tzbuf_salt) {
+		pr_err("%s No Memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	if (s_type == NULL) {
+		pr_err("%s Invalid Storage type\n", __func__);
+		return -EINVAL;
+	}
+
+	memset(tzbuf_key, 0, tzbuflen_key);
+	memset(tzbuf_salt, 0, tzbuflen_salt);
+
+	memcpy(ice_key, key, tzbuflen_key);
+	memcpy(ice_salt, salt, tzbuflen_salt);
+
+	dmac_flush_range(tzbuf_key, tzbuf_key + tzbuflen_key);
+	dmac_flush_range(tzbuf_salt, tzbuf_salt + tzbuflen_salt);
+
+	smc_id = TZ_ES_SET_ICE_KEY_ID;
+
+	desc.arginfo = TZ_ES_SET_ICE_KEY_PARAM_ID;
+	desc.args[0] = index;
+	desc.args[1] = virt_to_phys(tzbuf_key);
+	desc.args[2] = tzbuflen_key;
+	desc.args[3] = virt_to_phys(tzbuf_salt);
+	desc.args[4] = tzbuflen_salt;
+
+	ret = qcom_ice_setup_ice_hw((const char *)s_type, true);
+
+	if (ret) {
+		pr_err("%s: could not enable clocks: %d\n", __func__, ret);
+		goto out;
+	}
+
+	ret = scm_call2(smc_id, &desc);
+
+	if (ret) {
+		pr_err("%s: Set Key Error: %d\n", __func__, ret);
+		if (ret == -EBUSY) {
+			if (qcom_ice_setup_ice_hw((const char *)s_type, false))
+				pr_err("%s: clock disable failed\n", __func__);
+			goto out;
+		}
+		/* Try to invalidate the key to keep ICE in proper state */
+		smc_id = TZ_ES_INVALIDATE_ICE_KEY_ID;
+		desc.arginfo = TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID;
+		desc.args[0] = index;
+		ret1 = scm_call2(smc_id, &desc);
+		if (ret1)
+			pr_err("%s: Invalidate Key Error: %d\n", __func__,
+					ret1);
+	}
+	ret = qcom_ice_setup_ice_hw((const char *)s_type, false);
+
+out:
+	return ret;
+}
+
+int qti_pfk_ice_invalidate_key(uint32_t index, char *storage_type)
+{
+	struct scm_desc desc = {0};
+	int ret;
+
+	uint32_t smc_id = 0;
+
+	if (index < MIN_ICE_KEY_INDEX || index > MAX_ICE_KEY_INDEX) {
+		pr_err("%s Invalid index %d\n", __func__, index);
+		return -EINVAL;
+	}
+
+	if (storage_type == NULL) {
+		pr_err("%s Invalid Storage type\n", __func__);
+		return -EINVAL;
+	}
+
+	smc_id = TZ_ES_INVALIDATE_ICE_KEY_ID;
+
+	desc.arginfo = TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID;
+	desc.args[0] = index;
+
+	ret = qcom_ice_setup_ice_hw((const char *)storage_type, true);
+
+	if (ret) {
+		pr_err("%s: could not enable clocks: 0x%x\n", __func__, ret);
+		return ret;
+	}
+
+	ret = scm_call2(smc_id, &desc);
+
+	if (ret) {
+		pr_err("%s: Error: 0x%x\n", __func__, ret);
+		if (qcom_ice_setup_ice_hw((const char *)storage_type, false))
+			pr_err("%s: could not disable clocks\n", __func__);
+	} else {
+		ret = qcom_ice_setup_ice_hw((const char *)storage_type, false);
+	}
+
+	return ret;
+}
diff --git a/security/pfe/pfk_ice.h b/security/pfe/pfk_ice.h
new file mode 100644
index 0000000..fb7c0d1
--- /dev/null
+++ b/security/pfe/pfk_ice.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef PFK_ICE_H_
+#define PFK_ICE_H_
+
+/*
+ * PFK ICE
+ *
+ * ICE keys configuration through scm calls.
+ *
+ */
+
+#include <linux/types.h>
+
+int pfk_ice_init(void);
+int pfk_ice_deinit(void);
+
+int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt,
+			char *storage_type);
+int qti_pfk_ice_invalidate_key(uint32_t index, char *storage_type);
+
+
+#endif /* PFK_ICE_H_ */
diff --git a/security/pfe/pfk_internal.h b/security/pfe/pfk_internal.h
new file mode 100644
index 0000000..86526fa
--- /dev/null
+++ b/security/pfe/pfk_internal.h
@@ -0,0 +1,34 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _PFK_INTERNAL_H_
+#define _PFK_INTERNAL_H_
+
+#include <linux/types.h>
+#include <crypto/ice.h>
+
+struct pfk_key_info {
+	const unsigned char *key;
+	const unsigned char *salt;
+	size_t key_size;
+	size_t salt_size;
+};
+
+int pfk_key_size_to_key_type(size_t key_size,
+	enum ice_crpto_key_size *key_size_type);
+
+bool pfe_is_inode_filesystem_type(const struct inode *inode,
+	const char *fs_type);
+
+char *inode_to_filename(const struct inode *inode);
+
+#endif /* _PFK_INTERNAL_H_ */
diff --git a/security/pfe/pfk_kc.c b/security/pfe/pfk_kc.c
new file mode 100644
index 0000000..da71f80
--- /dev/null
+++ b/security/pfe/pfk_kc.c
@@ -0,0 +1,905 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * PFK Key Cache
+ *
+ * Key Cache used internally in PFK.
+ * The purpose of the cache is to save access time to QSEE when loading keys.
+ * Currently the cache is the same size as the total number of keys that can
+ * be loaded to ICE. Since this number is relatively small, the algorithms for
+ * cache eviction are simple, linear, and based on last usage timestamp, i.e.
+ * the node that will be evicted is the one with the oldest timestamp.
+ * Empty entries always have the oldest timestamp.
+ */
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <crypto/ice.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+
+#include "pfk_kc.h"
+#include "pfk_ice.h"
+
+
+/** the first available index in ice engine */
+#define PFK_KC_STARTING_INDEX 2
+
+/** currently the only supported key and salt sizes */
+#define PFK_KC_KEY_SIZE 32
+#define PFK_KC_SALT_SIZE 32
+
+/** Table size */
+/* TODO replace by some constant from ice.h */
+#define PFK_KC_TABLE_SIZE ((32) - (PFK_KC_STARTING_INDEX))
+
+/** The maximum key and salt size */
+#define PFK_MAX_KEY_SIZE PFK_KC_KEY_SIZE
+#define PFK_MAX_SALT_SIZE PFK_KC_SALT_SIZE
+#define PFK_UFS "ufs"
+
+static DEFINE_SPINLOCK(kc_lock);
+static unsigned long flags;
+static bool kc_ready;
+static char *s_type = "sdcc";
+
+/**
+ * enum pfk_kc_entry_state - state of the entry inside kc table
+ *
+ * @FREE:		   entry is free
+ * @ACTIVE_ICE_PRELOAD:    entry is actively used by ICE engine
+			   and cannot be used by others. SCM call
+			   to load key to ICE is pending to be performed
+ * @ACTIVE_ICE_LOADED:     entry is actively used by ICE engine and
+			   cannot be used by others. SCM call to load the
+			   key to ICE was successfully executed and key is
+			   now loaded
+ * @INACTIVE_INVALIDATING: entry is being invalidated during file close
+			   and cannot be used by others until invalidation
+			   is complete
+ * @INACTIVE:		   entry's key is already loaded, but is not
+			   currently being used. It can be re-used for
+			   optimization and to avoid SCM call cost or
+			   it can be taken by another key if there are
+			   no FREE entries
+ * @SCM_ERROR:		   error occurred while scm call was performed to
+			   load the key to ICE
+ */
+enum pfk_kc_entry_state {
+	FREE,
+	ACTIVE_ICE_PRELOAD,
+	ACTIVE_ICE_LOADED,
+	INACTIVE_INVALIDATING,
+	INACTIVE,
+	SCM_ERROR
+};
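+
+/*
+ * Typical entry lifecycle (derived from the state descriptions above):
+ *
+ *	FREE -> ACTIVE_ICE_PRELOAD -> ACTIVE_ICE_LOADED -> INACTIVE
+ *	INACTIVE -> ACTIVE_ICE_LOADED (key reuse) or eviction back through
+ *	the FREE path; removal/clear goes INACTIVE -> INACTIVE_INVALIDATING
+ *	-> FREE, and a failed SCM call moves the entry to SCM_ERROR.
+ */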
+
+struct kc_entry {
+	unsigned char key[PFK_MAX_KEY_SIZE];
+	size_t key_size;
+
+	unsigned char salt[PFK_MAX_SALT_SIZE];
+	size_t salt_size;
+
+	u64 time_stamp;
+	u32 key_index;
+
+	struct task_struct *thread_pending;
+
+	enum pfk_kc_entry_state state;
+
+	/* ref count for the number of requests in the HW queue for this key */
+	int loaded_ref_cnt;
+	int scm_error;
+};
+
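+/*
+ * Entry i of kc_table maps to ICE key slot PFK_KC_STARTING_INDEX + i
+ * (see pfk_kc_init()).
+ */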
+static struct kc_entry kc_table[PFK_KC_TABLE_SIZE];
+
+/**
+ * kc_is_ready() - driver is initialized and ready.
+ *
+ * Return: true if the key cache is ready.
+ */
+static inline bool kc_is_ready(void)
+{
+	return kc_ready;
+}
+
+static inline void kc_spin_lock(void)
+{
+	spin_lock_irqsave(&kc_lock, flags);
+}
+
+static inline void kc_spin_unlock(void)
+{
+	spin_unlock_irqrestore(&kc_lock, flags);
+}
+
+/**
+ * kc_entry_is_available() - checks whether the entry is available
+ *
+ * Return true if it is, false otherwise or if the entry is invalid
+ * Should be invoked under spinlock
+ */
+static bool kc_entry_is_available(const struct kc_entry *entry)
+{
+	if (!entry)
+		return false;
+
+	return (entry->state == FREE || entry->state == INACTIVE);
+}
+
+/**
+ * kc_entry_wait_till_available() - waits till entry is available
+ *
+ * Returns 0 in case of success or -ERESTARTSYS if the wait was interrupted
+ * by signal
+ *
+ * Should be invoked under spinlock
+ */
+static int kc_entry_wait_till_available(struct kc_entry *entry)
+{
+	int res = 0;
+
+	while (!kc_entry_is_available(entry)) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (signal_pending(current)) {
+			res = -ERESTARTSYS;
+			break;
+		}
+		/* assuming only one thread can try to invalidate
+		 * the same entry
+		 */
+		entry->thread_pending = current;
+		kc_spin_unlock();
+		schedule();
+		kc_spin_lock();
+	}
+	set_current_state(TASK_RUNNING);
+
+	return res;
+}
+
+/**
+ * kc_entry_start_invalidating() - moves entry to state
+ *			           INACTIVE_INVALIDATING
+ *				   If entry is in use, waits till
+ *				   it gets available
+ * @entry: pointer to entry
+ *
+ * Return 0 in case of success, otherwise error
+ * Should be invoked under spinlock
+ */
+static int kc_entry_start_invalidating(struct kc_entry *entry)
+{
+	int res;
+
+	res = kc_entry_wait_till_available(entry);
+	if (res)
+		return res;
+
+	entry->state = INACTIVE_INVALIDATING;
+
+	return 0;
+}
+
+/**
+ * kc_entry_finish_invalidating() - moves entry to state FREE once
+ *				    invalidation of its key is complete
+ *
+ * @entry: pointer to entry
+ *
+ * Should be invoked under spinlock
+ */
+static void kc_entry_finish_invalidating(struct kc_entry *entry)
+{
+	if (!entry)
+		return;
+
+	if (entry->state != INACTIVE_INVALIDATING)
+		return;
+
+	entry->state = FREE;
+}
+
+/**
+ * kc_min_entry() - compare two entries to find one with minimal time
+ * @a: ptr to the first entry. If NULL the other entry will be returned
+ * @b: pointer to the second entry
+ *
+ * Return the entry whose timestamp is minimal, or b if a is NULL
+ */
+static inline struct kc_entry *kc_min_entry(struct kc_entry *a,
+		struct kc_entry *b)
+{
+	if (!a)
+		return b;
+
+	if (time_before64(b->time_stamp, a->time_stamp))
+		return b;
+
+	return a;
+}
+
+/**
+ * kc_entry_at_index() - return entry at specific index
+ * @index: index of entry to be accessed
+ *
+ * Return entry
+ * Should be invoked under spinlock
+ */
+static struct kc_entry *kc_entry_at_index(int index)
+{
+	return &(kc_table[index]);
+}
+
+/**
+ * kc_find_key_at_index() - find kc entry starting at specific index
+ * @key: key to look for
+ * @key_size: the key size
+ * @salt: salt to look for
+ * @salt_size: the salt size
+ * @starting_index: index to start the search from; if an entry is found, it
+ * is updated with the index of that entry
+ *
+ * Return entry or NULL in case of error
+ * Should be invoked under spinlock
+ */
+static struct kc_entry *kc_find_key_at_index(const unsigned char *key,
+	size_t key_size, const unsigned char *salt, size_t salt_size,
+	int *starting_index)
+{
+	struct kc_entry *entry = NULL;
+	int i = 0;
+
+	for (i = *starting_index; i < PFK_KC_TABLE_SIZE; i++) {
+		entry = kc_entry_at_index(i);
+
+		if (salt != NULL) {
+			if (entry->salt_size != salt_size)
+				continue;
+
+			if (memcmp(entry->salt, salt, salt_size) != 0)
+				continue;
+		}
+
+		if (entry->key_size != key_size)
+			continue;
+
+		if (memcmp(entry->key, key, key_size) == 0) {
+			*starting_index = i;
+			return entry;
+		}
+	}
+
+	return NULL;
+}
+
+/**
+ * kc_find_key() - find kc entry
+ * @key: key to look for
+ * @key_size: the key size
+ * @salt: salt to look for
+ * @salt_size: the salt size
+ *
+ * Return entry or NULL in case of error
+ * Should be invoked under spinlock
+ */
+static struct kc_entry *kc_find_key(const unsigned char *key, size_t key_size,
+		const unsigned char *salt, size_t salt_size)
+{
+	int index = 0;
+
+	return kc_find_key_at_index(key, key_size, salt, salt_size, &index);
+}
+
+/**
+ * kc_find_oldest_entry_non_locked() - finds the entry with minimal timestamp
+ * that is not locked
+ *
+ * Returns entry with minimal timestamp. Empty entries have timestamp
+ * of 0, therefore they are returned first.
+ * If all the entries are locked, will return NULL
+ * Should be invoked under spin lock
+ */
+static struct kc_entry *kc_find_oldest_entry_non_locked(void)
+{
+	struct kc_entry *curr_min_entry = NULL;
+	struct kc_entry *entry = NULL;
+	int i = 0;
+
+	for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
+		entry = kc_entry_at_index(i);
+
+		if (entry->state == FREE)
+			return entry;
+
+		if (entry->state == INACTIVE)
+			curr_min_entry = kc_min_entry(curr_min_entry, entry);
+	}
+
+	return curr_min_entry;
+}
+
+/**
+ * kc_update_timestamp() - updates timestamp of entry to current
+ *
+ * @entry: entry to update
+ *
+ */
+static void kc_update_timestamp(struct kc_entry *entry)
+{
+	if (!entry)
+		return;
+
+	entry->time_stamp = get_jiffies_64();
+}
+
+/**
+ * kc_clear_entry() - clear the key from entry and mark entry not in use
+ *
+ * @entry: pointer to entry
+ *
+ * Should be invoked under spinlock
+ */
+static void kc_clear_entry(struct kc_entry *entry)
+{
+	if (!entry)
+		return;
+
+	memset(entry->key, 0, entry->key_size);
+	memset(entry->salt, 0, entry->salt_size);
+
+	entry->key_size = 0;
+	entry->salt_size = 0;
+
+	entry->time_stamp = 0;
+	entry->scm_error = 0;
+
+	entry->state = FREE;
+
+	entry->loaded_ref_cnt = 0;
+	entry->thread_pending = NULL;
+}
+
+/**
+ * kc_update_entry() - replaces the key in given entry and
+ *			loads the new key to ICE
+ *
+ * @entry: entry to replace key in
+ * @key: key
+ * @key_size: key_size
+ * @salt: salt
+ * @salt_size: salt_size
+ *
+ * The previous key is securely released and wiped, the new one is loaded
+ * to ICE.
+ * Should be invoked under spinlock
+ */
+static int kc_update_entry(struct kc_entry *entry, const unsigned char *key,
+	size_t key_size, const unsigned char *salt, size_t salt_size)
+{
+	int ret;
+
+	kc_clear_entry(entry);
+
+	memcpy(entry->key, key, key_size);
+	entry->key_size = key_size;
+
+	memcpy(entry->salt, salt, salt_size);
+	entry->salt_size = salt_size;
+
+	/* Mark entry as no longer free before releasing the lock */
+	entry->state = ACTIVE_ICE_PRELOAD;
+	kc_spin_unlock();
+
+	ret = qti_pfk_ice_set_key(entry->key_index, entry->key,
+			entry->salt, s_type);
+
+	kc_spin_lock();
+	return ret;
+}
+
+/**
+ * pfk_kc_init() - init function
+ *
+ * Return 0 in case of success, error otherwise
+ */
+int pfk_kc_init(void)
+{
+	int i = 0;
+	struct kc_entry *entry = NULL;
+
+	kc_spin_lock();
+	for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
+		entry = kc_entry_at_index(i);
+		entry->key_index = PFK_KC_STARTING_INDEX + i;
+	}
+	kc_ready = true;
+	kc_spin_unlock();
+	return 0;
+}
+
+/**
+ * pfk_kc_deinit() - deinit function
+ *
+ * Return 0 in case of success, error otherwise
+ */
+int pfk_kc_deinit(void)
+{
+	int res = pfk_kc_clear();
+
+	kc_ready = false;
+	return res;
+}
+
+/**
+ * pfk_kc_load_key_start() - retrieve the key from cache or add it if
+ * it's not there and return the ICE hw key index in @key_index.
+ * @key: pointer to the key
+ * @key_size: the size of the key
+ * @salt: pointer to the salt
+ * @salt_size: the size of the salt
+ * @key_index: the pointer to key_index where the output will be stored
+ * @async: if true, scm calls must not be made in the caller's context
+ *
+ * If the key is present in the cache, then key_index will be retrieved from
+ * the cache. If it is not present, the oldest entry from the kc table will be
+ * evicted, the key will be loaded to ICE via QSEE at the index of the evicted
+ * entry, and the result stored in the cache.
+ * The entry that is going to be used is marked as being used; it will be
+ * marked as not being used when ICE finishes with it and pfk_kc_load_key_end
+ * is invoked.
+ * As QSEE calls can only be done from a non-atomic context, setting @async to
+ * 'false' specifies that it is ok to make the calls in the current context.
+ * Otherwise, when @async is set and the key is not cached, -EAGAIN is
+ * returned and the caller should retry the call from a different context.
+ *
+ * Return 0 in case of success, error otherwise
+ */
+int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
+		const unsigned char *salt, size_t salt_size, u32 *key_index,
+		bool async)
+{
+	int ret = 0;
+	struct kc_entry *entry = NULL;
+	bool entry_exists = false;
+
+	if (!kc_is_ready())
+		return -ENODEV;
+
+	if (!key || !salt || !key_index) {
+		pr_err("%s key/salt/key_index NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	if (key_size != PFK_KC_KEY_SIZE) {
+		pr_err("unsupported key size %zu\n", key_size);
+		return -EINVAL;
+	}
+
+	if (salt_size != PFK_KC_SALT_SIZE) {
+		pr_err("unsupported salt size %zu\n", salt_size);
+		return -EINVAL;
+	}
+
+	kc_spin_lock();
+
+	entry = kc_find_key(key, key_size, salt, salt_size);
+	if (!entry) {
+		if (async) {
+			pr_debug("%s task will populate entry\n", __func__);
+			kc_spin_unlock();
+			return -EAGAIN;
+		}
+
+		entry = kc_find_oldest_entry_non_locked();
+		if (!entry) {
+			/* could not find a single non locked entry,
+			 * return EBUSY to upper layers so that the
+			 * request will be rescheduled
+			 */
+			kc_spin_unlock();
+			return -EBUSY;
+		}
+	} else {
+		entry_exists = true;
+	}
+
+	pr_debug("entry with index %d is in state %d\n",
+		entry->key_index, entry->state);
+
+	switch (entry->state) {
+	case (INACTIVE):
+		if (entry_exists) {
+			kc_update_timestamp(entry);
+			entry->state = ACTIVE_ICE_LOADED;
+
+			if (!strcmp(s_type, (char *)PFK_UFS)) {
+				if (async)
+					entry->loaded_ref_cnt++;
+			} else {
+				entry->loaded_ref_cnt++;
+			}
+			break;
+		}
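+		/* fall through - load the key into the evicted/free entry */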
+	case (FREE):
+		ret = kc_update_entry(entry, key, key_size, salt, salt_size);
+		if (ret) {
+			entry->state = SCM_ERROR;
+			entry->scm_error = ret;
+			pr_err("%s: key load error (%d)\n", __func__, ret);
+		} else {
+			kc_update_timestamp(entry);
+			entry->state = ACTIVE_ICE_LOADED;
+
+			/*
+			 * In case of UFS only increase ref cnt for async calls,
+			 * sync calls from within work thread do not pass
+			 * requests further to HW
+			 */
+			if (!strcmp(s_type, (char *)PFK_UFS)) {
+				if (async)
+					entry->loaded_ref_cnt++;
+			} else {
+				entry->loaded_ref_cnt++;
+			}
+		}
+		break;
+	case (ACTIVE_ICE_PRELOAD):
+	case (INACTIVE_INVALIDATING):
+		ret = -EAGAIN;
+		break;
+	case (ACTIVE_ICE_LOADED):
+		kc_update_timestamp(entry);
+
+		if (!strcmp(s_type, (char *)PFK_UFS)) {
+			if (async)
+				entry->loaded_ref_cnt++;
+		} else {
+			entry->loaded_ref_cnt++;
+		}
+		break;
+	case (SCM_ERROR):
+		ret = entry->scm_error;
+		kc_clear_entry(entry);
+		entry->state = FREE;
+		break;
+	default:
+		pr_err("invalid state %d for entry with key index %d\n",
+			entry->state, entry->key_index);
+		ret = -EINVAL;
+	}
+
+	*key_index = entry->key_index;
+	kc_spin_unlock();
+
+	return ret;
+}
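+
+/*
+ * Illustrative caller pattern (a sketch; the retry policy is up to the
+ * caller, see pfk_load_key_start() in pfk.c):
+ *
+ *	ret = pfk_kc_load_key_start(key, key_size, salt, salt_size,
+ *				    &key_index, async);
+ *	if (ret == -EBUSY || ret == -EAGAIN)
+ *		(requeue and retry later from a context where scm calls
+ *		 are allowed)
+ */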
+
+/**
+ * pfk_kc_load_key_end() - finish the process of key loading that was started
+ *						   by pfk_kc_load_key_start
+ *						   by marking the entry as not
+ *						   being in use
+ * @key: pointer to the key
+ * @key_size: the size of the key
+ * @salt: pointer to the salt
+ * @salt_size: the size of the salt
+ *
+ */
+void pfk_kc_load_key_end(const unsigned char *key, size_t key_size,
+		const unsigned char *salt, size_t salt_size)
+{
+	struct kc_entry *entry = NULL;
+	struct task_struct *tmp_pending = NULL;
+	int ref_cnt = 0;
+
+	if (!kc_is_ready())
+		return;
+
+	if (!key || !salt)
+		return;
+
+	if (key_size != PFK_KC_KEY_SIZE)
+		return;
+
+	if (salt_size != PFK_KC_SALT_SIZE)
+		return;
+
+	kc_spin_lock();
+
+	entry = kc_find_key(key, key_size, salt, salt_size);
+	if (!entry) {
+		kc_spin_unlock();
+		pr_err("internal error, there should be an entry to unlock\n");
+
+		return;
+	}
+	ref_cnt = --entry->loaded_ref_cnt;
+
+	if (ref_cnt < 0)
+		pr_err("internal error, ref count should never be negative\n");
+
+	if (!ref_cnt) {
+		entry->state = INACTIVE;
+		/*
+		 * wake-up invalidation if it's waiting
+		 * for the entry to be released
+		 */
+		if (entry->thread_pending) {
+			tmp_pending = entry->thread_pending;
+			entry->thread_pending = NULL;
+
+			kc_spin_unlock();
+			wake_up_process(tmp_pending);
+			return;
+		}
+	}
+
+	kc_spin_unlock();
+}
+
+/**
+ * pfk_kc_remove_key_with_salt() - remove the key from cache and from ICE
+ * engine
+ * @key: pointer to the key
+ * @key_size: the size of the key
+ * @salt: pointer to the salt
+ * @salt_size: the size of the salt
+ *
+ * Return 0 in case of success, error otherwise (also in case of a
+ * non-existing key)
+ */
+int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size,
+		const unsigned char *salt, size_t salt_size)
+{
+	struct kc_entry *entry = NULL;
+	int res = 0;
+
+	if (!kc_is_ready())
+		return -ENODEV;
+
+	if (!key)
+		return -EINVAL;
+
+	if (!salt)
+		return -EINVAL;
+
+	if (key_size != PFK_KC_KEY_SIZE)
+		return -EINVAL;
+
+	if (salt_size != PFK_KC_SALT_SIZE)
+		return -EINVAL;
+
+	kc_spin_lock();
+
+	entry = kc_find_key(key, key_size, salt, salt_size);
+	if (!entry) {
+		pr_debug("%s: key does not exist\n", __func__);
+		kc_spin_unlock();
+		return -EINVAL;
+	}
+
+	res = kc_entry_start_invalidating(entry);
+	if (res != 0) {
+		kc_spin_unlock();
+		return res;
+	}
+	kc_clear_entry(entry);
+
+	kc_spin_unlock();
+
+	qti_pfk_ice_invalidate_key(entry->key_index, s_type);
+
+	kc_spin_lock();
+	kc_entry_finish_invalidating(entry);
+	kc_spin_unlock();
+
+	return 0;
+}
+
+/**
+ * pfk_kc_remove_key() - remove the key from cache and from ICE engine
+ * when no salt is available. Only the key part is matched; if there are
+ * several matching entries, all of them will be removed
+ *
+ * @key: pointer to the key
+ * @key_size: the size of the key
+ *
+ * Return 0 in case of success, error otherwise (also for non-existing key)
+ */
+int pfk_kc_remove_key(const unsigned char *key, size_t key_size)
+{
+	struct kc_entry *entry = NULL;
+	int index = 0;
+	int temp_indexes[PFK_KC_TABLE_SIZE] = {0};
+	int temp_indexes_size = 0;
+	int i = 0;
+	int res = 0;
+
+	if (!kc_is_ready())
+		return -ENODEV;
+
+	if (!key)
+		return -EINVAL;
+
+	if (key_size != PFK_KC_KEY_SIZE)
+		return -EINVAL;
+
+	memset(temp_indexes, -1, sizeof(temp_indexes));
+
+	kc_spin_lock();
+
+	entry = kc_find_key_at_index(key, key_size, NULL, 0, &index);
+	if (!entry) {
+		pr_err("%s: key does not exist\n", __func__);
+		kc_spin_unlock();
+		return -EINVAL;
+	}
+
+	res = kc_entry_start_invalidating(entry);
+	if (res != 0) {
+		kc_spin_unlock();
+		return res;
+	}
+
+	temp_indexes[temp_indexes_size++] = index;
+	kc_clear_entry(entry);
+
+	/* let's clean additional entries with the same key if there are any */
+	do {
+		index++;
+		entry = kc_find_key_at_index(key, key_size, NULL, 0, &index);
+		if (!entry)
+			break;
+
+		res = kc_entry_start_invalidating(entry);
+		if (res != 0) {
+			kc_spin_unlock();
+			goto out;
+		}
+
+		temp_indexes[temp_indexes_size++] = index;
+
+		kc_clear_entry(entry);
+
+
+	} while (true);
+
+	kc_spin_unlock();
+
+	temp_indexes_size--;
+	for (i = temp_indexes_size; i >= 0 ; i--)
+		qti_pfk_ice_invalidate_key(
+			kc_entry_at_index(temp_indexes[i])->key_index,
+					s_type);
+
+	/* fall through */
+	res = 0;
+
+out:
+	kc_spin_lock();
+	for (i = temp_indexes_size; i >= 0 ; i--)
+		kc_entry_finish_invalidating(
+				kc_entry_at_index(temp_indexes[i]));
+	kc_spin_unlock();
+
+	return res;
+}
+
+/**
+ * pfk_kc_clear() - clear the table and remove all keys from ICE
+ *
+ * Return 0 on success, error otherwise
+ *
+ */
+int pfk_kc_clear(void)
+{
+	struct kc_entry *entry = NULL;
+	int i = 0;
+	int res = 0;
+
+	if (!kc_is_ready())
+		return -ENODEV;
+
+	kc_spin_lock();
+	for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
+		entry = kc_entry_at_index(i);
+		res = kc_entry_start_invalidating(entry);
+		if (res != 0) {
+			kc_spin_unlock();
+			goto out;
+		}
+		kc_clear_entry(entry);
+	}
+	kc_spin_unlock();
+
+	for (i = 0; i < PFK_KC_TABLE_SIZE; i++)
+		qti_pfk_ice_invalidate_key(kc_entry_at_index(i)->key_index,
+					s_type);
+
+	/* fall through */
+	res = 0;
+out:
+	kc_spin_lock();
+	for (i = 0; i < PFK_KC_TABLE_SIZE; i++)
+		kc_entry_finish_invalidating(kc_entry_at_index(i));
+	kc_spin_unlock();
+
+	return res;
+}
+
+/**
+ * pfk_kc_clear_on_reset() - clear the table on storage core reset
+ * The assumption is that at this point we don't have any pending transactions.
+ * Also, there is no need to clear the keys from ICE, since the core reset has
+ * already lost the key configuration in ICE.
+ *
+ */
+void pfk_kc_clear_on_reset(void)
+{
+	struct kc_entry *entry = NULL;
+	int i = 0;
+
+	if (!kc_is_ready())
+		return;
+
+	kc_spin_lock();
+	for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
+		entry = kc_entry_at_index(i);
+		kc_clear_entry(entry);
+	}
+	kc_spin_unlock();
+}
+
+static int pfk_kc_find_storage_type(char **device)
+{
+	char boot[20] = {'\0'};
+	char *match = (char *)strnstr(saved_command_line,
+				"androidboot.bootdevice=",
+				strlen(saved_command_line));
+	if (match) {
+		memcpy(boot, (match + strlen("androidboot.bootdevice=")),
+			sizeof(boot) - 1);
+		if (strnstr(boot, PFK_UFS, strlen(boot)))
+			*device = PFK_UFS;
+
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static int __init pfk_kc_pre_init(void)
+{
+	return pfk_kc_find_storage_type(&s_type);
+}
+
+static void __exit pfk_kc_exit(void)
+{
+	s_type = NULL;
+}
+
+module_init(pfk_kc_pre_init);
+module_exit(pfk_kc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Per-File-Key-KC driver");
diff --git a/security/pfe/pfk_kc.h b/security/pfe/pfk_kc.h
new file mode 100644
index 0000000..dc4ad15
--- /dev/null
+++ b/security/pfe/pfk_kc.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef PFK_KC_H_
+#define PFK_KC_H_
+
+#include <linux/types.h>
+
+int pfk_kc_init(void);
+int pfk_kc_deinit(void);
+int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
+		const unsigned char *salt, size_t salt_size, u32 *key_index,
+		bool async);
+void pfk_kc_load_key_end(const unsigned char *key, size_t key_size,
+		const unsigned char *salt, size_t salt_size);
+int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size,
+		const unsigned char *salt, size_t salt_size);
+int pfk_kc_remove_key(const unsigned char *key, size_t key_size);
+int pfk_kc_clear(void);
+void pfk_kc_clear_on_reset(void);
+extern char *saved_command_line;
+
+
+#endif /* PFK_KC_H_ */
diff --git a/security/security.c b/security/security.c
index 6a7b359..e1f9e32 100644
--- a/security/security.c
+++ b/security/security.c
@@ -524,6 +524,14 @@
 }
 EXPORT_SYMBOL_GPL(security_inode_create);
 
+int security_inode_post_create(struct inode *dir, struct dentry *dentry,
+			       umode_t mode)
+{
+	if (unlikely(IS_PRIVATE(dir)))
+		return 0;
+	return call_int_hook(inode_post_create, 0, dir, dentry, mode);
+}
+
 int security_inode_link(struct dentry *old_dentry, struct inode *dir,
 			 struct dentry *new_dentry)
 {
@@ -1668,6 +1676,8 @@
 	.inode_init_security =
 		LIST_HEAD_INIT(security_hook_heads.inode_init_security),
 	.inode_create =	LIST_HEAD_INIT(security_hook_heads.inode_create),
+	.inode_post_create =
+		LIST_HEAD_INIT(security_hook_heads.inode_post_create),
 	.inode_link =	LIST_HEAD_INIT(security_hook_heads.inode_link),
 	.inode_unlink =	LIST_HEAD_INIT(security_hook_heads.inode_unlink),
 	.inode_symlink =
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index c21e135..13011038 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -25,8 +25,9 @@
 #include <linux/in.h>
 #include <linux/spinlock.h>
 #include <net/net_namespace.h>
-#include "flask.h"
-#include "avc.h"
+//#include "flask.h"
+//#include "avc.h"
+#include "security.h"
 
 struct task_security_struct {
 	u32 osid;		/* SID prior to last execve */
@@ -52,6 +53,8 @@
 	u32 sid;		/* SID of this object */
 	u16 sclass;		/* security class of this object */
 	unsigned char initialized;	/* initialization flag */
+	u32 tag;		/* Per-File-Encryption tag */
+	void *pfk_data; /* Per-File-Key data from ecryptfs */
 	struct mutex lock;
 };
 
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 308a286..b8e98c1 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -12,7 +12,6 @@
 #include <linux/dcache.h>
 #include <linux/magic.h>
 #include <linux/types.h>
-#include "flask.h"
 
 #define SECSID_NULL			0x00000000 /* unspecified SID */
 #define SECSID_WILD			0xffffffff /* wildcard SID */
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 99ee618..add1f8d 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -781,7 +781,7 @@
 	if (!retval) {
 		stream->runtime->state = SNDRV_PCM_STATE_DRAINING;
 		wake_up(&stream->runtime->sleep);
-		return retval;
+		goto ret;
 	}
 
 ret:
diff --git a/sound/core/seq/seq_lock.c b/sound/core/seq/seq_lock.c
index 12ba833..ba5752e 100644
--- a/sound/core/seq/seq_lock.c
+++ b/sound/core/seq/seq_lock.c
@@ -23,8 +23,6 @@
 #include <sound/core.h>
 #include "seq_lock.h"
 
-#if defined(CONFIG_SMP) || defined(CONFIG_SND_DEBUG)
-
 /* wait until all locks are released */
 void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
 {
@@ -42,5 +40,3 @@
 }
 
 EXPORT_SYMBOL(snd_use_lock_sync_helper);
-
-#endif
diff --git a/sound/core/seq/seq_lock.h b/sound/core/seq/seq_lock.h
index 54044bc..ac38031 100644
--- a/sound/core/seq/seq_lock.h
+++ b/sound/core/seq/seq_lock.h
@@ -3,8 +3,6 @@
 
 #include <linux/sched.h>
 
-#if defined(CONFIG_SMP) || defined(CONFIG_SND_DEBUG)
-
 typedef atomic_t snd_use_lock_t;
 
 /* initialize lock */
@@ -20,14 +18,4 @@
 void snd_use_lock_sync_helper(snd_use_lock_t *lock, const char *file, int line);
 #define snd_use_lock_sync(lockp) snd_use_lock_sync_helper(lockp, __BASE_FILE__, __LINE__)
 
-#else /* SMP || CONFIG_SND_DEBUG */
-
-typedef spinlock_t snd_use_lock_t;	/* dummy */
-#define snd_use_lock_init(lockp) /**/
-#define snd_use_lock_use(lockp) /**/
-#define snd_use_lock_free(lockp) /**/
-#define snd_use_lock_sync(lockp) /**/
-
-#endif /* SMP || CONFIG_SND_DEBUG */
-
 #endif /* __SND_SEQ_LOCK_H */
diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
index 0f41257..8761877 100644
--- a/sound/hda/hdac_controller.c
+++ b/sound/hda/hdac_controller.c
@@ -284,6 +284,11 @@
 		dev_dbg(bus->dev, "HDA capability ID: 0x%x\n",
 			(cur_cap & AZX_CAP_HDR_ID_MASK) >> AZX_CAP_HDR_ID_OFF);
 
+		if (cur_cap == -1) {
+			dev_dbg(bus->dev, "Invalid capability reg read\n");
+			break;
+		}
+
 		switch ((cur_cap & AZX_CAP_HDR_ID_MASK) >> AZX_CAP_HDR_ID_OFF) {
 		case AZX_ML_CAP_ID:
 			dev_dbg(bus->dev, "Found ML capability\n");
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 9913be8..e46c561 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -1755,7 +1755,7 @@
 			return -1;
 		if (*step_to_check && *step_to_check != step) {
 			codec_err(codec, "Mismatching dB step for vmaster slave (%d!=%d)\n",
--				   *step_to_check, step);
+				   *step_to_check, step);
 			return -1;
 		}
 		*step_to_check = step;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 6f337f0..fe1d06d 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -329,6 +329,7 @@
 		break;
 	case 0x10ec0225:
 	case 0x10ec0233:
+	case 0x10ec0236:
 	case 0x10ec0255:
 	case 0x10ec0256:
 	case 0x10ec0282:
@@ -909,6 +910,7 @@
 	{ 0x10ec0275, 0x1028, 0, "ALC3260" },
 	{ 0x10ec0899, 0x1028, 0, "ALC3861" },
 	{ 0x10ec0298, 0x1028, 0, "ALC3266" },
+	{ 0x10ec0236, 0x1028, 0, "ALC3204" },
 	{ 0x10ec0256, 0x1028, 0, "ALC3246" },
 	{ 0x10ec0225, 0x1028, 0, "ALC3253" },
 	{ 0x10ec0295, 0x1028, 0, "ALC3254" },
@@ -3694,6 +3696,7 @@
 		alc_process_coef_fw(codec, coef0255_1);
 		alc_process_coef_fw(codec, coef0255);
 		break;
+	case 0x10ec0236:
 	case 0x10ec0256:
 		alc_process_coef_fw(codec, coef0256);
 		alc_process_coef_fw(codec, coef0255);
@@ -3777,6 +3780,7 @@
 
 
 	switch (codec->core.vendor_id) {
+	case 0x10ec0236:
 	case 0x10ec0255:
 	case 0x10ec0256:
 		alc_write_coef_idx(codec, 0x45, 0xc489);
@@ -3885,6 +3889,7 @@
 	case 0x10ec0295:
 		alc_process_coef_fw(codec, coef0225);
 		break;
+	case 0x10ec0236:
 	case 0x10ec0255:
 	case 0x10ec0256:
 		alc_process_coef_fw(codec, coef0255);
@@ -3971,6 +3976,7 @@
 	case 0x10ec0255:
 		alc_process_coef_fw(codec, coef0255);
 		break;
+	case 0x10ec0236:
 	case 0x10ec0256:
 		alc_process_coef_fw(codec, coef0256);
 		break;
@@ -4064,6 +4070,7 @@
 	case 0x10ec0255:
 		alc_process_coef_fw(codec, coef0255);
 		break;
+	case 0x10ec0236:
 	case 0x10ec0256:
 		alc_process_coef_fw(codec, coef0256);
 		break;
@@ -4131,6 +4138,7 @@
 	};
 
 	switch (codec->core.vendor_id) {
+	case 0x10ec0236:
 	case 0x10ec0255:
 	case 0x10ec0256:
 		alc_process_coef_fw(codec, coef0255);
@@ -4335,6 +4343,7 @@
 	case 0x10ec0255:
 		alc_process_coef_fw(codec, alc255fw);
 		break;
+	case 0x10ec0236:
 	case 0x10ec0256:
 		alc_process_coef_fw(codec, alc256fw);
 		break;
@@ -5852,6 +5861,14 @@
 		ALC225_STANDARD_PINS,
 		{0x12, 0xb7a60130},
 		{0x1b, 0x90170110}),
+	SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+		{0x12, 0x90a60140},
+		{0x14, 0x90170110},
+		{0x21, 0x02211020}),
+	SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+		{0x12, 0x90a60140},
+		{0x14, 0x90170150},
+		{0x21, 0x02211020}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
 		{0x14, 0x90170110},
 		{0x21, 0x02211020}),
@@ -6226,6 +6243,7 @@
 	case 0x10ec0255:
 		spec->codec_variant = ALC269_TYPE_ALC255;
 		break;
+	case 0x10ec0236:
 	case 0x10ec0256:
 		spec->codec_variant = ALC269_TYPE_ALC256;
 		spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */
@@ -7205,6 +7223,7 @@
 	HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0234, "ALC234", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269),
+	HDA_CODEC_ENTRY(0x10ec0236, "ALC236", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0255, "ALC255", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0256, "ALC256", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0260, "ALC260", patch_alc260),
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 286efc3..7613b9e 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1352,6 +1352,7 @@
 	case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */
 	case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */
 	case USB_ID(0x22d9, 0x0416): /* OPPO HA-1 */
+	case USB_ID(0x2772, 0x0230): /* Pro-Ject Pre Box S2 Digital */
 		if (fp->altsetting == 2)
 			return SNDRV_PCM_FMTBIT_DSD_U32_BE;
 		break;
diff --git a/sound/usb/usb_audio_qmi_svc.c b/sound/usb/usb_audio_qmi_svc.c
index a7cda4a..0aeabfe 100644
--- a/sound/usb/usb_audio_qmi_svc.c
+++ b/sound/usb/usb_audio_qmi_svc.c
@@ -26,6 +26,7 @@
 #include <linux/qmi_encdec.h>
 #include <soc/qcom/msm_qmi_interface.h>
 #include <linux/iommu.h>
+#include <linux/dma-mapping.h>
 #include <linux/platform_device.h>
 #include <linux/usb/audio-v3.h>