Merge "msm: kgsl: Provide the context bank in SMMU_TABLE_UPDATE packet"
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index 592fcef..d4a352b 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -159,6 +159,23 @@
 
 	* qcom,inst-id: must be present. QMI instance id for remote ETMs.
 
+* Optional properties for funnels:
+
+	* qcom,duplicate-funnel: boolean, indicates that this node is a
+	  duplicate of an existing funnel. Funnel devices are now capable
+	  of supporting multiple-input, multiple-output configurations with
+	  built-in hardware filtering for TPDM devices. Each input-output
+	  combination is treated as an independent funnel device.
+	  The funnel-base-dummy and funnel-base-real reg-names must be
+	  specified when this property is set. An illustrative node is
+	  sketched below.
+
+	* reg-names: funnel-base-dummy: dummy register space used by a
+	  duplicate funnel. Should be a valid register address space that
+	  no other device is using.
+
+	* reg-names: funnel-base-real: actual register space for the
+	  duplicate funnel.
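+
+	  For illustration only, a duplicate funnel node might look like
+	  the following sketch (the node name, unit address, register
+	  values and clock phandle are placeholders, not real target
+	  values):
+
+		funnel_foo_1: funnel_1@6042000 {
+			compatible = "arm,primecell";
+			arm,primecell-periphid = <0x0003b908>;
+
+			reg = <0x6867020 0x10>,
+			      <0x6042000 0x1000>;
+			reg-names = "funnel-base-dummy", "funnel-base-real";
+
+			coresight-name = "coresight-funnel-foo-1";
+
+			clocks = <&clock_aop QDSS_CLK>;
+			clock-names = "apb_pclk";
+
+			qcom,duplicate-funnel;
+		};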
+
 Example:
 
 1. Sinks
diff --git a/Documentation/devicetree/bindings/devfreq/devfreq-simple-dev.txt b/Documentation/devicetree/bindings/devfreq/devfreq-simple-dev.txt
index d00ebd8..5f66bbf 100644
--- a/Documentation/devicetree/bindings/devfreq/devfreq-simple-dev.txt
+++ b/Documentation/devicetree/bindings/devfreq/devfreq-simple-dev.txt
@@ -9,12 +9,12 @@
 - compatible:		Must be "devfreq-simple-dev"
 - clock-names:		Must be "devfreq_clk"
 - clocks:		Must refer to the clock that's fed to the device.
-- freq-tbl-khz:		A list of usable frequencies (in KHz) for the device
-			clock.
 Optional properties:
 - polling-ms:	Polling interval for the device in milliseconds. Default: 50
 - governor:	Initial governor to use for the device. Default: "performance"
 - qcom,prepare-clk:	Prepare the device clock during initialization.
+- freq-tbl-khz:		A list of usable frequencies (in kHz) for the device
+			clock.
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/display/msm/sde-rsc.txt b/Documentation/devicetree/bindings/display/msm/sde-rsc.txt
index 7e54fdd..55d18cf 100644
--- a/Documentation/devicetree/bindings/display/msm/sde-rsc.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde-rsc.txt
@@ -29,6 +29,10 @@
 Bus Scaling Subnodes:
 - qcom,sde-data-bus:		Property to provide Bus scaling for data bus access for
 				sde blocks.
+- qcom,sde-llcc-bus:		Property to provide Bus scaling for data bus access for
+				mnoc to llcc.
+- qcom,sde-ebi-bus:		Property to provide Bus scaling for data bus access for
+				llcc to ebi.
 
 Bus Scaling Data:
 - qcom,msm-bus,name:		String property describing client name.
@@ -69,4 +73,24 @@
 		          <22 512 0 6400000>, <23 512 0 6400000>,
 		          <22 512 0 6400000>, <23 512 0 6400000>;
 		};
+		qcom,sde-llcc-bus {
+			qcom,msm-bus,name = "sde_rsc_llcc";
+			qcom,msm-bus,active-only;
+			qcom,msm-bus,num-cases = <3>;
+			qcom,msm-bus,num-paths = <1>;
+			qcom,msm-bus,vectors-KBps =
+			    <20001 20513 0 0>,
+			    <20001 20513 0 6400000>,
+			    <20001 20513 0 6400000>;
+		};
+		qcom,sde-ebi-bus {
+			qcom,msm-bus,name = "sde_rsc_ebi";
+			qcom,msm-bus,active-only;
+			qcom,msm-bus,num-cases = <3>;
+			qcom,msm-bus,num-paths = <1>;
+			qcom,msm-bus,vectors-KBps =
+			    <20000 20512 0 0>,
+			    <20000 20512 0 6400000>,
+			    <20000 20512 0 6400000>;
+		};
 	};
diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt
index 47fc465..863a169 100644
--- a/Documentation/devicetree/bindings/display/msm/sde.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde.txt
@@ -341,6 +341,10 @@
 				mdss blocks.
 - qcom,sde-data-bus:		Property to provide Bus scaling for data bus access for
 				mdss blocks.
+- qcom,sde-llcc-bus:		Property to provide Bus scaling for data bus access for
+				mnoc to llcc.
+- qcom,sde-ebi-bus:		Property to provide Bus scaling for data bus access for
+				llcc to ebi.
 
 - qcom,sde-inline-rotator:	A 2 cell property, with format of (rotator phandle,
 				instance id), of inline rotator device.
@@ -638,6 +642,24 @@
             <22 512 0 6400000>, <23 512 0 6400000>,
                 <25 512 0 6400000>;
     };
+    qcom,sde-llcc-bus {
+        qcom,msm-bus,name = "mdss_sde_llcc";
+        qcom,msm-bus,num-cases = <3>;
+        qcom,msm-bus,num-paths = <1>;
+        qcom,msm-bus,vectors-KBps =
+            <132 770 0 0>,
+            <132 770 0 6400000>,
+            <132 770 0 6400000>;
+    };
+    qcom,sde-ebi-bus {
+        qcom,msm-bus,name = "mdss_sde_ebi";
+        qcom,msm-bus,num-cases = <3>;
+        qcom,msm-bus,num-paths = <1>;
+        qcom,msm-bus,vectors-KBps =
+            <129 512 0 0>,
+            <129 512 0 6400000>,
+            <129 512 0 6400000>;
+    };
 
     qcom,sde-reg-bus {
         /* Reg Bus Scale Settings */
diff --git a/Documentation/devicetree/bindings/media/video/msm-vidc.txt b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
index 53f419c..d61606a 100644
--- a/Documentation/devicetree/bindings/media/video/msm-vidc.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
@@ -6,20 +6,11 @@
 Required properties:
 - compatible : one of:
 	- "qcom,msm-vidc"
-- qcom,max-hw-load: The maximum load the hardware can support expressed in units
-  of macroblocks per second. The load is a reflection of hardware capability
-  rather than a performance guarantee. Performance is guaranteed only up to
-  advertised capability of the chipset.
-- qcom,max-hq-mbs-per-frame : Max no of mbs per frame beyond which
-    "High Quality" encoding is not supported.
-- qcom,max-hq-frames-per-sec : Max no of frames per second beyond which
-    "High Quality" encoding is not supported.
+        - "qcom,sdm845-vidc" : Invokes driver specific data for SDM845.
 
 Optional properties:
 - reg : offset and length of the register set for the device.
 - interrupts : should contain the vidc interrupt.
-- qcom,platform-version : mask and shift of the platform version bits
-    in efuse register.
 - qcom,reg-presets : list of offset-value pairs for registers to be written.
   The offsets are from the base offset specified in 'reg'. This is mainly
   used for QoS, VBIF, etc. presets for video.
@@ -64,20 +55,9 @@
        macro block in low power mode.
        the required frequency to get the final frequency, the factor is
        represented in Q16 format.
-- qcom,sw-power-collapse = A bool indicating if video hardware core can be
-  power collapsed in idle state.
-- qcom,never-unload-fw = A bool indicating if video firmware should be not be
-  unloaded after all active sessions have closed.  Once a new session starts up
-  after this, the firmware will be ready to go.  This should be set on platforms
-  that desire low-latency video startup and don't mind "leakage" of some memory.
 - qcom,use-non-secure-pil = A bool indicating which type of pil to use to load
   the fw.
 - qcom,fw-bias = The address at which venus fw is loaded (manually).
-- qcom,enable-thermal-mitigation = A bool to enable thermal mitigation when
-  thermal run away occurs.
-- qcom,hfi-version = The hfi packetization version supported by venus firmware.
-  If hfi version is not specified, then packetization type will default to
-  legacy.
 - qcom,vidc-iommu-domains = node containing individual domain nodes, each with:
      - a unique domain name for the domain node (e.g vidc,domain-ns)
      - qcom,vidc-domain-phandle: phandle for the domain as defined in
@@ -97,13 +77,6 @@
            internal persist = 0x200
            internal persist1 = 0x400
            internal cmd queue = 0x800
-- qcom,pm-qos-latency-us = The latency used to vote for QOS power manager. This
-value is typically max(latencies of every cluster at all power levels) + 1
-- qcom,max-secure-instances = An int containing max number of concurrent secure
-  instances supported, accounting for venus and system wide limitations like
-  memory, performance etc.
-- qcom,debug-timeout = A bool indicating that FW errors such as SYS_ERROR,
-  SESSION_ERROR and timeouts will be treated as Fatal.
 - cache-slice-names = An array of supported cache slice names by llcc
 - cache-slices = An array of supported cache slice ids corresponding
   to cache-slice-names by llcc
@@ -171,25 +144,16 @@
 		venus-supply = <&gdsc>;
 		venus-core0-supply = <&gdsc1>;
 		venus-core1-supply = <&gdsc2>;
-		qcom,hfi-version = "3xx";
 		qcom,reg-presets = <0x80004 0x1>,
 			<0x80178 0x00001FFF>;
 		qcom,qdss-presets = <0xFC307000 0x1000>,
 			<0xFC322000 0x1000>;
-		qcom,max-hw-load = <1224450>; /* 4k @ 30 + 1080p @ 30*/
-		qcom,never-unload-fw;
 		clock-names = "foo_clk", "bar_clk", "baz_clk";
 		qcom,clock-configs = <0x3 0x1 0x0>;
-		qcom,sw-power-collapse;
 		qcom,buffer-type-tz-usage-table = <0x1 0x1>,
 						<0x1fe 0x2>;
-		qcom,enable-thermal-mitigation;
-		qcom,use-non-secure-pil;
-		qcom,use_dynamic_bw_update;
 		qcom,fw-bias = <0xe000000>;
 		qcom,allowed-clock-rates = <200000000 300000000 400000000>;
-		qcom,max-hq-mbs-per-frame = <8160>;
-		qcom,max-hq-frames-per-sec = <60>;
 		msm_vidc_cb1: msm_vidc_cb1 {
 			compatible = "qcom,msm-vidc,context-bank";
 			label = "venus_ns";
diff --git a/Documentation/devicetree/bindings/mtd/msm_qpic_nand.txt b/Documentation/devicetree/bindings/mtd/msm_qpic_nand.txt
new file mode 100644
index 0000000..094dc25
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/msm_qpic_nand.txt
@@ -0,0 +1,74 @@
+Qualcomm Technologies, Inc. Parallel Interface Controller (QPIC) for NAND devices
+
+Required properties:
+- compatible : "qcom,msm-nand".
+- reg : should specify QPIC NANDc and BAM physical address range.
+- reg-names : should specify relevant names to each reg property defined.
+- interrupts : should specify QPIC/BAM interrupt numbers.
+- interrupt-names : should specify relevant names to each interrupts property
+  defined.
+- qcom,reg-adjustment-offset : Specify the base adjustment offset value for the
+  version registers.
+
+MTD flash partition layout for NAND devices -
+
+Each partition is represented as a sub-node of the qcom,mtd-partitions device.
+Each node's name represents the name of the corresponding partition.
+
+This is now completely optional, as the partition information is available
+from the bootloader.
+
+Optional properties:
+- reg : boot_cfg. This is needed only on the targets where both NAND and eMMC
+  devices are supported. On eMMC based builds, NAND cannot be enabled by
+  default due to the absence of some of its required resources.
+- reg : The partition offset and size
+- label : The label / name for this partition.
+- read-only: This parameter, if present, indicates that this partition
+  should only be mounted read-only.
+- Refer to "Documentation/devicetree/bindings/arm/msm/msm_bus.txt" for
+below optional properties:
+	- qcom,msm-bus,name
+	- qcom,msm-bus,num-cases
+	- qcom,msm-bus,active-only
+	- qcom,msm-bus,num-paths
+	- qcom,msm-bus,vectors-KBps
+
+Examples:
+
+	qcom,nand@f9af0000 {
+		compatible = "qcom,msm-nand";
+		reg = <0xf9af0000 0x1000>,
+		      <0xf9ac4000 0x8000>,
+		      <0x5e02c 0x4>;
+		reg-names = "nand_phys",
+			    "bam_phys",
+			    "boot_cfg";
+		qcom,reg-adjustment-offset = <0x4000>;
+
+		interrupts = <0 279 0>;
+		interrupt-names = "bam_irq";
+
+		qcom,msm-bus,name = "qpic_nand";
+		qcom,msm-bus,num-cases = <1>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps = <91 512 0 0>;
+	};
+
+	qcom,mtd-partitions {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		partition@0 {
+			label = "boot";
+			reg = <0x0 0x1000>;
+			read-only;
+		};
+		partition@20000 {
+			label = "userdata";
+			reg = <0x20000 0x1000>;
+		};
+		partition@40000 {
+			label = "system";
+			reg = <0x40000 0x1000>;
+		};
+	};
diff --git a/arch/arm64/boot/dts/qcom/pm660.dtsi b/arch/arm64/boot/dts/qcom/pm660.dtsi
index ef8fe1b..4abf260 100644
--- a/arch/arm64/boot/dts/qcom/pm660.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm660.dtsi
@@ -330,7 +330,9 @@
 					   "usbin_v";
 
 			qcom,wipower-max-uw = <5000000>;
-			dpdm-supply = <&qusb_phy0>;
+
+			/* Enable after the qusb_phy0 device node is added */
+			/* dpdm-supply = <&qusb_phy0>; */
 
 			qcom,thermal-mitigation
 					= <3000000 2500000 2000000 1500000
diff --git a/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi
index a1dc261..b0c436f 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi
@@ -361,3 +361,13 @@
 		regulator-max-microvolt = <7>;
 	};
 };
+
+&pm660_charger {
+	smb2_vbus: qcom,smb2-vbus {
+		regulator-name = "smb2-vbus";
+	};
+
+	smb2_vconn: qcom,smb2-vconn {
+		regulator-name = "smb2-vconn";
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index 927e0b2..bb5217e 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -748,6 +748,24 @@
 		reg-names = "dcc-base", "dcc-ram-base";
 	};
 
+	spmi_bus: qcom,spmi@c440000 {
+		compatible = "qcom,spmi-pmic-arb";
+		reg = <0xc440000 0x1100>,
+		      <0xc600000 0x2000000>,
+		      <0xe600000 0x100000>,
+		      <0xe700000 0xa0000>,
+		      <0xc40a000 0x26000>;
+		reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+		interrupt-names = "periph_irq";
+		interrupts = <GIC_SPI 481 IRQ_TYPE_NONE>;
+		qcom,ee = <0>;
+		qcom,channel = <0>;
+		#address-cells = <2>;
+		#size-cells = <0>;
+		interrupt-controller;
+		#interrupt-cells = <4>;
+		cell-index = <0>;
+	};
 };
 
 #include "sdm670-pinctrl.dtsi"
@@ -827,4 +845,6 @@
 	status = "ok";
 };
 
+#include "pm660.dtsi"
+#include "pm660l.dtsi"
 #include "sdm670-regulator.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
index c8f84fd..0430ea4 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
@@ -334,7 +334,9 @@
 		interrupts = <63 0>;
 		interrupt-names = "nfc_irq";
 		pinctrl-names = "nfc_active", "nfc_suspend";
-		pinctrl-0 = <&nfc_int_active &nfc_enable_active>;
+		pinctrl-0 = <&nfc_int_active
+			     &nfc_enable_active
+			     &nfc_clk_default>;
 		pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>;
 		clocks = <&clock_rpmh RPMH_LN_BB_CLK3>;
 		clock-names = "ref_clk";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
index e32ec6e..04a332e 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
@@ -767,13 +767,42 @@
 					    <&tpdm_lpass_out_funnel_lpass>;
 				};
 			};
+		};
+	};
 
-			port@2 {
+	funnel_lpass_1: funnel_1@6845000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x6867010 0x10>,
+		      <0x6845000 0x1000>;
+		reg-names = "funnel-base-dummy", "funnel-base-real";
+
+		coresight-name = "coresight-funnel-lpass-1";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+		qcom,duplicate-funnel;
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_lpass_1_out_funnel_qatb: endpoint {
+					remote-endpoint =
+					    <&funnel_qatb_in_funnel_lpass_1>;
+				};
+			};
+
+			port@1 {
 				reg = <1>;
-				funnel_lpass_in_audio_etm0: endpoint {
+				funnel_lpass_1_in_audio_etm0: endpoint {
 					slave-mode;
 					remote-endpoint =
-					    <&audio_etm0_out_funnel_lpass>;
+					    <&audio_etm0_out_funnel_lpass_1>;
 				};
 			};
 		};
@@ -1100,13 +1129,42 @@
 					    <&tpdm_turing_out_funnel_turing>;
 				};
 			};
+		};
+	};
 
-			port@2 {
+	funnel_turing_1: funnel_1@6861000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x6867000 0x10>,
+		      <0x6861000 0x1000>;
+		reg-names = "funnel-base-dummy", "funnel-base-real";
+
+		coresight-name = "coresight-funnel-turing-1";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+
+		qcom,duplicate-funnel;
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_turing_1_out_funnel_qatb: endpoint {
+					remote-endpoint =
+					    <&funnel_qatb_in_funnel_turing_1>;
+				};
+			};
+
+			port@1 {
 				reg = <1>;
-				funnel_turing_in_turing_etm0: endpoint {
+				funnel_turing_1_in_turing_etm0: endpoint {
 					slave-mode;
 					remote-endpoint =
-					    <&turing_etm0_out_funnel_turing>;
+					    <&turing_etm0_out_funnel_turing_1>;
 				};
 			};
 		};
@@ -1394,6 +1452,24 @@
 						<&tpda_out_funnel_qatb>;
 				};
 			};
+
+			port@2 {
+				reg = <6>;
+				funnel_qatb_in_funnel_lpass_1: endpoint {
+					slave-mode;
+					remote-endpoint =
+					    <&funnel_lpass_1_out_funnel_qatb>;
+				};
+			};
+
+			port@3 {
+				reg = <7>;
+				funnel_qatb_in_funnel_turing_1: endpoint {
+					slave-mode;
+					remote-endpoint =
+					    <&funnel_turing_1_out_funnel_qatb>;
+				};
+			};
 		};
 	};
 
@@ -1780,9 +1856,9 @@
 		qcom,inst-id = <13>;
 
 		port{
-			turing_etm0_out_funnel_turing: endpoint {
+			turing_etm0_out_funnel_turing_1: endpoint {
 				remote-endpoint =
-					<&funnel_turing_in_turing_etm0>;
+					<&funnel_turing_1_in_turing_etm0>;
 			};
 		};
 	};
@@ -1823,8 +1899,9 @@
 		qcom,inst-id = <5>;
 
 		port {
-			audio_etm0_out_funnel_lpass: endpoint {
-				remote-endpoint = <&funnel_lpass_in_audio_etm0>;
+			audio_etm0_out_funnel_lpass_1: endpoint {
+				remote-endpoint =
+					<&funnel_lpass_1_in_audio_etm0>;
 			};
 		};
 	};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index c75eb48..c3217e7 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -308,7 +308,9 @@
 		interrupts = <63 0>;
 		interrupt-names = "nfc_irq";
 		pinctrl-names = "nfc_active", "nfc_suspend";
-		pinctrl-0 = <&nfc_int_active &nfc_enable_active>;
+		pinctrl-0 = <&nfc_int_active
+			     &nfc_enable_active
+			     &nfc_clk_default>;
 		pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>;
 		clocks = <&clock_rpmh RPMH_LN_BB_CLK3>;
 		clock-names = "ref_clk";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
index 9946a25..dc58f9c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
@@ -2800,14 +2800,6 @@
 };
 
 &pm8998_gpios {
-	gpio@d400 {
-		qcom,mode = <0>;
-		qcom,vin-sel = <1>;
-		qcom,src-sel = <0>;
-		qcom,master-en = <1>;
-		status = "okay";
-	};
-
 	key_home {
 		key_home_default: key_home_default {
 			pins = "gpio5";
@@ -2865,6 +2857,15 @@
 			output-low;
 		};
 	};
+
+	nfc_clk {
+		nfc_clk_default: nfc_clk_default {
+			pins = "gpio21";
+			function = "normal";
+			input-enable;
+			power-source = <1>;
+		};
+	};
 };
 
 &pmi8998_gpios {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
index c2fbed5..f14293b 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
@@ -73,7 +73,9 @@
 		interrupts = <63 0>;
 		interrupt-names = "nfc_irq";
 		pinctrl-names = "nfc_active", "nfc_suspend";
-		pinctrl-0 = <&nfc_int_active &nfc_enable_active>;
+		pinctrl-0 = <&nfc_int_active
+			     &nfc_enable_active
+			     &nfc_clk_default>;
 		pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>;
 		clocks = <&clock_rpmh RPMH_LN_BB_CLK3>;
 		clock-names = "ref_clk";
@@ -265,3 +267,189 @@
 &ext_5v_boost {
 	status = "ok";
 };
+
+&pm8998_vadc {
+	chan@83 {
+		label = "vph_pwr";
+		reg = <0x83>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <1>;
+		qcom,calibration-type = "absolute";
+		qcom,scale-function = <0>;
+		qcom,hw-settle-time = <0>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@85 {
+		label = "vcoin";
+		reg = <0x85>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <1>;
+		qcom,calibration-type = "absolute";
+		qcom,scale-function = <0>;
+		qcom,hw-settle-time = <0>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@4c {
+		label = "xo_therm";
+		reg = <0x4c>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <4>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@4d {
+		label = "msm_therm";
+		reg = <0x4d>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@4f {
+		label = "pa_therm1";
+		reg = <0x4f>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@51 {
+		label = "quiet_therm";
+		reg = <0x51>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+};
+
+&pm8998_adc_tm {
+	chan@83 {
+		label = "vph_pwr";
+		reg = <0x83>;
+		qcom,pre-div-channel-scaling = <1>;
+		qcom,calibration-type = "absolute";
+		qcom,scale-function = <0>;
+		qcom,hw-settle-time = <0>;
+		qcom,btm-channel-number = <0x60>;
+	};
+
+	chan@4c {
+		label = "xo_therm";
+		reg = <0x4c>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <4>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x68>;
+		qcom,thermal-node;
+	};
+
+	chan@4d {
+		label = "msm_therm";
+		reg = <0x4d>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x70>;
+		qcom,thermal-node;
+	};
+
+	chan@4f {
+		label = "pa_therm1";
+		reg = <0x4f>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x78>;
+		qcom,thermal-node;
+	};
+
+	chan@51 {
+		label = "quiet_therm";
+		reg = <0x51>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x80>;
+		qcom,thermal-node;
+	};
+};
+
+&thermal_zones {
+	xo-therm-adc {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&pm8998_adc_tm 0x4c>;
+		thermal-governor = "user_space";
+
+		trips {
+			active-config0 {
+				temperature = <65000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	msm-therm-adc {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&pm8998_adc_tm 0x4d>;
+		thermal-governor = "user_space";
+
+		trips {
+			active-config0 {
+				temperature = <65000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	pa-therm1-adc {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&pm8998_adc_tm 0x4f>;
+		thermal-governor = "user_space";
+
+		trips {
+			active-config0 {
+				temperature = <65000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	quiet-therm-adc {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&pm8998_adc_tm 0x51>;
+		thermal-governor = "user_space";
+
+		trips {
+			active-config0 {
+				temperature = <65000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi b/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
index af12224..71c521a 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
@@ -16,16 +16,10 @@
 
 &soc {
 	msm_vidc: qcom,vidc@aa00000 {
-		compatible = "qcom,msm-vidc";
+		compatible = "qcom,msm-vidc", "qcom,sdm845-vidc";
 		status = "ok";
 		reg = <0xaa00000 0x200000>;
 		interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
-		qcom,hfi = "venus";
-		qcom,firmware-name = "venus";
-		qcom,never-unload-fw;
-		qcom,sw-power-collapse;
-		qcom,max-secure-instances = <5>;
-		qcom,max-hw-load = <2563200>; /* Full 4k @ 60 + 1080p @ 60 */
 
 		/* LLCC Info */
 		cache-slice-names = "vidsc0", "vidsc1";
@@ -53,21 +47,6 @@
 		qcom,clock-configs = <0x1 0x0 0x0 0x1 0x0 0x1 0x0>;
 		qcom,allowed-clock-rates = <200000000 320000000 380000000
 			444000000 533000000>;
-		qcom,max-hq-mbs-per-frame = <8160>;
-		qcom,max-hq-frames-per-sec = <60>;
-		qcom,clock-freq-tbl {
-			qcom,profile-enc {
-				qcom,codec-mask = <0x55555555>;
-				qcom,vpp-cycles-per-mb = <675>;
-				qcom,vsp-cycles-per-mb = <125>;
-				qcom,low-power-cycles-per-mb = <320>;
-			};
-			qcom,profile-dec {
-				qcom,codec-mask = <0xffffffff>;
-				qcom,vpp-cycles-per-mb = <200>;
-				qcom,vsp-cycles-per-mb = <50>;
-			};
-		};
 
 		/* Buses */
 		bus_cnoc {
@@ -84,7 +63,7 @@
 			label = "venus-ddr";
 			qcom,bus-master = <MSM_BUS_MASTER_VIDEO_P0>;
 			qcom,bus-slave = <MSM_BUS_SLAVE_EBI_CH0>;
-			qcom,bus-governor = "performance";
+			qcom,bus-governor = "msm-vidc-ddr";
 			qcom,bus-range-kbps = <1000 3388000>;
 		};
 		arm9_bus_ddr {
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 7ea200e..83f1166 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -488,6 +488,11 @@
 	firmware: firmware {
 		android {
 			compatible = "android,firmware";
+			vbmeta {
+				compatible = "android,vbmeta";
+				parts = "vbmeta,boot,system,vendor,dtbo";
+			};
+
 			fstab {
 				compatible = "android,fstab";
 				vendor {
@@ -495,7 +500,7 @@
 					dev = "/dev/block/platform/soc/1d84000.ufshc/by-name/vendor";
 					type = "ext4";
 					mnt_flags = "ro,barrier=1,discard";
-					fsmgr_flags = "wait,slotselect";
+					fsmgr_flags = "wait,slotselect,avb";
 				};
 			};
 		};
@@ -939,20 +944,6 @@
 		clocks = <&clock_cpucc L3_CLUSTER0_VOTE_CLK>;
 		governor = "performance";
 		qcom,prepare-clk;
-		freq-tbl-khz =
-			< 300000 >,
-			< 422400 >,
-			< 499200 >,
-			< 576000 >,
-			< 652800 >,
-			< 729600 >,
-			< 806400 >,
-			< 883200 >,
-			< 960000 >,
-			< 1036800 >,
-			< 1094400 >,
-			< 1209600 >,
-			< 1305600 >;
 	};
 
 	l3_cpu4: qcom,l3-cpu4 {
@@ -961,20 +952,6 @@
 		clocks = <&clock_cpucc L3_CLUSTER1_VOTE_CLK>;
 		governor = "performance";
 		qcom,prepare-clk;
-		freq-tbl-khz =
-			< 300000 >,
-			< 422400 >,
-			< 499200 >,
-			< 576000 >,
-			< 652800 >,
-			< 729600 >,
-			< 806400 >,
-			< 883200 >,
-			< 960000 >,
-			< 1036800 >,
-			< 1094400 >,
-			< 1209600 >,
-			< 1305600 >;
 	};
 
 	devfreq_l3lat_0: qcom,cpu0-l3lat-mon {
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index 36672cc..7a53ed4 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -19,6 +19,7 @@
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_SCHEDTUNE=y
 CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_BPF=y
 CONFIG_SCHED_CORE_CTL=y
 CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
@@ -31,6 +32,7 @@
 # CONFIG_RD_LZO is not set
 # CONFIG_RD_LZ4 is not set
 CONFIG_KALLSYMS_ALL=y
+CONFIG_BPF_SYSCALL=y
 # CONFIG_AIO is not set
 # CONFIG_MEMBARRIER is not set
 CONFIG_EMBEDDED=y
@@ -83,7 +85,6 @@
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
 CONFIG_CPU_BOOST=y
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
-CONFIG_CPU_FREQ_MSM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -157,6 +158,7 @@
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
 CONFIG_NETFILTER_XT_MATCH_POLICY=y
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
 CONFIG_NETFILTER_XT_MATCH_SOCKET=y
@@ -204,8 +206,6 @@
 CONFIG_NET_SCH_INGRESS=y
 CONFIG_NET_CLS_FW=y
 CONFIG_NET_CLS_U32=y
-CONFIG_CLS_U32_MARK=y
-CONFIG_NET_CLS_FLOW=y
 CONFIG_NET_EMATCH=y
 CONFIG_NET_EMATCH_CMP=y
 CONFIG_NET_EMATCH_NBYTE=y
@@ -246,9 +246,12 @@
 CONFIG_SCSI_UFSHCD_PLATFORM=y
 CONFIG_SCSI_UFS_QCOM=y
 CONFIG_SCSI_UFS_QCOM_ICE=y
+CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_DM=y
+CONFIG_DM_DEBUG=y
 CONFIG_DM_CRYPT=y
+CONFIG_DM_REQ_CRYPT=y
 CONFIG_DM_UEVENT=y
 CONFIG_DM_VERITY=y
 CONFIG_DM_VERITY_FEC=y
@@ -405,6 +408,7 @@
 CONFIG_USB_CONFIGFS_F_QDSS=y
 CONFIG_MMC=y
 CONFIG_MMC_PERF_PROFILING=y
+CONFIG_MMC_PARANOID_SD_INIT=y
 CONFIG_MMC_CLKGATE=y
 CONFIG_MMC_BLOCK_MINORS=32
 CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
@@ -527,6 +531,7 @@
 CONFIG_EXT4_FS_SECURITY=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
 CONFIG_FUSE_FS=y
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index e799405..7115294 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -23,6 +23,7 @@
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_SCHEDTUNE=y
 CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_BPF=y
 CONFIG_SCHED_CORE_CTL=y
 CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
@@ -35,6 +36,7 @@
 # CONFIG_RD_LZO is not set
 # CONFIG_RD_LZ4 is not set
 CONFIG_KALLSYMS_ALL=y
+CONFIG_BPF_SYSCALL=y
 # CONFIG_AIO is not set
 # CONFIG_MEMBARRIER is not set
 CONFIG_EMBEDDED=y
@@ -89,7 +91,6 @@
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
 CONFIG_CPU_BOOST=y
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
-CONFIG_CPU_FREQ_MSM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -163,9 +164,9 @@
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
 CONFIG_NETFILTER_XT_MATCH_POLICY=y
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
-CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
 CONFIG_NETFILTER_XT_MATCH_SOCKET=y
 CONFIG_NETFILTER_XT_MATCH_STATE=y
 CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
@@ -212,8 +213,6 @@
 CONFIG_NET_SCH_INGRESS=y
 CONFIG_NET_CLS_FW=y
 CONFIG_NET_CLS_U32=y
-CONFIG_CLS_U32_MARK=y
-CONFIG_NET_CLS_FLOW=y
 CONFIG_NET_EMATCH=y
 CONFIG_NET_EMATCH_CMP=y
 CONFIG_NET_EMATCH_NBYTE=y
@@ -260,7 +259,9 @@
 CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_DM=y
+CONFIG_DM_DEBUG=y
 CONFIG_DM_CRYPT=y
+CONFIG_DM_REQ_CRYPT=y
 CONFIG_DM_UEVENT=y
 CONFIG_DM_VERITY=y
 CONFIG_DM_VERITY_FEC=y
@@ -414,6 +415,7 @@
 CONFIG_MMC=y
 CONFIG_MMC_PERF_PROFILING=y
 CONFIG_MMC_RING_BUFFER=y
+CONFIG_MMC_PARANOID_SD_INIT=y
 CONFIG_MMC_CLKGATE=y
 CONFIG_MMC_BLOCK_MINORS=32
 CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
@@ -482,6 +484,7 @@
 CONFIG_QCOM_RUN_QUEUE_STATS=y
 CONFIG_QCOM_LLCC=y
 CONFIG_QCOM_SDM845_LLCC=y
+CONFIG_QCOM_SDM670_LLCC=y
 CONFIG_MSM_SERVICE_LOCATOR=y
 CONFIG_MSM_SERVICE_NOTIFIER=y
 CONFIG_MSM_BOOT_STATS=y
@@ -552,6 +555,7 @@
 CONFIG_EXT4_FS_SECURITY=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
 CONFIG_FUSE_FS=y
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
diff --git a/block/blk-core.c b/block/blk-core.c
index 710c93b..d8fba67 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1438,6 +1438,9 @@
 	/* this is a bio leak */
 	WARN_ON(req->bio != NULL);
 
+	/* this is a bio leak if the bio is not tagged with BIO_DONTFREE */
+	WARN_ON(req->bio && !bio_flagged(req->bio, BIO_DONTFREE));
+
 	/*
 	 * Request may not have originated from ll_rw_blk. if not,
 	 * it didn't come out of our reserved rq pools
@@ -2619,6 +2622,15 @@
 	blk_account_io_completion(req, nr_bytes);
 
 	total_bytes = 0;
+
+	/*
+	 * If the bio is flagged with BIO_DONTFREE, request-based DM needs
+	 * to perform post-processing, so don't end the bios or the request
+	 * here; the DM layer takes care of that.
+	 */
+	if (bio_flagged(req->bio, BIO_DONTFREE))
+		return false;
+
 	while (req->bio) {
 		struct bio *bio = req->bio;
 		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 2642e5f..abde370 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -492,6 +492,64 @@
 }
 EXPORT_SYMBOL(blk_rq_map_sg);
 
+/*
+ * Map a request to a scatterlist without combining physically
+ * contiguous (clusterable) segments; returns the number of sg entries
+ * set up. The caller must make sure the sg list can hold
+ * rq->nr_phys_segments entries.
+ */
+int blk_rq_map_sg_no_cluster(struct request_queue *q, struct request *rq,
+		  struct scatterlist *sglist)
+{
+	struct bio_vec bvec, bvprv = { NULL };
+	struct req_iterator iter;
+	struct scatterlist *sg;
+	int nsegs, cluster = 0;
+
+	nsegs = 0;
+
+	/*
+	 * for each bio in rq
+	 */
+	sg = NULL;
+	rq_for_each_segment(bvec, rq, iter) {
+		__blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
+				     &nsegs, &cluster);
+	} /* segments in rq */
+
+	if (!sg)
+		return nsegs;
+
+	if (unlikely(rq->rq_flags & RQF_COPY_USER) &&
+	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
+		unsigned int pad_len =
+			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
+
+		sg->length += pad_len;
+		rq->extra_len += pad_len;
+	}
+
+	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
+		if (op_is_write(req_op(rq)))
+			memset(q->dma_drain_buffer, 0, q->dma_drain_size);
+
+		sg_unmark_end(sg);
+		sg = sg_next(sg);
+		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
+			    q->dma_drain_size,
+			    ((unsigned long)q->dma_drain_buffer) &
+			    (PAGE_SIZE - 1));
+		nsegs++;
+		rq->extra_len += q->dma_drain_size;
+	}
+
+	if (sg)
+		sg_mark_end(sg);
+
+	return nsegs;
+}
+EXPORT_SYMBOL(blk_rq_map_sg_no_cluster);
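+
+/*
+ * Usage sketch (not part of this patch): a driver that must program one
+ * DMA descriptor per segment, with no clustering of physically
+ * contiguous segments, might do:
+ *
+ *	struct scatterlist *sgl;
+ *	int nsegs;
+ *
+ *	sgl = kmalloc_array(rq->nr_phys_segments, sizeof(*sgl), GFP_ATOMIC);
+ *	if (!sgl)
+ *		return -ENOMEM;
+ *	sg_init_table(sgl, rq->nr_phys_segments);
+ *	nsegs = blk_rq_map_sg_no_cluster(q, rq, sgl);
+ */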
+
 static inline int ll_new_hw_segment(struct request_queue *q,
 				    struct request *req,
 				    struct bio *bio)
diff --git a/block/blk.h b/block/blk.h
index 74444c4..ae07666 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -207,7 +207,6 @@
 int attempt_front_merge(struct request_queue *q, struct request *rq);
 int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 				struct request *next);
-void blk_recalc_rq_segments(struct request *rq);
 void blk_rq_set_mixed_merge(struct request *rq);
 bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
 int blk_try_merge(struct request *rq, struct bio *bio);
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 8069b36..a7511a1 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -59,7 +59,7 @@
 /*
  * Assigned numbers, used for dynamic minors
  */
-#define DYNAMIC_MINORS 64 /* like dynamic majors */
+#define DYNAMIC_MINORS 75 /* like dynamic majors */
 static DECLARE_BITMAP(misc_minors, DYNAMIC_MINORS);
 
 #ifdef CONFIG_PROC_FS
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 930e281..6a8c43b 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -9,6 +9,7 @@
 clk-qcom-y += clk-branch.o
 clk-qcom-y += clk-regmap-divider.o
 clk-qcom-y += clk-regmap-mux.o
+clk-qcom-y += clk-regmap-mux-div.o
 clk-qcom-y += reset.o clk-voter.o
 clk-qcom-y += clk-dummy.o clk-debug.o
 clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o gdsc-regulator.o
diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c
index 9ccef91..86e148d 100644
--- a/drivers/clk/qcom/camcc-sdm845.c
+++ b/drivers/clk/qcom/camcc-sdm845.c
@@ -1928,22 +1928,11 @@
 };
 
 static const struct qcom_reset_map cam_cc_sdm845_resets[] = {
-	[TITAN_CAM_CC_BPS_BCR] = { 0x6000 },
-	[TITAN_CAM_CC_CAMNOC_BCR] = { 0xb120 },
 	[TITAN_CAM_CC_CCI_BCR] = { 0xb0d4 },
 	[TITAN_CAM_CC_CPAS_BCR] = { 0xb118 },
 	[TITAN_CAM_CC_CSI0PHY_BCR] = { 0x5000 },
 	[TITAN_CAM_CC_CSI1PHY_BCR] = { 0x5024 },
 	[TITAN_CAM_CC_CSI2PHY_BCR] = { 0x5048 },
-	[TITAN_CAM_CC_FD_BCR] = { 0xb0ac },
-	[TITAN_CAM_CC_ICP_BCR] = { 0xb074 },
-	[TITAN_CAM_CC_IFE_0_BCR] = { 0x9000 },
-	[TITAN_CAM_CC_IFE_1_BCR] = { 0xa000 },
-	[TITAN_CAM_CC_IFE_LITE_BCR] = { 0xb000 },
-	[TITAN_CAM_CC_IPE_0_BCR] = { 0x7000 },
-	[TITAN_CAM_CC_IPE_1_BCR] = { 0x8000 },
-	[TITAN_CAM_CC_JPEG_BCR] = { 0xb048 },
-	[TITAN_CAM_CC_LRME_BCR] = { 0xb0f4 },
 	[TITAN_CAM_CC_MCLK0_BCR] = { 0x4000 },
 	[TITAN_CAM_CC_MCLK1_BCR] = { 0x4020 },
 	[TITAN_CAM_CC_MCLK2_BCR] = { 0x4040 },
diff --git a/drivers/clk/qcom/clk-regmap-mux-div.c b/drivers/clk/qcom/clk-regmap-mux-div.c
new file mode 100644
index 0000000..9593aef
--- /dev/null
+++ b/drivers/clk/qcom/clk-regmap-mux-div.c
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+
+#include "clk-regmap-mux-div.h"
+
+#define CMD_RCGR			0x0
+#define CMD_RCGR_UPDATE			BIT(0)
+#define CMD_RCGR_DIRTY_CFG		BIT(4)
+#define CMD_RCGR_ROOT_OFF		BIT(31)
+#define CFG_RCGR			0x4
+
+#define to_clk_regmap_mux_div(_hw) \
+	container_of(to_clk_regmap(_hw), struct clk_regmap_mux_div, clkr)
+
+int __mux_div_set_src_div(struct clk_regmap_mux_div *md, u32 src, u32 div)
+{
+	int ret, count;
+	u32 val, mask;
+	const char *name = clk_hw_get_name(&md->clkr.hw);
+
+	val = (div << md->hid_shift) | (src << md->src_shift);
+	mask = ((BIT(md->hid_width) - 1) << md->hid_shift) |
+	       ((BIT(md->src_width) - 1) << md->src_shift);
+
+	ret = regmap_update_bits(md->clkr.regmap, CFG_RCGR + md->reg_offset,
+				 mask, val);
+	if (ret)
+		return ret;
+
+	ret = regmap_update_bits(md->clkr.regmap, CMD_RCGR + md->reg_offset,
+				 CMD_RCGR_UPDATE, CMD_RCGR_UPDATE);
+	if (ret)
+		return ret;
+
+	/* Wait for update to take effect */
+	for (count = 500; count > 0; count--) {
+		ret = regmap_read(md->clkr.regmap, CMD_RCGR + md->reg_offset,
+				  &val);
+		if (ret)
+			return ret;
+		if (!(val & CMD_RCGR_UPDATE))
+			return 0;
+		udelay(1);
+	}
+
+	pr_err("%s: RCG did not update its configuration", name);
+	return -EBUSY;
+}
+
+static void __mux_div_get_src_div(struct clk_regmap_mux_div *md, u32 *src,
+				  u32 *div)
+{
+	u32 val, __div, __src;
+	const char *name = clk_hw_get_name(&md->clkr.hw);
+
+	regmap_read(md->clkr.regmap, CMD_RCGR + md->reg_offset, &val);
+
+	if (val & CMD_RCGR_DIRTY_CFG) {
+		pr_err("%s: RCG configuration is pending\n", name);
+		return;
+	}
+
+	regmap_read(md->clkr.regmap, CFG_RCGR + md->reg_offset, &val);
+	__src = (val >> md->src_shift);
+	__src &= BIT(md->src_width) - 1;
+	*src = __src;
+
+	__div = (val >> md->hid_shift);
+	__div &= BIT(md->hid_width) - 1;
+	*div = __div;
+}
+
+static int mux_div_enable(struct clk_hw *hw)
+{
+	struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+
+	return __mux_div_set_src_div(md, md->src, md->div);
+}
+
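+/*
+ * Prefer the lowest candidate rate at or above the request; while only
+ * rates below the request have been seen, prefer the highest of them.
+ */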
+static inline bool is_better_rate(unsigned long req, unsigned long best,
+				  unsigned long new)
+{
+	return (req <= new && new < best) || (best < req && best < new);
+}
+
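+/*
+ * Note on the divider encoding, as inferred from the 2/div arithmetic
+ * below: the HID field holds a half-integer divider, where a raw value
+ * n divides the parent rate by (n + 1) / 2, i.e. rate = parent * 2 /
+ * (n + 1). For example, a raw value of 3 with a 600 MHz parent yields
+ * 600 * 2 / 4 = 300 MHz.
+ */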
+static int mux_div_determine_rate(struct clk_hw *hw,
+				  struct clk_rate_request *req)
+{
+	struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+	unsigned int i, div, max_div;
+	unsigned long actual_rate, best_rate = 0;
+	unsigned long req_rate = req->rate;
+
+	for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+		struct clk_hw *parent = clk_hw_get_parent_by_index(hw, i);
+		unsigned long parent_rate = clk_hw_get_rate(parent);
+
+		max_div = BIT(md->hid_width) - 1;
+		for (div = 1; div < max_div; div++) {
+			parent_rate = mult_frac(req_rate, div, 2);
+			parent_rate = clk_hw_round_rate(parent, parent_rate);
+			actual_rate = mult_frac(parent_rate, 2, div);
+
+			if (is_better_rate(req_rate, best_rate, actual_rate)) {
+				best_rate = actual_rate;
+				req->rate = best_rate;
+				req->best_parent_rate = parent_rate;
+				req->best_parent_hw = parent;
+			}
+
+			if (actual_rate < req_rate || best_rate <= req_rate)
+				break;
+		}
+	}
+
+	if (!best_rate)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int __mux_div_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
+					 unsigned long prate, u32 src)
+{
+	struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+	int ret;
+	u32 div, max_div, best_src = 0, best_div = 0;
+	unsigned int i;
+	unsigned long actual_rate, best_rate = 0;
+
+	for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+		struct clk_hw *parent = clk_hw_get_parent_by_index(hw, i);
+		unsigned long parent_rate = clk_hw_get_rate(parent);
+
+		max_div = BIT(md->hid_width) - 1;
+		for (div = 1; div < max_div; div++) {
+			parent_rate = mult_frac(rate, div, 2);
+			parent_rate = clk_hw_round_rate(parent, parent_rate);
+			actual_rate = mult_frac(parent_rate, 2, div);
+
+			if (is_better_rate(rate, best_rate, actual_rate)) {
+				best_rate = actual_rate;
+				best_src = md->parent_map[i].cfg;
+				best_div = div - 1;
+			}
+
+			if (actual_rate < rate || best_rate <= rate)
+				break;
+		}
+	}
+
+	ret = __mux_div_set_src_div(md, best_src, best_div);
+	if (!ret) {
+		md->div = best_div;
+		md->src = best_src;
+	}
+
+	return ret;
+}
+
+static u8 mux_div_get_parent(struct clk_hw *hw)
+{
+	struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+	const char *name = clk_hw_get_name(hw);
+	u32 i, div, src = 0;
+
+	__mux_div_get_src_div(md, &src, &div);
+
+	for (i = 0; i < clk_hw_get_num_parents(hw); i++)
+		if (src == md->parent_map[i].cfg)
+			return i;
+
+	pr_err("%s: Can't find parent with src %d\n", name, src);
+	return 0;
+}
+
+static int mux_div_set_parent(struct clk_hw *hw, u8 index)
+{
+	struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+
+	return __mux_div_set_src_div(md, md->parent_map[index].cfg, md->div);
+}
+
+static int mux_div_set_rate(struct clk_hw *hw,
+			    unsigned long rate, unsigned long prate)
+{
+	struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+
+	return __mux_div_set_rate_and_parent(hw, rate, prate, md->src);
+}
+
+static int mux_div_set_rate_and_parent(struct clk_hw *hw,  unsigned long rate,
+				       unsigned long prate, u8 index)
+{
+	struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+
+	return __mux_div_set_rate_and_parent(hw, rate, prate,
+					     md->parent_map[index].cfg);
+}
+
+static unsigned long mux_div_recalc_rate(struct clk_hw *hw, unsigned long prate)
+{
+	struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+	u32 div = 0, src = 0;
+	int i, num_parents = clk_hw_get_num_parents(hw);
+	const char *name = clk_hw_get_name(hw);
+
+	__mux_div_get_src_div(md, &src, &div);
+	for (i = 0; i < num_parents; i++)
+		if (src == md->parent_map[i].cfg) {
+			struct clk_hw *p = clk_hw_get_parent_by_index(hw, i);
+			unsigned long parent_rate = clk_hw_get_rate(p);
+
+			return mult_frac(parent_rate, 2, div + 1);
+		}
+
+	pr_err("%s: Can't find parent %d\n", name, src);
+	return 0;
+}
+
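+/*
+ * On disable, park the RCG at the safe source/divider so that the main
+ * PLL can be reconfigured while this clock is formally off (see the
+ * safe_src/safe_div documentation in clk-regmap-mux-div.h).
+ */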
+static void mux_div_disable(struct clk_hw *hw)
+{
+	struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+
+	__mux_div_set_src_div(md, md->safe_src, md->safe_div);
+}
+
+const struct clk_ops clk_regmap_mux_div_ops = {
+	.enable = mux_div_enable,
+	.disable = mux_div_disable,
+	.get_parent = mux_div_get_parent,
+	.set_parent = mux_div_set_parent,
+	.set_rate = mux_div_set_rate,
+	.set_rate_and_parent = mux_div_set_rate_and_parent,
+	.determine_rate = mux_div_determine_rate,
+	.recalc_rate = mux_div_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(clk_regmap_mux_div_ops);
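+
+/*
+ * Usage sketch (hypothetical clock, not part of this patch): a clock
+ * provider embeds struct clk_regmap_mux_div and registers it with
+ * clk_regmap_mux_div_ops, e.g.:
+ *
+ *	static struct clk_regmap_mux_div foo_clk_src = {
+ *		.reg_offset = 0x0,
+ *		.hid_width = 5,
+ *		.hid_shift = 0,
+ *		.src_width = 3,
+ *		.src_shift = 8,
+ *		.safe_src = 1,
+ *		.safe_div = 1,
+ *		.parent_map = foo_parent_map,
+ *		.clkr.hw.init = &(struct clk_init_data){
+ *			.name = "foo_clk_src",
+ *			.parent_names = foo_parents,
+ *			.num_parents = ARRAY_SIZE(foo_parents),
+ *			.ops = &clk_regmap_mux_div_ops,
+ *		},
+ *	};
+ *
+ * and then registers &foo_clk_src.clkr with devm_clk_register_regmap().
+ */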
diff --git a/drivers/clk/qcom/clk-regmap-mux-div.h b/drivers/clk/qcom/clk-regmap-mux-div.h
new file mode 100644
index 0000000..6fac5c5
--- /dev/null
+++ b/drivers/clk/qcom/clk-regmap-mux-div.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CLK_REGMAP_MUX_DIV_H__
+#define __QCOM_CLK_REGMAP_MUX_DIV_H__
+
+#include <linux/clk-provider.h>
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+
+/**
+ * struct clk_regmap_mux_div - combined mux/divider clock
+ * @reg_offset: offset of the mux/divider register
+ * @hid_width:	number of bits in half integer divider
+ * @hid_shift:	lowest bit of hid value field
+ * @src_width:	number of bits in source select
+ * @src_shift:	lowest bit of source select field
+ * @div:	the divider raw configuration value
+ * @src:	the mux index which will be used if the clock is enabled
+ * @safe_src:	the safe source mux value we switch to while the main PLL
+ *		is reconfigured
+ * @safe_div:	the safe divider value that we set while the main PLL is
+ *		reconfigured
+ * @safe_freq:	When switching rates from A to B, the mux div clock will
+ *		instead switch from A -> safe_freq -> B. This allows the
+ *		mux_div clock to change rates while enabled, even if this
+ *		behavior is not supported by the parent clocks.
+ *		If changing the rate of parent A also causes the rate of
+ *		parent B to change, then safe_freq must be defined.
+ *		safe_freq is expected to have a source clock which is always
+ *		on and runs at only one rate.
+ * @parent_map:	pointer to parent_map struct
+ * @clkr:	handle between common and hardware-specific interfaces
+ */
+struct clk_regmap_mux_div {
+	u32				reg_offset;
+	u32				hid_width;
+	u32				hid_shift;
+	u32				src_width;
+	u32				src_shift;
+	u32				div;
+	u32				src;
+	u32				safe_src;
+	u32				safe_div;
+	unsigned long			safe_freq;
+	const struct parent_map		*parent_map;
+	struct clk_regmap		clkr;
+};
+
+extern const struct clk_ops clk_regmap_mux_div_ops;
+int __mux_div_set_src_div(struct clk_regmap_mux_div *md, u32 src, u32 div);
+
+#endif
diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
index 6acab9f..d6ecf12 100644
--- a/drivers/clk/qcom/dispcc-sdm845.c
+++ b/drivers/clk/qcom/dispcc-sdm845.c
@@ -992,8 +992,6 @@
 };
 
 static const struct qcom_reset_map disp_cc_sdm845_resets[] = {
-	[DISP_CC_MDSS_CORE_BCR] = { 0x2000 },
-	[DISP_CC_MDSS_GCC_CLOCKS_BCR] = { 0x4000 },
 	[DISP_CC_MDSS_RSCC_BCR] = { 0x5000 },
 };
 
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index 13de253..cd47e14 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -1240,6 +1240,8 @@
 static struct clk_branch gcc_aggre_ufs_card_axi_clk = {
 	.halt_reg = 0x82028,
 	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x82028,
+	.hwcg_bit = 1,
 	.clkr = {
 		.enable_reg = 0x82028,
 		.enable_mask = BIT(0),
@@ -1275,6 +1277,8 @@
 static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
 	.halt_reg = 0x82024,
 	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x82024,
+	.hwcg_bit = 1,
 	.clkr = {
 		.enable_reg = 0x82024,
 		.enable_mask = BIT(0),
@@ -1346,6 +1350,8 @@
 static struct clk_branch gcc_boot_rom_ahb_clk = {
 	.halt_reg = 0x38004,
 	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x38004,
+	.hwcg_bit = 1,
 	.clkr = {
 		.enable_reg = 0x52004,
 		.enable_mask = BIT(10),
@@ -1359,6 +1365,8 @@
 static struct clk_branch gcc_camera_ahb_clk = {
 	.halt_reg = 0xb008,
 	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0xb008,
+	.hwcg_bit = 1,
 	.clkr = {
 		.enable_reg = 0xb008,
 		.enable_mask = BIT(0),
@@ -1398,6 +1406,8 @@
 static struct clk_branch gcc_ce1_ahb_clk = {
 	.halt_reg = 0x4100c,
 	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x4100c,
+	.hwcg_bit = 1,
 	.clkr = {
 		.enable_reg = 0x52004,
 		.enable_mask = BIT(3),
@@ -1504,6 +1514,8 @@
 static struct clk_branch gcc_cpuss_gnoc_clk = {
 	.halt_reg = 0x48004,
 	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x48004,
+	.hwcg_bit = 1,
 	.clkr = {
 		.enable_reg = 0x52004,
 		.enable_mask = BIT(22),
@@ -1548,6 +1560,8 @@
 static struct clk_branch gcc_disp_ahb_clk = {
 	.halt_reg = 0xb00c,
 	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0xb00c,
+	.hwcg_bit = 1,
 	.clkr = {
 		.enable_reg = 0xb00c,
 		.enable_mask = BIT(0),
@@ -1675,6 +1689,8 @@
 static struct clk_branch gcc_gpu_cfg_ahb_clk = {
 	.halt_reg = 0x71004,
 	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x71004,
+	.hwcg_bit = 1,
 	.clkr = {
 		.enable_reg = 0x71004,
 		.enable_mask = BIT(0),
@@ -1774,6 +1790,8 @@
 static struct clk_branch gcc_mss_cfg_ahb_clk = {
 	.halt_reg = 0x8a000,
 	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x8a000,
+	.hwcg_bit = 1,
 	.clkr = {
 		.enable_reg = 0x8a000,
 		.enable_mask = BIT(0),
@@ -1799,6 +1817,8 @@
 static struct clk_branch gcc_mss_mfab_axis_clk = {
 	.halt_reg = 0x8a004,
 	.halt_check = BRANCH_VOTED,
+	.hwcg_reg = 0x8a004,
+	.hwcg_bit = 1,
 	.clkr = {
 		.enable_reg = 0x8a004,
 		.enable_mask = BIT(0),
@@ -1856,6 +1876,8 @@
 static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
 	.halt_reg = 0x6b018,
 	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x6b018,
+	.hwcg_bit = 1,
 	.clkr = {
 		.enable_reg = 0x5200c,
 		.enable_mask = BIT(2),
@@ -1907,6 +1929,8 @@
 static struct clk_branch gcc_pcie_0_slv_axi_clk = {
 	.halt_reg = 0x6b010,
 	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x6b010,
+	.hwcg_bit = 1,
 	.clkr = {
 		.enable_reg = 0x5200c,
 		.enable_mask = BIT(0),
@@ -1951,6 +1975,8 @@
 static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
 	.halt_reg = 0x8d018,
 	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x8d018,
+	.hwcg_bit = 1,
 	.clkr = {
 		.enable_reg = 0x52004,
 		.enable_mask = BIT(28),
@@ -2002,6 +2028,8 @@
 static struct clk_branch gcc_pcie_1_slv_axi_clk = {
 	.halt_reg = 0x8d010,
 	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x8d010,
+	.hwcg_bit = 1,
 	.clkr = {
 		.enable_reg = 0x52004,
 		.enable_mask = BIT(26),
@@ -2082,6 +2110,8 @@
 static struct clk_branch gcc_pdm_ahb_clk = {
 	.halt_reg = 0x33004,
 	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x33004,
+	.hwcg_bit = 1,
 	.clkr = {
 		.enable_reg = 0x33004,
 		.enable_mask = BIT(0),
@@ -2108,6 +2138,8 @@
 static struct clk_branch gcc_prng_ahb_clk = {
 	.halt_reg = 0x34004,
 	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x34004,
+	.hwcg_bit = 1,
 	.clkr = {
 		.enable_reg = 0x52004,
 		.enable_mask = BIT(13),
@@ -2121,6 +2153,8 @@
 static struct clk_branch gcc_qmip_camera_ahb_clk = {
 	.halt_reg = 0xb014,
 	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0xb014,
+	.hwcg_bit = 1,
 	.clkr = {
 		.enable_reg = 0xb014,
 		.enable_mask = BIT(0),
@@ -2134,6 +2168,8 @@
 static struct clk_branch gcc_qmip_disp_ahb_clk = {
 	.halt_reg = 0xb018,
 	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0xb018,
+	.hwcg_bit = 1,
 	.clkr = {
 		.enable_reg = 0xb018,
 		.enable_mask = BIT(0),
@@ -2147,6 +2183,8 @@
 static struct clk_branch gcc_qmip_video_ahb_clk = {
 	.halt_reg = 0xb010,
 	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0xb010,
+	.hwcg_bit = 1,
 	.clkr = {
 		.enable_reg = 0xb010,
 		.enable_mask = BIT(0),
@@ -2461,6 +2499,8 @@
 static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
 	.halt_reg = 0x17008,
 	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x17008,
+	.hwcg_bit = 1,
 	.clkr = {
 		.enable_reg = 0x5200c,
 		.enable_mask = BIT(7),
@@ -2487,6 +2527,8 @@
 static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
 	.halt_reg = 0x18010,
 	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x18010,
+	.hwcg_bit = 1,
 	.clkr = {
 		.enable_reg = 0x5200c,
 		.enable_mask = BIT(21),
@@ -2624,6 +2666,8 @@
 static struct clk_branch gcc_ufs_card_ahb_clk = {
 	.halt_reg = 0x75010,
 	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x75010,
+	.hwcg_bit = 1,
 	.clkr = {
 		.enable_reg = 0x75010,
 		.enable_mask = BIT(0),
@@ -2637,6 +2681,8 @@
 static struct clk_branch gcc_ufs_card_axi_clk = {
 	.halt_reg = 0x7500c,
 	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x7500c,
+	.hwcg_bit = 1,
 	.clkr = {
 		.enable_reg = 0x7500c,
 		.enable_mask = BIT(0),
@@ -2685,6 +2731,8 @@
 static struct clk_branch gcc_ufs_card_ice_core_clk = {
 	.halt_reg = 0x75058,
 	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x75058,
+	.hwcg_bit = 1,
 	.clkr = {
 		.enable_reg = 0x75058,
 		.enable_mask = BIT(0),
@@ -2720,6 +2768,8 @@
 static struct clk_branch gcc_ufs_card_phy_aux_clk = {
 	.halt_reg = 0x7508c,
 	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x7508c,
+	.hwcg_bit = 1,
 	.clkr = {
 		.enable_reg = 0x7508c,
 		.enable_mask = BIT(0),
@@ -2791,6 +2841,8 @@
 static struct clk_branch gcc_ufs_card_unipro_core_clk = {
 	.halt_reg = 0x75054,
 	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x75054,
+	.hwcg_bit = 1,
 	.clkr = {
 		.enable_reg = 0x75054,
 		.enable_mask = BIT(0),
@@ -2839,6 +2891,8 @@
 static struct clk_branch gcc_ufs_phy_ahb_clk = {
 	.halt_reg = 0x77010,
 	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x77010,
+	.hwcg_bit = 1,
 	.clkr = {
 		.enable_reg = 0x77010,
 		.enable_mask = BIT(0),
@@ -2852,6 +2906,8 @@
 static struct clk_branch gcc_ufs_phy_axi_clk = {
 	.halt_reg = 0x7700c,
 	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x7700c,
+	.hwcg_bit = 1,
 	.clkr = {
 		.enable_reg = 0x7700c,
 		.enable_mask = BIT(0),
@@ -2887,6 +2943,8 @@
 static struct clk_branch gcc_ufs_phy_ice_core_clk = {
 	.halt_reg = 0x77058,
 	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x77058,
+	.hwcg_bit = 1,
 	.clkr = {
 		.enable_reg = 0x77058,
 		.enable_mask = BIT(0),
@@ -2922,6 +2980,8 @@
 static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
 	.halt_reg = 0x7708c,
 	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x7708c,
+	.hwcg_bit = 1,
 	.clkr = {
 		.enable_reg = 0x7708c,
 		.enable_mask = BIT(0),
@@ -2993,6 +3053,8 @@
 static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
 	.halt_reg = 0x77054,
 	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x77054,
+	.hwcg_bit = 1,
 	.clkr = {
 		.enable_reg = 0x77054,
 		.enable_mask = BIT(0),
@@ -3248,6 +3310,8 @@
 static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
 	.halt_reg = 0x6a004,
 	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x6a004,
+	.hwcg_bit = 1,
 	.clkr = {
 		.enable_reg = 0x6a004,
 		.enable_mask = BIT(0),
@@ -3261,6 +3325,8 @@
 static struct clk_branch gcc_video_ahb_clk = {
 	.halt_reg = 0xb004,
 	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0xb004,
+	.hwcg_bit = 1,
 	.clkr = {
 		.enable_reg = 0xb004,
 		.enable_mask = BIT(0),
@@ -3500,7 +3566,6 @@
 };
 
 static const struct qcom_reset_map gcc_sdm845_resets[] = {
-	[GCC_GPU_BCR] = { 0x71000 },
 	[GCC_MMSS_BCR] = { 0xb000 },
 	[GCC_PCIE_0_BCR] = { 0x6b000 },
 	[GCC_PCIE_1_BCR] = { 0x8d000 },
diff --git a/drivers/clk/qcom/videocc-sdm845.c b/drivers/clk/qcom/videocc-sdm845.c
index 14a9cff..362ea0b 100644
--- a/drivers/clk/qcom/videocc-sdm845.c
+++ b/drivers/clk/qcom/videocc-sdm845.c
@@ -311,13 +311,6 @@
 	[VIDEO_PLL0] = &video_pll0.clkr,
 };
 
-static const struct qcom_reset_map video_cc_sdm845_resets[] = {
-	[VIDEO_CC_INTERFACE_BCR] = { 0x8f0 },
-	[VIDEO_CC_VCODEC0_BCR] = { 0x870 },
-	[VIDEO_CC_VCODEC1_BCR] = { 0x8b0 },
-	[VIDEO_CC_VENUS_BCR] = { 0x810 },
-};
-
 static const struct regmap_config video_cc_sdm845_regmap_config = {
 	.reg_bits	= 32,
 	.reg_stride	= 4,
@@ -330,8 +323,6 @@
 	.config = &video_cc_sdm845_regmap_config,
 	.clks = video_cc_sdm845_clocks,
 	.num_clks = ARRAY_SIZE(video_cc_sdm845_clocks),
-	.resets = video_cc_sdm845_resets,
-	.num_resets = ARRAY_SIZE(video_cc_sdm845_resets),
 };
 
 static const struct of_device_id video_cc_sdm845_match_table[] = {
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
index 56fbb94..b979fb9 100644
--- a/drivers/crypto/msm/qcrypto.c
+++ b/drivers/crypto/msm/qcrypto.c
@@ -439,6 +439,7 @@
 	u8 rfc4309_iv[QCRYPTO_MAX_IV_LENGTH];
 	unsigned int ivsize;
 	int  aead;
+	int  ccmtype;			/* default: 0, rfc4309: 1 */
 	struct scatterlist asg;		/* Formatted associated data sg  */
 	unsigned char *adata;		/* Pointer to formatted assoc data */
 	enum qce_cipher_alg_enum alg;
@@ -1936,9 +1937,8 @@
 	return 0;
 }
 
-static int qccrypto_set_aead_ccm_nonce(struct qce_req *qreq)
+static int qccrypto_set_aead_ccm_nonce(struct qce_req *qreq, uint32_t assoclen)
 {
-	struct aead_request *areq = (struct aead_request *) qreq->areq;
 	unsigned int i = ((unsigned int)qreq->iv[0]) + 1;
 
 	memcpy(&qreq->nonce[0], qreq->iv, qreq->ivsize);
@@ -1947,7 +1947,7 @@
 	 * NIST Special Publication 800-38C
 	 */
 	qreq->nonce[0] |= (8 * ((qreq->authsize - 2) / 2));
-	if (areq->assoclen)
+	if (assoclen)
 		qreq->nonce[0] |= 64;
 
 	if (i > MAX_NONCE)
@@ -2153,24 +2153,31 @@
 	qreq.flags = cipher_ctx->flags;
 
 	if (qreq.mode == QCE_MODE_CCM) {
+		uint32_t assoclen;
+
 		if (qreq.dir == QCE_ENCRYPT)
 			qreq.cryptlen = req->cryptlen;
 		else
 			qreq.cryptlen = req->cryptlen -
 						qreq.authsize;
+
+		/* For RFC 4309 CCM, assoclen includes the 8-byte IV; strip it */
+		assoclen = req->assoclen;
+		if (rctx->ccmtype)
+			assoclen -= 8;
 		/* Get NONCE */
-		ret = qccrypto_set_aead_ccm_nonce(&qreq);
+		ret = qccrypto_set_aead_ccm_nonce(&qreq, assoclen);
 		if (ret)
 			return ret;
 
-		if (req->assoclen) {
-			rctx->adata = kzalloc((req->assoclen + 0x64),
+		if (assoclen) {
+			rctx->adata = kzalloc((assoclen + 0x64),
 								GFP_ATOMIC);
 			if (!rctx->adata)
 				return -ENOMEM;
 			/* Format Associated data    */
 			ret = qcrypto_aead_ccm_format_adata(&qreq,
-						req->assoclen,
+						assoclen,
 						req->src,
 						rctx->adata);
 		} else {
@@ -2633,6 +2640,7 @@
 	rctx->dir = QCE_ENCRYPT;
 	rctx->mode = QCE_MODE_CCM;
 	rctx->iv = req->iv;
+	rctx->ccmtype = 0;
 
 	pstat->aead_ccm_aes_enc++;
 	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
@@ -2647,6 +2655,8 @@
 
 	pstat = &_qcrypto_stat;
 
+	if (req->assoclen != 16 && req->assoclen != 20)
+		return -EINVAL;
 	rctx = aead_request_ctx(req);
 	rctx->aead = 1;
 	rctx->alg = CIPHER_ALG_AES;
@@ -2656,6 +2666,7 @@
 	rctx->rfc4309_iv[0] = 3; /* L -1 */
 	memcpy(&rctx->rfc4309_iv[1], ctx->ccm4309_nonce, 3);
 	memcpy(&rctx->rfc4309_iv[4], req->iv, 8);
+	rctx->ccmtype = 1;
 	rctx->iv = rctx->rfc4309_iv;
 	pstat->aead_rfc4309_ccm_aes_enc++;
 	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
@@ -2963,6 +2974,7 @@
 	rctx->dir = QCE_DECRYPT;
 	rctx->mode = QCE_MODE_CCM;
 	rctx->iv = req->iv;
+	rctx->ccmtype = 0;
 
 	pstat->aead_ccm_aes_dec++;
 	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
@@ -2976,6 +2988,8 @@
 	struct crypto_stat *pstat;
 
 	pstat = &_qcrypto_stat;
+	if (req->assoclen != 16 && req->assoclen != 20)
+		return -EINVAL;
 	rctx = aead_request_ctx(req);
 	rctx->aead = 1;
 	rctx->alg = CIPHER_ALG_AES;
@@ -2985,6 +2999,7 @@
 	rctx->rfc4309_iv[0] = 3; /* L -1 */
 	memcpy(&rctx->rfc4309_iv[1], ctx->ccm4309_nonce, 3);
 	memcpy(&rctx->rfc4309_iv[4], req->iv, 8);
+	rctx->ccmtype = 1;
 	rctx->iv = rctx->rfc4309_iv;
 	pstat->aead_rfc4309_ccm_aes_dec++;
 	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
@@ -4274,7 +4289,7 @@
 };
 EXPORT_SYMBOL(qcrypto_cipher_set_device);
 
-int qcrypto_cipher_set_device_hw(struct ablkcipher_request *req, u32 dev,
+int qcrypto_cipher_set_device_hw(struct skcipher_request *req, u32 dev,
 			u32 hw_inst)
 {
 	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
@@ -4320,7 +4335,7 @@
 };
 EXPORT_SYMBOL(qcrypto_ahash_set_device);
 
-int qcrypto_cipher_set_flag(struct ablkcipher_request *req, unsigned int flags)
+int qcrypto_cipher_set_flag(struct skcipher_request *req, unsigned int flags)
 {
 	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
 	struct crypto_priv *cp = ctx->cp;
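
A note on the qcrypto changes above: for RFC 4309, req->assoclen includes the 8-byte IV in addition to the real associated data, which is why the entry points accept only 16 or 20 and the CCM path subtracts 8 before building the nonce. The flags byte ORed into nonce[0] follows NIST SP 800-38C. A standalone illustration of that byte; the values are examples, not from the patch:

#include <stdio.h>

static unsigned char ccm_b0_flags(unsigned int authsize,
				  unsigned int assoclen,
				  unsigned int L)
{
	unsigned char flags = (unsigned char)(L - 1);	/* q - 1, e.g. 3 for rfc4309 */

	flags |= 8 * ((authsize - 2) / 2);		/* encoded tag length */
	if (assoclen)
		flags |= 64;				/* Adata bit */
	return flags;
}

int main(void)
{
	/* rfc4309: assoclen of 20 minus the 8-byte IV leaves 12 bytes of AAD */
	printf("flags=0x%02x\n", ccm_b0_flags(16, 20 - 8, 4));
	return 0;
}
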
diff --git a/drivers/devfreq/devfreq_simple_dev.c b/drivers/devfreq/devfreq_simple_dev.c
index 9c99fcf..b0757b6 100644
--- a/drivers/devfreq/devfreq_simple_dev.c
+++ b/drivers/devfreq/devfreq_simple_dev.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -34,6 +34,7 @@
 	struct clk *clk;
 	struct devfreq *df;
 	struct devfreq_dev_profile profile;
+	bool freq_in_khz;
 };
 
 static void find_freq(struct devfreq_dev_profile *p, unsigned long *freq,
@@ -65,7 +66,7 @@
 
 	find_freq(&d->profile, freq, flags);
 
-	rfreq = clk_round_rate(d->clk, *freq * 1000);
+	rfreq = clk_round_rate(d->clk, d->freq_in_khz ? *freq * 1000 : *freq);
 	if (IS_ERR_VALUE(rfreq)) {
 		dev_err(dev, "devfreq: Cannot find matching frequency for %lu\n",
 			*freq);
@@ -83,39 +84,30 @@
 	f = clk_get_rate(d->clk);
 	if (IS_ERR_VALUE(f))
 		return f;
-	*freq = f / 1000;
+	*freq = d->freq_in_khz ? f / 1000 : f;
 	return 0;
 }
 
 #define PROP_TBL "freq-tbl-khz"
-static int devfreq_clock_probe(struct platform_device *pdev)
+static int parse_freq_table(struct device *dev, struct dev_data *d)
 {
-	struct device *dev = &pdev->dev;
-	struct dev_data *d;
-	struct devfreq_dev_profile *p;
-	u32 *data, poll;
-	const char *gov_name;
+	struct devfreq_dev_profile *p = &d->profile;
 	int ret, len, i, j;
+	u32 *data;
 	unsigned long f;
 
-	d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
-	if (!d)
-		return -ENOMEM;
-	platform_set_drvdata(pdev, d);
+	if (!of_find_property(dev->of_node, PROP_TBL, &len)) {
+		if (dev_pm_opp_get_opp_count(dev) <= 0)
+			return -EPROBE_DEFER;
+		return 0;
+	}
 
-	d->clk = devm_clk_get(dev, "devfreq_clk");
-	if (IS_ERR(d->clk))
-		return PTR_ERR(d->clk);
-
-	if (!of_find_property(dev->of_node, PROP_TBL, &len))
-		return -EINVAL;
-
+	d->freq_in_khz = true;
 	len /= sizeof(*data);
 	data = devm_kzalloc(dev, len * sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 
-	p = &d->profile;
 	p->freq_table = devm_kzalloc(dev, len * sizeof(*p->freq_table),
 				     GFP_KERNEL);
 	if (!p->freq_table)
@@ -142,6 +134,32 @@
 		return -EINVAL;
 	}
 
+	return 0;
+}
+
+static int devfreq_clock_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct dev_data *d;
+	struct devfreq_dev_profile *p;
+	u32 poll;
+	const char *gov_name;
+	int ret;
+
+	d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, d);
+
+	d->clk = devm_clk_get(dev, "devfreq_clk");
+	if (IS_ERR(d->clk))
+		return PTR_ERR(d->clk);
+
+	ret = parse_freq_table(dev, d);
+	if (ret)
+		return ret;
+
+	p = &d->profile;
 	p->target = dev_target;
 	p->get_cur_freq = dev_get_cur_freq;
 	ret = dev_get_cur_freq(dev, &p->initial_freq);
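
The devfreq refactor above moves freq-tbl-khz parsing into parse_freq_table() and records whether frequencies are in kHz; when the property is absent, the driver falls back to an OPP table (deferring probe until one exists) and frequencies stay in Hz. A small sketch of the resulting unit handling around the clk API, which always takes Hz; the helper name is illustrative:

#include <linux/clk.h>
#include <linux/types.h>

static long example_round_rate(struct clk *clk, unsigned long freq,
			       bool freq_in_khz)
{
	/* the clk framework works in Hz, so scale only the legacy kHz case */
	return clk_round_rate(clk, freq_in_khz ? freq * 1000 : freq);
}
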
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 747d9a6..d4a270e 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -304,7 +304,8 @@
 	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
 	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
 
-	kthread_queue_work(&priv->disp_thread[crtc_id].worker, &vbl_ctrl->work);
+	kthread_queue_work(&priv->event_thread[crtc_id].worker,
+			&vbl_ctrl->work);
 
 	return 0;
 }
@@ -330,13 +331,19 @@
 		kfree(vbl_ev);
 	}
 
-	/* clean up display commit worker threads */
+	/* clean up display commit/event worker threads */
 	for (i = 0; i < priv->num_crtcs; i++) {
 		if (priv->disp_thread[i].thread) {
 			kthread_flush_worker(&priv->disp_thread[i].worker);
 			kthread_stop(priv->disp_thread[i].thread);
 			priv->disp_thread[i].thread = NULL;
 		}
+
+		if (priv->event_thread[i].thread) {
+			kthread_flush_worker(&priv->event_thread[i].worker);
+			kthread_stop(priv->event_thread[i].thread);
+			priv->event_thread[i].thread = NULL;
+		}
 	}
 
 	msm_gem_shrinker_cleanup(ddev);
@@ -637,22 +644,50 @@
 	ddev->mode_config.funcs = &mode_config_funcs;
 
 	for (i = 0; i < priv->num_crtcs; i++) {
+
+		/* initialize display thread */
 		priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
 		kthread_init_worker(&priv->disp_thread[i].worker);
 		priv->disp_thread[i].dev = ddev;
 		priv->disp_thread[i].thread =
 			kthread_run(kthread_worker_fn,
 				&priv->disp_thread[i].worker,
-				"crtc_commit:%d",
-				priv->disp_thread[i].crtc_id);
+				"crtc_commit:%d", priv->disp_thread[i].crtc_id);
 
 		if (IS_ERR(priv->disp_thread[i].thread)) {
-			dev_err(dev, "failed to create kthread\n");
+			dev_err(dev, "failed to create crtc_commit kthread\n");
 			priv->disp_thread[i].thread = NULL;
+		}
+
+		/* initialize event thread */
+		priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
+		kthread_init_worker(&priv->event_thread[i].worker);
+		priv->event_thread[i].dev = ddev;
+		priv->event_thread[i].thread =
+			kthread_run(kthread_worker_fn,
+				&priv->event_thread[i].worker,
+				"crtc_event:%d", priv->event_thread[i].crtc_id);
+
+		if (IS_ERR(priv->event_thread[i].thread)) {
+			dev_err(dev, "failed to create crtc_event kthread\n");
+			priv->event_thread[i].thread = NULL;
+		}
+
+		if ((!priv->disp_thread[i].thread) ||
+				!priv->event_thread[i].thread) {
 			/* clean up previously created threads if any */
-			for (i -= 1; i >= 0; i--) {
-				kthread_stop(priv->disp_thread[i].thread);
-				priv->disp_thread[i].thread = NULL;
+			for ( ; i >= 0; i--) {
+				if (priv->disp_thread[i].thread) {
+					kthread_stop(
+						priv->disp_thread[i].thread);
+					priv->disp_thread[i].thread = NULL;
+				}
+
+				if (priv->event_thread[i].thread) {
+					kthread_stop(
+						priv->event_thread[i].thread);
+					priv->event_thread[i].thread = NULL;
+				}
 			}
 			goto fail;
 		}
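
msm_drv now runs two kthread workers per CRTC, one for commits and one for events, and tears both down on unload. The lifecycle is the standard kthread_worker pattern; a minimal self-contained sketch with illustrative names:

#include <linux/err.h>
#include <linux/kthread.h>

static struct kthread_worker example_worker;
static struct task_struct *example_thread;

static int example_start(void)
{
	kthread_init_worker(&example_worker);
	example_thread = kthread_run(kthread_worker_fn, &example_worker,
				     "crtc_event:%d", 0);
	return PTR_ERR_OR_ZERO(example_thread);
}

static void example_stop(void)
{
	kthread_flush_worker(&example_worker);	/* drain queued work first */
	kthread_stop(example_thread);		/* then stop the thread */
}
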
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 77dde55..fdf9b1f 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -136,8 +136,10 @@
 	CRTC_PROP_CORE_CLK,
 	CRTC_PROP_CORE_AB,
 	CRTC_PROP_CORE_IB,
-	CRTC_PROP_MEM_AB,
-	CRTC_PROP_MEM_IB,
+	CRTC_PROP_LLCC_AB,
+	CRTC_PROP_LLCC_IB,
+	CRTC_PROP_DRAM_AB,
+	CRTC_PROP_DRAM_IB,
 	CRTC_PROP_ROT_PREFILL_BW,
 	CRTC_PROP_ROT_CLK,
 	CRTC_PROP_ROI_V1,
@@ -471,8 +473,8 @@
 	u8 data[];
 };
 
-/* Commit thread specific structure */
-struct msm_drm_commit {
+/* Commit/Event thread specific structure */
+struct msm_drm_thread {
 	struct drm_device *dev;
 	struct task_struct *thread;
 	unsigned int crtc_id;
@@ -536,7 +538,8 @@
 	unsigned int num_crtcs;
 	struct drm_crtc *crtcs[MAX_CRTCS];
 
-	struct msm_drm_commit disp_thread[MAX_CRTCS];
+	struct msm_drm_thread disp_thread[MAX_CRTCS];
+	struct msm_drm_thread event_thread[MAX_CRTCS];
 
 	unsigned int num_encoders;
 	struct drm_encoder *encoders[MAX_ENCODERS];
diff --git a/drivers/gpu/drm/msm/sde/sde_core_perf.c b/drivers/gpu/drm/msm/sde/sde_core_perf.c
index b1f8b0f..71dfc12 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_perf.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_perf.c
@@ -110,6 +110,7 @@
 		struct sde_core_perf_params *perf)
 {
 	struct sde_crtc_state *sde_cstate;
+	int i;
 
 	if (!kms || !kms->catalog || !crtc || !state || !perf) {
 		SDE_ERROR("invalid parameters\n");
@@ -119,29 +120,64 @@
 	sde_cstate = to_sde_crtc_state(state);
 	memset(perf, 0, sizeof(struct sde_core_perf_params));
 
-	perf->bw_ctl = sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_AB);
-	perf->max_per_pipe_ib =
+	perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_MNOC] =
+		sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_AB);
+	perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_MNOC] =
+		sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_IB);
+
+	if (sde_cstate->bw_split_vote) {
+		perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_LLCC] =
+			sde_crtc_get_property(sde_cstate, CRTC_PROP_LLCC_AB);
+		perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_LLCC] =
+			sde_crtc_get_property(sde_cstate, CRTC_PROP_LLCC_IB);
+		perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_EBI] =
+			sde_crtc_get_property(sde_cstate, CRTC_PROP_DRAM_AB);
+		perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_EBI] =
+			sde_crtc_get_property(sde_cstate, CRTC_PROP_DRAM_IB);
+	} else {
+		perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_LLCC] =
+			sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_AB);
+		perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_LLCC] =
 			sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_IB);
+		perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_EBI] =
+			sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_AB);
+		perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_EBI] =
+			sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_IB);
+	}
+
 	perf->core_clk_rate =
 			sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_CLK);
 
 	if (!sde_cstate->bw_control) {
-		perf->bw_ctl = kms->catalog->perf.max_bw_high * 1000ULL;
-		perf->max_per_pipe_ib = perf->bw_ctl;
+		for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
+			perf->bw_ctl[i] = kms->catalog->perf.max_bw_high *
+					1000ULL;
+			perf->max_per_pipe_ib[i] = perf->bw_ctl[i];
+		}
 		perf->core_clk_rate = kms->perf.max_core_clk_rate;
 	} else if (kms->perf.perf_tune.mode == SDE_PERF_MODE_MINIMUM) {
-		perf->bw_ctl = 0;
-		perf->max_per_pipe_ib = 0;
+		for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
+			perf->bw_ctl[i] = 0;
+			perf->max_per_pipe_ib[i] = 0;
+		}
 		perf->core_clk_rate = 0;
 	} else if (kms->perf.perf_tune.mode == SDE_PERF_MODE_FIXED) {
-		perf->bw_ctl = kms->perf.fix_core_ab_vote;
-		perf->max_per_pipe_ib = kms->perf.fix_core_ib_vote;
+		for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
+			perf->bw_ctl[i] = kms->perf.fix_core_ab_vote;
+			perf->max_per_pipe_ib[i] = kms->perf.fix_core_ib_vote;
+		}
 		perf->core_clk_rate = kms->perf.fix_core_clk_rate;
 	}
 
-	SDE_DEBUG("crtc=%d clk_rate=%llu ib=%llu ab=%llu\n",
+	SDE_DEBUG(
+		"crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu llcc_ib=%llu llcc_ab=%llu mem_ib=%llu mem_ab=%llu\n",
 			crtc->base.id, perf->core_clk_rate,
-			perf->max_per_pipe_ib, perf->bw_ctl);
+			perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_MNOC],
+			perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_MNOC],
+			perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_LLCC],
+			perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_LLCC],
+			perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_EBI],
+			perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_EBI]);
 }
 
 int sde_core_perf_crtc_check(struct drm_crtc *crtc,
@@ -154,6 +190,7 @@
 	struct sde_crtc_state *sde_cstate;
 	struct drm_crtc *tmp_crtc;
 	struct sde_kms *kms;
+	int i;
 
 	if (!crtc || !state) {
 		SDE_ERROR("invalid crtc\n");
@@ -175,39 +212,46 @@
 	/* obtain new values */
 	_sde_core_perf_calc_crtc(kms, crtc, state, &sde_cstate->new_perf);
 
-	bw_sum_of_intfs = sde_cstate->new_perf.bw_ctl;
-	curr_client_type = sde_crtc_get_client_type(crtc);
+	for (i = SDE_POWER_HANDLE_DBUS_ID_MNOC;
+			i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
+		bw_sum_of_intfs = sde_cstate->new_perf.bw_ctl[i];
+		curr_client_type = sde_crtc_get_client_type(crtc);
 
-	drm_for_each_crtc(tmp_crtc, crtc->dev) {
-		if (_sde_core_perf_crtc_is_power_on(tmp_crtc) &&
-		    (sde_crtc_get_client_type(tmp_crtc) == curr_client_type) &&
-		    (tmp_crtc != crtc)) {
-			struct sde_crtc_state *tmp_cstate =
+		drm_for_each_crtc(tmp_crtc, crtc->dev) {
+			if (_sde_core_perf_crtc_is_power_on(tmp_crtc) &&
+			    (sde_crtc_get_client_type(tmp_crtc) ==
+					    curr_client_type) &&
+			    (tmp_crtc != crtc)) {
+				struct sde_crtc_state *tmp_cstate =
 					to_sde_crtc_state(tmp_crtc->state);
 
-			bw_sum_of_intfs += tmp_cstate->new_perf.bw_ctl;
+				bw_sum_of_intfs +=
+					tmp_cstate->new_perf.bw_ctl[i];
+			}
 		}
-	}
 
-	/* convert bandwidth to kb */
-	bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
-	SDE_DEBUG("calculated bandwidth=%uk\n", bw);
+		/* convert bandwidth to kb */
+		bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
+		SDE_DEBUG("calculated bandwidth=%uk\n", bw);
 
-	is_video_mode = sde_crtc_get_intf_mode(crtc) == INTF_MODE_VIDEO;
-	threshold = (is_video_mode ||
-		_sde_core_video_mode_intf_connected(crtc)) ?
-		kms->catalog->perf.max_bw_low : kms->catalog->perf.max_bw_high;
+		is_video_mode = sde_crtc_get_intf_mode(crtc) == INTF_MODE_VIDEO;
+		threshold = (is_video_mode ||
+			_sde_core_video_mode_intf_connected(crtc)) ?
+			kms->catalog->perf.max_bw_low :
+			kms->catalog->perf.max_bw_high;
 
-	SDE_DEBUG("final threshold bw limit = %d\n", threshold);
+		SDE_DEBUG("final threshold bw limit = %d\n", threshold);
 
-	if (!sde_cstate->bw_control) {
-		SDE_DEBUG("bypass bandwidth check\n");
-	} else if (!threshold) {
-		SDE_ERROR("no bandwidth limits specified\n");
-		return -E2BIG;
-	} else if (bw > threshold) {
-		SDE_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw, threshold);
-		return -E2BIG;
+		if (!sde_cstate->bw_control) {
+			SDE_DEBUG("bypass bandwidth check\n");
+		} else if (!threshold) {
+			SDE_ERROR("no bandwidth limits specified\n");
+			return -E2BIG;
+		} else if (bw > threshold) {
+			SDE_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw,
+					threshold);
+			return -E2BIG;
+		}
 	}
 
 	return 0;
@@ -240,10 +284,10 @@
 }
 
 static void _sde_core_perf_crtc_update_bus(struct sde_kms *kms,
-		struct drm_crtc *crtc)
+		struct drm_crtc *crtc, u32 bus_id)
 {
 	u64 bw_sum_of_intfs = 0, bus_ab_quota, bus_ib_quota;
-	struct sde_core_perf_params perf = {0};
+	struct sde_core_perf_params perf = { { 0 } };
 	enum sde_crtc_client_type client_vote, curr_client_type
 					= sde_crtc_get_client_type(crtc);
 	struct drm_crtc *tmp_crtc;
@@ -256,19 +300,20 @@
 								&kms->perf)) {
 			sde_cstate = to_sde_crtc_state(tmp_crtc->state);
 
-			perf.max_per_pipe_ib = max(perf.max_per_pipe_ib,
-				sde_cstate->new_perf.max_per_pipe_ib);
+			perf.max_per_pipe_ib[bus_id] =
+				max(perf.max_per_pipe_ib[bus_id],
+				sde_cstate->new_perf.max_per_pipe_ib[bus_id]);
 
-			bw_sum_of_intfs += sde_cstate->new_perf.bw_ctl;
+			bw_sum_of_intfs += sde_cstate->new_perf.bw_ctl[bus_id];
 
-			SDE_DEBUG("crtc=%d bw=%llu\n",
-				tmp_crtc->base.id,
-				sde_cstate->new_perf.bw_ctl);
+			SDE_DEBUG("crtc=%d bus_id=%d bw=%llu\n",
+				tmp_crtc->base.id, bus_id,
+				sde_cstate->new_perf.bw_ctl[bus_id]);
 		}
 	}
 
 	bus_ab_quota = max(bw_sum_of_intfs, kms->perf.perf_tune.min_bus_vote);
-	bus_ib_quota = perf.max_per_pipe_ib;
+	bus_ib_quota = perf.max_per_pipe_ib[bus_id];
 
 	if (kms->perf.perf_tune.mode == SDE_PERF_MODE_FIXED) {
 		bus_ab_quota = kms->perf.fix_core_ab_vote;
@@ -280,25 +325,25 @@
 	case NRT_CLIENT:
 		sde_power_data_bus_set_quota(&priv->phandle, kms->core_client,
 				SDE_POWER_HANDLE_DATA_BUS_CLIENT_NRT,
-				bus_ab_quota, bus_ib_quota);
-		SDE_DEBUG("client:%s ab=%llu ib=%llu\n", "nrt",
-				bus_ab_quota, bus_ib_quota);
+				bus_id, bus_ab_quota, bus_ib_quota);
+		SDE_DEBUG("client:%s bus_id=%d ab=%llu ib=%llu\n", "nrt",
+				bus_id, bus_ab_quota, bus_ib_quota);
 		break;
 
 	case RT_CLIENT:
 		sde_power_data_bus_set_quota(&priv->phandle, kms->core_client,
 				SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT,
-				bus_ab_quota, bus_ib_quota);
-		SDE_DEBUG("client:%s ab=%llu ib=%llu\n", "rt",
-				bus_ab_quota, bus_ib_quota);
+				bus_id, bus_ab_quota, bus_ib_quota);
+		SDE_DEBUG("client:%s bus_id=%d ab=%llu ib=%llu\n", "rt",
+				bus_id, bus_ab_quota, bus_ib_quota);
 		break;
 
 	case RT_RSC_CLIENT:
 		sde_cstate = to_sde_crtc_state(crtc->state);
-		sde_rsc_client_vote(sde_cstate->rsc_client, bus_ab_quota,
-					bus_ib_quota);
-		SDE_DEBUG("client:%s ab=%llu ib=%llu\n", "rt_rsc",
-				bus_ab_quota, bus_ib_quota);
+		sde_rsc_client_vote(sde_cstate->rsc_client,
+				bus_id, bus_ab_quota, bus_ib_quota);
+		SDE_DEBUG("client:%s bus_id=%d ab=%llu ib=%llu\n", "rt_rsc",
+				bus_id, bus_ab_quota, bus_ib_quota);
 		break;
 
 	default:
@@ -311,10 +356,12 @@
 		case DISP_RSC_MODE:
 			sde_power_data_bus_set_quota(&priv->phandle,
 				kms->core_client,
-				SDE_POWER_HANDLE_DATA_BUS_CLIENT_NRT, 0, 0);
+				SDE_POWER_HANDLE_DATA_BUS_CLIENT_NRT,
+				bus_id, 0, 0);
 			sde_power_data_bus_set_quota(&priv->phandle,
 				kms->core_client,
-				SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT, 0, 0);
+				SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT,
+				bus_id, 0, 0);
 			kms->perf.bw_vote_mode_updated = false;
 			break;
 
@@ -322,7 +369,7 @@
 			sde_cstate = to_sde_crtc_state(crtc->state);
 			if (sde_cstate->rsc_client) {
 				sde_rsc_client_vote(sde_cstate->rsc_client,
-									0, 0);
+								bus_id, 0, 0);
 				kms->perf.bw_vote_mode_updated = false;
 			}
 			break;
@@ -347,6 +394,7 @@
 	struct sde_crtc *sde_crtc;
 	struct sde_crtc_state *sde_cstate;
 	struct sde_kms *kms;
+	int i;
 
 	if (!crtc) {
 		SDE_ERROR("invalid crtc\n");
@@ -382,9 +430,11 @@
 	/* Release the bandwidth */
 	if (kms->perf.enable_bw_release) {
 		trace_sde_cmd_release_bw(crtc->base.id);
-		sde_crtc->cur_perf.bw_ctl = 0;
 		SDE_DEBUG("Release BW crtc=%d\n", crtc->base.id);
-		_sde_core_perf_crtc_update_bus(kms, crtc);
+		for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
+			sde_crtc->cur_perf.bw_ctl[i] = 0;
+			_sde_core_perf_crtc_update_bus(kms, crtc, i);
+		}
 	}
 }
 
@@ -419,7 +469,7 @@
 	u64 clk_rate = 0;
 	struct sde_crtc *sde_crtc;
 	struct sde_crtc_state *sde_cstate;
-	int ret;
+	int ret, i;
 	struct msm_drm_private *priv;
 	struct sde_kms *kms;
 
@@ -449,38 +499,52 @@
 	new = &sde_cstate->new_perf;
 
 	if (_sde_core_perf_crtc_is_power_on(crtc) && !stop_req) {
-		/*
-		 * cases for bus bandwidth update.
-		 * 1. new bandwidth vote - "ab or ib vote" is higher
-		 *    than current vote for update request.
-		 * 2. new bandwidth vote - "ab or ib vote" is lower
-		 *    than current vote at end of commit or stop.
-		 */
-		if ((params_changed && ((new->bw_ctl > old->bw_ctl) ||
-			  (new->max_per_pipe_ib > old->max_per_pipe_ib))) ||
-		    (!params_changed && ((new->bw_ctl < old->bw_ctl) ||
-			  (new->max_per_pipe_ib < old->max_per_pipe_ib)))) {
-			SDE_DEBUG("crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
-				crtc->base.id, params_changed, new->bw_ctl,
-				old->bw_ctl);
-			old->bw_ctl = new->bw_ctl;
-			old->max_per_pipe_ib = new->max_per_pipe_ib;
-			update_bus = 1;
-		}
+		for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
+			/*
+			 * cases for bus bandwidth update.
+			 * 1. new bandwidth vote - "ab or ib vote" is higher
+			 *    than current vote for update request.
+			 * 2. new bandwidth vote - "ab or ib vote" is lower
+			 *    than current vote at end of commit or stop.
+			 */
+			if ((params_changed && ((new->bw_ctl[i] >
+						old->bw_ctl[i]) ||
+				  (new->max_per_pipe_ib[i] >
+						old->max_per_pipe_ib[i]))) ||
+			    (!params_changed && ((new->bw_ctl[i] <
+						old->bw_ctl[i]) ||
+				  (new->max_per_pipe_ib[i] <
+						old->max_per_pipe_ib[i])))) {
+				SDE_DEBUG(
+					"crtc=%d p=%d new_bw=%llu, old_bw=%llu\n",
+					crtc->base.id, params_changed,
+					new->bw_ctl[i], old->bw_ctl[i]);
+				old->bw_ctl[i] = new->bw_ctl[i];
+				old->max_per_pipe_ib[i] =
+						new->max_per_pipe_ib[i];
+				update_bus |= BIT(i);
+			}
 
-		/* display rsc override during solver mode */
-		if (kms->perf.bw_vote_mode == DISP_RSC_MODE &&
+			/* display rsc override during solver mode */
+			if (kms->perf.bw_vote_mode == DISP_RSC_MODE &&
 				get_sde_rsc_current_state(SDE_RSC_INDEX) ==
-							    SDE_RSC_CMD_STATE) {
-			/* update new bandwdith in all cases */
-			if (params_changed && ((new->bw_ctl != old->bw_ctl) ||
-			      (new->max_per_pipe_ib != old->max_per_pipe_ib))) {
-				old->bw_ctl = new->bw_ctl;
-				old->max_per_pipe_ib = new->max_per_pipe_ib;
-				update_bus = 1;
-			/* reduce bw vote is not required in solver mode */
-			} else if (!params_changed) {
-				update_bus = 0;
+						SDE_RSC_CMD_STATE) {
+				/* update new bandwidth in all cases */
+				if (params_changed && ((new->bw_ctl[i] !=
+						old->bw_ctl[i]) ||
+				      (new->max_per_pipe_ib[i] !=
+						old->max_per_pipe_ib[i]))) {
+					old->bw_ctl[i] = new->bw_ctl[i];
+					old->max_per_pipe_ib[i] =
+							new->max_per_pipe_ib[i];
+					update_bus |= BIT(i);
+				/*
+				 * reducing the bw vote is not required in
+				 * solver mode
+				 */
+				} else if (!params_changed) {
+					update_bus &= ~BIT(i);
+				}
 			}
 		}
 
@@ -495,15 +559,20 @@
 		SDE_DEBUG("crtc=%d disable\n", crtc->base.id);
 		memset(old, 0, sizeof(*old));
 		memset(new, 0, sizeof(*new));
-		update_bus = 1;
+		update_bus = ~0;
 		update_clk = 1;
 	}
-	trace_sde_perf_crtc_update(crtc->base.id, new->bw_ctl,
+	trace_sde_perf_crtc_update(crtc->base.id,
+				new->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_MNOC],
+				new->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_LLCC],
+				new->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_EBI],
 				new->core_clk_rate, stop_req,
 				update_bus, update_clk);
 
-	if (update_bus)
-		_sde_core_perf_crtc_update_bus(kms, crtc);
+	for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
+		if (update_bus & BIT(i))
+			_sde_core_perf_crtc_update_bus(kms, crtc, i);
+	}
 
 	/*
 	 * Update the clock after bandwidth vote to ensure
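
In sde_core_perf, update_bus grows from a boolean into a per-bus bitmask: BIT(i) is set when bus i's vote changed, cleared for the solver-mode skip, and tested once per bus before voting. A standalone sketch of the bookkeeping; BIT() is redefined here only so the example runs outside the kernel:

#include <stdio.h>

#define BIT(n) (1U << (n))
enum { BUS_MNOC, BUS_LLCC, BUS_EBI, BUS_MAX };

int main(void)
{
	unsigned int update_bus = 0;
	int i;

	update_bus |= BIT(BUS_LLCC);	/* only the LLCC vote changed */
	for (i = 0; i < BUS_MAX; i++)
		if (update_bus & BIT(i))
			printf("vote bus %d\n", i);
	return 0;
}
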
diff --git a/drivers/gpu/drm/msm/sde/sde_core_perf.h b/drivers/gpu/drm/msm/sde/sde_core_perf.h
index 4a1bdad..589415c 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_perf.h
+++ b/drivers/gpu/drm/msm/sde/sde_core_perf.h
@@ -30,8 +30,8 @@
  * @core_clk_rate: core clock rate request
  */
 struct sde_core_perf_params {
-	u64 max_per_pipe_ib;
-	u64 bw_ctl;
+	u64 max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_MAX];
+	u64 bw_ctl[SDE_POWER_HANDLE_DBUS_ID_MAX];
 	u64 core_clk_rate;
 };
 
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index e708290..30bb72b 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -494,12 +494,6 @@
 {
 	if (!sde_crtc)
 		return;
-
-	if (sde_crtc->event_thread) {
-		kthread_flush_worker(&sde_crtc->event_worker);
-		kthread_stop(sde_crtc->event_thread);
-		sde_crtc->event_thread = NULL;
-	}
 }
 
 static void sde_crtc_destroy(struct drm_crtc *crtc)
@@ -1516,8 +1510,8 @@
 	struct sde_crtc *sde_crtc;
 	struct sde_crtc_state *cstate;
 	struct sde_kms *sde_kms;
+	struct drm_encoder *encoder;
 	unsigned long flags;
-	bool disable_inprogress = false;
 
 	if (!work) {
 		SDE_ERROR("invalid work handle\n");
@@ -1543,9 +1537,6 @@
 
 	SDE_DEBUG("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
 			ktime_to_ns(fevent->ts));
-	disable_inprogress = fevent->event &
-					SDE_ENCODER_FRAME_EVENT_DURING_DISABLE;
-	fevent->event &= ~SDE_ENCODER_FRAME_EVENT_DURING_DISABLE;
 
 	if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE ||
 			(fevent->event & SDE_ENCODER_FRAME_EVENT_ERROR) ||
@@ -1566,15 +1557,35 @@
 					ktime_to_ns(fevent->ts));
 			SDE_EVT32(DRMID(crtc), fevent->event,
 							SDE_EVTLOG_FUNC_CASE2);
-			if (!disable_inprogress)
-				sde_core_perf_crtc_release_bw(crtc);
+			sde_core_perf_crtc_release_bw(crtc);
 		} else {
 			SDE_EVT32_VERBOSE(DRMID(crtc), fevent->event,
 							SDE_EVTLOG_FUNC_CASE3);
 		}
 
-		if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE &&
-							!disable_inprogress)
+		if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE ||
+			    (fevent->event & SDE_ENCODER_FRAME_EVENT_ERROR)) {
+			bool signal_fence = true;
+
+			drm_for_each_encoder(encoder, crtc->dev) {
+				if (encoder->crtc != crtc)
+					continue;
+
+				signal_fence &=
+					sde_encoder_is_cmd_mode(encoder);
+			}
+
+			/* signal release fence only for cmd mode panels here */
+			if (signal_fence) {
+				sde_fence_signal(&sde_crtc->output_fence, 0);
+				SDE_EVT32_VERBOSE(DRMID(crtc), fevent->event,
+							SDE_EVTLOG_FUNC_CASE4);
+			}
+
+			complete_all(&sde_crtc->frame_done_comp);
+		}
+
+		if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE)
 			sde_core_perf_crtc_update(crtc, 0, false);
 	} else {
 		SDE_ERROR("crtc%d ts:%lld unknown event %u\n", crtc->base.id,
@@ -1599,7 +1610,7 @@
 	struct msm_drm_private *priv;
 	struct sde_crtc_frame_event *fevent;
 	unsigned long flags;
-	int pipe_id;
+	u32 crtc_id;
 
 	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
 		SDE_ERROR("invalid parameters\n");
@@ -1607,7 +1618,7 @@
 	}
 	sde_crtc = to_sde_crtc(crtc);
 	priv = crtc->dev->dev_private;
-	pipe_id = drm_crtc_index(crtc);
+	crtc_id = drm_crtc_index(crtc);
 
 	SDE_DEBUG("crtc%d\n", crtc->base.id);
 	SDE_EVT32_VERBOSE(DRMID(crtc), event);
@@ -1629,11 +1640,7 @@
 	fevent->event = event;
 	fevent->crtc = crtc;
 	fevent->ts = ktime_get();
-	if (event & SDE_ENCODER_FRAME_EVENT_DURING_DISABLE)
-		sde_crtc_frame_event_work(&fevent->work);
-	else
-		kthread_queue_work(&priv->disp_thread[pipe_id].worker,
-								&fevent->work);
+	kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
 }
 
 void sde_crtc_complete_commit(struct drm_crtc *crtc,
@@ -1641,7 +1648,9 @@
 {
 	struct sde_crtc *sde_crtc;
 	struct sde_crtc_state *cstate;
+	struct drm_encoder *encoder;
 	int i;
+	bool signal_fence = true;
 
 	if (!crtc || !crtc->state) {
 		SDE_ERROR("invalid crtc\n");
@@ -1652,9 +1661,18 @@
 	cstate = to_sde_crtc_state(crtc->state);
 	SDE_EVT32_VERBOSE(DRMID(crtc));
 
-	/* signal output fence(s) at end of commit */
-	sde_fence_signal(&sde_crtc->output_fence, 0);
+	drm_for_each_encoder(encoder, crtc->dev) {
+		if (encoder->crtc != crtc)
+			continue;
 
+		signal_fence &= !sde_encoder_is_cmd_mode(encoder);
+	}
+
+	/* signal release fence for non-cmd mode panels */
+	if (signal_fence)
+		sde_fence_signal(&sde_crtc->output_fence, 0);
+
+	/* signal retire fence */
 	for (i = 0; i < cstate->num_connectors; ++i)
 		sde_connector_complete_commit(cstate->connectors[i]);
 }
@@ -2085,6 +2103,36 @@
 			cstate->property_values, cstate->property_blobs);
 }
 
+static int _sde_crtc_wait_for_frame_done(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc;
+	int ret, rc = 0;
+
+	if (!crtc) {
+		SDE_ERROR("invalid argument\n");
+		return -EINVAL;
+	}
+	sde_crtc = to_sde_crtc(crtc);
+
+	if (!atomic_read(&sde_crtc->frame_pending)) {
+		SDE_DEBUG("no frames pending\n");
+		return 0;
+	}
+
+	SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FUNC_ENTRY);
+	ret = wait_for_completion_timeout(&sde_crtc->frame_done_comp,
+			msecs_to_jiffies(SDE_FRAME_DONE_TIMEOUT));
+	if (!ret) {
+		SDE_ERROR("frame done completion wait timed out, ret:%d\n",
+				ret);
+		SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FATAL);
+		rc = -ETIMEDOUT;
+	}
+	SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FUNC_EXIT);
+
+	return rc;
+}
+
 void sde_crtc_commit_kickoff(struct drm_crtc *crtc)
 {
 	struct drm_encoder *encoder;
@@ -2129,19 +2177,21 @@
 		sde_encoder_prepare_for_kickoff(encoder, &params);
 	}
 
-	if (atomic_read(&sde_crtc->frame_pending) > 2) {
-		/* framework allows only 1 outstanding + current */
-		SDE_ERROR("crtc%d invalid frame pending\n",
-				crtc->base.id);
-		SDE_EVT32(DRMID(crtc), 0);
+	/* wait for frame_event_done completion */
+	if (_sde_crtc_wait_for_frame_done(crtc)) {
+		SDE_ERROR("crtc%d wait for frame done failed; frame_pending=%d\n",
+				crtc->base.id,
+				atomic_read(&sde_crtc->frame_pending));
 		goto end;
-	} else if (atomic_inc_return(&sde_crtc->frame_pending) == 1) {
+	}
+
+	if (atomic_inc_return(&sde_crtc->frame_pending) == 1) {
 		/* acquire bandwidth and other resources */
 		SDE_DEBUG("crtc%d first commit\n", crtc->base.id);
-		SDE_EVT32(DRMID(crtc), 1);
+		SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FUNC_CASE1);
 	} else {
 		SDE_DEBUG("crtc%d commit\n", crtc->base.id);
-		SDE_EVT32(DRMID(crtc), 2);
+		SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FUNC_CASE2);
 	}
 	sde_crtc->play_count++;
 
@@ -2151,6 +2201,9 @@
 
 		sde_encoder_kickoff(encoder);
 	}
+
+	reinit_completion(&sde_crtc->frame_done_comp);
+
 end:
 	SDE_ATRACE_END("crtc_commit");
 	return;
@@ -2444,6 +2497,12 @@
 	mutex_lock(&sde_crtc->crtc_lock);
 	SDE_EVT32(DRMID(crtc));
 
+	/* wait for frame_event_done completion */
+	if (_sde_crtc_wait_for_frame_done(crtc))
+		SDE_ERROR("crtc%d wait for frame done failed; frame_pending=%d\n",
+				crtc->base.id,
+				atomic_read(&sde_crtc->frame_pending));
+
 	if (atomic_read(&sde_crtc->vblank_refcount) && !sde_crtc->suspend) {
 		SDE_ERROR("crtc%d invalid vblank refcount\n",
 				crtc->base.id);
@@ -2455,8 +2514,6 @@
 	}
 
 	if (atomic_read(&sde_crtc->frame_pending)) {
-		/* release bandwidth and other resources */
-		SDE_ERROR("crtc%d invalid frame pending\n", crtc->base.id);
 		SDE_EVT32(DRMID(crtc), atomic_read(&sde_crtc->frame_pending),
 							SDE_EVTLOG_FUNC_CASE2);
 		sde_core_perf_crtc_release_bw(crtc);
@@ -2482,6 +2539,7 @@
 
 	/* disable clk & bw control until clk & bw properties are set */
 	cstate->bw_control = false;
+	cstate->bw_split_vote = false;
 
 	spin_lock_irqsave(&sde_crtc->spin_lock, flags);
 	list_for_each_entry(node, &sde_crtc->user_event_list, list) {
@@ -2986,13 +3044,21 @@
 			catalog->perf.max_bw_high * 1000ULL,
 			CRTC_PROP_CORE_IB);
 	msm_property_install_range(&sde_crtc->property_info,
-			"mem_ab", 0x0, 0, U64_MAX,
+			"llcc_ab", 0x0, 0, U64_MAX,
 			catalog->perf.max_bw_high * 1000ULL,
-			CRTC_PROP_MEM_AB);
+			CRTC_PROP_LLCC_AB);
 	msm_property_install_range(&sde_crtc->property_info,
-			"mem_ib", 0x0, 0, U64_MAX,
+			"llcc_ib", 0x0, 0, U64_MAX,
 			catalog->perf.max_bw_high * 1000ULL,
-			CRTC_PROP_MEM_IB);
+			CRTC_PROP_LLCC_IB);
+	msm_property_install_range(&sde_crtc->property_info,
+			"dram_ab", 0x0, 0, U64_MAX,
+			catalog->perf.max_bw_high * 1000ULL,
+			CRTC_PROP_DRAM_AB);
+	msm_property_install_range(&sde_crtc->property_info,
+			"dram_ib", 0x0, 0, U64_MAX,
+			catalog->perf.max_bw_high * 1000ULL,
+			CRTC_PROP_DRAM_IB);
 	msm_property_install_range(&sde_crtc->property_info,
 			"rot_prefill_bw", 0, 0, U64_MAX,
 			catalog->perf.max_bw_high * 1000ULL,
@@ -3120,10 +3186,15 @@
 			case CRTC_PROP_CORE_CLK:
 			case CRTC_PROP_CORE_AB:
 			case CRTC_PROP_CORE_IB:
-			case CRTC_PROP_MEM_AB:
-			case CRTC_PROP_MEM_IB:
 				cstate->bw_control = true;
 				break;
+			case CRTC_PROP_LLCC_AB:
+			case CRTC_PROP_LLCC_IB:
+			case CRTC_PROP_DRAM_AB:
+			case CRTC_PROP_DRAM_IB:
+				cstate->bw_control = true;
+				cstate->bw_split_vote = true;
+				break;
 			default:
 				/* nothing to do */
 				break;
@@ -3475,15 +3546,22 @@
 	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
 	struct sde_crtc_state *cstate = to_sde_crtc_state(crtc->state);
 	struct sde_crtc_res *res;
+	int i;
 
 	seq_printf(s, "num_connectors: %d\n", cstate->num_connectors);
 	seq_printf(s, "client type: %d\n", sde_crtc_get_client_type(crtc));
 	seq_printf(s, "intf_mode: %d\n", sde_crtc_get_intf_mode(crtc));
-	seq_printf(s, "bw_ctl: %llu\n", sde_crtc->cur_perf.bw_ctl);
 	seq_printf(s, "core_clk_rate: %llu\n",
 			sde_crtc->cur_perf.core_clk_rate);
-	seq_printf(s, "max_per_pipe_ib: %llu\n",
-			sde_crtc->cur_perf.max_per_pipe_ib);
+	for (i = SDE_POWER_HANDLE_DBUS_ID_MNOC;
+			i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
+		seq_printf(s, "bw_ctl[%s]: %llu\n",
+				sde_power_handle_get_dbus_name(i),
+				sde_crtc->cur_perf.bw_ctl[i]);
+		seq_printf(s, "max_per_pipe_ib[%s]: %llu\n",
+				sde_power_handle_get_dbus_name(i),
+				sde_crtc->cur_perf.max_per_pipe_ib[i]);
+	}
 
 	seq_printf(s, "rp.%d: ", cstate->rp.sequence_id);
 	list_for_each_entry(res, &cstate->rp.res_list, list)
@@ -3624,14 +3702,18 @@
 {
 	unsigned long irq_flags;
 	struct sde_crtc *sde_crtc;
+	struct msm_drm_private *priv;
 	struct sde_crtc_event *event = NULL;
+	u32 crtc_id;
 
-	if (!crtc || !func)
+	if (!crtc || !crtc->dev || !crtc->dev->dev_private || !func) {
+		SDE_ERROR("invalid parameters\n");
 		return -EINVAL;
+	}
 	sde_crtc = to_sde_crtc(crtc);
+	priv = crtc->dev->dev_private;
+	crtc_id = drm_crtc_index(crtc);
 
-	if (!sde_crtc->event_thread)
-		return -EINVAL;
 	/*
 	 * Obtain an event struct from the private cache. This event
 	 * queue may be called from ISR contexts, so use a private
@@ -3655,7 +3737,8 @@
 
 	/* queue new event request */
 	kthread_init_work(&event->kt_work, _sde_crtc_event_cb);
-	kthread_queue_work(&sde_crtc->event_worker, &event->kt_work);
+	kthread_queue_work(&priv->event_thread[crtc_id].worker,
+			&event->kt_work);
 
 	return 0;
 }
@@ -3676,17 +3759,6 @@
 		list_add_tail(&sde_crtc->event_cache[i].list,
 				&sde_crtc->event_free_list);
 
-	kthread_init_worker(&sde_crtc->event_worker);
-	sde_crtc->event_thread = kthread_run(kthread_worker_fn,
-			&sde_crtc->event_worker, "crtc_event:%d",
-			sde_crtc->base.base.id);
-
-	if (IS_ERR_OR_NULL(sde_crtc->event_thread)) {
-		SDE_ERROR("failed to create event thread\n");
-		rc = PTR_ERR(sde_crtc->event_thread);
-		sde_crtc->event_thread = NULL;
-	}
-
 	return rc;
 }
 
@@ -3714,6 +3786,8 @@
 	spin_lock_init(&sde_crtc->spin_lock);
 	atomic_set(&sde_crtc->frame_pending, 0);
 
+	init_completion(&sde_crtc->frame_done_comp);
+
 	INIT_LIST_HEAD(&sde_crtc->frame_event_list);
 	INIT_LIST_HEAD(&sde_crtc->user_event_list);
 	for (i = 0; i < ARRAY_SIZE(sde_crtc->frame_events); i++) {
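
sde_crtc now serializes commits against frame-done interrupts with a completion: kickoff waits (with timeout) for the previous frame, re-arms the completion after triggering, and the frame-event worker completes it on done or error. A condensed sketch of that handshake, with illustrative names and a caller-supplied timeout:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static DECLARE_COMPLETION(frame_done);

static int example_wait_frame_done(unsigned int timeout_ms)
{
	if (!wait_for_completion_timeout(&frame_done,
					 msecs_to_jiffies(timeout_ms)))
		return -ETIMEDOUT;	/* previous frame never signalled */
	return 0;
}

static void example_kickoff(void)
{
	/* ... program the hardware and trigger the flush ... */
	reinit_completion(&frame_done);	/* arm for the frame just queued */
}

static void example_frame_event(void)
{
	complete_all(&frame_done);	/* done or error: release any waiter */
}
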
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 38311c1..0d72ff1 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -135,6 +135,7 @@
  * @frame_events  : static allocation of in-flight frame events
  * @frame_event_list : available frame event list
  * @spin_lock     : spin lock for frame event, transaction status, etc...
+ * @frame_done_comp    : for frame_event_done synchronization
  * @event_thread  : Pointer to event handler thread
  * @event_worker  : Event worker queue
  * @event_cache   : Local cache of event worker structures
@@ -186,10 +187,9 @@
 	struct sde_crtc_frame_event frame_events[SDE_CRTC_FRAME_EVENT_SIZE];
 	struct list_head frame_event_list;
 	spinlock_t spin_lock;
+	struct completion frame_done_comp;
 
 	/* for handling internal event thread */
-	struct task_struct *event_thread;
-	struct kthread_worker event_worker;
 	struct sde_crtc_event event_cache[SDE_CRTC_MAX_EVENT_COUNT];
 	struct list_head event_free_list;
 	spinlock_t event_lock;
@@ -260,7 +260,8 @@
  * @intf_mode     : Interface mode of the primary connector
  * @rsc_client    : sde rsc client when mode is valid
  * @is_ppsplit    : Whether current topology requires PPSplit special handling
- * @bw_control    : true if bw/clk controlled by bw/clk properties
+ * @bw_control    : true if bw/clk controlled by core bw/clk properties
+ * @bw_split_vote : true if bw controlled by llcc/dram bw properties
  * @crtc_roi      : Current CRTC ROI. Possibly sub-rectangle of mode.
  *                  Origin top left of CRTC.
  * @lm_bounds     : LM boundaries based on current mode full resolution, no ROI.
@@ -287,6 +288,7 @@
 	struct sde_rsc_client *rsc_client;
 	bool rsc_update;
 	bool bw_control;
+	bool bw_split_vote;
 
 	bool is_ppsplit;
 	struct sde_rect crtc_roi;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index e1caeaf..0b4dd82 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -56,9 +56,6 @@
 		(p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
 		##__VA_ARGS__)
 
-/* timeout in frames waiting for frame done */
-#define SDE_ENCODER_FRAME_DONE_TIMEOUT	60
-
 /*
  * Two to anticipate panels that can do cmd/vid dynamic switching
  * plan is to create all possible physical encoder types, and switch between
@@ -173,7 +170,6 @@
  * @rsc_cfg:			rsc configuration
  * @cur_conn_roi:		current connector roi
  * @prv_conn_roi:		previous connector roi to optimize if unchanged
- * @disable_inprogress:		sde encoder disable is in progress.
  */
 struct sde_encoder_virt {
 	struct drm_encoder base;
@@ -217,7 +213,6 @@
 	struct sde_encoder_rsc_config rsc_cfg;
 	struct sde_rect cur_conn_roi;
 	struct sde_rect prv_conn_roi;
-	bool disable_inprogress;
 };
 
 #define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base)
@@ -1643,7 +1638,6 @@
 	SDE_EVT32(DRMID(drm_enc));
 
 	sde_enc->cur_master = NULL;
-	sde_enc->disable_inprogress = false;
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
 
@@ -1702,7 +1696,6 @@
 
 	priv = drm_enc->dev->dev_private;
 	sde_kms = to_sde_kms(priv->kms);
-	sde_enc->disable_inprogress = true;
 
 	SDE_EVT32(DRMID(drm_enc));
 
@@ -1868,9 +1861,6 @@
 		sde_encoder_resource_control(drm_enc,
 				SDE_ENC_RC_EVENT_FRAME_DONE);
 
-		if (sde_enc->disable_inprogress)
-			event |= SDE_ENCODER_FRAME_EVENT_DURING_DISABLE;
-
 		if (sde_enc->crtc_frame_event_cb)
 			sde_enc->crtc_frame_event_cb(
 				sde_enc->crtc_frame_event_cb_data, event);
@@ -2224,6 +2214,22 @@
 	}
 }
 
+bool sde_encoder_is_cmd_mode(struct drm_encoder *drm_enc)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct msm_display_info *disp_info;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return false;
+	}
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	disp_info = &sde_enc->disp_info;
+
+	return (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE);
+}
+
 void sde_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
 {
 	struct sde_encoder_virt *sde_enc;
@@ -2332,7 +2338,7 @@
 	SDE_DEBUG_ENC(sde_enc, "\n");
 
 	atomic_set(&sde_enc->frame_done_timeout,
-			SDE_ENCODER_FRAME_DONE_TIMEOUT * 1000 /
+			SDE_FRAME_DONE_TIMEOUT * 1000 /
 			drm_enc->crtc->state->adjusted_mode.vrefresh);
 	mod_timer(&sde_enc->frame_done_timer, jiffies +
 		((atomic_read(&sde_enc->frame_done_timeout) * HZ) / 1000));
@@ -2912,10 +2918,7 @@
 
 	SDE_ERROR_ENC(sde_enc, "frame done timeout\n");
 
-	event =	SDE_ENCODER_FRAME_EVENT_ERROR;
-	if (sde_enc->disable_inprogress)
-		event |= SDE_ENCODER_FRAME_EVENT_DURING_DISABLE;
-
+	event = SDE_ENCODER_FRAME_EVENT_ERROR;
 	SDE_EVT32(DRMID(drm_enc), event);
 	sde_enc->crtc_frame_event_cb(sde_enc->crtc_frame_event_cb_data, event);
 }
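
The encoder watchdog now shares SDE_FRAME_DONE_TIMEOUT with the CRTC wait. The constant is a frame count; the encoder path converts it to a millisecond budget using the mode's refresh rate, then to jiffies for the timer. The arithmetic, stand-alone:

#include <stdio.h>

int main(void)
{
	unsigned int timeout_frames = 60, vrefresh = 60;
	unsigned int timeout_ms = timeout_frames * 1000 / vrefresh;

	printf("%u frames at %u Hz -> %u ms\n",
	       timeout_frames, vrefresh, timeout_ms);
	return 0;
}
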
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
index 0b14a58..9c2d3e9 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -27,7 +27,6 @@
 #define SDE_ENCODER_FRAME_EVENT_DONE		BIT(0)
 #define SDE_ENCODER_FRAME_EVENT_ERROR		BIT(1)
 #define SDE_ENCODER_FRAME_EVENT_PANEL_DEAD	BIT(2)
-#define SDE_ENCODER_FRAME_EVENT_DURING_DISABLE	BIT(3)
 
 /**
  * Encoder functions and data types
@@ -174,6 +173,13 @@
 bool sde_encoder_is_dsc_merge(struct drm_encoder *drm_enc);
 
 /**
+ * sde_encoder_is_cmd_mode - check whether the encoder drives a cmd mode panel
+ * @drm_enc: Pointer to drm encoder object
+ * @Return: true if the encoder is in cmd mode
+ */
+bool sde_encoder_is_cmd_mode(struct drm_encoder *drm_enc);
+
+/**
  * sde_encoder_init - initialize virtual encoder object
  * @dev:        Pointer to drm device structure
  * @disp_info:  Pointer to display information structure
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index 058f19b..5894fe2 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -88,6 +88,10 @@
 
 #define SDE_NAME_SIZE  12
 
+
+/* timeout in frames waiting for frame done */
+#define SDE_FRAME_DONE_TIMEOUT	60
+
 /*
  * struct sde_irq_callback - IRQ callback handlers
  * @list: list to callback
diff --git a/drivers/gpu/drm/msm/sde/sde_trace.h b/drivers/gpu/drm/msm/sde/sde_trace.h
index 6962bef..e233fc7 100644
--- a/drivers/gpu/drm/msm/sde/sde_trace.h
+++ b/drivers/gpu/drm/msm/sde/sde_trace.h
@@ -193,13 +193,16 @@
 )
 
 TRACE_EVENT(sde_perf_crtc_update,
-	TP_PROTO(u32 crtc, u64 bw_ctl, u32 core_clk_rate,
-		bool stop_req, u32 update_bus, u32 update_clk),
-	TP_ARGS(crtc, bw_ctl, core_clk_rate,
+	TP_PROTO(u32 crtc, u64 bw_ctl_mnoc, u64 bw_ctl_llcc,
+			u64 bw_ctl_ebi, u32 core_clk_rate,
+			bool stop_req, u32 update_bus, u32 update_clk),
+	TP_ARGS(crtc, bw_ctl_mnoc, bw_ctl_llcc, bw_ctl_ebi, core_clk_rate,
 		stop_req, update_bus, update_clk),
 	TP_STRUCT__entry(
 			__field(u32, crtc)
-			__field(u64, bw_ctl)
+			__field(u64, bw_ctl_mnoc)
+			__field(u64, bw_ctl_llcc)
+			__field(u64, bw_ctl_ebi)
 			__field(u32, core_clk_rate)
 			__field(bool, stop_req)
 			__field(u32, update_bus)
@@ -207,19 +210,24 @@
 	),
 	TP_fast_assign(
 			__entry->crtc = crtc;
-			__entry->bw_ctl = bw_ctl;
+			__entry->bw_ctl_mnoc = bw_ctl_mnoc;
+			__entry->bw_ctl_llcc = bw_ctl_llcc;
+			__entry->bw_ctl_ebi = bw_ctl_ebi;
 			__entry->core_clk_rate = core_clk_rate;
 			__entry->stop_req = stop_req;
 			__entry->update_bus = update_bus;
 			__entry->update_clk = update_clk;
 	),
-	 TP_printk("crtc=%d bw=%llu clk_rate=%u stop_req=%d u_bus=%d u_clk=%d",
-			 __entry->crtc,
-			 __entry->bw_ctl,
-			 __entry->core_clk_rate,
-			 __entry->stop_req,
-			 __entry->update_bus,
-			 __entry->update_clk)
+	 TP_printk(
+		"crtc=%d bw_mnoc=%llu bw_llcc=%llu bw_ebi=%llu clk_rate=%u stop_req=%d u_bus=%d u_clk=%d",
+			__entry->crtc,
+			__entry->bw_ctl_mnoc,
+			__entry->bw_ctl_llcc,
+			__entry->bw_ctl_ebi,
+			__entry->core_clk_rate,
+			__entry->stop_req,
+			__entry->update_bus,
+			__entry->update_clk)
 );
 
 #define SDE_ATRACE_END(name) trace_sde_mark_write(current->tgid, name, 0)
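
Extending sde_perf_crtc_update means touching all five sections of the TRACE_EVENT in lockstep: prototype, args, entry struct, assignment, and format string. For reference, the minimal shape of such a definition; the event name and fields are illustrative, and the TRACE_SYSTEM boilerplate a real trace header needs is omitted:

#include <linux/tracepoint.h>
#include <linux/types.h>

TRACE_EVENT(example_bw,
	TP_PROTO(u32 crtc, u64 bw),
	TP_ARGS(crtc, bw),
	TP_STRUCT__entry(
		__field(u32, crtc)
		__field(u64, bw)
	),
	TP_fast_assign(
		__entry->crtc = crtc;
		__entry->bw = bw;
	),
	TP_printk("crtc=%d bw=%llu", __entry->crtc, __entry->bw)
);
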
diff --git a/drivers/gpu/drm/msm/sde_power_handle.c b/drivers/gpu/drm/msm/sde_power_handle.c
index 452a3be..242cd64 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.c
+++ b/drivers/gpu/drm/msm/sde_power_handle.c
@@ -30,6 +30,20 @@
 #include "sde_power_handle.h"
 #include "sde_trace.h"
 
+static const char *data_bus_name[SDE_POWER_HANDLE_DBUS_ID_MAX] = {
+	[SDE_POWER_HANDLE_DBUS_ID_MNOC] = "qcom,sde-data-bus",
+	[SDE_POWER_HANDLE_DBUS_ID_LLCC] = "qcom,sde-llcc-bus",
+	[SDE_POWER_HANDLE_DBUS_ID_EBI] = "qcom,sde-ebi-bus",
+};
+
+const char *sde_power_handle_get_dbus_name(u32 bus_id)
+{
+	if (bus_id < SDE_POWER_HANDLE_DBUS_ID_MAX)
+		return data_bus_name[bus_id];
+
+	return NULL;
+}
+
 static void sde_power_event_trigger_locked(struct sde_power_handle *phandle,
 		u32 event_type)
 {
@@ -415,7 +429,9 @@
 			vect->ab = ab_quota[i];
 			vect->ib = ib_quota[i];
 
-			pr_debug("uc_idx=%d %s path idx=%d ab=%llu ib=%llu\n",
+			pr_debug(
+				"%s uc_idx=%d %s path idx=%d ab=%llu ib=%llu\n",
+				bw_table->name,
 				new_uc_idx, (i < rt_axi_port_cnt) ? "rt" : "nrt"
 				, i, vect->ab, vect->ib);
 		}
@@ -433,7 +449,8 @@
 
 int sde_power_data_bus_set_quota(struct sde_power_handle *phandle,
 		struct sde_power_client *pclient,
-		int bus_client, u64 ab_quota, u64 ib_quota)
+		int bus_client, u32 bus_id,
+		u64 ab_quota, u64 ib_quota)
 {
 	int rc = 0;
 	int i;
@@ -442,7 +459,8 @@
 	struct sde_power_client *client;
 
 	if (!phandle || !pclient ||
-			bus_client >= SDE_POWER_HANDLE_DATA_BUS_CLIENT_MAX) {
+			bus_client >= SDE_POWER_HANDLE_DATA_BUS_CLIENT_MAX ||
+			bus_id >= SDE_POWER_HANDLE_DBUS_ID_MAX) {
 		pr_err("invalid parameters\n");
 		return -EINVAL;
 	}
@@ -465,7 +483,9 @@
 		}
 	}
 
-	rc = _sde_power_data_bus_set_quota(&phandle->data_bus_handle,
+	if (phandle->data_bus_handle[bus_id].data_bus_hdl)
+		rc = _sde_power_data_bus_set_quota(
+			&phandle->data_bus_handle[bus_id],
 			total_ab_rt, total_ab_nrt,
 			total_ib_rt, total_ib_nrt);
 
@@ -484,7 +504,7 @@
 }
 
 static int sde_power_data_bus_parse(struct platform_device *pdev,
-	struct sde_power_data_bus_handle *pdbus)
+	struct sde_power_data_bus_handle *pdbus, const char *name)
 {
 	struct device_node *node;
 	int rc = 0;
@@ -507,7 +527,7 @@
 		rc = 0;
 	}
 
-	node = of_get_child_by_name(pdev->dev.of_node, "qcom,sde-data-bus");
+	node = of_get_child_by_name(pdev->dev.of_node, name);
 	if (node) {
 		rc = of_property_read_u32(node,
 				"qcom,msm-bus,num-paths", &paths);
@@ -533,7 +553,8 @@
 			rc = -EINVAL;
 			goto end;
 		}
-		pr_debug("register data_bus_hdl=%x\n", pdbus->data_bus_hdl);
+		pr_debug("register %s data_bus_hdl=%x\n", name,
+				pdbus->data_bus_hdl);
 	}
 
 end:
@@ -621,7 +642,8 @@
 
 int sde_power_data_bus_set_quota(struct sde_power_handle *phandle,
 		struct sde_power_client *pclient,
-		int bus_client, u64 ab_quota, u64 ib_quota)
+		int bus_client, u32 bus_id,
+		u64 ab_quota, u64 ib_quota)
 {
 	return 0;
 }
@@ -651,7 +673,7 @@
 int sde_power_resource_init(struct platform_device *pdev,
 	struct sde_power_handle *phandle)
 {
-	int rc = 0;
+	int rc = 0, i;
 	struct dss_module_power *mp;
 
 	if (!phandle || !pdev) {
@@ -699,10 +721,16 @@
 		goto bus_err;
 	}
 
-	rc = sde_power_data_bus_parse(pdev, &phandle->data_bus_handle);
-	if (rc) {
-		pr_err("register data bus parse failed rc=%d\n", rc);
-		goto data_bus_err;
+	for (i = SDE_POWER_HANDLE_DBUS_ID_MNOC;
+			i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
+		rc = sde_power_data_bus_parse(pdev,
+				&phandle->data_bus_handle[i],
+				data_bus_name[i]);
+		if (rc) {
+			pr_err("register data bus parse failed id=%d rc=%d\n",
+					i, rc);
+			goto data_bus_err;
+		}
 	}
 
 	INIT_LIST_HEAD(&phandle->power_client_clist);
@@ -716,6 +744,8 @@
 	return rc;
 
 data_bus_err:
+	for (i--; i >= 0; i--)
+		sde_power_data_bus_unregister(&phandle->data_bus_handle[i]);
 	sde_power_reg_bus_unregister(phandle->reg_bus_hdl);
 bus_err:
 	msm_dss_put_clk(mp->clk_config, mp->num_clk);
@@ -739,6 +769,7 @@
 	struct dss_module_power *mp;
 	struct sde_power_client *curr_client, *next_client;
 	struct sde_power_event *curr_event, *next_event;
+	int i;
 
 	if (!phandle || !pdev) {
 		pr_err("invalid input param\n");
@@ -766,7 +797,8 @@
 	}
 	mutex_unlock(&phandle->phandle_lock);
 
-	sde_power_data_bus_unregister(&phandle->data_bus_handle);
+	for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
+		sde_power_data_bus_unregister(&phandle->data_bus_handle[i]);
 
 	sde_power_reg_bus_unregister(phandle->reg_bus_hdl);
 
@@ -790,7 +822,7 @@
 int sde_power_resource_enable(struct sde_power_handle *phandle,
 	struct sde_power_client *pclient, bool enable)
 {
-	int rc = 0;
+	int rc = 0, i;
 	bool changed = false;
 	u32 max_usecase_ndx = VOTE_INDEX_DISABLE, prev_usecase_ndx;
 	struct sde_power_client *client;
@@ -837,13 +869,15 @@
 		sde_power_event_trigger_locked(phandle,
 				SDE_POWER_EVENT_PRE_ENABLE);
 
-		rc = sde_power_data_bus_update(&phandle->data_bus_handle,
-									enable);
-		if (rc) {
-			pr_err("failed to set data bus vote rc=%d\n", rc);
-			goto data_bus_hdl_err;
+		for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
+			rc = sde_power_data_bus_update(
+					&phandle->data_bus_handle[i], enable);
+			if (rc) {
+				pr_err("failed to set data bus vote id=%d rc=%d\n",
+						i, rc);
+				goto data_bus_hdl_err;
+			}
 		}
-
 		/*
 		 * - When the target is RSCC enabled, regulator should
 		 *   be enabled by the s/w only for the first time during
@@ -897,7 +931,9 @@
 		if (!phandle->rsc_client)
 			msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg,
 									enable);
-		sde_power_data_bus_update(&phandle->data_bus_handle, enable);
+		for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
+			sde_power_data_bus_update(&phandle->data_bus_handle[i],
+					enable);
 
 		sde_power_event_trigger_locked(phandle,
 				SDE_POWER_EVENT_POST_DISABLE);
@@ -915,7 +951,8 @@
 	if (!phandle->rsc_client)
 		msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, 0);
 vreg_err:
-	sde_power_data_bus_update(&phandle->data_bus_handle, 0);
+	for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
+		sde_power_data_bus_update(&phandle->data_bus_handle[i], 0);
 data_bus_hdl_err:
 	phandle->current_usecase_ndx = prev_usecase_ndx;
 	mutex_unlock(&phandle->phandle_lock);
diff --git a/drivers/gpu/drm/msm/sde_power_handle.h b/drivers/gpu/drm/msm/sde_power_handle.h
index c526b71..78c325d 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.h
+++ b/drivers/gpu/drm/msm/sde_power_handle.h
@@ -16,9 +16,9 @@
 
 #define MAX_CLIENT_NAME_LEN 128
 
-#define SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA	2000000
+#define SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA	1600000000
 #define SDE_POWER_HANDLE_DISABLE_BUS_AB_QUOTA	0
-#define SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA	2000000
+#define SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA	1600000000
 #define SDE_POWER_HANDLE_DISABLE_BUS_IB_QUOTA	0
 
 #include <linux/sde_io_util.h>
@@ -60,6 +60,19 @@
 };
 
 /**
+ * enum SDE_POWER_HANDLE_DBUS_ID - data bus identifier
+ * @SDE_POWER_HANDLE_DBUS_ID_MNOC: DPU/MNOC data bus
+ * @SDE_POWER_HANDLE_DBUS_ID_LLCC: MNOC/LLCC data bus
+ * @SDE_POWER_HANDLE_DBUS_ID_EBI: LLCC/EBI data bus
+ */
+enum SDE_POWER_HANDLE_DBUS_ID {
+	SDE_POWER_HANDLE_DBUS_ID_MNOC,
+	SDE_POWER_HANDLE_DBUS_ID_LLCC,
+	SDE_POWER_HANDLE_DBUS_ID_EBI,
+	SDE_POWER_HANDLE_DBUS_ID_MAX,
+};
+
+/**
  * struct sde_power_client: stores the power client for sde driver
  * @name:	name of the client
  * @usecase_ndx: current regs bus vote type
@@ -152,7 +165,8 @@
 	struct device *dev;
 	u32 current_usecase_ndx;
 	u32 reg_bus_hdl;
-	struct sde_power_data_bus_handle data_bus_handle;
+	struct sde_power_data_bus_handle data_bus_handle
+		[SDE_POWER_HANDLE_DBUS_ID_MAX];
 	struct list_head event_list;
 	struct sde_rsc_client *rsc_client;
 	bool rsc_client_init;
@@ -254,6 +268,7 @@
  * @phandle:  power handle containing the resources
  * @client: client information to set quota
  * @bus_client: real-time or non-real-time bus client
+ * @bus_id: identifier of data bus, see SDE_POWER_HANDLE_DBUS_ID
  * @ab_quota: arbitrated bus bandwidth
  * @ib_quota: instantaneous bus bandwidth
  *
@@ -261,7 +276,8 @@
  */
 int sde_power_data_bus_set_quota(struct sde_power_handle *phandle,
 		struct sde_power_client *pclient,
-		int bus_client, u64 ab_quota, u64 ib_quota);
+		int bus_client, u32 bus_id,
+		u64 ab_quota, u64 ib_quota);
 
 /**
  * sde_power_data_bus_bandwidth_ctrl() - control data bus bandwidth enable
@@ -298,4 +314,11 @@
 void sde_power_handle_unregister_event(struct sde_power_handle *phandle,
 		struct sde_power_event *event);
 
+/**
+ * sde_power_handle_get_dbus_name - get name of given data bus identifier
+ * @bus_id:	data bus identifier
+ * Return:	Pointer to name string on success; NULL otherwise
+ */
+const char *sde_power_handle_get_dbus_name(u32 bus_id);
+
 #endif /* _SDE_POWER_HANDLE_H_ */
diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c
index caa8cdf..8447916 100644
--- a/drivers/gpu/drm/msm/sde_rsc.c
+++ b/drivers/gpu/drm/msm/sde_rsc.c
@@ -657,13 +657,14 @@
  * sde_rsc_client_vote() - ab/ib vote from rsc client
  *
  * @client:	 Client pointer provided by sde_rsc_client_create().
+ * @bus_id: identifier of the data bus being voted on
  * @ab:		 aggregated bandwidth vote from client.
  * @ib:		 instant bandwidth vote from client.
  *
  * Return: error code.
  */
 int sde_rsc_client_vote(struct sde_rsc_client *caller_client,
-	u64 ab_vote, u64 ib_vote)
+		u32 bus_id, u64 ab_vote, u64 ib_vote)
 {
 	int rc = 0;
 	struct sde_rsc_priv *rsc;
@@ -717,7 +718,8 @@
 
 	rpmh_invalidate(rsc->disp_rsc);
 	sde_power_data_bus_set_quota(&rsc->phandle, rsc->pclient,
-		SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT, ab_vote, ib_vote);
+		SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT,
+		bus_id, ab_vote, ib_vote);
 	rpmh_flush(rsc->disp_rsc);
 
 	if (rsc->hw_ops.tcs_use_ok)
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index 3c47762..86a1e88 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -327,7 +327,8 @@
 		.minor = 0,
 		.patchid = ANY_ID,
 		.features = ADRENO_64BIT | ADRENO_RPMH |
-			ADRENO_CONTENT_PROTECTION,
+			ADRENO_GPMU | ADRENO_CONTENT_PROTECTION |
+			ADRENO_SPTP_PC,
 		.sqefw_name = "a630_sqe.fw",
 		.zap_name = "a630_zap",
 		.gpudev = &adreno_a6xx_gpudev,
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index 33854ea..3cbb68e 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -678,9 +678,6 @@
 	}
 
 	release_firmware(fw);
-
-	ret = _load_gmu_firmware(device);
-
 	return ret;
 }
 
@@ -1567,9 +1564,18 @@
  */
 static int a6xx_microcode_read(struct adreno_device *adreno_dev)
 {
-	return _load_firmware(KGSL_DEVICE(adreno_dev),
-			adreno_dev->gpucore->sqefw_name,
-			ADRENO_FW(adreno_dev, ADRENO_FW_SQE));
+	int ret;
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	struct adreno_firmware *sqe_fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
+
+	if (sqe_fw->memdesc.hostptr == NULL) {
+		ret = _load_firmware(device, adreno_dev->gpucore->sqefw_name,
+				sqe_fw);
+		if (ret)
+			return ret;
+	}
+
+	return _load_gmu_firmware(device);
 }
 
 #define VBIF_RESET_ACK_TIMEOUT	100
diff --git a/drivers/gpu/msm/kgsl_hfi.c b/drivers/gpu/msm/kgsl_hfi.c
index b05e18d..067b276 100644
--- a/drivers/gpu/msm/kgsl_hfi.c
+++ b/drivers/gpu/msm/kgsl_hfi.c
@@ -185,14 +185,12 @@
 	spin_lock(&hfi->msglock);
 	list_for_each_entry_safe(msg, next, &hfi->msglist, node) {
 		if (msg->msg_id == rsp->ret_hdr.id &&
-				msg->seqnum == rsp->ret_hdr.seqnum) {
-			list_del(&msg->node);
+				msg->seqnum == rsp->ret_hdr.seqnum)
 			break;
-		}
 	}
-	spin_unlock(&hfi->msglock);
 
 	if (msg == NULL) {
+		spin_unlock(&hfi->msglock);
 		dev_err(&gmu->pdev->dev,
 				"Cannot find receiver of ack msg with id=%d\n",
 				rsp->ret_hdr.id);
@@ -201,6 +199,7 @@
 
 	memcpy(&msg->results, (void *) rsp, rsp->hdr.size << 2);
 	complete(&msg->msg_complete);
+	spin_unlock(&hfi->msglock);
 }
 
 static void receive_err_msg(struct gmu_device *gmu, struct hfi_msg_rsp *rsp)
@@ -235,7 +234,7 @@
 
 	if (hfi_cmdq_write(gmu, HFI_CMD_QUEUE, msg) != size) {
 		rc = -EINVAL;
-		goto error;
+		goto done;
 	}
 
 	rc = wait_for_completion_timeout(
@@ -245,11 +244,12 @@
 		dev_err(&gmu->pdev->dev,
 				"Receiving GMU ack %d timed out\n", msg->id);
 		rc = -ETIMEDOUT;
-		goto error;
+		goto done;
 	}
 
-	return 0;
-error:
+	/* If we got here we succeeded */
+	rc = 0;
+done:
 	spin_lock(&hfi->msglock);
 	list_del(&ret_msg->node);
 	spin_unlock(&hfi->msglock);
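
The kgsl_hfi change above closes an ack/waiter race: the receiver used to drop msglock before copying results and completing, while the sender (now unconditionally, at the done: label) does list_del() under the same lock on both success and timeout, so a timed-out waiter could free the node while the receiver was still writing to it. A condensed sketch of the corrected receiver side; it tracks the match in a separate pointer, since a list_for_each_entry_safe cursor is never NULL after a full traversal:

#include <linux/completion.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>

struct pending_msg {
	struct list_head node;
	unsigned int id, seqnum;
	unsigned int results[4];
	struct completion done;
};

static void example_receive_ack(spinlock_t *lock, struct list_head *msgs,
				unsigned int id, unsigned int seqnum,
				const unsigned int *payload, size_t bytes)
{
	struct pending_msg *msg, *next, *found = NULL;

	spin_lock(lock);
	list_for_each_entry_safe(msg, next, msgs, node) {
		if (msg->id == id && msg->seqnum == seqnum) {
			found = msg;
			break;
		}
	}
	if (found) {
		memcpy(found->results, payload, bytes);
		complete(&found->done);	/* still under the lock: the waiter
					 * may delete and free the node the
					 * moment it wakes
					 */
	}
	spin_unlock(lock);
}
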
diff --git a/drivers/gpu/msm/kgsl_hfi.h b/drivers/gpu/msm/kgsl_hfi.h
index 83abec4..8eedbfa2 100644
--- a/drivers/gpu/msm/kgsl_hfi.h
+++ b/drivers/gpu/msm/kgsl_hfi.h
@@ -115,7 +115,7 @@
 	HFI_F2H_QPRI_DEBUG = 40,
 };
 
-#define HFI_RSP_TIMEOUT 50 /* msec */
+#define HFI_RSP_TIMEOUT 100 /* msec */
 #define HFI_H2F_CMD_IRQ_MASK BIT(0)
 
 enum hfi_msg_type {
diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h
index dd41e4e..5466a49 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.h
+++ b/drivers/gpu/msm/kgsl_sharedmem.h
@@ -292,8 +292,10 @@
 	else {
 		ret = kgsl_sharedmem_page_alloc_user(memdesc, (size_t) size);
 		if (ret == 0) {
-			if (kgsl_memdesc_map(memdesc) == NULL)
+			if (kgsl_memdesc_map(memdesc) == NULL) {
+				kgsl_sharedmem_free(memdesc);
 				ret = -ENOMEM;
+			}
 		}
 	}
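
The kgsl_sharedmem.h hunk plugs a leak: when mapping the freshly allocated memdesc fails, the allocation itself is now freed before returning -ENOMEM. The generic alloc/map/unwind pattern as a runnable userspace sketch; the names stand in for the kgsl allocation and map calls:

#include <stdlib.h>
#include <string.h>

struct buf { void *pages; void *map; };

/* stand-in for a mapping step that can fail */
static void *try_map(void *pages, int fail)
{
	return fail ? NULL : pages;
}

static int buf_setup(struct buf *b, size_t size, int map_fails)
{
	b->pages = malloc(size);
	if (!b->pages)
		return -1;

	b->map = try_map(b->pages, map_fails);
	if (!b->map) {
		free(b->pages);		/* unwind: don't leak the pages */
		memset(b, 0, sizeof(*b));
		return -1;
	}
	return 0;
}

int main(void)
{
	struct buf b;

	return buf_setup(&b, 64, 1) ? 0 : 1;	/* exercise the failure path */
}
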
 
diff --git a/drivers/hwtracing/coresight/coresight-cti.c b/drivers/hwtracing/coresight/coresight-cti.c
index d26e0d0..621e08f 100644
--- a/drivers/hwtracing/coresight/coresight-cti.c
+++ b/drivers/hwtracing/coresight/coresight-cti.c
@@ -670,6 +670,7 @@
 	struct cti_drvdata *drvdata;
 	unsigned long flag;
 	int trig;
+	int refcnt;
 
 	if (IS_ERR_OR_NULL(cti))
 		return;
@@ -678,6 +679,7 @@
 
 	mutex_lock(&drvdata->mutex);
 
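+	/*
+	 * Snapshot the refcount so the matching runtime PM reference, if one
+	 * is held, can be dropped once the triggers are disabled.
+	 */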
+	refcnt = drvdata->refcnt;
 	spin_lock_irqsave(&drvdata->spinlock, flag);
 	if (cti_cpu_verify_access(drvdata))
 		goto err;
@@ -692,6 +694,8 @@
 			cti_trigout_gpio_disable(drvdata);
 	}
 
+	if (refcnt)
+		pm_runtime_put(drvdata->dev);
 	mutex_unlock(&drvdata->mutex);
 	return;
 err:
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 860fe6e..a3da8ffd 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2012, 2017, The Linux Foundation. All rights reserved.
  *
  * Description: CoreSight Funnel driver
  *
@@ -23,6 +23,7 @@
 #include <linux/coresight.h>
 #include <linux/amba/bus.h>
 #include <linux/clk.h>
+#include <linux/of_address.h>
 
 #include "coresight-priv.h"
 
@@ -168,6 +169,29 @@
 };
 ATTRIBUTE_GROUPS(coresight_funnel);
 
+static int funnel_get_resource_byname(struct device_node *np,
+				   char *ch_base, struct resource *res)
+{
+	const char *name = NULL;
+	int index = 0, found = 0;
+
+	while (!of_property_read_string_index(np, "reg-names", index, &name)) {
+		if (strcmp(ch_base, name)) {
+			index++;
+			continue;
+		}
+
+		/* We have a match and @index is where it's at */
+		found = 1;
+		break;
+	}
+
+	if (!found)
+		return -EINVAL;
+
+	return of_address_to_resource(np, index, res);
+}
+
 static int funnel_probe(struct amba_device *adev, const struct amba_id *id)
 {
 	int ret;
@@ -175,7 +199,8 @@
 	struct device *dev = &adev->dev;
 	struct coresight_platform_data *pdata = NULL;
 	struct funnel_drvdata *drvdata;
-	struct resource *res = &adev->res;
+	struct resource *res;
+	struct resource res_real;
 	struct coresight_desc desc = { 0 };
 	struct device_node *np = adev->dev.of_node;
 
@@ -199,8 +224,19 @@
 	}
 	dev_set_drvdata(dev, drvdata);
 
-	/* Validity for the resource is already checked by the AMBA core */
-	base = devm_ioremap_resource(dev, res);
+	if (of_property_read_bool(np, "qcom,duplicate-funnel")) {
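+		/*
+		 * Duplicate funnels ioremap the real register space looked up
+		 * by name; the AMBA-provided resource is only a dummy.
+		 */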
+		ret = funnel_get_resource_byname(np, "funnel-base-real",
+						 &res_real);
+		if (ret)
+			return ret;
+
+		res = &res_real;
+		base = devm_ioremap(dev, res->start, resource_size(res));
+	} else {
+		/* Validity of resource is already checked by the AMBA core */
+		res = &adev->res;
+		base = devm_ioremap_resource(dev, res);
+	}
 	if (IS_ERR(base))
 		return PTR_ERR(base);
 
diff --git a/drivers/hwtracing/coresight/coresight-ost.c b/drivers/hwtracing/coresight/coresight-ost.c
index e40751a..63fea00 100644
--- a/drivers/hwtracing/coresight/coresight-ost.c
+++ b/drivers/hwtracing/coresight/coresight-ost.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -30,19 +30,26 @@
 
 static struct stm_drvdata *stmdrvdata;
 
-static uint32_t stm_channel_alloc(uint32_t off)
+static uint32_t stm_channel_alloc(void)
 {
 	struct stm_drvdata *drvdata = stmdrvdata;
-	uint32_t ch;
-	unsigned long flags;
+	uint32_t ch, off, num_ch_per_cpu;
+	int cpu;
 
-	spin_lock_irqsave(&drvdata->spinlock, flags);
-	do {
-		ch = find_next_zero_bit(drvdata->chs.bitmap,
-					drvdata->numsp, off);
-	} while ((ch < drvdata->numsp) &&
-		 test_and_set_bit(ch, drvdata->chs.bitmap));
-	spin_unlock_irqrestore(&drvdata->spinlock, flags);
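+	/*
+	 * Carve the stimulus ports into per-CPU slices so each CPU can
+	 * allocate from its own range without taking a global spinlock.
+	 */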
+	num_ch_per_cpu = drvdata->numsp / num_present_cpus();
+
+	cpu = get_cpu();
+
+	off = num_ch_per_cpu * cpu;
+	ch = find_next_zero_bit(drvdata->chs.bitmap,
+				drvdata->numsp, off);
+	if (unlikely(ch >= (off + num_ch_per_cpu))) {
+		put_cpu();
+		return drvdata->numsp;
+	}
+
+	set_bit(ch, drvdata->chs.bitmap);
+	put_cpu();
 
 	return ch;
 }
@@ -65,11 +72,8 @@
 static void stm_channel_free(uint32_t ch)
 {
 	struct stm_drvdata *drvdata = stmdrvdata;
-	unsigned long flags;
 
-	spin_lock_irqsave(&drvdata->spinlock, flags);
 	clear_bit(ch, drvdata->chs.bitmap);
-	spin_unlock_irqrestore(&drvdata->spinlock, flags);
 }
 
 static int stm_trace_ost_header(unsigned long ch_addr, uint32_t flags,
@@ -146,7 +150,14 @@
 	unsigned long ch_addr;
 
 	/* allocate channel and get the channel address */
-	ch = stm_channel_alloc(0);
+	ch = stm_channel_alloc();
+	if (unlikely(ch >= drvdata->numsp)) {
+		drvdata->ch_alloc_fail_count++;
+		dev_err_ratelimited(drvdata->dev,
+				    "Channel allocation failed %d\n",
+				    drvdata->ch_alloc_fail_count);
+		return 0;
+	}
 
 	ch_addr = (unsigned long)stm_channel_addr(drvdata, ch);
 
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index eb5dd84..c8f2702e 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * Description: CoreSight System Trace Macrocell driver
  *
@@ -729,7 +729,7 @@
 	numsp &= 0x1ffff;
 	if (!numsp)
 		numsp = STM_32_CHANNEL;
-	return numsp;
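+	/* Always use 32 channels regardless of what the hardware advertises. */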
+	return STM_32_CHANNEL;
 }
 
 static void stm_init_default_data(struct stm_drvdata *drvdata)
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index f9449fe..077cb45 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -497,14 +497,12 @@
 	struct coresight_desc desc = { 0 };
 	struct device_node *np = adev->dev.of_node;
 
-	if (np) {
-		pdata = of_get_coresight_platform_data(dev, np);
-		if (IS_ERR(pdata)) {
-			ret = PTR_ERR(pdata);
-			goto out;
-		}
-		adev->dev.platform_data = pdata;
+	pdata = of_get_coresight_platform_data(dev, np);
+	if (IS_ERR(pdata)) {
+		ret = PTR_ERR(pdata);
+		goto out;
 	}
+	adev->dev.platform_data = pdata;
 
 	ret = -ENOMEM;
 	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
@@ -531,10 +529,8 @@
 	drvdata->memwidth = tmc_get_memwidth(devid);
 
 	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
-		if (np)
-			ret = of_property_read_u32(np,
-						   "arm,buffer-size",
-						   &drvdata->size);
+		ret = of_property_read_u32(np, "arm,buffer-size",
+					   &drvdata->size);
 		if (ret)
 			drvdata->size = SZ_1M;
 
diff --git a/drivers/mailbox/qti-tcs.c b/drivers/mailbox/qti-tcs.c
index 43343a06..bde20b4 100644
--- a/drivers/mailbox/qti-tcs.c
+++ b/drivers/mailbox/qti-tcs.c
@@ -79,7 +79,7 @@
 #define CMD_STATUS_COMPL		BIT(16)
 
 /* Control/Hidden TCS */
-#define TCS_HIDDEN_MAX_SLOTS		3
+#define TCS_HIDDEN_MAX_SLOTS		2
 #define TCS_HIDDEN_CMD0_DRV_DATA	0x38
 #define TCS_HIDDEN_CMD_SHIFT		0x08
 
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index e7b8f49..89fc93b 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -277,6 +277,23 @@
 
 	  If unsure, say N.
 
+config DM_REQ_CRYPT
+	tristate "Req Crypt target support"
+	depends on BLK_DEV_DM
+	select XTS
+	select CRYPTO_XTS
+	---help---
+	  This request-based device-mapper target allows you to create a device
+	  that transparently encrypts the data on it. You'll need to activate
+	  the ciphers you're going to use in the cryptoapi configuration.
+	  DM_REQ_CRYPT operates on whole requests (larger payloads) to make
+	  better use of crypto hardware.
+
+	  To compile this code as a module, choose M here: the module will
+	  be called dm-req-crypt.
+
+	  If unsure, say N.
+
 config DM_SNAPSHOT
        tristate "Snapshot target"
        depends on BLK_DEV_DM
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index f26ce41..f14e2fc 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -59,6 +59,7 @@
 obj-$(CONFIG_DM_CACHE_CLEANER)	+= dm-cache-cleaner.o
 obj-$(CONFIG_DM_ERA)		+= dm-era.o
 obj-$(CONFIG_DM_LOG_WRITES)	+= dm-log-writes.o
+obj-$(CONFIG_DM_REQ_CRYPT)	+= dm-req-crypt.o
 obj-$(CONFIG_DM_ANDROID_VERITY) += dm-android-verity.o
 
 ifeq ($(CONFIG_DM_UEVENT),y)
diff --git a/drivers/md/dm-req-crypt.c b/drivers/md/dm-req-crypt.c
new file mode 100644
index 0000000..3ffe7e5
--- /dev/null
+++ b/drivers/md/dm-req-crypt.c
@@ -0,0 +1,1364 @@
+/*
+ * DM request based crypto driver
+ *
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/completion.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/mempool.h>
+#include <linux/slab.h>
+#include <linux/crypto.h>
+#include <linux/qcrypto.h>
+#include <linux/workqueue.h>
+#include <linux/backing-dev.h>
+#include <linux/atomic.h>
+#include <linux/scatterlist.h>
+#include <linux/device-mapper.h>
+#include <linux/printk.h>
+
+#include <asm/page.h>
+#include <asm/unaligned.h>
+#include <crypto/skcipher.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/hash.h>
+#include <crypto/md5.h>
+#include <crypto/algapi.h>
+#include <crypto/ice.h>
+
+#define DM_MSG_PREFIX "req-crypt"
+
+#define MAX_SG_LIST	1024
+#define REQ_DM_512_KB (512*1024)
+#define MAX_ENCRYPTION_BUFFERS 1
+#define MIN_IOS 256
+#define MIN_POOL_PAGES 32
+#define KEY_SIZE_XTS 32
+#define AES_XTS_IV_LEN 16
+#define MAX_MSM_ICE_KEY_LUT_SIZE 32
+#define SECTOR_SIZE 512
+#define MIN_CRYPTO_TRANSFER_SIZE (4 * 1024)
+
+#define DM_REQ_CRYPT_ERROR -1
+#define DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC -2
+
+/*
+ * ENCRYPTION_MODE_CRYPTO means dm-req-crypt invokes crypto operations
+ * for all requests. The operations are performed by a crypto engine
+ * plugged into the Linux kernel crypto API.
+ */
+#define DM_REQ_CRYPT_ENCRYPTION_MODE_CRYPTO 0
+/*
+ * ENCRYPTION_MODE_TRANSPARENT means dm-req-crypt does not invoke crypto
+ * operations for any request. Data is encrypted or decrypted by the
+ * Inline Crypto Engine (ICE) embedded in the storage hardware.
+ */
+#define DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT 1
+
+#define DM_REQ_CRYPT_QUEUE_SIZE 256
+
+struct req_crypt_result {
+	struct completion completion;
+	int err;
+};
+
+#define FDE_KEY_ID	0
+#define PFE_KEY_ID	1
+
+static struct dm_dev *dev;
+static struct kmem_cache *_req_crypt_io_pool;
+static struct kmem_cache *_req_dm_scatterlist_pool;
+static sector_t start_sector_orig;
+static struct workqueue_struct *req_crypt_queue;
+static struct workqueue_struct *req_crypt_split_io_queue;
+static mempool_t *req_io_pool;
+static mempool_t *req_page_pool;
+static mempool_t *req_scatterlist_pool;
+static bool is_fde_enabled;
+static struct crypto_skcipher *tfm;
+static unsigned int encryption_mode;
+static struct ice_crypto_setting *ice_settings;
+
+unsigned int num_engines;
+unsigned int num_engines_fde, fde_cursor;
+unsigned int num_engines_pfe, pfe_cursor;
+struct crypto_engine_entry *fde_eng, *pfe_eng;
+DEFINE_MUTEX(engine_list_mutex);
+
+struct req_dm_crypt_io {
+	struct ice_crypto_setting ice_settings;
+	struct work_struct work;
+	struct request *cloned_request;
+	int error;
+	atomic_t pending;
+	struct timespec start_time;
+	bool should_encrypt;
+	bool should_decrypt;
+	u32 key_id;
+};
+
+struct req_dm_split_req_io {
+	struct work_struct work;
+	struct scatterlist *req_split_sg_read;
+	struct req_crypt_result result;
+	struct crypto_engine_entry *engine;
+	u8 IV[AES_XTS_IV_LEN];
+	int size;
+	struct request *clone;
+};
+
+#ifdef CONFIG_FIPS_ENABLE
+static struct qcrypto_func_set dm_qcrypto_func;
+#else
+static struct qcrypto_func_set dm_qcrypto_func = {
+		qcrypto_cipher_set_device_hw,
+		qcrypto_cipher_set_flag,
+		qcrypto_get_num_engines,
+		qcrypto_get_engine_list
+};
+#endif
+static void req_crypt_cipher_complete
+		(struct crypto_async_request *req, int err);
+static void req_cryptd_split_req_queue_cb
+		(struct work_struct *work);
+static void req_cryptd_split_req_queue
+		(struct req_dm_split_req_io *io);
+static void req_crypt_split_io_complete
+		(struct req_crypt_result *res, int err);
+
+static bool req_crypt_should_encrypt(struct req_dm_crypt_io *req)
+{
+	int ret = 0;
+	bool should_encrypt = false;
+	struct bio *bio = NULL;
+	bool is_encrypted = false;
+	bool is_inplace = false;
+
+	if (!req || !req->cloned_request || !req->cloned_request->bio)
+		return false;
+
+	if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT)
+		return false;
+	bio = req->cloned_request->bio;
+
+	/* req->key_id = key_id; @todo support more than 1 pfe key */
+	if ((ret == 0) && (is_encrypted || is_inplace)) {
+		should_encrypt = true;
+		req->key_id = PFE_KEY_ID;
+	} else if (is_fde_enabled) {
+		should_encrypt = true;
+		req->key_id = FDE_KEY_ID;
+	}
+
+	return should_encrypt;
+}
+
+static bool req_crypt_should_decrypt(struct req_dm_crypt_io *req)
+{
+	int ret = 0;
+	bool should_decrypt = false;
+	struct bio *bio = NULL;
+	bool is_encrypted = false;
+	bool is_inplace = false;
+
+	if (!req || !req->cloned_request || !req->cloned_request->bio)
+		return false;
+	if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT)
+		return false;
+
+	bio = req->cloned_request->bio;
+
+	/* req->key_id = key_id; @todo support more than 1 pfe key */
+	if ((ret == 0) && (is_encrypted && !is_inplace)) {
+		should_decrypt = true;
+		req->key_id = PFE_KEY_ID;
+	} else if (is_fde_enabled) {
+		should_decrypt = true;
+		req->key_id = FDE_KEY_ID;
+	}
+
+	return should_decrypt;
+}
+
+static void req_crypt_inc_pending(struct req_dm_crypt_io *io)
+{
+	atomic_inc(&io->pending);
+}
+
+static void req_crypt_dec_pending_encrypt(struct req_dm_crypt_io *io)
+{
+	int error = 0;
+	struct request *clone = NULL;
+
+	if (io) {
+		error = io->error;
+		if (io->cloned_request) {
+			clone = io->cloned_request;
+		} else {
+			DMERR("%s io->cloned_request is NULL\n", __func__);
+			/*
+			 * If Clone is NULL we cannot do anything,
+			 * this should never happen
+			 */
+			WARN_ON(1);
+		}
+	} else {
+		DMERR("%s io is NULL\n", __func__);
+		/*
+		 * If io is NULL we cannot do anything,
+		 * this should never happen
+		 */
+		WARN_ON(1);
+	}
+
+	atomic_dec(&io->pending);
+
+	if (error < 0) {
+		dm_kill_unmapped_request(clone, error);
+		mempool_free(io, req_io_pool);
+	} else
+		dm_dispatch_request(clone);
+}
+
+static void req_crypt_dec_pending_decrypt(struct req_dm_crypt_io *io)
+{
+	int error = 0;
+	struct request *clone = NULL;
+
+	if (io) {
+		error = io->error;
+		if (io->cloned_request) {
+			clone = io->cloned_request;
+		} else {
+			DMERR("%s io->cloned_request is NULL\n", __func__);
+			/*
+			 * If Clone is NULL we cannot do anything,
+			 * this should never happen
+			 */
+			WARN_ON(1);
+		}
+	} else {
+		DMERR("%s io is NULL\n", __func__);
+		/*
+		 * If io is NULL we cannot do anything,
+		 * this should never happen
+		 */
+		WARN_ON(1);
+	}
+
+	/* Should never get here if io or Clone is NULL */
+	dm_end_request(clone, error);
+	atomic_dec(&io->pending);
+	mempool_free(io, req_io_pool);
+}
+
+/*
+ * The callback that will be called by the worker queue to perform Decryption
+ * for reads and use the dm function to complete the bios and requests.
+ */
+static void req_cryptd_crypt_read_convert(struct req_dm_crypt_io *io)
+{
+	struct request *clone = NULL;
+	int error = DM_REQ_CRYPT_ERROR;
+	int total_sg_len = 0, total_bytes_in_req = 0, temp_size = 0, i = 0;
+	struct scatterlist *sg = NULL;
+	struct scatterlist *req_sg_read = NULL;
+
+	unsigned int engine_list_total = 0;
+	struct crypto_engine_entry *curr_engine_list = NULL;
+	bool split_transfers = false;
+	sector_t tempiv;
+	struct req_dm_split_req_io *split_io = NULL;
+
+	if (io) {
+		error = io->error;
+		if (io->cloned_request) {
+			clone = io->cloned_request;
+		} else {
+			DMERR("%s io->cloned_request is NULL\n", __func__);
+			error = DM_REQ_CRYPT_ERROR;
+			goto submit_request;
+		}
+	} else {
+		DMERR("%s io is NULL\n", __func__);
+		error = DM_REQ_CRYPT_ERROR;
+		goto submit_request;
+	}
+
+	req_crypt_inc_pending(io);
+
+	mutex_lock(&engine_list_mutex);
+
+	engine_list_total = (io->key_id == FDE_KEY_ID ? num_engines_fde :
+						   (io->key_id == PFE_KEY_ID ?
+							num_engines_pfe : 0));
+
+	curr_engine_list = (io->key_id == FDE_KEY_ID ? fde_eng :
+						   (io->key_id == PFE_KEY_ID ?
+							pfe_eng : NULL));
+
+	mutex_unlock(&engine_list_mutex);
+
+	req_sg_read = (struct scatterlist *)mempool_alloc(req_scatterlist_pool,
+								GFP_KERNEL);
+	if (!req_sg_read) {
+		DMERR("%s req_sg_read allocation failed\n", __func__);
+		error = DM_REQ_CRYPT_ERROR;
+		goto skcipher_req_alloc_failure;
+	}
+	memset(req_sg_read, 0, sizeof(struct scatterlist) * MAX_SG_LIST);
+
+	total_sg_len = blk_rq_map_sg_no_cluster(clone->q, clone, req_sg_read);
+	if ((total_sg_len <= 0) || (total_sg_len > MAX_SG_LIST)) {
+		DMERR("%s Request Error %d", __func__, total_sg_len);
+		error = DM_REQ_CRYPT_ERROR;
+		goto skcipher_req_alloc_failure;
+	}
+
+	total_bytes_in_req = clone->__data_len;
+	if (total_bytes_in_req > REQ_DM_512_KB) {
+		DMERR("%s total_bytes_in_req > 512 KB %d",
+				__func__, total_bytes_in_req);
+		error = DM_REQ_CRYPT_ERROR;
+		goto skcipher_req_alloc_failure;
+	}
+
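+	/*
+	 * Split the transfer across engines only when every engine would get
+	 * at least MIN_CRYPTO_TRANSFER_SIZE bytes of work.
+	 */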
+	if ((clone->__data_len >= (MIN_CRYPTO_TRANSFER_SIZE *
+		engine_list_total))
+		&& (engine_list_total > 1))
+		split_transfers = true;
+
+	if (split_transfers) {
+		split_io = kzalloc(sizeof(struct req_dm_split_req_io)
+				* engine_list_total, GFP_KERNEL);
+		if (!split_io) {
+			DMERR("%s split_io allocation failed\n", __func__);
+			error = DM_REQ_CRYPT_ERROR;
+			goto skcipher_req_alloc_failure;
+		}
+
+		split_io[0].req_split_sg_read = sg = req_sg_read;
+		split_io[engine_list_total - 1].size = total_bytes_in_req;
+		for (i = 0; i < (engine_list_total); i++) {
+			while ((sg) && i < (engine_list_total - 1)) {
+				split_io[i].size += sg->length;
+				split_io[engine_list_total - 1].size -=
+						sg->length;
+				if (split_io[i].size >=
+						(total_bytes_in_req /
+							engine_list_total)) {
+					split_io[i + 1].req_split_sg_read =
+							sg_next(sg);
+					sg_mark_end(sg);
+					break;
+				}
+				sg = sg_next(sg);
+			}
+			split_io[i].engine = &curr_engine_list[i];
+			init_completion(&split_io[i].result.completion);
+			memset(&split_io[i].IV, 0, AES_XTS_IV_LEN);
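+			/*
+			 * XTS tweak = request start sector plus the sectors
+			 * already covered by earlier splits.
+			 */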
+			tempiv = clone->__sector + (temp_size / SECTOR_SIZE);
+			memcpy(&split_io[i].IV, &tempiv, sizeof(sector_t));
+			temp_size += split_io[i].size;
+			split_io[i].clone = clone;
+			req_cryptd_split_req_queue(&split_io[i]);
+		}
+	} else {
+		split_io = kzalloc(sizeof(struct req_dm_split_req_io),
+				GFP_KERNEL);
+		if (!split_io) {
+			DMERR("%s split_io allocation failed\n", __func__);
+			error = DM_REQ_CRYPT_ERROR;
+			goto skcipher_req_alloc_failure;
+		}
+		split_io->engine = &curr_engine_list[0];
+		init_completion(&split_io->result.completion);
+		memcpy(split_io->IV, &clone->__sector, sizeof(sector_t));
+		split_io->req_split_sg_read = req_sg_read;
+		split_io->size = total_bytes_in_req;
+		split_io->clone = clone;
+		req_cryptd_split_req_queue(split_io);
+	}
+
+	if (!split_transfers) {
+		wait_for_completion_interruptible(&split_io->result.completion);
+		if (split_io->result.err) {
+			DMERR("%s error = %d for request\n",
+				 __func__, split_io->result.err);
+			error = DM_REQ_CRYPT_ERROR;
+			goto skcipher_req_alloc_failure;
+		}
+	} else {
+		for (i = 0; i < (engine_list_total); i++) {
+			wait_for_completion_interruptible(
+					&split_io[i].result.completion);
+			if (split_io[i].result.err) {
+				DMERR("%s error = %d for request %d\n",
+					 __func__, split_io[i].result.err, i);
+				error = DM_REQ_CRYPT_ERROR;
+				goto skcipher_req_alloc_failure;
+			}
+		}
+	}
+	error = 0;
+skcipher_req_alloc_failure:
+
+	mempool_free(req_sg_read, req_scatterlist_pool);
+	kfree(split_io);
+submit_request:
+	if (io)
+		io->error = error;
+	req_crypt_dec_pending_decrypt(io);
+}
+
+/*
+ * This callback is called by the worker queue to perform non-decrypt reads
+ * and use the dm function to complete the bios and requests.
+ */
+static void req_cryptd_crypt_read_plain(struct req_dm_crypt_io *io)
+{
+	struct request *clone = NULL;
+	int error = 0;
+
+	if (!io || !io->cloned_request) {
+		DMERR("%s io is invalid\n", __func__);
+		WARN_ON(1); /* should not happen */
+	}
+
+	clone = io->cloned_request;
+
+	dm_end_request(clone, error);
+	mempool_free(io, req_io_pool);
+}
+
+/*
+ * The callback that will be called by the worker queue to perform Encryption
+ * for writes and submit the request using the elevator.
+ */
+static void req_cryptd_crypt_write_convert(struct req_dm_crypt_io *io)
+{
+	struct request *clone = NULL;
+	struct bio *bio_src = NULL;
+	unsigned int total_sg_len_req_in = 0, total_sg_len_req_out = 0,
+		total_bytes_in_req = 0, error = DM_MAPIO_REMAPPED, rc = 0;
+	struct req_iterator iter;
+	struct req_iterator iter1;
+	struct skcipher_request *req = NULL;
+	struct req_crypt_result result;
+	struct bio_vec bvec;
+	struct scatterlist *req_sg_in = NULL;
+	struct scatterlist *req_sg_out = NULL;
+	int copy_bio_sector_to_req = 0;
+	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
+	struct page *page = NULL;
+	u8 IV[AES_XTS_IV_LEN];
+	int remaining_size = 0, err = 0;
+	struct crypto_engine_entry engine;
+	unsigned int engine_list_total = 0;
+	struct crypto_engine_entry *curr_engine_list = NULL;
+	unsigned int *engine_cursor = NULL;
+
+	if (io) {
+		if (io->cloned_request) {
+			clone = io->cloned_request;
+		} else {
+			DMERR("%s io->cloned_request is NULL\n", __func__);
+			error = DM_REQ_CRYPT_ERROR;
+			goto submit_request;
+		}
+	} else {
+		DMERR("%s io is NULL\n", __func__);
+		error = DM_REQ_CRYPT_ERROR;
+		goto submit_request;
+	}
+
+	req_crypt_inc_pending(io);
+
+	req = skcipher_request_alloc(tfm, GFP_KERNEL);
+	if (!req) {
+		DMERR("%s skcipher request allocation failed\n", __func__);
+		error = DM_REQ_CRYPT_ERROR;
+		goto skcipher_req_alloc_failure;
+	}
+
+	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				req_crypt_cipher_complete, &result);
+
+	mutex_lock(&engine_list_mutex);
+	engine_list_total = (io->key_id == FDE_KEY_ID ? num_engines_fde :
+						   (io->key_id == PFE_KEY_ID ?
+							num_engines_pfe : 0));
+
+	curr_engine_list = (io->key_id == FDE_KEY_ID ? fde_eng :
+						(io->key_id == PFE_KEY_ID ?
+						pfe_eng : NULL));
+
+	engine_cursor = (io->key_id == FDE_KEY_ID ? &fde_cursor :
+					(io->key_id == PFE_KEY_ID ? &pfe_cursor
+					: NULL));
+	if ((engine_list_total < 1) || (curr_engine_list == NULL) ||
+				(engine_cursor == NULL)) {
+		DMERR("%s Unknown Key ID!\n", __func__);
+		error = DM_REQ_CRYPT_ERROR;
+		mutex_unlock(&engine_list_mutex);
+		goto skcipher_req_alloc_failure;
+	}
+
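+	/* Round-robin across the engines that serve this key. */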
+	engine = curr_engine_list[*engine_cursor];
+	(*engine_cursor)++;
+	(*engine_cursor) %= engine_list_total;
+
+	err = (dm_qcrypto_func.cipher_set)(req, engine.ce_device,
+				   engine.hw_instance);
+	if (err) {
+		DMERR("%s qcrypto_cipher_set_device_hw failed with err %d\n",
+				__func__, err);
+		mutex_unlock(&engine_list_mutex);
+		goto skcipher_req_alloc_failure;
+	}
+	mutex_unlock(&engine_list_mutex);
+
+	init_completion(&result.completion);
+
+	(dm_qcrypto_func.cipher_flag)(req,
+		QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B);
+	crypto_skcipher_clear_flags(tfm, ~0);
+	crypto_skcipher_setkey(tfm, NULL, KEY_SIZE_XTS);
+
+	req_sg_in = (struct scatterlist *)mempool_alloc(req_scatterlist_pool,
+								GFP_KERNEL);
+	if (!req_sg_in) {
+		DMERR("%s req_sg_in allocation failed\n", __func__);
+		error = DM_REQ_CRYPT_ERROR;
+		goto skcipher_req_alloc_failure;
+	}
+	memset(req_sg_in, 0, sizeof(struct scatterlist) * MAX_SG_LIST);
+
+	req_sg_out = (struct scatterlist *)mempool_alloc(req_scatterlist_pool,
+								GFP_KERNEL);
+	if (!req_sg_out) {
+		DMERR("%s req_sg_out allocation failed\n", __func__);
+		error = DM_REQ_CRYPT_ERROR;
+		goto skcipher_req_alloc_failure;
+	}
+	memset(req_sg_out, 0, sizeof(struct scatterlist) * MAX_SG_LIST);
+
+	total_sg_len_req_in = blk_rq_map_sg(clone->q, clone, req_sg_in);
+	if ((total_sg_len_req_in <= 0) ||
+			(total_sg_len_req_in > MAX_SG_LIST)) {
+		DMERR("%s Request Error %d", __func__, total_sg_len_req_in);
+		error = DM_REQ_CRYPT_ERROR;
+		goto skcipher_req_alloc_failure;
+	}
+
+	total_bytes_in_req = clone->__data_len;
+	if (total_bytes_in_req > REQ_DM_512_KB) {
+		DMERR("%s total_bytes_in_req > 512 KB %d",
+				__func__, total_bytes_in_req);
+		error = DM_REQ_CRYPT_ERROR;
+		goto skcipher_req_alloc_failure;
+	}
+
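+	/*
+	 * Swap each source page for a freshly allocated bounce page so the
+	 * ciphertext never overwrites the caller's pages: plaintext is read
+	 * via req_sg_in, ciphertext is written via req_sg_out.
+	 */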
+	rq_for_each_segment(bvec, clone, iter) {
+		if (bvec.bv_len > remaining_size) {
+			page = NULL;
+			while (page == NULL) {
+				page = mempool_alloc(req_page_pool, gfp_mask);
+				if (!page) {
+					DMERR("%s Crypt page alloc failed",
+							__func__);
+					congestion_wait(BLK_RW_ASYNC, HZ/100);
+				}
+			}
+
+			bvec.bv_page = page;
+			bvec.bv_offset = 0;
+			remaining_size = PAGE_SIZE - bvec.bv_len;
+			if (remaining_size < 0)
+				WARN_ON(1);
+		} else {
+			bvec.bv_page = page;
+			bvec.bv_offset = PAGE_SIZE - remaining_size;
+			remaining_size -= bvec.bv_len;
+		}
+	}
+
+	total_sg_len_req_out = blk_rq_map_sg(clone->q, clone, req_sg_out);
+	if ((total_sg_len_req_out <= 0) ||
+			(total_sg_len_req_out > MAX_SG_LIST)) {
+		DMERR("%s Request Error %d", __func__, total_sg_len_req_out);
+		error = DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC;
+		goto skcipher_req_alloc_failure;
+	}
+
+	memset(IV, 0, AES_XTS_IV_LEN);
+	memcpy(IV, &clone->__sector, sizeof(sector_t));
+
+	skcipher_request_set_crypt(req, req_sg_in, req_sg_out,
+			total_bytes_in_req, (void *) IV);
+
+	rc = crypto_skcipher_encrypt(req);
+
+	switch (rc) {
+	case 0:
+		break;
+
+	case -EBUSY:
+		/*
+		 * Lets make this synchronous request by waiting on
+		 * in progress as well
+		 */
+	case -EINPROGRESS:
+		wait_for_completion_interruptible(&result.completion);
+		if (result.err) {
+			DMERR("%s error = %d encrypting the request\n",
+				 __func__, result.err);
+			error = DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC;
+			goto skcipher_req_alloc_failure;
+		}
+		break;
+
+	default:
+		error = DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC;
+		goto skcipher_req_alloc_failure;
+	}
+
+	__rq_for_each_bio(bio_src, clone) {
+		if (copy_bio_sector_to_req == 0)
+			copy_bio_sector_to_req++;
+		blk_queue_bounce(clone->q, &bio_src);
+	}
+
+	/*
+	 * Recalculate the phy_segments as we allocate new pages
+	 * This is used by storage driver to fill the sg list.
+	 */
+	blk_recalc_rq_segments(clone);
+
+skcipher_req_alloc_failure:
+	if (req)
+		skcipher_request_free(req);
+
+	if (error == DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC) {
+		rq_for_each_segment(bvec, clone, iter1) {
+			if (bvec.bv_offset == 0) {
+				mempool_free(bvec.bv_page, req_page_pool);
+				bvec.bv_page = NULL;
+			} else
+				bvec.bv_page = NULL;
+		}
+	}
+
+	mempool_free(req_sg_in, req_scatterlist_pool);
+	mempool_free(req_sg_out, req_scatterlist_pool);
+submit_request:
+	if (io)
+		io->error = error;
+	req_crypt_dec_pending_encrypt(io);
+}
+
+/*
+ * This callback is called by the worker queue to perform non-encrypted writes
+ * and submit the request using the elevator.
+ */
+static void req_cryptd_crypt_write_plain(struct req_dm_crypt_io *io)
+{
+	struct request *clone = NULL;
+
+	if (!io || !io->cloned_request) {
+		DMERR("%s io is invalid\n", __func__);
+		WARN_ON(1); /* should not happen */
+	}
+
+	clone = io->cloned_request;
+	io->error = 0;
+	dm_dispatch_request(clone);
+}
+
+/* Queue callback function that will get triggered */
+static void req_cryptd_crypt(struct work_struct *work)
+{
+	struct req_dm_crypt_io *io =
+			container_of(work, struct req_dm_crypt_io, work);
+
+	if (rq_data_dir(io->cloned_request) == WRITE) {
+		if (io->should_encrypt)
+			req_cryptd_crypt_write_convert(io);
+		else
+			req_cryptd_crypt_write_plain(io);
+	} else if (rq_data_dir(io->cloned_request) == READ) {
+		if (io->should_decrypt)
+			req_cryptd_crypt_read_convert(io);
+		else
+			req_cryptd_crypt_read_plain(io);
+	} else {
+		DMERR("%s received request with unknown direction for Clone 0x%p\n",
+				__func__, io->cloned_request);
+	}
+}
+
+static void req_cryptd_split_req_queue_cb(struct work_struct *work)
+{
+	struct req_dm_split_req_io *io =
+			container_of(work, struct req_dm_split_req_io, work);
+	struct skcipher_request *req = NULL;
+	struct req_crypt_result result;
+	int err = 0;
+	struct crypto_engine_entry *engine = NULL;
+
+	if ((!io) || (!io->req_split_sg_read) || (!io->engine)) {
+		DMERR("%s Input invalid\n", __func__);
+		err = DM_REQ_CRYPT_ERROR;
+		/* If io is not populated this should not be called */
+		WARN_ON(1);
+	}
+	req = skcipher_request_alloc(tfm, GFP_KERNEL);
+	if (!req) {
+		DMERR("%s skcipher request allocation failed\n", __func__);
+		err = DM_REQ_CRYPT_ERROR;
+		goto skcipher_req_alloc_failure;
+	}
+
+	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+					req_crypt_cipher_complete, &result);
+
+	engine = io->engine;
+
+	err = (dm_qcrypto_func.cipher_set)(req, engine->ce_device,
+			engine->hw_instance);
+	if (err) {
+		DMERR("%s qcrypto_cipher_set_device_hw failed with err %d\n",
+				__func__, err);
+		goto skcipher_req_alloc_failure;
+	}
+	init_completion(&result.completion);
+	(dm_qcrypto_func.cipher_flag)(req,
+		QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B);
+
+	crypto_skcipher_clear_flags(tfm, ~0);
+	crypto_skcipher_setkey(tfm, NULL, KEY_SIZE_XTS);
+
+	skcipher_request_set_crypt(req, io->req_split_sg_read,
+			io->req_split_sg_read, io->size, (void *) io->IV);
+
+	err = crypto_skcipher_decrypt(req);
+	switch (err) {
+	case 0:
+		break;
+
+	case -EBUSY:
+		/*
+		 * Lets make this synchronous request by waiting on
+		 * in progress as well
+		 */
+	case -EINPROGRESS:
+		wait_for_completion_io(&result.completion);
+		if (result.err) {
+			DMERR("%s error = %d decrypting the request\n",
+				 __func__, result.err);
+			err = DM_REQ_CRYPT_ERROR;
+			goto skcipher_req_alloc_failure;
+		}
+		break;
+
+	default:
+		err = DM_REQ_CRYPT_ERROR;
+		goto skcipher_req_alloc_failure;
+	}
+	err = 0;
+skcipher_req_alloc_failure:
+	if (req)
+		skcipher_request_free(req);
+
+	req_crypt_split_io_complete(&io->result, err);
+}
+
+static void req_cryptd_split_req_queue(struct req_dm_split_req_io *io)
+{
+	INIT_WORK(&io->work, req_cryptd_split_req_queue_cb);
+	queue_work(req_crypt_split_io_queue, &io->work);
+}
+
+static void req_cryptd_queue_crypt(struct req_dm_crypt_io *io)
+{
+	INIT_WORK(&io->work, req_cryptd_crypt);
+	queue_work(req_crypt_queue, &io->work);
+}
+
+/*
+ * Cipher complete callback, this is triggered by the Linux crypto api once
+ * the operation is done. This signals the waiting thread that the crypto
+ * operation is complete.
+ */
+static void req_crypt_cipher_complete(struct crypto_async_request *req, int err)
+{
+	struct req_crypt_result *res = req->data;
+
+	if (err == -EINPROGRESS)
+		return;
+
+	res->err = err;
+	complete(&res->completion);
+}
+
+static void req_crypt_split_io_complete(struct req_crypt_result *res, int err)
+{
+	if (err == -EINPROGRESS)
+		return;
+
+	res->err = err;
+	complete(&res->completion);
+}
+
+/*
+ * If bio->bi_dev is a partition, remap the location
+ */
+static inline void req_crypt_blk_partition_remap(struct bio *bio)
+{
+	struct block_device *bdev = bio->bi_bdev;
+
+	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
+		struct hd_struct *p = bdev->bd_part;
+		/*
+		 * Check for integer overflow, should never happen.
+		 */
+		if (p->start_sect > (UINT_MAX - bio->bi_iter.bi_sector))
+			WARN_ON(1);
+
+		bio->bi_iter.bi_sector += p->start_sect;
+		bio->bi_bdev = bdev->bd_contains;
+	}
+}
+
+/*
+ * The endio function is called from ksoftirqd context (atomic).
+ * For write operations, the new pages allocated from the mempool
+ * are freed and returned. For read operations, decryption is
+ * required; since this is called in an atomic context, the
+ * request is sent to a worker queue to complete decryption and
+ * free the request once done.
+ */
+static int req_crypt_endio(struct dm_target *ti, struct request *clone,
+			    int error, union map_info *map_context)
+{
+	int err = 0;
+	struct req_iterator iter1;
+	struct bio_vec bvec;
+	struct req_dm_crypt_io *req_io = map_context->ptr;
+
+	/* If it is for ICE, free up req_io and return */
+	if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) {
+		mempool_free(req_io, req_io_pool);
+		err = error;
+		goto submit_request;
+	}
+
+	if (rq_data_dir(clone) == WRITE) {
+		rq_for_each_segment(bvec, clone, iter1) {
+			if (req_io->should_encrypt && bvec.bv_offset == 0) {
+				mempool_free(bvec.bv_page, req_page_pool);
+				bvec.bv_page = NULL;
+			} else
+				bvec.bv_page = NULL;
+		}
+		mempool_free(req_io, req_io_pool);
+		goto submit_request;
+	} else if (rq_data_dir(clone) == READ) {
+		req_io->error = error;
+		req_cryptd_queue_crypt(req_io);
+		err = DM_ENDIO_INCOMPLETE;
+		goto submit_request;
+	}
+
+submit_request:
+	return err;
+}
+
+/*
+ * This function is called with interrupts disabled.
+ * It remaps the clone for the underlying device. If it is a write
+ * request, it calls into the worker queue to encrypt the data and
+ * submit the request directly using the elevator. For a read request
+ * no pre-processing is required; the request is returned to dm once
+ * mapping is done.
+ */
+static int req_crypt_map(struct dm_target *ti, struct request *clone,
+			 union map_info *map_context)
+{
+	struct req_dm_crypt_io *req_io = NULL;
+	int error = DM_REQ_CRYPT_ERROR, copy_bio_sector_to_req = 0;
+	struct bio *bio_src = NULL;
+	gfp_t gfp_flag = GFP_KERNEL;
+
+	if (in_interrupt() || irqs_disabled())
+		gfp_flag = GFP_NOWAIT;
+
+	req_io = mempool_alloc(req_io_pool, gfp_flag);
+	if (!req_io) {
+		WARN_ON(1);
+		error = DM_REQ_CRYPT_ERROR;
+		goto submit_request;
+	}
+
+	/*
+	 * Save the clone in the req_io; the callback to the worker
+	 * queue will get the req_io.
+	 */
+	req_io->cloned_request = clone;
+	map_context->ptr = req_io;
+	atomic_set(&req_io->pending, 0);
+
+	if (rq_data_dir(clone) == WRITE)
+		req_io->should_encrypt = req_crypt_should_encrypt(req_io);
+	if (rq_data_dir(clone) == READ)
+		req_io->should_decrypt = req_crypt_should_decrypt(req_io);
+
+	/* Get the queue of the underlying original device */
+	clone->q = bdev_get_queue(dev->bdev);
+	clone->rq_disk = dev->bdev->bd_disk;
+
+	__rq_for_each_bio(bio_src, clone) {
+		bio_src->bi_bdev = dev->bdev;
+		/*
+		 * Currently, once the underlying device driver completes the
+		 * request, it calls into the block layer, which completes the
+		 * bios (clones) and then the cloned request. This is
+		 * undesirable for req-dm-crypt, hence the BIO_DONTFREE flag:
+		 * it ensures that the block layer does not complete the
+		 * cloned bios before the request completes. When the crypt
+		 * endio is called, post-processing is done and then the dm
+		 * layer completes the bios (clones) and frees them.
+		 */
+		if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT)
+			bio_src->bi_flags |= 1 << BIO_INLINECRYPT;
+		else
+			bio_src->bi_flags |= 1 << BIO_DONTFREE;
+
+		/*
+		 * If this device has partitions, remap block n
+		 * of partition p to block n+start(p) of the disk.
+		 */
+		req_crypt_blk_partition_remap(bio_src);
+		if (copy_bio_sector_to_req == 0) {
+			clone->__sector = bio_src->bi_iter.bi_sector;
+			copy_bio_sector_to_req++;
+		}
+		blk_queue_bounce(clone->q, &bio_src);
+	}
+
+	if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) {
+		/* Set all crypto parameters for inline crypto engine */
+		memcpy(&req_io->ice_settings, ice_settings,
+					sizeof(struct ice_crypto_setting));
+	} else {
+		/*
+		 * ICE accepts any key_index >= 0. If a chip has both ICE and
+		 * GPCE and wants to use GPCE, there could be an issue: the
+		 * storage driver sends all requests to the ICE driver, and if
+		 * it sees key_index as 0 it assumes the request is for ICE
+		 * even when it is not. Hence set an invalid key index by
+		 * default.
+		 */
+		req_io->ice_settings.key_index = -1;
+	}
+
+	if (rq_data_dir(clone) == READ ||
+		encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) {
+		error = DM_MAPIO_REMAPPED;
+		goto submit_request;
+	} else if (rq_data_dir(clone) == WRITE) {
+		req_cryptd_queue_crypt(req_io);
+		error = DM_MAPIO_SUBMITTED;
+		goto submit_request;
+	}
+
+submit_request:
+	return error;
+
+}
+
+static void deconfigure_qcrypto(void)
+{
+	mempool_destroy(req_page_pool);
+	req_page_pool = NULL;
+
+	mempool_destroy(req_scatterlist_pool);
+	req_scatterlist_pool = NULL;
+
+	if (req_crypt_split_io_queue) {
+		destroy_workqueue(req_crypt_split_io_queue);
+		req_crypt_split_io_queue = NULL;
+	}
+	if (req_crypt_queue) {
+		destroy_workqueue(req_crypt_queue);
+		req_crypt_queue = NULL;
+	}
+
+	kmem_cache_destroy(_req_dm_scatterlist_pool);
+
+	mutex_lock(&engine_list_mutex);
+	kfree(pfe_eng);
+	pfe_eng = NULL;
+	kfree(fde_eng);
+	fde_eng = NULL;
+	mutex_unlock(&engine_list_mutex);
+
+	if (tfm) {
+		crypto_free_skcipher(tfm);
+		tfm = NULL;
+	}
+}
+
+static void req_crypt_dtr(struct dm_target *ti)
+{
+	DMDEBUG("dm-req-crypt Destructor.\n");
+
+	mempool_destroy(req_io_pool);
+	req_io_pool = NULL;
+
+	if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) {
+		kfree(ice_settings);
+		ice_settings = NULL;
+	} else {
+		deconfigure_qcrypto();
+	}
+
+	kmem_cache_destroy(_req_crypt_io_pool);
+
+	if (dev) {
+		dm_put_device(ti, dev);
+		dev = NULL;
+	}
+}
+
+static int configure_qcrypto(void)
+{
+	struct crypto_engine_entry *eng_list = NULL;
+	struct block_device *bdev = NULL;
+	int err = DM_REQ_CRYPT_ERROR, i;
+	struct request_queue *q = NULL;
+
+	bdev = dev->bdev;
+	q = bdev_get_queue(bdev);
+	blk_queue_max_hw_sectors(q, DM_REQ_CRYPT_QUEUE_SIZE);
+
+	/* Allocate the crypto alloc blk cipher and keep the handle */
+	tfm = crypto_alloc_skcipher("qcom-xts(aes)", 0, 0);
+	if (IS_ERR(tfm)) {
+		DMERR("%s skcipher tfm allocation failed\n", __func__);
+		tfm = NULL;
+		goto exit_err;
+	}
+
+	num_engines_fde = num_engines_pfe = 0;
+
+	mutex_lock(&engine_list_mutex);
+	num_engines = (dm_qcrypto_func.get_num_engines)();
+	if (!num_engines) {
+		DMERR("%s qcrypto_get_num_engines failed\n", __func__);
+		err = DM_REQ_CRYPT_ERROR;
+		mutex_unlock(&engine_list_mutex);
+		goto exit_err;
+	}
+
+	eng_list = kcalloc(num_engines, sizeof(*eng_list), GFP_KERNEL);
+	if (eng_list == NULL) {
+		DMERR("%s engine list allocation failed\n", __func__);
+		err = DM_REQ_CRYPT_ERROR;
+		mutex_unlock(&engine_list_mutex);
+		goto exit_err;
+	}
+
+	(dm_qcrypto_func.get_engine_list)(num_engines, eng_list);
+
+	for (i = 0; i < num_engines; i++) {
+		if (eng_list[i].ce_device == FDE_KEY_ID)
+			num_engines_fde++;
+		if (eng_list[i].ce_device == PFE_KEY_ID)
+			num_engines_pfe++;
+	}
+
+	fde_eng = kcalloc(num_engines_fde, sizeof(*fde_eng), GFP_KERNEL);
+	if (fde_eng == NULL) {
+		DMERR("%s fde engine list allocation failed\n", __func__);
+		mutex_unlock(&engine_list_mutex);
+		goto exit_err;
+	}
+
+	pfe_eng = kcalloc(num_engines_pfe, sizeof(*pfe_eng), GFP_KERNEL);
+	if (pfe_eng == NULL) {
+		DMERR("%s pfe engine list allocation failed\n", __func__);
+		mutex_unlock(&engine_list_mutex);
+		goto exit_err;
+	}
+
+	fde_cursor = 0;
+	pfe_cursor = 0;
+
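+	/* Partition the engine list into FDE and PFE sub-lists by ce_device. */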
+	for (i = 0; i < num_engines; i++) {
+		if (eng_list[i].ce_device == FDE_KEY_ID)
+			fde_eng[fde_cursor++] = eng_list[i];
+		if (eng_list[i].ce_device == PFE_KEY_ID)
+			pfe_eng[pfe_cursor++] = eng_list[i];
+	}
+
+	fde_cursor = 0;
+	pfe_cursor = 0;
+	mutex_unlock(&engine_list_mutex);
+
+	_req_dm_scatterlist_pool = kmem_cache_create("req_dm_scatterlist",
+				sizeof(struct scatterlist) * MAX_SG_LIST,
+				 __alignof__(struct scatterlist), 0, NULL);
+	if (!_req_dm_scatterlist_pool)
+		goto exit_err;
+
+	req_crypt_queue = alloc_workqueue("req_cryptd",
+					WQ_UNBOUND |
+					WQ_CPU_INTENSIVE |
+					WQ_MEM_RECLAIM,
+					0);
+	if (!req_crypt_queue) {
+		DMERR("%s req_crypt_queue not allocated\n", __func__);
+		goto exit_err;
+	}
+
+	req_crypt_split_io_queue = alloc_workqueue("req_crypt_split",
+					WQ_UNBOUND |
+					WQ_CPU_INTENSIVE |
+					WQ_MEM_RECLAIM,
+					0);
+	if (!req_crypt_split_io_queue) {
+		DMERR("%s req_crypt_split_io_queue not allocated\n", __func__);
+		goto exit_err;
+	}
+	req_scatterlist_pool = mempool_create_slab_pool(MIN_IOS,
+					_req_dm_scatterlist_pool);
+	if (!req_scatterlist_pool) {
+		DMERR("%s req_scatterlist_pool is not allocated\n", __func__);
+		err = -ENOMEM;
+		goto exit_err;
+	}
+
+	req_page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
+	if (!req_page_pool) {
+		DMERR("%s req_page_pool not allocated\n", __func__);
+		goto exit_err;
+	}
+
+	err = 0;
+
+exit_err:
+	kfree(eng_list);
+	return err;
+}
+
+/*
+ * Construct an encryption mapping:
+ * <cipher> <key> <iv_offset> <dev_path> <start>
+ */
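+/*
+ * A hypothetical dmsetup table line (device path, size and key values are
+ * illustrative only, not mandated by this driver):
+ *   0 2097152 req-crypt aes-xts 0 0 /dev/block/sda 0 fde_enabled
+ */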
+static int req_crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+	int err = DM_REQ_CRYPT_ERROR;
+	unsigned long long tmpll;
+	char dummy;
+	int ret;
+
+	DMDEBUG("dm-req-crypt Constructor.\n");
+
+	if (argc < 5) {
+		DMERR(" %s Not enough args\n", __func__);
+		err = DM_REQ_CRYPT_ERROR;
+		goto ctr_exit;
+	}
+
+	if (argv[3]) {
+		if (dm_get_device(ti, argv[3],
+				dm_table_get_mode(ti->table), &dev)) {
+			DMERR(" %s Device Lookup failed\n", __func__);
+			err =  DM_REQ_CRYPT_ERROR;
+			goto ctr_exit;
+		}
+	} else {
+		DMERR(" %s Arg[3] invalid\n", __func__);
+		err =  DM_REQ_CRYPT_ERROR;
+		goto ctr_exit;
+	}
+
+	if (argv[4]) {
+		if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
+			DMERR("%s Invalid device sector\n", __func__);
+			err =  DM_REQ_CRYPT_ERROR;
+			goto ctr_exit;
+		}
+	} else {
+		DMERR(" %s Arg[4] invalid\n", __func__);
+		err =  DM_REQ_CRYPT_ERROR;
+		goto ctr_exit;
+	}
+	start_sector_orig = tmpll;
+
+	/* Allow backward compatibility */
+	if (argc >= 6) {
+		if (argv[5]) {
+			if (!strcmp(argv[5], "fde_enabled"))
+				is_fde_enabled = true;
+			else
+				is_fde_enabled = false;
+		} else {
+			DMERR(" %s Arg[5] invalid\n", __func__);
+			err =  DM_REQ_CRYPT_ERROR;
+			goto ctr_exit;
+		}
+	} else {
+		DMERR(" %s Arg[5] missing, set FDE enabled.\n", __func__);
+		is_fde_enabled = true; /* backward compatible */
+	}
+
+	_req_crypt_io_pool = KMEM_CACHE(req_dm_crypt_io, 0);
+	if (!_req_crypt_io_pool) {
+		err =  DM_REQ_CRYPT_ERROR;
+		goto ctr_exit;
+	}
+
+	encryption_mode = DM_REQ_CRYPT_ENCRYPTION_MODE_CRYPTO;
+	if (argc >= 7 && argv[6]) {
+		if (!strcmp(argv[6], "ice"))
+			encryption_mode =
+				DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT;
+	}
+
+	if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) {
+		/* configure ICE settings */
+		ice_settings =
+			kzalloc(sizeof(struct ice_crypto_setting), GFP_KERNEL);
+		if (!ice_settings) {
+			err = -ENOMEM;
+			goto ctr_exit;
+		}
+		ice_settings->key_size = ICE_CRYPTO_KEY_SIZE_128;
+		ice_settings->algo_mode = ICE_CRYPTO_ALGO_MODE_AES_XTS;
+		ice_settings->key_mode = ICE_CRYPTO_USE_LUT_SW_KEY;
+		if (kstrtou16(argv[1], 0, &ice_settings->key_index) ||
+			ice_settings->key_index < 0 ||
+			ice_settings->key_index > MAX_MSM_ICE_KEY_LUT_SIZE) {
+			DMERR("%s Err: key index %d received for ICE\n",
+				__func__, ice_settings->key_index);
+			err = DM_REQ_CRYPT_ERROR;
+			goto ctr_exit;
+		}
+	} else {
+		ret = configure_qcrypto();
+		if (ret) {
+			DMERR("%s failed to configure qcrypto\n", __func__);
+			err = ret;
+			goto ctr_exit;
+		}
+	}
+
+	req_io_pool = mempool_create_slab_pool(MIN_IOS, _req_crypt_io_pool);
+	if (!req_io_pool) {
+		DMERR("%s req_io_pool not allocated\n", __func__);
+		err = -ENOMEM;
+		goto ctr_exit;
+	}
+
+	/*
+	 * If underlying device supports flush/discard, mapped target
+	 * should also allow it
+	 */
+	ti->num_flush_bios = 1;
+	ti->num_discard_bios = 1;
+
+	err = 0;
+	DMINFO("%s: Mapping block_device %s to dm-req-crypt ok!\n",
+	       __func__, argv[3]);
+ctr_exit:
+	if (err)
+		req_crypt_dtr(ti);
+
+	return err;
+}
+
+static int req_crypt_iterate_devices(struct dm_target *ti,
+				 iterate_devices_callout_fn fn, void *data)
+{
+	return fn(ti, dev, start_sector_orig, ti->len, data);
+}
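+
+/*
+ * Installed at runtime by the qcrypto driver; fills in the engine
+ * management callbacks held in dm_qcrypto_func above.
+ */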
+void set_qcrypto_func_dm(void *dev,
+			void *flag,
+			void *engines,
+			void *engine_list)
+{
+	dm_qcrypto_func.cipher_set  = dev;
+	dm_qcrypto_func.cipher_flag = flag;
+	dm_qcrypto_func.get_num_engines = engines;
+	dm_qcrypto_func.get_engine_list = engine_list;
+}
+EXPORT_SYMBOL(set_qcrypto_func_dm);
+
+static struct target_type req_crypt_target = {
+	.name   = "req-crypt",
+	.version = {1, 0, 0},
+	.module = THIS_MODULE,
+	.ctr    = req_crypt_ctr,
+	.dtr    = req_crypt_dtr,
+	.map_rq = req_crypt_map,
+	.rq_end_io = req_crypt_endio,
+	.iterate_devices = req_crypt_iterate_devices,
+};
+
+static int __init req_dm_crypt_init(void)
+{
+	int r;
+
+	r = dm_register_target(&req_crypt_target);
+	if (r < 0) {
+		DMERR("register failed %d", r);
+		return r;
+	}
+
+	DMINFO("dm-req-crypt successfully initialized.\n");
+
+	return r;
+}
+
+static void __exit req_dm_crypt_exit(void)
+{
+	dm_unregister_target(&req_crypt_target);
+}
+
+module_init(req_dm_crypt_init);
+module_exit(req_dm_crypt_exit);
+
+MODULE_DESCRIPTION(DM_NAME " target for request based transparent encryption / decryption");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index ba7c4c6..bca4c0e 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -283,7 +283,7 @@
  * Must be called without clone's queue lock held,
  * see end_clone_request() for more details.
  */
-static void dm_end_request(struct request *clone, int error)
+void dm_end_request(struct request *clone, int error)
 {
 	int rw = rq_data_dir(clone);
 	struct dm_rq_target_io *tio = clone->end_io_data;
@@ -464,7 +464,7 @@
  * Target's rq_end_io() function isn't called.
  * This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
  */
-static void dm_kill_unmapped_request(struct request *rq, int error)
+void dm_kill_unmapped_request(struct request *rq, int error)
 {
 	rq->cmd_flags |= REQ_FAILED;
 	dm_complete_request(rq, error);
@@ -512,6 +512,13 @@
 		dm_complete_request(rq, r);
 }
 
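+/*
+ * Allow request-based targets such as dm-req-crypt to dispatch a prepared
+ * clone from outside this file.
+ */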
+void dm_dispatch_request(struct request *rq)
+{
+	struct dm_rq_target_io *tio = tio_from_request(rq);
+
+	dm_dispatch_clone_request(tio->clone, rq);
+}
+
 static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
 				 void *data)
 {
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
index 21a61ff..2a0c4a7 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
@@ -60,10 +60,6 @@
 		cam_sync_signal(req->out_map_entries[j].sync_id,
 			CAM_SYNC_STATE_SIGNALED_SUCCESS);
 		req->num_out_acked++;
-		trace_printk("Sync success req %lld, reset sync id 0x%x\n",
-			req->request_id,
-			req->out_map_entries[j].sync_id);
-
 		req->out_map_entries[j].sync_id = -1;
 	}
 
@@ -195,8 +191,6 @@
 	req->num_in_acked++;
 	if (req->num_in_acked == req->num_in_map_entries) {
 		apply.request_id = req->request_id;
-		trace_printk("async cb for request :%llu",
-			req->request_id);
 		cam_context_apply_req_to_hw(ctx, &apply);
 	}
 }
@@ -287,8 +281,6 @@
 		list_add_tail(&req->list, &ctx->pending_req_list);
 		spin_unlock(&ctx->lock);
 		for (i = 0; i < req->num_in_map_entries; i++) {
-			trace_printk("register in fence callback: %d\n",
-				req->in_map_entries[i].sync_id);
 			rc = cam_sync_register_callback(
 					cam_context_sync_callback,
 					(void *)ctx,
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_node.c b/drivers/media/platform/msm/camera/cam_core/cam_node.c
index 17f6973..74a94b2 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_node.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_node.c
@@ -16,6 +16,13 @@
 
 #include "cam_node.h"
 
+static void __cam_node_handle_shutdown(struct cam_node *node)
+{
+	if (node->hw_mgr_intf.hw_close)
+		node->hw_mgr_intf.hw_close(node->hw_mgr_intf.hw_mgr_priv,
+			NULL);
+}
+
 static int __cam_node_handle_query_cap(struct cam_node *node,
 	struct cam_query_cap_cmd *query)
 {
@@ -408,6 +415,9 @@
 		}
 		break;
 	}
+	case CAM_SD_SHUTDOWN:
+		__cam_node_handle_shutdown(node);
+		break;
 	default:
 		pr_err("Unknown op code %d\n", cmd->op_code);
 		rc = -EINVAL;
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
index b774625..aba0caa 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
@@ -327,7 +327,7 @@
 int cam_cpas_subdev_cmd(struct cam_cpas_intf *cpas_intf,
 	struct cam_control *cmd)
 {
-	int rc;
+	int rc = 0;
 
 	if (!cmd) {
 		pr_err("Invalid input cmd\n");
@@ -357,6 +357,8 @@
 
 		break;
 	}
+	case CAM_SD_SHUTDOWN:
+		break;
 	default:
 		pr_err("Unknown op code %d for CPAS\n", cmd->op_code);
 		rc = -EINVAL;
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c
index 4cebb58..4c819cf 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c
@@ -72,6 +72,7 @@
 	}
 	node = (struct cam_node *) g_isp_dev.sd.token;
 
+	memset(&hw_mgr_intf, 0, sizeof(hw_mgr_intf));
 	rc = cam_isp_hw_mgr_init(pdev->dev.of_node, &hw_mgr_intf);
 	if (rc != 0) {
 		pr_err("%s: Can not initialized ISP HW manager!\n", __func__);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
index 5e629b6..92a17d8 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
@@ -1581,6 +1581,7 @@
 	struct cam_vfe_bus_ver2_priv          *bus_priv;
 	struct cam_irq_controller_reg_info    *reg_info;
 	uint32_t                               irq_mask;
+	int                                    found = 0;
 
 	handler_priv = th_payload->handler_priv;
 	core_info    = handler_priv->core_info;
@@ -1613,6 +1614,8 @@
 			irq_reg_offset[i] - (0xC * 2));
 		evt_payload->irq_reg_val[i] = irq_mask &
 			cam_io_r(handler_priv->mem_base + irq_reg_offset[i]);
+		if (evt_payload->irq_reg_val[i])
+			found = 1;
 		CDBG("irq_status%d = 0x%x\n", i, evt_payload->irq_reg_val[i]);
 	}
 	for (i = 0; i <= CAM_IFE_IRQ_BUS_REG_STATUS2; i++) {
@@ -1628,7 +1631,13 @@
 			reg_info->global_clear_bitmask,
 			reg_info->global_clear_offset);
 
-	th_payload->evt_payload_priv = evt_payload;
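+	/*
+	 * Hand the payload to the bottom half only if at least one status
+	 * bit was set; otherwise recycle it and report no event.
+	 */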
+	if (found)
+		th_payload->evt_payload_priv = evt_payload;
+	else {
+		cam_vfe_bus_put_evt_payload(evt_payload->core_info,
+			&evt_payload);
+		rc = -ENOMSG;
+	}
 
 	return rc;
 }
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
index 1a8356a..c495088 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
@@ -20,6 +20,7 @@
 #include <media/v4l2-event.h>
 #include <media/v4l2-ioctl.h>
 #include <media/cam_req_mgr.h>
+#include <media/cam_defs.h>
 #include "cam_req_mgr_dev.h"
 #include "cam_req_mgr_util.h"
 #include "cam_req_mgr_core.h"
@@ -151,6 +152,9 @@
 
 static int cam_req_mgr_close(struct file *filep)
 {
+	struct v4l2_subdev *sd;
+	struct cam_control cam_ctrl;
+
 	mutex_lock(&g_dev.cam_lock);
 
 	if (g_dev.open_cnt <= 0) {
@@ -158,6 +162,14 @@
 		return -EINVAL;
 	}
 
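+	/* Broadcast CAM_SD_SHUTDOWN to every subdev that exposes a devnode. */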
+	cam_ctrl.op_code = CAM_SD_SHUTDOWN;
+	list_for_each_entry(sd, &g_dev.v4l2_dev->subdevs, list) {
+		if (!(sd->flags & V4L2_SUBDEV_FL_HAS_DEVNODE))
+			continue;
+		v4l2_subdev_call(sd, core, ioctl, VIDIOC_CAM_CONTROL,
+			&cam_ctrl);
+	}
+
 	g_dev.open_cnt--;
 	v4l2_fh_release(filep);
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
index 648617e..91b68cf 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
@@ -616,6 +616,8 @@
 		}
 	}
 		break;
+	case CAM_SD_SHUTDOWN:
+		break;
 	default:
 		pr_err("%s:%d Invalid Opcode %d\n",
 			__func__, __LINE__, cmd->op_code);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
index 789522d..6764b8a 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
@@ -34,6 +34,8 @@
 	case VIDIOC_MSM_CCI_CFG:
 		rc = cam_cci_core_cfg(sd, arg);
 		break;
+	case VIDIOC_CAM_CONTROL:
+		break;
 	default:
 		pr_err("%s:%d Invalid ioctl cmd: %d\n",
 			__func__, __LINE__, cmd);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
index 8dc65f5..6751fdd 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
@@ -486,6 +486,8 @@
 		}
 	}
 		break;
+	case CAM_SD_SHUTDOWN:
+		break;
 	default:
 		pr_err("%s:%d :Error: Invalid Opcode: %d\n",
 			__func__, __LINE__, cmd->op_code);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
index 4888e5b..4fc3aa1 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
@@ -773,6 +773,8 @@
 		}
 	}
 		break;
+	case CAM_SD_SHUTDOWN:
+		break;
 	default:
 		pr_err("%s:%d :Error: Invalid Opcode: %d\n",
 			__func__, __LINE__, cmd->op_code);
diff --git a/drivers/media/platform/msm/camera/icp/fw_inc/hfi_reg.h b/drivers/media/platform/msm/camera/icp/fw_inc/hfi_reg.h
index d1bbe01..ff6b72a 100644
--- a/drivers/media/platform/msm/camera/icp/fw_inc/hfi_reg.h
+++ b/drivers/media/platform/msm/camera/icp/fw_inc/hfi_reg.h
@@ -92,17 +92,14 @@
 
 /**
  * @INVALID: Invalid state
- * @FW_LOAD_DONE: Firmware load is completed
- * @FW_RESP_DONE: Firmware response is received
- * @FW_START_SENT: firmware start is send
- * @FW_READY: firmware is ready to accept commands
+ * @HFI_DEINIT: HFI is not initialized yet
+ * @HFI_INIT: HFI is initialized
+ * @HFI_READY: HFI is ready to send/receive commands/messages
  */
 enum hfi_state {
-	INVALID,
-	FW_LOAD_DONE,
-	FW_RESP_DONE,
-	FW_START_SENT,
-	FW_READY
+	HFI_DEINIT,
+	HFI_INIT,
+	HFI_READY
 };
 
 /**
@@ -292,6 +289,9 @@
  * @msgpacket_buf: message buffer
  * @hfi_state: State machine for hfi
  * @cmd_q_lock: Lock for command queue
+ * @cmd_q_state: State of command queue
+ * @msg_q_lock: Lock for message queue
+ * @msg_q_state: State of message queue
  * @csr_base: CSR base address
  */
 struct hfi_info {
@@ -301,7 +301,9 @@
 	uint32_t msgpacket_buf[ICP_HFI_MAX_MSG_SIZE_IN_WORDS];
 	uint8_t hfi_state;
 	struct mutex cmd_q_lock;
+	bool cmd_q_state;
 	struct mutex msg_q_lock;
+	bool msg_q_state;
 	void __iomem *csr_base;
 };
 
diff --git a/drivers/media/platform/msm/camera/icp/hfi.c b/drivers/media/platform/msm/camera/icp/hfi.c
index 15e0315..170c8cf 100644
--- a/drivers/media/platform/msm/camera/icp/hfi.c
+++ b/drivers/media/platform/msm/camera/icp/hfi.c
@@ -42,7 +42,7 @@
 #undef  HFI_DBG
 #define HFI_DBG(fmt, args...) pr_debug(fmt, ##args)
 
-struct hfi_info *g_hfi;
+static struct hfi_info *g_hfi;
 unsigned int g_icp_mmu_hdl;
 
 int hfi_write_cmd(void *cmd_ptr)
@@ -59,12 +59,17 @@
 		return -EINVAL;
 	}
 
-	if (!g_hfi || g_hfi->hfi_state < FW_START_SENT) {
-		pr_err("FW not ready yet\n");
+	if (!g_hfi || (g_hfi->hfi_state != HFI_READY)) {
+		pr_err("HFI interface not ready yet\n");
 		return -EIO;
 	}
 
 	mutex_lock(&g_hfi->cmd_q_lock);
+	if (!g_hfi->cmd_q_state) {
+		pr_err("HFI command interface not ready yet\n");
+		mutex_unlock(&g_hfi->cmd_q_lock);
+		return -EIO;
+	}
 
 	q_tbl = (struct hfi_qtbl *)g_hfi->map.qtbl.kva;
 	q = &q_tbl->q_hdr[Q_CMD];
@@ -78,11 +83,10 @@
 		goto err;
 	}
 
-	HFI_DBG("size_in_words : %u\n", size_in_words);
-	HFI_DBG("q->qhdr_write_idx %x\n", q->qhdr_write_idx);
+	HFI_DBG("size_in_words : %u, q->qhdr_write_idx %x\n", size_in_words,
+		q->qhdr_write_idx);
 
 	read_idx = q->qhdr_read_idx;
-
 	empty_space = (q->qhdr_write_idx >= read_idx) ?
 		(q->qhdr_q_size - (q->qhdr_write_idx - read_idx)) :
 		(read_idx - q->qhdr_write_idx);
@@ -115,7 +119,7 @@
 		g_hfi->csr_base + HFI_REG_A5_CSR_HOST2ICPINT);
 err:
 	mutex_unlock(&g_hfi->cmd_q_lock);
-	return 0;
+	return rc;
 }
 
 int hfi_read_message(uint32_t *pmsg, uint8_t q_id)
@@ -132,21 +136,28 @@
 		return -EINVAL;
 	}
 
+	if (!g_hfi || (g_hfi->hfi_state != HFI_READY)) {
+		pr_err("HFI interface not ready yet\n");
+		return -EIO;
+	}
+
 	q_tbl_ptr = (struct hfi_qtbl *)g_hfi->map.qtbl.kva;
 	q = &q_tbl_ptr->q_hdr[q_id];
 
-	if ((g_hfi->hfi_state < FW_START_SENT) ||
-		(q->qhdr_read_idx == q->qhdr_write_idx)) {
+	if (q->qhdr_read_idx == q->qhdr_write_idx) {
 		pr_debug("FW or Q not ready, hfi state : %u, r idx : %u, w idx : %u\n",
 			g_hfi->hfi_state, q->qhdr_read_idx, q->qhdr_write_idx);
 		return -EIO;
 	}
 
 	mutex_lock(&g_hfi->msg_q_lock);
+	if (!g_hfi->msg_q_state) {
+		pr_err("HFI message interface not ready yet\n");
+		mutex_unlock(&g_hfi->msg_q_lock);
+		return -EIO;
+	}
 
-	if (q_id == Q_CMD)
-		read_q = (uint32_t *)g_hfi->map.cmd_q.kva;
-	else if (q_id == Q_MSG)
+	if (q_id == Q_MSG)
 		read_q = (uint32_t *)g_hfi->map.msg_q.kva;
 	else
 		read_q = (uint32_t *)g_hfi->map.dbg_q.kva;
@@ -154,8 +165,8 @@
 	read_ptr = (uint32_t *)(read_q + q->qhdr_read_idx);
 	size_in_words = (*read_ptr) >> BYTE_WORD_SHIFT;
 
-	HFI_DBG("size_in_words : %u\n", size_in_words);
-	HFI_DBG("read_ptr : %pK\n", (void *)read_ptr);
+	HFI_DBG("size_in_words : %u, read_ptr : %pK\n", size_in_words,
+		(void *)read_ptr);
 
 	if ((size_in_words == 0) ||
 		(size_in_words > ICP_HFI_MAX_MSG_SIZE_IN_WORDS)) {
@@ -180,13 +191,12 @@
 	}
 
 	for (i = 0; i < size_in_words; i++)
-		pr_debug("%x\n", read_ptr[i]);
+		HFI_DBG("%x\n", read_ptr[i]);
 
 	q->qhdr_read_idx = new_read_idx;
 err:
 	mutex_unlock(&g_hfi->msg_q_lock);
-	HFI_DBG("Exit\n");
-	return 0;
+	return rc;
 }
 
 void hfi_send_system_cmd(uint32_t type, uint64_t data, uint32_t size)
@@ -307,8 +317,7 @@
 	struct hfi_qtbl *qtbl;
 	struct hfi_qtbl_hdr *qtbl_hdr;
 	struct hfi_q_hdr *cmd_q_hdr, *msg_q_hdr, *dbg_q_hdr;
-	uint32_t hw_version, fw_version;
-	uint32_t status;
+	uint32_t hw_version, fw_version, status = 0;
 
 	if (!g_hfi) {
 		g_hfi = kzalloc(sizeof(struct hfi_info), GFP_KERNEL);
@@ -318,13 +327,12 @@
 		}
 	}
 
-	pr_debug("g_hfi: %pK\n", (void *)g_hfi);
-	if (g_hfi->hfi_state != INVALID) {
+	HFI_DBG("g_hfi: %pK\n", (void *)g_hfi);
+	if (g_hfi->hfi_state != HFI_DEINIT) {
 		pr_err("hfi_init: invalid state\n");
 		return -EINVAL;
 	}
 
-	g_hfi->hfi_state = FW_LOAD_DONE;
 	memcpy(&g_hfi->map, hfi_mem, sizeof(g_hfi->map));
 
 	if (debug) {
@@ -342,11 +350,6 @@
 			icp_base + HFI_REG_A5_CSR_A5_CONTROL);
 	}
 
-	mutex_init(&g_hfi->cmd_q_lock);
-	mutex_init(&g_hfi->msg_q_lock);
-
-	g_hfi->csr_base = icp_base;
-
 	qtbl = (struct hfi_qtbl *)hfi_mem->qtbl.kva;
 	qtbl_hdr = &qtbl->q_tbl_hdr;
 	qtbl_hdr->qtbl_version = 0xFFFFFFFF;
@@ -474,7 +477,7 @@
 		icp_base + HFI_REG_HOST_ICP_INIT_REQUEST);
 
 	hw_version = cam_io_r(icp_base + HFI_REG_A5_HW_VERSION);
-	pr_debug("hw version : %u[%x]\n", hw_version, hw_version);
+	HFI_DBG("hw version : [%x]\n", hw_version);
 
 	rc = readw_poll_timeout((icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE),
 		status, status != ICP_INIT_RESP_SUCCESS, 15, 200);
@@ -484,14 +487,19 @@
 	}
 
 	fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
-	g_hfi->hfi_state = FW_START_SENT;
-
 	HFI_DBG("fw version : %u[%x]\n", fw_version, fw_version);
+
+	g_hfi->csr_base = icp_base;
+	g_hfi->hfi_state = HFI_READY;
+	g_hfi->cmd_q_state = true;
+	g_hfi->msg_q_state = true;
+	mutex_init(&g_hfi->cmd_q_lock);
+	mutex_init(&g_hfi->msg_q_lock);
 	cam_io_w((uint32_t)INTR_ENABLE, icp_base + HFI_REG_A5_CSR_A2HOSTINTEN);
 
 	return rc;
 regions_fail:
-	kzfree(g_hfi);
+	kfree(g_hfi);
 alloc_fail:
 	return rc;
 }
@@ -499,6 +507,24 @@
 
 void cam_hfi_deinit(void)
 {
+	if (!g_hfi) {
+		pr_err("hfi path not established yet\n");
+		return;
+	}
+	cam_io_w((uint32_t)INTR_DISABLE,
+		g_hfi->csr_base + HFI_REG_A5_CSR_A2HOSTINTEN);
+
+	mutex_lock(&g_hfi->cmd_q_lock);
+	g_hfi->cmd_q_state = false;
+	mutex_unlock(&g_hfi->cmd_q_lock);
+
+	mutex_lock(&g_hfi->msg_q_lock);
+	g_hfi->msg_q_state = false;
+	mutex_unlock(&g_hfi->msg_q_lock);
+
+	mutex_destroy(&g_hfi->cmd_q_lock);
+	mutex_destroy(&g_hfi->msg_q_lock);
+
 	kfree(g_hfi);
 	g_hfi = NULL;
 }
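
Note on this file: hfi_write_cmd() and hfi_read_message() now re-check a per-queue ready flag after taking the queue mutex, and cam_hfi_deinit() clears both flags under the same mutexes before freeing g_hfi, so a caller that passed the HFI_READY check but lost the race against teardown fails with -EIO instead of touching a dead queue. A minimal user-space sketch of that guard follows; fake_queue, queue_write() and queue_teardown() are illustrative names, not driver API.

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_queue {
	pthread_mutex_t lock;
	bool ready;             /* mirrors cmd_q_state / msg_q_state */
	unsigned int write_idx;
};

static int queue_write(struct fake_queue *q)
{
	pthread_mutex_lock(&q->lock);
	if (!q->ready) {        /* teardown won the race */
		pthread_mutex_unlock(&q->lock);
		return -EIO;
	}
	q->write_idx++;         /* stand-in for the real ring-buffer write */
	pthread_mutex_unlock(&q->lock);
	return 0;
}

static void queue_teardown(struct fake_queue *q)
{
	/* Flip the flag under the lock: a writer already inside
	 * queue_write() finishes first; later callers see !ready. */
	pthread_mutex_lock(&q->lock);
	q->ready = false;
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct fake_queue q = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.ready = true,
	};

	printf("write while ready: %d\n", queue_write(&q));    /* 0 */
	queue_teardown(&q);
	printf("write after teardown: %d\n", queue_write(&q)); /* -EIO */
	return 0;
}
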
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_core.c b/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_core.c
index f562bb9..39eacd8 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_core.c
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_core.c
@@ -278,19 +278,22 @@
 
 	rc = cam_cpas_start(core_info->cpas_handle,
 		&cpas_vote.ahb_vote, &cpas_vote.axi_vote);
-	if (rc < 0) {
+	if (rc) {
 		pr_err("cpass start failed: %d\n", rc);
 		return rc;
 	}
+	core_info->cpas_start = true;
 
 	rc = cam_a5_enable_soc_resources(soc_info);
-	if (rc < 0) {
-		pr_err("soc enable is failed\n");
-		rc = cam_cpas_stop(core_info->cpas_handle);
-		return rc;
+	if (rc) {
+		pr_err("soc enable is failed: %d\n", rc);
+		if (cam_cpas_stop(core_info->cpas_handle))
+			pr_err("cpas stop is failed\n");
+		else
+			core_info->cpas_start = false;
 	}
 
-	return 0;
+	return rc;
 }
 
 int cam_a5_deinit_hw(void *device_priv,
@@ -314,14 +317,17 @@
 	}
 
 	rc = cam_a5_disable_soc_resources(soc_info);
-	if (rc < 0)
-		pr_err("soc enable is failed\n");
+	if (rc)
+		pr_err("soc disable is failed: %d\n", rc);
 
-	rc = cam_cpas_stop(core_info->cpas_handle);
-	if (rc < 0)
-		pr_err("cpas stop is failed: %d\n", rc);
+	if (core_info->cpas_start) {
+		if (cam_cpas_stop(core_info->cpas_handle))
+			pr_err("cpas stop is failed\n");
+		else
+			core_info->cpas_start = false;
+	}
 
-	return 0;
+	return rc;
 }
 
 irqreturn_t cam_a5_irq(int irq_num, void *data)
@@ -443,13 +449,20 @@
 			return -EINVAL;
 		}
 
-		rc = cam_cpas_start(core_info->cpas_handle,
-				&cpas_vote->ahb_vote, &cpas_vote->axi_vote);
+		if (!core_info->cpas_start) {
+			rc = cam_cpas_start(core_info->cpas_handle,
+					&cpas_vote->ahb_vote,
+					&cpas_vote->axi_vote);
+			core_info->cpas_start = true;
+		}
 		break;
 	}
 
 	case CAM_ICP_A5_CMD_CPAS_STOP:
-		cam_cpas_stop(core_info->cpas_handle);
+		if (core_info->cpas_start) {
+			cam_cpas_stop(core_info->cpas_handle);
+			core_info->cpas_start = false;
+		}
 		break;
 	default:
 		break;
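
The cpas_start flag added here (and mirrored in bps_core.c and ipe_core.c below) makes the CPAS vote idempotent: start only when the flag is clear, stop only when it is set, and flip the flag only when the call succeeds, so repeated init/deinit or CPAS_START/CPAS_STOP commands cannot unbalance the vote count. A stand-alone sketch of the guard, with hypothetical cpas_start_hw()/cpas_stop_hw() standing in for cam_cpas_start()/cam_cpas_stop():

#include <stdbool.h>
#include <stdio.h>

struct core_state {
	bool started;           /* mirrors core_info->cpas_start */
	int vote_count;         /* what the flag is protecting */
};

static int cpas_start_hw(struct core_state *c) { c->vote_count++; return 0; }
static int cpas_stop_hw(struct core_state *c)  { c->vote_count--; return 0; }

static int guarded_start(struct core_state *c)
{
	if (c->started)         /* already voted: nothing to do */
		return 0;
	if (cpas_start_hw(c))
		return -1;
	c->started = true;      /* record only on success */
	return 0;
}

static void guarded_stop(struct core_state *c)
{
	if (!c->started)        /* never started or already stopped */
		return;
	if (!cpas_stop_hw(c))
		c->started = false;
}

int main(void)
{
	struct core_state c = { 0 };

	guarded_start(&c);
	guarded_start(&c);      /* second start is a no-op */
	guarded_stop(&c);
	guarded_stop(&c);       /* so is the second stop */
	printf("vote_count = %d\n", c.vote_count);  /* 0: balanced */
	return 0;
}
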
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_core.h b/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_core.h
index 8b84270..4aa6b4b 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_core.h
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_core.h
@@ -62,6 +62,7 @@
  * @a5_acquire: Acquire information of A5
  * @irq_cb: IRQ callback
  * @cpas_handle: CPAS handle for A5
+ * @cpas_start: state variable for cpas
  */
 struct cam_a5_device_core_info {
 	struct cam_a5_device_hw_info *a5_hw_info;
@@ -74,6 +75,7 @@
 	struct cam_icp_a5_acquire_dev a5_acquire[8];
 	struct cam_icp_a5_set_irq_cb irq_cb;
 	uint32_t cpas_handle;
+	bool cpas_start;
 };
 
 int cam_a5_init_hw(void *device_priv,
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_soc.c b/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_soc.c
index 641c154..d12b3b6 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_soc.c
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_soc.c
@@ -95,7 +95,7 @@
 
 	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
 	if (rc)
-		pr_err("%s: enable platform failed\n", __func__);
+		pr_err("%s: disable platform failed\n", __func__);
 
 	return rc;
 }
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_core.c b/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_core.c
index 50863a5..91652d7 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_core.c
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_core.c
@@ -80,15 +80,19 @@
 
 	rc = cam_cpas_start(core_info->cpas_handle,
 			&cpas_vote.ahb_vote, &cpas_vote.axi_vote);
-	if (rc < 0) {
+	if (rc) {
 		pr_err("cpass start failed: %d\n", rc);
 		return rc;
 	}
+	core_info->cpas_start = true;
 
 	rc = cam_bps_enable_soc_resources(soc_info);
-	if (rc < 0) {
-		pr_err("soc enable is failed\n");
-		rc = cam_cpas_stop(core_info->cpas_handle);
+	if (rc) {
+		pr_err("soc enable is failed: %d\n", rc);
+		if (cam_cpas_stop(core_info->cpas_handle))
+			pr_err("cpas stop is failed\n");
+		else
+			core_info->cpas_start = false;
 	}
 
 	return rc;
@@ -115,12 +119,15 @@
 	}
 
 	rc = cam_bps_disable_soc_resources(soc_info);
-	if (rc < 0)
-		pr_err("soc enable is failed\n");
+	if (rc)
+		pr_err("soc disable is failed: %d\n", rc);
 
-	rc = cam_cpas_stop(core_info->cpas_handle);
-	if (rc < 0)
-		pr_err("cpas stop is failed: %d\n", rc);
+	if (core_info->cpas_start) {
+		if (cam_cpas_stop(core_info->cpas_handle))
+			pr_err("cpas stop is failed\n");
+		else
+			core_info->cpas_start = false;
+	}
 
 	return rc;
 }
@@ -169,13 +176,20 @@
 			return -EINVAL;
 		}
 
-		rc = cam_cpas_start(core_info->cpas_handle,
-				&cpas_vote->ahb_vote, &cpas_vote->axi_vote);
+		if (!core_info->cpas_start) {
+			rc = cam_cpas_start(core_info->cpas_handle,
+					&cpas_vote->ahb_vote,
+					&cpas_vote->axi_vote);
+			core_info->cpas_start = true;
+		}
 		break;
 	}
 
 	case CAM_ICP_BPS_CMD_CPAS_STOP:
-		cam_cpas_stop(core_info->cpas_handle);
+		if (core_info->cpas_start) {
+			cam_cpas_stop(core_info->cpas_handle);
+			core_info->cpas_start = false;
+		}
 		break;
 	default:
 		break;
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_core.h b/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_core.h
index 67e1c03..8a15a7b 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_core.h
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_core.h
@@ -26,6 +26,7 @@
 struct cam_bps_device_core_info {
 	struct cam_bps_device_hw_info *bps_hw_info;
 	uint32_t cpas_handle;
+	bool cpas_start;
 };
 
 int cam_bps_init_hw(void *device_priv,
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index 43491a9..677c24e 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -74,166 +74,6 @@
 	return 0;
 }
 
-static int cam_icp_stop_cpas(struct cam_icp_hw_mgr *hw_mgr_priv)
-{
-	struct cam_hw_intf *a5_dev_intf = NULL;
-	struct cam_hw_intf *ipe0_dev_intf = NULL;
-	struct cam_hw_intf *ipe1_dev_intf = NULL;
-	struct cam_hw_intf *bps_dev_intf = NULL;
-	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
-	struct cam_icp_cpas_vote cpas_vote;
-	int rc = 0;
-
-	if (!hw_mgr) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
-	bps_dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
-	ipe0_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
-
-	if ((!a5_dev_intf) || (!bps_dev_intf) || (!ipe0_dev_intf)) {
-		pr_err("dev intfs are NULL\n");
-		return -EINVAL;
-	}
-
-	rc = a5_dev_intf->hw_ops.process_cmd(
-		a5_dev_intf->hw_priv,
-		CAM_ICP_A5_CMD_CPAS_STOP,
-		&cpas_vote,
-		sizeof(struct cam_icp_cpas_vote));
-	if (rc < 0)
-		pr_err("CAM_ICP_A5_CMD_CPAS_STOP is failed: %d\n", rc);
-
-	rc = bps_dev_intf->hw_ops.process_cmd(
-		bps_dev_intf->hw_priv,
-		CAM_ICP_BPS_CMD_CPAS_STOP,
-		&cpas_vote,
-		sizeof(struct cam_icp_cpas_vote));
-	if (rc < 0)
-		pr_err("CAM_ICP_BPS_CMD_CPAS_STOP is failed: %d\n", rc);
-
-	rc = ipe0_dev_intf->hw_ops.process_cmd(
-		ipe0_dev_intf->hw_priv,
-		CAM_ICP_IPE_CMD_CPAS_STOP,
-		&cpas_vote,
-		sizeof(struct cam_icp_cpas_vote));
-	if (rc < 0)
-		pr_err("CAM_ICP_IPE_CMD_CPAS_STOP is failed: %d\n", rc);
-
-	ipe1_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][1];
-	if (!ipe1_dev_intf)
-		return rc;
-
-	rc = ipe1_dev_intf->hw_ops.process_cmd(
-		ipe1_dev_intf->hw_priv,
-		CAM_ICP_IPE_CMD_CPAS_STOP,
-		&cpas_vote,
-		sizeof(struct cam_icp_cpas_vote));
-	if (rc < 0)
-		pr_err("CAM_ICP_IPE_CMD_CPAS_STOP is failed: %d\n", rc);
-
-	return rc;
-}
-
-static int cam_icp_start_cpas(struct cam_icp_hw_mgr *hw_mgr_priv)
-{
-	struct cam_hw_intf *a5_dev_intf = NULL;
-	struct cam_hw_intf *ipe0_dev_intf = NULL;
-	struct cam_hw_intf *ipe1_dev_intf = NULL;
-	struct cam_hw_intf *bps_dev_intf = NULL;
-	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
-	struct cam_icp_cpas_vote cpas_vote;
-	int rc = 0;
-
-	if (!hw_mgr) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
-	bps_dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
-	ipe0_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
-
-	if ((!a5_dev_intf) || (!bps_dev_intf) || (!ipe0_dev_intf)) {
-		pr_err("dev intfs are null\n");
-		return -EINVAL;
-	}
-
-	cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
-	cpas_vote.ahb_vote.vote.level = CAM_TURBO_VOTE;
-	cpas_vote.axi_vote.compressed_bw = 640000000;
-	cpas_vote.axi_vote.uncompressed_bw = 640000000;
-
-	rc = a5_dev_intf->hw_ops.process_cmd(
-		a5_dev_intf->hw_priv,
-		CAM_ICP_A5_CMD_CPAS_START,
-		&cpas_vote,
-		sizeof(struct cam_icp_cpas_vote));
-	if (rc) {
-		pr_err("CAM_ICP_A5_CMD_CPAS_START is failed: %d\n", rc);
-		goto a5_cpas_start_failed;
-	}
-
-	rc = bps_dev_intf->hw_ops.process_cmd(
-		bps_dev_intf->hw_priv,
-		CAM_ICP_BPS_CMD_CPAS_START,
-		&cpas_vote,
-		sizeof(struct cam_icp_cpas_vote));
-	if (rc < 0) {
-		pr_err("CAM_ICP_BPS_CMD_CPAS_START is failed: %d\n", rc);
-		goto bps_cpas_start_failed;
-	}
-
-	rc = ipe0_dev_intf->hw_ops.process_cmd(
-		ipe0_dev_intf->hw_priv,
-		CAM_ICP_IPE_CMD_CPAS_START,
-		&cpas_vote,
-		sizeof(struct cam_icp_cpas_vote));
-	if (rc < 0) {
-		pr_err("CAM_ICP_IPE_CMD_CPAS_START is failed: %d\n", rc);
-		goto ipe0_cpas_start_failed;
-	}
-
-	ipe1_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][1];
-	if (!ipe1_dev_intf)
-		return rc;
-
-	rc = ipe1_dev_intf->hw_ops.process_cmd(
-		ipe1_dev_intf->hw_priv,
-		CAM_ICP_IPE_CMD_CPAS_START,
-		&cpas_vote,
-		sizeof(struct cam_icp_cpas_vote));
-	if (rc < 0) {
-		pr_err("CAM_ICP_IPE_CMD_CPAS_START is failed: %d\n", rc);
-		goto ipe1_cpas_start_failed;
-	}
-
-	return rc;
-
-ipe1_cpas_start_failed:
-	rc = ipe0_dev_intf->hw_ops.process_cmd(
-		ipe0_dev_intf->hw_priv,
-		CAM_ICP_IPE_CMD_CPAS_STOP,
-		&cpas_vote,
-		sizeof(struct cam_icp_cpas_vote));
-ipe0_cpas_start_failed:
-	rc = bps_dev_intf->hw_ops.process_cmd(
-		bps_dev_intf->hw_priv,
-		CAM_ICP_BPS_CMD_CPAS_STOP,
-		&cpas_vote,
-		sizeof(struct cam_icp_cpas_vote));
-bps_cpas_start_failed:
-	rc = a5_dev_intf->hw_ops.process_cmd(
-		a5_dev_intf->hw_priv,
-		CAM_ICP_A5_CMD_CPAS_STOP,
-		&cpas_vote,
-		sizeof(struct cam_icp_cpas_vote));
-a5_cpas_start_failed:
-	return rc;
-}
-
 static int cam_icp_mgr_process_cmd(void *priv, void *data)
 {
 	int rc;
@@ -566,7 +406,7 @@
 	return rc;
 }
 
-static int cam_icp_free_hfi_mem(void)
+static void cam_icp_free_hfi_mem(void)
 {
 	cam_smmu_dealloc_firmware(icp_hw_mgr.iommu_hdl);
 	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.qtbl);
@@ -574,8 +414,6 @@
 	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.msg_q);
 	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.dbg_q);
 	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.sec_heap);
-
-	return 0;
 }
 
 static int cam_icp_allocate_hfi_mem(void)
@@ -806,18 +644,17 @@
 
 	mutex_lock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
 	if (!hw_mgr->ctx_data[ctx_id].in_use) {
-		pr_err("ctx is already in use: %d\n", ctx_id);
+		ICP_DBG("ctx is not in use: %d\n", ctx_id);
 		mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
-		return -EINVAL;
+		return 0;
 	}
 	mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
 
-	mutex_lock(&hw_mgr->hw_mgr_mutex);
 	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
-	mutex_unlock(&hw_mgr->hw_mgr_mutex);
 	if (task)
 		cam_icp_mgr_destroy_handle(&hw_mgr->ctx_data[ctx_id], task);
 
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
 	mutex_lock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
 	hw_mgr->ctx_data[ctx_id].in_use = 0;
 	hw_mgr->ctx_data[ctx_id].fw_handle = 0;
@@ -829,6 +666,7 @@
 	mutex_destroy(&hw_mgr->ctx_data[ctx_id].hfi_frame_process.lock);
 	mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
 	kfree(hw_mgr->ctx_data[ctx_id].hfi_frame_process.bitmap);
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
 
 	return 0;
 }
@@ -861,40 +699,64 @@
 	struct cam_hw_intf *ipe0_dev_intf = NULL;
 	struct cam_hw_intf *ipe1_dev_intf = NULL;
 	struct cam_hw_intf *bps_dev_intf = NULL;
-	int rc = 0;
+	struct cam_icp_a5_set_irq_cb irq_cb;
+	struct cam_icp_a5_set_fw_buf_info fw_buf_info;
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+	int i;
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	if (hw_mgr->fw_download == false) {
+		ICP_DBG("hw mgr is already closed\n");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return 0;
+	}
 
 	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
 	ipe0_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
+	ipe1_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][1];
 	bps_dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
 
 	if ((!a5_dev_intf) || (!ipe0_dev_intf) || (!bps_dev_intf)) {
-		pr_err("dev intfs are wrong\n");
-		return rc;
+		pr_err("dev intfs are wrong, failed to close\n");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EINVAL;
 	}
+
+	irq_cb.icp_hw_mgr_cb = NULL;
+	irq_cb.data = NULL;
+	a5_dev_intf->hw_ops.process_cmd(
+		a5_dev_intf->hw_priv,
+		CAM_ICP_A5_SET_IRQ_CB,
+		&irq_cb, sizeof(irq_cb));
+
+	fw_buf_info.kva = 0;
+	fw_buf_info.iova = 0;
+	fw_buf_info.len = 0;
+	a5_dev_intf->hw_ops.process_cmd(
+		a5_dev_intf->hw_priv,
+		CAM_ICP_A5_CMD_SET_FW_BUF,
+		&fw_buf_info,
+		sizeof(fw_buf_info));
+
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
+		ctx_data = &hw_mgr->ctx_data[i];
+		cam_icp_mgr_release_ctx(hw_mgr, i);
+	}
+
 	mutex_lock(&hw_mgr->hw_mgr_mutex);
-	rc = a5_dev_intf->hw_ops.deinit(a5_dev_intf->hw_priv, NULL, 0);
-	if (rc < 0)
-		pr_err("a5 dev de-init failed\n");
-
-	rc = bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);
-	if (rc < 0)
-		pr_err("bps dev de-init failed\n");
-
-	rc = ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
-	if (rc < 0)
-		pr_err("ipe0 dev de-init failed\n");
-
 	ipe1_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][1];
-	if (ipe1_dev_intf) {
-		rc = ipe1_dev_intf->hw_ops.deinit(ipe1_dev_intf->hw_priv,
-						NULL, 0);
-		if (rc < 0)
-			pr_err("ipe1 dev de-init failed\n");
-	}
+	if (ipe1_dev_intf)
+		ipe1_dev_intf->hw_ops.deinit(ipe1_dev_intf->hw_priv,
+			NULL, 0);
 
+	ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
+	bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);
+	a5_dev_intf->hw_ops.deinit(a5_dev_intf->hw_priv, NULL, 0);
+	cam_hfi_deinit();
 	cam_icp_free_hfi_mem();
 	hw_mgr->fw_download = false;
-	debugfs_remove_recursive(icp_hw_mgr.dentry);
 	mutex_unlock(&hw_mgr->hw_mgr_mutex);
 
 	return 0;
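
The close path above tears down in the reverse order of cam_icp_mgr_download_fw()'s bring-up: detach the IRQ callback and firmware buffer, release every context, deinit IPE1/IPE0/BPS/A5, then cam_hfi_deinit() and cam_icp_free_hfi_mem(), with fw_download serving as the "already closed" latch. The same acquire-in-order/release-in-reverse shape, reduced to a sketch with purely illustrative names:

#include <stdbool.h>
#include <stdio.h>

static bool opened;             /* mirrors hw_mgr->fw_download */

static int open_a(void)  { puts("open a");  return 0; }
static int open_b(void)  { puts("open b");  return 0; }
static void close_a(void) { puts("close a"); }
static void close_b(void) { puts("close b"); }

static int mgr_open(void)
{
	int rc;

	rc = open_a();
	if (rc)
		return rc;
	rc = open_b();
	if (rc)
		goto undo_a;    /* unwind in reverse order */
	opened = true;          /* publish readiness last */
	return 0;

undo_a:
	close_a();
	return rc;
}

static void mgr_close(void)
{
	if (!opened)            /* mirrors the fw_download check */
		return;
	close_b();              /* release in reverse of bring-up */
	close_a();
	opened = false;
}

int main(void)
{
	mgr_open();
	mgr_close();
	mgr_close();            /* second close is a harmless no-op */
	return 0;
}
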
@@ -975,9 +837,9 @@
 	irq_cb.icp_hw_mgr_cb = cam_icp_hw_mgr_cb;
 	irq_cb.data = hw_mgr_priv;
 	rc = a5_dev_intf->hw_ops.process_cmd(
-				a5_dev_intf->hw_priv,
-				CAM_ICP_A5_SET_IRQ_CB,
-				&irq_cb, sizeof(irq_cb));
+		a5_dev_intf->hw_priv,
+		CAM_ICP_A5_SET_IRQ_CB,
+		&irq_cb, sizeof(irq_cb));
 	if (rc < 0) {
 		pr_err("CAM_ICP_A5_SET_IRQ_CB failed\n");
 		rc = -EINVAL;
@@ -989,10 +851,10 @@
 	fw_buf_info.len = icp_hw_mgr.hfi_mem.fw_buf.len;
 
 	rc = a5_dev_intf->hw_ops.process_cmd(
-			a5_dev_intf->hw_priv,
-			CAM_ICP_A5_CMD_SET_FW_BUF,
-			&fw_buf_info,
-			sizeof(fw_buf_info));
+		a5_dev_intf->hw_priv,
+		CAM_ICP_A5_CMD_SET_FW_BUF,
+		&fw_buf_info,
+		sizeof(fw_buf_info));
 	if (rc < 0) {
 		pr_err("CAM_ICP_A5_CMD_SET_FW_BUF failed\n");
 		goto set_irq_failed;
@@ -1001,9 +863,9 @@
 	cam_hfi_enable_cpu(a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
 
 	rc = a5_dev_intf->hw_ops.process_cmd(
-			a5_dev_intf->hw_priv,
-			CAM_ICP_A5_CMD_FW_DOWNLOAD,
-			NULL, 0);
+		a5_dev_intf->hw_priv,
+		CAM_ICP_A5_CMD_FW_DOWNLOAD,
+		NULL, 0);
 	if (rc < 0) {
 		pr_err("FW download is failed\n");
 		goto set_irq_failed;
@@ -1083,14 +945,8 @@
 	}
 
 	hw_mgr->fw_download = true;
-
-	rc = cam_icp_stop_cpas(hw_mgr);
-	if (rc) {
-		pr_err("cpas stop failed\n");
-		goto set_irq_failed;
-	}
-
 	hw_mgr->ctxt_cnt = 0;
+	ICP_DBG("FW download done successfully\n");
 
 	return rc;
 
@@ -1443,20 +1299,11 @@
 		mutex_unlock(&hw_mgr->hw_mgr_mutex);
 		return -EINVAL;
 	}
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
 
 	rc = cam_icp_mgr_release_ctx(hw_mgr, ctx_id);
-	if (rc) {
-		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	if (rc)
 		return -EINVAL;
-	}
-
-	--hw_mgr->ctxt_cnt;
-	if (!hw_mgr->ctxt_cnt) {
-		ICP_DBG("stop cpas for last context\n");
-		cam_icp_stop_cpas(hw_mgr);
-	}
-	ICP_DBG("context count : %u\n", hw_mgr->ctxt_cnt);
-	mutex_unlock(&hw_mgr->hw_mgr_mutex);
 
 	ICP_DBG("fw handle %d\n", fw_handle);
 	return rc;
@@ -1662,13 +1509,6 @@
 
 	/* Fill ctx with acquire info */
 	ctx_data = &hw_mgr->ctx_data[ctx_id];
-
-	if (!hw_mgr->ctxt_cnt++) {
-		ICP_DBG("starting cpas\n");
-		cam_icp_start_cpas(hw_mgr);
-	}
-	ICP_DBG("context count : %u\n", hw_mgr->ctxt_cnt);
-
 	mutex_unlock(&hw_mgr->hw_mgr_mutex);
 
 	/* Fill ctx with acquire info */
@@ -1782,9 +1622,6 @@
 create_handle_failed:
 get_create_task_failed:
 cmd_cpu_buf_failed:
-	--hw_mgr->ctxt_cnt;
-	if (!hw_mgr->ctxt_cnt)
-		cam_icp_stop_cpas(hw_mgr);
 	cam_icp_mgr_release_ctx(hw_mgr, ctx_id);
 	kfree(tmp_acquire);
 	return rc;
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_core.c b/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_core.c
index 15cb943..07f63d2 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_core.c
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_core.c
@@ -43,7 +43,7 @@
 		rc = cam_cpas_update_axi_vote(core_info->cpas_handle,
 			&cpas_vote->axi_vote);
 
-	if (rc < 0)
+	if (rc)
 		pr_err("cpas vote is failed: %d\n", rc);
 
 	return rc;
@@ -78,15 +78,19 @@
 
 	rc = cam_cpas_start(core_info->cpas_handle,
 		&cpas_vote.ahb_vote, &cpas_vote.axi_vote);
-	if (rc < 0) {
+	if (rc) {
 		pr_err("cpass start failed: %d\n", rc);
 		return rc;
 	}
+	core_info->cpas_start = true;
 
 	rc = cam_ipe_enable_soc_resources(soc_info);
-	if (rc < 0) {
-		pr_err("soc enable is failed\n");
-		rc = cam_cpas_stop(core_info->cpas_handle);
+	if (rc) {
+		pr_err("soc enable is failed : %d\n", rc);
+		if (cam_cpas_stop(core_info->cpas_handle))
+			pr_err("cpas stop is failed\n");
+		else
+			core_info->cpas_start = false;
 	}
 
 	return rc;
@@ -113,12 +117,15 @@
 	}
 
 	rc = cam_ipe_disable_soc_resources(soc_info);
-	if (rc < 0)
-		pr_err("soc enable is failed\n");
+	if (rc)
+		pr_err("soc disable is failed : %d\n", rc);
 
-	rc = cam_cpas_stop(core_info->cpas_handle);
-	if (rc < 0)
-		pr_err("cpas stop is failed: %d\n", rc);
+	if (core_info->cpas_start) {
+		if (cam_cpas_stop(core_info->cpas_handle))
+			pr_err("cpas stop is failed\n");
+		else
+			core_info->cpas_start = false;
+	}
 
 	return rc;
 }
@@ -163,13 +170,19 @@
 		if (!cmd_args)
 			return -EINVAL;
 
-		rc = cam_cpas_start(core_info->cpas_handle,
-			&cpas_vote->ahb_vote, &cpas_vote->axi_vote);
+		if (!core_info->cpas_start) {
+			rc = cam_cpas_start(core_info->cpas_handle,
+				&cpas_vote->ahb_vote, &cpas_vote->axi_vote);
+			core_info->cpas_start = true;
+		}
 		break;
 	}
 
 	case CAM_ICP_IPE_CMD_CPAS_STOP:
-		cam_cpas_stop(core_info->cpas_handle);
+		if (core_info->cpas_start) {
+			cam_cpas_stop(core_info->cpas_handle);
+			core_info->cpas_start = false;
+		}
 		break;
 	default:
 		break;
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_core.h b/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_core.h
index 4818846..8f0e882 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_core.h
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_core.h
@@ -26,6 +26,7 @@
 struct cam_ipe_device_core_info {
 	struct cam_ipe_device_hw_info *ipe_hw_info;
 	uint32_t cpas_handle;
+	bool cpas_start;
 };
 
 int cam_ipe_init_hw(void *device_priv,
diff --git a/drivers/media/platform/msm/vidc/Makefile b/drivers/media/platform/msm/vidc/Makefile
index 7bad081..e33eaa8 100644
--- a/drivers/media/platform/msm/vidc/Makefile
+++ b/drivers/media/platform/msm/vidc/Makefile
@@ -1,6 +1,7 @@
 ccflags-y += -I$(srctree)/drivers/media/platform/msm/vidc/
 
 msm-vidc-objs := msm_v4l2_vidc.o \
+				msm_vidc_platform.o \
                                 msm_vidc_common.o \
                                 msm_vidc.o \
                                 msm_vdec.o \
diff --git a/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c b/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c
index f7ce757..d329a8b 100644
--- a/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c
+++ b/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c
@@ -16,6 +16,7 @@
 #include "msm_vidc_internal.h"
 #include "msm_vidc_debug.h"
 #include "vidc_hfi_api.h"
+#define COMPRESSION_RATIO_MAX 5
 
 static bool debug;
 module_param(debug, bool, 0644);
@@ -30,13 +31,6 @@
 	struct devfreq_governor devfreq_gov;
 };
 
-enum scenario {
-	SCENARIO_WORST,
-	SCENARIO_SUSTAINED_WORST,
-	SCENARIO_AVERAGE,
-	SCENARIO_MAX,
-};
-
 /*
  * Minimum dimensions that the governor is willing to calculate
  * bandwidth for.  This means that anything bandwidth(0, 0) ==
@@ -62,15 +56,9 @@
 #define kbps(__mbps) ((__mbps) * 1000)
 #define bps(__mbps) (kbps(__mbps) * 1000)
 
-#define GENERATE_SCENARIO_PROFILE(__average, __worst) {                        \
-	[SCENARIO_AVERAGE] = (__average),                                      \
-	[SCENARIO_WORST] =  (__worst),                                         \
-	[SCENARIO_SUSTAINED_WORST] = (__worst),                                \
-}
-
-#define GENERATE_COMPRESSION_PROFILE(__bpp, __average, __worst) {              \
+#define GENERATE_COMPRESSION_PROFILE(__bpp, __worst) {              \
 	.bpp = __bpp,                                                          \
-	.ratio = GENERATE_SCENARIO_PROFILE(__average, __worst),                \
+	.ratio = __worst,                \
 }
 
 /*
@@ -85,109 +73,168 @@
  *  4096   2160|     44   88|     2.2       1.26      1.97        1.22|
  *  4096   2304|     48   96|     2.2       1.26      1.97        1.22|
  */
-#define COMPRESSION_RATIO_MAX 2
 static struct lut {
 	int frame_size; /* width x height */
-	unsigned long bitrate[SCENARIO_MAX];
+	int frame_rate;
+	unsigned long bitrate;
 	struct {
 		int bpp;
-		fp_t ratio[SCENARIO_MAX];
+		fp_t ratio;
 	} compression_ratio[COMPRESSION_RATIO_MAX];
 } const LUT[] = {
 	{
 		.frame_size = 1280 * 720,
-		.bitrate = GENERATE_SCENARIO_PROFILE(7, 14),
+		.frame_rate = 30,
+		.bitrate = 14,
 		.compression_ratio = {
 			GENERATE_COMPRESSION_PROFILE(8,
-					FP(1, 69, 100),
 					FP(1, 28, 100)),
 			GENERATE_COMPRESSION_PROFILE(10,
-					FP(1, 49, 100),
+					FP(1, 23, 100)),
+		}
+	},
+	{
+		.frame_size = 1280 * 720,
+		.frame_rate = 60,
+		.bitrate = 22,
+		.compression_ratio = {
+			GENERATE_COMPRESSION_PROFILE(8,
+					FP(1, 28, 100)),
+			GENERATE_COMPRESSION_PROFILE(10,
 					FP(1, 23, 100)),
 		}
 	},
 	{
 		.frame_size = 1920 * 1088,
-		.bitrate = GENERATE_SCENARIO_PROFILE(20, 40),
+		.frame_rate = 30,
+		.bitrate = 40,
 		.compression_ratio = {
 			GENERATE_COMPRESSION_PROFILE(8,
-					FP(1, 69, 100),
 					FP(1, 28, 100)),
 			GENERATE_COMPRESSION_PROFILE(10,
-					FP(1, 49, 100),
+					FP(1, 23, 100)),
+		}
+	},
+	{
+		.frame_size = 1920 * 1088,
+		.frame_rate = 60,
+		.bitrate = 64,
+		.compression_ratio = {
+			GENERATE_COMPRESSION_PROFILE(8,
+					FP(1, 28, 100)),
+			GENERATE_COMPRESSION_PROFILE(10,
 					FP(1, 23, 100)),
 		}
 	},
 	{
 		.frame_size = 2560 * 1440,
-		.bitrate = GENERATE_SCENARIO_PROFILE(32, 64),
+		.frame_rate = 30,
+		.bitrate = 64,
 		.compression_ratio = {
 			GENERATE_COMPRESSION_PROFILE(8,
-					FP(2, 20, 100),
 					FP(1, 26, 100)),
 			GENERATE_COMPRESSION_PROFILE(10,
-					FP(1, 97, 100),
+					FP(1, 22, 100)),
+		}
+	},
+	{
+		.frame_size = 2560 * 1440,
+		.frame_rate = 60,
+		.bitrate = 102,
+		.compression_ratio = {
+			GENERATE_COMPRESSION_PROFILE(8,
+					FP(1, 26, 100)),
+			GENERATE_COMPRESSION_PROFILE(10,
 					FP(1, 22, 100)),
 		}
 	},
 	{
 		.frame_size = 3840 * 2160,
-		.bitrate = GENERATE_SCENARIO_PROFILE(42, 84),
+		.frame_rate = 30,
+		.bitrate = 84,
 		.compression_ratio = {
 			GENERATE_COMPRESSION_PROFILE(8,
-					FP(2, 20, 100),
 					FP(1, 26, 100)),
 			GENERATE_COMPRESSION_PROFILE(10,
-					FP(1, 97, 100),
+					FP(1, 22, 100)),
+		}
+	},
+	{
+		.frame_size = 3840 * 2160,
+		.frame_rate = 60,
+		.bitrate = 134,
+		.compression_ratio = {
+			GENERATE_COMPRESSION_PROFILE(8,
+					FP(1, 26, 100)),
+			GENERATE_COMPRESSION_PROFILE(10,
 					FP(1, 22, 100)),
 		}
 	},
 	{
 		.frame_size = 4096 * 2160,
-		.bitrate = GENERATE_SCENARIO_PROFILE(44, 88),
+		.frame_rate = 30,
+		.bitrate = 88,
 		.compression_ratio = {
 			GENERATE_COMPRESSION_PROFILE(8,
-					FP(2, 20, 100),
 					FP(1, 26, 100)),
 			GENERATE_COMPRESSION_PROFILE(10,
-					FP(1, 97, 100),
+					FP(1, 22, 100)),
+		}
+	},
+	{
+		.frame_size = 4096 * 2160,
+		.frame_rate = 60,
+		.bitrate = 141,
+		.compression_ratio = {
+			GENERATE_COMPRESSION_PROFILE(8,
+					FP(1, 26, 100)),
+			GENERATE_COMPRESSION_PROFILE(10,
 					FP(1, 22, 100)),
 		}
 	},
 	{
 		.frame_size = 4096 * 2304,
-		.bitrate = GENERATE_SCENARIO_PROFILE(48, 96),
+		.frame_rate = 30,
+		.bitrate = 96,
 		.compression_ratio = {
 			GENERATE_COMPRESSION_PROFILE(8,
-					FP(2, 20, 100),
 					FP(1, 26, 100)),
 			GENERATE_COMPRESSION_PROFILE(10,
-					FP(1, 97, 100),
+					FP(1, 22, 100)),
+		}
+	},
+	{
+		.frame_size = 4096 * 2304,
+		.frame_rate = 60,
+		.bitrate = 154,
+		.compression_ratio = {
+			GENERATE_COMPRESSION_PROFILE(8,
+					FP(1, 26, 100)),
+			GENERATE_COMPRESSION_PROFILE(10,
 					FP(1, 22, 100)),
 		}
 	},
 };
 
-static struct lut const *__lut(int width, int height)
+static struct lut const *__lut(int width, int height, int fps)
 {
 	int frame_size = height * width, c = 0;
 
 	do {
-		if (LUT[c].frame_size >= frame_size)
+		if (LUT[c].frame_size >= frame_size && LUT[c].frame_rate >= fps)
 			return &LUT[c];
 	} while (++c < ARRAY_SIZE(LUT));
 
 	return &LUT[ARRAY_SIZE(LUT) - 1];
 }
 
-static fp_t __compression_ratio(struct lut const *entry, int bpp,
-		enum scenario s)
+static fp_t __compression_ratio(struct lut const *entry, int bpp)
 {
 	int c = 0;
 
 	for (c = 0; c < COMPRESSION_RATIO_MAX; ++c) {
 		if (entry->compression_ratio[c].bpp == bpp)
-			return entry->compression_ratio[c].ratio[s];
+			return entry->compression_ratio[c].ratio;
 	}
 
 	WARN(true, "Shouldn't be here, LUT possibly corrupted?\n");
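
With frame_rate now in the table, __lut() is a first-fit walk: it returns the first row whose frame_size and frame_rate both cover the request, clamping to the last row when nothing does. A 1920x1080 at 60 fps decode, for example, skips the 1920*1088/30 row and selects the 1920*1088/60 one (bitrate 64). A stand-alone illustration of the same walk over a trimmed copy of the table; lut_lookup() is a hypothetical name, not the driver function:

#include <stdio.h>

struct lut_row { int frame_size, frame_rate; unsigned long bitrate; };

/* Worst-case bitrates from the LUT above, size- then fps-ordered. */
static const struct lut_row rows[] = {
	{ 1280 * 720,  30, 14 }, { 1280 * 720,  60, 22 },
	{ 1920 * 1088, 30, 40 }, { 1920 * 1088, 60, 64 },
	{ 3840 * 2160, 30, 84 }, { 3840 * 2160, 60, 134 },
};

static const struct lut_row *lut_lookup(int width, int height, int fps)
{
	int frame_size = width * height;
	size_t c;

	for (c = 0; c < sizeof(rows) / sizeof(rows[0]); c++)
		if (rows[c].frame_size >= frame_size &&
		    rows[c].frame_rate >= fps)
			return &rows[c];

	/* nothing covers the request: clamp to the largest entry */
	return &rows[sizeof(rows) / sizeof(rows[0]) - 1];
}

int main(void)
{
	/* 1080p60 skips the 30 fps row and picks 1920*1088@60 -> 64 */
	printf("%lu\n", lut_lookup(1920, 1080, 60)->bitrate);
	return 0;
}
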
@@ -282,23 +329,20 @@
 	 * measured heuristics and hardcoded numbers taken from the firmware.
 	 */
 	/* Decoder parameters */
-	enum scenario scenario;
-	int width, height, lcu_size, dpb_bpp, opb_bpp, fps;
+	int width, height, lcu_size, dpb_bpp, opb_bpp, fps, opb_factor;
 	bool unified_dpb_opb, dpb_compression_enabled, opb_compression_enabled;
-	fp_t dpb_opb_scaling_ratio, dpb_compression_factor,
-		opb_compression_factor, qsmmu_bw_overhead_factor;
-	int vmem_size; /* in kB */
+	fp_t dpb_opb_scaling_ratio, dpb_read_compression_factor,
+		dpb_write_compression_factor, opb_compression_factor,
+		qsmmu_bw_overhead_factor, height_ratio;
 
 	/* Derived parameters */
-	int lcu_per_frame, tnbr_per_lcu_10bpc, tnbr_per_lcu_8bpc, tnbr_per_lcu,
-		colocated_bytes_per_lcu, vmem_line_buffer, vmem_chroma_cache,
-		vmem_luma_cache, vmem_chroma_luma_cache;
+	int lcu_per_frame, tnbr_per_lcu, colocated_bytes_per_lcu;
 	unsigned long bitrate;
+
 	fp_t bins_to_bit_factor, dpb_write_factor, ten_bpc_packing_factor,
 		ten_bpc_bpp_factor, vsp_read_factor, vsp_write_factor,
-		ocmem_usage_lcu_factor, ref_ocmem_bw_factor_read,
-		ref_ocmem_bw_factor_write, bw_for_1x_8bpc, dpb_bw_for_1x,
-		motion_vector_complexity, row_cache_penalty, opb_bw;
+		bw_for_1x_8bpc, dpb_bw_for_1x,
+		motion_vector_complexity = 0, row_cache_penalty = 0, opb_bw = 0;
 
 	/* Output parameters */
 	struct {
@@ -306,17 +350,15 @@
 			line_buffer_read, line_buffer_write, recon_read,
 			recon_write, opb_read, opb_write, dpb_read, dpb_write,
 			total;
-	} ddr, vmem;
+	} ddr = {0};
 
 	unsigned long ret = 0;
+	unsigned int integer_part, frac_part;
 
-	/* Decoder parameters setup */
-	scenario = SCENARIO_WORST;
+	width = max(d->input_width, BASELINE_DIMENSIONS.width);
+	height = max(d->input_height, BASELINE_DIMENSIONS.height);
 
-	width = max(d->width, BASELINE_DIMENSIONS.width);
-	height = max(d->height, BASELINE_DIMENSIONS.height);
-
-	lcu_size = 32;
+	lcu_size = d->lcu_size;
 
 	dpb_bpp = d->num_formats >= 1 ? __bpp(d->color_formats[0]) : INT_MAX;
 	opb_bpp = d->num_formats >= 2 ?  __bpp(d->color_formats[1]) : dpb_bpp;
@@ -325,134 +367,61 @@
 
 	unified_dpb_opb = d->num_formats == 1;
 
-	dpb_opb_scaling_ratio = FP_ONE;
+	dpb_opb_scaling_ratio = fp_div(FP_INT(d->input_width * d->input_height),
+		FP_INT(d->output_width * d->output_height));
+	height_ratio = fp_div(d->input_height, d->output_height);
 
 	dpb_compression_enabled = d->num_formats >= 1 &&
 		__ubwc(d->color_formats[0]);
 	opb_compression_enabled = d->num_formats >= 2 &&
 		__ubwc(d->color_formats[1]);
 
-	dpb_compression_factor = !dpb_compression_enabled ? FP_ONE :
-		__compression_ratio(__lut(width, height), dpb_bpp, scenario);
+	/*
+	 * Convert a Q16 number into integer and fractional parts, keeping
+	 * two decimal places in the fraction.
+	 * Ex: 105752 / 65536 = 1.61; 1.61 in Q16 = 105752;
+	 * Integer part = 105752 / 65536 = 1;
+	 * Remainder = 105752 - 1 * 65536 = 40216;
+	 * Fractional part = 40216 * 100 / 65536 = 61;
+	 * Now convert to FP(1, 61, 100) for the code below.
+	 */
+
+	integer_part = d->compression_ratio >> 16;
+	frac_part =
+		((d->compression_ratio - (integer_part * 65536)) * 100) >> 16;
+
+	dpb_read_compression_factor = FP(integer_part, frac_part, 100);
+
+	dpb_write_compression_factor = !dpb_compression_enabled ? FP_ONE :
+		__compression_ratio(__lut(width, height, fps), opb_bpp);
 
 	opb_compression_factor = !opb_compression_enabled ? FP_ONE :
-		__compression_ratio(__lut(width, height), opb_bpp, scenario);
+		__compression_ratio(__lut(width, height, fps), opb_bpp);
 
-	vmem_size = 512; /* in kB */
 
 	/* Derived parameters setup */
 	lcu_per_frame = DIV_ROUND_UP(width, lcu_size) *
 		DIV_ROUND_UP(height, lcu_size);
 
-	bitrate = __lut(width, height)->bitrate[scenario];
+	bitrate = __lut(width, height, fps)->bitrate;
 
-	bins_to_bit_factor = FP(1, 60, 100);
+	bins_to_bit_factor = d->work_mode == VIDC_WORK_MODE_1 ?
+		FP_INT(0) : FP_INT(4);
 
-	dpb_write_factor = scenario == SCENARIO_AVERAGE ?
-		FP_ONE : FP(1, 5, 100);
+	vsp_read_factor = bins_to_bit_factor + FP_INT(2);
+
+	dpb_write_factor = FP(1, 5, 100);
 
 	ten_bpc_packing_factor = FP(1, 67, 1000);
 	ten_bpc_bpp_factor = FP(1, 1, 4);
 
-	vsp_read_factor = bins_to_bit_factor + FP_INT(2);
 	vsp_write_factor = bins_to_bit_factor;
 
-	tnbr_per_lcu_10bpc = lcu_size == 16 ? 384 + 192 :
-				lcu_size == 32 ? 640 + 256 :
-						1280 + 384;
-	tnbr_per_lcu_8bpc = lcu_size == 16 ? 256 + 192 :
-				lcu_size == 32 ? 512 + 256 :
-						1024 + 384;
-	tnbr_per_lcu = dpb_bpp == 10 ? tnbr_per_lcu_10bpc : tnbr_per_lcu_8bpc;
+	tnbr_per_lcu = lcu_size == 16 ? 128 :
+		lcu_size == 32 ? 64 : 128;
 
 	colocated_bytes_per_lcu = lcu_size == 16 ? 16 :
 				lcu_size == 32 ? 64 : 256;
 
-	ocmem_usage_lcu_factor = lcu_size == 16 ? FP(1, 8, 10) :
-				lcu_size == 32 ? FP(1, 2, 10) :
-						FP_ONE;
-	ref_ocmem_bw_factor_read = vmem_size < 296 ? FP_ZERO :
-				vmem_size < 648 ? FP(0, 1, 4) :
-						FP(0, 55, 100);
-	ref_ocmem_bw_factor_write = vmem_size < 296 ? FP_ZERO :
-				vmem_size < 648 ? FP(0, 7, 10) :
-						FP(1, 4, 10);
-
-	/* Prelim b/w calculation */
-	bw_for_1x_8bpc = fp_mult(FP_INT(width * height * fps),
-			fp_mult(FP(1, 50, 100), dpb_write_factor));
-	bw_for_1x_8bpc = fp_div(bw_for_1x_8bpc, FP_INT(bps(1)));
-
-	dpb_bw_for_1x = dpb_bpp == 8 ? bw_for_1x_8bpc :
-		fp_mult(bw_for_1x_8bpc, fp_mult(ten_bpc_packing_factor,
-					ten_bpc_bpp_factor));
-	/* VMEM adjustments */
-	vmem_line_buffer = tnbr_per_lcu * DIV_ROUND_UP(width, lcu_size) / 1024;
-	vmem_chroma_cache = dpb_bpp == 10 ? 176 : 128;
-	vmem_luma_cache = dpb_bpp == 10 ? 353 : 256;
-	vmem_chroma_luma_cache = vmem_chroma_cache + vmem_luma_cache;
-
-	motion_vector_complexity = scenario == SCENARIO_AVERAGE ?
-		FP(2, 66, 100) : FP_INT(4);
-
-	row_cache_penalty = FP_ZERO;
-	if (vmem_size < vmem_line_buffer + vmem_chroma_cache)
-		row_cache_penalty = fp_mult(FP(0, 5, 100),
-				motion_vector_complexity);
-	else if (vmem_size < vmem_line_buffer + vmem_luma_cache)
-		row_cache_penalty = fp_mult(FP(0, 7, 100),
-				motion_vector_complexity);
-	else if (vmem_size < vmem_line_buffer + vmem_chroma_cache
-			+ vmem_luma_cache)
-		row_cache_penalty = fp_mult(FP(0, 3, 100),
-				motion_vector_complexity);
-	else
-		row_cache_penalty = FP_ZERO;
-
-
-	opb_bw = unified_dpb_opb ? FP_ZERO :
-		fp_div(fp_div(bw_for_1x_8bpc, dpb_opb_scaling_ratio),
-				opb_compression_factor);
-
-	/* B/W breakdown on a per buffer type basis for VMEM */
-	vmem.vsp_read = FP_ZERO;
-	vmem.vsp_write = FP_ZERO;
-
-	vmem.collocated_read = FP_ZERO;
-	vmem.collocated_write = FP_ZERO;
-
-	vmem.line_buffer_read = FP_INT(tnbr_per_lcu *
-			lcu_per_frame * fps / bps(1));
-	vmem.line_buffer_write = vmem.line_buffer_read;
-
-	vmem.recon_read = FP_ZERO;
-	vmem.recon_write = FP_ZERO;
-
-	vmem.opb_read = FP_ZERO;
-	vmem.opb_write = FP_ZERO;
-
-	vmem.dpb_read = fp_mult(ocmem_usage_lcu_factor, fp_mult(
-					ref_ocmem_bw_factor_read,
-					dpb_bw_for_1x));
-	vmem.dpb_write = fp_mult(ocmem_usage_lcu_factor, fp_mult(
-					ref_ocmem_bw_factor_write,
-					dpb_bw_for_1x));
-
-	vmem.total = vmem.vsp_read + vmem.vsp_write +
-		vmem.collocated_read + vmem.collocated_write +
-		vmem.line_buffer_read + vmem.line_buffer_write +
-		vmem.recon_read + vmem.recon_write +
-		vmem.opb_read + vmem.opb_write +
-		vmem.dpb_read + vmem.dpb_write;
-
-	/*
-	 * Attempt to force VMEM to a certain frequency for 4K
-	 */
-	if (width * height * fps >= 3840 * 2160 * 60)
-		vmem.total = FP_INT(NOMINAL_BW_MBPS);
-	else if (width * height * fps >= 3840 * 2160 * 30)
-		vmem.total = FP_INT(SVS_BW_MBPS);
-
 	/* ........................................ for DDR */
 	ddr.vsp_read = fp_div(fp_mult(FP_INT(bitrate),
 				vsp_read_factor), FP_INT(8));
@@ -464,35 +433,51 @@
 	ddr.collocated_write = FP_INT(lcu_per_frame *
 			colocated_bytes_per_lcu * fps / bps(1));
 
-	ddr.line_buffer_read = vmem_size ? FP_ZERO : vmem.line_buffer_read;
-	ddr.line_buffer_write = vmem_size ? FP_ZERO : vmem.line_buffer_write;
+	ddr.line_buffer_read = FP_INT(tnbr_per_lcu *
+			lcu_per_frame * fps / bps(1));
+	ddr.line_buffer_write = ddr.line_buffer_read;
 
-	ddr.recon_read = FP_ZERO;
-	ddr.recon_write = fp_div(dpb_bw_for_1x, dpb_compression_factor);
+	motion_vector_complexity = FP_INT(4);
 
-	ddr.opb_read = FP_ZERO;
-	ddr.opb_write = opb_bw;
+	bw_for_1x_8bpc = fp_div(FP_INT(width * height), FP_INT(32 * 8));
 
-	ddr.dpb_read = fp_div(fp_mult(dpb_bw_for_1x,
-				motion_vector_complexity + row_cache_penalty),
-			dpb_compression_factor);
-	ddr.dpb_write = FP_ZERO;
+	bw_for_1x_8bpc = fp_mult(bw_for_1x_8bpc,
+		fp_div(FP_INT(256 * 30), FP_INT(1000 * 1000)));
+
+	dpb_bw_for_1x = dpb_bpp == 8 ? bw_for_1x_8bpc :
+		fp_mult(bw_for_1x_8bpc, fp_mult(ten_bpc_packing_factor,
+				ten_bpc_bpp_factor));
+
+	ddr.dpb_read = fp_div(fp_mult(fp_mult(dpb_bw_for_1x,
+			motion_vector_complexity), dpb_write_factor),
+			dpb_read_compression_factor);
+
+	ddr.dpb_write = fp_div(fp_mult(dpb_bw_for_1x, dpb_write_factor),
+		dpb_write_compression_factor);
+
+	opb_factor = dpb_bpp == 8 ? 8 : 4;
+
+	ddr.opb_read = unified_dpb_opb ? 0 : opb_compression_enabled ?
+		fp_div(fp_mult(fp_div(dpb_bw_for_1x, dpb_opb_scaling_ratio),
+		FP_INT(opb_factor)), height_ratio) : 0;
+	ddr.opb_write = unified_dpb_opb ? 0 : opb_compression_enabled ?
+		ddr.dpb_read : fp_div(fp_div(fp_mult(dpb_bw_for_1x,
+		FP(1, 50, 100)), dpb_opb_scaling_ratio),
+			opb_compression_factor);
 
 	ddr.total = ddr.vsp_read + ddr.vsp_write +
 		ddr.collocated_read + ddr.collocated_write +
-		ddr.line_buffer_read + ddr.line_buffer_write +
-		ddr.recon_read + ddr.recon_write +
 		ddr.opb_read + ddr.opb_write +
 		ddr.dpb_read + ddr.dpb_write;
 
 	qsmmu_bw_overhead_factor = FP(1, 3, 100);
+
 	ddr.total = fp_mult(ddr.total, qsmmu_bw_overhead_factor);
 
 	/* Dump all the variables for easier debugging */
 	if (debug) {
 		struct dump dump[] = {
 		{"DECODER PARAMETERS", "", DUMP_HEADER_MAGIC},
-		{"content", "%d", scenario},
 		{"LCU size", "%d", lcu_size},
 		{"DPB bitdepth", "%d", dpb_bpp},
 		{"frame rate", "%d", fps},
@@ -501,11 +486,12 @@
 			dpb_opb_scaling_ratio},
 		{"DPB compression", "%d", dpb_compression_enabled},
 		{"OPB compression", "%d", opb_compression_enabled},
-		{"DPB compression factor", DUMP_FP_FMT,
-			dpb_compression_factor},
+		{"DPB Read compression factor", DUMP_FP_FMT,
+			dpb_read_compression_factor},
+		{"DPB Write compression factor", DUMP_FP_FMT,
+			dpb_write_compression_factor},
 		{"OPB compression factor", DUMP_FP_FMT,
 			opb_compression_factor},
-		{"VMEM size", "%dkB", vmem_size},
 		{"frame width", "%d", width},
 		{"frame height", "%d", height},
 
@@ -519,25 +505,11 @@
 		{"10bpc,BPP factor", DUMP_FP_FMT, ten_bpc_bpp_factor},
 		{"VSP read factor", DUMP_FP_FMT, vsp_read_factor},
 		{"VSP write factor", DUMP_FP_FMT, vsp_write_factor},
-		{"TNBR/LCU_10bpc", "%d", tnbr_per_lcu_10bpc},
-		{"TNBR/LCU_8bpc", "%d", tnbr_per_lcu_8bpc},
 		{"TNBR/LCU", "%d", tnbr_per_lcu},
 		{"colocated bytes/LCU", "%d", colocated_bytes_per_lcu},
-		{"OCMEM usage LCU factor", DUMP_FP_FMT,
-			ocmem_usage_lcu_factor},
-		{"ref OCMEM b/w factor (read)", DUMP_FP_FMT,
-			ref_ocmem_bw_factor_read},
-		{"ref OCMEM b/w factor (write)", DUMP_FP_FMT,
-			ref_ocmem_bw_factor_write},
 		{"B/W for 1x (NV12 8bpc)", DUMP_FP_FMT, bw_for_1x_8bpc},
 		{"DPB B/W For 1x (NV12)", DUMP_FP_FMT, dpb_bw_for_1x},
 
-		{"VMEM", "", DUMP_HEADER_MAGIC},
-		{"line buffer", "%d", vmem_line_buffer},
-		{"chroma cache", "%d", vmem_chroma_cache},
-		{"luma cache", "%d", vmem_luma_cache},
-		{"luma & chroma cache", "%d", vmem_chroma_luma_cache},
-
 		{"DERIVED PARAMETERS (2)", "", DUMP_HEADER_MAGIC},
 		{"MV complexity", DUMP_FP_FMT, motion_vector_complexity},
 		{"row cache penalty", DUMP_FP_FMT, row_cache_penalty},
@@ -557,19 +529,6 @@
 		{"DPB read", DUMP_FP_FMT, ddr.dpb_read},
 		{"DPB write", DUMP_FP_FMT, ddr.dpb_write},
 
-		{"INTERMEDIATE VMEM B/W", "", DUMP_HEADER_MAGIC},
-		{"VSP read", "%d", vmem.vsp_read},
-		{"VSP write", DUMP_FP_FMT, vmem.vsp_write},
-		{"collocated read", DUMP_FP_FMT, vmem.collocated_read},
-		{"collocated write", DUMP_FP_FMT, vmem.collocated_write},
-		{"line buffer read", DUMP_FP_FMT, vmem.line_buffer_read},
-		{"line buffer write", DUMP_FP_FMT, vmem.line_buffer_write},
-		{"recon read", DUMP_FP_FMT, vmem.recon_read},
-		{"recon write", DUMP_FP_FMT, vmem.recon_write},
-		{"OPB read", DUMP_FP_FMT, vmem.opb_read},
-		{"OPB write", DUMP_FP_FMT, vmem.opb_write},
-		{"DPB read", DUMP_FP_FMT, vmem.dpb_read},
-		{"DPB write", DUMP_FP_FMT, vmem.dpb_write},
 		};
 		__dump(dump, ARRAY_SIZE(dump));
 	}
@@ -579,7 +538,7 @@
 		ret = kbps(fp_round(ddr.total));
 		break;
 	case GOVERNOR_LLCC:
-		ret = kbps(fp_round(vmem.total));
+		dprintk(VIDC_PROF, "LLCC Voting not supported yet\n");
 		break;
 	default:
 		dprintk(VIDC_ERR, "%s - Unknown governor\n", __func__);
@@ -597,9 +556,9 @@
 	 * measured heuristics and hardcoded numbers taken from the firmware.
 	 */
 	/* Encoder Parameters */
-	enum scenario scenario, bitrate_scenario;
+
 	enum hal_video_codec standard;
-	int width, height, fps, vmem_size;
+	int width, height, fps;
 	enum hal_uncompressed_format dpb_color_format;
 	enum hal_uncompressed_format original_color_format;
 	bool dpb_compression_enabled, original_compression_enabled,
@@ -618,35 +577,34 @@
 	fp_t bins_to_bit_factor, chroma_luma_factor_dpb, one_frame_bw_dpb,
 		 chroma_luma_factor_original, one_frame_bw_original,
 		 line_buffer_size_per_lcu, line_buffer_size, line_buffer_bw,
-		 original_vmem_requirement, bw_increase_p, bw_increase_b;
+		 bw_increase_p, bw_increase_b;
 	int collocated_mv_per_lcu, max_transaction_size,
 		search_window_size_vertical_p, search_window_factor_p,
-		search_window_factor_bw_p, vmem_size_p, available_vmem_p,
+		search_window_factor_bw_p,
 		search_window_size_vertical_b, search_window_factor_b,
-		search_window_factor_bw_b, vmem_size_b, available_vmem_b;
+		search_window_factor_bw_b;
 
 	/* Output paramaters */
 	struct {
 		fp_t vsp_read, vsp_write, collocated_read, collocated_write,
 			line_buffer_read, line_buffer_write, original_read,
 			original_write, dpb_read, dpb_write, total;
-	} ddr, vmem;
+	} ddr = {0};
 
 	unsigned long ret = 0;
+	fp_t integer_part, frac_part;
 
 	/* Encoder Parameters setup */
-	scenario = SCENARIO_WORST;
 
 	standard = d->codec;
-	width = max(d->width, BASELINE_DIMENSIONS.width);
-	height = max(d->height, BASELINE_DIMENSIONS.height);
+	width = max(d->input_width, BASELINE_DIMENSIONS.width);
+	height = max(d->input_height, BASELINE_DIMENSIONS.height);
 
 	dpb_color_format = HAL_COLOR_FORMAT_NV12_UBWC;
 	original_color_format = d->num_formats >= 1 ?
 		d->color_formats[0] : HAL_UNUSED_COLOR;
 
 	fps = d->fps;
-	bitrate_scenario = SCENARIO_WORST;
 
 	dpb_compression_enabled = __ubwc(dpb_color_format);
 	original_compression_enabled = __ubwc(original_color_format);
@@ -655,21 +613,30 @@
 	low_power = d->power_mode == VIDC_POWER_LOW;
 	b_frames_enabled = false;
 
-	dpb_compression_factor = !dpb_compression_enabled ? FP_ONE :
-		__compression_ratio(__lut(width, height),
-				__bpp(dpb_color_format), scenario);
-	original_compression_factor = !original_compression_enabled ? FP_ONE :
-		__compression_ratio(__lut(width, height),
-				__bpp(original_color_format), scenario);
+	/*
+	 * Convert a Q16 number into integer and fractional parts, keeping
+	 * two decimal places in the fraction.
+	 * Ex: 105752 / 65536 = 1.61; 1.61 in Q16 = 105752;
+	 * Integer part = 105752 / 65536 = 1;
+	 * Remainder = 105752 - 1 * 65536 = 40216;
+	 * Fractional part = 40216 * 100 / 65536 = 61;
+	 * Now convert to FP(1, 61, 100) for the code below.
+	 */
+
+	integer_part = d->compression_ratio >> 16;
+	frac_part =
+		((d->compression_ratio - (integer_part * 65536)) * 100) >> 16;
+
+	dpb_compression_factor = FP(integer_part, frac_part, 100);
+
+	original_compression_factor = dpb_compression_factor;
 
 	rotation = false;
 	cropping_or_scaling = false;
-	vmem_size = 512; /* in kB */
 
 	/* Derived Parameters */
 	lcu_size = 16;
 	gop = b_frames_enabled ? GOP_IBBP : GOP_IPPP;
-	bitrate = __lut(width, height)->bitrate[bitrate_scenario];
+	bitrate = __lut(width, height, fps)->bitrate;
 	bins_to_bit_factor = FP(1, 6, 10);
 
 	/*
@@ -713,16 +680,6 @@
 	collocated_mv_per_lcu = lcu_size == 16 ? 16 : 64;
 	max_transaction_size = 256;
 
-	original_vmem_requirement = FP_INT(3 *
-			(two_stage_encoding ? 2 : 1) * lcu_size);
-	original_vmem_requirement = fp_mult(original_vmem_requirement,
-			(FP_ONE + chroma_luma_factor_original));
-	original_vmem_requirement += FP_INT((cropping_or_scaling ? 3 : 0) * 2);
-	original_vmem_requirement = fp_mult(original_vmem_requirement,
-			FP_INT(max_transaction_size));
-	original_vmem_requirement = fp_div(original_vmem_requirement,
-			FP_INT(1024));
-
 	search_window_size_vertical_p = low_power ? 32 :
 					b_frames_enabled ? 80 :
 					width > 2048 ? 64 : 48;
@@ -730,24 +687,16 @@
 	search_window_factor_bw_p = !two_stage_encoding ?
 		search_window_size_vertical_p * 2 / lcu_size + 1 :
 		(search_window_size_vertical_p * 2 / lcu_size + 2) / 2;
-	vmem_size_p = (search_window_factor_p * width + 128 * 2) *
-		lcu_size / 2 / 1024; /* XXX: CF hack */
 	bw_increase_p = fp_mult(one_frame_bw_dpb,
 			FP_INT(search_window_factor_bw_p - 1) / 3);
-	available_vmem_p = min_t(int, 3, (vmem_size - fp_int(line_buffer_size) -
-			fp_int(original_vmem_requirement)) / vmem_size_p);
 
 	search_window_size_vertical_b = 48;
 	search_window_factor_b = search_window_size_vertical_b * 2 / lcu_size;
 	search_window_factor_bw_b = !two_stage_encoding ?
 		search_window_size_vertical_b * 2 / lcu_size + 1 :
 		(search_window_size_vertical_b * 2 / lcu_size + 2) / 2;
-	vmem_size_b = (search_window_factor_b * width + 128 * 2) * lcu_size /
-		2 / 1024;
 	bw_increase_b = fp_mult(one_frame_bw_dpb,
 			FP_INT((search_window_factor_bw_b - 1) / 3));
-	available_vmem_b = min_t(int, 6, (vmem_size - fp_int(line_buffer_size) -
-			fp_int(original_vmem_requirement)) / vmem_size_b);
 
 	/* Output parameters for DDR */
 	ddr.vsp_read = fp_mult(fp_div(FP_INT(bitrate), FP_INT(8)),
@@ -759,8 +708,6 @@
 			collocated_mv_per_lcu * fps), FP_INT(1000 * 1000));
 	ddr.collocated_write = ddr.collocated_read;
 
-	ddr.line_buffer_read = (FP_INT(vmem_size) >= line_buffer_size +
-		original_vmem_requirement) ? FP_ZERO : line_buffer_bw;
 	ddr.line_buffer_write = ddr.line_buffer_read;
 
 	ddr.original_read = fp_div(one_frame_bw_original,
@@ -768,27 +715,6 @@
 	ddr.original_write = FP_ZERO;
 
 	ddr.dpb_read = FP_ZERO;
-	if (gop == GOP_IPPP) {
-		ddr.dpb_read = one_frame_bw_dpb + fp_mult(bw_increase_p,
-			FP_INT(3 - available_vmem_p));
-	} else if (scenario == SCENARIO_WORST ||
-			scenario == SCENARIO_SUSTAINED_WORST) {
-		ddr.dpb_read = fp_mult(one_frame_bw_dpb, FP_INT(2));
-		ddr.dpb_read += fp_mult(FP_INT(6 - available_vmem_b),
-				bw_increase_b);
-	} else {
-		fp_t part_p, part_b;
-
-		part_p = one_frame_bw_dpb + fp_mult(bw_increase_p,
-				FP_INT(3 - available_vmem_p));
-		part_p = fp_div(part_p, FP_INT(3));
-
-		part_b = fp_mult(one_frame_bw_dpb, 2) +
-			fp_mult(FP_INT(6 - available_vmem_b), bw_increase_b);
-		part_b = fp_mult(part_b, FP(0, 2, 3));
-
-		ddr.dpb_read = part_p + part_b;
-	}
 
 	ddr.dpb_read = fp_div(ddr.dpb_read, dpb_compression_factor);
 	ddr.dpb_write = fp_div(one_frame_bw_dpb, dpb_compression_factor);
@@ -802,103 +728,15 @@
 	qsmmu_bw_overhead_factor = FP(1, 3, 100);
 	ddr.total = fp_mult(ddr.total, qsmmu_bw_overhead_factor);
 
-	/* ................. for VMEM */
-	vmem.vsp_read = FP_ZERO;
-	vmem.vsp_write = FP_ZERO;
-
-	vmem.collocated_read = FP_ZERO;
-	vmem.collocated_write = FP_ZERO;
-
-	vmem.line_buffer_read = line_buffer_bw - ddr.line_buffer_read;
-	vmem.line_buffer_write = vmem.line_buffer_read;
-
-	vmem.original_read = FP_INT(vmem_size) >= original_vmem_requirement ?
-		ddr.original_read : FP_ZERO;
-	vmem.original_write = vmem.original_read;
-
-	vmem.dpb_read = FP_ZERO;
-	if (gop == GOP_IPPP) {
-		fp_t temp = fp_mult(one_frame_bw_dpb,
-			FP_INT(search_window_factor_bw_p * available_vmem_p));
-		temp = fp_div(temp, FP_INT(3));
-
-		vmem.dpb_read = temp;
-	} else if (scenario != SCENARIO_AVERAGE) {
-		fp_t temp = fp_mult(one_frame_bw_dpb, FP_INT(2));
-
-		temp = fp_mult(temp, FP_INT(search_window_factor_bw_b *
-					available_vmem_b));
-		temp = fp_div(temp, FP_INT(6));
-
-		vmem.dpb_read = temp;
-	} else {
-		fp_t part_p, part_b;
-
-		part_p = fp_mult(one_frame_bw_dpb, FP_INT(
-					search_window_factor_bw_p *
-					available_vmem_p));
-		part_p = fp_div(part_p, FP_INT(3 * 3));
-
-		part_b = fp_mult(one_frame_bw_dpb, FP_INT(2 *
-					search_window_factor_bw_b *
-					available_vmem_b));
-		part_b = fp_div(part_b, FP_INT(6));
-		part_b = fp_mult(part_b, FP(0, 2, 3));
-
-		vmem.dpb_read = part_p + part_b;
-	}
-
-	vmem.dpb_write = FP_ZERO;
-	if (gop == GOP_IPPP) {
-		fp_t temp = fp_mult(one_frame_bw_dpb,
-				FP_INT(available_vmem_p));
-		temp = fp_div(temp, FP_INT(3));
-
-		vmem.dpb_write = temp;
-	} else if (scenario != SCENARIO_AVERAGE) {
-		fp_t temp = fp_mult(one_frame_bw_dpb,
-				FP_INT(2 * available_vmem_b));
-		temp = fp_div(temp, FP_INT(6));
-
-		vmem.dpb_write = temp;
-	} else {
-		fp_t part_b, part_p;
-
-		part_b = fp_mult(one_frame_bw_dpb, FP_INT(available_vmem_p));
-		part_b = fp_div(part_b, FP_INT(9));
-
-		part_p = fp_mult(one_frame_bw_dpb, FP_INT(
-					2 * available_vmem_b));
-		part_p = fp_div(part_p, FP_INT(6));
-		part_b = fp_mult(part_b, FP(0, 2, 3));
-
-		vmem.dpb_write = part_p + part_b;
-	}
-
-	vmem.total = vmem.vsp_read + vmem.vsp_write +
-		vmem.collocated_read + vmem.collocated_write +
-		vmem.line_buffer_read + vmem.line_buffer_write +
-		vmem.original_read + vmem.original_write +
-		vmem.dpb_read + vmem.dpb_write;
-
-	/*
-	 * When in low power mode, attempt to force the VMEM clocks a certain
-	 * frequency that DCVS would prefer
-	 */
-	if (width * height >= 3840 * 2160 && low_power)
-		vmem.total = FP_INT(NOMINAL_BW_MBPS);
-
 	if (debug) {
 		struct dump dump[] = {
 		{"ENCODER PARAMETERS", "", DUMP_HEADER_MAGIC},
-		{"scenario", "%d", scenario},
 		{"standard", "%#x", standard},
 		{"width", "%d", width},
 		{"height", "%d", height},
 		{"DPB format", "%#x", dpb_color_format},
 		{"original frame format", "%#x", original_color_format},
 		{"fps", "%d", fps},
-		{"target bitrate", "%d", bitrate_scenario},
 		{"DPB compression enable", "%d", dpb_compression_enabled},
 		{"original compression enable", "%d",
 			original_compression_enabled},
@@ -910,7 +748,6 @@
 			original_compression_factor},
 		{"rotation", "%d", rotation},
 		{"cropping or scaling", "%d", cropping_or_scaling},
-		{"VMEM size (KB)", "%d", vmem_size},
 
 		{"DERIVED PARAMETERS", "", DUMP_HEADER_MAGIC},
 		{"LCU size", "%d", lcu_size},
@@ -923,17 +760,13 @@
 		{"search window factor (B)", "%d", search_window_factor_b},
 		{"search window factor BW (B)", "%d",
 			search_window_factor_bw_b},
-		{"VMEM size (B)", "%d", vmem_size_b},
 		{"bw increase (MB/s) (B)", DUMP_FP_FMT, bw_increase_b},
-		{"available VMEM (B)", "%d", available_vmem_b},
 		{"search window size vertical (P)", "%d",
 			search_window_size_vertical_p},
 		{"search window factor (P)", "%d", search_window_factor_p},
 		{"search window factor BW (P)", "%d",
 			search_window_factor_bw_p},
-		{"VMEM size (P)", "%d", vmem_size_p},
 		{"bw increase (MB/s) (P)", DUMP_FP_FMT, bw_increase_p},
-		{"available VMEM (P)", "%d", available_vmem_p},
 		{"chroma/luma factor DPB", DUMP_FP_FMT,
 			chroma_luma_factor_dpb},
 		{"one frame BW DPB (MB/s)", DUMP_FP_FMT, one_frame_bw_dpb},
@@ -946,8 +779,6 @@
 		{"line buffer size (KB)", DUMP_FP_FMT, line_buffer_size},
 		{"line buffer BW (MB/s)", DUMP_FP_FMT, line_buffer_bw},
 		{"collocated MVs per LCU", "%d", collocated_mv_per_lcu},
-		{"original VMEM requirement (KB)", DUMP_FP_FMT,
-			original_vmem_requirement},
 
 		{"INTERMEDIATE B/W DDR", "", DUMP_HEADER_MAGIC},
 		{"VSP read", DUMP_FP_FMT, ddr.vsp_read},
@@ -960,18 +791,6 @@
 		{"original read", DUMP_FP_FMT, ddr.original_write},
 		{"DPB read", DUMP_FP_FMT, ddr.dpb_read},
 		{"DPB write", DUMP_FP_FMT, ddr.dpb_write},
-
-		{"INTERMEDIATE B/W VMEM", "", DUMP_HEADER_MAGIC},
-		{"VSP read", DUMP_FP_FMT, vmem.vsp_read},
-		{"VSP read", DUMP_FP_FMT, vmem.vsp_write},
-		{"collocated read", DUMP_FP_FMT, vmem.collocated_read},
-		{"collocated read", DUMP_FP_FMT, vmem.collocated_write},
-		{"line buffer read", DUMP_FP_FMT, vmem.line_buffer_read},
-		{"line buffer read", DUMP_FP_FMT, vmem.line_buffer_write},
-		{"original read", DUMP_FP_FMT, vmem.original_read},
-		{"original read", DUMP_FP_FMT, vmem.original_write},
-		{"DPB read", DUMP_FP_FMT, vmem.dpb_read},
-		{"DPB write", DUMP_FP_FMT, vmem.dpb_write},
 		};
 		__dump(dump, ARRAY_SIZE(dump));
 	}
@@ -981,7 +800,7 @@
 		ret = kbps(fp_round(ddr.total));
 		break;
 	case GOVERNOR_LLCC:
-		ret = kbps(fp_round(vmem.total));
+		dprintk(VIDC_PROF, "LLCC Voting not supported yet\n");
 		break;
 	default:
 		dprintk(VIDC_ERR, "%s - Unknown governor\n", __func__);
@@ -1023,18 +842,21 @@
 	dev->profile->get_dev_status(dev->dev.parent, &stats);
 	vidc_data = (struct msm_vidc_gov_data *)stats.private_data;
 
+	if (!vidc_data || !vidc_data->data_count)
+		goto exit;
+
 	for (c = 0; c < vidc_data->data_count; ++c) {
 		if (vidc_data->data->power_mode == VIDC_POWER_TURBO) {
-			*freq = INT_MAX;
 			goto exit;
 		}
 	}
 
+	ab_kbps = 0;
 	for (c = 0; c < vidc_data->data_count; ++c)
 		ab_kbps += __calculate(&vidc_data->data[c], gov->mode);
 
-	*freq = clamp(ab_kbps, dev->min_freq, dev->max_freq ?: UINT_MAX);
 exit:
+	*freq = clamp(ab_kbps, dev->min_freq, dev->max_freq ?: UINT_MAX);
 	return 0;
 }
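
Both the decoder and encoder paths in this file convert the firmware-reported Q16 compression ratio into the FP(integer, fraction, 100) form using the arithmetic spelled out in their in-code comments. The same conversion as a stand-alone program, with q16_split() as a hypothetical helper rather than a driver function:

#include <stdio.h>

static void q16_split(unsigned int q16, unsigned int *int_part,
		      unsigned int *frac_part)
{
	*int_part = q16 >> 16;                  /* 105752 >> 16 = 1 */
	/* remainder scaled to two decimal places:
	 * 40216 * 100 / 65536 = 61 */
	*frac_part = ((q16 - (*int_part << 16)) * 100) >> 16;
}

int main(void)
{
	unsigned int i, f;

	q16_split(105752, &i, &f);              /* 105752 / 65536 ~= 1.61 */
	printf("FP(%u, %u, 100)\n", i, f);      /* prints FP(1, 61, 100) */
	return 0;
}
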
 
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index 8d54e20..40c306d 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -10,9 +10,6 @@
  * GNU General Public License for more details.
  *
  */
-#include <linux/errno.h>
-#include <linux/log2.h>
-#include <linux/hash.h>
 #include "hfi_packetization.h"
 #include "msm_vidc_debug.h"
 
@@ -868,8 +865,6 @@
 		output_frame->device_addr, output_frame->timestamp,
 		output_frame->alloc_len, output_frame->filled_len,
 		output_frame->offset);
-	dprintk(VIDC_DBG, "### Q OUTPUT BUFFER ###: %d, %d, %d\n",
-			pkt->alloc_len, pkt->filled_len, pkt->offset);
 
 	return rc;
 }
diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c
index f678f56..c2a93a96 100644
--- a/drivers/media/platform/msm/vidc/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c
@@ -1074,15 +1074,6 @@
 	}
 
 	while (req_bytes) {
-		if (hfi_buf_req->buffer_size &&
-			hfi_buf_req->buffer_count_min > hfi_buf_req->
-			buffer_count_actual)
-			dprintk(VIDC_WARN,
-				"Bad buffer requirements for %#x: min %d, actual %d\n",
-				hfi_buf_req->buffer_type,
-				hfi_buf_req->buffer_count_min,
-				hfi_buf_req->buffer_count_actual);
-
 		dprintk(VIDC_DBG, "got buffer requirements for: %d\n",
 					hfi_buf_req->buffer_type);
 		switch (hfi_buf_req->buffer_type) {
@@ -1320,12 +1311,17 @@
 	data_done.status = hfi_map_err_status(pkt->error_type);
 	data_done.size = sizeof(struct msm_vidc_cb_data_done);
 	data_done.clnt_data = pkt->input_tag;
+	data_done.input_done.recon_stats.buffer_index =
+		pkt->ubwc_cr_stats.frame_index;
+	memcpy(&data_done.input_done.recon_stats.ubwc_stats_info,
+		&pkt->ubwc_cr_stats.ubwc_stats_info,
+		sizeof(data_done.input_done.recon_stats.ubwc_stats_info));
+	data_done.input_done.recon_stats.complexity_number =
+		pkt->ubwc_cr_stats.complexity_number;
 	data_done.input_done.offset = pkt->offset;
 	data_done.input_done.filled_len = pkt->filled_len;
-	data_done.input_done.packet_buffer =
-		(ion_phys_addr_t)pkt->packet_buffer;
-	data_done.input_done.extra_data_buffer =
-		(ion_phys_addr_t)pkt->extra_data_buffer;
+	data_done.input_done.packet_buffer = pkt->packet_buffer;
+	data_done.input_done.extra_data_buffer = pkt->extra_data_buffer;
 	data_done.input_done.status =
 		hfi_map_err_status(pkt->error_type);
 	hfi_picture_type = (struct hfi_picture_type *)&pkt->rgData[0];
@@ -1406,10 +1402,9 @@
 		data_done.output_done.alloc_len1 = pkt->alloc_len;
 		data_done.output_done.filled_len1 = pkt->filled_len;
 		data_done.output_done.picture_type = pkt->picture_type;
-		data_done.output_done.packet_buffer1 =
-			(ion_phys_addr_t)pkt->packet_buffer;
+		data_done.output_done.packet_buffer1 = pkt->packet_buffer;
 		data_done.output_done.extra_data_buffer =
-			(ion_phys_addr_t)pkt->extra_data_buffer;
+			pkt->extra_data_buffer;
 		data_done.output_done.buffer_type = HAL_BUFFER_OUTPUT;
 	} else /* if (is_decoder) */ {
 		struct hfi_msg_session_fbd_uncompressed_plane0_packet *pkt =
diff --git a/drivers/media/platform/msm/vidc/msm_smem.c b/drivers/media/platform/msm/vidc/msm_smem.c
index b116622..9b23376 100644
--- a/drivers/media/platform/msm/vidc/msm_smem.c
+++ b/drivers/media/platform/msm/vidc/msm_smem.c
@@ -30,7 +30,7 @@
 	enum session_type session_type;
 };
 
-static int get_device_address(struct smem_client *smem_client,
+static int msm_ion_get_device_address(struct smem_client *smem_client,
 		struct ion_handle *hndl, unsigned long align,
 		ion_phys_addr_t *iova, unsigned long *buffer_size,
 		unsigned long flags, enum hal_buffer buffer_type,
@@ -122,12 +122,6 @@
 			goto mem_map_sg_failed;
 		}
 		if (table->sgl) {
-			dprintk(VIDC_DBG,
-				"%s: CB : %s, DMA buf: %pK, device: %pK, attach: %pK, table: %pK, table sgl: %pK, rc: %d, dma_address: %pa\n",
-				__func__, cb->name, buf, cb->dev, attach,
-				table, table->sgl, rc,
-				&table->sgl->dma_address);
-
 			*iova = table->sgl->dma_address;
 			*buffer_size = table->sgl->dma_length;
 		} else {
@@ -153,7 +147,6 @@
 		}
 	}
 
-	dprintk(VIDC_DBG, "mapped ion handle %pK to %pa\n", hndl, iova);
 	return 0;
 mem_map_sg_failed:
 	dma_buf_unmap_attachment(attach, table, DMA_BIDIRECTIONAL);
@@ -166,38 +159,26 @@
 	return rc;
 }
 
-static void put_device_address(struct smem_client *smem_client,
+static int msm_ion_put_device_address(struct smem_client *smem_client,
 	struct ion_handle *hndl, u32 flags,
 	struct dma_mapping_info *mapping_info,
 	enum hal_buffer buffer_type)
 {
-	struct ion_client *clnt = NULL;
+	int rc = 0;
 
 	if (!hndl || !smem_client || !mapping_info) {
 		dprintk(VIDC_WARN, "Invalid params: %pK, %pK\n",
 				smem_client, hndl);
-		return;
+		return -EINVAL;
 	}
 
 	if (!mapping_info->dev || !mapping_info->table ||
 		!mapping_info->buf || !mapping_info->attach) {
 		dprintk(VIDC_WARN, "Invalid params:\n");
-		return;
+		return -EINVAL;
 	}
 
-	clnt = smem_client->clnt;
-	if (!clnt) {
-		dprintk(VIDC_WARN, "Invalid client\n");
-		return;
-	}
 	if (is_iommu_present(smem_client->res)) {
-		dprintk(VIDC_DBG,
-			"Calling dma_unmap_sg - device: %pK, address: %pa, buf: %pK, table: %pK, attach: %pK\n",
-			mapping_info->dev,
-			&mapping_info->table->sgl->dma_address,
-			mapping_info->buf, mapping_info->table,
-			mapping_info->attach);
-
 		trace_msm_smem_buffer_iommu_op_start("UNMAP", 0, 0, 0, 0, 0);
 		msm_dma_unmap_sg(mapping_info->dev, mapping_info->table->sgl,
 			mapping_info->table->nents, DMA_BIDIRECTIONAL,
@@ -207,68 +188,257 @@
 		dma_buf_detach(mapping_info->buf, mapping_info->attach);
 		dma_buf_put(mapping_info->buf);
 		trace_msm_smem_buffer_iommu_op_end("UNMAP", 0, 0, 0, 0, 0);
+
+		mapping_info->dev = NULL;
+		mapping_info->mapping = NULL;
+		mapping_info->table = NULL;
+		mapping_info->attach = NULL;
+		mapping_info->buf = NULL;
 	}
+
+	return rc;
 }
 
-static int ion_user_to_kernel(struct smem_client *client, int fd, u32 size,
-		struct msm_smem *mem, enum hal_buffer buffer_type)
+static void *msm_ion_get_dma_buf(int fd)
 {
-	struct ion_handle *hndl = NULL;
-	ion_phys_addr_t iova = 0;
-	unsigned long buffer_size = size;
+	struct dma_buf *dma_buf;
+
+	dma_buf = dma_buf_get(fd);
+	if (IS_ERR_OR_NULL(dma_buf)) {
+		dprintk(VIDC_ERR, "Failed to get dma_buf for %d, error %ld\n",
+				fd, PTR_ERR(dma_buf));
+		dma_buf = NULL;
+	}
+
+	return dma_buf;
+}
+
+void *msm_smem_get_dma_buf(int fd)
+{
+	return (void *)msm_ion_get_dma_buf(fd);
+}
+
+static void msm_ion_put_dma_buf(struct dma_buf *dma_buf)
+{
+	if (!dma_buf) {
+		dprintk(VIDC_ERR, "%s: Invalid params: %pK\n",
+				__func__, dma_buf);
+		return;
+	}
+
+	dma_buf_put(dma_buf);
+}
+
+void msm_smem_put_dma_buf(void *dma_buf)
+{
+	return msm_ion_put_dma_buf((struct dma_buf *)dma_buf);
+}
+
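
The two exported wrappers above are deliberately thin shims over
dma_buf_get()/dma_buf_put(): a successful msm_smem_get_dma_buf() takes a
file reference on the dmabuf behind the fd, and the caller owes exactly one
msm_smem_put_dma_buf() for it. A minimal caller-side sketch (error handling
illustrative):

	void *dbuf = msm_smem_get_dma_buf(fd);

	if (!dbuf)
		return -EINVAL;
	/* ... import a handle, map the buffer, etc. ... */
	msm_smem_put_dma_buf(dbuf);	/* balance the get above */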
+static struct ion_handle *msm_ion_get_handle(void *ion_client,
+		struct dma_buf *dma_buf)
+{
+	struct ion_handle *handle;
+
+	if (!ion_client || !dma_buf) {
+		dprintk(VIDC_ERR, "%s: Invalid params: %pK %pK\n",
+				__func__, ion_client, dma_buf);
+		return NULL;
+	}
+
+	handle = ion_import_dma_buf(ion_client, dma_buf);
+	if (IS_ERR_OR_NULL(handle)) {
+		dprintk(VIDC_ERR, "Failed to get ion_handle: %pK, %pK, %ld\n",
+				ion_client, dma_buf, PTR_ERR(handle));
+		handle = NULL;
+	}
+
+	return handle;
+}
+
+void *msm_smem_get_handle(struct smem_client *client, void *dma_buf)
+{
+	if (!client)
+		return NULL;
+
+	return (void *)msm_ion_get_handle(client->clnt,
+			(struct dma_buf *)dma_buf);
+}
+
+static void msm_ion_put_handle(struct ion_client *ion_client,
+		struct ion_handle *ion_handle)
+{
+	if (!ion_client || !ion_handle) {
+		dprintk(VIDC_ERR, "%s: Invalid params: %pK %pK\n",
+				__func__, ion_client, ion_handle);
+		return;
+	}
+
+	ion_free(ion_client, ion_handle);
+}
+
+void msm_smem_put_handle(struct smem_client *client, void *handle)
+{
+	if (!client) {
+		dprintk(VIDC_ERR, "%s: Invalid params %pK %pK\n",
+				__func__, client, handle);
+		return;
+	}
+	return msm_ion_put_handle(client->clnt, (struct ion_handle *)handle);
+}
+
+static int msm_ion_map_dma_buf(struct msm_vidc_inst *inst,
+		struct msm_smem *smem)
+{
 	int rc = 0;
+	ion_phys_addr_t iova = 0;
+	u32 temp = 0;
+	unsigned long buffer_size = 0;
 	unsigned long align = SZ_4K;
 	unsigned long ion_flags = 0;
+	struct ion_client *ion_client;
+	struct ion_handle *ion_handle;
+	struct dma_buf *dma_buf;
 
-#ifdef CONFIG_ION
-	hndl = ion_import_dma_buf_fd(client->clnt, fd);
-#endif
-	dprintk(VIDC_DBG, "%s ion handle: %pK\n", __func__, hndl);
-	if (IS_ERR_OR_NULL(hndl)) {
-		dprintk(VIDC_ERR, "Failed to get handle: %pK, %d, %d, %pK\n",
-				client, fd, size, hndl);
-		rc = -ENOMEM;
-		goto fail_import_fd;
+	if (!inst || !inst->mem_client || !inst->mem_client->clnt) {
+		dprintk(VIDC_ERR, "%s: Invalid params: %pK %pK\n",
+				__func__, inst, smem);
+		return -EINVAL;
 	}
 
-	mem->kvaddr = NULL;
-	rc = ion_handle_get_flags(client->clnt, hndl, &ion_flags);
+	ion_client = inst->mem_client->clnt;
+	dma_buf = msm_ion_get_dma_buf(smem->fd);
+	if (!dma_buf)
+		return -EINVAL;
+	ion_handle = msm_ion_get_handle(ion_client, dma_buf);
+	if (!ion_handle)
+		return -EINVAL;
+
+	smem->dma_buf = dma_buf;
+	smem->handle = ion_handle;
+	rc = ion_handle_get_flags(ion_client, ion_handle, &ion_flags);
 	if (rc) {
 		dprintk(VIDC_ERR, "Failed to get ion flags: %d\n", rc);
-		goto fail_device_address;
+		goto exit;
 	}
 
-	mem->buffer_type = buffer_type;
 	if (ion_flags & ION_FLAG_CACHED)
-		mem->flags |= SMEM_CACHED;
+		smem->flags |= SMEM_CACHED;
 
 	if (ion_flags & ION_FLAG_SECURE)
-		mem->flags |= SMEM_SECURE;
+		smem->flags |= SMEM_SECURE;
 
-	rc = get_device_address(client, hndl, align, &iova, &buffer_size,
-				mem->flags, buffer_type, &mem->mapping_info);
+	rc = msm_ion_get_device_address(inst->mem_client, ion_handle,
+			align, &iova, &buffer_size, smem->flags,
+			smem->buffer_type, &smem->mapping_info);
 	if (rc) {
 		dprintk(VIDC_ERR, "Failed to get device address: %d\n", rc);
-		goto fail_device_address;
+		goto exit;
+	}
+	temp = (u32)iova;
+	if ((ion_phys_addr_t)temp != iova) {
+		dprintk(VIDC_ERR, "iova(%pa) truncated to %#x", &iova, temp);
+		rc = -EINVAL;
+		goto exit;
 	}
 
-	mem->mem_type = client->mem_type;
-	mem->smem_priv = hndl;
-	mem->device_addr = iova;
-	mem->size = buffer_size;
-	if ((u32)mem->device_addr != iova) {
-		dprintk(VIDC_ERR, "iova(%pa) truncated to %#x",
-			&iova, (u32)mem->device_addr);
-		goto fail_device_address;
-	}
-	dprintk(VIDC_DBG,
-		"%s: ion_handle = %pK, fd = %d, device_addr = %pa, size = %zx, kvaddr = %pK, buffer_type = %d, flags = %#lx\n",
-		__func__, mem->smem_priv, fd, &mem->device_addr, mem->size,
-		mem->kvaddr, mem->buffer_type, mem->flags);
+	smem->device_addr = (u32)iova + smem->offset;
+
+exit:
 	return rc;
-fail_device_address:
-	ion_free(client->clnt, hndl);
-fail_import_fd:
+}
+
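
msm_ion_map_dma_buf() chains the pieces introduced above: fd to dma_buf,
dma_buf to ion_handle, handle to a device-visible IOVA, with the round-trip
cast guarding against a (possibly 64-bit) IOVA being silently truncated
into the 32-bit device_addr the firmware consumes. Roughly, with error
handling elided:

	dma_buf = msm_ion_get_dma_buf(smem->fd);
	handle = msm_ion_get_handle(ion_client, dma_buf);
	/* maps into the SMMU and returns the IOVA and mapped size */
	msm_ion_get_device_address(inst->mem_client, handle, SZ_4K, &iova,
			&buffer_size, smem->flags, smem->buffer_type,
			&smem->mapping_info);
	smem->device_addr = (u32)iova + smem->offset; /* after truncation check */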
+int msm_smem_map_dma_buf(struct msm_vidc_inst *inst, struct msm_smem *smem)
+{
+	int rc = 0;
+
+	if (!inst || !smem) {
+		dprintk(VIDC_ERR, "%s: Invalid params: %pK %pK\n",
+				__func__, inst, smem);
+		return -EINVAL;
+	}
+
+	if (smem->refcount) {
+		smem->refcount++;
+		return rc;
+	}
+
+	switch (inst->mem_client->mem_type) {
+	case SMEM_ION:
+		rc = msm_ion_map_dma_buf(inst, smem);
+		break;
+	default:
+		dprintk(VIDC_ERR, "%s: Unknown mem_type %d\n",
+			__func__, inst->mem_client->mem_type);
+		rc = -EINVAL;
+		break;
+	}
+	if (!rc)
+		smem->refcount++;
+
+	return rc;
+}
+
+static int msm_ion_unmap_dma_buf(struct msm_vidc_inst *inst,
+		struct msm_smem *smem)
+{
+	int rc = 0;
+
+	if (!inst || !inst->mem_client || !smem) {
+		dprintk(VIDC_ERR, "%s: Invalid params: %pK %pK\n",
+				__func__, inst, smem);
+		return -EINVAL;
+	}
+
+	rc = msm_ion_put_device_address(inst->mem_client, smem->handle,
+			smem->flags, &smem->mapping_info, smem->buffer_type);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to put device address: %d\n", rc);
+		goto exit;
+	}
+
+	msm_ion_put_handle(inst->mem_client->clnt, smem->handle);
+	msm_ion_put_dma_buf(smem->dma_buf);
+
+	smem->device_addr = 0x0;
+	smem->handle = NULL;
+	smem->dma_buf = NULL;
+
+exit:
+	return rc;
+}
+
+int msm_smem_unmap_dma_buf(struct msm_vidc_inst *inst, struct msm_smem *smem)
+{
+	int rc = 0;
+
+	if (!inst || !smem) {
+		dprintk(VIDC_ERR, "%s: Invalid params: %pK %pK\n",
+				__func__, inst, smem);
+		return -EINVAL;
+	}
+
+	if (smem->refcount) {
+		smem->refcount--;
+	} else {
+		dprintk(VIDC_WARN,
+			"unmap called while refcount is zero already\n");
+		return -EINVAL;
+	}
+
+	if (smem->refcount)
+		return rc;
+
+	switch (inst->mem_client->mem_type) {
+	case SMEM_ION:
+		rc = msm_ion_unmap_dma_buf(inst, smem);
+		break;
+	default:
+		dprintk(VIDC_ERR, "%s: Unknown mem_type %d\n",
+			__func__, inst->mem_client->mem_type);
+		rc = -EINVAL;
+		break;
+	}
+
 	return rc;
 }
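
The refcount added here gives map/unmap get/put semantics: only the 0-to-1
transition performs the SMMU mapping, repeat maps of the same msm_smem just
count, and only the unmap that drops the count back to zero reaches
msm_ion_unmap_dma_buf(). Caller-side, the pairing looks like:

	msm_smem_map_dma_buf(inst, smem);	/* 0 -> 1: actual SMMU map */
	msm_smem_map_dma_buf(inst, smem);	/* 1 -> 2: counted only */

	msm_smem_unmap_dma_buf(inst, smem);	/* 2 -> 1: still mapped */
	msm_smem_unmap_dma_buf(inst, smem);	/* 1 -> 0: actual SMMU unmap */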
 
@@ -321,6 +491,12 @@
 	int rc = 0;
 	int ion_flags = 0;
 
+	if (!client || !mem) {
+		dprintk(VIDC_ERR, "%s: Invalid params: %pK %pK\n",
+				__func__, client, mem);
+		return -EINVAL;
+	}
+
 	align = ALIGN(align, SZ_4K);
 	size = ALIGN(size, SZ_4K);
 
@@ -366,10 +542,13 @@
 	}
 	trace_msm_smem_buffer_ion_op_end("ALLOC", (u32)buffer_type,
 		heap_mask, size, align, flags, map_kernel);
-	mem->mem_type = client->mem_type;
-	mem->smem_priv = hndl;
+
+	mem->handle = hndl;
 	mem->flags = flags;
 	mem->buffer_type = buffer_type;
+	mem->offset = 0;
+	mem->size = size;
+
 	if (map_kernel) {
 		mem->kvaddr = ion_map_kernel(client->clnt, hndl);
 		if (IS_ERR_OR_NULL(mem->kvaddr)) {
@@ -382,24 +561,23 @@
 		mem->kvaddr = NULL;
 	}
 
-	rc = get_device_address(client, hndl, align, &iova, &buffer_size,
-				flags, buffer_type, &mem->mapping_info);
+	rc = msm_ion_get_device_address(client, hndl, align, &iova,
+			&buffer_size, flags, buffer_type, &mem->mapping_info);
 	if (rc) {
 		dprintk(VIDC_ERR, "Failed to get device address: %d\n",
 			rc);
 		goto fail_device_address;
 	}
-	mem->device_addr = iova;
-	if ((u32)mem->device_addr != iova) {
+	mem->device_addr = (u32)iova;
+	if ((ion_phys_addr_t)mem->device_addr != iova) {
 		dprintk(VIDC_ERR, "iova(%pa) truncated to %#x",
-			&iova, (u32)mem->device_addr);
+			&iova, mem->device_addr);
 		goto fail_device_address;
 	}
-	mem->size = size;
 	dprintk(VIDC_DBG,
-		"%s: ion_handle = %pK, device_addr = %pa, size = %#zx, kvaddr = %pK, buffer_type = %#x, flags = %#lx\n",
-		__func__, mem->smem_priv, &mem->device_addr,
-		mem->size, mem->kvaddr, mem->buffer_type, mem->flags);
+		"%s: ion_handle = %pK, device_addr = %x, size = %d, kvaddr = %pK, buffer_type = %#x, flags = %#lx\n",
+		__func__, mem->handle, mem->device_addr, mem->size,
+		mem->kvaddr, mem->buffer_type, mem->flags);
 	return rc;
 fail_device_address:
 	if (mem->kvaddr)
@@ -410,30 +588,40 @@
 	return rc;
 }
 
-static void free_ion_mem(struct smem_client *client, struct msm_smem *mem)
+static int free_ion_mem(struct smem_client *client, struct msm_smem *mem)
 {
+	int rc = 0;
+
+	if (!client || !mem) {
+		dprintk(VIDC_ERR, "%s: Invalid params: %pK %pK\n",
+				__func__, client, mem);
+		return -EINVAL;
+	}
+
 	dprintk(VIDC_DBG,
-		"%s: ion_handle = %pK, device_addr = %pa, size = %#zx, kvaddr = %pK, buffer_type = %#x\n",
-		__func__, mem->smem_priv, &mem->device_addr,
-		mem->size, mem->kvaddr, mem->buffer_type);
+		"%s: ion_handle = %pK, device_addr = %x, size = %d, kvaddr = %pK, buffer_type = %#x\n",
+		__func__, mem->handle, mem->device_addr, mem->size,
+		mem->kvaddr, mem->buffer_type);
 
 	if (mem->device_addr)
-		put_device_address(client, mem->smem_priv, mem->flags,
+		msm_ion_put_device_address(client, mem->handle, mem->flags,
 			&mem->mapping_info, mem->buffer_type);
 
 	if (mem->kvaddr)
-		ion_unmap_kernel(client->clnt, mem->smem_priv);
-	if (mem->smem_priv) {
+		ion_unmap_kernel(client->clnt, mem->handle);
+
+	if (mem->handle) {
 		trace_msm_smem_buffer_ion_op_start("FREE",
 				(u32)mem->buffer_type, -1, mem->size, -1,
 				mem->flags, -1);
-		dprintk(VIDC_DBG,
-			"%s: Freeing handle %pK, client: %pK\n",
-			__func__, mem->smem_priv, client->clnt);
-		ion_free(client->clnt, mem->smem_priv);
+		ion_free(client->clnt, mem->handle);
 		trace_msm_smem_buffer_ion_op_end("FREE", (u32)mem->buffer_type,
 			-1, mem->size, -1, mem->flags, -1);
+	} else {
+		dprintk(VIDC_ERR, "%s: invalid ion_handle\n", __func__);
 	}
+
+	return rc;
 }
 
 static void *ion_new_client(void)
@@ -443,135 +631,105 @@
 	client = msm_ion_client_create("video_client");
 	if (!client)
 		dprintk(VIDC_ERR, "Failed to create smem client\n");
+
+	dprintk(VIDC_DBG, "%s: client %pK\n", __func__, client);
+
 	return client;
 };
 
 static void ion_delete_client(struct smem_client *client)
 {
+	if (!client) {
+		dprintk(VIDC_ERR, "%s: Invalid params: %pK\n",
+				__func__, client);
+		return;
+	}
+
+	dprintk(VIDC_DBG, "%s: client %pK\n", __func__, client->clnt);
 	ion_client_destroy(client->clnt);
+	client->clnt = NULL;
 }
 
-struct msm_smem *msm_smem_user_to_kernel(void *clt, int fd, u32 size,
-		enum hal_buffer buffer_type)
+static int msm_ion_cache_operations(void *ion_client, void *ion_handle,
+		unsigned long offset, unsigned long size,
+		enum smem_cache_ops cache_op)
 {
-	struct smem_client *client = clt;
 	int rc = 0;
-	struct msm_smem *mem;
-
-	if (fd < 0) {
-		dprintk(VIDC_ERR, "Invalid fd: %d\n", fd);
-		return NULL;
-	}
-	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
-	if (!mem) {
-		dprintk(VIDC_ERR, "Failed to allocate shared mem\n");
-		return NULL;
-	}
-	switch (client->mem_type) {
-	case SMEM_ION:
-		rc = ion_user_to_kernel(clt, fd, size, mem, buffer_type);
-		break;
-	default:
-		dprintk(VIDC_ERR, "Mem type not supported\n");
-		rc = -EINVAL;
-		break;
-	}
-	if (rc) {
-		dprintk(VIDC_ERR, "Failed to allocate shared memory\n");
-		kfree(mem);
-		mem = NULL;
-	}
-	return mem;
-}
-
-bool msm_smem_compare_buffers(void *clt, int fd, void *priv)
-{
-	struct smem_client *client = clt;
-	struct ion_handle *handle = NULL;
-	bool ret = false;
-
-	if (!clt || !priv) {
-		dprintk(VIDC_ERR, "Invalid params: %pK, %pK\n",
-			clt, priv);
-		return false;
-	}
-#ifdef CONFIG_ION
-	handle = ion_import_dma_buf_fd(client->clnt, fd);
-#endif
-	ret = handle == priv;
-	(!IS_ERR_OR_NULL(handle)) ? ion_free(client->clnt, handle) : 0;
-	return ret;
-}
-
-static int ion_cache_operations(struct smem_client *client,
-	struct msm_smem *mem, enum smem_cache_ops cache_op)
-{
-	unsigned long ionflag = 0;
-	int rc = 0;
+	unsigned long flags = 0;
 	int msm_cache_ops = 0;
 
-	if (!mem || !client) {
-		dprintk(VIDC_ERR, "Invalid params: %pK, %pK\n",
-			mem, client);
+	if (!ion_client || !ion_handle) {
+		dprintk(VIDC_ERR, "%s: Invalid params: %pK %pK\n",
+			__func__, ion_client, ion_handle);
 		return -EINVAL;
 	}
-	rc = ion_handle_get_flags(client->clnt,	mem->smem_priv,
-		&ionflag);
+
+	rc = ion_handle_get_flags(ion_client, ion_handle, &flags);
 	if (rc) {
 		dprintk(VIDC_ERR,
-			"ion_handle_get_flags failed: %d\n", rc);
-		goto cache_op_failed;
+			"%s: ion_handle_get_flags failed: %d, ion client %pK, ion handle %pK\n",
+			__func__, rc, ion_client, ion_handle);
+		goto exit;
 	}
-	if (ION_IS_CACHED(ionflag)) {
-		switch (cache_op) {
-		case SMEM_CACHE_CLEAN:
-			msm_cache_ops = ION_IOC_CLEAN_CACHES;
-			break;
-		case SMEM_CACHE_INVALIDATE:
-			msm_cache_ops = ION_IOC_INV_CACHES;
-			break;
-		case SMEM_CACHE_CLEAN_INVALIDATE:
-			msm_cache_ops = ION_IOC_CLEAN_INV_CACHES;
-			break;
-		default:
-			dprintk(VIDC_ERR, "cache operation not supported\n");
-			rc = -EINVAL;
-			goto cache_op_failed;
-		}
-		rc = msm_ion_do_cache_op(client->clnt,
-			(struct ion_handle *)mem->smem_priv,
-			0, (unsigned long)mem->size,
-			msm_cache_ops);
-		if (rc) {
-			dprintk(VIDC_ERR,
-					"cache operation failed %d\n", rc);
-			goto cache_op_failed;
-		}
+
+	if (!ION_IS_CACHED(flags))
+		goto exit;
+
+	switch (cache_op) {
+	case SMEM_CACHE_CLEAN:
+		msm_cache_ops = ION_IOC_CLEAN_CACHES;
+		break;
+	case SMEM_CACHE_INVALIDATE:
+		msm_cache_ops = ION_IOC_INV_CACHES;
+		break;
+	case SMEM_CACHE_CLEAN_INVALIDATE:
+		msm_cache_ops = ION_IOC_CLEAN_INV_CACHES;
+		break;
+	default:
+		dprintk(VIDC_ERR, "%s: cache (%d) operation not supported\n",
+			__func__, cache_op);
+		rc = -EINVAL;
+		goto exit;
 	}
-cache_op_failed:
+
+	rc = msm_ion_do_cache_offset_op(ion_client, ion_handle, NULL,
+			offset, size, msm_cache_ops);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"%s: cache operation failed %d, ion client %pK, ion handle %pK, offset %lu, size %lu, msm_cache_ops %u\n",
+			__func__, rc, ion_client, ion_handle, offset,
+			size, msm_cache_ops);
+		goto exit;
+	}
+
+exit:
 	return rc;
 }
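
Switching from whole-buffer msm_ion_do_cache_op() to
msm_ion_do_cache_offset_op() lets callers confine cache maintenance to the
bytes that were actually touched rather than the entire allocation. Through
the reworked public wrapper below, a partial invalidate would look like
(offset and size illustrative):

	/* invalidate only the filled portion of the plane */
	rc = msm_smem_cache_operations(inst->mem_client, smem->handle,
			buff_off, filled_len, SMEM_CACHE_INVALIDATE);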
 
-int msm_smem_cache_operations(void *clt, struct msm_smem *mem,
+int msm_smem_cache_operations(struct smem_client *client,
+		void *handle, unsigned long offset, unsigned long size,
 		enum smem_cache_ops cache_op)
 {
-	struct smem_client *client = clt;
 	int rc = 0;
 
-	if (!client) {
-		dprintk(VIDC_ERR, "Invalid params: %pK\n",
-			client);
+	if (!client || !handle) {
+		dprintk(VIDC_ERR, "%s: Invalid params: %pK %pK\n",
+			__func__, client, handle);
 		return -EINVAL;
 	}
+
 	switch (client->mem_type) {
 	case SMEM_ION:
-		rc = ion_cache_operations(client, mem, cache_op);
+		rc = msm_ion_cache_operations(client->clnt, handle,
+				offset, size, cache_op);
 		if (rc)
 			dprintk(VIDC_ERR,
-			"Failed cache operations: %d\n", rc);
+			"%s: Failed cache operations: %d\n", __func__, rc);
 		break;
 	default:
-		dprintk(VIDC_ERR, "Mem type not supported\n");
+		dprintk(VIDC_ERR, "%s: Mem type (%d) not supported\n",
+			__func__, client->mem_type);
+		rc = -EINVAL;
 		break;
 	}
 	return rc;
@@ -607,32 +765,22 @@
 	return client;
 }
 
-struct msm_smem *msm_smem_alloc(void *clt, size_t size, u32 align, u32 flags,
-		enum hal_buffer buffer_type, int map_kernel)
+int msm_smem_alloc(struct smem_client *client, size_t size,
+		u32 align, u32 flags, enum hal_buffer buffer_type,
+		int map_kernel, struct msm_smem *smem)
 {
-	struct smem_client *client;
 	int rc = 0;
-	struct msm_smem *mem;
 
-	client = clt;
-	if (!client) {
-		dprintk(VIDC_ERR, "Invalid  client passed\n");
-		return NULL;
+	if (!client || !smem || !size) {
+		dprintk(VIDC_ERR, "%s: Invalid params %pK %pK %d\n",
+				__func__, client, smem, (u32)size);
+		return -EINVAL;
 	}
-	if (!size) {
-		dprintk(VIDC_ERR, "No need to allocate memory of size: %zx\n",
-			size);
-		return NULL;
-	}
-	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
-	if (!mem) {
-		dprintk(VIDC_ERR, "Failed to allocate shared mem\n");
-		return NULL;
-	}
+
 	switch (client->mem_type) {
 	case SMEM_ION:
 		rc = alloc_ion_mem(client, size, align, flags, buffer_type,
-					mem, map_kernel);
+					smem, map_kernel);
 		break;
 	default:
 		dprintk(VIDC_ERR, "Mem type not supported\n");
@@ -640,30 +788,34 @@
 		break;
 	}
 	if (rc) {
-		dprintk(VIDC_ERR, "Failed to allocate shared memory\n");
-		kfree(mem);
-		mem = NULL;
+		dprintk(VIDC_ERR, "Failed to allocate memory\n");
 	}
-	return mem;
+
+	return rc;
 }
 
-void msm_smem_free(void *clt, struct msm_smem *mem)
+int msm_smem_free(void *clt, struct msm_smem *smem)
 {
+	int rc = 0;
 	struct smem_client *client = clt;
 
-	if (!client || !mem) {
+	if (!client || !smem) {
 		dprintk(VIDC_ERR, "Invalid client/handle passed\n");
-		return;
+		return -EINVAL;
 	}
 	switch (client->mem_type) {
 	case SMEM_ION:
-		free_ion_mem(client, mem);
+		rc = free_ion_mem(client, smem);
 		break;
 	default:
 		dprintk(VIDC_ERR, "Mem type not supported\n");
+		rc = -EINVAL;
 		break;
 	}
-	kfree(mem);
+	if (rc)
+		dprintk(VIDC_ERR, "Failed to free memory\n");
+
+	return rc;
 };
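
Both allocation and free now report status, and msm_smem_alloc() no longer
returns a kzalloc'd struct: the caller owns the msm_smem storage (typically
embedded in its own buffer bookkeeping) and passes it in. A usage sketch,
with buffer type and flags purely illustrative:

	struct msm_smem smem = {0};	/* caller-owned storage */
	int rc;

	rc = msm_smem_alloc(client, buf_size, SZ_4K, 0 /* flags */,
			HAL_BUFFER_OUTPUT /* illustrative */,
			0 /* no kernel mapping */, &smem);
	if (rc)
		return rc;
	/* ... use smem.device_addr ... */
	msm_smem_free(client, &smem);	/* releases the memory, not &smem */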
 
 void msm_smem_delete_client(void *clt)
@@ -692,7 +844,7 @@
 	struct context_bank_info *cb = NULL, *match = NULL;
 
 	if (!clt) {
-		dprintk(VIDC_ERR, "%s - invalid params\n", __func__);
+		dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
 		return NULL;
 	}
 
@@ -713,12 +865,13 @@
 		if (cb->is_secure == is_secure &&
 				cb->buffer_type & buffer_type) {
 			match = cb;
-			dprintk(VIDC_DBG,
-				"context bank found for CB : %s, device: %pK mapping: %pK\n",
-				match->name, match->dev, match->mapping);
 			break;
 		}
 	}
+	if (!match)
+		dprintk(VIDC_ERR,
+			"%s: cb not found for buffer_type %x, is_secure %d\n",
+			__func__, buffer_type, is_secure);
 
 	return match;
 }
diff --git a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
index 5c34f28..8c63469 100644
--- a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
@@ -36,7 +36,6 @@
 
 struct msm_vidc_drv *vidc_driver;
 
-uint32_t msm_vidc_pwr_collapse_delay = 3000;
 
 static inline struct msm_vidc_inst *get_vidc_inst(struct file *filp, void *fh)
 {
@@ -315,6 +314,7 @@
 	core->resources.pdev = pdev;
 	if (pdev->dev.of_node) {
 		/* Target supports DT, parse from it */
+		rc = read_platform_resources_from_drv_data(core);
 		rc = read_platform_resources_from_dt(&core->resources);
 	} else {
 		dprintk(VIDC_ERR, "pdev node is NULL\n");
@@ -375,13 +375,18 @@
 {
 	unsigned long val = 0;
 	int rc = 0;
+	struct msm_vidc_core *core = NULL;
 
 	rc = kstrtoul(buf, 0, &val);
 	if (rc)
 		return rc;
 	else if (!val)
 		return -EINVAL;
-	msm_vidc_pwr_collapse_delay = val;
+
+	core = get_vidc_core(MSM_VIDC_CORE_VENUS);
+	if (!core)
+		return -EINVAL;
+	core->resources.msm_vidc_pwr_collapse_delay = val;
 	return count;
 }
 
@@ -389,7 +394,14 @@
 		struct device_attribute *attr,
 		char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "%u\n", msm_vidc_pwr_collapse_delay);
+	struct msm_vidc_core *core = NULL;
+
+	core = get_vidc_core(MSM_VIDC_CORE_VENUS);
+	if (!core)
+		return -EINVAL;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+		core->resources.msm_vidc_pwr_collapse_delay);
 }
 
 static DEVICE_ATTR(pwr_collapse_delay, 0644, show_pwr_collapse_delay,
@@ -481,6 +493,7 @@
 		goto err_no_mem;
 	}
 
+	core->platform_data = vidc_get_drv_data(&pdev->dev);
 	dev_set_drvdata(&pdev->dev, core);
 	rc = msm_vidc_initialize_core(pdev, core);
 	if (rc) {
@@ -710,6 +723,7 @@
 	v4l2_device_unregister(&core->v4l2_dev);
 
 	msm_vidc_free_platform_resources(&core->resources);
+	kfree(core->vote_data);
 	sysfs_remove_group(&pdev->dev.kobj, &msm_vidc_core_attr_group);
 	dev_set_drvdata(&pdev->dev, NULL);
 	mutex_destroy(&core->lock);
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index 053d748..554e89a 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -20,7 +20,10 @@
 #include "msm_vidc_clocks.h"
 
 #define MSM_VDEC_DVC_NAME "msm_vdec_8974"
+#define MIN_NUM_THUMBNAIL_MODE_OUTPUT_BUFFERS MIN_NUM_OUTPUT_BUFFERS
 #define MIN_NUM_THUMBNAIL_MODE_CAPTURE_BUFFERS MIN_NUM_CAPTURE_BUFFERS
+#define MIN_NUM_DEC_OUTPUT_BUFFERS 4
+#define MIN_NUM_DEC_CAPTURE_BUFFERS 4
 #define DEFAULT_VIDEO_CONCEAL_COLOR_BLACK 0x8010
 #define MB_SIZE_IN_PIXEL (16 * 16)
 #define OPERATING_FRAME_RATE_STEP (1 << 16)
@@ -513,6 +516,7 @@
 				msm_comm_get_hal_output_buffer(inst),
 				f->fmt.pix_mp.pixelformat);
 
+		inst->clk_data.opb_fourcc = f->fmt.pix_mp.pixelformat;
 		if (msm_comm_get_stream_output_mode(inst) ==
 			HAL_VIDEO_DECODER_SECONDARY) {
 			frame_sz.buffer_type = HAL_BUFFER_OUTPUT2;
@@ -544,7 +548,6 @@
 				f->fmt.pix_mp.plane_fmt[i].sizeimage;
 		}
 
-		rc = msm_comm_try_get_bufreqs(inst);
 	} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
 
 		fmt = msm_comm_get_pixel_fmt_fourcc(vdec_formats,
@@ -598,8 +601,6 @@
 			inst->bufq[OUTPUT_PORT].plane_sizes[i] =
 				f->fmt.pix_mp.plane_fmt[i].sizeimage;
 		}
-
-		rc = msm_comm_try_get_bufreqs(inst);
 	}
 err_invalid_fmt:
 	return rc;
@@ -675,6 +676,19 @@
 	memcpy(&inst->fmts[fmt->type], fmt,
 			sizeof(struct msm_vidc_format));
 
+	inst->buff_req.buffer[1].buffer_type = HAL_BUFFER_INPUT;
+	inst->buff_req.buffer[1].buffer_count_min_host =
+	inst->buff_req.buffer[1].buffer_count_actual =
+		MIN_NUM_DEC_OUTPUT_BUFFERS;
+	inst->buff_req.buffer[2].buffer_type = HAL_BUFFER_OUTPUT;
+	inst->buff_req.buffer[2].buffer_count_min_host =
+	inst->buff_req.buffer[2].buffer_count_actual =
+		MIN_NUM_DEC_CAPTURE_BUFFERS;
+	inst->buff_req.buffer[3].buffer_type = HAL_BUFFER_OUTPUT2;
+	inst->buff_req.buffer[3].buffer_count_min_host =
+	inst->buff_req.buffer[3].buffer_count_actual =
+		MIN_NUM_DEC_CAPTURE_BUFFERS;
+
 	/* By default, initialize OUTPUT port to H264 decoder */
 	fmt = msm_comm_get_pixel_fmt_fourcc(vdec_formats,
 		ARRAY_SIZE(vdec_formats), V4L2_PIX_FMT_H264,
@@ -716,6 +730,7 @@
 	struct v4l2_ctrl *temp_ctrl = NULL;
 	struct hal_profile_level profile_level;
 	struct hal_frame_size frame_sz;
+	struct hal_buffer_requirements *bufreq;
 
 	if (!inst || !inst->core || !inst->core->device) {
 		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
@@ -777,6 +792,59 @@
 		hal_property.enable = ctrl->val;
 		pdata = &hal_property;
 		msm_dcvs_try_enable(inst);
+
+		bufreq = get_buff_req_buffer(inst,
+				HAL_BUFFER_INPUT);
+		if (!bufreq) {
+			dprintk(VIDC_ERR,
+					"Failed : No buffer requirements: %x\n",
+					HAL_BUFFER_INPUT);
+			return -EINVAL;
+		}
+		bufreq->buffer_count_min =
+			MIN_NUM_THUMBNAIL_MODE_OUTPUT_BUFFERS;
+
+		if (msm_comm_get_stream_output_mode(inst) ==
+				HAL_VIDEO_DECODER_SECONDARY) {
+
+			bufreq = get_buff_req_buffer(inst,
+					HAL_BUFFER_OUTPUT);
+			if (!bufreq) {
+				dprintk(VIDC_ERR,
+					"Failed : No buffer requirements: %x\n",
+						HAL_BUFFER_OUTPUT);
+				return -EINVAL;
+			}
+
+			bufreq->buffer_count_min =
+				MIN_NUM_THUMBNAIL_MODE_CAPTURE_BUFFERS;
+
+			bufreq = get_buff_req_buffer(inst,
+					HAL_BUFFER_OUTPUT2);
+			if (!bufreq) {
+				dprintk(VIDC_ERR,
+					"Failed : No buffer requirements: %x\n",
+						HAL_BUFFER_OUTPUT2);
+				return -EINVAL;
+			}
+
+			bufreq->buffer_count_min =
+				MIN_NUM_THUMBNAIL_MODE_CAPTURE_BUFFERS;
+		} else {
+
+			bufreq = get_buff_req_buffer(inst,
+					HAL_BUFFER_OUTPUT);
+			if (!bufreq) {
+				dprintk(VIDC_ERR,
+					"Failed : No buffer requirements: %x\n",
+						HAL_BUFFER_OUTPUT);
+				return -EINVAL;
+			}
+			bufreq->buffer_count_min =
+				MIN_NUM_THUMBNAIL_MODE_CAPTURE_BUFFERS;
+
+		}
+
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_SECURE:
 		property_id = HAL_PARAM_SECURE;
@@ -896,7 +964,6 @@
 					"Failed setting OUTPUT2 size : %d\n",
 					rc);
 
-			rc = msm_comm_try_get_bufreqs(inst);
 			break;
 		default:
 			dprintk(VIDC_ERR,
@@ -920,7 +987,6 @@
 				V4L2_CID_MPEG_VIDEO_H264_LEVEL,
 				temp_ctrl->val);
 		pdata = &profile_level;
-		rc = msm_comm_try_get_bufreqs(inst);
 		break;
 	case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
 		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_H264_PROFILE);
@@ -932,7 +998,6 @@
 				V4L2_CID_MPEG_VIDEO_H264_PROFILE,
 				temp_ctrl->val);
 		pdata = &profile_level;
-		rc = msm_comm_try_get_bufreqs(inst);
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_BUFFER_SIZE_LIMIT:
 		dprintk(VIDC_DBG,
@@ -1050,12 +1115,8 @@
 							__func__, rc);
 						break;
 					}
-					rc = msm_comm_try_get_bufreqs(inst);
-					if (rc)
-						dprintk(VIDC_ERR,
-							"%s Failed to get buffer requirements : %d\n",
-							__func__, rc);
 				}
+				inst->clk_data.dpb_fourcc = fourcc;
 				break;
 			default:
 				dprintk(VIDC_ERR,
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index d44684e..e2ea2bc 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -39,6 +39,8 @@
 #define MIN_TIME_RESOLUTION 1
 #define MAX_TIME_RESOLUTION 0xFFFFFF
 #define DEFAULT_TIME_RESOLUTION 0x7530
+#define MIN_NUM_ENC_OUTPUT_BUFFERS 4
+#define MIN_NUM_ENC_CAPTURE_BUFFERS 5
 
 /*
  * Default 601 to 709 conversion coefficients for resolution: 176x144 negative
@@ -955,7 +957,7 @@
 		.name = "Set Color space transfer characteristics",
 		.type = V4L2_CTRL_TYPE_INTEGER,
 		.minimum = MSM_VIDC_TRANSFER_BT709_5,
-		.maximum = MSM_VIDC_TRANSFER_BT_2020_12,
+		.maximum = MSM_VIDC_TRANSFER_HLG,
 		.default_value = MSM_VIDC_TRANSFER_601_6_625,
 		.step = 1,
 		.qmenu = NULL,
@@ -2124,6 +2126,15 @@
 	inst->bufq[CAPTURE_PORT].num_planes = 1;
 	inst->clk_data.operating_rate = 0;
 
+	inst->buff_req.buffer[1].buffer_type = HAL_BUFFER_INPUT;
+	inst->buff_req.buffer[1].buffer_count_min_host =
+	inst->buff_req.buffer[1].buffer_count_actual =
+		MIN_NUM_ENC_OUTPUT_BUFFERS;
+	inst->buff_req.buffer[2].buffer_type = HAL_BUFFER_OUTPUT;
+	inst->buff_req.buffer[2].buffer_count_min_host =
+	inst->buff_req.buffer[2].buffer_count_actual =
+		MIN_NUM_ENC_CAPTURE_BUFFERS;
+
 	/* By default, initialize OUTPUT port to UBWC YUV format */
 	fmt = msm_comm_get_pixel_fmt_fourcc(venc_formats,
 		ARRAY_SIZE(venc_formats), V4L2_PIX_FMT_NV12_UBWC,
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 2e952a3..2ca3e8d 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -23,6 +23,7 @@
 #include <linux/delay.h>
 #include "vidc_hfi_api.h"
 #include "msm_vidc_clocks.h"
+#include <linux/dma-buf.h>
 
 #define MAX_EVENTS 30
 
@@ -383,507 +384,6 @@
 }
 EXPORT_SYMBOL(msm_vidc_reqbufs);
 
-struct buffer_info *get_registered_buf(struct msm_vidc_inst *inst,
-		struct v4l2_buffer *b, int idx, int *plane)
-{
-	struct buffer_info *temp;
-	struct buffer_info *ret = NULL;
-	int i;
-	int fd = b->m.planes[idx].reserved[0];
-	u32 buff_off = b->m.planes[idx].reserved[1];
-	u32 size = b->m.planes[idx].length;
-	ion_phys_addr_t device_addr = b->m.planes[idx].m.userptr;
-
-	if (fd < 0 || !plane) {
-		dprintk(VIDC_ERR, "Invalid input\n");
-		goto err_invalid_input;
-	}
-
-	WARN(!mutex_is_locked(&inst->registeredbufs.lock),
-		"Registered buf lock is not acquired for %s", __func__);
-
-	*plane = 0;
-	list_for_each_entry(temp, &inst->registeredbufs.list, list) {
-		for (i = 0; i < min(temp->num_planes, VIDEO_MAX_PLANES); i++) {
-			bool ion_hndl_matches = temp->handle[i] ?
-				msm_smem_compare_buffers(inst->mem_client, fd,
-				temp->handle[i]->smem_priv) : false;
-			bool device_addr_matches = device_addr ==
-						temp->device_addr[i];
-			bool contains_within = CONTAINS(temp->buff_off[i],
-					temp->size[i], buff_off) ||
-				CONTAINS(buff_off, size, temp->buff_off[i]);
-			bool overlaps = OVERLAPS(buff_off, size,
-					temp->buff_off[i], temp->size[i]);
-
-			if (!temp->inactive &&
-				(ion_hndl_matches || device_addr_matches) &&
-				(contains_within || overlaps)) {
-				dprintk(VIDC_DBG,
-						"This memory region is already mapped\n");
-				ret = temp;
-				*plane = i;
-				break;
-			}
-		}
-		if (ret)
-			break;
-	}
-
-err_invalid_input:
-	return ret;
-}
-
-static struct msm_smem *get_same_fd_buffer(struct msm_vidc_inst *inst, int fd)
-{
-	struct buffer_info *temp;
-	struct msm_smem *same_fd_handle = NULL;
-	int i;
-
-	if (!fd)
-		return NULL;
-
-	if (!inst || fd < 0) {
-		dprintk(VIDC_ERR, "%s: Invalid input\n", __func__);
-		goto err_invalid_input;
-	}
-
-	mutex_lock(&inst->registeredbufs.lock);
-	list_for_each_entry(temp, &inst->registeredbufs.list, list) {
-		for (i = 0; i < min(temp->num_planes, VIDEO_MAX_PLANES); i++) {
-			bool ion_hndl_matches = temp->handle[i] ?
-				msm_smem_compare_buffers(inst->mem_client, fd,
-				temp->handle[i]->smem_priv) : false;
-			if (ion_hndl_matches && temp->mapped[i])  {
-				temp->same_fd_ref[i]++;
-				dprintk(VIDC_INFO,
-				"Found same fd buffer\n");
-				same_fd_handle = temp->handle[i];
-				break;
-			}
-		}
-		if (same_fd_handle)
-			break;
-	}
-	mutex_unlock(&inst->registeredbufs.lock);
-
-err_invalid_input:
-	return same_fd_handle;
-}
-
-struct buffer_info *device_to_uvaddr(struct msm_vidc_list *buf_list,
-				ion_phys_addr_t device_addr)
-{
-	struct buffer_info *temp = NULL;
-	bool found = false;
-	int i;
-
-	if (!buf_list || !device_addr) {
-		dprintk(VIDC_ERR,
-			"Invalid input- device_addr: %pa buf_list: %pK\n",
-			&device_addr, buf_list);
-		goto err_invalid_input;
-	}
-
-	mutex_lock(&buf_list->lock);
-	list_for_each_entry(temp, &buf_list->list, list) {
-		for (i = 0; i < min(temp->num_planes, VIDEO_MAX_PLANES); i++) {
-			if (!temp->inactive &&
-				temp->device_addr[i] == device_addr)  {
-				dprintk(VIDC_INFO,
-				"Found same fd buffer\n");
-				found = true;
-				break;
-			}
-		}
-
-		if (found)
-			break;
-	}
-	mutex_unlock(&buf_list->lock);
-
-err_invalid_input:
-	return temp;
-}
-
-static inline void populate_buf_info(struct buffer_info *binfo,
-			struct v4l2_buffer *b, u32 i)
-{
-	if (i >= VIDEO_MAX_PLANES) {
-		dprintk(VIDC_ERR, "%s: Invalid input\n", __func__);
-		return;
-	}
-	binfo->type = b->type;
-	binfo->fd[i] = b->m.planes[i].reserved[0];
-	binfo->buff_off[i] = b->m.planes[i].reserved[1];
-	binfo->size[i] = b->m.planes[i].length;
-	binfo->uvaddr[i] = b->m.planes[i].m.userptr;
-	binfo->num_planes = b->length;
-	binfo->memory = b->memory;
-	binfo->v4l2_index = b->index;
-	binfo->timestamp.tv_sec = b->timestamp.tv_sec;
-	binfo->timestamp.tv_usec = b->timestamp.tv_usec;
-	dprintk(VIDC_DBG, "%s: fd[%d] = %d b->index = %d",
-			__func__, i, binfo->fd[i], b->index);
-}
-
-static inline void repopulate_v4l2_buffer(struct v4l2_buffer *b,
-					struct buffer_info *binfo)
-{
-	int i = 0;
-
-	b->type = binfo->type;
-	b->length = binfo->num_planes;
-	b->memory = binfo->memory;
-	b->index = binfo->v4l2_index;
-	b->timestamp.tv_sec = binfo->timestamp.tv_sec;
-	b->timestamp.tv_usec = binfo->timestamp.tv_usec;
-	binfo->dequeued = false;
-	for (i = 0; i < binfo->num_planes; ++i) {
-		b->m.planes[i].reserved[0] = binfo->fd[i];
-		b->m.planes[i].reserved[1] = binfo->buff_off[i];
-		b->m.planes[i].length = binfo->size[i];
-		b->m.planes[i].m.userptr = binfo->device_addr[i];
-		dprintk(VIDC_DBG, "%s %d %d %d %pa\n", __func__, binfo->fd[i],
-				binfo->buff_off[i], binfo->size[i],
-				&binfo->device_addr[i]);
-	}
-}
-
-static struct msm_smem *map_buffer(struct msm_vidc_inst *inst,
-		struct v4l2_plane *p, enum hal_buffer buffer_type)
-{
-	struct msm_smem *handle = NULL;
-
-	handle = msm_comm_smem_user_to_kernel(inst,
-				p->reserved[0],
-				p->length,
-				buffer_type);
-	if (!handle) {
-		dprintk(VIDC_ERR,
-			"%s: Failed to get device buffer address\n", __func__);
-		return NULL;
-	}
-	return handle;
-}
-
-static inline enum hal_buffer get_hal_buffer_type(
-		struct msm_vidc_inst *inst, struct v4l2_buffer *b)
-{
-	if (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
-		return HAL_BUFFER_INPUT;
-	else if (b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
-		return HAL_BUFFER_OUTPUT;
-	else
-		return -EINVAL;
-}
-
-static inline bool is_dynamic_buffer_mode(struct v4l2_buffer *b,
-				struct msm_vidc_inst *inst)
-{
-	enum vidc_ports port = b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
-		OUTPUT_PORT : CAPTURE_PORT;
-	return inst->buffer_mode_set[port] == HAL_BUFFER_MODE_DYNAMIC;
-}
-
-
-static inline void save_v4l2_buffer(struct v4l2_buffer *b,
-						struct buffer_info *binfo)
-{
-	int i = 0;
-
-	for (i = 0; i < b->length; ++i) {
-		if (EXTRADATA_IDX(b->length) &&
-			(i == EXTRADATA_IDX(b->length)) &&
-			!b->m.planes[i].length) {
-			continue;
-		}
-		populate_buf_info(binfo, b, i);
-	}
-}
-
-int map_and_register_buf(struct msm_vidc_inst *inst, struct v4l2_buffer *b)
-{
-	struct buffer_info *binfo = NULL;
-	struct buffer_info *temp = NULL, *iterator = NULL;
-	int plane = 0;
-	int i = 0, rc = 0;
-	struct msm_smem *same_fd_handle = NULL;
-
-	if (!b || !inst) {
-		dprintk(VIDC_ERR, "%s: invalid input\n", __func__);
-		return -EINVAL;
-	}
-
-	binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
-	if (!binfo) {
-		dprintk(VIDC_ERR, "Out of memory\n");
-		rc = -ENOMEM;
-		goto exit;
-	}
-	if (b->length > VIDEO_MAX_PLANES) {
-		dprintk(VIDC_ERR, "Num planes exceeds max: %d, %d\n",
-			b->length, VIDEO_MAX_PLANES);
-		rc = -EINVAL;
-		goto exit;
-	}
-
-	dprintk(VIDC_DBG,
-		"[MAP] Create binfo = %pK fd = %d size = %d type = %d\n",
-		binfo, b->m.planes[0].reserved[0],
-		b->m.planes[0].length, b->type);
-
-	for (i = 0; i < b->length; ++i) {
-		rc = 0;
-		if (EXTRADATA_IDX(b->length) &&
-			(i == EXTRADATA_IDX(b->length)) &&
-			!b->m.planes[i].length) {
-			continue;
-		}
-		mutex_lock(&inst->registeredbufs.lock);
-		temp = get_registered_buf(inst, b, i, &plane);
-		if (temp && !is_dynamic_buffer_mode(b, inst)) {
-			dprintk(VIDC_DBG,
-				"This memory region has already been prepared\n");
-			rc = 0;
-			mutex_unlock(&inst->registeredbufs.lock);
-			goto exit;
-		}
-
-		if (temp && is_dynamic_buffer_mode(b, inst) && !i) {
-			/*
-			 * Buffer is already present in registered list
-			 * increment ref_count, populate new values of v4l2
-			 * buffer in existing buffer_info struct.
-			 *
-			 * We will use the saved buffer info and queue it when
-			 * we receive RELEASE_BUFFER_REFERENCE EVENT from f/w.
-			 */
-			dprintk(VIDC_DBG, "[MAP] Buffer already prepared\n");
-			temp->inactive = false;
-			list_for_each_entry(iterator,
-				&inst->registeredbufs.list, list) {
-				if (iterator == temp) {
-					rc = buf_ref_get(inst, temp);
-					save_v4l2_buffer(b, temp);
-					break;
-				}
-			}
-		}
-		mutex_unlock(&inst->registeredbufs.lock);
-		/*
-		 * rc == 1,
-		 * buffer is mapped, fw has released all reference, so skip
-		 * mapping and queue it immediately.
-		 *
-		 * rc == 2,
-		 * buffer is mapped and fw is holding a reference, hold it in
-		 * the driver and queue it later when fw has released
-		 */
-		if (rc == 1) {
-			rc = 0;
-			goto exit;
-		} else if (rc >= 2) {
-			rc = -EEXIST;
-			goto exit;
-		}
-
-		same_fd_handle = get_same_fd_buffer(
-				inst, b->m.planes[i].reserved[0]);
-
-		populate_buf_info(binfo, b, i);
-		if (same_fd_handle) {
-			binfo->device_addr[i] =
-			same_fd_handle->device_addr + binfo->buff_off[i];
-			b->m.planes[i].m.userptr = binfo->device_addr[i];
-			binfo->mapped[i] = false;
-			binfo->handle[i] = same_fd_handle;
-		} else {
-			binfo->handle[i] = map_buffer(inst, &b->m.planes[i],
-					get_hal_buffer_type(inst, b));
-			if (!binfo->handle[i]) {
-				rc = -EINVAL;
-				goto exit;
-			}
-
-			binfo->mapped[i] = true;
-			binfo->device_addr[i] = binfo->handle[i]->device_addr +
-				binfo->buff_off[i];
-			b->m.planes[i].m.userptr = binfo->device_addr[i];
-		}
-
-		/* We maintain one ref count for all planes*/
-		if (!i && is_dynamic_buffer_mode(b, inst)) {
-			rc = buf_ref_get(inst, binfo);
-			if (rc < 0)
-				goto exit;
-		}
-		dprintk(VIDC_DBG,
-			"%s: [MAP] binfo = %pK, handle[%d] = %pK, device_addr = %pa, fd = %d, offset = %d, mapped = %d\n",
-			__func__, binfo, i, binfo->handle[i],
-			&binfo->device_addr[i], binfo->fd[i],
-			binfo->buff_off[i], binfo->mapped[i]);
-	}
-
-	mutex_lock(&inst->registeredbufs.lock);
-	list_add_tail(&binfo->list, &inst->registeredbufs.list);
-	mutex_unlock(&inst->registeredbufs.lock);
-	return 0;
-
-exit:
-	kfree(binfo);
-	return rc;
-}
-int unmap_and_deregister_buf(struct msm_vidc_inst *inst,
-			struct buffer_info *binfo)
-{
-	int i = 0;
-	struct buffer_info *temp = NULL;
-	bool found = false, keep_node = false;
-
-	if (!inst || !binfo) {
-		dprintk(VIDC_ERR, "%s invalid param: %pK %pK\n",
-			__func__, inst, binfo);
-		return -EINVAL;
-	}
-
-	WARN(!mutex_is_locked(&inst->registeredbufs.lock),
-		"Registered buf lock is not acquired for %s", __func__);
-
-	/*
-	 * Make sure the buffer to be unmapped and deleted
-	 * from the registered list is present in the list.
-	 */
-	list_for_each_entry(temp, &inst->registeredbufs.list, list) {
-		if (temp == binfo) {
-			found = true;
-			break;
-		}
-	}
-
-	/*
-	 * Free the buffer info only if
-	 * - buffer info has not been deleted from registered list
-	 * - vidc client has called dqbuf on the buffer
-	 * - no references are held on the buffer
-	 */
-	if (!found || !temp || !temp->pending_deletion || !temp->dequeued)
-		goto exit;
-
-	for (i = 0; i < temp->num_planes; i++) {
-		dprintk(VIDC_DBG,
-			"%s: [UNMAP] binfo = %pK, handle[%d] = %pK, device_addr = %pa, fd = %d, offset = %d, mapped = %d\n",
-			__func__, temp, i, temp->handle[i],
-			&temp->device_addr[i], temp->fd[i],
-			temp->buff_off[i], temp->mapped[i]);
-		/*
-		 * Unmap the handle only if the buffer has been mapped and no
-		 * other buffer has a reference to this buffer.
-		 * In case of buffers with same fd, we will map the buffer only
-		 * once and subsequent buffers will refer to the mapped buffer's
-		 * device address.
-		 * For buffers which share the same fd, do not unmap and keep
-		 * the buffer info in registered list.
-		 */
-		if (temp->handle[i] && temp->mapped[i] &&
-			!temp->same_fd_ref[i]) {
-			msm_comm_smem_free(inst,
-				temp->handle[i]);
-		}
-
-		if (temp->same_fd_ref[i])
-			keep_node = true;
-		else {
-			temp->fd[i] = 0;
-			temp->handle[i] = 0;
-			temp->device_addr[i] = 0;
-			temp->uvaddr[i] = 0;
-		}
-	}
-	if (!keep_node) {
-		dprintk(VIDC_DBG, "[UNMAP] AND-FREED binfo: %pK\n", temp);
-		list_del(&temp->list);
-		kfree(temp);
-	} else {
-		temp->inactive = true;
-		dprintk(VIDC_DBG, "[UNMAP] NOT-FREED binfo: %pK\n", temp);
-	}
-exit:
-	return 0;
-}
-
-
-int qbuf_dynamic_buf(struct msm_vidc_inst *inst,
-			struct buffer_info *binfo)
-{
-	struct v4l2_buffer b = {0};
-	struct v4l2_plane plane[VIDEO_MAX_PLANES] = { {0} };
-	struct buf_queue *q = NULL;
-	int rc = 0;
-
-	if (!binfo) {
-		dprintk(VIDC_ERR, "%s invalid param: %pK\n", __func__, binfo);
-		return -EINVAL;
-	}
-	dprintk(VIDC_DBG, "%s fd[0] = %d\n", __func__, binfo->fd[0]);
-
-	b.m.planes = plane;
-	repopulate_v4l2_buffer(&b, binfo);
-
-	q = msm_comm_get_vb2q(inst, (&b)->type);
-	if (!q) {
-		dprintk(VIDC_ERR, "Failed to find buffer queue for type = %d\n"
-				, (&b)->type);
-		return -EINVAL;
-	}
-
-	mutex_lock(&q->lock);
-	rc = vb2_qbuf(&q->vb2_bufq, &b);
-	mutex_unlock(&q->lock);
-
-	if (rc)
-		dprintk(VIDC_ERR, "Failed to qbuf, %d\n", rc);
-	return rc;
-}
-
-int output_buffer_cache_invalidate(struct msm_vidc_inst *inst,
-				struct buffer_info *binfo)
-{
-	int i = 0;
-	int rc = 0;
-
-	if (!inst) {
-		dprintk(VIDC_ERR, "%s: invalid inst: %pK\n", __func__, inst);
-		return -EINVAL;
-	}
-
-	if (!binfo) {
-		dprintk(VIDC_ERR, "%s: invalid buffer info: %pK\n",
-			__func__, inst);
-		return -EINVAL;
-	}
-
-	for (i = 0; i < binfo->num_planes; i++) {
-		if (binfo->handle[i]) {
-			struct msm_smem smem = *binfo->handle[i];
-
-			smem.offset = (unsigned int)(binfo->buff_off[i]);
-			smem.size   = binfo->size[i];
-			rc = msm_comm_smem_cache_operations(inst,
-				&smem, SMEM_CACHE_INVALIDATE);
-			if (rc) {
-				dprintk(VIDC_ERR,
-					"%s: Failed to clean caches: %d\n",
-					__func__, rc);
-				return -EINVAL;
-			}
-		} else
-			dprintk(VIDC_DBG, "%s: NULL handle for plane %d\n",
-					__func__, i);
-	}
-	return 0;
-}
-
 static bool valid_v4l2_buffer(struct v4l2_buffer *b,
 		struct msm_vidc_inst *inst) {
 	enum vidc_ports port =
@@ -896,17 +396,16 @@
 		inst->bufq[port].num_planes == b->length;
 }
 
-int msm_vidc_release_buffer(void *instance, int buffer_type,
-		unsigned int buffer_index)
+int msm_vidc_release_buffer(void *instance, int type, unsigned int index)
 {
+	int rc = 0;
 	struct msm_vidc_inst *inst = instance;
-	struct buffer_info *bi, *dummy;
-	int i, rc = 0;
-	int found_buf = 0;
-	struct vb2_buf_entry *temp, *next;
+	struct msm_vidc_buffer *mbuf, *dummy;
 
-	if (!inst)
+	if (!inst) {
+		dprintk(VIDC_ERR, "%s: invalid inst\n", __func__);
 		return -EINVAL;
+	}
 
 	if (!inst->in_reconfig &&
 		inst->state > MSM_VIDC_LOAD_RESOURCES &&
@@ -914,64 +413,26 @@
 		rc = msm_comm_try_state(inst, MSM_VIDC_RELEASE_RESOURCES_DONE);
 		if (rc) {
 			dprintk(VIDC_ERR,
-					"Failed to move inst: %pK to release res done\n",
-					inst);
+					"%s: Failed to move inst: %pK to release res done\n",
+					__func__, inst);
 		}
 	}
 
 	mutex_lock(&inst->registeredbufs.lock);
-	list_for_each_entry_safe(bi, dummy, &inst->registeredbufs.list, list) {
-		if (bi->type == buffer_type && bi->v4l2_index == buffer_index) {
-			found_buf = 1;
-			list_del(&bi->list);
-			for (i = 0; i < bi->num_planes; i++) {
-				if (bi->handle[i] && bi->mapped[i]) {
-					dprintk(VIDC_DBG,
-						"%s: [UNMAP] binfo = %pK, handle[%d] = %pK, device_addr = %pa, fd = %d, offset = %d, mapped = %d\n",
-						__func__, bi, i, bi->handle[i],
-						&bi->device_addr[i], bi->fd[i],
-						bi->buff_off[i], bi->mapped[i]);
-					msm_comm_smem_free(inst,
-							bi->handle[i]);
-					found_buf = 2;
-				}
-			}
-			kfree(bi);
-			break;
-		}
+	list_for_each_entry_safe(mbuf, dummy, &inst->registeredbufs.list,
+			list) {
+		struct vb2_buffer *vb2 = &mbuf->vvb.vb2_buf;
+
+		if (vb2->type != type || vb2->index != index)
+			continue;
+
+		print_vidc_buffer(VIDC_DBG, "release buf", inst, mbuf);
+		msm_comm_unmap_vidc_buffer(inst, mbuf);
+		list_del(&mbuf->list);
+		kfree(mbuf);
 	}
 	mutex_unlock(&inst->registeredbufs.lock);
 
-	switch (found_buf) {
-	case 0:
-		dprintk(VIDC_DBG,
-			"%s: No buffer(type: %d) found for index %d\n",
-			__func__, buffer_type, buffer_index);
-		break;
-	case 1:
-		dprintk(VIDC_WARN,
-			"%s: Buffer(type: %d) found for index %d.",
-			__func__, buffer_type, buffer_index);
-		dprintk(VIDC_WARN, "zero planes mapped.\n");
-		break;
-	case 2:
-		dprintk(VIDC_DBG,
-			"%s: Released buffer(type: %d) for index %d\n",
-			__func__, buffer_type, buffer_index);
-		break;
-	default:
-		break;
-	}
-
-	mutex_lock(&inst->pendingq.lock);
-	list_for_each_entry_safe(temp, next, &inst->pendingq.list, list) {
-		if (temp->vb->type == buffer_type) {
-			list_del(&temp->list);
-			kfree(temp);
-		}
-	}
-	mutex_unlock(&inst->pendingq.lock);
-
 	return rc;
 }
 EXPORT_SYMBOL(msm_vidc_release_buffer);
@@ -979,65 +440,20 @@
 int msm_vidc_qbuf(void *instance, struct v4l2_buffer *b)
 {
 	struct msm_vidc_inst *inst = instance;
-	struct buffer_info *binfo;
-	int plane = 0;
-	int rc = 0;
-	int i;
+	int rc = 0, i = 0;
 	struct buf_queue *q = NULL;
 
-	if (!inst || !inst->core || !b || !valid_v4l2_buffer(b, inst))
+	if (!inst || !inst->core || !b || !valid_v4l2_buffer(b, inst)) {
+		dprintk(VIDC_ERR, "%s: invalid params, inst %pK\n",
+			__func__, inst);
 		return -EINVAL;
-
-	if (inst->state == MSM_VIDC_CORE_INVALID ||
-		inst->core->state == VIDC_CORE_INVALID)
-		return -EINVAL;
-
-	rc = map_and_register_buf(inst, b);
-	if (rc == -EEXIST) {
-		if (atomic_read(&inst->in_flush) &&
-			is_dynamic_buffer_mode(b, inst)) {
-			dprintk(VIDC_ERR,
-				"Flush in progress, do not hold any buffers in driver\n");
-			msm_comm_flush_dynamic_buffers(inst);
-		}
-		return 0;
 	}
-	if (rc)
-		return rc;
 
-	for (i = 0; i < b->length; ++i) {
-		if (EXTRADATA_IDX(b->length) &&
-			(i == EXTRADATA_IDX(b->length)) &&
-			!b->m.planes[i].length) {
-			b->m.planes[i].m.userptr = 0;
-			continue;
-		}
-		mutex_lock(&inst->registeredbufs.lock);
-		binfo = get_registered_buf(inst, b, i, &plane);
-		mutex_unlock(&inst->registeredbufs.lock);
-		if (!binfo) {
-			dprintk(VIDC_ERR,
-				"This buffer is not registered: %d, %d, %d\n",
-				b->m.planes[i].reserved[0],
-				b->m.planes[i].reserved[1],
-				b->m.planes[i].length);
-			goto err_invalid_buff;
-		}
-		b->m.planes[i].m.userptr = binfo->device_addr[i];
-		dprintk(VIDC_DBG, "Queueing device address = %pa\n",
-				&binfo->device_addr[i]);
-
-		if (binfo->handle[i] &&
-			(b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)) {
-			rc = msm_comm_smem_cache_operations(inst,
-					binfo->handle[i], SMEM_CACHE_CLEAN);
-			if (rc) {
-				dprintk(VIDC_ERR,
-					"Failed to clean caches: %d\n", rc);
-				goto err_invalid_buff;
-			}
-		}
+	for (i = 0; i < b->length; i++) {
+		b->m.planes[i].m.fd = b->m.planes[i].reserved[0];
+		b->m.planes[i].data_offset = b->m.planes[i].reserved[1];
 	}
+	msm_comm_qbuf_cache_operations(inst, b);
 
 	q = msm_comm_get_vb2q(inst, b->type);
 	if (!q) {
@@ -1045,27 +461,28 @@
 			"Failed to find buffer queue for type = %d\n", b->type);
 		return -EINVAL;
 	}
+
 	mutex_lock(&q->lock);
 	rc = vb2_qbuf(&q->vb2_bufq, b);
 	mutex_unlock(&q->lock);
 	if (rc)
 		dprintk(VIDC_ERR, "Failed to qbuf, %d\n", rc);
-	return rc;
 
-err_invalid_buff:
-	return -EINVAL;
+	return rc;
 }
 EXPORT_SYMBOL(msm_vidc_qbuf);
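
With the old registration path gone, qbuf now just translates the driver's
long-standing plane convention onto the standard v4l2 fields before handing
the buffer to vb2: reserved[0] carries the dmabuf fd and reserved[1] the
offset. A userspace-side sketch of filling one plane to match what the loop
above consumes (sizes illustrative):

	struct v4l2_plane plane = { 0 };

	plane.length = buf_size;
	plane.bytesused = filled_len;
	plane.reserved[0] = dmabuf_fd;	/* becomes plane.m.fd above */
	plane.reserved[1] = 0;		/* becomes plane.data_offset */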
 
 int msm_vidc_dqbuf(void *instance, struct v4l2_buffer *b)
 {
 	struct msm_vidc_inst *inst = instance;
-	struct buffer_info *buffer_info = NULL;
-	int i = 0, rc = 0;
+	int rc = 0, i = 0;
 	struct buf_queue *q = NULL;
 
-	if (!inst || !b || !valid_v4l2_buffer(b, inst))
+	if (!inst || !b || !valid_v4l2_buffer(b, inst)) {
+		dprintk(VIDC_ERR, "%s: invalid params, inst %pK\n",
+			__func__, inst);
 		return -EINVAL;
+	}
 
 	q = msm_comm_get_vb2q(inst, b->type);
 	if (!q) {
@@ -1073,54 +490,21 @@
 			"Failed to find buffer queue for type = %d\n", b->type);
 		return -EINVAL;
 	}
+
 	mutex_lock(&q->lock);
 	rc = vb2_dqbuf(&q->vb2_bufq, b, true);
 	mutex_unlock(&q->lock);
-	if (rc) {
-		dprintk(VIDC_DBG, "Failed to dqbuf, %d\n", rc);
+	if (rc == -EAGAIN) {
+		return rc;
+	} else if (rc) {
+		dprintk(VIDC_ERR, "Failed to dqbuf, %d\n", rc);
 		return rc;
 	}
 
+	msm_comm_dqbuf_cache_operations(inst, b);
 	for (i = 0; i < b->length; i++) {
-		if (EXTRADATA_IDX(b->length) &&
-			i == EXTRADATA_IDX(b->length)) {
-			continue;
-		}
-		buffer_info = device_to_uvaddr(&inst->registeredbufs,
-			b->m.planes[i].m.userptr);
-
-		if (!buffer_info) {
-			dprintk(VIDC_ERR,
-				"%s no buffer info registered for buffer addr: %#lx\n",
-				__func__, b->m.planes[i].m.userptr);
-			return -EINVAL;
-		}
-
-		b->m.planes[i].m.userptr = buffer_info->uvaddr[i];
-		b->m.planes[i].reserved[0] = buffer_info->fd[i];
-		b->m.planes[i].reserved[1] = buffer_info->buff_off[i];
-	}
-
-	if (!buffer_info) {
-		dprintk(VIDC_ERR,
-			"%s: error - no buffer info found in registered list\n",
-			__func__);
-		return -EINVAL;
-	}
-
-	rc = output_buffer_cache_invalidate(inst, buffer_info);
-	if (rc)
-		return rc;
-
-
-	if (is_dynamic_buffer_mode(b, inst)) {
-		buffer_info->dequeued = true;
-
-		dprintk(VIDC_DBG, "[DEQUEUED]: fd[0] = %d\n",
-			buffer_info->fd[0]);
-		mutex_lock(&inst->registeredbufs.lock);
-		rc = unmap_and_deregister_buf(inst, buffer_info);
-		mutex_unlock(&inst->registeredbufs.lock);
+		b->m.planes[i].reserved[0] = b->m.planes[i].m.fd;
+		b->m.planes[i].reserved[1] = b->m.planes[i].data_offset;
 	}
 
 	return rc;
@@ -1419,7 +803,6 @@
 	int rc = 0;
 	struct hfi_device *hdev;
 	struct hal_buffer_size_minimum b;
-	struct vb2_buf_entry *temp, *next;
 
 	hdev = inst->core->device;
 
@@ -1482,6 +865,13 @@
 		goto fail_start;
 	}
 
+	rc = msm_comm_set_recon_buffers(inst);
+	if (rc) {
+		dprintk(VIDC_ERR,
+				"Failed to set recon buffers: %d\n", rc);
+		goto fail_start;
+	}
+
 	if (msm_comm_get_stream_output_mode(inst) ==
 			HAL_VIDEO_DECODER_SECONDARY) {
 		rc = msm_comm_set_output_buffers(inst);
@@ -1529,15 +919,22 @@
 
 fail_start:
 	if (rc) {
-		mutex_lock(&inst->pendingq.lock);
-		list_for_each_entry_safe(temp, next, &inst->pendingq.list,
-				list) {
-			vb2_buffer_done(temp->vb,
-					VB2_BUF_STATE_QUEUED);
+		struct msm_vidc_buffer *temp, *next;
+
+		mutex_lock(&inst->registeredbufs.lock);
+		list_for_each_entry_safe(temp, next,
+				&inst->registeredbufs.list, list) {
+			struct vb2_buffer *vb;
+
+			print_vidc_buffer(VIDC_ERR, "return buf", inst, temp);
+			vb = msm_comm_get_vb_using_vidc_buffer(inst, temp);
+			if (vb)
+				vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED);
+			msm_comm_unmap_vidc_buffer(inst, temp);
 			list_del(&temp->list);
 			kfree(temp);
 		}
-		mutex_unlock(&inst->pendingq.lock);
+		mutex_unlock(&inst->registeredbufs.lock);
 	}
 	return rc;
 }
@@ -1644,12 +1041,29 @@
 			inst, q->type);
 }
 
-static void msm_vidc_buf_queue(struct vb2_buffer *vb)
+static void msm_vidc_buf_queue(struct vb2_buffer *vb2)
 {
-	int rc = msm_comm_qbuf(vb2_get_drv_priv(vb->vb2_queue), vb);
+	int rc = 0;
+	struct msm_vidc_inst *inst = NULL;
+	struct msm_vidc_buffer *mbuf = NULL;
 
+	inst = vb2_get_drv_priv(vb2->vb2_queue);
+	if (!inst) {
+		dprintk(VIDC_ERR, "%s: invalid inst\n", __func__);
+		return;
+	}
+
+	mbuf = msm_comm_get_vidc_buffer(inst, vb2);
+	if (IS_ERR_OR_NULL(mbuf)) {
+		if (PTR_ERR(mbuf) != -EEXIST)
+			print_vb2_buffer(VIDC_ERR, "failed to get vidc-buf",
+				inst, vb2);
+		return;
+	}
+
+	rc = msm_comm_qbuf(inst, mbuf);
 	if (rc)
-		dprintk(VIDC_ERR, "Failed to queue buffer: %d\n", rc);
+		print_vidc_buffer(VIDC_ERR, "failed qbuf", inst, mbuf);
 }
 
 static const struct vb2_ops msm_vidc_vb2q_ops = {
@@ -1836,7 +1250,7 @@
 	struct v4l2_ctrl *ctrl)
 {
 	int rc = 0;
-	struct hal_buffer_requirements *bufreq, *newreq;
+	struct hal_buffer_requirements *bufreq;
 	enum hal_buffer buffer_type;
 
 	if (ctrl->id == V4L2_CID_MIN_BUFFERS_FOR_OUTPUT) {
@@ -1885,16 +1299,7 @@
 
 
 		if (inst->in_reconfig) {
-			rc = msm_comm_try_get_bufreqs(inst);
-			newreq = get_buff_req_buffer(inst,
-				buffer_type);
-			if (!newreq) {
-				dprintk(VIDC_ERR,
-					"Failed to find new bufreqs = %d\n",
-					buffer_type);
-				return 0;
-			}
-			ctrl->val = newreq->buffer_count_min;
+			ctrl->val = bufreq->buffer_count_min;
 		}
 		if (inst->session_type == MSM_VIDC_DECODER &&
 				!inst->in_reconfig &&
@@ -1962,9 +1367,6 @@
 		break;
 
 	case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
-		if (inst->in_reconfig)
-			msm_comm_try_get_bufreqs(inst);
-
 		buffer_type = msm_comm_get_hal_output_buffer(inst);
 		bufreq = get_buff_req_buffer(inst,
 			buffer_type);
@@ -1977,7 +1379,6 @@
 		ctrl->val = bufreq->buffer_count_min_host;
 		break;
 	case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
-		msm_comm_try_get_bufreqs(inst);
 		bufreq = get_buff_req_buffer(inst, HAL_BUFFER_INPUT);
 		if (!bufreq) {
 			dprintk(VIDC_ERR,
@@ -2078,13 +1479,13 @@
 	mutex_init(&inst->bufq[OUTPUT_PORT].lock);
 	mutex_init(&inst->lock);
 
-	INIT_MSM_VIDC_LIST(&inst->pendingq);
 	INIT_MSM_VIDC_LIST(&inst->scratchbufs);
 	INIT_MSM_VIDC_LIST(&inst->freqs);
 	INIT_MSM_VIDC_LIST(&inst->persistbufs);
 	INIT_MSM_VIDC_LIST(&inst->pending_getpropq);
 	INIT_MSM_VIDC_LIST(&inst->outputbufs);
 	INIT_MSM_VIDC_LIST(&inst->registeredbufs);
+	INIT_MSM_VIDC_LIST(&inst->reconbufs);
 
 	kref_init(&inst->kref);
 
@@ -2184,7 +1585,6 @@
 	mutex_destroy(&inst->bufq[OUTPUT_PORT].lock);
 	mutex_destroy(&inst->lock);
 
-	DEINIT_MSM_VIDC_LIST(&inst->pendingq);
 	DEINIT_MSM_VIDC_LIST(&inst->scratchbufs);
 	DEINIT_MSM_VIDC_LIST(&inst->persistbufs);
 	DEINIT_MSM_VIDC_LIST(&inst->pending_getpropq);
@@ -2200,50 +1600,43 @@
 
 static void cleanup_instance(struct msm_vidc_inst *inst)
 {
-	struct vb2_buf_entry *entry, *dummy;
-
-	if (inst) {
-
-		mutex_lock(&inst->pendingq.lock);
-		list_for_each_entry_safe(entry, dummy, &inst->pendingq.list,
-				list) {
-			list_del(&entry->list);
-			kfree(entry);
-		}
-		mutex_unlock(&inst->pendingq.lock);
-
-		msm_comm_free_freq_table(inst);
-
-		if (msm_comm_release_scratch_buffers(inst, false)) {
-			dprintk(VIDC_ERR,
-				"Failed to release scratch buffers\n");
-		}
-
-		if (msm_comm_release_persist_buffers(inst)) {
-			dprintk(VIDC_ERR,
-				"Failed to release persist buffers\n");
-		}
-
-		/*
-		 * At this point all buffes should be with driver
-		 * irrespective of scenario
-		 */
-		msm_comm_validate_output_buffers(inst);
-
-		if (msm_comm_release_output_buffers(inst, true)) {
-			dprintk(VIDC_ERR,
-				"Failed to release output buffers\n");
-		}
-
-		if (inst->extradata_handle)
-			msm_comm_smem_free(inst, inst->extradata_handle);
-
-		debugfs_remove_recursive(inst->debugfs_root);
-
-		mutex_lock(&inst->pending_getpropq.lock);
-		WARN_ON(!list_empty(&inst->pending_getpropq.list));
-		mutex_unlock(&inst->pending_getpropq.lock);
+	if (!inst) {
+		dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
+		return;
 	}
+
+	msm_comm_free_freq_table(inst);
+
+	if (msm_comm_release_scratch_buffers(inst, false))
+		dprintk(VIDC_ERR,
+			"Failed to release scratch buffers\n");
+
+	if (msm_comm_release_recon_buffers(inst))
+		dprintk(VIDC_ERR,
+			"Failed to release recon buffers\n");
+
+	if (msm_comm_release_persist_buffers(inst))
+		dprintk(VIDC_ERR,
+			"Failed to release persist buffers\n");
+
+	/*
+	 * At this point all buffers should be with the driver
+	 * irrespective of scenario
+	 */
+	msm_comm_validate_output_buffers(inst);
+
+	if (msm_comm_release_output_buffers(inst, true))
+		dprintk(VIDC_ERR,
+			"Failed to release output buffers\n");
+
+	if (inst->extradata_handle)
+		msm_comm_smem_free(inst, inst->extradata_handle);
+
+	debugfs_remove_recursive(inst->debugfs_root);
+
+	mutex_lock(&inst->pending_getpropq.lock);
+	WARN_ON(!list_empty(&inst->pending_getpropq.list));
+	mutex_unlock(&inst->pending_getpropq.lock);
 }
 
 int msm_vidc_destroy(struct msm_vidc_inst *inst)
@@ -2251,8 +1644,10 @@
 	struct msm_vidc_core *core;
 	int i = 0;
 
-	if (!inst || !inst->core)
+	if (!inst || !inst->core) {
+		dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
 		return -EINVAL;
+	}
 
 	core = inst->core;
 
@@ -2263,7 +1658,6 @@
 
 	msm_comm_ctrl_deinit(inst);
 
-	DEINIT_MSM_VIDC_LIST(&inst->pendingq);
 	DEINIT_MSM_VIDC_LIST(&inst->scratchbufs);
 	DEINIT_MSM_VIDC_LIST(&inst->persistbufs);
 	DEINIT_MSM_VIDC_LIST(&inst->pending_getpropq);
@@ -2287,22 +1681,24 @@
 	return 0;
 }
 
+static void close_helper(struct kref *kref)
+{
+	struct msm_vidc_inst *inst = container_of(kref,
+			struct msm_vidc_inst, kref);
+
+	msm_vidc_destroy(inst);
+}
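
Hoisting close_helper to file scope removes a nested-function GCC extension and gives kref a proper release callback. A toy sketch of the release-on-last-reference pattern it supports, using a plain counter in place of struct kref (all names are stand-ins):

#include <stdio.h>
#include <stdlib.h>

struct inst_sketch {
	int refcount;
};

static void close_helper_sketch(struct inst_sketch *inst)
{
	printf("destroying instance\n");
	free(inst);
}

static void put_inst_sketch(struct inst_sketch *inst)
{
	/* kref_put() invokes the release callback on the last reference */
	if (--inst->refcount == 0)
		close_helper_sketch(inst);
}

int main(void)
{
	struct inst_sketch *inst = calloc(1, sizeof(*inst));

	if (!inst)
		return 1;
	inst->refcount = 2;	/* e.g. open() plus one in-flight op */
	put_inst_sketch(inst);	/* still referenced */
	put_inst_sketch(inst);	/* last reference: destroy */
	return 0;
}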
+
 int msm_vidc_close(void *instance)
 {
-	void close_helper(struct kref *kref)
-	{
-		struct msm_vidc_inst *inst = container_of(kref,
-				struct msm_vidc_inst, kref);
-
-		msm_vidc_destroy(inst);
-	}
-
 	struct msm_vidc_inst *inst = instance;
-	struct buffer_info *bi, *dummy;
+	struct msm_vidc_buffer *temp, *dummy;
 	int rc = 0;
 
-	if (!inst || !inst->core)
+	if (!inst || !inst->core) {
+		dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
 		return -EINVAL;
+	}
 
 	/*
 	 * Make sure that HW stop working on these buffers that
@@ -2314,19 +1710,13 @@
 				MSM_VIDC_RELEASE_RESOURCES_DONE);
 
 	mutex_lock(&inst->registeredbufs.lock);
-	list_for_each_entry_safe(bi, dummy, &inst->registeredbufs.list, list) {
-			int i = 0;
-
-			list_del(&bi->list);
-
-			for (i = 0; i < min(bi->num_planes, VIDEO_MAX_PLANES);
-					i++) {
-				if (bi->handle[i] && bi->mapped[i])
-					msm_comm_smem_free(inst, bi->handle[i]);
-			}
-
-			kfree(bi);
-		}
+	list_for_each_entry_safe(temp, dummy, &inst->registeredbufs.list,
+			list) {
+		print_vidc_buffer(VIDC_ERR, "undequeud buf", inst, temp);
+		msm_comm_unmap_vidc_buffer(inst, temp);
+		list_del(&temp->list);
+		kfree(temp);
+	}
 	mutex_unlock(&inst->registeredbufs.lock);
 
 	cleanup_instance(inst);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index 05af186..5e366d0 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -16,6 +16,94 @@
 #include "msm_vidc_debug.h"
 #include "msm_vidc_clocks.h"
 
+static inline unsigned long int get_ubwc_compression_ratio(
+	struct ubwc_cr_stats_info_type ubwc_stats_info)
+{
+	unsigned long int sum = 0, weighted_sum = 0;
+	unsigned long int compression_ratio = 1 << 16;
+
+	weighted_sum =
+		32  * ubwc_stats_info.cr_stats_info0 +
+		64  * ubwc_stats_info.cr_stats_info1 +
+		96  * ubwc_stats_info.cr_stats_info2 +
+		128 * ubwc_stats_info.cr_stats_info3 +
+		160 * ubwc_stats_info.cr_stats_info4 +
+		192 * ubwc_stats_info.cr_stats_info5 +
+		256 * ubwc_stats_info.cr_stats_info6;
+
+	sum =
+		ubwc_stats_info.cr_stats_info0 +
+		ubwc_stats_info.cr_stats_info1 +
+		ubwc_stats_info.cr_stats_info2 +
+		ubwc_stats_info.cr_stats_info3 +
+		ubwc_stats_info.cr_stats_info4 +
+		ubwc_stats_info.cr_stats_info5 +
+		ubwc_stats_info.cr_stats_info6;
+
+	compression_ratio = (weighted_sum && sum) ?
+		((256 * sum) << 16) / weighted_sum : compression_ratio;
+
+	return compression_ratio;
+}
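
The helper above folds the seven UBWC histogram bins into a single Q16 fixed-point compression ratio. A minimal userspace sketch of the same arithmetic, assuming hypothetical bin counts (real values arrive in the firmware recon stats):

#include <stdio.h>

int main(void)
{
	/*
	 * bin i is assumed to count blocks compressed to roughly
	 * weights[i]/256 of their original size; counts are made up.
	 */
	unsigned long bins[7] = { 10, 20, 40, 80, 40, 20, 10 };
	unsigned long weights[7] = { 32, 64, 96, 128, 160, 192, 256 };
	unsigned long sum = 0, weighted_sum = 0, cr = 1UL << 16;
	int i;

	for (i = 0; i < 7; i++) {
		sum += bins[i];
		weighted_sum += weights[i] * bins[i];
	}
	/* compression_ratio = (256 * sum / weighted_sum), kept in Q16 */
	if (weighted_sum && sum)
		cr = ((256 * sum) << 16) / weighted_sum;

	printf("compression ratio = %lu.%04lu\n",
	       cr >> 16, ((cr & 0xffff) * 10000) >> 16);
	return 0;
}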
+
+static inline int msm_vidc_get_mbs_per_frame(struct msm_vidc_inst *inst)
+{
+	int height, width;
+
+	if (!inst->in_reconfig) {
+		height = max(inst->prop.height[CAPTURE_PORT],
+			inst->prop.height[OUTPUT_PORT]);
+		width = max(inst->prop.width[CAPTURE_PORT],
+			inst->prop.width[OUTPUT_PORT]);
+	} else {
+		height = inst->reconfig_height;
+		width = inst->reconfig_width;
+	}
+
+	return NUM_MBS_PER_FRAME(height, width);
+}
+
+void update_recon_stats(struct msm_vidc_inst *inst,
+	struct recon_stats_type *recon_stats)
+{
+	struct recon_buf *binfo;
+	u32 CR = 0, CF = 0;
+	u32 frame_size;
+
+	CR = get_ubwc_compression_ratio(recon_stats->ubwc_stats_info);
+
+	frame_size = (msm_vidc_get_mbs_per_frame(inst) / (32 * 8) * 3) / 2;
+
+	CF = recon_stats->complexity_number / frame_size;
+
+	mutex_lock(&inst->reconbufs.lock);
+	list_for_each_entry(binfo, &inst->reconbufs.list, list) {
+		if (binfo->buffer_index ==
+				recon_stats->buffer_index) {
+			binfo->CR = CR;
+			binfo->CF = CF;
+		}
+	}
+	mutex_unlock(&inst->reconbufs.lock);
+}
+
+static int fill_recon_stats(struct msm_vidc_inst *inst,
+	struct vidc_bus_vote_data *vote_data)
+{
+	struct recon_buf *binfo;
+	u32 CR = 0, CF = 0;
+
+	mutex_lock(&inst->reconbufs.lock);
+	list_for_each_entry(binfo, &inst->reconbufs.list, list) {
+		CR = max(CR, binfo->CR);
+		CF = max(CF, binfo->CF);
+	}
+	mutex_unlock(&inst->reconbufs.lock);
+	vote_data->complexity_factor = CF;
+	vote_data->compression_ratio = CR;
+	return 0;
+}
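
fill_recon_stats votes with the maximum CR and CF observed across all recon buffers. A trivial sketch of that aggregation over a plain array instead of the locked kernel list; the values are invented:

#include <stdio.h>

struct recon_buf_sketch { unsigned int cr, cf; };

int main(void)
{
	struct recon_buf_sketch bufs[] = { {120, 3}, {200, 1}, {150, 7} };
	unsigned int cr = 0, cf = 0, i;

	for (i = 0; i < sizeof(bufs) / sizeof(bufs[0]); i++) {
		cr = cr > bufs[i].cr ? cr : bufs[i].cr;
		cf = cf > bufs[i].cf ? cf : bufs[i].cf;
	}
	printf("vote: compression_ratio=%u complexity_factor=%u\n", cr, cf);
	return 0;
}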
+
 int msm_comm_vote_bus(struct msm_vidc_core *core)
 {
 	int rc = 0, vote_data_count = 0, i = 0;
@@ -30,35 +118,36 @@
 
 	hdev = core->device;
 
-	mutex_lock(&core->lock);
-	list_for_each_entry(inst, &core->instances, list)
-		++vote_data_count;
-
-	vote_data = kcalloc(vote_data_count, sizeof(*vote_data),
-			GFP_TEMPORARY);
+	vote_data = core->vote_data;
 	if (!vote_data) {
-		dprintk(VIDC_ERR, "%s: failed to allocate memory\n", __func__);
-		rc = -ENOMEM;
-		goto fail_alloc;
+		dprintk(VIDC_PROF,

+			"Failed to get vote_data for inst %pK\n",
+				inst);
+		return -EINVAL;
 	}
 
+	mutex_lock(&core->lock);
 	list_for_each_entry(inst, &core->instances, list) {
-		int codec = 0, yuv = 0;
+		int codec = 0;
+
+		++vote_data_count;
 
 		codec = inst->session_type == MSM_VIDC_DECODER ?
 			inst->fmts[OUTPUT_PORT].fourcc :
 			inst->fmts[CAPTURE_PORT].fourcc;
 
-		yuv = inst->session_type == MSM_VIDC_DECODER ?
-			inst->fmts[CAPTURE_PORT].fourcc :
-			inst->fmts[OUTPUT_PORT].fourcc;
-
 		vote_data[i].domain = get_hal_domain(inst->session_type);
 		vote_data[i].codec = get_hal_codec(codec);
-		vote_data[i].width =  max(inst->prop.width[CAPTURE_PORT],
+		vote_data[i].input_width =  max(inst->prop.width[OUTPUT_PORT],
 				inst->prop.width[OUTPUT_PORT]);
-		vote_data[i].height = max(inst->prop.height[CAPTURE_PORT],
+		vote_data[i].input_height = max(inst->prop.height[OUTPUT_PORT],
 				inst->prop.height[OUTPUT_PORT]);
+		vote_data[i].output_width =  max(inst->prop.width[CAPTURE_PORT],
+				inst->prop.width[OUTPUT_PORT]);
+		vote_data[i].output_height =
+				max(inst->prop.height[CAPTURE_PORT],
+				inst->prop.height[OUTPUT_PORT]);
+		vote_data[i].lcu_size = codec == V4L2_PIX_FMT_HEVC ? 32 : 16;
 
 		if (inst->clk_data.operating_rate)
 			vote_data[i].fps =
@@ -67,38 +156,41 @@
 		else
 			vote_data[i].fps = inst->prop.fps;
 
+		vote_data[i].power_mode = 0;
 		if (!msm_vidc_clock_scaling ||
 			inst->clk_data.buffer_counter < DCVS_FTB_WINDOW)
 			vote_data[i].power_mode = VIDC_POWER_TURBO;
 
-		/*
-		 * TODO: support for OBP-DBP split mode hasn't been yet
-		 * implemented, once it is, this part of code needs to be
-		 * revisited since passing in accurate information to the bus
-		 * governor will drastically reduce bandwidth
-		 */
-		//vote_data[i].color_formats[0] = get_hal_uncompressed(yuv);
-		vote_data[i].num_formats = 1;
+		if (msm_comm_get_stream_output_mode(inst) ==
+				HAL_VIDEO_DECODER_PRIMARY) {
+			vote_data[i].color_formats[0] =
+				msm_comm_get_hal_uncompressed(
+				inst->clk_data.opb_fourcc);
+			vote_data[i].num_formats = 1;
+		} else {
+			vote_data[i].color_formats[0] =
+				msm_comm_get_hal_uncompressed(
+				inst->clk_data.dpb_fourcc);
+			vote_data[i].color_formats[1] =
+				msm_comm_get_hal_uncompressed(
+				inst->clk_data.opb_fourcc);
+			vote_data[i].num_formats = 2;
+		}
+		vote_data[i].work_mode = inst->clk_data.work_mode;
+		fill_recon_stats(inst, &vote_data[i]);
 		i++;
 	}
 	mutex_unlock(&core->lock);
+	if (vote_data_count)
+		rc = call_hfi_op(hdev, vote_bus, hdev->hfi_device_data,
+			vote_data, vote_data_count);
 
-	rc = call_hfi_op(hdev, vote_bus, hdev->hfi_device_data, vote_data,
-			vote_data_count);
-	if (rc)
-		dprintk(VIDC_ERR, "Failed to scale bus: %d\n", rc);
-
-	kfree(vote_data);
-	return rc;
-
-fail_alloc:
-	mutex_unlock(&core->lock);
 	return rc;
 }
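
The rewrite replaces a per-call kcalloc/kfree of vote_data with a core-lifetime array sized for the instance cap, filled under core->lock and handed to the HFI vote op once outside it. A sketch of the preallocation pattern, with stand-in types and a stand-in vote function:

#include <stdio.h>
#include <stdlib.h>

#define MAX_INSTANCES	16

struct vote_sketch { unsigned int fps, width, height; };

static struct vote_sketch *vote_data;	/* allocated once at core init */

static int core_init_sketch(void)
{
	vote_data = calloc(MAX_INSTANCES, sizeof(*vote_data));
	return vote_data ? 0 : -1;
}

static int vote_bus_sketch(int active_instances)
{
	int i;

	if (!vote_data)
		return -1;
	/* fill only the slots for live instances; no allocation here */
	for (i = 0; i < active_instances && i < MAX_INSTANCES; i++)
		vote_data[i] = (struct vote_sketch){ 30, 1920, 1080 };
	printf("voting for %d instance(s)\n", i);
	return 0;
}

int main(void)
{
	if (core_init_sketch())
		return 1;
	vote_bus_sketch(2);
	free(vote_data);
	return 0;
}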
 
 static inline int get_pending_bufs_fw(struct msm_vidc_inst *inst)
 {
-	int fw_out_qsize = 0, buffers_in_driver = 0;
+	int fw_out_qsize = 0;
 
 	/*
 	 * DCVS always operates on Uncompressed buffers.
@@ -111,11 +203,9 @@
 			fw_out_qsize = inst->count.ftb - inst->count.fbd;
 		else
 			fw_out_qsize = inst->count.etb - inst->count.ebd;
-
-		buffers_in_driver = inst->buffers_held_in_driver;
 	}
 
-	return fw_out_qsize + buffers_in_driver;
+	return fw_out_qsize;
 }
 
 static int msm_dcvs_scale_clocks(struct msm_vidc_inst *inst)
@@ -174,7 +264,7 @@
 }
 
 static void msm_vidc_update_freq_entry(struct msm_vidc_inst *inst,
-	unsigned long freq, ion_phys_addr_t device_addr)
+	unsigned long freq, u32 device_addr)
 {
 	struct vidc_freq_data *temp, *next;
 	bool found = false;
@@ -200,7 +290,7 @@
 // TODO this needs to be removed later and use queued_list
 
 void msm_vidc_clear_freq_entry(struct msm_vidc_inst *inst,
-	ion_phys_addr_t device_addr)
+	u32 device_addr)
 {
 	struct vidc_freq_data *temp, *next;
 
@@ -252,24 +342,6 @@
 	mutex_unlock(&inst->freqs.lock);
 }
 
-
-static inline int msm_dcvs_get_mbs_per_frame(struct msm_vidc_inst *inst)
-{
-	int height, width;
-
-	if (!inst->in_reconfig) {
-		height = max(inst->prop.height[CAPTURE_PORT],
-			inst->prop.height[OUTPUT_PORT]);
-		width = max(inst->prop.width[CAPTURE_PORT],
-			inst->prop.width[OUTPUT_PORT]);
-	} else {
-		height = inst->reconfig_height;
-		width = inst->reconfig_width;
-	}
-
-	return NUM_MBS_PER_FRAME(height, width);
-}
-
 static unsigned long msm_vidc_calc_freq(struct msm_vidc_inst *inst,
 	u32 filled_len)
 {
@@ -441,10 +513,10 @@
 
 int msm_comm_scale_clocks(struct msm_vidc_inst *inst)
 {
-	struct vb2_buf_entry *temp, *next;
+	struct msm_vidc_buffer *temp, *next;
 	unsigned long freq = 0;
 	u32 filled_len = 0;
-	ion_phys_addr_t device_addr = 0;
+	u32 device_addr = 0;
 
 	if (!inst || !inst->core) {
 		dprintk(VIDC_ERR, "%s Invalid args: Inst = %pK\n",
@@ -452,15 +524,17 @@
 		return -EINVAL;
 	}
 
-	mutex_lock(&inst->pendingq.lock);
-	list_for_each_entry_safe(temp, next, &inst->pendingq.list, list) {
-		if (temp->vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+	mutex_lock(&inst->registeredbufs.lock);
+	list_for_each_entry_safe(temp, next, &inst->registeredbufs.list, list) {
+		if (temp->vvb.vb2_buf.type ==
+				V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
+				temp->deferred) {
 			filled_len = max(filled_len,
-				temp->vb->planes[0].bytesused);
-			device_addr = temp->vb->planes[0].m.userptr;
+				temp->vvb.vb2_buf.planes[0].bytesused);
+			device_addr = temp->smem[0].device_addr;
 		}
 	}
-	mutex_unlock(&inst->pendingq.lock);
+	mutex_unlock(&inst->registeredbufs.lock);
 
 	if (!filled_len || !device_addr) {
 		dprintk(VIDC_PROF, "No Change in frequency\n");
@@ -537,73 +611,31 @@
 	return true;
 }
 
-static bool msm_dcvs_check_codec_supported(int fourcc,
-		unsigned long codecs_supported, enum session_type type)
-{
-	int codec_bit, session_type_bit;
-	bool codec_type, session_type;
-	unsigned long session;
-
-	session = VIDC_VOTE_DATA_SESSION_VAL(get_hal_codec(fourcc),
-		get_hal_domain(type));
-
-	if (!codecs_supported || !session)
-		return false;
-
-	/* ffs returns a 1 indexed, test_bit takes a 0 indexed...index */
-	codec_bit = ffs(session) - 1;
-	session_type_bit = codec_bit + 1;
-
-	codec_type =
-		test_bit(codec_bit, &codecs_supported) ==
-		test_bit(codec_bit, &session);
-	session_type =
-		test_bit(session_type_bit, &codecs_supported) ==
-		test_bit(session_type_bit, &session);
-
-	return codec_type && session_type;
-}
-
 int msm_comm_init_clocks_and_bus_data(struct msm_vidc_inst *inst)
 {
 	int rc = 0, j = 0;
-	struct clock_freq_table *clk_freq_tbl = NULL;
-	struct clock_profile_entry *entry = NULL;
-	int fourcc;
+	int fourcc, count;
 
 	if (!inst || !inst->core) {
 		dprintk(VIDC_ERR, "%s Invalid args: Inst = %pK\n",
-			__func__, inst);
+				__func__, inst);
 		return -EINVAL;
 	}
-
-	clk_freq_tbl = &inst->core->resources.clock_freq_tbl;
+	count = inst->core->resources.codec_data_count;
 	fourcc = inst->session_type == MSM_VIDC_DECODER ?
 		inst->fmts[OUTPUT_PORT].fourcc :
 		inst->fmts[CAPTURE_PORT].fourcc;
 
-	for (j = 0; j < clk_freq_tbl->count; j++) {
-		bool matched = false;
-
-		entry = &clk_freq_tbl->clk_prof_entries[j];
-
-		matched = msm_dcvs_check_codec_supported(
-				fourcc,
-				entry->codec_mask,
-				inst->session_type);
-
-		if (matched) {
-			inst->clk_data.entry = entry;
+	for (j = 0; j < count; j++) {
+		if (inst->core->resources.codec_data[j].session_type ==
+				inst->session_type &&
+				inst->core->resources.codec_data[j].fourcc ==
+				fourcc) {
+			inst->clk_data.entry =
+				&inst->core->resources.codec_data[j];
 			break;
 		}
 	}
-
-	if (j == clk_freq_tbl->count) {
-		dprintk(VIDC_ERR,
-			"Failed : No matching clock entry found\n");
-		rc = -EINVAL;
-	}
-
 	return rc;
 }
 
@@ -783,7 +815,7 @@
 				__func__);
 		return 0;
 	}
-	mbs_per_frame = msm_dcvs_get_mbs_per_frame(inst);
+	mbs_per_frame = msm_vidc_get_mbs_per_frame(inst);
 	if (mbs_per_frame >= inst->core->resources.max_hq_mbs_per_frame ||
 		inst->prop.fps >= inst->core->resources.max_hq_fps) {
 		enable = true;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
index fe4822b..e1226e4 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
@@ -42,6 +42,7 @@
 int msm_vidc_decide_work_mode(struct msm_vidc_inst *inst);
 int msm_vidc_decide_core_and_power_mode(struct msm_vidc_inst *inst);
 void msm_vidc_clear_freq_entry(struct msm_vidc_inst *inst,
-	ion_phys_addr_t device_addr);
-
+	u32 device_addr);
+void update_recon_stats(struct msm_vidc_inst *inst,
+	struct recon_stats_type *recon_stats);
 #endif
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 873a338..ac69ab8 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -36,6 +36,7 @@
 #define L_MODE V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY
 
 #define MAX_SUPPORTED_INSTANCES 16
+static int msm_vidc_update_host_buff_counts(struct msm_vidc_inst *inst);
 
 const char *const mpeg_video_vidc_extradata[] = {
 	"Extradata none",
@@ -817,7 +818,7 @@
 	return codec;
 }
 
-static enum hal_uncompressed_format get_hal_uncompressed(int fourcc)
+enum hal_uncompressed_format msm_comm_get_hal_uncompressed(int fourcc)
 {
 	enum hal_uncompressed_format format = HAL_UNUSED_COLOR;
 
@@ -976,6 +977,11 @@
 		__func__, core->codec_count, core->enc_codec_supported,
 		core->dec_codec_supported);
 
+	core->vote_data = kcalloc(MAX_SUPPORTED_INSTANCES,
+		sizeof(*core->vote_data), GFP_KERNEL);
+	if (!core->vote_data)
+		dprintk(VIDC_ERR, "%s: failed to allocate memory\n", __func__);
+
 	complete(&(core->completions[index]));
 }
 
@@ -1063,9 +1069,9 @@
 	mutex_lock(&inst->scratchbufs.lock);
 	list_for_each_safe(ptr, next, &inst->scratchbufs.list) {
 		buf = list_entry(ptr, struct internal_buf, list);
-		if (address == (u32)buf->handle->device_addr) {
-			dprintk(VIDC_DBG, "releasing scratch: %pa\n",
-					&buf->handle->device_addr);
+		if (address == buf->smem.device_addr) {
+			dprintk(VIDC_DBG, "releasing scratch: %x\n",
+					buf->smem.device_addr);
 			buf_found = true;
 		}
 	}
@@ -1074,9 +1080,9 @@
 	mutex_lock(&inst->persistbufs.lock);
 	list_for_each_safe(ptr, next, &inst->persistbufs.list) {
 		buf = list_entry(ptr, struct internal_buf, list);
-		if (address == (u32)buf->handle->device_addr) {
-			dprintk(VIDC_DBG, "releasing persist: %pa\n",
-					&buf->handle->device_addr);
+		if (address == buf->smem.device_addr) {
+			dprintk(VIDC_DBG, "releasing persist: %x\n",
+					buf->smem.device_addr);
 			buf_found = true;
 		}
 	}
@@ -1162,7 +1168,8 @@
 	hdev = (struct hfi_device *)(inst->core->device);
 	rc = wait_for_completion_timeout(
 		&inst->completions[SESSION_MSG_INDEX(cmd)],
-		msecs_to_jiffies(msm_vidc_hw_rsp_timeout));
+		msecs_to_jiffies(
+			inst->core->resources.msm_vidc_hw_rsp_timeout));
 	if (!rc) {
 		dprintk(VIDC_ERR, "Wait interrupted or timed out: %d\n",
 				SESSION_MSG_INDEX(cmd));
@@ -1441,6 +1448,20 @@
 	put_inst(inst);
 }
 
+static void msm_vidc_queue_rbr_event(struct msm_vidc_inst *inst,
+		int fd, u32 offset)
+{
+	struct v4l2_event buf_event = {0};
+	u32 *ptr;
+
+	buf_event.type = V4L2_EVENT_RELEASE_BUFFER_REFERENCE;
+	ptr = (u32 *)buf_event.u.data;
+	ptr[0] = fd;
+	ptr[1] = offset;
+
+	v4l2_event_queue_fh(&inst->event_handler, &buf_event);
+}
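
The new helper packs the buffer fd and offset into the event payload. A sketch assuming the payload is the usual 64-byte v4l2_event data area (mirrored here as u32[16] for alignment); the fd value is hypothetical:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct event_sketch {
	uint32_t data[16];	/* 64-byte payload, as in v4l2_event.u.data */
};

int main(void)
{
	struct event_sketch ev;
	uint32_t *ptr = ev.data;

	memset(&ev, 0, sizeof(ev));
	ptr[0] = 41;		/* hypothetical dmabuf fd */
	ptr[1] = 0;		/* buffer offset */
	printf("RBR event: fd=%u offset=%u\n", ptr[0], ptr[1]);
	return 0;
}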
+
 static void handle_event_change(enum hal_command_response cmd, void *data)
 {
 	struct msm_vidc_inst *inst = NULL;
@@ -1450,6 +1471,7 @@
 	int rc = 0;
 	struct hfi_device *hdev;
 	u32 *ptr = NULL;
+	struct hal_buffer_requirements *bufreq;
 
 	if (!event_notify) {
 		dprintk(VIDC_WARN, "Got an empty event from hfi\n");
@@ -1473,65 +1495,17 @@
 		break;
 	case HAL_EVENT_RELEASE_BUFFER_REFERENCE:
 	{
-		struct v4l2_event buf_event = {0};
-		struct buffer_info *binfo = NULL, *temp = NULL;
-		u32 *ptr = NULL;
-
-		dprintk(VIDC_DBG, "%s - inst: %pK buffer: %pa extra: %pa\n",
-				__func__, inst, &event_notify->packet_buffer,
-				&event_notify->extra_data_buffer);
-
-		if (inst->state == MSM_VIDC_CORE_INVALID ||
-				inst->core->state == VIDC_CORE_INVALID) {
-			dprintk(VIDC_DBG,
-					"Event release buf ref received in invalid state - discard\n");
-			goto err_bad_event;
-		}
-
-		/*
-		 * Get the buffer_info entry for the
-		 * device address.
-		 */
-		binfo = device_to_uvaddr(&inst->registeredbufs,
-				event_notify->packet_buffer);
-		if (!binfo) {
-			dprintk(VIDC_ERR,
-					"%s buffer not found in registered list\n",
-					__func__);
-			goto err_bad_event;
-		}
-
-		/* Fill event data to be sent to client*/
-		buf_event.type = V4L2_EVENT_RELEASE_BUFFER_REFERENCE;
-		ptr = (u32 *)buf_event.u.data;
-		ptr[0] = binfo->fd[0];
-		ptr[1] = binfo->buff_off[0];
+		u32 planes[VIDEO_MAX_PLANES] = {0};
 
 		dprintk(VIDC_DBG,
-				"RELEASE REFERENCE EVENT FROM F/W - fd = %d offset = %d\n",
-				ptr[0], ptr[1]);
+			"%s: inst: %pK data_buffer: %x extradata_buffer: %x\n",
+			__func__, inst, event_notify->packet_buffer,
+			event_notify->extra_data_buffer);
 
-		/* Decrement buffer reference count*/
-		mutex_lock(&inst->registeredbufs.lock);
-		list_for_each_entry(temp, &inst->registeredbufs.list,
-				list) {
-			if (temp == binfo) {
-				buf_ref_put(inst, binfo);
-				break;
-			}
-		}
+		planes[0] = event_notify->packet_buffer;
+		planes[1] = event_notify->extra_data_buffer;
+		handle_release_buffer_reference(inst, planes);
 
-		/*
-		 * Release buffer and remove from list
-		 * if reference goes to zero.
-		 */
-		if (unmap_and_deregister_buf(inst, binfo))
-			dprintk(VIDC_ERR,
-					"%s: buffer unmap failed\n", __func__);
-		mutex_unlock(&inst->registeredbufs.lock);
-
-		/*send event to client*/
-		v4l2_event_queue_fh(&inst->event_handler, &buf_event);
 		goto err_bad_event;
 	}
 	default:
@@ -1594,6 +1568,46 @@
 	inst->in_reconfig = true;
 	inst->reconfig_height = event_notify->height;
 	inst->reconfig_width = event_notify->width;
+
+	if (msm_comm_get_stream_output_mode(inst) ==
+			HAL_VIDEO_DECODER_SECONDARY) {
+
+		bufreq = get_buff_req_buffer(inst,
+				HAL_BUFFER_OUTPUT);
+		if (!bufreq) {
+			dprintk(VIDC_ERR,
+				"Failed : No buffer requirements : %x\n",
+					HAL_BUFFER_OUTPUT);
+			return;
+		}
+
+		bufreq->buffer_count_min = event_notify->capture_buf_count;
+
+		bufreq = get_buff_req_buffer(inst,
+				HAL_BUFFER_OUTPUT2);
+		if (!bufreq) {
+			dprintk(VIDC_ERR,
+				"Failed : No buffer requirements : %x\n",
+					HAL_BUFFER_OUTPUT2);
+			return;
+		}
+
+		bufreq->buffer_count_min = event_notify->capture_buf_count;
+	} else {
+
+		bufreq = get_buff_req_buffer(inst,
+				HAL_BUFFER_OUTPUT);
+		if (!bufreq) {
+			dprintk(VIDC_ERR,
+				"Failed : No buffer requirements : %x\n",
+					HAL_BUFFER_OUTPUT);
+			return;
+		}
+		bufreq->buffer_count_min = event_notify->capture_buf_count;
+
+	}
+
+	msm_vidc_update_host_buff_counts(inst);
 	mutex_unlock(&inst->lock);
 
 	if (event == V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT) {
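
On a reconfig event the firmware's new minimum capture count overwrites the cached buffer requirements, for both output stages when the decoder runs in split (secondary) mode. A compressed sketch of that selection with stand-in types:

#include <stdio.h>

enum buf { BUF_OUTPUT, BUF_OUTPUT2, BUF_MAX };

struct bufreq_sketch { unsigned int count_min; };

static struct bufreq_sketch reqs[BUF_MAX];

static void on_reconfig(unsigned int fw_min, int split_mode)
{
	reqs[BUF_OUTPUT].count_min = fw_min;
	if (split_mode)	/* HAL_VIDEO_DECODER_SECONDARY */
		reqs[BUF_OUTPUT2].count_min = fw_min;
}

int main(void)
{
	on_reconfig(8, 1);
	printf("OUTPUT=%u OUTPUT2=%u\n",
	       reqs[BUF_OUTPUT].count_min, reqs[BUF_OUTPUT2].count_min);
	return 0;
}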
@@ -1604,8 +1618,6 @@
 				"event_notify->height = %d event_notify->width = %d\n",
 				event_notify->height,
 				event_notify->width);
-		inst->prop.height[OUTPUT_PORT] = event_notify->height;
-		inst->prop.width[OUTPUT_PORT] = event_notify->width;
 	}
 
 	rc = msm_vidc_check_session_supported(inst);
@@ -1776,8 +1788,8 @@
 	list_for_each_entry(binfo, &inst->outputbufs.list, list) {
 		if (binfo->buffer_ownership != DRIVER) {
 			dprintk(VIDC_DBG,
-				"This buffer is with FW %pa\n",
-				&binfo->handle->device_addr);
+				"This buffer is with FW %x\n",
+				binfo->smem.device_addr);
 			continue;
 		}
 		buffers_owned_by_driver++;
@@ -1797,7 +1809,6 @@
 {
 	struct internal_buf *binfo;
 	struct hfi_device *hdev;
-	struct msm_smem *handle;
 	struct vidc_frame_data frame_data = {0};
 	struct hal_buffer_requirements *output_buf, *extra_buf;
 	int rc = 0;
@@ -1827,13 +1838,12 @@
 	list_for_each_entry(binfo, &inst->outputbufs.list, list) {
 		if (binfo->buffer_ownership != DRIVER)
 			continue;
-		handle = binfo->handle;
 		frame_data.alloc_len = output_buf->buffer_size;
 		frame_data.filled_len = 0;
 		frame_data.offset = 0;
-		frame_data.device_addr = handle->device_addr;
+		frame_data.device_addr = binfo->smem.device_addr;
 		frame_data.flags = 0;
-		frame_data.extradata_addr = handle->device_addr +
+		frame_data.extradata_addr = binfo->smem.device_addr +
 		output_buf->buffer_size;
 		frame_data.buffer_type = HAL_BUFFER_OUTPUT;
 		frame_data.extradata_size = extra_buf ?
@@ -1884,7 +1894,7 @@
 			}
 		}
 	}
-	atomic_dec(&inst->in_flush);
+	inst->in_flush = false;
 	flush_event.type = V4L2_EVENT_MSM_VIDC_FLUSH_DONE;
 	ptr = (u32 *)flush_event.u.data;
 
@@ -2107,82 +2117,84 @@
 	put_inst(inst);
 }
 
-static struct vb2_buffer *get_vb_from_device_addr(struct buf_queue *bufq,
-		unsigned long dev_addr)
+struct vb2_buffer *msm_comm_get_vb_using_vidc_buffer(
+		struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf)
 {
+	u32 port = 0;
 	struct vb2_buffer *vb = NULL;
 	struct vb2_queue *q = NULL;
-	int found = 0;
+	bool found = false;
 
-	if (!bufq) {
-		dprintk(VIDC_ERR, "Invalid parameter\n");
+	if (mbuf->vvb.vb2_buf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		port = CAPTURE_PORT;
+	} else if (mbuf->vvb.vb2_buf.type ==
+			V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		port = OUTPUT_PORT;
+	} else {
+		dprintk(VIDC_ERR, "%s: invalid type %d\n",
+			__func__, mbuf->vvb.vb2_buf.type);
 		return NULL;
 	}
-	q = &bufq->vb2_bufq;
-	mutex_lock(&bufq->lock);
+
+	q = &inst->bufq[port].vb2_bufq;
+	mutex_lock(&inst->bufq[port].lock);
+	found = false;
 	list_for_each_entry(vb, &q->queued_list, queued_entry) {
-		if (vb->planes[0].m.userptr == dev_addr &&
-			vb->state == VB2_BUF_STATE_ACTIVE) {
-			found = 1;
-			dprintk(VIDC_DBG, "Found v4l2_buf index : %d\n",
-					vb->index);
+		if (msm_comm_compare_vb2_planes(inst, mbuf, vb)) {
+			found = true;
 			break;
 		}
 	}
-	mutex_unlock(&bufq->lock);
+	mutex_unlock(&inst->bufq[port].lock);
 	if (!found) {
-		dprintk(VIDC_DBG,
-			"Failed to find buffer in queued list: %#lx, qtype = %d\n",
-			dev_addr, q->type);
-		vb = NULL;
+		print_vidc_buffer(VIDC_ERR, "vb2 not found for", inst, mbuf);
+		return NULL;
 	}
+
 	return vb;
 }
 
-static void handle_dynamic_buffer(struct msm_vidc_inst *inst,
-		ion_phys_addr_t device_addr, u32 flags)
+int msm_comm_vb2_buffer_done(struct msm_vidc_inst *inst,
+		struct vb2_buffer *vb)
 {
-	struct buffer_info *binfo = NULL, *temp = NULL;
+	u32 port;
 
-	/*
-	 * Update reference count and release OR queue back the buffer,
-	 * only when firmware is not holding a reference.
-	 */
-	binfo = device_to_uvaddr(&inst->registeredbufs, device_addr);
-	if (!binfo) {
-		dprintk(VIDC_ERR,
-			"%s buffer not found in registered list\n",
-			__func__);
-		return;
+	if (!inst || !vb) {
+		dprintk(VIDC_ERR, "%s: invalid params %pK %pK\n",
+			__func__, inst, vb);
+		return -EINVAL;
 	}
-	if (flags & HAL_BUFFERFLAG_READONLY) {
-		dprintk(VIDC_DBG,
-			"FBD fd[0] = %d -> Reference with f/w, addr: %pa\n",
-			binfo->fd[0], &device_addr);
+
+	if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		port = CAPTURE_PORT;
+	} else if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		port = OUTPUT_PORT;
 	} else {
-		dprintk(VIDC_DBG,
-			"FBD fd[0] = %d -> FBD_ref_released, addr: %pa\n",
-			binfo->fd[0], &device_addr);
-
-		mutex_lock(&inst->registeredbufs.lock);
-		list_for_each_entry(temp, &inst->registeredbufs.list,
-				list) {
-			if (temp == binfo) {
-				buf_ref_put(inst, binfo);
-				break;
-			}
-		}
-		mutex_unlock(&inst->registeredbufs.lock);
+		dprintk(VIDC_ERR, "%s: invalid type %d\n",
+			__func__, vb->type);
+		return -EINVAL;
 	}
+	msm_vidc_debugfs_update(inst, port == CAPTURE_PORT ?
+			MSM_VIDC_DEBUGFS_EVENT_FBD :
+			MSM_VIDC_DEBUGFS_EVENT_EBD);
+
+	mutex_lock(&inst->bufq[port].lock);
+	vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+	mutex_unlock(&inst->bufq[port].lock);
+
+	return 0;
 }
 
 static void handle_ebd(enum hal_command_response cmd, void *data)
 {
 	struct msm_vidc_cb_data_done *response = data;
+	struct msm_vidc_buffer *mbuf;
 	struct vb2_buffer *vb;
 	struct msm_vidc_inst *inst;
 	struct vidc_hal_ebd *empty_buf_done;
-	struct vb2_v4l2_buffer *vbuf = NULL;
+	struct vb2_v4l2_buffer *vbuf;
+	u32 planes[VIDEO_MAX_PLANES] = {0};
+	u32 extra_idx = 0, i;
 
 	if (!response) {
 		dprintk(VIDC_ERR, "Invalid response from vidc_hal\n");
@@ -2195,137 +2207,79 @@
 		dprintk(VIDC_WARN, "Got a response for an inactive session\n");
 		return;
 	}
-	if (inst->buffer_mode_set[OUTPUT_PORT] == HAL_BUFFER_MODE_DYNAMIC)
-		handle_dynamic_buffer(inst,
-			response->input_done.packet_buffer, 0);
 
-	vb = get_vb_from_device_addr(&inst->bufq[OUTPUT_PORT],
-			response->input_done.packet_buffer);
+	empty_buf_done = (struct vidc_hal_ebd *)&response->input_done;
+	planes[0] = empty_buf_done->packet_buffer;
+	planes[1] = empty_buf_done->extra_data_buffer;
+
+	mbuf = msm_comm_get_buffer_using_device_planes(inst, planes);
+	if (!mbuf) {
+		dprintk(VIDC_ERR,
+			"%s: data_addr %x, extradata_addr %x not found\n",
+			__func__, planes[0], planes[1]);
+		goto exit;
+	}
+	vb = &mbuf->vvb.vb2_buf;
+
+	vb->planes[0].bytesused = response->input_done.filled_len;
+	if (vb->planes[0].bytesused > vb->planes[0].length)
+		dprintk(VIDC_INFO, "bytesused overflow length\n");
+
+	if (empty_buf_done->status == VIDC_ERR_NOT_SUPPORTED) {
+		dprintk(VIDC_INFO, "Failed : Unsupported input stream\n");
+		mbuf->vvb.flags |= V4L2_QCOM_BUF_INPUT_UNSUPPORTED;
+	}
+	if (empty_buf_done->status == VIDC_ERR_BITSTREAM_ERR) {
+		dprintk(VIDC_INFO, "Failed : Corrupted input stream\n");
+		mbuf->vvb.flags |= V4L2_QCOM_BUF_DATA_CORRUPT;
+	}
+	if (empty_buf_done->flags & HAL_BUFFERFLAG_SYNCFRAME)
+		mbuf->vvb.flags |= V4L2_QCOM_BUF_FLAG_IDRFRAME |
+			V4L2_BUF_FLAG_KEYFRAME;
+
+	extra_idx = EXTRADATA_IDX(inst->bufq[OUTPUT_PORT].num_planes);
+	if (extra_idx && extra_idx < VIDEO_MAX_PLANES)
+		vb->planes[extra_idx].bytesused = vb->planes[extra_idx].length;
+
+	update_recon_stats(inst, &empty_buf_done->recon_stats);
+	msm_vidc_clear_freq_entry(inst, mbuf->smem[0].device_addr);
+
+	vb = msm_comm_get_vb_using_vidc_buffer(inst, mbuf);
 	if (vb) {
 		vbuf = to_vb2_v4l2_buffer(vb);
-		vb->planes[0].bytesused = response->input_done.filled_len;
-		vb->planes[0].data_offset = response->input_done.offset;
-		if (vb->planes[0].data_offset > vb->planes[0].length)
-			dprintk(VIDC_INFO, "data_offset overflow length\n");
-		if (vb->planes[0].bytesused > vb->planes[0].length)
-			dprintk(VIDC_INFO, "bytesused overflow length\n");
-		if (vb->planes[0].m.userptr !=
-			response->clnt_data)
-			dprintk(VIDC_INFO, "Client data != bufaddr\n");
-		empty_buf_done = (struct vidc_hal_ebd *)&response->input_done;
-		if (empty_buf_done) {
-			if (empty_buf_done->status == VIDC_ERR_NOT_SUPPORTED) {
-				dprintk(VIDC_INFO,
-					"Failed : Unsupported input stream\n");
-				vbuf->flags |=
-					V4L2_QCOM_BUF_INPUT_UNSUPPORTED;
-			}
-			if (empty_buf_done->status == VIDC_ERR_BITSTREAM_ERR) {
-				dprintk(VIDC_INFO,
-					"Failed : Corrupted input stream\n");
-				vbuf->flags |=
-					V4L2_QCOM_BUF_DATA_CORRUPT;
-			}
-			if (empty_buf_done->flags & HAL_BUFFERFLAG_SYNCFRAME)
-				vbuf->flags |=
-					V4L2_QCOM_BUF_FLAG_IDRFRAME |
-					V4L2_BUF_FLAG_KEYFRAME;
-		}
-		dprintk(VIDC_DBG,
-			"Got ebd from hal: device_addr: %pa, alloc: %d, status: %#x, pic_type: %#x, flags: %#x\n",
-			&empty_buf_done->packet_buffer,
-			empty_buf_done->alloc_len, empty_buf_done->status,
-			empty_buf_done->picture_type, empty_buf_done->flags);
-
-		msm_vidc_clear_freq_entry(inst, empty_buf_done->packet_buffer);
-
-		mutex_lock(&inst->bufq[OUTPUT_PORT].lock);
-		vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
-		mutex_unlock(&inst->bufq[OUTPUT_PORT].lock);
-		msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_EBD);
+		vbuf->flags |= mbuf->vvb.flags;
+		for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++)
+			vb->planes[i].bytesused =
+				mbuf->vvb.vb2_buf.planes[i].bytesused;
 	}
+	/*
+	 * put_buffer should be done before vb2_buffer_done, else the
+	 * client might queue the same buffer before it is unmapped in
+	 * put_buffer. Also, don't use mbuf after put_buffer, as it may
+	 * be freed there.
+	 */
+	msm_comm_put_vidc_buffer(inst, mbuf);
+	msm_comm_vb2_buffer_done(inst, vb);
 
+exit:
 	put_inst(inst);
 }
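
The ordering insisted on in the comment above can be sketched sequentially: the tracking struct is unmapped and possibly freed before vb2 wakes the client, so a racing re-queue never observes a half-torn-down buffer. All names below are illustrative:

#include <stdio.h>
#include <stdlib.h>

struct tracked_buf { int mapped; };

static void put_buffer(struct tracked_buf **mbuf)
{
	(*mbuf)->mapped = 0;	/* unmap */
	free(*mbuf);		/* tracking struct may be freed here */
	*mbuf = NULL;		/* never touch mbuf after this point */
}

static void buffer_done(void)
{
	/* after this, the client may immediately re-queue the buffer */
	printf("vb2_buffer_done -> client wakes up\n");
}

int main(void)
{
	struct tracked_buf *mbuf = calloc(1, sizeof(*mbuf));

	if (!mbuf)
		return 1;
	mbuf->mapped = 1;
	put_buffer(&mbuf);	/* 1) unmap and drop tracking first */
	buffer_done();		/* 2) only then hand the buffer back */
	return 0;
}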
 
-int buf_ref_get(struct msm_vidc_inst *inst, struct buffer_info *binfo)
-{
-	int cnt = 0;
-
-	if (!inst || !binfo)
-		return -EINVAL;
-
-	atomic_inc(&binfo->ref_count);
-	cnt = atomic_read(&binfo->ref_count);
-	if (cnt >= 2)
-		inst->buffers_held_in_driver++;
-
-	dprintk(VIDC_DBG, "REF_GET[%d] fd[0] = %d\n", cnt, binfo->fd[0]);
-
-	return cnt;
-}
-
-int buf_ref_put(struct msm_vidc_inst *inst, struct buffer_info *binfo)
-{
-	int rc = 0;
-	int cnt;
-	bool release_buf = false;
-	bool qbuf_again = false;
-
-	if (!inst || !binfo)
-		return -EINVAL;
-
-	atomic_dec(&binfo->ref_count);
-	cnt = atomic_read(&binfo->ref_count);
-	dprintk(VIDC_DBG, "REF_PUT[%d] fd[0] = %d\n", cnt, binfo->fd[0]);
-	if (!cnt)
-		release_buf = true;
-	else if (cnt >= 1)
-		qbuf_again = true;
-	else {
-		dprintk(VIDC_DBG, "%s: invalid ref_cnt: %d\n", __func__, cnt);
-		cnt = -EINVAL;
-	}
-
-	if (cnt < 0)
-		return cnt;
-
-	if (release_buf) {
-		/*
-		 * We can not delete binfo here as we need to set the user
-		 * virtual address saved in binfo->uvaddr to the dequeued v4l2
-		 * buffer.
-		 *
-		 * We will set the pending_deletion flag to true here and delete
-		 * binfo from registered list in dqbuf after setting the uvaddr.
-		 */
-		dprintk(VIDC_DBG, "fd[0] = %d -> pending_deletion = true\n",
-			binfo->fd[0]);
-		binfo->pending_deletion = true;
-	} else if (qbuf_again) {
-		inst->buffers_held_in_driver--;
-		rc = qbuf_dynamic_buf(inst, binfo);
-		if (!rc)
-			return rc;
-	}
-	return cnt;
-}
-
 static int handle_multi_stream_buffers(struct msm_vidc_inst *inst,
-		ion_phys_addr_t dev_addr)
+		u32 dev_addr)
 {
 	struct internal_buf *binfo;
-	struct msm_smem *handle;
+	struct msm_smem *smem;
 	bool found = false;
 
 	mutex_lock(&inst->outputbufs.lock);
 	list_for_each_entry(binfo, &inst->outputbufs.list, list) {
-		handle = binfo->handle;
-		if (handle && dev_addr == handle->device_addr) {
+		smem = &binfo->smem;
+		if (smem && dev_addr == smem->device_addr) {
 			if (binfo->buffer_ownership == DRIVER) {
 				dprintk(VIDC_ERR,
-					"FW returned same buffer: %pa\n",
-					&dev_addr);
+					"FW returned same buffer: %x\n",
+					dev_addr);
 				break;
 			}
 			binfo->buffer_ownership = DRIVER;
@@ -2337,8 +2291,8 @@
 
 	if (!found) {
 		dprintk(VIDC_ERR,
-			"Failed to find output buffer in queued list: %pa\n",
-			&dev_addr);
+			"Failed to find output buffer in queued list: %x\n",
+			dev_addr);
 	}
 
 	return 0;
@@ -2356,13 +2310,15 @@
 static void handle_fbd(enum hal_command_response cmd, void *data)
 {
 	struct msm_vidc_cb_data_done *response = data;
+	struct msm_vidc_buffer *mbuf;
 	struct msm_vidc_inst *inst;
 	struct vb2_buffer *vb = NULL;
 	struct vidc_hal_fbd *fill_buf_done;
+	struct vb2_v4l2_buffer *vbuf;
 	enum hal_buffer buffer_type;
-	int extra_idx = 0;
 	u64 time_usec = 0;
-	struct vb2_v4l2_buffer *vbuf = NULL;
+	u32 planes[VIDEO_MAX_PLANES] = {0};
+	u32 extra_idx, i;
 
 	if (!response) {
 		dprintk(VIDC_ERR, "Invalid response from vidc_hal\n");
@@ -2377,132 +2333,117 @@
 	}
 
 	fill_buf_done = (struct vidc_hal_fbd *)&response->output_done;
+	planes[0] = fill_buf_done->packet_buffer1;
+	planes[1] = fill_buf_done->extra_data_buffer;
+
 	buffer_type = msm_comm_get_hal_output_buffer(inst);
 	if (fill_buf_done->buffer_type == buffer_type) {
-		vb = get_vb_from_device_addr(&inst->bufq[CAPTURE_PORT],
-				fill_buf_done->packet_buffer1);
+		mbuf = msm_comm_get_buffer_using_device_planes(inst, planes);
+		if (!mbuf) {
+			dprintk(VIDC_ERR,
+				"%s: data_addr %x, extradata_addr %x not found\n",
+				__func__, planes[0], planes[1]);
+			goto exit;
+		}
 	} else {
 		if (handle_multi_stream_buffers(inst,
 				fill_buf_done->packet_buffer1))
 			dprintk(VIDC_ERR,
 				"Failed : Output buffer not found %pa\n",
 				&fill_buf_done->packet_buffer1);
-		goto err_handle_fbd;
+		goto exit;
+	}
+	vb = &mbuf->vvb.vb2_buf;
+
+	if (fill_buf_done->flags1 & HAL_BUFFERFLAG_DROP_FRAME ||
+		fill_buf_done->flags1 & HAL_BUFFERFLAG_DECODEONLY)
+		fill_buf_done->filled_len1 = 0;
+	vb->planes[0].bytesused = fill_buf_done->filled_len1;
+	if (vb->planes[0].bytesused > vb->planes[0].length)
+		dprintk(VIDC_INFO,
+			"fbd:Overflow bytesused = %d; length = %d\n",
+			vb->planes[0].bytesused,
+			vb->planes[0].length);
+	if (vb->planes[0].data_offset != fill_buf_done->offset1)
+		dprintk(VIDC_ERR, "%s: data_offset %d vs %d\n",
+			__func__, vb->planes[0].data_offset,
+			fill_buf_done->offset1);
+	if (!(fill_buf_done->flags1 & HAL_BUFFERFLAG_TIMESTAMPINVALID)) {
+		time_usec = fill_buf_done->timestamp_hi;
+		time_usec = (time_usec << 32) | fill_buf_done->timestamp_lo;
+	} else {
+		time_usec = 0;
+		dprintk(VIDC_DBG,
+				"Set zero timestamp for buffer %pa, filled: %d, (hi:%u, lo:%u)\n",
+				&fill_buf_done->packet_buffer1,
+				fill_buf_done->filled_len1,
+				fill_buf_done->timestamp_hi,
+				fill_buf_done->timestamp_lo);
+	}
+	vb->timestamp = (time_usec * NSEC_PER_USEC);
+
+	extra_idx = EXTRADATA_IDX(inst->bufq[CAPTURE_PORT].num_planes);
+	if (extra_idx && extra_idx < VIDEO_MAX_PLANES)
+		vb->planes[extra_idx].bytesused = vb->planes[extra_idx].length;
+
+	mbuf->vvb.flags = 0;
+	if (fill_buf_done->flags1 & HAL_BUFFERFLAG_READONLY)
+		mbuf->vvb.flags |= V4L2_QCOM_BUF_FLAG_READONLY;
+	if (fill_buf_done->flags1 & HAL_BUFFERFLAG_EOS)
+		mbuf->vvb.flags |= V4L2_QCOM_BUF_FLAG_EOS;
+	if (fill_buf_done->flags1 & HAL_BUFFERFLAG_CODECCONFIG)
+		mbuf->vvb.flags |= V4L2_QCOM_BUF_FLAG_CODECCONFIG;
+	if (fill_buf_done->flags1 & HAL_BUFFERFLAG_SYNCFRAME)
+		mbuf->vvb.flags |= V4L2_QCOM_BUF_FLAG_IDRFRAME;
+	if (fill_buf_done->flags1 & HAL_BUFFERFLAG_EOSEQ)
+		mbuf->vvb.flags |= V4L2_QCOM_BUF_FLAG_EOSEQ;
+	if (fill_buf_done->flags1 & HAL_BUFFERFLAG_DECODEONLY ||
+		fill_buf_done->flags1 & HAL_BUFFERFLAG_DROP_FRAME)
+		mbuf->vvb.flags |= V4L2_QCOM_BUF_FLAG_DECODEONLY;
+	if (fill_buf_done->flags1 & HAL_BUFFERFLAG_DATACORRUPT)
+		mbuf->vvb.flags |= V4L2_QCOM_BUF_DATA_CORRUPT;
+	switch (fill_buf_done->picture_type) {
+	case HAL_PICTURE_IDR:
+		mbuf->vvb.flags |= V4L2_QCOM_BUF_FLAG_IDRFRAME;
+		mbuf->vvb.flags |= V4L2_BUF_FLAG_KEYFRAME;
+		break;
+	case HAL_PICTURE_I:
+		mbuf->vvb.flags |= V4L2_BUF_FLAG_KEYFRAME;
+		break;
+	case HAL_PICTURE_P:
+		mbuf->vvb.flags |= V4L2_BUF_FLAG_PFRAME;
+		break;
+	case HAL_PICTURE_B:
+		mbuf->vvb.flags |= V4L2_BUF_FLAG_BFRAME;
+		break;
+	case HAL_FRAME_NOTCODED:
+	case HAL_UNUSED_PICT:
+		/* Do we need to care about these? */
+	case HAL_FRAME_YUV:
+		break;
+	default:
+		break;
 	}
 
+	vb = msm_comm_get_vb_using_vidc_buffer(inst, mbuf);
 	if (vb) {
 		vbuf = to_vb2_v4l2_buffer(vb);
-		if (fill_buf_done->flags1 & HAL_BUFFERFLAG_DROP_FRAME ||
-			fill_buf_done->flags1 & HAL_BUFFERFLAG_DECODEONLY)
-			fill_buf_done->filled_len1 = 0;
-		vb->planes[0].bytesused = fill_buf_done->filled_len1;
-		vb->planes[0].data_offset = fill_buf_done->offset1;
-		if (vb->planes[0].data_offset > vb->planes[0].length)
-			dprintk(VIDC_INFO,
-				"fbd:Overflow data_offset = %d; length = %d\n",
-				vb->planes[0].data_offset,
-				vb->planes[0].length);
-		if (vb->planes[0].bytesused > vb->planes[0].length)
-			dprintk(VIDC_INFO,
-				"fbd:Overflow bytesused = %d; length = %d\n",
-				vb->planes[0].bytesused,
-				vb->planes[0].length);
-		if (!(fill_buf_done->flags1 &
-			HAL_BUFFERFLAG_TIMESTAMPINVALID)) {
-			time_usec = fill_buf_done->timestamp_hi;
-			time_usec = (time_usec << 32) |
-				fill_buf_done->timestamp_lo;
-		} else {
-			time_usec = 0;
-			dprintk(VIDC_DBG,
-					"Set zero timestamp for buffer %pa, filled: %d, (hi:%u, lo:%u)\n",
-					&fill_buf_done->packet_buffer1,
-					fill_buf_done->filled_len1,
-					fill_buf_done->timestamp_hi,
-					fill_buf_done->timestamp_lo);
-		}
-		vbuf->flags = 0;
-		vb->timestamp = (time_usec * NSEC_PER_USEC);
-
-		extra_idx =
-			EXTRADATA_IDX(inst->bufq[CAPTURE_PORT].num_planes);
-		if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
-			vb->planes[extra_idx].m.userptr =
-				(unsigned long)fill_buf_done->extra_data_buffer;
-			vb->planes[extra_idx].bytesused =
-				vb->planes[extra_idx].length;
-			vb->planes[extra_idx].data_offset = 0;
-		}
-
-		if (inst->buffer_mode_set[CAPTURE_PORT] ==
-			HAL_BUFFER_MODE_DYNAMIC)
-		handle_dynamic_buffer(inst, fill_buf_done->packet_buffer1,
-					fill_buf_done->flags1);
-		if (fill_buf_done->flags1 & HAL_BUFFERFLAG_READONLY)
-			vbuf->flags |= V4L2_QCOM_BUF_FLAG_READONLY;
-		if (fill_buf_done->flags1 & HAL_BUFFERFLAG_EOS)
-			vbuf->flags |= V4L2_QCOM_BUF_FLAG_EOS;
-		if (fill_buf_done->flags1 & HAL_BUFFERFLAG_CODECCONFIG)
-			vbuf->flags |= V4L2_QCOM_BUF_FLAG_CODECCONFIG;
-		if (fill_buf_done->flags1 & HAL_BUFFERFLAG_SYNCFRAME)
-			vbuf->flags |= V4L2_QCOM_BUF_FLAG_IDRFRAME;
-		if (fill_buf_done->flags1 & HAL_BUFFERFLAG_EOSEQ)
-			vbuf->flags |= V4L2_QCOM_BUF_FLAG_EOSEQ;
-		if (fill_buf_done->flags1 & HAL_BUFFERFLAG_DECODEONLY ||
-			fill_buf_done->flags1 & HAL_BUFFERFLAG_DROP_FRAME)
-			vbuf->flags |= V4L2_QCOM_BUF_FLAG_DECODEONLY;
-		if (fill_buf_done->flags1 & HAL_BUFFERFLAG_DATACORRUPT)
-			vbuf->flags |= V4L2_QCOM_BUF_DATA_CORRUPT;
-
-		switch (fill_buf_done->picture_type) {
-		case HAL_PICTURE_IDR:
-			vbuf->flags |= V4L2_QCOM_BUF_FLAG_IDRFRAME;
-			vbuf->flags |= V4L2_BUF_FLAG_KEYFRAME;
-			break;
-		case HAL_PICTURE_I:
-			vbuf->flags |= V4L2_BUF_FLAG_KEYFRAME;
-			break;
-		case HAL_PICTURE_P:
-			vbuf->flags |= V4L2_BUF_FLAG_PFRAME;
-			break;
-		case HAL_PICTURE_B:
-			vbuf->flags |= V4L2_BUF_FLAG_BFRAME;
-			break;
-		case HAL_FRAME_NOTCODED:
-		case HAL_UNUSED_PICT:
-			/* Do we need to care about these? */
-		case HAL_FRAME_YUV:
-			break;
-		default:
-			break;
-		}
-
-		inst->count.fbd++;
-
-		if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
-			dprintk(VIDC_DBG,
-				"extradata: userptr = %pK;"
-				" bytesused = %d; length = %d\n",
-				(u8 *)vb->planes[extra_idx].m.userptr,
-				vb->planes[extra_idx].bytesused,
-				vb->planes[extra_idx].length);
-		}
-		dprintk(VIDC_DBG,
-		"Got fbd from hal: device_addr: %pa, alloc: %d, filled: %d, offset: %d, ts: %lld, flags: %#x, crop: %d %d %d %d, pic_type: %#x, mark_data: %#x\n",
-		&fill_buf_done->packet_buffer1, fill_buf_done->alloc_len1,
-		fill_buf_done->filled_len1, fill_buf_done->offset1, time_usec,
-		fill_buf_done->flags1, fill_buf_done->start_x_coord,
-		fill_buf_done->start_y_coord, fill_buf_done->frame_width,
-		fill_buf_done->frame_height, fill_buf_done->picture_type,
-		fill_buf_done->mark_data);
-
-		mutex_lock(&inst->bufq[CAPTURE_PORT].lock);
-		vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
-		mutex_unlock(&inst->bufq[CAPTURE_PORT].lock);
-		msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_FBD);
+		vbuf->flags = mbuf->vvb.flags;
+		vb->timestamp = mbuf->vvb.vb2_buf.timestamp;
+		for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++)
+			vb->planes[i].bytesused =
+				mbuf->vvb.vb2_buf.planes[i].bytesused;
 	}
+	/*
+	 * put_buffer should be done before vb2_buffer_done, else the
+	 * client might queue the same buffer before it is unmapped in
+	 * put_buffer. Also, don't use mbuf after put_buffer, as it may
+	 * be freed there.
+	 */
+	msm_comm_put_vidc_buffer(inst, mbuf);
+	msm_comm_vb2_buffer_done(inst, vb);
 
-err_handle_fbd:
+exit:
 	put_inst(inst);
 }
 
@@ -2647,7 +2588,8 @@
 	}
 	rc = wait_for_completion_timeout(
 			&inst->completions[abort_completion],
-			msecs_to_jiffies(msm_vidc_hw_rsp_timeout));
+			msecs_to_jiffies(
+				inst->core->resources.msm_vidc_hw_rsp_timeout));
 	if (!rc) {
 		dprintk(VIDC_ERR,
 				"%s: Wait interrupted or timed out [%pK]: %d\n",
@@ -2739,7 +2681,7 @@
 	hdev = (struct hfi_device *)core->device;
 	rc = wait_for_completion_timeout(
 		&core->completions[SYS_MSG_INDEX(HAL_SYS_INIT_DONE)],
-		msecs_to_jiffies(msm_vidc_hw_rsp_timeout));
+		msecs_to_jiffies(core->resources.msm_vidc_hw_rsp_timeout));
 	if (!rc) {
 		dprintk(VIDC_ERR, "%s: Wait interrupted or timed out: %d\n",
 				__func__, SYS_MSG_INDEX(HAL_SYS_INIT_DONE));
@@ -2878,11 +2820,12 @@
 		 */
 		schedule_delayed_work(&core->fw_unload_work,
 			msecs_to_jiffies(core->state == VIDC_CORE_INVALID ?
-					0 : msm_vidc_firmware_unload_delay));
+					0 :
+			core->resources.msm_vidc_firmware_unload_delay));
 
 		dprintk(VIDC_DBG, "firmware unload delayed by %u ms\n",
 			core->state == VIDC_CORE_INVALID ?
-			0 : msm_vidc_firmware_unload_delay);
+			0 : core->resources.msm_vidc_firmware_unload_delay);
 	}
 
 core_already_uninited:
@@ -3236,7 +3179,6 @@
 	enum hal_buffer buffer_type)
 {
 	int rc = 0;
-	struct msm_smem *handle;
 	struct internal_buf *binfo;
 	u32 smem_flags = 0, buffer_size;
 	struct hal_buffer_requirements *output_buf, *extradata_buf;
@@ -3284,33 +3226,30 @@
 	if (output_buf->buffer_size) {
 		for (i = 0; i < output_buf->buffer_count_actual;
 				i++) {
-			handle = msm_comm_smem_alloc(inst,
-					buffer_size, 1, smem_flags,
-					buffer_type, 0);
-			if (!handle) {
-				dprintk(VIDC_ERR,
-					"Failed to allocate output memory\n");
-				rc = -ENOMEM;
-				goto err_no_mem;
-			}
-			rc = msm_comm_smem_cache_operations(inst,
-					handle, SMEM_CACHE_CLEAN);
-			if (rc) {
-				dprintk(VIDC_WARN,
-					"Failed to clean cache may cause undefined behavior\n");
-			}
 			binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
 			if (!binfo) {
 				dprintk(VIDC_ERR, "Out of memory\n");
 				rc = -ENOMEM;
 				goto fail_kzalloc;
 			}
-
-			binfo->handle = handle;
+			rc = msm_comm_smem_alloc(inst,
+					buffer_size, 1, smem_flags,
+					buffer_type, 0, &binfo->smem);
+			if (rc) {
+				dprintk(VIDC_ERR,
+					"Failed to allocate output memory\n");
+				goto err_no_mem;
+			}
+			rc = msm_comm_smem_cache_operations(inst,
+					&binfo->smem, SMEM_CACHE_CLEAN);
+			if (rc) {
+				dprintk(VIDC_WARN,
+					"Failed to clean cache may cause undefined behavior\n");
+			}
 			binfo->buffer_type = buffer_type;
 			binfo->buffer_ownership = DRIVER;
-			dprintk(VIDC_DBG, "Output buffer address: %pa\n",
-					&handle->device_addr);
+			dprintk(VIDC_DBG, "Output buffer address: %#x\n",
+					binfo->smem.device_addr);
 
 			if (inst->buffer_mode_set[CAPTURE_PORT] ==
 				HAL_BUFFER_MODE_STATIC) {
@@ -3321,9 +3260,9 @@
 				buffer_info.buffer_type = buffer_type;
 				buffer_info.num_buffers = 1;
 				buffer_info.align_device_addr =
-					handle->device_addr;
+					binfo->smem.device_addr;
 				buffer_info.extradata_addr =
-					handle->device_addr +
+					binfo->smem.device_addr +
 					output_buf->buffer_size;
 				if (extradata_buf)
 					buffer_info.extradata_size =
@@ -3346,7 +3285,7 @@
 fail_set_buffers:
 	kfree(binfo);
 fail_kzalloc:
-	msm_comm_smem_free(inst, handle);
+	msm_comm_smem_free(inst, &binfo->smem);
 err_no_mem:
 	return rc;
 }
@@ -3396,10 +3335,10 @@
 	buffer_info.buffer_type = buffer_type;
 	buffer_info.num_buffers = 1;
 	buffer_info.align_device_addr = handle->device_addr;
-	dprintk(VIDC_DBG, "%s %s buffer : %pa\n",
+	dprintk(VIDC_DBG, "%s %s buffer : %x\n",
 				reuse ? "Reusing" : "Allocated",
 				get_buffer_name(buffer_type),
-				&buffer_info.align_device_addr);
+				buffer_info.align_device_addr);
 
 	rc = call_hfi_op(hdev, session_set_buffers,
 		(void *) inst->session, &buffer_info);
@@ -3425,11 +3364,6 @@
 
 	mutex_lock(&buf_list->lock);
 	list_for_each_entry(buf, &buf_list->list, list) {
-		if (!buf->handle) {
-			reused = false;
-			break;
-		}
-
 		if (buf->buffer_type != buffer_type)
 			continue;
 
@@ -3445,7 +3379,7 @@
 			&& buffer_type != HAL_BUFFER_INTERNAL_PERSIST_1) {
 
 			rc = set_internal_buf_on_fw(inst, buffer_type,
-					buf->handle, true);
+					&buf->smem, true);
 			if (rc) {
 				dprintk(VIDC_ERR,
 					"%s: session_set_buffers failed\n",
@@ -3466,7 +3400,6 @@
 			struct hal_buffer_requirements *internal_bufreq,
 			struct msm_vidc_list *buf_list)
 {
-	struct msm_smem *handle;
 	struct internal_buf *binfo;
 	u32 smem_flags = 0;
 	int rc = 0;
@@ -3482,27 +3415,25 @@
 		smem_flags |= SMEM_SECURE;
 
 	for (i = 0; i < internal_bufreq->buffer_count_actual; i++) {
-		handle = msm_comm_smem_alloc(inst, internal_bufreq->buffer_size,
-				1, smem_flags, internal_bufreq->buffer_type, 0);
-		if (!handle) {
-			dprintk(VIDC_ERR,
-				"Failed to allocate scratch memory\n");
-			rc = -ENOMEM;
-			goto err_no_mem;
-		}
-
 		binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
 		if (!binfo) {
 			dprintk(VIDC_ERR, "Out of memory\n");
 			rc = -ENOMEM;
 			goto fail_kzalloc;
 		}
+		rc = msm_comm_smem_alloc(inst, internal_bufreq->buffer_size,
+				1, smem_flags, internal_bufreq->buffer_type,
+				0, &binfo->smem);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"Failed to allocate scratch memory\n");
+			goto err_no_mem;
+		}
 
-		binfo->handle = handle;
 		binfo->buffer_type = internal_bufreq->buffer_type;
 
 		rc = set_internal_buf_on_fw(inst, internal_bufreq->buffer_type,
-				handle, false);
+				&binfo->smem, false);
 		if (rc)
 			goto fail_set_buffers;
 
@@ -3513,10 +3444,10 @@
 	return rc;
 
 fail_set_buffers:
+	msm_comm_smem_free(inst, &binfo->smem);
+err_no_mem:
 	kfree(binfo);
 fail_kzalloc:
-	msm_comm_smem_free(inst, handle);
-err_no_mem:
 	return rc;
 
 }
@@ -3755,25 +3686,32 @@
 }
 
 static void populate_frame_data(struct vidc_frame_data *data,
-		const struct vb2_buffer *vb, struct msm_vidc_inst *inst)
+		struct msm_vidc_buffer *mbuf, struct msm_vidc_inst *inst)
 {
 	u64 time_usec;
 	int extra_idx;
-	enum v4l2_buf_type type = vb->type;
-	enum vidc_ports port = type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
-		OUTPUT_PORT : CAPTURE_PORT;
-	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+	struct vb2_buffer *vb;
+	struct vb2_v4l2_buffer *vbuf;
+
+	if (!inst || !mbuf || !data) {
+		dprintk(VIDC_ERR, "%s: invalid params %pK %pK %pK\n",
+			__func__, inst, mbuf, data);
+		return;
+	}
+
+	vb = &mbuf->vvb.vb2_buf;
+	vbuf = to_vb2_v4l2_buffer(vb);
 
 	time_usec = vb->timestamp;
 	do_div(time_usec, NSEC_PER_USEC);
 
 	data->alloc_len = vb->planes[0].length;
-	data->device_addr = vb->planes[0].m.userptr;
+	data->device_addr = mbuf->smem[0].device_addr;
 	data->timestamp = time_usec;
 	data->flags = 0;
 	data->clnt_data = data->device_addr;
 
-	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+	if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
 		bool pic_decoding_mode = msm_comm_g_ctrl_for_id(inst,
 				V4L2_CID_MPEG_VIDC_VIDEO_PICTYPE_DEC_MODE);
 
@@ -3801,59 +3739,64 @@
 		data->mark_data = data->mark_target =
 			pic_decoding_mode ? 0xdeadbeef : 0;
 
-	} else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+	} else if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
 		data->buffer_type = msm_comm_get_hal_output_buffer(inst);
 	}
 
-	extra_idx = EXTRADATA_IDX(inst->bufq[port].num_planes);
-	if (extra_idx && extra_idx < VIDEO_MAX_PLANES &&
-			vb->planes[extra_idx].m.userptr) {
-		data->extradata_addr = vb->planes[extra_idx].m.userptr;
+	extra_idx = EXTRADATA_IDX(vb->num_planes);
+	if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
+		data->extradata_addr = mbuf->smem[extra_idx].device_addr;
 		data->extradata_size = vb->planes[extra_idx].length;
 		data->flags |= HAL_BUFFERFLAG_EXTRADATA;
 	}
 }
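
populate_frame_data assumes the last plane of a multi-plane buffer carries extradata. A sketch with EXTRADATA_IDX re-derived under that assumption (the real macro lives in the driver headers) and hypothetical addresses:

#include <stdio.h>

#define VIDEO_MAX_PLANES 8
#define EXTRADATA_IDX(n) ((n) > 1 ? (n) - 1 : 0)

struct frame_sketch {
	unsigned int extradata_addr, extradata_size, flags;
};

int main(void)
{
	unsigned int num_planes = 2;
	unsigned int plane_addr[2] = { 0x1000, 0x2000 };
	unsigned int plane_len[2] = { 1 << 20, 4096 };
	unsigned int extra_idx = EXTRADATA_IDX(num_planes);
	struct frame_sketch data = { 0, 0, 0 };

	if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
		data.extradata_addr = plane_addr[extra_idx];
		data.extradata_size = plane_len[extra_idx];
		data.flags |= 1;	/* HAL_BUFFERFLAG_EXTRADATA stand-in */
	}
	printf("extradata @%#x size %u\n",
	       data.extradata_addr, data.extradata_size);
	return 0;
}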
 
-static unsigned int count_single_batch(struct msm_vidc_list *list,
+static unsigned int count_single_batch(struct msm_vidc_inst *inst,
 		enum v4l2_buf_type type)
 {
-	struct vb2_buf_entry *buf;
 	int count = 0;
-	struct vb2_v4l2_buffer *vbuf = NULL;
+	struct msm_vidc_buffer *mbuf = NULL;
 
-	mutex_lock(&list->lock);
-	list_for_each_entry(buf, &list->list, list) {
-		if (buf->vb->type != type)
+	mutex_lock(&inst->registeredbufs.lock);
+	list_for_each_entry(mbuf, &inst->registeredbufs.list, list) {
+		if (mbuf->vvb.vb2_buf.type != type)
+			continue;
+
+		/* count only deferred buffers */
+		if (!mbuf->deferred)
 			continue;
 
 		++count;
 
-		vbuf = to_vb2_v4l2_buffer(buf->vb);
-		if (!(vbuf->flags & V4L2_MSM_BUF_FLAG_DEFER))
+		if (!(mbuf->vvb.flags & V4L2_MSM_BUF_FLAG_DEFER))
 			goto found_batch;
 	}
-	 /* don't have a full batch */
+	/* don't have a full batch */
 	count = 0;
 
 found_batch:
-	mutex_unlock(&list->lock);
+	mutex_unlock(&inst->registeredbufs.lock);
 	return count;
 }
 
-static unsigned int count_buffers(struct msm_vidc_list *list,
+static unsigned int count_buffers(struct msm_vidc_inst *inst,
 		enum v4l2_buf_type type)
 {
-	struct vb2_buf_entry *buf;
+	struct msm_vidc_buffer *mbuf;
 	int count = 0;
 
-	mutex_lock(&list->lock);
-	list_for_each_entry(buf, &list->list, list) {
-		if (buf->vb->type != type)
+	mutex_lock(&inst->registeredbufs.lock);
+	list_for_each_entry(mbuf, &inst->registeredbufs.list, list) {
+		if (mbuf->vvb.vb2_buf.type != type)
+			continue;
+
+		/* count only deferred buffers */
+		if (!mbuf->deferred)
 			continue;
 
 		++count;
 	}
-	mutex_unlock(&list->lock);
+	mutex_unlock(&inst->registeredbufs.lock);
 
 	return count;
 }
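
With the pendingq gone, batching counts deferred buffers directly on the registered list: each entry carries a deferred flag, and the walk filters on queue type plus that flag. A sketch of the same filter over an array, with simplified types:

#include <stdio.h>

enum buf_type { BUF_OUTPUT, BUF_CAPTURE };

struct buf_sketch { enum buf_type type; int deferred; };

static unsigned int count_deferred(const struct buf_sketch *bufs,
		unsigned int n, enum buf_type type)
{
	unsigned int i, count = 0;

	for (i = 0; i < n; i++)
		if (bufs[i].type == type && bufs[i].deferred)
			count++;
	return count;
}

int main(void)
{
	struct buf_sketch bufs[] = {
		{ BUF_OUTPUT, 1 }, { BUF_CAPTURE, 1 }, { BUF_OUTPUT, 0 },
	};

	printf("deferred etbs=%u ftbs=%u\n",
	       count_deferred(bufs, 3, BUF_OUTPUT),
	       count_deferred(bufs, 3, BUF_CAPTURE));
	return 0;
}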
@@ -3864,27 +3807,45 @@
 
 	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
 		dprintk(VIDC_DBG,
-			"Sending etb (%pa) to hal: filled: %d, ts: %lld, flags = %#x\n",
-			&data->device_addr, data->filled_len,
+			"Sending etb (%x) to hal: filled: %d, ts: %lld, flags = %#x\n",
+			data->device_addr, data->filled_len,
 			data->timestamp, data->flags);
 		msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_ETB);
 
 	} else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
 		dprintk(VIDC_DBG,
-			"Sending ftb (%pa) to hal: size: %d, ts: %lld, flags = %#x\n",
-			&data->device_addr, data->alloc_len,
+			"Sending ftb (%x) to hal: size: %d, ts: %lld, flags = %#x\n",
+			data->device_addr, data->alloc_len,
 			data->timestamp, data->flags);
 		msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_FTB);
 	}
 }
 
+enum hal_buffer get_hal_buffer_type(unsigned int type,
+		unsigned int plane_num)
+{
+	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		if (plane_num == 0)
+			return HAL_BUFFER_INPUT;
+		else
+			return HAL_BUFFER_EXTRADATA_INPUT;
+	} else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		if (plane_num == 0)
+			return HAL_BUFFER_OUTPUT;
+		else
+			return HAL_BUFFER_EXTRADATA_OUTPUT;
+	} else {
+		return -EINVAL;
+	}
+}
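
get_hal_buffer_type maps (queue type, plane) to a HAL buffer type: plane 0 is the data plane, higher planes are extradata. A usage-style sketch with illustrative enum stand-ins for the V4L2/HAL constants:

#include <stdio.h>

enum { TYPE_OUTPUT_MPLANE, TYPE_CAPTURE_MPLANE };
enum { HAL_INPUT, HAL_EXTRA_INPUT, HAL_OUTPUT, HAL_EXTRA_OUTPUT };

static int hal_buffer_type(int type, unsigned int plane)
{
	if (type == TYPE_OUTPUT_MPLANE)
		return plane == 0 ? HAL_INPUT : HAL_EXTRA_INPUT;
	if (type == TYPE_CAPTURE_MPLANE)
		return plane == 0 ? HAL_OUTPUT : HAL_EXTRA_OUTPUT;
	return -1;
}

int main(void)
{
	unsigned int plane;

	/* a 2-plane capture buffer: pixels + extradata */
	for (plane = 0; plane < 2; plane++)
		printf("plane %u -> hal type %d\n",
		       plane, hal_buffer_type(TYPE_CAPTURE_MPLANE, plane));
	return 0;
}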
+
 /*
  * Attempts to queue `vb` to hardware.  If, for various reasons, the buffer
  * cannot be queued to hardware, the buffer will be staged for commit in the
  * pending queue.  Once the hardware reaches a good state (or if `vb` is NULL,
  * the subsequent *_qbuf will commit the previously staged buffers to hardware.
  */
-int msm_comm_qbuf(struct msm_vidc_inst *inst, struct vb2_buffer *vb)
+int msm_comm_qbuf(struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf)
 {
 	int rc = 0, capture_count, output_count;
 	struct msm_vidc_core *core;
@@ -3894,8 +3855,7 @@
 		int count;
 	} etbs, ftbs;
 	bool defer = false, batch_mode;
-	struct vb2_buf_entry *temp, *next;
-	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+	struct msm_vidc_buffer *temp = NULL, *next = NULL;
 
 	if (!inst) {
 		dprintk(VIDC_ERR, "%s: Invalid arguments\n", __func__);
@@ -3905,36 +3865,21 @@
 	core = inst->core;
 	hdev = core->device;
 
-	if (inst->state == MSM_VIDC_CORE_INVALID ||
-		core->state == VIDC_CORE_INVALID ||
-		core->state == VIDC_CORE_UNINIT) {
-		dprintk(VIDC_ERR, "Core is in bad state. Can't Queue\n");
+	if (inst->state == MSM_VIDC_CORE_INVALID) {
+		dprintk(VIDC_ERR, "%s: inst is in bad state\n", __func__);
 		return -EINVAL;
 	}
 
-	/*
-	 * Stick the buffer into the pendinq, we'll pop it out later on
-	 * if we want to commit it to hardware
-	 */
-	if (vb) {
-		temp = kzalloc(sizeof(*temp), GFP_KERNEL);
-		if (!temp) {
-			dprintk(VIDC_ERR, "Out of memory\n");
-			goto err_no_mem;
-		}
-
-		temp->vb = vb;
-		mutex_lock(&inst->pendingq.lock);
-		list_add_tail(&temp->list, &inst->pendingq.list);
-		mutex_unlock(&inst->pendingq.lock);
-	}
+	/* initially assume every buffer is going to be deferred */
+	if (mbuf)
+		mbuf->deferred = true;
 
 	batch_mode = msm_comm_g_ctrl_for_id(inst, V4L2_CID_VIDC_QBUF_MODE)
 		== V4L2_VIDC_QBUF_BATCHED;
 	capture_count = (batch_mode ? &count_single_batch : &count_buffers)
-		(&inst->pendingq, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+		(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
 	output_count = (batch_mode ? &count_single_batch : &count_buffers)
-		(&inst->pendingq, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+		(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
 
 	/*
 	 * Somewhat complicated logic to prevent queuing the buffer to hardware.
@@ -3948,13 +3893,18 @@
 	 * buffer to be batched with future frames.  The batch size (on both
 	 * capabilities) is completely determined by the client.
 	 */
-	defer = defer ? defer : (vbuf && vbuf->flags & V4L2_MSM_BUF_FLAG_DEFER);
+	defer = defer ? defer :
+		(mbuf && mbuf->vvb.flags & V4L2_MSM_BUF_FLAG_DEFER);
 
 	/* 3) If we're in batch mode, we must have full batches of both types */
 	defer = defer ? defer:(batch_mode && (!output_count || !capture_count));
 
 	if (defer) {
-		dprintk(VIDC_DBG, "Deferring queue of %pK\n", vb);
+		if (mbuf) {
+			mbuf->deferred = true;
+			print_vidc_buffer(VIDC_DBG, "deferred qbuf",
+				inst, mbuf);
+		}
 		return 0;
 	}
 
@@ -3984,15 +3934,18 @@
 	etbs.count = ftbs.count = 0;
 
 	/*
-	 * Try to collect all pending buffers into 2 batches of ftb and etb
+	 * Try to collect all deferred buffers into 2 batches of ftb and etb
-	 * Note that these "batches" might be empty if we're no in batching mode
-	 * and the pendingq is empty
+	 * Note that these "batches" might be empty if we're not in batching mode
+	 * and no buffers have the deferred flag set.
 	 */
-	mutex_lock(&inst->pendingq.lock);
-	list_for_each_entry_safe(temp, next, &inst->pendingq.list, list) {
+	mutex_lock(&inst->registeredbufs.lock);
+	list_for_each_entry_safe(temp, next, &inst->registeredbufs.list, list) {
 		struct vidc_frame_data *frame_data = NULL;
 
-		switch (temp->vb->type) {
+		if (!temp->deferred)
+			continue;
+
+		switch (temp->vvb.vb2_buf.type) {
 		case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
 			if (ftbs.count < capture_count && ftbs.data)
 				frame_data = &ftbs.data[ftbs.count++];
@@ -4008,12 +3961,14 @@
 		if (!frame_data)
 			continue;
 
-		populate_frame_data(frame_data, temp->vb, inst);
+		populate_frame_data(frame_data, temp, inst);
 
-		list_del(&temp->list);
-		kfree(temp);
+		/* this buffer is going to be queued (not deferred) */
+		temp->deferred = false;
+
+		print_vidc_buffer(VIDC_DBG, "qbuf", inst, temp);
 	}
-	mutex_unlock(&inst->pendingq.lock);
+	mutex_unlock(&inst->registeredbufs.lock);
 
 	/* Finally commit all our frame(s) to H/W */
 	if (batch_mode) {
@@ -4121,7 +4076,7 @@
 
 		/* For DPB buffers, no need to add Extra buffers */
 
-		bufreq->buffer_count_actual = bufreq->buffer_count_min_host =
+		bufreq->buffer_count_min_host = bufreq->buffer_count_actual =
 			bufreq->buffer_count_min;
 
 		bufreq = get_buff_req_buffer(inst,
@@ -4136,7 +4091,7 @@
 		extra_buffers = msm_vidc_get_extra_buff_count(inst,
 			HAL_BUFFER_OUTPUT);
 
-		bufreq->buffer_count_min_host =
+		bufreq->buffer_count_min_host = bufreq->buffer_count_actual =
 			bufreq->buffer_count_min + extra_buffers;
 	} else {
 
@@ -4152,7 +4107,7 @@
 		extra_buffers = msm_vidc_get_extra_buff_count(inst,
 			HAL_BUFFER_OUTPUT);
 
-		bufreq->buffer_count_actual = bufreq->buffer_count_min_host =
+		bufreq->buffer_count_min_host = bufreq->buffer_count_actual =
 			bufreq->buffer_count_min + extra_buffers;
 	}
 
@@ -4257,7 +4212,8 @@
 
 	rc = wait_for_completion_timeout(&inst->completions[
 			SESSION_MSG_INDEX(HAL_SESSION_PROPERTY_INFO)],
-		msecs_to_jiffies(msm_vidc_hw_rsp_timeout));
+		msecs_to_jiffies(
+			inst->core->resources.msm_vidc_hw_rsp_timeout));
 	if (!rc) {
 		dprintk(VIDC_ERR,
 			"%s: Wait interrupted or timed out [%pK]: %d\n",
@@ -4332,11 +4288,7 @@
 	}
 	mutex_lock(&inst->outputbufs.lock);
 	list_for_each_entry_safe(buf, dummy, &inst->outputbufs.list, list) {
-		handle = buf->handle;
-		if (!handle) {
-			dprintk(VIDC_ERR, "%s - invalid handle\n", __func__);
-			goto exit;
-		}
+		handle = &buf->smem;
 
 		if ((buf->buffer_ownership == FIRMWARE) && !force_release) {
 			dprintk(VIDC_INFO, "DPB is with f/w. Can't free it\n");
@@ -4356,18 +4308,17 @@
 				(void *)inst->session, &buffer_info);
 			if (rc) {
 				dprintk(VIDC_WARN,
-					"Rel output buf fail:%pa, %d\n",
-					&buffer_info.align_device_addr,
+					"Rel output buf fail:%x, %d\n",
+					buffer_info.align_device_addr,
 					buffer_info.buffer_size);
 			}
 		}
 
 		list_del(&buf->list);
-		msm_comm_smem_free(inst, buf->handle);
+		msm_comm_smem_free(inst, &buf->smem);
 		kfree(buf);
 	}
 
-exit:
 	mutex_unlock(&inst->outputbufs.lock);
 	return rc;
 }
@@ -4392,13 +4343,8 @@
 	mutex_lock(&inst->scratchbufs.lock);
 
 	list_for_each_entry(buf, &inst->scratchbufs.list, list) {
-		if (!buf->handle) {
-			dprintk(VIDC_ERR, "%s: invalid buf handle\n", __func__);
-			mutex_unlock(&inst->scratchbufs.lock);
-			goto not_sufficient;
-		}
 		if (buf->buffer_type == buffer_type &&
-			buf->handle->size >= bufreq->buffer_size)
+			buf->smem.size >= bufreq->buffer_size)
 			count++;
 	}
 	mutex_unlock(&inst->scratchbufs.lock);
@@ -4457,13 +4403,7 @@
 
 	mutex_lock(&inst->scratchbufs.lock);
 	list_for_each_entry_safe(buf, dummy, &inst->scratchbufs.list, list) {
-		if (!buf->handle) {
-			dprintk(VIDC_ERR, "%s - buf->handle NULL\n", __func__);
-			rc = -EINVAL;
-			goto exit;
-		}
-
-		handle = buf->handle;
+		handle = &buf->smem;
 		buffer_info.buffer_size = handle->size;
 		buffer_info.buffer_type = buf->buffer_type;
 		buffer_info.num_buffers = 1;
@@ -4475,8 +4415,8 @@
 				(void *)inst->session, &buffer_info);
 			if (rc) {
 				dprintk(VIDC_WARN,
-					"Rel scrtch buf fail:%pa, %d\n",
-					&buffer_info.align_device_addr,
+					"Rel scrtch buf fail:%x, %d\n",
+					buffer_info.align_device_addr,
 					buffer_info.buffer_size);
 			}
 			mutex_unlock(&inst->scratchbufs.lock);
@@ -4495,15 +4435,35 @@
 			continue;
 
 		list_del(&buf->list);
-		msm_comm_smem_free(inst, buf->handle);
+		msm_comm_smem_free(inst, handle);
 		kfree(buf);
 	}
 
-exit:
 	mutex_unlock(&inst->scratchbufs.lock);
 	return rc;
 }
 
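+/*
+ * Free the bookkeeping entries for reconstruction buffers.  These entries
+ * only carry per-index statistics (the CR/CF fields of struct recon_buf),
+ * so unlike the scratch/persist paths above there is no smem to release.
+ */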
+int msm_comm_release_recon_buffers(struct msm_vidc_inst *inst)
+{
+	struct recon_buf *buf, *next;
+
+	if (!inst) {
+		dprintk(VIDC_ERR,
+			"Invalid instance pointer = %pK\n", inst);
+		return -EINVAL;
+	}
+
+	mutex_lock(&inst->reconbufs.lock);
+	list_for_each_entry_safe(buf, next, &inst->reconbufs.list, list) {
+		list_del(&buf->list);
+		kfree(buf);
+	}
+	INIT_LIST_HEAD(&inst->reconbufs.list);
+	mutex_unlock(&inst->reconbufs.lock);
+
+	return 0;
+}
+
 int msm_comm_release_persist_buffers(struct msm_vidc_inst *inst)
 {
 	struct msm_smem *handle;
@@ -4534,7 +4494,7 @@
 	mutex_lock(&inst->persistbufs.lock);
 	list_for_each_safe(ptr, next, &inst->persistbufs.list) {
 		buf = list_entry(ptr, struct internal_buf, list);
-		handle = buf->handle;
+		handle = &buf->smem;
 		buffer_info.buffer_size = handle->size;
 		buffer_info.buffer_type = buf->buffer_type;
 		buffer_info.num_buffers = 1;
@@ -4546,8 +4506,8 @@
 				(void *)inst->session, &buffer_info);
 			if (rc) {
 				dprintk(VIDC_WARN,
-					"Rel prst buf fail:%pa, %d\n",
-					&buffer_info.align_device_addr,
+					"Rel prst buf fail:%x, %d\n",
+					buffer_info.align_device_addr,
 					buffer_info.buffer_size);
 			}
 			mutex_unlock(&inst->persistbufs.lock);
@@ -4560,7 +4520,7 @@
 			mutex_lock(&inst->persistbufs.lock);
 		}
 		list_del(&buf->list);
-		msm_comm_smem_free(inst, buf->handle);
+		msm_comm_smem_free(inst, handle);
 		kfree(buf);
 	}
 	mutex_unlock(&inst->persistbufs.lock);
@@ -4657,6 +4617,54 @@
 	return rc;
 }
 
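+/*
+ * Allocate one bookkeeping entry per reconstruction buffer reported in the
+ * buffer requirements, so that per-buffer statistics (presumably the CR/CF
+ * compression-ratio/complexity fields) can be attached by index later.
+ * Encoders use the dedicated INTERNAL_RECON requirement; decoders reuse
+ * the output buffer requirement.
+ */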
+int msm_comm_set_recon_buffers(struct msm_vidc_inst *inst)
+{
+	int rc = 0, i = 0;
+	struct hal_buffer_requirements *internal_buf;
+	struct recon_buf *binfo;
+	struct msm_vidc_list *buf_list = &inst->reconbufs;
+
+	if (!inst) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	if (inst->session_type == MSM_VIDC_ENCODER)
+		internal_buf = get_buff_req_buffer(inst,
+			HAL_BUFFER_INTERNAL_RECON);
+	else if (inst->session_type == MSM_VIDC_DECODER)
+		internal_buf = get_buff_req_buffer(inst,
+			msm_comm_get_hal_output_buffer(inst));
+	else
+		return -EINVAL;
+
+	if (!internal_buf || !internal_buf->buffer_count_actual) {
+		dprintk(VIDC_DBG, "Inst : %pK Recon buffers not required\n",
+			inst);
+		return 0;
+	}
+
+	if (!list_empty(&inst->reconbufs.list))
+		msm_comm_release_recon_buffers(inst);
+
+	for (i = 0; i < internal_buf->buffer_count_actual; i++) {
+		binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
+		if (!binfo) {
+			dprintk(VIDC_ERR, "Out of memory\n");
+			rc = -ENOMEM;
+			goto fail_kzalloc;
+		}
+
+		binfo->buffer_index = i;
+		mutex_lock(&buf_list->lock);
+		list_add_tail(&binfo->list, &buf_list->list);
+		mutex_unlock(&buf_list->lock);
+	}
+
+fail_kzalloc:
+	return rc;
+}
+
 int msm_comm_set_persist_buffers(struct msm_vidc_inst *inst)
 {
 	int rc = 0;
@@ -4690,116 +4698,20 @@
 	for (c = 0; c < ARRAY_SIZE(ports); ++c) {
 		enum vidc_ports port = ports[c];
 
-		dprintk(VIDC_DBG, "Flushing buffers of type %d in bad state\n",
-				port);
 		mutex_lock(&inst->bufq[port].lock);
-		list_for_each_safe(ptr, next, &inst->bufq[port].
-				vb2_bufq.queued_list) {
+		list_for_each_safe(ptr, next,
+				&inst->bufq[port].vb2_bufq.queued_list) {
 			struct vb2_buffer *vb = container_of(ptr,
 					struct vb2_buffer, queued_entry);
-
 			vb->planes[0].bytesused = 0;
-			vb->planes[0].data_offset = 0;
-
+			print_vb2_buffer(VIDC_ERR, "flush in invalid",
+				inst, vb);
 			vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
 		}
 		mutex_unlock(&inst->bufq[port].lock);
 	}
-
 	msm_vidc_queue_v4l2_event(inst, V4L2_EVENT_MSM_VIDC_FLUSH_DONE);
-}
-
-void msm_comm_flush_dynamic_buffers(struct msm_vidc_inst *inst)
-{
-	struct buffer_info *binfo = NULL;
-
-	if (inst->buffer_mode_set[CAPTURE_PORT] != HAL_BUFFER_MODE_DYNAMIC)
-		return;
-
-	/*
-	 * dynamic buffer mode:- if flush is called during seek
-	 * driver should not queue any new buffer it has been holding.
-	 *
-	 * Each dynamic o/p buffer can have one of following ref_count:
-	 * ref_count : 0   - f/w has released reference and sent dynamic
-	 *                   buffer back. The buffer has been returned
-	 *                   back to client.
-	 *
-	 * ref_count : 1   - f/w is holding reference. f/w may have released
-	 *                   dynamic buffer as read_only OR dynamic buffer is
-	 *                   pending. f/w will release reference before sending
-	 *                   flush_done.
-	 *
-	 * ref_count : >=2 - f/w is holding reference, f/w has released dynamic
-	 *                   buffer as read_only, which client has queued back
-	 *                   to driver. Driver holds this buffer and will queue
-	 *                   back only when f/w releases the reference. During
-	 *                   flush_done, f/w will release the reference but
-	 *                   driver should not queue back the buffer to f/w.
-	 *                   Flush all buffers with ref_count >= 2.
-	 */
-	mutex_lock(&inst->registeredbufs.lock);
-	if (!list_empty(&inst->registeredbufs.list)) {
-		struct v4l2_event buf_event = {0};
-		u32 *ptr = NULL;
-
-		list_for_each_entry(binfo, &inst->registeredbufs.list, list) {
-			if (binfo->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
-				atomic_read(&binfo->ref_count) >= 2) {
-
-				atomic_dec(&binfo->ref_count);
-				buf_event.type =
-				V4L2_EVENT_MSM_VIDC_RELEASE_UNQUEUED_BUFFER;
-				ptr = (u32 *)buf_event.u.data;
-				ptr[0] = binfo->fd[0];
-				ptr[1] = binfo->buff_off[0];
-				ptr[2] = binfo->uvaddr[0];
-				ptr[3] = (u32) binfo->timestamp.tv_sec;
-				ptr[4] = (u32) binfo->timestamp.tv_usec;
-				ptr[5] = binfo->v4l2_index;
-				dprintk(VIDC_DBG,
-					"released buffer held in driver before issuing flush: %pa fd[0]: %d\n",
-					&binfo->device_addr[0], binfo->fd[0]);
-				/*send event to client*/
-				v4l2_event_queue_fh(&inst->event_handler,
-					&buf_event);
-			}
-		}
-	}
-	mutex_unlock(&inst->registeredbufs.lock);
-}
-
-void msm_comm_flush_pending_dynamic_buffers(struct msm_vidc_inst *inst)
-{
-	struct buffer_info *binfo = NULL;
-
-	if (!inst)
-		return;
-
-	if (inst->buffer_mode_set[CAPTURE_PORT] != HAL_BUFFER_MODE_DYNAMIC)
-		return;
-
-	if (list_empty(&inst->pendingq.list) ||
-		list_empty(&inst->registeredbufs.list))
-		return;
-
-	/*
-	 * Dynamic Buffer mode - Since pendingq is not empty
-	 * no output buffers have been sent to firmware yet.
-	 * Hence remove reference to all pendingq o/p buffers
-	 * before flushing them.
-	 */
-
-	mutex_lock(&inst->registeredbufs.lock);
-	list_for_each_entry(binfo, &inst->registeredbufs.list, list) {
-		if (binfo->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
-			dprintk(VIDC_DBG,
-				"%s: binfo = %pK device_addr = %pa\n",
-				__func__, binfo, &binfo->device_addr[0]);
-			buf_ref_put(inst, binfo);
-		}
-	}
-	mutex_unlock(&inst->registeredbufs.lock);
 }
 
 int msm_comm_flush(struct msm_vidc_inst *inst, u32 flags)
@@ -4807,33 +4719,25 @@
 	int rc =  0;
 	bool ip_flush = false;
 	bool op_flush = false;
-	struct vb2_buf_entry *temp, *next;
-	struct mutex *lock;
+	struct msm_vidc_buffer *mbuf, *next;
 	struct msm_vidc_core *core;
 	struct hfi_device *hdev;
 
-	if (!inst) {
+	if (!inst || !inst->core || !inst->core->device) {
 		dprintk(VIDC_ERR,
-				"Invalid instance pointer = %pK\n", inst);
+				"Invalid params, inst %pK\n", inst);
 		return -EINVAL;
 	}
 	core = inst->core;
-	if (!core) {
-		dprintk(VIDC_ERR,
-				"Invalid core pointer = %pK\n", core);
-		return -EINVAL;
-	}
 	hdev = core->device;
-	if (!hdev) {
-		dprintk(VIDC_ERR, "Invalid device pointer = %pK\n", hdev);
-		return -EINVAL;
-	}
 
 	ip_flush = flags & V4L2_QCOM_CMD_FLUSH_OUTPUT;
 	op_flush = flags & V4L2_QCOM_CMD_FLUSH_CAPTURE;
 
 	if (ip_flush && !op_flush) {
-		dprintk(VIDC_INFO, "Input only flush not supported\n");
+		dprintk(VIDC_WARN,
+			"Input only flush not supported, making it flush all\n");
+		op_flush = true;
-		return 0;
 	}
 
@@ -4841,11 +4745,7 @@
 
 	msm_clock_data_reset(inst);
 
-	msm_comm_flush_dynamic_buffers(inst);
-
-	if (inst->state == MSM_VIDC_CORE_INVALID ||
-			core->state == VIDC_CORE_INVALID ||
-			core->state == VIDC_CORE_UNINIT) {
+	if (inst->state == MSM_VIDC_CORE_INVALID) {
 		dprintk(VIDC_ERR,
 				"Core %pK and inst %pK are in bad state\n",
 					core, inst);
@@ -4853,68 +4753,52 @@
 		return 0;
 	}
 
-	if (inst->in_reconfig && !ip_flush && op_flush) {
-		mutex_lock(&inst->pendingq.lock);
-		if (!list_empty(&inst->pendingq.list)) {
-			/*
-			 * Execution can never reach here since port reconfig
-			 * wont happen unless pendingq is emptied out
-			 * (both pendingq and flush being secured with same
-			 * lock). Printing a message here incase this breaks.
-			 */
-			dprintk(VIDC_WARN,
-			"FLUSH BUG: Pending q not empty! It should be empty\n");
-		}
-		mutex_unlock(&inst->pendingq.lock);
-		atomic_inc(&inst->in_flush);
-		dprintk(VIDC_DBG, "Send flush Output to firmware\n");
+	mutex_lock(&inst->registeredbufs.lock);
+	list_for_each_entry_safe(mbuf, next, &inst->registeredbufs.list, list) {
+		/* flush only deferred buffers (which are not queued yet) */
+		if (!mbuf->deferred)
+			continue;
+
+		/* don't flush input buffers if flush was not requested on them */
+		if (!ip_flush && mbuf->vvb.vb2_buf.type ==
+				V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+			continue;
+
+		print_vidc_buffer(VIDC_DBG, "flush buf", inst, mbuf);
+		msm_comm_flush_vidc_buffer(inst, mbuf);
+		msm_comm_unmap_vidc_buffer(inst, mbuf);
+
+		/* remove from list */
+		list_del(&mbuf->list);
+		kfree(mbuf);
+		mbuf = NULL;
+	}
+	mutex_unlock(&inst->registeredbufs.lock);
+
+	/* enable in_flush */
+	inst->in_flush = true;
+
+	if (ip_flush) {
+		dprintk(VIDC_DBG, "Send flush on all ports to firmware\n");
 		rc = call_hfi_op(hdev, session_flush, inst->session,
-				HAL_FLUSH_OUTPUT);
+			HAL_FLUSH_ALL);
 	} else {
-		msm_comm_flush_pending_dynamic_buffers(inst);
-		/*
-		 * If flush is called after queueing buffers but before
-		 * streamon driver should flush the pending queue
-		 */
-		mutex_lock(&inst->pendingq.lock);
-		list_for_each_entry_safe(temp, next,
-				&inst->pendingq.list, list) {
-			enum v4l2_buf_type type = temp->vb->type;
-
-			if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
-				lock = &inst->bufq[CAPTURE_PORT].lock;
-			else
-				lock = &inst->bufq[OUTPUT_PORT].lock;
-
-			temp->vb->planes[0].bytesused = 0;
-
-			mutex_lock(lock);
-			vb2_buffer_done(temp->vb, VB2_BUF_STATE_DONE);
-			msm_vidc_debugfs_update(inst,
-				type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ?
-					MSM_VIDC_DEBUGFS_EVENT_FBD :
-					MSM_VIDC_DEBUGFS_EVENT_EBD);
-			list_del(&temp->list);
-			mutex_unlock(lock);
-
-			kfree(temp);
-		}
-		mutex_unlock(&inst->pendingq.lock);
-
-		/*Do not send flush in case of session_error */
-		if (!(inst->state == MSM_VIDC_CORE_INVALID &&
-			  core->state != VIDC_CORE_INVALID)) {
-			atomic_inc(&inst->in_flush);
-			dprintk(VIDC_DBG, "Send flush all to firmware\n");
-			rc = call_hfi_op(hdev, session_flush, inst->session,
-				HAL_FLUSH_ALL);
-		}
+		dprintk(VIDC_DBG, "Send flush on output port to firmware\n");
+		rc = call_hfi_op(hdev, session_flush, inst->session,
+			HAL_FLUSH_OUTPUT);
+	}
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Sending flush to firmware failed, flushing out all buffers\n");
+		msm_comm_flush_in_invalid_state(inst);
+		/* disable in_flush */
+		inst->in_flush = false;
 	}
 
 	return rc;
 }
 
-
 enum hal_extradata_id msm_comm_get_hal_extradata_index(
 	enum v4l2_mpeg_vidc_extradata index)
 {
@@ -5290,19 +5174,19 @@
 	return rc;
 }
 
-struct msm_smem *msm_comm_smem_alloc(struct msm_vidc_inst *inst,
-			size_t size, u32 align, u32 flags,
-			enum hal_buffer buffer_type, int map_kernel)
+int msm_comm_smem_alloc(struct msm_vidc_inst *inst,
+		size_t size, u32 align, u32 flags, enum hal_buffer buffer_type,
+		int map_kernel, struct msm_smem *smem)
 {
-	struct msm_smem *m = NULL;
+	int rc = 0;
 
 	if (!inst || !inst->core) {
 		dprintk(VIDC_ERR, "%s: invalid inst: %pK\n", __func__, inst);
-		return NULL;
+		return -EINVAL;
 	}
-	m = msm_smem_alloc(inst->mem_client, size, align,
-				flags, buffer_type, map_kernel);
-	return m;
+	rc = msm_smem_alloc(inst->mem_client, size, align,
+			flags, buffer_type, map_kernel, smem);
+	return rc;
 }
 
 void msm_comm_smem_free(struct msm_vidc_inst *inst, struct msm_smem *mem)
@@ -5323,28 +5207,138 @@
 			"%s: invalid params: %pK %pK\n", __func__, inst, mem);
 		return -EINVAL;
 	}
-	return msm_smem_cache_operations(inst->mem_client, mem, cache_ops);
+	return msm_smem_cache_operations(inst->mem_client, mem->handle,
+			mem->offset, mem->size, cache_ops);
 }
 
-struct msm_smem *msm_comm_smem_user_to_kernel(struct msm_vidc_inst *inst,
-			int fd, u32 offset, enum hal_buffer buffer_type)
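+/*
+ * Cache maintenance at qbuf time: planes the CPU has written (decoder
+ * bitstream input, encoder yuv/extradata input) are cleaned and
+ * invalidated (data planes only up to bytesused); everything else is
+ * only invalidated.
+ */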
+int msm_comm_qbuf_cache_operations(struct msm_vidc_inst *inst,
+		struct v4l2_buffer *b)
 {
-	struct msm_smem *m = NULL;
+	int rc = 0, i;
+	void *dma_buf;
+	void *handle;
+	bool skip;
 
-	if (!inst || !inst->core) {
-		dprintk(VIDC_ERR, "%s: invalid inst: %pK\n", __func__, inst);
-		return NULL;
+	if (!inst || !b) {
+		dprintk(VIDC_ERR, "%s: invalid params %pK %pK\n",
+			__func__, inst, b);
+		return -EINVAL;
 	}
 
-	if (inst->state == MSM_VIDC_CORE_INVALID) {
-		dprintk(VIDC_ERR, "Core in Invalid state, returning from %s\n",
-			__func__);
-		return NULL;
+	for (i = 0; i < b->length; i++) {
+		unsigned long offset, size;
+		enum smem_cache_ops cache_ops;
+
+		dma_buf = msm_smem_get_dma_buf(b->m.planes[i].m.fd);
+		handle = msm_smem_get_handle(inst->mem_client, dma_buf);
+
+		offset = b->m.planes[i].data_offset;
+		size = b->m.planes[i].length;
+		cache_ops = SMEM_CACHE_INVALIDATE;
+		skip = false;
+
+		if (inst->session_type == MSM_VIDC_DECODER) {
+			if (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+				if (!i) { /* bitstream */
+					size = b->m.planes[i].bytesused;
+					cache_ops = SMEM_CACHE_CLEAN_INVALIDATE;
+				}
+			} else if (b->type ==
+					V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+				if (!i) { /* yuv */
+					/* all values are correct */
+				}
+			}
+		} else if (inst->session_type == MSM_VIDC_ENCODER) {
+			if (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+				if (!i) { /* yuv */
+					size = b->m.planes[i].bytesused;
+					cache_ops = SMEM_CACHE_CLEAN_INVALIDATE;
+				} else { /* extradata */
+					cache_ops = SMEM_CACHE_CLEAN_INVALIDATE;
+				}
+			} else if (b->type ==
+					V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+				if (!i) { /* bitstream */
+					/* all values are correct */
+				}
+			}
+		}
+
+		if (!skip) {
+			rc = msm_smem_cache_operations(inst->mem_client, handle,
+					offset, size, cache_ops);
+			if (rc)
+				print_v4l2_buffer(VIDC_ERR,
+					"qbuf cache ops failed", inst, b);
+		}
+
+		msm_smem_put_handle(inst->mem_client, handle);
+		msm_smem_put_dma_buf(dma_buf);
 	}
 
-	m = msm_smem_user_to_kernel(inst->mem_client,
-			fd, offset, buffer_type);
-	return m;
+	return rc;
+}
+
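+/*
+ * Cache maintenance at dqbuf time: plane 0 was already invalidated when
+ * it was queued, so only the extradata planes are invalidated here.
+ */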
+int msm_comm_dqbuf_cache_operations(struct msm_vidc_inst *inst,
+		struct v4l2_buffer *b)
+{
+	int rc = 0, i;
+	void *dma_buf;
+	void *handle;
+	bool skip;
+
+	if (!inst || !b) {
+		dprintk(VIDC_ERR, "%s: invalid params %pK %pK\n",
+			__func__, inst, b);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < b->length; i++) {
+		unsigned long offset, size;
+		enum smem_cache_ops cache_ops;
+
+		dma_buf = msm_smem_get_dma_buf(b->m.planes[i].m.fd);
+		handle = msm_smem_get_handle(inst->mem_client, dma_buf);
+
+		offset = b->m.planes[i].data_offset;
+		size = b->m.planes[i].length;
+		cache_ops = SMEM_CACHE_INVALIDATE;
+		skip = false;
+
+		if (inst->session_type == MSM_VIDC_DECODER) {
+			if (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+				if (!i) /* bitstream */
+					skip = true;
+			} else if (b->type ==
+					V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+				if (!i) /* yuv */
+					skip = true;
+			}
+		} else if (inst->session_type == MSM_VIDC_ENCODER) {
+			if (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+				if (!i) /* yuv */
+					skip = true;
+			} else if (b->type ==
+					V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+				if (!i) /* bitstream */
+					skip = true;
+			}
+		}
+
+		if (!skip) {
+			rc = msm_smem_cache_operations(inst->mem_client, handle,
+					offset, size, cache_ops);
+			if (rc)
+				print_v4l2_buffer(VIDC_ERR,
+					"dqbuf cache ops failed", inst, b);
+		}
+
+		msm_smem_put_handle(inst->mem_client, handle);
+		msm_smem_put_dma_buf(dma_buf);
+	}
+
+	return rc;
 }
 
 void msm_vidc_fw_unload_handler(struct work_struct *work)
@@ -5399,7 +5393,7 @@
 
 	hdev = inst->core->device;
 
-	format = get_hal_uncompressed(fourcc);
+	format = msm_comm_get_hal_uncompressed(fourcc);
 	if (format == HAL_UNUSED_COLOR) {
 		dprintk(VIDC_ERR, "Using unsupported colorformat %#x\n",
 				fourcc);
@@ -5501,9 +5495,8 @@
 
 void msm_comm_print_inst_info(struct msm_vidc_inst *inst)
 {
-	struct buffer_info *temp;
+	struct msm_vidc_buffer *mbuf;
 	struct internal_buf *buf;
-	int i = 0;
 	bool is_decode = false;
 	enum vidc_ports port;
 	bool is_secure = false;
@@ -5531,37 +5524,32 @@
 			inst, inst->session_type);
 	mutex_lock(&inst->registeredbufs.lock);
 	dprintk(VIDC_ERR, "registered buffer list:\n");
-	list_for_each_entry(temp, &inst->registeredbufs.list, list)
-		for (i = 0; i < temp->num_planes; i++)
-			dprintk(VIDC_ERR,
-					"type: %d plane: %d addr: %pa size: %d\n",
-					temp->type, i, &temp->device_addr[i],
-					temp->size[i]);
-
+	list_for_each_entry(mbuf, &inst->registeredbufs.list, list)
+		print_vidc_buffer(VIDC_ERR, "buf", inst, mbuf);
 	mutex_unlock(&inst->registeredbufs.lock);
 
 	mutex_lock(&inst->scratchbufs.lock);
 	dprintk(VIDC_ERR, "scratch buffer list:\n");
 	list_for_each_entry(buf, &inst->scratchbufs.list, list)
-		dprintk(VIDC_ERR, "type: %d addr: %pa size: %zu\n",
-				buf->buffer_type, &buf->handle->device_addr,
-				buf->handle->size);
+		dprintk(VIDC_ERR, "type: %d addr: %x size: %u\n",
+				buf->buffer_type, buf->smem.device_addr,
+				buf->smem.size);
 	mutex_unlock(&inst->scratchbufs.lock);
 
 	mutex_lock(&inst->persistbufs.lock);
 	dprintk(VIDC_ERR, "persist buffer list:\n");
 	list_for_each_entry(buf, &inst->persistbufs.list, list)
-		dprintk(VIDC_ERR, "type: %d addr: %pa size: %zu\n",
-				buf->buffer_type, &buf->handle->device_addr,
-				buf->handle->size);
+		dprintk(VIDC_ERR, "type: %d addr: %x size: %u\n",
+				buf->buffer_type, buf->smem.device_addr,
+				buf->smem.size);
 	mutex_unlock(&inst->persistbufs.lock);
 
 	mutex_lock(&inst->outputbufs.lock);
 	dprintk(VIDC_ERR, "dpb buffer list:\n");
 	list_for_each_entry(buf, &inst->outputbufs.list, list)
-		dprintk(VIDC_ERR, "type: %d addr: %pa size: %zu\n",
-				buf->buffer_type, &buf->handle->device_addr,
-				buf->handle->size);
+		dprintk(VIDC_ERR, "type: %d addr: %x size: %u\n",
+				buf->buffer_type, buf->smem.device_addr,
+				buf->smem.size);
 	mutex_unlock(&inst->outputbufs.lock);
 }
 
@@ -5658,3 +5646,540 @@
 	return VENUS_BUFFER_SIZE(COLOR_FMT_NV12_BPP10_UBWC, width, height);
 }
 
+
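+/*
+ * Debug helpers: dump the identifying fields of a buffer (fds, offsets,
+ * device addresses, sizes, flags, refcounts) in its three representations.
+ * All of them are no-ops unless `tag` is enabled in msm_vidc_debug, e.g.:
+ *
+ *	print_vidc_buffer(VIDC_DBG, "qbuf", inst, mbuf);
+ */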
+void print_vidc_buffer(u32 tag, const char *str, struct msm_vidc_inst *inst,
+		struct msm_vidc_buffer *mbuf)
+{
+	struct vb2_buffer *vb2 = NULL;
+
+	if (!(tag & msm_vidc_debug) || !inst || !mbuf)
+		return;
+
+	vb2 = &mbuf->vvb.vb2_buf;
+
+	if (vb2->num_planes == 1)
+		dprintk(tag,
+			"%s: %s: %x : idx %2d fd %d off %d daddr %x size %d filled %d flags 0x%x ts %lld refcnt %d\n",
+			str, vb2->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
+			"OUTPUT" : "CAPTURE", hash32_ptr(inst->session),
+			vb2->index, vb2->planes[0].m.fd,
+			vb2->planes[0].data_offset, mbuf->smem[0].device_addr,
+			vb2->planes[0].length, vb2->planes[0].bytesused,
+			mbuf->vvb.flags, mbuf->vvb.vb2_buf.timestamp,
+			mbuf->smem[0].refcount);
+	else
+		dprintk(tag,
+			"%s: %s: %x : idx %2d fd %d off %d daddr %x size %d filled %d flags 0x%x ts %lld refcnt %d, extradata: fd %d off %d daddr %x size %d filled %d refcnt %d\n",
+			str, vb2->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
+			"OUTPUT" : "CAPTURE", hash32_ptr(inst->session),
+			vb2->index, vb2->planes[0].m.fd,
+			vb2->planes[0].data_offset, mbuf->smem[0].device_addr,
+			vb2->planes[0].length, vb2->planes[0].bytesused,
+			mbuf->vvb.flags, mbuf->vvb.vb2_buf.timestamp,
+			mbuf->smem[0].refcount, vb2->planes[1].m.fd,
+			vb2->planes[1].data_offset, mbuf->smem[1].device_addr,
+			vb2->planes[1].length, vb2->planes[1].bytesused,
+			mbuf->smem[1].refcount);
+}
+
+void print_vb2_buffer(u32 tag, const char *str, struct msm_vidc_inst *inst,
+		struct vb2_buffer *vb2)
+{
+	if (!(tag & msm_vidc_debug) || !inst || !vb2)
+		return;
+
+	if (vb2->num_planes == 1)
+		dprintk(tag,
+			"%s: %s: %x : idx %2d fd %d off %d size %d filled %d\n",
+			str, vb2->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
+			"OUTPUT" : "CAPTURE", hash32_ptr(inst->session),
+			vb2->index, vb2->planes[0].m.fd,
+			vb2->planes[0].data_offset, vb2->planes[0].length,
+			vb2->planes[0].bytesused);
+	else
+		dprintk(tag,
+			"%s: %s: %x : idx %2d fd %d off %d size %d filled %d, extradata: fd %d off %d size %d\n",
+			str, vb2->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
+			"OUTPUT" : "CAPTURE", hash32_ptr(inst->session),
+			vb2->index, vb2->planes[0].m.fd,
+			vb2->planes[0].data_offset, vb2->planes[0].length,
+			vb2->planes[0].bytesused, vb2->planes[1].m.fd,
+			vb2->planes[1].data_offset, vb2->planes[1].length);
+}
+
+void print_v4l2_buffer(u32 tag, const char *str, struct msm_vidc_inst *inst,
+		struct v4l2_buffer *v4l2)
+{
+	if (!(tag & msm_vidc_debug) || !inst || !v4l2)
+		return;
+
+	if (v4l2->length == 1)
+		dprintk(tag,
+			"%s: %s: %x : idx %2d fd %d off %d size %d filled %d\n",
+			str, v4l2->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
+			"OUTPUT" : "CAPTURE", hash32_ptr(inst->session),
+			v4l2->index, v4l2->m.planes[0].m.fd,
+			v4l2->m.planes[0].data_offset,
+			v4l2->m.planes[0].length,
+			v4l2->m.planes[0].bytesused);
+	else
+		dprintk(tag,
+			"%s: %s: %x : idx %2d fd %d off %d size %d filled %d, extradata: fd %d off %d size %d\n",
+			str, v4l2->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
+			"OUTPUT" : "CAPTURE", hash32_ptr(inst->session),
+			v4l2->index, v4l2->m.planes[0].m.fd,
+			v4l2->m.planes[0].data_offset,
+			v4l2->m.planes[0].length,
+			v4l2->m.planes[0].bytesused,
+			v4l2->m.planes[1].m.fd,
+			v4l2->m.planes[1].data_offset,
+			v4l2->m.planes[1].length);
+}
+
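+/*
+ * Buffer matching helpers.  The same client buffer can be identified in
+ * three ways: by its vb2 plane tuples (fd/offset/length), by the
+ * underlying dma_buf addresses (stable across processes even when fds are
+ * duplicated), or by the device addresses the firmware reports back.
+ */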
+bool msm_comm_compare_vb2_plane(struct msm_vidc_inst *inst,
+		struct msm_vidc_buffer *mbuf, struct vb2_buffer *vb2, u32 i)
+{
+	struct vb2_buffer *vb;
+
+	if (!inst || !mbuf || !vb2) {
+		dprintk(VIDC_ERR, "%s: invalid params, %pK %pK %pK\n",
+			__func__, inst, mbuf, vb2);
+		return false;
+	}
+
+	vb = &mbuf->vvb.vb2_buf;
+	if (vb->planes[i].m.fd == vb2->planes[i].m.fd &&
+		vb->planes[i].data_offset == vb2->planes[i].data_offset &&
+		vb->planes[i].length == vb2->planes[i].length) {
+		return true;
+	}
+
+	return false;
+}
+
+bool msm_comm_compare_vb2_planes(struct msm_vidc_inst *inst,
+		struct msm_vidc_buffer *mbuf, struct vb2_buffer *vb2)
+{
+	int i = 0;
+	struct vb2_buffer *vb;
+
+	if (!inst || !mbuf || !vb2) {
+		dprintk(VIDC_ERR, "%s: invalid params, %pK %pK %pK\n",
+			__func__, inst, mbuf, vb2);
+		return false;
+	}
+
+	vb = &mbuf->vvb.vb2_buf;
+
+	if (vb->num_planes != vb2->num_planes)
+		return false;
+
+	for (i = 0; i < vb->num_planes; i++) {
+		if (!msm_comm_compare_vb2_plane(inst, mbuf, vb2, i))
+			return false;
+	}
+
+	return true;
+}
+
+bool msm_comm_compare_dma_plane(struct msm_vidc_inst *inst,
+		struct msm_vidc_buffer *mbuf, unsigned long *dma_planes, u32 i)
+{
+	if (!inst || !mbuf || !dma_planes) {
+		dprintk(VIDC_ERR, "%s: invalid params, %pK %pK %pK\n",
+			__func__, inst, mbuf, dma_planes);
+		return false;
+	}
+
+	if ((unsigned long)mbuf->smem[i].dma_buf == dma_planes[i])
+		return true;
+
+	return false;
+}
+
+bool msm_comm_compare_dma_planes(struct msm_vidc_inst *inst,
+		struct msm_vidc_buffer *mbuf, unsigned long *dma_planes)
+{
+	int i = 0;
+	struct vb2_buffer *vb;
+
+	if (!inst || !mbuf || !dma_planes) {
+		dprintk(VIDC_ERR, "%s: invalid params, %pK %pK %pK\n",
+			__func__, inst, mbuf, dma_planes);
+		return false;
+	}
+
+	vb = &mbuf->vvb.vb2_buf;
+	for (i = 0; i < vb->num_planes; i++) {
+		if (!msm_comm_compare_dma_plane(inst, mbuf, dma_planes, i))
+			return false;
+	}
+
+	return true;
+}
+
+bool msm_comm_compare_device_plane(struct msm_vidc_buffer *mbuf,
+		u32 *planes, u32 i)
+{
+	if (!mbuf || !planes) {
+		dprintk(VIDC_ERR, "%s: invalid params, %pK %pK\n",
+			__func__, mbuf, planes);
+		return false;
+	}
+
+	if (mbuf->smem[i].device_addr == planes[i])
+		return true;
+
+	return false;
+}
+
+bool msm_comm_compare_device_planes(struct msm_vidc_buffer *mbuf,
+		u32 *planes)
+{
+	int i = 0;
+
+	if (!mbuf || !planes)
+		return false;
+
+	for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) {
+		if (!msm_comm_compare_device_plane(mbuf, planes, i))
+			return false;
+	}
+
+	return true;
+}
+
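+/*
+ * Find the registered buffer whose per-plane device addresses all match
+ * the addresses reported by the firmware, typically from an EBD/FBD done
+ * event.
+ */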
+struct msm_vidc_buffer *msm_comm_get_buffer_using_device_planes(
+		struct msm_vidc_inst *inst, u32 *planes)
+{
+	struct msm_vidc_buffer *mbuf;
+	bool found = false;
+
+	mutex_lock(&inst->registeredbufs.lock);
+	list_for_each_entry(mbuf, &inst->registeredbufs.list, list) {
+		if (msm_comm_compare_device_planes(mbuf, planes)) {
+			found = true;
+			break;
+		}
+	}
+	mutex_unlock(&inst->registeredbufs.lock);
+	if (!found) {
+		dprintk(VIDC_ERR,
+			"%s: data_addr %x, extradata_addr %x not found\n",
+			__func__, planes[0], planes[1]);
+		mbuf = NULL;
+	}
+
+	return mbuf;
+}
+
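+/*
+ * Flush a single tracked buffer back to the client: look up its vb2
+ * buffer, zero out bytesused and signal it done.
+ */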
+int msm_comm_flush_vidc_buffer(struct msm_vidc_inst *inst,
+		struct msm_vidc_buffer *mbuf)
+{
+	int rc;
+	struct vb2_buffer *vb;
+
+	if (!inst || !mbuf) {
+		dprintk(VIDC_ERR, "%s: invalid params %pK %pK\n",
+			__func__, inst, mbuf);
+		return -EINVAL;
+	}
+
+	vb = msm_comm_get_vb_using_vidc_buffer(inst, mbuf);
+	if (!vb) {
+		print_vidc_buffer(VIDC_ERR,
+			"vb not found for buf", inst, mbuf);
+		return -EINVAL;
+	}
+
+	vb->planes[0].bytesused = 0;
+	rc = msm_comm_vb2_buffer_done(inst, vb);
+	if (rc)
+		print_vidc_buffer(VIDC_ERR,
+			"vb2_buffer_done failed for", inst, mbuf);
+
+	return rc;
+}
+
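+/*
+ * Look up, or create on first qbuf, the driver-side tracking entry for a
+ * vb2 buffer.  Matching is done on dma_buf addresses rather than on fds.
+ * Every plane is mapped twice because two events are expected per buffer:
+ * the done (EBD/FBD) event and the release-buffer-reference (RBR) event.
+ * Returns ERR_PTR(-EEXIST) when a decoder buffer must wait for a pending
+ * RBR before it can be queued again.
+ */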
+struct msm_vidc_buffer *msm_comm_get_vidc_buffer(struct msm_vidc_inst *inst,
+		struct vb2_buffer *vb2)
+{
+	int rc = 0;
+	struct vb2_v4l2_buffer *vbuf;
+	struct vb2_buffer *vb;
+	unsigned long dma_planes[VB2_MAX_PLANES] = {0};
+	struct msm_vidc_buffer *mbuf;
+	bool found = false;
+	int i;
+
+	if (!inst || !vb2) {
+		dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
+		return NULL;
+	}
+
+	for (i = 0; i < vb2->num_planes; i++) {
+		/*
+		 * always compare dma_buf addresses, which are guaranteed to
+		 * be the same across processes (duplicated fds).
+		 */
+		dma_planes[i] = (unsigned long)dma_buf_get(vb2->planes[i].m.fd);
+		dma_buf_put((struct dma_buf *)dma_planes[i]);
+	}
+
+	mutex_lock(&inst->registeredbufs.lock);
+	list_for_each_entry(mbuf, &inst->registeredbufs.list, list) {
+		if (msm_comm_compare_dma_planes(inst, mbuf, dma_planes)) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		/* this is a new vb2_buffer */
+		mbuf = kzalloc(sizeof(struct msm_vidc_buffer), GFP_KERNEL);
+		if (!mbuf) {
+			dprintk(VIDC_ERR, "%s: alloc msm_vidc_buffer failed\n",
+				__func__);
+			rc = -ENOMEM;
+			goto exit;
+		}
+	}
+
+	vbuf = to_vb2_v4l2_buffer(vb2);
+	memcpy(&mbuf->vvb, vbuf, sizeof(struct vb2_v4l2_buffer));
+	vb = &mbuf->vvb.vb2_buf;
+
+	for (i = 0; i < vb->num_planes; i++) {
+		mbuf->smem[i].buffer_type = get_hal_buffer_type(vb->type, i);
+		mbuf->smem[i].fd = vb->planes[i].m.fd;
+		mbuf->smem[i].offset = vb->planes[i].data_offset;
+		mbuf->smem[i].size = vb->planes[i].length;
+		rc = msm_smem_map_dma_buf(inst, &mbuf->smem[i]);
+		if (rc) {
+			dprintk(VIDC_ERR, "%s: map failed.\n", __func__);
+			goto exit;
+		}
+		/* increase refcount as we get both fbd and rbr */
+		rc = msm_smem_map_dma_buf(inst, &mbuf->smem[i]);
+		if (rc) {
+			dprintk(VIDC_ERR, "%s: map failed..\n", __func__);
+			goto exit;
+		}
+	}
+
+	/* special handling for decoder */
+	if (inst->session_type == MSM_VIDC_DECODER) {
+		if (found) {
+			rc = -EEXIST;
+		} else {
+			bool found_plane0 = false;
+			struct msm_vidc_buffer *temp;
+			/*
+			 * The client might have queued the same plane[0] with
+			 * a different plane[1].  Search for plane[0]; if it is
+			 * found, don't queue the buffer now, it will be queued
+			 * when the RBR event arrives.
+			 */
+			list_for_each_entry(temp, &inst->registeredbufs.list,
+						list) {
+				if (msm_comm_compare_dma_plane(inst, temp,
+						dma_planes, 0)) {
+					found_plane0 = true;
+					break;
+				}
+			}
+			if (found_plane0)
+				rc = -EEXIST;
+		}
+	}
+
+	/* add the new buffer to list */
+	if (!found)
+		list_add_tail(&mbuf->list, &inst->registeredbufs.list);
+
+	mutex_unlock(&inst->registeredbufs.lock);
+	if (rc == -EEXIST) {
+		print_vidc_buffer(VIDC_DBG, "qbuf upon rbr", inst, mbuf);
+		return ERR_PTR(rc);
+	}
+
+	return mbuf;
+
+exit:
+	mutex_unlock(&inst->registeredbufs.lock);
+	dprintk(VIDC_ERR, "%s: rc %d\n", __func__, rc);
+	msm_comm_unmap_vidc_buffer(inst, mbuf);
+	if (!found)
+		kfree(mbuf);
+
+	return ERR_PTR(rc);
+}
+
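+/*
+ * Called on dqbuf: drop the mapping taken for the done event and, unless
+ * the buffer came back read-only (an RBR event will follow in that case),
+ * the second mapping as well.  The tracking entry itself is freed only
+ * once plane[0]'s refcount drops to zero.
+ */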
+void msm_comm_put_vidc_buffer(struct msm_vidc_inst *inst,
+		struct msm_vidc_buffer *mbuf)
+{
+	struct msm_vidc_buffer *temp;
+	bool found = false;
+	int i = 0;
+
+	if (!inst || !mbuf) {
+		dprintk(VIDC_ERR, "%s: invalid params %pK %pK\n",
+			__func__, inst, mbuf);
+		return;
+	}
+
+	mutex_lock(&inst->registeredbufs.lock);
+	/* make sure mbuf was not removed in the meantime */
+	list_for_each_entry(temp, &inst->registeredbufs.list, list) {
+		if (msm_comm_compare_vb2_planes(inst, mbuf,
+				&temp->vvb.vb2_buf)) {
+			found = true;
+			break;
+		}
+	}
+	if (!found) {
+		print_vidc_buffer(VIDC_ERR, "buf was removed", inst, mbuf);
+		goto unlock;
+	}
+
+	print_vidc_buffer(VIDC_DBG, "dqbuf", inst, mbuf);
+	for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) {
+		if (msm_smem_unmap_dma_buf(inst, &mbuf->smem[i]))
+			print_vidc_buffer(VIDC_ERR,
+				"dqbuf: unmap failed.", inst, mbuf);
+
+		if (!(mbuf->vvb.flags & V4L2_QCOM_BUF_FLAG_READONLY)) {
+			/* rbr won't come for this buffer */
+			if (msm_smem_unmap_dma_buf(inst, &mbuf->smem[i]))
+				print_vidc_buffer(VIDC_ERR,
+					"dqbuf: unmap failed..", inst, mbuf);
+		} /* else RBR event expected */
+	}
+	/*
+	 * Remove the entry if plane[0]'s refcount is zero.  A non-zero
+	 * refcount means the client has re-queued the same buffer, so
+	 * the entry must be kept.
+	 */
+	if (!mbuf->smem[0].refcount) {
+		list_del(&mbuf->list);
+		kfree(mbuf);
+		mbuf = NULL;
+	}
+unlock:
+	mutex_unlock(&inst->registeredbufs.lock);
+}
+
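+/*
+ * Handle the firmware's release-buffer-reference event: notify the
+ * client, drop one mapping per plane and, if the client has meanwhile
+ * re-queued the same buffer, either flush it (while flushing or
+ * reconfiguring) or queue it back to the hardware.
+ */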
+void handle_release_buffer_reference(struct msm_vidc_inst *inst, u32 *planes)
+{
+	int rc = 0;
+	struct msm_vidc_buffer *mbuf = NULL;
+	bool found = false;
+	int i = 0;
+
+	mutex_lock(&inst->registeredbufs.lock);
+	list_for_each_entry(mbuf, &inst->registeredbufs.list, list) {
+		if (msm_comm_compare_device_planes(mbuf, planes)) {
+			found = true;
+			break;
+		}
+	}
+	if (found) {
+		msm_vidc_queue_rbr_event(inst,
+			mbuf->vvb.vb2_buf.planes[0].m.fd,
+			mbuf->vvb.vb2_buf.planes[0].data_offset);
+
+		for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) {
+			if (msm_smem_unmap_dma_buf(inst, &mbuf->smem[i]))
+				print_vidc_buffer(VIDC_ERR,
+					"rbr unmap failed.", inst, mbuf);
+		}
+		/* refcount is not zero if client queued the same buffer */
+		if (!mbuf->smem[0].refcount) {
+			list_del(&mbuf->list);
+			kfree(mbuf);
+			mbuf = NULL;
+		}
+	} else {
+		dprintk(VIDC_ERR,
+			"%s: data_addr %x extradata_addr %x not found\n",
+			__func__, planes[0], planes[1]);
+		goto unlock;
+	}
+
+	/*
+	 * 1. The client might have pushed the same planes, in which case mbuf
+	 *    is the same, the refcounts stay positive and the buffer was not
+	 *    removed from the registeredbufs list.
+	 * 2. The client might have pushed the same planes[0] but a different
+	 *    planes[1], in which case mbuf is different.
+	 * 3. In either case we can search for mbuf->smem[0].device_addr in
+	 *    the list and, if found, queue it to the video hw (unless
+	 *    flushing).
+	 */
+	found = false;
+	list_for_each_entry(mbuf, &inst->registeredbufs.list, list) {
+		if (msm_comm_compare_device_plane(mbuf, planes, 0)) {
+			found = true;
+			break;
+		}
+	}
+	if (!found)
+		goto unlock;
+
+	/* found means client queued the buffer already */
+	if (inst->in_reconfig || inst->in_flush) {
+		print_vidc_buffer(VIDC_DBG, "rbr flush buf", inst, mbuf);
+		msm_comm_flush_vidc_buffer(inst, mbuf);
+		msm_comm_unmap_vidc_buffer(inst, mbuf);
+		/* remove from list */
+		list_del(&mbuf->list);
+		kfree(mbuf);
+		mbuf = NULL;
+
+		/* don't queue the buffer */
+		found = false;
+	}
+unlock:
+	mutex_unlock(&inst->registeredbufs.lock);
+
+	if (found) {
+		print_vidc_buffer(VIDC_DBG, "rbr qbuf", inst, mbuf);
+		rc = msm_comm_qbuf(inst, mbuf);
+		if (rc)
+			print_vidc_buffer(VIDC_ERR,
+				"rbr qbuf failed", inst, mbuf);
+	}
+}
+
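+/*
+ * Forcibly unmap every plane regardless of the current refcount; used on
+ * flush and error paths where no further firmware events are expected.
+ */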
+int msm_comm_unmap_vidc_buffer(struct msm_vidc_inst *inst,
+		struct msm_vidc_buffer *mbuf)
+{
+	int rc = 0, i;
+
+	if (!inst || !mbuf) {
+		dprintk(VIDC_ERR, "%s: invalid params %pK %pK\n",
+			__func__, inst, mbuf);
+		return -EINVAL;
+	}
+	if (mbuf->vvb.vb2_buf.num_planes > VIDEO_MAX_PLANES) {
+		dprintk(VIDC_ERR, "%s: invalid num_planes %d\n", __func__,
+			mbuf->vvb.vb2_buf.num_planes);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) {
+		u32 refcount = mbuf->smem[i].refcount;
+
+		while (refcount) {
+			if (msm_smem_unmap_dma_buf(inst, &mbuf->smem[i]))
+				print_vidc_buffer(VIDC_ERR,
+					"unmap failed for buf", inst, mbuf);
+			refcount--;
+		}
+	}
+
+	return rc;
+}
+
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.h b/drivers/media/platform/msm/vidc/msm_vidc_common.h
index 7534593..5c653f5 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.h
@@ -14,6 +14,7 @@
 #ifndef _MSM_VIDC_COMMON_H_
 #define _MSM_VIDC_COMMON_H_
 #include "msm_vidc_internal.h"
+
 struct vb2_buf_entry {
 	struct list_head list;
 	struct vb2_buffer *vb;
@@ -28,6 +29,8 @@
 	LOAD_CALC_IGNORE_NON_REALTIME_LOAD = 1 << 2,
 };
 
+enum hal_buffer get_hal_buffer_type(unsigned int type,
+		unsigned int plane_num);
 struct msm_vidc_core *get_vidc_core(int core_id);
 const struct msm_vidc_format *msm_comm_get_pixel_fmt_index(
 	const struct msm_vidc_format fmt[], int size, int index, int fmt_type);
@@ -41,16 +44,18 @@
 	enum hal_property ptype, void *pdata);
 int msm_comm_try_get_prop(struct msm_vidc_inst *inst,
 	enum hal_property ptype, union hal_get_property *hprop);
+int msm_comm_set_recon_buffers(struct msm_vidc_inst *inst);
 int msm_comm_set_scratch_buffers(struct msm_vidc_inst *inst);
 int msm_comm_set_persist_buffers(struct msm_vidc_inst *inst);
 int msm_comm_set_output_buffers(struct msm_vidc_inst *inst);
 int msm_comm_queue_output_buffers(struct msm_vidc_inst *inst);
-int msm_comm_qbuf(struct msm_vidc_inst *inst, struct vb2_buffer *vb);
+int msm_comm_qbuf(struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf);
 void msm_comm_flush_dynamic_buffers(struct msm_vidc_inst *inst);
 int msm_comm_flush(struct msm_vidc_inst *inst, u32 flags);
 int msm_comm_release_scratch_buffers(struct msm_vidc_inst *inst,
 					bool check_for_reuse);
 int msm_comm_release_persist_buffers(struct msm_vidc_inst *inst);
+int msm_comm_release_recon_buffers(struct msm_vidc_inst *inst);
 int msm_comm_release_output_buffers(struct msm_vidc_inst *inst,
 	bool force_release);
 void msm_comm_validate_output_buffers(struct msm_vidc_inst *inst);
@@ -67,14 +72,12 @@
 int msm_comm_kill_session(struct msm_vidc_inst *inst);
 enum multi_stream msm_comm_get_stream_output_mode(struct msm_vidc_inst *inst);
 enum hal_buffer msm_comm_get_hal_output_buffer(struct msm_vidc_inst *inst);
-struct msm_smem *msm_comm_smem_alloc(struct msm_vidc_inst *inst,
-			size_t size, u32 align, u32 flags,
-			enum hal_buffer buffer_type, int map_kernel);
-void msm_comm_smem_free(struct msm_vidc_inst *inst, struct msm_smem *mem);
+int msm_comm_smem_alloc(struct msm_vidc_inst *inst, size_t size, u32 align,
+		u32 flags, enum hal_buffer buffer_type, int map_kernel,
+		struct msm_smem *smem);
+void msm_comm_smem_free(struct msm_vidc_inst *inst, struct msm_smem *smem);
 int msm_comm_smem_cache_operations(struct msm_vidc_inst *inst,
 		struct msm_smem *mem, enum smem_cache_ops cache_ops);
-struct msm_smem *msm_comm_smem_user_to_kernel(struct msm_vidc_inst *inst,
-			int fd, u32 offset, enum hal_buffer buffer_type);
 enum hal_video_codec get_hal_codec(int fourcc);
 enum hal_domain get_hal_domain(int session_type);
 int msm_comm_check_core_init(struct msm_vidc_core *core);
@@ -98,10 +101,48 @@
 int msm_comm_v4l2_to_hal(int id, int value);
 int msm_comm_hal_to_v4l2(int id, int value);
 int msm_comm_session_continue(void *instance);
+enum hal_uncompressed_format msm_comm_get_hal_uncompressed(int fourcc);
 u32 get_frame_size_nv12(int plane, u32 height, u32 width);
 u32 get_frame_size_nv12_ubwc(int plane, u32 height, u32 width);
 u32 get_frame_size_rgba(int plane, u32 height, u32 width);
 u32 get_frame_size_nv21(int plane, u32 height, u32 width);
 u32 get_frame_size_tp10_ubwc(int plane, u32 height, u32 width);
 void msm_comm_set_use_sys_cache(struct msm_vidc_inst *inst);
+struct vb2_buffer *msm_comm_get_vb_using_vidc_buffer(
+		struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf);
+struct msm_vidc_buffer *msm_comm_get_buffer_using_device_planes(
+		struct msm_vidc_inst *inst, u32 *planes);
+struct msm_vidc_buffer *msm_comm_get_vidc_buffer(struct msm_vidc_inst *inst,
+		struct vb2_buffer *vb2);
+void msm_comm_put_vidc_buffer(struct msm_vidc_inst *inst,
+		struct msm_vidc_buffer *mbuf);
+void handle_release_buffer_reference(struct msm_vidc_inst *inst, u32 *planes);
+int msm_comm_vb2_buffer_done(struct msm_vidc_inst *inst,
+		struct vb2_buffer *vb);
+int msm_comm_flush_vidc_buffer(struct msm_vidc_inst *inst,
+		struct msm_vidc_buffer *mbuf);
+int msm_comm_unmap_vidc_buffer(struct msm_vidc_inst *inst,
+		struct msm_vidc_buffer *mbuf);
+bool msm_comm_compare_dma_plane(struct msm_vidc_inst *inst,
+		struct msm_vidc_buffer *mbuf, unsigned long *dma_planes, u32 i);
+bool msm_comm_compare_dma_planes(struct msm_vidc_inst *inst,
+		struct msm_vidc_buffer *mbuf, unsigned long *dma_planes);
+bool msm_comm_compare_vb2_plane(struct msm_vidc_inst *inst,
+		struct msm_vidc_buffer *mbuf, struct vb2_buffer *vb2, u32 i);
+bool msm_comm_compare_vb2_planes(struct msm_vidc_inst *inst,
+		struct msm_vidc_buffer *mbuf, struct vb2_buffer *vb2);
+bool msm_comm_compare_device_plane(struct msm_vidc_buffer *mbuf,
+		u32 *planes, u32 i);
+bool msm_comm_compare_device_planes(struct msm_vidc_buffer *mbuf,
+		u32 *planes);
+int msm_comm_qbuf_cache_operations(struct msm_vidc_inst *inst,
+		struct v4l2_buffer *b);
+int msm_comm_dqbuf_cache_operations(struct msm_vidc_inst *inst,
+			struct v4l2_buffer *b);
+void print_vidc_buffer(u32 tag, const char *str, struct msm_vidc_inst *inst,
+		struct msm_vidc_buffer *mbuf);
+void print_vb2_buffer(u32 tag, const char *str, struct msm_vidc_inst *inst,
+		struct vb2_buffer *vb2);
+void print_v4l2_buffer(u32 tag, const char *str, struct msm_vidc_inst *inst,
+		struct v4l2_buffer *v4l2);
 #endif
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
index c197776..58c3b0f 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
@@ -25,10 +25,8 @@
 int msm_vidc_fw_debug = 0x18;
 int msm_vidc_fw_debug_mode = 1;
 int msm_vidc_fw_low_power_mode = 1;
-int msm_vidc_hw_rsp_timeout = 2000;
 bool msm_vidc_fw_coverage = !true;
 bool msm_vidc_sys_idle_indicator = !true;
-int msm_vidc_firmware_unload_delay = 15000;
 bool msm_vidc_thermal_mitigation_disabled = !true;
 bool msm_vidc_clock_scaling = true;
 bool msm_vidc_debug_timeout = !true;
@@ -204,11 +202,8 @@
 	__debugfs_create(u32, "fw_low_power_mode",
 			&msm_vidc_fw_low_power_mode) &&
 	__debugfs_create(u32, "debug_output", &msm_vidc_debug_out) &&
-	__debugfs_create(u32, "hw_rsp_timeout", &msm_vidc_hw_rsp_timeout) &&
 	__debugfs_create(bool, "sys_idle_indicator",
 			&msm_vidc_sys_idle_indicator) &&
-	__debugfs_create(u32, "firmware_unload_delay",
-			&msm_vidc_firmware_unload_delay) &&
 	__debugfs_create(bool, "disable_thermal_mitigation",
 			&msm_vidc_thermal_mitigation_disabled) &&
 	__debugfs_create(bool, "clock_scaling",
@@ -270,7 +265,7 @@
 
 static int publish_unreleased_reference(struct msm_vidc_inst *inst)
 {
-	struct buffer_info *temp = NULL;
+	struct msm_vidc_buffer *temp = NULL;
 
 	if (!inst) {
 		dprintk(VIDC_ERR, "%s: invalid param\n", __func__);
@@ -282,14 +277,15 @@
 
 		mutex_lock(&inst->registeredbufs.lock);
 		list_for_each_entry(temp, &inst->registeredbufs.list, list) {
-			if (temp->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
-			!temp->inactive && atomic_read(&temp->ref_count)) {
+			struct vb2_buffer *vb2 = &temp->vvb.vb2_buf;
+
+			if (vb2->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
 				write_str(&dbg_buf,
-				"\tpending buffer: %#lx fd[0] = %d ref_count = %d held by: %s\n",
-				temp->device_addr[0],
-				temp->fd[0],
-				atomic_read(&temp->ref_count),
-				DYNAMIC_BUF_OWNER(temp));
+				"\tbuffer: %#x fd[0] = %d size %d refcount = %d\n",
+				temp->smem[0].device_addr,
+				vb2->planes[0].m.fd,
+				vb2->planes[0].length,
+				temp->smem[0].refcount);
 			}
 		}
 		mutex_unlock(&inst->registeredbufs.lock);
@@ -408,18 +404,14 @@
 
 	switch (e) {
 	case MSM_VIDC_DEBUGFS_EVENT_ETB:
-		mutex_lock(&inst->lock);
 		inst->count.etb++;
-		mutex_unlock(&inst->lock);
 		if (inst->count.ebd && inst->count.ftb > inst->count.fbd) {
 			d->pdata[FRAME_PROCESSING].name[0] = '\0';
 			tic(inst, FRAME_PROCESSING, a);
 		}
 	break;
 	case MSM_VIDC_DEBUGFS_EVENT_EBD:
-		mutex_lock(&inst->lock);
 		inst->count.ebd++;
-		mutex_unlock(&inst->lock);
 		if (inst->count.ebd && inst->count.ebd == inst->count.etb) {
 			toc(inst, FRAME_PROCESSING);
 			dprintk(VIDC_PROF, "EBD: FW needs input buffers\n");
@@ -436,6 +428,7 @@
 	}
 	break;
 	case MSM_VIDC_DEBUGFS_EVENT_FBD:
+		inst->count.fbd++;
 		inst->debug.samples++;
 		if (inst->count.ebd && inst->count.fbd == inst->count.ftb) {
 			toc(inst, FRAME_PROCESSING);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.h b/drivers/media/platform/msm/vidc/msm_vidc_debug.h
index f4c851a..9a798b5 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.h
@@ -57,10 +57,8 @@
 extern int msm_vidc_fw_debug;
 extern int msm_vidc_fw_debug_mode;
 extern int msm_vidc_fw_low_power_mode;
-extern int msm_vidc_hw_rsp_timeout;
 extern bool msm_vidc_fw_coverage;
 extern bool msm_vidc_sys_idle_indicator;
-extern int msm_vidc_firmware_unload_delay;
 extern bool msm_vidc_thermal_mitigation_disabled;
 extern bool msm_vidc_clock_scaling;
 extern bool msm_vidc_debug_timeout;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index 5edd3d5..22772ef 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -33,7 +33,6 @@
 #include <media/videobuf2-v4l2.h>
 #include <media/msm_vidc.h>
 #include <media/msm_media_info.h>
-
 #include "vidc_hfi_api.h"
 
 #define MSM_VIDC_DRV_NAME "msm_vidc_driver"
@@ -141,17 +140,44 @@
 
 struct vidc_freq_data {
 	struct list_head list;
-	ion_phys_addr_t device_addr;
+	u32 device_addr;
 	unsigned long freq;
 };
 
+struct recon_buf {
+	struct list_head list;
+	u32 buffer_index;
+	u32 CR;
+	u32 CF;
+};
+
 struct internal_buf {
 	struct list_head list;
 	enum hal_buffer buffer_type;
-	struct msm_smem *handle;
+	struct msm_smem smem;
 	enum buffer_owner buffer_ownership;
 };
 
+struct msm_vidc_common_data {
+	char key[128];
+	int value;
+};
+
+struct msm_vidc_codec_data {
+	u32 fourcc;
+	enum session_type session_type;
+	int vpp_cycles;
+	int vsp_cycles;
+	int low_power_cycles;
+};
+
+struct msm_vidc_platform_data {
+	struct msm_vidc_common_data *common_data;
+	unsigned int common_data_length;
+	struct msm_vidc_codec_data *codec_data;
+	unsigned int codec_data_length;
+};
+
 struct msm_vidc_format {
 	char name[MAX_NAME_LENGTH];
 	u8 description[32];
@@ -228,10 +254,13 @@
 	unsigned long min_freq;
 	unsigned long curr_freq;
 	u32 operating_rate;
-	struct clock_profile_entry *entry;
+	struct msm_vidc_codec_data *entry;
 	u32 core_id;
+	u32 dpb_fourcc;
+	u32 opb_fourcc;
 	enum hal_work_mode work_mode;
 	bool low_latency_mode;
+	bool use_sys_cache;
 };
 
 struct profile_data {
@@ -262,6 +291,7 @@
 	struct mutex lock;
 	int id;
 	struct hfi_device *device;
+	struct msm_vidc_platform_data *platform_data;
 	struct msm_video_device vdev[MSM_VIDC_MAX_DEVICES];
 	struct v4l2_device v4l2_dev;
 	struct list_head instances;
@@ -278,6 +308,7 @@
 	bool smmu_fault_handled;
 	unsigned long min_freq;
 	unsigned long curr_freq;
+	struct vidc_bus_vote_data *vote_data;
 };
 
 struct msm_vidc_inst {
@@ -290,15 +321,15 @@
 	enum instance_state state;
 	struct msm_vidc_format fmts[MAX_PORT_NUM];
 	struct buf_queue bufq[MAX_PORT_NUM];
-	struct msm_vidc_list pendingq;
 	struct msm_vidc_list freqs;
 	struct msm_vidc_list scratchbufs;
 	struct msm_vidc_list persistbufs;
 	struct msm_vidc_list pending_getpropq;
 	struct msm_vidc_list outputbufs;
+	struct msm_vidc_list reconbufs;
 	struct msm_vidc_list registeredbufs;
 	struct buffer_requirements buff_req;
-	void *mem_client;
+	struct smem_client *mem_client;
 	struct v4l2_ctrl_handler ctrl_handler;
 	struct completion completions[SESSION_MSG_END - SESSION_MSG_START + 1];
 	struct v4l2_ctrl **cluster;
@@ -319,13 +350,13 @@
 	struct v4l2_ctrl **ctrls;
 	enum msm_vidc_pixel_depth bit_depth;
 	struct kref kref;
-	u32 buffers_held_in_driver;
-	atomic_t in_flush;
+	bool in_flush;
 	u32 pic_struct;
 	u32 colour_space;
 	u32 profile;
 	u32 level;
 	u32 entropy_mode;
+	struct msm_vidc_codec_data *codec_data;
 };
 
 extern struct msm_vidc_drv *vidc_driver;
@@ -355,56 +386,37 @@
 int msm_vidc_check_scaling_supported(struct msm_vidc_inst *inst);
 void msm_vidc_queue_v4l2_event(struct msm_vidc_inst *inst, int event_type);
 
-struct buffer_info {
+struct msm_vidc_buffer {
 	struct list_head list;
-	int type;
-	int num_planes;
-	int fd[VIDEO_MAX_PLANES];
-	int buff_off[VIDEO_MAX_PLANES];
-	int size[VIDEO_MAX_PLANES];
-	unsigned long uvaddr[VIDEO_MAX_PLANES];
-	ion_phys_addr_t device_addr[VIDEO_MAX_PLANES];
-	struct msm_smem *handle[VIDEO_MAX_PLANES];
-	enum v4l2_memory memory;
-	u32 v4l2_index;
-	bool pending_deletion;
-	atomic_t ref_count;
-	bool dequeued;
-	bool inactive;
-	bool mapped[VIDEO_MAX_PLANES];
-	int same_fd_ref[VIDEO_MAX_PLANES];
-	struct timeval timestamp;
+	struct msm_smem smem[VIDEO_MAX_PLANES];
+	struct vb2_v4l2_buffer vvb;
+	bool deferred;
 };
 
-struct buffer_info *device_to_uvaddr(struct msm_vidc_list *buf_list,
-				ion_phys_addr_t device_addr);
-int buf_ref_get(struct msm_vidc_inst *inst, struct buffer_info *binfo);
-int buf_ref_put(struct msm_vidc_inst *inst, struct buffer_info *binfo);
-int output_buffer_cache_invalidate(struct msm_vidc_inst *inst,
-				struct buffer_info *binfo);
-int qbuf_dynamic_buf(struct msm_vidc_inst *inst,
-			struct buffer_info *binfo);
-int unmap_and_deregister_buf(struct msm_vidc_inst *inst,
-			struct buffer_info *binfo);
-
 void msm_comm_handle_thermal_event(void);
 void *msm_smem_new_client(enum smem_type mtype,
 		void *platform_resources, enum session_type stype);
-struct msm_smem *msm_smem_alloc(void *clt, size_t size, u32 align, u32 flags,
-		enum hal_buffer buffer_type, int map_kernel);
-void msm_smem_free(void *clt, struct msm_smem *mem);
+int msm_smem_alloc(struct smem_client *client,
+		size_t size, u32 align, u32 flags, enum hal_buffer buffer_type,
+		int map_kernel, struct msm_smem *smem);
+int msm_smem_free(void *clt, struct msm_smem *mem);
 void msm_smem_delete_client(void *clt);
-int msm_smem_cache_operations(void *clt, struct msm_smem *mem,
-		enum smem_cache_ops);
-struct msm_smem *msm_smem_user_to_kernel(void *clt, int fd, u32 offset,
-				enum hal_buffer buffer_type);
 struct context_bank_info *msm_smem_get_context_bank(void *clt,
 		bool is_secure, enum hal_buffer buffer_type);
+int msm_smem_map_dma_buf(struct msm_vidc_inst *inst, struct msm_smem *smem);
+int msm_smem_unmap_dma_buf(struct msm_vidc_inst *inst, struct msm_smem *smem);
+void *msm_smem_get_dma_buf(int fd);
+void msm_smem_put_dma_buf(void *dma_buf);
+void *msm_smem_get_handle(struct smem_client *client, void *dma_buf);
+void msm_smem_put_handle(struct smem_client *client, void *handle);
+int msm_smem_cache_operations(struct smem_client *client,
+		void *handle, unsigned long offset, unsigned long size,
+		enum smem_cache_ops cache_op);
 void msm_vidc_fw_unload_handler(struct work_struct *work);
-bool msm_smem_compare_buffers(void *clt, int fd, void *priv);
 /*
  * XXX: normally should be in msm_vidc.h, but that's meant for public APIs,
  * whereas this is private
  */
 int msm_vidc_destroy(struct msm_vidc_inst *inst);
+void *vidc_get_drv_data(struct device *dev);
 #endif
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_platform.c b/drivers/media/platform/msm/vidc/msm_vidc_platform.c
new file mode 100644
index 0000000..25f22c7
--- /dev/null
+++ b/drivers/media/platform/msm/vidc/msm_vidc_platform.c
@@ -0,0 +1,146 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/io.h>
+#include "msm_vidc_internal.h"
+
+#define CODEC_ENTRY(n, p, vsp, vpp, lp) \
+{	\
+	.fourcc = n,		\
+	.session_type = p,	\
+	.vsp_cycles = vsp,	\
+	.vpp_cycles = vpp,	\
+	.low_power_cycles = lp	\
+}
+
+static struct msm_vidc_codec_data default_codec_data[] =  {
+	CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_VIDC_ENCODER, 125, 675, 320),
+	CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_VIDC_DECODER, 125, 675, 320),
+};
+
+static struct msm_vidc_codec_data sdm845_codec_data[] =  {
+	CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_VIDC_ENCODER, 125, 675, 320),
+	CODEC_ENTRY(V4L2_PIX_FMT_HEVC, MSM_VIDC_ENCODER, 125, 675, 320),
+	CODEC_ENTRY(V4L2_PIX_FMT_VP8, MSM_VIDC_ENCODER, 125, 675, 320),
+	CODEC_ENTRY(V4L2_PIX_FMT_MPEG2, MSM_VIDC_DECODER, 50, 200, 200),
+	CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_VIDC_DECODER, 50, 200, 200),
+	CODEC_ENTRY(V4L2_PIX_FMT_HEVC, MSM_VIDC_DECODER, 50, 200, 200),
+	CODEC_ENTRY(V4L2_PIX_FMT_VP8, MSM_VIDC_DECODER, 50, 200, 200),
+	CODEC_ENTRY(V4L2_PIX_FMT_VP9, MSM_VIDC_DECODER, 50, 200, 200),
+};
+
+static struct msm_vidc_common_data default_common_data[] = {
+	{
+		.key = "qcon,never-unload-fw",
+		.value = 1,
+	},
+};
+
+static struct msm_vidc_common_data sdm845_common_data[] = {
+	{
+		.key = "qcon,never-unload-fw",
+		.value = 1,
+	},
+	{
+		.key = "qcom,sw-power-collapse",
+		.value = 1,
+	},
+	{
+		.key = "qcom,max-secure-instances",
+		.value = 5,
+	},
+	{
+		.key = "qcom,max-hw-load",
+		.value = 2563200,
+	},
+	{
+		.key = "qcom,max-hq-mbs-per-frame",
+		.value = 8160,
+	},
+	{
+		.key = "qcom,max-hq-frames-per-sec",
+		.value = 60,
+	},
+	{
+		.key = "qcom,max-b-frame-size",
+		.value = 8160,
+	},
+	{
+		.key = "qcom,max-b-frames-per-sec",
+		.value = 60,
+	},
+	{
+		.key = "qcom,power-collapse-delay",
+		.value = 500,
+	},
+	{
+		.key = "qcom,hw-resp-timeout",
+		.value = 2000,
+	},
+};
+
+
+static struct msm_vidc_platform_data default_data = {
+	.codec_data = default_codec_data,
+	.codec_data_length =  ARRAY_SIZE(default_codec_data),
+	.common_data = default_common_data,
+	.common_data_length =  ARRAY_SIZE(default_common_data),
+};
+
+static struct msm_vidc_platform_data sdm845_data = {
+	.codec_data = sdm845_codec_data,
+	.codec_data_length =  ARRAY_SIZE(sdm845_codec_data),
+	.common_data = sdm845_common_data,
+	.common_data_length =  ARRAY_SIZE(sdm845_common_data),
+};
+
+static const struct of_device_id msm_vidc_dt_match[] = {
+	{
+		.compatible = "qcom,sdm845-vidc",
+		.data = &sdm845_data,
+	},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, msm_vidc_dt_match);
+
+void *vidc_get_drv_data(struct device *dev)
+{
+	struct msm_vidc_platform_data *driver_data = NULL;
+	const struct of_device_id *match;
+
+	if (!IS_ENABLED(CONFIG_OF) || !dev->of_node) {
+		driver_data = &default_data;
+		goto exit;
+	}
+
+	match = of_match_node(msm_vidc_dt_match, dev->of_node);
+
+	if (match)
+		driver_data = (struct msm_vidc_platform_data *)match->data;
+
+exit:
+	return driver_data;
+}
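
As a reading aid, here is a minimal sketch of how a probe path might consume vidc_get_drv_data(); the msm_vidc_probe name, the allocation, and the error handling are illustrative assumptions, not part of this patch:

	static int msm_vidc_probe(struct platform_device *pdev)
	{
		struct msm_vidc_core *core;

		core = devm_kzalloc(&pdev->dev, sizeof(*core), GFP_KERNEL);
		if (!core)
			return -ENOMEM;

		/*
		 * Falls back to default_data when CONFIG_OF is disabled or
		 * the device has no DT node; can still be NULL if the node
		 * matches no entry in msm_vidc_dt_match.
		 */
		core->platform_data = vidc_get_drv_data(&pdev->dev);
		if (!core->platform_data)
			return -EINVAL;

		return read_platform_resources_from_drv_data(core);
	}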
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
index 19ca561..062795f 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
@@ -132,7 +132,6 @@
 	msm_vidc_free_clock_table(res);
 	msm_vidc_free_regulator_table(res);
 	msm_vidc_free_platform_version_table(res);
-	msm_vidc_free_cycles_per_mb_table(res);
 	msm_vidc_free_allowed_clocks_table(res);
 	msm_vidc_free_reg_table(res);
 	msm_vidc_free_qdss_addr_table(res);
@@ -275,12 +274,12 @@
 			"cache-slice-names", c, &vsc->name);
 	}
 
-	res->sys_cache_present = true;
+	res->sys_cache_enabled = true;
 
 	return 0;
 
 err_load_subcache_table_fail:
-	res->sys_cache_present = false;
+	res->sys_cache_enabled = false;
 	subcaches->count = 0;
 	subcaches->subcache_tbl = NULL;
 
@@ -407,118 +406,6 @@
 	return 0;
 }
 
-static int msm_vidc_load_cycles_per_mb_table(
-		struct msm_vidc_platform_resources *res)
-{
-	int rc = 0, i = 0;
-	struct clock_freq_table *clock_freq_tbl = &res->clock_freq_tbl;
-	struct clock_profile_entry *entry = NULL;
-	struct device_node *parent_node = NULL;
-	struct device_node *child_node = NULL;
-	struct platform_device *pdev = res->pdev;
-
-	parent_node = of_find_node_by_name(pdev->dev.of_node,
-			"qcom,clock-freq-tbl");
-	if (!parent_node) {
-		dprintk(VIDC_DBG, "Node qcom,clock-freq-tbl not found.\n");
-		return 0;
-	}
-
-	clock_freq_tbl->count = 0;
-	for_each_child_of_node(parent_node, child_node)
-		clock_freq_tbl->count++;
-
-	if (!clock_freq_tbl->count) {
-		dprintk(VIDC_DBG, "No child nodes in qcom,clock-freq-tbl\n");
-		return 0;
-	}
-
-	clock_freq_tbl->clk_prof_entries = devm_kzalloc(&pdev->dev,
-		sizeof(*clock_freq_tbl->clk_prof_entries) *
-		clock_freq_tbl->count, GFP_KERNEL);
-	if (!clock_freq_tbl->clk_prof_entries) {
-		dprintk(VIDC_DBG, "no memory to allocate clk_prof_entries\n");
-		return -ENOMEM;
-	}
-
-	for_each_child_of_node(parent_node, child_node) {
-
-		if (i >= clock_freq_tbl->count) {
-			dprintk(VIDC_ERR,
-				"qcom,clock-freq-tbl: invalid child node %d, max is %d\n",
-				i, clock_freq_tbl->count);
-			break;
-		}
-
-		entry = &clock_freq_tbl->clk_prof_entries[i];
-		dprintk(VIDC_DBG, "qcom,clock-freq-tbl: profile[%d]\n", i);
-
-		if (of_find_property(child_node, "qcom,codec-mask", NULL)) {
-			rc = of_property_read_u32(child_node,
-					"qcom,codec-mask", &entry->codec_mask);
-			if (rc) {
-				dprintk(VIDC_ERR,
-					"qcom,codec-mask not found\n");
-				goto error;
-			}
-		} else {
-			entry->codec_mask = 0;
-		}
-		dprintk(VIDC_DBG, "codec_mask %#x\n", entry->codec_mask);
-
-		if (of_find_property(child_node,
-				"qcom,vsp-cycles-per-mb", NULL)) {
-			rc = of_property_read_u32(child_node,
-					"qcom,vsp-cycles-per-mb",
-					&entry->vsp_cycles);
-			if (rc) {
-				dprintk(VIDC_ERR,
-					"qcom,vsp-cycles-per-mb not found\n");
-				goto error;
-			}
-		} else {
-			entry->vsp_cycles = 0;
-		}
-		dprintk(VIDC_DBG, "vsp cycles_per_mb %d\n", entry->vsp_cycles);
-
-		if (of_find_property(child_node,
-				"qcom,vpp-cycles-per-mb", NULL)) {
-			rc = of_property_read_u32(child_node,
-					"qcom,vpp-cycles-per-mb",
-					&entry->vpp_cycles);
-			if (rc) {
-				dprintk(VIDC_ERR,
-					"qcom,vpp-cycles-per-mb not found\n");
-				goto error;
-			}
-		} else {
-			entry->vpp_cycles = 0;
-		}
-		dprintk(VIDC_DBG, "vpp cycles_per_mb %d\n", entry->vpp_cycles);
-
-		if (of_find_property(child_node,
-				"qcom,low-power-cycles-per-mb", NULL)) {
-			rc = of_property_read_u32(child_node,
-					"qcom,low-power-cycles-per-mb",
-					&entry->low_power_cycles);
-			if (rc) {
-				dprintk(VIDC_ERR,
-					"qcom,low-power-cycles-per-mb not found\n");
-				goto error;
-			}
-		} else {
-			entry->low_power_cycles = 0;
-		}
-		dprintk(VIDC_DBG, "low_power_factor %d\n",
-				entry->low_power_cycles);
-
-		i++;
-	}
-
-error:
-	return rc;
-}
-
 static int msm_vidc_populate_bus(struct device *dev,
 		struct msm_vidc_platform_resources *res)
 {
@@ -818,6 +705,83 @@
 	return rc;
 }
 
+static int find_key_value(struct msm_vidc_platform_data *platform_data,
+	const char *key)
+{
+	int i = 0;
+	struct msm_vidc_common_data *common_data = platform_data->common_data;
+	int size = platform_data->common_data_length;
+
+	for (i = 0; i < size; i++) {
+		if (!strcmp(common_data[i].key, key))
+			return common_data[i].value;
+	}
+	return 0;
+}
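+	/*
+	 * Note: find_key_value() returns 0 when a key is absent, so
+	 * optional keys simply read as 0/false. Worked example against
+	 * the sdm845_common_data table in msm_vidc_platform.c:
+	 *
+	 *   find_key_value(&sdm845_data, "qcom,hw-resp-timeout") -> 2000
+	 *   find_key_value(&sdm845_data, "qcom,debug-timeout")   -> 0
+	 *
+	 * ("qcom,debug-timeout" is not listed there, so it defaults to 0.)
+	 */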
+
+int read_platform_resources_from_drv_data(
+		struct msm_vidc_core *core)
+{
+	struct msm_vidc_platform_data *platform_data;
+	struct msm_vidc_platform_resources *res;
+	int rc = 0;
+
+	if (!core || !core->platform_data) {
+		dprintk(VIDC_ERR, "%s Invalid data\n", __func__);
+		return -ENOENT;
+	}
+	platform_data = core->platform_data;
+	res = &core->resources;
+
+	res->codec_data_count = platform_data->codec_data_length;
+	res->codec_data = platform_data->codec_data;
+
+	res->fw_name = "venus";
+
+	dprintk(VIDC_DBG, "Firmware filename: %s\n", res->fw_name);
+
+	res->max_load = find_key_value(platform_data,
+			"qcom,max-hw-load");
+
+	res->max_hq_mbs_per_frame = find_key_value(platform_data,
+			"qcom,max-hq-mbs-per-frame");
+
+	res->max_hq_fps = find_key_value(platform_data,
+			"qcom,max-hq-frames-per-sec");
+
+	res->sw_power_collapsible = find_key_value(platform_data,
+			"qcom,sw-power-collapse");
+
+	res->never_unload_fw =  find_key_value(platform_data,
+			"qcom,never-unload-fw");
+
+	res->debug_timeout = find_key_value(platform_data,
+			"qcom,debug-timeout");
+
+	res->debug_timeout |= msm_vidc_debug_timeout;
+
+	res->pm_qos_latency_us = find_key_value(platform_data,
+			"qcom,pm-qos-latency-us");
+
+	res->max_secure_inst_count = find_key_value(platform_data,
+			"qcom,max-secure-instances");
+
+	res->slave_side_cp = find_key_value(platform_data,
+			"qcom,slave-side-cp");
+	res->sys_idle_indicator = find_key_value(platform_data,
+			"qcom,enable-idle-indicator");
+	res->thermal_mitigable = find_key_value(platform_data,
+			"qcom,enable-thermal-mitigation");
+	res->msm_vidc_pwr_collapse_delay = find_key_value(platform_data,
+			"qcom,power-collapse-delay");
+	res->msm_vidc_firmware_unload_delay = find_key_value(platform_data,
+			"qcom,fw-unload-delay");
+	res->msm_vidc_hw_rsp_timeout = find_key_value(platform_data,
+			"qcom,hw-resp-timeout");
+	return rc;
+}
+
 int read_platform_resources_from_dt(
 		struct msm_vidc_platform_resources *res)
 {
@@ -842,26 +806,6 @@
 	kres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	res->irq = kres ? kres->start : -1;
 
-	res->sys_idle_indicator = of_property_read_bool(pdev->dev.of_node,
-			"qcom,enable-idle-indicator");
-
-	res->thermal_mitigable =
-			of_property_read_bool(pdev->dev.of_node,
-			"qcom,enable-thermal-mitigation");
-
-	rc = of_property_read_string(pdev->dev.of_node, "qcom,firmware-name",
-			&res->fw_name);
-	if (rc) {
-		dprintk(VIDC_ERR, "Failed to read firmware name: %d\n", rc);
-		goto err_load_reg_table;
-	}
-	dprintk(VIDC_DBG, "Firmware filename: %s\n", res->fw_name);
-
-	rc = of_property_read_string(pdev->dev.of_node, "qcom,hfi-version",
-			&res->hfi_version);
-	if (rc)
-		dprintk(VIDC_DBG, "HFI packetization will default to legacy\n");
-
 	rc = msm_vidc_load_platform_version_table(res);
 	if (rc)
 		dprintk(VIDC_ERR, "Failed to load pf version table: %d\n", rc);
@@ -900,13 +844,6 @@
 		goto err_load_clock_table;
 	}
 
-	rc = msm_vidc_load_cycles_per_mb_table(res);
-	if (rc) {
-		dprintk(VIDC_ERR,
-			"Failed to load cycles per mb table: %d\n", rc);
-		goto err_load_cycles_per_mb_table;
-	}
-
 	rc = msm_vidc_load_allowed_clocks_table(res);
 	if (rc) {
 		dprintk(VIDC_ERR,
@@ -914,32 +851,6 @@
 		goto err_load_allowed_clocks_table;
 	}
 
-	rc = of_property_read_u32(pdev->dev.of_node, "qcom,max-hw-load",
-			&res->max_load);
-	if (rc) {
-		dprintk(VIDC_ERR,
-			"Failed to determine max load supported: %d\n", rc);
-		goto err_load_max_hw_load;
-	}
-
-	rc = of_property_read_u32(pdev->dev.of_node,
-		"qcom,max-hq-mbs-per-frame",
-			&res->max_hq_mbs_per_frame);
-	if (rc) {
-		dprintk(VIDC_ERR,
-			"Failed to determine Max HQ mbs per frame: %d\n", rc);
-		goto err_load_HQ_values;
-	}
-
-	rc = of_property_read_u32(pdev->dev.of_node,
-		"qcom,max-hq-frames-per-sec",
-			&res->max_hq_fps);
-	if (rc) {
-		dprintk(VIDC_ERR,
-			"Failed to determine Max HQ fps: %d\n", rc);
-		goto err_load_HQ_values;
-	}
-
 	rc = msm_vidc_populate_legacy_context_bank(res);
 	if (rc) {
 		dprintk(VIDC_ERR,
@@ -958,39 +869,11 @@
 				"Using fw-bias : %pa", &res->firmware_base);
 	}
 
-	res->sw_power_collapsible = of_property_read_bool(pdev->dev.of_node,
-					"qcom,sw-power-collapse");
-	dprintk(VIDC_DBG, "Power collapse supported = %s\n",
-		res->sw_power_collapsible ? "yes" : "no");
-
-	res->never_unload_fw = of_property_read_bool(pdev->dev.of_node,
-			"qcom,never-unload-fw");
-
-	res->debug_timeout = of_property_read_bool(pdev->dev.of_node,
-			"qcom,debug-timeout");
-
-	msm_vidc_debug_timeout |= res->debug_timeout;
-
-	of_property_read_u32(pdev->dev.of_node,
-			"qcom,pm-qos-latency-us", &res->pm_qos_latency_us);
-
-	res->slave_side_cp = of_property_read_bool(pdev->dev.of_node,
-					"qcom,slave-side-cp");
-	dprintk(VIDC_DBG, "Slave side cp = %s\n",
-				res->slave_side_cp ? "yes" : "no");
-
-	of_property_read_u32(pdev->dev.of_node,
-			"qcom,max-secure-instances",
-			&res->max_secure_inst_count);
-	return rc;
+	return rc;
 
 err_setup_legacy_cb:
-err_load_HQ_values:
-err_load_max_hw_load:
 	msm_vidc_free_allowed_clocks_table(res);
 err_load_allowed_clocks_table:
-	msm_vidc_free_cycles_per_mb_table(res);
-err_load_cycles_per_mb_table:
 	msm_vidc_free_clock_table(res);
 err_load_clock_table:
 	msm_vidc_free_regulator_table(res);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.h b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.h
index 4ba9057..a682282 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.h
@@ -22,6 +22,8 @@
 
 int read_hfi_type(struct platform_device *pdev);
 
+int read_platform_resources_from_drv_data(
+		struct msm_vidc_core *core);
 int read_platform_resources_from_dt(
 		struct msm_vidc_platform_resources *res);
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_resources.h b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
index b07785a..dda5e80 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_resources.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
@@ -185,6 +185,11 @@
 	uint32_t pm_qos_latency_us;
 	uint32_t max_inst_count;
 	uint32_t max_secure_inst_count;
+	int msm_vidc_hw_rsp_timeout;
+	int msm_vidc_firmware_unload_delay;
+	uint32_t msm_vidc_pwr_collapse_delay;
+	struct msm_vidc_codec_data *codec_data;
+	int codec_data_count;
 };
 
 static inline bool is_iommu_present(struct msm_vidc_platform_resources *res)
@@ -192,7 +197,5 @@
 	return !list_empty(&res->context_banks);
 }
 
-extern uint32_t msm_vidc_pwr_collapse_delay;
-
 #endif
 
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 6139e46..f8d8842 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -560,7 +560,7 @@
 			struct vidc_mem_addr *mem, u32 size, u32 align,
 			u32 flags, u32 usage)
 {
-	struct msm_smem *alloc = NULL;
+	struct msm_smem *alloc = &mem->mem_data;
 	int rc = 0;
 
 	if (!dev || !dev->hal_client || !mem || !size) {
@@ -569,8 +569,9 @@
 	}
 
 	dprintk(VIDC_INFO, "start to alloc size: %d, flags: %d\n", size, flags);
-	alloc = msm_smem_alloc(dev->hal_client, size, align, flags, usage, 1);
-	if (!alloc) {
+	rc = msm_smem_alloc(dev->hal_client, size, align, flags,
+		usage, 1, alloc);
+	if (rc) {
 		dprintk(VIDC_ERR, "Alloc failed\n");
 		rc = -ENOMEM;
 		goto fail_smem_alloc;
@@ -578,17 +579,16 @@
 
 	dprintk(VIDC_DBG, "__smem_alloc: ptr = %pK, size = %d\n",
 			alloc->kvaddr, size);
-	rc = msm_smem_cache_operations(dev->hal_client, alloc,
-		SMEM_CACHE_CLEAN);
+	rc = msm_smem_cache_operations(dev->hal_client, alloc->handle, 0,
+			alloc->size, SMEM_CACHE_CLEAN);
 	if (rc) {
 		dprintk(VIDC_WARN, "Failed to clean cache\n");
-		dprintk(VIDC_WARN, "This may result in undefined behavior\n");
 	}
 
 	mem->mem_size = alloc->size;
-	mem->mem_data = alloc;
 	mem->align_virtual_addr = alloc->kvaddr;
 	mem->align_device_addr = alloc->device_addr;
+
 	return rc;
 fail_smem_alloc:
 	return rc;
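
The net effect of this hunk: msm_smem_alloc() now fills a caller-provided struct msm_smem (embedded by value in struct vidc_mem_addr, see the venus_hfi.h hunk below) and returns an error code, instead of allocating and returning a pointer. A sketch of the new calling convention, under that assumption:

	struct vidc_mem_addr mem = {0};
	int rc;

	/* The caller owns the msm_smem storage; no separate allocation. */
	rc = msm_smem_alloc(dev->hal_client, size, align, flags,
			usage, 1 /* map_kernel */, &mem.mem_data);
	if (!rc)
		__smem_free(dev, &mem.mem_data);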
@@ -723,35 +723,6 @@
 	}
 }
 
-static bool __is_session_supported(unsigned long sessions_supported,
-		enum vidc_vote_data_session session_type)
-{
-	bool same_codec, same_session_type;
-	int codec_bit, session_type_bit;
-	unsigned long session = session_type;
-
-	if (!sessions_supported || !session)
-		return false;
-
-	/* ffs returns a 1 indexed, test_bit takes a 0 indexed...index */
-	codec_bit = ffs(session) - 1;
-	session_type_bit = codec_bit + 1;
-
-	same_codec = test_bit(codec_bit, &sessions_supported) ==
-		test_bit(codec_bit, &session);
-	same_session_type = test_bit(session_type_bit, &sessions_supported) ==
-		test_bit(session_type_bit, &session);
-
-	return same_codec && same_session_type;
-}
-
-bool venus_hfi_is_session_supported(unsigned long sessions_supported,
-		enum vidc_vote_data_session session_type)
-{
-	return __is_session_supported(sessions_supported, session_type);
-}
-EXPORT_SYMBOL(venus_hfi_is_session_supported);
-
 static int __devfreq_target(struct device *devfreq_dev,
 		unsigned long *freq, u32 flags)
 {
@@ -1151,11 +1122,9 @@
 static int __scale_clocks(struct venus_hfi_device *device)
 {
 	int rc = 0;
-	struct clock_freq_table *clk_freq_tbl = NULL;
 	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
 	u32 rate = 0;
 
-	clk_freq_tbl = &device->res->clock_freq_tbl;
 	allowed_clks_tbl = device->res->allowed_clks_tbl;
 
 	dprintk(VIDC_DBG, "%s: NULL scale data\n", __func__);
@@ -1214,7 +1183,7 @@
 			if (!queue_delayed_work(device->venus_pm_workq,
 				&venus_hfi_pm_work,
 				msecs_to_jiffies(
-				msm_vidc_pwr_collapse_delay))) {
+				device->res->msm_vidc_pwr_collapse_delay))) {
 				dprintk(VIDC_DBG,
 				"PM work already scheduled\n");
 			}
@@ -1343,7 +1312,7 @@
 	unsigned long mem_map_table_base_addr;
 	struct context_bank_info *cb;
 
-	if (device->qdss.mem_data) {
+	if (device->qdss.align_virtual_addr) {
 		qdss = (struct hfi_mem_map_table *)
 			device->qdss.align_virtual_addr;
 		qdss->mem_map_num_entries = num_entries;
@@ -1369,32 +1338,27 @@
 						mem_map[i].size);
 		}
 
-		__smem_free(device, device->qdss.mem_data);
+		__smem_free(device, &device->qdss.mem_data);
 	}
 
-	__smem_free(device, device->iface_q_table.mem_data);
-	__smem_free(device, device->sfr.mem_data);
+	__smem_free(device, &device->iface_q_table.mem_data);
+	__smem_free(device, &device->sfr.mem_data);
 
 	for (i = 0; i < VIDC_IFACEQ_NUMQ; i++) {
 		device->iface_queues[i].q_hdr = NULL;
-		device->iface_queues[i].q_array.mem_data = NULL;
 		device->iface_queues[i].q_array.align_virtual_addr = NULL;
 		device->iface_queues[i].q_array.align_device_addr = 0;
 	}
 
-	device->iface_q_table.mem_data = NULL;
 	device->iface_q_table.align_virtual_addr = NULL;
 	device->iface_q_table.align_device_addr = 0;
 
-	device->qdss.mem_data = NULL;
 	device->qdss.align_virtual_addr = NULL;
 	device->qdss.align_device_addr = 0;
 
-	device->sfr.mem_data = NULL;
 	device->sfr.align_virtual_addr = NULL;
 	device->sfr.align_device_addr = 0;
 
-	device->mem_addr.mem_data = NULL;
 	device->mem_addr.align_virtual_addr = NULL;
 	device->mem_addr.align_device_addr = 0;
 
@@ -1483,7 +1447,6 @@
 	struct vidc_mem_addr *mem_addr;
 	int offset = 0;
 	int num_entries = dev->res->qdss_addr_set.count;
-	u32 value = 0;
 	phys_addr_t fw_bias = 0;
 	size_t q_size;
 	unsigned long mem_map_table_base_addr;
@@ -1514,7 +1477,6 @@
 		iface_q->q_array.align_virtual_addr =
 			mem_addr->align_virtual_addr + offset;
 		iface_q->q_array.mem_size = VIDC_IFACEQ_QUEUE_SIZE;
-		iface_q->q_array.mem_data = NULL;
 		offset += iface_q->q_array.mem_size;
 		iface_q->q_hdr = VIDC_IFACEQ_GET_QHDR_START_ADDR(
 				dev->iface_q_table.align_virtual_addr, i);
@@ -1566,65 +1528,34 @@
 
 	iface_q = &dev->iface_queues[VIDC_IFACEQ_CMDQ_IDX];
 	q_hdr = iface_q->q_hdr;
-	q_hdr->qhdr_start_addr = (u32)iface_q->q_array.align_device_addr;
+	q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
 	q_hdr->qhdr_type |= HFI_Q_ID_HOST_TO_CTRL_CMD_Q;
-	if ((ion_phys_addr_t)q_hdr->qhdr_start_addr !=
-		iface_q->q_array.align_device_addr) {
-		dprintk(VIDC_ERR, "Invalid CMDQ device address (%pa)",
-			&iface_q->q_array.align_device_addr);
-	}
 
 	iface_q = &dev->iface_queues[VIDC_IFACEQ_MSGQ_IDX];
 	q_hdr = iface_q->q_hdr;
-	q_hdr->qhdr_start_addr = (u32)iface_q->q_array.align_device_addr;
+	q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
 	q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_MSG_Q;
-	if ((ion_phys_addr_t)q_hdr->qhdr_start_addr !=
-		iface_q->q_array.align_device_addr) {
-		dprintk(VIDC_ERR, "Invalid MSGQ device address (%pa)",
-			&iface_q->q_array.align_device_addr);
-	}
 
 	iface_q = &dev->iface_queues[VIDC_IFACEQ_DBGQ_IDX];
 	q_hdr = iface_q->q_hdr;
-	q_hdr->qhdr_start_addr = (u32)iface_q->q_array.align_device_addr;
+	q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
 	q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q;
 	/*
 	 * Set receive request to zero on debug queue as there is no
 	 * need of interrupt from video hardware for debug messages
 	 */
 	q_hdr->qhdr_rx_req = 0;
-	if ((ion_phys_addr_t)q_hdr->qhdr_start_addr !=
-		iface_q->q_array.align_device_addr) {
-		dprintk(VIDC_ERR, "Invalid DBGQ device address (%pa)",
-			&iface_q->q_array.align_device_addr);
-	}
 
-	value = (u32)dev->iface_q_table.align_device_addr;
-	if ((ion_phys_addr_t)value !=
-		dev->iface_q_table.align_device_addr) {
-		dprintk(VIDC_ERR,
-			"Invalid iface_q_table device address (%pa)",
-			&dev->iface_q_table.align_device_addr);
-	}
-
-	if (dev->qdss.mem_data) {
+	if (dev->qdss.align_virtual_addr) {
 		qdss = (struct hfi_mem_map_table *)dev->qdss.align_virtual_addr;
 		qdss->mem_map_num_entries = num_entries;
 		mem_map_table_base_addr = dev->qdss.align_device_addr +
 			sizeof(struct hfi_mem_map_table);
-		qdss->mem_map_table_base_addr =
-			(u32)mem_map_table_base_addr;
-		if ((ion_phys_addr_t)qdss->mem_map_table_base_addr !=
-				mem_map_table_base_addr) {
-			dprintk(VIDC_ERR,
-					"Invalid mem_map_table_base_addr (%#lx)",
-					mem_map_table_base_addr);
-		}
+		qdss->mem_map_table_base_addr = mem_map_table_base_addr;
 
 		mem_map = (struct hfi_mem_map *)(qdss + 1);
 		cb = msm_smem_get_context_bank(dev->hal_client, false,
 				HAL_BUFFER_INTERNAL_CMD_QUEUE);
-
 		if (!cb) {
 			dprintk(VIDC_ERR,
 				"%s: failed to get context bank\n", __func__);
@@ -1635,28 +1566,14 @@
 		if (rc) {
 			dprintk(VIDC_ERR,
 				"IOMMU mapping failed, Freeing qdss memdata\n");
-			__smem_free(dev, dev->qdss.mem_data);
-			dev->qdss.mem_data = NULL;
+			__smem_free(dev, &dev->qdss.mem_data);
 			dev->qdss.align_virtual_addr = NULL;
 			dev->qdss.align_device_addr = 0;
 		}
-
-		value = (u32)dev->qdss.align_device_addr;
-		if ((ion_phys_addr_t)value !=
-				dev->qdss.align_device_addr) {
-			dprintk(VIDC_ERR, "Invalid qdss device address (%pa)",
-					&dev->qdss.align_device_addr);
-		}
 	}
 
 	vsfr = (struct hfi_sfr_struct *) dev->sfr.align_virtual_addr;
 	vsfr->bufSize = ALIGNED_SFR_SIZE;
-	value = (u32)dev->sfr.align_device_addr;
-	if ((ion_phys_addr_t)value !=
-		dev->sfr.align_device_addr) {
-		dprintk(VIDC_ERR, "Invalid sfr device address (%pa)",
-			&dev->sfr.align_device_addr);
-	}
 
 	__setup_ucregion_memory_map(dev);
 	return 0;
@@ -1942,7 +1859,6 @@
 
 	__write_register(device, VIDC_CPU_CS_A2HSOFTINTCLR, 1);
 	__write_register(device, VIDC_WRAPPER_INTR_CLEAR, intr_status);
-	dprintk(VIDC_DBG, "Cleared WRAPPER/A2H interrupt\n");
 }
 
 static int venus_hfi_core_ping(void *device)
@@ -2848,7 +2764,8 @@
 		device->skip_pc_count, wfi_status, idle_status, pc_ready);
 	queue_delayed_work(device->venus_pm_workq,
 			&venus_hfi_pm_work,
-			msecs_to_jiffies(msm_vidc_pwr_collapse_delay));
+			msecs_to_jiffies(
+			device->res->msm_vidc_pwr_collapse_delay));
 exit:
 	mutex_unlock(&device->lock);
 }
@@ -3097,7 +3014,8 @@
 		cancel_delayed_work(&venus_hfi_pm_work);
 		if (!queue_delayed_work(device->venus_pm_workq,
 			&venus_hfi_pm_work,
-			msecs_to_jiffies(msm_vidc_pwr_collapse_delay))) {
+			msecs_to_jiffies(
+				device->res->msm_vidc_pwr_collapse_delay))) {
 			dprintk(VIDC_ERR, "PM work already scheduled\n");
 		}
 	}
@@ -3117,7 +3035,7 @@
 
 	mutex_lock(&device->lock);
 
-	dprintk(VIDC_INFO, "Handling interrupt\n");
+	dprintk(VIDC_DBG, "Handling interrupt\n");
 
 	if (!__core_in_valid_state(device)) {
 		dprintk(VIDC_DBG, "%s - Core not in init state\n", __func__);
@@ -3152,7 +3070,8 @@
 	for (i = 0; !IS_ERR_OR_NULL(device->response_pkt) &&
 		i < num_responses; ++i) {
 		struct msm_vidc_cb_info *r = &device->response_pkt[i];
-
+		dprintk(VIDC_DBG, "Processing response %d of %d, type %d\n",
+			(i + 1), num_responses, r->response_type);
 		device->callback(r->response_type, &r->response);
 	}
 
@@ -3160,6 +3079,7 @@
 	if (!(intr_status & VIDC_WRAPPER_INTR_STATUS_A2HWD_BMSK))
 		enable_irq(device->hal_data->irq);
 
+	dprintk(VIDC_DBG, "Handling interrupt done\n");
 	/*
 	 * XXX: Don't add any code beyond here.  Reacquiring locks after release
 	 * it above doesn't guarantee the atomicity that we're aiming for.
@@ -4020,7 +3940,6 @@
 		dprintk(VIDC_ERR, "Invalid params: %pK\n", device);
 		return -EINVAL;
 	} else if (device->power_enabled) {
-		dprintk(VIDC_DBG, "Power is already enabled\n");
 		goto exit;
 	} else if (!__core_in_valid_state(device)) {
 		dprintk(VIDC_DBG, "venus_hfi_device in deinit state.");
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.h b/drivers/media/platform/msm/vidc/venus_hfi.h
index 925918c..4c4cb06 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.h
+++ b/drivers/media/platform/msm/vidc/venus_hfi.h
@@ -126,10 +126,10 @@
 };
 
 struct vidc_mem_addr {
-	ion_phys_addr_t align_device_addr;
+	u32 align_device_addr;
 	u8 *align_virtual_addr;
 	u32 mem_size;
-	struct msm_smem *mem_data;
+	struct msm_smem mem_data;
 };
 
 struct vidc_iface_q_info {
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index 86e4f42..47ce0ba 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -17,6 +17,8 @@
 #include <linux/log2.h>
 #include <linux/platform_device.h>
 #include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/hash.h>
 #include <media/msm_vidc.h>
 #include "msm_vidc_resources.h"
 
@@ -882,8 +884,8 @@
 	enum hal_buffer buffer_type;
 	u32 buffer_size;
 	u32 num_buffers;
-	ion_phys_addr_t align_device_addr;
-	ion_phys_addr_t extradata_addr;
+	u32 align_device_addr;
+	u32 extradata_addr;
 	u32 extradata_size;
 	u32 response_required;
 };
@@ -910,8 +912,8 @@
 
 struct vidc_frame_data {
 	enum hal_buffer buffer_type;
-	ion_phys_addr_t device_addr;
-	ion_phys_addr_t extradata_addr;
+	u32 device_addr;
+	u32 extradata_addr;
 	int64_t timestamp;
 	u32 flags;
 	u32 offset;
@@ -1082,6 +1084,22 @@
 	HAL_RESPONSE_UNUSED = 0x10000000,
 };
 
+struct ubwc_cr_stats_info_type {
+	u32 cr_stats_info0;
+	u32 cr_stats_info1;
+	u32 cr_stats_info2;
+	u32 cr_stats_info3;
+	u32 cr_stats_info4;
+	u32 cr_stats_info5;
+	u32 cr_stats_info6;
+};
+
+struct recon_stats_type {
+	u32 buffer_index;
+	u32 complexity_number;
+	struct ubwc_cr_stats_info_type ubwc_stats_info;
+};
+
 struct vidc_hal_ebd {
 	u32 timestamp_hi;
 	u32 timestamp_lo;
@@ -1094,8 +1112,9 @@
 	u32 alloc_len;
 	u32 filled_len;
 	enum hal_picture picture_type;
-	ion_phys_addr_t packet_buffer;
-	ion_phys_addr_t extra_data_buffer;
+	struct recon_stats_type recon_stats;
+	u32 packet_buffer;
+	u32 extra_data_buffer;
 };
 
 struct vidc_hal_fbd {
@@ -1117,18 +1136,18 @@
 	u32 input_tag;
 	u32 input_tag1;
 	enum hal_picture picture_type;
-	ion_phys_addr_t packet_buffer1;
-	ion_phys_addr_t extra_data_buffer;
+	u32 packet_buffer1;
+	u32 extra_data_buffer;
 	u32 flags2;
 	u32 alloc_len2;
 	u32 filled_len2;
 	u32 offset2;
-	ion_phys_addr_t packet_buffer2;
+	u32 packet_buffer2;
 	u32 flags3;
 	u32 alloc_len3;
 	u32 filled_len3;
 	u32 offset3;
-	ion_phys_addr_t packet_buffer3;
+	u32 packet_buffer3;
 	enum hal_buffer buffer_type;
 };
 
@@ -1230,8 +1249,8 @@
 	u32 width;
 	enum msm_vidc_pixel_depth bit_depth;
 	u32 hal_event_type;
-	ion_phys_addr_t packet_buffer;
-	ion_phys_addr_t extra_data_buffer;
+	u32 packet_buffer;
+	u32 extra_data_buffer;
 	u32 pic_struct;
 	u32 colour_space;
 	u32 profile;
@@ -1315,8 +1334,16 @@
 	enum hal_video_codec codec;
 	enum hal_uncompressed_format color_formats[2];
 	int num_formats; /* 1 = DPB-OPB unified; 2 = split */
-	int height, width, fps;
+	int input_height, input_width, fps;
+	int output_height, output_width;
+	int compression_ratio;
+	int complexity_factor;
+	unsigned int lcu_size;
 	enum msm_vidc_power_mode power_mode;
+	struct imem_ab_table *imem_ab_tbl;
+	enum hal_work_mode work_mode;
+	unsigned long bitrate;
+	u32 imem_ab_tbl_size;
 };
 
 struct vidc_clk_scale_data {
diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
index 52ef883..a29ddca 100644
--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
+++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
@@ -330,6 +330,8 @@
 					b->m.planes[plane].m.userptr;
 				planes[plane].length =
 					b->m.planes[plane].length;
+				planes[plane].data_offset =
+					b->m.planes[plane].data_offset;
 			}
 		}
 		if (b->memory == VB2_MEMORY_DMABUF) {
@@ -338,6 +340,8 @@
 					b->m.planes[plane].m.fd;
 				planes[plane].length =
 					b->m.planes[plane].length;
+				planes[plane].data_offset =
+					b->m.planes[plane].data_offset;
 			}
 		}
 
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index c1857c7..bd51c6c 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -4382,9 +4382,9 @@
 		return -EINVAL;
 	}
 
-	if (strlen(app_name) >= MAX_APP_NAME_SIZE) {
+	if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
 		pr_err("The app_name (%s) with length %zu is not valid\n",
-			app_name, strlen(app_name));
+			app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
 		return -EINVAL;
 	}
 
diff --git a/drivers/mmc/core/bus.h b/drivers/mmc/core/bus.h
index 00a1971..3f3f24b 100644
--- a/drivers/mmc/core/bus.h
+++ b/drivers/mmc/core/bus.h
@@ -15,7 +15,7 @@
 static ssize_t mmc_##name##_show (struct device *dev, struct device_attribute *attr, char *buf)	\
 {										\
 	struct mmc_card *card = mmc_dev_to_card(dev);				\
-	return sprintf(buf, fmt, args);						\
+	return snprintf(buf, PAGE_SIZE, fmt, args);			\
 }										\
 static DEVICE_ATTR(name, S_IRUGO, mmc_##name##_show, NULL)
 
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 58329d2..e0f0c06 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -50,6 +50,19 @@
 	  say M here and read <file:Documentation/kbuild/modules.txt>.
 	  The module will be called ms02-nv.
 
+config MTD_MSM_QPIC_NAND
+	tristate "MSM QPIC NAND Device Support"
+	depends on MTD && (ARCH_QCOM || ARCH_MSM) && !MTD_MSM_NAND
+	select CRC16
+	select BITREVERSE
+	select MTD_NAND_IDS
+	default n
+	help
+	  Support for the NAND controller in the Qualcomm Technologies, Inc.
+	  Parallel Interface Controller (QPIC). This controller supports BAM
+	  mode and the BCH error correction mechanism. Based on the device
+	  capabilities, either 4-bit or 8-bit BCH ECC will be used.
+
 config MTD_DATAFLASH
 	tristate "Support for AT45xxx DataFlash"
 	depends on SPI_MASTER
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index 7912d3a..1abde5d 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -10,6 +10,7 @@
 obj-$(CONFIG_MTD_MTDRAM)	+= mtdram.o
 obj-$(CONFIG_MTD_LART)		+= lart.o
 obj-$(CONFIG_MTD_BLOCK2MTD)	+= block2mtd.o
+obj-$(CONFIG_MTD_MSM_QPIC_NAND) += msm_qpic_nand.o
 obj-$(CONFIG_MTD_DATAFLASH)	+= mtd_dataflash.o
 obj-$(CONFIG_MTD_M25P80)	+= m25p80.o
 obj-$(CONFIG_MTD_SPEAR_SMI)	+= spear_smi.o
diff --git a/drivers/mtd/devices/msm_qpic_nand.c b/drivers/mtd/devices/msm_qpic_nand.c
new file mode 100644
index 0000000..44b56b6
--- /dev/null
+++ b/drivers/mtd/devices/msm_qpic_nand.c
@@ -0,0 +1,3594 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_qpic_nand.h"
+
+#define QPIC_BAM_DEFAULT_IPC_LOGLVL 2
+
+/* The driver supports devices with page sizes up to 4K */
+#define MAX_CW_PER_PAGE 8
+/*
+ * Max descriptors needed for erase, read, write operations.
+ * Usually, this is (2 * MAX_CW_PER_PAGE).
+ */
+#define MAX_DESC 16
+
+static bool enable_euclean;
+
+/*
+ * Get DMA memory of the requested size. Returns a pointer to free
+ * memory available from the pre-allocated pool, or NULL if there
+ * is no free memory.
+ */
+static void *msm_nand_get_dma_buffer(struct msm_nand_chip *chip, size_t size)
+{
+	uint32_t bitmask, free_bitmask, old_bitmask;
+	uint32_t need_mask, current_need_mask;
+	int free_index;
+
+	need_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOT_SZ))
+			- 1;
+	bitmask = atomic_read(&chip->dma_buffer_busy);
+	free_bitmask = ~bitmask;
+	if (free_bitmask == 0)
+		return NULL;
+
+	do {
+		free_index = __ffs(free_bitmask);
+		current_need_mask = need_mask << free_index;
+
+		if (size + free_index * MSM_NAND_DMA_BUFFER_SLOT_SZ >=
+						 MSM_NAND_DMA_BUFFER_SIZE)
+			return NULL;
+
+		if ((bitmask & current_need_mask) == 0) {
+			old_bitmask =
+				atomic_cmpxchg(&chip->dma_buffer_busy,
+					       bitmask,
+					       bitmask | current_need_mask);
+			if (old_bitmask == bitmask)
+				return chip->dma_virt_addr +
+				free_index * MSM_NAND_DMA_BUFFER_SLOT_SZ;
+			free_bitmask = 0;/* force return */
+		}
+		/* current free range was too small, clear all free bits */
+		/* below the top busy bit within current_need_mask */
+		free_bitmask &=
+			~(~0U >> (32 - fls(bitmask & current_need_mask)));
+	} while (free_bitmask);
+
+	return NULL;
+}
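+/*
+ * Worked example of the slot-allocation arithmetic above (a slot size
+ * of 32 bytes is assumed here purely for illustration):
+ *
+ *   size = 100 => need_mask = (1UL << DIV_ROUND_UP(100, 32)) - 1 = 0xF
+ *   With dma_buffer_busy == 0x3, free_bitmask = ~0x3, so __ffs() gives
+ *   free_index = 2 and current_need_mask = 0xF << 2 = 0x3C. Since
+ *   (0x3 & 0x3C) == 0, the cmpxchg marks slots 2..5 as busy and the
+ *   call returns dma_virt_addr + 2 * MSM_NAND_DMA_BUFFER_SLOT_SZ.
+ */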
+
+/*
+ * Releases the DMA memory back to the free pool and wakes up any user
+ * thread waiting on the wait queue for free memory to become available.
+ */
+static void msm_nand_release_dma_buffer(struct msm_nand_chip *chip,
+					void *buffer, size_t size)
+{
+	int index;
+	uint32_t used_mask;
+
+	used_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOT_SZ))
+			- 1;
+	index = ((uint8_t *)buffer - chip->dma_virt_addr) /
+		MSM_NAND_DMA_BUFFER_SLOT_SZ;
+	atomic_sub(used_mask << index, &chip->dma_buffer_busy);
+
+	wake_up(&chip->dma_wait_queue);
+}
+
+/*
+ * Calculates the page address of the buffer passed in and its offset
+ * within that page, then maps it for DMA by calling dma_map_page().
+ */
+static dma_addr_t msm_nand_dma_map(struct device *dev, void *addr, size_t size,
+					 enum dma_data_direction dir)
+{
+	struct page *page;
+	unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
+
+	if (virt_addr_valid(addr))
+		page = virt_to_page(addr);
+	else {
+		if (WARN_ON(size + offset > PAGE_SIZE))
+			return ~0;
+		page = vmalloc_to_page(addr);
+	}
+	return dma_map_page(dev, page, offset, size, dir);
+}
+
+#ifdef CONFIG_MSM_BUS_SCALING
+static int msm_nand_bus_set_vote(struct msm_nand_info *info,
+			unsigned int vote)
+{
+	int ret = 0;
+
+	ret = msm_bus_scale_client_update_request(info->clk_data.client_handle,
+			vote);
+	if (ret)
+		pr_err("msm_bus_scale_client_update_request() failed, bus_client_handle=0x%x, vote=%d, err=%d\n",
+			info->clk_data.client_handle, vote, ret);
+	return ret;
+}
+
+static int msm_nand_setup_clocks_and_bus_bw(struct msm_nand_info *info,
+				bool vote)
+{
+	int ret = 0;
+
+	if (IS_ERR_OR_NULL(info->clk_data.qpic_clk)) {
+		ret = -EINVAL;
+		goto out;
+	}
+	if (atomic_read(&info->clk_data.clk_enabled) == vote)
+		goto out;
+	if (!atomic_read(&info->clk_data.clk_enabled) && vote) {
+		ret = msm_nand_bus_set_vote(info, 1);
+		if (ret) {
+			pr_err("Failed to vote for bus with %d\n", ret);
+			goto out;
+		}
+		ret = clk_prepare_enable(info->clk_data.qpic_clk);
+		if (ret) {
+			pr_err("Failed to enable the bus-clock with error %d\n",
+				ret);
+			msm_nand_bus_set_vote(info, 0);
+			goto out;
+		}
+	} else if (atomic_read(&info->clk_data.clk_enabled) && !vote) {
+		clk_disable_unprepare(info->clk_data.qpic_clk);
+		msm_nand_bus_set_vote(info, 0);
+	}
+	atomic_set(&info->clk_data.clk_enabled, vote);
+out:
+	return ret;
+}
+#else
+static int msm_nand_setup_clocks_and_bus_bw(struct msm_nand_info *info,
+				bool vote)
+{
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int msm_nand_runtime_suspend(struct device *dev)
+{
+	int ret = 0;
+	struct msm_nand_info *info = dev_get_drvdata(dev);
+
+	ret = msm_nand_setup_clocks_and_bus_bw(info, false);
+
+	return ret;
+}
+
+static int msm_nand_runtime_resume(struct device *dev)
+{
+	int ret = 0;
+	struct msm_nand_info *info = dev_get_drvdata(dev);
+
+	ret = msm_nand_setup_clocks_and_bus_bw(info, true);
+
+	return ret;
+}
+
+static void msm_nand_print_rpm_info(struct device *dev)
+{
+	pr_err("RPM: runtime_status=%d, usage_count=%d, is_suspended=%d, disable_depth=%d, runtime_error=%d, request_pending=%d, request=%d\n",
+		dev->power.runtime_status, atomic_read(&dev->power.usage_count),
+		dev->power.is_suspended, dev->power.disable_depth,
+		dev->power.runtime_error, dev->power.request_pending,
+		dev->power.request);
+}
+#else
+static int msm_nand_runtime_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int msm_nand_runtime_resume(struct device *dev)
+{
+	return 0;
+}
+
+static void msm_nand_print_rpm_info(struct device *dev)
+{
+}
+#endif
+
+#ifdef CONFIG_PM
+static int msm_nand_suspend(struct device *dev)
+{
+	int ret = 0;
+
+	if (!pm_runtime_suspended(dev))
+		ret = msm_nand_runtime_suspend(dev);
+
+	return ret;
+}
+
+static int msm_nand_resume(struct device *dev)
+{
+	int ret = 0;
+
+	if (!pm_runtime_suspended(dev))
+		ret = msm_nand_runtime_resume(dev);
+
+	return ret;
+}
+#else
+static int msm_nand_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int msm_nand_resume(struct device *dev)
+{
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int msm_nand_get_device(struct device *dev)
+{
+	int ret = 0;
+
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0) {
+		pr_err("Failed to resume with %d\n", ret);
+		msm_nand_print_rpm_info(dev);
+	} else { /* Reset to success */
+		ret = 0;
+	}
+	return ret;
+}
+
+static int msm_nand_put_device(struct device *dev)
+{
+	int ret = 0;
+
+	pm_runtime_mark_last_busy(dev);
+	ret = pm_runtime_put_autosuspend(dev);
+	if (ret < 0) {
+		pr_err("Failed to suspend with %d\n", ret);
+		msm_nand_print_rpm_info(dev);
+	} else { /* Reset to success */
+		ret = 0;
+	}
+	return ret;
+}
+#else
+static int msm_nand_get_device(struct device *dev)
+{
+	return 0;
+}
+
+static int msm_nand_put_device(struct device *dev)
+{
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_MSM_BUS_SCALING
+static int msm_nand_bus_register(struct platform_device *pdev,
+		struct msm_nand_info *info)
+{
+	int ret = 0;
+
+	info->clk_data.use_cases = msm_bus_cl_get_pdata(pdev);
+	if (!info->clk_data.use_cases) {
+		ret = -EINVAL;
+		pr_err("msm_bus_cl_get_pdata failed\n");
+		goto out;
+	}
+	info->clk_data.client_handle =
+		msm_bus_scale_register_client(info->clk_data.use_cases);
+	if (!info->clk_data.client_handle) {
+		ret = -EINVAL;
+		pr_err("msm_bus_scale_register_client failed\n");
+	}
+out:
+	return ret;
+}
+
+static void msm_nand_bus_unregister(struct msm_nand_info *info)
+{
+	if (info->clk_data.client_handle)
+		msm_bus_scale_unregister_client(info->clk_data.client_handle);
+}
+#else
+static int msm_nand_bus_register(struct platform_device *pdev,
+		struct msm_nand_info *info)
+{
+	return 0;
+}
+
+static void msm_nand_bus_unregister(struct msm_nand_info *info)
+{
+}
+#endif
+
+/*
+ * Wrapper function to prepare a single SPS command element with the data
+ * that is passed to this function.
+ */
+static inline void msm_nand_prep_ce(struct sps_command_element *ce,
+				uint32_t addr, uint32_t command, uint32_t data)
+{
+	ce->addr = addr;
+	ce->command = (command & WRITE) ? (uint32_t) SPS_WRITE_COMMAND :
+			(uint32_t) SPS_READ_COMMAND;
+	ce->data = data;
+	ce->mask = 0xFFFFFFFF;
+}
+
+static int msm_nand_sps_get_iovec(struct sps_pipe *pipe, uint32_t indx,
+				unsigned int cnt, struct sps_iovec *iovec)
+{
+	int ret = 0;
+
+	do {
+		do {
+			ret = sps_get_iovec((pipe), (iovec));
+		} while (((iovec)->addr == 0x0) && ((iovec)->size == 0x0));
+		if (ret)
+			return ret;
+	} while (--(cnt));
+	return ret;
+}
+
+/*
+ * Wrapper function to prepare a single command descriptor containing a
+ * single SPS command element with the data passed to this function.
+ *
+ * Every command element must carry the SPS_IOVEC_FLAG_CMD flag, so this
+ * function sets it by default and the caller need not pass it
+ * explicitly. The other flags must be passed based on need. If a
+ * command element doesn't need any other flag, then 0 can be passed
+ * to flags.
+ */
+static inline void msm_nand_prep_single_desc(struct msm_nand_sps_cmd *sps_cmd,
+				uint32_t addr, uint32_t command,
+				uint32_t data, uint32_t flags)
+{
+	msm_nand_prep_ce(&sps_cmd->ce, addr, command, data);
+	sps_cmd->flags = SPS_IOVEC_FLAG_CMD | flags;
+}
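+/*
+ * For example, a typical command chain locks the pipe on its first
+ * element and unlocks (plus raises an interrupt) on its last, with
+ * SPS_IOVEC_FLAG_CMD implied throughout; the register names below are
+ * abbreviated for illustration (compare msm_nand_flash_read_id()):
+ *
+ *   msm_nand_prep_single_desc(cmd++, addr0_reg, WRITE, v0,
+ *			SPS_IOVEC_FLAG_LOCK);
+ *   msm_nand_prep_single_desc(cmd++, flash_reg, WRITE, v1, 0);
+ *   msm_nand_prep_single_desc(cmd++, exec_reg, WRITE, v2,
+ *			SPS_IOVEC_FLAG_NWD);
+ *   msm_nand_prep_single_desc(cmd++, id_reg, READ, buf_dma,
+ *			SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT);
+ */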
+/*
+ * Read a single NANDc register at the address given by addr. The return
+ * value indicates whether the read was successful. On success, the
+ * register value read is stored in val.
+ */
+static int msm_nand_flash_rd_reg(struct msm_nand_info *info, uint32_t addr,
+				uint32_t *val)
+{
+	int ret = 0, submitted_num_desc = 1;
+	struct msm_nand_sps_cmd *cmd;
+	struct msm_nand_chip *chip = &info->nand_chip;
+	struct {
+		struct msm_nand_sps_cmd cmd;
+		uint32_t data;
+	} *dma_buffer;
+	struct sps_iovec iovec_temp;
+
+	wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
+		    chip, sizeof(*dma_buffer))));
+	cmd = &dma_buffer->cmd;
+	msm_nand_prep_single_desc(cmd, addr, READ, msm_virt_to_dma(chip,
+			&dma_buffer->data), SPS_IOVEC_FLAG_INT);
+
+	mutex_lock(&info->lock);
+	ret = msm_nand_get_device(chip->dev);
+	if (ret)
+		goto out;
+	ret = sps_transfer_one(info->sps.cmd_pipe.handle,
+			msm_virt_to_dma(chip, &cmd->ce),
+			sizeof(struct sps_command_element), NULL, cmd->flags);
+	if (ret) {
+		pr_err("failed to submit command %x ret %d\n", addr, ret);
+		msm_nand_put_device(chip->dev);
+		goto out;
+	}
+	ret = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
+			info->sps.cmd_pipe.index, submitted_num_desc,
+			&iovec_temp);
+	if (ret) {
+		pr_err("Failed to get iovec for pipe %d: (ret%d)\n",
+				(info->sps.cmd_pipe.index), ret);
+		goto out;
+	}
+	ret = msm_nand_put_device(chip->dev);
+	if (ret)
+		goto out;
+	*val = dma_buffer->data;
+out:
+	mutex_unlock(&info->lock);
+	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+	return ret;
+}
+
+/*
+ * Read the flash ID from the NAND flash device. A return value < 0
+ * indicates failure. When successful, the flash ID is stored in the
+ * read_id parameter.
+ */
+#define READID_CMDS 5
+static int msm_nand_flash_read_id(struct msm_nand_info *info,
+		bool read_onfi_signature, uint32_t *read_id,
+		uint32_t *read_id2)
+{
+	int err = 0, i = 0;
+	struct msm_nand_sps_cmd *cmd;
+	struct sps_iovec *iovec;
+	struct sps_iovec iovec_temp;
+	struct msm_nand_chip *chip = &info->nand_chip;
+	/*
+	 * The following 5 commands are required to read the ID -
+	 * write commands - addr0, flash, exec
+	 * read commands - read_id, read_id2
+	 */
+	struct {
+		struct sps_transfer xfer;
+		struct sps_iovec cmd_iovec[READID_CMDS];
+		struct msm_nand_sps_cmd cmd[READID_CMDS];
+		uint32_t data[READID_CMDS];
+	} *dma_buffer;
+
+	wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer
+				(chip, sizeof(*dma_buffer))));
+	if (read_onfi_signature)
+		dma_buffer->data[0] = FLASH_READ_ONFI_SIGNATURE_ADDRESS;
+	else
+		dma_buffer->data[0] = FLASH_READ_DEVICE_ID_ADDRESS;
+
+	dma_buffer->data[1] = EXTENDED_FETCH_ID | MSM_NAND_CMD_FETCH_ID;
+	dma_buffer->data[2] = 1;
+	dma_buffer->data[3] = 0xeeeeeeee;
+	dma_buffer->data[4] = 0xeeeeeeee;
+
+	cmd = dma_buffer->cmd;
+	msm_nand_prep_single_desc(cmd, MSM_NAND_ADDR0(info), WRITE,
+			dma_buffer->data[0], SPS_IOVEC_FLAG_LOCK);
+	cmd++;
+
+	msm_nand_prep_single_desc(cmd, MSM_NAND_FLASH_CMD(info), WRITE,
+			dma_buffer->data[1], 0);
+	cmd++;
+
+	msm_nand_prep_single_desc(cmd, MSM_NAND_EXEC_CMD(info), WRITE,
+			dma_buffer->data[2], SPS_IOVEC_FLAG_NWD);
+	cmd++;
+
+	msm_nand_prep_single_desc(cmd, MSM_NAND_READ_ID(info), READ,
+			msm_virt_to_dma(chip, &dma_buffer->data[3]), 0);
+	cmd++;
+
+	msm_nand_prep_single_desc(cmd, MSM_NAND_READ_ID2(info), READ,
+			msm_virt_to_dma(chip, &dma_buffer->data[4]),
+			SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT);
+	cmd++;
+
+	WARN_ON(cmd - dma_buffer->cmd > READID_CMDS);
+	dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
+	dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
+	dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
+					&dma_buffer->cmd_iovec);
+	iovec = dma_buffer->xfer.iovec;
+
+	for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
+		iovec->addr =  msm_virt_to_dma(chip, &dma_buffer->cmd[i].ce);
+		iovec->size = sizeof(struct sps_command_element);
+		iovec->flags = dma_buffer->cmd[i].flags;
+		iovec++;
+	}
+
+	mutex_lock(&info->lock);
+	err = msm_nand_get_device(chip->dev);
+	if (err)
+		goto out;
+	err =  sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer);
+	if (err) {
+		pr_err("Failed to submit commands %d\n", err);
+		msm_nand_put_device(chip->dev);
+		goto out;
+	}
+	err = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
+			info->sps.cmd_pipe.index, dma_buffer->xfer.iovec_count,
+			&iovec_temp);
+
+	if (err) {
+		pr_err("Failed to get iovec for pipe %d: (err:%d)\n",
+				(info->sps.cmd_pipe.index), err);
+		goto out;
+	}
+	pr_debug("Read ID register value 0x%x\n", dma_buffer->data[3]);
+	if (!read_onfi_signature)
+		pr_debug("nandid: %x maker %02x device %02x\n",
+		       dma_buffer->data[3], dma_buffer->data[3] & 0xff,
+		       (dma_buffer->data[3] >> 8) & 0xff);
+	*read_id = dma_buffer->data[3];
+	if (read_id2) {
+		pr_debug("Extended Read ID register value 0x%x\n",
+				dma_buffer->data[4]);
+		*read_id2 = dma_buffer->data[4];
+	}
+	err = msm_nand_put_device(chip->dev);
+out:
+	mutex_unlock(&info->lock);
+	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+	return err;
+}
+
+/*
+ * Contains data for common configuration registers that must be programmed
+ * for every NANDc operation.
+ */
+struct msm_nand_common_cfgs {
+	uint32_t cmd;
+	uint32_t addr0;
+	uint32_t addr1;
+	uint32_t cfg0;
+	uint32_t cfg1;
+};
+
+/*
+ * Function to prepare SPS command elements to write into NANDc configuration
+ * registers as per the data defined in struct msm_nand_common_cfgs. This is
+ * required for the following NANDc operations - Erase, Bad Block checking
+ * and for reading ONFI parameter page.
+ */
+static void msm_nand_prep_cfg_cmd_desc(struct msm_nand_info *info,
+				struct msm_nand_common_cfgs data,
+				struct msm_nand_sps_cmd **curr_cmd)
+{
+	struct msm_nand_sps_cmd *cmd;
+
+	cmd = *curr_cmd;
+	msm_nand_prep_single_desc(cmd, MSM_NAND_FLASH_CMD(info), WRITE,
+			data.cmd, SPS_IOVEC_FLAG_LOCK);
+	cmd++;
+
+	msm_nand_prep_single_desc(cmd, MSM_NAND_ADDR0(info), WRITE,
+			data.addr0, 0);
+	cmd++;
+
+	msm_nand_prep_single_desc(cmd, MSM_NAND_ADDR1(info), WRITE,
+			data.addr1, 0);
+	cmd++;
+
+	msm_nand_prep_single_desc(cmd, MSM_NAND_DEV0_CFG0(info), WRITE,
+			data.cfg0, 0);
+	cmd++;
+
+	msm_nand_prep_single_desc(cmd, MSM_NAND_DEV0_CFG1(info), WRITE,
+			data.cfg1, 0);
+	cmd++;
+	*curr_cmd = cmd;
+}
+
+/*
+ * Function to perform the CRC integrity check on an ONFI parameter page
+ * read. Controller ECC is disabled for ONFI parameter page reads, so
+ * the CRC must be computed manually and checked against the value
+ * stored within the ONFI page.
+ */
+static uint16_t msm_nand_flash_onfi_crc_check(uint8_t *buffer, uint16_t count)
+{
+	int i;
+	uint16_t result;
+
+	for (i = 0; i < count; i++)
+		buffer[i] = bitrev8(buffer[i]);
+
+	result = bitrev16(crc16(bitrev16(0x4f4e), buffer, count));
+
+	for (i = 0; i < count; i++)
+		buffer[i] = bitrev8(buffer[i]);
+
+	return result;
+}
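+/*
+ * The bit reversals above adapt the kernel's reflected (LSB-first)
+ * crc16() to ONFI's MSB-first CRC-16 with polynomial 0x8005 and initial
+ * value 0x4F4E. An equivalent direct MSB-first formulation, shown here
+ * only as a cross-check sketch (not used by the driver):
+ *
+ *   static uint16_t onfi_crc16_msb(const uint8_t *buf, uint16_t count)
+ *   {
+ *	uint16_t crc = 0x4F4E;
+ *	int i, bit;
+ *
+ *	for (i = 0; i < count; i++) {
+ *		crc ^= (uint16_t)buf[i] << 8;
+ *		for (bit = 0; bit < 8; bit++)
+ *			crc = (crc & 0x8000) ?
+ *				(crc << 1) ^ 0x8005 : crc << 1;
+ *	}
+ *	return crc;
+ *   }
+ */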
+
+/*
+ * Structure that contains NANDc register data for commands required
+ * for reading ONFI parameter page.
+ */
+struct msm_nand_flash_onfi_data {
+	struct msm_nand_common_cfgs cfg;
+	uint32_t exec;
+	uint32_t ecc_bch_cfg;
+};
+
+struct version {
+	uint16_t nand_major;
+	uint16_t nand_minor;
+	uint16_t qpic_major;
+	uint16_t qpic_minor;
+};
+
+static int msm_nand_version_check(struct msm_nand_info *info,
+			struct version *nandc_version)
+{
+	uint32_t qpic_ver = 0, nand_ver = 0;
+	int err = 0;
+
+	/* Lookup the version to identify supported features */
+	err = msm_nand_flash_rd_reg(info, MSM_NAND_VERSION(info),
+		&nand_ver);
+	if (err) {
+		pr_err("Failed to read NAND_VERSION, err=%d\n", err);
+		goto out;
+	}
+	nandc_version->nand_major = (nand_ver & MSM_NAND_VERSION_MAJOR_MASK) >>
+		MSM_NAND_VERSION_MAJOR_SHIFT;
+	nandc_version->nand_minor = (nand_ver & MSM_NAND_VERSION_MINOR_MASK) >>
+		MSM_NAND_VERSION_MINOR_SHIFT;
+
+	err = msm_nand_flash_rd_reg(info, MSM_NAND_QPIC_VERSION(info),
+		&qpic_ver);
+	if (err) {
+		pr_err("Failed to read QPIC_VERSION, err=%d\n", err);
+		goto out;
+	}
+	nandc_version->qpic_major = (qpic_ver & MSM_NAND_VERSION_MAJOR_MASK) >>
+			MSM_NAND_VERSION_MAJOR_SHIFT;
+	nandc_version->qpic_minor = (qpic_ver & MSM_NAND_VERSION_MINOR_MASK) >>
+			MSM_NAND_VERSION_MINOR_SHIFT;
+	pr_info("nand_major:%d, nand_minor:%d, qpic_major:%d, qpic_minor:%d\n",
+		nandc_version->nand_major, nandc_version->nand_minor,
+		nandc_version->qpic_major, nandc_version->qpic_minor);
+out:
+	return err;
+}
+
+/*
+ * Function to identify whether the attached NAND flash device is
+ * compliant with the ONFI spec. If it is, the ONFI parameter page is
+ * read to get the device parameters.
+ */
+#define ONFI_CMDS 9
+static int msm_nand_flash_onfi_probe(struct msm_nand_info *info)
+{
+	struct msm_nand_chip *chip = &info->nand_chip;
+	struct flash_identification *flash = &info->flash_dev;
+	uint32_t crc_chk_count = 0, page_address = 0;
+	int ret = 0, i = 0, submitted_num_desc = 1;
+
+	/* SPS parameters */
+	struct msm_nand_sps_cmd *cmd, *curr_cmd;
+	struct sps_iovec *iovec;
+	struct sps_iovec iovec_temp;
+	uint32_t rdata;
+
+	/* ONFI Identifier/Parameter Page parameters */
+	uint8_t *onfi_param_info_buf = NULL;
+	dma_addr_t dma_addr_param_info = 0;
+	struct onfi_param_page *onfi_param_page_ptr;
+	struct msm_nand_flash_onfi_data data;
+	uint32_t onfi_signature = 0;
+
+	/*
+	 * The following 9 commands are required to get ONFI parameters -
+	 * flash, addr0, addr1, cfg0, cfg1, dev0_ecc_cfg,
+	 * read_loc_0, exec, flash_status (read cmd).
+	 */
+	struct {
+		struct sps_transfer xfer;
+		struct sps_iovec cmd_iovec[ONFI_CMDS];
+		struct msm_nand_sps_cmd cmd[ONFI_CMDS];
+		uint32_t flash_status;
+	} *dma_buffer;
+
+
+	/* Lookup the version to identify supported features */
+	struct version nandc_version = {0};
+
+	ret = msm_nand_version_check(info, &nandc_version);
+	if (!ret && !(nandc_version.nand_major == 1 &&
+			nandc_version.nand_minor >= 5 &&
+			nandc_version.qpic_major == 1 &&
+			nandc_version.qpic_minor >= 5)) {
+		ret = -EPERM;
+		goto out;
+	}
+	wait_event(chip->dma_wait_queue, (onfi_param_info_buf =
+		msm_nand_get_dma_buffer(chip, ONFI_PARAM_INFO_LENGTH)));
+	dma_addr_param_info = msm_virt_to_dma(chip, onfi_param_info_buf);
+
+	wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer
+				(chip, sizeof(*dma_buffer))));
+
+	ret = msm_nand_flash_read_id(info, 1, &onfi_signature, NULL);
+	if (ret < 0) {
+		pr_err("Failed to read ONFI signature\n");
+		goto free_dma;
+	}
+	if (onfi_signature != ONFI_PARAMETER_PAGE_SIGNATURE) {
+		pr_info("Found a non ONFI device\n");
+		ret = -EIO;
+		goto free_dma;
+	}
+
+	memset(&data, 0, sizeof(struct msm_nand_flash_onfi_data));
+
+	/* Look up the partition to which apps has access */
+	for (i = 0; i < FLASH_PTABLE_MAX_PARTS_V4; i++) {
+		if (mtd_part[i].name && !strcmp("boot", mtd_part[i].name)) {
+			page_address = mtd_part[i].offset << 6;
+			break;
+		}
+	}
+	if (!page_address) {
+		pr_info("%s: no apps partition found in smem\n", __func__);
+		ret = -EPERM;
+		goto free_dma;
+	}
+	data.cfg.cmd = MSM_NAND_CMD_PAGE_READ_ONFI;
+	data.exec = 1;
+	data.cfg.addr0 = (page_address << 16) |
+				FLASH_READ_ONFI_PARAMETERS_ADDRESS;
+	data.cfg.addr1 = (page_address >> 16) & 0xFF;
+	data.cfg.cfg0 =	MSM_NAND_CFG0_RAW_ONFI_PARAM_INFO;
+	data.cfg.cfg1 = MSM_NAND_CFG1_RAW_ONFI_PARAM_INFO;
+	data.ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
+	dma_buffer->flash_status = 0xeeeeeeee;
+
+	curr_cmd = cmd = dma_buffer->cmd;
+	msm_nand_prep_cfg_cmd_desc(info, data.cfg, &curr_cmd);
+
+	cmd = curr_cmd;
+	msm_nand_prep_single_desc(cmd, MSM_NAND_DEV0_ECC_CFG(info), WRITE,
+			data.ecc_bch_cfg, 0);
+	cmd++;
+
+	rdata = (0 << 0) | (ONFI_PARAM_INFO_LENGTH << 16) | (1 << 31);
+	msm_nand_prep_single_desc(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE,
+			rdata, 0);
+	cmd++;
+
+	msm_nand_prep_single_desc(cmd, MSM_NAND_EXEC_CMD(info), WRITE,
+		data.exec, SPS_IOVEC_FLAG_NWD);
+	cmd++;
+
+	msm_nand_prep_single_desc(cmd, MSM_NAND_FLASH_STATUS(info), READ,
+		msm_virt_to_dma(chip, &dma_buffer->flash_status),
+		SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT);
+	cmd++;
+
+	WARN_ON(cmd - dma_buffer->cmd > ONFI_CMDS);
+	dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
+	dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
+	dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
+					&dma_buffer->cmd_iovec);
+	iovec = dma_buffer->xfer.iovec;
+
+	for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
+		iovec->addr =  msm_virt_to_dma(chip,
+				&dma_buffer->cmd[i].ce);
+		iovec->size = sizeof(struct sps_command_element);
+		iovec->flags = dma_buffer->cmd[i].flags;
+		iovec++;
+	}
+	mutex_lock(&info->lock);
+	ret = msm_nand_get_device(chip->dev);
+	if (ret)
+		goto unlock_mutex;
+	/* Submit data descriptor */
+	ret = sps_transfer_one(info->sps.data_prod.handle, dma_addr_param_info,
+			ONFI_PARAM_INFO_LENGTH, NULL, SPS_IOVEC_FLAG_INT);
+	if (ret) {
+		pr_err("Failed to submit data descriptors %d\n", ret);
+		goto put_dev;
+	}
+	/* Submit command descriptors */
+	ret =  sps_transfer(info->sps.cmd_pipe.handle,
+			&dma_buffer->xfer);
+	if (ret) {
+		pr_err("Failed to submit commands %d\n", ret);
+		goto put_dev;
+	}
+
+	ret = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
+			info->sps.cmd_pipe.index, dma_buffer->xfer.iovec_count,
+			&iovec_temp);
+
+	if (ret) {
+		pr_err("Failed to get iovec for pipe %d: (ret:%d)\n",
+				(info->sps.cmd_pipe.index), ret);
+		goto put_dev;
+	}
+	ret = msm_nand_sps_get_iovec(info->sps.data_prod.handle,
+			info->sps.data_prod.index, submitted_num_desc,
+			&iovec_temp);
+	if (ret) {
+		pr_err("Failed to get iovec for pipe %d: (ret:%d)\n",
+				(info->sps.data_prod.index), ret);
+		goto put_dev;
+	}
+
+	ret = msm_nand_put_device(chip->dev);
+	mutex_unlock(&info->lock);
+	if (ret)
+		goto free_dma;
+
+	/* Check for flash status errors */
+	if (dma_buffer->flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
+		pr_err("MPU/OP err (0x%x) is set\n", dma_buffer->flash_status);
+		ret = -EIO;
+		goto free_dma;
+	}
+
+	for (crc_chk_count = 0; crc_chk_count < ONFI_PARAM_INFO_LENGTH
+			/ ONFI_PARAM_PAGE_LENGTH; crc_chk_count++) {
+		onfi_param_page_ptr =
+			(struct onfi_param_page *)
+			(&(onfi_param_info_buf
+			[ONFI_PARAM_PAGE_LENGTH *
+			crc_chk_count]));
+		if (msm_nand_flash_onfi_crc_check(
+			(uint8_t *)onfi_param_page_ptr,
+			ONFI_PARAM_PAGE_LENGTH - 2) ==
+			onfi_param_page_ptr->integrity_crc) {
+			break;
+		}
+	}
+	if (crc_chk_count >= ONFI_PARAM_INFO_LENGTH
+			/ ONFI_PARAM_PAGE_LENGTH) {
+		pr_err("CRC Check failed on param page\n");
+		ret = -EIO;
+		goto free_dma;
+	}
+	ret = msm_nand_flash_read_id(info, 0, &flash->flash_id, NULL);
+	if (ret < 0) {
+		pr_err("Failed to read flash ID\n");
+		goto free_dma;
+	}
+	flash->widebus  = onfi_param_page_ptr->features_supported & 0x01;
+	flash->pagesize = onfi_param_page_ptr->number_of_data_bytes_per_page;
+	flash->blksize  = onfi_param_page_ptr->number_of_pages_per_block *
+					flash->pagesize;
+	flash->oobsize  = onfi_param_page_ptr->number_of_spare_bytes_per_page;
+	flash->density  = onfi_param_page_ptr->number_of_blocks_per_logical_unit
+					* flash->blksize;
+	flash->ecc_correctability = onfi_param_page_ptr->
+					number_of_bits_ecc_correctability;
+
+	pr_info("Found an ONFI compliant device %s\n",
+			onfi_param_page_ptr->device_model);
+	/*
+	 * Temporary workaround for the MT29F4G08ABC device. It does not
+	 * properly adhere to the ONFI specification and reports itself
+	 * as a 16-bit device even though it is actually an 8-bit
+	 * device.
+	 */
+	if (!strcmp(onfi_param_page_ptr->device_model, "MT29F4G08ABC"))
+		flash->widebus  = 0;
+	goto free_dma;
+put_dev:
+	msm_nand_put_device(chip->dev);
+unlock_mutex:
+	mutex_unlock(&info->lock);
+free_dma:
+	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+	msm_nand_release_dma_buffer(chip, onfi_param_info_buf,
+			ONFI_PARAM_INFO_LENGTH);
+out:
+	return ret;
+}
+
+/*
+ * Structure that contains read/write parameters required for reading/writing
+ * from/to a page.
+ */
+struct msm_nand_rw_params {
+	uint32_t page;
+	uint32_t page_count;
+	uint32_t sectordatasize;
+	uint32_t sectoroobsize;
+	uint32_t cwperpage;
+	uint32_t oob_len_cmd;
+	uint32_t oob_len_data;
+	uint32_t start_sector;
+	uint32_t oob_col;
+	dma_addr_t data_dma_addr;
+	dma_addr_t oob_dma_addr;
+	dma_addr_t ecc_dma_addr;
+	dma_addr_t data_dma_addr_curr;
+	dma_addr_t oob_dma_addr_curr;
+	dma_addr_t ecc_dma_addr_curr;
+	bool read;
+};
+
+/*
+ * Structure that contains NANDc register data required for reading/writing
+ * from/to a page.
+ */
+struct msm_nand_rw_reg_data {
+	uint32_t cmd;
+	uint32_t addr0;
+	uint32_t addr1;
+	uint32_t cfg0;
+	uint32_t cfg1;
+	uint32_t ecc_bch_cfg;
+	uint32_t exec;
+	uint32_t ecc_cfg;
+	uint32_t clrfstatus;
+	uint32_t clrrstatus;
+};
+
+/*
+ * Function that validates the page read/write MTD parameters received
+ * from upper layers such as MTD/YAFFS2 and returns an error for any
+ * operation the driver does not support. On success, it also maps the
+ * data and OOB buffers received for DMA.
+ */
+static int msm_nand_validate_mtd_params(struct mtd_info *mtd, bool read,
+					loff_t offset,
+					struct mtd_oob_ops *ops,
+					struct msm_nand_rw_params *args)
+{
+	struct msm_nand_info *info = mtd->priv;
+	struct msm_nand_chip *chip = &info->nand_chip;
+	int err = 0;
+
+	pr_debug("========================================================\n");
+	pr_debug("offset 0x%llx mode %d\ndatbuf 0x%pK datlen 0x%x\n",
+			offset, ops->mode, ops->datbuf, ops->len);
+	pr_debug("oobbuf 0x%pK ooblen 0x%x\n", ops->oobbuf, ops->ooblen);
+
+	if (ops->mode == MTD_OPS_PLACE_OOB) {
+		pr_err("MTD_OPS_PLACE_OOB is not supported\n");
+		err = -EINVAL;
+		goto out;
+	}
+
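+	/*
+	 * Convert the byte offset to a page index: a 2K page size implies a
+	 * shift of 11, a 4K page size a shift of 12.
+	 */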
+	if (mtd->writesize == PAGE_SIZE_2K)
+		args->page = offset >> 11;
+
+	if (mtd->writesize == PAGE_SIZE_4K)
+		args->page = offset >> 12;
+
+	args->oob_len_cmd = ops->ooblen;
+	args->oob_len_data = ops->ooblen;
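+	/*
+	 * Each codeword covers 512 bytes of main data, so writesize >> 9
+	 * yields the number of codewords per page.
+	 */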
+	args->cwperpage = (mtd->writesize >> 9);
+	args->read = read;
+
+	if (offset & (mtd->writesize - 1)) {
+		pr_err("unsupported offset 0x%llx\n", offset);
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (!read && !ops->datbuf) {
+		pr_err("No data buffer provided for write!!\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (ops->mode == MTD_OPS_RAW) {
+		if (!ops->datbuf) {
+			pr_err("No data buffer provided for RAW mode\n");
+			err =  -EINVAL;
+			goto out;
+		} else if ((ops->len % (mtd->writesize +
+				mtd->oobsize)) != 0) {
+			pr_err("unsupported data len %d for RAW mode\n",
+				ops->len);
+			err = -EINVAL;
+			goto out;
+		}
+		args->page_count = ops->len / (mtd->writesize + mtd->oobsize);
+
+	} else if (ops->mode == MTD_OPS_AUTO_OOB) {
+		if (ops->datbuf && (ops->len % mtd->writesize) != 0) {
+			/* when ops->datbuf is NULL, ops->len can be ooblen */
+			pr_err("unsupported data len %d for AUTO mode\n",
+					ops->len);
+			err = -EINVAL;
+			goto out;
+		}
+		if (read && ops->oobbuf && !ops->datbuf) {
+			args->start_sector = args->cwperpage - 1;
+			args->page_count = ops->ooblen / mtd->oobavail;
+			if ((args->page_count == 0) && (ops->ooblen))
+				args->page_count = 1;
+		} else if (ops->datbuf) {
+			args->page_count = ops->len / mtd->writesize;
+		}
+	}
+
+	if (ops->datbuf) {
+		if (read)
+			memset(ops->datbuf, 0xFF, ops->len);
+		args->data_dma_addr_curr = args->data_dma_addr =
+			msm_nand_dma_map(chip->dev, ops->datbuf, ops->len,
+				      (read ? DMA_FROM_DEVICE : DMA_TO_DEVICE));
+		if (dma_mapping_error(chip->dev, args->data_dma_addr)) {
+			pr_err("dma mapping failed for 0x%pK\n", ops->datbuf);
+			err = -EIO;
+			goto out;
+		}
+	}
+	if (ops->oobbuf) {
+		if (read)
+			memset(ops->oobbuf, 0xFF, ops->ooblen);
+		args->oob_dma_addr_curr = args->oob_dma_addr =
+			msm_nand_dma_map(chip->dev, ops->oobbuf, ops->ooblen,
+				(read ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE));
+		if (dma_mapping_error(chip->dev, args->oob_dma_addr)) {
+			pr_err("dma mapping failed for 0x%pK\n", ops->oobbuf);
+			err = -EIO;
+			goto dma_map_oobbuf_failed;
+		}
+	}
+	goto out;
+dma_map_oobbuf_failed:
+	if (ops->datbuf)
+		dma_unmap_page(chip->dev, args->data_dma_addr, ops->len,
+				(read ? DMA_FROM_DEVICE : DMA_TO_DEVICE));
+out:
+	return err;
+}
+
+/*
+ * Function that updates NANDc register data (struct msm_nand_rw_reg_data)
+ * required for page read/write.
+ */
+static void msm_nand_update_rw_reg_data(struct msm_nand_chip *chip,
+					struct mtd_oob_ops *ops,
+					struct msm_nand_rw_params *args,
+					struct msm_nand_rw_reg_data *data)
+{
+	if (args->read) {
+		if (ops->mode != MTD_OPS_RAW) {
+			data->cmd = MSM_NAND_CMD_PAGE_READ_ECC;
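+			/*
+			 * CW_PER_PAGE holds the number of codewords to
+			 * transfer minus one; subtracting start_sector
+			 * allows a read to begin at a mid-page codeword.
+			 */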
+			data->cfg0 =
+			(chip->cfg0 & ~(7U << CW_PER_PAGE)) |
+			(((args->cwperpage-1) - args->start_sector)
+			 << CW_PER_PAGE);
+			data->cfg1 = chip->cfg1;
+			data->ecc_bch_cfg = chip->ecc_bch_cfg;
+		} else {
+			data->cmd = MSM_NAND_CMD_PAGE_READ_ALL;
+			data->cfg0 =
+			(chip->cfg0_raw & ~(7U << CW_PER_PAGE)) |
+			(((args->cwperpage-1) - args->start_sector)
+			 << CW_PER_PAGE);
+			data->cfg1 = chip->cfg1_raw;
+			data->ecc_bch_cfg = chip->ecc_cfg_raw;
+		}
+
+	} else {
+		if (ops->mode != MTD_OPS_RAW) {
+			data->cmd = MSM_NAND_CMD_PRG_PAGE;
+			data->cfg0 = chip->cfg0;
+			data->cfg1 = chip->cfg1;
+			data->ecc_bch_cfg = chip->ecc_bch_cfg;
+		} else {
+			data->cmd = MSM_NAND_CMD_PRG_PAGE_ALL;
+			data->cfg0 = chip->cfg0_raw;
+			data->cfg1 = chip->cfg1_raw;
+			data->ecc_bch_cfg = chip->ecc_cfg_raw;
+		}
+		data->clrfstatus = MSM_NAND_RESET_FLASH_STS;
+		data->clrrstatus = MSM_NAND_RESET_READ_STS;
+	}
+	data->exec = 1;
+	data->ecc_cfg = chip->ecc_buf_cfg;
+}
+
+/*
+ * Function to prepare a series of SPS command descriptors required for a page
+ * read/write operation.
+ */
+static void msm_nand_prep_rw_cmd_desc(struct mtd_oob_ops *ops,
+				struct msm_nand_rw_params *args,
+				struct msm_nand_rw_reg_data *data,
+				struct msm_nand_info *info,
+				uint32_t curr_cw,
+				struct msm_nand_rw_cmd_desc *cmd_list,
+				uint32_t *cw_desc_cnt,
+				uint32_t ecc_parity_bytes)
+{
+	struct msm_nand_chip *chip = &info->nand_chip;
+	uint32_t rdata;
+	/* read_location register parameters */
+	uint32_t offset, size, last_read;
+	struct sps_command_element *curr_ce, *start_ce;
+	uint32_t *flags_ptr, *num_ce_ptr;
+
+	if (curr_cw == args->start_sector) {
+		curr_ce = start_ce = &cmd_list->setup_desc.ce[0];
+		num_ce_ptr = &cmd_list->setup_desc.num_ce;
+		flags_ptr = &cmd_list->setup_desc.flags;
+		*flags_ptr = CMD_LCK;
+		cmd_list->count = 1;
+		msm_nand_prep_ce(curr_ce, MSM_NAND_FLASH_CMD(info), WRITE,
+				data->cmd);
+		curr_ce++;
+
+		msm_nand_prep_ce(curr_ce, MSM_NAND_ADDR0(info), WRITE,
+				data->addr0);
+		curr_ce++;
+
+		msm_nand_prep_ce(curr_ce, MSM_NAND_ADDR1(info), WRITE,
+				data->addr1);
+		curr_ce++;
+
+		msm_nand_prep_ce(curr_ce, MSM_NAND_DEV0_CFG0(info), WRITE,
+				data->cfg0);
+		curr_ce++;
+
+		msm_nand_prep_ce(curr_ce, MSM_NAND_DEV0_CFG1(info), WRITE,
+				data->cfg1);
+		curr_ce++;
+
+		msm_nand_prep_ce(curr_ce, MSM_NAND_DEV0_ECC_CFG(info), WRITE,
+				data->ecc_bch_cfg);
+		curr_ce++;
+
+		msm_nand_prep_ce(curr_ce, MSM_NAND_EBI2_ECC_BUF_CFG(info),
+				WRITE, data->ecc_cfg);
+		curr_ce++;
+
+		if (!args->read) {
+			msm_nand_prep_ce(curr_ce, MSM_NAND_FLASH_STATUS(info),
+					WRITE, data->clrfstatus);
+			curr_ce++;
+			goto sub_exec_cmd;
+		} else {
+			msm_nand_prep_ce(curr_ce,
+					MSM_NAND_ERASED_CW_DETECT_CFG(info),
+					WRITE, CLR_ERASED_PAGE_DET);
+			curr_ce++;
+			msm_nand_prep_ce(curr_ce,
+					MSM_NAND_ERASED_CW_DETECT_CFG(info),
+					WRITE, SET_ERASED_PAGE_DET);
+			curr_ce++;
+		}
+	} else {
+		curr_ce = start_ce = &cmd_list->cw_desc[*cw_desc_cnt].ce[0];
+		num_ce_ptr = &cmd_list->cw_desc[*cw_desc_cnt].num_ce;
+		flags_ptr = &cmd_list->cw_desc[*cw_desc_cnt].flags;
+		*cw_desc_cnt += 1;
+		*flags_ptr = CMD;
+		cmd_list->count++;
+	}
+	if (!args->read)
+		goto sub_exec_cmd;
+
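+	/*
+	 * READ_LOCATION_n register layout, as implied by the packing below:
+	 * bits [15:0] hold the byte offset within the codeword, bits [25:16]
+	 * the number of bytes to read, and bit 31 flags the last read
+	 * request for the codeword.
+	 */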
+	if (ops->mode == MTD_OPS_RAW) {
+		if (ecc_parity_bytes) {
+			rdata = (BYTES_517 << 0) | (ecc_parity_bytes << 16)
+				| (1 << 31);
+			msm_nand_prep_ce(curr_ce,
+					MSM_NAND_READ_LOCATION_0(info),
+					WRITE, rdata);
+			curr_ce++;
+		} else {
+			rdata = (0 << 0) | (chip->cw_size << 16) | (1 << 31);
+			msm_nand_prep_ce(curr_ce,
+					MSM_NAND_READ_LOCATION_0(info),
+					WRITE, rdata);
+			curr_ce++;
+		}
+	}
+	if (ops->mode == MTD_OPS_AUTO_OOB) {
+		if (ops->datbuf) {
+			offset = 0;
+			size = (curr_cw < (args->cwperpage - 1)) ? 516 :
+				(512 - ((args->cwperpage - 1) << 2));
+			last_read = (curr_cw < (args->cwperpage - 1)) ? 1 :
+				(ops->oobbuf ? 0 : 1);
+			rdata = (offset << 0) | (size << 16) |
+				(last_read << 31);
+
+			msm_nand_prep_ce(curr_ce,
+					MSM_NAND_READ_LOCATION_0(info),
+					WRITE,
+					rdata);
+			curr_ce++;
+		}
+		if (curr_cw == (args->cwperpage - 1) && ops->oobbuf) {
+			offset = 512 - ((args->cwperpage - 1) << 2);
+			size = (args->cwperpage) << 2;
+			if (size > args->oob_len_cmd)
+				size = args->oob_len_cmd;
+			args->oob_len_cmd -= size;
+			last_read = 1;
+			rdata = (offset << 0) | (size << 16) |
+				(last_read << 31);
+
+			if (!ops->datbuf)
+				msm_nand_prep_ce(curr_ce,
+						MSM_NAND_READ_LOCATION_0(info),
+						WRITE, rdata);
+			else
+				msm_nand_prep_ce(curr_ce,
+						MSM_NAND_READ_LOCATION_1(info),
+						WRITE, rdata);
+			curr_ce++;
+		}
+	}
+sub_exec_cmd:
+	*flags_ptr |= NWD;
+	msm_nand_prep_ce(curr_ce, MSM_NAND_EXEC_CMD(info), WRITE, data->exec);
+	curr_ce++;
+
+	*num_ce_ptr = curr_ce - start_ce;
+}
+
+/*
+ * Function to prepare and submit SPS data descriptors required for a page
+ * read/write operation.
+ */
+static int msm_nand_submit_rw_data_desc(struct mtd_oob_ops *ops,
+				struct msm_nand_rw_params *args,
+				struct msm_nand_info *info,
+				uint32_t curr_cw,
+				uint32_t ecc_parity_bytes)
+{
+	struct msm_nand_chip *chip = &info->nand_chip;
+	struct sps_pipe *data_pipe_handle;
+	uint32_t sectordatasize, sectoroobsize;
+	uint32_t sps_flags = 0;
+	int err = 0;
+
+	if (args->read)
+		data_pipe_handle = info->sps.data_prod.handle;
+	else
+		data_pipe_handle = info->sps.data_cons.handle;
+
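+	/*
+	 * BAM/SPS flag usage below: SPS_IOVEC_FLAG_EOT marks the end of a
+	 * transfer on the consumer (write) pipe, while SPS_IOVEC_FLAG_INT
+	 * requests an interrupt once the descriptor for the last codeword
+	 * completes.
+	 */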
+	if (ops->mode == MTD_OPS_RAW) {
+		if (ecc_parity_bytes && args->read) {
+			if (curr_cw == (args->cwperpage - 1))
+				sps_flags |= SPS_IOVEC_FLAG_INT;
+
+			/* read only ecc bytes */
+			err = sps_transfer_one(data_pipe_handle,
+					args->ecc_dma_addr_curr,
+					ecc_parity_bytes, NULL,
+					sps_flags);
+			if (err)
+				goto out;
+			args->ecc_dma_addr_curr += ecc_parity_bytes;
+		} else {
+			sectordatasize = chip->cw_size;
+			if (!args->read)
+				sps_flags = SPS_IOVEC_FLAG_EOT;
+			if (curr_cw == (args->cwperpage - 1))
+				sps_flags |= SPS_IOVEC_FLAG_INT;
+
+			err = sps_transfer_one(data_pipe_handle,
+					args->data_dma_addr_curr,
+					sectordatasize, NULL,
+					sps_flags);
+			if (err)
+				goto out;
+			args->data_dma_addr_curr += sectordatasize;
+		}
+	} else if (ops->mode == MTD_OPS_AUTO_OOB) {
+		if (ops->datbuf) {
+			sectordatasize = (curr_cw < (args->cwperpage - 1))
+			? 516 : (512 - ((args->cwperpage - 1) << 2));
+
+			if (!args->read) {
+				sps_flags = SPS_IOVEC_FLAG_EOT;
+				if (curr_cw == (args->cwperpage - 1) &&
+						ops->oobbuf)
+					sps_flags = 0;
+			}
+			if ((curr_cw == (args->cwperpage - 1)) && !ops->oobbuf)
+				sps_flags |= SPS_IOVEC_FLAG_INT;
+
+			err = sps_transfer_one(data_pipe_handle,
+					args->data_dma_addr_curr,
+					sectordatasize, NULL,
+					sps_flags);
+			if (err)
+				goto out;
+			args->data_dma_addr_curr += sectordatasize;
+		}
+
+		if (ops->oobbuf && (curr_cw == (args->cwperpage - 1))) {
+			sectoroobsize = args->cwperpage << 2;
+			if (sectoroobsize > args->oob_len_data)
+				sectoroobsize = args->oob_len_data;
+
+			if (!args->read)
+				sps_flags |= SPS_IOVEC_FLAG_EOT;
+			sps_flags |= SPS_IOVEC_FLAG_INT;
+			err = sps_transfer_one(data_pipe_handle,
+					args->oob_dma_addr_curr,
+					sectoroobsize, NULL,
+					sps_flags);
+			if (err)
+				goto out;
+			args->oob_dma_addr_curr += sectoroobsize;
+			args->oob_len_data -= sectoroobsize;
+		}
+	}
+out:
+	return err;
+}
+
+/*
+ * Read the ECC bytes and check whether the page is erased or not.
+ *
+ * NAND devices manufactured on newer process nodes are susceptible to
+ * bit-flips. These bit-flips are easily fixable with the ECC engine and
+ * the ECC information stored on the NAND device. This device-specific
+ * information is found in the data sheet for the NAND device and is
+ * usually specified as a "number of bit-flips expected per codeword".
+ * For example, "a single bit-flip per codeword". This also means that
+ * the number of ECC errors no longer increases over time as it did in
+ * the past, so it can no longer be used to predict a "bad block about
+ * to happen" situation.
+ *
+ * What this means for erased pages:
+ * Since the ECC data for an erased page is all 0xFF's, the ECC engine
+ * would not be able to correct any bit-flips that occur in these newer
+ * parts. If the NAND controller is unable to identify the erased page
+ * due to the bit-flips, then "uncorrectable ECC errors" would be
+ * detected and reported to the file system layer (YAFFS2/UBIFS etc.),
+ * which would result in a good block being marked as a bad block and
+ * also lead to error scenarios.
+ *
+ * To handle this, the following is done by software until newer NAND
+ * controller hardware is available that can successfully detect erased
+ * pages with bit-flips:
+ *
+ * 1. msm_nand_read_oob() calls this function when "uncorrectable ECC
+ *	errors" occur.
+ * 2. This function then performs a raw read of the page.
+ * 3. This read is done to extract ECC bytes and not data from that page.
+ * 4. For each codeword's ECC data, the following is done:
+ *	a. Count the number of zero bits.
+ *	b. If that count is greater than <BIT-FLIPS-EXPECTED>, then it is
+ *		not an erased page.
+ *	c. Else repeat for the next codeword's ECC data.
+ *	d. If all codewords have fewer than <BIT-FLIPS-EXPECTED> bits of
+ *		zeros, then it's considered an erased page.
+ *
+ * Since "uncorrectable ECC errors" do not occur except for either an
+ * erased page or an actual error, this solution works.
+ */
+static int msm_nand_is_erased_page(struct mtd_info *mtd, loff_t from,
+			     struct mtd_oob_ops *ops,
+			     struct msm_nand_rw_params *rw_params,
+			     bool *erased_page)
+{
+	struct msm_nand_info *info = mtd->priv;
+	struct msm_nand_chip *chip = &info->nand_chip;
+	uint32_t cwperpage = (mtd->writesize >> 9);
+	int err, submitted_num_desc = 0;
+	uint32_t n = 0, num_zero_bits = 0, total_ecc_byte_cnt;
+	struct msm_nand_rw_reg_data data;
+	struct sps_iovec *iovec;
+	struct sps_iovec iovec_temp;
+	struct mtd_oob_ops raw_ops;
+
+	/*
+	 * The following 6 commands are sent only once, for the first
+	 * codeword (CW) - addr0, addr1, dev0_cfg0, dev0_cfg1,
+	 * dev0_ecc_cfg, ebi2_ecc_buf_cfg. The next 6 commands are sent
+	 * for every CW - flash, read_location_0, read_location_1, exec,
+	 * flash_status and buffer_status.
+	 */
+	struct msm_nand_rw_cmd_desc *cmd_list = NULL;
+	uint32_t cw_desc_cnt = 0;
+	struct {
+		struct sps_transfer xfer;
+		struct sps_iovec cmd_iovec[MAX_DESC];
+		struct {
+			uint32_t count;
+			struct msm_nand_cmd_setup_desc setup_desc;
+			struct msm_nand_cmd_cw_desc cw_desc[MAX_DESC - 1];
+		} cmd_list;
+		struct {
+			uint32_t flash_status;
+			uint32_t buffer_status;
+			uint32_t erased_cw_status;
+		} result[MAX_CW_PER_PAGE];
+	} *dma_buffer;
+	uint8_t *ecc;
+
+	pr_debug("========================================================\n");
+	total_ecc_byte_cnt = (chip->ecc_parity_bytes * cwperpage);
+	memcpy(&raw_ops, ops, sizeof(struct mtd_oob_ops));
+	raw_ops.mode = MTD_OPS_RAW;
+	ecc = kzalloc(total_ecc_byte_cnt, GFP_KERNEL);
+	if (!ecc)
+		return -ENOMEM;
+
+	wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
+					chip, sizeof(*dma_buffer))));
+
+	memset(&data, 0, sizeof(struct msm_nand_rw_reg_data));
+	msm_nand_update_rw_reg_data(chip, &raw_ops, rw_params, &data);
+	cmd_list = (struct msm_nand_rw_cmd_desc *)&dma_buffer->cmd_list;
+
+	/* Map the ECC buffer for DMA operations */
+	rw_params->ecc_dma_addr_curr = rw_params->ecc_dma_addr =
+		dma_map_single(chip->dev, ecc, total_ecc_byte_cnt,
+				DMA_FROM_DEVICE);
+
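+	/*
+	 * ADDR0 packs the lower 16 bits of the page number with the column
+	 * (OOB) offset; ADDR1 carries the next 8 bits of the page number.
+	 */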
+	data.addr0 = (rw_params->page << 16) | rw_params->oob_col;
+	data.addr1 = (rw_params->page >> 16) & 0xff;
+	for (n = rw_params->start_sector; n < cwperpage; n++) {
+		struct sps_command_element *curr_ce, *start_ce;
+
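+		/*
+		 * Seed the status words with a sentinel pattern so that a
+		 * stale value can be recognized if the controller never
+		 * writes them back.
+		 */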
+		dma_buffer->result[n].flash_status = 0xeeeeeeee;
+		dma_buffer->result[n].buffer_status = 0xeeeeeeee;
+		dma_buffer->result[n].erased_cw_status = 0xeeeeee00;
+
+		msm_nand_prep_rw_cmd_desc(&raw_ops, rw_params, &data, info,
+				n, cmd_list, &cw_desc_cnt,
+				chip->ecc_parity_bytes);
+
+		start_ce = &cmd_list->cw_desc[cw_desc_cnt].ce[0];
+		curr_ce = start_ce;
+		cmd_list->cw_desc[cw_desc_cnt].flags = CMD;
+		if (n == (cwperpage - 1))
+			cmd_list->cw_desc[cw_desc_cnt].flags |=
+				INT_UNLCK;
+		cmd_list->count++;
+
+		msm_nand_prep_ce(curr_ce, MSM_NAND_FLASH_STATUS(info),
+				READ, msm_virt_to_dma(chip,
+				&dma_buffer->result[n].flash_status));
+		curr_ce++;
+
+		msm_nand_prep_ce(curr_ce, MSM_NAND_BUFFER_STATUS(info),
+				READ, msm_virt_to_dma(chip,
+				&dma_buffer->result[n].buffer_status));
+		curr_ce++;
+
+		msm_nand_prep_ce(curr_ce,
+				MSM_NAND_ERASED_CW_DETECT_STATUS(info),
+				READ, msm_virt_to_dma(chip,
+				&dma_buffer->result[n].erased_cw_status));
+		curr_ce++;
+		cmd_list->cw_desc[cw_desc_cnt++].num_ce = curr_ce -
+			start_ce;
+	}
+
+	dma_buffer->xfer.iovec_count = cmd_list->count;
+	dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
+	dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
+			&dma_buffer->cmd_iovec);
+	iovec = dma_buffer->xfer.iovec;
+
+	iovec->addr =  msm_virt_to_dma(chip,
+			&cmd_list->setup_desc.ce[0]);
+	iovec->size = sizeof(struct sps_command_element) *
+		cmd_list->setup_desc.num_ce;
+	iovec->flags = cmd_list->setup_desc.flags;
+	iovec++;
+	for (n = 0; n < (cmd_list->count - 1); n++) {
+		iovec->addr =  msm_virt_to_dma(chip,
+				&cmd_list->cw_desc[n].ce[0]);
+		iovec->size = sizeof(struct sps_command_element) *
+			cmd_list->cw_desc[n].num_ce;
+		iovec->flags = cmd_list->cw_desc[n].flags;
+		iovec++;
+	}
+	mutex_lock(&info->lock);
+	err = msm_nand_get_device(chip->dev);
+	if (err)
+		goto unlock_mutex;
+	/* Submit data descriptors */
+	for (n = rw_params->start_sector; n < cwperpage; n++) {
+		err = msm_nand_submit_rw_data_desc(&raw_ops,
+				rw_params, info, n,
+				chip->ecc_parity_bytes);
+		if (err) {
+			pr_err("Failed to submit data descs %d\n", err);
+			panic("error in nand driver\n");
+			goto put_dev;
+		}
+	}
+	submitted_num_desc = cwperpage - rw_params->start_sector;
+
+	/* Submit command descriptors */
+	err =  sps_transfer(info->sps.cmd_pipe.handle,
+			&dma_buffer->xfer);
+	if (err) {
+		pr_err("Failed to submit commands %d\n", err);
+		goto put_dev;
+	}
+
+	err = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
+			info->sps.cmd_pipe.index,
+			dma_buffer->xfer.iovec_count,
+			&iovec_temp);
+	if (err) {
+		pr_err("Failed to get iovec for pipe %d: (err:%d)\n",
+				(info->sps.cmd_pipe.index), err);
+		goto put_dev;
+	}
+	err = msm_nand_sps_get_iovec(info->sps.data_prod.handle,
+			info->sps.data_prod.index, submitted_num_desc,
+			&iovec_temp);
+	if (err) {
+		pr_err("Failed to get iovec for pipe %d: (err:%d)\n",
+				(info->sps.data_prod.index), err);
+		goto put_dev;
+	}
+
+	err = msm_nand_put_device(chip->dev);
+	mutex_unlock(&info->lock);
+	if (err)
+		goto free_dma;
+
+	pr_debug("addr0: 0x%08x, addr1: 0x%08x\n", data.addr0, data.addr1);
+	for (n = rw_params->start_sector; n < cwperpage; n++)
+		pr_debug("cw %d: flash_sts %x buffr_sts %x, erased_cw_status: %x\n",
+				n, dma_buffer->result[n].flash_status,
+				dma_buffer->result[n].buffer_status,
+				dma_buffer->result[n].erased_cw_status);
+
+	goto free_dma;
+put_dev:
+	msm_nand_put_device(chip->dev);
+unlock_mutex:
+	mutex_unlock(&info->lock);
+free_dma:
+	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+	/* Unmap the ECC DMA memory */
+	dma_unmap_single(chip->dev, rw_params->ecc_dma_addr,
+			total_ecc_byte_cnt, DMA_FROM_DEVICE);
+	/* check for bit flips in ecc data */
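+	/*
+	 * An erased page reads back as all 0xFF, including its ECC bytes,
+	 * so every zero bit found here is a bit-flip. Up to 4 zero bits per
+	 * codeword's ECC (the <BIT-FLIPS-EXPECTED> threshold described
+	 * above) are tolerated before the page is declared not erased.
+	 */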
+	for (n = rw_params->start_sector; n < cwperpage; n++) {
+		uint8_t *ecc_temp = ecc;
+		int last_pos = 0, next_pos = 0;
+		int ecc_bytes_percw_in_bits = (chip->ecc_parity_bytes * 8);
+
+		do {
+			last_pos = find_next_zero_bit(ecc_temp,
+					ecc_bytes_percw_in_bits, next_pos);
+
+			if (last_pos < ecc_bytes_percw_in_bits)
+				num_zero_bits++;
+
+			if (num_zero_bits > 4) {
+				*erased_page = false;
+				goto free_mem;
+			}
+
+			next_pos = last_pos + 1;
+		} while (last_pos < ecc_bytes_percw_in_bits);
+
+		num_zero_bits = last_pos = next_pos = 0;
+		ecc_temp += chip->ecc_parity_bytes;
+	}
+
+	if ((n == cwperpage) && (num_zero_bits <= 4))
+		*erased_page = true;
+free_mem:
+	kfree(ecc);
+	pr_debug("========================================================\n");
+	return err;
+}
+
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to read a
+ * page with main and/or spare data.
+ */
+static int msm_nand_read_oob(struct mtd_info *mtd, loff_t from,
+			     struct mtd_oob_ops *ops)
+{
+	struct msm_nand_info *info = mtd->priv;
+	struct msm_nand_chip *chip = &info->nand_chip;
+	struct flash_identification *flash_dev = &info->flash_dev;
+	uint32_t cwperpage = (mtd->writesize >> 9);
+	int err, pageerr = 0, rawerr = 0, submitted_num_desc = 0;
+	uint32_t n = 0, pages_read = 0;
+	uint32_t ecc_errors = 0, total_ecc_errors = 0, ecc_capability;
+	struct msm_nand_rw_params rw_params;
+	struct msm_nand_rw_reg_data data;
+	struct sps_iovec *iovec;
+	struct sps_iovec iovec_temp;
+	bool erased_page;
+	uint64_t fix_data_in_pages = 0;
+
+	/*
+	 * The following 6 commands are sent only once, for the first
+	 * codeword (CW) - addr0, addr1, dev0_cfg0, dev0_cfg1,
+	 * dev0_ecc_cfg, ebi2_ecc_buf_cfg. The next 6 commands are sent
+	 * for every CW - flash, read_location_0, read_location_1, exec,
+	 * flash_status and buffer_status.
+	 */
+	struct {
+		struct sps_transfer xfer;
+		struct sps_iovec cmd_iovec[MAX_DESC];
+		struct {
+			uint32_t count;
+			struct msm_nand_cmd_setup_desc setup_desc;
+			struct msm_nand_cmd_cw_desc cw_desc[MAX_DESC - 1];
+		} cmd_list;
+		struct {
+			uint32_t flash_status;
+			uint32_t buffer_status;
+			uint32_t erased_cw_status;
+		} result[MAX_CW_PER_PAGE];
+	} *dma_buffer;
+	struct msm_nand_rw_cmd_desc *cmd_list = NULL;
+
+	memset(&rw_params, 0, sizeof(struct msm_nand_rw_params));
+	err = msm_nand_validate_mtd_params(mtd, true, from, ops, &rw_params);
+	if (err)
+		goto validate_mtd_params_failed;
+
+	wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
+			    chip, sizeof(*dma_buffer))));
+
+	rw_params.oob_col = rw_params.start_sector * chip->cw_size;
+	if (chip->cfg1 & (1 << WIDE_FLASH))
+		rw_params.oob_col >>= 1;
+
+	memset(&data, 0, sizeof(struct msm_nand_rw_reg_data));
+	msm_nand_update_rw_reg_data(chip, ops, &rw_params, &data);
+	cmd_list = (struct msm_nand_rw_cmd_desc *)&dma_buffer->cmd_list;
+
+	ecc_capability = flash_dev->ecc_capability;
+
+	while (rw_params.page_count-- > 0) {
+		uint32_t cw_desc_cnt = 0;
+
+		erased_page = false;
+		data.addr0 = (rw_params.page << 16) | rw_params.oob_col;
+		data.addr1 = (rw_params.page >> 16) & 0xff;
+
+		for (n = rw_params.start_sector; n < cwperpage; n++) {
+			struct sps_command_element *curr_ce, *start_ce;
+
+			dma_buffer->result[n].flash_status = 0xeeeeeeee;
+			dma_buffer->result[n].buffer_status = 0xeeeeeeee;
+			dma_buffer->result[n].erased_cw_status = 0xeeeeee00;
+
+			msm_nand_prep_rw_cmd_desc(ops, &rw_params, &data, info,
+					n, cmd_list, &cw_desc_cnt, 0);
+
+			start_ce = &cmd_list->cw_desc[cw_desc_cnt].ce[0];
+			curr_ce = start_ce;
+			cmd_list->cw_desc[cw_desc_cnt].flags = CMD;
+			if (n == (cwperpage - 1))
+				cmd_list->cw_desc[cw_desc_cnt].flags |=
+								INT_UNLCK;
+			cmd_list->count++;
+
+			msm_nand_prep_ce(curr_ce, MSM_NAND_FLASH_STATUS(info),
+				READ, msm_virt_to_dma(chip,
+					&dma_buffer->result[n].flash_status));
+			curr_ce++;
+
+			msm_nand_prep_ce(curr_ce, MSM_NAND_BUFFER_STATUS(info),
+				READ, msm_virt_to_dma(chip,
+					&dma_buffer->result[n].buffer_status));
+			curr_ce++;
+
+			msm_nand_prep_ce(curr_ce,
+				MSM_NAND_ERASED_CW_DETECT_STATUS(info),
+				READ, msm_virt_to_dma(chip,
+				&dma_buffer->result[n].erased_cw_status));
+			curr_ce++;
+			cmd_list->cw_desc[cw_desc_cnt++].num_ce = curr_ce -
+				start_ce;
+		}
+
+		dma_buffer->xfer.iovec_count = cmd_list->count;
+		dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
+		dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
+						&dma_buffer->cmd_iovec);
+		iovec = dma_buffer->xfer.iovec;
+
+		iovec->addr =  msm_virt_to_dma(chip,
+				&cmd_list->setup_desc.ce[0]);
+		iovec->size = sizeof(struct sps_command_element) *
+			cmd_list->setup_desc.num_ce;
+		iovec->flags = cmd_list->setup_desc.flags;
+		iovec++;
+		for (n = 0; n < (cmd_list->count - 1); n++) {
+			iovec->addr =  msm_virt_to_dma(chip,
+						&cmd_list->cw_desc[n].ce[0]);
+			iovec->size = sizeof(struct sps_command_element) *
+						cmd_list->cw_desc[n].num_ce;
+			iovec->flags = cmd_list->cw_desc[n].flags;
+			iovec++;
+		}
+		mutex_lock(&info->lock);
+		err = msm_nand_get_device(chip->dev);
+		if (err)
+			goto unlock_mutex;
+		/* Submit data descriptors */
+		for (n = rw_params.start_sector; n < cwperpage; n++) {
+			err = msm_nand_submit_rw_data_desc(ops,
+						&rw_params, info, n, 0);
+			if (err) {
+				pr_err("Failed to submit data descs %d\n", err);
+				panic("error in nand driver\n");
+				goto put_dev;
+			}
+		}
+
+		if (ops->mode == MTD_OPS_RAW) {
+			submitted_num_desc = cwperpage - rw_params.start_sector;
+		} else if (ops->mode == MTD_OPS_AUTO_OOB) {
+			if (ops->datbuf)
+				submitted_num_desc = cwperpage -
+							rw_params.start_sector;
+			if (ops->oobbuf)
+				submitted_num_desc++;
+		}
+
+		/* Submit command descriptors */
+		err =  sps_transfer(info->sps.cmd_pipe.handle,
+				&dma_buffer->xfer);
+		if (err) {
+			pr_err("Failed to submit commands %d\n", err);
+			goto put_dev;
+		}
+
+		err = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
+				info->sps.cmd_pipe.index,
+				dma_buffer->xfer.iovec_count,
+				&iovec_temp);
+		if (err) {
+			pr_err("Failed to get iovec for pipe %d: (err: %d)\n",
+					(info->sps.cmd_pipe.index), err);
+			goto put_dev;
+		}
+		err = msm_nand_sps_get_iovec(info->sps.data_prod.handle,
+				info->sps.data_prod.index, submitted_num_desc,
+				&iovec_temp);
+		if (err) {
+			pr_err("Failed to get iovec for pipe %d: (err: %d)\n",
+					(info->sps.data_prod.index), err);
+			goto put_dev;
+		}
+
+		err = msm_nand_put_device(chip->dev);
+		mutex_unlock(&info->lock);
+		if (err)
+			goto free_dma;
+		/* Check for flash status errors */
+		pageerr = rawerr = 0;
+		for (n = rw_params.start_sector; n < cwperpage; n++) {
+			if (dma_buffer->result[n].flash_status & (FS_OP_ERR |
+					FS_MPU_ERR)) {
+				rawerr = -EIO;
+				/*
+				 * Check if the ECC error was due to an erased
+				 * codeword. If so, ignore the error.
+				 *
+				 * NOTE: There is a bug in the erased page
+				 * detection hardware block when reading
+				 * only spare data. To work around this
+				 * issue, instead of using the PAGE_ALL_ERASED
+				 * bit to check whether a whole page is
+				 * erased or not, we use the CODEWORD_ALL_ERASED
+				 * and CODEWORD_ERASED bits together and check
+				 * whether each codeword that has the FS_OP_ERR
+				 * bit set is an erased codeword or not.
+				 */
+				if ((dma_buffer->result[n].erased_cw_status &
+					ERASED_CW) == ERASED_CW) {
+					/*
+					 * At least one code word is detected
+					 * as an erased code word.
+					 */
+					pr_debug("erased codeword detected - ignore ecc error\n");
+					continue;
+				}
+				pageerr = rawerr;
+				break;
+			}
+		}
+		/* check for uncorrectable errors */
+		if (pageerr) {
+			for (n = rw_params.start_sector; n < cwperpage; n++) {
+				if (dma_buffer->result[n].buffer_status &
+					BS_UNCORRECTABLE_BIT) {
+					/*
+					 * Check if page is actually
+					 * erased or not.
+					 */
+					err = msm_nand_is_erased_page(mtd,
+							from, ops,
+							&rw_params,
+							&erased_page);
+					if (err)
+						goto free_dma;
+					if (!erased_page) {
+						mtd->ecc_stats.failed++;
+						pageerr = -EBADMSG;
+						break;
+					}
+					pageerr = 0;
+					pr_debug("Uncorrectable ECC errors dectected on an erased page and has been fixed.\n");
+					break;
+				}
+			}
+		}
+
+		if (rawerr && !pageerr && erased_page) {
+			/*
+			 * This means an erased page had bit-flips, which now
+			 * need to be cleared in the data being sent to upper
+			 * layers. Keep track of those pages here; the data
+			 * will be fixed up before this function returns.
+			 * Note that a whole page's worth of data will be
+			 * fixed, and this only handles about 64 pages being
+			 * read at a time, i.e. one erase block's worth of
+			 * pages.
+			 */
+			fix_data_in_pages |= BIT(rw_params.page_count);
+		}
+		/* check for correctable errors */
+		if (!rawerr) {
+			for (n = rw_params.start_sector; n < cwperpage; n++) {
+				ecc_errors =
+				    dma_buffer->result[n].buffer_status
+				    & BS_CORRECTABLE_ERR_MSK;
+				if (ecc_errors) {
+					total_ecc_errors += ecc_errors;
+					mtd->ecc_stats.corrected += ecc_errors;
+					/*
+					 * The NAND device can have ECC
+					 * errors even on the first ever
+					 * write, so reporting EUCLEAN
+					 * when there are fewer errors
+					 * than the ECC capability of the
+					 * device is not useful.
+					 *
+					 * Also, don't report EUCLEAN
+					 * unless enable_euclean is set.
+					 */
+					if (enable_euclean &&
+					    ecc_errors >= ecc_capability)
+						pageerr = -EUCLEAN;
+				}
+			}
+		}
+		if (pageerr && (pageerr != -EUCLEAN || err == 0))
+			err = pageerr;
+
+		if (rawerr && !pageerr) {
+			pr_debug("%llx %x %x empty page\n",
+			       (loff_t)rw_params.page * mtd->writesize,
+			       ops->len, ops->ooblen);
+		} else {
+			for (n = rw_params.start_sector; n < cwperpage; n++)
+				pr_debug("cw %d: flash_sts %x buffr_sts %x, erased_cw_status: %x, pageerr: %d, rawerr: %d\n",
+				n, dma_buffer->result[n].flash_status,
+				dma_buffer->result[n].buffer_status,
+				dma_buffer->result[n].erased_cw_status,
+				pageerr, rawerr);
+		}
+		if (err && err != -EUCLEAN && err != -EBADMSG)
+			goto free_dma;
+		pages_read++;
+		rw_params.page++;
+	}
+	goto free_dma;
+put_dev:
+	msm_nand_put_device(chip->dev);
+unlock_mutex:
+	mutex_unlock(&info->lock);
+free_dma:
+	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+	if (ops->oobbuf)
+		dma_unmap_page(chip->dev, rw_params.oob_dma_addr,
+				 ops->ooblen, DMA_BIDIRECTIONAL);
+	if (ops->datbuf)
+		dma_unmap_page(chip->dev, rw_params.data_dma_addr,
+				 ops->len, DMA_FROM_DEVICE);
+	/*
+	 * If any erased pages were detected with ECC errors, their data is
+	 * most likely not all 0xff, so memset those pages to all 0xff.
+	 */
+	while (fix_data_in_pages) {
+		int temp_page = 0, oobsize = rw_params.cwperpage << 2;
+		int count = 0, offset = 0;
+
+		temp_page = fix_data_in_pages & BIT_MASK(0);
+		fix_data_in_pages = fix_data_in_pages >> 1;
+		count++;
+
+		if (!temp_page)
+			continue;
+
+		offset = (count - 1) * mtd->writesize;
+		if (ops->datbuf)
+			memset((ops->datbuf + offset), 0xff, mtd->writesize);
+
+		offset = (count - 1) * oobsize;
+		if (ops->oobbuf)
+			memset(ops->oobbuf + offset, 0xff, oobsize);
+	}
+validate_mtd_params_failed:
+	if (ops->mode != MTD_OPS_RAW)
+		ops->retlen = mtd->writesize * pages_read;
+	else
+		ops->retlen = (mtd->writesize + mtd->oobsize) * pages_read;
+	ops->oobretlen = ops->ooblen - rw_params.oob_len_data;
+	if (err)
+		pr_err("0x%llx datalen 0x%x ooblen %x err %d corrected %d\n",
+		       from, ops->datbuf ? ops->len : 0, ops->ooblen, err,
+		       total_ecc_errors);
+	pr_debug("ret %d, retlen %d oobretlen %d\n",
+			err, ops->retlen, ops->oobretlen);
+
+	pr_debug("========================================================\n");
+	return err;
+}
+
+/**
+ * msm_nand_read_partial_page() - read partial page
+ * @mtd: pointer to mtd info
+ * @from: start address of the page
+ * @ops: pointer to mtd_oob_ops
+ *
+ * Reads a page into a bounce buffer and copies the required number of bytes
+ * to the actual buffer. Page-aligned, full-page reads do not use the bounce
+ * buffer.
+ */
+static int msm_nand_read_partial_page(struct mtd_info *mtd,
+		loff_t from, struct mtd_oob_ops *ops)
+{
+	int err = 0;
+	unsigned char *actual_buf;
+	unsigned char *bounce_buf;
+	loff_t aligned_from;
+	loff_t offset;
+	size_t len;
+	size_t actual_len, ret_len;
+	int is_euclean = 0;
+	int is_ebadmsg = 0;
+
+	actual_len = ops->len;
+	ret_len = 0;
+	actual_buf = ops->datbuf;
+
+	bounce_buf = kmalloc(mtd->writesize, GFP_KERNEL);
+	if (!bounce_buf) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	/* Get start address of page to read from */
+	ops->len = mtd->writesize;
+	offset = from & (mtd->writesize - 1);
+	aligned_from = from - offset;
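+	/*
+	 * Worked example, assuming a 2K page: from = 0x1234 gives
+	 * offset = 0x234 and aligned_from = 0x1000, so the read starts at
+	 * the page boundary and the copy begins 0x234 bytes into the
+	 * bounce buffer.
+	 */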
+
+	for (;;) {
+		bool no_copy = false;
+
+		len = mtd->writesize - offset;
+		if (len > actual_len)
+			len = actual_len;
+
+		if (offset == 0 && len == mtd->writesize)
+			no_copy = true;
+
+		if (!virt_addr_valid(actual_buf) &&
+				!is_buffer_in_page(actual_buf, ops->len))
+			no_copy = false;
+
+		ops->datbuf = no_copy ? actual_buf : bounce_buf;
+		err = msm_nand_read_oob(mtd, aligned_from, ops);
+		if (err == -EUCLEAN) {
+			is_euclean = 1;
+			err = 0;
+		}
+
+		if (err == -EBADMSG) {
+			is_ebadmsg = 1;
+			err = 0;
+		}
+
+		if (err < 0) {
+			/* Clear previously set EUCLEAN / EBADMSG */
+			is_euclean = 0;
+			is_ebadmsg = 0;
+			ret_len = ops->retlen;
+			break;
+		}
+
+		if (!no_copy)
+			memcpy(actual_buf, bounce_buf + offset, len);
+
+		actual_len -= len;
+		ret_len += len;
+
+		if (actual_len == 0)
+			break;
+
+		actual_buf += len;
+		offset = 0;
+		aligned_from += mtd->writesize;
+	}
+
+	ops->retlen = ret_len;
+	kfree(bounce_buf);
+out:
+	if (is_euclean == 1)
+		err = -EUCLEAN;
+
+	/* Snub EUCLEAN if we also have EBADMSG */
+	if (is_ebadmsg == 1)
+		err = -EBADMSG;
+	return err;
+}
+
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to read a
+ * page with only main data.
+ */
+static int msm_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
+	      size_t *retlen, u_char *buf)
+{
+	int ret;
+	int is_euclean = 0;
+	int is_ebadmsg = 0;
+	struct mtd_oob_ops ops;
+	unsigned char *bounce_buf = NULL;
+
+	ops.mode = MTD_OPS_AUTO_OOB;
+	ops.retlen = 0;
+	ops.ooblen = 0;
+	ops.oobbuf = NULL;
+	*retlen = 0;
+
+	if (!(from & (mtd->writesize - 1)) && !(len % mtd->writesize)) {
+		/*
+		 * Handle reading of a large read buffer in vmalloc
+		 * address space that does not fit in an MMU page.
+		 */
+		if (!virt_addr_valid(buf) && !is_buffer_in_page(buf, len)) {
+			ops.len = mtd->writesize;
+
+			bounce_buf = kmalloc(ops.len, GFP_KERNEL);
+			if (!bounce_buf) {
+				ret = -ENOMEM;
+				goto out;
+			}
+
+			for (;;) {
+				bool no_copy = false;
+
+				if (!is_buffer_in_page(buf, ops.len)) {
+					memcpy(bounce_buf, buf, ops.len);
+					ops.datbuf = (uint8_t *) bounce_buf;
+				} else {
+					ops.datbuf = (uint8_t *) buf;
+					no_copy = true;
+				}
+				ret = msm_nand_read_oob(mtd, from, &ops);
+				if (ret == -EUCLEAN) {
+					is_euclean = 1;
+					ret = 0;
+				}
+				if (ret == -EBADMSG) {
+					is_ebadmsg = 1;
+					ret = 0;
+				}
+				if (ret < 0) {
+					/* Clear previously set errors */
+					is_euclean = 0;
+					is_ebadmsg = 0;
+					break;
+				}
+
+				if (!no_copy)
+					memcpy(buf, bounce_buf, ops.retlen);
+
+				len -= ops.retlen;
+				*retlen += ops.retlen;
+				if (len == 0)
+					break;
+				buf += ops.retlen;
+				from += ops.retlen;
+
+				if (len < mtd->writesize) {
+					ops.len = len;
+					ops.datbuf = buf;
+					ret = msm_nand_read_partial_page(
+						mtd, from, &ops);
+					*retlen += ops.retlen;
+					break;
+				}
+			}
+			kfree(bounce_buf);
+		} else {
+			ops.len = len;
+			ops.datbuf = (uint8_t *)buf;
+			ret =  msm_nand_read_oob(mtd, from, &ops);
+			*retlen = ops.retlen;
+		}
+	} else {
+		ops.len = len;
+		ops.datbuf = (uint8_t *)buf;
+		ret = msm_nand_read_partial_page(mtd, from, &ops);
+		*retlen = ops.retlen;
+	}
+out:
+	if (is_euclean == 1)
+		ret = -EUCLEAN;
+
+	/* Snub EUCLEAN if we also have EBADMSG */
+	if (is_ebadmsg == 1)
+		ret = -EBADMSG;
+
+	return ret;
+}
+
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to write a
+ * page with both main and spare data.
+ */
+static int msm_nand_write_oob(struct mtd_info *mtd, loff_t to,
+				struct mtd_oob_ops *ops)
+{
+	struct msm_nand_info *info = mtd->priv;
+	struct msm_nand_chip *chip = &info->nand_chip;
+	uint32_t cwperpage = (mtd->writesize >> 9);
+	uint32_t n, flash_sts, pages_written = 0;
+	int err = 0, submitted_num_desc = 0;
+	struct msm_nand_rw_params rw_params;
+	struct msm_nand_rw_reg_data data;
+	struct sps_iovec *iovec;
+	struct sps_iovec iovec_temp;
+	/*
+	 * The following 7 commands will be sent only once:
+	 * For the first codeword (CW) - addr0, addr1, dev0_cfg0, dev0_cfg1,
+	 * dev0_ecc_cfg, ebi2_ecc_buf_cfg.
+	 * For the last codeword (CW) - read_status(write).
+	 *
+	 * The following 4 commands will be sent for every CW:
+	 * flash, exec, flash_status (read), flash_status (write).
+	 */
+	struct {
+		struct sps_transfer xfer;
+		struct sps_iovec cmd_iovec[MAX_DESC + 1];
+		struct {
+			uint32_t count;
+			struct msm_nand_cmd_setup_desc setup_desc;
+			struct msm_nand_cmd_cw_desc cw_desc[MAX_DESC];
+		} cmd_list;
+		struct {
+			uint32_t flash_status;
+		} data[MAX_CW_PER_PAGE];
+	} *dma_buffer;
+	struct msm_nand_rw_cmd_desc *cmd_list = NULL;
+
+	memset(&rw_params, 0, sizeof(struct msm_nand_rw_params));
+	err = msm_nand_validate_mtd_params(mtd, false, to, ops, &rw_params);
+	if (err)
+		goto validate_mtd_params_failed;
+
+	wait_event(chip->dma_wait_queue, (dma_buffer =
+			msm_nand_get_dma_buffer(chip, sizeof(*dma_buffer))));
+
+	memset(&data, 0, sizeof(struct msm_nand_rw_reg_data));
+	msm_nand_update_rw_reg_data(chip, ops, &rw_params, &data);
+	cmd_list = (struct msm_nand_rw_cmd_desc *)&dma_buffer->cmd_list;
+
+	while (rw_params.page_count-- > 0) {
+		uint32_t cw_desc_cnt = 0;
+		struct sps_command_element *curr_ce, *start_ce;
+
+		data.addr0 = (rw_params.page << 16);
+		data.addr1 = (rw_params.page >> 16) & 0xff;
+
+		for (n = 0; n < cwperpage ; n++) {
+			dma_buffer->data[n].flash_status = 0xeeeeeeee;
+
+			msm_nand_prep_rw_cmd_desc(ops, &rw_params, &data, info,
+					n, cmd_list, &cw_desc_cnt, 0);
+
+			curr_ce = &cmd_list->cw_desc[cw_desc_cnt].ce[0];
+			cmd_list->cw_desc[cw_desc_cnt].flags = CMD;
+			cmd_list->count++;
+
+			msm_nand_prep_ce(curr_ce, MSM_NAND_FLASH_STATUS(info),
+				READ, msm_virt_to_dma(chip,
+					&dma_buffer->data[n].flash_status));
+			cmd_list->cw_desc[cw_desc_cnt++].num_ce = 1;
+		}
+
+		start_ce = &cmd_list->cw_desc[cw_desc_cnt].ce[0];
+		curr_ce = start_ce;
+		cmd_list->cw_desc[cw_desc_cnt].flags = CMD_INT_UNLCK;
+		cmd_list->count++;
+		msm_nand_prep_ce(curr_ce, MSM_NAND_FLASH_STATUS(info),
+				WRITE, data.clrfstatus);
+		curr_ce++;
+
+		msm_nand_prep_ce(curr_ce, MSM_NAND_READ_STATUS(info),
+				WRITE, data.clrrstatus);
+		curr_ce++;
+		cmd_list->cw_desc[cw_desc_cnt++].num_ce = curr_ce - start_ce;
+
+		dma_buffer->xfer.iovec_count = cmd_list->count;
+		dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
+		dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
+						&dma_buffer->cmd_iovec);
+		iovec = dma_buffer->xfer.iovec;
+
+		iovec->addr =  msm_virt_to_dma(chip,
+				&cmd_list->setup_desc.ce[0]);
+		iovec->size = sizeof(struct sps_command_element) *
+					cmd_list->setup_desc.num_ce;
+		iovec->flags = cmd_list->setup_desc.flags;
+		iovec++;
+		for (n = 0; n < (cmd_list->count - 1); n++) {
+			iovec->addr =  msm_virt_to_dma(chip,
+					&cmd_list->cw_desc[n].ce[0]);
+			iovec->size = sizeof(struct sps_command_element) *
+					cmd_list->cw_desc[n].num_ce;
+			iovec->flags = cmd_list->cw_desc[n].flags;
+			iovec++;
+		}
+		mutex_lock(&info->lock);
+		err = msm_nand_get_device(chip->dev);
+		if (err)
+			goto unlock_mutex;
+		/* Submit data descriptors */
+		for (n = 0; n < cwperpage; n++) {
+			err = msm_nand_submit_rw_data_desc(ops,
+						&rw_params, info, n, 0);
+			if (err) {
+				pr_err("Failed to submit data descs %d\n", err);
+				panic("Error in nand driver\n");
+				goto put_dev;
+			}
+		}
+
+		if (ops->mode == MTD_OPS_RAW) {
+			submitted_num_desc = n;
+		} else if (ops->mode == MTD_OPS_AUTO_OOB) {
+			if (ops->datbuf)
+				submitted_num_desc = n;
+			if (ops->oobbuf)
+				submitted_num_desc++;
+		}
+
+		/* Submit command descriptors */
+		err =  sps_transfer(info->sps.cmd_pipe.handle,
+				&dma_buffer->xfer);
+		if (err) {
+			pr_err("Failed to submit commands %d\n", err);
+			goto put_dev;
+		}
+
+		err = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
+				info->sps.cmd_pipe.index,
+				dma_buffer->xfer.iovec_count,
+				&iovec_temp);
+		if (err) {
+			pr_err("Failed to get iovec for pipe %d (err:%d)\n",
+					(info->sps.cmd_pipe.index), err);
+			goto put_dev;
+		}
+		err = msm_nand_sps_get_iovec(info->sps.data_cons.handle,
+				info->sps.data_cons.index, submitted_num_desc,
+				&iovec_temp);
+		if (err) {
+			pr_err("Failed to get iovec for pipe %d (err:%d)\n",
+					(info->sps.data_cons.index), err);
+			goto put_dev;
+		}
+
+		err = msm_nand_put_device(chip->dev);
+		mutex_unlock(&info->lock);
+		if (err)
+			goto free_dma;
+
+		for (n = 0; n < cwperpage; n++)
+			pr_debug("write pg %d: flash_status[%d] = %x\n",
+				rw_params.page, n,
+				dma_buffer->data[n].flash_status);
+
+		/*  Check for flash status errors */
+		for (n = 0; n < cwperpage; n++) {
+			flash_sts = dma_buffer->data[n].flash_status;
+			if (flash_sts & (FS_OP_ERR | FS_MPU_ERR)) {
+				pr_err("MPU/OP err (0x%x) set\n", flash_sts);
+				err = -EIO;
+				goto free_dma;
+			}
+			if (n == (cwperpage - 1)) {
+				if (!(flash_sts & FS_DEVICE_WP) ||
+					(flash_sts & FS_DEVICE_STS_ERR)) {
+					pr_err("Dev sts err 0x%x\n", flash_sts);
+					err = -EIO;
+					goto free_dma;
+				}
+			}
+		}
+		pages_written++;
+		rw_params.page++;
+	}
+	goto free_dma;
+put_dev:
+	msm_nand_put_device(chip->dev);
+unlock_mutex:
+	mutex_unlock(&info->lock);
+free_dma:
+	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+	if (ops->oobbuf)
+		dma_unmap_page(chip->dev, rw_params.oob_dma_addr,
+				 ops->ooblen, DMA_TO_DEVICE);
+	if (ops->datbuf)
+		dma_unmap_page(chip->dev, rw_params.data_dma_addr,
+				ops->len, DMA_TO_DEVICE);
+validate_mtd_params_failed:
+	if (ops->mode != MTD_OPS_RAW)
+		ops->retlen = mtd->writesize * pages_written;
+	else
+		ops->retlen = (mtd->writesize + mtd->oobsize) * pages_written;
+
+	ops->oobretlen = ops->ooblen - rw_params.oob_len_data;
+	if (err)
+		pr_err("to %llx datalen %x ooblen %x failed with err %d\n",
+		       to, ops->len, ops->ooblen, err);
+	pr_debug("ret %d, retlen %d oobretlen %d\n",
+			err, ops->retlen, ops->oobretlen);
+
+	pr_debug("================================================\n");
+	return err;
+}
+
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to write a
+ * page with only main data.
+ */
+static int msm_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
+			  size_t *retlen, const u_char *buf)
+{
+	int ret;
+	struct mtd_oob_ops ops;
+	unsigned char *bounce_buf = NULL;
+
+	ops.mode = MTD_OPS_AUTO_OOB;
+	ops.retlen = 0;
+	ops.ooblen = 0;
+	ops.oobbuf = NULL;
+
+	/* partial page writes are not supported */
+	if ((to & (mtd->writesize - 1)) || (len % mtd->writesize)) {
+		ret = -EINVAL;
+		*retlen = ops.retlen;
+		pr_err("%s: partial page writes are not supported\n", __func__);
+		goto out;
+	}
+
+	/*
+	 * Handle writing of a large write buffer in vmalloc
+	 * address space that does not fit in an MMU page.
+	 */
+	if (!virt_addr_valid(buf) && !is_buffer_in_page(buf, len)) {
+		ops.len = mtd->writesize;
+
+		bounce_buf = kmalloc(ops.len, GFP_KERNEL);
+		if (!bounce_buf) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		for (;;) {
+			if (!is_buffer_in_page(buf, ops.len)) {
+				memcpy(bounce_buf, buf, ops.len);
+				ops.datbuf = (uint8_t *) bounce_buf;
+			} else {
+				ops.datbuf = (uint8_t *) buf;
+			}
+			ret = msm_nand_write_oob(mtd, to, &ops);
+			if (ret < 0)
+				break;
+
+			len -= mtd->writesize;
+			*retlen += mtd->writesize;
+			if (len == 0)
+				break;
+
+			buf += mtd->writesize;
+			to += mtd->writesize;
+		}
+		kfree(bounce_buf);
+	} else {
+		ops.len = len;
+		ops.datbuf = (uint8_t *)buf;
+		ret =  msm_nand_write_oob(mtd, to, &ops);
+		*retlen = ops.retlen;
+	}
+out:
+	return ret;
+}
+
+/*
+ * Structure that contains NANDc register data for commands required
+ * for Erase operation.
+ */
+struct msm_nand_erase_reg_data {
+	struct msm_nand_common_cfgs cfg;
+	uint32_t exec;
+	uint32_t flash_status;
+	uint32_t clrfstatus;
+	uint32_t clrrstatus;
+};
+
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to erase a
+ * block within the NAND device.
+ */
+#define ERASE_CMDS 9
+static int msm_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+	int i = 0, err = 0;
+	struct msm_nand_info *info = mtd->priv;
+	struct msm_nand_chip *chip = &info->nand_chip;
+	uint32_t page = 0;
+	struct msm_nand_sps_cmd *cmd, *curr_cmd;
+	struct msm_nand_erase_reg_data data;
+	struct sps_iovec *iovec;
+	struct sps_iovec iovec_temp;
+	/*
+	 * The following 9 commands are required to erase a block -
+	 * flash, addr0, addr1, cfg0, cfg1, exec, flash_status(read),
+	 * flash_status(write), read_status.
+	 */
+	struct {
+		struct sps_transfer xfer;
+		struct sps_iovec cmd_iovec[ERASE_CMDS];
+		struct msm_nand_sps_cmd cmd[ERASE_CMDS];
+		uint32_t flash_status;
+	} *dma_buffer;
+
+	if (mtd->writesize == PAGE_SIZE_2K)
+		page = instr->addr >> 11;
+
+	if (mtd->writesize == PAGE_SIZE_4K)
+		page = instr->addr >> 12;
+
+	if (instr->addr & (mtd->erasesize - 1)) {
+		pr_err("unsupported erase address, 0x%llx\n", instr->addr);
+		err = -EINVAL;
+		goto out;
+	}
+	if (instr->len != mtd->erasesize) {
+		pr_err("unsupported erase len, %lld\n", instr->len);
+		err = -EINVAL;
+		goto out;
+	}
+
+	wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
+			    chip, sizeof(*dma_buffer))));
+	cmd = dma_buffer->cmd;
+
+	memset(&data, 0, sizeof(struct msm_nand_erase_reg_data));
+	data.cfg.cmd = MSM_NAND_CMD_BLOCK_ERASE;
+	data.cfg.addr0 = page;
+	data.cfg.addr1 = 0;
+	data.cfg.cfg0 = chip->cfg0 & (~(7 << CW_PER_PAGE));
+	data.cfg.cfg1 = chip->cfg1;
+	data.exec = 1;
+	dma_buffer->flash_status = 0xeeeeeeee;
+	data.clrfstatus = MSM_NAND_RESET_FLASH_STS;
+	data.clrrstatus = MSM_NAND_RESET_READ_STS;
+
+	curr_cmd = cmd;
+	msm_nand_prep_cfg_cmd_desc(info, data.cfg, &curr_cmd);
+
+	cmd = curr_cmd;
+	msm_nand_prep_single_desc(cmd, MSM_NAND_EXEC_CMD(info), WRITE,
+			data.exec, SPS_IOVEC_FLAG_NWD);
+	cmd++;
+
+	msm_nand_prep_single_desc(cmd, MSM_NAND_FLASH_STATUS(info), READ,
+		msm_virt_to_dma(chip, &dma_buffer->flash_status), 0);
+	cmd++;
+
+	msm_nand_prep_single_desc(cmd, MSM_NAND_FLASH_STATUS(info), WRITE,
+			data.clrfstatus, 0);
+	cmd++;
+
+	msm_nand_prep_single_desc(cmd, MSM_NAND_READ_STATUS(info), WRITE,
+			data.clrrstatus,
+			SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT);
+	cmd++;
+
+	WARN_ON((cmd - dma_buffer->cmd) > ERASE_CMDS);
+	dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
+	dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
+	dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
+					&dma_buffer->cmd_iovec);
+	iovec = dma_buffer->xfer.iovec;
+
+	for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
+		iovec->addr =  msm_virt_to_dma(chip, &dma_buffer->cmd[i].ce);
+		iovec->size = sizeof(struct sps_command_element);
+		iovec->flags = dma_buffer->cmd[i].flags;
+		iovec++;
+	}
+	mutex_lock(&info->lock);
+	err = msm_nand_get_device(chip->dev);
+	if (err)
+		goto unlock_mutex;
+
+	err =  sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer);
+	if (err) {
+		pr_err("Failed to submit commands %d\n", err);
+		goto put_dev;
+	}
+	err = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
+			info->sps.cmd_pipe.index, dma_buffer->xfer.iovec_count,
+			&iovec_temp);
+	if (err) {
+		pr_err("Failed to get iovec for pipe %d (err: %d)\n",
+				(info->sps.cmd_pipe.index), err);
+		goto put_dev;
+	}
+	err = msm_nand_put_device(chip->dev);
+	if (err)
+		goto unlock_mutex;
+
+	/*  Check for flash status errors */
+	if (dma_buffer->flash_status & (FS_OP_ERR |
+			FS_MPU_ERR | FS_DEVICE_STS_ERR)) {
+		pr_err("MPU/OP/DEV err (0x%x) set\n", dma_buffer->flash_status);
+		err = -EIO;
+	}
+	if (!(dma_buffer->flash_status & FS_DEVICE_WP)) {
+		pr_err("Device is write protected\n");
+		err = -EIO;
+	}
+	if (err) {
+		pr_err("Erase failed, 0x%llx\n", instr->addr);
+		instr->fail_addr = instr->addr;
+		instr->state = MTD_ERASE_FAILED;
+	} else {
+		instr->state = MTD_ERASE_DONE;
+		instr->fail_addr = 0xffffffff;
+		mtd_erase_callback(instr);
+	}
+	goto unlock_mutex;
+put_dev:
+	msm_nand_put_device(chip->dev);
+unlock_mutex:
+	mutex_unlock(&info->lock);
+	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+out:
+	return err;
+}
+
+/*
+ * Structure that contains NANDc register data for commands required
+ * for checking if a block is bad.
+ */
+struct msm_nand_blk_isbad_data {
+	struct msm_nand_common_cfgs cfg;
+	uint32_t ecc_bch_cfg;
+	uint32_t exec;
+	uint32_t read_offset;
+};
+
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to check if
+ * a block is bad. This is done by reading the first page within a block and
+ * checking whether the bad block byte location contains 0xFF or not. If it
+ * doesn't contain 0xFF, the block is considered bad.
+ */
+#define ISBAD_CMDS 9
+static int msm_nand_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+	struct msm_nand_info *info = mtd->priv;
+	struct msm_nand_chip *chip = &info->nand_chip;
+	int i = 0, ret = 0, bad_block = 0, submitted_num_desc = 1;
+	uint8_t *buf;
+	uint32_t page = 0, rdata, cwperpage;
+	struct msm_nand_sps_cmd *cmd, *curr_cmd;
+	struct msm_nand_blk_isbad_data data;
+	struct sps_iovec *iovec;
+	struct sps_iovec iovec_temp;
+	/*
+	 * The following 9 commands are required to check for a bad block -
+	 * flash, addr0, addr1, cfg0, cfg1, ecc_cfg, read_loc_0,
+	 * exec, flash_status(read).
+	 */
+	struct {
+		struct sps_transfer xfer;
+		struct sps_iovec cmd_iovec[ISBAD_CMDS];
+		struct msm_nand_sps_cmd cmd[ISBAD_CMDS];
+		uint32_t flash_status;
+	} *dma_buffer;
+
+	if (mtd->writesize == PAGE_SIZE_2K)
+		page = ofs >> 11;
+
+	if (mtd->writesize == PAGE_SIZE_4K)
+		page = ofs >> 12;
+
+	cwperpage = (mtd->writesize >> 9);
+
+	if (ofs > mtd->size) {
+		pr_err("Invalid offset 0x%llx\n", ofs);
+		bad_block = -EINVAL;
+		goto out;
+	}
+	if (ofs & (mtd->erasesize - 1)) {
+		pr_err("unsupported block address, 0x%x\n", (uint32_t)ofs);
+		bad_block = -EINVAL;
+		goto out;
+	}
+
+	wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
+				chip, sizeof(*dma_buffer) + 4)));
+	buf = (uint8_t *)dma_buffer + sizeof(*dma_buffer);
+
+	cmd = dma_buffer->cmd;
+	memset(&data, 0, sizeof(struct msm_nand_blk_isbad_data));
+	data.cfg.cmd = MSM_NAND_CMD_PAGE_READ_ALL;
+	data.cfg.cfg0 = chip->cfg0_raw & ~(7U << CW_PER_PAGE);
+	data.cfg.cfg1 = chip->cfg1_raw;
+
+	if (chip->cfg1 & (1 << WIDE_FLASH))
+		data.cfg.addr0 = (page << 16) |
+			((chip->cw_size * (cwperpage-1)) >> 1);
+	else
+		data.cfg.addr0 = (page << 16) |
+			(chip->cw_size * (cwperpage-1));
+
+	data.cfg.addr1 = (page >> 16) & 0xff;
+	data.ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
+	data.exec = 1;
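+	/*
+	 * The factory bad block marker sits at the start of the spare area,
+	 * which in the raw page layout falls writesize - cw_size *
+	 * (cwperpage - 1) bytes into the last codeword.
+	 */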
+	data.read_offset = (mtd->writesize - (chip->cw_size * (cwperpage-1)));
+	dma_buffer->flash_status = 0xeeeeeeee;
+
+	curr_cmd = cmd;
+	msm_nand_prep_cfg_cmd_desc(info, data.cfg, &curr_cmd);
+
+	cmd = curr_cmd;
+	msm_nand_prep_single_desc(cmd, MSM_NAND_DEV0_ECC_CFG(info), WRITE,
+			data.ecc_bch_cfg, 0);
+	cmd++;
+
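+	/* Read 4 bytes at read_offset, flagged as the last read request */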
+	rdata = (data.read_offset << 0) | (4 << 16) | (1 << 31);
+	msm_nand_prep_single_desc(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE,
+			rdata, 0);
+	cmd++;
+
+	msm_nand_prep_single_desc(cmd, MSM_NAND_EXEC_CMD(info), WRITE,
+			data.exec, SPS_IOVEC_FLAG_NWD);
+	cmd++;
+
+	msm_nand_prep_single_desc(cmd, MSM_NAND_FLASH_STATUS(info), READ,
+		msm_virt_to_dma(chip, &dma_buffer->flash_status),
+		SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_UNLOCK);
+	cmd++;
+
+	WARN_ON(cmd - dma_buffer->cmd > ISBAD_CMDS);
+	dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
+	dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
+	dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
+					&dma_buffer->cmd_iovec);
+	iovec = dma_buffer->xfer.iovec;
+
+	for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
+		iovec->addr =  msm_virt_to_dma(chip, &dma_buffer->cmd[i].ce);
+		iovec->size = sizeof(struct sps_command_element);
+		iovec->flags = dma_buffer->cmd[i].flags;
+		iovec++;
+	}
+	mutex_lock(&info->lock);
+	ret = msm_nand_get_device(chip->dev);
+	if (ret) {
+		mutex_unlock(&info->lock);
+		goto free_dma;
+	}
+	/* Submit data descriptor */
+	ret = sps_transfer_one(info->sps.data_prod.handle,
+			msm_virt_to_dma(chip, buf),
+			4, NULL, SPS_IOVEC_FLAG_INT);
+
+	if (ret) {
+		pr_err("Failed to submit data desc %d\n", ret);
+		goto put_dev;
+	}
+	/* Submit command descriptor */
+	ret =  sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer);
+	if (ret) {
+		pr_err("Failed to submit commands %d\n", ret);
+		goto put_dev;
+	}
+
+	ret = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
+			info->sps.cmd_pipe.index, dma_buffer->xfer.iovec_count,
+			&iovec_temp);
+	if (ret) {
+		pr_err("Failed to get iovec for pipe %d (ret: %d)\n",
+				(info->sps.cmd_pipe.index), ret);
+		goto put_dev;
+	}
+	ret = msm_nand_sps_get_iovec(info->sps.data_prod.handle,
+			info->sps.data_prod.index, submitted_num_desc,
+			&iovec_temp);
+	if (ret) {
+		pr_err("Failed to get iovec for pipe %d (ret: %d)\n",
+				(info->sps.data_prod.index), ret);
+		goto put_dev;
+	}
+
+	ret = msm_nand_put_device(chip->dev);
+	mutex_unlock(&info->lock);
+	if (ret)
+		goto free_dma;
+
+	/* Check for flash status errors */
+	if (dma_buffer->flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
+		pr_err("MPU/OP err set: %x\n", dma_buffer->flash_status);
+		bad_block = -EIO;
+		goto free_dma;
+	}
+
+	/* Check for bad block marker byte */
+	if (chip->cfg1 & (1 << WIDE_FLASH)) {
+		if (buf[0] != 0xFF || buf[1] != 0xFF)
+			bad_block = 1;
+	} else {
+		if (buf[0] != 0xFF)
+			bad_block = 1;
+	}
+	goto free_dma;
+put_dev:
+	msm_nand_put_device(chip->dev);
+	mutex_unlock(&info->lock);
+free_dma:
+	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer) + 4);
+out:
+	return ret ? ret : bad_block;
+}
+
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to mark a
+ * block as bad. This is done by writing the first page within the block with
+ * 0s, which also sets the bad block byte location to 0.
+ */
+static int msm_nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+	struct mtd_oob_ops ops;
+	int ret;
+	uint8_t *buf;
+	size_t len;
+
+	if (ofs > mtd->size) {
+		pr_err("Invalid offset 0x%llx\n", ofs);
+		ret = -EINVAL;
+		goto out;
+	}
+	if (ofs & (mtd->erasesize - 1)) {
+		pr_err("unsupported block address, 0x%x\n", (uint32_t)ofs);
+		ret = -EINVAL;
+		goto out;
+	}
+	len = mtd->writesize + mtd->oobsize;
+	buf = kzalloc(len, GFP_KERNEL);
+	if (!buf) {
+		pr_err("unable to allocate memory for 0x%x size\n", len);
+		ret = -ENOMEM;
+		goto out;
+	}
+	ops.mode = MTD_OPS_RAW;
+	ops.len = len;
+	ops.retlen = 0;
+	ops.ooblen = 0;
+	ops.datbuf = buf;
+	ops.oobbuf = NULL;
+	ret =  msm_nand_write_oob(mtd, ofs, &ops);
+	kfree(buf);
+out:
+	return ret;
+}
+
+/*
+ * Function that scans for the attached NAND device. This fills out all
+ * the uninitialized function pointers with the defaults. The flash ID is
+ * read and the mtd/chip structures are filled with the appropriate values.
+ */
+static int msm_nand_scan(struct mtd_info *mtd)
+{
+	struct msm_nand_info *info = mtd->priv;
+	struct msm_nand_chip *chip = &info->nand_chip;
+	struct flash_identification *supported_flash = &info->flash_dev;
+	int err = 0;
+	uint32_t i, j, mtd_writesize;
+	uint8_t dev_found = 0, wide_bus;
+	uint32_t manid, devid, devcfg;
+	uint32_t flash_id = 0, flash_id2 = 0;
+	uint8_t id_byte[NAND_MAX_ID_LEN];
+	uint32_t bad_block_byte, spare_bytes;
+	struct nand_flash_dev *flashdev = NULL;
+	struct nand_manufacturers  *flashman = NULL;
+
+	/* Probe the Flash device for ONFI compliance */
+	if (!msm_nand_flash_onfi_probe(info)) {
+		dev_found = 1;
+	} else {
+		err = msm_nand_flash_read_id(info, 0, &flash_id, &flash_id2);
+		if (err < 0) {
+			pr_err("Failed to read Flash ID\n");
+			err = -EINVAL;
+			goto out;
+		}
+		manid  = id_byte[0] = flash_id & 0xFF;
+		devid  = id_byte[1] = (flash_id >> 8) & 0xFF;
+		devcfg = id_byte[3] = (flash_id >> 24) & 0xFF;
+		id_byte[2] = (flash_id >> 16) & 0xFF;
+		id_byte[4] = flash_id2 & 0xFF;
+		id_byte[5] = (flash_id2 >> 8) & 0xFF;
+		id_byte[6] = (flash_id2 >> 16) & 0xFF;
+		id_byte[7] = (flash_id2 >> 24) & 0xFF;
+
+		for (i = 0; !flashman && nand_manuf_ids[i].id; ++i)
+			if (nand_manuf_ids[i].id == manid)
+				flashman = &nand_manuf_ids[i];
+		for (i = 0; !flashdev && nand_flash_ids[i].id; ++i) {
+			/*
+			 * If id_len is specified for an entry in the nand ids
+			 * array, then at least 4 bytes of the nand id are
+			 * present in the nand ids array - use that to identify
+			 * the nand device first. Only if that is not present,
+			 * fall back to searching the legacy or extended ids
+			 * in the nand ids array.
+			 * The id_len number of bytes in the nand id read from
+			 * the device are checked against those in the nand id
+			 * table for an exact match.
+			 */
+			if (nand_flash_ids[i].id_len) {
+				for (j = 0; j < nand_flash_ids[i].id_len; j++) {
+					if (nand_flash_ids[i].id[j] ==
+							id_byte[j])
+						continue;
+					else
+						break;
+				}
+				if (j == nand_flash_ids[i].id_len)
+					flashdev = &nand_flash_ids[i];
+			} else if (!nand_flash_ids[i].id_len &&
+					nand_flash_ids[i].dev_id == devid)
+				flashdev = &nand_flash_ids[i];
+		}
+		if (!flashdev || !flashman) {
+			pr_err("unknown nand flashid=%x manuf=%x devid=%x\n",
+				flash_id, manid, devid);
+			err = -ENOENT;
+			goto out;
+		}
+		dev_found = 1;
+		if (!flashdev->pagesize) {
+			pr_err("missing page size info, extracting it from NAND ID\n");
+			supported_flash->widebus = devcfg & (1 << 6) ? 1 : 0;
+			supported_flash->pagesize = 1024 << (devcfg & 0x3);
+			supported_flash->blksize = (64 * 1024) <<
+							((devcfg >> 4) & 0x3);
+			supported_flash->oobsize = (8 << ((devcfg >> 2) & 1)) *
+				(supported_flash->pagesize >> 9);
+		} else {
+			supported_flash->widebus = flashdev->options &
+				       NAND_BUSWIDTH_16 ? 1 : 0;
+			supported_flash->pagesize = flashdev->pagesize;
+			supported_flash->blksize = flashdev->erasesize;
+			supported_flash->oobsize = flashdev->oobsize;
+			supported_flash->ecc_correctability =
+					flashdev->ecc.strength_ds;
+			if (!flashdev->ecc.strength_ds)
+				pr_err("number of correctable ECC bits not specified, defaulting to 4-bit BCH\n");
+		}
+		supported_flash->flash_id = flash_id;
+		supported_flash->density = ((uint64_t)flashdev->chipsize) << 20;
+	}
+
+	if (dev_found) {
+		wide_bus       = supported_flash->widebus;
+		mtd->size      = supported_flash->density;
+		mtd->writesize = supported_flash->pagesize;
+		mtd->oobsize   = supported_flash->oobsize;
+		mtd->erasesize = supported_flash->blksize;
+		mtd->writebufsize = mtd->writesize;
+		mtd_writesize = mtd->writesize;
+
+		/* Check whether the NAND device supports 8-bit ECC */
+		if (supported_flash->ecc_correctability >= 8) {
+			chip->bch_caps = MSM_NAND_CAP_8_BIT_BCH;
+			supported_flash->ecc_capability = 8;
+		} else {
+			chip->bch_caps = MSM_NAND_CAP_4_BIT_BCH;
+			supported_flash->ecc_capability = 4;
+		}
+
+		pr_info("NAND Id: 0x%x Buswidth: %dBits Density: %lld MByte\n",
+			supported_flash->flash_id, (wide_bus) ? 16 : 8,
+			(mtd->size >> 20));
+		pr_info("pagesize: %d Erasesize: %d oobsize: %d (in Bytes)\n",
+			mtd->writesize, mtd->erasesize, mtd->oobsize);
+		pr_info("BCH ECC: %d Bit\n", supported_flash->ecc_capability);
+	}
+
+	chip->cw_size = (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH) ? 532 : 528;
+	chip->cfg0 = (((mtd_writesize >> 9) - 1) << CW_PER_PAGE)
+		|  (516 <<  UD_SIZE_BYTES)
+		|  (0 << DISABLE_STATUS_AFTER_WRITE)
+		|  (5 << NUM_ADDR_CYCLES);
+
+	bad_block_byte = (mtd_writesize - (chip->cw_size * (
+					(mtd_writesize >> 9) - 1)) + 1);
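+	/*
+	 * Worked example (illustrative, not from the original source): for a
+	 * 2K page with 4-bit BCH, cw_size = 528 and (2048 >> 9) - 1 = 3, so
+	 * bad_block_byte = 2048 - (528 * 3) + 1 = 465, the 1-based offset of
+	 * the page's first spare byte within the last codeword.
+	 */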
+	chip->cfg1 = (7 <<  NAND_RECOVERY_CYCLES)
+		|    (0 <<  CS_ACTIVE_BSY)
+		|    (bad_block_byte <<  BAD_BLOCK_BYTE_NUM)
+		|    (0 << BAD_BLOCK_IN_SPARE_AREA)
+		|    (2 << WR_RD_BSY_GAP)
+		| ((wide_bus ? 1 : 0) << WIDE_FLASH)
+		| (1 << ENABLE_BCH_ECC);
+
+	/*
+	 * For 4bit BCH ECC (default ECC), parity bytes = 7(x8) or 8(x16 I/O)
+	 * For 8bit BCH ECC, parity bytes = 13 (x8) or 14 (x16 I/O).
+	 */
+	chip->ecc_parity_bytes = (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH) ?
+				(wide_bus ? 14 : 13) : (wide_bus ? 8 : 7);
+
+	spare_bytes = chip->cw_size - (BYTES_512 + chip->ecc_parity_bytes);
+	chip->cfg0_raw = (((mtd_writesize >> 9) - 1) << CW_PER_PAGE)
+		|	(5 << NUM_ADDR_CYCLES)
+		|	(spare_bytes << SPARE_SIZE_BYTES)
+		|	(BYTES_512 << UD_SIZE_BYTES);
+
+	chip->cfg1_raw = (2 << WR_RD_BSY_GAP)
+		|    (1 << BAD_BLOCK_IN_SPARE_AREA)
+		|    (21 <<  BAD_BLOCK_BYTE_NUM)
+		|    (0 <<  CS_ACTIVE_BSY)
+		| (7 <<  NAND_RECOVERY_CYCLES)
+		| ((wide_bus ? 1 : 0) << WIDE_FLASH)
+		| (1 << DEV0_CFG1_ECC_DISABLE);
+
+	chip->ecc_bch_cfg = (0 << ECC_CFG_ECC_DISABLE)
+			|   (0 << ECC_SW_RESET)
+			|   (516 << ECC_NUM_DATA_BYTES)
+			|   (chip->ecc_parity_bytes << ECC_PARITY_SIZE_BYTES)
+			|   (1 << ECC_FORCE_CLK_OPEN);
+
+	chip->ecc_cfg_raw = (1 << ECC_FORCE_CLK_OPEN)
+			|   (BYTES_512 << ECC_NUM_DATA_BYTES)
+			|   (chip->ecc_parity_bytes << ECC_PARITY_SIZE_BYTES)
+			|   (0 << ECC_SW_RESET)
+			|   (1 << ECC_CFG_ECC_DISABLE);
+
+	if (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH) {
+		chip->cfg0 |= (wide_bus ? 0 << SPARE_SIZE_BYTES :
+				2 << SPARE_SIZE_BYTES);
+		chip->ecc_bch_cfg |= (1 << ECC_MODE);
+		chip->ecc_cfg_raw |= (1 << ECC_MODE);
+	} else {
+		chip->cfg0 |= (wide_bus ? 2 << SPARE_SIZE_BYTES :
+				4 << SPARE_SIZE_BYTES);
+		chip->ecc_bch_cfg |= (0 << ECC_MODE);
+		chip->ecc_cfg_raw |= (0 << ECC_MODE);
+	}
+
+	chip->ecc_buf_cfg = 0x203; /* Bytes covered by ECC: 516 (0x203 = 516 - 1) */
+
+	pr_info("CFG0: 0x%08x,           CFG1: 0x%08x\n"
+		"            RAWCFG0: 0x%08x,        RAWCFG1: 0x%08x\n"
+		"          ECCBUFCFG: 0x%08x,      ECCBCHCFG: 0x%08x\n"
+		"          RAWECCCFG: 0x%08x, BAD BLOCK BYTE: 0x%08x\n",
+		chip->cfg0, chip->cfg1,	chip->cfg0_raw, chip->cfg1_raw,
+		chip->ecc_buf_cfg, chip->ecc_bch_cfg,
+		chip->ecc_cfg_raw, bad_block_byte);
+
+	if (mtd->writesize == 2048)
+		mtd->oobavail = 16;
+	else if (mtd->writesize == 4096)
+		mtd->oobavail = 32;
+	else {
+		pr_err("Unsupported NAND pagesize: 0x%x\n", mtd->writesize);
+		err = -ENODEV;
+		goto out;
+	}
+
+	/* Fill in remaining MTD driver data */
+	mtd->type = MTD_NANDFLASH;
+	mtd->flags = MTD_CAP_NANDFLASH;
+	mtd->_erase = msm_nand_erase;
+	mtd->_block_isbad = msm_nand_block_isbad;
+	mtd->_block_markbad = msm_nand_block_markbad;
+	mtd->_read = msm_nand_read;
+	mtd->_write = msm_nand_write;
+	mtd->_read_oob  = msm_nand_read_oob;
+	mtd->_write_oob = msm_nand_write_oob;
+	mtd->owner = THIS_MODULE;
+out:
+	return err;
+}
+
+#define BAM_APPS_PIPE_LOCK_GRP0 0
+#define BAM_APPS_PIPE_LOCK_GRP1 1
+/*
+ * This function allocates, configures, and connects an end point, registers
+ * for event notification on it, and allocates DMA memory for the pipe's
+ * descriptor FIFO.
+ */
+static int msm_nand_init_endpoint(struct msm_nand_info *info,
+				struct msm_nand_sps_endpt *end_point,
+				uint32_t pipe_index)
+{
+	int rc = 0;
+	struct sps_pipe *pipe_handle;
+	struct sps_connect *sps_config = &end_point->config;
+	struct sps_register_event *sps_event = &end_point->event;
+
+	pipe_handle = sps_alloc_endpoint();
+	if (!pipe_handle) {
+		pr_err("sps_alloc_endpoint() failed\n");
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	rc = sps_get_config(pipe_handle, sps_config);
+	if (rc) {
+		pr_err("sps_get_config() failed %d\n", rc);
+		goto free_endpoint;
+	}
+
+	if (pipe_index == SPS_DATA_PROD_PIPE_INDEX) {
+		/* READ CASE: source - BAM; destination - system memory */
+		sps_config->source = info->sps.bam_handle;
+		sps_config->destination = SPS_DEV_HANDLE_MEM;
+		sps_config->mode = SPS_MODE_SRC;
+		sps_config->src_pipe_index = pipe_index;
+	} else if (pipe_index == SPS_DATA_CONS_PIPE_INDEX ||
+			pipe_index == SPS_CMD_CONS_PIPE_INDEX) {
+		/* WRITE CASE: source - system memory; destination - BAM */
+		sps_config->source = SPS_DEV_HANDLE_MEM;
+		sps_config->destination = info->sps.bam_handle;
+		sps_config->mode = SPS_MODE_DEST;
+		sps_config->dest_pipe_index = pipe_index;
+	}
+
+	sps_config->options = SPS_O_AUTO_ENABLE | SPS_O_POLL |
+				SPS_O_ACK_TRANSFERS;
+
+	if (pipe_index == SPS_DATA_PROD_PIPE_INDEX ||
+			pipe_index == SPS_DATA_CONS_PIPE_INDEX)
+		sps_config->lock_group = BAM_APPS_PIPE_LOCK_GRP0;
+	else if (pipe_index == SPS_CMD_CONS_PIPE_INDEX)
+		sps_config->lock_group = BAM_APPS_PIPE_LOCK_GRP1;
+
+	/*
+	 * Descriptor FIFO is a cyclic FIFO. If SPS_MAX_DESC_NUM descriptors
+	 * are allowed to be submitted before we get any ack for any of them,
+	 * the descriptor FIFO size should be: (SPS_MAX_DESC_NUM + 1) *
+	 * sizeof(struct sps_iovec).
+	 */
+	sps_config->desc.size = (SPS_MAX_DESC_NUM + 1) *
+					sizeof(struct sps_iovec);
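+	/*
+	 * Worked example (assuming struct sps_iovec is three 32-bit words,
+	 * i.e. 12 bytes): (64 + 1) * 12 = 780 bytes per descriptor FIFO.
+	 */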
+	sps_config->desc.base = dmam_alloc_coherent(info->nand_chip.dev,
+					sps_config->desc.size,
+					&sps_config->desc.phys_base,
+					GFP_KERNEL);
+	if (!sps_config->desc.base) {
+		pr_err("dmam_alloc_coherent() failed for size %x\n",
+				sps_config->desc.size);
+		rc = -ENOMEM;
+		goto free_endpoint;
+	}
+	memset(sps_config->desc.base, 0x00, sps_config->desc.size);
+
+	rc = sps_connect(pipe_handle, sps_config);
+	if (rc) {
+		pr_err("sps_connect() failed %d\n", rc);
+		goto free_endpoint;
+	}
+
+	sps_event->options = SPS_O_EOT;
+	sps_event->mode = SPS_TRIGGER_WAIT;
+	sps_event->user = (void *)info;
+
+	rc = sps_register_event(pipe_handle, sps_event);
+	if (rc) {
+		pr_err("sps_register_event() failed %d\n", rc);
+		goto sps_disconnect;
+	}
+	end_point->index = pipe_index;
+	end_point->handle = pipe_handle;
+	pr_debug("pipe handle %pK for pipe %d\n", pipe_handle, pipe_index);
+	goto out;
+sps_disconnect:
+	sps_disconnect(pipe_handle);
+free_endpoint:
+	sps_free_endpoint(pipe_handle);
+out:
+	return rc;
+}
+
+/* This function disconnects and frees an end point */
+static void msm_nand_deinit_endpoint(struct msm_nand_info *info,
+				struct msm_nand_sps_endpt *end_point)
+{
+	sps_disconnect(end_point->handle);
+	sps_free_endpoint(end_point->handle);
+}
+
+/*
+ * This function registers BAM device and initializes its end points for
+ * the following pipes -
+ * system consumer pipe for data (pipe#0),
+ * system producer pipe for data (pipe#1),
+ * system consumer pipe for commands (pipe#2).
+ */
+static int msm_nand_bam_init(struct msm_nand_info *nand_info)
+{
+	struct sps_bam_props bam = {0};
+	int rc = 0;
+
+	bam.phys_addr = nand_info->bam_phys;
+	bam.virt_addr = nand_info->bam_base;
+	bam.irq = nand_info->bam_irq;
+	/*
+	 * NAND device is accessible from both Apps and Modem processor and
+	 * thus, NANDc and BAM are shared between both the processors. But BAM
+	 * must be enabled and instantiated only once during boot up by
+	 * Trustzone before Modem/Apps is brought out of reset.
+	 *
+	 * This is indicated to SPS driver on Apps by marking flag
+	 * SPS_BAM_MGR_DEVICE_REMOTE. The following are the global
+	 * initializations that will be done by Trustzone - Execution
+	 * Environment, Pipes assignment to Apps/Modem, Pipe Super groups and
+	 * Descriptor summing threshold.
+	 *
+	 * NANDc BAM device supports 2 execution environments - Modem and Apps
+	 * and thus the flag SPS_BAM_MGR_MULTI_EE is set.
+	 */
+	bam.manage = SPS_BAM_MGR_DEVICE_REMOTE | SPS_BAM_MGR_MULTI_EE;
+	bam.ipc_loglevel = QPIC_BAM_DEFAULT_IPC_LOGLVL;
+
+	rc = sps_phy2h(bam.phys_addr, &nand_info->sps.bam_handle);
+	if (!rc)
+		goto init_sps_ep;
+	rc = sps_register_bam_device(&bam, &nand_info->sps.bam_handle);
+	if (rc) {
+		pr_err("%s: sps_register_bam_device() failed with %d\n",
+			__func__, rc);
+		goto out;
+	}
+	pr_info("%s: BAM device registered: bam_handle 0x%lx\n",
+			__func__, nand_info->sps.bam_handle);
+init_sps_ep:
+	rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.data_prod,
+					SPS_DATA_PROD_PIPE_INDEX);
+	if (rc)
+		goto out;
+	rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.data_cons,
+					SPS_DATA_CONS_PIPE_INDEX);
+	if (rc)
+		goto deinit_data_prod;
+
+	rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.cmd_pipe,
+					SPS_CMD_CONS_PIPE_INDEX);
+	if (rc)
+		goto deinit_data_cons;
+	goto out;
+deinit_data_cons:
+	msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_cons);
+deinit_data_prod:
+	msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_prod);
+out:
+	return rc;
+}
+
+/*
+ * This function disconnects and frees the end points of all the pipes.
+ * Since the BAM is a shared resource, it is not deregistered as its handle
+ * might still be in use by LCDC.
+ */
+static void msm_nand_bam_free(struct msm_nand_info *nand_info)
+{
+	msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_prod);
+	msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_cons);
+	msm_nand_deinit_endpoint(nand_info, &nand_info->sps.cmd_pipe);
+}
+
+/* This function enables DMA support for the NANDc in BAM mode. */
+static int msm_nand_enable_dma(struct msm_nand_info *info)
+{
+	struct msm_nand_sps_cmd *sps_cmd;
+	struct msm_nand_chip *chip = &info->nand_chip;
+	int ret, submitted_num_desc = 1;
+	struct sps_iovec iovec_temp;
+
+	wait_event(chip->dma_wait_queue,
+		   (sps_cmd = msm_nand_get_dma_buffer(chip, sizeof(*sps_cmd))));
+
+	msm_nand_prep_single_desc(sps_cmd, MSM_NAND_CTRL(info), WRITE,
+			(1 << BAM_MODE_EN), SPS_IOVEC_FLAG_INT);
+
+	mutex_lock(&info->lock);
+	ret = msm_nand_get_device(chip->dev);
+	if (ret) {
+		mutex_unlock(&info->lock);
+		goto out;
+	}
+	ret = sps_transfer_one(info->sps.cmd_pipe.handle,
+			msm_virt_to_dma(chip, &sps_cmd->ce),
+			sizeof(struct sps_command_element), NULL,
+			sps_cmd->flags);
+	if (ret) {
+		pr_err("Failed to submit command: %d\n", ret);
+		goto put_dev;
+	}
+	ret = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
+			info->sps.cmd_pipe.index, submitted_num_desc,
+			&iovec_temp);
+	if (ret) {
+		pr_err("Failed to get iovec for pipe %d (ret: %d)\n",
+				(info->sps.cmd_pipe.index), ret);
+		goto put_dev;
+	}
+put_dev:
+	ret = msm_nand_put_device(chip->dev);
+out:
+	mutex_unlock(&info->lock);
+	msm_nand_release_dma_buffer(chip, sps_cmd, sizeof(*sps_cmd));
+	return ret;
+
+}
+
+static int msm_nand_parse_smem_ptable(int *nr_parts)
+{
+
+	uint32_t  i, j;
+	uint32_t len = FLASH_PTABLE_HDR_LEN;
+	struct flash_partition_entry *pentry;
+	char *delimiter = ":";
+	void *temp_ptable = NULL;
+
+	pr_info("Parsing partition table info from SMEM\n");
+	temp_ptable = smem_get_entry(SMEM_AARM_PARTITION_TABLE, &len, 0,
+					SMEM_ANY_HOST_FLAG);
+
+	if (!temp_ptable) {
+		pr_err("Error reading partition table header\n");
+		goto out;
+	}
+
+	/* Read only the header portion of ptable */
+	ptable = *(struct flash_partition_table *)temp_ptable;
+
+	/* Verify ptable magic */
+	if (ptable.magic1 != FLASH_PART_MAGIC1 ||
+			ptable.magic2 != FLASH_PART_MAGIC2) {
+		pr_err("Partition table magic verification failed\n");
+		goto out;
+	}
+	/* Ensure that the number of partitions does not exceed the max we allocated */
+	if (ptable.numparts > FLASH_PTABLE_MAX_PARTS_V4) {
+		pr_err("Partition numbers exceed the max limit\n");
+		goto out;
+	}
+	/* Find out length of partition data based on table version. */
+	if (ptable.version <= FLASH_PTABLE_V3) {
+		len = FLASH_PTABLE_HDR_LEN + FLASH_PTABLE_MAX_PARTS_V3 *
+			sizeof(struct flash_partition_entry);
+	} else if (ptable.version == FLASH_PTABLE_V4) {
+		len = FLASH_PTABLE_HDR_LEN + FLASH_PTABLE_MAX_PARTS_V4 *
+			sizeof(struct flash_partition_entry);
+	} else {
+		pr_err("Unknown ptable version (%d)\n", ptable.version);
+		goto out;
+	}
+
+	*nr_parts = ptable.numparts;
+
+	/*
+	 * Now that the partition table header has been parsed, verified
+	 * and the length of the partition table calculated, read the
+	 * complete partition table.
+	 */
+	temp_ptable = smem_get_entry(SMEM_AARM_PARTITION_TABLE, &len, 0,
+					SMEM_ANY_HOST_FLAG);
+	if (!temp_ptable) {
+		pr_err("Error reading partition table\n");
+		goto out;
+	}
+
+	/* This time, read the complete partition table */
+	ptable = *(struct flash_partition_table *)temp_ptable;
+
+	for (i = 0; i < ptable.numparts; i++) {
+		pentry = &ptable.part_entry[i];
+		if (pentry->name[0] == '\0')
+			continue;
+		/* Convert name to lower case and strip any prefix up to ':' */
+		mtd_part[i].name        = pentry->name;
+		for (j = 0; j < strlen(mtd_part[i].name); j++)
+			*(mtd_part[i].name + j) =
+				tolower(*(mtd_part[i].name + j));
+		strsep(&(mtd_part[i].name), delimiter);
+		mtd_part[i].offset      = pentry->offset;
+		mtd_part[i].mask_flags  = pentry->attr;
+		mtd_part[i].size        = pentry->length;
+		pr_debug("%d: %s offs=0x%08x size=0x%08x attr:0x%08x\n",
+			i, pentry->name, pentry->offset, pentry->length,
+			pentry->attr);
+	}
+	pr_info("SMEM partition table found: ver: %d len: %d\n",
+		ptable.version, ptable.numparts);
+	return 0;
+out:
+	return -EINVAL;
+}
+
+#define BOOT_DEV_MASK 0x1E
+#define BOOT_DEV_NAND 0x4
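+/*
+ * Bits [4:1] of the boot config register select the boot device; a value
+ * of 0x4 after the shift below denotes NAND.
+ */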
+
+/*
+ * This function gets called when the device named msm-nand is added to the
+ * device tree (.dts) with all its resources, such as the physical addresses
+ * for NANDc and BAM, and the BAM IRQ.
+ *
+ * It also expects the NAND flash partition information to be passed in .dts
+ * file so that it can parse the partitions by calling MTD function
+ * mtd_device_parse_register().
+ *
+ */
+static int msm_nand_probe(struct platform_device *pdev)
+{
+	struct msm_nand_info *info;
+	struct resource *res;
+	int i, err, nr_parts;
+	struct device *dev;
+	u32 adjustment_offset;
+	void __iomem *boot_cfg_base;
+	u32 boot_dev;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"boot_cfg");
+	if (res && res->start) {
+		boot_cfg_base = devm_ioremap(&pdev->dev, res->start,
+						resource_size(res));
+		if (!boot_cfg_base) {
+			pr_err("ioremap() failed for addr 0x%x size 0x%x\n",
+				res->start, resource_size(res));
+			return -ENOMEM;
+		}
+		boot_dev = (readl_relaxed(boot_cfg_base) & BOOT_DEV_MASK) >> 1;
+		if (boot_dev != BOOT_DEV_NAND) {
+			pr_err("disabling nand as boot device (%x) is not NAND\n",
+					boot_dev);
+			return -ENODEV;
+		}
+	}
+	/*
+	 * The partition information can also be passed from kernel command
+	 * line. Also, the MTD core layer supports adding the whole device as
+	 * one MTD device when no partition information is available at all.
+	 */
+	info = devm_kzalloc(&pdev->dev, sizeof(struct msm_nand_info),
+				GFP_KERNEL);
+	if (!info) {
+		err = -ENOMEM;
+		goto out;
+	}
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"nand_phys");
+	if (!res || !res->start) {
+		pr_err("NAND phys address range is not provided\n");
+		err = -ENODEV;
+		goto out;
+	}
+	info->nand_phys = res->start;
+
+	err = of_property_read_u32(pdev->dev.of_node,
+				   "qcom,reg-adjustment-offset",
+				   &adjustment_offset);
+	if (err) {
+		pr_err("adjustment_offset not found, err = %d\n", err);
+		WARN_ON(1);
+		return err;
+	}
+
+	info->nand_phys_adjusted = info->nand_phys + adjustment_offset;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"bam_phys");
+	if (!res || !res->start) {
+		pr_err("BAM phys address range is not provided\n");
+		err = -ENODEV;
+		goto out;
+	}
+	info->bam_phys = res->start;
+	info->bam_base = devm_ioremap(&pdev->dev, res->start,
+					resource_size(res));
+	if (!info->bam_base) {
+		pr_err("BAM ioremap() failed for addr 0x%x size 0x%x\n",
+			res->start, resource_size(res));
+		err = -ENOMEM;
+		goto out;
+	}
+
+	info->bam_irq = platform_get_irq_byname(pdev, "bam_irq");
+	if (info->bam_irq < 0) {
+		pr_err("BAM IRQ is not provided\n");
+		err = -ENODEV;
+		goto out;
+	}
+
+	info->mtd.name = dev_name(&pdev->dev);
+	info->mtd.priv = info;
+	info->mtd.owner = THIS_MODULE;
+	info->nand_chip.dev = &pdev->dev;
+	init_waitqueue_head(&info->nand_chip.dma_wait_queue);
+	mutex_init(&info->lock);
+
+	dev = &pdev->dev;
+	if (dma_supported(dev, DMA_BIT_MASK(32))) {
+		info->dma_mask = DMA_BIT_MASK(32);
+		dev->coherent_dma_mask = info->dma_mask;
+	}
+
+	info->nand_chip.dma_virt_addr =
+		dmam_alloc_coherent(&pdev->dev, MSM_NAND_DMA_BUFFER_SIZE,
+			&info->nand_chip.dma_phys_addr, GFP_KERNEL);
+	if (!info->nand_chip.dma_virt_addr) {
+		pr_err("No memory for DMA buffer size %x\n",
+				MSM_NAND_DMA_BUFFER_SIZE);
+		err = -ENOMEM;
+		goto out;
+	}
+	err = msm_nand_bus_register(pdev, info);
+	if (err)
+		goto out;
+	info->clk_data.qpic_clk = devm_clk_get(&pdev->dev, "core_clk");
+	if (!IS_ERR_OR_NULL(info->clk_data.qpic_clk)) {
+		err = clk_set_rate(info->clk_data.qpic_clk,
+			MSM_NAND_BUS_VOTE_MAX_RATE);
+	} else {
+		err = PTR_ERR(info->clk_data.qpic_clk);
+		pr_err("Failed to get clock handle, err=%d\n", err);
+	}
+	if (err)
+		goto bus_unregister;
+
+	err = msm_nand_setup_clocks_and_bus_bw(info, true);
+	if (err)
+		goto bus_unregister;
+	dev_set_drvdata(&pdev->dev, info);
+	err = pm_runtime_set_active(&pdev->dev);
+	if (err)
+		pr_err("pm_runtime_set_active() failed with error %d\n", err);
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_NAND_IDLE_TIMEOUT);
+
+	err = msm_nand_bam_init(info);
+	if (err) {
+		pr_err("msm_nand_bam_init() failed %d\n", err);
+		goto clk_rpm_disable;
+	}
+	err = msm_nand_enable_dma(info);
+	if (err) {
+		pr_err("Failed to enable DMA in NANDc\n");
+		goto free_bam;
+	}
+	err = msm_nand_parse_smem_ptable(&nr_parts);
+	if (err < 0) {
+		pr_err("Failed to parse partition table in SMEM\n");
+		goto free_bam;
+	}
+	if (msm_nand_scan(&info->mtd)) {
+		pr_err("No nand device found\n");
+		err = -ENXIO;
+		goto free_bam;
+	}
+	for (i = 0; i < nr_parts; i++) {
+		mtd_part[i].offset *= info->mtd.erasesize;
+		mtd_part[i].size *= info->mtd.erasesize;
+	}
+	err = mtd_device_parse_register(&info->mtd, NULL, NULL,
+		&mtd_part[0], nr_parts);
+	if (err < 0) {
+		pr_err("Unable to register MTD partitions %d\n", err);
+		goto free_bam;
+	}
+
+	pr_info("NANDc phys addr 0x%lx, BAM phys addr 0x%lx, BAM IRQ %d\n",
+			info->nand_phys, info->bam_phys, info->bam_irq);
+	pr_info("Allocated DMA buffer at virt_addr 0x%pK, phys_addr 0x%x\n",
+		info->nand_chip.dma_virt_addr, info->nand_chip.dma_phys_addr);
+	goto out;
+free_bam:
+	msm_nand_bam_free(info);
+clk_rpm_disable:
+	msm_nand_setup_clocks_and_bus_bw(info, false);
+	pm_runtime_disable(&(pdev)->dev);
+	pm_runtime_set_suspended(&(pdev)->dev);
+bus_unregister:
+	msm_nand_bus_unregister(info);
+out:
+	return err;
+}
+
+/*
+ * Remove callback that gets called when the msm-nand driver/device
+ * is removed.
+ */
+static int msm_nand_remove(struct platform_device *pdev)
+{
+	struct msm_nand_info *info = dev_get_drvdata(&pdev->dev);
+
+	if (pm_runtime_suspended(&(pdev)->dev))
+		pm_runtime_resume(&(pdev)->dev);
+
+	pm_runtime_disable(&(pdev)->dev);
+	pm_runtime_set_suspended(&(pdev)->dev);
+
+	dev_set_drvdata(&pdev->dev, NULL);
+
+	if (info) {
+		msm_nand_setup_clocks_and_bus_bw(info, false);
+		if (info->clk_data.client_handle)
+			msm_nand_bus_unregister(info);
+		mtd_device_unregister(&info->mtd);
+		msm_nand_bam_free(info);
+	}
+	return 0;
+}
+
+#define DRIVER_NAME "msm_qpic_nand"
+static const struct of_device_id msm_nand_match_table[] = {
+	{ .compatible = "qcom,msm-nand", },
+	{},
+};
+
+static const struct dev_pm_ops msm_nand_pm_ops = {
+	.suspend		= msm_nand_suspend,
+	.resume			= msm_nand_resume,
+	.runtime_suspend	= msm_nand_runtime_suspend,
+	.runtime_resume		= msm_nand_runtime_resume,
+};
+
+static struct platform_driver msm_nand_driver = {
+	.probe		= msm_nand_probe,
+	.remove		= msm_nand_remove,
+	.driver = {
+		.name		= DRIVER_NAME,
+		.of_match_table = msm_nand_match_table,
+		.pm		= &msm_nand_pm_ops,
+	},
+};
+
+module_param(enable_euclean, bool, 0644);
+MODULE_PARM_DESC(enable_euclean, "Set this parameter to enable reporting EUCLEAN to upper layer when the correctable bitflips are equal to the max correctable limit.");
+
+module_platform_driver(msm_nand_driver);
+
+MODULE_ALIAS(DRIVER_NAME);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM QPIC NAND flash driver");
diff --git a/drivers/mtd/devices/msm_qpic_nand.h b/drivers/mtd/devices/msm_qpic_nand.h
new file mode 100644
index 0000000..9b6701c
--- /dev/null
+++ b/drivers/mtd/devices/msm_qpic_nand.h
@@ -0,0 +1,403 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __QPIC_NAND_H
+#define __QPIC_NAND_H
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/crc16.h>
+#include <linux/bitrev.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/ctype.h>
+#include <linux/msm-sps.h>
+#include <linux/msm-bus.h>
+#include <soc/qcom/smem.h>
+
+#define PAGE_SIZE_2K 2048
+#define PAGE_SIZE_4K 4096
+
+#undef WRITE /* To avoid redefinition in above header files */
+#undef READ /* To avoid redefinition in above header files */
+#define WRITE 1
+#define READ 0
+
+#define MSM_NAND_IDLE_TIMEOUT   200 /* msecs */
+#define MSM_NAND_BUS_VOTE_MAX_RATE  100000000 /* Hz */
+
+/*
+ * The maximum number of descriptors per transfer (page read/write) won't
+ * be more than 64. For more details on what those commands are, refer to the
+ * page read and page write functions in the driver.
+ */
+#define SPS_MAX_DESC_NUM 64
+#define SPS_DATA_CONS_PIPE_INDEX 0
+#define SPS_DATA_PROD_PIPE_INDEX 1
+#define SPS_CMD_CONS_PIPE_INDEX 2
+
+#define msm_virt_to_dma(chip, vaddr) \
+	((chip)->dma_phys_addr + \
+	((uint8_t *)(vaddr) - (chip)->dma_virt_addr))
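+/*
+ * Translates a pointer inside the preallocated DMA pool to its bus address
+ * by applying the same offset from the pool base. Only valid for addresses
+ * within the pool's MSM_NAND_DMA_BUFFER_SIZE range.
+ */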
+
+/*
+ * A single page read/write request typically needs about 1K of DMA memory,
+ * so for a single request this memory is more than enough.
+ *
+ * But to accommodate multiple clients we allocate 8K of memory. Though only
+ * one client request can be submitted to NANDc at any time, other clients can
+ * still prepare the descriptors while waiting for the current request to
+ * be done. Thus, with a total of 8K of memory, the driver can currently
+ * support up to 7 or 8 clients at a time. The client for which there is no
+ * free DMA memory shall wait on the wait queue until other clients free up
+ * the required memory.
+ */
+#define MSM_NAND_DMA_BUFFER_SIZE SZ_8K
+/*
+ * This defines the granularity at which the buffer management is done. The
+ * total number of slots is based on the size of the atomic_t variable
+ * dma_buffer_busy (number of bits) within the structure msm_nand_chip.
+ */
+#define MSM_NAND_DMA_BUFFER_SLOT_SZ \
+	(MSM_NAND_DMA_BUFFER_SIZE / (sizeof(((atomic_t *)0)->counter) * 8))
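+/*
+ * Worked example (assuming a 32-bit atomic_t counter): 8192 / 32 = 256
+ * bytes per slot, giving one allocation bit per slot in dma_buffer_busy.
+ */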
+
+/* ONFI(Open NAND Flash Interface) parameters */
+#define MSM_NAND_CFG0_RAW_ONFI_IDENTIFIER 0x88000800
+#define MSM_NAND_CFG0_RAW_ONFI_PARAM_INFO 0x88040000
+#define MSM_NAND_CFG1_RAW_ONFI_IDENTIFIER 0x0005045d
+#define MSM_NAND_CFG1_RAW_ONFI_PARAM_INFO 0x0005045d
+#define ONFI_PARAM_INFO_LENGTH 0x0200
+#define ONFI_PARAM_PAGE_LENGTH 0x0100
+#define ONFI_PARAMETER_PAGE_SIGNATURE 0x49464E4F
+#define FLASH_READ_ONFI_SIGNATURE_ADDRESS 0x20
+#define FLASH_READ_ONFI_PARAMETERS_ADDRESS 0x00
+#define FLASH_READ_DEVICE_ID_ADDRESS 0x00
+
+#define MSM_NAND_RESET_FLASH_STS 0x00000020
+#define MSM_NAND_RESET_READ_STS 0x000000C0
+
+/* QPIC NANDc (NAND Controller) Register Set */
+#define MSM_NAND_REG(info, off)		    (info->nand_phys + off)
+#define MSM_NAND_REG_ADJUSTED(info, off)    (info->nand_phys_adjusted + off)
+#define MSM_NAND_QPIC_VERSION(info)	    MSM_NAND_REG_ADJUSTED(info, 0x20100)
+#define MSM_NAND_FLASH_CMD(info)	    MSM_NAND_REG(info, 0x30000)
+#define MSM_NAND_ADDR0(info)                MSM_NAND_REG(info, 0x30004)
+#define MSM_NAND_ADDR1(info)                MSM_NAND_REG(info, 0x30008)
+#define MSM_NAND_EXEC_CMD(info)             MSM_NAND_REG(info, 0x30010)
+#define MSM_NAND_FLASH_STATUS(info)         MSM_NAND_REG(info, 0x30014)
+#define FS_OP_ERR (1 << 4)
+#define FS_MPU_ERR (1 << 8)
+#define FS_DEVICE_STS_ERR (1 << 16)
+#define FS_DEVICE_WP (1 << 23)
+
+#define MSM_NAND_BUFFER_STATUS(info)        MSM_NAND_REG(info, 0x30018)
+#define BS_UNCORRECTABLE_BIT (1 << 8)
+#define BS_CORRECTABLE_ERR_MSK 0x1F
+
+#define MSM_NAND_DEV0_CFG0(info)            MSM_NAND_REG(info, 0x30020)
+#define DISABLE_STATUS_AFTER_WRITE 4
+#define CW_PER_PAGE	6
+#define UD_SIZE_BYTES	9
+#define SPARE_SIZE_BYTES 23
+#define NUM_ADDR_CYCLES	27
+
+#define MSM_NAND_DEV0_CFG1(info)            MSM_NAND_REG(info, 0x30024)
+#define DEV0_CFG1_ECC_DISABLE	0
+#define WIDE_FLASH		1
+#define NAND_RECOVERY_CYCLES	2
+#define CS_ACTIVE_BSY		5
+#define BAD_BLOCK_BYTE_NUM	6
+#define BAD_BLOCK_IN_SPARE_AREA 16
+#define WR_RD_BSY_GAP		17
+#define ENABLE_BCH_ECC		27
+
+#define BYTES_512		512
+#define BYTES_516		516
+#define BYTES_517		517
+
+#define MSM_NAND_DEV0_ECC_CFG(info)	    MSM_NAND_REG(info, 0x30028)
+#define ECC_CFG_ECC_DISABLE	0
+#define ECC_SW_RESET	1
+#define ECC_MODE	4
+#define ECC_PARITY_SIZE_BYTES 8
+#define ECC_NUM_DATA_BYTES 16
+#define ECC_FORCE_CLK_OPEN 30
+
+#define MSM_NAND_READ_ID(info)              MSM_NAND_REG(info, 0x30040)
+#define MSM_NAND_READ_STATUS(info)          MSM_NAND_REG(info, 0x30044)
+#define MSM_NAND_READ_ID2(info)              MSM_NAND_REG(info, 0x30048)
+#define EXTENDED_FETCH_ID           BIT(19)
+#define MSM_NAND_DEV_CMD1(info)             MSM_NAND_REG(info, 0x300A4)
+#define MSM_NAND_DEV_CMD_VLD(info)          MSM_NAND_REG(info, 0x300AC)
+#define MSM_NAND_EBI2_ECC_BUF_CFG(info)     MSM_NAND_REG(info, 0x300F0)
+
+#define MSM_NAND_ERASED_CW_DETECT_CFG(info)	MSM_NAND_REG(info, 0x300E8)
+#define ERASED_CW_ECC_MASK	1
+#define AUTO_DETECT_RES		0
+#define MASK_ECC		(1 << ERASED_CW_ECC_MASK)
+#define RESET_ERASED_DET	(1 << AUTO_DETECT_RES)
+#define ACTIVE_ERASED_DET	(0 << AUTO_DETECT_RES)
+#define CLR_ERASED_PAGE_DET	(RESET_ERASED_DET | MASK_ECC)
+#define SET_ERASED_PAGE_DET	(ACTIVE_ERASED_DET | MASK_ECC)
+
+#define MSM_NAND_ERASED_CW_DETECT_STATUS(info)  MSM_NAND_REG(info, 0x300EC)
+#define PAGE_ALL_ERASED		7
+#define CODEWORD_ALL_ERASED	6
+#define PAGE_ERASED		5
+#define CODEWORD_ERASED		4
+#define ERASED_PAGE	((1 << PAGE_ALL_ERASED) | (1 << PAGE_ERASED))
+#define ERASED_CW	((1 << CODEWORD_ALL_ERASED) | (1 << CODEWORD_ERASED))
+
+#define MSM_NAND_CTRL(info)		    MSM_NAND_REG(info, 0x30F00)
+#define BAM_MODE_EN	0
+#define MSM_NAND_VERSION(info)         MSM_NAND_REG_ADJUSTED(info, 0x30F08)
+#define MSM_NAND_READ_LOCATION_0(info)      MSM_NAND_REG(info, 0x30F20)
+#define MSM_NAND_READ_LOCATION_1(info)      MSM_NAND_REG(info, 0x30F24)
+
+/* device commands */
+#define MSM_NAND_CMD_PAGE_READ          0x32
+#define MSM_NAND_CMD_PAGE_READ_ECC      0x33
+#define MSM_NAND_CMD_PAGE_READ_ALL      0x34
+#define MSM_NAND_CMD_PAGE_READ_ONFI     0x35
+#define MSM_NAND_CMD_PRG_PAGE           0x36
+#define MSM_NAND_CMD_PRG_PAGE_ECC       0x37
+#define MSM_NAND_CMD_PRG_PAGE_ALL       0x39
+#define MSM_NAND_CMD_BLOCK_ERASE        0x3A
+#define MSM_NAND_CMD_FETCH_ID           0x0B
+
+/* Version Mask */
+#define MSM_NAND_VERSION_MAJOR_MASK	0xF0000000
+#define MSM_NAND_VERSION_MAJOR_SHIFT	28
+#define MSM_NAND_VERSION_MINOR_MASK	0x0FFF0000
+#define MSM_NAND_VERSION_MINOR_SHIFT	16
+
+#define CMD		SPS_IOVEC_FLAG_CMD
+#define CMD_LCK		(CMD | SPS_IOVEC_FLAG_LOCK)
+#define INT		SPS_IOVEC_FLAG_INT
+#define INT_UNLCK	(INT | SPS_IOVEC_FLAG_UNLOCK)
+#define CMD_INT_UNLCK	(CMD | INT_UNLCK)
+#define NWD		SPS_IOVEC_FLAG_NWD
+
+/* Structure that defines a NAND SPS command element */
+struct msm_nand_sps_cmd {
+	struct sps_command_element ce;
+	uint32_t flags;
+};
+
+struct msm_nand_cmd_setup_desc {
+	struct sps_command_element ce[11];
+	uint32_t flags;
+	uint32_t num_ce;
+};
+
+struct msm_nand_cmd_cw_desc {
+	struct sps_command_element ce[3];
+	uint32_t flags;
+	uint32_t num_ce;
+};
+
+struct msm_nand_rw_cmd_desc {
+	uint32_t count;
+	struct msm_nand_cmd_setup_desc setup_desc;
+	struct msm_nand_cmd_cw_desc cw_desc[];
+};
+
+/*
+ * Structure that defines the NAND controller properties as per the
+ * NAND flash device/chip that is attached.
+ */
+struct msm_nand_chip {
+	struct device *dev;
+	/*
+	 * DMA memory will be allocated only once during probe and this memory
+	 * will be used by all NAND clients. This wait queue is needed to
+	 * make applications wait for DMA memory to be freed when all of it
+	 * is exhausted.
+	 */
+	wait_queue_head_t dma_wait_queue;
+	atomic_t dma_buffer_busy;
+	uint8_t *dma_virt_addr;
+	dma_addr_t dma_phys_addr;
+	uint32_t ecc_parity_bytes;
+	uint32_t bch_caps; /* Controller BCH ECC capabilities */
+#define MSM_NAND_CAP_4_BIT_BCH      (1 << 0)
+#define MSM_NAND_CAP_8_BIT_BCH      (1 << 1)
+	uint32_t cw_size;
+	/* NANDc register configurations */
+	uint32_t cfg0, cfg1, cfg0_raw, cfg1_raw;
+	uint32_t ecc_buf_cfg;
+	uint32_t ecc_bch_cfg;
+	uint32_t ecc_cfg_raw;
+};
+
+/* Structure that defines an SPS end point for a NANDc BAM pipe. */
+struct msm_nand_sps_endpt {
+	struct sps_pipe *handle;
+	struct sps_connect config;
+	struct sps_register_event event;
+	struct completion completion;
+	uint32_t index;
+};
+
+/*
+ * Structure that defines NANDc SPS data - BAM handle and an end point
+ * for each BAM pipe.
+ */
+struct msm_nand_sps_info {
+	unsigned long bam_handle;
+	struct msm_nand_sps_endpt data_prod;
+	struct msm_nand_sps_endpt data_cons;
+	struct msm_nand_sps_endpt cmd_pipe;
+};
+
+/*
+ * Structure that contains flash device information. This gets updated after
+ * the NAND flash device detection.
+ */
+struct flash_identification {
+	uint32_t flash_id;
+	uint64_t density;
+	uint32_t widebus;
+	uint32_t pagesize;
+	uint32_t blksize;
+	uint32_t oobsize;
+	uint32_t ecc_correctability;
+	uint32_t ecc_capability; /* Set based on the ECC capability selected. */
+};
+
+struct msm_nand_clk_data {
+	struct clk *qpic_clk;
+	struct msm_bus_scale_pdata *use_cases;
+	uint32_t client_handle;
+	atomic_t clk_enabled;
+	atomic_t curr_vote;
+};
+
+/* Structure that defines NANDc private data. */
+struct msm_nand_info {
+	struct mtd_info		mtd;
+	struct msm_nand_chip	nand_chip;
+	struct msm_nand_sps_info sps;
+	unsigned long bam_phys;
+	unsigned long nand_phys;
+	unsigned long nand_phys_adjusted;
+	void __iomem *bam_base;
+	int bam_irq;
+	/*
+	 * This lock must be acquired before submitting any command or data
+	 * descriptors to BAM pipes and must be held until all the submitted
+	 * descriptors are processed.
+	 *
+	 * This is required to ensure that both command and descriptors are
+	 * submitted atomically without interruption from other clients,
+	 * when there are requests from more than one client at any time.
+	 * Otherwise, data and command descriptors can be submitted out of
+	 * order for a request which can cause data corruption.
+	 */
+	struct mutex lock;
+	struct flash_identification flash_dev;
+	struct msm_nand_clk_data clk_data;
+	u64 dma_mask;
+};
+
+/* Structure that defines an ONFI parameter page (512B) */
+struct onfi_param_page {
+	uint32_t parameter_page_signature;
+	uint16_t revision_number;
+	uint16_t features_supported;
+	uint16_t optional_commands_supported;
+	uint8_t  reserved0[22];
+	uint8_t  device_manufacturer[12];
+	uint8_t  device_model[20];
+	uint8_t  jedec_manufacturer_id;
+	uint16_t date_code;
+	uint8_t  reserved1[13];
+	uint32_t number_of_data_bytes_per_page;
+	uint16_t number_of_spare_bytes_per_page;
+	uint32_t number_of_data_bytes_per_partial_page;
+	uint16_t number_of_spare_bytes_per_partial_page;
+	uint32_t number_of_pages_per_block;
+	uint32_t number_of_blocks_per_logical_unit;
+	uint8_t  number_of_logical_units;
+	uint8_t  number_of_address_cycles;
+	uint8_t  number_of_bits_per_cell;
+	uint16_t maximum_bad_blocks_per_logical_unit;
+	uint16_t block_endurance;
+	uint8_t  guaranteed_valid_begin_blocks;
+	uint16_t guaranteed_valid_begin_blocks_endurance;
+	uint8_t  number_of_programs_per_page;
+	uint8_t  partial_program_attributes;
+	uint8_t  number_of_bits_ecc_correctability;
+	uint8_t  number_of_interleaved_address_bits;
+	uint8_t  interleaved_operation_attributes;
+	uint8_t  reserved2[13];
+	uint8_t  io_pin_capacitance;
+	uint16_t timing_mode_support;
+	uint16_t program_cache_timing_mode_support;
+	uint16_t maximum_page_programming_time;
+	uint16_t maximum_block_erase_time;
+	uint16_t maximum_page_read_time;
+	uint16_t maximum_change_column_setup_time;
+	uint8_t  reserved3[23];
+	uint16_t vendor_specific_revision_number;
+	uint8_t  vendor_specific[88];
+	uint16_t integrity_crc;
+} __attribute__((__packed__));
+
+#define FLASH_PART_MAGIC1	0x55EE73AA
+#define FLASH_PART_MAGIC2	0xE35EBDDB
+#define FLASH_PTABLE_V3		3
+#define FLASH_PTABLE_V4		4
+#define FLASH_PTABLE_MAX_PARTS_V3 16
+#define FLASH_PTABLE_MAX_PARTS_V4 32
+#define FLASH_PTABLE_HDR_LEN (4*sizeof(uint32_t))
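+/* Header: magic1, magic2, version, numparts - four u32 fields, 16 bytes */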
+#define FLASH_PTABLE_ENTRY_NAME_SIZE 16
+
+struct flash_partition_entry {
+	char name[FLASH_PTABLE_ENTRY_NAME_SIZE];
+	u32 offset;     /* Offset in blocks from beginning of device */
+	u32 length;     /* Length of the partition in blocks */
+	u8 attr;	/* Flags for this partition */
+};
+
+struct flash_partition_table {
+	u32 magic1;
+	u32 magic2;
+	u32 version;
+	u32 numparts;
+	struct flash_partition_entry part_entry[FLASH_PTABLE_MAX_PARTS_V4];
+};
+
+static struct flash_partition_table ptable;
+
+static struct mtd_partition mtd_part[FLASH_PTABLE_MAX_PARTS_V4];
+
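+/*
+ * Example (illustrative): with 4K pages, a buffer at page offset 0xF00
+ * with len 0x200 would cross a page boundary and be rejected.
+ */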
+static inline bool is_buffer_in_page(const void *buf, size_t len)
+{
+	return !(((unsigned long) buf & ~PAGE_MASK) + len > PAGE_SIZE);
+}
+#endif /* __QPIC_NAND_H */
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index fccdd49..c62923b 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -617,7 +617,7 @@
 	return ret;
 }
 
-int mtd_add_partition(struct mtd_info *master, const char *name,
+int mtd_add_partition(struct mtd_info *master, char *name,
 		      long long offset, long long length)
 {
 	struct mtd_partition part;
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
index ede407d..2312412 100644
--- a/drivers/mtd/ofpart.c
+++ b/drivers/mtd/ofpart.c
@@ -112,7 +112,7 @@
 		partname = of_get_property(pp, "label", &len);
 		if (!partname)
 			partname = of_get_property(pp, "name", &len);
-		parts[i].name = partname;
+		parts[i].name = (char *)partname;
 
 		if (of_get_property(pp, "read-only", &len))
 			parts[i].mask_flags |= MTD_WRITEABLE;
@@ -186,7 +186,7 @@
 		if (names && (plen > 0)) {
 			int len = strlen(names) + 1;
 
-			parts[i].name = names;
+			parts[i].name = (char *)names;
 			plen -= len;
 			names += len;
 		} else {
diff --git a/drivers/platform/msm/qcom-geni-se.c b/drivers/platform/msm/qcom-geni-se.c
index 1fffa7c..7c77280 100644
--- a/drivers/platform/msm/qcom-geni-se.c
+++ b/drivers/platform/msm/qcom-geni-se.c
@@ -31,7 +31,7 @@
 #define GENI_SE_IOMMU_VA_SIZE	(0xC0000000)
 
 #define NUM_LOG_PAGES 2
-
+#define MAX_CLK_PERF_LEVEL 32
 static unsigned long default_bus_bw_set[] = {0, 19200000, 50000000, 100000000};
 
 /**
@@ -52,6 +52,8 @@
  * @cur_ib:		Current Bus Instantaneous BW request value.
  * @bus_bw_set:		Clock plan for the bus driver.
  * @cur_bus_bw_idx:	Current index within the bus clock plan.
+ * @num_clk_levels:	Number of valid clock levels in clk_perf_tbl.
+ * @clk_perf_tbl:	Table of clock frequency input to Serial Engine clock.
  * @log_ctx:		Logging context to hold the debug information
  */
 struct geni_se_device {
@@ -72,6 +74,8 @@
 	int bus_bw_set_size;
 	unsigned long *bus_bw_set;
 	int cur_bus_bw_idx;
+	unsigned int num_clk_levels;
+	unsigned long *clk_perf_tbl;
 	void *log_ctx;
 };
 
@@ -809,6 +813,111 @@
 EXPORT_SYMBOL(geni_se_resources_init);
 
 /**
+ * geni_se_clk_tbl_get() - Get the clock table to program DFS
+ * @rsc:	Resource for which the clock table is requested.
+ * @tbl:	Table in which the output is returned.
+ *
+ * This function is called by the protocol drivers to determine the different
+ * clock frequencies supported by the Serial Engine core clock. The protocol
+ * drivers use the output to determine the clock frequency index to be
+ * programmed into DFS.
+ *
+ * Return:	number of valid performance levels in the table on success,
+ *		standard Linux error codes on failure.
+ */
+int geni_se_clk_tbl_get(struct se_geni_rsc *rsc, unsigned long **tbl)
+{
+	struct geni_se_device *geni_se_dev;
+	int i;
+	unsigned long prev_freq = 0;
+
+	if (unlikely(!rsc || !rsc->wrapper_dev || !rsc->se_clk || !tbl))
+		return -EINVAL;
+
+	*tbl = NULL;
+	geni_se_dev = dev_get_drvdata(rsc->wrapper_dev);
+	if (unlikely(!geni_se_dev))
+		return -EPROBE_DEFER;
+
+	if (geni_se_dev->clk_perf_tbl) {
+		*tbl = geni_se_dev->clk_perf_tbl;
+		return geni_se_dev->num_clk_levels;
+	}
+
+	geni_se_dev->clk_perf_tbl = kzalloc(sizeof(*geni_se_dev->clk_perf_tbl) *
+						MAX_CLK_PERF_LEVEL, GFP_KERNEL);
+	if (!geni_se_dev->clk_perf_tbl)
+		return -ENOMEM;
+
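+	/*
+	 * Walk the clock plan by repeatedly asking for the lowest supported
+	 * rate above the previous one; the table is complete once
+	 * clk_round_rate() stops returning a higher frequency.
+	 */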
+	for (i = 0; i < MAX_CLK_PERF_LEVEL; i++) {
+		geni_se_dev->clk_perf_tbl[i] = clk_round_rate(rsc->se_clk,
+								prev_freq + 1);
+		if (geni_se_dev->clk_perf_tbl[i] == prev_freq) {
+			geni_se_dev->clk_perf_tbl[i] = 0;
+			break;
+		}
+		prev_freq = geni_se_dev->clk_perf_tbl[i];
+	}
+	geni_se_dev->num_clk_levels = i;
+	*tbl = geni_se_dev->clk_perf_tbl;
+	return geni_se_dev->num_clk_levels;
+}
+EXPORT_SYMBOL(geni_se_clk_tbl_get);
+
+/**
+ * geni_se_clk_freq_match() - Get the matching or closest SE clock frequency
+ * @rsc:	Resource for which the clock frequency is requested.
+ * @req_freq:	Requested clock frequency.
+ * @index:	Index of the resultant frequency in the table.
+ * @res_freq:	Resultant frequency which matches or is closer to the
+ *		requested frequency.
+ * @exact:	Flag to indicate that the resultant frequency must be an exact
+ *		integer multiple of the requested frequency.
+ *
+ * This function is called by the protocol drivers to determine the matching
+ * or closest frequency of the Serial Engine clock to be selected in order
+ * to meet the performance requirements.
+ *
+ * Return:	0 on success, standard Linux error codes on failure.
+ */
+int geni_se_clk_freq_match(struct se_geni_rsc *rsc, unsigned long req_freq,
+			   unsigned int *index, unsigned long *res_freq,
+			   bool exact)
+{
+	unsigned long *tbl;
+	int num_clk_levels;
+	int i;
+
+	num_clk_levels = geni_se_clk_tbl_get(rsc, &tbl);
+	if (num_clk_levels < 0)
+		return num_clk_levels;
+
+	if (num_clk_levels == 0)
+		return -EFAULT;
+
+	*res_freq = 0;
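+	/*
+	 * Prefer a table rate that is an exact integer multiple of the
+	 * request; otherwise remember the highest rate below the request
+	 * as the closest usable fallback.
+	 */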
+	for (i = 0; i < num_clk_levels; i++) {
+		if (!(tbl[i] % req_freq)) {
+			*index = i;
+			*res_freq = tbl[i];
+			return 0;
+		}
+
+		if (!(*res_freq) || ((tbl[i] > *res_freq) &&
+				     (tbl[i] < req_freq))) {
+			*index = i;
+			*res_freq = tbl[i];
+		}
+	}
+
+	if (exact || !(*res_freq))
+		return -ENOKEY;
+
+	return 0;
+}
+EXPORT_SYMBOL(geni_se_clk_freq_match);
+
+/**
  * geni_se_tx_dma_prep() - Prepare the Serial Engine for TX DMA transfer
  * @wrapper_dev:	QUPv3 Wrapper Device to which the TX buffer is mapped.
  * @base:		Base address of the SE register block.
diff --git a/drivers/platform/msm/sps/sps.c b/drivers/platform/msm/sps/sps.c
index 907c94e8..fbf8773 100644
--- a/drivers/platform/msm/sps/sps.c
+++ b/drivers/platform/msm/sps/sps.c
@@ -2088,7 +2088,7 @@
 				unsigned long *dev_handle)
 {
 	struct sps_bam *bam = NULL;
-	void *virt_addr = NULL;
+	void __iomem *virt_addr = NULL;
 	char bam_name[MAX_MSG_LEN];
 	u32 manage;
 	int ok;
diff --git a/drivers/platform/msm/sps/sps_bam.h b/drivers/platform/msm/sps/sps_bam.h
index 468c492..7cb0670 100644
--- a/drivers/platform/msm/sps/sps_bam.h
+++ b/drivers/platform/msm/sps/sps_bam.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -202,7 +202,7 @@
 	/* BAM device state */
 	u32 state;
 	struct mutex lock;
-	void *base; /* BAM virtual base address */
+	void __iomem *base; /* BAM virtual base address */
 	u32 version;
 	spinlock_t isr_lock;
 	spinlock_t connection_lock;
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 2577ac6..dc303e2 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -1,4 +1,6 @@
 KASAN_SANITIZE_scm.o := n
+KCOV_INSTRUMENT_scm.o := n
+
 obj-$(CONFIG_QCOM_CPUSS_DUMP) += cpuss_dump.o
 obj-$(CONFIG_QCOM_GSBI)	+=	qcom_gsbi.o
 obj-$(CONFIG_QCOM_LLCC) += llcc-core.o llcc-slice.o
diff --git a/drivers/soc/qcom/dcc_v2.c b/drivers/soc/qcom/dcc_v2.c
index 585836a..21b2034 100644
--- a/drivers/soc/qcom/dcc_v2.c
+++ b/drivers/soc/qcom/dcc_v2.c
@@ -431,7 +431,7 @@
 				link = 0;
 			}
 
-			prev_off  = off;
+			prev_off  = off + entry->len - 1;
 			prev_addr = addr;
 			}
 		}
diff --git a/drivers/soc/qcom/msm-core.c b/drivers/soc/qcom/msm-core.c
index 4ec791c..f8103de 100644
--- a/drivers/soc/qcom/msm-core.c
+++ b/drivers/soc/qcom/msm-core.c
@@ -293,23 +293,27 @@
 	struct cpu_static_info *sp, *clear_sp;
 	int cpumask, cluster;
 	bool pdata_valid[NR_CPUS] = {0};
+	bool cpu_found = false;
 
 	get_user(cpumask, &argp->cpumask);
 	get_user(cluster, &argp->cluster);
 
 	pr_debug("%s: cpumask %d, cluster: %d\n", __func__, cpumask,
 					cluster);
-	for (i = 0; i < MAX_CORES_PER_CLUSTER; i++, cpumask >>= 1) {
+	for (i = 0; cpumask > 0; i++, cpumask >>= 1) {
 		if (!(cpumask & 0x01))
 			continue;
 
 		for_each_possible_cpu(cpu) {
-			if ((cpu_topology[cpu].core_id != i) &&
+			if ((cpu_topology[cpu].core_id != i) ||
 				(cpu_topology[cpu].cluster_id != cluster))
 				continue;
 
+			cpu_found = true;
 			break;
 		}
+		if (cpu_found)
+			break;
 	}
 
 	if ((cpu < 0) || (cpu >= num_possible_cpus()))
@@ -346,7 +350,7 @@
 	 */
 	get_user(cpumask, &argp->cpumask);
 	spin_lock(&update_lock);
-	for (i = 0; i < MAX_CORES_PER_CLUSTER; i++, cpumask >>= 1) {
+	for (i = 0; cpumask > 0; i++, cpumask >>= 1) {
 		if (!(cpumask & 0x01))
 			continue;
 		for_each_possible_cpu(cpu) {
@@ -396,6 +400,7 @@
 	struct sched_params __user *argp = (struct sched_params __user *)arg;
 	int i, cpu = num_possible_cpus();
 	int cluster, cpumask;
+	bool cpu_found = false;
 
 	if (!argp)
 		return -EINVAL;
@@ -416,8 +421,11 @@
 				(cpu_topology[cpu].cluster_id != cluster)))
 					continue;
 
+				cpu_found = true;
 				break;
 			}
+			if (cpu_found)
+				break;
 		}
 		if (cpu >= num_possible_cpus())
 			break;
diff --git a/drivers/soc/qcom/scm.c b/drivers/soc/qcom/scm.c
index b4713ac..fcb3731 100644
--- a/drivers/soc/qcom/scm.c
+++ b/drivers/soc/qcom/scm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -134,6 +134,7 @@
 #define R3_STR "r3"
 #define R4_STR "r4"
 #define R5_STR "r5"
+#define R6_STR "r6"
 
 #endif
 
@@ -481,6 +482,7 @@
 	register u32 r3 asm("r3") = w3;
 	register u32 r4 asm("r4") = w4;
 	register u32 r5 asm("r5") = w5;
+	register u32 r6 asm("r6") = 0;
 
 	do {
 		asm volatile(
@@ -494,13 +496,14 @@
 			__asmeq("%7", R3_STR)
 			__asmeq("%8", R4_STR)
 			__asmeq("%9", R5_STR)
+			__asmeq("%10", R6_STR)
 #ifdef REQUIRES_SEC
 			".arch_extension sec\n"
 #endif
 			"smc	#0\n"
 			: "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3)
 			: "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4),
-			 "r" (r5));
+			 "r" (r5), "r" (r6));
 
 	} while (r0 == SCM_INTERRUPTED);
 
diff --git a/drivers/soc/qcom/system_pm.c b/drivers/soc/qcom/system_pm.c
index d8c5a8f..f4f4c36 100644
--- a/drivers/soc/qcom/system_pm.c
+++ b/drivers/soc/qcom/system_pm.c
@@ -24,7 +24,7 @@
 
 static int setup_wakeup(uint64_t sleep_val)
 {
-	struct tcs_cmd cmd[3] = { { 0 } };
+	struct tcs_cmd cmd[2] = { { 0 } };
 
 	cmd[0].data = (sleep_val >> 32) & PDC_TIME_UPPER_MASK;
 	cmd[0].data |= 1 << PDC_TIME_VALID_SHIFT;
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 08eb00a..ad3eb187 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -110,14 +110,6 @@
 	return spi;
 }
 
-static int get_sclk(u32 speed_hz, unsigned long *sclk_freq)
-{
-	u32 root_freq[] = { 19200000 };
-
-	*sclk_freq = root_freq[0];
-	return 0;
-}
-
 static int do_spi_clk_cfg(u32 speed_hz, struct spi_geni_master *mas)
 {
 	unsigned long sclk_freq;
@@ -131,14 +123,20 @@
 	clk_sel &= ~CLK_SEL_MSK;
 	m_clk_cfg &= ~CLK_DIV_MSK;
 
-	idx = get_sclk(speed_hz, &sclk_freq);
-	if (idx < 0)
-		return -EINVAL;
+	ret = geni_se_clk_freq_match(&mas->spi_rsc, speed_hz, &idx,
+					&sclk_freq, true);
+	if (ret) {
+		dev_err(mas->dev, "%s: Failed(%d) to find src clk for 0x%x\n",
+						__func__, ret, speed_hz);
+		return ret;
+	}
 
 	div = ((sclk_freq / SPI_OVERSAMPLING) / speed_hz);
 	if (!div)
 		return -EINVAL;
 
+	dev_dbg(mas->dev, "%s: req %u sclk %lu, idx %d, div %d\n", __func__,
+						speed_hz, sclk_freq, idx, div);
 	clk_sel |= (idx & CLK_SEL_MSK);
 	m_clk_cfg |= ((div << CLK_DIV_SHFT) | SER_CLK_EN);
 	ret = clk_set_rate(rsc->se_clk, sclk_freq);
@@ -362,13 +360,13 @@
 	reinit_completion(&mas->xfer_done);
 	/* Speed and bits per word can be overridden per transfer */
 	if (xfer->speed_hz != mas->cur_speed_hz) {
+		mas->cur_speed_hz = xfer->speed_hz;
 		ret = do_spi_clk_cfg(mas->cur_speed_hz, mas);
 		if (ret) {
 			dev_err(mas->dev, "%s:Err setting clks:%d\n",
 								__func__, ret);
 			goto geni_transfer_one_exit;
 		}
-		mas->cur_speed_hz = xfer->speed_hz;
 	}
 
 	setup_fifo_xfer(xfer, mas, slv->mode, spi);
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index dcb41a9..4660e31 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -617,8 +617,8 @@
 	list_add_tail(&req->list, &dep->started_list);
 
 	/* First, prepare a normal TRB, point to the fake buffer */
-	trb = &dep->trb_pool[dep->trb_enqueue & DWC3_TRB_NUM];
-	dep->trb_enqueue++;
+	trb = &dep->trb_pool[dep->trb_enqueue];
+	dwc3_ep_inc_enq(dep);
 	memset(trb, 0, sizeof(*trb));
 
 	req->trb = trb;
@@ -629,8 +629,8 @@
 	req->trb_dma = dwc3_trb_dma_offset(dep, trb);
 
 	/* Second, prepare a Link TRB that points to the first TRB*/
-	trb_link = &dep->trb_pool[dep->trb_enqueue & DWC3_TRB_NUM];
-	dep->trb_enqueue++;
+	trb_link = &dep->trb_pool[dep->trb_enqueue];
+	dwc3_ep_inc_enq(dep);
 	memset(trb_link, 0, sizeof(*trb_link));
 
 	trb_link->bpl = lower_32_bits(req->trb_dma);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index df0427c..19b9cfb 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -164,12 +164,12 @@
 		*index = 0;
 }
 
-static void dwc3_ep_inc_enq(struct dwc3_ep *dep)
+void dwc3_ep_inc_enq(struct dwc3_ep *dep)
 {
 	dwc3_ep_inc_trb(&dep->trb_enqueue);
 }
 
-static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
+void dwc3_ep_inc_deq(struct dwc3_ep *dep)
 {
 	dwc3_ep_inc_trb(&dep->trb_dequeue);
 }
@@ -901,6 +901,7 @@
 
 	req->epnum	= dep->number;
 	req->dep	= dep;
+	req->request.dma = DMA_ERROR_CODE;
 
 	dep->allocated_requests++;
 
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index e973ad3..8d0a5eb 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -98,6 +98,8 @@
 void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
 irqreturn_t dwc3_interrupt(int irq, void *_dwc);
 void dwc3_bh_work(struct work_struct *w);
+void dwc3_ep_inc_enq(struct dwc3_ep *dep);
+void dwc3_ep_inc_deq(struct dwc3_ep *dep);
 
 static inline dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
 		struct dwc3_trb *trb)
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index fc85994..1633d4a 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -880,7 +880,7 @@
 				is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 
 		req->num_mapped_sgs = 0;
-	} else {
+	} else if (req->dma != DMA_ERROR_CODE) {
 		dma_unmap_single(dev, req->dma, req->length,
 				is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 	}
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index a3e0b3b..e57d463 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3438,6 +3438,8 @@
 	ee_len = ext4_ext_get_actual_len(ex);
 	zero_ex1.ee_len = 0;
 	zero_ex2.ee_len = 0;
+	zero_ex1.ee_start_lo = 0;
+	zero_ex2.ee_start_lo = 0;
 
 	trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
 
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index c08d499..de47a29 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3979,6 +3979,7 @@
 
 int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
 {
+#if 0
 	struct super_block *sb = inode->i_sb;
 	ext4_lblk_t first_block, stop_block;
 	struct address_space *mapping = inode->i_mapping;
@@ -4109,6 +4110,12 @@
 out_mutex:
 	inode_unlock(inode);
 	return ret;
+#else
+	/*
+	 * Disabled as per b/28760453
+	 */
+	return -EOPNOTSUPP;
+#endif
 }
 
 int ext4_inode_attach_jinode(struct inode *inode)
diff --git a/include/dt-bindings/clock/qcom,camcc-sdm845.h b/include/dt-bindings/clock/qcom,camcc-sdm845.h
index e16b69a..7218261 100644
--- a/include/dt-bindings/clock/qcom,camcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,camcc-sdm845.h
@@ -102,26 +102,15 @@
 #define CAM_CC_SOC_AHB_CLK					85
 #define CAM_CC_SYS_TMR_CLK					86
 
-#define TITAN_CAM_CC_BPS_BCR					0
-#define TITAN_CAM_CC_CAMNOC_BCR					1
-#define TITAN_CAM_CC_CCI_BCR					2
-#define TITAN_CAM_CC_CPAS_BCR					3
-#define TITAN_CAM_CC_CSI0PHY_BCR				4
-#define TITAN_CAM_CC_CSI1PHY_BCR				5
-#define TITAN_CAM_CC_CSI2PHY_BCR				6
-#define TITAN_CAM_CC_FD_BCR					7
-#define TITAN_CAM_CC_ICP_BCR					8
-#define TITAN_CAM_CC_IFE_0_BCR					9
-#define TITAN_CAM_CC_IFE_1_BCR					10
-#define TITAN_CAM_CC_IFE_LITE_BCR				11
-#define TITAN_CAM_CC_IPE_0_BCR					12
-#define TITAN_CAM_CC_IPE_1_BCR					13
-#define TITAN_CAM_CC_JPEG_BCR					14
-#define TITAN_CAM_CC_LRME_BCR					15
-#define TITAN_CAM_CC_MCLK0_BCR					16
-#define TITAN_CAM_CC_MCLK1_BCR					17
-#define TITAN_CAM_CC_MCLK2_BCR					18
-#define TITAN_CAM_CC_MCLK3_BCR					19
-#define TITAN_CAM_CC_TITAN_TOP_BCR				20
+#define TITAN_CAM_CC_CCI_BCR					0
+#define TITAN_CAM_CC_CPAS_BCR					1
+#define TITAN_CAM_CC_CSI0PHY_BCR				2
+#define TITAN_CAM_CC_CSI1PHY_BCR				3
+#define TITAN_CAM_CC_CSI2PHY_BCR				4
+#define TITAN_CAM_CC_MCLK0_BCR					5
+#define TITAN_CAM_CC_MCLK1_BCR					6
+#define TITAN_CAM_CC_MCLK2_BCR					7
+#define TITAN_CAM_CC_MCLK3_BCR					8
+#define TITAN_CAM_CC_TITAN_TOP_BCR				9
 
 #endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-sdm845.h b/include/dt-bindings/clock/qcom,dispcc-sdm845.h
index 91ea077..42bb59f 100644
--- a/include/dt-bindings/clock/qcom,dispcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,dispcc-sdm845.h
@@ -56,9 +56,6 @@
 #define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC				39
 #define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC				40
 
-#define DISP_CC_MDSS_CORE_BCR					0
-#define DISP_CC_MDSS_GCC_CLOCKS_BCR				1
-#define DISP_CC_MDSS_RSCC_BCR					2
-#define DISP_CC_MDSS_SPDM_BCR					3
+#define DISP_CC_MDSS_RSCC_BCR					0
 
 #endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
index f6f4bc3..678a885 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -204,34 +204,33 @@
 
 
 /* GCC reset clocks */
-#define GCC_GPU_BCR						0
-#define GCC_MMSS_BCR						1
-#define GCC_PCIE_0_BCR						2
-#define GCC_PCIE_1_BCR						3
-#define GCC_PCIE_PHY_BCR					4
-#define GCC_PDM_BCR						5
-#define GCC_PRNG_BCR						6
-#define GCC_QUPV3_WRAPPER_0_BCR					7
-#define GCC_QUPV3_WRAPPER_1_BCR					8
-#define GCC_QUSB2PHY_PRIM_BCR					9
-#define GCC_QUSB2PHY_SEC_BCR					10
-#define GCC_SDCC2_BCR						11
-#define GCC_SDCC4_BCR						12
-#define GCC_TSIF_BCR						13
-#define GCC_UFS_CARD_BCR					14
-#define GCC_UFS_PHY_BCR						15
-#define GCC_USB30_PRIM_BCR					16
-#define GCC_USB30_SEC_BCR					17
-#define GCC_USB3_PHY_PRIM_BCR					18
-#define GCC_USB3PHY_PHY_PRIM_BCR				19
-#define GCC_USB3_DP_PHY_PRIM_BCR				20
-#define GCC_USB3_PHY_SEC_BCR					21
-#define GCC_USB3PHY_PHY_SEC_BCR					22
-#define GCC_USB3_DP_PHY_SEC_BCR					23
-#define GCC_USB_PHY_CFG_AHB2PHY_BCR				24
-#define GCC_PCIE_0_PHY_BCR					25
-#define GCC_PCIE_1_PHY_BCR					26
-#define GCC_SDCC1_BCR						27
+#define GCC_MMSS_BCR						0
+#define GCC_PCIE_0_BCR						1
+#define GCC_PCIE_1_BCR						2
+#define GCC_PCIE_PHY_BCR					3
+#define GCC_PDM_BCR						4
+#define GCC_PRNG_BCR						5
+#define GCC_QUPV3_WRAPPER_0_BCR					6
+#define GCC_QUPV3_WRAPPER_1_BCR					7
+#define GCC_QUSB2PHY_PRIM_BCR					8
+#define GCC_QUSB2PHY_SEC_BCR					9
+#define GCC_SDCC2_BCR						10
+#define GCC_SDCC4_BCR						11
+#define GCC_TSIF_BCR						12
+#define GCC_UFS_CARD_BCR					13
+#define GCC_UFS_PHY_BCR						14
+#define GCC_USB30_PRIM_BCR					15
+#define GCC_USB30_SEC_BCR					16
+#define GCC_USB3_PHY_PRIM_BCR					17
+#define GCC_USB3PHY_PHY_PRIM_BCR				18
+#define GCC_USB3_DP_PHY_PRIM_BCR				19
+#define GCC_USB3_PHY_SEC_BCR					20
+#define GCC_USB3PHY_PHY_SEC_BCR					21
+#define GCC_USB3_DP_PHY_SEC_BCR					22
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR				23
+#define GCC_PCIE_0_PHY_BCR					24
+#define GCC_PCIE_1_PHY_BCR					25
+#define GCC_SDCC1_BCR						26
 
 /* Dummy clocks for rate measurement */
 #define MEASURE_ONLY_SNOC_CLK					0
diff --git a/include/dt-bindings/clock/qcom,videocc-sdm845.h b/include/dt-bindings/clock/qcom,videocc-sdm845.h
index b362852d..21b5092 100644
--- a/include/dt-bindings/clock/qcom,videocc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,videocc-sdm845.h
@@ -28,9 +28,4 @@
 #define VIDEO_CC_VENUS_CTL_CORE_CLK				11
 #define VIDEO_PLL0						12
 
-#define VIDEO_CC_INTERFACE_BCR					0
-#define VIDEO_CC_VCODEC0_BCR					1
-#define VIDEO_CC_VCODEC1_BCR					2
-#define VIDEO_CC_VENUS_BCR					3
-
 #endif
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 744ea4f..2b8b6e0 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -125,7 +125,15 @@
  * BVEC_POOL_IDX()
  */
 #define BIO_RESET_BITS	10
-#define BIO_INLINECRYPT 15
+
+
+/*
+ * Added for request-based dm, which needs to perform post-processing.
+ * This flag ensures blk_update_request() does not free the bios or the
+ * request; that is done at the dm level.
+ */
+#define BIO_DONTFREE	10
+#define BIO_INLINECRYPT	11
 
 /*
  * We support 6 different bvec pools, the last one is magic in that it
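A minimal sketch of how a request-based dm clone path might apply the new flag; the helper name is illustrative and not part of this patch:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical helper: mark each bio of a clone so that
 * blk_update_request() leaves the bios and request alive for dm
 * post-processing, per the BIO_DONTFREE comment above. */
static void mark_clone_bios_dontfree(struct request *clone)
{
	struct bio *bio;

	__rq_for_each_bio(bio, clone)
		bio_set_flag(bio, BIO_DONTFREE);
}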
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e47a7f7..fb910c6 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -816,6 +816,7 @@
 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 			 struct scsi_ioctl_command __user *);
 
+extern void blk_recalc_rq_segments(struct request *rq);
 extern int blk_queue_enter(struct request_queue *q, bool nowait);
 extern void blk_queue_exit(struct request_queue *q);
 extern void blk_start_queue(struct request_queue *q);
@@ -1031,6 +1032,8 @@
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 
 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
+extern int blk_rq_map_sg_no_cluster(struct request_queue *q, struct request *rq,
+				struct scatterlist *sglist);
 extern void blk_dump_rq_flags(struct request *, char *);
 extern long nr_blockdev_pages(void);
 
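blk_rq_map_sg_no_cluster() mirrors blk_rq_map_sg() but builds the scatterlist without merging physically contiguous segments. A hedged caller sketch; the function name and error policy are illustrative:

#include <linux/blkdev.h>
#include <linux/scatterlist.h>

static int map_rq_unclustered(struct request_queue *q, struct request *rq,
			      struct scatterlist *sgl)
{
	/* Returns the number of sg entries populated, unclustered. */
	int nents = blk_rq_map_sg_no_cluster(q, rq, sgl);

	return nents > 0 ? nents : -EIO;
}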
diff --git a/include/linux/coresight-stm.h b/include/linux/coresight-stm.h
index 4619158..cd53673 100644
--- a/include/linux/coresight-stm.h
+++ b/include/linux/coresight-stm.h
@@ -70,6 +70,7 @@
  * @stmheer:		settings for register STMHEER.
  * @stmheter:		settings for register STMHETER.
  * @stmhebsr:		settings for register STMHEBSR.
+ * @ch_alloc_fail_count:	Number of channel allocation failures over time.
  */
 struct stm_drvdata {
 	void __iomem		*base;
@@ -90,6 +91,7 @@
 	u32			stmheer;
 	u32			stmheter;
 	u32			stmhebsr;
+	u32			ch_alloc_fail_count;
 };
 
 #ifdef CONFIG_CORESIGHT_STM
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index cf86f52..20e26d9 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -650,4 +650,12 @@
 	return (n << SECTOR_SHIFT);
 }
 
+/*-----------------------------------------------------------------
+ * Helper for block layer and dm core operations
+ *-----------------------------------------------------------------
+ */
+void dm_dispatch_request(struct request *rq);
+void dm_kill_unmapped_request(struct request *rq, int error);
+void dm_end_request(struct request *clone, int error);
+
 #endif	/* _LINUX_DEVICE_MAPPER_H */
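A hedged sketch of the dispatch path these newly exported helpers enable for a request-based dm target; the function and its "mapped" predicate are illustrative:

#include <linux/device-mapper.h>

static void my_target_issue(struct request *clone, bool mapped)
{
	if (mapped)
		dm_dispatch_request(clone);	/* hand the clone to the block layer */
	else
		dm_kill_unmapped_request(clone, -EIO);	/* complete with error */
}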
diff --git a/include/linux/msm-sps.h b/include/linux/msm-sps.h
index 4a9b8a8..662cd9f 100644
--- a/include/linux/msm-sps.h
+++ b/include/linux/msm-sps.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -430,7 +430,7 @@
 
 	u32 options;
 	phys_addr_t phys_addr;
-	void *virt_addr;
+	void __iomem *virt_addr;
 	u32 virt_size;
 	u32 irq;
 	u32 num_pipes;
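Retagging virt_addr as __iomem means sparse now expects MMIO accessors rather than plain dereferences. A sketch, assuming the enclosing structure is the BAM properties struct and with an illustrative register offset:

#include <linux/io.h>
#include <linux/msm-sps.h>

/* Hypothetical read of a BAM register through the __iomem pointer. */
static u32 sps_peek_reg(struct sps_bam_props *props, u32 reg_offset)
{
	return readl_relaxed(props->virt_addr + reg_offset);
}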
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index 70736e1..33a95d2 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -37,7 +37,7 @@
  */
 
 struct mtd_partition {
-	const char *name;		/* identifier string */
+	char *name;			/* identifier string */
 	uint64_t size;			/* partition size */
 	uint64_t offset;		/* offset within the master MTD space */
 	uint32_t mask_flags;		/* master MTD flags to mask out for this partition */
@@ -97,7 +97,7 @@
 		      deregister_mtd_parser)
 
 int mtd_is_partition(const struct mtd_info *mtd);
-int mtd_add_partition(struct mtd_info *master, const char *name,
+int mtd_add_partition(struct mtd_info *master, char *name,
 		      long long offset, long long length);
 int mtd_del_partition(struct mtd_info *master, int partno);
 uint64_t mtd_get_device_size(const struct mtd_info *mtd);
diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
index 5947107..6c9ddcd 100644
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/qcom-geni-se.h
@@ -565,6 +565,41 @@
 			   unsigned long ab, unsigned long ib);
 
 /**
+ * geni_se_clk_tbl_get() - Get the clock table to program DFS
+ * @rsc:	Resource for which the clock table is requested.
+ * @tbl:	Table in which the output is returned.
+ *
+ * This function is called by the protocol drivers to determine the different
+ * clock frequencies supported by the Serial Engine core clock. The protocol
+ * drivers use the output to determine the clock frequency index to be
+ * programmed into DFS.
+ *
+ * Return:	number of valid performance levels in the table on success,
+ *		standard Linux error codes on failure.
+ */
+int geni_se_clk_tbl_get(struct se_geni_rsc *rsc, unsigned long **tbl);
+
+/**
+ * geni_se_clk_freq_match() - Get the matching or closest SE clock frequency
+ * @rsc:	Resource for which the clock frequency is requested.
+ * @req_freq:	Requested clock frequency.
+ * @index:	Index of the resultant frequency in the table.
+ * @res_freq:	Resultant frequency, which either matches or is closest to
+ *		the requested frequency.
+ * @exact:	Flag requiring the result to be an exact multiple of the
+ *		requested frequency.
+ *
+ * This function is called by the protocol drivers to determine the matching
+ * or closest frequency of the Serial Engine clock to be selected in order
+ * to meet the performance requirements.
+ *
+ * Return:	0 on success, standard Linux error codes on failure.
+ */
+int geni_se_clk_freq_match(struct se_geni_rsc *rsc, unsigned long req_freq,
+			   unsigned int *index, unsigned long *res_freq,
+			   bool exact);
+
+/**
  * geni_se_tx_dma_prep() - Prepare the Serial Engine for TX DMA transfer
  * @wrapper_dev:	QUPv3 Wrapper Device to which the TX buffer is mapped.
  * @base:		Base address of the SE register block.
@@ -796,6 +831,19 @@
 	return -ENXIO;
 }
 
+static inline int geni_se_clk_tbl_get(struct se_geni_rsc *rsc,
+					unsigned long **tbl)
+{
+	return -ENXIO;
+}
+
+static inline int geni_se_clk_freq_match(struct se_geni_rsc *rsc,
+			unsigned long req_freq, unsigned int *index,
+			unsigned long *res_freq, bool exact)
+{
+	return -ENXIO;
+}
+
 static inline int geni_se_tx_dma_prep(struct device *wrapper_dev,
 	void __iomem *base, void *tx_buf, int tx_len, dma_addr_t *tx_dma)
 {
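A hedged usage sketch for the two new clock helpers; the 19.2 MHz target rate and function name are illustrative:

#include <linux/qcom-geni-se.h>

/* Hypothetical protocol-driver step: fetch the DFS clock table, then
 * ask for the index whose frequency best matches the requested rate. */
static int pick_dfs_index(struct se_geni_rsc *rsc, unsigned int *index)
{
	unsigned long *tbl;
	unsigned long res_freq;
	int levels = geni_se_clk_tbl_get(rsc, &tbl);

	if (levels <= 0)
		return levels ? levels : -ENXIO;
	return geni_se_clk_freq_match(rsc, 19200000UL, index,
				      &res_freq, false);
}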
diff --git a/include/linux/qcrypto.h b/include/linux/qcrypto.h
index 252464a..ff0e64c 100644
--- a/include/linux/qcrypto.h
+++ b/include/linux/qcrypto.h
@@ -15,6 +15,7 @@
 
 #include <linux/crypto.h>
 #include <crypto/hash.h>
+#include <crypto/skcipher.h>
 
 #define QCRYPTO_CTX_KEY_MASK		0x000000ff
 #define QCRYPTO_CTX_USE_HW_KEY		0x00000001
@@ -29,7 +30,7 @@
 int qcrypto_ahash_set_device(struct ahash_request *req, unsigned int dev);
 /*int qcrypto_aead_set_device(struct aead_request *req, unsigned int dev);*/
 
-int qcrypto_cipher_set_flag(struct ablkcipher_request *req, unsigned int flags);
+int qcrypto_cipher_set_flag(struct skcipher_request *req, unsigned int flags);
 int qcrypto_ahash_set_flag(struct ahash_request *req, unsigned int flags);
 /*int qcrypto_aead_set_flag(struct aead_request *req, unsigned int flags);*/
 
@@ -47,16 +48,16 @@
 int qcrypto_get_num_engines(void);
 void qcrypto_get_engine_list(size_t num_engines,
 				struct crypto_engine_entry *arr);
-int qcrypto_cipher_set_device_hw(struct ablkcipher_request *req,
+int qcrypto_cipher_set_device_hw(struct skcipher_request *req,
 				unsigned int fde_pfe,
 				unsigned int hw_inst);
 
 
 struct qcrypto_func_set {
-	int (*cipher_set)(struct ablkcipher_request *req,
+	int (*cipher_set)(struct skcipher_request *req,
 			unsigned int fde_pfe,
 			unsigned int hw_inst);
-	int (*cipher_flag)(struct ablkcipher_request *req, unsigned int flags);
+	int (*cipher_flag)(struct skcipher_request *req, unsigned int flags);
 	int (*get_num_engines)(void);
 	void (*get_engine_list)(size_t num_engines,
 				struct crypto_engine_entry *arr);
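On the caller side, the ablkcipher-to-skcipher migration means requests are now allocated through the skcipher API before the qcrypto flag helpers are invoked. A hedged sketch; "cbc(aes)" is an illustrative algorithm name:

#include <crypto/skcipher.h>
#include <linux/qcrypto.h>

static int qcrypto_flag_demo(void)
{
	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	struct skcipher_request *req;
	int ret;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_skcipher(tfm);
		return -ENOMEM;
	}
	/* The flag constant comes from the masks defined in this header. */
	ret = qcrypto_cipher_set_flag(req, QCRYPTO_CTX_USE_HW_KEY);
	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
	return ret;
}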
diff --git a/include/linux/sde_rsc.h b/include/linux/sde_rsc.h
index f921909..1450caa 100644
--- a/include/linux/sde_rsc.h
+++ b/include/linux/sde_rsc.h
@@ -179,13 +179,14 @@
  * sde_rsc_client_vote() - ab/ib vote from rsc client
  *
  * @client:	 Client pointer provided by sde_rsc_client_create().
+ * @bus_id:	 data bus identifier.
  * @ab:		 aggregated bandwidth vote from client.
  * @ib:		 instant bandwidth vote from client.
  *
  * Return: error code.
  */
 int sde_rsc_client_vote(struct sde_rsc_client *caller_client,
-	u64 ab_vote, u64 ib_vote);
+	u32 bus_id, u64 ab_vote, u64 ib_vote);
 
 /**
  * sde_rsc_register_event - register a callback function for an event
@@ -243,7 +244,7 @@
 }
 
 static inline int sde_rsc_client_vote(struct sde_rsc_client *caller_client,
-	u64 ab_vote, u64 ib_vote)
+	u32 bus_id, u64 ab_vote, u64 ib_vote)
 {
 	return 0;
 }
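A hedged caller sketch for the widened vote API; the bus_id value and bandwidth figures are illustrative, not mandated by this patch:

#include <linux/sde_rsc.h>

/* Hypothetical per-bus vote after the bus_id addition. */
static int vote_data_path(struct sde_rsc_client *client, u32 bus_id)
{
	return sde_rsc_client_vote(client, bus_id,
				   6400000ULL,	/* ab vote */
				   6400000ULL);	/* ib vote */
}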
diff --git a/include/media/msm_vidc.h b/include/media/msm_vidc.h
index 8053c8a..bb5a21c 100644
--- a/include/media/msm_vidc.h
+++ b/include/media/msm_vidc.h
@@ -49,6 +49,7 @@
 	HAL_BUFFER_INTERNAL_PERSIST = 0x200,
 	HAL_BUFFER_INTERNAL_PERSIST_1 = 0x400,
 	HAL_BUFFER_INTERNAL_CMD_QUEUE = 0x800,
+	HAL_BUFFER_INTERNAL_RECON = 0x1000,
 };
 
 struct dma_mapping_info {
@@ -60,15 +61,17 @@
 };
 
 struct msm_smem {
-	int mem_type;
-	size_t size;
+	u32 refcount;
+	int fd;
+	void *dma_buf;
+	void *handle;
 	void *kvaddr;
-	ion_phys_addr_t device_addr;
+	u32 device_addr;
+	unsigned int offset;
+	unsigned int size;
 	unsigned long flags;
-	void *smem_priv;
 	enum hal_buffer buffer_type;
 	struct dma_mapping_info mapping_info;
-	unsigned int offset;
 };
 
 enum smem_cache_ops {
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 1ea6e0d..bf8f149 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -881,6 +881,11 @@
 	TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff, need_idle)
 );
 
+DEFINE_EVENT(sched_task_util, sched_task_util_boosted,
+	TP_PROTO(struct task_struct *p, int task_cpu, unsigned long task_util, int nominated_cpu, int target_cpu, int ediff, bool need_idle),
+	TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff, need_idle)
+);
+
 DEFINE_EVENT(sched_task_util, sched_task_util_overutilzed,
 	TP_PROTO(struct task_struct *p, int task_cpu, unsigned long task_util, int nominated_cpu, int target_cpu, int ediff, bool need_idle),
 	TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff, need_idle)
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index ab38f9e..eb7e0c6 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -19,7 +19,6 @@
 #define __MSM_DRM_H__
 
 #include "drm.h"
-#include <stddef.h>
 #include "sde_drm.h"
 
 #if defined(__cplusplus)
diff --git a/include/uapi/media/cam_defs.h b/include/uapi/media/cam_defs.h
index a4557d1..5f0f070 100644
--- a/include/uapi/media/cam_defs.h
+++ b/include/uapi/media/cam_defs.h
@@ -7,14 +7,15 @@
 
 
 /* camera op codes */
-#define CAM_COMMON_OPCODE_BASE                  0
-#define CAM_QUERY_CAP                           1
-#define CAM_ACQUIRE_DEV                         2
-#define CAM_START_DEV                           3
-#define CAM_STOP_DEV                            4
-#define CAM_CONFIG_DEV                          5
-#define CAM_RELEASE_DEV                         6
-#define CAM_COMMON_OPCODE_MAX                   7
+#define CAM_COMMON_OPCODE_BASE                  0x100
+#define CAM_QUERY_CAP                           (CAM_COMMON_OPCODE_BASE + 0x1)
+#define CAM_ACQUIRE_DEV                         (CAM_COMMON_OPCODE_BASE + 0x2)
+#define CAM_START_DEV                           (CAM_COMMON_OPCODE_BASE + 0x3)
+#define CAM_STOP_DEV                            (CAM_COMMON_OPCODE_BASE + 0x4)
+#define CAM_CONFIG_DEV                          (CAM_COMMON_OPCODE_BASE + 0x5)
+#define CAM_RELEASE_DEV                         (CAM_COMMON_OPCODE_BASE + 0x6)
+#define CAM_SD_SHUTDOWN                         (CAM_COMMON_OPCODE_BASE + 0x7)
+#define CAM_COMMON_OPCODE_MAX                   (CAM_COMMON_OPCODE_BASE + 0x8)
 
 /* camera handle type */
 #define CAM_HANDLE_USER_POINTER                 1
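Since the opcodes now start at 0x100 rather than 0, range checks must use the base and max symbols instead of assuming small integers. An illustrative validation helper:

/* Valid opcodes sit strictly between BASE and MAX after the rebase. */
static bool cam_opcode_valid(unsigned int op_code)
{
	return op_code > CAM_COMMON_OPCODE_BASE &&
	       op_code < CAM_COMMON_OPCODE_MAX;
}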
diff --git a/include/uapi/media/msm_vidc.h b/include/uapi/media/msm_vidc.h
index 7161102..038dd48 100644
--- a/include/uapi/media/msm_vidc.h
+++ b/include/uapi/media/msm_vidc.h
@@ -354,6 +354,15 @@
 	MSM_VIDC_TRANSFER_SRGB = 13,
 	MSM_VIDC_TRANSFER_BT_2020_10 = 14,
 	MSM_VIDC_TRANSFER_BT_2020_12 = 15,
+#define MSM_VIDC_TRANSFER_SMPTE_ST2084 \
+	MSM_VIDC_TRANSFER_SMPTE_ST2084
+	MSM_VIDC_TRANSFER_SMPTE_ST2084 = 16,
+#define MSM_VIDC_TRANSFER_SMPTE_ST428_1 \
+	MSM_VIDC_TRANSFER_SMPTE_ST428_1
+	MSM_VIDC_TRANSFER_SMPTE_ST428_1 = 17,
+#define MSM_VIDC_TRANSFER_HLG \
+	MSM_VIDC_TRANSFER_HLG
+	MSM_VIDC_TRANSFER_HLG = 18,
 };
 
 enum msm_vidc_pixel_depth {
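The self-referential #define beside each new enumerator is the usual UAPI idiom: it gives userspace a preprocessor symbol to feature-test against older installed headers. A sketch of the consumer side:

#include <media/msm_vidc.h>

/* Illustrative fallback when built against headers predating HLG. */
#ifdef MSM_VIDC_TRANSFER_HLG
static const int default_transfer = MSM_VIDC_TRANSFER_HLG;
#else
static const int default_transfer = MSM_VIDC_TRANSFER_BT_2020_10;
#endif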
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e4b706d..45f404b 100755
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6870,8 +6870,7 @@
 			if (new_util > capacity_orig_of(i))
 				continue;
 
-			cpu_idle_idx = cpu_rq(i)->nr_running ? -1 :
-				       idle_get_state_idx(cpu_rq(i));
+			cpu_idle_idx = idle_get_state_idx(cpu_rq(i));
 
 			if (!need_idle &&
 			    add_capacity_margin(new_util_cum) <
@@ -6999,6 +6998,18 @@
 			return target_cpu;
 		}
 
+		/*
+		 * We always want to migrate the task to the best CPU when
+		 * placement boost is active.
+		 */
+		if (placement_boost) {
+			trace_sched_task_util_boosted(p, task_cpu(p),
+						task_util(p),
+						target_cpu,
+						target_cpu, 0, need_idle);
+			return target_cpu;
+		}
+
 #ifdef CONFIG_SCHED_WALT
 		if (walt_disabled || !sysctl_sched_use_walt_cpu_util)
 			task_util_boosted = 0;
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index ec90319..65b34b4 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1970,9 +1970,6 @@
 
 				if (sysctl_sched_cstate_aware)
 					cpu_idle_idx =
-					    (cpu == smp_processor_id() ||
-					     cpu_rq(cpu)->nr_running) ?
-					     -1 :
 					     idle_get_state_idx(cpu_rq(cpu));
 
 				if (add_capacity_margin(new_util_cum) <
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index d4a0612..566e103 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1491,6 +1491,10 @@
 static inline int idle_get_state_idx(struct rq *rq)
 {
 	WARN_ON(!rcu_read_lock_held());
+
+	if (rq->nr_running || cpu_of(rq) == raw_smp_processor_id())
+		return -1;
+
 	return rq->idle_state_idx;
 }
 #else
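Folding the busy/local-CPU guard into idle_get_state_idx() is what lets the fair.c and rt.c hunks above drop their open-coded checks; callers now read uniformly, as in this sketch:

/* -1 now consistently means "busy or the local CPU"; any other value
 * is a valid idle-state index. idle_get_state_idx() asserts that
 * rcu_read_lock() is held. */
static int query_idle_state_idx(int cpu)
{
	int idx;

	rcu_read_lock();
	idx = idle_get_state_idx(cpu_rq(cpu));
	rcu_read_unlock();
	return idx;
}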
diff --git a/net/netfilter/xt_HARDIDLETIMER.c b/net/netfilter/xt_HARDIDLETIMER.c
index eb5b452..fc0b83f 100644
--- a/net/netfilter/xt_HARDIDLETIMER.c
+++ b/net/netfilter/xt_HARDIDLETIMER.c
@@ -180,6 +180,8 @@
 		pr_debug("couldn't add file to sysfs");
 		goto out_free_attr;
 	}
+	/* notify userspace */
+	kobject_uevent(hardidletimer_tg_kobj, KOBJ_ADD);
 
 	list_add(&info->timer->entry, &hardidletimer_tg_list);
 
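Userspace can observe the new KOBJ_ADD notification over the kobject uevent netlink family; a hedged standalone sketch with error handling elided:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>

int main(void)
{
	struct sockaddr_nl addr = {
		.nl_family = AF_NETLINK,
		.nl_groups = 1,		/* kernel uevent multicast group */
	};
	char buf[4096];
	int fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_KOBJECT_UEVENT);

	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	for (;;) {
		ssize_t len = recv(fd, buf, sizeof(buf) - 1, 0);

		if (len > 0 && !strncmp(buf, "add@", 4))
			printf("%.*s\n", (int)len, buf);
	}
}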
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index f11aa28..04a1b97 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -301,6 +301,8 @@
 		pr_debug("couldn't add file to sysfs");
 		goto out_free_attr;
 	}
+	/* notify userspace */
+	kobject_uevent(idletimer_tg_kobj, KOBJ_ADD);
 
 	list_add(&info->timer->entry, &idletimer_tg_list);