Merge "drm/msm/sde: reset ubwc static config during enable" into msm-4.9
diff --git a/Documentation/devicetree/bindings/arm/msm/cmd-db.txt b/Documentation/devicetree/bindings/arm/msm/cmd-db.txt
index b989d8a..e704d70 100644
--- a/Documentation/devicetree/bindings/arm/msm/cmd-db.txt
+++ b/Documentation/devicetree/bindings/arm/msm/cmd-db.txt
@@ -24,10 +24,12 @@
 	Value type: <prop-encoded-array>
 	Definition: First element is the base address of shared memory
 		Second element is the size of the shared memory region
+		Points to the dictionary address that houses the command DB
+		start address and the size of the command DB region
 
 Example:
 
 qcom,cmd-db@861e0000 {
 	compatible = "qcom,cmd-db";
-	reg = <0x861e0000 0x4000>;
+	reg = <0xc3f000c 0x8>;
 }
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-smmu.txt b/Documentation/devicetree/bindings/media/video/msm-cam-smmu.txt
new file mode 100644
index 0000000..2ed913c
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-smmu.txt
@@ -0,0 +1,130 @@
+* Qualcomm Technologies, Inc. MSM Camera SMMU
+
+The MSM camera SMMU device provides SMMU context bank definitions
+for all HW blocks that need to map IOVA to physical memory. These
+definitions consist of various properties that define how the
+IOVA address space is laid out for each HW block in the camera
+subsystem.
+
+=======================
+Required Node Structure
+=======================
+The camera SMMU device must be described in three levels of device nodes. The
+first level describes the overall SMMU device. Within it, second level nodes
+describe individual context banks that map different stream ids. There can
+also be second level nodes describing firmware device nodes. Each HW block
+such as IFE or ICP maps onto one of these second level device nodes. All
+context bank specific properties that define how the IOVA space is laid out
+are contained in third level device nodes nested within the second level
+device nodes.
+
+During kernel initialization, all of these devices are probed recursively,
+and a device pointer is created for each context bank to keep track of its
+IOVA mapping information.
+
+Duplicate regions of the same type are not allowed within the same
+context bank. All context banks must contain an IO region at the very least.
+
+==================================
+First Level Node - CAM SMMU device
+==================================
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,msm-cam-smmu".
+
+===================================================================
+Second Level Node - CAM SMMU context bank device or firmware device
+===================================================================
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,msm-cam-smmu-cb" or "qcom,msm-cam-smmu-fw-dev".
+
+- memory-region
+  Usage: optional
+  Value type: <phandle>
+  Definition: Should specify the phandle of the memory region used for
+    firmware allocation.
+
+- iommus
+  Usage: required
+  Value type: <phandle>
+  Definition: Should specify the phandle of the iommu sid.
+
+- label
+  Usage: required
+  Value type: <string>
+  Definition: Should specify a string label to identify the context bank.
+
+=============================================
+Third Level Node - CAM SMMU memory map device
+=============================================
+- iova-region-name
+  Usage: required
+  Value type: <string>
+  Definition: Should specify a string label to identify the IOVA region.
+
+- iova-region-start
+  Usage: required
+  Value type: <u32>
+  Definition: Should specify start IOVA for region.
+
+- iova-region-len
+  Usage: required
+  Value type: <u32>
+  Definition: Should specify length for IOVA region.
+
+- iova-region-id
+  Usage: required
+  Value type: <u32>
+  Definition: Should specify the numerical identifier for IOVA region.
+    Allowed values are: 0x00 to 0x03
+      - Firmware region: 0x00
+      - Shared region: 0x01
+      - Scratch region: 0x02
+      - IO region: 0x03
+
+Example:
+	qcom,cam_smmu@0 {
+		compatible = "qcom,msm-cam-smmu";
+
+		msm_cam_smmu_icp {
+			compatible = "qcom,msm-cam-smmu-cb";
+			iommus = <&apps_smmu 0x1078>,
+				<&apps_smmu 0x1020>,
+				<&apps_smmu 0x1028>,
+				<&apps_smmu 0x1040>,
+				<&apps_smmu 0x1048>,
+				<&apps_smmu 0x1030>,
+				<&apps_smmu 0x1050>;
+			label = "icp";
+			icp_iova_mem_map: iova-mem-map {
+				iova-mem-region-firmware {
+					/* Firmware region is 5MB */
+					iova-region-name = "firmware";
+					iova-region-start = <0x0>;
+					iova-region-len = <0x500000>;
+					iova-region-id = <0x0>;
+					status = "ok";
+				};
+
+				iova-mem-region-shared {
+					/* Shared region is 100MB long */
+					iova-region-name = "shared";
+					iova-region-start = <0x7400000>;
+					iova-region-len = <0x6400000>;
+					iova-region-id = <0x1>;
+					status = "ok";
+				};
+
+				iova-mem-region-io {
+					/* IO region is approximately 3.5 GB */
+					iova-region-name = "io";
+					iova-region-start = <0xd800000>;
+					iova-region-len = <0xd2800000>;
+					iova-region-id = <0x3>;
+					status = "ok";
+				};
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
index ea828da..bc844de 100644
--- a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
+++ b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
@@ -16,6 +16,10 @@
 		      If "halt_base" is in same 4K pages this register then
 		      this will be defined else "halt_q6", "halt_modem",
 		      "halt_nc" is required.
+		      "pdc_sync" is the power domain register introduced in
+		      sdm845 to control the power domains of subsystems.
+		      If an alternative reset is required, "alt_reset" maps to
+		      mss_alt_ares.
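+		      For example, the sdm845.dtsi update later in this patch
+		      uses:
+		        reg-names = "qdsp6_base", "halt_q6", "halt_modem",
+		                    "halt_nc", "rmb_base", "restart_reg",
+		                    "pdc_sync", "alt_reset";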
 - interrupts:         The modem watchdog interrupt
 - vdd_cx-supply:      Reference to the regulator that supplies the vdd_cx domain.
 - vdd_cx-voltage:     Voltage corner/level(max) for cx rail.
diff --git a/Documentation/devicetree/bindings/thermal/qpnp-temp-alarm.txt b/Documentation/devicetree/bindings/thermal/qpnp-temp-alarm.txt
index bb20644..fc8ec87 100644
--- a/Documentation/devicetree/bindings/thermal/qpnp-temp-alarm.txt
+++ b/Documentation/devicetree/bindings/thermal/qpnp-temp-alarm.txt
@@ -12,10 +12,17 @@
 - interrupts:      PMIC temperature alarm interrupt
 - label:           A string used as a descriptive name for this thermal device.
 		    This name should be 19 characters or less.
+- #thermal-sensor-cells: Must be 0. Please refer to
+			 <devicetree/bindings/thermal/thermal.txt> for more
+			 details.
 
 Required structure:
 - A qcom,qpnp-temp-alarm node must be a child of an SPMI node that has specified
    the spmi-slave-container property
+- A top level device tree node named "thermal-zones" must exist. It must
+   contain a subnode with a property named "thermal-sensors" which is assigned
+   a phandle to the qpnp-temp-alarm device node. See
+   <devicetree/bindings/thermal/thermal.txt> for more details.
 
 Optional properties:
 - qcom,channel-num:    VADC channel number associated with the PMIC DIE_TEMP thermistor.
@@ -38,11 +45,6 @@
 			 1 = 50 Hz
 			 2 = 25 Hz
 			 3 = 12.5 Hz
-- qcom,allow-override: Boolean which controls the ability of software to
-			override shutdowns.  If present, then software is
-			allowed to override automatic PMIC hardware stage 2 and
-			stage 3 over temperature shutdowns.  Otherwise, software
-			is not allowed to override automatic shutdown.
 - qcom,default-temp:   Specifies the default temperature in millicelsius to use
 			if no ADC channel is present to read the real time
 			temperature.
@@ -64,7 +66,7 @@
 		#address-cells = <1>;
 		#size-cells = <1>;
 
-		qcom,temp-alarm@2400 {
+		pm8941_tz: qcom,temp-alarm@2400 {
 			compatible = "qcom,qpnp-temp-alarm";
 			reg = <0x2400 0x100>;
 			interrupts = <0x0 0x24 0x0>;
@@ -72,6 +74,36 @@
 			qcom,channel-num = <8>;
 			qcom,threshold-set = <0>;
 			qcom,temp_alarm-vadc = <&pm8941_vadc>;
+			#thermal-sensor-cells = <0>;
+		};
+	};
+};
+
+Below is an example thermal zone definition for the temperature alarm
+peripheral.
+thermal-zones {
+	pm8941_tz {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "step_wise";
+		thermal-sensors = <&pm8941_tz>;
+
+		trips {
+			pm8941-trip0 {
+				temperature = <105000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+			pm8941-trip1 {
+				temperature = <125000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+			pm8941-trip2 {
+				temperature = <145000>;
+				hysteresis = <0>;
+				type = "critical";
+			};
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-rumi.dts b/arch/arm/boot/dts/qcom/sdxpoorwills-rumi.dts
index 967e71f..78a26c2 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-rumi.dts
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-rumi.dts
@@ -26,4 +26,5 @@
 &blsp1_uart2 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&uart2_console_active>;
+	status = "ok";
 };
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
index 0afa5a8..0078617 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
@@ -153,5 +153,8 @@
 		reg = <0x831000 0x200>;
 		interrupts = <0 26 0>;
 		status = "disabled";
+		clocks = <&clock_gcc GCC_BLSP1_UART2_APPS_CLK>,
+			<&clock_gcc GCC_BLSP1_AHB_CLK>;
+		clock-names = "core", "iface";
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index c32324f..ff2cc3e 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -11,7 +11,8 @@
 	sdm845-v2-cdp.dtb \
 	sdm845-qrd.dtb \
 	sdm845-4k-panel-mtp.dtb \
-	sdm845-4k-panel-cdp.dtb
+	sdm845-4k-panel-cdp.dtb \
+	sdm845-4k-panel-qrd.dtb
 
 dtb-$(CONFIG_ARCH_SDM830) += sdm830-sim.dtb \
 	sdm830-rumi.dtb \
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
index f1501fa..16d2474 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
@@ -61,7 +61,7 @@
 		reg = <0x15000000 0x80000>,
 			<0x150c2000 0x20>;
 		reg-names = "base", "tcu-base";
-		#iommu-cells = <1>;
+		#iommu-cells = <2>;
 		qcom,skip-init;
 		#global-interrupts = <1>;
 		#size-cells = <1>;
@@ -339,6 +339,6 @@
 		 * This SID belongs to PCIE. We can't use a fake SID for
 		 * the apps_smmu device.
 		 */
-		iommus = <&apps_smmu 0x1c03>;
+		iommus = <&apps_smmu 0x1c03 0>;
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi b/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi
index 4036ce5..655f447 100644
--- a/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi
@@ -362,7 +362,7 @@
 		compatible = "qcom,msm-audio-ion";
 		qcom,smmu-version = <2>;
 		qcom,smmu-enabled;
-		iommus = <&apps_smmu 0x1821>;
+		iommus = <&apps_smmu 0x1821 0x0>;
 	};
 
 	qcom,msm-adsp-loader {
diff --git a/arch/arm64/boot/dts/qcom/pm8998.dtsi b/arch/arm64/boot/dts/qcom/pm8998.dtsi
index 15db8da..b119305 100644
--- a/arch/arm64/boot/dts/qcom/pm8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8998.dtsi
@@ -62,11 +62,12 @@
 			};
 		};
 
-		qcom,temp-alarm@2400 {
+		pm8998_tz: qcom,temp-alarm@2400 {
 			compatible = "qcom,qpnp-temp-alarm";
 			reg = <0x2400 0x100>;
 			interrupts = <0x0 0x24 0x0 IRQ_TYPE_EDGE_RISING>;
 			label = "pm8998_tz";
+			#thermal-sensor-cells = <0>;
 		};
 
 		pm8998_gpios: pinctrl@c000 {
@@ -197,3 +198,30 @@
 		#size-cells = <0>;
 	};
 };
+
+&thermal_zones {
+	pm8998_temp_alarm: pm8998_tz {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "step_wise";
+		thermal-sensors = <&pm8998_tz>;
+
+		trips {
+			pm8998_trip0: pm8998-trip0 {
+				temperature = <105000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+			pm8998_trip1: pm8998-trip1 {
+				temperature = <125000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+			pm8998_trip2: pm8998-trip2 {
+				temperature = <145000>;
+				hysteresis = <0>;
+				type = "critical";
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/pmi8998.dtsi b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
index 923804f..b53f7ac 100644
--- a/arch/arm64/boot/dts/qcom/pmi8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
@@ -31,11 +31,12 @@
 			reg = <0x800 0x100>;
 		};
 
-		qcom,temp-alarm@2400 {
+		pmi8998_tz: qcom,temp-alarm@2400 {
 			compatible = "qcom,qpnp-temp-alarm";
 			reg = <0x2400 0x100>;
 			interrupts = <0x2 0x24 0x0 IRQ_TYPE_EDGE_RISING>;
 			label = "pmi8998_tz";
+			#thermal-sensor-cells = <0>;
 		};
 
 		pmi8998_gpios: pinctrl@c000 {
@@ -796,4 +797,28 @@
 			};
 		};
 	};
+
+	pmi8998_tz {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&pmi8998_tz>;
+
+		trips {
+			pmi8998_trip0: pmi8998-trip0 {
+				temperature = <105000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+			pmi8998_trip1: pmi8998-trip1 {
+				temperature = <125000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+			pmi8998_trip2: pmi8998-trip2 {
+				temperature = <145000>;
+				hysteresis = <0>;
+				type = "critical";
+			};
+		};
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-qrd.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-qrd.dts
new file mode 100644
index 0000000..6171c7b
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-qrd.dts
@@ -0,0 +1,23 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdm845.dtsi"
+#include "sdm845-qrd.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. sdm845 4K Display Panel QRD";
+	compatible = "qcom,sdm845-qrd", "qcom,sdm845", "qcom,qrd";
+	qcom,board-id = <11 1>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi b/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
index 9d59a16..69dfe46 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
@@ -17,7 +17,7 @@
 #include <dt-bindings/clock/qcom,audio-ext-clk.h>
 
 &msm_audio_ion {
-	iommus = <&apps_smmu 0x1821>;
+	iommus = <&apps_smmu 0x1821 0x0>;
 	qcom,smmu-sid-mask = /bits/ 64 <0xf>;
 };
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
index 434de76..4d0c9b2 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
@@ -380,6 +380,32 @@
 
 	};
 
+	hwevent: hwevent@0x014066f0 {
+		compatible = "qcom,coresight-hwevent";
+		reg = <0x14066f0 0x4>,
+		      <0x14166f0 0x4>,
+		      <0x1406038 0x4>,
+		      <0x1416038 0x4>;
+		reg-names = "ddr-ch0-cfg", "ddr-ch23-cfg", "ddr-ch0-ctrl",
+			    "ddr-ch23-ctrl";
+
+		coresight-name = "coresight-hwevent";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
+	csr: csr@6001000 {
+		compatible = "qcom,coresight-csr";
+		reg = <0x6001000 0x1000>;
+		reg-names = "csr-base";
+
+		coresight-name = "coresight-csr";
+
+		qcom,blk-size = <1>;
+	};
+
 	funnel_in0: funnel@0x6041000 {
 		compatible = "arm,primecell";
 		arm,primecell-periphid = <0x0003b908>;
@@ -1279,6 +1305,30 @@
 		};
 	};
 
+	cti_ddr0: cti@69e1000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b969>;
+		reg = <0x69e1000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-ddr0";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_ddr1: cti@69e4000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b969>;
+		reg = <0x69e4000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-ddr1";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
 	cti0: cti@6010000 {
 		compatible = "arm,primecell";
 		arm,primecell-periphid = <0x0003b966>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
index bfbaabb..5e21756 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
@@ -248,13 +248,13 @@
 
 
 		clocks = <&clock_gpucc GPU_CC_CX_GMU_CLK>,
-				<&clock_gcc GCC_GPU_CFG_AHB_CLK>,
 				<&clock_gpucc GPU_CC_CXO_CLK>,
 				<&clock_gcc GCC_DDRSS_GPU_AXI_CLK>,
-				<&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>;
+				<&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>,
+				<&clock_gpucc GPU_CC_AHB_CLK>;
 
-		clock-names = "gmu_clk", "ahb_clk", "cxo_clk",
-				"axi_clk", "memnoc_clk";
+		clock-names = "gmu_clk", "cxo_clk", "axi_clk",
+				"memnoc_clk", "ahb_clk";
 
 		qcom,gmu-pwrlevels {
 			#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
index 6989f326..5c39e86 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
@@ -74,7 +74,7 @@
 		qcom,cpr-step-quot-init-min = <11>;
 		qcom,cpr-step-quot-init-max = <12>;
 		qcom,cpr-count-mode = <0>;		/* All at once */
-		qcom,cpr-count-repeat = <1>;
+		qcom,cpr-count-repeat = <20>;
 		qcom,cpr-down-error-step-limit = <1>;
 		qcom,cpr-up-error-step-limit = <1>;
 		qcom,cpr-corner-switch-delay-time = <1042>;
@@ -103,7 +103,7 @@
 		thread@1 {
 			qcom,cpr-thread-id = <1>;
 			qcom,cpr-consecutive-up = <0>;
-			qcom,cpr-consecutive-down = <2>;
+			qcom,cpr-consecutive-down = <0>;
 			qcom,cpr-up-threshold = <2>;
 			qcom,cpr-down-threshold = <2>;
 
@@ -180,7 +180,7 @@
 		thread@0 {
 			qcom,cpr-thread-id = <0>;
 			qcom,cpr-consecutive-up = <0>;
-			qcom,cpr-consecutive-down = <2>;
+			qcom,cpr-consecutive-down = <0>;
 			qcom,cpr-up-threshold = <2>;
 			qcom,cpr-down-threshold = <2>;
 
@@ -264,7 +264,7 @@
 		qcom,cpr-step-quot-init-min = <9>;
 		qcom,cpr-step-quot-init-max = <14>;
 		qcom,cpr-count-mode = <0>;		/* All at once */
-		qcom,cpr-count-repeat = <1>;
+		qcom,cpr-count-repeat = <20>;
 		qcom,cpr-down-error-step-limit = <1>;
 		qcom,cpr-up-error-step-limit = <1>;
 		qcom,cpr-corner-switch-delay-time = <1042>;
@@ -296,7 +296,7 @@
 		thread@0 {
 			qcom,cpr-thread-id = <0>;
 			qcom,cpr-consecutive-up = <0>;
-			qcom,cpr-consecutive-down = <2>;
+			qcom,cpr-consecutive-down = <0>;
 			qcom,cpr-up-threshold = <2>;
 			qcom,cpr-down-threshold = <2>;
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
index e56073c..7f24c8b 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
@@ -107,8 +107,8 @@
 
 		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
-		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
-		       <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
 		clock-names = "src_byte_clk", "src_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
@@ -130,8 +130,8 @@
 
 		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
-		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
-		       <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
 		clock-names = "src_byte_clk", "src_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
@@ -153,8 +153,8 @@
 
 		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
-		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
-		       <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
 		clock-names = "src_byte_clk", "src_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
@@ -176,8 +176,8 @@
 
 		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
-		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
-		       <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
 		clock-names = "src_byte_clk", "src_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
@@ -222,8 +222,8 @@
 
 		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
-		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
-		       <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
 		clock-names = "src_byte_clk", "src_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
@@ -245,8 +245,8 @@
 
 		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
-		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
-		       <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
 		clock-names = "src_byte_clk", "src_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
@@ -268,8 +268,8 @@
 
 		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
-		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
-		       <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
 		clock-names = "src_byte_clk", "src_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index 76e4b6b..a15c845 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -37,8 +37,8 @@
 		interrupts = <0 83 0>;
 		interrupt-controller;
 		#interrupt-cells = <1>;
-		iommus = <&apps_smmu 0x880>, <&apps_smmu 0x888>,
-			<&apps_smmu 0xc80>, <&apps_smmu 0xc88>;
+		iommus = <&apps_smmu 0x880 0x0>, <&apps_smmu 0x888 0x0>,
+			<&apps_smmu 0xc80 0x0>, <&apps_smmu 0xc88 0x0>;
 
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -258,14 +258,14 @@
 
 		smmu_rot_unsec: qcom,smmu_rot_unsec_cb {
 			compatible = "qcom,smmu_sde_rot_unsec";
-			iommus = <&apps_smmu 0x1090>;
+			iommus = <&apps_smmu 0x1090 0x0>;
 			gdsc-mdss-supply = <&hlos1_vote_mmnoc_mmu_tbu_sf_gdsc>;
 		};
 
 		smmu_rot_sec: qcom,smmu_rot_sec_cb {
 			status = "disabled";
 			compatible = "qcom,smmu_sde_rot_sec";
-			iommus = <&apps_smmu 0x1091>;
+			iommus = <&apps_smmu 0x1091 0x0>;
 			gdsc-mdss-supply = <&hlos1_vote_mmnoc_mmu_tbu_sf_gdsc>;
 		};
 	};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
index c80343a..aac63ee 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
@@ -303,6 +303,13 @@
 		qcom,reset-ep-after-lpm-resume;
 	};
 
+	usb_audio_qmi_dev {
+		compatible = "qcom,usb-audio-qmi-dev";
+		iommus = <&apps_smmu 0x182c 0x0>;
+		qcom,usb-audio-stream-id = <0xc>;
+		qcom,usb-audio-intr-num = <2>;
+	};
+
 	usb_nop_phy: usb_nop_phy {
 		compatible = "usb-nop-xceiv";
 	};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
index 4fdf383..efd8c32 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
@@ -16,3 +16,7 @@
 	model = "Qualcomm Technologies, Inc. SDM845 V2";
 	qcom,msm-id = <321 0x20000>;
 };
+
+&spmi_debug_bus {
+	status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi b/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
index b9dc816..6f4b4ca 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
@@ -97,9 +97,9 @@
 			compatible = "qcom,msm-vidc,context-bank";
 			label = "venus_ns";
 			iommus =
-				<&apps_smmu 0x10a0>,
-				<&apps_smmu 0x10a8>,
-				<&apps_smmu 0x10b0>;
+				<&apps_smmu 0x10a0 0x0>,
+				<&apps_smmu 0x10a8 0x0>,
+				<&apps_smmu 0x10b0 0x0>;
 			buffer-types = <0xfff>;
 			virtual-addr-pool = <0x70800000 0x6f800000>;
 		};
@@ -108,10 +108,10 @@
 			compatible = "qcom,msm-vidc,context-bank";
 			label = "venus_sec_bitstream";
 			iommus =
-				<&apps_smmu 0x10a1>,
-				<&apps_smmu 0x10a9>,
-				<&apps_smmu 0x10a5>,
-				<&apps_smmu 0x10ad>;
+				<&apps_smmu 0x10a1 0x0>,
+				<&apps_smmu 0x10a9 0x0>,
+				<&apps_smmu 0x10a5 0x0>,
+				<&apps_smmu 0x10ad 0x0>;
 			buffer-types = <0x241>;
 			virtual-addr-pool = <0x4b000000 0x25800000>;
 			qcom,secure-context-bank;
@@ -121,8 +121,8 @@
 			compatible = "qcom,msm-vidc,context-bank";
 			label = "venus_sec_pixel";
 			iommus =
-				<&apps_smmu 0x10a3>,
-				<&apps_smmu 0x10ab>;
+				<&apps_smmu 0x10a3 0x0>,
+				<&apps_smmu 0x10ab 0x0>;
 			buffer-types = <0x106>;
 			virtual-addr-pool = <0x25800000 0x25800000>;
 			qcom,secure-context-bank;
@@ -132,9 +132,9 @@
 			compatible = "qcom,msm-vidc,context-bank";
 			label = "venus_sec_non_pixel";
 			iommus =
-				<&apps_smmu 0x10a4>,
-				<&apps_smmu 0x10ac>,
-				<&apps_smmu 0x10b4>;
+				<&apps_smmu 0x10a4 0x0>,
+				<&apps_smmu 0x10ac 0x0>,
+				<&apps_smmu 0x10b4 0x0>;
 			buffer-types = <0x480>;
 			virtual-addr-pool = <0x1000000 0x24800000>;
 			qcom,secure-context-bank;
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 54e0162..87d8a56 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -553,6 +553,7 @@
 		qcom,fuse-disable-bit = <12>;
 		#address-cells = <2>;
 		#size-cells = <0>;
+		status = "disabled";
 
 		qcom,pm8998-debug@0 {
 			compatible = "qcom,spmi-pmic";
@@ -1289,9 +1290,12 @@
 		      <0x1f65000 0x008>,
 		      <0x1f64000 0x008>,
 		      <0x4180000 0x020>,
-		      <0xc2b0000 0x004>;
+		      <0xc2b0000 0x004>,
+		      <0xb2e0100 0x004>,
+		      <0x4180044 0x004>;
 		reg-names = "qdsp6_base", "halt_q6", "halt_modem",
-			    "halt_nc", "rmb_base", "restart_reg";
+			    "halt_nc", "rmb_base", "restart_reg",
+			    "pdc_sync", "alt_reset";
 
 		clocks = <&clock_rpmh RPMH_CXO_CLK>,
 			 <&clock_gcc GCC_MSS_CFG_AHB_CLK>,
@@ -1536,76 +1540,76 @@
 		qcom,msm_fastrpc_compute_cb1 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
-			iommus = <&apps_smmu 0x1401>,
-				 <&apps_smmu 0x1421>;
+			iommus = <&apps_smmu 0x1401 0x0>,
+				 <&apps_smmu 0x1421 0x0>;
 		};
 		qcom,msm_fastrpc_compute_cb2 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
-			iommus = <&apps_smmu 0x1402>,
-				 <&apps_smmu 0x1422>;
+			iommus = <&apps_smmu 0x1402 0x0>,
+				 <&apps_smmu 0x1422 0x0>;
 		};
 		qcom,msm_fastrpc_compute_cb3 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
-			iommus = <&apps_smmu 0x1403>,
-				 <&apps_smmu 0x1423>;
+			iommus = <&apps_smmu 0x1403 0x0>,
+				 <&apps_smmu 0x1423 0x0>;
 		};
 		qcom,msm_fastrpc_compute_cb4 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
-			iommus = <&apps_smmu 0x1404>,
-				 <&apps_smmu 0x1424>;
+			iommus = <&apps_smmu 0x1404 0x0>,
+				 <&apps_smmu 0x1424 0x0>;
 		};
 		qcom,msm_fastrpc_compute_cb5 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
-			iommus = <&apps_smmu 0x1405>,
-				 <&apps_smmu 0x1425>;
+			iommus = <&apps_smmu 0x1405 0x0>,
+				 <&apps_smmu 0x1425 0x0>;
 		};
 		qcom,msm_fastrpc_compute_cb6 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
-			iommus = <&apps_smmu 0x1406>,
-				 <&apps_smmu 0x1426>;
+			iommus = <&apps_smmu 0x1406 0x0>,
+				 <&apps_smmu 0x1426 0x0>;
 		};
 		qcom,msm_fastrpc_compute_cb7 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
-			iommus = <&apps_smmu 0x1407>,
-				 <&apps_smmu 0x1427>;
+			iommus = <&apps_smmu 0x1407 0x0>,
+				 <&apps_smmu 0x1427 0x0>;
 		};
 		qcom,msm_fastrpc_compute_cb8 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
-			iommus = <&apps_smmu 0x1408>,
-				 <&apps_smmu 0x1428>;
+			iommus = <&apps_smmu 0x1408 0x0>,
+				 <&apps_smmu 0x1428 0x0>;
 		};
 		qcom,msm_fastrpc_compute_cb9 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
 			qcom,secure-context-bank;
-			iommus = <&apps_smmu 0x1409>,
-				 <&apps_smmu 0x1419>,
-				 <&apps_smmu 0x1429>;
+			iommus = <&apps_smmu 0x1409 0x0>,
+				 <&apps_smmu 0x1419 0x0>,
+				 <&apps_smmu 0x1429 0x0>;
 		};
 		qcom,msm_fastrpc_compute_cb10 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "cdsprpc-smd";
 			qcom,secure-context-bank;
-			iommus = <&apps_smmu 0x140A>,
-				 <&apps_smmu 0x141A>,
-				 <&apps_smmu 0x142A>;
+			iommus = <&apps_smmu 0x140A 0x0>,
+				 <&apps_smmu 0x141A 0x0>,
+				 <&apps_smmu 0x142A 0x0>;
 		};
 		qcom,msm_fastrpc_compute_cb11 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "adsprpc-smd";
-			iommus = <&apps_smmu 0x1823>;
+			iommus = <&apps_smmu 0x1823 0x0>;
 		};
 		qcom,msm_fastrpc_compute_cb12 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
 			label = "adsprpc-smd";
-			iommus = <&apps_smmu 0x1824>;
+			iommus = <&apps_smmu 0x1824 0x0>;
 		};
 	};
 
@@ -2300,18 +2304,18 @@
 
 		ipa_smmu_ap: ipa_smmu_ap {
 			compatible = "qcom,ipa-smmu-ap-cb";
-			iommus = <&apps_smmu 0x720>;
+			iommus = <&apps_smmu 0x720 0x0>;
 			qcom,iova-mapping = <0x20000000 0x40000000>;
 		};
 
 		ipa_smmu_wlan: ipa_smmu_wlan {
 			compatible = "qcom,ipa-smmu-wlan-cb";
-			iommus = <&apps_smmu 0x721>;
+			iommus = <&apps_smmu 0x721 0x0>;
 		};
 
 		ipa_smmu_uc: ipa_smmu_uc {
 			compatible = "qcom,ipa-smmu-uc-cb";
-			iommus = <&apps_smmu 0x722>;
+			iommus = <&apps_smmu 0x722 0x0>;
 			qcom,iova-mapping = <0x40000000 0x20000000>;
 		};
 	};
@@ -2355,7 +2359,7 @@
 
 	cmd_db: qcom,cmd-db@861e0000 {
 		compatible = "qcom,cmd-db";
-		reg = <0x861e0000 0x4000>;
+		reg = <0xc3f000c 0x8>;
 	};
 
 	dcc: dcc_v2@10a2000 {
@@ -2378,8 +2382,8 @@
 		      <0xa0000000 0x10000000>,
 		      <0xb0000000 0x10000>;
 		reg-names = "membase", "smmu_iova_base", "smmu_iova_ipa";
-		iommus = <&apps_smmu 0x0040>,
-			 <&apps_smmu 0x0041>;
+		iommus = <&apps_smmu 0x0040 0x0>,
+			 <&apps_smmu 0x0041 0x0>;
 		interrupts = <0 414 0 /* CE0 */ >,
 			     <0 415 0 /* CE1 */ >,
 			     <0 416 0 /* CE2 */ >,
@@ -3555,3 +3559,44 @@
 #include "sdm845-audio.dtsi"
 #include "sdm845-gpu.dtsi"
 #include "sdm845-usb.dtsi"
+
+&pm8998_temp_alarm {
+	cooling-maps {
+		trip0_cpu0 {
+			trip = <&pm8998_trip0>;
+			cooling-device = <&CPU0 21 21>;
+		};
+		trip0_cpu4 {
+			trip = <&pm8998_trip0>;
+			cooling-device = <&CPU4 21 21>;
+		};
+		trip1_cpu1 {
+			trip = <&pm8998_trip1>;
+			cooling-device = <&CPU1 22 22>;
+		};
+		trip1_cpu2 {
+			trip = <&pm8998_trip1>;
+			cooling-device = <&CPU2 22 22>;
+		};
+		trip1_cpu3 {
+			trip = <&pm8998_trip1>;
+			cooling-device = <&CPU3 22 22>;
+		};
+		trip1_cpu4 {
+			trip = <&pm8998_trip1>;
+			cooling-device = <&CPU4 22 22>;
+		};
+		trip1_cpu5 {
+			trip = <&pm8998_trip1>;
+			cooling-device = <&CPU5 22 22>;
+		};
+		trip1_cpu6 {
+			trip = <&pm8998_trip1>;
+			cooling-device = <&CPU6 22 22>;
+		};
+		trip1_cpu7 {
+			trip = <&pm8998_trip1>;
+			cooling-device = <&CPU7 22 22>;
+		};
+	};
+};
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index fa6bae8..c5cfa18 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -473,6 +473,7 @@
 CONFIG_MSM_PIL_MSS_QDSP6V5=y
 CONFIG_ICNSS=y
 CONFIG_QCOM_COMMAND_DB=y
+CONFIG_MSM_ADSP_LOADER=y
 CONFIG_MSM_AVTIMER=y
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_PM=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index a71aa64..100ca68 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -493,6 +493,7 @@
 CONFIG_ICNSS=y
 CONFIG_ICNSS_DEBUG=y
 CONFIG_QCOM_COMMAND_DB=y
+CONFIG_MSM_ADSP_LOADER=y
 CONFIG_MSM_AVTIMER=y
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_PM=y
@@ -596,6 +597,7 @@
 CONFIG_CORESIGHT_TPDA=y
 CONFIG_CORESIGHT_TPDM=y
 CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_HWEVENT=y
 CONFIG_CORESIGHT_DUMMY=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index a95e1e5..f18ae62 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -229,20 +229,22 @@
 static int fw_lookup_and_allocate_buf(const char *fw_name,
 				      struct firmware_cache *fwc,
 				      struct firmware_buf **buf, void *dbuf,
-				      size_t size)
+				      size_t size, unsigned int opt_flags)
 {
 	struct firmware_buf *tmp;
 
 	spin_lock(&fwc->lock);
-	tmp = __fw_lookup_buf(fw_name);
-	if (tmp) {
-		kref_get(&tmp->ref);
-		spin_unlock(&fwc->lock);
-		*buf = tmp;
-		return 1;
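+	/*
+	 * Requests with FW_OPT_NOCACHE (e.g. firmware loaded into a
+	 * caller-provided buffer) must not reuse or populate the firmware
+	 * cache, so skip the lookup for them.
+	 */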
+	if (!(opt_flags & FW_OPT_NOCACHE)) {
+		tmp = __fw_lookup_buf(fw_name);
+		if (tmp) {
+			kref_get(&tmp->ref);
+			spin_unlock(&fwc->lock);
+			*buf = tmp;
+			return 1;
+		}
 	}
 	tmp = __allocate_fw_buf(fw_name, fwc, dbuf, size);
-	if (tmp)
+	if (tmp && !(opt_flags & FW_OPT_NOCACHE))
 		list_add(&tmp->list, &fwc->head);
 	spin_unlock(&fwc->lock);
 
@@ -1051,7 +1053,8 @@
  */
 static int
 _request_firmware_prepare(struct firmware **firmware_p, const char *name,
-			  struct device *device, void *dbuf, size_t size)
+			  struct device *device, void *dbuf, size_t size,
+			  unsigned int opt_flags)
 {
 	struct firmware *firmware;
 	struct firmware_buf *buf;
@@ -1069,7 +1072,8 @@
 		return 0; /* assigned */
 	}
 
-	ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf, dbuf, size);
+	ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf, dbuf, size,
+					opt_flags);
 
 	/*
 	 * bind with 'buf' now to avoid warning in failure path
@@ -1147,7 +1151,8 @@
 		goto out;
 	}
 
-	ret = _request_firmware_prepare(&fw, name, device, buf, size);
+	ret = _request_firmware_prepare(&fw, name, device, buf, size,
+					opt_flags);
 	if (ret <= 0) /* error or already assigned */
 		goto out;
 
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index b248b1b..1b545d6 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1709,8 +1709,14 @@
 		}
 	}
 
+	/*
+	 * The Fabia PLLs only have 16 bits to program the fractional divider.
+	 * Hence the programmed rate might be slightly different than the
+	 * requested one.
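+	 * Rates are therefore compared at 1 kHz granularity: e.g. a parent
+	 * rate of 1440000384 Hz and a request for 1440000000 Hz both round
+	 * to 1440000 kHz, so no new rate is propagated to the parent.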
+	 */
 	if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
-	    best_parent_rate != parent->rate)
+		(DIV_ROUND_CLOSEST(best_parent_rate, 1000) !=
+			DIV_ROUND_CLOSEST(parent->rate, 1000)))
 		top = clk_calc_new_rates(parent, best_parent_rate);
 
 out:
@@ -2345,6 +2351,56 @@
 	NULL,
 };
 
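+/* Emit a clk_state trace event for a clock and, recursively, its children */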
+static void clk_state_subtree(struct clk_core *c)
+{
+	int vdd_level = 0;
+	struct clk_core *child;
+
+	if (!c)
+		return;
+
+	if (c->vdd_class) {
+		vdd_level = clk_find_vdd_level(c, c->rate);
+		if (vdd_level < 0)
+			vdd_level = 0;
+	}
+
+	trace_clk_state(c->name, c->prepare_count, c->enable_count,
+						c->rate, vdd_level);
+
+	hlist_for_each_entry(child, &c->children, child_node)
+		clk_state_subtree(child);
+}
+
+static int clk_state_show(struct seq_file *s, void *data)
+{
+	struct clk_core *c;
+	struct hlist_head **lists = (struct hlist_head **)s->private;
+
+	clk_prepare_lock();
+
+	for (; *lists; lists++)
+		hlist_for_each_entry(c, *lists, child_node)
+			clk_state_subtree(c);
+
+	clk_prepare_unlock();
+
+	return 0;
+}
+
+static int clk_state_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, clk_state_show, inode->i_private);
+}
+
+static const struct file_operations clk_state_fops = {
+	.open		= clk_state_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
 static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
 				 int level)
 {
@@ -2980,6 +3036,11 @@
 	if (!d)
 		return -ENOMEM;
 
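+	/*
+	 * Reading trace_clocks walks the entire clock tree and emits one
+	 * clk_state trace event (prepare/enable counts, rate and vdd level)
+	 * per clock.
+	 */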
+	d = debugfs_create_file("trace_clocks", 0444, rootdir, &all_lists,
+				&clk_state_fops);
+	if (!d)
+		return -ENOMEM;
+
 	mutex_lock(&clk_debug_lock);
 	hlist_for_each_entry(core, &clk_debug_list, debug_node)
 		clk_debug_create_one(core, rootdir);
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index 6ff621d..d15d1bb 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -307,6 +307,15 @@
 	u64 quotient;
 	int alpha_bw = ALPHA_BITWIDTH;
 
+	/*
+	 * The PLL's parent rate is zero, most likely because the parent
+	 * hasn't been registered yet. Return early with the requested rate.
+	 */
+	if (!prate) {
+		pr_debug("PLL's parent rate hasn't been initialized.\n");
+		return rate;
+	}
+
 	quotient = rate;
 	remainder = do_div(quotient, prate);
 	*l = quotient;
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
index 3daefbc..2cb9d05 100644
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
@@ -23,9 +23,10 @@
 
 #define VCO_DELAY_USEC 1
 
-#define MHZ_375		375000000UL
-#define MHZ_750		750000000UL
-#define MHZ_1500	1500000000UL
+#define MHZ_250		250000000UL
+#define MHZ_500		500000000UL
+#define MHZ_1000	1000000000UL
+#define MHZ_1100	1100000000UL
 #define MHZ_1900	1900000000UL
 #define MHZ_3000	3000000000UL
 
@@ -99,6 +100,7 @@
 	u32 frac_div_start_low;
 	u32 frac_div_start_mid;
 	u32 frac_div_start_high;
+	u32 pll_clock_inverters;
 	u32 ssc_stepsize_low;
 	u32 ssc_stepsize_high;
 	u32 ssc_div_per_low;
@@ -209,20 +211,36 @@
 	u64 dec, dec_multiple;
 	u32 frac;
 	u64 multiplier;
+	u32 i;
 
 	target_freq = rsc->vco_current_rate;
 	pr_debug("target_freq = %llu\n", target_freq);
 
 	if (config->div_override) {
 		computed_output_div = config->output_div;
+
+		/*
+		 * computed_output_div = 2 ^ div_log
+		 * To get div_log from the output divider, find the index of
+		 * the set bit in the value.
+		 * div_log ranges from 0-3, so only the 4 LSBs need checking.
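+		 * e.g. an output_div of 4 (bit 2 set) gives div_log = 2.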
+		 */
+
+		for (i = 0; i < 4; i++) {
+			if (computed_output_div & (1 << i)) {
+				div_log = i;
+				break;
+			}
+		}
+
 	} else {
-		if (target_freq < MHZ_375) {
+		if (target_freq < MHZ_250) {
 			computed_output_div = 8;
 			div_log = 3;
-		} else if (target_freq < MHZ_750) {
+		} else if (target_freq < MHZ_500) {
 			computed_output_div = 4;
 			div_log = 2;
-		} else if (target_freq < MHZ_1500) {
+		} else if (target_freq < MHZ_1000) {
 			computed_output_div = 2;
 			div_log = 1;
 		} else {
@@ -251,6 +269,10 @@
 		regs->pll_prop_gain_rate = 10;
 	else
 		regs->pll_prop_gain_rate = 12;
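+
+	/* The PLL clock inverters only need to be programmed below 1.1 GHz */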
+	if (pll_freq < MHZ_1100)
+		regs->pll_clock_inverters = 8;
+	else
+		regs->pll_clock_inverters = 0;
 
 	regs->pll_outdiv_rate = div_log;
 	regs->pll_lockdet_rate = config->lock_timer;
@@ -375,7 +397,7 @@
 	MDSS_PLL_REG_W(pll_base, PLL_PLL_OUTDIV_RATE, reg->pll_outdiv_rate);
 	MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_DELAY, 0x06);
 	MDSS_PLL_REG_W(pll_base, PLL_CMODE, 0x10);
-	MDSS_PLL_REG_W(pll_base, PLL_CLOCK_INVERTERS, 0x0);
+	MDSS_PLL_REG_W(pll_base, PLL_CLOCK_INVERTERS, reg->pll_clock_inverters);
 
 }
 
@@ -1085,7 +1107,7 @@
 
 static struct dsi_pll_vco_clk dsi0pll_vco_clk = {
 	.ref_clk_rate = 19200000UL,
-	.min_rate = 1500000000UL,
+	.min_rate = 1000000000UL,
 	.max_rate = 3500000000UL,
 	.hw.init = &(struct clk_init_data){
 			.name = "dsi0pll_vco_clk",
@@ -1098,7 +1120,7 @@
 
 static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
 	.ref_clk_rate = 19200000UL,
-	.min_rate = 1500000000UL,
+	.min_rate = 1000000000UL,
 	.max_rate = 3500000000UL,
 	.hw.init = &(struct clk_init_data){
 			.name = "dsi1pll_vco_clk",
@@ -1219,7 +1241,7 @@
 
 static struct clk_regmap_mux dsi0pll_byteclk_mux = {
 	.shift = 0,
-	.width = 0,
+	.width = 1,
 	.clkr = {
 		.hw.init = &(struct clk_init_data){
 			.name = "dsi0_phy_pll_out_byteclk",
@@ -1233,7 +1255,7 @@
 
 static struct clk_regmap_mux dsi1pll_byteclk_mux = {
 	.shift = 0,
-	.width = 0,
+	.width = 1,
 	.clkr = {
 		.hw.init = &(struct clk_init_data){
 			.name = "dsi1_phy_pll_out_byteclk",
@@ -1247,7 +1269,7 @@
 
 static struct clk_regmap_mux dsi0pll_pclk_src_mux = {
 	.shift = 0,
-	.width = 0,
+	.width = 1,
 	.clkr = {
 		.hw.init = &(struct clk_init_data){
 			.name = "dsi0pll_pclk_src_mux",
@@ -1262,7 +1284,7 @@
 
 static struct clk_regmap_mux dsi1pll_pclk_src_mux = {
 	.shift = 0,
-	.width = 0,
+	.width = 1,
 	.clkr = {
 		.hw.init = &(struct clk_init_data){
 			.name = "dsi1pll_pclk_src_mux",
@@ -1307,7 +1329,7 @@
 
 static struct clk_regmap_mux dsi0pll_pclk_mux = {
 	.shift = 0,
-	.width = 0,
+	.width = 1,
 	.clkr = {
 		.hw.init = &(struct clk_init_data){
 			.name = "dsi0_phy_pll_out_dsiclk",
@@ -1321,7 +1343,7 @@
 
 static struct clk_regmap_mux dsi1pll_pclk_mux = {
 	.shift = 0,
-	.width = 0,
+	.width = 1,
 	.clkr = {
 		.hw.init = &(struct clk_init_data){
 			.name = "dsi1_phy_pll_out_dsiclk",
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 550a59c..5db1897 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -441,15 +441,14 @@
 {
 	u32 cntkctl = arch_timer_get_cntkctl();
 
-	/* Disable user access to the timers and the physical counter */
+	/* Disable user access to the timers */
 	/* Also disable virtual event stream */
 	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
 			| ARCH_TIMER_USR_VT_ACCESS_EN
-			| ARCH_TIMER_VIRT_EVT_EN
-			| ARCH_TIMER_USR_PCT_ACCESS_EN);
+			| ARCH_TIMER_VIRT_EVT_EN);
 
-	/* Enable user access to the virtual counter */
-	cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
+	/* Enable user access to the virtual and physical counters */
+	cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN | ARCH_TIMER_USR_PCT_ACCESS_EN;
 
 	arch_timer_set_cntkctl(cntkctl);
 }
diff --git a/drivers/devfreq/governor_bw_hwmon.c b/drivers/devfreq/governor_bw_hwmon.c
index d7cc425..53c0f8a 100644
--- a/drivers/devfreq/governor_bw_hwmon.c
+++ b/drivers/devfreq/governor_bw_hwmon.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -143,25 +143,27 @@
 {									\
 	struct devfreq *df = to_devfreq(dev);				\
 	struct hwmon_node *hw = df->data;				\
-	int ret;							\
+	int ret, numvals;						\
 	unsigned int i = 0, val;					\
+	char **strlist;							\
 									\
-	do {								\
-		ret = kstrtoint(buf, 10, &val);				\
+	strlist = argv_split(GFP_KERNEL, buf, &numvals);		\
+	if (!strlist)							\
+		return -ENOMEM;						\
+	numvals = min(numvals, n - 1);					\
+	for (i = 0; i < numvals; i++) {					\
+		ret = kstrtouint(strlist[i], 10, &val);			\
 		if (ret)						\
-			break;						\
-		buf = strnchr(buf, PAGE_SIZE, ' ');			\
-		if (buf)						\
-			buf++;						\
+			goto out;					\
 		val = max(val, _min);					\
 		val = min(val, _max);					\
 		hw->name[i] = val;					\
-		i++;							\
-	} while (buf && i < n - 1);					\
-	if (i < 1)							\
-		return -EINVAL;						\
+	}								\
+	ret = count;							\
+out:									\
+	argv_free(strlist);						\
 	hw->name[i] = 0;						\
-	return count;							\
+	return ret;							\
 }
 
 #define gov_list_attr(__attr, n, min, max)	\
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
index d5289c0..6a91a65 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
@@ -109,6 +109,15 @@
 #define SDE_INTR_PING_PONG_3_TE_DETECTED BIT(27)
 
 /**
+ * Ctl start interrupt status bit definitions
+ */
+#define SDE_INTR_CTL_0_START BIT(9)
+#define SDE_INTR_CTL_1_START BIT(10)
+#define SDE_INTR_CTL_2_START BIT(11)
+#define SDE_INTR_CTL_3_START BIT(12)
+#define SDE_INTR_CTL_4_START BIT(13)
+
+/**
  * Concurrent WB overflow interrupt status bit definitions
  */
 #define SDE_INTR_CWB_2_OVERFLOW BIT(14)
@@ -325,15 +334,21 @@
 	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
 	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
 	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
-	/* irq_idx: 40-43 */
+	/* irq_idx: 40 */
 	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_S0,
 		SDE_INTR_PING_PONG_S0_RD_PTR, 1},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
-	/* irq_idx: 44-47 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+	/* irq_idx: 41-45 */
+	{ SDE_IRQ_TYPE_CTL_START, CTL_0,
+		SDE_INTR_CTL_0_START, 1},
+	{ SDE_IRQ_TYPE_CTL_START, CTL_1,
+		SDE_INTR_CTL_1_START, 1},
+	{ SDE_IRQ_TYPE_CTL_START, CTL_2,
+		SDE_INTR_CTL_2_START, 1},
+	{ SDE_IRQ_TYPE_CTL_START, CTL_3,
+		SDE_INTR_CTL_3_START, 1},
+	{ SDE_IRQ_TYPE_CTL_START, CTL_4,
+		SDE_INTR_CTL_4_START, 1},
+	/* irq_idx: 46-47 */
 	{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_2, SDE_INTR_CWB_2_OVERFLOW, 1},
 	{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_3, SDE_INTR_CWB_3_OVERFLOW, 1},
 	/* irq_idx: 48-51 */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
index 7805df1..7991994 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
@@ -55,6 +55,7 @@
  * @SDE_IRQ_TYPE_SFI_CMD_2_OUT:		DSI CMD2 static frame INTR out-of static
  * @SDE_IRQ_TYPE_PROG_LINE:		Programmable Line interrupt
  * @SDE_IRQ_TYPE_AD4_BL_DONE:		AD4 backlight
+ * @SDE_IRQ_TYPE_CTL_START:		Control start
  * @SDE_IRQ_TYPE_RESERVED:		Reserved for expansion
  */
 enum sde_intr_type {
@@ -84,6 +85,7 @@
 	SDE_IRQ_TYPE_SFI_CMD_2_OUT,
 	SDE_IRQ_TYPE_PROG_LINE,
 	SDE_IRQ_TYPE_AD4_BL_DONE,
+	SDE_IRQ_TYPE_CTL_START,
 	SDE_IRQ_TYPE_RESERVED,
 };
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_rot.c b/drivers/gpu/drm/msm/sde/sde_hw_rot.c
index 01fe3c8..d15b804 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_rot.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_rot.c
@@ -574,9 +574,16 @@
 
 	rot_cmd.video_mode = data->video_mode;
 	rot_cmd.fps = data->fps;
+
+	/*
+	 * The DRM rotation property is specified in the counter-clockwise
+	 * direction, whereas the rotator h/w rotates in the clockwise
+	 * direction. Convert the property to clockwise by toggling the
+	 * h/v flips when a 90-degree rotation is requested.
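+	 * e.g. a pure 90-degree CCW request becomes 90-degree CW with both
+	 * flips set, since toggling both flips adds the 180-degree
+	 * correction.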
+	 */
 	rot_cmd.rot90 = data->rot90;
-	rot_cmd.hflip = data->hflip;
-	rot_cmd.vflip = data->vflip;
+	rot_cmd.hflip = data->rot90 ? !data->hflip : data->hflip;
+	rot_cmd.vflip = data->rot90 ? !data->vflip : data->vflip;
+
 	rot_cmd.secure = data->secure;
 	rot_cmd.clkrate = data->clkrate;
 	rot_cmd.data_bw = 0;
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 8662207..93268be 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -1961,21 +1961,29 @@
 		ret = sde_plane_rot_submit_command(plane, state,
 				SDE_HW_ROT_CMD_VALIDATE);
 
-	} else if (sde_plane_enabled(state)) {
+	} else {
 
 		SDE_DEBUG("plane%d.%d bypass rotator\n", plane->base.id,
 				rstate->sequence_id);
 
 		/* bypass rotator - initialize output setting as input */
+		for (i = 0; i < ARRAY_SIZE(rstate->out_fb_modifier); i++)
+			rstate->out_fb_modifier[i] = state->fb ?
+				state->fb->modifier[i] : 0x0;
+
+		if (state->fb) {
+			rstate->out_fb_pixel_format = state->fb->pixel_format;
+			rstate->out_fb_flags = state->fb->flags;
+			rstate->out_fb_width = state->fb->width;
+			rstate->out_fb_height = state->fb->height;
+		} else {
+			rstate->out_fb_pixel_format = 0x0;
+			rstate->out_fb_flags = 0x0;
+			rstate->out_fb_width = 0;
+			rstate->out_fb_height = 0;
+		}
+
 		rstate->out_rotation = rstate->in_rotation;
-		rstate->out_fb_pixel_format = state->fb->pixel_format;
-
-		for (i = 0.; i < ARRAY_SIZE(rstate->out_fb_modifier); i++)
-			rstate->out_fb_modifier[i] = state->fb->modifier[i];
-
-		rstate->out_fb_flags = state->fb->flags;
-		rstate->out_fb_width = state->fb->width;
-		rstate->out_fb_height = state->fb->height;
 		rstate->out_src_x = state->src_x;
 		rstate->out_src_y = state->src_y;
 		rstate->out_src_w = state->src_w;
diff --git a/drivers/gpu/drm/msm/sde_power_handle.c b/drivers/gpu/drm/msm/sde_power_handle.c
index 3618479..1e4f6b1 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.c
+++ b/drivers/gpu/drm/msm/sde_power_handle.c
@@ -841,7 +841,15 @@
 			goto data_bus_hdl_err;
 		}
 
-		if (!phandle->rsc_client_init) {
+		/*
+		 * - When the target is RSCC enabled, software should enable
+		 *   the regulator only once, during bootup. After that, the
+		 *   RSCC hardware takes care of enabling/disabling it.
+		 * - When the target is not RSCC enabled, the regulator is
+		 *   handled entirely by software.
+		 */
+		if (!phandle->rsc_client) {
 			rc = msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg,
 									enable);
 			if (rc) {
@@ -883,7 +891,7 @@
 		sde_power_reg_bus_update(phandle->reg_bus_hdl,
 							max_usecase_ndx);
 
-		if (!phandle->rsc_client_init)
+		if (!phandle->rsc_client)
 			msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg,
 									enable);
 		sde_power_data_bus_update(&phandle->data_bus_handle, enable);
@@ -901,7 +909,7 @@
 rsc_err:
 	sde_power_reg_bus_update(phandle->reg_bus_hdl, prev_usecase_ndx);
 reg_bus_hdl_err:
-	if (!phandle->rsc_client_init)
+	if (!phandle->rsc_client)
 		msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, 0);
 vreg_err:
 	sde_power_data_bus_update(&phandle->data_bus_handle, 0);
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index ed5b714..1cb0259 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -1045,6 +1045,13 @@
 	 */
 	if (drawctxt->base.flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
 		set_bit(KGSL_FT_DISABLE, &cmdobj->fault_policy);
+	/*
+	 * Set the fault tolerance policy to FT_REPLAY, since the context
+	 * wants to be invalidated if a replay attempt fails. This doesn't
+	 * require executing the default FT policy.
+	 */
+	else if (drawctxt->base.flags & KGSL_CONTEXT_INVALIDATE_ON_FAULT)
+		set_bit(KGSL_FT_REPLAY, &cmdobj->fault_policy);
 	else
 		cmdobj->fault_policy = adreno_dev->ft_policy;
 }
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index f77d438..ab574d8 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -349,6 +349,7 @@
 		KGSL_CONTEXT_PER_CONTEXT_TS |
 		KGSL_CONTEXT_USER_GENERATED_TS |
 		KGSL_CONTEXT_NO_FAULT_TOLERANCE |
+		KGSL_CONTEXT_INVALIDATE_ON_FAULT |
 		KGSL_CONTEXT_CTX_SWITCH |
 		KGSL_CONTEXT_PRIORITY_MASK |
 		KGSL_CONTEXT_TYPE_MASK |
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index d955aa0..db105c5 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -80,6 +80,7 @@
 	{ KGSL_CONTEXT_PER_CONTEXT_TS, "PER_CONTEXT_TS" }, \
 	{ KGSL_CONTEXT_USER_GENERATED_TS, "USER_TS" }, \
 	{ KGSL_CONTEXT_NO_FAULT_TOLERANCE, "NO_FT" }, \
+	{ KGSL_CONTEXT_INVALIDATE_ON_FAULT, "INVALIDATE_ON_FAULT" }, \
 	{ KGSL_CONTEXT_PWR_CONSTRAINT, "PWR" }, \
 	{ KGSL_CONTEXT_SAVE_GMEM, "SAVE_GMEM" }
 
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 86d4d61..d635519 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -1608,6 +1608,8 @@
 			ret = PTR_ERR(mmu->defaultpagetable);
 			mmu->defaultpagetable = NULL;
 			return ret;
+		} else if (mmu->defaultpagetable == NULL) {
+			return -ENOMEM;
 		}
 	}
 
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 94b2e2f9..e233e76 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -20,6 +20,7 @@
 #include <linux/slab.h>
 #include <linux/mutex.h>
 #include <linux/clk.h>
+#include <dt-bindings/clock/qcom,aop-qmp.h>
 #include <linux/coresight.h>
 #include <linux/of_platform.h>
 #include <linux/delay.h>
@@ -940,6 +941,14 @@
 	atomic_t *refcnts = NULL;
 	struct coresight_device *csdev;
 	struct coresight_connection *conns = NULL;
+	struct clk *pclk;
+
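+	/*
+	 * If the component provides an "apb_pclk" handle, vote the QDSS
+	 * clock to the dynamic level before registering the device.
+	 */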
+	pclk = clk_get(desc->dev, "apb_pclk");
+	if (!IS_ERR(pclk)) {
+		ret = clk_set_rate(pclk, QDSS_CLK_LEVEL_DYNAMIC);
+		if (ret)
+			dev_err(desc->dev, "clk set rate failed\n");
+	}
 
 	csdev = kzalloc(sizeof(*csdev), GFP_KERNEL);
 	if (!csdev) {
diff --git a/drivers/media/platform/msm/camera/Makefile b/drivers/media/platform/msm/camera/Makefile
index c897669..19de267 100644
--- a/drivers/media/platform/msm/camera/Makefile
+++ b/drivers/media/platform/msm/camera/Makefile
@@ -2,3 +2,4 @@
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_utils/
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_core/
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_sync/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_smmu/
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/Makefile b/drivers/media/platform/msm/camera/cam_req_mgr/Makefile
index f8c864f..87707b1 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/Makefile
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/Makefile
@@ -1 +1,3 @@
-obj-$(CONFIG_SPECTRA_CAMERA) += cam_req_mgr_dev.o cam_req_mgr_util.o cam_req_mgr_core.o  cam_req_mgr_workq.o
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_req_mgr_dev.o cam_req_mgr_util.o cam_req_mgr_core.o cam_req_mgr_workq.o cam_mem_mgr.o
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
new file mode 100644
index 0000000..f3ef0e9
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
@@ -0,0 +1,968 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-MEM-MGR %s:%d " fmt, __func__, __LINE__
+
+#ifdef CONFIG_MEM_MGR_DBG
+#define CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/msm_ion.h>
+#include <asm/cacheflush.h>
+
+#include "cam_req_mgr_util.h"
+#include "cam_mem_mgr.h"
+#include "cam_smmu_api.h"
+
+static struct cam_mem_table tbl;
+
+static int cam_mem_util_map_cpu_va(struct ion_handle *hdl,
+	uint64_t *vaddr,
+	size_t *len)
+{
+	*vaddr = (uintptr_t)ion_map_kernel(tbl.client, hdl);
+	if (IS_ERR_OR_NULL((void *)*vaddr)) {
+		pr_err("kernel map fail");
+		return -ENOSPC;
+	}
+
+	if (ion_handle_get_size(tbl.client, hdl, len)) {
+		pr_err("kernel get len failed");
+		ion_unmap_kernel(tbl.client, hdl);
+		return -ENOSPC;
+	}
+
+	return 0;
+}
+
+static int cam_mem_util_get_dma_dir(uint32_t flags)
+{
+	int rc = -EINVAL;
+
+	if (flags & CAM_MEM_FLAG_HW_READ_ONLY)
+		rc = DMA_TO_DEVICE;
+	else if (flags & CAM_MEM_FLAG_HW_WRITE_ONLY)
+		rc = DMA_FROM_DEVICE;
+	else if (flags & CAM_MEM_FLAG_HW_READ_WRITE)
+		rc = DMA_BIDIRECTIONAL;
+
+	return rc;
+}
+
+static int cam_mem_util_client_create(void)
+{
+	int rc = 0;
+
+	tbl.client = msm_ion_client_create("camera_global_pool");
+	if (IS_ERR_OR_NULL(tbl.client)) {
+		pr_err("fail to create client\n");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static void cam_mem_util_client_destroy(void)
+{
+	ion_client_destroy(tbl.client);
+	tbl.client = NULL;
+}
+
+int cam_mem_mgr_init(void)
+{
+	int rc;
+	int i;
+	int bitmap_size;
+
+	memset(tbl.bufq, 0, sizeof(tbl.bufq));
+
+	rc = cam_mem_util_client_create();
+	if (rc < 0) {
+		pr_err("fail to create ion client\n");
+		goto client_fail;
+	}
+
+	bitmap_size = BITS_TO_LONGS(CAM_MEM_BUFQ_MAX) * sizeof(long);
+	tbl.bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+	if (!tbl.bitmap) {
+		rc = -ENOMEM;
+		goto bitmap_fail;
+	}
+	tbl.bits = bitmap_size * BITS_PER_BYTE;
+	bitmap_zero(tbl.bitmap, tbl.bits);
+	/* We need to reserve slot 0 because 0 is invalid */
+	set_bit(0, tbl.bitmap);
+
+	for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
+		tbl.bufq[i].fd = -1;
+		tbl.bufq[i].buf_handle = -1;
+	}
+	mutex_init(&tbl.m_lock);
+	return rc;
+
+bitmap_fail:
+	cam_mem_util_client_destroy();
+client_fail:
+	return rc;
+}
+
+static int cam_mem_mgr_cleanup_table(void)
+{
+	int i;
+
+	mutex_lock(&tbl.m_lock);
+	for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
+		if (!tbl.bufq[i].active) {
+			CDBG("Buffer inactive at idx=%d, continuing\n", i);
+			continue;
+		} else {
+			pr_err("Active buffer at idx=%d, possible leak\n", i);
+		}
+
+		mutex_lock(&tbl.bufq[i].q_lock);
+		ion_free(tbl.client, tbl.bufq[i].i_hdl);
+		tbl.bufq[i].fd = -1;
+		tbl.bufq[i].flags = 0;
+		tbl.bufq[i].buf_handle = -1;
+		tbl.bufq[i].vaddr = 0;
+		tbl.bufq[i].len = 0;
+		memset(tbl.bufq[i].hdls, 0,
+			sizeof(int32_t) * tbl.bufq[i].num_hdl);
+		tbl.bufq[i].num_hdl = 0;
+		tbl.bufq[i].i_hdl = NULL;
+		tbl.bufq[i].active = false;
+		mutex_unlock(&tbl.bufq[i].q_lock);
+		mutex_destroy(&tbl.bufq[i].q_lock);
+	}
+	bitmap_zero(tbl.bitmap, tbl.bits);
+	/* We need to reserve slot 0 because 0 is invalid */
+	set_bit(0, tbl.bitmap);
+	mutex_unlock(&tbl.m_lock);
+
+	return 0;
+}
+
+void cam_mem_mgr_deinit(void)
+{
+	cam_mem_mgr_cleanup_table();
+	mutex_lock(&tbl.m_lock);
+	bitmap_zero(tbl.bitmap, tbl.bits);
+	kfree(tbl.bitmap);
+	tbl.bitmap = NULL;
+	cam_mem_util_client_destroy();
+	mutex_unlock(&tbl.m_lock);
+	mutex_destroy(&tbl.m_lock);
+}
+
+static int32_t cam_mem_get_slot(void)
+{
+	int32_t idx;
+
+	mutex_lock(&tbl.m_lock);
+	idx = find_first_zero_bit(tbl.bitmap, tbl.bits);
+	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
+		mutex_unlock(&tbl.m_lock);
+		return -ENOMEM;
+	}
+
+	set_bit(idx, tbl.bitmap);
+	tbl.bufq[idx].active = true;
+	mutex_init(&tbl.bufq[idx].q_lock);
+	mutex_unlock(&tbl.m_lock);
+
+	return idx;
+}
+
+static void cam_mem_put_slot(int32_t idx)
+{
+	mutex_lock(&tbl.m_lock);
+	mutex_lock(&tbl.bufq[idx].q_lock);
+	tbl.bufq[idx].active = false;
+	mutex_unlock(&tbl.bufq[idx].q_lock);
+	mutex_destroy(&tbl.bufq[idx].q_lock);
+	clear_bit(idx, tbl.bitmap);
+	mutex_unlock(&tbl.m_lock);
+}
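+
+/*
+ * Note on slot bookkeeping (descriptive, not new behavior): bit 0 of
+ * tbl.bitmap is pre-set at init, so find_first_zero_bit() above always
+ * returns an index in [1, CAM_MEM_BUFQ_MAX), and a buf_handle built
+ * from slot 0 can never alias a valid buffer.
+ */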
+
+int cam_mem_get_io_buf(int32_t buf_handle, int32_t mmu_handle,
+	uint64_t *iova_ptr, size_t *len_ptr)
+{
+	int rc = 0, idx;
+
+	idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle);
+	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
+		return -EINVAL;
+
+	if (!tbl.bufq[idx].active)
+		return -EINVAL;
+
+	mutex_lock(&tbl.bufq[idx].q_lock);
+	if (buf_handle != tbl.bufq[idx].buf_handle) {
+		rc = -EINVAL;
+		goto handle_mismatch;
+	}
+
+	rc = cam_smmu_get_iova(mmu_handle,
+		tbl.bufq[idx].fd,
+		iova_ptr,
+		len_ptr);
+	if (rc < 0)
+		pr_err("fail to get buf hdl: %d\n", buf_handle);
+
+handle_mismatch:
+	mutex_unlock(&tbl.bufq[idx].q_lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_mem_get_io_buf);
+
+int cam_mem_get_cpu_buf(int32_t buf_handle, uint64_t *vaddr_ptr, size_t *len)
+{
+	int rc = 0;
+	int idx;
+	struct ion_handle *ion_hdl = NULL;
+	uint64_t kvaddr = 0;
+	size_t klen = 0;
+
+	if (!buf_handle || !vaddr_ptr || !len)
+		return -EINVAL;
+
+	idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle);
+	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
+		return -EINVAL;
+
+	if (!tbl.bufq[idx].active)
+		return -EPERM;
+
+	mutex_lock(&tbl.bufq[idx].q_lock);
+	if (buf_handle != tbl.bufq[idx].buf_handle) {
+		rc = -EINVAL;
+		goto exit_func;
+	}
+
+	ion_hdl = tbl.bufq[idx].i_hdl;
+	if (!ion_hdl) {
+		pr_err("Invalid ION handle\n");
+		rc = -EINVAL;
+		goto exit_func;
+	}
+
+	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS) {
+		if (!tbl.bufq[idx].kmdvaddr) {
+			rc = cam_mem_util_map_cpu_va(ion_hdl,
+				&kvaddr, &klen);
+			if (rc)
+				goto exit_func;
+			tbl.bufq[idx].kmdvaddr = kvaddr;
+		}
+	} else {
+		rc = -EINVAL;
+		goto exit_func;
+	}
+
+	*vaddr_ptr = tbl.bufq[idx].kmdvaddr;
+	*len = tbl.bufq[idx].len;
+
+exit_func:
+	mutex_unlock(&tbl.bufq[idx].q_lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_mem_get_cpu_buf);
+
+int cam_mem_mgr_cache_ops(struct cam_mem_cache_ops_cmd *cmd)
+{
+	int rc = 0, idx;
+	uint32_t ion_cache_ops;
+	unsigned long ion_flag = 0;
+
+	if (!cmd)
+		return -EINVAL;
+
+	idx = CAM_MEM_MGR_GET_HDL_IDX(cmd->buf_handle);
+	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
+		return -EINVAL;
+
+	mutex_lock(&tbl.bufq[idx].q_lock);
+
+	if (!tbl.bufq[idx].active) {
+		rc = -EINVAL;
+		goto fail;
+	}
+
+	if (cmd->buf_handle != tbl.bufq[idx].buf_handle) {
+		rc = -EINVAL;
+		goto fail;
+	}
+
+	rc = ion_handle_get_flags(tbl.client, tbl.bufq[idx].i_hdl,
+		&ion_flag);
+	if (rc) {
+		pr_err("cache get flags failed %d\n", rc);
+		goto fail;
+	}
+
+	if (ION_IS_CACHED(ion_flag)) {
+		switch (cmd->mem_cache_ops) {
+		case CAM_MEM_CLEAN_CACHE:
+			ion_cache_ops = ION_IOC_CLEAN_CACHES;
+			break;
+		case CAM_MEM_INV_CACHE:
+			ion_cache_ops = ION_IOC_INV_CACHES;
+			break;
+		case CAM_MEM_CLEAN_INV_CACHE:
+			ion_cache_ops = ION_IOC_CLEAN_INV_CACHES;
+			break;
+		default:
+			pr_err("invalid cache ops: %d\n", cmd->mem_cache_ops);
+			rc = -EINVAL;
+			goto fail;
+		}
+
+		rc = msm_ion_do_cache_op(tbl.client,
+				tbl.bufq[idx].i_hdl,
+				(void *)tbl.bufq[idx].vaddr,
+				tbl.bufq[idx].len,
+				ion_cache_ops);
+		if (rc)
+			pr_err("cache operation failed %d\n", rc);
+	}
+fail:
+	mutex_unlock(&tbl.bufq[idx].q_lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_mem_mgr_cache_ops);
+
+static int cam_mem_util_get_ion_buffer(size_t len,
+	size_t align,
+	unsigned int heap_id_mask,
+	unsigned int flags,
+	struct ion_handle **hdl,
+	int *fd)
+{
+	int rc = 0;
+
+	*hdl = ion_alloc(tbl.client, len, align, heap_id_mask, flags);
+	if (IS_ERR_OR_NULL(*hdl))
+		return -ENOMEM;
+
+	*fd = ion_share_dma_buf_fd(tbl.client, *hdl);
+	if (*fd < 0) {
+		pr_err("dma buf get fd fail\n");
+		rc = -EINVAL;
+		goto get_fd_fail;
+	}
+
+	return rc;
+
+get_fd_fail:
+	ion_free(tbl.client, *hdl);
+	return rc;
+}
+
+static int cam_mem_util_ion_alloc(struct cam_mem_mgr_alloc_cmd *cmd,
+	struct ion_handle **hdl,
+	int *fd)
+{
+	uint32_t heap_id;
+	uint32_t ion_flag = 0;
+	int rc;
+
+	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
+		heap_id = ION_HEAP(ION_SECURE_DISPLAY_HEAP_ID);
+	else
+		heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID);
+
+	if (cmd->flags & CAM_MEM_FLAG_CACHE)
+		ion_flag |= ION_FLAG_CACHED;
+	else
+		ion_flag &= ~ION_FLAG_CACHED;
+
+	rc = cam_mem_util_get_ion_buffer(cmd->len,
+		cmd->align,
+		heap_id,
+		ion_flag,
+		hdl,
+		fd);
+
+	return rc;
+}
+
+static int cam_mem_util_check_flags(struct cam_mem_mgr_alloc_cmd *cmd)
+{
+	if (!cmd->flags) {
+		pr_err("Invalid flags\n");
+		return -EINVAL;
+	}
+
+	if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
+		pr_err("Num of mmu hdl exceeded maximum(%d)\n",
+			CAM_MEM_MMU_MAX_HANDLE);
+		return -EINVAL;
+	}
+
+	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE &&
+		cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) {
+		pr_err("Kernel mapping in secure mode not allowed\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int cam_mem_util_check_map_flags(struct cam_mem_mgr_map_cmd *cmd)
+{
+	if (!cmd->flags) {
+		pr_err("Invalid flags\n");
+		return -EINVAL;
+	}
+
+	if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
+		pr_err("Num of mmu hdl exceeded maximum(%d)\n",
+			CAM_MEM_MMU_MAX_HANDLE);
+		return -EINVAL;
+	}
+
+	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE &&
+		cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) {
+		pr_err("Kernel mapping in secure mode not allowed\n");
+		return -EINVAL;
+	}
+
+	if (cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
+		pr_err("Shared memory buffers are not allowed to be mapped\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int cam_mem_util_map_hw_va(uint32_t flags,
+	int32_t *mmu_hdls,
+	int32_t num_hdls,
+	int fd,
+	dma_addr_t *hw_vaddr,
+	size_t *len,
+	enum cam_smmu_region_id region)
+{
+	int i;
+	int rc = -1;
+	int dir = cam_mem_util_get_dma_dir(flags);
+
+	if (dir < 0) {
+		pr_err("fail to map DMA direction\n");
+		return dir;
+	}
+
+	if (flags & CAM_MEM_FLAG_PROTECTED_MODE) {
+		for (i = 0; i < num_hdls; i++) {
+			rc = cam_smmu_map_sec_iova(mmu_hdls[i],
+				fd,
+				dir,
+				(dma_addr_t *)hw_vaddr,
+				len);
+
+			if (rc < 0) {
+				pr_err("Failed to securely map to smmu\n");
+				goto multi_map_fail;
+			}
+		}
+	} else {
+		for (i = 0; i < num_hdls; i++) {
+			rc = cam_smmu_map_iova(mmu_hdls[i],
+				fd,
+				dir,
+				(dma_addr_t *)hw_vaddr,
+				len,
+				region);
+
+			if (rc < 0) {
+				pr_err("Failed to map to smmu\n");
+				goto multi_map_fail;
+			}
+		}
+	}
+
+	return rc;
+multi_map_fail:
+	/* unwind every mapping created so far, including index 0 */
+	if (flags & CAM_MEM_FLAG_PROTECTED_MODE)
+		for (--i; i >= 0; i--)
+			cam_smmu_unmap_sec_iova(mmu_hdls[i], fd);
+	else
+		for (--i; i >= 0; i--)
+			cam_smmu_unmap_iova(mmu_hdls[i],
+				fd,
+				region);
+	return rc;
+
+}
+
+int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd)
+{
+	int rc;
+	int32_t idx;
+	struct ion_handle *ion_hdl;
+	int ion_fd;
+	dma_addr_t hw_vaddr = 0;
+	size_t len;
+
+	if (!cmd) {
+		pr_err("Invalid argument\n");
+		return -EINVAL;
+	}
+	len = cmd->len;
+
+	rc = cam_mem_util_check_flags(cmd);
+	if (rc) {
+		pr_err("Invalid flags: flags = %X\n", cmd->flags);
+		return rc;
+	}
+
+	rc = cam_mem_util_ion_alloc(cmd,
+		&ion_hdl,
+		&ion_fd);
+	if (rc) {
+		pr_err("Ion allocation failed\n");
+		return rc;
+	}
+
+	idx = cam_mem_get_slot();
+	if (idx < 0) {
+		rc = -ENOMEM;
+		goto slot_fail;
+	}
+
+	if (cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE ||
+		cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
+
+		enum cam_smmu_region_id region;
+
+		if (cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE)
+			region = CAM_SMMU_REGION_IO;
+
+		if (cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)
+			region = CAM_SMMU_REGION_SHARED;
+
+		rc = cam_mem_util_map_hw_va(cmd->flags,
+			cmd->mmu_hdls,
+			cmd->num_hdl,
+			ion_fd,
+			&hw_vaddr,
+			&len,
+			region);
+		if (rc)
+			goto map_hw_fail;
+	}
+
+	mutex_lock(&tbl.bufq[idx].q_lock);
+	tbl.bufq[idx].fd = ion_fd;
+	tbl.bufq[idx].flags = cmd->flags;
+	tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, ion_fd);
+	tbl.bufq[idx].kmdvaddr = 0;
+
+	if (cmd->num_hdl > 0)
+		tbl.bufq[idx].vaddr = hw_vaddr;
+	else
+		tbl.bufq[idx].vaddr = 0;
+
+	tbl.bufq[idx].i_hdl = ion_hdl;
+	tbl.bufq[idx].len = cmd->len;
+	tbl.bufq[idx].num_hdl = cmd->num_hdl;
+	memcpy(tbl.bufq[idx].hdls, cmd->mmu_hdls,
+		sizeof(int32_t) * cmd->num_hdl);
+	tbl.bufq[idx].is_imported = false;
+	mutex_unlock(&tbl.bufq[idx].q_lock);
+
+	cmd->out.buf_handle = tbl.bufq[idx].buf_handle;
+	cmd->out.fd = tbl.bufq[idx].fd;
+	cmd->out.vaddr = 0;
+
+	CDBG("buf handle: %x, fd: %d, len: %zu\n",
+		cmd->out.buf_handle, cmd->out.fd,
+		tbl.bufq[idx].len);
+
+	return rc;
+
+map_hw_fail:
+	cam_mem_put_slot(idx);
+slot_fail:
+	ion_free(tbl.client, ion_hdl);
+	return rc;
+}
+
+int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd)
+{
+	int32_t idx;
+	int rc;
+	struct ion_handle *ion_hdl;
+	dma_addr_t hw_vaddr = 0;
+	size_t len = 0;
+
+	if (!cmd || (cmd->fd < 0)) {
+		pr_err("Invalid argument\n");
+		return -EINVAL;
+	}
+
+	if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE)
+		return -EINVAL;
+
+	rc = cam_mem_util_check_map_flags(cmd);
+	if (rc) {
+		pr_err("Invalid flags: flags = %X\n", cmd->flags);
+		return rc;
+	}
+
+	ion_hdl = ion_import_dma_buf_fd(tbl.client, cmd->fd);
+	if (IS_ERR_OR_NULL((void *)(ion_hdl))) {
+		pr_err("Failed to import ion fd\n");
+		return -EINVAL;
+	}
+
+	if (cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE) {
+		rc = cam_mem_util_map_hw_va(cmd->flags,
+			cmd->mmu_hdls,
+			cmd->num_hdl,
+			cmd->fd,
+			&hw_vaddr,
+			&len,
+			CAM_SMMU_REGION_IO);
+		if (rc)
+			goto map_fail;
+	}
+
+	idx = cam_mem_get_slot();
+	if (idx < 0) {
+		rc = -ENOMEM;
+		goto map_fail;
+	}
+
+	mutex_lock(&tbl.bufq[idx].q_lock);
+	tbl.bufq[idx].fd = cmd->fd;
+	tbl.bufq[idx].flags = cmd->flags;
+	tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, cmd->fd);
+	tbl.bufq[idx].kmdvaddr = 0;
+
+	if (cmd->num_hdl > 0)
+		tbl.bufq[idx].vaddr = hw_vaddr;
+	else
+		tbl.bufq[idx].vaddr = 0;
+
+	tbl.bufq[idx].i_hdl = ion_hdl;
+	tbl.bufq[idx].len = len;
+	tbl.bufq[idx].num_hdl = cmd->num_hdl;
+	memcpy(tbl.bufq[idx].hdls, cmd->mmu_hdls,
+		sizeof(int32_t) * cmd->num_hdl);
+	tbl.bufq[idx].is_imported = true;
+	mutex_unlock(&tbl.bufq[idx].q_lock);
+
+	cmd->out.buf_handle = tbl.bufq[idx].buf_handle;
+	cmd->out.vaddr = 0;
+
+	return rc;
+
+map_fail:
+	ion_free(tbl.client, ion_hdl);
+	return rc;
+}
+
+static int cam_mem_util_unmap_hw_va(int32_t idx,
+	enum cam_smmu_region_id region)
+{
+	int i;
+	uint32_t flags;
+	int32_t *mmu_hdls;
+	int num_hdls;
+	int fd;
+	int rc = -EINVAL;
+
+	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
+		pr_err("Incorrect index\n");
+		return rc;
+	}
+
+	flags = tbl.bufq[idx].flags;
+	mmu_hdls = tbl.bufq[idx].hdls;
+	num_hdls = tbl.bufq[idx].num_hdl;
+	fd = tbl.bufq[idx].fd;
+
+	if (flags & CAM_MEM_FLAG_PROTECTED_MODE) {
+		for (i = 0; i < num_hdls; i++) {
+			rc = cam_smmu_unmap_sec_iova(mmu_hdls[i], fd);
+			if (rc < 0)
+				goto unmap_end;
+		}
+	} else {
+		for (i = 0; i < num_hdls; i++) {
+			rc = cam_smmu_unmap_iova(mmu_hdls[i],
+				fd,
+				region);
+			if (rc < 0)
+				goto unmap_end;
+		}
+	}
+
+unmap_end:
+	return rc;
+}
+
+static int cam_mem_util_unmap(int32_t idx)
+{
+	int rc = 0;
+	enum cam_smmu_region_id region;
+
+	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
+		pr_err("Incorrect index\n");
+		return -EINVAL;
+	}
+
+	CDBG("Flags = %X\n", tbl.bufq[idx].flags);
+
+	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS)
+		if (tbl.bufq[idx].i_hdl && tbl.bufq[idx].kmdvaddr)
+			ion_unmap_kernel(tbl.client, tbl.bufq[idx].i_hdl);
+
+	/*
+	 * Only unmap the HW VA when one was actually created; this also
+	 * keeps "region" from being used uninitialized for buffers that
+	 * were mapped for KMD access only.
+	 */
+	if (tbl.bufq[idx].flags & (CAM_MEM_FLAG_HW_READ_WRITE |
+		CAM_MEM_FLAG_HW_SHARED_ACCESS)) {
+		if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE)
+			region = CAM_SMMU_REGION_IO;
+
+		if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)
+			region = CAM_SMMU_REGION_SHARED;
+
+		rc = cam_mem_util_unmap_hw_va(idx, region);
+	}
+
+	mutex_lock(&tbl.bufq[idx].q_lock);
+	tbl.bufq[idx].flags = 0;
+	tbl.bufq[idx].buf_handle = -1;
+	tbl.bufq[idx].vaddr = 0;
+	memset(tbl.bufq[idx].hdls, 0,
+		sizeof(int32_t) * CAM_MEM_MMU_MAX_HANDLE);
+
+	CDBG("Ion handle at idx = %d freeing = %pK, fd = %d\n",
+		idx, tbl.bufq[idx].i_hdl, tbl.bufq[idx].fd);
+
+	if (tbl.bufq[idx].i_hdl && !tbl.bufq[idx].is_imported) {
+		CDBG("Freeing up non-imported buffer at fd = %d, hdl = %pK",
+			tbl.bufq[idx].fd,
+			tbl.bufq[idx].i_hdl);
+		ion_free(tbl.client, tbl.bufq[idx].i_hdl);
+		tbl.bufq[idx].i_hdl = NULL;
+	} else {
+		CDBG("Not freeing up imported buffer at fd = %d",
+			tbl.bufq[idx].fd);
+	}
+
+	tbl.bufq[idx].fd = -1;
+	tbl.bufq[idx].is_imported = false;
+	tbl.bufq[idx].i_hdl = NULL;
+	tbl.bufq[idx].len = 0;
+	tbl.bufq[idx].num_hdl = 0;
+	mutex_unlock(&tbl.bufq[idx].q_lock);
+	cam_mem_put_slot(idx);
+
+	return rc;
+}
+
+int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd)
+{
+	int idx;
+	int rc;
+
+	if (!cmd) {
+		pr_err("Invalid argument\n");
+		return -EINVAL;
+	}
+
+	idx = CAM_MEM_MGR_GET_HDL_IDX(cmd->buf_handle);
+	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
+		pr_err("Incorrect index extracted from mem handle\n");
+		return -EINVAL;
+	}
+
+	if (!tbl.bufq[idx].active) {
+		pr_err("Released buffer state should be active\n");
+		return -EINVAL;
+	}
+
+	if (tbl.bufq[idx].buf_handle != cmd->buf_handle) {
+		pr_err("Released buf handle not matching within table\n");
+		return -EINVAL;
+	}
+
+	CDBG("Releasing hdl = %u\n", cmd->buf_handle);
+	rc = cam_mem_util_unmap(idx);
+
+	return rc;
+}
+
+int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp,
+	struct cam_mem_mgr_memory_desc *out)
+{
+	struct ion_handle *hdl;
+	int ion_fd;
+	int rc = 0;
+	uint32_t heap_id;
+	int32_t ion_flag = 0;
+	uint64_t kvaddr;
+	dma_addr_t iova = 0;
+	size_t request_len = 0;
+	int32_t idx;
+	uint32_t mem_handle;
+	int32_t smmu_hdl = 0;
+	int32_t num_hdl = 0;
+	enum cam_smmu_region_id region;
+
+	if (!inp || !out) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	if (inp->region != CAM_MEM_MGR_REGION_SHARED &&
+		inp->region != CAM_MEM_MGR_REGION_NON_SECURE_IO) {
+		pr_err("Invalid flags for request mem\n");
+		return -EINVAL;
+	}
+
+	if (inp->flags & CAM_MEM_FLAG_CACHE)
+		ion_flag |= ION_FLAG_CACHED;
+	else
+		ion_flag &= ~ION_FLAG_CACHED;
+
+	heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID);
+
+	rc = cam_mem_util_get_ion_buffer(inp->size,
+		inp->align,
+		heap_id,
+		ion_flag,
+		&hdl,
+		&ion_fd);
+
+	if (rc) {
+		pr_err("ION alloc failed for shared buffer\n");
+		goto ion_fail;
+	} else {
+		CDBG("Got ION fd = %d, hdl = %pK\n", ion_fd, hdl);
+	}
+
+	rc = cam_mem_util_map_cpu_va(hdl, &kvaddr, &request_len);
+	if (rc) {
+		pr_err("Failed to get kernel vaddr\n");
+		goto map_fail;
+	}
+
+	if (!inp->smmu_hdl) {
+		pr_err("Invalid SMMU handle\n");
+		rc = -EINVAL;
+		goto smmu_fail;
+	}
+
+	if (inp->region == CAM_MEM_MGR_REGION_SHARED)
+		region = CAM_SMMU_REGION_SHARED;
+
+	if (inp->region == CAM_MEM_MGR_REGION_NON_SECURE_IO)
+		region = CAM_SMMU_REGION_IO;
+
+	rc = cam_smmu_map_iova(inp->smmu_hdl,
+		ion_fd,
+		CAM_SMMU_MAP_RW,
+		&iova,
+		&request_len,
+		region);
+
+	if (rc < 0) {
+		pr_err("SMMU mapping failed\n");
+		goto smmu_fail;
+	}
+
+	smmu_hdl = inp->smmu_hdl;
+	num_hdl = 1;
+
+	idx = cam_mem_get_slot();
+	if (idx < 0) {
+		rc = -ENOMEM;
+		goto slot_fail;
+	}
+
+	mutex_lock(&tbl.bufq[idx].q_lock);
+	mem_handle = GET_MEM_HANDLE(idx, ion_fd);
+	tbl.bufq[idx].fd = ion_fd;
+	tbl.bufq[idx].flags = inp->flags;
+	tbl.bufq[idx].buf_handle = mem_handle;
+	tbl.bufq[idx].kmdvaddr = kvaddr;
+
+	tbl.bufq[idx].vaddr = iova;
+
+	tbl.bufq[idx].i_hdl = hdl;
+	tbl.bufq[idx].len = inp->size;
+	tbl.bufq[idx].num_hdl = num_hdl;
+	memcpy(tbl.bufq[idx].hdls, &smmu_hdl,
+		sizeof(int32_t));
+	tbl.bufq[idx].is_imported = false;
+	mutex_unlock(&tbl.bufq[idx].q_lock);
+
+	out->kva = kvaddr;
+	out->iova = (uint32_t)iova;
+	out->smmu_hdl = smmu_hdl;
+	out->mem_handle = mem_handle;
+	out->len = inp->size;
+	out->region = inp->region;
+
+	return rc;
+slot_fail:
+	cam_smmu_unmap_iova(inp->smmu_hdl,
+		ion_fd,
+		region);
+smmu_fail:
+	ion_unmap_kernel(tbl.client, hdl);
+map_fail:
+	ion_free(tbl.client, hdl);
+ion_fail:
+	return rc;
+}
+EXPORT_SYMBOL(cam_mem_mgr_request_mem);
+
+int cam_mem_mgr_release_mem(struct cam_mem_mgr_memory_desc *inp)
+{
+	int32_t idx;
+	int rc;
+
+	if (!inp) {
+		pr_err("Invalid argument\n");
+		return -EINVAL;
+	}
+
+	idx = CAM_MEM_MGR_GET_HDL_IDX(inp->mem_handle);
+	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
+		pr_err("Incorrect index extracted from mem handle\n");
+		return -EINVAL;
+	}
+
+	if (!tbl.bufq[idx].active) {
+		pr_err("Released buffer state should be active\n");
+		return -EINVAL;
+	}
+
+	if (tbl.bufq[idx].buf_handle != inp->mem_handle) {
+		pr_err("Released buf handle not matching within table\n");
+		return -EINVAL;
+	}
+
+	CDBG("Releasing hdl = %X\n", inp->mem_handle);
+	rc = cam_mem_util_unmap(idx);
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_mem_mgr_release_mem);
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.h
new file mode 100644
index 0000000..c5f839b
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.h
@@ -0,0 +1,121 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_MEM_MGR_H_
+#define _CAM_MEM_MGR_H_
+
+#include <media/cam_req_mgr.h>
+#include "cam_mem_mgr_api.h"
+
+#define CAM_MEM_BUFQ_MAX 1024
+
+/**
+ * struct cam_mem_buf_queue
+ *
+ * @i_hdl:       ion handle for the buffer
+ * @q_lock:      mutex lock for buffer
+ * @hdls:        list of mapped handles
+ * @num_hdl:     number of handles
+ * @fd:          file descriptor of buffer
+ * @buf_handle:  unique handle for buffer
+ * @align:       alignment for allocation
+ * @len:         size of buffer
+ * @flags:       attributes of buffer
+ * @vaddr:       IOVA of buffer
+ * @kmdvaddr:    Kernel virtual address
+ * @active:      state of the buffer
+ * @is_imported: Flag indicating if buffer is imported from an FD in user space
+ */
+struct cam_mem_buf_queue {
+	struct ion_handle *i_hdl;
+	struct mutex q_lock;
+	int32_t hdls[CAM_MEM_MMU_MAX_HANDLE];
+	int32_t num_hdl;
+	int32_t fd;
+	int32_t buf_handle;
+	int32_t align;
+	size_t len;
+	uint32_t flags;
+	uint64_t vaddr;
+	uint64_t kmdvaddr;
+	bool active;
+	bool is_imported;
+};
+
+/**
+ * struct cam_mem_table
+ *
+ * @m_lock: mutex lock for table
+ * @bitmap: bitmap of the mem mgr utility
+ * @bits: max bits of the utility
+ * @client: ion client pointer
+ * @bufq: array of buffers
+ */
+struct cam_mem_table {
+	struct mutex m_lock;
+	void *bitmap;
+	size_t bits;
+	struct ion_client *client;
+	struct cam_mem_buf_queue bufq[CAM_MEM_BUFQ_MAX];
+};
+
+/**
+ * @brief: Allocates and maps buffer
+ *
+ * @cmd:   Allocation information
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd);
+
+/**
+ * @brief: Releases a buffer reference
+ *
+ * @cmd:   Buffer release information
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd);
+
+/**
+ * @brief Maps a buffer
+ *
+ * @cmd: Buffer mapping information
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd);
+
+/**
+ * @brief: Perform cache ops on the buffer
+ *
+ * @cmd:   Cache ops information
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_cache_ops(struct cam_mem_cache_ops_cmd *cmd);
+
+/**
+ * @brief: Initializes the memory manager
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_init(void);
+
+/**
+ * @brief:  Tears down the memory manager
+ *
+ * @return None
+ */
+void cam_mem_mgr_deinit(void);
+
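+/*
+ * Illustrative call sequence (a sketch, not normative): the request
+ * manager device initializes this module once, the buffer ioctls then
+ * drive allocation and release, and teardown reverses init.
+ *
+ *	rc = cam_mem_mgr_init();
+ *	rc = cam_mem_mgr_alloc_and_map(&alloc_cmd);
+ *	rc = cam_mem_mgr_release(&release_cmd);
+ *	cam_mem_mgr_deinit();
+ */
+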
+#endif /* _CAM_MEM_MGR_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h
new file mode 100644
index 0000000..32a754e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h
@@ -0,0 +1,105 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_MEM_MGR_API_H_
+#define _CAM_MEM_MGR_API_H_
+
+#include <media/cam_req_mgr.h>
+
+/* Region IDs for memory manager */
+#define CAM_MEM_MGR_REGION_FIRMWARE      0
+#define CAM_MEM_MGR_REGION_SHARED        1
+#define CAM_MEM_MGR_REGION_NON_SECURE_IO 2
+#define CAM_MEM_MGR_REGION_SECURE_IO     3
+#define CAM_MEM_MGR_REGION_SCRATCH       4
+
+/**
+ * struct cam_mem_mgr_request_desc
+ *
+ * @size    : Size of memory requested for allocation
+ * @align   : Alignment of requested memory
+ * @smmu_hdl: SMMU handle to identify context bank where memory will be mapped
+ * @flags   : Flags to indicate cached/uncached property
+ * @region  : Region where memory should be allocated
+ */
+struct cam_mem_mgr_request_desc {
+	uint64_t size;
+	uint64_t align;
+	int32_t smmu_hdl;
+	uint32_t flags;
+	uint32_t region;
+};
+
+/**
+ * struct cam_mem_mgr_memory_desc
+ *
+ * @kva        : Kernel virtual address of allocated memory
+ * @iova       : IOVA of allocated memory
+ * @smmu_hdl   : SMMU handle of allocated memory
+ * @mem_handle : Mem handle identifying allocated memory
+ * @len        : Length of allocated memory
+ * @region     : Region to which allocated memory belongs
+ */
+struct cam_mem_mgr_memory_desc {
+	uint64_t kva;
+	uint32_t iova;
+	int32_t smmu_hdl;
+	uint32_t mem_handle;
+	uint64_t len;
+	uint32_t region;
+};
+
+/**
+ * @brief: Requests a memory buffer
+ *
+ * @inp:   Information specifying requested buffer properties
+ * @out:   Information about allocated buffer
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp,
+	struct cam_mem_mgr_memory_desc *out);
+
+/**
+ * @brief: Releases a memory buffer
+ *
+ * @inp:   Information specifying buffer to be released
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_release_mem(struct cam_mem_mgr_memory_desc *inp);
+
+/**
+ * @brief: Returns IOVA information about buffer
+ *
+ * @buf_handle: Handle of the buffer
+ * @mmu_handle: SMMU handle where buffer is mapped
+ * @iova_ptr  : Pointer to mmu's iova
+ * @len_ptr   : Length of the buffer
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_get_io_buf(int32_t buf_handle, int32_t mmu_handle,
+	uint64_t *iova_ptr, size_t *len_ptr);
+
+/**
+ * @brief: Returns CPU address information about buffer
+ *
+ * @buf_handle: Handle for the buffer
+ * @vaddr_ptr : pointer to kernel virtual address
+ * @len_ptr   : Length of the buffer
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_get_cpu_buf(int32_t buf_handle, uint64_t *vaddr_ptr,
+	size_t *len);
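+
+/*
+ * Illustrative sketch of a kernel client (assumptions: smmu_hdl is a
+ * valid handle already obtained from the cam_smmu driver; the sizes
+ * below are examples only):
+ *
+ *	struct cam_mem_mgr_request_desc desc = {
+ *		.size = 0x100000,
+ *		.align = 0x1000,
+ *		.smmu_hdl = smmu_hdl,
+ *		.flags = CAM_MEM_FLAG_CACHE,
+ *		.region = CAM_MEM_MGR_REGION_SHARED,
+ *	};
+ *	struct cam_mem_mgr_memory_desc mem;
+ *
+ *	if (!cam_mem_mgr_request_mem(&desc, &mem))
+ *		use mem.kva/mem.iova, then cam_mem_mgr_release_mem(&mem);
+ */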
+
+#endif /* _CAM_MEM_MGR_API_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
index f3af1bd..43b020c6 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -24,6 +24,7 @@
 #include "cam_req_mgr_util.h"
 #include "cam_req_mgr_core.h"
 #include "cam_subdev.h"
+#include "cam_mem_mgr.h"
 
 #define CAM_REQ_MGR_EVENT_MAX 30
 
@@ -115,7 +116,18 @@
 	spin_unlock_bh(&g_dev.cam_eventq_lock);
 
 	g_dev.open_cnt++;
+	rc = cam_mem_mgr_init();
+	if (rc) {
+		g_dev.open_cnt--;
+		pr_err("mem mgr init failed\n");
+		goto mem_mgr_init_fail;
+	}
 
+	mutex_unlock(&g_dev.cam_lock);
+	return rc;
+
+mem_mgr_init_fail:
+	v4l2_fh_release(filep);
 end:
 	mutex_unlock(&g_dev.cam_lock);
 	return rc;
@@ -154,6 +166,7 @@
 	spin_unlock_bh(&g_dev.cam_eventq_lock);
 
 	cam_req_mgr_util_free_hdls();
+	cam_mem_mgr_deinit();
 	mutex_unlock(&g_dev.cam_lock);
 
 	return 0;
@@ -316,6 +329,84 @@
 		rc = cam_req_mgr_sync_mode(&sync_mode);
 		}
 		break;
+	case CAM_REQ_MGR_ALLOC_BUF: {
+		struct cam_mem_mgr_alloc_cmd cmd;
+
+		if (k_ioctl->size != sizeof(cmd))
+			return -EINVAL;
+
+		if (copy_from_user(&cmd,
+			(void *)k_ioctl->handle,
+			k_ioctl->size)) {
+			rc = -EFAULT;
+			break;
+		}
+
+		rc = cam_mem_mgr_alloc_and_map(&cmd);
+		if (!rc)
+			if (copy_to_user((void *)k_ioctl->handle,
+				&cmd, k_ioctl->size)) {
+				rc = -EFAULT;
+				break;
+			}
+		}
+		break;
+	case CAM_REQ_MGR_MAP_BUF: {
+		struct cam_mem_mgr_map_cmd cmd;
+
+		if (k_ioctl->size != sizeof(cmd))
+			return -EINVAL;
+
+		if (copy_from_user(&cmd,
+			(void *)k_ioctl->handle,
+			k_ioctl->size)) {
+			rc = -EFAULT;
+			break;
+		}
+
+		rc = cam_mem_mgr_map(&cmd);
+		if (!rc)
+			if (copy_to_user((void *)k_ioctl->handle,
+				&cmd, k_ioctl->size)) {
+				rc = -EFAULT;
+				break;
+			}
+		}
+		break;
+	case CAM_REQ_MGR_RELEASE_BUF: {
+		struct cam_mem_mgr_release_cmd cmd;
+
+		if (k_ioctl->size != sizeof(cmd))
+			return -EINVAL;
+
+		if (copy_from_user(&cmd,
+			(void *)k_ioctl->handle,
+			k_ioctl->size)) {
+			rc = -EFAULT;
+			break;
+		}
+
+		rc = cam_mem_mgr_release(&cmd);
+		}
+		break;
+	case CAM_REQ_MGR_CACHE_OPS: {
+		struct cam_mem_cache_ops_cmd cmd;
+
+		if (k_ioctl->size != sizeof(cmd))
+			return -EINVAL;
+
+		if (copy_from_user(&cmd,
+			(void *)k_ioctl->handle,
+			k_ioctl->size)) {
+			rc = -EFAULT;
+			break;
+		}
+
+		rc = cam_mem_mgr_cache_ops(&cmd);
+		if (rc)
+			rc = -EINVAL;
+		}
+		break;
 	default:
 		return -ENOIOCTLCMD;
 	}
@@ -444,6 +535,7 @@
 static int cam_req_mgr_remove(struct platform_device *pdev)
 {
 	cam_req_mgr_core_device_deinit();
+	cam_mem_mgr_deinit();
 	cam_req_mgr_util_deinit();
 	cam_media_device_cleanup();
 	cam_video_device_cleanup();
@@ -482,6 +574,12 @@
 		goto req_mgr_util_fail;
 	}
 
+	rc = cam_mem_mgr_init();
+	if (rc) {
+		pr_err("mem mgr init failed\n");
+		goto mem_mgr_init_fail;
+	}
+
 	rc = cam_req_mgr_core_device_init();
 	if (rc) {
 		pr_err("core device setup failed\n");
@@ -493,8 +591,12 @@
 	return rc;
 
 req_mgr_core_fail:
+	cam_mem_mgr_deinit();
+mem_mgr_init_fail:
 	cam_req_mgr_util_deinit();
 req_mgr_util_fail:
+	mutex_destroy(&g_dev.dev_lock);
+	mutex_destroy(&g_dev.cam_lock);
 	cam_video_device_cleanup();
 video_setup_fail:
 	cam_media_device_cleanup();
diff --git a/drivers/media/platform/msm/camera/cam_smmu/Makefile b/drivers/media/platform/msm/camera/cam_smmu/Makefile
new file mode 100644
index 0000000..3619da7
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_smmu/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_smmu_api.o
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
new file mode 100644
index 0000000..f4215b5
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
@@ -0,0 +1,2284 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-SMMU %s:%d " fmt, __func__, __LINE__
+
+#include <linux/module.h>
+#include <linux/dma-buf.h>
+#include <asm/dma-iommu.h>
+#include <linux/dma-direction.h>
+#include <linux/of_platform.h>
+#include <linux/iommu.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/msm_dma_iommu_mapping.h>
+#include <linux/workqueue.h>
+#include <linux/genalloc.h>
+
+#include "cam_smmu_api.h"
+
+#define SHARED_MEM_POOL_GRANULARITY 12
+
+#define IOMMU_INVALID_DIR -1
+#define BYTE_SIZE 8
+#define COOKIE_NUM_BYTE 2
+#define COOKIE_SIZE (BYTE_SIZE*COOKIE_NUM_BYTE)
+#define COOKIE_MASK ((1<<COOKIE_SIZE)-1)
+#define HANDLE_INIT (-1)
+#define CAM_SMMU_CB_MAX 2
+
+#define GET_SMMU_HDL(x, y) (((x) << COOKIE_SIZE) | ((y) & COOKIE_MASK))
+#define GET_SMMU_TABLE_IDX(x) (((x) >> COOKIE_SIZE) & COOKIE_MASK)
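+
+/*
+ * Handle layout example (illustrative): with COOKIE_SIZE = 16,
+ * GET_SMMU_HDL(1, 0xBEEF) = (1 << 16) | 0xBEEF = 0x1BEEF, and
+ * GET_SMMU_TABLE_IDX(0x1BEEF) = 1, so the context bank table index
+ * round-trips while the low two bytes carry a random cookie.
+ */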
+
+#ifdef CONFIG_CAM_SMMU_DBG
+#define CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+struct firmware_alloc_info {
+	struct device *fw_dev;
+	void *fw_kva;
+	dma_addr_t fw_dma_hdl;
+};
+
+struct firmware_alloc_info icp_fw;
+
+struct cam_smmu_work_payload {
+	int idx;
+	struct iommu_domain *domain;
+	struct device *dev;
+	unsigned long iova;
+	int flags;
+	void *token;
+	struct list_head list;
+};
+
+enum cam_protection_type {
+	CAM_PROT_INVALID,
+	CAM_NON_SECURE,
+	CAM_SECURE,
+	CAM_PROT_MAX,
+};
+
+enum cam_iommu_type {
+	CAM_SMMU_INVALID,
+	CAM_QSMMU,
+	CAM_ARM_SMMU,
+	CAM_SMMU_MAX,
+};
+
+enum cam_smmu_buf_state {
+	CAM_SMMU_BUFF_EXIST,
+	CAM_SMMU_BUFF_NOT_EXIST
+};
+
+enum cam_smmu_init_dir {
+	CAM_SMMU_TABLE_INIT,
+	CAM_SMMU_TABLE_DEINIT,
+};
+
+struct scratch_mapping {
+	void *bitmap;
+	size_t bits;
+	unsigned int order;
+	dma_addr_t base;
+};
+
+struct cam_smmu_region_info {
+	dma_addr_t iova_start;
+	size_t iova_len;
+};
+
+struct cam_context_bank_info {
+	struct device *dev;
+	struct dma_iommu_mapping *mapping;
+	dma_addr_t va_start;
+	size_t va_len;
+	const char *name;
+	bool is_secure;
+	uint8_t scratch_buf_support;
+	uint8_t firmware_support;
+	uint8_t shared_support;
+	uint8_t io_support;
+	bool is_fw_allocated;
+
+	struct scratch_mapping scratch_map;
+	struct gen_pool *shared_mem_pool;
+
+	struct cam_smmu_region_info scratch_info;
+	struct cam_smmu_region_info firmware_info;
+	struct cam_smmu_region_info shared_info;
+	struct cam_smmu_region_info io_info;
+
+	struct list_head smmu_buf_list;
+	struct mutex lock;
+	int handle;
+	enum cam_smmu_ops_param state;
+
+	void (*handler[CAM_SMMU_CB_MAX])(struct iommu_domain *,
+		struct device *, unsigned long,
+		int, void*);
+	void *token[CAM_SMMU_CB_MAX];
+	int cb_count;
+};
+
+struct cam_iommu_cb_set {
+	struct cam_context_bank_info *cb_info;
+	u32 cb_num;
+	u32 cb_init_count;
+	struct work_struct smmu_work;
+	struct mutex payload_list_lock;
+	struct list_head payload_list;
+};
+
+static const struct of_device_id msm_cam_smmu_dt_match[] = {
+	{ .compatible = "qcom,msm-cam-smmu", },
+	{ .compatible = "qcom,msm-cam-smmu-cb", },
+	{ .compatible = "qcom,msm-cam-smmu-fw-dev", },
+	{}
+};
+
+struct cam_dma_buff_info {
+	struct dma_buf *buf;
+	struct dma_buf_attachment *attach;
+	struct sg_table *table;
+	enum dma_data_direction dir;
+	enum cam_smmu_region_id region_id;
+	int iommu_dir;
+	int ref_count;
+	dma_addr_t paddr;
+	struct list_head list;
+	int ion_fd;
+	size_t len;
+	size_t phys_len;
+};
+
+static struct cam_iommu_cb_set iommu_cb_set;
+
+static enum dma_data_direction cam_smmu_translate_dir(
+	enum cam_smmu_map_dir dir);
+
+static int cam_smmu_check_handle_unique(int hdl);
+
+static int cam_smmu_create_iommu_handle(int idx);
+
+static int cam_smmu_create_add_handle_in_table(char *name,
+	int *hdl);
+
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_ion_index(int idx,
+	int ion_fd);
+
+static int cam_smmu_init_scratch_map(struct scratch_mapping *scratch_map,
+	dma_addr_t base, size_t size,
+	int order);
+
+static int cam_smmu_alloc_scratch_va(struct scratch_mapping *mapping,
+	size_t size,
+	dma_addr_t *iova);
+
+static int cam_smmu_free_scratch_va(struct scratch_mapping *mapping,
+	dma_addr_t addr, size_t size);
+
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_virt_address(int idx,
+	dma_addr_t virt_addr);
+
+static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
+	enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
+	size_t *len_ptr,
+	enum cam_smmu_region_id region_id);
+
+static int cam_smmu_alloc_scratch_buffer_add_to_list(int idx,
+	size_t virt_len,
+	size_t phys_len,
+	unsigned int iommu_dir,
+	dma_addr_t *virt_addr);
+
+static int cam_smmu_unmap_buf_and_remove_from_list(
+	struct cam_dma_buff_info *mapping_info, int idx);
+
+static int cam_smmu_free_scratch_buffer_remove_from_list(
+	struct cam_dma_buff_info *mapping_info,
+	int idx);
+
+static void cam_smmu_clean_buffer_list(int idx);
+
+static void cam_smmu_print_list(int idx);
+
+static void cam_smmu_print_table(void);
+
+static int cam_smmu_probe(struct platform_device *pdev);
+
+static void cam_smmu_check_vaddr_in_range(int idx, void *vaddr);
+
+static void cam_smmu_page_fault_work(struct work_struct *work)
+{
+	int j;
+	int idx;
+	struct cam_smmu_work_payload *payload;
+
+	mutex_lock(&iommu_cb_set.payload_list_lock);
+	if (list_empty(&iommu_cb_set.payload_list)) {
+		pr_err("Payload list empty\n");
+		mutex_unlock(&iommu_cb_set.payload_list_lock);
+		return;
+	}
+
+	payload = list_first_entry(&iommu_cb_set.payload_list,
+		struct cam_smmu_work_payload,
+		list);
+	list_del(&payload->list);
+	mutex_unlock(&iommu_cb_set.payload_list_lock);
+
+	/* Dereference the payload to call the handler */
+	idx = payload->idx;
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	cam_smmu_check_vaddr_in_range(idx, (void *)payload->iova);
+	for (j = 0; j < CAM_SMMU_CB_MAX; j++) {
+		if ((iommu_cb_set.cb_info[idx].handler[j])) {
+			iommu_cb_set.cb_info[idx].handler[j](
+				payload->domain,
+				payload->dev,
+				payload->iova,
+				payload->flags,
+				iommu_cb_set.cb_info[idx].token[j]);
+		}
+	}
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	kfree(payload);
+}
+
+static void cam_smmu_print_list(int idx)
+{
+	struct cam_dma_buff_info *mapping;
+
+	pr_err("index = %d\n", idx);
+	list_for_each_entry(mapping,
+		&iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
+		pr_err("ion_fd = %d, paddr= 0x%pK, len = %u, region = %d\n",
+			 mapping->ion_fd, (void *)mapping->paddr,
+			 (unsigned int)mapping->len,
+			 mapping->region_id);
+	}
+}
+
+static void cam_smmu_print_table(void)
+{
+	int i;
+
+	for (i = 0; i < iommu_cb_set.cb_num; i++) {
+		pr_err("i= %d, handle= %d, name_addr=%pK\n", i,
+			   (int)iommu_cb_set.cb_info[i].handle,
+			   (void *)iommu_cb_set.cb_info[i].name);
+		pr_err("dev = %pK\n", iommu_cb_set.cb_info[i].dev);
+	}
+}
+
+static void cam_smmu_check_vaddr_in_range(int idx, void *vaddr)
+{
+	struct cam_dma_buff_info *mapping;
+	unsigned long start_addr, end_addr, current_addr;
+
+	current_addr = (unsigned long)vaddr;
+	list_for_each_entry(mapping,
+			&iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
+		start_addr = (unsigned long)mapping->paddr;
+		end_addr = (unsigned long)mapping->paddr + mapping->len;
+
+		if (start_addr <= current_addr && current_addr < end_addr) {
+			pr_err("va %pK valid: range:%pK-%pK, fd = %d cb: %s\n",
+				vaddr, (void *)start_addr, (void *)end_addr,
+				mapping->ion_fd,
+				iommu_cb_set.cb_info[idx].name);
+			goto end;
+		} else {
+			CDBG("va %pK is not in this range: %pK-%pK, fd = %d\n",
+				vaddr, (void *)start_addr, (void *)end_addr,
+				mapping->ion_fd);
+		}
+	}
+	pr_err("Cannot find vaddr %pK in SMMU; %s used an invalid virt address\n",
+		vaddr, iommu_cb_set.cb_info[idx].name);
+end:
+	return;
+}
+
+void cam_smmu_reg_client_page_fault_handler(int handle,
+	void (*client_page_fault_handler)(struct iommu_domain *,
+	struct device *, unsigned long,
+	int, void*), void *token)
+{
+	int idx, i = 0;
+
+	if (!token || (handle == HANDLE_INIT)) {
+		pr_err("Error: token is NULL or invalid handle\n");
+		return;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+			idx, handle);
+		return;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+		return;
+	}
+
+	if (client_page_fault_handler) {
+		if (iommu_cb_set.cb_info[idx].cb_count == CAM_SMMU_CB_MAX) {
+			pr_err("%s: cannot register more handlers\n",
+				iommu_cb_set.cb_info[idx].name);
+			mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+			return;
+		}
+		iommu_cb_set.cb_info[idx].cb_count++;
+		for (i = 0; i < iommu_cb_set.cb_info[idx].cb_count; i++) {
+			if (iommu_cb_set.cb_info[idx].token[i] == NULL) {
+				iommu_cb_set.cb_info[idx].token[i] = token;
+				iommu_cb_set.cb_info[idx].handler[i] =
+					client_page_fault_handler;
+				break;
+			}
+		}
+	} else {
+		for (i = 0; i < CAM_SMMU_CB_MAX; i++) {
+			if (iommu_cb_set.cb_info[idx].token[i] == token) {
+				iommu_cb_set.cb_info[idx].token[i] = NULL;
+				iommu_cb_set.cb_info[idx].handler[i] =
+					NULL;
+				iommu_cb_set.cb_info[idx].cb_count--;
+				break;
+			}
+		}
+		if (i == CAM_SMMU_CB_MAX)
+			pr_err("Error: hdl %x no matching tokens: %s\n",
+				handle, iommu_cb_set.cb_info[idx].name);
+	}
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+}
+
+static int cam_smmu_iommu_fault_handler(struct iommu_domain *domain,
+	struct device *dev, unsigned long iova,
+	int flags, void *token)
+{
+	char *cb_name;
+	int idx;
+	struct cam_smmu_work_payload *payload;
+
+	if (!token) {
+		pr_err("Error: token is NULL\n");
+		pr_err("Error: domain = %pK, device = %pK\n", domain, dev);
+		pr_err("iova = %lX, flags = %d\n", iova, flags);
+		return 0;
+	}
+
+	cb_name = (char *)token;
+	/* Check whether it is in the table */
+	for (idx = 0; idx < iommu_cb_set.cb_num; idx++) {
+		if (!strcmp(iommu_cb_set.cb_info[idx].name, cb_name))
+			break;
+	}
+
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		pr_err("Error: index is not valid, index = %d, token = %s\n",
+			idx, cb_name);
+		return 0;
+	}
+
+	payload = kzalloc(sizeof(struct cam_smmu_work_payload), GFP_ATOMIC);
+	if (!payload)
+		return 0;
+
+	payload->domain = domain;
+	payload->dev = dev;
+	payload->iova = iova;
+	payload->flags = flags;
+	payload->token = token;
+	payload->idx = idx;
+
+	mutex_lock(&iommu_cb_set.payload_list_lock);
+	list_add_tail(&payload->list, &iommu_cb_set.payload_list);
+	mutex_unlock(&iommu_cb_set.payload_list_lock);
+
+	schedule_work(&iommu_cb_set.smmu_work);
+
+	return 0;
+}
+
+static int cam_smmu_translate_dir_to_iommu_dir(
+	enum cam_smmu_map_dir dir)
+{
+	switch (dir) {
+	case CAM_SMMU_MAP_READ:
+		return IOMMU_READ;
+	case CAM_SMMU_MAP_WRITE:
+		return IOMMU_WRITE;
+	case CAM_SMMU_MAP_RW:
+		return IOMMU_READ|IOMMU_WRITE;
+	case CAM_SMMU_MAP_INVALID:
+	default:
+		pr_err("Error: Direction is invalid. dir = %d\n", dir);
+		break;
+	};
+	return IOMMU_INVALID_DIR;
+}
+
+static enum dma_data_direction cam_smmu_translate_dir(
+	enum cam_smmu_map_dir dir)
+{
+	switch (dir) {
+	case CAM_SMMU_MAP_READ:
+		return DMA_FROM_DEVICE;
+	case CAM_SMMU_MAP_WRITE:
+		return DMA_TO_DEVICE;
+	case CAM_SMMU_MAP_RW:
+		return DMA_BIDIRECTIONAL;
+	case CAM_SMMU_MAP_INVALID:
+	default:
+		pr_err("Error: Direction is invalid. dir = %d\n", (int)dir);
+		break;
+	}
+	return DMA_NONE;
+}
+
+void cam_smmu_reset_iommu_table(enum cam_smmu_init_dir ops)
+{
+	unsigned int i;
+	int j = 0;
+
+	for (i = 0; i < iommu_cb_set.cb_num; i++) {
+		iommu_cb_set.cb_info[i].handle = HANDLE_INIT;
+		INIT_LIST_HEAD(&iommu_cb_set.cb_info[i].smmu_buf_list);
+		iommu_cb_set.cb_info[i].state = CAM_SMMU_DETACH;
+		iommu_cb_set.cb_info[i].dev = NULL;
+		iommu_cb_set.cb_info[i].cb_count = 0;
+		for (j = 0; j < CAM_SMMU_CB_MAX; j++) {
+			iommu_cb_set.cb_info[i].token[j] = NULL;
+			iommu_cb_set.cb_info[i].handler[j] = NULL;
+		}
+		if (ops == CAM_SMMU_TABLE_INIT)
+			mutex_init(&iommu_cb_set.cb_info[i].lock);
+		else
+			mutex_destroy(&iommu_cb_set.cb_info[i].lock);
+	}
+}
+
+static int cam_smmu_check_handle_unique(int hdl)
+{
+	int i;
+
+	if (hdl == HANDLE_INIT) {
+		CDBG("iommu handle is the init value; need to retry\n");
+		return 1;
+	}
+
+	for (i = 0; i < iommu_cb_set.cb_num; i++) {
+		if (iommu_cb_set.cb_info[i].handle == HANDLE_INIT)
+			continue;
+
+		if (iommu_cb_set.cb_info[i].handle == hdl) {
+			CDBG("iommu handle %d conflicts\n", (int)hdl);
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Use the low two bytes of the handle as a random cookie; the upper
+ * bytes carry the context bank table index.
+ */
+static int cam_smmu_create_iommu_handle(int idx)
+{
+	int rand, hdl = 0;
+
+	get_random_bytes(&rand, COOKIE_NUM_BYTE);
+	hdl = GET_SMMU_HDL(idx, rand);
+	CDBG("create handle value = %x\n", (int)hdl);
+	return hdl;
+}
+
+static int cam_smmu_attach_device(int idx)
+{
+	int rc;
+	struct cam_context_bank_info *cb = &iommu_cb_set.cb_info[idx];
+
+	/* attach the mapping to device */
+	rc = arm_iommu_attach_device(cb->dev, cb->mapping);
+	if (rc < 0) {
+		pr_err("Error: ARM IOMMU attach failed. ret = %d\n", rc);
+		rc = -ENODEV;
+	}
+
+	return rc;
+}
+
+static int cam_smmu_create_add_handle_in_table(char *name,
+	int *hdl)
+{
+	int i;
+	int handle;
+
+	/* create handle and add in the iommu hardware table */
+	for (i = 0; i < iommu_cb_set.cb_num; i++) {
+		if (!strcmp(iommu_cb_set.cb_info[i].name, name)) {
+			mutex_lock(&iommu_cb_set.cb_info[i].lock);
+			if (iommu_cb_set.cb_info[i].handle != HANDLE_INIT) {
+				pr_err("Error: %s already got handle 0x%x\n",
+					name,
+					iommu_cb_set.cb_info[i].handle);
+				mutex_unlock(&iommu_cb_set.cb_info[i].lock);
+				return -EINVAL;
+			}
+
+			/* make sure handle is unique */
+			do {
+				handle = cam_smmu_create_iommu_handle(i);
+			} while (cam_smmu_check_handle_unique(handle));
+
+			/* put handle in the table */
+			iommu_cb_set.cb_info[i].handle = handle;
+			iommu_cb_set.cb_info[i].cb_count = 0;
+			*hdl = handle;
+			CDBG("%s creates handle 0x%x\n", name, handle);
+			mutex_unlock(&iommu_cb_set.cb_info[i].lock);
+			return 0;
+		}
+	}
+
+	pr_err("Error: Cannot find name %s or all handles already exist\n",
+			name);
+	cam_smmu_print_table();
+	return -EINVAL;
+}
+
+static int cam_smmu_init_scratch_map(struct scratch_mapping *scratch_map,
+					dma_addr_t base, size_t size,
+					int order)
+{
+	unsigned int count = size >> (PAGE_SHIFT + order);
+	unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+	int err = 0;
+
+	if (!count) {
+		err = -EINVAL;
+		pr_err("Page count is zero, size passed = %zu\n", size);
+		goto bail;
+	}
+
+	scratch_map->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+	if (!scratch_map->bitmap) {
+		err = -ENOMEM;
+		goto bail;
+	}
+
+	scratch_map->base = base;
+	scratch_map->bits = BITS_PER_BYTE * bitmap_size;
+	scratch_map->order = order;
+
+bail:
+	return err;
+}
+
+static int cam_smmu_alloc_scratch_va(struct scratch_mapping *mapping,
+	size_t size,
+	dma_addr_t *iova)
+{
+	unsigned int order = get_order(size);
+	unsigned int align = 0;
+	unsigned int count, start;
+
+	count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
+		 (1 << mapping->order) - 1) >> mapping->order;
+
+	/*
+	 * Transparently, add a guard page to the total count of pages
+	 * to be allocated
+	 */
+	count++;
+
+	if (order > mapping->order)
+		align = (1 << (order - mapping->order)) - 1;
+
+	start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
+					   count, align);
+
+	if (start > mapping->bits)
+		return -ENOMEM;
+
+	bitmap_set(mapping->bitmap, start, count);
+	*iova = mapping->base + (start << (mapping->order + PAGE_SHIFT));
+
+	return 0;
+}
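+
+/*
+ * Sizing example for the scratch allocator above (illustrative): for a
+ * 12K request with 4K pages and mapping->order = 0,
+ * PAGE_ALIGN(size) >> PAGE_SHIFT = 3, so count = 3 + 1 = 4 bits are
+ * set in the bitmap, the extra bit being the transparent guard page.
+ */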
+
+static int cam_smmu_free_scratch_va(struct scratch_mapping *mapping,
+	dma_addr_t addr, size_t size)
+{
+	unsigned int start = (addr - mapping->base) >>
+			     (mapping->order + PAGE_SHIFT);
+	unsigned int count = ((size >> PAGE_SHIFT) +
+			      (1 << mapping->order) - 1) >> mapping->order;
+
+	if (!addr) {
+		pr_err("Error: Invalid address\n");
+		return -EINVAL;
+	}
+
+	if (start + count > mapping->bits) {
+		pr_err("Error: Invalid page bits in scratch map\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Transparently, add a guard page to the total count of pages
+	 * to be freed
+	 */
+	count++;
+	bitmap_clear(mapping->bitmap, start, count);
+
+	return 0;
+}
+
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_virt_address(int idx,
+	dma_addr_t virt_addr)
+{
+	struct cam_dma_buff_info *mapping;
+
+	list_for_each_entry(mapping, &iommu_cb_set.cb_info[idx].smmu_buf_list,
+			list) {
+		if (mapping->paddr == virt_addr) {
+			CDBG("Found virtual address %lx\n",
+				 (unsigned long)virt_addr);
+			return mapping;
+		}
+	}
+
+	pr_err("Error: Cannot find virtual address %lx by index %d\n",
+		(unsigned long)virt_addr, idx);
+	return NULL;
+}
+
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_ion_index(int idx,
+	int ion_fd)
+{
+	struct cam_dma_buff_info *mapping;
+
+	list_for_each_entry(mapping, &iommu_cb_set.cb_info[idx].smmu_buf_list,
+			list) {
+		if (mapping->ion_fd == ion_fd) {
+			CDBG("found ion_fd %d\n", ion_fd);
+			return mapping;
+		}
+	}
+
+	pr_err("Error: Cannot find fd %d by index %d\n",
+		ion_fd, idx);
+	return NULL;
+}
+
+static void cam_smmu_clean_buffer_list(int idx)
+{
+	int ret;
+	struct cam_dma_buff_info *mapping_info, *temp;
+
+	list_for_each_entry_safe(mapping_info, temp,
+			&iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
+		CDBG("Free mapping address %pK, i = %d, fd = %d\n",
+			(void *)mapping_info->paddr, idx,
+			mapping_info->ion_fd);
+
+		if (mapping_info->ion_fd == 0xDEADBEEF)
+			/* Clean up scratch buffers */
+			ret = cam_smmu_free_scratch_buffer_remove_from_list(
+							mapping_info, idx);
+		else
+			/* Clean up regular mapped buffers */
+			ret = cam_smmu_unmap_buf_and_remove_from_list(
+					mapping_info,
+					idx);
+
+		if (ret < 0) {
+			pr_err("Buffer delete failed: idx = %d\n", idx);
+			pr_err("Buffer delete failed: addr = %lx, fd = %d\n",
+					(unsigned long)mapping_info->paddr,
+					mapping_info->ion_fd);
+			/*
+			 * Ignore this error and continue to delete other
+			 * buffers in the list
+			 */
+			continue;
+		}
+	}
+}
+
+static int cam_smmu_attach(int idx)
+{
+	int ret;
+
+	if (iommu_cb_set.cb_info[idx].state == CAM_SMMU_ATTACH) {
+		ret = -EALREADY;
+	} else if (iommu_cb_set.cb_info[idx].state == CAM_SMMU_DETACH) {
+		ret = cam_smmu_attach_device(idx);
+		if (ret < 0) {
+			pr_err("Error: ATTACH fail\n");
+			return -ENODEV;
+		}
+		iommu_cb_set.cb_info[idx].state = CAM_SMMU_ATTACH;
+		ret = 0;
+	} else {
+		pr_err("Error: Not detach/attach: %d\n",
+			iommu_cb_set.cb_info[idx].state);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int cam_smmu_detach_device(int idx)
+{
+	int rc = 0;
+	struct cam_context_bank_info *cb = &iommu_cb_set.cb_info[idx];
+
+	/* detach the mapping to device if not already detached */
+	if (iommu_cb_set.cb_info[idx].state == CAM_SMMU_DETACH) {
+		rc = -EALREADY;
+	} else if (iommu_cb_set.cb_info[idx].state == CAM_SMMU_ATTACH) {
+		arm_iommu_detach_device(cb->dev);
+		iommu_cb_set.cb_info[idx].state = CAM_SMMU_DETACH;
+	}
+
+	return rc;
+}
+
+static int cam_smmu_alloc_iova(size_t size,
+	int32_t smmu_hdl, uint32_t *iova)
+{
+	int rc = 0;
+	int idx;
+	uint32_t vaddr = 0;
+
+	if (!iova || !size || (smmu_hdl == HANDLE_INIT)) {
+		pr_err("Error: Input args are invalid\n");
+		return -EINVAL;
+	}
+
+	CDBG("Allocating iova size = %zu for smmu hdl=%X\n", size, smmu_hdl);
+
+	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+			idx, smmu_hdl);
+		return -EINVAL;
+	}
+
+	if (iommu_cb_set.cb_info[idx].handle != smmu_hdl) {
+		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+			iommu_cb_set.cb_info[idx].handle, smmu_hdl);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].shared_support) {
+		pr_err("Error: Shared memory not supported for hdl = %X\n",
+			smmu_hdl);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	vaddr = gen_pool_alloc(iommu_cb_set.cb_info[idx].shared_mem_pool, size);
+	if (!vaddr)
+		return -ENOMEM;
+
+	*iova = vaddr;
+
+get_addr_end:
+	return rc;
+}
+
+static int cam_smmu_free_iova(uint32_t addr, size_t size,
+	int32_t smmu_hdl)
+{
+	int rc = 0;
+	int idx;
+
+	if (!size || (smmu_hdl == HANDLE_INIT)) {
+		pr_err("Error: Input args are invalid\n");
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+			idx, smmu_hdl);
+		return -EINVAL;
+	}
+
+	if (iommu_cb_set.cb_info[idx].handle != smmu_hdl) {
+		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+			iommu_cb_set.cb_info[idx].handle, smmu_hdl);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	gen_pool_free(iommu_cb_set.cb_info[idx].shared_mem_pool, addr, size);
+
+get_addr_end:
+	return rc;
+}
+
+int cam_smmu_alloc_firmware(int32_t smmu_hdl,
+	dma_addr_t *iova,
+	uint64_t *cpuva,
+	size_t *len)
+{
+	int rc;
+	int32_t idx;
+	size_t firmware_len = 0;
+	size_t firmware_start = 0;
+	struct iommu_domain *domain;
+
+	if (!iova || !len || !cpuva || (smmu_hdl == HANDLE_INIT)) {
+		pr_err("Error: Input args are invalid\n");
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+			idx, smmu_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].firmware_support) {
+		pr_err("Firmware memory not supported for this SMMU handle\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].is_fw_allocated) {
+		pr_err("Trying to allocate twice\n");
+		rc = -ENOMEM;
+		goto unlock_and_end;
+	}
+
+	firmware_len = iommu_cb_set.cb_info[idx].firmware_info.iova_len;
+	firmware_start = iommu_cb_set.cb_info[idx].firmware_info.iova_start;
+	CDBG("Firmware area len from DT = %zu\n", firmware_len);
+
+	icp_fw.fw_kva = dma_alloc_coherent(icp_fw.fw_dev,
+		firmware_len,
+		&icp_fw.fw_dma_hdl,
+		GFP_KERNEL);
+	if (!icp_fw.fw_kva) {
+		pr_err("FW memory alloc failed\n");
+		rc = -ENOMEM;
+		goto unlock_and_end;
+	} else {
+		CDBG("DMA alloc returned fw = %pK, hdl = %pK\n",
+			icp_fw.fw_kva, (void *)icp_fw.fw_dma_hdl);
+	}
+
+	domain = iommu_cb_set.cb_info[idx].mapping->domain;
+	rc = iommu_map(domain,
+		firmware_start,
+		icp_fw.fw_dma_hdl,
+		firmware_len,
+		IOMMU_READ|IOMMU_WRITE|IOMMU_PRIV);
+
+	if (rc) {
+		pr_err("Failed to map FW into IOMMU\n");
+		rc = -ENOMEM;
+		goto alloc_fail;
+	}
+	iommu_cb_set.cb_info[idx].is_fw_allocated = true;
+
+	*iova = iommu_cb_set.cb_info[idx].firmware_info.iova_start;
+	*cpuva = (uint64_t)icp_fw.fw_kva;
+	*len = firmware_len;
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+
+	return rc;
+
+alloc_fail:
+	dma_free_coherent(icp_fw.fw_dev,
+		firmware_len,
+		icp_fw.fw_kva,
+		icp_fw.fw_dma_hdl);
+unlock_and_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+end:
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_alloc_firmware);
+
+int cam_smmu_dealloc_firmware(int32_t smmu_hdl)
+{
+	int rc = 0;
+	int32_t idx;
+	size_t firmware_len = 0;
+	size_t firmware_start = 0;
+	struct iommu_domain *domain;
+	size_t unmapped = 0;
+
+	if (smmu_hdl == HANDLE_INIT) {
+		pr_err("Error: Invalid handle\n");
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+			idx, smmu_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].firmware_support) {
+		pr_err("Firmware memory not supported for this SMMU handle\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (!iommu_cb_set.cb_info[idx].is_fw_allocated) {
+		pr_err("Trying to deallocate firmware that is not allocated\n");
+		rc = -ENOMEM;
+		goto unlock_and_end;
+	}
+
+	firmware_len = iommu_cb_set.cb_info[idx].firmware_info.iova_len;
+	firmware_start = iommu_cb_set.cb_info[idx].firmware_info.iova_start;
+	domain = iommu_cb_set.cb_info[idx].mapping->domain;
+	unmapped = iommu_unmap(domain,
+		firmware_start,
+		firmware_len);
+
+	if (unmapped != firmware_len) {
+		pr_err("Only %zu unmapped out of total %zu\n",
+			unmapped,
+			firmware_len);
+		rc = -EINVAL;
+	}
+
+	dma_free_coherent(icp_fw.fw_dev,
+		firmware_len,
+		icp_fw.fw_kva,
+		icp_fw.fw_dma_hdl);
+
+	icp_fw.fw_kva = 0;
+	icp_fw.fw_dma_hdl = 0;
+
+	iommu_cb_set.cb_info[idx].is_fw_allocated = false;
+
+unlock_and_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+end:
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_dealloc_firmware);
+
+static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
+	 enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
+	 size_t *len_ptr,
+	 enum cam_smmu_region_id region_id)
+{
+	int rc = -1;
+	struct cam_dma_buff_info *mapping_info;
+	struct dma_buf *buf = NULL;
+	struct dma_buf_attachment *attach = NULL;
+	struct sg_table *table = NULL;
+	struct iommu_domain *domain;
+	size_t size = 0;
+	uint32_t iova = 0;
+
+	/* allocate memory for each buffer information */
+	buf = dma_buf_get(ion_fd);
+	if (IS_ERR_OR_NULL(buf)) {
+		rc = PTR_ERR(buf);
+		pr_err("Error: dma get buf failed. fd = %d\n", ion_fd);
+		goto err_out;
+	}
+
+	attach = dma_buf_attach(buf, iommu_cb_set.cb_info[idx].dev);
+	if (IS_ERR_OR_NULL(attach)) {
+		rc = PTR_ERR(attach);
+		pr_err("Error: dma buf attach failed\n");
+		goto err_put;
+	}
+
+	table = dma_buf_map_attachment(attach, dma_dir);
+	if (IS_ERR_OR_NULL(table)) {
+		rc = PTR_ERR(table);
+		pr_err("Error: dma buf map attachment failed\n");
+		goto err_detach;
+	}
+
+	if (region_id == CAM_SMMU_REGION_SHARED) {
+		domain = iommu_cb_set.cb_info[idx].mapping->domain;
+		if (!domain) {
+			pr_err("CB has no domain set\n");
+			rc = -EINVAL;
+			goto err_unmap_sg;
+		}
+
+		rc = cam_smmu_alloc_iova(*len_ptr,
+			iommu_cb_set.cb_info[idx].handle,
+			&iova);
+
+		if (rc < 0) {
+			pr_err("IOVA alloc failed for shared memory\n");
+			goto err_unmap_sg;
+		}
+
+		size = iommu_map_sg(domain,
+			iova,
+			table->sgl,
+			table->nents,
+			IOMMU_READ | IOMMU_WRITE);
+
+		/* iommu_map_sg() returns 0 on failure; size_t is unsigned */
+		if (!size) {
+			pr_err("IOMMU mapping failed\n");
+			rc = cam_smmu_free_iova(iova,
+				*len_ptr,
+				iommu_cb_set.cb_info[idx].handle);
+
+			if (rc)
+				pr_err("IOVA free failed\n");
+			rc = -ENOMEM;
+			goto err_unmap_sg;
+		} else {
+			CDBG("iommu_map_sg returned %zu\n", size);
+			*paddr_ptr = iova;
+			*len_ptr = size;
+		}
+	} else if (region_id == CAM_SMMU_REGION_IO) {
+		rc = msm_dma_map_sg_lazy(iommu_cb_set.cb_info[idx].dev,
+			table->sgl, table->nents, dma_dir, buf);
+
+		if (rc != table->nents) {
+			pr_err("Error: msm_dma_map_sg_lazy failed\n");
+			rc = -ENOMEM;
+			goto err_unmap_sg;
+		} else {
+			*paddr_ptr = sg_dma_address(table->sgl);
+			*len_ptr = (size_t)sg_dma_len(table->sgl);
+		}
+	} else {
+		pr_err("Error: Wrong region id passed for %s\n", __func__);
+		rc = -EINVAL;
+		goto err_unmap_sg;
+	}
+
+	if (table->sgl) {
+		CDBG("DMA buf: %pK, device: %pK, attach: %pK, table: %pK\n",
+				(void *)buf,
+				(void *)iommu_cb_set.cb_info[idx].dev,
+				(void *)attach, (void *)table);
+		CDBG("table sgl: %pK, rc: %d, dma_address: 0x%x\n",
+				(void *)table->sgl, rc,
+				(unsigned int)table->sgl->dma_address);
+	} else {
+		rc = -EINVAL;
+		pr_err("Error: table sgl is null\n");
+		goto err_unmap_sg;
+	}
+
+	/* fill up mapping_info */
+	mapping_info = kzalloc(sizeof(struct cam_dma_buff_info), GFP_KERNEL);
+	if (!mapping_info) {
+		rc = -ENOMEM;
+		goto err_alloc;
+	}
+	mapping_info->ion_fd = ion_fd;
+	mapping_info->buf = buf;
+	mapping_info->attach = attach;
+	mapping_info->table = table;
+	mapping_info->paddr = *paddr_ptr;
+	mapping_info->len = *len_ptr;
+	mapping_info->dir = dma_dir;
+	mapping_info->ref_count = 1;
+	mapping_info->region_id = region_id;
+
+	if (!*paddr_ptr || !*len_ptr) {
+		pr_err("Error: Space Allocation failed!\n");
+		kfree(mapping_info);
+		rc = -ENOSPC;
+		goto err_alloc;
+	}
+	CDBG("ion_fd = %d, dev = %pK, paddr= %pK, len = %u\n", ion_fd,
+		(void *)iommu_cb_set.cb_info[idx].dev,
+		(void *)*paddr_ptr, (unsigned int)*len_ptr);
+
+	/* add to the list */
+	list_add(&mapping_info->list, &iommu_cb_set.cb_info[idx].smmu_buf_list);
+	return 0;
+
+err_alloc:
+	if (region_id == CAM_SMMU_REGION_SHARED) {
+		cam_smmu_free_iova(iova,
+			size,
+			iommu_cb_set.cb_info[idx].handle);
+
+		iommu_unmap(iommu_cb_set.cb_info[idx].mapping->domain,
+			*paddr_ptr,
+			*len_ptr);
+	} else if (region_id == CAM_SMMU_REGION_IO) {
+		msm_dma_unmap_sg(iommu_cb_set.cb_info[idx].dev,
+			table->sgl,
+			table->nents,
+			dma_dir,
+			buf);
+	}
+err_unmap_sg:
+	dma_buf_unmap_attachment(attach, table, dma_dir);
+err_detach:
+	dma_buf_detach(buf, attach);
+err_put:
+	dma_buf_put(buf);
+err_out:
+	return rc;
+}
+
+static int cam_smmu_unmap_buf_and_remove_from_list(
+	struct cam_dma_buff_info *mapping_info,
+	int idx)
+{
+	int rc;
+	size_t size;
+	struct iommu_domain *domain;
+
+	if ((!mapping_info->buf) || (!mapping_info->table) ||
+		(!mapping_info->attach)) {
+		pr_err("Error: Invalid params dev = %pK, table = %pK\n",
+			(void *)iommu_cb_set.cb_info[idx].dev,
+			(void *)mapping_info->table);
+		pr_err("Error:dma_buf = %pK, attach = %pK\n",
+			(void *)mapping_info->buf,
+			(void *)mapping_info->attach);
+		return -EINVAL;
+	}
+
+	if (mapping_info->region_id == CAM_SMMU_REGION_SHARED) {
+		CDBG("Removing SHARED buffer paddr = %pK, len = %zu\n",
+			(void *)mapping_info->paddr, mapping_info->len);
+
+		domain = iommu_cb_set.cb_info[idx].mapping->domain;
+
+		size = iommu_unmap(domain,
+			mapping_info->paddr,
+			mapping_info->len);
+
+		if (size != mapping_info->len) {
+			pr_err("IOMMU unmap failed\n");
+			pr_err("Unmapped = %zu, requested = %zu\n",
+				size,
+				mapping_info->len);
+		}
+
+		rc = cam_smmu_free_iova(mapping_info->paddr,
+			mapping_info->len,
+			iommu_cb_set.cb_info[idx].handle);
+
+		if (rc)
+			pr_err("IOVA free failed\n");
+
+	} else if (mapping_info->region_id == CAM_SMMU_REGION_IO) {
+		msm_dma_unmap_sg(iommu_cb_set.cb_info[idx].dev,
+			mapping_info->table->sgl, mapping_info->table->nents,
+			mapping_info->dir, mapping_info->buf);
+	}
+
+	dma_buf_unmap_attachment(mapping_info->attach,
+		mapping_info->table, mapping_info->dir);
+	dma_buf_detach(mapping_info->buf, mapping_info->attach);
+	dma_buf_put(mapping_info->buf);
+
+	mapping_info->buf = NULL;
+
+	list_del_init(&mapping_info->list);
+
+	/* free one buffer */
+	kfree(mapping_info);
+	return 0;
+}
+
+static enum cam_smmu_buf_state cam_smmu_check_fd_in_list(int idx,
+	int ion_fd, dma_addr_t *paddr_ptr,
+	size_t *len_ptr)
+{
+	struct cam_dma_buff_info *mapping;
+
+	list_for_each_entry(mapping,
+		&iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
+		if (mapping->ion_fd == ion_fd) {
+			mapping->ref_count++;
+			*paddr_ptr = mapping->paddr;
+			*len_ptr = mapping->len;
+			return CAM_SMMU_BUFF_EXIST;
+		}
+	}
+
+	return CAM_SMMU_BUFF_NOT_EXIST;
+}
+
+int cam_smmu_get_handle(char *identifier, int *handle_ptr)
+{
+	int ret = 0;
+
+	if (!identifier) {
+		pr_err("Error: iommu hardware name is NULL\n");
+		return -EINVAL;
+	}
+
+	if (!handle_ptr) {
+		pr_err("Error: handle pointer is NULL\n");
+		return -EINVAL;
+	}
+
+	/* create and put handle in the table */
+	ret = cam_smmu_create_add_handle_in_table(identifier, handle_ptr);
+	if (ret < 0)
+		pr_err("Error: %s get handle fail\n", identifier);
+
+	return ret;
+}
+EXPORT_SYMBOL(cam_smmu_get_handle);
+
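+/*
+ * Typical client lifecycle around cam_smmu_ops() below (an illustrative
+ * sketch only; "ife" is a hypothetical device tree label and error
+ * handling is elided):
+ *
+ *	int hdl;
+ *
+ *	cam_smmu_get_handle("ife", &hdl);
+ *	cam_smmu_ops(hdl, CAM_SMMU_ATTACH);
+ *	map and unmap buffers via cam_smmu_map_iova()/cam_smmu_unmap_iova()
+ *	cam_smmu_ops(hdl, CAM_SMMU_DETACH);
+ *	cam_smmu_destroy_handle(hdl);
+ */
+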
+int cam_smmu_ops(int handle, enum cam_smmu_ops_param ops)
+{
+	int ret = 0, idx;
+
+	if (handle == HANDLE_INIT) {
+		pr_err("Error: Invalid handle\n");
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		pr_err("Error: Index invalid. idx = %d hdl = %x\n",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+		return -EINVAL;
+	}
+
+	switch (ops) {
+	case CAM_SMMU_ATTACH: {
+		ret = cam_smmu_attach(idx);
+		break;
+	}
+	case CAM_SMMU_DETACH: {
+		ret = cam_smmu_detach_device(idx);
+		break;
+	}
+	case CAM_SMMU_VOTE:
+	case CAM_SMMU_DEVOTE:
+	default:
+		pr_err("Error: idx = %d, ops = %d\n", idx, ops);
+		ret = -EINVAL;
+	}
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return ret;
+}
+EXPORT_SYMBOL(cam_smmu_ops);
+
+static int cam_smmu_alloc_scratch_buffer_add_to_list(int idx,
+	size_t virt_len,
+	size_t phys_len,
+	unsigned int iommu_dir,
+	dma_addr_t *virt_addr)
+{
+	unsigned long nents = virt_len / phys_len;
+	struct cam_dma_buff_info *mapping_info = NULL;
+	size_t unmapped;
+	dma_addr_t iova = 0;
+	struct scatterlist *sg;
+	int i = 0;
+	int rc;
+	struct iommu_domain *domain = NULL;
+	struct page *page;
+	struct sg_table *table = NULL;
+
+	CDBG("%s: nents = %lu, idx = %d, virt_len  = %zx\n",
+		__func__, nents, idx, virt_len);
+	CDBG("%s: phys_len = %zx, iommu_dir = %d, virt_addr = %pK\n",
+		__func__, phys_len, iommu_dir, virt_addr);
+
+	/*
+	 * This table will go inside the 'mapping' structure
+	 * where it will be held until put_scratch_buffer is called
+	 */
+	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!table) {
+		rc = -ENOMEM;
+		goto err_table_alloc;
+	}
+
+	rc = sg_alloc_table(table, nents, GFP_KERNEL);
+	if (rc < 0) {
+		rc = -EINVAL;
+		goto err_sg_alloc;
+	}
+
+	page = alloc_pages(GFP_KERNEL, get_order(phys_len));
+	if (!page) {
+		rc = -ENOMEM;
+		goto err_page_alloc;
+	}
+
+	/* Now we create the sg list */
+	for_each_sg(table->sgl, sg, table->nents, i)
+		sg_set_page(sg, page, phys_len, 0);
+
+	/* Get the domain from within our cb_set struct and map it */
+	domain = iommu_cb_set.cb_info[idx].mapping->domain;
+
+	rc = cam_smmu_alloc_scratch_va(&iommu_cb_set.cb_info[idx].scratch_map,
+		virt_len, &iova);
+
+	if (rc < 0) {
+		pr_err("Could not find valid iova for scratch buffer");
+		goto err_iommu_map;
+	}
+
+	if (iommu_map_sg(domain,
+		iova,
+		table->sgl,
+		table->nents,
+		iommu_dir) != virt_len) {
+		pr_err("iommu_map_sg() failed\n");
+		cam_smmu_free_scratch_va(&iommu_cb_set.cb_info[idx].scratch_map,
+			iova, virt_len);
+		rc = -ENOMEM;
+		goto err_iommu_map;
+	}
+
+	/* Now update our mapping information within the cb_set struct */
+	mapping_info = kzalloc(sizeof(struct cam_dma_buff_info), GFP_KERNEL);
+	if (!mapping_info) {
+		rc = -ENOMEM;
+		goto err_mapping_info;
+	}
+
+	mapping_info->ion_fd = 0xDEADBEEF;
+	mapping_info->buf = NULL;
+	mapping_info->attach = NULL;
+	mapping_info->table = table;
+	mapping_info->paddr = iova;
+	mapping_info->len = virt_len;
+	mapping_info->iommu_dir = iommu_dir;
+	mapping_info->ref_count = 1;
+	mapping_info->phys_len = phys_len;
+	mapping_info->region_id = CAM_SMMU_REGION_SCRATCH;
+
+	CDBG("%s: paddr = %pK, len = %zx, phys_len = %zx",
+		__func__, (void *)mapping_info->paddr,
+		mapping_info->len, mapping_info->phys_len);
+
+	list_add(&mapping_info->list, &iommu_cb_set.cb_info[idx].smmu_buf_list);
+
+	*virt_addr = (dma_addr_t)iova;
+
+	CDBG("%s: mapped virtual address = %lx\n", __func__,
+		(unsigned long)*virt_addr);
+	return 0;
+
+err_mapping_info:
+	unmapped = iommu_unmap(domain, iova, virt_len);
+	if (unmapped != virt_len)
+		pr_err("Unmapped only %zx instead of %zx\n", unmapped, virt_len);
+	cam_smmu_free_scratch_va(&iommu_cb_set.cb_info[idx].scratch_map,
+		iova, virt_len);
+err_iommu_map:
+	__free_pages(page, get_order(phys_len));
+err_page_alloc:
+	sg_free_table(table);
+err_sg_alloc:
+	kfree(table);
+err_table_alloc:
+	return rc;
+}
+
+static int cam_smmu_free_scratch_buffer_remove_from_list(
+	struct cam_dma_buff_info *mapping_info,
+	int idx)
+{
+	int rc = 0;
+	size_t unmapped;
+	struct iommu_domain *domain =
+		iommu_cb_set.cb_info[idx].mapping->domain;
+	struct scratch_mapping *scratch_map =
+		&iommu_cb_set.cb_info[idx].scratch_map;
+
+	if (!mapping_info->table) {
+		pr_err("Error: Invalid params: dev = %pK, table = %pK",
+			(void *)iommu_cb_set.cb_info[idx].dev,
+			(void *)mapping_info->table);
+		return -EINVAL;
+	}
+
+	/* Clean up the mapping_info struct from the list */
+	unmapped = iommu_unmap(domain, mapping_info->paddr, mapping_info->len);
+	if (unmapped != mapping_info->len)
+		pr_err("Unmapped only %zx instead of %zx",
+			unmapped, mapping_info->len);
+
+	rc = cam_smmu_free_scratch_va(scratch_map,
+		mapping_info->paddr,
+		mapping_info->len);
+	if (rc < 0) {
+		pr_err("Error: Invalid iova while freeing scratch buffer\n");
+		rc = -EINVAL;
+	}
+
+	__free_pages(sg_page(mapping_info->table->sgl),
+			get_order(mapping_info->phys_len));
+	sg_free_table(mapping_info->table);
+	kfree(mapping_info->table);
+	list_del_init(&mapping_info->list);
+
+	kfree(mapping_info);
+	mapping_info = NULL;
+
+	return rc;
+}
+
+int cam_smmu_get_scratch_iova(int handle,
+	enum cam_smmu_map_dir dir,
+	dma_addr_t *paddr_ptr,
+	size_t virt_len,
+	size_t phys_len)
+{
+	int idx, rc;
+	unsigned int iommu_dir;
+
+	if (!paddr_ptr || !virt_len || !phys_len) {
+		pr_err("Error: Input pointer or lengths invalid\n");
+		return -EINVAL;
+	}
+
+	if (virt_len < phys_len) {
+		pr_err("Error: virt_len > phys_len\n");
+		return -EINVAL;
+	}
+
+	if (handle == HANDLE_INIT) {
+		pr_err("Error: Invalid handle\n");
+		return -EINVAL;
+	}
+
+	iommu_dir = cam_smmu_translate_dir_to_iommu_dir(dir);
+	if (iommu_dir == IOMMU_INVALID_DIR) {
+		pr_err("Error: translate direction failed. dir = %d\n", dir);
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].scratch_buf_support) {
+		pr_err("Error: Context bank does not support scratch bufs\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	CDBG("%s: smmu handle = %x, idx = %d, dir = %d\n",
+		__func__, handle, idx, dir);
+	CDBG("%s: virt_len = %zx, phys_len  = %zx\n",
+		__func__, phys_len, virt_len);
+
+	if (iommu_cb_set.cb_info[idx].state != CAM_SMMU_ATTACH) {
+		pr_err("Err:Dev %s should call SMMU attach before map buffer\n",
+				iommu_cb_set.cb_info[idx].name);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (!IS_ALIGNED(virt_len, PAGE_SIZE)) {
+		pr_err("Requested scratch buffer length not page aligned\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (!IS_ALIGNED(virt_len, phys_len)) {
+		pr_err("Requested virt length not aligned with phys length\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	rc = cam_smmu_alloc_scratch_buffer_add_to_list(idx,
+		virt_len,
+		phys_len,
+		iommu_dir,
+		paddr_ptr);
+	if (rc < 0)
+		pr_err("Error: mapping or add list fail\n");
+
+error:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+
+int cam_smmu_put_scratch_iova(int handle,
+	dma_addr_t paddr)
+{
+	int idx;
+	int rc = -1;
+	struct cam_dma_buff_info *mapping_info;
+
+	if (handle == HANDLE_INIT) {
+		pr_err("Error: Invalid handle\n");
+		return -EINVAL;
+	}
+
+	/* find index in the iommu_cb_set.cb_info */
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto handle_err;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].scratch_buf_support) {
+		pr_err("Error: Context bank does not support scratch buffers\n");
+		rc = -EINVAL;
+		goto handle_err;
+	}
+
+	/* Based on virtual address and index, we can find mapping info
+	 * of the scratch buffer
+	 */
+	mapping_info = cam_smmu_find_mapping_by_virt_address(idx, paddr);
+	if (!mapping_info) {
+		pr_err("Error: Invalid params\n");
+		rc = -ENODEV;
+		goto handle_err;
+	}
+
+	/* unmapping one buffer from device */
+	rc = cam_smmu_free_scratch_buffer_remove_from_list(mapping_info, idx);
+	if (rc < 0) {
+		pr_err("Error: unmap or remove list fail\n");
+		goto handle_err;
+	}
+
+handle_err:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+
+int cam_smmu_map_sec_iova(int handle, int ion_fd,
+	enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr,
+	size_t *len_ptr)
+{
+	/* not implemented yet */
+	return -EPERM;
+}
+EXPORT_SYMBOL(cam_smmu_map_sec_iova);
+
+int cam_smmu_map_iova(int handle, int ion_fd,
+	enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr,
+	size_t *len_ptr, enum cam_smmu_region_id region_id)
+{
+	int idx, rc;
+	enum dma_data_direction dma_dir;
+	enum cam_smmu_buf_state buf_state;
+
+	if (!paddr_ptr || !len_ptr) {
+		pr_err("Input pointers are invalid\n");
+		return -EINVAL;
+	}
+
+	if (handle == HANDLE_INIT) {
+		pr_err("Invalid handle\n");
+		return -EINVAL;
+	}
+
+	/* clean the content from clients */
+	*paddr_ptr = (dma_addr_t)NULL;
+	if (region_id != CAM_SMMU_REGION_SHARED)
+		*len_ptr = (size_t)0;
+
+	dma_dir = cam_smmu_translate_dir(dir);
+	if (dma_dir == DMA_NONE) {
+		pr_err("translate direction failed. dir = %d\n", dir);
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		pr_err("handle or index invalid. idx = %d hdl = %x\n",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		pr_err("hdl is not valid, table_hdl = %x, hdl = %x\n",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	if (iommu_cb_set.cb_info[idx].state != CAM_SMMU_ATTACH) {
+		pr_err("Err:Dev %s should call SMMU attach before map buffer\n",
+				iommu_cb_set.cb_info[idx].name);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	buf_state = cam_smmu_check_fd_in_list(idx, ion_fd, paddr_ptr,
+		len_ptr);
+	if (buf_state == CAM_SMMU_BUFF_EXIST) {
+		CDBG("ion_fd:%d already in the list, give same addr back",
+				 ion_fd);
+		rc = 0;
+		goto get_addr_end;
+	}
+	rc = cam_smmu_map_buffer_and_add_to_list(idx, ion_fd, dma_dir,
+			paddr_ptr, len_ptr, region_id);
+	if (rc < 0)
+		pr_err("mapping or add list fail\n");
+
+get_addr_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_map_iova);
+
+int cam_smmu_get_iova(int handle, int ion_fd,
+	dma_addr_t *paddr_ptr, size_t *len_ptr)
+{
+	int idx, rc = 0;
+	enum cam_smmu_buf_state buf_state;
+
+	if (!paddr_ptr || !len_ptr) {
+		pr_err("Error: Input pointers are invalid\n");
+		return -EINVAL;
+	}
+
+	if (handle == HANDLE_INIT) {
+		pr_err("Error: Invalid handle\n");
+		return -EINVAL;
+	}
+
+	/* clean the content from clients */
+	*paddr_ptr = (dma_addr_t)NULL;
+	*len_ptr = (size_t)0;
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	buf_state = cam_smmu_check_fd_in_list(idx, ion_fd, paddr_ptr, len_ptr);
+	if (buf_state == CAM_SMMU_BUFF_NOT_EXIST) {
+		CDBG("ion_fd:%d not in the mapped list", ion_fd);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+get_addr_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_get_iova);
+
+int cam_smmu_unmap_sec_iova(int handle, int ion_fd)
+{
+	/* not implemented yet */
+	return -EPERM;
+}
+EXPORT_SYMBOL(cam_smmu_unmap_sec_iova);
+
+int cam_smmu_unmap_iova(int handle,
+	int ion_fd,
+	enum cam_smmu_region_id region_id)
+{
+	int idx, rc;
+	struct cam_dma_buff_info *mapping_info;
+
+	if (handle == HANDLE_INIT) {
+		pr_err("Error: Invalid handle\n");
+		return -EINVAL;
+	}
+
+	/* find index in the iommu_cb_set.cb_info */
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto unmap_end;
+	}
+
+	/* Based on ion fd and index, we can find mapping info of buffer */
+	mapping_info = cam_smmu_find_mapping_by_ion_index(idx, ion_fd);
+	if (!mapping_info) {
+		pr_err("Error: Invalid params! idx = %d, fd = %d\n",
+			idx, ion_fd);
+		rc = -EINVAL;
+		goto unmap_end;
+	}
+
+	mapping_info->ref_count--;
+	if (mapping_info->ref_count > 0) {
+		CDBG("There are still %u buffer(s) with same fd %d",
+			mapping_info->ref_count, mapping_info->ion_fd);
+		rc = 0;
+		goto unmap_end;
+	}
+
+	/* Unmapping one buffer from device */
+	CDBG("SMMU: removing buffer idx = %d\n", idx);
+	rc = cam_smmu_unmap_buf_and_remove_from_list(mapping_info, idx);
+	if (rc < 0)
+		pr_err("Error: unmap or remove list fail\n");
+
+unmap_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_unmap_iova);
+
+int cam_smmu_put_iova(int handle, int ion_fd)
+{
+	int idx;
+	int rc = 0;
+	struct cam_dma_buff_info *mapping_info;
+
+	if (handle == HANDLE_INIT) {
+		pr_err("Error: Invalid handle\n");
+		return -EINVAL;
+	}
+
+	/* find index in the iommu_cb_set.cb_info */
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto put_addr_end;
+	}
+
+	/* based on ion fd and index, we can find mapping info of buffer */
+	mapping_info = cam_smmu_find_mapping_by_ion_index(idx, ion_fd);
+	if (!mapping_info) {
+		pr_err("Error: Invalid params! idx = %d, fd = %d\n",
+			idx, ion_fd);
+		rc = -EINVAL;
+		goto put_addr_end;
+	}
+
+	mapping_info->ref_count--;
+
+put_addr_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_put_iova);
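+
+/*
+ * Illustrative pairing of cam_smmu_get_iova()/cam_smmu_put_iova() above
+ * (a sketch; hdl and fd are assumptions). Each successful get takes a
+ * reference on an existing mapping and put only drops that reference;
+ * the final teardown still goes through cam_smmu_unmap_iova():
+ *
+ *	dma_addr_t iova;
+ *	size_t len;
+ *
+ *	if (!cam_smmu_get_iova(hdl, fd, &iova, &len)) {
+ *		use iova for hardware programming
+ *		cam_smmu_put_iova(hdl, fd);
+ *	}
+ */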
+
+int cam_smmu_destroy_handle(int handle)
+{
+	int idx;
+
+	if (handle == HANDLE_INIT) {
+		pr_err("Error: Invalid handle\n");
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+		return -EINVAL;
+	}
+
+	if (!list_empty_careful(&iommu_cb_set.cb_info[idx].smmu_buf_list)) {
+		pr_err("Client %s buffer list is not clean!\n",
+			iommu_cb_set.cb_info[idx].name);
+		cam_smmu_print_list(idx);
+		cam_smmu_clean_buffer_list(idx);
+	}
+
+	iommu_cb_set.cb_info[idx].cb_count = 0;
+	iommu_cb_set.cb_info[idx].handle = HANDLE_INIT;
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return 0;
+}
+EXPORT_SYMBOL(cam_smmu_destroy_handle);
+
+static void cam_smmu_deinit_cb(struct cam_context_bank_info *cb)
+{
+	arm_iommu_detach_device(cb->dev);
+
+	if (cb->io_support && cb->mapping) {
+		arm_iommu_release_mapping(cb->mapping);
+		cb->mapping = NULL;
+	}
+
+	if (cb->shared_support) {
+		gen_pool_destroy(cb->shared_mem_pool);
+		cb->shared_mem_pool = NULL;
+	}
+
+	if (cb->scratch_buf_support) {
+		kfree(cb->scratch_map.bitmap);
+		cb->scratch_map.bitmap = NULL;
+	}
+}
+
+static void cam_smmu_release_cb(struct platform_device *pdev)
+{
+	int i = 0;
+
+	for (i = 0; i < iommu_cb_set.cb_num; i++)
+		cam_smmu_deinit_cb(&iommu_cb_set.cb_info[i]);
+
+	devm_kfree(&pdev->dev, iommu_cb_set.cb_info);
+	iommu_cb_set.cb_num = 0;
+}
+
+static int cam_smmu_setup_cb(struct cam_context_bank_info *cb,
+	struct device *dev)
+{
+	int rc = 0;
+
+	if (!cb || !dev) {
+		pr_err("Error: invalid input params\n");
+		return -EINVAL;
+	}
+
+	cb->dev = dev;
+	cb->is_fw_allocated = false;
+
+	/* Create a pool with 4K granularity for supporting shared memory */
+	if (cb->shared_support) {
+		cb->shared_mem_pool = gen_pool_create(
+			SHARED_MEM_POOL_GRANULARITY, -1);
+
+		if (!cb->shared_mem_pool)
+			return -ENOMEM;
+
+		rc = gen_pool_add(cb->shared_mem_pool,
+			cb->shared_info.iova_start,
+			cb->shared_info.iova_len,
+			-1);
+
+		CDBG("Shared mem start->%lX\n",
+			(unsigned long)cb->shared_info.iova_start);
+		CDBG("Shared mem len->%zu\n", cb->shared_info.iova_len);
+
+		if (rc) {
+			pr_err("Genpool chunk creation failed\n");
+			gen_pool_destroy(cb->shared_mem_pool);
+			cb->shared_mem_pool = NULL;
+			return rc;
+		}
+	}
+
+	if (cb->scratch_buf_support) {
+		rc = cam_smmu_init_scratch_map(&cb->scratch_map,
+			cb->scratch_info.iova_start,
+			cb->scratch_info.iova_len,
+			0);
+		if (rc < 0) {
+			pr_err("Error: failed to create scratch map\n");
+			rc = -ENODEV;
+			goto end;
+		}
+	}
+
+	/* create a virtual mapping */
+	if (cb->io_support) {
+		cb->mapping = arm_iommu_create_mapping(&platform_bus_type,
+			cb->io_info.iova_start, cb->io_info.iova_len);
+		if (IS_ERR(cb->mapping)) {
+			pr_err("Error: create mapping Failed\n");
+			rc = -ENODEV;
+			goto end;
+		}
+	} else {
+		pr_err("Context bank does not have IO region\n");
+		rc = -ENODEV;
+		goto end;
+	}
+
+	return rc;
+end:
+	if (cb->shared_support) {
+		gen_pool_destroy(cb->shared_mem_pool);
+		cb->shared_mem_pool = NULL;
+	}
+
+	if (cb->scratch_buf_support) {
+		kfree(cb->scratch_map.bitmap);
+		cb->scratch_map.bitmap = NULL;
+	}
+
+	return rc;
+}
+
+static int cam_alloc_smmu_context_banks(struct device *dev)
+{
+	struct device_node *domains_child_node = NULL;
+
+	if (!dev) {
+		pr_err("Error: Invalid device\n");
+		return -ENODEV;
+	}
+
+	iommu_cb_set.cb_num = 0;
+
+	/* traverse through all the child nodes and increment the cb count */
+	for_each_available_child_of_node(dev->of_node, domains_child_node) {
+		if (of_device_is_compatible(domains_child_node,
+			"qcom,msm-cam-smmu-cb"))
+			iommu_cb_set.cb_num++;
+
+		if (of_device_is_compatible(domains_child_node,
+			"qcom,qsmmu-cam-cb"))
+			iommu_cb_set.cb_num++;
+	}
+
+	if (iommu_cb_set.cb_num == 0) {
+		pr_err("Error: no context banks present\n");
+		return -ENOENT;
+	}
+
+	/* allocate memory for the context banks */
+	iommu_cb_set.cb_info = devm_kzalloc(dev,
+		iommu_cb_set.cb_num * sizeof(struct cam_context_bank_info),
+		GFP_KERNEL);
+
+	if (!iommu_cb_set.cb_info) {
+		pr_err("Error: cannot allocate context banks\n");
+		return -ENOMEM;
+	}
+
+	cam_smmu_reset_iommu_table(CAM_SMMU_TABLE_INIT);
+	iommu_cb_set.cb_init_count = 0;
+
+	CDBG("no of context banks :%d\n", iommu_cb_set.cb_num);
+	return 0;
+}
+
+static int cam_smmu_get_memory_regions_info(struct device_node *of_node,
+	struct cam_context_bank_info *cb)
+{
+	int rc = 0;
+	struct device_node *mem_map_node = NULL;
+	struct device_node *child_node = NULL;
+	const char *region_name;
+	int num_regions = 0;
+
+	if (!of_node || !cb) {
+		pr_err("Invalid argument(s)\n");
+		return -EINVAL;
+	}
+
+	mem_map_node = of_get_child_by_name(of_node, "iova-mem-map");
+	if (!mem_map_node) {
+		pr_err("iova-mem-map not present\n");
+		return -EINVAL;
+	}
+
+	for_each_available_child_of_node(mem_map_node, child_node) {
+		uint32_t region_start;
+		uint32_t region_len;
+		uint32_t region_id;
+
+		num_regions++;
+		rc = of_property_read_string(child_node,
+			"iova-region-name", &region_name);
+		if (rc < 0) {
+			of_node_put(child_node);
+			of_node_put(mem_map_node);
+			pr_err("IOVA region not found\n");
+			return -EINVAL;
+		}
+
+		rc = of_property_read_u32(child_node,
+			"iova-region-start", &region_start);
+		if (rc < 0) {
+			of_node_put(child_node);
+			of_node_put(mem_map_node);
+			pr_err("Failed to read iova-region-start\n");
+			return -EINVAL;
+		}
+
+		rc = of_property_read_u32(child_node,
+			"iova-region-len", &region_len);
+		if (rc < 0) {
+			of_node_put(child_node);
+			of_node_put(mem_map_node);
+			pr_err("Failed to read iova-region-len\n");
+			return -EINVAL;
+		}
+
+		rc = of_property_read_u32(child_node,
+			"iova-region-id", &region_id);
+		if (rc < 0) {
+			of_node_put(child_node);
+			of_node_put(mem_map_node);
+			pr_err("Failed to read iova-region-id\n");
+			return -EINVAL;
+		}
+
+		switch (region_id) {
+		case CAM_SMMU_REGION_FIRMWARE:
+			cb->firmware_support = 1;
+			cb->firmware_info.iova_start = region_start;
+			cb->firmware_info.iova_len = region_len;
+			break;
+		case CAM_SMMU_REGION_SHARED:
+			cb->shared_support = 1;
+			cb->shared_info.iova_start = region_start;
+			cb->shared_info.iova_len = region_len;
+			break;
+		case CAM_SMMU_REGION_SCRATCH:
+			cb->scratch_buf_support = 1;
+			cb->scratch_info.iova_start = region_start;
+			cb->scratch_info.iova_len = region_len;
+			break;
+		case CAM_SMMU_REGION_IO:
+			cb->io_support = 1;
+			cb->io_info.iova_start = region_start;
+			cb->io_info.iova_len = region_len;
+			break;
+		default:
+			pr_err("Incorrect region id present in DT file: %d\n",
+				region_id);
+		}
+
+		CDBG("Found label -> %s\n", cb->name);
+		CDBG("Found region -> %s\n", region_name);
+		CDBG("region_start -> %X\n", region_start);
+		CDBG("region_len -> %X\n", region_len);
+		CDBG("region_id -> %X\n", region_id);
+	}
+	of_node_put(mem_map_node);
+
+	if (!num_regions) {
+		pr_err("No memory regions found, at least one needed\n");
+		rc = -ENODEV;
+	}
+
+	return rc;
+}
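+
+/*
+ * Illustrative iova-mem-map device tree fragment matching the parser
+ * above (node name and property values are placeholders, not taken from
+ * any real target; region id 3 corresponds to CAM_SMMU_REGION_IO):
+ *
+ *	iova-mem-map {
+ *		iova-mem-region-io {
+ *			iova-region-name = "io";
+ *			iova-region-start = <0x7400000>;
+ *			iova-region-len = <0x6c800000>;
+ *			iova-region-id = <3>;
+ *		};
+ *	};
+ */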
+
+static int cam_populate_smmu_context_banks(struct device *dev,
+	enum cam_iommu_type type)
+{
+	int rc = 0;
+	struct cam_context_bank_info *cb;
+	struct device *ctx = NULL;
+
+	if (!dev) {
+		pr_err("Error: Invalid device\n");
+		return -ENODEV;
+	}
+
+	/* check the bounds */
+	if (iommu_cb_set.cb_init_count >= iommu_cb_set.cb_num) {
+		pr_err("Error: populate more than allocated cb\n");
+		rc = -EBADHANDLE;
+		goto cb_init_fail;
+	}
+
+	/* read the context bank from cb set */
+	cb = &iommu_cb_set.cb_info[iommu_cb_set.cb_init_count];
+
+	/* set the name of the context bank */
+	rc = of_property_read_string(dev->of_node, "label", &cb->name);
+	if (rc < 0) {
+		pr_err("Error: failed to read label from sub device\n");
+		goto cb_init_fail;
+	}
+
+	rc = cam_smmu_get_memory_regions_info(dev->of_node,
+		cb);
+	if (rc < 0) {
+		pr_err("Error: Getting region info\n");
+		return rc;
+	}
+
+	/* set up the iommu mapping for the context bank */
+	if (type == CAM_QSMMU) {
+		pr_err("Error: QSMMU ctx not supported for : %s\n", cb->name);
+		return -ENODEV;
+	}
+
+	ctx = dev;
+	CDBG("getting Arm SMMU ctx : %s\n", cb->name);
+
+	rc = cam_smmu_setup_cb(cb, ctx);
+	if (rc < 0) {
+		pr_err("Error: failed to setup cb : %s\n", cb->name);
+		goto cb_init_fail;
+	}
+
+	if (cb->io_support && cb->mapping)
+		iommu_set_fault_handler(cb->mapping->domain,
+			cam_smmu_iommu_fault_handler,
+			(void *)cb->name);
+
+	/* increment count to next bank */
+	iommu_cb_set.cb_init_count++;
+
+	CDBG("X: cb init count :%d\n", iommu_cb_set.cb_init_count);
+
+cb_init_fail:
+	return rc;
+}
+
+static int cam_smmu_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct device *dev = &pdev->dev;
+
+	if (of_device_is_compatible(dev->of_node, "qcom,msm-cam-smmu")) {
+		rc = cam_alloc_smmu_context_banks(dev);
+		if (rc < 0) {
+			pr_err("Error: allocating context banks\n");
+			return -ENOMEM;
+		}
+	}
+	if (of_device_is_compatible(dev->of_node, "qcom,msm-cam-smmu-cb")) {
+		rc = cam_populate_smmu_context_banks(dev, CAM_ARM_SMMU);
+		if (rc < 0) {
+			pr_err("Error: populating context banks\n");
+			return -ENOMEM;
+		}
+		return rc;
+	}
+	if (of_device_is_compatible(dev->of_node, "qcom,qsmmu-cam-cb")) {
+		rc = cam_populate_smmu_context_banks(dev, CAM_QSMMU);
+		if (rc < 0) {
+			pr_err("Error: populating context banks\n");
+			return -ENOMEM;
+		}
+		return rc;
+	}
+
+	if (of_device_is_compatible(dev->of_node, "qcom,msm-cam-smmu-fw-dev")) {
+		icp_fw.fw_dev = &pdev->dev;
+		icp_fw.fw_kva = NULL;
+		icp_fw.fw_dma_hdl = 0;
+		return rc;
+	}
+
+	/* probe through all the subdevices */
+	rc = of_platform_populate(pdev->dev.of_node, msm_cam_smmu_dt_match,
+				NULL, &pdev->dev);
+	if (rc < 0) {
+		pr_err("Error: populating devices\n");
+	} else {
+		INIT_WORK(&iommu_cb_set.smmu_work, cam_smmu_page_fault_work);
+		mutex_init(&iommu_cb_set.payload_list_lock);
+		INIT_LIST_HEAD(&iommu_cb_set.payload_list);
+	}
+
+	return rc;
+}
+
+static int cam_smmu_remove(struct platform_device *pdev)
+{
+	/* release all the context banks and memory allocated */
+	cam_smmu_reset_iommu_table(CAM_SMMU_TABLE_DEINIT);
+	if (of_device_is_compatible(pdev->dev.of_node, "qcom,msm-cam-smmu"))
+		cam_smmu_release_cb(pdev);
+	return 0;
+}
+
+static struct platform_driver cam_smmu_driver = {
+	.probe = cam_smmu_probe,
+	.remove = cam_smmu_remove,
+	.driver = {
+		.name = "msm_cam_smmu",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_cam_smmu_dt_match,
+	},
+};
+
+static int __init cam_smmu_init_module(void)
+{
+	return platform_driver_register(&cam_smmu_driver);
+}
+
+static void __exit cam_smmu_exit_module(void)
+{
+	platform_driver_unregister(&cam_smmu_driver);
+}
+
+module_init(cam_smmu_init_module);
+module_exit(cam_smmu_exit_module);
+MODULE_DESCRIPTION("MSM Camera SMMU driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
new file mode 100644
index 0000000..76e9135
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
@@ -0,0 +1,255 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_SMMU_API_H_
+#define _CAM_SMMU_API_H_
+
+#include <linux/dma-direction.h>
+#include <linux/module.h>
+#include <linux/dma-buf.h>
+#include <asm/dma-iommu.h>
+#include <linux/of_platform.h>
+#include <linux/iommu.h>
+#include <linux/random.h>
+#include <linux/spinlock_types.h>
+#include <linux/mutex.h>
+#include <linux/msm_ion.h>
+
+/* Enum for possible CAM SMMU operations */
+enum cam_smmu_ops_param {
+	CAM_SMMU_ATTACH,
+	CAM_SMMU_DETACH,
+	CAM_SMMU_VOTE,
+	CAM_SMMU_DEVOTE,
+	CAM_SMMU_OPS_INVALID
+};
+
+enum cam_smmu_map_dir {
+	CAM_SMMU_MAP_READ,
+	CAM_SMMU_MAP_WRITE,
+	CAM_SMMU_MAP_RW,
+	CAM_SMMU_MAP_INVALID
+};
+
+enum cam_smmu_region_id {
+	CAM_SMMU_REGION_FIRMWARE,
+	CAM_SMMU_REGION_SHARED,
+	CAM_SMMU_REGION_SCRATCH,
+	CAM_SMMU_REGION_IO
+};
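+
+/*
+ * Note: these region ids double as the iova-region-id values parsed from
+ * the iova-mem-map device tree nodes (FIRMWARE = 0, SHARED = 1,
+ * SCRATCH = 2, IO = 3).
+ */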
+
+/**
+ * @brief           : Gets an smmu handle
+ *
+ * @param identifier: Unique identifier to be used by clients, which they
+ *                    should obtain from the device tree. The CAM SMMU driver
+ *                    will not enforce how this string is obtained and will
+ *                    only validate it against the list of permitted
+ *                    identifiers
+ * @param handle_ptr: Based on the identifier, the CAM SMMU driver will
+ *                    fill the handle pointed to by handle_ptr
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_get_handle(char *identifier, int *handle_ptr);
+
+/**
+ * @brief       : Performs IOMMU operations
+ *
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param op    : Operation to be performed. Can be either CAM_SMMU_ATTACH
+ *                or CAM_SMMU_DETACH
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_ops(int handle, enum cam_smmu_ops_param op);
+
+/**
+ * @brief       : Maps IOVA for calling driver
+ *
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param ion_fd: ION handle identifying the memory buffer.
+ * @param dir     : Mapping direction, which will translate to
+ *                  DMA_BIDIRECTIONAL, DMA_TO_DEVICE or DMA_FROM_DEVICE
+ * @param dma_addr: Pointer to the IOVA where the mapped address will be
+ *                  returned. For CAM_SMMU_REGION_SHARED the IOVA is
+ *                  allocated from the shared region pool.
+ * @param len_ptr : Length of the mapped buffer, returned by the CAM SMMU
+ *                  driver. For CAM_SMMU_REGION_SHARED it is also an input
+ *                  specifying the length to map.
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_map_iova(int handle,
+	int ion_fd, enum cam_smmu_map_dir dir,
+	dma_addr_t *dma_addr, size_t *len_ptr,
+	enum cam_smmu_region_id region_id);
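+
+/*
+ * Illustrative call (a sketch; hdl and fd are assumptions): for
+ * CAM_SMMU_REGION_IO the mapped IOVA and length are returned through the
+ * output pointers:
+ *
+ *	dma_addr_t iova;
+ *	size_t len;
+ *	int rc = cam_smmu_map_iova(hdl, fd, CAM_SMMU_MAP_RW, &iova, &len,
+ *		CAM_SMMU_REGION_IO);
+ */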
+
+/**
+ * @brief       : Unmaps IOVA for calling driver
+ *
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param ion_fd: ION handle identifying the memory buffer.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_unmap_iova(int handle,
+	int ion_fd,
+	enum cam_smmu_region_id region_id);
+
+/**
+ * @brief          : Allocates a scratch buffer
+ *
+ * This function allocates a scratch virtual buffer of length virt_len in the
+ * device virtual address space mapped to phys_len physically contiguous bytes
+ * in that device's SMMU.
+ *
+ * virt_len and phys_len are expected to be aligned to PAGE_SIZE and with each
+ * other, otherwise -EINVAL is returned.
+ *
+ * -EINVAL will be returned if virt_len is less than phys_len.
+ *
+ * Passing a too large phys_len might also cause failure if that much size is
+ * not available for allocation in a physically contiguous way.
+ *
+ * @param handle   : Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param dir      : Direction of mapping which will translate to IOMMU_READ
+ *                   IOMMU_WRITE or a bit mask of both.
+ * @param paddr_ptr: Device virtual address that the client device will be
+ *                   able to read from/write to
+ * @param virt_len : Virtual length of the scratch buffer
+ * @param phys_len : Physical length of the scratch buffer
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_get_scratch_iova(int handle,
+	enum cam_smmu_map_dir dir,
+	dma_addr_t *paddr_ptr,
+	size_t virt_len,
+	size_t phys_len);
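+
+/*
+ * Illustrative call (a sketch; the handle and sizes are assumptions):
+ * requests a 2 MB device-virtual window backed by a single 4 KB chunk
+ * repeated, which satisfies the PAGE_SIZE and virt_len/phys_len
+ * alignment rules described above:
+ *
+ *	dma_addr_t scratch;
+ *	int rc = cam_smmu_get_scratch_iova(hdl, CAM_SMMU_MAP_RW, &scratch,
+ *		SZ_2M, SZ_4K);
+ */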
+
+/**
+ * @brief          : Frees a scratch buffer
+ *
+ * This function frees a scratch buffer and releases the corresponding SMMU
+ * mappings.
+ *
+ * @param handle   : Handle to identify the CAM SMMU client (IFE, ICP, etc.)
+ * @param paddr    : Device virtual address of client's scratch buffer that
+ *                   will be freed.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_put_scratch_iova(int handle,
+	dma_addr_t paddr);
+
+/**
+ * @brief        : Destroys an smmu handle
+ *
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_destroy_handle(int handle);
+
+/**
+ * @brief       : Finds index by handle in the smmu client table
+ *
+ * @param hdl   : Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @return Index of SMMU client. Negative in case of error.
+ */
+int cam_smmu_find_index_by_handle(int hdl);
+
+/**
+ * @brief       : Registers smmu fault handler for client
+ *
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param client_page_fault_handler: Client callback invoked on an IOMMU
+ *                                   page fault
+ * @param token: Opaque client token passed back to the page fault handler
+ */
+void cam_smmu_reg_client_page_fault_handler(int handle,
+	void (*client_page_fault_handler)(struct iommu_domain *,
+	struct device *, unsigned long,
+	int, void*), void *token);
+
+/**
+ * @brief Maps memory from an ION fd into IOVA space
+ *
+ * @param handle: SMMU handle identifying the context bank to map to
+ * @param ion_fd: ION fd of the memory to map
+ * @param paddr_ptr: Pointer to the IOVA address that will be returned
+ * @param len_ptr: Length of the memory mapped
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_get_iova(int handle, int ion_fd,
+	dma_addr_t *paddr_ptr, size_t *len_ptr);
+
+/**
+ * @brief Unmaps memory from context bank
+ *
+ * @param handle: SMMU handle identifying the context bank
+ * @param ion_fd: ION fd of memory to unmap
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_put_iova(int handle, int ion_fd);
+
+/**
+ * @brief Maps secure memory for SMMU handle
+ *
+ * @param handle: SMMU handle identifying context bank
+ * @param ion_fd: ION fd to map securely
+ * @param dir: DMA Direction for the mapping
+ * @param dma_addr: Returned IOVA address after mapping
+ * @param len_ptr: Length of memory mapped
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_map_sec_iova(int handle,
+	int ion_fd, enum cam_smmu_map_dir dir,
+	dma_addr_t *dma_addr, size_t *len_ptr);
+
+/**
+ * @brief Unmaps secure memory for SMMU handle
+ *
+ * @param handle: SMMU handle identifying context bank
+ * @param ion_fd: ION fd to unmap
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_unmap_sec_iova(int handle, int ion_fd);
+
+/**
+ * @brief Allocates firmware for context bank
+ *
+ * @param smmu_hdl: SMMU handle identifying context bank
+ * @param iova: IOVA address of allocated firmware
+ * @param kvaddr: CPU mapped address of allocated firmware
+ * @param len: Length of allocated firmware memory
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_alloc_firmware(int32_t smmu_hdl,
+	dma_addr_t *iova,
+	uint64_t *kvaddr,
+	size_t *len);
+
+/**
+ * @brief Deallocates firmware memory for context bank
+ *
+ * @param smmu_hdl: SMMU handle identifying the context bank
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_dealloc_firmware(int32_t smmu_hdl);
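+
+/*
+ * Illustrative alloc/dealloc pairing (a sketch; error handling is elided
+ * and hdl is an assumption):
+ *
+ *	dma_addr_t fw_iova;
+ *	uint64_t fw_kva;
+ *	size_t fw_len;
+ *
+ *	if (!cam_smmu_alloc_firmware(hdl, &fw_iova, &fw_kva, &fw_len)) {
+ *		load the firmware image at fw_kva, hand fw_iova to the device
+ *		cam_smmu_dealloc_firmware(hdl);
+ *	}
+ */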
+#endif /* _CAM_SMMU_API_H_ */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index 50c08b0..6233b46 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -551,6 +551,12 @@
 	if (!input)
 		dir = DMA_FROM_DEVICE;
 
+	if (buffer->plane_count > SDE_ROT_MAX_PLANES) {
+		SDEROT_ERR("buffer plane_count exceeds MAX_PLANE limit:%d\n",
+				buffer->plane_count);
+		return -EINVAL;
+	}
+
 	data->sbuf = buffer->sbuf;
 	data->scid = buffer->scid;
 	data->writeback = buffer->writeback;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
index 15ecc10..90b7194 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
@@ -438,6 +438,8 @@
 {
 	struct sde_rotator_ctx *ctx = vb2_get_drv_priv(q);
 	struct sde_rotator_device *rot_dev = ctx->rot_dev;
+	struct sde_rotator_request *request;
+	struct list_head *curr, *next;
 	int i;
 	int ret;
 
@@ -458,6 +460,21 @@
 		sde_rot_mgr_lock(rot_dev->mgr);
 		sde_rotator_cancel_all_requests(rot_dev->mgr, ctx->private);
 		sde_rot_mgr_unlock(rot_dev->mgr);
+		list_for_each_safe(curr, next, &ctx->pending_list) {
+			request = container_of(curr, struct sde_rotator_request,
+						list);
+
+			SDEDEV_DBG(rot_dev->dev, "cancel request s:%d\n",
+					ctx->session_id);
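+			/*
+			 * Drop the queue lock across cancel_work_sync();
+			 * the submit/retire work may need to take it.
+			 */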
+			mutex_unlock(q->lock);
+			cancel_work_sync(&request->submit_work);
+			cancel_work_sync(&request->retire_work);
+			mutex_lock(q->lock);
+			spin_lock(&ctx->list_lock);
+			list_del_init(&request->list);
+			list_add_tail(&request->list, &ctx->retired_list);
+			spin_unlock(&ctx->list_lock);
+		}
 	}
 
 	sde_rotator_return_all_buffers(q, VB2_BUF_STATE_ERROR);
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index ac6ded0..7215fdf 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -920,6 +920,8 @@
 	pkt->session_id = hash32_ptr(session);
 	pkt->num_properties = 1;
 
+	dprintk(VIDC_DBG, "Setting HAL Property = 0x%x\n", ptype);
+
 	switch (ptype) {
 	case HAL_CONFIG_FRAME_RATE:
 	{
@@ -1273,6 +1275,7 @@
 		hfi->qp_packed = hal_quant->qpi | hal_quant->qpp << 8 |
 			hal_quant->qpb << 16;
 		hfi->layer_id = hal_quant->layer_id;
+		hfi->enable = hal_quant->enable;
 		pkt->size += sizeof(u32) + sizeof(struct hfi_quantization);
 		break;
 	}
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index 3d3b7e9..a86c677 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -23,7 +23,6 @@
 #define MIN_NUM_THUMBNAIL_MODE_CAPTURE_BUFFERS MIN_NUM_CAPTURE_BUFFERS
 #define DEFAULT_VIDEO_CONCEAL_COLOR_BLACK 0x8010
 #define MB_SIZE_IN_PIXEL (16 * 16)
-#define MAX_OPERATING_FRAME_RATE (300 << 16)
 #define OPERATING_FRAME_RATE_STEP (1 << 16)
 
 static const char *const mpeg_video_stream_format[] = {
@@ -360,7 +359,7 @@
 		.name = "Set Decoder Operating rate",
 		.type = V4L2_CTRL_TYPE_INTEGER,
 		.minimum = 0,
-		.maximum = MAX_OPERATING_FRAME_RATE,
+		.maximum = INT_MAX,
 		.default_value = 0,
 		.step = OPERATING_FRAME_RATE_STEP,
 	},
@@ -368,17 +367,6 @@
 
 #define NUM_CTRLS ARRAY_SIZE(msm_vdec_ctrls)
 
-static u32 get_frame_size_nv12(int plane,
-					u32 height, u32 width)
-{
-	return VENUS_BUFFER_SIZE(COLOR_FMT_NV12, width, height);
-}
-
-static u32 get_frame_size_nv12_ubwc(int plane, u32 height, u32 width)
-{
-	return VENUS_BUFFER_SIZE(COLOR_FMT_NV12_UBWC, width, height);
-}
-
 static u32 get_frame_size_compressed_full_yuv(int plane,
 					u32 max_mbs_per_frame, u32 size_per_mb)
 {
@@ -391,11 +379,6 @@
 	return (max_mbs_per_frame * size_per_mb * 3/2)/2;
 }
 
-static u32 get_frame_size_nv12_ubwc_10bit(int plane, u32 height, u32 width)
-{
-	return VENUS_BUFFER_SIZE(COLOR_FMT_NV12_BPP10_UBWC, width, height);
-}
-
 static u32 get_frame_size(struct msm_vidc_inst *inst,
 					const struct msm_vidc_format *fmt,
 					int fmt_type, int plane)
@@ -446,7 +429,7 @@
 		.name = "UBWC YCbCr Semiplanar 4:2:0 10bit",
 		.description = "UBWC Y/CbCr 4:2:0 10bit",
 		.fourcc = V4L2_PIX_FMT_NV12_TP10_UBWC,
-		.get_frame_size = get_frame_size_nv12_ubwc_10bit,
+		.get_frame_size = get_frame_size_tp10_ubwc,
 		.type = CAPTURE_PORT,
 	},
 	{
@@ -681,7 +664,7 @@
 	inst->bufq[OUTPUT_PORT].num_planes = 1;
 	inst->bufq[CAPTURE_PORT].num_planes = 1;
 	inst->prop.fps = DEFAULT_FPS;
-	inst->operating_rate = 0;
+	inst->clk_data.operating_rate = 0;
 	memcpy(&inst->fmts[OUTPUT_PORT], &vdec_formats[2],
 			sizeof(struct msm_vidc_format));
 	memcpy(&inst->fmts[CAPTURE_PORT], &vdec_formats[0],
@@ -968,8 +951,12 @@
 	case V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE:
 		dprintk(VIDC_DBG,
 			"inst(%pK) operating rate changed from %d to %d\n",
-			inst, inst->operating_rate >> 16, ctrl->val >> 16);
-		inst->operating_rate = ctrl->val;
+			inst, inst->clk_data.operating_rate >> 16,
+				ctrl->val >> 16);
+		inst->clk_data.operating_rate = ctrl->val;
+
+		msm_vidc_update_operating_rate(inst);
+
 		break;
 	default:
 		break;
@@ -980,8 +967,8 @@
 
 	if (!rc && property_id) {
 		dprintk(VIDC_DBG,
-			"Control: HAL property=%#x,ctrl: id=%#x,value=%#x\n",
-			property_id, ctrl->id, ctrl->val);
+			"Control: Name = %s, ID = 0x%x Value = %d\n",
+				ctrl->name, ctrl->id, ctrl->val);
 		rc = call_hfi_op(hdev, session_set_property, (void *)
 				inst->session, property_id, pdata);
 	}
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index e198d8e..19a21ab 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -23,7 +23,6 @@
 #define DEFAULT_BIT_RATE 64000
 #define BIT_RATE_STEP 100
 #define DEFAULT_FRAME_RATE 15
-#define MAX_OPERATING_FRAME_RATE (300 << 16)
 #define OPERATING_FRAME_RATE_STEP (1 << 16)
 #define MAX_SLICE_BYTE_SIZE ((MAX_BIT_RATE)>>3)
 #define MIN_SLICE_BYTE_SIZE 512
@@ -286,6 +285,17 @@
 		.qmenu = NULL,
 	},
 	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_QP_MASK,
+		.name = "QP mask for diff frame types",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 1,
+		.maximum = 7,
+		.default_value = 7,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+	},
+	{
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_NUM_B_FRAMES,
 		.name = "Intra Period for B frames",
 		.type = V4L2_CTRL_TYPE_INTEGER,
@@ -867,7 +877,7 @@
 		.name = "Set Encoder Operating rate",
 		.type = V4L2_CTRL_TYPE_INTEGER,
 		.minimum = 0,
-		.maximum = MAX_OPERATING_FRAME_RATE,
+		.maximum = INT_MAX,
 		.default_value = 0,
 		.step = OPERATING_FRAME_RATE_STEP,
 	},
@@ -983,26 +993,6 @@
 
 #define NUM_CTRLS ARRAY_SIZE(msm_venc_ctrls)
 
-static u32 get_frame_size_nv12(int plane, u32 height, u32 width)
-{
-	return VENUS_BUFFER_SIZE(COLOR_FMT_NV12, width, height);
-}
-
-static u32 get_frame_size_nv12_ubwc(int plane, u32 height, u32 width)
-{
-	return VENUS_BUFFER_SIZE(COLOR_FMT_NV12_UBWC, width, height);
-}
-
-static u32 get_frame_size_rgba(int plane, u32 height, u32 width)
-{
-	return VENUS_BUFFER_SIZE(COLOR_FMT_RGBA8888, width, height);
-}
-
-static u32 get_frame_size_nv21(int plane, u32 height, u32 width)
-{
-	return VENUS_BUFFER_SIZE(COLOR_FMT_NV21, width, height);
-}
-
 static u32 get_frame_size_compressed(int plane, u32 height, u32 width)
 {
 	int sz = ALIGN(height, 32) * ALIGN(width, 32) * 3 / 2;
@@ -1060,6 +1050,13 @@
 		.get_frame_size = get_frame_size_nv21,
 		.type = OUTPUT_PORT,
 	},
+	{
+		.name = "TP10 UBWC 4:2:0",
+		.description = "TP10 UBWC 4:2:0",
+		.fourcc = V4L2_PIX_FMT_NV12_TP10_UBWC,
+		.get_frame_size = get_frame_size_tp10_ubwc,
+		.type = OUTPUT_PORT,
+	},
 };
 
 static int msm_venc_set_csc(struct msm_vidc_inst *inst);
@@ -1224,7 +1221,7 @@
 		bitrate.bit_rate = ctrl->val;
 		bitrate.layer_id = 0;
 		pdata = &bitrate;
-		inst->bitrate = ctrl->val;
+		inst->clk_data.bitrate = ctrl->val;
 		break;
 	}
 	case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
@@ -1644,43 +1641,65 @@
 		pdata = &baselayerid;
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP: {
-		struct v4l2_ctrl *qpp, *qpb;
+		struct v4l2_ctrl *qpp, *qpb, *mask;
 
 		property_id = HAL_CONFIG_VENC_FRAME_QP;
 		qpp = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP);
 		qpb = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP);
+		mask = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_QP_MASK);
 
 		quant.qpi = ctrl->val;
 		quant.qpp = qpp->val;
 		quant.qpb = qpb->val;
+		quant.enable = mask->val;
 		quant.layer_id = MSM_VIDC_ALL_LAYER_ID;
 		pdata = &quant;
 		break;
 	}
 	case V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP: {
-		struct v4l2_ctrl *qpi, *qpb;
+		struct v4l2_ctrl *qpi, *qpb, *mask;
 
 		property_id = HAL_CONFIG_VENC_FRAME_QP;
 		qpi = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP);
 		qpb = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP);
+		mask = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_QP_MASK);
 
 		quant.qpp = ctrl->val;
 		quant.qpi = qpi->val;
 		quant.qpb = qpb->val;
+		quant.enable = mask->val;
 		quant.layer_id = MSM_VIDC_ALL_LAYER_ID;
 		pdata = &quant;
 		break;
 	}
 	case V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP: {
-		struct v4l2_ctrl *qpp, *qpi;
+		struct v4l2_ctrl *qpp, *qpi, *mask;
 
 		property_id = HAL_CONFIG_VENC_FRAME_QP;
 		qpp = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP);
 		qpi = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP);
+		mask = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_QP_MASK);
 
 		quant.qpb = ctrl->val;
 		quant.qpp = qpp->val;
 		quant.qpi = qpi->val;
+		quant.enable = mask->val;
+		quant.layer_id = MSM_VIDC_ALL_LAYER_ID;
+		pdata = &quant;
+		break;
+	}
+	case V4L2_CID_MPEG_VIDC_VIDEO_QP_MASK: {
+		struct v4l2_ctrl *qpi, *qpp, *qpb;
+
+		property_id = HAL_CONFIG_VENC_FRAME_QP;
+		qpi = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP);
+		qpp = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP);
+		qpb = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP);
+
+		quant.qpi = qpi->val;
+		quant.qpp = qpp->val;
+		quant.qpb = qpb->val;
+		quant.enable = ctrl->val;
 		quant.layer_id = MSM_VIDC_ALL_LAYER_ID;
 		pdata = &quant;
 		break;
@@ -1714,8 +1733,12 @@
 	case V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE:
 		dprintk(VIDC_DBG,
 			"inst(%pK) operating rate changed from %d to %d\n",
-			inst, inst->operating_rate >> 16, ctrl->val >> 16);
-		inst->operating_rate = ctrl->val;
+			inst, inst->clk_data.operating_rate >> 16,
+				ctrl->val >> 16);
+		inst->clk_data.operating_rate = ctrl->val;
+
+		msm_vidc_update_operating_rate(inst);
+
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_VENC_BITRATE_TYPE:
 	{
@@ -1798,6 +1821,7 @@
 		else
 			enable.enable = 0;
 		pdata = &enable;
+		inst->clk_data.low_latency_mode = (bool) enable.enable;
 		break;
 	}
 	case V4L2_CID_MPEG_VIDC_VIDEO_H264_TRANSFORM_8x8:
@@ -1835,9 +1859,9 @@
 #undef TRY_GET_CTRL
 
 	if (!rc && property_id) {
-		dprintk(VIDC_DBG, "Control: HAL property=%x,ctrl_value=%d\n",
-				property_id,
-				ctrl->val);
+		dprintk(VIDC_DBG,
+			"Control: Name = %s, ID = 0x%x Value = %d\n",
+				ctrl->name, ctrl->id, ctrl->val);
 		rc = call_hfi_op(hdev, session_set_property,
 				(void *)inst->session, property_id, pdata);
 	}
@@ -1852,7 +1876,7 @@
 	struct v4l2_ext_control *control;
 	struct hfi_device *hdev;
 	struct hal_ltr_mode ltr_mode;
-	u32 property_id = 0, layer_id = MSM_VIDC_ALL_LAYER_ID;
+	u32 property_id = 0;
 	void *pdata = NULL;
 	struct msm_vidc_capability *cap = NULL;
 	struct hal_aspect_ratio sar;
@@ -1930,76 +1954,75 @@
 			pdata = &blur_res;
 			break;
 		case V4L2_CID_MPEG_VIDC_VIDEO_LAYER_ID:
-			layer_id = control[i].value;
+			qp.layer_id = control[i].value;
+			/* Enable QP for all frame types by default */
+			qp.enable = 7;
+			qp_range.layer_id = control[i].value;
 			i++;
 			while (i < ctrl->count) {
 			switch (control[i].id) {
 			case V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP:
 				qp.qpi = control[i].value;
-				qp.layer_id = layer_id;
 				property_id =
 					HAL_CONFIG_VENC_FRAME_QP;
 				pdata = &qp;
 				break;
 			case V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP:
 				qp.qpp = control[i].value;
-				qp.layer_id = layer_id;
 				property_id =
 					HAL_CONFIG_VENC_FRAME_QP;
 				pdata = &qp;
 				break;
 			case V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP:
 				qp.qpb = control[i].value;
-				qp.layer_id = layer_id;
+				property_id =
+					HAL_CONFIG_VENC_FRAME_QP;
+				pdata = &qp;
+				break;
+			case V4L2_CID_MPEG_VIDC_VIDEO_QP_MASK:
+				qp.enable = control[i].value;
 				property_id =
 					HAL_CONFIG_VENC_FRAME_QP;
 				pdata = &qp;
 				break;
 			case V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP_MIN:
 				qp_range.qpi_min = control[i].value;
-				qp_range.layer_id = layer_id;
 				property_id =
 					HAL_PARAM_VENC_SESSION_QP_RANGE;
 				pdata = &qp_range;
 				break;
 			case V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP_MIN:
 				qp_range.qpp_min = control[i].value;
-				qp_range.layer_id = layer_id;
 				property_id =
-				HAL_PARAM_VENC_SESSION_QP_RANGE;
+					HAL_PARAM_VENC_SESSION_QP_RANGE;
 				pdata = &qp_range;
 				break;
 			case V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP_MIN:
 				qp_range.qpb_min = control[i].value;
-				qp_range.layer_id = layer_id;
 				property_id =
 					HAL_PARAM_VENC_SESSION_QP_RANGE;
 				pdata = &qp_range;
 				break;
 			case V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP_MAX:
 				qp_range.qpi_max = control[i].value;
-				qp_range.layer_id = layer_id;
 				property_id =
 					HAL_PARAM_VENC_SESSION_QP_RANGE;
 				pdata = &qp_range;
 				break;
 			case V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP_MAX:
 				qp_range.qpp_max = control[i].value;
-				qp_range.layer_id = layer_id;
 				property_id =
 					HAL_PARAM_VENC_SESSION_QP_RANGE;
 				pdata = &qp_range;
 				break;
 			case V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP_MAX:
 				qp_range.qpb_max = control[i].value;
-				qp_range.layer_id = layer_id;
 				property_id =
 					HAL_PARAM_VENC_SESSION_QP_RANGE;
 				pdata = &qp_range;
 				break;
 			case V4L2_CID_MPEG_VIDC_VENC_PARAM_LAYER_BITRATE:
 				bitrate.bit_rate = control[i].value;
-				bitrate.layer_id = layer_id;
 				property_id =
 					HAL_CONFIG_VENC_TARGET_BITRATE;
 				pdata = &bitrate;
@@ -2051,7 +2074,7 @@
 	/* To start with, both ports are 1 plane each */
 	inst->bufq[OUTPUT_PORT].num_planes = 1;
 	inst->bufq[CAPTURE_PORT].num_planes = 1;
-	inst->operating_rate = 0;
+	inst->clk_data.operating_rate = 0;
 
 	memcpy(&inst->fmts[CAPTURE_PORT], &venc_formats[4],
 			sizeof(struct msm_vidc_format));
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index e071037..8f97b15 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -1433,7 +1433,9 @@
 			"Failed to move inst: %pK to start done state\n", inst);
 		goto fail_start;
 	}
-	msm_dcvs_init(inst);
+
+	msm_clock_data_reset(inst);
+
 	if (msm_comm_get_stream_output_mode(inst) ==
 			HAL_VIDEO_DECODER_SECONDARY) {
 		rc = msm_comm_queue_output_buffers(inst);
@@ -1519,6 +1521,9 @@
 		dprintk(VIDC_ERR,
 			"Failed to move inst: %pK to state %d\n",
 				inst, MSM_VIDC_RELEASE_RESOURCES_DONE);
+
+	msm_clock_data_reset(inst);
+
 	return rc;
 }
 
@@ -1971,10 +1976,11 @@
 	inst->session_type = session_type;
 	inst->state = MSM_VIDC_CORE_UNINIT_DONE;
 	inst->core = core;
-	inst->freq = 0;
-	inst->core_id = VIDC_CORE_ID_DEFAULT;
+	inst->clk_data.min_freq = 0;
+	inst->clk_data.curr_freq = 0;
+	inst->clk_data.bitrate = 0;
+	inst->clk_data.core_id = VIDC_CORE_ID_DEFAULT;
 	inst->bit_depth = MSM_VIDC_BIT_DEPTH_8;
-	inst->bitrate = 0;
 	inst->pic_struct = MSM_VIDC_PIC_STRUCT_PROGRESSIVE;
 	inst->colour_space = MSM_VIDC_BT601_6_525;
 	inst->profile = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE;
@@ -2037,6 +2043,8 @@
 		goto fail_init;
 	}
 
+	msm_comm_scale_clocks_and_bus(inst);
+
 	inst->debugfs_root =
 		msm_vidc_debugfs_init_inst(inst, core->debugfs_root);
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index d43ae5a..cd518fb 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -23,17 +23,12 @@
 	struct msm_vidc_inst *inst = NULL;
 	struct vidc_bus_vote_data *vote_data = NULL;
 
-	if (!core) {
+	if (!core || !core->device) {
 		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, core);
 		return -EINVAL;
 	}
 
 	hdev = core->device;
-	if (!hdev) {
-		dprintk(VIDC_ERR, "%s Invalid device handle: %pK\n",
-				__func__, hdev);
-		return -EINVAL;
-	}
 
 	mutex_lock(&core->lock);
 	list_for_each_entry(inst, &core->instances, list)
@@ -65,12 +60,17 @@
 		vote_data[i].height = max(inst->prop.height[CAPTURE_PORT],
 				inst->prop.height[OUTPUT_PORT]);
 
-		if (inst->operating_rate)
-			vote_data[i].fps = (inst->operating_rate >> 16) ?
-				inst->operating_rate >> 16 : 1;
+		if (inst->clk_data.operating_rate)
+			vote_data[i].fps =
+				(inst->clk_data.operating_rate >> 16) ?
+				inst->clk_data.operating_rate >> 16 : 1;
 		else
 			vote_data[i].fps = inst->prop.fps;
 
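+		/* Vote TURBO until scaling is on and the DCVS window fills */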
+		if (!msm_vidc_clock_scaling ||
+			inst->clk_data.buffer_counter < DCVS_FTB_WINDOW)
+			vote_data[i].power_mode = VIDC_POWER_TURBO;
+
 		/*
 		 * TODO: support for OBP-DBP split mode hasn't been yet
 		 * implemented, once it is, this part of code needs to be
@@ -126,18 +126,19 @@
 	int buffers_outside_fw = 0;
 	struct msm_vidc_core *core;
 	struct hal_buffer_requirements *output_buf_req;
-	struct dcvs_stats *dcvs;
+	struct clock_data *dcvs;
 
 	if (!inst || !inst->core || !inst->core->device) {
 		dprintk(VIDC_ERR, "%s Invalid params\n", __func__);
 		return -EINVAL;
 	}
-	if (!inst->dcvs_mode) {
+
+	if (!inst->clk_data.dcvs_mode) {
 		dprintk(VIDC_DBG, "DCVS is not enabled\n");
 		return 0;
 	}
 
-	dcvs = &inst->dcvs;
+	dcvs = &inst->clk_data;
 
 	core = inst->core;
 	mutex_lock(&inst->lock);
@@ -210,7 +211,7 @@
 	}
 	mutex_unlock(&inst->freqs.lock);
 
-	inst->dcvs.buffer_counter++;
+	inst->clk_data.buffer_counter++;
 }
 
 
@@ -227,12 +228,13 @@
 
 	/* If current requirement is within DCVS limits, try DCVS. */
 
-	if (freq < inst->dcvs.load_high) {
+	if (freq < inst->clk_data.load_high) {
 		dprintk(VIDC_DBG, "Calling DCVS now\n");
 		// TODO calling DCVS here may reduce the residency. Re-visit.
 		msm_dcvs_scale_clocks(inst);
-		freq = inst->dcvs.load;
+		freq = inst->clk_data.load;
 	}
+	dprintk(VIDC_PROF, "%s Inst %pK : Freq = %lu\n", __func__, inst, freq);
 
 	return freq;
 }
@@ -274,9 +276,10 @@
 	unsigned long freq = 0;
 	unsigned long vpp_cycles = 0, vsp_cycles = 0;
 	u32 vpp_cycles_per_mb;
-	u32 mbs_per_frame;
+	u32 mbs_per_second;
 
-	mbs_per_frame = msm_dcvs_get_mbs_per_frame(inst);
+	mbs_per_second = msm_comm_get_inst_load(inst,
+		LOAD_CALC_NO_QUIRKS);
 
 	/*
 	 * Calculate vpp, vsp cycles separately for encoder and decoder.
@@ -286,17 +289,17 @@
 
 	if (inst->session_type == MSM_VIDC_ENCODER) {
 		vpp_cycles_per_mb = inst->flags & VIDC_LOW_POWER ?
-			inst->entry->low_power_cycles :
-			inst->entry->vpp_cycles;
+			inst->clk_data.entry->low_power_cycles :
+			inst->clk_data.entry->vpp_cycles;
 
-		vsp_cycles = mbs_per_frame * inst->entry->vsp_cycles;
+		vsp_cycles = mbs_per_second * inst->clk_data.entry->vsp_cycles;
 
 		/* 10 / 7 is overhead factor */
-		vsp_cycles += (inst->bitrate * 10) / 7;
+		vsp_cycles += (inst->clk_data.bitrate * 10) / 7;
 	} else if (inst->session_type == MSM_VIDC_DECODER) {
-		vpp_cycles = mbs_per_frame * inst->entry->vpp_cycles;
+		vpp_cycles = mbs_per_second * inst->clk_data.entry->vpp_cycles;
 
-		vsp_cycles = mbs_per_frame * inst->entry->vsp_cycles;
+		vsp_cycles = mbs_per_second * inst->clk_data.entry->vsp_cycles;
 		/* 10 / 7 is overhead factor */
 		vsp_cycles += (inst->prop.fps * filled_len * 8 * 10) / 7;
 
@@ -306,6 +309,8 @@
 		return freq;
 	}
 
 	freq = max(vpp_cycles, vsp_cycles);
 
+	dprintk(VIDC_PROF, "%s Inst %pK : Freq = %lu\n", __func__, inst, freq);
+
 	return freq;
@@ -321,7 +326,7 @@
 
 	hdev = core->device;
 	allowed_clks_tbl = core->resources.allowed_clks_tbl;
-	if (!hdev || !allowed_clks_tbl) {
+	if (!allowed_clks_tbl) {
 		dprintk(VIDC_ERR,
 			"%s Invalid parameters\n", __func__);
 		return -EINVAL;
@@ -329,35 +334,104 @@
 
 	mutex_lock(&core->lock);
 	list_for_each_entry(temp, &core->instances, list) {
-		freq += temp->freq;
+		freq += temp->clk_data.curr_freq;
 	}
 	for (i = core->resources.allowed_clks_tbl_size - 1; i >= 0; i--) {
 		rate = allowed_clks_tbl[i].clock_rate;
 		if (rate >= freq)
 			break;
 	}
+	core->min_freq = freq;
+	core->curr_freq = rate;
 	mutex_unlock(&core->lock);
 
-	core->freq = rate;
-	dprintk(VIDC_PROF, "Voting for freq = %lu", freq);
+	dprintk(VIDC_PROF, "Min freq = %lu Current Freq = %lu\n",
+		core->min_freq, core->curr_freq);
 	rc = call_hfi_op(hdev, scale_clocks,
-			hdev->hfi_device_data, rate);
+			hdev->hfi_device_data, core->curr_freq);
 
 	return rc;
 }
 
-static unsigned long msm_vidc_max_freq(struct msm_vidc_inst *inst)
+static unsigned long msm_vidc_max_freq(struct msm_vidc_core *core)
 {
 	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
 	unsigned long freq = 0;
 
-	allowed_clks_tbl = inst->core->resources.allowed_clks_tbl;
+	allowed_clks_tbl = core->resources.allowed_clks_tbl;
 	freq = allowed_clks_tbl[0].clock_rate;
 	dprintk(VIDC_PROF, "Max rate = %lu", freq);
 
 	return freq;
 }
 
+int msm_vidc_update_operating_rate(struct msm_vidc_inst *inst)
+{
+	struct v4l2_ctrl *ctrl = NULL;
+	struct msm_vidc_inst *temp;
+	struct msm_vidc_core *core;
+	unsigned long max_freq, freq_left, ops_left, load, cycles, freq = 0;
+	unsigned long mbs_per_frame;
+
+	if (!inst || !inst->core) {
+		dprintk(VIDC_ERR, "%s Invalid args\n", __func__);
+		return -EINVAL;
+	}
+	core = inst->core;
+
+	mutex_lock(&core->lock);
+	max_freq = msm_vidc_max_freq(core);
+	list_for_each_entry(temp, &core->instances, list) {
+		if (temp == inst ||
+				temp->state < MSM_VIDC_START_DONE ||
+				temp->state >= MSM_VIDC_RELEASE_RESOURCES_DONE)
+			continue;
+
+		freq += temp->clk_data.min_freq;
+	}
+
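+	/* Remaining cycle budget after the other running sessions */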
+	freq_left = max_freq - freq;
+
+	list_for_each_entry(temp, &core->instances, list) {
+
+		mbs_per_frame = msm_dcvs_get_mbs_per_frame(temp);
+		cycles = temp->clk_data.entry->vpp_cycles;
+		if (temp->session_type == MSM_VIDC_ENCODER)
+			cycles = temp->flags & VIDC_LOW_POWER ?
+				temp->clk_data.entry->low_power_cycles :
+				cycles;
+
+		load = cycles * mbs_per_frame;
+
+		ops_left = load ? (freq_left / load) : 0;
+		/* Convert remaining operating rate to Q16 format */
+		ops_left = ops_left << 16;
+
+		ctrl = v4l2_ctrl_find(&temp->ctrl_handler,
+			V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE);
+		if (ctrl) {
+			dprintk(VIDC_DBG,
+				"%s: Before Range = %lld --> %lld\n",
+				ctrl->name, ctrl->minimum, ctrl->maximum);
+			dprintk(VIDC_DBG,
+				"%s: Before Def value = %lld Cur val = %d\n",
+				ctrl->name, ctrl->default_value, ctrl->val);
+			v4l2_ctrl_modify_range(ctrl, ctrl->minimum,
+				ctrl->val + ops_left, ctrl->step,
+				ctrl->minimum);
+			dprintk(VIDC_DBG,
+				"%s: Updated Range = %lld --> %lld\n",
+				ctrl->name, ctrl->minimum, ctrl->maximum);
+			dprintk(VIDC_DBG,
+				"%s: Updated Def value = %lld Cur val = %d\n",
+				ctrl->name, ctrl->default_value, ctrl->val);
+		}
+	}
+	mutex_unlock(&core->lock);
+
+	return 0;
+}
+
 int msm_comm_scale_clocks(struct msm_vidc_inst *inst)
 {
 	struct vb2_buf_entry *temp, *next;
@@ -365,9 +439,10 @@
 	u32 filled_len = 0;
 	ion_phys_addr_t device_addr = 0;
 
-	if (inst->dcvs.buffer_counter < DCVS_FTB_WINDOW) {
-		freq = msm_vidc_max_freq(inst);
-		goto decision_done;
+	if (!inst || !inst->core) {
+		dprintk(VIDC_ERR, "%s Invalid args: Inst = %pK\n",
+			__func__, inst);
+		return -EINVAL;
 	}
 
 	mutex_lock(&inst->pendingq.lock);
@@ -381,7 +456,7 @@
 	mutex_unlock(&inst->pendingq.lock);
 
 	if (!filled_len || !device_addr) {
-		freq = inst->freq;
+		dprintk(VIDC_PROF, "No Change in frequency\n");
 		goto decision_done;
 	}
 
@@ -391,8 +466,15 @@
 
 	freq = msm_vidc_adjust_freq(inst);
 
+	inst->clk_data.min_freq = freq;
+
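+	/* Run at max rate until the DCVS window fills or if scaling is off */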
+	if (inst->clk_data.buffer_counter < DCVS_FTB_WINDOW ||
+		!msm_vidc_clock_scaling)
+		inst->clk_data.curr_freq = msm_vidc_max_freq(inst->core);
+	else
+		inst->clk_data.curr_freq = freq;
+
 decision_done:
-	inst->freq = freq;
 	msm_vidc_set_clocks(inst->core);
 	return 0;
 }
@@ -422,29 +504,29 @@
 
 int msm_dcvs_try_enable(struct msm_vidc_inst *inst)
 {
-	bool force_disable = false;
-
 	if (!inst) {
 		dprintk(VIDC_ERR, "%s: Invalid args: %p\n", __func__, inst);
 		return -EINVAL;
 	}
 
-	force_disable = inst->session_type == MSM_VIDC_ENCODER ?
-		!msm_vidc_enc_dcvs_mode :
-		!msm_vidc_dec_dcvs_mode;
-
-	if (force_disable || inst->flags & VIDC_THUMBNAIL) {
-		dprintk(VIDC_PROF, "Thumbnail sessions don't need DCVS : %pK\n",
-			inst);
-		inst->dcvs.extra_capture_buffer_count = 0;
-		inst->dcvs.extra_output_buffer_count = 0;
+	if (!msm_vidc_clock_scaling ||
+			inst->flags & VIDC_THUMBNAIL ||
+			inst->clk_data.low_latency_mode) {
+		dprintk(VIDC_PROF,
+			"This session doesn't need DCVS : %pK\n",
+				inst);
+		inst->clk_data.extra_capture_buffer_count = 0;
+		inst->clk_data.extra_output_buffer_count = 0;
+		inst->clk_data.dcvs_mode = false;
 		return false;
 	}
-	inst->dcvs_mode = true;
+	inst->clk_data.dcvs_mode = true;
 
 	// TODO : Update with proper number based on on-target tuning.
-	inst->dcvs.extra_capture_buffer_count = DCVS_DEC_EXTRA_OUTPUT_BUFFERS;
-	inst->dcvs.extra_output_buffer_count = DCVS_DEC_EXTRA_OUTPUT_BUFFERS;
+	inst->clk_data.extra_capture_buffer_count =
+		DCVS_DEC_EXTRA_OUTPUT_BUFFERS;
+	inst->clk_data.extra_output_buffer_count =
+		DCVS_DEC_EXTRA_OUTPUT_BUFFERS;
 	return true;
 }
 
@@ -482,6 +564,12 @@
 	struct clock_profile_entry *entry = NULL;
 	int fourcc;
 
+	if (!inst || !inst->core) {
+		dprintk(VIDC_ERR, "%s Invalid args: Inst = %pK\n",
+			__func__, inst);
+		return -EINVAL;
+	}
+
 	clk_freq_tbl = &inst->core->resources.clock_freq_tbl;
 	fourcc = inst->session_type == MSM_VIDC_DECODER ?
 		inst->fmts[OUTPUT_PORT].fourcc :
@@ -498,7 +586,7 @@
 				inst->session_type);
 
 		if (matched) {
-			inst->entry = entry;
+			inst->clk_data.entry = entry;
 			break;
 		}
 	}
@@ -512,7 +600,7 @@
 	return rc;
 }
 
-static inline void msm_dcvs_print_dcvs_stats(struct dcvs_stats *dcvs)
+static inline void msm_dcvs_print_dcvs_stats(struct clock_data *dcvs)
 {
 	dprintk(VIDC_DBG,
 		"DCVS: Load_Low %d, Load High %d\n",
@@ -524,31 +612,31 @@
 		dcvs->min_threshold, dcvs->max_threshold);
 }
 
-void msm_dcvs_init(struct msm_vidc_inst *inst)
+void msm_clock_data_reset(struct msm_vidc_inst *inst)
 {
 	struct msm_vidc_core *core;
-	int i = 0;
+	int i = 0, rc = 0;
 	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
 	u64 total_freq = 0, rate = 0, load;
 	int cycles;
-	struct dcvs_stats *dcvs;
+	struct clock_data *dcvs;
 
 	dprintk(VIDC_DBG, "Init DCVS Load\n");
 
 	if (!inst || !inst->core) {
-		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, inst);
+		dprintk(VIDC_ERR, "%s Invalid args: Inst = %pK\n",
+			__func__, inst);
 		return;
 	}
 
 	core = inst->core;
-	dcvs = &inst->dcvs;
-	inst->dcvs = (struct dcvs_stats){0};
+	dcvs = &inst->clk_data;
 	load = msm_comm_get_inst_load(inst, LOAD_CALC_NO_QUIRKS);
-	cycles = inst->entry->vpp_cycles;
+	cycles = inst->clk_data.entry->vpp_cycles;
 	allowed_clks_tbl = core->resources.allowed_clks_tbl;
 	if (inst->session_type == MSM_VIDC_ENCODER) {
 		cycles = inst->flags & VIDC_LOW_POWER ?
-			inst->entry->low_power_cycles :
+			inst->clk_data.entry->low_power_cycles :
 			cycles;
 
 		dcvs->buffer_type = HAL_BUFFER_INPUT;
@@ -573,7 +661,17 @@
 	dcvs->load = dcvs->load_high = rate;
 	dcvs->load_low = allowed_clks_tbl[i+1].clock_rate;
 
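+	/* Restart the DCVS sampling window */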
+	inst->clk_data.buffer_counter = 0;
+
 	msm_dcvs_print_dcvs_stats(dcvs);
+
+	msm_vidc_update_operating_rate(inst);
+
+	rc = msm_comm_scale_clocks_and_bus(inst);
+	if (rc)
+		dprintk(VIDC_ERR, "%s Failed to scale clocks and bus\n",
+			__func__);
 }
 
 int msm_vidc_get_extra_buff_count(struct msm_vidc_inst *inst,
@@ -585,23 +683,26 @@
 	}
 
 	return buffer_type == HAL_BUFFER_INPUT ?
-		inst->dcvs.extra_output_buffer_count :
-		inst->dcvs.extra_capture_buffer_count;
+		inst->clk_data.extra_output_buffer_count :
+		inst->clk_data.extra_capture_buffer_count;
 }
 
 int msm_vidc_decide_work_mode(struct msm_vidc_inst *inst)
 {
 	int rc = 0;
-	bool low_latency_mode;
 	struct hfi_device *hdev;
 	struct hal_video_work_mode pdata;
 	struct hal_enable latency;
 
-	hdev = inst->core->device;
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR,
+			"%s Invalid args: Inst = %pK\n",
+			__func__, inst);
+		return -EINVAL;
+	}
 
-	low_latency_mode = msm_comm_g_ctrl_for_id(inst,
-			V4L2_CID_MPEG_VIDC_VIDEO_LOWLATENCY_MODE);
-	if (low_latency_mode) {
+	hdev = inst->core->device;
+	if (inst->clk_data.low_latency_mode) {
 		pdata.video_work_mode = VIDC_WORK_MODE_1;
 		goto decision_done;
 	}
@@ -636,7 +737,7 @@
 
 decision_done:
 
-	inst->work_mode = pdata.video_work_mode;
+	inst->clk_data.work_mode = pdata.video_work_mode;
 	rc = call_hfi_op(hdev, session_set_property,
 			(void *)inst->session, HAL_PARAM_VIDEO_WORK_MODE,
 			(void *)&pdata);
@@ -646,7 +747,8 @@
 
-	/* For WORK_MODE_1, set Low Latency mode by default to HW. */
+	/* For encoder sessions in WORK_MODE_1, set HW low latency by default. */
 
-	if (inst->work_mode == VIDC_WORK_MODE_1) {
+	if (inst->session_type == MSM_VIDC_ENCODER &&
+			inst->clk_data.work_mode == VIDC_WORK_MODE_1) {
 		latency.enable = 1;
 		rc = call_hfi_op(hdev, session_set_property,
 			(void *)inst->session, HAL_PARAM_VENC_LOW_LATENCY,
@@ -675,8 +777,8 @@
 		return 0;
 	}
 	mbs_per_frame = msm_dcvs_get_mbs_per_frame(inst);
-	if (inst->core->resources.max_hq_mbs_per_frame > mbs_per_frame ||
-		inst->core->resources.max_hq_fps > inst->prop.fps) {
+	if (mbs_per_frame >= inst->core->resources.max_hq_mbs_per_frame ||
+		inst->prop.fps >= inst->core->resources.max_hq_fps) {
 		enable = true;
 	}
 
@@ -692,9 +794,12 @@
 			__func__, inst);
 		goto fail_power_mode_set;
 	}
-	inst->flags |= VIDC_LOW_POWER;
-	dprintk(VIDC_PROF, "Power Save Mode set for inst: %pK\n", inst);
+	inst->flags = enable ?
+		inst->flags | VIDC_LOW_POWER :
+		inst->flags & ~VIDC_LOW_POWER;
 
+	dprintk(VIDC_PROF,
+		"Power Save Mode for inst: %pK Enable = %d\n", inst, enable);
 fail_power_mode_set:
 	return rc;
 }
@@ -704,15 +809,10 @@
 {
 	struct msm_vidc_inst *inst = NULL;
 
-	if (!core) {
-		dprintk(VIDC_ERR, "Invalid args: %pK\n", core);
-		return -EINVAL;
-	}
-
 	dprintk(VIDC_PROF, "Core %d : Moving all inst to LP mode\n", core_id);
 	mutex_lock(&core->lock);
 	list_for_each_entry(inst, &core->instances, list) {
-		if (inst->core_id == core_id &&
+		if (inst->clk_data.core_id == core_id &&
 			inst->session_type == MSM_VIDC_ENCODER)
 			msm_vidc_power_save_mode_enable(inst, true);
 	}
@@ -731,19 +831,19 @@
 	list_for_each_entry(inst, &core->instances, list) {
 		u32 cycles, lp_cycles;
 
-		if (!inst->core_id && core_id)
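+		/* Skip instances that are not assigned to this core */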
+		if (!(inst->clk_data.core_id & core_id))
 			continue;
 		if (inst->session_type == MSM_VIDC_DECODER) {
-			cycles = lp_cycles = inst->entry->vpp_cycles;
+			cycles = lp_cycles = inst->clk_data.entry->vpp_cycles;
 		} else if (inst->session_type == MSM_VIDC_ENCODER) {
 			lp_mode |= inst->flags & VIDC_LOW_POWER;
 			cycles = lp_mode ?
-				inst->entry->low_power_cycles :
-				inst->entry->vpp_cycles;
+				inst->clk_data.entry->low_power_cycles :
+				inst->clk_data.entry->vpp_cycles;
 		} else {
 			continue;
 		}
-		if (inst->core_id == 3)
+		if (inst->clk_data.core_id == 3)
 			cycles = cycles / 2;
 
 		current_inst_mbs_per_sec = msm_comm_get_inst_load(inst,
@@ -768,10 +868,17 @@
 		min_load = 0, min_lp_load = 0;
 	u32 min_core_id, min_lp_core_id;
 
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR,
+			"%s Invalid args: Inst = %pK\n",
+			__func__, inst);
+		return -EINVAL;
+	}
+
 	core = inst->core;
 	hdev = core->device;
-	max_freq = msm_vidc_max_freq(inst);
-	inst->core_id = 0;
+	max_freq = msm_vidc_max_freq(inst->core);
+	inst->clk_data.core_id = 0;
 
 	core0_load = get_core_load(core, VIDC_CORE_ID_1, false);
 	core1_load = get_core_load(core, VIDC_CORE_ID_2, false);
@@ -786,11 +893,11 @@
 		VIDC_CORE_ID_1 : VIDC_CORE_ID_2;
 
 	lp_cycles = inst->session_type == MSM_VIDC_ENCODER ?
-			inst->entry->low_power_cycles :
-			inst->entry->vpp_cycles;
+			inst->clk_data.entry->low_power_cycles :
+			inst->clk_data.entry->vpp_cycles;
 
 	current_inst_load = msm_comm_get_inst_load(inst, LOAD_CALC_NO_QUIRKS) *
-		inst->entry->vpp_cycles;
+		inst->clk_data.entry->vpp_cycles;
 
 	current_inst_lp_load = msm_comm_get_inst_load(inst,
 		LOAD_CALC_NO_QUIRKS) * lp_cycles;
@@ -814,7 +921,7 @@
 	if (inst->session_type == MSM_VIDC_ENCODER && hier_mode) {
 		if (current_inst_load / 2 + core0_load <= max_freq &&
 			current_inst_load / 2 + core1_load <= max_freq) {
-			inst->core_id = VIDC_CORE_ID_3;
+			inst->clk_data.core_id = VIDC_CORE_ID_3;
 			msm_vidc_power_save_mode_enable(inst, false);
 			goto decision_done;
 		}
@@ -825,32 +932,32 @@
 				core0_lp_load <= max_freq &&
 			current_inst_lp_load / 2 +
 				core1_lp_load <= max_freq) {
-			inst->core_id = VIDC_CORE_ID_3;
+			inst->clk_data.core_id = VIDC_CORE_ID_3;
 			msm_vidc_power_save_mode_enable(inst, true);
 			goto decision_done;
 		}
 	}
 
 	if (current_inst_load + min_load < max_freq) {
-		inst->core_id = min_core_id;
+		inst->clk_data.core_id = min_core_id;
 		dprintk(VIDC_DBG,
 			"Selected normally : Core ID = %d\n",
-				inst->core_id);
+				inst->clk_data.core_id);
 		msm_vidc_power_save_mode_enable(inst, false);
 	} else if (current_inst_lp_load + min_load < max_freq) {
 		/* Move current instance to LP and return */
-		inst->core_id = min_core_id;
+		inst->clk_data.core_id = min_core_id;
 		dprintk(VIDC_DBG,
 			"Selected by moving current to LP : Core ID = %d\n",
-				inst->core_id);
+				inst->clk_data.core_id);
 		msm_vidc_power_save_mode_enable(inst, true);
 
 	} else if (current_inst_lp_load + min_lp_load < max_freq) {
 		/* Move all instances to LP mode and return */
-		inst->core_id = min_lp_core_id;
+		inst->clk_data.core_id = min_lp_core_id;
 		dprintk(VIDC_DBG,
 			"Moved all inst's to LP: Core ID = %d\n",
-				inst->core_id);
+				inst->clk_data.core_id);
 		msm_vidc_move_core_to_power_save_mode(core, min_lp_core_id);
 	} else {
 		rc = -EINVAL;
@@ -860,7 +967,7 @@
 	}
 
 decision_done:
-	core_info.video_core_enable_mask = inst->core_id;
+	core_info.video_core_enable_mask = inst->clk_data.core_id;
 
 	rc = call_hfi_op(hdev, session_set_property,
 			(void *)inst->session,
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
index d01f074..fe4822b 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
@@ -31,7 +31,8 @@
 /* Considering one safeguard buffer */
 #define DCVS_BUFFER_SAFEGUARD (DCVS_DEC_EXTRA_OUTPUT_BUFFERS - 1)
 
-void msm_dcvs_init(struct msm_vidc_inst *inst);
+void msm_clock_data_reset(struct msm_vidc_inst *inst);
+int msm_vidc_update_operating_rate(struct msm_vidc_inst *inst);
 int msm_vidc_get_extra_buff_count(struct msm_vidc_inst *inst,
 	enum hal_buffer buffer_type);
 int msm_dcvs_try_enable(struct msm_vidc_inst *inst);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 853edf5..d4562ce 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -584,9 +584,9 @@
 	capture_port_mbs = NUM_MBS_PER_FRAME(inst->prop.width[CAPTURE_PORT],
 		inst->prop.height[CAPTURE_PORT]);
 
-	if (inst->operating_rate) {
-		fps = (inst->operating_rate >> 16) ?
-			inst->operating_rate >> 16 : 1;
+	if (inst->clk_data.operating_rate) {
+		fps = (inst->clk_data.operating_rate >> 16) ?
+			inst->clk_data.operating_rate >> 16 : 1;
 		/*
 		 * Check if operating rate is less than fps.
 		 * If Yes, then use fps to scale clocks
@@ -2510,7 +2510,7 @@
 	}
 
 	tl = msm_comm_vidc_thermal_level(vidc_driver->thermal_level);
-	freq = core->freq;
+	freq = core->curr_freq;
 
 	is_turbo = is_core_turbo(core, freq);
 	dprintk(VIDC_DBG,
@@ -4738,9 +4738,9 @@
 		return 0;
 	}
 
-	// Finish FLUSH As Soon As Possible.
-	inst->dcvs.buffer_counter = 0;
-	msm_comm_scale_clocks_and_bus(inst);
+	/* Finish FLUSH As Soon As Possible. */
+
+	msm_clock_data_reset(inst);
 
 	msm_comm_flush_dynamic_buffers(inst);
 
@@ -5474,7 +5474,7 @@
 	}
 	core = inst->core;
 
-	dprintk(VIDC_ERR, "Venus core frequency = %lu", core->freq);
+	dprintk(VIDC_ERR, "Venus core frequency = %lu", core->curr_freq);
 	mutex_lock(&core->lock);
 	dprintk(VIDC_ERR, "Printing instance info that caused Error\n");
 	msm_comm_print_inst_info(inst);
@@ -5520,3 +5520,28 @@
 	mutex_unlock(&inst->lock);
 	return rc;
 }
+
+u32 get_frame_size_nv12(int plane, u32 height, u32 width)
+{
+	return VENUS_BUFFER_SIZE(COLOR_FMT_NV12, width, height);
+}
+
+u32 get_frame_size_nv12_ubwc(int plane, u32 height, u32 width)
+{
+	return VENUS_BUFFER_SIZE(COLOR_FMT_NV12_UBWC, width, height);
+}
+
+u32 get_frame_size_rgba(int plane, u32 height, u32 width)
+{
+	return VENUS_BUFFER_SIZE(COLOR_FMT_RGBA8888, width, height);
+}
+
+u32 get_frame_size_nv21(int plane, u32 height, u32 width)
+{
+	return VENUS_BUFFER_SIZE(COLOR_FMT_NV21, width, height);
+}
+
+u32 get_frame_size_tp10_ubwc(int plane, u32 height, u32 width)
+{
+	return VENUS_BUFFER_SIZE(COLOR_FMT_NV12_BPP10_UBWC, width, height);
+}
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.h b/drivers/media/platform/msm/vidc/msm_vidc_common.h
index 9c7eec5..098063d 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.h
@@ -98,4 +98,9 @@
 int msm_comm_v4l2_to_hal(int id, int value);
 int msm_comm_hal_to_v4l2(int id, int value);
 int msm_comm_session_continue(void *instance);
+u32 get_frame_size_nv12(int plane, u32 height, u32 width);
+u32 get_frame_size_nv12_ubwc(int plane, u32 height, u32 width);
+u32 get_frame_size_rgba(int plane, u32 height, u32 width);
+u32 get_frame_size_nv21(int plane, u32 height, u32 width);
+u32 get_frame_size_tp10_ubwc(int plane, u32 height, u32 width);
 #endif
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
index 15ee8a8..f62c132 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
@@ -26,12 +26,10 @@
 int msm_vidc_fw_low_power_mode = 1;
 int msm_vidc_hw_rsp_timeout = 2000;
 bool msm_vidc_fw_coverage = !true;
-bool msm_vidc_dec_dcvs_mode = true;
-bool msm_vidc_enc_dcvs_mode = true;
 bool msm_vidc_sys_idle_indicator = !true;
 int msm_vidc_firmware_unload_delay = 15000;
 bool msm_vidc_thermal_mitigation_disabled = !true;
-bool msm_vidc_bitrate_clock_scaling = true;
+bool msm_vidc_clock_scaling = true;
 bool msm_vidc_debug_timeout = !true;
 
 #define MAX_DBG_BUF_SIZE 4096
@@ -174,8 +172,6 @@
 	__debugfs_create(x32, "fw_level", &msm_vidc_fw_debug) &&
 	__debugfs_create(u32, "fw_debug_mode", &msm_vidc_fw_debug_mode) &&
 	__debugfs_create(bool, "fw_coverage", &msm_vidc_fw_coverage) &&
-	__debugfs_create(bool, "dcvs_dec_mode", &msm_vidc_dec_dcvs_mode) &&
-	__debugfs_create(bool, "dcvs_enc_mode", &msm_vidc_enc_dcvs_mode) &&
 	__debugfs_create(u32, "fw_low_power_mode",
 			&msm_vidc_fw_low_power_mode) &&
 	__debugfs_create(u32, "debug_output", &msm_vidc_debug_out) &&
@@ -186,8 +182,8 @@
 			&msm_vidc_firmware_unload_delay) &&
 	__debugfs_create(bool, "disable_thermal_mitigation",
 			&msm_vidc_thermal_mitigation_disabled) &&
-	__debugfs_create(bool, "bitrate_clock_scaling",
-			&msm_vidc_bitrate_clock_scaling) &&
+	__debugfs_create(bool, "clock_scaling",
+			&msm_vidc_clock_scaling) &&
 	__debugfs_create(bool, "debug_timeout",
 			&msm_vidc_debug_timeout);
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.h b/drivers/media/platform/msm/vidc/msm_vidc_debug.h
index cf5ce22..f5c8e5a 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.h
@@ -59,12 +59,10 @@
 extern int msm_vidc_fw_low_power_mode;
 extern int msm_vidc_hw_rsp_timeout;
 extern bool msm_vidc_fw_coverage;
-extern bool msm_vidc_dec_dcvs_mode;
-extern bool msm_vidc_enc_dcvs_mode;
 extern bool msm_vidc_sys_idle_indicator;
 extern int msm_vidc_firmware_unload_delay;
 extern bool msm_vidc_thermal_mitigation_disabled;
-extern bool msm_vidc_bitrate_clock_scaling;
+extern bool msm_vidc_clock_scaling;
 extern bool msm_vidc_debug_timeout;
 
 #define VIDC_MSG_PRIO2STRING(__level) ({ \
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index 53bc068..37bccbd 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -52,7 +52,7 @@
 
 
 /* Maintains the number of FTB's between each FBD over a window */
-#define DCVS_FTB_WINDOW 32
+#define DCVS_FTB_WINDOW 16
 
 #define V4L2_EVENT_VIDC_BASE  10
 
@@ -205,7 +205,7 @@
 	int ebd;
 };
 
-struct dcvs_stats {
+struct clock_data {
 	int buffer_counter;
 	int load;
 	int load_low;
@@ -215,6 +215,15 @@
 	unsigned int extra_capture_buffer_count;
 	unsigned int extra_output_buffer_count;
 	enum hal_buffer buffer_type;
+	bool dcvs_mode;
+	unsigned long bitrate;
+	unsigned long min_freq;
+	unsigned long curr_freq;
+	u32 operating_rate;
+	struct clock_profile_entry *entry;
+	u32 core_id;
+	enum hal_work_mode work_mode;
+	bool low_latency_mode;
 };
 
 struct profile_data {
@@ -259,7 +268,8 @@
 	struct msm_vidc_capability *capabilities;
 	struct delayed_work fw_unload_work;
 	bool smmu_fault_handled;
-	unsigned long freq;
+	unsigned long min_freq;
+	unsigned long curr_freq;
 };
 
 struct msm_vidc_inst {
@@ -293,28 +303,21 @@
 	void *priv;
 	struct msm_vidc_debug debug;
 	struct buf_count count;
-	struct dcvs_stats dcvs;
+	struct clock_data clk_data;
 	enum msm_vidc_modes flags;
 	struct msm_vidc_capability capability;
 	u32 buffer_size_limit;
 	enum buffer_mode_type buffer_mode_set[MAX_PORT_NUM];
 	struct v4l2_ctrl **ctrls;
-	bool dcvs_mode;
 	enum msm_vidc_pixel_depth bit_depth;
 	struct kref kref;
-	unsigned long bitrate;
-	unsigned long freq;
 	u32 buffers_held_in_driver;
 	atomic_t in_flush;
 	u32 pic_struct;
 	u32 colour_space;
-	u32 operating_rate;
 	u32 profile;
 	u32 level;
 	u32 entropy_mode;
-	struct clock_profile_entry *entry;
-	u32 core_id;
-	enum hal_work_mode work_mode;
 };
 
 extern struct msm_vidc_drv *vidc_driver;
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index f8e0a6a..8752378 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -595,6 +595,7 @@
 	u32 qpp;
 	u32 qpb;
 	u32 layer_id;
+	u32 enable;
 };
 
 struct hal_quantization_range {
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index d6c4bcb..77164be 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -536,7 +536,8 @@
 struct hfi_quantization {
 	u32 qp_packed;
 	u32 layer_id;
-	u32 reserved[4];
+	u32 enable;
+	u32 reserved[3];
 };
 
 struct hfi_quantization_range {
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index bacecbd..2afa1eb 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -360,6 +360,10 @@
 
 	if (copy_in_user(up, up32, 2 * sizeof(__u32)) ||
 		copy_in_user(&up->data_offset, &up32->data_offset,
+				sizeof(__u32)) ||
+		copy_in_user(up->reserved, up32->reserved,
+				sizeof(up->reserved)) ||
+		copy_in_user(&up->length, &up32->length,
 				sizeof(__u32)))
 		return -EFAULT;
 
@@ -386,7 +390,9 @@
 {
 	if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
 		copy_in_user(&up32->data_offset, &up->data_offset,
-				sizeof(__u32)))
+				sizeof(__u32)) ||
+		copy_in_user(up32->reserved, up->reserved,
+				sizeof(up32->reserved)))
 		return -EFAULT;
 
 	/* For MMAP, driver might've set up the offset, so copy it back.
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 2c02d2d..877c4d1 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -5869,7 +5869,8 @@
 		 * It will return false if it is GPCE based crypto instance or
 		 * ICE is setup properly
 		 */
-		if (qseecom_enable_ice_setup(create_key_req.usage))
+		ret = qseecom_enable_ice_setup(create_key_req.usage);
+		if (ret)
 			goto free_buf;
 
 		do {
@@ -5998,7 +5999,8 @@
 		 * It will return false if it is GPCE based crypto instance or
 		 * ICE is setup properly
 		 */
-		if (qseecom_enable_ice_setup(wipe_key_req.usage))
+		ret = qseecom_enable_ice_setup(wipe_key_req.usage);
+		if (ret)
 			goto free_buf;
 
 		ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
index 6523cb0..16c9096 100644
--- a/drivers/pci/host/pci-msm.c
+++ b/drivers/pci/host/pci-msm.c
@@ -6764,12 +6764,12 @@
 		PCIE_DBG(pcie_dev, "RC%d: PM_Enter_L23 is NOT received\n",
 			pcie_dev->rc_idx);
 
-		msm_pcie_disable(pcie_dev, PM_PIPE_CLK | PM_CLK | PM_VREG);
-
 	if (pcie_dev->use_pinctrl && pcie_dev->pins_sleep)
 		pinctrl_select_state(pcie_dev->pinctrl,
 					pcie_dev->pins_sleep);
 
+	msm_pcie_disable(pcie_dev, PM_PIPE_CLK | PM_CLK | PM_VREG);
+
 	PCIE_DBG(pcie_dev, "RC%d: exit\n", pcie_dev->rc_idx);
 
 	return ret;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
index e474a40..b60c7a6 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1480,17 +1480,24 @@
 
 void ipa_delete_dflt_flt_rules(u32 ipa_ep_idx)
 {
+	struct ipa_flt_tbl *tbl;
 	struct ipa_ep_context *ep = &ipa_ctx->ep[ipa_ep_idx];
 
 	mutex_lock(&ipa_ctx->lock);
 	if (ep->dflt_flt4_rule_hdl) {
+		tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4];
 		__ipa_del_flt_rule(ep->dflt_flt4_rule_hdl);
 		ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v4);
+		/* Reset the sticky flag. */
+		tbl->sticky_rear = false;
 		ep->dflt_flt4_rule_hdl = 0;
 	}
 	if (ep->dflt_flt6_rule_hdl) {
+		tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6];
 		__ipa_del_flt_rule(ep->dflt_flt6_rule_hdl);
 		ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v6);
+		/* Reset the sticky flag. */
+		tbl->sticky_rear = false;
 		ep->dflt_flt6_rule_hdl = 0;
 	}
 	mutex_unlock(&ipa_ctx->lock);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 9d25e4a..89dd274 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -1004,6 +1004,7 @@
 	struct ipa3_ep_context *ep;
 	int empty;
 	int result;
+	int i;
 
 	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
 	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
@@ -1038,7 +1039,17 @@
 	if (IPA_CLIENT_IS_CONS(ep->client))
 		cancel_delayed_work_sync(&ep->sys->replenish_rx_work);
 	flush_workqueue(ep->sys->wq);
-	result = ipa3_stop_gsi_channel(clnt_hdl);
+	/* channel stop might fail on timeout if IPA is busy */
+	for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
+		result = ipa3_stop_gsi_channel(clnt_hdl);
+		if (result == GSI_STATUS_SUCCESS)
+			break;
+
+		if (result != -GSI_STATUS_AGAIN &&
+		    result != -GSI_STATUS_TIMED_OUT)
+			break;
+	}
+
 	if (result != GSI_STATUS_SUCCESS) {
 		IPAERR("GSI stop chan err: %d.\n", result);
 		ipa_assert();
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
index 0cc1206..ff763c4 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -1389,16 +1389,23 @@
 void ipa3_delete_dflt_flt_rules(u32 ipa_ep_idx)
 {
 	struct ipa3_ep_context *ep = &ipa3_ctx->ep[ipa_ep_idx];
+	struct ipa3_flt_tbl *tbl;
 
 	mutex_lock(&ipa3_ctx->lock);
 	if (ep->dflt_flt4_rule_hdl) {
+		tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4];
 		__ipa_del_flt_rule(ep->dflt_flt4_rule_hdl);
 		ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4);
+		/* Reset the sticky flag. */
+		tbl->sticky_rear = false;
 		ep->dflt_flt4_rule_hdl = 0;
 	}
 	if (ep->dflt_flt6_rule_hdl) {
+		tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6];
 		__ipa_del_flt_rule(ep->dflt_flt6_rule_hdl);
 		ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6);
+		/* Reset the sticky flag. */
+		tbl->sticky_rear = false;
 		ep->dflt_flt6_rule_hdl = 0;
 	}
 	mutex_unlock(&ipa3_ctx->lock);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 5a38db3..f066d94 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -4352,21 +4352,30 @@
 
 	memset(&mem, 0, sizeof(mem));
 
-	for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
-		IPADBG("Calling gsi_stop_channel\n");
+	if (IPA_CLIENT_IS_PROD(ep->client)) {
+		IPADBG("Calling gsi_stop_channel ch:%lu\n",
+			ep->gsi_chan_hdl);
 		res = gsi_stop_channel(ep->gsi_chan_hdl);
-		IPADBG("gsi_stop_channel returned %d\n", res);
+		IPADBG("gsi_stop_channel ch: %lu returned %d\n",
+			ep->gsi_chan_hdl, res);
+		goto end_sequence;
+	}
+
+	for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
+		IPADBG("Calling gsi_stop_channel ch:%lu\n",
+			ep->gsi_chan_hdl);
+		res = gsi_stop_channel(ep->gsi_chan_hdl);
+		IPADBG("gsi_stop_channel ch: %lu returned %d\n",
+			ep->gsi_chan_hdl, res);
 		if (res != -GSI_STATUS_AGAIN && res != -GSI_STATUS_TIMED_OUT)
 			goto end_sequence;
 
-		if (IPA_CLIENT_IS_CONS(ep->client)) {
-			IPADBG("Inject a DMA_TASK with 1B packet to IPA\n");
-			/* Send a 1B packet DMA_TASK to IPA and try again */
-			res = ipa3_inject_dma_task_for_gsi();
-			if (res) {
-				IPAERR("Failed to inject DMA TASk for GSI\n");
-				goto end_sequence;
-			}
+		IPADBG("Inject a DMA_TASK with 1B packet to IPA\n");
+		/* Send a 1B packet DMA_TASK to IPA and try again */
+		res = ipa3_inject_dma_task_for_gsi();
+		if (res) {
+			IPAERR("Failed to inject DMA TASk for GSI\n");
+			goto end_sequence;
 		}
 
 		/* sleep for short period to flush IPA */
diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c
index 0c2ba4d..5cc04c0 100644
--- a/drivers/soc/qcom/cmd-db.c
+++ b/drivers/soc/qcom/cmd-db.c
@@ -241,18 +241,28 @@
 
 static int cmd_db_dev_probe(struct platform_device *pdev)
 {
-	struct resource *res;
+	struct resource res;
+	void __iomem *dict;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
+	dict = of_iomap(pdev->dev.of_node, 0);
+	if (!dict) {
 		cmd_db_status = -ENOMEM;
 		goto failed;
 	}
 
-	start_addr = devm_ioremap_resource(&pdev->dev, res);
+	/*
+	 * Read start address and size of the command DB address from
+	 * shared dictionary location
+	 */
+	res.start = readl_relaxed(dict);
+	res.end = res.start + readl_relaxed(dict + 0x4) - 1;
+	res.flags = IORESOURCE_MEM;
+	iounmap(dict);
 
-	cmd_db_header = devm_kzalloc(&pdev->dev, sizeof(*cmd_db_header),
-			GFP_KERNEL);
+	start_addr = devm_ioremap_resource(&pdev->dev, &res);
+
+	cmd_db_header = devm_kzalloc(&pdev->dev,
+			sizeof(*cmd_db_header), GFP_KERNEL);
 
 	if (!cmd_db_header) {
 		cmd_db_status = -ENOMEM;
diff --git a/drivers/soc/qcom/pil-msa.c b/drivers/soc/qcom/pil-msa.c
index fb3d7d9..c5ba279 100644
--- a/drivers/soc/qcom/pil-msa.c
+++ b/drivers/soc/qcom/pil-msa.c
@@ -75,6 +75,10 @@
 #define MSS_RESTART_ID			0xA
 
 #define MSS_MAGIC			0XAABADEAD
+
+#define MSS_PDC_OFFSET			8
+#define MSS_PDC_MASK			BIT(MSS_PDC_OFFSET)
+
 enum scm_cmd {
 	PAS_MEM_SETUP_CMD = 2,
 };
@@ -204,6 +208,33 @@
 		clk_disable_unprepare(drv->ahb_clk);
 }
 
+static void pil_mss_pdc_sync(struct q6v5_data *drv, bool pdc_sync)
+{
+	u32 val = 0;
+
+	if (drv->pdc_sync) {
+		val = readl_relaxed(drv->pdc_sync);
+		if (pdc_sync)
+			val |= MSS_PDC_MASK;
+		else
+			val &= ~MSS_PDC_MASK;
+		writel_relaxed(val, drv->pdc_sync);
+		/* Ensure PDC is written before next write */
+		wmb();
+		udelay(2);
+	}
+}
+
+static void pil_mss_alt_reset(struct q6v5_data *drv, u32 val)
+{
+	if (drv->alt_reset) {
+		writel_relaxed(val, drv->alt_reset);
+		/* Ensure alt reset is written before restart reg */
+		wmb();
+		udelay(2);
+	}
+}
+
 static int pil_mss_restart_reg(struct q6v5_data *drv, u32 mss_restart)
 {
 	int ret = 0;
@@ -235,6 +266,32 @@
 	return ret;
 }
 
+static int pil_mss_assert_resets(struct q6v5_data *drv)
+{
+	int ret = 0;
+
+	pil_mss_pdc_sync(drv, true);
+	pil_mss_alt_reset(drv, 1);
+	ret = pil_mss_restart_reg(drv, 1);
+
+	return ret;
+}
+
+static int pil_mss_deassert_resets(struct q6v5_data *drv)
+{
+	int ret = 0;
+
+	ret = pil_mss_restart_reg(drv, 0);
+	if (ret)
+		return ret;
+	/* Wait 6 32kHz sleep cycles for reset */
+	udelay(200);
+	pil_mss_alt_reset(drv, 0);
+	pil_mss_pdc_sync(drv, false);
+
+	return ret;
+}
+
 static int pil_msa_wait_for_mba_ready(struct q6v5_data *drv)
 {
 	struct device *dev = drv->desc.dev;
@@ -304,7 +361,10 @@
 									ret);
 	}
 
-	ret = pil_mss_restart_reg(drv, 1);
+	pil_mss_assert_resets(drv);
+	/* Wait 6 32kHz sleep cycles for reset */
+	udelay(200);
+	ret = pil_mss_deassert_resets(drv);
 
 	if (drv->is_booted) {
 		pil_mss_disable_clks(drv);
@@ -450,6 +510,7 @@
 {
 	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
 	phys_addr_t start_addr = pil_get_entry_addr(pil);
+	u32 debug_val;
 	int ret;
 
 	if (drv->mba_dp_phys)
@@ -463,15 +524,22 @@
 	if (ret)
 		goto err_power;
 
-	/* Deassert reset to subsystem and wait for propagation */
-	ret = pil_mss_restart_reg(drv, 0);
-	if (ret)
-		goto err_restart;
-
 	ret = pil_mss_enable_clks(drv);
 	if (ret)
 		goto err_clks;
 
+	/* Save state of modem debug register before full reset */
+	debug_val = readl_relaxed(drv->reg_base + QDSP6SS_DBG_CFG);
+
+	/* Assert reset to subsystem */
+	pil_mss_assert_resets(drv);
+	/* Wait 6 32kHz sleep cycles for reset */
+	udelay(200);
+	ret = pil_mss_deassert_resets(drv);
+	if (ret)
+		goto err_restart;
+
+	writel_relaxed(debug_val, drv->reg_base + QDSP6SS_DBG_CFG);
 	if (modem_dbg_cfg)
 		writel_relaxed(modem_dbg_cfg, drv->reg_base + QDSP6SS_DBG_CFG);
 
@@ -519,12 +587,11 @@
 
 err_q6v5_reset:
 	modem_log_rmb_regs(drv->rmb_base);
+err_restart:
 	pil_mss_disable_clks(drv);
 	if (drv->ahb_clk_vote)
 		clk_disable_unprepare(drv->ahb_clk);
 err_clks:
-	pil_mss_restart_reg(drv, 1);
-err_restart:
 	pil_mss_power_down(drv);
 err_power:
 	return ret;
diff --git a/drivers/soc/qcom/pil-q6v5-mss.c b/drivers/soc/qcom/pil-q6v5-mss.c
index bbde4b6..df0c609c 100644
--- a/drivers/soc/qcom/pil-q6v5-mss.c
+++ b/drivers/soc/qcom/pil-q6v5-mss.c
@@ -284,6 +284,20 @@
 	if (!q6->restart_reg)
 		return -ENOMEM;
 
+	q6->pdc_sync = NULL;
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pdc_sync");
+	if (res) {
+		q6->pdc_sync = devm_ioremap(&pdev->dev,
+						res->start, resource_size(res));
+	}
+
+	q6->alt_reset = NULL;
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "alt_reset");
+	if (res) {
+		q6->alt_reset = devm_ioremap(&pdev->dev,
+						res->start, resource_size(res));
+	}
+
 	q6->vreg = NULL;
 
 	prop = of_find_property(pdev->dev.of_node, "vdd_mss-supply", NULL);
diff --git a/drivers/soc/qcom/pil-q6v5.h b/drivers/soc/qcom/pil-q6v5.h
index 1725253..9b4c811 100644
--- a/drivers/soc/qcom/pil-q6v5.h
+++ b/drivers/soc/qcom/pil-q6v5.h
@@ -44,6 +44,8 @@
 	void __iomem *axi_halt_mss;
 	void __iomem *axi_halt_nc;
 	void __iomem *restart_reg;
+	void __iomem *pdc_sync;
+	void __iomem *alt_reset;
 	struct regulator *vreg;
 	struct regulator *vreg_cx;
 	struct regulator *vreg_mx;
diff --git a/drivers/soc/qcom/system_pm.c b/drivers/soc/qcom/system_pm.c
index 2855a15..d8c5a8f 100644
--- a/drivers/soc/qcom/system_pm.c
+++ b/drivers/soc/qcom/system_pm.c
@@ -56,9 +56,13 @@
 	 * Set up the wake up value offset from the current time.
 	 * Convert us to ns to allow div by 19.2 Mhz tick timer.
 	 */
-	sleep_val *= NSEC_PER_USEC;
-	do_div(sleep_val, NSEC_PER_SEC/ARCH_TIMER_HZ);
-	sleep_val += arch_counter_get_cntvct();
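+	/* A sleep_val of zero means no timed wakeup; set the max match value */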
+	if (sleep_val) {
+		sleep_val *= NSEC_PER_USEC;
+		do_div(sleep_val, NSEC_PER_SEC/ARCH_TIMER_HZ);
+		sleep_val += arch_counter_get_cntvct();
+	} else {
+		sleep_val = ~0ULL;
+	}
 
 	return setup_wakeup(sleep_val);
 }
diff --git a/drivers/thermal/qcom/msm_lmh_dcvs.c b/drivers/thermal/qcom/msm_lmh_dcvs.c
index 74f5ce0..bfaf7c7 100644
--- a/drivers/thermal/qcom/msm_lmh_dcvs.c
+++ b/drivers/thermal/qcom/msm_lmh_dcvs.c
@@ -57,6 +57,7 @@
 #define LIMITS_DOMAIN_MIN           0x444D494E
 
 #define LIMITS_TEMP_DEFAULT         75000
+#define LIMITS_TEMP_HIGH_THRESH_MAX 120000
 #define LIMITS_LOW_THRESHOLD_OFFSET 500
 #define LIMITS_POLLING_DELAY_MS     10
 #define LIMITS_CLUSTER_0_REQ        0x179C1B04
@@ -259,7 +260,7 @@
 	struct limits_dcvs_hw *hw = (struct limits_dcvs_hw *)data;
 	int ret = 0;
 
-	if (high < LIMITS_LOW_THRESHOLD_OFFSET || low < 0) {
+	if (high >= LIMITS_TEMP_HIGH_THRESH_MAX || low < 0) {
 		pr_err("Value out of range low:%d high:%d\n",
 				low, high);
 		return -EINVAL;
diff --git a/drivers/thermal/qpnp-temp-alarm.c b/drivers/thermal/qpnp-temp-alarm.c
index e86a297..09c95e5 100644
--- a/drivers/thermal/qpnp-temp-alarm.c
+++ b/drivers/thermal/qpnp-temp-alarm.c
@@ -28,6 +28,8 @@
 #include <linux/thermal.h>
 #include <linux/qpnp/qpnp-adc.h>
 
+#include "thermal_core.h"
+
 #define QPNP_TM_DRIVER_NAME "qcom,qpnp-temp-alarm"
 
 enum qpnp_tm_registers {
@@ -97,7 +99,6 @@
 	unsigned int			subtype;
 	enum qpnp_tm_adc_type		adc_type;
 	int				temperature;
-	enum thermal_device_mode	mode;
 	unsigned int			thresh;
 	unsigned int			clock_rate;
 	unsigned int			stage;
@@ -105,18 +106,12 @@
 	int				irq;
 	enum qpnp_vadc_channels		adc_channel;
 	u16				base_addr;
-	bool				allow_software_override;
 	struct qpnp_vadc_chip		*vadc_dev;
 };
 
 /* Delay between TEMP_STAT IRQ going high and status value changing in ms. */
 #define STATUS_REGISTER_DELAY_MS       40
 
-enum pmic_thermal_override_mode {
-	SOFTWARE_OVERRIDE_DISABLED = 0,
-	SOFTWARE_OVERRIDE_ENABLED,
-};
-
 /* This array maps from GEN2 alarm state to GEN1 alarm stage */
 const unsigned int alarm_state_map[8] = {0, 1, 1, 2, 2, 3, 3, 3};
 
@@ -156,28 +151,6 @@
 	return rc;
 }
 
-
-static inline int qpnp_tm_shutdown_override(struct qpnp_tm_chip *chip,
-			    enum pmic_thermal_override_mode mode)
-{
-	int rc = 0;
-	u8 reg;
-
-	if (chip->allow_software_override) {
-		reg = chip->thresh & SHUTDOWN_CTRL1_THRESHOLD_MASK;
-		reg |= (chip->clock_rate << SHUTDOWN_CTRL1_CLK_RATE_SHIFT)
-			& SHUTDOWN_CTRL1_CLK_RATE_MASK;
-
-		if (mode == SOFTWARE_OVERRIDE_ENABLED)
-			reg |= SHUTDOWN_CTRL1_OVERRIDE_STAGE2
-				| SHUTDOWN_CTRL1_OVERRIDE_STAGE3;
-
-		rc = qpnp_tm_write(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, &reg, 1);
-	}
-
-	return rc;
-}
-
 static int qpnp_tm_update_temp(struct qpnp_tm_chip *chip)
 {
 	struct qpnp_vadc_result adc_result;
@@ -274,10 +247,9 @@
 	return 0;
 }
 
-static int qpnp_tz_get_temp_no_adc(struct thermal_zone_device *thermal,
-				     int *temperature)
+static int qpnp_tz_get_temp_no_adc(void *data, int *temperature)
 {
-	struct qpnp_tm_chip *chip = thermal->devdata;
+	struct qpnp_tm_chip *chip = (struct qpnp_tm_chip *)data;
 	int rc;
 
 	if (!temperature)
@@ -292,10 +264,9 @@
 	return 0;
 }
 
-static int qpnp_tz_get_temp_qpnp_adc(struct thermal_zone_device *thermal,
-				      int *temperature)
+static int qpnp_tz_get_temp_qpnp_adc(void *data, int *temperature)
 {
-	struct qpnp_tm_chip *chip = thermal->devdata;
+	struct qpnp_tm_chip *chip = (struct qpnp_tm_chip *)data;
 	int rc;
 
 	if (!temperature)
@@ -314,121 +285,12 @@
 	return 0;
 }
 
-static int qpnp_tz_get_mode(struct thermal_zone_device *thermal,
-			      enum thermal_device_mode *mode)
-{
-	struct qpnp_tm_chip *chip = thermal->devdata;
-
-	if (!mode)
-		return -EINVAL;
-
-	*mode = chip->mode;
-
-	return 0;
-}
-
-static int qpnp_tz_set_mode(struct thermal_zone_device *thermal,
-			      enum thermal_device_mode mode)
-{
-	struct qpnp_tm_chip *chip = thermal->devdata;
-	int rc = 0;
-
-	if (mode != chip->mode) {
-		if (mode == THERMAL_DEVICE_ENABLED)
-			rc = qpnp_tm_shutdown_override(chip,
-				SOFTWARE_OVERRIDE_ENABLED);
-		else
-			rc = qpnp_tm_shutdown_override(chip,
-				SOFTWARE_OVERRIDE_DISABLED);
-
-		chip->mode = mode;
-	}
-
-	return rc;
-}
-
-static int qpnp_tz_get_trip_type(struct thermal_zone_device *thermal,
-				   int trip, enum thermal_trip_type *type)
-{
-	if (trip < 0 || !type)
-		return -EINVAL;
-
-	switch (trip) {
-	case TRIP_STAGE3:
-		*type = THERMAL_TRIP_CRITICAL;
-		break;
-	case TRIP_STAGE2:
-		*type = THERMAL_TRIP_HOT;
-		break;
-	case TRIP_STAGE1:
-		*type = THERMAL_TRIP_HOT;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int qpnp_tz_get_trip_temp(struct thermal_zone_device *thermal,
-				   int trip, int *temperature)
-{
-	struct qpnp_tm_chip *chip = thermal->devdata;
-	int thresh_temperature;
-
-	if (trip < 0 || !temperature)
-		return -EINVAL;
-
-	thresh_temperature = chip->thresh * TEMP_THRESH_STEP + TEMP_THRESH_MIN;
-
-	switch (trip) {
-	case TRIP_STAGE3:
-		thresh_temperature += 2 * TEMP_STAGE_STEP;
-		break;
-	case TRIP_STAGE2:
-		thresh_temperature += TEMP_STAGE_STEP;
-		break;
-	case TRIP_STAGE1:
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	*temperature = thresh_temperature;
-
-	return 0;
-}
-
-static int qpnp_tz_get_crit_temp(struct thermal_zone_device *thermal,
-				   int *temperature)
-{
-	struct qpnp_tm_chip *chip = thermal->devdata;
-
-	if (!temperature)
-		return -EINVAL;
-
-	*temperature = chip->thresh * TEMP_THRESH_STEP + TEMP_THRESH_MIN +
-		2 * TEMP_STAGE_STEP;
-
-	return 0;
-}
-
-static struct thermal_zone_device_ops qpnp_thermal_zone_ops_no_adc = {
+static struct thermal_zone_of_device_ops qpnp_thermal_zone_ops_no_adc = {
 	.get_temp = qpnp_tz_get_temp_no_adc,
-	.get_mode = qpnp_tz_get_mode,
-	.set_mode = qpnp_tz_set_mode,
-	.get_trip_type = qpnp_tz_get_trip_type,
-	.get_trip_temp = qpnp_tz_get_trip_temp,
-	.get_crit_temp = qpnp_tz_get_crit_temp,
 };
 
-static struct thermal_zone_device_ops qpnp_thermal_zone_ops_qpnp_adc = {
+static struct thermal_zone_of_device_ops qpnp_thermal_zone_ops_qpnp_adc = {
 	.get_temp = qpnp_tz_get_temp_qpnp_adc,
-	.get_mode = qpnp_tz_get_mode,
-	.set_mode = qpnp_tz_set_mode,
-	.get_trip_type = qpnp_tz_get_trip_type,
-	.get_trip_temp = qpnp_tz_get_trip_temp,
-	.get_crit_temp = qpnp_tz_get_crit_temp,
 };
 
 static void qpnp_tm_work(struct work_struct *work)
@@ -474,11 +336,7 @@
 				chip->tm_name, stage_new, chip->stage,
 				chip->thresh, chip->temperature);
 
-		thermal_zone_device_update(chip->tz_dev,
-						THERMAL_EVENT_UNSPECIFIED);
-
-		/* Notify user space */
-		sysfs_notify(&chip->tz_dev->device.kobj, NULL, "type");
+		of_thermal_handle_trip(chip->tz_dev);
 	}
 
 bail:
@@ -539,7 +397,7 @@
 	struct device_node *node;
 	unsigned int base;
 	struct qpnp_tm_chip *chip;
-	struct thermal_zone_device_ops *tz_ops;
+	struct thermal_zone_of_device_ops *tz_ops;
 	char *tm_name;
 	u32 default_temperature;
 	int rc = 0;
@@ -640,9 +498,6 @@
 	else
 		tz_ops = &qpnp_thermal_zone_ops_no_adc;
 
-	chip->allow_software_override
-		= of_property_read_bool(node, "qcom,allow-override");
-
 	default_temperature = DEFAULT_NO_ADC_TEMP;
 	rc = of_property_read_u32(node, "qcom,default-temp",
 					&default_temperature);
@@ -686,18 +541,8 @@
 		}
 	}
 
-	/* Start in HW control; switch to SW control when user changes mode. */
-	chip->mode = THERMAL_DEVICE_DISABLED;
-	rc = qpnp_tm_shutdown_override(chip, SOFTWARE_OVERRIDE_DISABLED);
-	if (rc) {
-		dev_err(&pdev->dev,
-			"%s: qpnp_tm_shutdown_override() failed, rc=%d\n",
-			__func__, rc);
-		goto err_cancel_work;
-	}
-
-	chip->tz_dev = thermal_zone_device_register(tm_name, TRIP_NUM, 0, chip,
-			tz_ops, NULL, 0, 0);
+	chip->tz_dev = thermal_zone_of_sensor_register(&pdev->dev, 0, chip,
+							tz_ops);
-	if (chip->tz_dev == NULL) {
+	if (IS_ERR(chip->tz_dev)) {
 		dev_err(&pdev->dev,
-			"%s: thermal_zone_device_register() failed.\n",
+			"%s: thermal_zone_of_sensor_register() failed.\n",
@@ -717,7 +562,7 @@
 	return 0;
 
 err_free_tz:
-	thermal_zone_device_unregister(chip->tz_dev);
+	thermal_zone_of_sensor_unregister(&pdev->dev, chip->tz_dev);
 err_cancel_work:
 	cancel_delayed_work_sync(&chip->irq_work);
 	kfree(chip->tm_name);
@@ -731,10 +576,9 @@
 {
 	struct qpnp_tm_chip *chip = dev_get_drvdata(&pdev->dev);
 
+	thermal_zone_of_sensor_unregister(&pdev->dev, chip->tz_dev);
 	dev_set_drvdata(&pdev->dev, NULL);
-	thermal_zone_device_unregister(chip->tz_dev);
 	kfree(chip->tm_name);
-	qpnp_tm_shutdown_override(chip, SOFTWARE_OVERRIDE_DISABLED);
 	free_irq(chip->irq, chip);
 	cancel_delayed_work_sync(&chip->irq_work);
 	kfree(chip);
@@ -742,38 +586,6 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM
-static int qpnp_tm_suspend(struct device *dev)
-{
-	struct qpnp_tm_chip *chip = dev_get_drvdata(dev);
-
-	/* Clear override bits in suspend to allow hardware control */
-	qpnp_tm_shutdown_override(chip, SOFTWARE_OVERRIDE_DISABLED);
-
-	return 0;
-}
-
-static int qpnp_tm_resume(struct device *dev)
-{
-	struct qpnp_tm_chip *chip = dev_get_drvdata(dev);
-
-	/* Override hardware actions so software can control */
-	if (chip->mode == THERMAL_DEVICE_ENABLED)
-		qpnp_tm_shutdown_override(chip, SOFTWARE_OVERRIDE_ENABLED);
-
-	return 0;
-}
-
-static const struct dev_pm_ops qpnp_tm_pm_ops = {
-	.suspend = qpnp_tm_suspend,
-	.resume = qpnp_tm_resume,
-};
-
-#define QPNP_TM_PM_OPS	(&qpnp_tm_pm_ops)
-#else
-#define QPNP_TM_PM_OPS	NULL
-#endif
-
 static const struct of_device_id qpnp_tm_match_table[] = {
 	{ .compatible = QPNP_TM_DRIVER_NAME, },
 	{}
@@ -789,7 +601,6 @@
 		.name		= QPNP_TM_DRIVER_NAME,
 		.of_match_table	= qpnp_tm_match_table,
 		.owner		= THIS_MODULE,
-		.pm		= QPNP_TM_PM_OPS,
 	},
 	.probe	  = qpnp_tm_probe,
 	.remove	  = qpnp_tm_remove,
diff --git a/include/trace/events/clk.h b/include/trace/events/clk.h
index 7586072..ad19e73 100644
--- a/include/trace/events/clk.h
+++ b/include/trace/events/clk.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -192,6 +192,42 @@
 	TP_ARGS(core, phase)
 );
 
+DECLARE_EVENT_CLASS(clk_state_dump,
+
+	TP_PROTO(const char *name, unsigned int prepare_count,
+	unsigned int enable_count, unsigned long rate, unsigned int vdd_level),
+
+	TP_ARGS(name, prepare_count, enable_count, rate, vdd_level),
+
+	TP_STRUCT__entry(
+		__string(name,			name)
+		__field(unsigned int,		prepare_count)
+		__field(unsigned int,		enable_count)
+		__field(unsigned long,		rate)
+		__field(unsigned int,		vdd_level)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->prepare_count = prepare_count;
+		__entry->enable_count = enable_count;
+		__entry->rate = rate;
+		__entry->vdd_level = vdd_level;
+	),
+
+	TP_printk("%s\tprepare:enable cnt [%u:%u]\trate: vdd_level [%lu:%u]",
+		__get_str(name), __entry->prepare_count, __entry->enable_count,
+		__entry->rate, __entry->vdd_level)
+);
+
+DEFINE_EVENT(clk_state_dump, clk_state,
+
+	TP_PROTO(const char *name, unsigned int prepare_count,
+	unsigned int enable_count, unsigned long rate, unsigned int vdd_level),
+
+	TP_ARGS(name, prepare_count, enable_count, rate, vdd_level)
+);
+
 #endif /* _TRACE_CLK_H */
 
 /* This part must be outside protection */
diff --git a/include/uapi/linux/msm_kgsl.h b/include/uapi/linux/msm_kgsl.h
index c190446..f05155b 100644
--- a/include/uapi/linux/msm_kgsl.h
+++ b/include/uapi/linux/msm_kgsl.h
@@ -67,6 +67,8 @@
 #define KGSL_CONTEXT_TYPE_RS		4
 #define KGSL_CONTEXT_TYPE_UNKNOWN	0x1E
 
+#define KGSL_CONTEXT_INVALIDATE_ON_FAULT 0x10000000
+
 #define KGSL_CONTEXT_INVALID 0xffffffff
 
 /*
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index a62870e..cf96ac1 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -1061,6 +1061,8 @@
 		(V4L2_CID_MPEG_MSM_VIDC_BASE + 106)
 #define V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP_MAX \
 		(V4L2_CID_MPEG_MSM_VIDC_BASE + 107)
+#define V4L2_CID_MPEG_VIDC_VIDEO_QP_MASK \
+		(V4L2_CID_MPEG_MSM_VIDC_BASE + 108)
 
 enum v4l2_mpeg_vidc_video_venc_iframesize_type {
 	V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_DEFAULT,
diff --git a/include/uapi/media/cam_req_mgr.h b/include/uapi/media/cam_req_mgr.h
index 68db5e1..b736755 100644
--- a/include/uapi/media/cam_req_mgr.h
+++ b/include/uapi/media/cam_req_mgr.h
@@ -190,9 +190,6 @@
 #define CAM_REQ_MGR_MAP_BUF                     (CAM_COMMON_OPCODE_MAX + 10)
 #define CAM_REQ_MGR_RELEASE_BUF                 (CAM_COMMON_OPCODE_MAX + 11)
 #define CAM_REQ_MGR_CACHE_OPS                   (CAM_COMMON_OPCODE_MAX + 12)
-#define CAM_REQ_MGR_GET_MMU_HDLS_DEBUG          (CAM_COMMON_OPCODE_MAX + 13)
-#define CAM_REQ_MGR_GET_IO_BUF_DEBUG            (CAM_COMMON_OPCODE_MAX + 14)
-#define CAM_REQ_MGR_GET_KMD_BUF_DEBUG           (CAM_COMMON_OPCODE_MAX + 15)
 /* end of cam_req_mgr opcodes */
 
 #define CAM_MEM_FLAG_HW_READ_WRITE              (1<<0)
@@ -206,6 +203,7 @@
 #define CAM_MEM_FLAG_STATS_BUF_TYPE             (1<<8)
 #define CAM_MEM_FLAG_PACKET_BUF_TYPE            (1<<9)
 #define CAM_MEM_FLAG_CACHE                      (1<<10)
+#define CAM_MEM_FLAG_HW_SHARED_ACCESS           (1<<11)
 
 #define CAM_MEM_MMU_MAX_HANDLE                  16
 
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index f93db0e..a0d45ef 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2438,15 +2438,6 @@
 						     $herecurr);
 					}
 					$shorttext = AFTER_SHORTTEXT;
-				} elsif (length($line) > (SHORTTEXT_LIMIT +
-							  $shorttext_exspc)
-					 && $line !~ /^:([0-7]{6}\s){2}
-						      ([[:xdigit:]]+\.*
-						       \s){2}\w+\s\w+/xms) {
-					WARN("LONG_COMMIT_TEXT",
-					     "commit text line over " .
-					     SHORTTEXT_LIMIT .
-					     " characters\n" . $herecurr);
 				} elsif ($line=~/^\s*change-id:/i ||
 					 $line=~/^\s*signed-off-by:/i ||
 					 $line=~/^\s*crs-fixed:/i ||
diff --git a/sound/soc/msm/sdm845.c b/sound/soc/msm/sdm845.c
index 2b8c9c7..304bf47 100644
--- a/sound/soc/msm/sdm845.c
+++ b/sound/soc/msm/sdm845.c
@@ -155,6 +155,21 @@
 	u32 index;
 };
 
+enum pinctrl_pin_state {
+	STATE_DISABLE = 0, /* All pins are in sleep state */
+	STATE_MI2S_ACTIVE,  /* MI2S = active, TDM = sleep */
+	STATE_TDM_ACTIVE,  /* MI2S = sleep, TDM = active */
+};
+
+struct msm_pinctrl_info {
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *mi2s_disable;
+	struct pinctrl_state *tdm_disable;
+	struct pinctrl_state *mi2s_active;
+	struct pinctrl_state *tdm_active;
+	enum pinctrl_pin_state curr_state;
+};
+
 struct msm_asoc_mach_data {
 	u32 mclk_freq;
 	int us_euro_gpio; /* used by gpio driver API */
@@ -162,6 +177,7 @@
 	struct device_node *hph_en1_gpio_p; /* used by pinctrl API */
 	struct device_node *hph_en0_gpio_p; /* used by pinctrl API */
 	struct snd_info_entry *codec_root;
+	struct msm_pinctrl_info pinctrl_info;
 };
 
 struct msm_asoc_wcd93xx_codec {
@@ -170,6 +186,9 @@
 	void (*mbhc_hs_detect_exit)(struct snd_soc_codec *codec);
 };
 
+static const char *const pin_states[] = {"sleep", "i2s-active",
+					 "tdm-active"};
+
 enum {
 	TDM_0 = 0,
 	TDM_1,
@@ -509,6 +528,7 @@
 	.key_code[7] = 0,
 	.linein_th = 5000,
 	.moisture_en = true,
+	.mbhc_micbias = MIC_BIAS_2,
 	.anc_micbias = MIC_BIAS_2,
 	.enable_anc_mic_detect = false,
 };
@@ -3802,6 +3822,275 @@
 	return ret;
 }
 
+static int msm_set_pinctrl(struct msm_pinctrl_info *pinctrl_info,
+				enum pinctrl_pin_state new_state)
+{
+	int ret = 0;
+	int curr_state = 0;
+
+	if (pinctrl_info == NULL) {
+		pr_err("%s: pinctrl_info is NULL\n", __func__);
+		ret = -EINVAL;
+		goto err;
+	}
+	curr_state = pinctrl_info->curr_state;
+	pinctrl_info->curr_state = new_state;
+	pr_debug("%s: curr_state = %s new_state = %s\n", __func__,
+		 pin_states[curr_state], pin_states[pinctrl_info->curr_state]);
+
+	if (curr_state == pinctrl_info->curr_state) {
+		pr_debug("%s: Already in same state\n", __func__);
+		goto err;
+	}
+
+	if (curr_state != STATE_DISABLE &&
+		pinctrl_info->curr_state != STATE_DISABLE) {
+		pr_debug("%s: state already active cannot switch\n", __func__);
+		ret = -EIO;
+		goto err;
+	}
+
+	switch (pinctrl_info->curr_state) {
+	case STATE_MI2S_ACTIVE:
+		ret = pinctrl_select_state(pinctrl_info->pinctrl,
+					pinctrl_info->mi2s_active);
+		if (ret) {
+			pr_err("%s: MI2S state select failed with %d\n",
+				__func__, ret);
+			ret = -EIO;
+			goto err;
+		}
+		break;
+	case STATE_TDM_ACTIVE:
+		ret = pinctrl_select_state(pinctrl_info->pinctrl,
+					pinctrl_info->tdm_active);
+		if (ret) {
+			pr_err("%s: TDM state select failed with %d\n",
+				__func__, ret);
+			ret = -EIO;
+			goto err;
+		}
+		break;
+	case STATE_DISABLE:
+		if (curr_state == STATE_MI2S_ACTIVE) {
+			ret = pinctrl_select_state(pinctrl_info->pinctrl,
+					pinctrl_info->mi2s_disable);
+		} else {
+			ret = pinctrl_select_state(pinctrl_info->pinctrl,
+					pinctrl_info->tdm_disable);
+		}
+		if (ret) {
+			pr_err("%s:  state disable failed with %d\n",
+				__func__, ret);
+			ret = -EIO;
+			goto err;
+		}
+		break;
+	default:
+		pr_err("%s: TLMM pin state is invalid\n", __func__);
+		return -EINVAL;
+	}
+
+err:
+	return ret;
+}
+
+static void msm_release_pinctrl(struct platform_device *pdev)
+{
+	struct snd_soc_card *card = platform_get_drvdata(pdev);
+	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+	struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
+
+	if (pinctrl_info->pinctrl) {
+		devm_pinctrl_put(pinctrl_info->pinctrl);
+		pinctrl_info->pinctrl = NULL;
+	}
+}
+
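+/*
+ * Look up the quat-mi2s-{active,sleep} and quat-tdm-{active,sleep}
+ * pinctrl states from the device tree and park the pins in the MI2S
+ * sleep state until a stream starts.
+ */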
+static int msm_get_pinctrl(struct platform_device *pdev)
+{
+	struct snd_soc_card *card = platform_get_drvdata(pdev);
+	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+	struct msm_pinctrl_info *pinctrl_info = NULL;
+	struct pinctrl *pinctrl;
+	int ret;
+
+	if (pdata == NULL) {
+		pr_err("%s: platform data is NULL\n", __func__);
+		return -EINVAL;
+	}
+	pinctrl_info = &pdata->pinctrl_info;
+
+	pinctrl = devm_pinctrl_get(&pdev->dev);
+	if (IS_ERR_OR_NULL(pinctrl)) {
+		pr_err("%s: Unable to get pinctrl handle\n", __func__);
+		return -EINVAL;
+	}
+	pinctrl_info->pinctrl = pinctrl;
+
+	/* get all the states handles from Device Tree */
+	pinctrl_info->mi2s_disable = pinctrl_lookup_state(pinctrl,
+						"quat-mi2s-sleep");
+	if (IS_ERR(pinctrl_info->mi2s_disable)) {
+		pr_err("%s: could not get mi2s_disable pinstate\n", __func__);
+		goto err;
+	}
+	pinctrl_info->mi2s_active = pinctrl_lookup_state(pinctrl,
+						"quat-mi2s-active");
+	if (IS_ERR(pinctrl_info->mi2s_active)) {
+		pr_err("%s: could not get mi2s_active pinstate\n", __func__);
+		goto err;
+	}
+	pinctrl_info->tdm_disable = pinctrl_lookup_state(pinctrl,
+						"quat-tdm-sleep");
+	if (IS_ERR(pinctrl_info->tdm_disable)) {
+		pr_err("%s: could not get tdm_disable pinstate\n", __func__);
+		goto err;
+	}
+	pinctrl_info->tdm_active = pinctrl_lookup_state(pinctrl,
+						"quat-tdm-active");
+	if (IS_ERR(pinctrl_info->tdm_active)) {
+		pr_err("%s: could not get tdm_active pinstate\n",
+			__func__);
+		goto err;
+	}
+	/* Reset the TLMM pins to a default state */
+	ret = pinctrl_select_state(pinctrl_info->pinctrl,
+					pinctrl_info->mi2s_disable);
+	if (ret != 0) {
+		pr_err("%s: Disable TLMM pins failed with %d\n",
+			__func__, ret);
+		ret = -EIO;
+		goto err;
+	}
+	pinctrl_info->curr_state = STATE_DISABLE;
+
+	return 0;
+
+err:
+	devm_pinctrl_put(pinctrl);
+	pinctrl_info->pinctrl = NULL;
+	return -EINVAL;
+}
+
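+/*
+ * Fix up the backend stream parameters with the channel count, bit
+ * format and sample rate configured for the first group (TDM_0) of
+ * the quaternary or secondary TDM RX ports.
+ */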
+static int msm_tdm_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+				      struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct snd_interval *rate = hw_param_interval(params,
+					SNDRV_PCM_HW_PARAM_RATE);
+	struct snd_interval *channels = hw_param_interval(params,
+					SNDRV_PCM_HW_PARAM_CHANNELS);
+
+	if (cpu_dai->id == AFE_PORT_ID_QUATERNARY_TDM_RX) {
+		channels->min = channels->max =
+				tdm_rx_cfg[TDM_QUAT][TDM_0].channels;
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			       tdm_rx_cfg[TDM_QUAT][TDM_0].bit_format);
+		rate->min = rate->max =
+				tdm_rx_cfg[TDM_QUAT][TDM_0].sample_rate;
+	} else if (cpu_dai->id == AFE_PORT_ID_SECONDARY_TDM_RX) {
+		channels->min = channels->max =
+				tdm_rx_cfg[TDM_SEC][TDM_0].channels;
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			       tdm_rx_cfg[TDM_SEC][TDM_0].bit_format);
+		rate->min = rate->max = tdm_rx_cfg[TDM_SEC][TDM_0].sample_rate;
+	} else {
+		pr_err("%s: dai id 0x%x not supported\n",
+			__func__, cpu_dai->id);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: dai id = 0x%x channels = %d rate = %d format = 0x%x\n",
+		__func__, cpu_dai->id, channels->max, rate->max,
+		params_format(params));
+
+	return 0;
+}
+
+static int sdm845_tdm_snd_hw_params(struct snd_pcm_substream *substream,
+				     struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	int ret = 0;
+	int channels, slot_width, slots;
+	unsigned int slot_mask;
+	unsigned int slot_offset[8] = {0, 4, 8, 12, 16, 20, 24, 28};
+
+	pr_debug("%s: dai id = 0x%x\n", __func__, cpu_dai->id);
+
+	slots = tdm_rx_cfg[TDM_QUAT][TDM_0].channels;
+	/* Set one mask bit per slot: e.g. slots = 8 -> slot_mask = 0x00FF */
+	slot_mask = 0x0000FFFF >> (16 - slots);
+	slot_width = 32;
+	channels = slots;
+
+	pr_debug("%s: slot_width %d slots %d\n", __func__, slot_width, slots);
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		pr_debug("%s: slot_width %d\n", __func__, slot_width);
+		ret = snd_soc_dai_set_tdm_slot(cpu_dai, 0, slot_mask,
+			slots, slot_width);
+		if (ret < 0) {
+			pr_err("%s: failed to set tdm slot, err:%d\n",
+				__func__, ret);
+			goto end;
+		}
+
+		ret = snd_soc_dai_set_channel_map(cpu_dai,
+			0, NULL, channels, slot_offset);
+		if (ret < 0) {
+			pr_err("%s: failed to set channel map, err:%d\n",
+				__func__, ret);
+			goto end;
+		}
+	} else {
+		ret = -EINVAL;
+		pr_err("%s: invalid use case, err:%d\n",
+			__func__, ret);
+	}
+
+end:
+	return ret;
+}
+
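+/* Claim the shared TLMM pins for the TDM interface on stream open */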
+static int sdm845_tdm_snd_startup(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_card *card = rtd->card;
+	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+	struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
+
+	ret = msm_set_pinctrl(pinctrl_info, STATE_TDM_ACTIVE);
+	if (ret)
+		pr_err("%s: TDM TLMM pinctrl set failed with %d\n",
+			__func__, ret);
+
+	return ret;
+}
+
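+/* Park the shared TLMM pins back in the sleep state on stream close */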
+static void sdm845_tdm_snd_shutdown(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_card *card = rtd->card;
+	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+	struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
+
+	ret = msm_set_pinctrl(pinctrl_info, STATE_DISABLE);
+	if (ret)
+		pr_err("%s: TDM TLMM pinctrl set failed with %d\n",
+			__func__, ret);
+}
+
+static struct snd_soc_ops sdm845_tdm_be_ops = {
+	.hw_params = sdm845_tdm_snd_hw_params,
+	.startup = sdm845_tdm_snd_startup,
+	.shutdown = sdm845_tdm_snd_shutdown,
+};
+
 static int msm_mi2s_snd_startup(struct snd_pcm_substream *substream)
 {
 	int ret = 0;
@@ -3809,6 +4098,9 @@
 	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
 	int index = cpu_dai->id;
 	unsigned int fmt = SND_SOC_DAIFMT_CBS_CFS;
+	struct snd_soc_card *card = rtd->card;
+	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+	struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
 
 	dev_dbg(rtd->card->dev,
 		"%s: substream = %s  stream = %d, dai name %s, dai ID %d\n",
@@ -3822,6 +4114,14 @@
 			__func__, cpu_dai->id);
 		goto err;
 	}
+	if (index == QUAT_MI2S) {
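+		/* The QUAT pins are shared with TDM; claim them for MI2S */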
+		ret = msm_set_pinctrl(pinctrl_info, STATE_MI2S_ACTIVE);
+		if (ret) {
+			pr_err("%s: MI2S TLMM pinctrl set failed with %d\n",
+				__func__, ret);
+			goto err;
+		}
+	}
 	/*
 	 * Mutex protection in case the same MI2S
 	 * interface is used for both TX and RX, so
@@ -3874,6 +4174,9 @@
 	int ret;
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	int index = rtd->cpu_dai->id;
+	struct snd_soc_card *card = rtd->card;
+	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+	struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
 
 	pr_debug("%s(): substream = %s  stream = %d\n", __func__,
 		 substream->name, substream->stream);
@@ -3892,6 +4195,13 @@
 		}
 	}
 	mutex_unlock(&mi2s_intf_conf[index].lock);
+
+	if (index == QUAT_MI2S) {
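+		/* Return the shared QUAT pins to the sleep state */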
+		ret = msm_set_pinctrl(pinctrl_info, STATE_DISABLE);
+		if (ret)
+			pr_err("%s: MI2S TLMM pinctrl set failed with %d\n",
+				__func__, ret);
+	}
 }
 
 static struct snd_soc_ops msm_mi2s_be_ops = {
@@ -4929,8 +5239,8 @@
 		.no_pcm = 1,
 		.dpcm_playback = 1,
 		.id = MSM_BACKEND_DAI_QUAT_TDM_RX_0,
-		.be_hw_params_fixup = msm_be_hw_params_fixup,
-		.ops = &msm_tdm_be_ops,
+		.be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+		.ops = &sdm845_tdm_be_ops,
 		.ignore_suspend = 1,
 	},
 	{
@@ -6296,6 +6606,17 @@
 		dev_dbg(&pdev->dev, "msm_prepare_us_euro failed (%d)\n",
 			ret);
 
+	/* Parse pinctrl info from devicetree */
+	ret = msm_get_pinctrl(pdev);
+	if (!ret) {
+		pr_debug("%s: pinctrl parsing successful\n", __func__);
+	} else {
+		dev_dbg(&pdev->dev,
+			"%s: pinctrl parsing failed with %d, cannot use QUAT ports\n",
+			__func__, ret);
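+		/* pinctrl is optional; continue probing without it */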
+		ret = 0;
+	}
+
 	msm_i2s_auxpcm_init(pdev);
 
 	is_initial_boot = true;
@@ -6307,6 +6628,7 @@
 
 	return 0;
 err:
+	msm_release_pinctrl(pdev);
 	devm_kfree(&pdev->dev, pdata);
 	return ret;
 }
@@ -6323,6 +6645,7 @@
 	}
 	msm_i2s_auxpcm_deinit();
 
+	msm_release_pinctrl(pdev);
 	snd_soc_unregister_card(card);
 	return 0;
 }