Merge "drm/msm: add SDE IRQ domain before creating DRM objects" into msm-4.9
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_memory_dump.txt b/Documentation/devicetree/bindings/arm/msm/msm_memory_dump.txt
new file mode 100644
index 0000000..a415c8f
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm_memory_dump.txt
@@ -0,0 +1,31 @@
+Qualcomm Technologies, Inc. memory dump driver
+
+The QTI memory dump driver allows various client subsystems to register and
+allocate their respective dump regions. At the time of deadlocks or CPU hangs,
+these dump regions are captured to give a snapshot of the system at crash time.
+
+Required properties:
+
+-compatible: "qcom,mem-dump"
+-memory-region: phandle to the CMA region. The size of the CMA region
+		should be greater than the sum of the sizes of all child nodes
+		to account for padding (see the sizing note below the example).
+
+If any child nodes exist, the following properties are required:
+
+-qcom,dump-size: The size of memory that needs to be allocated for the
+		 particular node.
+-qcom,dump-id: The ID within the data dump table where this entry needs
+	       to be added.
+
+Example:
+
+	mem_dump {
+		compatible = "qcom,mem-dump";
+		memory-region = <&dump_mem>;
+
+		rpmh_dump {
+			qcom,dump-size = <0x2000000>;
+			qcom,dump-id = <0xEC>;
+		};
+	};
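
For a rough sizing check, the mem_dump node added to sdm845.dtsi later in this
patch carves its children out of a 0x2400000 dump_mem region:

	rpmh_dump		0x2000000
	rpm_sw_dump		0x0028000
	pmic_dump		0x0010000
	tmc_etf_dump		0x0010000
	tmc_etf_swao_dump	0x0008400
	misc_data_dump		0x0001000
	-----------------------------
	total			0x2051400

which leaves roughly 0x3aec00 bytes of headroom for CMA padding.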
diff --git a/Documentation/devicetree/bindings/interrupt-controller/qti,pdc.txt b/Documentation/devicetree/bindings/interrupt-controller/qti,pdc.txt
new file mode 100644
index 0000000..8598d0c
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/qti,pdc.txt
@@ -0,0 +1,63 @@
+QTI PDC interrupt controller
+
+PDC is QTI's platform parent interrupt controller that serves as a wakeup source.
+
+Newer QTI SoCs are replacing the MPM (MSM sleep Power Manager) with the PDC
+(Power Domain Controller) to manage subsystem wakeups and resources during sleep.
+This driver marks the wakeup interrupts in the APSS PDC so that the PDC monitors
+them while the system is asleep, wakes up the APSS when one of these interrupts
+occurs, and replays the interrupt to the subsystem interrupt controller once it
+becomes operational.
+
+The earlier MPM architecture used an arch-extension of the GIC interrupt
+controller to mark enabled wake-up interrupts and monitor them when the
+system goes to sleep. Since the arch-extensions are no longer available
+on newer kernel versions, this driver is implemented as a hierarchical IRQ
+domain. The GIC is the parent interrupt controller at the highest level;
+the platform interrupt controller, PDC, is next in the hierarchy, followed by others.
+This driver only configures the interrupts; it does not handle them.
+
+PDC interrupt configuration involves programming two sets of registers:
+IRQ_ENABLE_BANK    - Enables the IRQ
+IRQ_i_CFG          - Configures interrupt i
+
+Properties:
+
+- compatible:
+	Usage: required
+	Value type: <string>
+	Definition: Should contain "qcom,pdc-<target>"
+
+- reg:
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: Specifies the base physical address for PDC hardware
+			block for DRV2.
+
+- #interrupt-cells:
+	Usage: required
+	Value type: <u32>
+	Definition: Specifies the number of cells needed to encode an interrupt source.
+			Value must be 3.
+			The encoding of these cells is the same as described in
+			Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
+
+- interrupt-parent:
+	Usage: required
+	Value type: <phandle>
+	Definition: Specifies the interrupt parent necessary for the hierarchical domain to operate.
+
+- interrupt-controller:
+	Usage: required
+	Value type: <bool>
+	Definition: Identifies the node as an interrupt controller.
+
+Example:
+
+pdcgic: interrupt-controller@0xb220000 {
+	compatible = "qcom,pdc-sdm845";
+	reg = <0xb220000 0x30000>;
+	#interrupt-cells = <3>;
+	interrupt-parent = <&intc>;
+	interrupt-controller;
+};
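
A consumer then routes its wakeup-capable interrupts through the PDC rather
than the GIC directly, either via interrupt-parent or per interrupt. For
example, the sdm845-qupv3.dtsi change elsewhere in this patch uses:

	interrupts-extended = <&pdc GIC_SPI 607 0>, <&tlmm 48 0>;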
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
index 2d971b7a..375eaf2 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -86,6 +86,11 @@
 		  useful if the upstream hardware is capable of switching
 		  between multiple domains within a single context bank.
 
+- qcom,use-3-lvl-tables:
+		  Some hardware configurations may not be optimized for using
+		  a four-level page table configuration. Set this property to
+		  use a three-level page table instead.
+
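This is a boolean flag; for illustration, one of the SDM845 SMMU nodes updated
later in this patch enables it by adding the bare property next to its existing
entries:

		qcom,dynamic;
		qcom,use-3-lvl-tables;
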
 - clocks        : List of clocks to be used during SMMU register access. See
                   Documentation/devicetree/bindings/clock/clock-bindings.txt
                   for information about the format. For each clock specified
diff --git a/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt b/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
index 0295e1b..937ccb9 100644
--- a/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
@@ -81,6 +81,32 @@
 				limits.
 - qcom,mdss-rot-vbif-qos-setting: This array is used to program vbif qos remapper register
 				  priority for rotator clients.
+- qcom,mdss-rot-cdp-setting:	Integer array of size two, indicating whether
+				client driven prefetch (CDP) is enabled. Index 0
+				controls CDP for read operations and index 1
+				controls CDP for write operations.
+- qcom,mdss-rot-qos-lut:	A 4 cell property with the format of <rd_lut_0,
+				rd_lut_1, wr_lut_0, wr_lut_1> indicating the qos
+				lut settings for the rotator sspp and writeback
+				client.
+- qcom,mdss-rot-danger-lut:	A two cell property with the format of <rd_lut,
+				wr_lut> indicating the danger lut settings for
+				the rotator sspp and writeback client.
+- qcom,mdss-rot-safe-lut:	A two cell property with the format of <rd_lut,
+				wr_lut> indicating the safe lut settings for the
+				rotator sspp and writeback client.
+- qcom,mdss-inline-rot-qos-lut:	A 4 cell property with the format of <rd_lut_0,
+				rd_lut_1, wr_lut_0, wr_lut_1> indicating the qos
+				lut settings for the inline rotator sspp and
+				writeback client.
+- qcom,mdss-inline-rot-danger-lut: A two cell property with the format of
+				<rd_lut, wr_lut> indicating the danger lut
+				settings for the inline rotator sspp and
+				writeback client.
+- qcom,mdss-inline-rot-safe-lut: A two cell property with the format of
+				<rd_lut, wr_lut> indicating the safe lut
+				settings for the inline rotator sspp and
+				writeback client.
 - qcom,mdss-rot-mode:		This is integer value indicates operation mode
 				of the rotator device
 - qcom,mdss-sbuf-headroom:	This integer value indicates stream buffer headroom in lines.
@@ -146,9 +172,19 @@
 		/* VBIF QoS remapper settings*/
 		qcom,mdss-rot-vbif-qos-setting = <1 1 1 1>;
 
+		qcom,mdss-rot-cdp-setting = <1 1>;
+
 		qcom,mdss-default-ot-rd-limit = <8>;
 		qcom,mdss-default-ot-wr-limit = <16>;
 
+		qcom,mdss-rot-qos-lut = <0x0 0x0 0x0 0x0>;
+		qcom,mdss-rot-danger-lut = <0x0 0x0>;
+		qcom,mdss-rot-safe-lut = <0x0000ffff 0x0>;
+
+		qcom,mdss-inline-rot-qos-lut = <0x0 0x0 0x00112233 0x44556677>;
+		qcom,mdss-inline-rot-danger-lut = <0x0 0x0000ffff>;
+		qcom,mdss-inline-rot-safe-lut = <0x0 0x0000ff00>;
+
 		qcom,mdss-sbuf-headroom = <20>;
 		cache-slice-names = "rotator";
 		cache-slices = <&llcc 4>;
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
index 6a3e8b4..ea89751 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
@@ -20,6 +20,7 @@
 		reg = <0x5040000 0x10000>;
 		#iommu-cells = <1>;
 		qcom,dynamic;
+		qcom,use-3-lvl-tables;
 		#global-interrupts = <2>;
 		qcom,regulator-names = "vdd";
 		vdd-supply = <&gpu_cx_gdsc>;
@@ -63,6 +64,7 @@
 		reg-names = "base", "tcu-base";
 		#iommu-cells = <2>;
 		qcom,skip-init;
+		qcom,use-3-lvl-tables;
 		#global-interrupts = <1>;
 		#size-cells = <1>;
 		#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
index 122299c..f27b9da 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
@@ -22,7 +22,7 @@
 	qcom,board-id = <1 1>;
 };
 
-&dsi_dual_nt35597_truly_video_display {
+&dsi_dual_nt35597_truly_cmd_display {
 	/delete-property/ qcom,dsi-display-active;
 };
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
index 55e615c..4627e60 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
@@ -22,7 +22,7 @@
 	qcom,board-id = <8 1>;
 };
 
-&dsi_dual_nt35597_truly_video_display {
+&dsi_dual_nt35597_truly_cmd_display {
 	/delete-property/ qcom,dsi-display-active;
 };
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
index 1702e80..ea66a13 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
@@ -1073,6 +1073,15 @@
 			qcom,bcms = <&bcm_sn4>;
 		};
 
+		mas_alc: mas-alc {
+			cell-id = <MSM_BUS_MASTER_ALC>;
+			label = "mas-alc";
+			qcom,buswidth = <1>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_mc_virt>;
+			qcom,bcms = <&bcm_alc>;
+		};
+
 		mas_llcc_mc_display: mas-llcc-mc_display {
 			cell-id = <MSM_BUS_MASTER_LLCC_DISPLAY>;
 			label = "mas-llcc-mc_display";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pcie.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pcie.dtsi
index da5d6fa..c7a4d7d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pcie.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pcie.dtsi
@@ -39,44 +39,44 @@
 				36 37>;
 		#interrupt-cells = <1>;
 		interrupt-map-mask = <0 0 0 0xffffffff>;
-		interrupt-map = <0 0 0 0 &intc 0 141 0
-				0 0 0 1 &intc 0 149 0
-				0 0 0 2 &intc 0 150 0
-				0 0 0 3 &intc 0 151 0
-				0 0 0 4 &intc 0 152 0
-				0 0 0 5 &intc 0 140 0
-				0 0 0 6 &intc 0 672 0
-				0 0 0 7 &intc 0 673 0
-				0 0 0 8 &intc 0 674 0
-				0 0 0 9 &intc 0 675 0
-				0 0 0 10 &intc 0 676 0
-				0 0 0 11 &intc 0 677 0
-				0 0 0 12 &intc 0 678 0
-				0 0 0 13 &intc 0 679 0
-				0 0 0 14 &intc 0 680 0
-				0 0 0 15 &intc 0 681 0
-				0 0 0 16 &intc 0 682 0
-				0 0 0 17 &intc 0 683 0
-				0 0 0 18 &intc 0 684 0
-				0 0 0 19 &intc 0 685 0
-				0 0 0 20 &intc 0 686 0
-				0 0 0 21 &intc 0 687 0
-				0 0 0 22 &intc 0 688 0
-				0 0 0 23 &intc 0 689 0
-				0 0 0 24 &intc 0 690 0
-				0 0 0 25 &intc 0 691 0
-				0 0 0 26 &intc 0 692 0
-				0 0 0 27 &intc 0 693 0
-				0 0 0 28 &intc 0 694 0
-				0 0 0 29 &intc 0 695 0
-				0 0 0 30 &intc 0 696 0
-				0 0 0 31 &intc 0 697 0
-				0 0 0 32 &intc 0 698 0
-				0 0 0 33 &intc 0 699 0
-				0 0 0 34 &intc 0 700 0
-				0 0 0 35 &intc 0 701 0
-				0 0 0 36 &intc 0 702 0
-				0 0 0 37 &intc 0 703 0>;
+		interrupt-map = <0 0 0 0 &pdc 0 141 0
+				0 0 0 1 &pdc 0 149 0
+				0 0 0 2 &pdc 0 150 0
+				0 0 0 3 &pdc 0 151 0
+				0 0 0 4 &pdc 0 152 0
+				0 0 0 5 &pdc 0 140 0
+				0 0 0 6 &pdc 0 672 0
+				0 0 0 7 &pdc 0 673 0
+				0 0 0 8 &pdc 0 674 0
+				0 0 0 9 &pdc 0 675 0
+				0 0 0 10 &pdc 0 676 0
+				0 0 0 11 &pdc 0 677 0
+				0 0 0 12 &pdc 0 678 0
+				0 0 0 13 &pdc 0 679 0
+				0 0 0 14 &pdc 0 680 0
+				0 0 0 15 &pdc 0 681 0
+				0 0 0 16 &pdc 0 682 0
+				0 0 0 17 &pdc 0 683 0
+				0 0 0 18 &pdc 0 684 0
+				0 0 0 19 &pdc 0 685 0
+				0 0 0 20 &pdc 0 686 0
+				0 0 0 21 &pdc 0 687 0
+				0 0 0 22 &pdc 0 688 0
+				0 0 0 23 &pdc 0 689 0
+				0 0 0 24 &pdc 0 690 0
+				0 0 0 25 &pdc 0 691 0
+				0 0 0 26 &pdc 0 692 0
+				0 0 0 27 &pdc 0 693 0
+				0 0 0 28 &pdc 0 694 0
+				0 0 0 29 &pdc 0 695 0
+				0 0 0 30 &pdc 0 696 0
+				0 0 0 31 &pdc 0 697 0
+				0 0 0 32 &pdc 0 698 0
+				0 0 0 33 &pdc 0 699 0
+				0 0 0 34 &pdc 0 700 0
+				0 0 0 35 &pdc 0 701 0
+				0 0 0 36 &pdc 0 702 0
+				0 0 0 37 &pdc 0 703 0>;
 
 		interrupt-names = "int_msi", "int_a", "int_b", "int_c",
 				"int_d", "int_global_int",
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
index 1744574..cfbf3e5 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
@@ -19,6 +19,7 @@
 		#gpio-cells = <2>;
 		interrupt-controller;
 		#interrupt-cells = <2>;
+		interrupt-parent = <&pdc>;
 
 		ufs_dev_reset_assert: ufs_dev_reset_assert {
 			config {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
index 70e749b..6806145 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
@@ -145,4 +145,12 @@
 		reg = <0xC300000 0x1000>, <0xC3F0004 0x4>;
 		reg-names = "phys_addr_base", "offset_addr";
 	};
+
+	pdc: interrupt-controller@0xb220000{
+		compatible = "qcom,pdc-sdm845";
+		reg = <0xb220000 0x400>;
+		#interrupt-cells = <3>;
+		interrupt-parent = <&intc>;
+		interrupt-controller;
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
index b51996d..0aaac6f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
@@ -14,6 +14,27 @@
 #include <dt-bindings/gpio/gpio.h>
 
 /{
+	bluetooth: bt_wcn3990 {
+		compatible = "qca,wcn3990";
+		qca,bt-vdd-io-supply = <&pm8998_s3>;
+		qca,bt-vdd-xtal-supply = <&pm8998_s5>;
+		qca,bt-vdd-core-supply = <&pm8998_l7>;
+		qca,bt-vdd-pa-supply = <&pm8998_l17>;
+		qca,bt-vdd-ldo-supply = <&pm8998_l25>;
+
+		qca,bt-vdd-io-voltage-level = <1352000 1352000>;
+		qca,bt-vdd-xtal-voltage-level = <2040000 2040000>;
+		qca,bt-vdd-core-voltage-level = <1800000 1800000>;
+		qca,bt-vdd-pa-voltage-level = <1304000 1304000>;
+		qca,bt-vdd-ldo-voltage-level = <3312000 3312000>;
+
+		qca,bt-vdd-io-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-xtal-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-core-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-pa-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-ldo-current-level = <1>; /* LPM/PFM */
+	};
+
 	qrd_batterydata: qcom,battery-data {
 		qcom,batt-id-range-pct = <15>;
 		#include "fg-gen3-batterydata-itech-3000mah.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
index 0fb455f..1fa6e26 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
@@ -42,7 +42,7 @@
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se6_4uart_active>;
 		pinctrl-1 = <&qupv3_se6_4uart_sleep>;
-		interrupts-extended = <&intc GIC_SPI 607 0>,
+		interrupts-extended = <&pdc GIC_SPI 607 0>,
 				<&tlmm 48 0>;
 		status = "disabled";
 		qcom,wakeup-byte = <0xFD>;
@@ -60,7 +60,7 @@
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se7_4uart_active>;
 		pinctrl-1 = <&qupv3_se7_4uart_sleep>;
-		interrupts-extended = <&intc GIC_SPI 608 0>,
+		interrupts-extended = <&pdc GIC_SPI 608 0>,
 				<&tlmm 96 0>;
 		status = "disabled";
 		qcom,wakeup-byte = <0xFD>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
index 7ae63af..19b8744 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
@@ -36,7 +36,7 @@
 			reg = <0x3500 0x100>;
 			regulator-name = "pm8998_s12";
 			regulator-min-microvolt = <568000>;
-			regulator-max-microvolt = <1056000>;
+			regulator-max-microvolt = <1136000>;
 			qcom,enable-time = <500>;
 			regulator-always-on;
 		};
@@ -114,9 +114,9 @@
 				regulator-max-microvolt = <19>;
 
 				qcom,cpr-fuse-corners = <4>;
-				qcom,cpr-fuse-combos = <16>;
-				qcom,cpr-speed-bins = <2>;
-				qcom,cpr-speed-bin-corners = <19 19>;
+				qcom,cpr-fuse-combos = <24>;
+				qcom,cpr-speed-bins = <3>;
+				qcom,cpr-speed-bin-corners = <19 19 19>;
 				qcom,cpr-corners = <19>;
 
 				qcom,cpr-corner-fmax-map = <6 12 17 19>;
@@ -137,6 +137,11 @@
 					<568000  568000  568000  568000  568000
 					 568000  568000  568000  568000  584000
 					 584000  584000  632000  632000  632000
+					 632000  672000  712000  712000>,
+					/* Speed bin 2 */
+					<568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  584000
+					 584000  584000  632000  632000  632000
 					 632000  672000  712000  712000>;
 
 				qcom,cpr-floor-to-ceiling-max-range =
@@ -146,13 +151,30 @@
 					 32000  32000  40000  40000>;
 
 				qcom,corner-frequencies =
+					/* Speed bin 0 */
 					<300000000  422400000  499200000
 					 576000000  652800000  748800000
 					 825600000  902400000  979200000
 					1056000000 1132800000 1209600000
 					1286400000 1363200000 1440000000
 					1516800000 1593600000 1651200000
-					1708800000>;
+					1708800000>,
+					/* Speed bin 1 */
+					<300000000  422400000  499200000
+					 576000000  652800000  748800000
+					 825600000  902400000  979200000
+					1056000000 1132800000 1209600000
+					1286400000 1363200000 1440000000
+					1516800000 1593600000 1651200000
+					1708800000>,
+					/* Speed bin 2 */
+					<300000000  422400000  499200000
+					 576000000  652800000  748800000
+					 825600000  902400000  979200000
+					1056000000 1132800000 1209600000
+					1286400000 1363200000 1440000000
+					1516800000 1593600000 1670400000
+					1747200000>;
 
 				qcom,cpr-ro-scaling-factor =
 					<2594 2795 2576 2761 2469 2673 2198
@@ -185,6 +207,8 @@
 					/* Speed bin 0 */
 					<0 1 1 1 1 1 1 1>,
 					/* Speed bin 1 */
+					<0 1 1 1 1 1 1 1>,
+					/* Speed bin 2 */
 					<0 1 1 1 1 1 1 1>;
 				qcom,allow-aging-open-loop-voltage-adjustment =
 					<1>;
@@ -201,20 +225,41 @@
 			apc0_l3_vreg: regulator {
 				regulator-name = "apc0_l3_corner";
 				regulator-min-microvolt = <1>;
-				regulator-max-microvolt = <11>;
+				regulator-max-microvolt = <13>;
 
 				qcom,cpr-fuse-corners = <4>;
-				qcom,cpr-fuse-combos = <16>;
-				qcom,cpr-speed-bins = <2>;
-				qcom,cpr-speed-bin-corners = <11 11>;
-				qcom,cpr-corners = <11>;
+				qcom,cpr-fuse-combos = <24>;
+				qcom,cpr-speed-bins = <3>;
+				qcom,cpr-speed-bin-corners = <11 11 13>;
+				qcom,cpr-corners =
+					/* Speed bin 0 */
+					<11 11 11 11 11 11 11 11>,
+					/* Speed bin 1 */
+					<11 11 11 11 11 11 11 11>,
+					/* Speed bin 2 */
+					<13 13 13 13 13 13 13 13>;
 
-				qcom,cpr-corner-fmax-map = <4 7 9 11>;
+				qcom,cpr-corner-fmax-map =
+					/* Speed bin 0 */
+					<4 7 9 11>,
+					/* Speed bin 1 */
+					<4 7 9 11>,
+					/* Speed bin 2 */
+					<4 7 9 13>;
 
 				qcom,cpr-voltage-ceiling =
+					/* Speed bin 0 */
 					<872000  872000  872000  872000  872000
 					 872000  872000  872000  928000  996000
-					 996000>;
+					 996000>,
+					/* Speed bin 1 */
+					<872000  872000  872000  872000  872000
+					 872000  872000  872000  928000  996000
+					 996000>,
+					/* Speed bin 2 */
+					<872000  872000  872000  872000  872000
+					 872000  872000  872000  928000  996000
+					 996000  996000  996000>;
 
 				qcom,cpr-voltage-floor =
 					/* Speed bin 0 */
@@ -224,18 +269,43 @@
 					/* Speed bin 1 */
 					<568000  568000  568000  568000  568000
 					 584000  584000  632000  672000  712000
-					 712000>;
+					 712000>,
+					/* Speed bin 2 */
+					<568000  568000  568000  568000  568000
+					 584000  584000  632000  672000  712000
+					 712000  712000  712000>;
 
 				qcom,cpr-floor-to-ceiling-max-range =
+					/* Speed bin 0 */
 					<32000  32000  32000  32000  32000
 					 32000  32000  32000  32000  40000
-					 40000>;
+					 40000>,
+					/* Speed bin 1 */
+					<32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  40000
+					 40000>,
+					/* Speed bin 2 */
+					<32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  40000
+					 40000  40000  40000>;
 
 				qcom,corner-frequencies =
+					/* Speed bin 0 */
 					<300000000  422400000  499200000
 					 576000000  652800000  729600000
 					 806400000  883200000  960000000
-					1036800000 1094400000>;
+					1036800000 1094400000>,
+					/* Speed bin 1 */
+					<300000000  422400000  499200000
+					 576000000  652800000  729600000
+					 806400000  883200000  960000000
+					1036800000 1094400000>,
+					/* Speed bin 2 */
+					<300000000  422400000  499200000
+					 576000000  652800000  729600000
+					 806400000  883200000  960000000
+					1036800000 1113600000 1209600000
+					1305600000>;
 
 				qcom,cpr-ro-scaling-factor =
 					<2857 3056 2828 2952 2699 2796 2447
@@ -262,12 +332,14 @@
 				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
 
 				qcom,cpr-aging-max-voltage-adjustment = <15000>;
-				qcom,cpr-aging-ref-corner = <11>;
+				qcom,cpr-aging-ref-corner = <11 11 13>;
 				qcom,cpr-aging-ro-scaling-factor = <1620>;
 				qcom,allow-aging-voltage-adjustment =
 					/* Speed bin 0 */
 					<0 1 1 1 1 1 1 1>,
 					/* Speed bin 1 */
+					<0 1 1 1 1 1 1 1>,
+					/* Speed bin 2 */
 					<0 1 1 1 1 1 1 1>;
 				qcom,allow-aging-open-loop-voltage-adjustment =
 					<1>;
@@ -320,7 +392,7 @@
 		qcom,cpr-panic-reg-name-list =
 			"APSS_GOLD_CPRH_STATUS_0", "GOLD_SAW4_PMIC_STS";
 
-		qcom,cpr-aging-ref-voltage = <1056000>;
+		qcom,cpr-aging-ref-voltage = <1136000>;
 		vdd-supply = <&pm8998_s12>;
 
 		thread@0 {
@@ -333,23 +405,27 @@
 			apc1_perfcl_vreg: regulator {
 				regulator-name = "apc1_perfcl_corner";
 				regulator-min-microvolt = <1>;
-				regulator-max-microvolt = <26>;
+				regulator-max-microvolt = <27>;
 
 				qcom,cpr-fuse-corners = <3>;
-				qcom,cpr-fuse-combos = <16>;
-				qcom,cpr-speed-bins = <2>;
-				qcom,cpr-speed-bin-corners = <22 24>;
+				qcom,cpr-fuse-combos = <24>;
+				qcom,cpr-speed-bins = <3>;
+				qcom,cpr-speed-bin-corners = <22 24 25>;
 				qcom,cpr-corners =
 					/* Speed bin 0 */
 					<22 22 22 22 22 22 22 22>,
 					/* Speed bin 1 */
-					<24 24 24 24 24 24 24 24>;
+					<24 24 24 24 24 24 24 24>,
+					/* Speed bin 2 */
+					<25 25 25 25 25 25 25 25>;
 
 				qcom,cpr-corner-fmax-map =
 					/* Speed bin 0 */
 					<10 17 22>,
 					/* Speed bin 1 */
-					<10 17 24>;
+					<10 17 24>,
+					/* Speed bin 2 */
+					<10 17 25>;
 
 				qcom,cpr-voltage-ceiling =
 					/* Speed bin 0 */
@@ -357,13 +433,20 @@
 					 828000  828000  828000  828000  828000
 					 828000  828000  828000  828000  828000
 					 828000  828000  884000  952000  952000
-					1056000 1056000>,
+					1136000 1136000>,
 					/* Speed bin 1 */
 					<828000  828000  828000  828000  828000
 					 828000  828000  828000  828000  828000
 					 828000  828000  828000  828000  828000
 					 828000  828000  884000  952000  952000
-					1056000 1056000 1056000 1056000>;
+					1136000 1136000 1136000 1136000>,
+					/* Speed bin 2 */
+					<828000  828000  828000  828000  828000
+					 828000  828000  828000  828000  828000
+					 828000  828000  828000  828000  828000
+					 828000  828000  884000  952000  952000
+					1136000 1136000 1136000 1136000
+					1136000>;
 
 				qcom,cpr-voltage-floor =
 					/* Speed bin 0 */
@@ -377,7 +460,14 @@
 					 568000  568000  568000  568000  568000
 					 584000  584000  632000  632000  632000
 					 632000  632000  672000  712000  712000
-					 772000  772000  772000  772000>;
+					 772000  772000  772000  772000>,
+					/* Speed bin 2 */
+					<568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 584000  584000  632000  632000  632000
+					 632000  632000  672000  712000  712000
+					 772000  772000  772000  772000
+					 772000>;
 
 				qcom,cpr-floor-to-ceiling-max-range =
 					/* Speed bin 0 */
@@ -391,7 +481,13 @@
 					 32000  32000  32000  32000  32000
 					 32000  32000  32000  32000  32000
 					 32000  32000  40000  40000  40000
-					 40000  40000  40000  40000>;
+					 40000  40000  40000  40000>,
+					/* Speed bin 2 */
+					<32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  40000  40000  40000
+					 40000  40000  40000  40000  40000>;
 
 				qcom,corner-frequencies =
 					/* Speed bin 0 */
@@ -411,7 +507,17 @@
 					1267200000 1344000000 1420800000
 					1497600000 1574400000 1651200000
 					1728000000 1804800000 1881600000
-					1958400000 2035200000 2092800000>;
+					1958400000 2035200000 2092800000>,
+					/* Speed bin 2 */
+					<300000000  422400000  499200000
+					 576000000  652800000  729600000
+					 806400000  883200000  960000000
+					1036800000 1113600000 1190400000
+					1267200000 1344000000 1420800000
+					1497600000 1574400000 1651200000
+					1728000000 1804800000 1881600000
+					1958400000 2035200000 2112000000
+					2208000000>;
 
 				qcom,cpr-ro-scaling-factor =
 					<2857 3056 2828 2952 2699 2796 2447
@@ -442,6 +548,15 @@
 					<     0      0      0>,
 					<     0      0      0>,
 					<     0      0      0>,
+					<     0      0      0>,
+					/* Speed bin 2 */
+					<100000 100000 100000>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
 					<     0      0      0>;
 
 				qcom,cpr-closed-loop-voltage-fuse-adjustment =
@@ -462,6 +577,15 @@
 					<     0      0      0>,
 					<     0      0      0>,
 					<     0      0      0>,
+					<     0      0      0>,
+					/* Speed bin 2 */
+					<100000 100000 100000>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
 					<     0      0      0>;
 
 				qcom,allow-voltage-interpolation;
@@ -469,12 +593,14 @@
 				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
 
 				qcom,cpr-aging-max-voltage-adjustment = <15000>;
-				qcom,cpr-aging-ref-corner = <22 24>;
+				qcom,cpr-aging-ref-corner = <22 24 25>;
 				qcom,cpr-aging-ro-scaling-factor = <1700>;
 				qcom,allow-aging-voltage-adjustment =
 					/* Speed bin 0 */
 					<0 1 1 1 1 1 1 1>,
 					/* Speed bin 1 */
+					<0 1 1 1 1 1 1 1>,
+					/* Speed bin 2 */
 					<0 1 1 1 1 1 1 1>;
 				qcom,allow-aging-open-loop-voltage-adjustment =
 					<1>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index e92bfd9..21819a9 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -35,7 +35,7 @@
 		sde-vdd-supply = <&mdss_core_gdsc>;
 
 		/* interrupt config */
-		interrupt-parent = <&intc>;
+		interrupt-parent = <&pdc>;
 		interrupts = <0 83 0>;
 		interrupt-controller;
 		#interrupt-cells = <1>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
index 6fb6fb8..3870d8f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
@@ -67,7 +67,6 @@
 		dwc3@a600000 {
 			compatible = "snps,dwc3";
 			reg = <0x0a600000 0xcd00>;
-			interrupt-parent = <&intc>;
 			interrupts = <0 133 0>;
 			usb-phy = <&qusb_phy0>, <&usb_qmp_dp_phy>;
 			tx-fifo-resize;
@@ -80,7 +79,6 @@
 		qcom,usbbam@a704000 {
 			compatible = "qcom,usb-bam-msm";
 			reg = <0xa704000 0x17000>;
-			interrupt-parent = <&intc>;
 			interrupts = <0 132 0>;
 
 			qcom,bam-type = <0>;
@@ -361,7 +359,6 @@
 		dwc3@a600000 {
 			compatible = "snps,dwc3";
 			reg = <0x0a800000 0xcd00>;
-			interrupt-parent = <&intc>;
 			interrupts = <0 138 0>;
 			usb-phy = <&qusb_phy1>, <&usb_qmp_phy>;
 			tx-fifo-resize;
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index ad451ce..7fea651 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -30,7 +30,7 @@
 	model = "Qualcomm Technologies, Inc. SDM845";
 	compatible = "qcom,sdm845";
 	qcom,msm-id = <321 0x0>;
-	interrupt-parent = <&intc>;
+	interrupt-parent = <&pdc>;
 
 	aliases {
 		ufshc1 = &ufshc_mem; /* Embedded UFS slot */
@@ -567,6 +567,12 @@
 			size = <0 0x5c00000>;
 		};
 
+		dump_mem: mem_dump_region {
+			compatible = "shared-dma-pool";
+			reusable;
+			size = <0 0x2400000>;
+		};
+
 		/* global autoconfigured region for contiguous allocations */
 		linux,cma {
 			compatible = "shared-dma-pool";
@@ -600,6 +606,7 @@
 		reg = <0x17a00000 0x10000>,     /* GICD */
 		      <0x17a60000 0x100000>;    /* GICR * 8 */
 		interrupts = <1 9 4>;
+		interrupt-parent = <&intc>;
 	};
 
 	timer {
@@ -1712,7 +1719,7 @@
 	qcom,ssc@5c00000 {
 		compatible = "qcom,pil-tz-generic";
 		reg = <0x5c00000 0x4000>;
-		interrupts = <0 377 1>;
+		interrupts = <0 494 1>;
 
 		vdd_cx-supply = <&pm8998_l27_level>;
 		vdd_px-supply = <&pm8998_lvs2>;
@@ -3912,6 +3919,41 @@
 		#thermal-sensor-cells = <1>;
 	};
 
+	mem_dump {
+		compatible = "qcom,mem-dump";
+		memory-region = <&dump_mem>;
+
+		rpmh_dump {
+			qcom,dump-size = <0x2000000>;
+			qcom,dump-id = <0xec>;
+		};
+
+		rpm_sw_dump {
+			qcom,dump-size = <0x28000>;
+			qcom,dump-id = <0xea>;
+		};
+
+		pmic_dump {
+			qcom,dump-size = <0x10000>;
+			qcom,dump-id = <0xe4>;
+		};
+
+		tmc_etf_dump {
+			qcom,dump-size = <0x10000>;
+			qcom,dump-id = <0xf0>;
+		};
+
+		tmc_etf_swao_dump {
+			qcom,dump-size = <0x8400>;
+			qcom,dump-id = <0xf1>;
+		};
+
+		misc_data_dump {
+			qcom,dump-size = <0x1000>;
+			qcom,dump-id = <0xe8>;
+		};
+	};
+
 	gpi_dma0: qcom,gpi-dma@0x800000 {
 		#dma-cells = <6>;
 		compatible = "qcom,gpi-dma";
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index 19a6db8..12b1f09 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -547,6 +547,7 @@
 CONFIG_CORESIGHT_DUMMY=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SMACK=y
 CONFIG_CRYPTO_XCBC=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index 04a0d3e..23b1215 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -620,6 +620,7 @@
 CONFIG_CORESIGHT_DUMMY=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SMACK=y
 CONFIG_CRYPTO_XCBC=y
diff --git a/drivers/char/diag/diagfwd_glink.h b/drivers/char/diag/diagfwd_glink.h
index bad4629..73f2fe8 100644
--- a/drivers/char/diag/diagfwd_glink.h
+++ b/drivers/char/diag/diagfwd_glink.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index d15d1bb..fd3617b 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -59,7 +59,6 @@
 
 #define FABIA_USER_CTL_LO	0xc
 #define FABIA_USER_CTL_HI	0x10
-#define FABIA_CAL_L_VAL		0x8
 #define FABIA_FRAC_VAL		0x38
 #define FABIA_OPMODE		0x2c
 #define FABIA_PLL_STANDBY	0x0
@@ -463,12 +462,9 @@
 {
 	u32 val, mask;
 
-	if (config->l) {
+	if (config->l)
 		regmap_write(regmap, pll->offset + PLL_L_VAL,
 						config->l);
-		regmap_write(regmap, pll->offset + FABIA_CAL_L_VAL,
-						config->l);
-	}
 
 	if (config->frac)
 		regmap_write(regmap, pll->offset + FABIA_FRAC_VAL,
@@ -627,12 +623,6 @@
 	}
 
 	regmap_write(pll->clkr.regmap, off + PLL_L_VAL, l);
-	/*
-	 * pll_cal_l_val is set to pll_l_val on MOST targets. Set it
-	 * explicitly here for PLL out-of-reset calibration to work
-	 * without a glitch on ALL of them.
-	 */
-	regmap_write(pll->clkr.regmap, off + FABIA_CAL_L_VAL, l);
 	regmap_write(pll->clkr.regmap, off + FABIA_FRAC_VAL, a);
 
 	/* Latch the PLL input */
diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c
index ae9d509..a5548e0 100644
--- a/drivers/clk/qcom/gpucc-sdm845.c
+++ b/drivers/clk/qcom/gpucc-sdm845.c
@@ -105,20 +105,6 @@
 	"core_bi_pll_test_se",
 };
 
-static const struct parent_map gpu_cc_parent_map_2[] = {
-	{ P_BI_TCXO, 0 },
-	{ P_GPLL0_OUT_MAIN, 5 },
-	{ P_GPLL0_OUT_MAIN_DIV, 6 },
-	{ P_CORE_BI_PLL_TEST_SE, 7 },
-};
-
-static const char * const gpu_cc_parent_names_2[] = {
-	"bi_tcxo",
-	"gcc_gpu_gpll0_clk_src",
-	"gcc_gpu_gpll0_div_clk_src",
-	"core_bi_pll_test_se",
-};
-
 static struct pll_vco fabia_vco[] = {
 	{ 250000000, 2000000000, 0 },
 	{ 125000000, 1000000000, 1 },
@@ -232,29 +218,6 @@
 	},
 };
 
-static const struct freq_tbl ftbl_gpu_cc_rbcpr_clk_src[] = {
-	F(19200000, P_BI_TCXO, 1, 0, 0),
-	{ }
-};
-
-static struct clk_rcg2 gpu_cc_rbcpr_clk_src = {
-	.cmd_rcgr = 0x10b0,
-	.mnd_width = 0,
-	.hid_width = 5,
-	.parent_map = gpu_cc_parent_map_2,
-	.freq_tbl = ftbl_gpu_cc_rbcpr_clk_src,
-	.clkr.hw.init = &(struct clk_init_data){
-		.name = "gpu_cc_rbcpr_clk_src",
-		.parent_names = gpu_cc_parent_names_2,
-		.num_parents = 4,
-		.flags = CLK_SET_RATE_PARENT,
-		.ops = &clk_rcg2_ops,
-		VDD_CX_FMAX_MAP2(
-			MIN, 19200000,
-			NOMINAL, 50000000),
-	},
-};
-
 static struct clk_branch gpu_cc_acd_ahb_clk = {
 	.halt_reg = 0x1168,
 	.halt_check = BRANCH_HALT,
@@ -488,37 +451,6 @@
 	},
 };
 
-static struct clk_branch gpu_cc_rbcpr_ahb_clk = {
-	.halt_reg = 0x10f4,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0x10f4,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "gpu_cc_rbcpr_ahb_clk",
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
-static struct clk_branch gpu_cc_rbcpr_clk = {
-	.halt_reg = 0x10f0,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0x10f0,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "gpu_cc_rbcpr_clk",
-			.parent_names = (const char *[]){
-				"gpu_cc_rbcpr_clk_src",
-			},
-			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT,
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
 static struct clk_regmap *gpu_cc_sdm845_clocks[] = {
 	[GPU_CC_ACD_AHB_CLK] = &gpu_cc_acd_ahb_clk.clkr,
 	[GPU_CC_ACD_CXO_CLK] = &gpu_cc_acd_cxo_clk.clkr,
@@ -536,9 +468,6 @@
 	[GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
 	[GPU_CC_GX_VSENSE_CLK] = &gpu_cc_gx_vsense_clk.clkr,
 	[GPU_CC_PLL_TEST_CLK] = &gpu_cc_pll_test_clk.clkr,
-	[GPU_CC_RBCPR_AHB_CLK] = &gpu_cc_rbcpr_ahb_clk.clkr,
-	[GPU_CC_RBCPR_CLK] = &gpu_cc_rbcpr_clk.clkr,
-	[GPU_CC_RBCPR_CLK_SRC] = &gpu_cc_rbcpr_clk_src.clkr,
 };
 
 static struct clk_regmap *gpu_cc_gfx_sdm845_clocks[] = {
@@ -554,7 +483,6 @@
 	[GPUCC_GPU_CC_GFX3D_AON_BCR] = { 0x10a0 },
 	[GPUCC_GPU_CC_GMU_BCR] = { 0x111c },
 	[GPUCC_GPU_CC_GX_BCR] = { 0x1008 },
-	[GPUCC_GPU_CC_RBCPR_BCR] = { 0x10ac },
 	[GPUCC_GPU_CC_SPDM_BCR] = { 0x1110 },
 	[GPUCC_GPU_CC_XO_BCR] = { 0x1000 },
 };
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
index f184ee1..ff64631 100644
--- a/drivers/crypto/msm/qcrypto.c
+++ b/drivers/crypto/msm/qcrypto.c
@@ -583,44 +583,92 @@
 static void qcrypto_ce_set_bus(struct crypto_engine *pengine,
 				 bool high_bw_req)
 {
+	struct crypto_priv *cp = pengine->pcp;
+	unsigned int control_flag;
 	int ret = 0;
 
-	if (high_bw_req) {
+	if (cp->ce_support.req_bw_before_clk) {
+		if (high_bw_req)
+			control_flag = QCE_BW_REQUEST_FIRST;
+		else
+			control_flag = QCE_CLK_DISABLE_FIRST;
+	} else {
+		if (high_bw_req)
+			control_flag = QCE_CLK_ENABLE_FIRST;
+		else
+			control_flag = QCE_BW_REQUEST_RESET_FIRST;
+	}
+
+	switch (control_flag) {
+	case QCE_CLK_ENABLE_FIRST:
 		ret = qce_enable_clk(pengine->qce);
 		if (ret) {
 			pr_err("%s Unable enable clk\n", __func__);
-			goto clk_err;
+			return;
 		}
 		ret = msm_bus_scale_client_update_request(
 				pengine->bus_scale_handle, 1);
 		if (ret) {
-			pr_err("%s Unable to set to high bandwidth\n",
-						__func__);
-			qce_disable_clk(pengine->qce);
-			goto clk_err;
+			pr_err("%s Unable to set high bw\n", __func__);
+			ret = qce_disable_clk(pengine->qce);
+			if (ret)
+				pr_err("%s Unable disable clk\n", __func__);
+			return;
 		}
-	} else {
+		break;
+	case QCE_BW_REQUEST_FIRST:
+		ret = msm_bus_scale_client_update_request(
+				pengine->bus_scale_handle, 1);
+		if (ret) {
+			pr_err("%s Unable to set high bw\n", __func__);
+			return;
+		}
+		ret = qce_enable_clk(pengine->qce);
+		if (ret) {
+			pr_err("%s Unable enable clk\n", __func__);
+			ret = msm_bus_scale_client_update_request(
+				pengine->bus_scale_handle, 0);
+			if (ret)
+				pr_err("%s Unable to set low bw\n", __func__);
+			return;
+		}
+		break;
+	case QCE_CLK_DISABLE_FIRST:
+		ret = qce_disable_clk(pengine->qce);
+		if (ret) {
+			pr_err("%s Unable to disable clk\n", __func__);
+			return;
+		}
 		ret = msm_bus_scale_client_update_request(
 				pengine->bus_scale_handle, 0);
 		if (ret) {
-			pr_err("%s Unable to set to low bandwidth\n",
-						__func__);
-			goto clk_err;
+			pr_err("%s Unable to set low bw\n", __func__);
+			ret = qce_enable_clk(pengine->qce);
+			if (ret)
+				pr_err("%s Unable enable clk\n", __func__);
+			return;
+		}
+		break;
+	case QCE_BW_REQUEST_RESET_FIRST:
+		ret = msm_bus_scale_client_update_request(
+				pengine->bus_scale_handle, 0);
+		if (ret) {
+			pr_err("%s Unable to set low bw\n", __func__);
+			return;
 		}
 		ret = qce_disable_clk(pengine->qce);
 		if (ret) {
-			pr_err("%s Unable disable clk\n", __func__);
+			pr_err("%s Unable to disable clk\n", __func__);
 			ret = msm_bus_scale_client_update_request(
 				pengine->bus_scale_handle, 1);
 			if (ret)
-				pr_err("%s Unable to set to high bandwidth\n",
-						__func__);
-			goto clk_err;
+				pr_err("%s Unable to set high bw\n", __func__);
+			return;
 		}
+		break;
+	default:
+		return;
 	}
-clk_err:
-	return;
-
 }
 
 static void qcrypto_bw_reaper_timer_callback(unsigned long data)
@@ -4856,12 +4904,36 @@
 	if (!pengine)
 		return -ENOMEM;
 
-	/* open qce */
+	cp->platform_support.bus_scale_table = (struct msm_bus_scale_pdata *)
+					msm_bus_cl_get_pdata(pdev);
+	if (!cp->platform_support.bus_scale_table) {
+		dev_err(&pdev->dev, "bus_scale_table is NULL\n");
+		pengine->bw_state = BUS_HAS_BANDWIDTH;
+	} else {
+		pengine->bus_scale_handle = msm_bus_scale_register_client(
+				(struct msm_bus_scale_pdata *)
+				cp->platform_support.bus_scale_table);
+		if (!pengine->bus_scale_handle) {
+			dev_err(&pdev->dev, "failed to get bus scale handle\n");
+			rc = -ENOMEM;
+			goto exit_kzfree;
+		}
+		pengine->bw_state = BUS_NO_BANDWIDTH;
+	}
+	rc = msm_bus_scale_client_update_request(pengine->bus_scale_handle, 1);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to set high bandwidth\n");
+		goto exit_kzfree;
+	}
 	handle = qce_open(pdev, &rc);
 	if (handle == NULL) {
-		kzfree(pengine);
-		platform_set_drvdata(pdev, NULL);
-		return rc;
+		rc = -ENODEV;
+		goto exit_free_pdata;
+	}
+	rc = msm_bus_scale_client_update_request(pengine->bus_scale_handle, 0);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to set low bandwidth\n");
+		goto exit_qce_close;
 	}
 
 	platform_set_drvdata(pdev, pengine);
@@ -4903,7 +4975,7 @@
 			pengine->max_req, GFP_KERNEL);
 	if (pqcrypto_req_control == NULL) {
 		rc = -ENOMEM;
-		goto err;
+		goto exit_unlock_mutex;
 	}
 	qcrypto_init_req_control(pengine, pqcrypto_req_control);
 	if (cp->ce_support.bam)	 {
@@ -4911,15 +4983,7 @@
 		cp->platform_support.shared_ce_resource = 0;
 		cp->platform_support.hw_key_support = cp->ce_support.hw_key;
 		cp->platform_support.sha_hmac = 1;
-
-		cp->platform_support.bus_scale_table =
-			(struct msm_bus_scale_pdata *)
-					msm_bus_cl_get_pdata(pdev);
-		if (!cp->platform_support.bus_scale_table)
-			pr_warn("bus_scale_table is NULL\n");
-
 		pengine->ce_device = cp->ce_support.ce_device;
-
 	} else {
 		platform_support =
 			(struct msm_ce_hw_support *)pdev->dev.platform_data;
@@ -4928,33 +4992,11 @@
 				platform_support->shared_ce_resource;
 		cp->platform_support.hw_key_support =
 				platform_support->hw_key_support;
-		cp->platform_support.bus_scale_table =
-				platform_support->bus_scale_table;
 		cp->platform_support.sha_hmac = platform_support->sha_hmac;
 	}
 
-	pengine->bus_scale_handle = 0;
-
-	if (cp->platform_support.bus_scale_table != NULL) {
-		pengine->bus_scale_handle =
-			msm_bus_scale_register_client(
-				(struct msm_bus_scale_pdata *)
-					cp->platform_support.bus_scale_table);
-		if (!pengine->bus_scale_handle) {
-			pr_err("%s not able to get bus scale\n",
-				__func__);
-			rc =  -ENOMEM;
-			goto err;
-		}
-		pengine->bw_state = BUS_NO_BANDWIDTH;
-	} else {
-		pengine->bw_state = BUS_HAS_BANDWIDTH;
-	}
-
-	if (cp->total_units != 1) {
-		mutex_unlock(&cp->engine_lock);
-		return 0;
-	}
+	if (cp->total_units != 1)
+		goto exit_unlock_mutex;
 
 	/* register crypto cipher algorithms the device supports */
 	for (i = 0; i < ARRAY_SIZE(_qcrypto_ablk_cipher_algos); i++) {
@@ -5243,13 +5285,19 @@
 	}
 	mutex_unlock(&cp->engine_lock);
 
-
 	return 0;
 err:
 	_qcrypto_remove_engine(pengine);
+	kzfree(pqcrypto_req_control);
+exit_unlock_mutex:
 	mutex_unlock(&cp->engine_lock);
+exit_qce_close:
 	if (pengine->qce)
 		qce_close(pengine->qce);
+exit_free_pdata:
+	msm_bus_scale_client_update_request(pengine->bus_scale_handle, 0);
+	platform_set_drvdata(pdev, NULL);
+exit_kzfree:
 	kzfree(pengine);
 	return rc;
 };
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
index 5dcdf46..2d7b174 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
@@ -142,7 +142,8 @@
 void dsi_ctrl_hw_cmn_setup_cmd_stream(struct dsi_ctrl_hw *ctrl,
 				     struct dsi_mode_info *mode,
 				     u32 h_stride,
-				     u32 vc_id);
+				     u32 vc_id,
+				     struct dsi_rect *roi);
 void dsi_ctrl_hw_cmn_phy_sw_reset(struct dsi_ctrl_hw *ctrl);
 void dsi_ctrl_hw_cmn_soft_reset(struct dsi_ctrl_hw *ctrl);
 
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index f187ad1..39b797e 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -1558,7 +1558,6 @@
 
 int dsi_ctrl_setup(struct dsi_ctrl *dsi_ctrl)
 {
-	struct dsi_mode_info video_timing;
 	int rc = 0;
 
 	if (!dsi_ctrl) {
@@ -1568,12 +1567,6 @@
 
 	mutex_lock(&dsi_ctrl->ctrl_lock);
 
-	/* replace video mode width with actual roi width */
-	memcpy(&video_timing, &dsi_ctrl->host_config.video_timing,
-			sizeof(video_timing));
-	video_timing.h_active = dsi_ctrl->roi.w;
-	video_timing.v_active = dsi_ctrl->roi.h;
-
 	dsi_ctrl->hw.ops.setup_lane_map(&dsi_ctrl->hw,
 					&dsi_ctrl->host_config.lane_map);
 
@@ -1586,9 +1579,10 @@
 					&dsi_ctrl->host_config.u.cmd_engine);
 
 		dsi_ctrl->hw.ops.setup_cmd_stream(&dsi_ctrl->hw,
-				&video_timing,
-				video_timing.h_active * 3,
-				0x0);
+				&dsi_ctrl->host_config.video_timing,
+				dsi_ctrl->host_config.video_timing.h_active * 3,
+				0x0,
+				&dsi_ctrl->roi);
 		dsi_ctrl->hw.ops.cmd_engine_en(&dsi_ctrl->hw, true);
 	} else {
 		dsi_ctrl->hw.ops.video_engine_setup(&dsi_ctrl->hw,
@@ -1690,7 +1684,8 @@
 		dsi_ctrl->hw.ops.setup_cmd_stream(&dsi_ctrl->hw,
 				&dsi_ctrl->host_config.video_timing,
 				dsi_ctrl->host_config.video_timing.h_active * 3,
-				0x0);
+				0x0,
+				NULL);
 	} else {
 		dsi_ctrl->hw.ops.video_engine_setup(&dsi_ctrl->hw,
 					&dsi_ctrl->host_config.common_config,
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
index 859d707..bb72807 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
@@ -319,7 +319,8 @@
 	void (*setup_cmd_stream)(struct dsi_ctrl_hw *ctrl,
 				 struct dsi_mode_info *mode,
 				 u32 h_stride,
-				 u32 vc_id);
+				 u32 vc_id,
+				 struct dsi_rect *roi);
 
 	/**
 	 * ctrl_en() - enable DSI controller engine
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
index 48c2370..a024c43 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
@@ -19,6 +19,7 @@
 #include "dsi_ctrl_hw.h"
 #include "dsi_ctrl_reg.h"
 #include "dsi_hw.h"
+#include "dsi_panel.h"
 
 #define MMSS_MISC_CLAMP_REG_OFF           0x0014
 #define DSI_CTRL_DYNAMIC_FORCE_ON         (0x23F|BIT(8)|BIT(9)|BIT(11)|BIT(21))
@@ -234,21 +235,36 @@
 void dsi_ctrl_hw_cmn_setup_cmd_stream(struct dsi_ctrl_hw *ctrl,
 				     struct dsi_mode_info *mode,
 				     u32 h_stride,
-				     u32 vc_id)
+				     u32 vc_id,
+				     struct dsi_rect *roi)
 {
-	u32 reg = 0;
 	u32 width_final, stride_final;
+	u32 height_final;
+	u32 stream_total = 0, stream_ctrl = 0;
+	u32 reg_ctrl = 0, reg_ctrl2 = 0;
+
+	if (roi && (!roi->w || !roi->h))
+		return;
 
 	if (mode->dsc_enabled && mode->dsc) {
+		u32 reg = 0;
 		u32 offset = 0;
-		u32 reg_ctrl, reg_ctrl2;
+		int pic_width, this_frame_slices, intf_ip_w;
+		struct msm_display_dsc_info dsc;
+
+		memcpy(&dsc, mode->dsc, sizeof(dsc));
+		pic_width = roi ? roi->w : mode->h_active;
+		this_frame_slices = pic_width / dsc.slice_width;
+		intf_ip_w = this_frame_slices * dsc.slice_width;
+		dsi_dsc_pclk_param_calc(&dsc, intf_ip_w);
 
 		if (vc_id != 0)
 			offset = 16;
 		reg_ctrl = DSI_R32(ctrl, DSI_COMMAND_COMPRESSION_MODE_CTRL);
 		reg_ctrl2 = DSI_R32(ctrl, DSI_COMMAND_COMPRESSION_MODE_CTRL2);
-		width_final = mode->dsc->pclk_per_line;
-		stride_final = mode->dsc->bytes_per_pkt;
+		width_final = dsc.pclk_per_line;
+		stride_final = dsc.bytes_per_pkt;
+		height_final = roi ? roi->h : mode->v_active;
 
 		reg = 0x39 << 8;
 		/*
@@ -258,34 +274,45 @@
 		 * 2 == 4 pkt
 		 * 3 pkt is not support
 		 */
-		if (mode->dsc->pkt_per_line == 4)
-			reg |= (mode->dsc->pkt_per_line - 2) << 6;
+		if (dsc.pkt_per_line == 4)
+			reg |= (dsc.pkt_per_line - 2) << 6;
 		else
-			reg |= (mode->dsc->pkt_per_line - 1) << 6;
-		reg |= mode->dsc->eol_byte_num << 4;
+			reg |= (dsc.pkt_per_line - 1) << 6;
+		reg |= dsc.eol_byte_num << 4;
 		reg |= 1;
 
 		reg_ctrl &= ~(0xFFFF << offset);
 		reg_ctrl |= (reg << offset);
 		reg_ctrl2 &= ~(0xFFFF << offset);
-		reg_ctrl2 |= (mode->dsc->bytes_in_slice << offset);
+		reg_ctrl2 |= (dsc.bytes_in_slice << offset);
 		DSI_W32(ctrl, DSI_COMMAND_COMPRESSION_MODE_CTRL, reg_ctrl);
 		DSI_W32(ctrl, DSI_COMMAND_COMPRESSION_MODE_CTRL2, reg_ctrl2);
+
+		pr_debug("ctrl %d reg_ctrl 0x%x reg_ctrl2 0x%x\n", ctrl->index,
+				reg_ctrl, reg_ctrl2);
+	} else if (roi) {
+		width_final = roi->w;
+		stride_final = roi->w * 3;
+		height_final = roi->h;
 	} else {
 		width_final = mode->h_active;
 		stride_final = h_stride;
+		height_final = mode->v_active;
 	}
 
-	reg = (stride_final + 1) << 16;
-	reg |= (vc_id & 0x3) << 8;
-	reg |= 0x39; /* packet data type */
+	stream_ctrl = (stride_final + 1) << 16;
+	stream_ctrl |= (vc_id & 0x3) << 8;
+	stream_ctrl |= 0x39; /* packet data type */
 
-	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM0_CTRL, reg);
-	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM1_CTRL, reg);
+	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM0_CTRL, stream_ctrl);
+	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM1_CTRL, stream_ctrl);
 
-	reg = (mode->v_active << 16) | width_final;
-	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM0_TOTAL, reg);
-	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM1_TOTAL, reg);
+	stream_total = (height_final << 16) | width_final;
+	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM0_TOTAL, stream_total);
+	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM1_TOTAL, stream_total);
+
+	pr_debug("ctrl %d stream_ctrl 0x%x stream_total 0x%x\n", ctrl->index,
+			stream_ctrl, stream_total);
 }
 
 /**
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
index 4c9fbbe..f254af5 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
@@ -249,4 +249,6 @@
 int dsi_panel_send_roi_dcs(struct dsi_panel *panel, int ctrl_idx,
 		struct dsi_rect *roi);
 
+void dsi_dsc_pclk_param_calc(struct msm_display_dsc_info *dsc, int intf_width);
+
 #endif /* _DSI_PANEL_H_ */
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 3061099..acd7af5 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -341,7 +341,8 @@
 			if (obj->import_attach && mmu->funcs->map_dma_buf) {
 				ret = mmu->funcs->map_dma_buf(mmu, msm_obj->sgt,
 						obj->import_attach->dmabuf,
-						DMA_BIDIRECTIONAL);
+						DMA_BIDIRECTIONAL,
+						msm_obj->flags);
 				if (ret) {
 					DRM_ERROR("Unable to map dma buf\n");
 					return ret;
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 2cf170d..19c7726 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -23,6 +23,7 @@
 
 /* Additional internal-use only BO flags: */
 #define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */
+#define MSM_BO_KEEPATTRS     0x20000000     /* keep h/w bus attributes */
 
 struct msm_gem_object {
 	struct drm_gem_object base;
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index ee93339..fbf7e7b 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -43,7 +43,7 @@
 	void (*unmap_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
 		enum dma_data_direction dir);
 	int (*map_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt,
-			struct dma_buf *dma_buf, int dir);
+			struct dma_buf *dma_buf, int dir, u32 flags);
 	void (*unmap_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt,
 			struct dma_buf *dma_buf, int dir);
 	void (*destroy)(struct msm_mmu *mmu);
diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c
index c279d01..4d45898 100644
--- a/drivers/gpu/drm/msm/msm_smmu.c
+++ b/drivers/gpu/drm/msm/msm_smmu.c
@@ -25,6 +25,7 @@
 #include <soc/qcom/secure_buffer.h>
 
 #include "msm_drv.h"
+#include "msm_gem.h"
 #include "msm_mmu.h"
 
 #ifndef SZ_4G
@@ -220,14 +221,18 @@
 }
 
 static int msm_smmu_map_dma_buf(struct msm_mmu *mmu, struct sg_table *sgt,
-			struct dma_buf *dma_buf, int dir)
+			struct dma_buf *dma_buf, int dir, u32 flags)
 {
 	struct msm_smmu *smmu = to_msm_smmu(mmu);
 	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+	unsigned long attrs = 0x0;
 	int ret;
 
-	ret = msm_dma_map_sg_lazy(client->dev, sgt->sgl, sgt->nents, dir,
-			dma_buf);
+	if (flags & MSM_BO_KEEPATTRS)
+		attrs |= DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
+
+	ret = msm_dma_map_sg_attrs(client->dev, sgt->sgl, sgt->nents, dir,
+			dma_buf, attrs);
 	if (ret != sgt->nents) {
 		DRM_ERROR("dma map sg failed\n");
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index a457938..d0ade33 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -612,12 +612,15 @@
 	int i;
 
 	if (!dim_layer->rect.w || !dim_layer->rect.h) {
-		SDE_DEBUG("empty dim layer\n");
+		SDE_DEBUG("empty dim_layer\n");
 		return;
 	}
 
 	cstate = to_sde_crtc_state(crtc->state);
 
+	SDE_DEBUG("dim_layer - flags:%d, stage:%d\n",
+			dim_layer->flags, dim_layer->stage);
+
 	split_dim_layer.stage = dim_layer->stage;
 	split_dim_layer.color_fill = dim_layer->color_fill;
 
@@ -651,9 +654,13 @@
 		} else {
 			split_dim_layer.rect.x =
 					split_dim_layer.rect.x -
-					cstate->lm_bounds[i].w;
+						cstate->lm_bounds[i].x;
 		}
 
+		SDE_DEBUG("split_dim_layer - LM:%d, rect:{%d,%d,%d,%d}\n",
+			i, split_dim_layer.rect.x, split_dim_layer.rect.y,
+			split_dim_layer.rect.w, split_dim_layer.rect.h);
+
 		lm = mixer[i].hw_lm;
 		mixer[i].mixer_op_mode |= 1 << split_dim_layer.stage;
 		lm->ops.setup_dim_layer(lm, &split_dim_layer);
@@ -858,9 +865,24 @@
 	sde_crtc = to_sde_crtc(crtc);
 	crtc_state = to_sde_crtc_state(state);
 
-	for (i = 0; i < sde_crtc->num_mixers; i++) {
-		if (!sde_kms_rect_is_null(&crtc_state->lm_roi[i]))
-			disp_bitmask |= BIT(i);
+	/* pingpong split: one ROI, one LM, two physical displays */
+	if (crtc_state->is_ppsplit) {
+		u32 lm_split_width = crtc_state->lm_bounds[0].w / 2;
+		struct sde_rect *roi = &crtc_state->lm_roi[0];
+
+		if (sde_kms_rect_is_null(roi))
+			disp_bitmask = 0;
+		else if ((u32)roi->x + (u32)roi->w <= lm_split_width)
+			disp_bitmask = BIT(0);		/* left only */
+		else if (roi->x >= lm_split_width)
+			disp_bitmask = BIT(1);		/* right only */
+		else
+			disp_bitmask = BIT(0) | BIT(1); /* left and right */
+	} else {
+		for (i = 0; i < sde_crtc->num_mixers; i++) {
+			if (!sde_kms_rect_is_null(&crtc_state->lm_roi[i]))
+				disp_bitmask |= BIT(i);
+		}
 	}
 
 	SDE_DEBUG("affected displays 0x%x\n", disp_bitmask);
@@ -881,9 +903,6 @@
 	sde_crtc = to_sde_crtc(crtc);
 	crtc_state = to_sde_crtc_state(state);
 
-	if (sde_crtc->num_mixers == 1)
-		return 0;
-
 	if (sde_crtc->num_mixers > CRTC_DUAL_MIXERS) {
 		SDE_ERROR("%s: unsupported number of mixers: %d\n",
 				sde_crtc->name, sde_crtc->num_mixers);
@@ -891,9 +910,41 @@
 	}
 
 	/*
-	 * On certain HW, ROIs must be centered on the split between LMs,
-	 * and be of equal width.
+	 * If using pingpong split: one ROI, one LM, two physical displays
+	 * then the ROI must be centered on the panel split boundary and
+	 * be of equal width across the split.
 	 */
+	if (crtc_state->is_ppsplit) {
+		u16 panel_split_width;
+		u32 display_mask;
+
+		roi[0] = &crtc_state->lm_roi[0];
+
+		if (sde_kms_rect_is_null(roi[0]))
+			return 0;
+
+		display_mask = _sde_crtc_get_displays_affected(crtc, state);
+		if (display_mask != (BIT(0) | BIT(1)))
+			return 0;
+
+		panel_split_width = crtc_state->lm_bounds[0].w / 2;
+		if (roi[0]->x + roi[0]->w / 2 != panel_split_width) {
+			SDE_ERROR("%s: roi x %d w %d split %d\n",
+					sde_crtc->name, roi[0]->x, roi[0]->w,
+					panel_split_width);
+			return -EINVAL;
+		}
+
+		return 0;
+	}
+
+	/*
+	 * On certain HW, if using 2 LM, ROIs must be split evenly between the
+	 * LMs and be of equal width.
+	 */
+	if (sde_crtc->num_mixers == 1)
+		return 0;
+
 	roi[0] = &crtc_state->lm_roi[0];
 	roi[1] = &crtc_state->lm_roi[1];
 
@@ -1181,6 +1232,69 @@
 	_sde_crtc_program_lm_output_roi(crtc);
 }
 
+static void _sde_crtc_swap_mixers_for_right_partial_update(
+		struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
+	struct drm_encoder *drm_enc;
+	bool is_right_only;
+	bool encoder_in_dsc_merge = false;
+
+	if (!crtc || !crtc->state)
+		return;
+
+	sde_crtc = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(crtc->state);
+
+	if (sde_crtc->num_mixers != CRTC_DUAL_MIXERS)
+		return;
+
+	drm_for_each_encoder(drm_enc, crtc->dev) {
+		if (drm_enc->crtc == crtc &&
+				sde_encoder_is_dsc_merge(drm_enc)) {
+			encoder_in_dsc_merge = true;
+			break;
+		}
+	}
+
+	/**
+	 * For right-only partial update with DSC merge, we swap LM0 & LM1.
+	 * This is due to two reasons:
+	 * - On 8996, there is a DSC HW requirement that in DSC Merge Mode,
+	 *   the left DSC must be used, right DSC cannot be used alone.
+	 *   For right-only partial update, this means swap layer mixers to map
+	 *   Left LM to Right INTF. On later HW this was relaxed.
+	 * - In DSC Merge mode, the physical encoder has already registered
+	 *   PP0 as the master, to switch to right-only we would have to
+	 *   reprogram to be driven by PP1 instead.
+	 * To support both cases, we prefer to support the mixer swap solution.
+	 */
+	if (!encoder_in_dsc_merge)
+		return;
+
+	is_right_only = sde_kms_rect_is_null(&cstate->lm_roi[0]) &&
+			!sde_kms_rect_is_null(&cstate->lm_roi[1]);
+
+	if (is_right_only && !sde_crtc->mixers_swapped) {
+		/* right-only update swap mixers */
+		swap(sde_crtc->mixers[0], sde_crtc->mixers[1]);
+		sde_crtc->mixers_swapped = true;
+	} else if (!is_right_only && sde_crtc->mixers_swapped) {
+		/* left-only or full update, swap back */
+		swap(sde_crtc->mixers[0], sde_crtc->mixers[1]);
+		sde_crtc->mixers_swapped = false;
+	}
+
+	SDE_DEBUG("%s: right_only %d swapped %d, mix0->lm%d, mix1->lm%d\n",
+			sde_crtc->name, is_right_only, sde_crtc->mixers_swapped,
+			sde_crtc->mixers[0].hw_lm->idx - LM_0,
+			sde_crtc->mixers[1].hw_lm->idx - LM_0);
+	SDE_EVT32(DRMID(crtc), is_right_only, sde_crtc->mixers_swapped,
+			sde_crtc->mixers[0].hw_lm->idx - LM_0,
+			sde_crtc->mixers[1].hw_lm->idx - LM_0);
+}
+
 /**
  * _sde_crtc_blend_setup - configure crtc mixers
  * @crtc: Pointer to drm crtc structure
@@ -1226,6 +1340,8 @@
 			lm->ops.clear_dim_layer(lm);
 	}
 
+	_sde_crtc_swap_mixers_for_right_partial_update(crtc);
+
 	/* initialize stage cfg */
 	memset(&sde_crtc->stage_cfg, 0, sizeof(struct sde_hw_stage_cfg));
 
@@ -1534,26 +1650,28 @@
 {
 	struct sde_drm_dim_layer_v1 dim_layer_v1;
 	struct sde_drm_dim_layer_cfg *user_cfg;
+	struct sde_hw_dim_layer *dim_layer;
 	u32 count, i;
 
 	if (!cstate) {
 		SDE_ERROR("invalid cstate\n");
 		return;
 	}
+	dim_layer = cstate->dim_layer;
 
 	if (!usr_ptr) {
-		SDE_DEBUG("dim layer data removed\n");
+		SDE_DEBUG("dim_layer data removed\n");
 		return;
 	}
 
 	if (copy_from_user(&dim_layer_v1, usr_ptr, sizeof(dim_layer_v1))) {
-		SDE_ERROR("failed to copy dim layer data\n");
+		SDE_ERROR("failed to copy dim_layer data\n");
 		return;
 	}
 
 	count = dim_layer_v1.num_layers;
-	if (!count || (count > SDE_MAX_DIM_LAYERS)) {
-		SDE_ERROR("invalid number of Dim Layers:%d", count);
+	if (count > SDE_MAX_DIM_LAYERS) {
+		SDE_ERROR("invalid number of dim_layers:%d", count);
 		return;
 	}
 
@@ -1561,22 +1679,31 @@
 	cstate->num_dim_layers = count;
 	for (i = 0; i < count; i++) {
 		user_cfg = &dim_layer_v1.layer_cfg[i];
-		cstate->dim_layer[i].flags = user_cfg->flags;
-		cstate->dim_layer[i].stage = user_cfg->stage + SDE_STAGE_0;
 
-		cstate->dim_layer[i].rect.x = user_cfg->rect.x1;
-		cstate->dim_layer[i].rect.y = user_cfg->rect.y1;
-		cstate->dim_layer[i].rect.w = user_cfg->rect.x2 -
-						user_cfg->rect.x1 + 1;
-		cstate->dim_layer[i].rect.h = user_cfg->rect.y2 -
-						user_cfg->rect.y1 + 1;
+		dim_layer[i].flags = user_cfg->flags;
+		dim_layer[i].stage = user_cfg->stage + SDE_STAGE_0;
 
-		cstate->dim_layer[i].color_fill = (struct sde_mdss_color) {
+		dim_layer[i].rect.x = user_cfg->rect.x1;
+		dim_layer[i].rect.y = user_cfg->rect.y1;
+		dim_layer[i].rect.w = user_cfg->rect.x2 - user_cfg->rect.x1;
+		dim_layer[i].rect.h = user_cfg->rect.y2 - user_cfg->rect.y1;
+
+		dim_layer[i].color_fill = (struct sde_mdss_color) {
 				user_cfg->color_fill.color_0,
 				user_cfg->color_fill.color_1,
 				user_cfg->color_fill.color_2,
 				user_cfg->color_fill.color_3,
 		};
+
+		SDE_DEBUG("dim_layer[%d] - flags:%d, stage:%d\n",
+				i, dim_layer[i].flags, dim_layer[i].stage);
+		SDE_DEBUG(" rect:{%d,%d,%d,%d}, color:{%d,%d,%d,%d}\n",
+				dim_layer[i].rect.x, dim_layer[i].rect.y,
+				dim_layer[i].rect.w, dim_layer[i].rect.h,
+				dim_layer[i].color_fill.color_0,
+				dim_layer[i].color_fill.color_1,
+				dim_layer[i].color_fill.color_2,
+				dim_layer[i].color_fill.color_3);
 	}
 }
 
@@ -1700,6 +1827,23 @@
 	mutex_unlock(&sde_crtc->crtc_lock);
 }
 
+static void _sde_crtc_setup_is_ppsplit(struct drm_crtc_state *state)
+{
+	int i;
+	struct sde_crtc_state *cstate;
+
+	cstate = to_sde_crtc_state(state);
+
+	cstate->is_ppsplit = false;
+	for (i = 0; i < cstate->num_connectors; i++) {
+		struct drm_connector *conn = cstate->connectors[i];
+
+		if (sde_connector_get_topology_name(conn) ==
+				SDE_RM_TOPOLOGY_PPSPLIT)
+			cstate->is_ppsplit = true;
+	}
+}
+
 static void _sde_crtc_setup_lm_bounds(struct drm_crtc *crtc,
 		struct drm_crtc_state *state)
 {
@@ -1764,6 +1908,7 @@
 
 	if (!sde_crtc->num_mixers) {
 		_sde_crtc_setup_mixers(crtc);
+		_sde_crtc_setup_is_ppsplit(crtc->state);
 		_sde_crtc_setup_lm_bounds(crtc, crtc->state);
 	}
 
@@ -1991,7 +2136,13 @@
 	dev = crtc->dev;
 
 	if (enable) {
-		if (_sde_crtc_power_enable(sde_crtc, true))
+		int ret;
+
+		/* drop lock since power crtc cb may try to re-acquire lock */
+		mutex_unlock(&sde_crtc->crtc_lock);
+		ret = _sde_crtc_power_enable(sde_crtc, true);
+		mutex_lock(&sde_crtc->crtc_lock);
+		if (ret)
 			return;
 
 		list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
@@ -2012,7 +2163,11 @@
 
 			sde_encoder_register_vblank_callback(enc, NULL, NULL);
 		}
+
+		/* drop lock since power crtc cb may try to re-acquire lock */
+		mutex_unlock(&sde_crtc->crtc_lock);
 		_sde_crtc_power_enable(sde_crtc, false);
+		mutex_lock(&sde_crtc->crtc_lock);
 	}
 }
 
@@ -2486,6 +2641,7 @@
 
 	mixer_width = sde_crtc_mixer_width(sde_crtc, mode);
 
+	_sde_crtc_setup_is_ppsplit(state);
 	_sde_crtc_setup_lm_bounds(crtc, state);
 
 	 /* get plane state for all drm planes associated with crtc state */
@@ -2508,9 +2664,10 @@
 		/* check dim layer stage with every plane */
 		for (i = 0; i < cstate->num_dim_layers; i++) {
 			if (pstates[cnt].stage == cstate->dim_layer[i].stage) {
-				SDE_ERROR("plane%d/dimlayer in same stage:%d\n",
-						plane->base.id,
-						cstate->dim_layer[i].stage);
+				SDE_ERROR(
+					"plane:%d/dim_layer:%i-same stage:%d\n",
+					plane->base.id, i,
+					cstate->dim_layer[i].stage);
 				rc = -EINVAL;
 				goto end;
 			}
@@ -2781,16 +2938,18 @@
 	msm_property_install_blob(&sde_crtc->property_info, "capabilities",
 		DRM_MODE_PROP_IMMUTABLE, CRTC_PROP_INFO);
 
-	if (catalog->has_dim_layer) {
-		msm_property_install_volatile_range(&sde_crtc->property_info,
-			"dim_layer_v1", 0x0, 0, ~0, 0, CRTC_PROP_DIM_LAYER_V1);
-	}
-
 	msm_property_install_volatile_range(&sde_crtc->property_info,
 		"sde_drm_roi_v1", 0x0, 0, ~0, 0, CRTC_PROP_ROI_V1);
 
 	sde_kms_info_reset(info);
 
+	if (catalog->has_dim_layer) {
+		msm_property_install_volatile_range(&sde_crtc->property_info,
+			"dim_layer_v1", 0x0, 0, ~0, 0, CRTC_PROP_DIM_LAYER_V1);
+		sde_kms_info_add_keyint(info, "dim_layer_v1_max_layers",
+				SDE_MAX_DIM_LAYERS);
+	}
+
 	sde_kms_info_add_keyint(info, "hw_version", catalog->hwversion);
 	sde_kms_info_add_keyint(info, "max_linewidth",
 			catalog->max_mixer_width);
@@ -2990,6 +3149,7 @@
 	struct drm_display_mode *mode;
 	struct drm_framebuffer *fb;
 	struct drm_plane_state *state;
+	struct sde_crtc_state *cstate;
 
 	int i, out_width;
 
@@ -2998,6 +3158,7 @@
 
 	sde_crtc = s->private;
 	crtc = &sde_crtc->base;
+	cstate = to_sde_crtc_state(crtc->state);
 
 	mutex_lock(&sde_crtc->crtc_lock);
 	mode = &crtc->state->adjusted_mode;
@@ -3022,6 +3183,23 @@
 
 	seq_puts(s, "\n");
 
+	for (i = 0; i < cstate->num_dim_layers; i++) {
+		struct sde_hw_dim_layer *dim_layer = &cstate->dim_layer[i];
+
+		seq_printf(s, "\tdim_layer:%d] stage:%d flags:%d\n",
+				i, dim_layer->stage, dim_layer->flags);
+		seq_printf(s, "\tdst_x:%d dst_y:%d dst_w:%d dst_h:%d\n",
+				dim_layer->rect.x, dim_layer->rect.y,
+				dim_layer->rect.w, dim_layer->rect.h);
+		seq_printf(s,
+			"\tcolor_0:%d color_1:%d color_2:%d color_3:%d\n",
+				dim_layer->color_fill.color_0,
+				dim_layer->color_fill.color_1,
+				dim_layer->color_fill.color_2,
+				dim_layer->color_fill.color_3);
+		seq_puts(s, "\n");
+	}
+
 	drm_atomic_crtc_for_each_plane(plane, crtc) {
 		pstate = to_sde_plane_state(plane->state);
 		state = plane->state;
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 6a22115..9ef6f25 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -108,6 +108,8 @@
  * @name          : ASCII description of this crtc
  * @num_ctls      : Number of ctl paths in use
  * @num_mixers    : Number of mixers in use
+ * @mixers_swapped: Whether the mixers have been swapped for a left/right-only
+ *                  update, especially in the case of DSC merge.
  * @mixers        : List of active mixers
  * @event         : Pointer to last received drm vblank event. If there is a
  *                  pending vblank event, this will be non-null.
@@ -147,6 +149,7 @@
 	/* HW Resources reserved for the crtc */
 	u32 num_ctls;
 	u32 num_mixers;
+	bool mixers_swapped;
 	struct sde_crtc_mixer mixers[CRTC_DUAL_MIXERS];
 
 	struct drm_pending_vblank_event *event;
@@ -251,6 +254,7 @@
  * @num_connectors: Number of associated drm connectors
  * @intf_mode     : Interface mode of the primary connector
  * @rsc_client    : sde rsc client when mode is valid
+ * @is_ppsplit    : Whether current topology requires PPSplit special handling
  * @crtc_roi      : Current CRTC ROI. Possibly sub-rectangle of mode.
  *                  Origin top left of CRTC.
  * @lm_bounds     : LM boundaries based on current mode full resolution, no ROI.
@@ -277,6 +281,7 @@
 	struct sde_rsc_client *rsc_client;
 	bool rsc_update;
 
+	bool is_ppsplit;
 	struct sde_rect crtc_roi;
 	struct sde_rect lm_bounds[CRTC_DUAL_MIXERS];
 	struct sde_rect lm_roi[CRTC_DUAL_MIXERS];
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index f11ba51..e3ad960 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -123,8 +123,11 @@
  * @cur_master:		Pointer to the current master in this mode. Optimization
  *			Only valid after enable. Cleared as disable.
  * @hw_pp		Handle to the pingpong blocks used for the display. No.
- *                      pingpong blocks can be different than num_phys_encs.
+ *			pingpong blocks can be different than num_phys_encs.
  * @hw_dsc:		Array of DSC block handles used for the display.
+ * @intfs_swapped:	Whether or not the phys_enc interfaces have been swapped
+ *			for right-only partial update cases, such as pingpong
+ *			split, where the virtual pingpong does not generate IRQs
  * @crtc_vblank_cb:	Callback into the upper layer / CRTC for
  *			notification of the VBLANK
  * @crtc_vblank_cb_data:	Data from upper layer for VBLANK notification
@@ -155,6 +158,8 @@
  * @topology:                   topology of the display
  * @mode_set_complete:          flag to indicate modeset completion
  * @rsc_cfg:			rsc configuration
+ * @cur_conn_roi:		current connector roi
+ * @prv_conn_roi:		previous connector roi, to skip DSC setup if unchanged
  */
 struct sde_encoder_virt {
 	struct drm_encoder base;
@@ -169,6 +174,8 @@
 	struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
 	struct sde_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
 
+	bool intfs_swapped;
+
 	void (*crtc_vblank_cb)(void *);
 	void *crtc_vblank_cb_data;
 
@@ -195,17 +202,51 @@
 	bool mode_set_complete;
 
 	struct sde_encoder_rsc_config rsc_cfg;
+	struct sde_rect cur_conn_roi;
+	struct sde_rect prv_conn_roi;
 };
 
 #define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base)
 
-inline bool _sde_is_dsc_enabled(struct sde_encoder_virt *sde_enc)
+bool sde_encoder_is_dsc_enabled(struct drm_encoder *drm_enc)
+
 {
-	struct msm_compression_info *comp_info = &sde_enc->disp_info.comp_info;
+	struct sde_encoder_virt *sde_enc;
+	struct msm_compression_info *comp_info;
+
+	if (!drm_enc)
+		return false;
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	comp_info = &sde_enc->disp_info.comp_info;
 
 	return (comp_info->comp_type == MSM_DISPLAY_COMPRESSION_DSC);
 }
 
+bool sde_encoder_is_dsc_merge(struct drm_encoder *drm_enc)
+{
+	enum sde_rm_topology_name topology;
+	struct sde_encoder_virt *sde_enc;
+	struct drm_connector *drm_conn;
+
+	if (!drm_enc)
+		return false;
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	if (!sde_enc->cur_master)
+		return false;
+
+	drm_conn = sde_enc->cur_master->connector;
+	if (!drm_conn)
+		return false;
+
+	topology = sde_connector_get_topology_name(drm_conn);
+	if (topology == SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE)
+		return true;
+
+	return false;
+}
+
 static inline int _sde_encoder_power_enable(struct sde_encoder_virt *sde_enc,
 								bool enable)
 {
@@ -320,7 +361,22 @@
 
 	sde_enc = to_sde_encoder_virt(phys_enc->parent);
 	hw_mdptop = phys_enc->hw_mdptop;
-	cfg.en = phys_enc->split_role != ENC_ROLE_SOLO;
+
+	/*
+	 * disable split modes since the encoder will be operating as the only
+	 * encoder, either for the entire use case (for example, single DSI)
+	 * or for this frame in the case of a left/right-only partial
+	 * update.
+	 */
+	if (phys_enc->split_role == ENC_ROLE_SOLO) {
+		if (hw_mdptop->ops.setup_split_pipe)
+			hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
+		if (hw_mdptop->ops.setup_pp_split)
+			hw_mdptop->ops.setup_pp_split(hw_mdptop, &cfg);
+		return;
+	}
+
+	cfg.en = true;
 	cfg.mode = phys_enc->intf_mode;
 	cfg.intf = interface;
 
@@ -334,8 +390,7 @@
 	else
 		cfg.pp_split_slave = INTF_MAX;
 
-	if (phys_enc->split_role != ENC_ROLE_SLAVE) {
-		/* master/solo encoder */
+	if (phys_enc->split_role == ENC_ROLE_MASTER) {
 		SDE_DEBUG_ENC(sde_enc, "enable %d\n", cfg.en);
 
 		if (hw_mdptop->ops.setup_split_pipe)
@@ -555,8 +610,14 @@
 
 static void _sde_encoder_dsc_pipe_cfg(struct sde_hw_dsc *hw_dsc,
 		struct sde_hw_pingpong *hw_pp, struct msm_display_dsc_info *dsc,
-		u32 common_mode, bool ich_reset)
+		u32 common_mode, bool ich_reset, bool enable)
 {
+	if (!enable) {
+		if (hw_pp->ops.disable_dsc)
+			hw_pp->ops.disable_dsc(hw_pp);
+		return;
+	}
+
 	if (hw_dsc->ops.dsc_config)
 		hw_dsc->ops.dsc_config(hw_dsc, dsc, common_mode, ich_reset);
 
@@ -570,9 +631,27 @@
 		hw_pp->ops.enable_dsc(hw_pp);
 }
 
+static void _sde_encoder_get_connector_roi(
+		struct sde_encoder_virt *sde_enc,
+		struct sde_rect *merged_conn_roi)
+{
+	struct drm_connector *drm_conn;
+	struct sde_connector_state *c_state;
+
+	if (!sde_enc || !merged_conn_roi)
+		return;
+
+	drm_conn = sde_enc->phys_encs[0]->connector;
+
+	if (!drm_conn || !drm_conn->state)
+		return;
+
+	c_state = to_sde_connector_state(drm_conn->state);
+	sde_kms_rect_merge_rectangles(&c_state->rois, merged_conn_roi);
+}
+
 static int _sde_encoder_dsc_1_lm_1_enc_1_intf(struct sde_encoder_virt *sde_enc)
 {
-	int pic_width, pic_height;
 	int this_frame_slices;
 	int intf_ip_w, enc_ip_w;
 	int ich_res, dsc_common_mode = 0;
@@ -580,22 +659,18 @@
 	struct sde_hw_pingpong *hw_pp = sde_enc->hw_pp[0];
 	struct sde_hw_dsc *hw_dsc = sde_enc->hw_dsc[0];
 	struct sde_encoder_phys *enc_master = sde_enc->cur_master;
-	struct sde_hw_mdp *hw_mdp_top  = enc_master->hw_mdptop;
+	const struct sde_rect *roi = &sde_enc->cur_conn_roi;
 	struct msm_display_dsc_info *dsc =
 		&sde_enc->disp_info.comp_info.dsc_info;
 
-	if (dsc == NULL || hw_dsc == NULL || hw_pp == NULL ||
-						hw_mdp_top == NULL) {
+	if (dsc == NULL || hw_dsc == NULL || hw_pp == NULL || !enc_master) {
 		SDE_ERROR_ENC(sde_enc, "invalid params for DSC\n");
 		return -EINVAL;
 	}
 
-	pic_width = dsc->pic_width;
-	pic_height = dsc->pic_height;
+	_sde_encoder_dsc_update_pic_dim(dsc, roi->w, roi->h);
 
-	_sde_encoder_dsc_update_pic_dim(dsc, pic_width, pic_height);
-
-	this_frame_slices = pic_width / dsc->slice_width;
+	this_frame_slices = roi->w / dsc->slice_width;
 	intf_ip_w = this_frame_slices * dsc->slice_width;
 	_sde_encoder_dsc_pclk_param_calc(dsc, intf_ip_w);
 
@@ -608,132 +683,208 @@
 		dsc_common_mode = DSC_MODE_VIDEO;
 
 	SDE_DEBUG_ENC(sde_enc, "pic_w: %d pic_h: %d mode:%d\n",
-		pic_width, pic_height, dsc_common_mode);
-	SDE_EVT32(DRMID(&sde_enc->base), pic_width, pic_height,
-			dsc_common_mode);
+		roi->w, roi->h, dsc_common_mode);
+	SDE_EVT32(DRMID(&sde_enc->base), roi->w, roi->h, dsc_common_mode);
 
 	_sde_encoder_dsc_pipe_cfg(hw_dsc, hw_pp, dsc, dsc_common_mode,
-			ich_res);
+			ich_res, true);
 
 	return 0;
 }
-static int _sde_encoder_dsc_2_lm_2_enc_2_intf(struct sde_encoder_virt *sde_enc)
+static int _sde_encoder_dsc_2_lm_2_enc_2_intf(struct sde_encoder_virt *sde_enc,
+		struct sde_encoder_kickoff_params *params)
 {
-	int pic_width, pic_height;
 	int this_frame_slices;
 	int intf_ip_w, enc_ip_w;
 	int ich_res, dsc_common_mode;
 
 	struct sde_encoder_phys *enc_master = sde_enc->cur_master;
-	struct sde_hw_dsc *l_hw_dsc = sde_enc->hw_dsc[0];
-	struct sde_hw_dsc *r_hw_dsc = sde_enc->hw_dsc[1];
-	struct sde_hw_pingpong *l_hw_pp = sde_enc->hw_pp[0];
-	struct sde_hw_pingpong *r_hw_pp = sde_enc->hw_pp[1];
-	struct sde_hw_mdp *hw_mdp_top  = enc_master->hw_mdptop;
-	struct msm_display_dsc_info *dsc =
-		&sde_enc->disp_info.comp_info.dsc_info;
+	const struct sde_rect *roi = &sde_enc->cur_conn_roi;
+	struct sde_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
+	struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
+	struct msm_display_dsc_info dsc[MAX_CHANNELS_PER_ENC];
+	bool half_panel_partial_update;
+	int i;
 
-	if (l_hw_dsc == NULL || r_hw_dsc == NULL || hw_mdp_top == NULL ||
-		l_hw_pp == NULL || r_hw_pp == NULL) {
-		SDE_ERROR_ENC(sde_enc, "invalid params for DSC\n");
-		return -EINVAL;
+	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+		hw_pp[i] = sde_enc->hw_pp[i];
+		hw_dsc[i] = sde_enc->hw_dsc[i];
+
+		if (!hw_pp[i] || !hw_dsc[i]) {
+			SDE_ERROR_ENC(sde_enc, "invalid params for DSC\n");
+			return -EINVAL;
+		}
 	}
 
-	pic_width = dsc->pic_width * sde_enc->display_num_of_h_tiles;
-	pic_height = dsc->pic_height;
+	half_panel_partial_update =
+			hweight_long(params->affected_displays) == 1;
 
-	_sde_encoder_dsc_update_pic_dim(dsc, pic_width, pic_height);
-
-	this_frame_slices = pic_width / dsc->slice_width;
-	intf_ip_w = this_frame_slices * dsc->slice_width;
-
-	intf_ip_w /= 2;
-	_sde_encoder_dsc_pclk_param_calc(dsc, intf_ip_w);
-
-	enc_ip_w = intf_ip_w;
-	_sde_encoder_dsc_initial_line_calc(dsc, enc_ip_w);
-
-	ich_res = _sde_encoder_dsc_ich_reset_override_needed(false, dsc);
-
-	dsc_common_mode = DSC_MODE_SPLIT_PANEL;
+	dsc_common_mode = 0;
+	if (!half_panel_partial_update)
+		dsc_common_mode |= DSC_MODE_SPLIT_PANEL;
 	if (enc_master->intf_mode == INTF_MODE_VIDEO)
 		dsc_common_mode |= DSC_MODE_VIDEO;
 
-	SDE_DEBUG_ENC(sde_enc, "pic_w: %d pic_h: %d mode:%d\n",
-		pic_width, pic_height, dsc_common_mode);
-	SDE_EVT32(DRMID(&sde_enc->base), pic_width, pic_height,
-			dsc_common_mode);
+	memcpy(&dsc[0], &sde_enc->disp_info.comp_info.dsc_info, sizeof(dsc[0]));
+	memcpy(&dsc[1], &sde_enc->disp_info.comp_info.dsc_info, sizeof(dsc[1]));
 
-	_sde_encoder_dsc_pipe_cfg(l_hw_dsc, l_hw_pp, dsc, dsc_common_mode,
-			ich_res);
-	_sde_encoder_dsc_pipe_cfg(r_hw_dsc, r_hw_pp, dsc, dsc_common_mode,
-			ich_res);
+	/*
+	 * Since both DSC blocks use the same pic dimensions, set the same
+	 * pic dimensions in both DSC structures.
+	 */
+	_sde_encoder_dsc_update_pic_dim(&dsc[0], roi->w, roi->h);
+	_sde_encoder_dsc_update_pic_dim(&dsc[1], roi->w, roi->h);
+
+	this_frame_slices = roi->w / dsc[0].slice_width;
+	intf_ip_w = this_frame_slices * dsc[0].slice_width;
+
+	if (!half_panel_partial_update)
+		intf_ip_w /= 2;
+
+	/*
+	 * In this topology, when both interfaces are active they carry the
+	 * same load, so intf_ip_w will be the same.
+	 */
+	_sde_encoder_dsc_pclk_param_calc(&dsc[0], intf_ip_w);
+	_sde_encoder_dsc_pclk_param_calc(&dsc[1], intf_ip_w);
+
+	/*
+	 * In this topology, since there is no dsc_merge, the uncompressed
+	 * input to the encoder and the interface is the same.
+	 */
+	enc_ip_w = intf_ip_w;
+	_sde_encoder_dsc_initial_line_calc(&dsc[0], enc_ip_w);
+	_sde_encoder_dsc_initial_line_calc(&dsc[1], enc_ip_w);
+
+	/*
+	 * ich_reset_override_needed should be called only after updating
+	 * the pic dimensions via _sde_encoder_dsc_update_pic_dim.
+	 */
+	ich_res = _sde_encoder_dsc_ich_reset_override_needed(
+			half_panel_partial_update, &dsc[0]);
+
+	SDE_DEBUG_ENC(sde_enc, "pic_w: %d pic_h: %d mode:%d\n",
+			roi->w, roi->h, dsc_common_mode);
+
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		bool active = !!((1 << i) & params->affected_displays);
+
+		SDE_EVT32(DRMID(&sde_enc->base), roi->w, roi->h,
+				dsc_common_mode, i, active);
+		_sde_encoder_dsc_pipe_cfg(hw_dsc[i], hw_pp[i], &dsc[i],
+				dsc_common_mode, ich_res, active);
+	}
 
 	return 0;
 }
 
-static int _sde_encoder_dsc_2_lm_2_enc_1_intf(struct sde_encoder_virt *sde_enc)
+static int _sde_encoder_dsc_2_lm_2_enc_1_intf(struct sde_encoder_virt *sde_enc,
+		struct sde_encoder_kickoff_params *params)
 {
-	int pic_width, pic_height;
 	int this_frame_slices;
 	int intf_ip_w, enc_ip_w;
 	int ich_res, dsc_common_mode;
 
 	struct sde_encoder_phys *enc_master = sde_enc->cur_master;
-	struct sde_hw_dsc *l_hw_dsc = sde_enc->hw_dsc[0];
-	struct sde_hw_dsc *r_hw_dsc = sde_enc->hw_dsc[1];
-	struct sde_hw_pingpong *l_hw_pp = sde_enc->hw_pp[0];
-	struct sde_hw_pingpong *r_hw_pp = sde_enc->hw_pp[1];
-	struct sde_hw_mdp *hw_mdp_top  = enc_master->hw_mdptop;
+	const struct sde_rect *roi = &sde_enc->cur_conn_roi;
+	struct sde_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
+	struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
 	struct msm_display_dsc_info *dsc =
 		&sde_enc->disp_info.comp_info.dsc_info;
+	bool half_panel_partial_update;
+	int i;
 
-	if (l_hw_dsc == NULL || r_hw_dsc == NULL || hw_mdp_top == NULL ||
-					l_hw_pp == NULL || r_hw_pp == NULL) {
-		SDE_ERROR_ENC(sde_enc, "invalid params for DSC\n");
-		return -EINVAL;
+	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+		hw_pp[i] = sde_enc->hw_pp[i];
+		hw_dsc[i] = sde_enc->hw_dsc[i];
+
+		if (!hw_pp[i] || !hw_dsc[i]) {
+			SDE_ERROR_ENC(sde_enc, "invalid params for DSC\n");
+			return -EINVAL;
+		}
 	}
 
-	pic_width = dsc->pic_width;
-	pic_height = dsc->pic_height;
-	_sde_encoder_dsc_update_pic_dim(dsc, pic_width, pic_height);
+	half_panel_partial_update =
+			hweight_long(params->affected_displays) == 1;
 
-	this_frame_slices = pic_width / dsc->slice_width;
+	dsc_common_mode = 0;
+	if (!half_panel_partial_update)
+		dsc_common_mode |= DSC_MODE_SPLIT_PANEL | DSC_MODE_MULTIPLEX;
+	if (enc_master->intf_mode == INTF_MODE_VIDEO)
+		dsc_common_mode |= DSC_MODE_VIDEO;
+
+	_sde_encoder_dsc_update_pic_dim(dsc, roi->w, roi->h);
+
+	this_frame_slices = roi->w / dsc->slice_width;
 	intf_ip_w = this_frame_slices * dsc->slice_width;
 	_sde_encoder_dsc_pclk_param_calc(dsc, intf_ip_w);
 
 	/*
-	 * when using 2 encoders for the same stream, no. of slices
-	 * need to be same on both the encoders.
+	 * dsc merge case: when using 2 encoders for the same stream,
+	 * the no. of slices needs to be the same on both encoders.
 	 */
 	enc_ip_w = intf_ip_w / 2;
 	_sde_encoder_dsc_initial_line_calc(dsc, enc_ip_w);
 
-	ich_res = _sde_encoder_dsc_ich_reset_override_needed(false, dsc);
-
-	dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL;
-	if (enc_master->intf_mode == INTF_MODE_VIDEO)
-		dsc_common_mode |= DSC_MODE_VIDEO;
+	ich_res = _sde_encoder_dsc_ich_reset_override_needed(
+			half_panel_partial_update, dsc);
 
 	SDE_DEBUG_ENC(sde_enc, "pic_w: %d pic_h: %d mode:%d\n",
-		pic_width, pic_height, dsc_common_mode);
-	SDE_EVT32(DRMID(&sde_enc->base), pic_width, pic_height,
-			dsc_common_mode);
+			roi->w, roi->h, dsc_common_mode);
+	SDE_EVT32(DRMID(&sde_enc->base), roi->w, roi->h,
+			dsc_common_mode, i, params->affected_displays);
 
-	_sde_encoder_dsc_pipe_cfg(l_hw_dsc, l_hw_pp, dsc, dsc_common_mode,
-			ich_res);
-	_sde_encoder_dsc_pipe_cfg(r_hw_dsc, r_hw_pp, dsc, dsc_common_mode,
-			ich_res);
+	_sde_encoder_dsc_pipe_cfg(hw_dsc[0], hw_pp[0], dsc, dsc_common_mode,
+			ich_res, true);
+	_sde_encoder_dsc_pipe_cfg(hw_dsc[1], hw_pp[1], dsc, dsc_common_mode,
+			ich_res, !half_panel_partial_update);
 
 	return 0;
 }
 
-static int _sde_encoder_dsc_setup(struct sde_encoder_virt *sde_enc)
+static int _sde_encoder_update_roi(struct drm_encoder *drm_enc)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct drm_connector *drm_conn;
+	struct drm_display_mode *adj_mode;
+	struct sde_rect roi;
+
+	if (!drm_enc || !drm_enc->crtc || !drm_enc->crtc->state)
+		return -EINVAL;
+	sde_enc = to_sde_encoder_virt(drm_enc);
+
+	if (!sde_enc->cur_master)
+		return -EINVAL;
+
+	adj_mode = &sde_enc->base.crtc->state->adjusted_mode;
+	drm_conn = sde_enc->cur_master->connector;
+
+	_sde_encoder_get_connector_roi(sde_enc, &roi);
+	if (sde_kms_rect_is_null(&roi)) {
+		roi.w = adj_mode->hdisplay;
+		roi.h = adj_mode->vdisplay;
+	}
+
+	memcpy(&sde_enc->prv_conn_roi, &sde_enc->cur_conn_roi,
+			sizeof(sde_enc->prv_conn_roi));
+	memcpy(&sde_enc->cur_conn_roi, &roi, sizeof(sde_enc->cur_conn_roi));
+
+	return 0;
+}
+
+static int _sde_encoder_dsc_setup(struct sde_encoder_virt *sde_enc,
+		struct sde_encoder_kickoff_params *params)
 {
 	enum sde_rm_topology_name topology;
-	struct drm_connector *drm_conn = sde_enc->phys_encs[0]->connector;
+	struct drm_connector *drm_conn;
 	int ret = 0;
 
+	if (!sde_enc || !params || !sde_enc->phys_encs[0] ||
+			!sde_enc->phys_encs[0]->connector)
+		return -EINVAL;
+
+	drm_conn = sde_enc->phys_encs[0]->connector;
+
 	topology = sde_connector_get_topology_name(drm_conn);
 	if (topology == SDE_RM_TOPOLOGY_NONE) {
 		SDE_ERROR_ENC(sde_enc, "topology not set yet\n");
@@ -743,15 +894,19 @@
 	SDE_DEBUG_ENC(sde_enc, "\n");
 	SDE_EVT32(DRMID(&sde_enc->base));
 
+	if (sde_kms_rect_is_equal(&sde_enc->cur_conn_roi,
+			&sde_enc->prv_conn_roi))
+		return ret;
+
 	switch (topology) {
 	case SDE_RM_TOPOLOGY_SINGLEPIPE_DSC:
 		ret = _sde_encoder_dsc_1_lm_1_enc_1_intf(sde_enc);
 		break;
 	case SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE:
-		ret = _sde_encoder_dsc_2_lm_2_enc_1_intf(sde_enc);
+		ret = _sde_encoder_dsc_2_lm_2_enc_1_intf(sde_enc, params);
 		break;
 	case SDE_RM_TOPOLOGY_DUALPIPE_DSC:
-		ret = _sde_encoder_dsc_2_lm_2_enc_2_intf(sde_enc);
+		ret = _sde_encoder_dsc_2_lm_2_enc_2_intf(sde_enc, params);
 		break;
 	default:
 		SDE_ERROR_ENC(sde_enc, "No DSC support for topology %d",
@@ -1217,7 +1372,6 @@
 	struct sde_kms *sde_kms;
 	struct sde_hw_mdp *hw_mdptop;
 	int i = 0;
-	int ret = 0;
 	struct sde_watchdog_te_status te_cfg = { 0 };
 
 	if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
@@ -1252,12 +1406,6 @@
 				sde_enc->cur_master->hw_mdptop,
 				sde_kms->catalog);
 
-	if (_sde_is_dsc_enabled(sde_enc)) {
-		ret = _sde_encoder_dsc_setup(sde_enc);
-		if (ret)
-			SDE_ERROR_ENC(sde_enc, "failed to setup DSC:%d\n", ret);
-	}
-
 	if (hw_mdptop->ops.setup_vsync_sel) {
 		for (i = 0; i < sde_enc->num_phys_encs; i++)
 			te_cfg.ppnumber[i] = sde_enc->hw_pp[i]->idx;
@@ -1763,6 +1911,65 @@
 	spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
 }
 
+static void _sde_encoder_ppsplit_swap_intf_for_right_only_update(
+		struct drm_encoder *drm_enc,
+		unsigned long *affected_displays,
+		int num_active_phys)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct sde_encoder_phys *master;
+	enum sde_rm_topology_name topology;
+	bool is_right_only;
+
+	if (!drm_enc || !affected_displays)
+		return;
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	master = sde_enc->cur_master;
+	if (!master || !master->connector)
+		return;
+
+	topology = sde_connector_get_topology_name(master->connector);
+	if (topology != SDE_RM_TOPOLOGY_PPSPLIT)
+		return;
+
+	/*
+	 * For pingpong split, the slave pingpong won't generate IRQs. For
+	 * right-only updates, we can't swap pingpongs or simply swap the
+	 * master/slave assignment; we actually have to swap the interfaces
+	 * so that the master physical encoder will use a pingpong/interface
+	 * that generates irqs on which to wait.
+	 */
+	is_right_only = !test_bit(0, affected_displays) &&
+			test_bit(1, affected_displays);
+
+	if (is_right_only && !sde_enc->intfs_swapped) {
+		/* right-only update swap interfaces */
+		swap(sde_enc->phys_encs[0]->intf_idx,
+				sde_enc->phys_encs[1]->intf_idx);
+		sde_enc->intfs_swapped = true;
+	} else if (!is_right_only && sde_enc->intfs_swapped) {
+		/* left-only or full update, swap back */
+		swap(sde_enc->phys_encs[0]->intf_idx,
+				sde_enc->phys_encs[1]->intf_idx);
+		sde_enc->intfs_swapped = false;
+	}
+
+	SDE_DEBUG_ENC(sde_enc,
+			"right_only %d swapped %d phys0->intf%d, phys1->intf%d\n",
+			is_right_only, sde_enc->intfs_swapped,
+			sde_enc->phys_encs[0]->intf_idx - INTF_0,
+			sde_enc->phys_encs[1]->intf_idx - INTF_0);
+	SDE_EVT32(DRMID(drm_enc), is_right_only, sde_enc->intfs_swapped,
+			sde_enc->phys_encs[0]->intf_idx - INTF_0,
+			sde_enc->phys_encs[1]->intf_idx - INTF_0,
+			*affected_displays);
+
+	/* ppsplit always uses the master since the pp slave has no irqs */
+	if (num_active_phys == 1)
+		*affected_displays = BIT(0);
+}
+
 static void _sde_encoder_update_master(struct drm_encoder *drm_enc,
 		struct sde_encoder_kickoff_params *params)
 {
@@ -1785,6 +1992,10 @@
 	SDE_DEBUG_ENC(sde_enc, "affected_displays 0x%lx num_active_phys %d\n",
 			params->affected_displays, num_active_phys);
 
+	/* for left/right only update, ppsplit master switches interface */
+	_sde_encoder_ppsplit_swap_intf_for_right_only_update(drm_enc,
+			&params->affected_displays, num_active_phys);
+
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		enum sde_enc_split_role prv_role, new_role;
 		bool active;
@@ -1814,6 +2025,9 @@
 		SDE_DEBUG_ENC(sde_enc, "pp %d role prv %d new %d active %d\n",
 				phys->hw_pp->idx - PINGPONG_0, prv_role,
 				phys->split_role, active);
+		SDE_EVT32(DRMID(drm_enc), params->affected_displays,
+				phys->hw_pp->idx - PINGPONG_0, prv_role,
+				phys->split_role, active, num_active_phys);
 	}
 }
 
@@ -1892,6 +2106,8 @@
 
 	_sde_encoder_update_master(drm_enc, params);
 
+	_sde_encoder_update_roi(drm_enc);
+
 	if (sde_enc->cur_master && sde_enc->cur_master->connector) {
 		rc = sde_connector_pre_kickoff(sde_enc->cur_master->connector);
 		if (rc)
@@ -1899,6 +2115,12 @@
 					sde_enc->cur_master->connector->base.id,
 					rc);
 	}
+
+	if (sde_encoder_is_dsc_enabled(drm_enc)) {
+		rc = _sde_encoder_dsc_setup(sde_enc, params);
+		if (rc)
+			SDE_ERROR_ENC(sde_enc, "failed to setup DSC: %d\n", rc);
+	}
 }
 
 void sde_encoder_kickoff(struct drm_encoder *drm_enc)
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
index 7292a12..6ef245b 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -149,6 +149,20 @@
 void sde_encoder_virt_restore(struct drm_encoder *encoder);
 
 /**
+ * sde_encoder_is_dsc_enabled - check if encoder is in DSC mode
+ * @drm_enc: Pointer to drm encoder object
+ * @Return: true if encoder is in DSC mode
+ */
+bool sde_encoder_is_dsc_enabled(struct drm_encoder *drm_enc);
+
+/**
+ * sde_encoder_is_dsc_merge - check if encoder is in DSC merge mode
+ * @drm_enc: Pointer to drm encoder object
+ * @Return: true if encoder is in DSC merge mode
+ */
+bool sde_encoder_is_dsc_merge(struct drm_encoder *drm_enc);
+
+/**
  * sde_encoder_init - initialize virtual encoder object
  * @dev:        Pointer to drm device structure
  * @disp_info:  Pointer to display information structure
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index 572bd9e..53f5b89 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -615,7 +615,8 @@
 			phys_enc->hw_pp->idx - PINGPONG_0);
 	drm_mode_debug_printmodeline(&phys_enc->cached_mode);
 
-	_sde_encoder_phys_cmd_update_intf_cfg(phys_enc);
+	if (!_sde_encoder_phys_is_ppsplit_slave(phys_enc))
+		_sde_encoder_phys_cmd_update_intf_cfg(phys_enc);
 	sde_encoder_phys_cmd_tearcheck_config(phys_enc);
 }
 
@@ -832,15 +833,28 @@
 		struct sde_encoder_phys *phys_enc,
 		enum sde_enc_split_role role)
 {
-	struct sde_encoder_phys_cmd *cmd_enc =
-		to_sde_encoder_phys_cmd(phys_enc);
-	enum sde_enc_split_role old_role = phys_enc->split_role;
+	struct sde_encoder_phys_cmd *cmd_enc;
+	enum sde_enc_split_role old_role;
+	bool is_ppsplit;
+
+	if (!phys_enc)
+		return;
+
+	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
+	old_role = phys_enc->split_role;
+	is_ppsplit = _sde_encoder_phys_is_ppsplit(phys_enc);
+
+	phys_enc->split_role = role;
 
 	SDE_DEBUG_CMDENC(cmd_enc, "old role %d new role %d\n",
 			old_role, role);
 
-	phys_enc->split_role = role;
-	if (role == ENC_ROLE_SKIP || role == old_role)
+	/*
+	 * ppsplit solo needs to reprogram because the intf may have swapped
+	 * without the role changing on left/right-only back-to-back commits
+	 */
+	if (!(is_ppsplit && role == ENC_ROLE_SOLO) &&
+			(role == old_role || role == ENC_ROLE_SKIP))
 		return;
 
 	sde_encoder_helper_split_config(phys_enc, phys_enc->intf_idx);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_rot.c b/drivers/gpu/drm/msm/sde/sde_hw_rot.c
index d15b804..d5f03a6a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_rot.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_rot.c
@@ -563,6 +563,10 @@
 	case SDE_HW_ROT_CMD_COMMIT:
 		cmd_type = SDE_ROTATOR_INLINE_CMD_COMMIT;
 		break;
+	case SDE_HW_ROT_CMD_START:
+		cmd_type = SDE_ROTATOR_INLINE_CMD_START;
+		priv_handle = data->priv_handle;
+		break;
 	case SDE_HW_ROT_CMD_CLEANUP:
 		cmd_type = SDE_ROTATOR_INLINE_CMD_CLEANUP;
 		priv_handle = data->priv_handle;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_rot.h b/drivers/gpu/drm/msm/sde/sde_hw_rot.h
index a4f5b49..e490052 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_rot.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_rot.h
@@ -24,11 +24,13 @@
  * enum sde_hw_rot_cmd_type - type of rotator hardware command
 * @SDE_HW_ROT_CMD_VALIDATE: validate rotator command; do not commit
  * @SDE_HW_ROT_CMD_COMMIT: commit/execute rotator command
+ * @SDE_HW_ROT_CMD_START: mdp is ready to start
  * @SDE_HW_ROT_CMD_CLEANUP: cleanup rotator command after it is done
  */
 enum sde_hw_rot_cmd_type {
 	SDE_HW_ROT_CMD_VALIDATE,
 	SDE_HW_ROT_CMD_COMMIT,
+	SDE_HW_ROT_CMD_START,
 	SDE_HW_ROT_CMD_CLEANUP,
 };
 
diff --git a/drivers/gpu/drm/msm/sde/sde_kms_utils.c b/drivers/gpu/drm/msm/sde/sde_kms_utils.c
index dcc0bd5..b77d64d 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms_utils.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms_utils.c
@@ -166,7 +166,7 @@
 	r = min((r1->x + r1->w), (r2->x + r2->w));
 	b = min((r1->y + r1->h), (r2->y + r2->h));
 
-	if (r < l || b < t) {
+	if (r <= l || b <= t) {
 		memset(result, 0, sizeof(*result));
 	} else {
 		result->x = l;
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index ad207d6..1721c67 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -2041,6 +2041,23 @@
 }
 
 /**
+ * sde_plane_rot_flush - signal the inline rotator to start during plane flush
+ * @plane: Pointer to drm plane
+ * @pstate: Pointer to sde plane state
+ */
+static void sde_plane_rot_flush(struct drm_plane *plane,
+		struct sde_plane_state *pstate)
+{
+	if (!plane || !pstate || !pstate->rot.rot_hw ||
+			!pstate->rot.rot_hw->ops.commit)
+		return;
+
+	pstate->rot.rot_hw->ops.commit(pstate->rot.rot_hw,
+			&pstate->rot.rot_cmd,
+			SDE_HW_ROT_CMD_START);
+}
+
+/**
  * sde_plane_rot_destroy_state - destroy state for rotator stage
  * @plane: Pointer to drm plane
  * @state: Pointer to state to be destroyed
@@ -2709,13 +2726,15 @@
 void sde_plane_flush(struct drm_plane *plane)
 {
 	struct sde_plane *psde;
+	struct sde_plane_state *pstate;
 
-	if (!plane) {
+	if (!plane || !plane->state) {
 		SDE_ERROR("invalid plane\n");
 		return;
 	}
 
 	psde = to_sde_plane(plane);
+	pstate = to_sde_plane_state(plane->state);
 
 	/*
 	 * These updates have to be done immediately before the plane flush
@@ -2736,7 +2755,10 @@
 
 	/* flag h/w flush complete */
 	if (plane->state)
-		to_sde_plane_state(plane->state)->pending = false;
+		pstate->pending = false;
+
+	/* signal inline rotator start */
+	sde_plane_rot_flush(plane, pstate);
 }
 
 static int sde_plane_sspp_atomic_update(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c
index cab7e0f..46d1df8 100644
--- a/drivers/gpu/drm/msm/sde_rsc.c
+++ b/drivers/gpu/drm/msm/sde_rsc.c
@@ -123,6 +123,7 @@
 void sde_rsc_client_destroy(struct sde_rsc_client *client)
 {
 	struct sde_rsc_priv *rsc;
+	enum sde_rsc_state state;
 
 	if (!client) {
 		pr_debug("invalid client\n");
@@ -138,9 +139,13 @@
 		goto end;
 
 	mutex_lock(&rsc->client_lock);
-	if (client->current_state != SDE_RSC_IDLE_STATE)
+	state = client->current_state;
+	mutex_unlock(&rsc->client_lock);
+
+	if (state != SDE_RSC_IDLE_STATE)
 		sde_rsc_client_state_update(client, SDE_RSC_IDLE_STATE,
 								NULL, -1);
+	mutex_lock(&rsc->client_lock);
 	list_del_init(&client->list);
 	mutex_unlock(&rsc->client_lock);
 
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index e5cfd69..e982afe 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -114,6 +114,7 @@
 #define A6XX_RBBM_INT_0_STATUS                   0x201
 #define A6XX_RBBM_STATUS                         0x210
 #define A6XX_RBBM_STATUS3                        0x213
+#define A6XX_RBBM_VBIF_GX_RESET_STATUS           0x215
 #define A6XX_RBBM_PERFCTR_CP_0_LO                0x400
 #define A6XX_RBBM_PERFCTR_CP_0_HI                0x401
 #define A6XX_RBBM_PERFCTR_CP_1_LO                0x402
@@ -782,6 +783,7 @@
 #define A6XX_GMU_GX_SPTPRAC_POWER_CONTROL	0x1A881
 #define A6XX_GMU_CM3_ITCM_START			0x1B400
 #define A6XX_GMU_CM3_DTCM_START			0x1C400
+#define A6XX_GMU_NMI_CONTROL_STATUS		0x1CBF0
 #define A6XX_GMU_BOOT_SLUMBER_OPTION		0x1CBF8
 #define A6XX_GMU_GX_VOTE_IDX			0x1CBF9
 #define A6XX_GMU_MX_VOTE_IDX			0x1CBFA
@@ -793,6 +795,7 @@
 #define A6XX_GMU_CM3_BOOT_CONFIG		0x1F801
 #define A6XX_GMU_CM3_FW_BUSY			0x1F81A
 #define A6XX_GMU_CM3_FW_INIT_RESULT		0x1F81C
+#define A6XX_GMU_CM3_CFG			0x1F82D
 #define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL	0x1F8C0
 #define A6XX_GMU_PWR_COL_INTER_FRAME_HYST	0x1F8C1
 #define A6XX_GMU_PWR_COL_SPTPRAC_HYST		0x1F8C2
@@ -802,6 +805,8 @@
 #define A6XX_GMU_RPMH_HYST_CTRL			0x1F8E9
 #define A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE    0x1F8EC
 #define A6XX_GMU_BOOT_KMD_LM_HANDSHAKE		0x1F9F0
+#define A6XX_GMU_LLM_GLM_SLEEP_CTRL		0x1F957
+#define A6XX_GMU_LLM_GLM_SLEEP_STATUS		0x1F958
 
 /* HFI registers*/
 #define A6XX_GMU_ALWAYS_ON_COUNTER_L		0x1F888
@@ -831,6 +836,10 @@
 #define A6XX_GMU_HOST2GMU_INTR_INFO_3		0x1F99E
 #define A6XX_GMU_GENERAL_7			0x1F9CC
 
+/* ISENSE registers */
+#define A6XX_GMU_ISENSE_CTRL			0x1F95D
+#define A6XX_GPU_CS_ENABLE_REG			0x23120
+
 #define A6XX_GMU_AO_INTERRUPT_EN		0x23B03
 #define A6XX_GMU_AO_HOST_INTERRUPT_CLR		0x23B04
 #define A6XX_GMU_AO_HOST_INTERRUPT_STATUS	0x23B05
@@ -865,6 +874,10 @@
 #define A6XX_RSCC_OVERRIDE_START_ADDR			0x23500
 #define A6XX_RSCC_SEQ_BUSY_DRV0				0x23501
 #define A6XX_RSCC_SEQ_MEM_0_DRV0			0x23580
+#define A6XX_RSCC_TCS0_DRV0_STATUS			0x23746
+#define A6XX_RSCC_TCS1_DRV0_STATUS                      0x238AE
+#define A6XX_RSCC_TCS2_DRV0_STATUS                      0x23A16
+#define A6XX_RSCC_TCS3_DRV0_STATUS                      0x23B7E
 
 /* GPU PDC sequencer registers in AOSS.RPMh domain */
 #define	PDC_GPU_ENABLE_PDC			0x21140
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 2e92335..627b351 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -56,9 +56,6 @@
 #define DRIVER_VERSION_MAJOR   3
 #define DRIVER_VERSION_MINOR   1
 
-/* Number of times to try hard reset */
-#define NUM_TIMES_RESET_RETRY 5
-
 #define KGSL_LOG_LEVEL_DEFAULT 3
 
 static void adreno_input_work(struct work_struct *work);
@@ -514,8 +511,6 @@
 	.id_table = adreno_input_ids,
 };
 
-static int adreno_soft_reset(struct kgsl_device *device);
-
 /*
  * _soft_reset() - Soft reset GPU
  * @adreno_dev: Pointer to adreno device
@@ -526,7 +521,7 @@
  * all the HW logic, restores GPU registers to default state and
  * flushes out pending VBIF transactions.
  */
-static void _soft_reset(struct adreno_device *adreno_dev)
+static int _soft_reset(struct adreno_device *adreno_dev)
 {
 	struct adreno_gpudev *gpudev  = ADRENO_GPU_DEVICE(adreno_dev);
 	unsigned int reg;
@@ -555,6 +550,8 @@
 
 	if (gpudev->regulator_enable)
 		gpudev->regulator_enable(adreno_dev);
+
+	return 0;
 }
 
 
@@ -1623,7 +1620,7 @@
  * Power up the GPU and initialize it.  If priority is specified then elevate
  * the thread priority for the duration of the start operation
  */
-static int adreno_start(struct kgsl_device *device, int priority)
+int adreno_start(struct kgsl_device *device, int priority)
 {
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	int nice = task_nice(current);
@@ -1640,38 +1637,6 @@
 	return ret;
 }
 
-/**
- * adreno_vbif_clear_pending_transactions() - Clear transactions in VBIF pipe
- * @device: Pointer to the device whose VBIF pipe is to be cleared
- */
-static int adreno_vbif_clear_pending_transactions(struct kgsl_device *device)
-{
-	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
-	unsigned int mask = gpudev->vbif_xin_halt_ctrl0_mask;
-	unsigned int val;
-	unsigned long wait_for_vbif;
-	int ret = 0;
-
-	adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0, mask);
-	/* wait for the transactions to clear */
-	wait_for_vbif = jiffies + msecs_to_jiffies(100);
-	while (1) {
-		adreno_readreg(adreno_dev,
-			ADRENO_REG_VBIF_XIN_HALT_CTRL1, &val);
-		if ((val & mask) == mask)
-			break;
-		if (time_after(jiffies, wait_for_vbif)) {
-			KGSL_DRV_ERR(device,
-				"Wait limit reached for VBIF XIN Halt\n");
-			ret = -ETIMEDOUT;
-			break;
-		}
-	}
-	adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0, 0);
-	return ret;
-}
-
 static void adreno_set_active_ctxs_null(struct adreno_device *adreno_dev)
 {
 	int i;
@@ -2340,12 +2305,20 @@
  * The GPU hardware is reset but we never pull power so we can skip
  * a lot of the standard adreno_stop/adreno_start sequence
  */
-static int adreno_soft_reset(struct kgsl_device *device)
+int adreno_soft_reset(struct kgsl_device *device)
 {
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 	int ret;
 
+	if (gpudev->oob_set) {
+		ret = gpudev->oob_set(adreno_dev, OOB_CPINIT_SET_MASK,
+				OOB_CPINIT_CHECK_MASK,
+				OOB_CPINIT_CLEAR_MASK);
+		if (ret)
+			return ret;
+	}
+
 	kgsl_pwrctrl_change_state(device, KGSL_STATE_AWARE);
 	adreno_set_active_ctxs_null(adreno_dev);
 
@@ -2359,7 +2332,15 @@
 	adreno_perfcounter_save(adreno_dev);
 
 	/* Reset the GPU */
-	_soft_reset(adreno_dev);
+	if (gpudev->soft_reset)
+		ret = gpudev->soft_reset(adreno_dev);
+	else
+		ret = _soft_reset(adreno_dev);
+	if (ret) {
+		if (gpudev->oob_clear)
+			gpudev->oob_clear(adreno_dev, OOB_CPINIT_CLEAR_MASK);
+		return ret;
+	}
 
 	/* Set the page table back to the default page table */
 	adreno_ringbuffer_set_global(adreno_dev, 0);
@@ -2401,6 +2382,9 @@
 	/* Restore physical performance counter values after soft reset */
 	adreno_perfcounter_restore(adreno_dev);
 
+	if (gpudev->oob_clear)
+		gpudev->oob_clear(adreno_dev, OOB_CPINIT_CLEAR_MASK);
+
 	return ret;
 }
 
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 78cecd0..91f03d0 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -159,10 +159,12 @@
 #define KGSL_END_OF_PROFILE_IDENTIFIER	0x2DEFADE2
 #define KGSL_PWRON_FIXUP_IDENTIFIER	0x2AFAFAFA
 
+/* Number of times to try hard reset */
+#define NUM_TIMES_RESET_RETRY 5
+
 /* One cannot wait forever for the core to idle, so set an upper limit to the
  * amount of time to wait for the core to go idle
  */
-
 #define ADRENO_IDLE_TIMEOUT (20 * 1000)
 
 #define ADRENO_UCHE_GMEM_BASE	0x100000
@@ -204,6 +206,7 @@
 #define ADRENO_TIMEOUT_FAULT BIT(2)
 #define ADRENO_IOMMU_PAGE_FAULT BIT(3)
 #define ADRENO_PREEMPT_FAULT BIT(4)
+#define ADRENO_GMU_FAULT BIT(5)
 
 #define ADRENO_SPTP_PC_CTRL 0
 #define ADRENO_PPD_CTRL     1
@@ -499,6 +502,7 @@
  * attached and enabled
  * @ADRENO_DEVICE_CACHE_FLUSH_TS_SUSPENDED - Set if a CACHE_FLUSH_TS irq storm
  * is in progress
+ * @ADRENO_DEVICE_HARD_RESET - Set if soft reset fails and hard reset is needed
  */
 enum adreno_device_flags {
 	ADRENO_DEVICE_PWRON = 0,
@@ -515,6 +519,7 @@
 	ADRENO_DEVICE_GPMU_INITIALIZED = 11,
 	ADRENO_DEVICE_ISDB_ENABLED = 12,
 	ADRENO_DEVICE_CACHE_FLUSH_TS_SUSPENDED = 13,
+	ADRENO_DEVICE_HARD_RESET = 14,
 };
 
 /**
@@ -864,6 +869,8 @@
 	int (*wait_for_gmu_idle)(struct adreno_device *);
 	const char *(*iommu_fault_block)(struct adreno_device *adreno_dev,
 				unsigned int fsynr1);
+	int (*reset)(struct kgsl_device *, int fault);
+	int (*soft_reset)(struct adreno_device *);
 };
 
 /**
@@ -952,6 +959,8 @@
 extern int adreno_wake_nice;
 extern unsigned int adreno_wake_timeout;
 
+int adreno_start(struct kgsl_device *device, int priority);
+int adreno_soft_reset(struct kgsl_device *device);
 long adreno_ioctl(struct kgsl_device_private *dev_priv,
 		unsigned int cmd, unsigned long arg);
 
@@ -1707,4 +1716,37 @@
 	kgsl_active_count_put(KGSL_DEVICE(adreno_dev));
 }
 
+/**
+ * adreno_vbif_clear_pending_transactions() - Clear transactions in VBIF pipe
+ * @device: Pointer to the device whose VBIF pipe is to be cleared
+ */
+static inline int adreno_vbif_clear_pending_transactions(
+	struct kgsl_device *device)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+	unsigned int mask = gpudev->vbif_xin_halt_ctrl0_mask;
+	unsigned int val;
+	unsigned long wait_for_vbif;
+	int ret = 0;
+
+	adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0, mask);
+	/* wait for the transactions to clear */
+	wait_for_vbif = jiffies + msecs_to_jiffies(100);
+	while (1) {
+		adreno_readreg(adreno_dev,
+			ADRENO_REG_VBIF_XIN_HALT_CTRL1, &val);
+		if ((val & mask) == mask)
+			break;
+		if (time_after(jiffies, wait_for_vbif)) {
+			KGSL_DRV_ERR(device,
+				"Wait limit reached for VBIF XIN Halt\n");
+			ret = -ETIMEDOUT;
+			break;
+		}
+	}
+	adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0, 0);
+	return ret;
+}
+
 #endif /*__ADRENO_H */
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index e157e7b..6e025c8 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -29,8 +29,6 @@
 #include "kgsl_gmu.h"
 #include "kgsl_trace.h"
 
-#define OOB_REQUEST_TIMEOUT	10 /* ms */
-
 #define A6XX_CP_RB_CNTL_DEFAULT (((ilog2(4) << 8) & 0x1F00) | \
 		(ilog2(KGSL_RB_DWORDS >> 1) & 0x3F))
 
@@ -789,8 +787,10 @@
 	wmb();
 }
 
-#define GMU_START_TIMEOUT 10	/* ms */
-#define GPU_START_TIMEOUT 100	/* ms */
+#define GMU_START_TIMEOUT	10	/* ms */
+#define GPU_START_TIMEOUT	100	/* ms */
+#define GPU_RESET_TIMEOUT	1	/* ms */
+#define GPU_RESET_TIMEOUT_US	10	/* us */
 
 /*
  * timed_poll_check() - polling *gmu* register at given offset until
@@ -962,7 +962,9 @@
 			GPU_START_TIMEOUT,
 			check_mask)) {
 		ret = -ETIMEDOUT;
-		dev_err(&gmu->pdev->dev, "OOB set timed out\n");
+		dev_err(&gmu->pdev->dev,
+			"OOB set timed out, mask %x\n", set_mask);
+		WARN_ON(true);
 	}
 
 	kgsl_gmu_regwrite(device, A6XX_GMU_GMU2HOST_INTR_CLR, clear_mask);
@@ -1073,7 +1075,7 @@
 	ret = regulator_enable(gmu->gx_gdsc);
 	if (ret) {
 		dev_err(&gmu->pdev->dev,
-				"Failed to turn on GPU HM HS\n");
+			"Failed to turn on GPU HM HS\n");
 		return ret;
 	}
 
@@ -1119,11 +1121,14 @@
 
 	/* If GMU does not control HM we must */
 	if (gmu->idle_level < GPU_HW_IFPC) {
+
 		ret = a6xx_hm_enable(ADRENO_DEVICE(device));
 		if (ret) {
 			dev_err(&gmu->pdev->dev, "Failed to power on GPU HM\n");
 			return ret;
 		}
+
+
 	}
 
 	/* If GMU does not control SPTPRAC we must */
@@ -1162,19 +1167,6 @@
 }
 
 /*
- * a6xx_hm_sptprac_control() - Turn HM and SPTPRAC on or off
- * @device: Pointer to KGSL device
- * @on: True to turn on or false to turn off
- */
-static int a6xx_hm_sptprac_control(struct kgsl_device *device, bool on)
-{
-	if (on)
-		return a6xx_hm_sptprac_enable(device);
-	else
-		return a6xx_hm_sptprac_disable(device);
-}
-
-/*
  * a6xx_gfx_rail_on() - request GMU to power GPU at given OPP.
  * @device: Pointer to KGSL device
  *
@@ -1206,6 +1198,8 @@
 	return ret;
 }
 
+#define GMU_POWER_STATE_SLUMBER 15
+
 /*
  * a6xx_notify_slumber() - initiate request to GMU to prepare to slumber
  * @device: Pointer to KGSL device
@@ -1281,13 +1275,12 @@
 	kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 0);
 
 	/* Turn on the HM and SPTP head switches */
-	ret = a6xx_hm_sptprac_control(device, true);
+	ret = a6xx_hm_sptprac_enable(device);
 
 	return ret;
-
 error_rsc:
 	dev_err(dev, "GPU RSC sequence stuck in waking up GPU\n");
-	return -EINVAL;
+	return -EINVAL;
 }
 
 static int a6xx_rpmh_power_off_gpu(struct kgsl_device *device)
@@ -1296,7 +1289,7 @@
 	int val, ret = 0;
 
 	/* Turn off the SPTP and HM head switches */
-	ret = a6xx_hm_sptprac_control(device, false);
+	ret = a6xx_hm_sptprac_disable(device);
 
 	/* RSC sleep sequence */
 	kgsl_gmu_regwrite(device, A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0, 1);
@@ -1339,7 +1332,12 @@
 	struct gmu_memdesc *mem_addr = gmu->hfi_mem;
 	int ret, i;
 
-	if (boot_state == GMU_COLD_BOOT || boot_state == GMU_RESET) {
+	switch (boot_state) {
+	case GMU_COLD_BOOT:
+		/* Turn on the HM and SPTP head switches */
+		ret = a6xx_hm_sptprac_enable(device);
+		if (ret)
+			return ret;
 
 		/* Turn on TCM retention */
 		kgsl_gmu_regwrite(device, A6XX_GMU_GENERAL_7, 1);
@@ -1347,7 +1345,7 @@
 		if (!test_and_set_bit(GMU_BOOT_INIT_DONE, &gmu->flags)) {
 			_load_gmu_rpmh_ucode(device);
 			/* Turn on the HM and SPTP head switches */
-			ret = a6xx_hm_sptprac_control(device, true);
+			ret = a6xx_hm_sptprac_enable(device);
 			if (ret)
 				return ret;
 		} else {
@@ -1371,10 +1369,19 @@
 					gmu->load_mode);
 			return -EINVAL;
 		}
-	} else {
+		break;
+	case GMU_WARM_BOOT:
 		ret = a6xx_rpmh_power_on_gpu(device);
 		if (ret)
 			return ret;
+		break;
+	case GMU_RESET:
+		/* Turn on the HM and SPTP head switches */
+		ret = a6xx_hm_sptprac_enable(device);
+		if (ret)
+			return ret;
+	default:
+		break;
 	}
 
 	/* Clear init result to make sure we are getting fresh value */
@@ -1394,8 +1401,7 @@
 	if (ret)
 		return ret;
 
-	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)
-			&& boot_state == GMU_COLD_BOOT) {
+	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
 		ret = a6xx_gfx_rail_on(device);
 		if (ret) {
 			a6xx_oob_clear(adreno_dev,
@@ -1425,7 +1431,7 @@
 		unsigned int perf_idx, unsigned int bw_idx)
 {
 	struct hfi_dcvs_cmd dcvs_cmd = {
-		.ack_type = ACK_BLOCK,
+		.ack_type = ACK_NONBLOCK,
 		.freq = {
 			.perf_idx = perf_idx,
 			.clkset_opt = OPTION_AT_LEAST,
@@ -1439,10 +1445,6 @@
 	union gpu_perf_vote vote;
 	int ret;
 
-	if (device->state == KGSL_STATE_INIT ||
-			device->state == KGSL_STATE_SUSPEND)
-		dcvs_cmd.ack_type = ACK_NONBLOCK;
-
 	kgsl_gmu_regwrite(device, A6XX_GMU_DCVS_ACK_OPTION, dcvs_cmd.ack_type);
 
 	vote.fvote = dcvs_cmd.freq;
@@ -1469,43 +1471,6 @@
 	return ret;
 }
 
-/*
- * a6xx_rpmh_gpu_pwrctrl() - GPU power control via RPMh/GMU interface
- * @adreno_dev: Pointer to adreno device
- * @mode: requested power mode
- * @arg1: first argument for mode control
- * @arg2: second argument for mode control
- */
-static int a6xx_rpmh_gpu_pwrctrl(struct adreno_device *adreno_dev,
-		unsigned int mode, unsigned int arg1, unsigned int arg2)
-{
-	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-	struct gmu_device *gmu = &device->gmu;
-	int ret;
-
-	switch (mode) {
-	case GMU_FW_START:
-		ret = a6xx_gmu_fw_start(device, arg1);
-		break;
-	case GMU_FW_STOP:
-		ret = a6xx_rpmh_power_off_gpu(device);
-		break;
-	case GMU_DCVS_NOHFI:
-		ret = a6xx_gmu_dcvs_nohfi(device, arg1, arg2);
-		break;
-	case GMU_NOTIFY_SLUMBER:
-		ret = a6xx_notify_slumber(device);
-		break;
-	default:
-		dev_err(&gmu->pdev->dev,
-				"unsupported GMU power ctrl mode:%d\n", mode);
-		ret = -EINVAL;
-		break;
-	}
-
-	return ret;
-}
-
 static bool a6xx_hw_isidle(struct adreno_device *adreno_dev)
 {
 	unsigned int reg;
@@ -1585,6 +1550,290 @@
 			ADRENO_FW(adreno_dev, ADRENO_FW_SQE));
 }
 
+#define VBIF_RESET_ACK_TIMEOUT	100
+#define VBIF_RESET_ACK_MASK	0x00f0
+
+static int a6xx_soft_reset(struct adreno_device *adreno_dev)
+{
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	unsigned int reg;
+
+	/*
+	 * For the soft reset case with GMU enabled this part is done
+	 * by the GMU firmware
+	 */
+	if (kgsl_gmu_isenabled(device) &&
+		!test_bit(ADRENO_DEVICE_HARD_RESET, &adreno_dev->priv))
+		return 0;
+
+
+	adreno_writereg(adreno_dev, ADRENO_REG_RBBM_SW_RESET_CMD, 1);
+	/*
+	 * Do a dummy read to get a brief read cycle delay for the
+	 * reset to take effect
+	 */
+	adreno_readreg(adreno_dev, ADRENO_REG_RBBM_SW_RESET_CMD, &reg);
+	adreno_writereg(adreno_dev, ADRENO_REG_RBBM_SW_RESET_CMD, 0);
+
+	/* Check VBIF status after reset */
+	if (timed_poll_check(device,
+			A6XX_RBBM_VBIF_GX_RESET_STATUS,
+			VBIF_RESET_ACK_MASK,
+			VBIF_RESET_ACK_TIMEOUT,
+			VBIF_RESET_ACK_MASK))
+		return -ETIMEDOUT;
+
+	a6xx_sptprac_enable(adreno_dev);
+
+	return 0;
+}
+
+#define A6XX_STATE_OF_CHILD             (BIT(4) | BIT(5))
+#define A6XX_IDLE_FULL_LLM              BIT(0)
+#define A6XX_WAKEUP_ACK                 BIT(1)
+#define A6XX_IDLE_FULL_ACK              BIT(0)
+#define A6XX_VBIF_XIN_HALT_CTRL1_ACKS   (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
+static void a6xx_isense_disable(struct kgsl_device *device)
+{
+	unsigned int val;
+	const struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+	if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM))
+		return;
+
+	kgsl_gmu_regread(device, A6XX_GPU_CS_ENABLE_REG, &val);
+	if (val) {
+		kgsl_gmu_regwrite(device, A6XX_GPU_CS_ENABLE_REG, 0);
+		kgsl_gmu_regwrite(device, A6XX_GMU_ISENSE_CTRL, 0);
+	}
+}
+
+static int a6xx_llm_glm_handshake(struct kgsl_device *device)
+{
+	unsigned int val;
+	const struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct gmu_device *gmu = &device->gmu;
+
+	if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM))
+		return 0;
+
+	kgsl_gmu_regread(device, A6XX_GMU_LLM_GLM_SLEEP_CTRL, &val);
+	if (!(val & A6XX_STATE_OF_CHILD)) {
+		kgsl_gmu_regrmw(device, A6XX_GMU_LLM_GLM_SLEEP_CTRL, 0, BIT(4));
+		kgsl_gmu_regrmw(device, A6XX_GMU_LLM_GLM_SLEEP_CTRL, 0,
+				A6XX_IDLE_FULL_LLM);
+		if (timed_poll_check(device, A6XX_GMU_LLM_GLM_SLEEP_STATUS,
+				A6XX_IDLE_FULL_ACK, GPU_RESET_TIMEOUT,
+				A6XX_IDLE_FULL_ACK)) {
+			dev_err(&gmu->pdev->dev, "LLM-GLM handshake failed\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int a6xx_complete_rpmh_votes(struct kgsl_device *device)
+{
+	int ret = 0;
+
+	if (!kgsl_gmu_isenabled(device))
+		return ret;
+
+	ret |= timed_poll_check(device, A6XX_RSCC_TCS0_DRV0_STATUS, BIT(0),
+			GPU_RESET_TIMEOUT, BIT(0));
+	ret |= timed_poll_check(device, A6XX_RSCC_TCS1_DRV0_STATUS, BIT(0),
+			GPU_RESET_TIMEOUT, BIT(0));
+	ret |= timed_poll_check(device, A6XX_RSCC_TCS2_DRV0_STATUS, BIT(0),
+			GPU_RESET_TIMEOUT, BIT(0));
+	ret |= timed_poll_check(device, A6XX_RSCC_TCS3_DRV0_STATUS, BIT(0),
+			GPU_RESET_TIMEOUT, BIT(0));
+
+	return ret;
+}
+
+static int a6xx_gmu_suspend(struct kgsl_device *device)
+{
+	/* Max GX clients on A6xx is 2: GMU and KMD */
+	int ret = 0, max_client_num = 2;
+	struct gmu_device *gmu = &device->gmu;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+	/* The two calls below are no-ops unless the LM feature is enabled */
+	/* Disable ISENSE if it's on */
+	a6xx_isense_disable(device);
+
+	/* LLM-GLM handshake sequence */
+	a6xx_llm_glm_handshake(device);
+
+	/* If SPTP_RAC is on, turn off SPTP_RAC HS */
+	a6xx_sptprac_disable(adreno_dev);
+
+	/* Disconnect GPU from BUS; cleared and reconnected after reset */
+	adreno_vbif_clear_pending_transactions(device);
+	/* Unnecessary: a6xx_soft_reset(adreno_dev); */
+
+	/* Check no outstanding RPMh voting */
+	a6xx_complete_rpmh_votes(device);
+
+	if (gmu->idle_level < GPU_HW_IFPC) {
+		/* HM GDSC is controlled by KGSL */
+		ret = a6xx_hm_disable(ADRENO_DEVICE(device));
+		if (ret)
+			dev_err(&gmu->pdev->dev,
+				"suspend: fail: power off GPU HM\n");
+	} else if (gmu->gx_gdsc) {
+		if (regulator_is_enabled(gmu->gx_gdsc)) {
+			/*
+			 * Switch gx gdsc control from GMU to CPU; force a
+			 * non-zero reference count in the clk driver so the
+			 * next disable call will turn off the GDSC.
+			 */
+			ret = regulator_enable(gmu->gx_gdsc);
+			if (ret)
+				dev_err(&gmu->pdev->dev,
+					"suspend fail: gx enable\n");
+
+			while (max_client_num) {
+				ret = regulator_disable(gmu->gx_gdsc);
+				if (!regulator_is_enabled(gmu->gx_gdsc))
+					break;
+				max_client_num -= 1;
+			}
+
+			if (!max_client_num)
+				dev_err(&gmu->pdev->dev,
+					"suspend fail: cannot disable gx\n");
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * a6xx_rpmh_gpu_pwrctrl() - GPU power control via RPMh/GMU interface
+ * @adreno_dev: Pointer to adreno device
+ * @mode: requested power mode
+ * @arg1: first argument for mode control
+ * @arg2: second argument for mode control
+ */
+static int a6xx_rpmh_gpu_pwrctrl(struct adreno_device *adreno_dev,
+		unsigned int mode, unsigned int arg1, unsigned int arg2)
+{
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	struct gmu_device *gmu = &device->gmu;
+	int ret;
+
+	switch (mode) {
+	case GMU_FW_START:
+		ret = a6xx_gmu_fw_start(device, arg1);
+		break;
+	case GMU_SUSPEND:
+		ret = a6xx_gmu_suspend(device);
+		break;
+	case GMU_FW_STOP:
+		ret = a6xx_rpmh_power_off_gpu(device);
+		break;
+	case GMU_DCVS_NOHFI:
+		ret = a6xx_gmu_dcvs_nohfi(device, arg1, arg2);
+		break;
+	case GMU_NOTIFY_SLUMBER:
+		ret = a6xx_notify_slumber(device);
+		break;
+	default:
+		dev_err(&gmu->pdev->dev,
+				"unsupported GMU power ctrl mode:%d\n", mode);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+/**
+ * a6xx_reset() - Helper function to reset the GPU
+ * @device: Pointer to the KGSL device structure for the GPU
+ * @fault: Type of fault. Needed to skip soft reset for MMU fault
+ *
+ * Try to reset the GPU to recover from a fault.  First, try to do a low latency
+ * soft reset.  If the soft reset fails for some reason, then bring out the big
+ * guns and toggle the footswitch.
+ */
+static int a6xx_reset(struct kgsl_device *device, int fault)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	int ret = -EINVAL;
+	int i = 0;
+
+	/* Use the regular reset sequence for No GMU */
+	if (!kgsl_gmu_isenabled(device))
+		return adreno_reset(device, fault);
+
+	/* Transition from ACTIVE to RESET state */
+	kgsl_pwrctrl_change_state(device, KGSL_STATE_RESET);
+
+	/* Try soft reset first */
+	if (!(fault & ADRENO_IOMMU_PAGE_FAULT)) {
+		int acked;
+
+		/* Trigger an NMI in the GMU firmware */
+		kgsl_gmu_regwrite(device, A6XX_GMU_NMI_CONTROL_STATUS, 0);
+		kgsl_gmu_regwrite(device, A6XX_GMU_CM3_CFG, (1 << 9));
+
+		for (i = 0; i < 10; i++) {
+			kgsl_gmu_regread(device,
+					A6XX_GMU_NMI_CONTROL_STATUS, &acked);
+
+			/* NMI FW ACK received */
+			if (acked == 0x1)
+				break;
+
+			udelay(100);
+		}
+
+		if (acked)
+			ret = adreno_soft_reset(device);
+		if (ret)
+			KGSL_DEV_ERR_ONCE(device, "Device soft reset failed\n");
+	}
+	if (ret) {
+		/* If soft reset failed/skipped, then pull the power */
+		set_bit(ADRENO_DEVICE_HARD_RESET, &adreno_dev->priv);
+		/* Since the device is officially off now, clear the start bit */
+		clear_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);
+
+		/* Keep trying to start the device until it works */
+		for (i = 0; i < NUM_TIMES_RESET_RETRY; i++) {
+			ret = adreno_start(device, 0);
+			if (!ret)
+				break;
+
+			msleep(20);
+		}
+	}
+
+	clear_bit(ADRENO_DEVICE_HARD_RESET, &adreno_dev->priv);
+
+	if (ret)
+		return ret;
+
+	if (i != 0)
+		KGSL_DRV_WARN(device, "Device hard reset retried %d times\n", i);
+
+	/*
+	 * If active_cnt is non-zero then the system was active before
+	 * going into a reset - put it back in that state
+	 */
+
+	if (atomic_read(&device->active_cnt))
+		kgsl_pwrctrl_change_state(device, KGSL_STATE_ACTIVE);
+	else
+		kgsl_pwrctrl_change_state(device, KGSL_STATE_NAP);
+
+	return ret;
+}
+
 static void a6xx_cp_hw_err_callback(struct adreno_device *adreno_dev, int bit)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -1671,7 +1920,6 @@
 	wmb();
 }
 
-
 /*
  * a6xx_llc_configure_gpu_scid() - Program the sub-cache ID for all GPU blocks
  * @adreno_dev: The adreno device pointer
@@ -2293,7 +2541,6 @@
 				A6XX_GMU_HOST2GMU_INTR_CLR),
 	ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST2GMU_INTR_RAW_INFO,
 				A6XX_GMU_HOST2GMU_INTR_RAW_INFO),
-
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TRUST_CONTROL,
 				A6XX_RBBM_SECVID_TRUST_CNTL),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
@@ -2337,4 +2584,6 @@
 	.hw_isidle = a6xx_hw_isidle, /* Replaced by NULL if GMU is disabled */
 	.wait_for_gmu_idle = a6xx_wait_for_gmu_idle,
 	.iommu_fault_block = a6xx_iommu_fault_block,
+	.reset = a6xx_reset,
+	.soft_reset = a6xx_soft_reset,
 };
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index b831d0d..d01a5e9 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -2183,7 +2183,11 @@
 		kgsl_process_event_group(device, &hung_rb->events);
 	}
 
-	ret = adreno_reset(device, fault);
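+	/* Use the target-specific reset sequence when one is provided */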
+	if (gpudev->reset)
+		ret = gpudev->reset(device, fault);
+	else
+		ret = adreno_reset(device, fault);
+
 	mutex_unlock(&device->mutex);
 	/* if any other fault got in until reset then ignore */
 	atomic_set(&dispatcher->fault, 0);
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index be379e3..ee4e7ef 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -40,6 +40,8 @@
  * that the KGSL module believes a device is idle (has been inactive	*
  * past its timer) and all system resources are released.  SUSPEND is	*
  * requested by the kernel and will be enforced upon all open devices.	*
+ * RESET indicates that a GPU or GMU hang has occurred. KGSL is	*
+ * taking a snapshot or recovering the GPU from the hang.		*
  */
 
 #define KGSL_STATE_NONE		0x00000000
@@ -49,6 +51,7 @@
 #define KGSL_STATE_SUSPEND	0x00000010
 #define KGSL_STATE_AWARE	0x00000020
 #define KGSL_STATE_SLUMBER	0x00000080
+#define KGSL_STATE_RESET	0x00000100
 
 /**
  * enum kgsl_event_results - result codes passed to an event callback when the
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index 8de1a7e..7354e82 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -748,6 +748,7 @@
 {
 	struct gmu_device *gmu = data;
 	struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	unsigned int status = 0;
 
 	adreno_read_gmureg(ADRENO_DEVICE(device),
@@ -756,9 +757,12 @@
 			ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR, status);
 
 	/* Ignore GMU_INT_RSCC_COMP and GMU_INT_DBD WAKEUP interrupts */
-	if (status & GMU_INT_WDOG_BITE)
+	if (status & GMU_INT_WDOG_BITE) {
 		dev_err_ratelimited(&gmu->pdev->dev,
 				"GMU watchdog expired interrupt received\n");
+		adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);
+		adreno_dispatcher_schedule(device);
+	}
 	if (status & GMU_INT_HOST_AHB_BUS_ERR)
 		dev_err_ratelimited(&gmu->pdev->dev,
 				"AHB bus error interrupt received\n");
@@ -775,6 +779,7 @@
 	struct kgsl_hfi *hfi = data;
 	struct gmu_device *gmu = container_of(hfi, struct gmu_device, hfi);
 	struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	unsigned int status = 0;
 
 	adreno_read_gmureg(ADRENO_DEVICE(device),
@@ -784,9 +789,12 @@
 
 	if (status & HFI_IRQ_MSGQ_MASK)
 		tasklet_hi_schedule(&hfi->tasklet);
-	if (status & HFI_IRQ_CM3_FAULT_MASK)
+	if (status & HFI_IRQ_CM3_FAULT_MASK) {
 		dev_err_ratelimited(&gmu->pdev->dev,
 				"GMU CM3 fault interrupt received\n");
+		adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);
+		adreno_dispatcher_schedule(device);
+	}
 	if (status & ~HFI_IRQ_MASK)
 		dev_err_ratelimited(&gmu->pdev->dev,
 				"Unhandled HFI interrupts 0x%lx\n",
@@ -850,14 +858,6 @@
 	}
 
 	if (is_gmu) {
-		if (!devm_request_mem_region(&gmu->pdev->dev, res->start,
-					resource_size(res),
-					res->name)) {
-			dev_err(&gmu->pdev->dev,
-				"GMU regs request mem region failed\n");
-			return -ENOMEM;
-		}
-
 		gmu->reg_phys = res->start;
 		gmu->reg_len = resource_size(res);
 		gmu->reg_virt = devm_ioremap(&gmu->pdev->dev, res->start,
@@ -1253,32 +1253,78 @@
 	return ret;
 }
 
+static int gmu_fast_boot(struct kgsl_device *device)
+{
+	int ret;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+	struct gmu_device *gmu = &device->gmu;
+
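+	/* Abandon pending HFI messages before restarting the GMU */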
+	hfi_stop(gmu);
+	clear_bit(GMU_HFI_ON, &gmu->flags);
+
+	ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_START,
+		GMU_RESET, 0);
+	if (ret)
+		return ret;
+
+	/* FIXME: enable the WD interrupt */
+
+	ret = hfi_start(gmu, GMU_WARM_BOOT);
+	if (ret)
+		return ret;
+
+	ret = gpudev->oob_set(adreno_dev, OOB_CPINIT_SET_MASK,
+			OOB_CPINIT_CHECK_MASK, OOB_CPINIT_CLEAR_MASK);
+
+	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
+		gpudev->oob_clear(adreno_dev,
+				OOB_BOOT_SLUMBER_CLEAR_MASK);
+
+	return ret;
+}
+
+static int gmu_suspend(struct kgsl_device *device)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+	struct gmu_device *gmu = &device->gmu;
+
+	if (!test_bit(GMU_CLK_ON, &gmu->flags))
+		return 0;
+
+	/* Pending messages in all queues are abandoned */
+	hfi_stop(gmu);
+	clear_bit(GMU_HFI_ON, &gmu->flags);
+	gmu_irq_disable(device);
+
+	if (gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_SUSPEND, 0, 0))
+		return -EINVAL;
+
+	gmu_disable_clks(gmu);
+	gmu_disable_gdsc(gmu);
+	return 0;
+}
+
 /* To be called to power on both GPU and GMU */
 int gmu_start(struct kgsl_device *device)
 {
-	int ret = 0;
+	int ret = 0, perf_idx;
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
 	struct gmu_device *gmu = &device->gmu;
 	int bus_level = pwr->pwrlevels[pwr->default_pwrlevel].bus_freq;
 
-	if (!kgsl_gmu_isenabled(device))
-		return 0;
+	switch (device->state) {
+	case KGSL_STATE_INIT:
+	case KGSL_STATE_SUSPEND:
+		WARN_ON(test_bit(GMU_CLK_ON, &gmu->flags));
+		gmu_enable_gdsc(gmu);
+		gmu_enable_clks(gmu);
 
-	if (test_bit(GMU_CLK_ON, &gmu->flags))
-		return 0;
-
-	ret = gmu_enable_gdsc(gmu);
-	if (ret)
-		return ret;
-
-	gmu_enable_clks(gmu);
-
-	if (device->state == KGSL_STATE_INIT ||
-			device->state == KGSL_STATE_SUSPEND) {
 		/* Convert to RPMh frequency index */
-		int perf_idx = gmu->num_gpupwrlevels -
+		perf_idx = gmu->num_gpupwrlevels -
 				pwr->default_pwrlevel - 1;
 
 		/* Vote for 300MHz DDR for GMU to init */
@@ -1305,8 +1351,16 @@
 		ret = gmu_dcvs_set(gmu, perf_idx, bus_level);
 		if (ret)
 			goto error_gpu;
-	} else {
-		int perf_idx = gmu->num_gpupwrlevels - gmu->wakeup_pwrlevel - 1;
+
+		msm_bus_scale_client_update_request(gmu->pcl, 0);
+		break;
+
+	case KGSL_STATE_SLUMBER:
+		WARN_ON(test_bit(GMU_CLK_ON, &gmu->flags));
+		gmu_enable_gdsc(gmu);
+		gmu_enable_clks(gmu);
+
+		perf_idx = gmu->num_gpupwrlevels - gmu->wakeup_pwrlevel - 1;
 
 		ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_START,
 				GMU_WARM_BOOT, 0);
@@ -1325,6 +1379,46 @@
 				goto error_gpu;
 			gmu->wakeup_pwrlevel = pwr->default_pwrlevel;
 		}
+		break;
+
+	case KGSL_STATE_RESET:
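+		/*
+		 * A hard reset tears the GMU down completely and reboots it;
+		 * otherwise take the lighter fast boot path.
+		 */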
+		if (test_bit(ADRENO_DEVICE_HARD_RESET, &adreno_dev->priv)) {
+			gmu_suspend(device);
+			gmu_enable_gdsc(gmu);
+			gmu_enable_clks(gmu);
+
+			perf_idx = gmu->num_gpupwrlevels -
+				pwr->active_pwrlevel - 1;
+
+			bus_level =
+				pwr->pwrlevels[pwr->active_pwrlevel].bus_freq;
+			ret = gpudev->rpmh_gpu_pwrctrl(
+				adreno_dev, GMU_FW_START, GMU_RESET, 0);
+			if (ret)
+				goto error_clks;
+
+			gmu_irq_enable(device);
+
+			ret = hfi_start(gmu, GMU_WARM_BOOT);
+			if (ret)
+				goto error_gpu;
+
+			/* Send DCVS level prior to reset */
+			ret = gmu_dcvs_set(gmu, perf_idx, bus_level);
+			if (ret)
+				goto error_gpu;
+
+			ret = gpudev->oob_set(adreno_dev,
+				OOB_CPINIT_SET_MASK,
+				OOB_CPINIT_CHECK_MASK,
+				OOB_CPINIT_CLEAR_MASK);
+
+		} else {
+			gmu_fast_boot(device);
+		}
+		break;
+	default:
+		break;
 	}
 
 	/*
@@ -1332,30 +1426,20 @@
 	 * In v2, this function call shall move ahead
 	 * of hfi_start() to save power.
 	 */
+	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
+		gpudev->oob_clear(adreno_dev,
+				OOB_BOOT_SLUMBER_CLEAR_MASK);
 
-	if (device->state == KGSL_STATE_INIT ||
-			device->state == KGSL_STATE_SUSPEND) {
-		msm_bus_scale_client_update_request(gmu->pcl, 0);
-		if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
-			gpudev->oob_clear(adreno_dev,
-					OOB_BOOT_SLUMBER_CLEAR_MASK);
-	}
-
-	return 0;
+	return ret;
 
 error_gpu:
 	hfi_stop(gmu);
 	gmu_irq_disable(device);
-	if (device->state == KGSL_STATE_INIT ||
-			device->state == KGSL_STATE_SUSPEND) {
 		if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
 			gpudev->oob_clear(adreno_dev,
 					OOB_BOOT_SLUMBER_CLEAR_MASK);
-	}
 	gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_STOP, 0, 0);
 error_bus:
-	if (device->state == KGSL_STATE_INIT ||
-			device->state == KGSL_STATE_SUSPEND)
 		msm_bus_scale_client_update_request(gmu->pcl, 0);
 error_clks:
 	gmu_disable_clks(gmu);
diff --git a/drivers/gpu/msm/kgsl_gmu.h b/drivers/gpu/msm/kgsl_gmu.h
index 4cfc120..a741beb 100644
--- a/drivers/gpu/msm/kgsl_gmu.h
+++ b/drivers/gpu/msm/kgsl_gmu.h
@@ -139,7 +139,7 @@
 enum gmu_pwrctrl_mode {
 	GMU_FW_START,
 	GMU_FW_STOP,
-	GMU_POWER_RESET,
+	GMU_SUSPEND,
 	GMU_DCVS_NOHFI,
 	GMU_NOTIFY_SLUMBER,
 	INVALID_POWER_CTRL
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 7811079..7ffb42b 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -2493,6 +2493,8 @@
 		/* Force power on to do the stop */
 		status = kgsl_pwrctrl_enable(device);
 	case KGSL_STATE_ACTIVE:
+		/* fall through */
+	case KGSL_STATE_RESET:
 		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
 		del_timer_sync(&device->idle_timer);
 		kgsl_pwrscale_midframe_timer_cancel(device);
@@ -2595,6 +2597,11 @@
 	int status = 0;
 
 	switch (device->state) {
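+	/* Leaving RESET: restart the GMU if this target uses one */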
+	case KGSL_STATE_RESET:
+		if (!kgsl_gmu_isenabled(device))
+			break;
+		status = gmu_start(device);
+		break;
 	case KGSL_STATE_INIT:
 		status = kgsl_pwrctrl_enable(device);
 		break;
@@ -2645,6 +2652,7 @@
 		kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP);
 		/* fallthrough */
 	case KGSL_STATE_SLUMBER:
+	case KGSL_STATE_RESET:
 		break;
 	case KGSL_STATE_AWARE:
 		KGSL_PWR_WARN(device,
@@ -2787,6 +2795,8 @@
 		break;
 	case KGSL_STATE_SUSPEND:
 		status = _suspend(device);
+		break;
+	case KGSL_STATE_RESET:
+		kgsl_pwrctrl_set_state(device, KGSL_STATE_RESET);
 		break;
 	default:
 		KGSL_PWR_INFO(device, "bad state request 0x%x\n", state);
@@ -2838,6 +2848,8 @@
 		return "SUSPEND";
 	case KGSL_STATE_SLUMBER:
 		return "SLUMBER";
+	case KGSL_STATE_RESET:
+		return "RESET";
 	default:
 		break;
 	}
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index dd96670..b91a6b5 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -411,6 +411,7 @@
 #define ARM_SMMU_OPT_FATAL_ASF		(1 << 1)
 #define ARM_SMMU_OPT_SKIP_INIT		(1 << 2)
 #define ARM_SMMU_OPT_DYNAMIC		(1 << 3)
+#define ARM_SMMU_OPT_3LVL_TABLES	(1 << 4)
 	u32				options;
 	enum arm_smmu_arch_version	version;
 	enum arm_smmu_implementation	model;
@@ -529,6 +530,7 @@
 	{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
 	{ ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
 	{ ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
+	{ ARM_SMMU_OPT_3LVL_TABLES, "qcom,use-3-lvl-tables" },
 	{ 0, NULL},
 };
 
@@ -1567,6 +1569,8 @@
 		oas = smmu->ipa_size;
 		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
 			fmt = ARM_64_LPAE_S1;
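+			/*
+			 * Cap the IAS at 39 bits so the LPAE io-pgtable code
+			 * allocates only three levels of page tables.
+			 */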
+			if (smmu->options & ARM_SMMU_OPT_3LVL_TABLES)
+				ias = min(ias, 39UL);
 		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
 			fmt = ARM_32_LPAE_S1;
 			ias = min(ias, 32UL);
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 41515bb..ee50a61 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -299,3 +299,5 @@
 config STM32_EXTI
 	bool
 	select IRQ_DOMAIN
+
+source "drivers/irqchip/qcom/Kconfig"
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 987bd89..450059c 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -75,3 +75,4 @@
 obj-$(CONFIG_EZNPS_GIC)			+= irq-eznps.o
 obj-$(CONFIG_ARCH_ASPEED)		+= irq-aspeed-vic.o
 obj-$(CONFIG_STM32_EXTI) 		+= irq-stm32-exti.o
+obj-$(CONFIG_QTI_PDC)			+= qcom/
diff --git a/drivers/irqchip/qcom/Kconfig b/drivers/irqchip/qcom/Kconfig
new file mode 100644
index 0000000..e4a7a88
--- /dev/null
+++ b/drivers/irqchip/qcom/Kconfig
@@ -0,0 +1,15 @@
+config QTI_PDC
+	bool "QTI PDC"
+	depends on ARCH_QCOM
+	select IRQ_DOMAIN
+	select IRQ_DOMAIN_HIERARCHY
+	help
+	  QTI Power Domain Controller driver to manage and configure wakeup
+	  IRQs.
+
+config QTI_PDC_SDM845
+	bool "QTI PDC SDM845"
+	select QTI_PDC
+	default y if ARCH_SDM845
+	help
+	  QTI Power Domain Controller for SDM845.
diff --git a/drivers/irqchip/qcom/Makefile b/drivers/irqchip/qcom/Makefile
new file mode 100644
index 0000000..1b7856d
--- /dev/null
+++ b/drivers/irqchip/qcom/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_QTI_PDC)			+= pdc.o
+obj-$(CONFIG_QTI_PDC_SDM845)		+= pdc-sdm845.o
diff --git a/drivers/irqchip/qcom/pdc-sdm845.c b/drivers/irqchip/qcom/pdc-sdm845.c
new file mode 100644
index 0000000..178cf1f0
--- /dev/null
+++ b/drivers/irqchip/qcom/pdc-sdm845.c
@@ -0,0 +1,139 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/irqchip.h>
+#include "pdc.h"
+
+static struct pdc_pin sdm845_data[] = {
+	{0, 512}, /* rpmh_wake */
+	{1, 513}, /* ee0_apps_hlos_spmi_periph_irq */
+	{2, 514}, /* ee1_apps_trustzone_spmi_periph_irq */
+	{3, 515}, /* secure_wdog_expired */
+	{4, 516}, /* secure_wdog_bark_irq */
+	{5, 517}, /* aop_wdog_expired_irq */
+	{6, 518}, /* qmp_usb3_lfps_rxterm_irq */
+	{7, 519}, /* qmp_usb3_lfps_rxterm_irq */
+	{8, 520}, /* eud_p0_dmse_int_mx */
+	{9, 521}, /* eud_p0_dpse_int_mx */
+	{10, 522}, /* eud_p1_dmse_int_mx */
+	{11, 523}, /* eud_p1_dpse_int_mx */
+	{12, 524}, /* eud_int_mx[1] */
+	{13, 525}, /* ssc_xpu_irq_summary */
+	{14, 526}, /* wd_bite_apps */
+	{15, 527}, /* ssc_vmidmt_irq_summary */
+	{16, 528}, /* q6ss_irq_out_apps_ipc[4] */
+	{17, 529}, /* not-connected */
+	{18, 530}, /* aoss_pmic_arb_mpu_xpu_summary_irq */
+	{19, 531}, /* apps_pdc_irq_in_19 */
+	{20, 532}, /* apps_pdc_irq_in_20 */
+	{21, 533}, /* apps_pdc_irq_in_21 */
+	{22, 534}, /* pdc_apps_epcb_timeout_summary_irq */
+	{23, 535}, /* spmi_protocol_irq */
+	{24, 536}, /* tsense0_tsense_max_min_int */
+	{25, 537}, /* tsense1_tsense_max_min_int */
+	{26, 538}, /* tsense0_upper_lower_intr */
+	{27, 539}, /* tsense1_upper_lower_intr */
+	{28, 540}, /* tsense0_critical_intr */
+	{29, 541}, /* tsense1_critical_intr */
+	{30, 542}, /* core_bi_px_gpio_1 */
+	{31, 543}, /* core_bi_px_gpio_3 */
+	{32, 544}, /* core_bi_px_gpio_5 */
+	{33, 545}, /* core_bi_px_gpio_10 */
+	{34, 546}, /* core_bi_px_gpio_11 */
+	{35, 547}, /* core_bi_px_gpio_20 */
+	{36, 548}, /* core_bi_px_gpio_22 */
+	{37, 549}, /* core_bi_px_gpio_24 */
+	{38, 550}, /* core_bi_px_gpio_26 */
+	{39, 551}, /* core_bi_px_gpio_30 */
+	{41, 553}, /* core_bi_px_gpio_32 */
+	{42, 554}, /* core_bi_px_gpio_34 */
+	{43, 555}, /* core_bi_px_gpio_36 */
+	{44, 556}, /* core_bi_px_gpio_37 */
+	{45, 557}, /* core_bi_px_gpio_38 */
+	{46, 558}, /* core_bi_px_gpio_39 */
+	{47, 559}, /* core_bi_px_gpio_40 */
+	{49, 561}, /* core_bi_px_gpio_43 */
+	{50, 562}, /* core_bi_px_gpio_44 */
+	{51, 563}, /* core_bi_px_gpio_46 */
+	{52, 564}, /* core_bi_px_gpio_48 */
+	{54, 566}, /* core_bi_px_gpio_52 */
+	{55, 567}, /* core_bi_px_gpio_53 */
+	{56, 568}, /* core_bi_px_gpio_54 */
+	{57, 569}, /* core_bi_px_gpio_56 */
+	{58, 570}, /* core_bi_px_gpio_57 */
+	{59, 571}, /* core_bi_px_gpio_58 */
+	{60, 572}, /* core_bi_px_gpio_59 */
+	{61, 573}, /* core_bi_px_gpio_60 */
+	{62, 574}, /* core_bi_px_gpio_61 */
+	{63, 575}, /* core_bi_px_gpio_62 */
+	{64, 576}, /* core_bi_px_gpio_63 */
+	{65, 577}, /* core_bi_px_gpio_64 */
+	{66, 578}, /* core_bi_px_gpio_66 */
+	{67, 579}, /* core_bi_px_gpio_68 */
+	{68, 580}, /* core_bi_px_gpio_71 */
+	{69, 581}, /* core_bi_px_gpio_73 */
+	{70, 582}, /* core_bi_px_gpio_77 */
+	{71, 583}, /* core_bi_px_gpio_78 */
+	{72, 584}, /* core_bi_px_gpio_79 */
+	{73, 585}, /* core_bi_px_gpio_80 */
+	{74, 586}, /* core_bi_px_gpio_84 */
+	{75, 587}, /* core_bi_px_gpio_85 */
+	{76, 588}, /* core_bi_px_gpio_86 */
+	{77, 589}, /* core_bi_px_gpio_88 */
+	{79, 591}, /* core_bi_px_gpio_91 */
+	{80, 592}, /* core_bi_px_gpio_92 */
+	{81, 593}, /* core_bi_px_gpio_95 */
+	{82, 594}, /* core_bi_px_gpio_96 */
+	{83, 595}, /* core_bi_px_gpio_97 */
+	{84, 596}, /* core_bi_px_gpio_101 */
+	{85, 597}, /* core_bi_px_gpio_103 */
+	{86, 598}, /* core_bi_px_gpio_104 */
+	{87, 599}, /* core_bi_px_to_mpm[6] */
+	{88, 600}, /* core_bi_px_to_mpm[0] */
+	{89, 601}, /* core_bi_px_to_mpm[1] */
+	{90, 602}, /* core_bi_px_gpio_115 */
+	{91, 603}, /* core_bi_px_gpio_116 */
+	{92, 604}, /* core_bi_px_gpio_117 */
+	{93, 605}, /* core_bi_px_gpio_118 */
+	{94, 641}, /* core_bi_px_gpio_119 */
+	{95, 642}, /* core_bi_px_gpio_120 */
+	{96, 643}, /* core_bi_px_gpio_121 */
+	{97, 644}, /* core_bi_px_gpio_122 */
+	{98, 645}, /* core_bi_px_gpio_123 */
+	{99, 646}, /* core_bi_px_gpio_124 */
+	{100, 647}, /* core_bi_px_gpio_125 */
+	{101, 648}, /* core_bi_px_to_mpm[5] */
+	{102, 649}, /* core_bi_px_gpio_127 */
+	{103, 650}, /* core_bi_px_gpio_128 */
+	{104, 651}, /* core_bi_px_gpio_129 */
+	{105, 652}, /* core_bi_px_gpio_130 */
+	{106, 653}, /* core_bi_px_gpio_132 */
+	{107, 654}, /* core_bi_px_gpio_133 */
+	{108, 655}, /* core_bi_px_gpio_145 */
+	{119, 666}, /* core_bi_px_to_mpm[2] */
+	{120, 667}, /* core_bi_px_to_mpm[3] */
+	{121, 668}, /* core_bi_px_to_mpm[4] */
+	{122, 669}, /* core_bi_px_gpio_41 */
+	{123, 670}, /* core_bi_px_gpio_89 */
+	{124, 671}, /* core_bi_px_gpio_31 */
+	{125, 672}, /* core_bi_px_gpio_49 */
+	{-1}
+};
+
+static int __init qcom_pdc_gic_init(struct device_node *node,
+		struct device_node *parent)
+{
+	return qcom_pdc_init(node, parent, sdm845_data);
+}
+
+IRQCHIP_DECLARE(pdc_sdm845, "qcom,pdc-sdm845", qcom_pdc_gic_init);
diff --git a/drivers/irqchip/qcom/pdc.c b/drivers/irqchip/qcom/pdc.c
new file mode 100644
index 0000000..923552f
--- /dev/null
+++ b/drivers/irqchip/qcom/pdc.c
@@ -0,0 +1,299 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include "pdc.h"
+#define CREATE_TRACE_POINTS
+#include "trace/events/pdc.h"
+
+#define MAX_IRQS 126
+#define CLEAR_INTR(reg, intr) (reg & ~(1 << intr))
+#define ENABLE_INTR(reg, intr) (reg | (1 << intr))
+
+enum pdc_register_offsets {
+	IRQ_ENABLE_BANK = 0x10,
+	IRQ_i_CFG = 0x110,
+};
+
+static DEFINE_SPINLOCK(pdc_lock);
+static void __iomem *pdc_base;
+
+static int get_pdc_pin(irq_hw_number_t hwirq, void *data)
+{
+	int i;
+	struct pdc_pin *pdc_data = (struct pdc_pin *) data;
+
+	for (i = 0; pdc_data[i].pin >= 0; i++) {
+		if (pdc_data[i].hwirq == hwirq)
+			return pdc_data[i].pin;
+	}
+
+	return -EINVAL;
+}
+
+static inline int pdc_enable_intr(struct irq_data *d, bool on)
+{
+	int pin_out = get_pdc_pin(d->hwirq, d->chip_data);
+	unsigned int index, mask;
+	u32 enable, r_enable;
+	unsigned long flags;
+
+	if (pin_out < 0)
+		return 0;
+
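+	/* Each IRQ_ENABLE_BANK register covers 32 PDC pins */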
+	index = pin_out / 32;
+	mask = pin_out % 32;
+	spin_lock_irqsave(&pdc_lock, flags);
+
+	enable = readl_relaxed(pdc_base + IRQ_ENABLE_BANK + (index *
+					sizeof(uint32_t)));
+	if (on)
+		enable = ENABLE_INTR(enable, mask);
+	else
+		enable = CLEAR_INTR(enable, mask);
+
+	writel_relaxed(enable, pdc_base + IRQ_ENABLE_BANK + (index *
+						sizeof(uint32_t)));
+
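+	/* Read back until the enable bank reflects the new value */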
+	do {
+		r_enable = readl_relaxed(pdc_base + IRQ_ENABLE_BANK +
+					(index * sizeof(uint32_t)));
+		if (r_enable == enable)
+			break;
+		udelay(5);
+	} while (1);
+
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	trace_irq_pin_config("enable", (u32)pin_out, (u32)d->hwirq,
+			0, on);
+
+	return 0;
+}
+
+static void qcom_pdc_gic_mask(struct irq_data *d)
+{
+	pdc_enable_intr(d, false);
+	irq_chip_mask_parent(d);
+}
+
+static void qcom_pdc_gic_unmask(struct irq_data *d)
+{
+	pdc_enable_intr(d, true);
+	irq_chip_unmask_parent(d);
+}
+
+static void qcom_pdc_gic_enable(struct irq_data *d)
+{
+	pdc_enable_intr(d, true);
+	irq_chip_enable_parent(d);
+}
+
+static void qcom_pdc_gic_disable(struct irq_data *d)
+{
+	pdc_enable_intr(d, false);
+	irq_chip_disable_parent(d);
+}
+
+/*
+ * GIC does not handle falling edge or active low. To allow falling edge and
+ * active low interrupts to be handled at GIC, PDC has an inverter that inverts
+ * falling edge into a rising edge and active low into an active high.
+ * For the inverter to work, the polarity bit in the IRQ_CONFIG register has to
+ * be set as per the table below.
+ * (polarity, falling edge, rising edge)   ORIG          POL CONV     POLARITY
+ * 3'b0 00  Level sensitive active low    (~~~|_____)   (___|~~~~~)   LOW
+ * 3'b0 01  Rising edge sensitive         (___|~~|__)   (~~~|__|~~)   NOT USED
+ * 3'b0 10  Falling edge sensitive        (~~~|__|~~)   (___|~~|__)   LOW
+ * 3'b0 11  Dual Edge sensitive                                       NOT USED
+ * 3'b1 00  Level sensitive active High   (___|~~~~~)   (___|~~~~~)   HIGH
+ * 3'b1 01  Falling Edge sensitive        (~~~|__|~~)   (~~~|__|~~)   NOT USED
+ * 3'b1 10  Rising edge sensitive         (___|~~|__)   (___|~~|__)   HIGH
+ * 3'b1 11  Dual Edge sensitive                                       HIGH
+ */
+enum pdc_irq_config_bits {
+	POLARITY_LOW = 0, //0 00
+	FALLING_EDGE = 2, //0 10
+	POLARITY_HIGH = 4,//1 00
+	RISING_EDGE = 6,  //1 10
+	DUAL_EDGE = 7,    //1 11
+};
+
+static int qcom_pdc_gic_set_type(struct irq_data *d, unsigned int type)
+{
+	int pin_out = get_pdc_pin(d->hwirq, d->chip_data);
+	u32 pdc_type = 0, config;
+
+	if (pin_out < 0)
+		goto fwd_to_parent;
+
+	switch (type) {
+	case IRQ_TYPE_EDGE_RISING:
+		pdc_type = RISING_EDGE;
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		pdc_type = FALLING_EDGE;
+		break;
+	case IRQ_TYPE_EDGE_BOTH:
+		pdc_type = DUAL_EDGE;
+		break;
+	case IRQ_TYPE_LEVEL_HIGH:
+		pdc_type = POLARITY_HIGH;
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		pdc_type = POLARITY_LOW;
+		break;
+	default:
+		pdc_type = POLARITY_HIGH;
+		break;
+	}
+	writel_relaxed(pdc_type, pdc_base + IRQ_i_CFG +
+			(pin_out * sizeof(uint32_t)));
+
+	do {
+		config = readl_relaxed(pdc_base + IRQ_i_CFG +
+				(pin_out * sizeof(uint32_t)));
+		if (config == pdc_type)
+			break;
+		udelay(5);
+	} while (1);
+
+	trace_irq_pin_config("type_config", (u32)pin_out, (u32)d->hwirq,
+			pdc_type, 0);
+
+	/*
+	 * If type is edge triggered, forward that as Rising edge as PDC
+	 * takes care of converting falling edge to rising edge signal
+	 */
+	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
+		type = IRQ_TYPE_EDGE_RISING;
+
+	/*
+	 * If type is level, then forward that as level high as PDC
+	 * takes care of converting an active low signal to active high
+	 */
+	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
+		type = IRQ_TYPE_LEVEL_HIGH;
+
+fwd_to_parent:
+
+	return irq_chip_set_type_parent(d, type);
+}
+
+static struct irq_chip qcom_pdc_gic_chip = {
+	.name			= "PDC-GIC",
+	.irq_eoi		= irq_chip_eoi_parent,
+	.irq_mask		= qcom_pdc_gic_mask,
+	.irq_enable		= qcom_pdc_gic_enable,
+	.irq_unmask		= qcom_pdc_gic_unmask,
+	.irq_disable		= qcom_pdc_gic_disable,
+	.irq_retrigger		= irq_chip_retrigger_hierarchy,
+	.irq_set_type		= qcom_pdc_gic_set_type,
+	.flags			= IRQCHIP_MASK_ON_SUSPEND |
+					IRQCHIP_SET_TYPE_MASKED |
+					IRQCHIP_SKIP_SET_WAKE,
+	.irq_set_vcpu_affinity	= irq_chip_set_vcpu_affinity_parent,
+#ifdef CONFIG_SMP
+	.irq_set_affinity	= irq_chip_set_affinity_parent,
+#endif
+};
+
+static int qcom_pdc_translate(struct irq_domain *d,
+	struct irq_fwspec *fwspec, unsigned long *hwirq, unsigned int *type)
+{
+	return d->parent->ops->translate(d->parent, fwspec, hwirq, type);
+}
+
+static int qcom_pdc_alloc(struct irq_domain *domain,
+	unsigned int virq, unsigned int nr_irqs, void *data)
+{
+	struct irq_fwspec *fwspec = data;
+	struct irq_fwspec parent_fwspec;
+	irq_hw_number_t hwirq;
+	int i;
+	unsigned int type;
+	int ret;
+
+	ret = qcom_pdc_translate(domain, fwspec, &hwirq, &type);
+	if (ret)
+		return -EINVAL;
+
+	for (i = 0; i < nr_irqs; i++)
+		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+			&qcom_pdc_gic_chip, domain->host_data);
+
+	parent_fwspec = *fwspec;
+	parent_fwspec.fwnode = domain->parent->fwnode;
+
+	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
+					    &parent_fwspec);
+}
+
+static const struct irq_domain_ops qcom_pdc_ops = {
+	.translate	= qcom_pdc_translate,
+	.alloc		= qcom_pdc_alloc,
+	.free		= irq_domain_free_irqs_common,
+};
+
+int qcom_pdc_init(struct device_node *node,
+		struct device_node *parent, void *data)
+{
+	struct irq_domain *parent_domain;
+	int ret;
+	struct irq_domain *pdc_domain;
+
+	pdc_base = of_iomap(node, 0);
+	if (!pdc_base) {
+		pr_err("%s: unable to map PDC registers\n", node->full_name);
+		return -ENXIO;
+	}
+
+	parent_domain = irq_find_host(parent);
+	if (!parent_domain) {
+		pr_err("unable to obtain PDC parent domain\n");
+		ret = -ENXIO;
+		goto failure;
+	}
+
+	pdc_domain = irq_domain_add_hierarchy(parent_domain, 0, MAX_IRQS,
+			node, &qcom_pdc_ops, data);
+	if (!pdc_domain) {
+		pr_err("PDC domain add failed\n");
+		ret = -ENOMEM;
+		goto failure;
+	}
+
+	pdc_domain->name = "qcom,pdc";
+
+	return 0;
+
+failure:
+	iounmap(pdc_base);
+
+	return ret;
+}
+EXPORT_SYMBOL(qcom_pdc_init);
diff --git a/drivers/irqchip/qcom/pdc.h b/drivers/irqchip/qcom/pdc.h
new file mode 100644
index 0000000..7c4d89c
--- /dev/null
+++ b/drivers/irqchip/qcom/pdc.h
@@ -0,0 +1,23 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/irq.h>
+#include <linux/device.h>
+
+struct pdc_pin {
+	int pin;
+	irq_hw_number_t hwirq;
+};
+
+int qcom_pdc_init(struct device_node *node,
+		struct device_node *parent, void *data);
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
index 76dd1f3..5a4e6e9 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
@@ -59,15 +59,22 @@
 		}
 
 		if (!bubble_state) {
-			CDBG("%s: Sync success: fd 0x%x\n", __func__,
+			CDBG("%s: Sync with success: fd 0x%x\n", __func__,
 				   req_isp->fence_map_out[j].sync_id);
-			cam_sync_signal(req_isp->fence_map_out[j].sync_id,
+			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
 				CAM_SYNC_STATE_SIGNALED_SUCCESS);
+			if (rc)
+				pr_err("%s: Sync failed with rc = %d\n",
+					__func__, rc);
+
 		} else if (!req_isp->bubble_report) {
-			CDBG("%s: Sync failure: fd 0x%x\n", __func__,
+			CDBG("%s: Sync with failure: fd 0x%x\n", __func__,
 				   req_isp->fence_map_out[j].sync_id);
-			cam_sync_signal(req_isp->fence_map_out[j].sync_id,
+			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
 				CAM_SYNC_STATE_SIGNALED_ERROR);
+			if (rc)
+				pr_err("%s: Sync failed with rc = %d\n",
+					__func__, rc);
 		} else {
 			/*
 			 * Ignore the buffer done if bubble detect is on
@@ -277,7 +284,7 @@
 
 	ctx_isp->frame_id++;
 	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
-	pr_err("%s: next substate %d\n", __func__,
+	CDBG("%s: next substate %d\n", __func__,
 		ctx_isp->substate_activated);
 
 	return rc;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index 259e773..49085d7 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -1493,19 +1493,6 @@
 	if (i == ctx->num_base)
 		master_base_idx = ctx->base[0].idx;
 
-	/* Stop the master CIDs first */
-	cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
-			master_base_idx, CAM_CSID_HALT_AT_FRAME_BOUNDARY);
-
-	/* stop rest of the CIDs  */
-	for (i = 0; i < ctx->num_base; i++) {
-		if (i == master_base_idx)
-			continue;
-		cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
-			ctx->base[i].idx, CAM_CSID_HALT_AT_FRAME_BOUNDARY);
-	}
-
-
 	/* Stop the master CSID path first */
 	cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid,
 			master_base_idx, CAM_CSID_HALT_AT_FRAME_BOUNDARY);
@@ -1519,6 +1506,18 @@
 			ctx->base[i].idx, CAM_CSID_HALT_AT_FRAME_BOUNDARY);
 	}
 
+	/* Stop the master CIDs first */
+	cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
+			master_base_idx, CAM_CSID_HALT_AT_FRAME_BOUNDARY);
+
+	/* stop rest of the CIDs  */
+	for (i = 0; i < ctx->num_base; i++) {
+		if (i == master_base_idx)
+			continue;
+		cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
+			ctx->base[i].idx, CAM_CSID_HALT_AT_FRAME_BOUNDARY);
+	}
+
 	if (cam_cdm_stream_off(ctx->cdm_handle))
 		pr_err("%s%d: CDM stream off failed %d\n",
 			__func__, __LINE__, ctx->cdm_handle);
@@ -2884,7 +2883,7 @@
 	int i, j;
 	struct cam_iommu_handle cdm_handles;
 
-	pr_info("%s: Enter\n", __func__);
+	CDBG("%s: Enter\n", __func__);
 
 	memset(&g_ife_hw_mgr, 0, sizeof(g_ife_hw_mgr));
 
@@ -3037,7 +3036,7 @@
 	hw_mgr_intf->hw_prepare_update = cam_ife_mgr_prepare_hw_update;
 	hw_mgr_intf->hw_config = cam_ife_mgr_config_hw;
 
-	pr_info("%s: Exit\n", __func__);
+	CDBG("%s: Exit\n", __func__);
 	return 0;
 end:
 	if (rc) {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
index 6306df3..3ec9aa6 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
@@ -1133,7 +1133,7 @@
 	if (rc)
 		return rc;
 
-	/**
+	/*
 	 * configure the IPP and enable the time stamp capture.
 	 * enable the HW measurement blocks
 	 */
@@ -1417,7 +1417,7 @@
 	if (rc)
 		return rc;
 
-	/**
+	/*
 	 * RDI path config and enable the time stamp capture
 	 * Enable the measurement blocks
 	 */
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
index e62c101..b97593b 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
@@ -1092,10 +1092,9 @@
 	slot = &in_q->slot[in_q->wr_idx];
 
 	if (slot->status != CRM_SLOT_STATUS_NO_REQ &&
-		slot->status != CRM_SLOT_STATUS_REQ_APPLIED) {
-		CRM_ERR("in_q overwrite %d", slot->status);
-		/* @TODO: error handling */
-	}
+		slot->status != CRM_SLOT_STATUS_REQ_APPLIED)
+		CRM_WARN("in_q overwrite %d", slot->status);
+
 	CRM_DBG("sched_req %lld at slot %d",
 		sched_req->req_id, in_q->wr_idx);
 
@@ -1106,7 +1105,6 @@
 	__cam_req_mgr_inc_idx(&in_q->wr_idx, 1, in_q->num_slots);
 	mutex_unlock(&link->req.lock);
 
-	complete(&link->workq_comp);
 end:
 	return rc;
 }
@@ -1371,6 +1369,7 @@
 		goto end;
 	}
 
+	CRM_DBG("E: dev %x dev req %lld", add_req->dev_hdl, add_req->req_id);
 	link = (struct cam_req_mgr_core_link *)
 		cam_get_device_priv(add_req->link_hdl);
 
@@ -1404,6 +1403,7 @@
 	dev_req->dev_hdl = add_req->dev_hdl;
 	task->process_cb = &cam_req_mgr_process_add_req;
 	rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
+	CRM_DBG("X: dev %x dev req %lld", add_req->dev_hdl, add_req->req_id);
 
 end:
 	return rc;
@@ -1919,11 +1919,10 @@
 			struct cam_req_mgr_sched_request *sched_req)
 {
 	int                               rc = 0;
-	struct crm_workq_task            *task = NULL;
 	struct cam_req_mgr_core_link     *link = NULL;
 	struct cam_req_mgr_core_session  *session = NULL;
 	struct cam_req_mgr_sched_request *sched;
-	struct crm_task_payload          *task_data;
+	struct crm_task_payload           task_data;
 
 	if (!sched_req) {
 		CRM_ERR("csl_req is NULL");
@@ -1942,14 +1941,10 @@
 		CRM_WARN("session ptr NULL %x", sched_req->link_hdl);
 		return -EINVAL;
 	}
+	CRM_DBG("link %x req %lld", sched_req->link_hdl, sched_req->req_id);
 
-	task = cam_req_mgr_workq_get_task(link->workq);
-	if (!task)
-		return -ENOMEM;
-
-	task_data = (struct crm_task_payload *)task->payload;
-	task_data->type = CRM_WORKQ_TASK_SCHED_REQ;
-	sched = (struct cam_req_mgr_sched_request *)&task_data->u;
+	task_data.type = CRM_WORKQ_TASK_SCHED_REQ;
+	sched = (struct cam_req_mgr_sched_request *)&task_data.u;
 	sched->req_id = sched_req->req_id;
 	sched->link_hdl = sched_req->link_hdl;
 	if (session->force_err_recovery == AUTO_RECOVERY) {
@@ -1958,14 +1953,10 @@
 		sched->bubble_enable =
 		(session->force_err_recovery == FORCE_ENABLE_RECOVERY) ? 1 : 0;
 	}
-	task->process_cb = &cam_req_mgr_process_sched_req;
-	rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
 
-	/* Blocking call */
-	init_completion(&link->workq_comp);
-	rc = wait_for_completion_timeout(
-		&link->workq_comp,
-		msecs_to_jiffies(CAM_REQ_MGR_SCHED_REQ_TIMEOUT));
+	rc = cam_req_mgr_process_sched_req(link, &task_data);
+
+	CRM_DBG("DONE dev %x req %lld", sched_req->link_hdl, sched_req->req_id);
 end:
 	return rc;
 }
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
index 889ee9c..3ee0e2f 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
@@ -27,7 +27,7 @@
 #define FORCE_ENABLE_RECOVERY   1
 #define AUTO_RECOVERY           0
 
-#define CRM_WORKQ_NUM_TASKS 30
+#define CRM_WORKQ_NUM_TASKS 60
 
 /**
  * enum crm_workq_task_type
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
index f53e41c..b026b7c 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
@@ -148,24 +148,25 @@
 		goto end;
 	}
 
-	spin_lock_bh(&workq->lock_bh);
 	if (task->cancel == 1) {
 		cam_req_mgr_workq_put_task(task);
 		CRM_WARN("task aborted and queued back to pool");
 		rc = 0;
-		spin_unlock_bh(&workq->lock_bh);
 		goto end;
 	}
 	task->priv = priv;
 	task->priority =
 		(prio < CRM_TASK_PRIORITY_MAX && prio >= CRM_TASK_PRIORITY_0)
 		? prio : CRM_TASK_PRIORITY_0;
+
+	spin_lock_bh(&workq->lock_bh);
 	list_add_tail(&task->entry,
 		&workq->task.process_head[task->priority]);
+	spin_unlock_bh(&workq->lock_bh);
+
 	atomic_add(1, &workq->task.pending_cnt);
 	CRM_DBG("enq task %pK pending_cnt %d",
 		task, atomic_read(&workq->task.pending_cnt));
-	spin_unlock_bh(&workq->lock_bh);
 
 	queue_work(workq->job, &workq->work);
 
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
index 34243e6..15b8a2d 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
@@ -227,9 +227,10 @@
 {
 	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
 	u32 ot_lim;
-	u32 reg_off_vbif_lim_conf = (params->xin_id / 4) * 4 +
-		params->reg_off_vbif_lim_conf;
-	u32 bit_off_vbif_lim_conf = (params->xin_id % 4) * 8;
+	u32 reg_off_vbif_lim_conf = ((params->xin_id / mdata->npriority_lvl)
+					* mdata->npriority_lvl)
+					+ params->reg_off_vbif_lim_conf;
+	u32 bit_off_vbif_lim_conf = (params->xin_id % mdata->npriority_lvl) * 8;
 	u32 reg_val;
 	u32 sts;
 	bool forced_on;
@@ -420,6 +421,136 @@
 	}
 }
 
+static void sde_mdp_parse_cdp_setting(struct platform_device *pdev,
+		struct sde_rot_data_type *mdata)
+{
+	int rc;
+	u32 len, data[SDE_ROT_OP_MAX] = {0};
+
+	len = sde_mdp_parse_dt_prop_len(pdev,
+			"qcom,mdss-rot-cdp-setting");
+	if (len == SDE_ROT_OP_MAX) {
+		rc = sde_mdp_parse_dt_handler(pdev,
+			"qcom,mdss-rot-cdp-setting", data, len);
+		if (rc) {
+			SDEROT_ERR("invalid CDP setting\n");
+			goto end;
+		}
+
+		set_bit(SDE_QOS_CDP, mdata->sde_qos_map);
+		mdata->enable_cdp[SDE_ROT_RD] = data[SDE_ROT_RD];
+		mdata->enable_cdp[SDE_ROT_WR] = data[SDE_ROT_WR];
+		return;
+	}
+end:
+	clear_bit(SDE_QOS_CDP, mdata->sde_qos_map);
+}
+
+static void sde_mdp_parse_rot_lut_setting(struct platform_device *pdev,
+		struct sde_rot_data_type *mdata)
+{
+	int rc;
+	u32 len, data[4];
+
+	len = sde_mdp_parse_dt_prop_len(pdev, "qcom,mdss-rot-qos-lut");
+	if (len == 4) {
+		rc = sde_mdp_parse_dt_handler(pdev,
+			"qcom,mdss-rot-qos-lut", data, len);
+		if (!rc) {
+			mdata->lut_cfg[SDE_ROT_RD].creq_lut_0 = data[0];
+			mdata->lut_cfg[SDE_ROT_RD].creq_lut_1 = data[1];
+			mdata->lut_cfg[SDE_ROT_WR].creq_lut_0 = data[2];
+			mdata->lut_cfg[SDE_ROT_WR].creq_lut_1 = data[3];
+			set_bit(SDE_QOS_LUT, mdata->sde_qos_map);
+		} else {
+			SDEROT_DBG("qos lut setting not found\n");
+		}
+	}
+
+	len = sde_mdp_parse_dt_prop_len(pdev, "qcom,mdss-rot-danger-lut");
+	if (len == SDE_ROT_OP_MAX) {
+		rc = sde_mdp_parse_dt_handler(pdev,
+			"qcom,mdss-rot-danger-lut", data, len);
+		if (!rc) {
+			mdata->lut_cfg[SDE_ROT_RD].danger_lut
+							= data[SDE_ROT_RD];
+			mdata->lut_cfg[SDE_ROT_WR].danger_lut
+							= data[SDE_ROT_WR];
+			set_bit(SDE_QOS_DANGER_LUT, mdata->sde_qos_map);
+		} else {
+			SDEROT_DBG("danger lut setting not found\n");
+		}
+	}
+
+	len = sde_mdp_parse_dt_prop_len(pdev, "qcom,mdss-rot-safe-lut");
+	if (len == SDE_ROT_OP_MAX) {
+		rc = sde_mdp_parse_dt_handler(pdev,
+			"qcom,mdss-rot-safe-lut", data, len);
+		if (!rc) {
+			mdata->lut_cfg[SDE_ROT_RD].safe_lut = data[SDE_ROT_RD];
+			mdata->lut_cfg[SDE_ROT_WR].safe_lut = data[SDE_ROT_WR];
+			set_bit(SDE_QOS_SAFE_LUT, mdata->sde_qos_map);
+		} else {
+			SDEROT_DBG("safe lut setting not found\n");
+		}
+	}
+}
+
+static void sde_mdp_parse_inline_rot_lut_setting(struct platform_device *pdev,
+		struct sde_rot_data_type *mdata)
+{
+	int rc;
+	u32 len, data[4];
+
+	len = sde_mdp_parse_dt_prop_len(pdev, "qcom,mdss-inline-rot-qos-lut");
+	if (len == 4) {
+		rc = sde_mdp_parse_dt_handler(pdev,
+			"qcom,mdss-inline-rot-qos-lut", data, len);
+		if (!rc) {
+			mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_0 = data[0];
+			mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_1 = data[1];
+			mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_0 = data[2];
+			mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_1 = data[3];
+			set_bit(SDE_INLINE_QOS_LUT, mdata->sde_inline_qos_map);
+		} else {
+			SDEROT_DBG("inline qos lut setting not found\n");
+		}
+	}
+
+	len = sde_mdp_parse_dt_prop_len(pdev,
+				"qcom,mdss-inline-rot-danger-lut");
+	if (len == SDE_ROT_OP_MAX) {
+		rc = sde_mdp_parse_dt_handler(pdev,
+			"qcom,mdss-inline-rot-danger-lut", data, len);
+		if (!rc) {
+			mdata->inline_lut_cfg[SDE_ROT_RD].danger_lut
+							= data[SDE_ROT_RD];
+			mdata->inline_lut_cfg[SDE_ROT_WR].danger_lut
+							= data[SDE_ROT_WR];
+			set_bit(SDE_INLINE_QOS_DANGER_LUT,
+					mdata->sde_inline_qos_map);
+		} else {
+			SDEROT_DBG("inline danger lut setting not found\n");
+		}
+	}
+
+	len = sde_mdp_parse_dt_prop_len(pdev, "qcom,mdss-inline-rot-safe-lut");
+	if (len == SDE_ROT_OP_MAX) {
+		rc = sde_mdp_parse_dt_handler(pdev,
+			"qcom,mdss-inline-rot-safe-lut", data, len);
+		if (!rc) {
+			mdata->inline_lut_cfg[SDE_ROT_RD].safe_lut
+							= data[SDE_ROT_RD];
+			mdata->inline_lut_cfg[SDE_ROT_WR].safe_lut
+							= data[SDE_ROT_WR];
+			set_bit(SDE_INLINE_QOS_SAFE_LUT,
+					mdata->sde_inline_qos_map);
+		} else {
+			SDEROT_DBG("inline safe lut setting not found\n");
+		}
+	}
+}
+
 static int sde_mdp_parse_dt_misc(struct platform_device *pdev,
 		struct sde_rot_data_type *mdata)
 {
@@ -444,8 +575,14 @@
 		SDEROT_DBG(
 			"Could not read optional property: highest bank bit\n");
 
+	sde_mdp_parse_cdp_setting(pdev, mdata);
+
 	sde_mdp_parse_vbif_qos(pdev, mdata);
 
+	sde_mdp_parse_rot_lut_setting(pdev, mdata);
+
+	sde_mdp_parse_inline_rot_lut_setting(pdev, mdata);
+
 	mdata->mdp_base = mdata->sde_io.base + SDE_MDP_OFFSET;
 
 	return 0;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
index 9194b44..313c709 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
@@ -40,6 +40,9 @@
 #define SDE_MDP_HW_REV_301	SDE_MDP_REV(3, 0, 1)	/* 8998 v1.1 */
 #define SDE_MDP_HW_REV_400	SDE_MDP_REV(4, 0, 0)	/* sdm845 v1.0 */
 
+#define SDE_MDP_VBIF_4_LEVEL_REMAPPER	4
+#define SDE_MDP_VBIF_8_LEVEL_REMAPPER	8
+
 struct sde_mult_factor {
 	uint32_t numer;
 	uint32_t denom;
@@ -77,9 +80,19 @@
 	SDE_QOS_PER_PIPE_LUT,
 	SDE_QOS_SIMPLIFIED_PREFILL,
 	SDE_QOS_VBLANK_PANIC_CTRL,
+	SDE_QOS_LUT,
+	SDE_QOS_DANGER_LUT,
+	SDE_QOS_SAFE_LUT,
 	SDE_QOS_MAX,
 };
 
+enum sde_inline_qos_settings {
+	SDE_INLINE_QOS_LUT,
+	SDE_INLINE_QOS_DANGER_LUT,
+	SDE_INLINE_QOS_SAFE_LUT,
+	SDE_INLINE_QOS_MAX,
+};
+
 /**
  * enum sde_rot_type: SDE rotator HW version
  * @SDE_ROT_TYPE_V1_0: V1.0 HW version
@@ -98,6 +111,7 @@
  * @SDE_CAPS_R3_1P5_DOWNSCALE: 1.5x downscale rotator support
  * @SDE_CAPS_SBUF_1: stream buffer support for inline rotation
  * @SDE_CAPS_UBWC_2: universal bandwidth compression version 2
+ * @SDE_CAPS_PARTIALWR: partial write override
  */
 enum sde_caps_settings {
 	SDE_CAPS_R1_WB,
@@ -106,6 +120,7 @@
 	SDE_CAPS_SEC_ATTACH_DETACH_SMMU,
 	SDE_CAPS_SBUF_1,
 	SDE_CAPS_UBWC_2,
+	SDE_CAPS_PARTIALWR,
 	SDE_CAPS_MAX,
 };
 
@@ -115,6 +130,12 @@
 	SDE_MAX_BUS_CLIENTS
 };
 
+enum sde_rot_op {
+	SDE_ROT_RD,
+	SDE_ROT_WR,
+	SDE_ROT_OP_MAX
+};
+
 enum sde_rot_regdump_access {
 	SDE_ROT_REGDUMP_READ,
 	SDE_ROT_REGDUMP_WRITE,
@@ -165,6 +186,13 @@
 	enum sde_rot_regdump_access access;
 };
 
+struct sde_rot_lut_cfg {
+	u32 creq_lut_0;
+	u32 creq_lut_1;
+	u32 danger_lut;
+	u32 safe_lut;
+};
+
 struct sde_rot_data_type {
 	u32 mdss_version;
 
@@ -177,6 +205,7 @@
 
 	/* bitmap to track qos applicable settings */
 	DECLARE_BITMAP(sde_qos_map, SDE_QOS_MAX);
+	DECLARE_BITMAP(sde_inline_qos_map, SDE_QOS_MAX);
 
 	/* bitmap to track capability settings */
 	DECLARE_BITMAP(sde_caps_map, SDE_CAPS_MAX);
@@ -210,6 +239,11 @@
 	void *sde_rot_hw;
 	int sec_cam_en;
 
+	u32 enable_cdp[SDE_ROT_OP_MAX];
+
+	struct sde_rot_lut_cfg lut_cfg[SDE_ROT_OP_MAX];
+	struct sde_rot_lut_cfg inline_lut_cfg[SDE_ROT_OP_MAX];
+
 	struct ion_client *iclient;
 
 	bool clk_always_on;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index 30fda07..44a29aa 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -60,6 +60,9 @@
 /* waiting for hw time out, 3 vsync for 30fps*/
 #define ROT_HW_ACQUIRE_TIMEOUT_IN_MS 100
 
+/* waiting for inline hw start */
+#define ROT_INLINE_START_TIMEOUT_IN_MS 2000
+
 /* default pixel per clock ratio */
 #define ROT_PIXEL_PER_CLK_NUMERATOR	36
 #define ROT_PIXEL_PER_CLK_DENOMINATOR	10
@@ -299,13 +302,13 @@
 	return 0;
 }
 
-static void sde_rotator_footswitch_ctrl(struct sde_rot_mgr *mgr, bool on)
+static int sde_rotator_footswitch_ctrl(struct sde_rot_mgr *mgr, bool on)
 {
 	int ret;
 
-	if (WARN_ON(mgr->regulator_enable == on)) {
+	if (mgr->regulator_enable == on) {
 		SDEROT_ERR("Regulators already in selected mode on=%d\n", on);
-		return;
+		return 0;
 	}
 
 	SDEROT_EVTLOG(on);
@@ -327,9 +330,9 @@
 		ret = sde_rot_enable_vreg(mgr->module_power.vreg_config,
 			mgr->module_power.num_vreg, on);
 	if (ret) {
-		SDEROT_WARN("Rotator regulator failed to %s\n",
-			on ? "enable" : "disable");
-		return;
+		pr_err("rotator regulator failed to %s ret:%d client:%d\n",
+		      on ? "enable" : "disable", ret, mgr->rsc_client != NULL);
+		return ret;
 	}
 
 	if (mgr->ops_hw_post_pmevent)
@@ -341,6 +344,7 @@
 	}
 
 	mgr->regulator_enable = on;
+	return 0;
 }
 
 static int sde_rotator_enable_clk(struct sde_rot_mgr *mgr, int clk_idx)
@@ -1508,6 +1512,8 @@
 	if (entry->item.ts)
 		entry->item.ts[SDE_ROTATOR_TS_FLUSH] = ktime_get();
 
+	SDEROT_EVTLOG(entry->item.session_id, 1);
+
 	queue_work(entry->doneq->rot_work_queue, &entry->done_work);
 	sde_rot_mgr_unlock(mgr);
 	return;
@@ -1564,6 +1570,13 @@
 		entry->item.flags,
 		entry->dnsc_factor_w, entry->dnsc_factor_h);
 
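+	/* For inline rotation, wait until the caller signals start */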
+	wait_for_completion_timeout(
+			&entry->item.inline_start,
+			msecs_to_jiffies(ROT_INLINE_START_TIMEOUT_IN_MS));
+
+	if (entry->item.ts)
+		entry->item.ts[SDE_ROTATOR_TS_START] = ktime_get();
+
 	SDEROT_EVTLOG(entry->item.session_id, 0);
 	ret = mgr->ops_wait_for_entry(hw, entry);
 	if (ret) {
@@ -2332,11 +2345,36 @@
 	for (i = 0; i < count; i++) {
 		req->entries[i].item = items[i];
 		req->entries[i].private = private;
+
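+		/*
+		 * Start out signaled so only requests explicitly reset for
+		 * inline rotation wait on this completion.
+		 */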
+		init_completion(&req->entries[i].item.inline_start);
+		complete_all(&req->entries[i].item.inline_start);
 	}
 
 	return req;
 }
 
+void sde_rotator_req_reset_start(struct sde_rot_entry_container *req)
+{
+	int i;
+
+	if (!req)
+		return;
+
+	for (i = 0; i < req->count; i++)
+		reinit_completion(&req->entries[i].item.inline_start);
+}
+
+void sde_rotator_req_set_start(struct sde_rot_entry_container *req)
+{
+	int i;
+
+	if (!req)
+		return;
+
+	for (i = 0; i < req->count; i++)
+		complete_all(&req->entries[i].item.inline_start);
+}
+
 void sde_rotator_req_finish(struct sde_rot_mgr *mgr,
 	struct sde_rot_file_private *private,
 	struct sde_rot_entry_container *req)
@@ -2885,12 +2923,11 @@
 	}
 
 	*pmgr = mgr;
-
-	pm_runtime_set_suspended(&pdev->dev);
-	pm_runtime_enable(&pdev->dev);
-	if (!pm_runtime_enabled(&pdev->dev)) {
-		SDEROT_ERR("fail to enable power, force on\n");
-		sde_rotator_footswitch_ctrl(mgr, true);
+	ret = sde_rotator_footswitch_ctrl(mgr, true);
+	if (ret) {
+		SDEROT_ERR("res_init failed %d\n", ret);
+		ret = -EPROBE_DEFER;
+		goto error_fs_en_fail;
 	}
 
 	/* enable power and clock before h/w initialization/query */
@@ -2931,6 +2968,9 @@
 	/* disable power and clock after h/w initialization/query */
 	sde_rotator_clk_ctrl(mgr, false);
 	sde_rotator_resource_ctrl(mgr, false);
+	sde_rotator_footswitch_ctrl(mgr, false);
+	pm_runtime_set_suspended(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
 
 	return 0;
 
@@ -2940,7 +2980,8 @@
 error_map_hw_ops:
 	sde_rotator_clk_ctrl(mgr, false);
 	sde_rotator_resource_ctrl(mgr, false);
-	pm_runtime_disable(mgr->device);
+	sde_rotator_footswitch_ctrl(mgr, false);
+error_fs_en_fail:
 	sde_rotator_res_destroy(mgr);
 error_res_init:
 error_parse_dt:
@@ -3024,8 +3065,7 @@
 
 	SDEROT_DBG("begin runtime_active\n");
 	ATRACE_BEGIN("runtime_active");
-	sde_rotator_footswitch_ctrl(mgr, true);
-	return 0;
+	return sde_rotator_footswitch_ctrl(mgr, true);
 }
 
 /*
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
index 0051e96..7b8a066 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
@@ -21,6 +21,7 @@
 #include <linux/types.h>
 #include <linux/cdev.h>
 #include <linux/pm_runtime.h>
+#include <linux/completion.h>
 
 #include "sde_rotator_base.h"
 #include "sde_rotator_util.h"
@@ -115,6 +116,7 @@
 	SDE_ROTATOR_TS_QUEUE,		/* wait for h/w resource */
 	SDE_ROTATOR_TS_COMMIT,		/* prepare h/w command */
 	SDE_ROTATOR_TS_FLUSH,		/* initiate h/w processing */
+	SDE_ROTATOR_TS_START,		/* h/w triggered (if inline) */
 	SDE_ROTATOR_TS_DONE,		/* receive h/w completion */
 	SDE_ROTATOR_TS_RETIRE,		/* signal destination buffer fence */
 	SDE_ROTATOR_TS_SRCDQB,		/* dequeue source buffer */
@@ -199,6 +201,9 @@
 
 	/* Time stamp for profiling purposes */
 	ktime_t		*ts;
+
+	/* Completion structure for inline rotation */
+	struct completion inline_start;
 };
 
 /*
@@ -604,6 +609,23 @@
 	u32 count, u32 flags);
 
 /*
+ * sde_rotator_req_reset_start - reset inline h/w 'start' indicator
+ *	For inline rotations, the time of rotation start is not controlled
+ *	indicator that allows the rotator to delay its rotation timeout
+ *	until the inline rotation has actually started.
+ *	really started.
+ * @req: Pointer to rotation request
+ */
+void sde_rotator_req_reset_start(struct sde_rot_entry_container *req);
+
+/*
+ * sde_rotator_req_set_start - set inline h/w 'start' indicator
+ * @req: Pointer to rotation request
+ */
+void sde_rotator_req_set_start(struct sde_rot_entry_container *req);
+
+/*
  * sde_rotator_req_finish - notify manager that client is finished with the
  *	given request and manager can release the request as required
  * @rot_dev: Pointer to rotator device
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
index e9ff67c..3e686e9 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
@@ -789,7 +789,7 @@
 					start_time));
 
 		seq_printf(s,
-			"s:%d sq:%lld dq:%lld fe:%lld q:%lld c:%lld fl:%lld d:%lld sdq:%lld ddq:%lld t:%lld oht:%lld\n",
+			"s:%d sq:%lld dq:%lld fe:%lld q:%lld c:%lld fl:%lld st:%lld d:%lld sdq:%lld ddq:%lld t:%lld oht:%lld\n",
 			i,
 			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_FENCE],
 					ts[SDE_ROTATOR_TS_SRCQB])),
@@ -801,8 +801,10 @@
 					ts[SDE_ROTATOR_TS_QUEUE])),
 			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_FLUSH],
 					ts[SDE_ROTATOR_TS_COMMIT])),
-			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_DONE],
+			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_START],
 					ts[SDE_ROTATOR_TS_FLUSH])),
+			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_DONE],
+					ts[SDE_ROTATOR_TS_START])),
 			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_RETIRE],
 					ts[SDE_ROTATOR_TS_DONE])),
 			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_SRCDQB],
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
index 90b7194..2e91d54 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
@@ -1467,7 +1467,9 @@
 		int scid = llcc_get_slice_id(ctx->slice);
 
 		/* allocate slot for timestamp */
-		ts = stats->ts[stats->count++ % SDE_ROTATOR_NUM_EVENTS];
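+		/* only advance the slot on commit so validate reuses it */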
+		ts = stats->ts[stats->count % SDE_ROTATOR_NUM_EVENTS];
+		if (cmd_type == SDE_ROTATOR_INLINE_CMD_COMMIT)
+			stats->count++;
 
 		if (cmd->rot90)
 			flags |= SDE_ROTATION_90;
@@ -1637,6 +1639,8 @@
 			goto error_handle_request;
 		}
 
+		sde_rotator_req_reset_start(req);
+
 		sde_rotator_commit_request(rot_dev->mgr, ctx->private, req);
 
 		request->committed = true;
@@ -1644,6 +1648,15 @@
 		/* save request in private handle */
 		cmd->priv_handle = request;
 
+	} else if (cmd_type == SDE_ROTATOR_INLINE_CMD_START) {
+		if (!cmd->priv_handle) {
+			ret = -EINVAL;
+			SDEROT_ERR("invalid private handle\n");
+			goto error_invalid_handle;
+		}
+
+		request = cmd->priv_handle;
+		sde_rotator_req_set_start(request->req);
 	} else if (cmd_type == SDE_ROTATOR_INLINE_CMD_CLEANUP) {
 		if (!cmd->priv_handle) {
 			ret = -EINVAL;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_hwio.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_hwio.h
index 051db78..de448a4 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_hwio.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_hwio.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -65,6 +65,8 @@
 #define MMSS_VBIF_NRT_VBIF_IN_WR_LIM_CONF2		0x00C8
 #define MMSS_VBIF_NRT_VBIF_OUT_RD_LIM_CONF0		0x00D0
 #define MMSS_VBIF_NRT_VBIF_OUT_WR_LIM_CONF0		0x00D4
+#define MMSS_VBIF_NRT_VBIF_QOS_RP_REMAP_000		0x0550
+#define MMSS_VBIF_NRT_VBIF_QOS_LVL_REMAP_000		0x0590
 
 #define SDE_MDP_REG_TRAFFIC_SHAPER_EN			BIT(31)
 #define SDE_MDP_REG_TRAFFIC_SHAPER_RD_CLIENT(num)	(0x030 + (num * 4))
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h
index 27fd0c3..705eb27 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h
@@ -25,11 +25,13 @@
  * enum sde_rotator_inline_cmd_type - inline rotator command stages
  * @SDE_ROTATOR_INLINE_CMD_VALIDATE: validate command only
  * @SDE_ROTATOR_INLINE_CMD_COMMIT: commit command to hardware
+ * @SDE_ROTATOR_INLINE_CMD_START: ready to start inline rotation
  * @SDE_ROTATOR_INLINE_CMD_CLEANUP: cleanup after commit is done
  */
 enum sde_rotator_inline_cmd_type {
 	SDE_ROTATOR_INLINE_CMD_VALIDATE,
 	SDE_ROTATOR_INLINE_CMD_COMMIT,
+	SDE_ROTATOR_INLINE_CMD_START,
 	SDE_ROTATOR_INLINE_CMD_CLEANUP,
 };
 
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index 980df9f..6ebfc1a 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -493,6 +493,12 @@
 		SDE_ROT_REGDUMP_VBIF },
 };
 
+struct sde_rot_cdp_params {
+	bool enable;
+	struct sde_mdp_format_params *fmt;
+	u32 offset;
+};
+
 /* Invalid software timestamp value for initialization */
 #define SDE_REGDMA_SWTS_INVALID	(~0)
 
@@ -741,6 +747,76 @@
 }
 
 /*
+ * sde_hw_rotator_vbif_setting - helper function to set the VBIF QoS remapper
+ * levels, enable write gather for the writeback client, and optionally
+ * disable clock gating for debug purposes.
+ *
+ * @rot: Pointer to rotator hw
+ */
+static void sde_hw_rotator_vbif_setting(struct sde_hw_rotator *rot)
+{
+	u32 i, mask, vbif_qos, reg_val = 0;
+	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+
+	/* VBIF_ROT QoS remapper setting */
+	switch (mdata->npriority_lvl) {
+
+	case SDE_MDP_VBIF_4_LEVEL_REMAPPER:
+		for (i = 0; i < mdata->npriority_lvl; i++) {
+			reg_val = SDE_VBIF_READ(mdata,
+					MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4);
+			mask = 0x3 << (XIN_SSPP * 2);
+			vbif_qos = mdata->vbif_nrt_qos[i];
+			reg_val |= vbif_qos << (XIN_SSPP * 2);
+			/* ensure write is issued after the read operation */
+			mb();
+			SDE_VBIF_WRITE(mdata,
+					MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4,
+					reg_val);
+		}
+		break;
+
+	case SDE_MDP_VBIF_8_LEVEL_REMAPPER:
+		mask = mdata->npriority_lvl - 1;
+		for (i = 0; i < mdata->npriority_lvl; i++) {
+			/* RD and WR client */
+			reg_val |= (mdata->vbif_nrt_qos[i] & mask)
+							<< (XIN_SSPP * 4);
+			reg_val |= (mdata->vbif_nrt_qos[i] & mask)
+							<< (XIN_WRITEBACK * 4);
+
+			SDE_VBIF_WRITE(mdata,
+				MMSS_VBIF_NRT_VBIF_QOS_RP_REMAP_000 + i*8,
+				reg_val);
+			SDE_VBIF_WRITE(mdata,
+				MMSS_VBIF_NRT_VBIF_QOS_LVL_REMAP_000 + i*8,
+				reg_val);
+		}
+		break;
+
+	default:
+		SDEROT_DBG("invalid vbif remapper levels\n");
+	}
+
+	/* Enable write gather for writeback to remove write gaps, which
+	 * may hang AXI/BIMC/SDE.
+	 */
+	SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_WRITE_GATHTER_EN,
+			BIT(XIN_WRITEBACK));
+
+	/*
+	 * For debug purpose, disable clock gating, i.e. Clocks always on
+	 */
+	if (mdata->clk_always_on) {
+		SDE_VBIF_WRITE(mdata, MMSS_VBIF_CLKON, 0x3);
+		SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0, 0x3);
+		SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL1,
+				0xFFFF);
+		SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_CLK_CTRL, 1);
+	}
+}
+
+/*
  * sde_hw_rotator_setup_timestamp_packet - setup timestamp writeback command
  * @ctx: Pointer to rotator context
  * @mask: Bit mask location of the timestamp
@@ -796,6 +872,156 @@
 }
 
 /*
+ * sde_hw_rotator_cdp_configs - configures the CDP registers
+ * @ctx: Pointer to rotator context
+ * @params: Pointer to parameters needed for CDP configs
+ */
+static void sde_hw_rotator_cdp_configs(struct sde_hw_rotator_context *ctx,
+		struct sde_rot_cdp_params *params)
+{
+	int reg_val;
+	u32 *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
+
+	if (!params->enable) {
+		SDE_REGDMA_WRITE(wrptr, params->offset, 0x0);
+		goto end;
+	}
+
+	reg_val = BIT(0); /* enable cdp */
+
+	if (sde_mdp_is_ubwc_format(params->fmt))
+		reg_val |= BIT(1); /* enable UBWC meta cdp */
+
+	if (sde_mdp_is_ubwc_format(params->fmt)
+			|| sde_mdp_is_tilea4x_format(params->fmt)
+			|| sde_mdp_is_tilea5x_format(params->fmt))
+		reg_val |= BIT(2); /* enable tile amortize */
+
+	reg_val |= BIT(3); /* enable preload addr ahead cnt 64 */
+
+	SDE_REGDMA_WRITE(wrptr, params->offset, reg_val);
+
+end:
+	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
+}
+
+/*
+ * sde_hw_rotator_setup_qos_lut_wr - Set QoS LUT/Danger LUT/Safe LUT configs
+ * for the WRITEBACK rotator for inline and offline rotation.
+ *
+ * @ctx: Pointer to rotator context
+ */
+static void sde_hw_rotator_setup_qos_lut_wr(struct sde_hw_rotator_context *ctx)
+{
+	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+	u32 *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
+
+	/* Offline rotation setting */
+	if (!ctx->sbuf_mode) {
+		/* QOS LUT WR setting */
+		if (test_bit(SDE_QOS_LUT, mdata->sde_qos_map)) {
+			SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_0,
+					mdata->lut_cfg[SDE_ROT_WR].creq_lut_0);
+			SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_1,
+					mdata->lut_cfg[SDE_ROT_WR].creq_lut_1);
+		}
+
+		/* Danger LUT WR setting */
+		if (test_bit(SDE_QOS_DANGER_LUT, mdata->sde_qos_map))
+			SDE_REGDMA_WRITE(wrptr, ROT_WB_DANGER_LUT,
+					mdata->lut_cfg[SDE_ROT_WR].danger_lut);
+
+		/* Safe LUT WR setting */
+		if (test_bit(SDE_QOS_SAFE_LUT, mdata->sde_qos_map))
+			SDE_REGDMA_WRITE(wrptr, ROT_WB_SAFE_LUT,
+					mdata->lut_cfg[SDE_ROT_WR].safe_lut);
+
+	/* Inline rotation setting */
+	} else {
+		/* QOS LUT WR setting */
+		if (test_bit(SDE_INLINE_QOS_LUT, mdata->sde_inline_qos_map)) {
+			SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_0,
+				mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_0);
+			SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_1,
+				mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_1);
+		}
+
+		/* Danger LUT WR setting */
+		if (test_bit(SDE_INLINE_QOS_DANGER_LUT,
+					mdata->sde_inline_qos_map))
+			SDE_REGDMA_WRITE(wrptr, ROT_WB_DANGER_LUT,
+				mdata->inline_lut_cfg[SDE_ROT_WR].danger_lut);
+
+		/* Safe LUT WR setting */
+		if (test_bit(SDE_INLINE_QOS_SAFE_LUT,
+					mdata->sde_inline_qos_map))
+			SDE_REGDMA_WRITE(wrptr, ROT_WB_SAFE_LUT,
+				mdata->inline_lut_cfg[SDE_ROT_WR].safe_lut);
+	}
+
+	/* Update command queue write ptr */
+	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
+}
+
+/*
+ * sde_hw_rotator_setup_qos_lut_rd - Set QoS LUT/Danger LUT/Safe LUT configs
+ * for the SSPP rotator for inline and offline rotation.
+ *
+ * @ctx: Pointer to rotator context
+ */
+static void sde_hw_rotator_setup_qos_lut_rd(struct sde_hw_rotator_context *ctx)
+{
+	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+	u32 *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
+
+	/* Offline rotation setting */
+	if (!ctx->sbuf_mode) {
+		/* QOS LUT RD setting */
+		if (test_bit(SDE_QOS_LUT, mdata->sde_qos_map)) {
+			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_0,
+					mdata->lut_cfg[SDE_ROT_RD].creq_lut_0);
+			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_1,
+					mdata->lut_cfg[SDE_ROT_RD].creq_lut_1);
+		}
+
+		/* Danger LUT RD setting */
+		if (test_bit(SDE_QOS_DANGER_LUT, mdata->sde_qos_map))
+			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_DANGER_LUT,
+					mdata->lut_cfg[SDE_ROT_RD].danger_lut);
+
+		/* Safe LUT RD setting */
+		if (test_bit(SDE_QOS_SAFE_LUT, mdata->sde_qos_map))
+			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SAFE_LUT,
+					mdata->lut_cfg[SDE_ROT_RD].safe_lut);
+
+	/* Inline rotation setting */
+	} else {
+		/* QOS LUT RD setting */
+		if (test_bit(SDE_INLINE_QOS_LUT, mdata->sde_inline_qos_map)) {
+			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_0,
+				mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_0);
+			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_1,
+				mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_1);
+		}
+
+		/* Danger LUT RD setting */
+		if (test_bit(SDE_INLINE_QOS_DANGER_LUT,
+					mdata->sde_inline_qos_map))
+			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_DANGER_LUT,
+				mdata->inline_lut_cfg[SDE_ROT_RD].danger_lut);
+
+		/* Safe LUT RD setting */
+		if (test_bit(SDE_INLINE_QOS_SAFE_LUT,
+					mdata->sde_inline_qos_map))
+			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SAFE_LUT,
+				mdata->inline_lut_cfg[SDE_ROT_RD].safe_lut);
+	}
+
+	/* Update command queue write ptr */
+	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
+}
+
+/*
  * sde_hw_rotator_setup_fetchengine - setup fetch engine
  * @ctx: Pointer to rotator context
  * @queue_id: Priority queue identifier
@@ -814,6 +1040,7 @@
 	struct sde_hw_rotator *rot = ctx->rot;
 	struct sde_mdp_format_params *fmt;
 	struct sde_mdp_data *data;
+	struct sde_rot_cdp_params cdp_params = {0};
 	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
 	u32 *wrptr;
 	u32 opmode = 0;
@@ -985,13 +1212,29 @@
 		ctx->is_secure = false;
 	}
 
+	/* Update command queue write ptr */
+	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
+
+	/* CDP register RD setting */
+	cdp_params.enable = test_bit(SDE_QOS_CDP, mdata->sde_qos_map) ?
+					 mdata->enable_cdp[SDE_ROT_RD] : false;
+	cdp_params.fmt = fmt;
+	cdp_params.offset = ROT_SSPP_CDP_CNTL;
+	sde_hw_rotator_cdp_configs(ctx, &cdp_params);
+
+	/* QOS LUT/ Danger LUT/ Safe LUT RD setting */
+	sde_hw_rotator_setup_qos_lut_rd(ctx);
+
+	wrptr = sde_hw_rotator_get_regdma_segment(ctx);
+
 	/*
 	 * Determine if traffic shaping is required. Only enable traffic
 	 * shaping when content is 4k@30fps. The actual traffic shaping
 	 * bandwidth calculation is done in output setup.
 	 */
-	if (((cfg->src_rect->w * cfg->src_rect->h) >= RES_UHD) &&
-			(cfg->fps <= 30)) {
+	if (((!ctx->sbuf_mode)
+			&& (cfg->src_rect->w * cfg->src_rect->h) >= RES_UHD)
+			&& (cfg->fps <= 30)) {
 		SDEROT_DBG("Enable Traffic Shaper\n");
 		ctx->is_traffic_shaping = true;
 	} else {
@@ -1017,9 +1260,11 @@
 {
 	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
 	struct sde_mdp_format_params *fmt;
+	struct sde_rot_cdp_params cdp_params = {0};
 	u32 *wrptr;
 	u32 pack = 0;
 	u32 dst_format = 0;
+	u32 partial_write = 0;
 	int i;
 
 	wrptr = sde_hw_rotator_get_regdma_segment(ctx);
@@ -1103,8 +1348,13 @@
 			cfg->v_downscale_factor |
 			(cfg->h_downscale_factor << 16));
 
+	/* partial write check */
+	if (test_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map) &&
+			!sde_mdp_is_ubwc_format(fmt))
+		partial_write = BIT(10);
+
 	/* write config setup for bank configuration */
-	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG,
+	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG, partial_write |
 			(ctx->rot->highest_bank & 0x3) << 8);
 
 	if (test_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map))
@@ -1120,8 +1370,23 @@
 	SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, ctx->op_mode |
 			(flags & SDE_ROT_FLAG_ROT_90 ? BIT(1) : 0) | BIT(0));
 
+	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
+
+	/* CDP register WR setting */
+	cdp_params.enable = test_bit(SDE_QOS_CDP, mdata->sde_qos_map) ?
+					mdata->enable_cdp[SDE_ROT_WR] : false;
+	cdp_params.fmt = fmt;
+	cdp_params.offset = ROT_WB_CDP_CNTL;
+	sde_hw_rotator_cdp_configs(ctx, &cdp_params);
+
+	/* QOS LUT/ Danger LUT/ Safe LUT WR setting */
+	sde_hw_rotator_setup_qos_lut_wr(ctx);
+
+	wrptr = sde_hw_rotator_get_regdma_segment(ctx);
+
 	/* setup traffic shaper for 4k 30fps content or if prefill_bw is set */
-	if (ctx->is_traffic_shaping || cfg->prefill_bw) {
+	if (!ctx->sbuf_mode &&
+			(ctx->is_traffic_shaping || cfg->prefill_bw)) {
 		u32 bw;
 
 		/*
@@ -2136,7 +2401,7 @@
 			item->input.format, item->output.format,
 			entry->perf->config.frame_rate);
 
-	if (mdata->default_ot_rd_limit) {
+	if (!ctx->sbuf_mode && mdata->default_ot_rd_limit) {
 		struct sde_mdp_set_ot_params ot_params;
 
 		memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
@@ -2158,7 +2423,7 @@
 		sde_mdp_set_ot_limit(&ot_params);
 	}
 
-	if (mdata->default_ot_wr_limit) {
+	if (!ctx->sbuf_mode && mdata->default_ot_wr_limit) {
 		struct sde_mdp_set_ot_params ot_params;
 
 		memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
@@ -2189,46 +2454,9 @@
 		SDE_ROTREG_WRITE(rot->mdss_base, ROT_SSPP_CREQ_LUT, qos_lut);
 	}
 
-	/* Set CDP control registers to 0 if CDP is disabled */
-	if (!test_bit(SDE_QOS_CDP, mdata->sde_qos_map)) {
-		SDE_ROTREG_WRITE(rot->mdss_base, ROT_SSPP_CDP_CNTL, 0x0);
-		SDE_ROTREG_WRITE(rot->mdss_base, ROT_WB_CDP_CNTL, 0x0);
-	}
-
-	if (mdata->npriority_lvl > 0) {
-		u32 mask, reg_val, i, vbif_qos;
-
-		for (i = 0; i < mdata->npriority_lvl; i++) {
-			reg_val = SDE_VBIF_READ(mdata,
-					MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4);
-			mask = 0x3 << (XIN_SSPP * 2);
-			reg_val &= ~(mask);
-			vbif_qos = mdata->vbif_nrt_qos[i];
-			reg_val |= vbif_qos << (XIN_SSPP * 2);
-			/* ensure write is issued after the read operation */
-			mb();
-			SDE_VBIF_WRITE(mdata,
-					MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4,
-					reg_val);
-		}
-	}
-
-	/* Enable write gather for writeback to remove write gaps, which
-	 * may hang AXI/BIMC/SDE.
-	 */
-	SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_WRITE_GATHTER_EN,
-			BIT(XIN_WRITEBACK));
-
-	/*
-	 * For debug purpose, disable clock gating, i.e. Clocks always on
-	 */
-	if (mdata->clk_always_on) {
-		SDE_VBIF_WRITE(mdata, MMSS_VBIF_CLKON, 0x3);
-		SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0, 0x3);
-		SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL1,
-				0xFFFF);
-		SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_CLK_CTRL, 1);
-	}
+	/* VBIF QoS and other settings */
+	if (!ctx->sbuf_mode)
+		sde_hw_rotator_vbif_setting(rot);
 
 	return 0;
 
@@ -2337,7 +2565,6 @@
 
 	clear_bit(SDE_QOS_PER_PIPE_IB, mdata->sde_qos_map);
 	set_bit(SDE_QOS_OVERHEAD_FACTOR, mdata->sde_qos_map);
-	clear_bit(SDE_QOS_CDP, mdata->sde_qos_map);
 	set_bit(SDE_QOS_OTLIM, mdata->sde_qos_map);
 	set_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map);
 	clear_bit(SDE_QOS_SIMPLIFIED_PREFILL, mdata->sde_qos_map);
@@ -2368,6 +2595,7 @@
 		SDEROT_DBG("Supporting sys cache inline rotation\n");
 		set_bit(SDE_CAPS_SBUF_1,  mdata->sde_caps_map);
 		set_bit(SDE_CAPS_UBWC_2,  mdata->sde_caps_map);
+		set_bit(SDE_CAPS_PARTIALWR,  mdata->sde_caps_map);
 		rot->inpixfmts = sde_hw_rotator_v4_inpixfmts;
 		rot->num_inpixfmt = ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
 		rot->outpixfmts = sde_hw_rotator_v4_outpixfmts;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
index aa762dd..d2b81d5 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
@@ -81,6 +81,8 @@
 #define ROT_SSPP_CREQ_LUT                       (SDE_ROT_SSPP_OFFSET+0x68)
 #define ROT_SSPP_QOS_CTRL                       (SDE_ROT_SSPP_OFFSET+0x6C)
 #define ROT_SSPP_SRC_ADDR_SW_STATUS             (SDE_ROT_SSPP_OFFSET+0x70)
+#define ROT_SSPP_CREQ_LUT_0                     (SDE_ROT_SSPP_OFFSET+0x74)
+#define ROT_SSPP_CREQ_LUT_1                     (SDE_ROT_SSPP_OFFSET+0x78)
 #define ROT_SSPP_CURRENT_SRC0_ADDR              (SDE_ROT_SSPP_OFFSET+0xA4)
 #define ROT_SSPP_CURRENT_SRC1_ADDR              (SDE_ROT_SSPP_OFFSET+0xA8)
 #define ROT_SSPP_CURRENT_SRC2_ADDR              (SDE_ROT_SSPP_OFFSET+0xAC)
@@ -167,6 +169,8 @@
 #define ROT_WB_CREQ_LUT                         (SDE_ROT_WB_OFFSET+0x08C)
 #define ROT_WB_QOS_CTRL                         (SDE_ROT_WB_OFFSET+0x090)
 #define ROT_WB_SYS_CACHE_MODE                   (SDE_ROT_WB_OFFSET+0x094)
+#define ROT_WB_CREQ_LUT_0                       (SDE_ROT_WB_OFFSET+0x098)
+#define ROT_WB_CREQ_LUT_1                       (SDE_ROT_WB_OFFSET+0x09C)
 #define ROT_WB_UBWC_STATIC_CTRL                 (SDE_ROT_WB_OFFSET+0x144)
 #define ROT_WB_SBUF_STATUS_PLANE0               (SDE_ROT_WB_OFFSET+0x148)
 #define ROT_WB_SBUF_STATUS_PLANE1               (SDE_ROT_WB_OFFSET+0x14C)
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index a477340..e7e9278 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -515,13 +515,13 @@
 		buffer = HFI_BUFFER_EXTRADATA_OUTPUT2;
 		break;
 	case HAL_BUFFER_INTERNAL_SCRATCH:
-		buffer = HFI_BUFFER_INTERNAL_SCRATCH;
+		buffer = HFI_BUFFER_COMMON_INTERNAL_SCRATCH;
 		break;
 	case HAL_BUFFER_INTERNAL_SCRATCH_1:
-		buffer = HFI_BUFFER_INTERNAL_SCRATCH_1;
+		buffer = HFI_BUFFER_COMMON_INTERNAL_SCRATCH_1;
 		break;
 	case HAL_BUFFER_INTERNAL_SCRATCH_2:
-		buffer = HFI_BUFFER_INTERNAL_SCRATCH_2;
+		buffer = HFI_BUFFER_COMMON_INTERNAL_SCRATCH_2;
 		break;
 	case HAL_BUFFER_INTERNAL_PERSIST:
 		buffer = HFI_BUFFER_INTERNAL_PERSIST;
@@ -1863,14 +1863,6 @@
 		pkt->size += sizeof(u32) + sizeof(*work_mode);
 		break;
 	}
-	case HAL_PARAM_USE_SYS_CACHE:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-			HFI_PROPERTY_PARAM_USE_SYS_CACHE,
-			(((struct hal_enable *) pdata)->enable));
-		pkt->size += sizeof(u32) * 2;
-		break;
-	}
 	/* FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET */
 	case HAL_CONFIG_BUFFER_REQUIREMENTS:
 	case HAL_CONFIG_PRIORITY:
diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c
index b424fbb..89e8356 100644
--- a/drivers/media/platform/msm/vidc/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c
@@ -1082,19 +1082,19 @@
 			buffreq->buffer[5].buffer_type =
 				HAL_BUFFER_EXTRADATA_OUTPUT2;
 			break;
-		case HFI_BUFFER_INTERNAL_SCRATCH:
+		case HFI_BUFFER_COMMON_INTERNAL_SCRATCH:
 			memcpy(&buffreq->buffer[6], hfi_buf_req,
 			sizeof(struct hfi_buffer_requirements));
 			buffreq->buffer[6].buffer_type =
 				HAL_BUFFER_INTERNAL_SCRATCH;
 			break;
-		case HFI_BUFFER_INTERNAL_SCRATCH_1:
+		case HFI_BUFFER_COMMON_INTERNAL_SCRATCH_1:
 			memcpy(&buffreq->buffer[7], hfi_buf_req,
 				sizeof(struct hfi_buffer_requirements));
 			buffreq->buffer[7].buffer_type =
 				HAL_BUFFER_INTERNAL_SCRATCH_1;
 			break;
-		case HFI_BUFFER_INTERNAL_SCRATCH_2:
+		case HFI_BUFFER_COMMON_INTERNAL_SCRATCH_2:
 			memcpy(&buffreq->buffer[8], hfi_buf_req,
 				sizeof(struct hfi_buffer_requirements));
 			buffreq->buffer[8].buffer_type =
diff --git a/drivers/media/platform/msm/vidc/msm_smem.c b/drivers/media/platform/msm/vidc/msm_smem.c
index 3d3d567..074ea4fa 100644
--- a/drivers/media/platform/msm/vidc/msm_smem.c
+++ b/drivers/media/platform/msm/vidc/msm_smem.c
@@ -94,10 +94,17 @@
 		trace_msm_smem_buffer_iommu_op_start("MAP", 0, 0,
 			align, *iova, *buffer_size);
 
-		/* Map a scatterlist into an SMMU with system cacheability */
-		rc = msm_dma_map_sg_attrs(cb->dev, table->sgl,
-			table->nents, DMA_BIDIRECTIONAL,
-			buf, DMA_ATTR_IOMMU_USE_UPSTREAM_HINT);
+		/* Map a scatterlist into SMMU */
+		if (smem_client->res->sys_cache_present) {
+			/* with sys cache attribute & delayed unmap */
+			rc = msm_dma_map_sg_attrs(cb->dev, table->sgl,
+				table->nents, DMA_BIDIRECTIONAL,
+				buf, DMA_ATTR_IOMMU_USE_UPSTREAM_HINT);
+		} else {
+			/* with delayed unmap */
+			rc = msm_dma_map_sg_lazy(cb->dev, table->sgl,
+				table->nents, DMA_BIDIRECTIONAL, buf);
+		}
 
 		if (rc != table->nents) {
 			dprintk(VIDC_ERR,
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 14eb3ab..aa5f18d 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -1202,6 +1202,16 @@
 		else if (ctrl->id == V4L2_CID_MPEG_VIDC_VIDEO_NUM_B_FRAMES)
 			num_b = ctrl->val;
 
+		if ((num_b < inst->capability.bframe.min) ||
+			(num_b > inst->capability.bframe.max)) {
+			dprintk(VIDC_ERR,
+				"Error setting num b frames %d min, max supported is %d, %d\n",
+				num_b, inst->capability.bframe.min,
+				inst->capability.bframe.max);
+			rc = -ENOTSUPP;
+			break;
+		}
+
 		property_id = HAL_CONFIG_VENC_INTRA_PERIOD;
 		intra_period.pframes = num_p;
 		intra_period.bframes = num_b;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 05d6d63..499d851 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -168,6 +168,9 @@
 	case V4L2_CID_MPEG_VIDC_VIDEO_BLUR_HEIGHT:
 		msm_vidc_ctrl_get_range(ctrl, &inst->capability.blur_height);
 		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_NUM_B_FRAMES:
+		msm_vidc_ctrl_get_range(ctrl, &inst->capability.bframe);
+		break;
 	default:
 		rc = -EINVAL;
 	}
@@ -1447,8 +1450,6 @@
 		}
 	}
 
-	msm_comm_set_use_sys_cache(inst);
-
 	/*
 	 * For seq_changed_insufficient, driver should set session_continue
 	 * to firmware after the following sequence
@@ -1934,6 +1935,7 @@
 		ctrl->val = bufreq->buffer_count_min_host;
 		break;
 	case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
+		msm_comm_try_get_bufreqs(inst);
 		bufreq = get_buff_req_buffer(inst, HAL_BUFFER_INPUT);
 		if (!bufreq) {
 			dprintk(VIDC_ERR,
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 058af0e..b1a8e8b 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -5636,40 +5636,3 @@
 	return VENUS_BUFFER_SIZE(COLOR_FMT_NV12_BPP10_UBWC, width, height);
 }
 
-void msm_comm_set_use_sys_cache(struct msm_vidc_inst *inst)
-{
-	struct hal_enable syscache_use;
-	int rc = 0;
-
-
-	if (!inst->core->resources.sys_cache_enabled)
-		goto exit;
-
-	syscache_use.enable = false;
-	inst->clk_data.use_sys_cache = false;
-
-	if (inst->flags & VIDC_REALTIME)
-		syscache_use.enable = true;
-
-	if (inst->flags & VIDC_THUMBNAIL)
-		syscache_use.enable = false;
-
-	dprintk(VIDC_DBG,
-		"set_use_sys_cache: enable = %d inst = %pK flags =%d\n",
-		syscache_use.enable, inst, inst->flags);
-	rc = msm_comm_try_set_prop(inst, HAL_PARAM_USE_SYS_CACHE,
-		&syscache_use);
-	if (rc) {
-		dprintk(VIDC_ERR, "set_use_sys_cache: failed!!\n");
-			inst->clk_data.use_sys_cache = false;
-		goto exit;
-	}
-
-	inst->clk_data.use_sys_cache = syscache_use.enable;
-
-	return;
-
-exit:
-	return;
-}
-
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
index f62c132..a8f776f 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
@@ -31,6 +31,7 @@
 bool msm_vidc_thermal_mitigation_disabled = !true;
 bool msm_vidc_clock_scaling = true;
 bool msm_vidc_debug_timeout = !true;
+bool msm_vidc_syscache_disable = true;
 
 #define MAX_DBG_BUF_SIZE 4096
 
@@ -186,6 +187,8 @@
 			&msm_vidc_clock_scaling) &&
 	__debugfs_create(bool, "debug_timeout",
 			&msm_vidc_debug_timeout);
+	__debugfs_create(bool, "disable_video_syscache",
+			&msm_vidc_syscache_disable);
 
 #undef __debugfs_create
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.h b/drivers/media/platform/msm/vidc/msm_vidc_debug.h
index f5c8e5a..c23ff82 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.h
@@ -64,6 +64,7 @@
 extern bool msm_vidc_thermal_mitigation_disabled;
 extern bool msm_vidc_clock_scaling;
 extern bool msm_vidc_debug_timeout;
+extern bool msm_vidc_syscache_disable;
 
 #define VIDC_MSG_PRIO2STRING(__level) ({ \
 	char *__str; \
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index 17c3045..37bccbd 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -224,7 +224,6 @@
 	u32 core_id;
 	enum hal_work_mode work_mode;
 	bool low_latency_mode;
-	bool use_sys_cache;
 };
 
 struct profile_data {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
index 5cf4628..d259072 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
@@ -275,12 +275,12 @@
 			"cache-slice-names", c, &vsc->name);
 	}
 
-	res->sys_cache_enabled = true;
+	res->sys_cache_present = true;
 
 	return 0;
 
 err_load_subcache_table_fail:
-	res->sys_cache_enabled = false;
+	res->sys_cache_present = false;
 	subcaches->count = 0;
 	subcaches->subcache_tbl = NULL;
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_resources.h b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
index d76985e..b07785a 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_resources.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
@@ -159,6 +159,7 @@
 	struct dcvs_table *dcvs_tbl;
 	uint32_t dcvs_tbl_size;
 	struct dcvs_limit *dcvs_limit;
+	bool sys_cache_present;
 	bool sys_cache_enabled;
 	struct subcache_set subcache_set;
 	struct reg_set reg_set;
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 5a8dd26..8968764 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -120,6 +120,11 @@
 	return device->state != VENUS_STATE_DEINIT;
 }
 
+static inline bool is_sys_cache_present(struct venus_hfi_device *device)
+{
+	return device->res->sys_cache_present;
+}
+
 static void __dump_packet(u8 *packet, enum vidc_msg_prio log_level)
 {
 	u32 c = 0, packet_size = *(u32 *)packet;
@@ -3492,7 +3497,7 @@
 		goto exit;
 	}
 
-	if (!device->res->sys_cache_enabled)
+	if (!is_sys_cache_present(device))
 		goto exit;
 
 	venus_hfi_for_each_subcache_reverse(device, sinfo) {
@@ -3519,7 +3524,7 @@
 		return -EINVAL;
 	}
 
-	if (!device->res->sys_cache_enabled)
+	if (!is_sys_cache_present(device))
 		return 0;
 
 	venus_hfi_for_each_subcache(device, sinfo) {
@@ -3764,7 +3769,7 @@
 	struct hfi_resource_subcache_type *sc_res;
 	struct vidc_resource_hdr rhdr;
 
-	if (!device->res->sys_cache_enabled)
+	if (msm_vidc_syscache_disable || !is_sys_cache_present(device))
 		return 0;
 
 	memset((void *)resource, 0x0, (sizeof(u32) * VIDC_MAX_SUBCACHE_SIZE));
@@ -3812,6 +3817,8 @@
 
 	dprintk(VIDC_DBG, "Activated & Set Subcaches to Venus\n");
 
+	device->res->sys_cache_enabled = true;
+
 	return 0;
 
 err_fail_set_subacaches:
@@ -3830,7 +3837,7 @@
 	struct hfi_resource_subcache_type *sc_res;
 	struct vidc_resource_hdr rhdr;
 
-	if (!device->res->sys_cache_enabled)
+	if (msm_vidc_syscache_disable || !is_sys_cache_present(device))
 		return 0;
 
 	dprintk(VIDC_DBG, "Disabling Subcaches\n");
@@ -3877,6 +3884,8 @@
 		}
 	}
 
+	device->res->sys_cache_enabled = false;
+
 	return rc;
 }
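/*
 * Illustrative sketch only, not part of this patch: how the three pieces of
 * state touched here interact. res->sys_cache_present mirrors devicetree
 * (subcache slices were parsed), msm_vidc_syscache_disable is the debugfs
 * override added above (defaulting to true, i.e. disabled), and
 * res->sys_cache_enabled is set only once the subcaches have actually been
 * activated and communicated to Venus. The helper name is hypothetical.
 */
static bool example_video_syscache_usable(struct venus_hfi_device *device)
{
	return !msm_vidc_syscache_disable && is_sys_cache_present(device);
}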
 
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi.h b/drivers/media/platform/msm/vidc/vidc_hfi.h
index 48a6f17..5601f1b 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi.h
@@ -56,13 +56,6 @@
 #define  HFI_ERR_SESSION_START_CODE_NOT_FOUND		\
 	(HFI_OX_BASE + 0x1004)
 
-#define HFI_BUFFER_INTERNAL_SCRATCH (HFI_OX_BASE + 0x1)
-#define HFI_BUFFER_EXTRADATA_INPUT (HFI_OX_BASE + 0x2)
-#define HFI_BUFFER_EXTRADATA_OUTPUT (HFI_OX_BASE + 0x3)
-#define HFI_BUFFER_EXTRADATA_OUTPUT2 (HFI_OX_BASE + 0x4)
-#define HFI_BUFFER_INTERNAL_SCRATCH_1 (HFI_OX_BASE + 0x5)
-#define HFI_BUFFER_INTERNAL_SCRATCH_2 (HFI_OX_BASE + 0x6)
-#define HFI_BUFFER_INTERNAL_RECON (HFI_OX_BASE + 0x9)
 
 #define HFI_BUFFER_MODE_DYNAMIC (HFI_OX_BASE + 0x3)
 
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index da18377..cc35bb3 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -224,7 +224,6 @@
 	HAL_PARAM_VIDEO_CORES_USAGE,
 	HAL_PARAM_VIDEO_WORK_MODE,
 	HAL_PARAM_SECURE,
-	HAL_PARAM_USE_SYS_CACHE,
 };
 
 enum hal_domain {
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index fc638f0..0df4812 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -155,6 +155,13 @@
 #define HFI_BUFFER_OUTPUT2				(HFI_COMMON_BASE + 0x3)
 #define HFI_BUFFER_INTERNAL_PERSIST		(HFI_COMMON_BASE + 0x4)
 #define HFI_BUFFER_INTERNAL_PERSIST_1		(HFI_COMMON_BASE + 0x5)
+#define HFI_BUFFER_COMMON_INTERNAL_SCRATCH	(HFI_COMMON_BASE + 0x6)
+#define HFI_BUFFER_COMMON_INTERNAL_SCRATCH_1	(HFI_COMMON_BASE + 0x7)
+#define HFI_BUFFER_COMMON_INTERNAL_SCRATCH_2	(HFI_COMMON_BASE + 0x8)
+#define HFI_BUFFER_COMMON_INTERNAL_RECON	(HFI_COMMON_BASE + 0x9)
+#define HFI_BUFFER_EXTRADATA_OUTPUT		(HFI_COMMON_BASE + 0xA)
+#define HFI_BUFFER_EXTRADATA_OUTPUT2		(HFI_COMMON_BASE + 0xB)
+#define HFI_BUFFER_EXTRADATA_INPUT		(HFI_COMMON_BASE + 0xC)
 
 #define  HFI_BITDEPTH_8				(HFI_COMMON_BASE + 0x0)
 #define  HFI_BITDEPTH_9				(HFI_COMMON_BASE + 0x1)
@@ -220,8 +227,6 @@
 	(HFI_PROPERTY_PARAM_COMMON_START + 0x010)
 #define  HFI_PROPERTY_PARAM_SECURE_SESSION		\
 	(HFI_PROPERTY_PARAM_COMMON_START + 0x011)
-#define  HFI_PROPERTY_PARAM_USE_SYS_CACHE				\
-	(HFI_PROPERTY_PARAM_COMMON_START + 0x012)
 #define  HFI_PROPERTY_PARAM_WORK_MODE                       \
 	(HFI_PROPERTY_PARAM_COMMON_START + 0x015)
 
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 2a1367e..9520166 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -17,6 +17,7 @@
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_irq.h>
 #include <linux/platform_device.h>
 #include <linux/pinctrl/machine.h>
 #include <linux/pinctrl/pinctrl.h>
@@ -31,7 +32,7 @@
 #include <linux/reboot.h>
 #include <linux/pm.h>
 #include <linux/log2.h>
-
+#include <linux/irq.h>
 #include "../core.h"
 #include "../pinconf.h"
 #include "pinctrl-msm.h"
@@ -749,6 +750,91 @@
 	.irq_set_wake   = msm_gpio_irq_set_wake,
 };
 
+static void msm_dirconn_irq_mask(struct irq_data *d)
+{
+	struct irq_desc *desc = irq_data_to_desc(d);
+	struct irq_data *parent_data = irq_get_irq_data(desc->parent_irq);
+
+	if (parent_data->chip->irq_mask)
+		parent_data->chip->irq_mask(parent_data);
+}
+
+static void msm_dirconn_irq_unmask(struct irq_data *d)
+{
+	struct irq_desc *desc = irq_data_to_desc(d);
+	struct irq_data *parent_data = irq_get_irq_data(desc->parent_irq);
+
+	if (parent_data->chip->irq_unmask)
+		parent_data->chip->irq_unmask(parent_data);
+}
+
+static void msm_dirconn_irq_ack(struct irq_data *d)
+{
+	struct irq_desc *desc = irq_data_to_desc(d);
+	struct irq_data *parent_data = irq_get_irq_data(desc->parent_irq);
+
+	if (parent_data->chip->irq_ack)
+		parent_data->chip->irq_ack(parent_data);
+}
+
+static void msm_dirconn_irq_eoi(struct irq_data *d)
+{
+	struct irq_desc *desc = irq_data_to_desc(d);
+	struct irq_data *parent_data = irq_get_irq_data(desc->parent_irq);
+
+	if (parent_data->chip->irq_eoi)
+		parent_data->chip->irq_eoi(parent_data);
+}
+
+static int msm_dirconn_irq_set_affinity(struct irq_data *d,
+		const struct cpumask *maskval, bool force)
+{
+	struct irq_desc *desc = irq_data_to_desc(d);
+	struct irq_data *parent_data = irq_get_irq_data(desc->parent_irq);
+
+	if (parent_data->chip->irq_set_affinity)
+		return parent_data->chip->irq_set_affinity(parent_data,
+				maskval, force);
+	return 0;
+}
+
+static int msm_dirconn_irq_set_vcpu_affinity(struct irq_data *d,
+		void *vcpu_info)
+{
+	struct irq_desc *desc = irq_data_to_desc(d);
+	struct irq_data *parent_data = irq_get_irq_data(desc->parent_irq);
+
+	if (parent_data->chip->irq_set_vcpu_affinity)
+		return parent_data->chip->irq_set_vcpu_affinity(parent_data,
+				vcpu_info);
+	return 0;
+}
+
+static int msm_dirconn_irq_set_type(struct irq_data *d, unsigned int type)
+{
+	struct irq_desc *desc = irq_data_to_desc(d);
+	struct irq_data *parent_data = irq_get_irq_data(desc->parent_irq);
+
+	if (parent_data->chip->irq_set_type)
+		return parent_data->chip->irq_set_type(parent_data, type);
+
+	return 0;
+}
+
+static struct irq_chip msm_dirconn_irq_chip = {
+	.name			= "msmgpio-dc",
+	.irq_mask		= msm_dirconn_irq_mask,
+	.irq_unmask		= msm_dirconn_irq_unmask,
+	.irq_eoi		= msm_dirconn_irq_eoi,
+	.irq_ack		= msm_dirconn_irq_ack,
+	.irq_set_type		= msm_dirconn_irq_set_type,
+	.irq_set_affinity	= msm_dirconn_irq_set_affinity,
+	.irq_set_vcpu_affinity	= msm_dirconn_irq_set_vcpu_affinity,
+	.flags			= IRQCHIP_SKIP_SET_WAKE
+					| IRQCHIP_MASK_ON_SUSPEND
+					| IRQCHIP_SET_TYPE_MASKED,
+};
+
 static void msm_gpio_irq_handler(struct irq_desc *desc)
 {
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
@@ -783,6 +869,55 @@
 	chained_irq_exit(chip, desc);
 }
 
+static void msm_gpio_dirconn_handler(struct irq_desc *desc)
+{
+	struct irq_data *irqd = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+
+	chained_irq_enter(chip, desc);
+	generic_handle_irq(irqd->irq);
+	chained_irq_exit(chip, desc);
+}
+
+static void msm_gpio_setup_dir_connects(struct msm_pinctrl *pctrl)
+{
+	struct device_node *parent_node;
+	struct irq_domain *parent_domain;
+	struct irq_fwspec fwspec;
+	unsigned int i;
+
+	parent_node = of_irq_find_parent(pctrl->dev->of_node);
+
+	if (!parent_node)
+		return;
+
+	parent_domain = irq_find_host(parent_node);
+	if (!parent_domain)
+		return;
+
+	fwspec.fwnode = parent_domain->fwnode;
+	for (i = 0; i < pctrl->soc->n_dir_conns; i++) {
+		const struct msm_dir_conn *dirconn = &pctrl->soc->dir_conn[i];
+		unsigned int parent_irq;
+		int irq;
+
+		fwspec.param[0] = 0; /* SPI */
+		fwspec.param[1] = dirconn->hwirq;
+		fwspec.param[2] = IRQ_TYPE_NONE;
+		fwspec.param_count = 3;
+		parent_irq = irq_create_fwspec_mapping(&fwspec);
+
+		irq = irq_find_mapping(pctrl->chip.irqdomain, dirconn->gpio);
+
+		irq_set_parent(irq, parent_irq);
+		irq_set_chip(irq, &msm_dirconn_irq_chip);
+		irq_set_chip_data(irq, irq_get_irq_data(parent_irq));
+		__irq_set_handler(parent_irq, msm_gpio_dirconn_handler,
+				false, NULL);
+		irq_set_handler_data(parent_irq, irq_get_irq_data(irq));
+	}
+}
+
 static int msm_gpio_init(struct msm_pinctrl *pctrl)
 {
 	struct gpio_chip *chip;
@@ -827,6 +962,7 @@
 	gpiochip_set_chained_irqchip(chip, &msm_gpio_irq_chip, pctrl->irq,
 				     msm_gpio_irq_handler);
 
+	msm_gpio_setup_dir_connects(pctrl);
 	return 0;
 }
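/*
 * Illustrative sketch only, not part of this patch: how a single msm_dir_conn
 * entry is interpreted by msm_gpio_setup_dir_connects(). Using the sdm845
 * entry {1, 510} added later in this patch, GPIO 1 is hard-wired to GIC SPI
 * 510, so the fwspec handed to the parent domain uses the usual three-cell
 * GIC encoding; the helper name below is hypothetical.
 */
static unsigned int example_dirconn_parent_irq(struct irq_domain *parent)
{
	struct irq_fwspec fwspec;

	fwspec.fwnode = parent->fwnode;
	fwspec.param[0] = 0;		/* SPI class interrupt */
	fwspec.param[1] = 510;		/* dirconn->hwirq for GPIO 1 */
	fwspec.param[2] = IRQ_TYPE_NONE;
	fwspec.param_count = 3;

	/* the GPIO's own irq is then re-parented onto this mapping */
	return irq_create_fwspec_mapping(&fwspec);
}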
 
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
index e986fda..0e223e0 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.h
+++ b/drivers/pinctrl/qcom/pinctrl-msm.h
@@ -95,6 +95,16 @@
 	unsigned intr_polarity_bit:5;
 	unsigned intr_detection_bit:5;
 	unsigned intr_detection_width:5;
+};
+
+/**
+ * struct msm_dir_conn - Direct GPIO connect configuration
+ * @gpio:	GPIO pin number
+ * @hwirq:	The GIC interrupt that the pin is connected to
+ */
+struct msm_dir_conn {
+	unsigned int gpio;
+	irq_hw_number_t hwirq;
 };
 
 /**
@@ -106,6 +116,8 @@
  * @groups:     An array describing all pin groups the pin SoC supports.
  * @ngroups:    The numbmer of entries in @groups.
  * @ngpio:      The number of pingroups the driver should expose as GPIOs.
+ * @dir_conn:   An array describing all the pins directly connected to GIC.
+ * @n_dir_conns: The number of pins directly connected to GIC.
  */
 struct msm_pinctrl_soc_data {
 	const struct pinctrl_pin_desc *pins;
@@ -115,6 +127,8 @@
 	const struct msm_pingroup *groups;
 	unsigned ngroups;
 	unsigned ngpios;
+	const struct msm_dir_conn *dir_conn;
+	unsigned int n_dir_conns;
 };
 
 int msm_pinctrl_probe(struct platform_device *pdev,
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm845.c b/drivers/pinctrl/qcom/pinctrl-sdm845.c
index 30c31a8..7d125eb 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm845.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm845.c
@@ -2377,6 +2377,84 @@
 	UFS_RESET(ufs_reset, 0x99f000),
 };
 
+static const struct msm_dir_conn sdm845_dir_conn[] = {
+	{1, 510},
+	{3, 511},
+	{5, 512},
+	{10, 513},
+	{11, 514},
+	{20, 515},
+	{22, 516},
+	{24, 517},
+	{26, 518},
+	{30, 519},
+	{31, 639},
+	{32, 521},
+	{34, 522},
+	{36, 523},
+	{37, 524},
+	{38, 525},
+	{39, 526},
+	{40, 527},
+	{41, 637},
+	{43, 529},
+	{44, 530},
+	{46, 531},
+	{48, 532},
+	{49, 640},
+	{52, 534},
+	{53, 535},
+	{54, 536},
+	{56, 537},
+	{57, 538},
+	{58, 539},
+	{59, 540},
+	{60, 541},
+	{61, 542},
+	{62, 543},
+	{63, 544},
+	{64, 545},
+	{66, 546},
+	{68, 547},
+	{71, 548},
+	{73, 549},
+	{77, 550},
+	{78, 551},
+	{79, 552},
+	{80, 553},
+	{84, 554},
+	{85, 555},
+	{86, 556},
+	{88, 557},
+	{89, 638},
+	{91, 559},
+	{92, 560},
+	{95, 561},
+	{96, 562},
+	{97, 563},
+	{101, 564},
+	{103, 565},
+	{104, 566},
+	{115, 570},
+	{116, 571},
+	{117, 572},
+	{118, 573},
+	{119, 609},
+	{120, 610},
+	{121, 611},
+	{122, 612},
+	{123, 613},
+	{124, 614},
+	{125, 615},
+	{127, 617},
+	{128, 618},
+	{129, 619},
+	{130, 620},
+	{132, 621},
+	{133, 622},
+	{145, 623},
+};
+
 static const struct msm_pinctrl_soc_data sdm845_pinctrl = {
 	.pins = sdm845_pins,
 	.npins = ARRAY_SIZE(sdm845_pins),
@@ -2385,6 +2463,8 @@
 	.groups = sdm845_groups,
 	.ngroups = ARRAY_SIZE(sdm845_groups),
 	.ngpios = 150,
+	.dir_conn = sdm845_dir_conn,
+	.n_dir_conns = ARRAY_SIZE(sdm845_dir_conn),
 };
 
 static int sdm845_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index bf13ac5..04d807f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -3659,7 +3659,6 @@
 	struct ipa_mem_buffer mem_info = {0};
 	static int total_cnt;
 
-	IPADBG("\n");
 	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
 		ipa3_ctx->ep[clnt_hdl].valid == 0) {
 		IPAERR("bad parm 0x%x\n", clnt_hdl);
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index ad627fb..7ad650e 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2980,7 +2980,8 @@
 		goto out2;
 
 	if (rdev->supply && (rdev->desc->min_dropout_uV ||
-				!rdev->desc->ops->get_voltage)) {
+				!(rdev->desc->ops->get_voltage ||
+					rdev->desc->ops->get_voltage_sel))) {
 		int current_supply_uV;
 		int selector;
 
diff --git a/drivers/regulator/cprh-kbss-regulator.c b/drivers/regulator/cprh-kbss-regulator.c
index cf7c35d..deb0ce5 100644
--- a/drivers/regulator/cprh-kbss-regulator.c
+++ b/drivers/regulator/cprh-kbss-regulator.c
@@ -86,7 +86,7 @@
  */
 #define CPRH_MSM8998_KBSS_FUSE_COMBO_COUNT	32
 #define CPRH_SDM660_KBSS_FUSE_COMBO_COUNT	16
-#define CPRH_SDM845_KBSS_FUSE_COMBO_COUNT	16
+#define CPRH_SDM845_KBSS_FUSE_COMBO_COUNT	24
 
 /*
  * Constants which define the name of each fuse corner.
diff --git a/drivers/slimbus/slim-msm-ngd.c b/drivers/slimbus/slim-msm-ngd.c
index f7f0269..a72cb17 100644
--- a/drivers/slimbus/slim-msm-ngd.c
+++ b/drivers/slimbus/slim-msm-ngd.c
@@ -223,6 +223,7 @@
 		/* make sure autosuspend is not called until ADSP comes up*/
 		pm_runtime_get_noresume(dev->dev);
 		dev->state = MSM_CTRL_DOWN;
+		dev->qmi.deferred_resp = false;
 		msm_slim_sps_exit(dev, false);
 		ngd_dom_down(dev);
 		mutex_unlock(&dev->tx_lock);
@@ -2019,19 +2020,18 @@
 	if (!pm_runtime_enabled(dev) ||
 		(!pm_runtime_suspended(dev) &&
 			cdev->state == MSM_CTRL_IDLE)) {
+		cdev->qmi.deferred_resp = true;
 		ret = ngd_slim_runtime_suspend(dev);
 		/*
 		 * If runtime-PM still thinks it's active, then make sure its
 		 * status is in sync with HW status.
-		 * Since this suspend calls QMI api, it results in holding a
-		 * wakelock. That results in failure of first suspend.
-		 * Subsequent suspend should not call low-power transition
-		 * again since the HW is already in suspended state.
 		 */
 		if (!ret) {
 			pm_runtime_disable(dev);
 			pm_runtime_set_suspended(dev);
 			pm_runtime_enable(dev);
+		} else {
+			cdev->qmi.deferred_resp = false;
 		}
 	}
 	if (ret == -EBUSY) {
@@ -2053,13 +2053,29 @@
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev);
+	int ret = 0;
+
+	/*
+	 * If a deferred response was requested for the power-off and
+	 * collecting it fails, mark the runtime-PM status as active to
+	 * stay consistent with the HW state.
+	 */
+	if (cdev->qmi.deferred_resp) {
+		ret = msm_slim_qmi_deferred_status_req(cdev);
+		if (ret) {
+			pm_runtime_disable(dev);
+			pm_runtime_set_active(dev);
+			pm_runtime_enable(dev);
+		}
+		cdev->qmi.deferred_resp = false;
+	}
 	/*
 	 * Rely on runtime-PM to call resume in case it is enabled.
 	 * Even if it's not enabled, rely on 1st client transaction to do
 	 * clock/power on
 	 */
 	SLIM_INFO(cdev, "system resume\n");
-	return 0;
+	return ret;
 }
 #endif /* CONFIG_PM_SLEEP */
 
diff --git a/drivers/slimbus/slim-msm.c b/drivers/slimbus/slim-msm.c
index e7d3381..ef10e64 100644
--- a/drivers/slimbus/slim-msm.c
+++ b/drivers/slimbus/slim-msm.c
@@ -1224,12 +1224,16 @@
 #define SLIMBUS_QMI_POWER_RESP_V01 0x0021
 #define SLIMBUS_QMI_CHECK_FRAMER_STATUS_REQ 0x0022
 #define SLIMBUS_QMI_CHECK_FRAMER_STATUS_RESP 0x0022
+#define SLIMBUS_QMI_DEFERRED_STATUS_REQ 0x0023
+#define SLIMBUS_QMI_DEFERRED_STATUS_RESP 0x0023
 
-#define SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN 7
+#define SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN 14
 #define SLIMBUS_QMI_POWER_RESP_MAX_MSG_LEN 7
 #define SLIMBUS_QMI_SELECT_INSTANCE_REQ_MAX_MSG_LEN 14
 #define SLIMBUS_QMI_SELECT_INSTANCE_RESP_MAX_MSG_LEN 7
 #define SLIMBUS_QMI_CHECK_FRAMER_STAT_RESP_MAX_MSG_LEN 7
+#define SLIMBUS_QMI_DEFERRED_STATUS_REQ_MSG_MAX_MSG_LEN 0
+#define SLIMBUS_QMI_DEFERRED_STATUS_RESP_STAT_MSG_MAX_MSG_LEN 7
 
 enum slimbus_mode_enum_type_v01 {
 	/* To force a 32 bit signed enum. Do not change or use*/
@@ -1247,6 +1251,13 @@
 	SLIMBUS_PM_ENUM_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
 };
 
+enum slimbus_resp_enum_type_v01 {
+	SLIMBUS_RESP_ENUM_TYPE_MIN_VAL_V01 = INT_MIN,
+	SLIMBUS_RESP_SYNCHRONOUS_V01 = 1,
+	SLIMBUS_RESP_DEFERRED_V01 = 2,
+	SLIMBUS_RESP_ENUM_TYPE_MAX_VAL_V01 = INT_MAX,
+};
+
 struct slimbus_select_inst_req_msg_v01 {
 	/* Mandatory */
 	/* Hardware Instance Selection */
@@ -1269,6 +1280,12 @@
 	/* Mandatory */
 	/* Power Request Operation */
 	enum slimbus_pm_enum_type_v01 pm_req;
+
+	/* Optional */
+	/* Optional Deferred Response type Operation */
+	/* Must be set to true if type is being passed */
+	uint8_t resp_type_valid;
+	enum slimbus_resp_enum_type_v01 resp_type;
 };
 
 struct slimbus_power_resp_msg_v01 {
@@ -1283,6 +1300,9 @@
 	struct qmi_response_type_v01 resp;
 };
 
+struct slimbus_deferred_status_resp {
+	struct qmi_response_type_v01 resp;
+};
 
 static struct elem_info slimbus_select_inst_req_msg_v01_ei[] = {
 	{
@@ -1359,6 +1379,24 @@
 		.ei_array  = NULL,
 	},
 	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct slimbus_power_req_msg_v01,
+					   resp_type_valid),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum slimbus_resp_enum_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct slimbus_power_req_msg_v01,
+					   resp_type),
+	},
+	{
 		.data_type = QMI_EOTI,
 		.elem_len  = 0,
 		.elem_size = 0,
@@ -1411,6 +1449,22 @@
 	},
 };
 
+static struct elem_info slimbus_deferred_status_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct slimbus_deferred_status_resp,
+					   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+	},
+};
 static void msm_slim_qmi_recv_msg(struct kthread_work *work)
 {
 	int rc;
@@ -1488,32 +1542,56 @@
 	return 0;
 }
 
+static void slim_qmi_resp_cb(struct qmi_handle *handle, unsigned int msg_id,
+			     void *msg, void *resp_cb_data, int stat)
+{
+	struct slimbus_power_resp_msg_v01 *resp = msg;
+	struct msm_slim_ctrl *dev = resp_cb_data;
+
+	if (msg_id != SLIMBUS_QMI_POWER_RESP_V01)
+		SLIM_WARN(dev, "incorrect msg id in qmi-resp CB:0x%x", msg_id);
+	else if (resp->resp.result != QMI_RESULT_SUCCESS_V01)
+		SLIM_ERR(dev, "%s: QMI power failed 0x%x (%s)\n", __func__,
+			 resp->resp.result, get_qmi_error(&resp->resp));
+
+	complete(&dev->qmi.defer_comp);
+}
+
 static int msm_slim_qmi_send_power_request(struct msm_slim_ctrl *dev,
 				struct slimbus_power_req_msg_v01 *req)
 {
-	struct slimbus_power_resp_msg_v01 resp = { { 0, 0 } };
-	struct msg_desc req_desc, resp_desc;
+	struct slimbus_power_resp_msg_v01 *resp =
+		(struct slimbus_power_resp_msg_v01 *)&dev->qmi.resp;
+	struct msg_desc req_desc;
+	struct msg_desc *resp_desc = &dev->qmi.resp_desc;
 	int rc;
 
 	req_desc.msg_id = SLIMBUS_QMI_POWER_REQ_V01;
 	req_desc.max_msg_len = SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN;
 	req_desc.ei_array = slimbus_power_req_msg_v01_ei;
 
-	resp_desc.msg_id = SLIMBUS_QMI_POWER_RESP_V01;
-	resp_desc.max_msg_len = SLIMBUS_QMI_POWER_RESP_MAX_MSG_LEN;
-	resp_desc.ei_array = slimbus_power_resp_msg_v01_ei;
+	resp_desc->msg_id = SLIMBUS_QMI_POWER_RESP_V01;
+	resp_desc->max_msg_len = SLIMBUS_QMI_POWER_RESP_MAX_MSG_LEN;
+	resp_desc->ei_array = slimbus_power_resp_msg_v01_ei;
 
-	rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, req, sizeof(*req),
-			&resp_desc, &resp, sizeof(resp), SLIM_QMI_RESP_TOUT);
-	if (rc < 0) {
+	if (dev->qmi.deferred_resp)
+		rc = qmi_send_req_nowait(dev->qmi.handle, &req_desc, req,
+				       sizeof(*req), resp_desc, resp,
+				       sizeof(*resp), slim_qmi_resp_cb, dev);
+	else
+		rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, req,
+				       sizeof(*req), resp_desc, resp,
+				       sizeof(*resp), SLIM_QMI_RESP_TOUT);
+	if (rc < 0)
 		SLIM_ERR(dev, "%s: QMI send req failed %d\n", __func__, rc);
+
+	if (rc < 0 || dev->qmi.deferred_resp)
 		return rc;
-	}
 
 	/* Check the response */
-	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
 		SLIM_ERR(dev, "%s: QMI request failed 0x%x (%s)\n", __func__,
-				resp.resp.result, get_qmi_error(&resp.resp));
+				resp->resp.result, get_qmi_error(&resp->resp));
 		return -EREMOTEIO;
 	}
 
@@ -1527,6 +1605,7 @@
 	struct slimbus_select_inst_req_msg_v01 req;
 
 	kthread_init_worker(&dev->qmi.kworker);
+	init_completion(&dev->qmi.defer_comp);
 
 	dev->qmi.task = kthread_run(kthread_worker_fn,
 			&dev->qmi.kworker, "msm_slim_qmi_clnt%d", dev->ctrl.nr);
@@ -1604,6 +1683,13 @@
 	else
 		req.pm_req = SLIMBUS_PM_INACTIVE_V01;
 
+	if (dev->qmi.deferred_resp) {
+		req.resp_type = SLIMBUS_RESP_DEFERRED_V01;
+		req.resp_type_valid = 1;
+	} else {
+		req.resp_type_valid = 0;
+	}
+
 	return msm_slim_qmi_send_power_request(dev, &req);
 }
 
@@ -1635,3 +1721,46 @@
 	}
 	return 0;
 }
+
+int msm_slim_qmi_deferred_status_req(struct msm_slim_ctrl *dev)
+{
+	struct slimbus_deferred_status_resp resp = { { 0, 0 } };
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+
+	req_desc.msg_id = SLIMBUS_QMI_DEFERRED_STATUS_REQ;
+	req_desc.max_msg_len = 0;
+	req_desc.ei_array = NULL;
+
+	resp_desc.msg_id = SLIMBUS_QMI_DEFERRED_STATUS_RESP;
+	resp_desc.max_msg_len =
+		SLIMBUS_QMI_DEFERRED_STATUS_RESP_STAT_MSG_MAX_MSG_LEN;
+	resp_desc.ei_array = slimbus_deferred_status_resp_msg_v01_ei;
+
+	rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, NULL, 0,
+		&resp_desc, &resp, sizeof(resp), SLIM_QMI_RESP_TOUT);
+	if (rc < 0) {
+		SLIM_ERR(dev, "%s: QMI send req failed %d\n", __func__, rc);
+		return rc;
+	}
+	/* Check the response */
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		SLIM_ERR(dev, "%s: QMI request failed 0x%x (%s)\n",
+			__func__, resp.resp.result, get_qmi_error(&resp.resp));
+		return -EREMOTEIO;
+	}
+
+	/* wait for the deferred response */
+	rc = wait_for_completion_timeout(&dev->qmi.defer_comp, HZ);
+	if (rc == 0) {
+		SLIM_WARN(dev, "slimbus power deferred response not rcvd\n");
+		return -ETIMEDOUT;
+	}
+	/* Check what response we got in callback */
+	if (dev->qmi.resp.result != QMI_RESULT_SUCCESS_V01) {
+		SLIM_WARN(dev, "QMI power req failed in CB");
+		return -EREMOTEIO;
+	}
+
+	return 0;
+}
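/*
 * Illustrative sketch only, not part of this patch: the system suspend/resume
 * sequence that qmi.deferred_resp enables, using only functions touched by
 * this change. On suspend the power request is issued without blocking on the
 * QMI response; on resume the deferred status request collects that response
 * (or, on failure, the resume handler above forces runtime PM back to
 * active). The wrapper below is hypothetical.
 */
static int example_slim_pm_cycle(struct device *dev, struct msm_slim_ctrl *cdev)
{
	int ret;

	/* suspend: power-off request is sent with a deferred response */
	cdev->qmi.deferred_resp = true;
	ret = ngd_slim_runtime_suspend(dev);
	if (ret)
		cdev->qmi.deferred_resp = false;

	/* ... system sleeps ... */

	/* resume: collect the deferred response before normal operation */
	if (cdev->qmi.deferred_resp) {
		ret = msm_slim_qmi_deferred_status_req(cdev);
		cdev->qmi.deferred_resp = false;
	}

	return ret;
}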
diff --git a/drivers/slimbus/slim-msm.h b/drivers/slimbus/slim-msm.h
index 65b9fae..ee0f625 100644
--- a/drivers/slimbus/slim-msm.h
+++ b/drivers/slimbus/slim-msm.h
@@ -228,6 +228,10 @@
 	struct kthread_worker		kworker;
 	struct completion		qmi_comp;
 	struct notifier_block		nb;
+	bool				deferred_resp;
+	struct qmi_response_type_v01	resp;
+	struct msg_desc			resp_desc;
+	struct completion		defer_comp;
 };
 
 enum msm_slim_dom {
@@ -437,4 +441,5 @@
 int msm_slim_qmi_init(struct msm_slim_ctrl *dev, bool apps_is_master);
 int msm_slim_qmi_power_request(struct msm_slim_ctrl *dev, bool active);
 int msm_slim_qmi_check_framer_request(struct msm_slim_ctrl *dev);
+int msm_slim_qmi_deferred_status_req(struct msm_slim_ctrl *dev);
 #endif
diff --git a/drivers/soc/qcom/dcc_v2.c b/drivers/soc/qcom/dcc_v2.c
index 93e6994..42f146d 100644
--- a/drivers/soc/qcom/dcc_v2.c
+++ b/drivers/soc/qcom/dcc_v2.c
@@ -154,6 +154,7 @@
 	void			*sram_buf;
 	struct msm_dump_data	sram_data;
 	uint8_t			curr_list;
+	uint8_t			cti_trig;
 };
 
 static bool dcc_ready(struct dcc_drvdata *drvdata)
@@ -601,7 +602,8 @@
 		}
 
 		/* 4. Configure trigger, data sink and function type */
-		dcc_writel(drvdata, BIT(9) | ((drvdata->data_sink << 4) |
+		dcc_writel(drvdata, BIT(9) | ((drvdata->cti_trig << 8) |
+			   (drvdata->data_sink << 4) |
 			   (drvdata->func_type[list])), DCC_LL_CFG(list));
 
 		/* 5. Clears interrupt status register */
@@ -1252,6 +1254,43 @@
 }
 static DEVICE_ATTR(config_write, 0200, NULL, dcc_write);
 
+static ssize_t dcc_show_cti_trig(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", drvdata->cti_trig);
+}
+
+static ssize_t dcc_store_cti_trig(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t size)
+{
+	unsigned long val;
+	int ret = 0;
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mutex);
+
+	if (drvdata->enable[drvdata->curr_list]) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (val)
+		drvdata->cti_trig = 1;
+	else
+		drvdata->cti_trig = 0;
+out:
+	mutex_unlock(&drvdata->mutex);
+	return ret;
+}
+static DEVICE_ATTR(cti_trig, 0644,
+		   dcc_show_cti_trig, dcc_store_cti_trig);
+
 static const struct device_attribute *dcc_attrs[] = {
 	&dev_attr_func_type,
 	&dev_attr_data_sink,
@@ -1266,6 +1305,7 @@
 	&dev_attr_rd_mod_wr,
 	&dev_attr_curr_list,
 	&dev_attr_config_write,
+	&dev_attr_cti_trig,
 	NULL,
 };
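/*
 * Illustrative sketch only, not part of this patch: how the new cti_trig knob
 * lands in the list configuration register programmed earlier in this file.
 * The example values for data_sink and func_type are hypothetical.
 */
static u32 example_dcc_ll_cfg(u32 cti_trig, u32 data_sink, u32 func_type)
{
	/* bit 9 as in the original code, bit 8 = CTI trigger,      */
	/* data_sink starting at bit 4, func_type in the low bits   */
	return BIT(9) | (cti_trig << 8) | (data_sink << 4) | func_type;
}
/* e.g. cti_trig = 1, data_sink = 2, func_type = 0 yields 0x320 */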
 
diff --git a/drivers/soc/qcom/memory_dump_v2.c b/drivers/soc/qcom/memory_dump_v2.c
index b9ce417..5ed66bf 100644
--- a/drivers/soc/qcom/memory_dump_v2.c
+++ b/drivers/soc/qcom/memory_dump_v2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -19,6 +19,9 @@
 #include <linux/of_address.h>
 #include <soc/qcom/memory_dump.h>
 #include <soc/qcom/scm.h>
+#include <linux/of_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
 
 #define MSM_DUMP_TABLE_VERSION		MSM_DUMP_MAKE_VERSION(2, 0)
 
@@ -195,3 +198,84 @@
 }
 early_initcall(init_debug_lar_unlock);
 #endif
+
+static int mem_dump_probe(struct platform_device *pdev)
+{
+	struct device_node *child_node;
+	const struct device_node *node = pdev->dev.of_node;
+	static dma_addr_t dump_addr;
+	static void *dump_vaddr;
+	struct msm_dump_data *dump_data;
+	struct msm_dump_entry dump_entry;
+	int ret;
+	u32 size, id;
+
+	for_each_available_child_of_node(node, child_node) {
+		ret = of_property_read_u32(child_node, "qcom,dump-size", &size);
+		if (ret) {
+			dev_err(&pdev->dev, "Unable to find size for %s\n",
+					child_node->name);
+			continue;
+		}
+
+		ret = of_property_read_u32(child_node, "qcom,dump-id", &id);
+		if (ret) {
+			dev_err(&pdev->dev, "Unable to find id for %s\n",
+					child_node->name);
+			continue;
+		}
+
+		dump_vaddr = (void *) dma_alloc_coherent(&pdev->dev, size,
+						&dump_addr, GFP_KERNEL);
+
+		if (!dump_vaddr) {
+			dev_err(&pdev->dev, "Couldn't get memory for dumping\n");
+			continue;
+		}
+
+		memset(dump_vaddr, 0x0, size);
+
+		dump_data = devm_kzalloc(&pdev->dev,
+				sizeof(struct msm_dump_data), GFP_KERNEL);
+		if (!dump_data) {
+			dma_free_coherent(&pdev->dev, size, dump_vaddr,
+					dump_addr);
+			continue;
+		}
+
+		dump_data->addr = dump_addr;
+		dump_data->len = size;
+		dump_entry.id = id;
+		dump_entry.addr = virt_to_phys(dump_data);
+		ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS, &dump_entry);
+		if (ret) {
+			dev_err(&pdev->dev, "Data dump setup failed, id = %d\n",
+				id);
+			dma_free_coherent(&pdev->dev, size, dump_vaddr,
+					dump_addr);
+			devm_kfree(&pdev->dev, dump_data);
+		}
+	}
+	return 0;
+}
+
+static const struct of_device_id mem_dump_match_table[] = {
+	{.compatible = "qcom,mem-dump",},
+	{}
+};
+
+static struct platform_driver mem_dump_driver = {
+	.probe = mem_dump_probe,
+	.driver = {
+		.name = "msm_mem_dump",
+		.owner = THIS_MODULE,
+		.of_match_table = mem_dump_match_table,
+	},
+};
+
+static int __init mem_dump_init(void)
+{
+	return platform_driver_register(&mem_dump_driver);
+}
+
+pure_initcall(mem_dump_init);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
index bf5a526..9d0adbb 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
@@ -24,8 +24,10 @@
 #define NUM_LNODES	3
 #define MAX_STR_CL	50
 
-#define MSM_BUS_MAS_ALC	144
-#define MSM_BUS_RSC_APPS 8000
+#define MSM_BUS_MAS_ALC			144
+#define MSM_BUS_RSC_APPS		8000
+#define MSM_BUS_RSC_DISP		8001
+#define BCM_TCS_CMD_ACV_APPS		0x8
 
 struct bus_search_type {
 	struct list_head link;
@@ -127,16 +129,14 @@
 		goto exit_bcm_add_bus_req;
 	}
 
-	if (cur_dev->node_info->bcm_req_idx != -1)
-		goto exit_bcm_add_bus_req;
-
 	if (!cur_dev->node_info->num_bcm_devs)
 		goto exit_bcm_add_bus_req;
 
 	for (i = 0; i < cur_dev->node_info->num_bcm_devs; i++) {
+		if (cur_dev->node_info->bcm_req_idx[i] != -1)
+			continue;
 		bcm_dev = to_msm_bus_node(cur_dev->node_info->bcm_devs[i]);
 		max_num_lnodes = bcm_dev->bcmdev->num_bus_devs;
-
 		if (!bcm_dev->num_lnodes) {
 			bcm_dev->lnode_list = devm_kzalloc(dev,
 				sizeof(struct link_node) * max_num_lnodes,
@@ -183,7 +183,7 @@
 
 		lnode->in_use = 1;
 		lnode->bus_dev_id = cur_dev->node_info->id;
-		cur_dev->node_info->bcm_req_idx = lnode_idx;
+		cur_dev->node_info->bcm_req_idx[i] = lnode_idx;
 		memset(lnode->lnode_ib, 0, sizeof(uint64_t) * NUM_CTX);
 		memset(lnode->lnode_ab, 0, sizeof(uint64_t) * NUM_CTX);
 	}
@@ -483,11 +483,35 @@
 	return first_hop;
 }
 
+static void bcm_update_acv_req(struct msm_bus_node_device_type *cur_rsc,
+				uint64_t max_ab, uint64_t max_ib,
+				uint64_t *vec_a, uint64_t *vec_b,
+				uint32_t *acv, int ctx)
+{
+	uint32_t acv_bmsk = 0;
+	/*
+	 * Base ACV voting on current RSC until mapping is set up in commanddb
+	 * that allows us to vote ACV based on master.
+	 */
+
+	if (cur_rsc->node_info->id == MSM_BUS_RSC_APPS)
+		acv_bmsk = BCM_TCS_CMD_ACV_APPS;
+
+	if (max_ab == 0 && max_ib == 0)
+		*acv = *acv & ~acv_bmsk;
+	else
+		*acv = *acv | acv_bmsk;
+	*vec_a = 0;
+	*vec_b = *acv;
+}
+
 static void bcm_update_bus_req(struct device *dev, int ctx)
 {
 	struct msm_bus_node_device_type *cur_dev = NULL;
 	struct msm_bus_node_device_type *bcm_dev = NULL;
-	int i;
+	struct msm_bus_node_device_type *cur_rsc = NULL;
+
+	int i, j;
 	uint64_t max_ib = 0;
 	uint64_t max_ab = 0;
 	int lnode_idx = 0;
@@ -507,7 +531,7 @@
 		if (!bcm_dev)
 			goto exit_bcm_update_bus_req;
 
-		lnode_idx = cur_dev->node_info->bcm_req_idx;
+		lnode_idx = cur_dev->node_info->bcm_req_idx[i];
 		bcm_dev->lnode_list[lnode_idx].lnode_ib[ctx] =
 			msm_bus_div64(cur_dev->node_bw[ctx].max_ib *
 					(uint64_t)bcm_dev->bcmdev->width,
@@ -519,19 +543,19 @@
 				cur_dev->node_info->agg_params.buswidth *
 				cur_dev->node_info->agg_params.num_aggports);
 
-		for (i = 0; i < bcm_dev->num_lnodes; i++) {
+		for (j = 0; j < bcm_dev->num_lnodes; j++) {
 			if (ctx == ACTIVE_CTX) {
 				max_ib = max(max_ib,
-				max(bcm_dev->lnode_list[i].lnode_ib[ACTIVE_CTX],
-				bcm_dev->lnode_list[i].lnode_ib[DUAL_CTX]));
+				max(bcm_dev->lnode_list[j].lnode_ib[ACTIVE_CTX],
+				bcm_dev->lnode_list[j].lnode_ib[DUAL_CTX]));
 				max_ab = max(max_ab,
-				bcm_dev->lnode_list[i].lnode_ab[ACTIVE_CTX] +
-				bcm_dev->lnode_list[i].lnode_ab[DUAL_CTX]);
+				bcm_dev->lnode_list[j].lnode_ab[ACTIVE_CTX] +
+				bcm_dev->lnode_list[j].lnode_ab[DUAL_CTX]);
 			} else {
 				max_ib = max(max_ib,
-					bcm_dev->lnode_list[i].lnode_ib[ctx]);
+					bcm_dev->lnode_list[j].lnode_ib[ctx]);
 				max_ab = max(max_ab,
-					bcm_dev->lnode_list[i].lnode_ab[ctx]);
+					bcm_dev->lnode_list[j].lnode_ab[ctx]);
 			}
 		}
 		bcm_dev->node_bw[ctx].max_ab = max_ab;
@@ -540,8 +564,18 @@
 		max_ab = msm_bus_div64(max_ab, bcm_dev->bcmdev->unit_size);
 		max_ib = msm_bus_div64(max_ib, bcm_dev->bcmdev->unit_size);
 
-		bcm_dev->node_vec[ctx].vec_a = max_ab;
-		bcm_dev->node_vec[ctx].vec_b = max_ib;
+		if (bcm_dev->node_info->id == MSM_BUS_BCM_ACV) {
+			cur_rsc = to_msm_bus_node(bcm_dev->node_info->
+						rsc_devs[0]);
+			bcm_update_acv_req(cur_rsc, max_ab, max_ib,
+					&bcm_dev->node_vec[ctx].vec_a,
+					&bcm_dev->node_vec[ctx].vec_b,
+					&cur_rsc->rscdev->acv[ctx], ctx);
+
+		} else {
+			bcm_dev->node_vec[ctx].vec_a = max_ab;
+			bcm_dev->node_vec[ctx].vec_b = max_ib;
+		}
 	}
 exit_bcm_update_bus_req:
 	return;
@@ -551,7 +585,8 @@
 {
 	struct msm_bus_node_device_type *cur_dev = NULL;
 	struct msm_bus_node_device_type *bcm_dev = NULL;
-	int i;
+	struct msm_bus_node_device_type *cur_rsc = NULL;
+	int i, j;
 	uint64_t max_query_ib = 0;
 	uint64_t max_query_ab = 0;
 	int lnode_idx = 0;
@@ -571,7 +606,7 @@
 		if (!bcm_dev)
 			goto exit_bcm_query_bus_req;
 
-		lnode_idx = cur_dev->node_info->bcm_req_idx;
+		lnode_idx = cur_dev->node_info->bcm_req_idx[i];
 		bcm_dev->lnode_list[lnode_idx].lnode_query_ib[ctx] =
 			msm_bus_div64(cur_dev->node_bw[ctx].max_query_ib *
 					(uint64_t)bcm_dev->bcmdev->width,
@@ -583,25 +618,25 @@
 				cur_dev->node_info->agg_params.num_aggports *
 				cur_dev->node_info->agg_params.buswidth);
 
-		for (i = 0; i < bcm_dev->num_lnodes; i++) {
+		for (j = 0; j < bcm_dev->num_lnodes; j++) {
 			if (ctx == ACTIVE_CTX) {
 				max_query_ib = max(max_query_ib,
-				max(bcm_dev->lnode_list[i].
+				max(bcm_dev->lnode_list[j].
 					lnode_query_ib[ACTIVE_CTX],
-				bcm_dev->lnode_list[i].
+				bcm_dev->lnode_list[j].
 					lnode_query_ib[DUAL_CTX]));
 
 				max_query_ab = max(max_query_ab,
-				bcm_dev->lnode_list[i].
+				bcm_dev->lnode_list[j].
 						lnode_query_ab[ACTIVE_CTX] +
-				bcm_dev->lnode_list[i].
+				bcm_dev->lnode_list[j].
 						lnode_query_ab[DUAL_CTX]);
 			} else {
 				max_query_ib = max(max_query_ib,
-					bcm_dev->lnode_list[i].
+					bcm_dev->lnode_list[j].
 						lnode_query_ib[ctx]);
 				max_query_ab = max(max_query_ab,
-					bcm_dev->lnode_list[i].
+					bcm_dev->lnode_list[j].
 						lnode_query_ab[ctx]);
 			}
 		}
@@ -611,6 +646,18 @@
 		max_query_ib = msm_bus_div64(max_query_ib,
 						bcm_dev->bcmdev->unit_size);
 
+		if (bcm_dev->node_info->id == MSM_BUS_BCM_ACV) {
+			cur_rsc = to_msm_bus_node(bcm_dev->node_info->
+						rsc_devs[0]);
+			bcm_update_acv_req(cur_rsc, max_query_ab, max_query_ib,
+					&bcm_dev->node_vec[ctx].query_vec_a,
+					&bcm_dev->node_vec[ctx].query_vec_b,
+					&cur_rsc->rscdev->query_acv[ctx], ctx);
+		} else {
+			bcm_dev->node_vec[ctx].query_vec_a = max_query_ab;
+			bcm_dev->node_vec[ctx].query_vec_b = max_query_ib;
+		}
+
 		bcm_dev->node_bw[ctx].max_query_ab = max_query_ab;
 		bcm_dev->node_bw[ctx].max_query_ib = max_query_ib;
 	}
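
When the BCM being aggregated is the ACV node, bcm_update_bus_req()/bcm_query_bus_req() above no longer send bandwidth values: bcm_update_acv_req() sets the APPS bit in a per-RSC mask while any requester still has a non-zero AB or IB vote, clears it once all votes drop to zero, and sends vec_a = 0 with the mask in vec_b. A minimal standalone sketch of that aggregation (the 0x8 bit value and the main() harness are for illustration only, and the APPS-only check on the RSC id is omitted):

#include <stdint.h>
#include <stdio.h>

/* Assumed bit position for illustration; the driver's BCM_TCS_CMD_ACV_APPS
 * macro defines the real value. */
#define BCM_TCS_CMD_ACV_APPS	0x8

/* Fold one aggregated (ab, ib) request into the shared per-RSC ACV mask. */
static void acv_vote(uint64_t max_ab, uint64_t max_ib,
		     uint64_t *vec_a, uint64_t *vec_b, uint32_t *acv)
{
	if (max_ab == 0 && max_ib == 0)
		*acv &= ~BCM_TCS_CMD_ACV_APPS;	/* nothing requested: drop the APPS vote */
	else
		*acv |= BCM_TCS_CMD_ACV_APPS;	/* any request keeps the APPS vote asserted */

	*vec_a = 0;		/* ACV BCMs carry no AB component */
	*vec_b = *acv;		/* the vote sent to RPMh is the mask itself */
}

int main(void)
{
	uint32_t acv = 0;
	uint64_t vec_a, vec_b;

	acv_vote(100, 200, &vec_a, &vec_b, &acv);	/* bit set while bandwidth is needed */
	printf("vec_b = 0x%llx\n", (unsigned long long)vec_b);

	acv_vote(0, 0, &vec_a, &vec_b, &acv);		/* all votes gone: bit cleared */
	printf("vec_b = 0x%llx\n", (unsigned long long)vec_b);
	return 0;
}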
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
index c950367..458cf0d 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
@@ -418,8 +418,8 @@
 				commit = true;
 			}
 			tcs_cmd_gen(cur_bcm, &cmdlist_active[k],
-				cur_bcm->node_bw[ACTIVE_CTX].max_query_ib,
-				cur_bcm->node_bw[ACTIVE_CTX].max_query_ab,
+				cur_bcm->node_vec[ACTIVE_CTX].query_vec_a,
+				cur_bcm->node_vec[ACTIVE_CTX].query_vec_b,
 								commit);
 			k++;
 		}
@@ -433,26 +433,30 @@
 {
 	int ret = 0;
 	int cur_vcd = 0;
+	int i = 0;
 	struct msm_bus_node_device_type *cur_bcm = NULL;
 
 	if (!cur_dev->node_info->num_bcm_devs)
 		goto exit_bcm_clist_add;
 
-	cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[0]);
-	cur_vcd = cur_bcm->bcmdev->clk_domain;
+	for (i = 0; i < cur_dev->node_info->num_bcm_devs; i++) {
+		cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[i]);
+		cur_vcd = cur_bcm->bcmdev->clk_domain;
 
-	if (!cur_bcm->node_info->num_rsc_devs)
-		goto exit_bcm_clist_add;
+		if (!cur_bcm->node_info->num_rsc_devs)
+			goto exit_bcm_clist_add;
 
-	if (!cur_rsc)
-		cur_rsc = to_msm_bus_node(cur_bcm->node_info->rsc_devs[0]);
+		if (!cur_rsc)
+			cur_rsc = to_msm_bus_node(cur_bcm->node_info->
+								rsc_devs[0]);
 
-	if (!cur_bcm->dirty) {
-		list_add_tail(&cur_bcm->link,
+		if (!cur_bcm->dirty) {
+			list_add_tail(&cur_bcm->link,
 					&cur_rsc->rscdev->bcm_clist[cur_vcd]);
-		cur_bcm->dirty = true;
+			cur_bcm->dirty = true;
+		}
+		cur_bcm->updated = false;
 	}
-	cur_bcm->updated = false;
 
 exit_bcm_clist_add:
 	return ret;
@@ -462,17 +466,20 @@
 {
 	int ret = 0;
 	int cur_vcd = 0;
+	int i = 0;
 	struct msm_bus_node_device_type *cur_bcm = NULL;
 
 	if (!cur_dev->node_info->num_bcm_devs)
 		goto exit_bcm_query_list_add;
 
-	cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[0]);
-	cur_vcd = cur_bcm->bcmdev->clk_domain;
+	for (i = 0; i < cur_dev->node_info->num_bcm_devs; i++) {
+		cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[i]);
+		cur_vcd = cur_bcm->bcmdev->clk_domain;
 
-	if (!cur_bcm->query_dirty)
-		list_add_tail(&cur_bcm->query_link,
+		if (!cur_bcm->query_dirty)
+			list_add_tail(&cur_bcm->query_link,
 					&bcm_query_list_inorder[cur_vcd]);
+	}
 
 exit_bcm_query_list_add:
 	return ret;
@@ -481,20 +488,23 @@
 static int bcm_clist_clean(struct msm_bus_node_device_type *cur_dev)
 {
 	int ret = 0;
+	int i = 0;
 	struct msm_bus_node_device_type *cur_bcm = NULL;
 
 	if (!cur_dev->node_info->num_bcm_devs)
 		goto exit_bcm_clist_clean;
 
-	cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[0]);
+	for (i = 0; i < cur_dev->node_info->num_bcm_devs; i++) {
+		cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[i]);
 
-	if (cur_bcm->node_vec[DUAL_CTX].vec_a == 0 &&
+		if (cur_bcm->node_vec[DUAL_CTX].vec_a == 0 &&
 			cur_bcm->node_vec[ACTIVE_CTX].vec_a == 0 &&
 			cur_bcm->node_vec[DUAL_CTX].vec_b == 0 &&
 			cur_bcm->node_vec[ACTIVE_CTX].vec_b == 0 &&
 			init_time == false) {
-		cur_bcm->dirty = false;
-		list_del_init(&cur_bcm->link);
+			cur_bcm->dirty = false;
+			list_del_init(&cur_bcm->link);
+		}
 	}
 
 exit_bcm_clist_clean:
@@ -504,15 +514,18 @@
 static int bcm_query_list_clean(struct msm_bus_node_device_type *cur_dev)
 {
 	int ret = 0;
+	int i = 0;
 	struct msm_bus_node_device_type *cur_bcm = NULL;
 
 	if (!cur_dev->node_info->num_bcm_devs)
 		goto exit_bcm_clist_add;
 
-	cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[0]);
+	for (i = 0; i < cur_dev->node_info->num_bcm_devs; i++) {
+		cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[i]);
 
-	cur_bcm->query_dirty = false;
-	list_del_init(&cur_bcm->query_link);
+		cur_bcm->query_dirty = false;
+		list_del_init(&cur_bcm->query_link);
+	}
 
 exit_bcm_clist_add:
 	return ret;
@@ -1081,7 +1094,7 @@
 static int msm_bus_copy_node_info(struct msm_bus_node_device_type *pdata,
 				struct device *bus_dev)
 {
-	int ret = 0;
+	int ret = 0, i = 0;
 	struct msm_bus_node_info_type *node_info = NULL;
 	struct msm_bus_node_info_type *pdata_node_info = NULL;
 	struct msm_bus_node_device_type *bus_node = NULL;
@@ -1100,7 +1113,17 @@
 
 	node_info->name = pdata_node_info->name;
 	node_info->id =  pdata_node_info->id;
-	node_info->bcm_req_idx = -1;
+	node_info->bcm_req_idx = devm_kzalloc(bus_dev,
+			sizeof(int) * pdata_node_info->num_bcm_devs,
+			GFP_KERNEL);
+	if (!node_info->bcm_req_idx) {
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	for (i = 0; i < pdata_node_info->num_bcm_devs; i++)
+		node_info->bcm_req_idx[i] = -1;
+
 	node_info->bus_device_id = pdata_node_info->bus_device_id;
 	node_info->mas_rpm_id = pdata_node_info->mas_rpm_id;
 	node_info->slv_rpm_id = pdata_node_info->slv_rpm_id;
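
Because a node can now vote on several BCM devices, msm_bus_copy_node_info() turns the single bcm_req_idx slot into an array with one lnode index per BCM (see the msm_bus_rpmh.h change below). A simplified sketch of that bookkeeping, using plain calloc() and illustrative struct names in place of the driver's devm allocation:

#include <stdlib.h>

struct bcm_dev;				/* opaque here; stands in for the real BCM device */

struct node_info {
	int num_bcm_devs;
	struct bcm_dev **bcm_devs;	/* one entry per BCM this node votes on */
	int *bcm_req_idx;		/* matching lnode slot inside each BCM */
};

/* Allocate one request-index slot per BCM and mark each as unregistered. */
static int node_alloc_req_idx(struct node_info *ni)
{
	int i;

	ni->bcm_req_idx = calloc(ni->num_bcm_devs, sizeof(*ni->bcm_req_idx));
	if (!ni->bcm_req_idx)
		return -1;			/* the driver returns -ENOMEM here */

	for (i = 0; i < ni->num_bcm_devs; i++)
		ni->bcm_req_idx[i] = -1;	/* -1 = no lnode registered yet */

	return 0;
}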
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
index fad7afa..f7f17c3 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
+++ b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
@@ -71,12 +71,16 @@
 struct nodevector {
 	uint64_t vec_a;
 	uint64_t vec_b;
+	uint64_t query_vec_a;
+	uint64_t query_vec_b;
 };
 
 struct msm_bus_rsc_device_type {
 	struct rpmh_client *mbox;
 	struct list_head bcm_clist[VCD_MAX_CNT];
 	int req_state;
+	uint32_t acv[NUM_CTX];
+	uint32_t query_acv[NUM_CTX];
 };
 
 struct msm_bus_bcm_device_type {
@@ -157,7 +161,7 @@
 	struct device **black_connections;
 	struct device **bcm_devs;
 	struct device **rsc_devs;
-	int bcm_req_idx;
+	int *bcm_req_idx;
 	unsigned int bus_device_id;
 	struct device *bus_device;
 	struct rule_update_path_info rule;
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index 11e1b4d..1f28712 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -464,6 +464,8 @@
 	if (region == NULL) {
 		pil_err(priv->desc, "Failed to allocate relocatable region of size %zx\n",
 					size);
+		priv->region_start = 0;
+		priv->region_end = 0;
 		return -ENOMEM;
 	}
 
diff --git a/drivers/soc/qcom/service-notifier.c b/drivers/soc/qcom/service-notifier.c
index 62e2384..221ae0c 100644
--- a/drivers/soc/qcom/service-notifier.c
+++ b/drivers/soc/qcom/service-notifier.c
@@ -373,13 +373,6 @@
 	mutex_unlock(&qmi_client_release_lock);
 	pr_info("Connection established between QMI handle and %d service\n",
 							data->instance_id);
-	/* Register for indication messages about service */
-	rc = qmi_register_ind_cb(data->clnt_handle, root_service_service_ind_cb,
-							(void *)data);
-	if (rc < 0)
-		pr_err("Indication callback register failed(instance-id: %d) rc:%d\n",
-							data->instance_id, rc);
-
 	mutex_lock(&notif_add_lock);
 	mutex_lock(&service_list_lock);
 	list_for_each_entry(service_notif, &service_list, list) {
@@ -402,6 +395,12 @@
 	}
 	mutex_unlock(&service_list_lock);
 	mutex_unlock(&notif_add_lock);
+	/* Register for indication messages about service */
+	rc = qmi_register_ind_cb(data->clnt_handle,
+		root_service_service_ind_cb, (void *)data);
+	if (rc < 0)
+		pr_err("Indication callback register failed(instance-id: %d) rc:%d\n",
+							data->instance_id, rc);
 }
 
 static void root_service_service_exit(struct qmi_client_info *data,
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 119ede3..8aff84c 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -36,6 +36,7 @@
 #include <soc/qcom/boot_stats.h>
 
 #define BUILD_ID_LENGTH 32
+#define CHIP_ID_LENGTH 32
 #define SMEM_IMAGE_VERSION_BLOCKS_COUNT 32
 #define SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE 128
 #define SMEM_IMAGE_VERSION_SIZE 4096
@@ -199,6 +200,20 @@
 struct socinfo_v0_13 {
 	struct socinfo_v0_12 v0_12;
 	uint32_t nproduct_id;
+	char chip_name[CHIP_ID_LENGTH];
+};
+
+struct socinfo_v0_14 {
+	struct socinfo_v0_13 v0_13;
+	uint32_t num_clusters;
+	uint32_t ncluster_array_offset;
+	uint32_t num_defective_parts;
+	uint32_t ndefective_parts_array_offset;
+};
+
+struct socinfo_v0_15 {
+	struct socinfo_v0_14 v0_14;
+	uint32_t nmodem_supported;
 };
 
 static union {
@@ -215,10 +230,12 @@
 	struct socinfo_v0_11 v0_11;
 	struct socinfo_v0_12 v0_12;
 	struct socinfo_v0_13 v0_13;
+	struct socinfo_v0_14 v0_14;
+	struct socinfo_v0_15 v0_15;
 } *socinfo;
 
 /* max socinfo format version supported */
-#define MAX_SOCINFO_FORMAT SOCINFO_VERSION(0, 13)
+#define MAX_SOCINFO_FORMAT SOCINFO_VERSION(0, 15)
 
 static struct msm_soc_info cpu_of_id[] = {
 
@@ -705,6 +722,14 @@
 		: 0;
 }
 
+static char *socinfo_get_chip_name(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 13) ?
+			socinfo->v0_13.chip_name : "N/A")
+		: "N/A";
+}
+
 static uint32_t socinfo_get_nproduct_id(void)
 {
 	return socinfo ?
@@ -713,6 +738,46 @@
 		: 0;
 }
 
+static uint32_t socinfo_get_num_clusters(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 14) ?
+			socinfo->v0_14.num_clusters : 0)
+		: 0;
+}
+
+static uint32_t socinfo_get_ncluster_array_offset(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 14) ?
+			socinfo->v0_14.ncluster_array_offset : 0)
+		: 0;
+}
+
+static uint32_t socinfo_get_num_defective_parts(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 14) ?
+			socinfo->v0_14.num_defective_parts : 0)
+		: 0;
+}
+
+static uint32_t socinfo_get_ndefective_parts_array_offset(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 14) ?
+			socinfo->v0_14.ndefective_parts_array_offset : 0)
+		: 0;
+}
+
+static uint32_t socinfo_get_nmodem_supported(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 15) ?
+			socinfo->v0_15.nmodem_supported : 0)
+		: 0;
+}
+
 enum pmic_model socinfo_get_pmic_model(void)
 {
 	return socinfo ?
@@ -890,6 +955,15 @@
 }
 
 static ssize_t
+msm_get_chip_name(struct device *dev,
+		   struct device_attribute *attr,
+		   char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%-.32s\n",
+			socinfo_get_chip_name());
+}
+
+static ssize_t
 msm_get_nproduct_id(struct device *dev,
 			struct device_attribute *attr,
 			char *buf)
@@ -899,6 +973,51 @@
 }
 
 static ssize_t
+msm_get_num_clusters(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n",
+		socinfo_get_num_clusters());
+}
+
+static ssize_t
+msm_get_ncluster_array_offset(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n",
+		socinfo_get_ncluster_array_offset());
+}
+
+static ssize_t
+msm_get_num_defective_parts(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n",
+		socinfo_get_num_defective_parts());
+}
+
+static ssize_t
+msm_get_ndefective_parts_array_offset(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n",
+		socinfo_get_ndefective_parts_array_offset());
+}
+
+static ssize_t
+msm_get_nmodem_supported(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n",
+		socinfo_get_nmodem_supported());
+}
+
+static ssize_t
 msm_get_pmic_model(struct device *dev,
 			struct device_attribute *attr,
 			char *buf)
@@ -1146,10 +1265,34 @@
 	__ATTR(raw_device_number, S_IRUGO,
 			msm_get_raw_device_number, NULL);
 
+static struct device_attribute msm_soc_attr_chip_name =
+	__ATTR(chip_name, 0444,
+			msm_get_chip_name, NULL);
+
 static struct device_attribute msm_soc_attr_nproduct_id =
 	__ATTR(nproduct_id, 0444,
 			msm_get_nproduct_id, NULL);
 
+static struct device_attribute msm_soc_attr_num_clusters =
+	__ATTR(num_clusters, 0444,
+			msm_get_num_clusters, NULL);
+
+static struct device_attribute msm_soc_attr_ncluster_array_offset =
+	__ATTR(ncluster_array_offset, 0444,
+			msm_get_ncluster_array_offset, NULL);
+
+static struct device_attribute msm_soc_attr_num_defective_parts =
+	__ATTR(num_defective_parts, 0444,
+			msm_get_num_defective_parts, NULL);
+
+static struct device_attribute msm_soc_attr_ndefective_parts_array_offset =
+	__ATTR(ndefective_parts_array_offset, 0444,
+			msm_get_ndefective_parts_array_offset, NULL);
+
+static struct device_attribute msm_soc_attr_nmodem_supported =
+	__ATTR(nmodem_supported, 0444,
+			msm_get_nmodem_supported, NULL);
+
 static struct device_attribute msm_soc_attr_pmic_model =
 	__ATTR(pmic_model, S_IRUGO,
 			msm_get_pmic_model, NULL);
@@ -1280,9 +1423,23 @@
 	device_create_file(msm_soc_device, &images);
 
 	switch (socinfo_format) {
+	case SOCINFO_VERSION(0, 15):
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_nmodem_supported);
+	case SOCINFO_VERSION(0, 14):
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_num_clusters);
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_ncluster_array_offset);
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_num_defective_parts);
+		device_create_file(msm_soc_device,
+				&msm_soc_attr_ndefective_parts_array_offset);
 	case SOCINFO_VERSION(0, 13):
 		 device_create_file(msm_soc_device,
 					&msm_soc_attr_nproduct_id);
+		 device_create_file(msm_soc_device,
+					&msm_soc_attr_chip_name);
 	case SOCINFO_VERSION(0, 12):
 		device_create_file(msm_soc_device,
 					&msm_soc_attr_chip_family);
@@ -1522,6 +1679,53 @@
 			socinfo->v0_13.nproduct_id);
 		break;
 
+	case SOCINFO_VERSION(0, 14):
+		pr_info("v%u.%u, id=%u, ver=%u.%u, raw_id=%u, raw_ver=%u, hw_plat=%u, hw_plat_ver=%u\n accessory_chip=%u, hw_plat_subtype=%u, pmic_model=%u, pmic_die_revision=%u foundry_id=%u serial_number=%u num_pmics=%u chip_family=0x%x raw_device_family=0x%x raw_device_number=0x%x nproduct_id=0x%x num_clusters=0x%x ncluster_array_offset=0x%x num_defective_parts=0x%x ndefective_parts_array_offset=0x%x\n",
+			f_maj, f_min, socinfo->v0_1.id, v_maj, v_min,
+			socinfo->v0_2.raw_id, socinfo->v0_2.raw_version,
+			socinfo->v0_3.hw_platform,
+			socinfo->v0_4.platform_version,
+			socinfo->v0_5.accessory_chip,
+			socinfo->v0_6.hw_platform_subtype,
+			socinfo->v0_7.pmic_model,
+			socinfo->v0_7.pmic_die_revision,
+			socinfo->v0_9.foundry_id,
+			socinfo->v0_10.serial_number,
+			socinfo->v0_11.num_pmics,
+			socinfo->v0_12.chip_family,
+			socinfo->v0_12.raw_device_family,
+			socinfo->v0_12.raw_device_number,
+			socinfo->v0_13.nproduct_id,
+			socinfo->v0_14.num_clusters,
+			socinfo->v0_14.ncluster_array_offset,
+			socinfo->v0_14.num_defective_parts,
+			socinfo->v0_14.ndefective_parts_array_offset);
+		break;
+
+	case SOCINFO_VERSION(0, 15):
+		pr_info("v%u.%u, id=%u, ver=%u.%u, raw_id=%u, raw_ver=%u, hw_plat=%u, hw_plat_ver=%u\n accessory_chip=%u, hw_plat_subtype=%u, pmic_model=%u, pmic_die_revision=%u foundry_id=%u serial_number=%u num_pmics=%u chip_family=0x%x raw_device_family=0x%x raw_device_number=0x%x nproduct_id=0x%x num_clusters=0x%x ncluster_array_offset=0x%x num_defective_parts=0x%x ndefective_parts_array_offset=0x%x nmodem_supported=0x%x\n",
+			f_maj, f_min, socinfo->v0_1.id, v_maj, v_min,
+			socinfo->v0_2.raw_id, socinfo->v0_2.raw_version,
+			socinfo->v0_3.hw_platform,
+			socinfo->v0_4.platform_version,
+			socinfo->v0_5.accessory_chip,
+			socinfo->v0_6.hw_platform_subtype,
+			socinfo->v0_7.pmic_model,
+			socinfo->v0_7.pmic_die_revision,
+			socinfo->v0_9.foundry_id,
+			socinfo->v0_10.serial_number,
+			socinfo->v0_11.num_pmics,
+			socinfo->v0_12.chip_family,
+			socinfo->v0_12.raw_device_family,
+			socinfo->v0_12.raw_device_number,
+			socinfo->v0_13.nproduct_id,
+			socinfo->v0_14.num_clusters,
+			socinfo->v0_14.ncluster_array_offset,
+			socinfo->v0_14.num_defective_parts,
+			socinfo->v0_14.ndefective_parts_array_offset,
+			socinfo->v0_15.nmodem_supported);
+		break;
+
 	default:
 		pr_err("Unknown format found: v%u.%u\n", f_maj, f_min);
 		break;
diff --git a/drivers/thermal/qpnp-adc-tm.c b/drivers/thermal/qpnp-adc-tm.c
index 342160e..04320d8 100644
--- a/drivers/thermal/qpnp-adc-tm.c
+++ b/drivers/thermal/qpnp-adc-tm.c
@@ -1973,8 +1973,6 @@
 			chip->sensor[sen_idx].thermal_node = true;
 			snprintf(name, sizeof(name), "%s",
 				chip->adc->adc_channels[sen_idx].name);
-			chip->sensor[sen_idx].meas_interval =
-				QPNP_ADC_TM_MEAS_INTERVAL;
 			chip->sensor[sen_idx].low_thr =
 						QPNP_ADC_TM_M0_LOW_THR;
 			chip->sensor[sen_idx].high_thr =
@@ -2027,7 +2025,7 @@
 
 	rc = devm_request_irq(&pdev->dev, chip->adc->adc_irq_eoc,
 			qpnp_adc_tm_rc_thr_isr,
-		IRQF_TRIGGER_RISING, "qpnp_adc_tm_interrupt", chip);
+		IRQF_TRIGGER_HIGH, "qpnp_adc_tm_interrupt", chip);
 	if (rc)
 		dev_err(&pdev->dev, "failed to request adc irq\n");
 	else
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index 6deff2e..2e12c3f 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -1020,6 +1020,7 @@
 	disable_irq(uport->irq);
 	free_irq(uport->irq, msm_port);
 	if (uart_console(uport)) {
+		console_stop(uport->cons);
 		se_geni_resources_off(&msm_port->serial_rsc);
 	} else {
 		if (msm_port->wakeup_irq > 0) {
@@ -1845,7 +1846,6 @@
 	if (uart_console(uport)) {
 		uart_suspend_port((struct uart_driver *)uport->private_data,
 					uport);
-		se_geni_resources_off(&port->serial_rsc);
 	} else {
 		if (!pm_runtime_status_suspended(dev)) {
 			dev_info(dev, "%s: Is still active\n", __func__);
diff --git a/include/dt-bindings/clock/qcom,gpucc-sdm845.h b/include/dt-bindings/clock/qcom,gpucc-sdm845.h
index c43a9f8..323beaf 100644
--- a/include/dt-bindings/clock/qcom,gpucc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gpucc-sdm845.h
@@ -40,13 +40,10 @@
 #define GPU_CC_PLL1_OUT_ODD					22
 #define GPU_CC_PLL1_OUT_TEST					23
 #define GPU_CC_PLL_TEST_CLK					24
-#define GPU_CC_RBCPR_AHB_CLK					25
-#define GPU_CC_RBCPR_CLK					26
-#define GPU_CC_RBCPR_CLK_SRC					27
-#define GPU_CC_SLEEP_CLK					28
-#define GPU_CC_GMU_CLK_SRC					29
-#define GPU_CC_CX_GFX3D_CLK					30
-#define GPU_CC_CX_GFX3D_SLV_CLK					31
+#define GPU_CC_SLEEP_CLK					25
+#define GPU_CC_GMU_CLK_SRC					26
+#define GPU_CC_CX_GFX3D_CLK					27
+#define GPU_CC_CX_GFX3D_SLV_CLK					28
 
 /* GPUCC reset clock registers */
 #define GPUCC_GPU_CC_ACD_BCR					0
@@ -54,9 +51,8 @@
 #define GPUCC_GPU_CC_GFX3D_AON_BCR				2
 #define GPUCC_GPU_CC_GMU_BCR					3
 #define GPUCC_GPU_CC_GX_BCR					4
-#define GPUCC_GPU_CC_RBCPR_BCR					5
-#define GPUCC_GPU_CC_SPDM_BCR					6
-#define GPUCC_GPU_CC_XO_BCR					7
+#define GPUCC_GPU_CC_SPDM_BCR					5
+#define GPUCC_GPU_CC_XO_BCR					6
 
 /* GFX3D clock registers */
 #define GPU_CC_PLL0						0
diff --git a/include/trace/events/pdc.h b/include/trace/events/pdc.h
new file mode 100644
index 0000000..400e959
--- /dev/null
+++ b/include/trace/events/pdc.h
@@ -0,0 +1,50 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM pdc
+
+#if !defined(_TRACE_PDC_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PDC_H_
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(irq_pin_config,
+
+	TP_PROTO(char *func, u32 pin, u32 hwirq, u32 type, u32 enable),
+
+	TP_ARGS(func, pin, hwirq, type, enable),
+
+	TP_STRUCT__entry(
+		__field(char *, func)
+		__field(u32, pin)
+		__field(u32, hwirq)
+		__field(u32, type)
+		__field(u32, enable)
+	),
+
+	TP_fast_assign(
+		__entry->pin = pin;
+		__entry->func = func;
+		__entry->hwirq = hwirq;
+		__entry->type = type;
+		__entry->enable = enable;
+	),
+
+	TP_printk("%s hwirq:%u pin:%u type:%u enable:%u",
+		__entry->func, __entry->hwirq, __entry->pin, __entry->type,
+		__entry->enable)
+);
+
+#endif
+#define TRACE_INCLUDE_FILE pdc
+#include <trace/define_trace.h>
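
The TRACE_EVENT() above expands into a trace_irq_pin_config() helper that the PDC driver can call each time it programs a wakeup pin; exactly one .c file must define CREATE_TRACE_POINTS before including the header so the tracepoint bodies are emitted. A hedged sketch of a call site (the function name and register writes are illustrative, not taken from this patch):

/* In exactly one driver .c file, so the tracepoint bodies get emitted: */
#define CREATE_TRACE_POINTS
#include <trace/events/pdc.h>

#include <linux/types.h>

/* Illustrative call site: configure one PDC pin and log what was programmed. */
static void pdc_pin_config(u32 pin, u32 hwirq, u32 type, bool enable)
{
	/* ... program IRQ_i_CFG and the IRQ_ENABLE_BANK bit for this pin ... */
	trace_irq_pin_config("pdc_pin_config", pin, hwirq, type, enable);
}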
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index a70b90d..c61c56f 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -26,6 +26,7 @@
 #include <linux/interrupt.h>
 #include <linux/debug_locks.h>
 #include <linux/osq_lock.h>
+#include <linux/delay.h>
 
 /*
  * In the DEBUG case we are using the "NULL fastpath" for mutexes,
@@ -378,6 +379,17 @@
 		 * values at the cost of a few extra spins.
 		 */
 		cpu_relax_lowlatency();
+
+		/*
+		 * On ARM systems we must slow down the waiter's repeated
+		 * acquisition of spin_mlock and atomics on the lock count,
+		 * or we risk starving out a thread attempting to release
+		 * the mutex. The mutex slowpath release must take the
+		 * wait_lock spinlock, which can share an exclusive monitor
+		 * with the other waiter atomics in the mutex data structure,
+		 * so we must take care to rate-limit the waiters.
+		 */
+		udelay(1);
 	}
 
 	osq_unlock(&lock->osq);
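
The comment above captures the issue: optimistic spinners hammering the lock word can monopolize the shared exclusive monitor and cache line, keeping the releasing CPU from ever taking wait_lock. A userspace sketch of the resulting loop shape, with usleep(1) standing in for the kernel's udelay(1) and a plain atomic standing in for struct mutex internals:

#include <stdatomic.h>
#include <unistd.h>

static atomic_int lock_word;	/* 0 = free, non-zero = held */

/* Spin until the holder releases, backing off 1us per iteration so the
 * releasing CPU gets a fair chance at the contended cache line. */
static void rate_limited_spin(void)
{
	while (atomic_load_explicit(&lock_word, memory_order_acquire) != 0)
		usleep(1);	/* kernel code pairs cpu_relax_lowlatency() with udelay(1) */
}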
diff --git a/sound/soc/codecs/wcd9330.c b/sound/soc/codecs/wcd9330.c
index 0b07393..4278e36 100644
--- a/sound/soc/codecs/wcd9330.c
+++ b/sound/soc/codecs/wcd9330.c
@@ -1536,6 +1536,13 @@
 	tomtom_mad_input = ucontrol->value.integer.value[0];
 	micb_4_int_reg = tomtom->resmgr.reg_addr->micb_4_int_rbias;
 
+	if (tomtom_mad_input >= ARRAY_SIZE(tomtom_conn_mad_text)) {
+		dev_err(codec->dev,
+			"%s: tomtom_mad_input = %d out of bounds\n",
+			__func__, tomtom_mad_input);
+		return -EINVAL;
+	}
+
 	pr_debug("%s: tomtom_mad_input = %s\n", __func__,
 			tomtom_conn_mad_text[tomtom_mad_input]);
 
diff --git a/sound/soc/msm/qdsp6v2/Makefile b/sound/soc/msm/qdsp6v2/Makefile
index d4db55f..36382ba 100644
--- a/sound/soc/msm/qdsp6v2/Makefile
+++ b/sound/soc/msm/qdsp6v2/Makefile
@@ -1,5 +1,5 @@
-snd-soc-qdsp6v2-objs += msm-dai-q6-v2.o msm-pcm-q6-v2.o msm-pcm-routing-v2.o \
-			msm-compress-q6-v2.o msm-compr-q6-v2.o \
+snd-soc-qdsp6v2-objs += msm-dai-q6-v2.o msm-pcm-q6-v2.o \
+			msm-pcm-routing-v2.o msm-compress-q6-v2.o \
 			msm-pcm-afe-v2.o msm-pcm-voip-v2.o \
 			msm-pcm-voice-v2.o msm-dai-q6-hdmi-v2.o \
 			msm-lsm-client.o msm-pcm-host-voice-v2.o \
diff --git a/sound/soc/msm/qdsp6v2/audio_cal_utils.c b/sound/soc/msm/qdsp6v2/audio_cal_utils.c
index 5d4a0ba..820aa1b 100644
--- a/sound/soc/msm/qdsp6v2/audio_cal_utils.c
+++ b/sound/soc/msm/qdsp6v2/audio_cal_utils.c
@@ -646,7 +646,9 @@
 	return cal_block;
 err:
 	kfree(cal_block->cal_info);
+	cal_block->cal_info = NULL;
 	kfree(cal_block->client_info);
+	cal_block->client_info = NULL;
 	kfree(cal_block);
 	cal_block = NULL;
 	return cal_block;
diff --git a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
deleted file mode 100644
index 449325c..0000000
--- a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
+++ /dev/null
@@ -1,1714 +0,0 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/time.h>
-#include <linux/wait.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <sound/core.h>
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-#include <sound/pcm.h>
-#include <sound/initval.h>
-#include <sound/control.h>
-#include <sound/q6asm-v2.h>
-#include <sound/pcm_params.h>
-#include <asm/dma.h>
-#include <linux/dma-mapping.h>
-#include <linux/msm_audio_ion.h>
-
-#include <sound/timer.h>
-
-#include "msm-compr-q6-v2.h"
-#include "msm-pcm-routing-v2.h"
-#include <sound/tlv.h>
-
-#define COMPRE_CAPTURE_NUM_PERIODS	16
-/* Allocate the worst case frame size for compressed audio */
-#define COMPRE_CAPTURE_HEADER_SIZE	(sizeof(struct snd_compr_audio_info))
-/* Changing period size to 4032. 4032 will make sure COMPRE_CAPTURE_PERIOD_SIZE
- * is 4096 with meta data size of 64 and MAX_NUM_FRAMES_PER_BUFFER 1
- */
-#define COMPRE_CAPTURE_MAX_FRAME_SIZE	(4032)
-#define COMPRE_CAPTURE_PERIOD_SIZE	((COMPRE_CAPTURE_MAX_FRAME_SIZE + \
-					  COMPRE_CAPTURE_HEADER_SIZE) * \
-					  MAX_NUM_FRAMES_PER_BUFFER)
-#define COMPRE_OUTPUT_METADATA_SIZE	(sizeof(struct output_meta_data_st))
-#define COMPRESSED_LR_VOL_MAX_STEPS	0x20002000
-
-#define MAX_AC3_PARAM_SIZE		(18*2*sizeof(int))
-#define AMR_WB_BAND_MODE 8
-#define AMR_WB_DTX_MODE 0
-
-
-const DECLARE_TLV_DB_LINEAR(compr_rx_vol_gain, 0,
-			    COMPRESSED_LR_VOL_MAX_STEPS);
-
-static struct audio_locks the_locks;
-
-static struct snd_pcm_hardware msm_compr_hardware_capture = {
-	.info =		 (SNDRV_PCM_INFO_MMAP |
-				SNDRV_PCM_INFO_BLOCK_TRANSFER |
-				SNDRV_PCM_INFO_MMAP_VALID |
-				SNDRV_PCM_INFO_INTERLEAVED |
-				SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
-	.formats =	      SNDRV_PCM_FMTBIT_S16_LE,
-	.rates =		SNDRV_PCM_RATE_8000_48000,
-	.rate_min =	     8000,
-	.rate_max =	     48000,
-	.channels_min =	 1,
-	.channels_max =	 8,
-	.buffer_bytes_max =
-		COMPRE_CAPTURE_PERIOD_SIZE * COMPRE_CAPTURE_NUM_PERIODS,
-	.period_bytes_min =	COMPRE_CAPTURE_PERIOD_SIZE,
-	.period_bytes_max = COMPRE_CAPTURE_PERIOD_SIZE,
-	.periods_min =	  COMPRE_CAPTURE_NUM_PERIODS,
-	.periods_max =	  COMPRE_CAPTURE_NUM_PERIODS,
-	.fifo_size =	    0,
-};
-
-static struct snd_pcm_hardware msm_compr_hardware_playback = {
-	.info =		 (SNDRV_PCM_INFO_MMAP |
-				SNDRV_PCM_INFO_BLOCK_TRANSFER |
-				SNDRV_PCM_INFO_MMAP_VALID |
-				SNDRV_PCM_INFO_INTERLEAVED |
-				SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
-	.formats =	      SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
-	.rates =		SNDRV_PCM_RATE_8000_48000 | SNDRV_PCM_RATE_KNOT,
-	.rate_min =	     8000,
-	.rate_max =	     48000,
-	.channels_min =	 1,
-	.channels_max =	 8,
-	.buffer_bytes_max =     1024 * 1024,
-	.period_bytes_min =	128 * 1024,
-	.period_bytes_max =     256 * 1024,
-	.periods_min =	  4,
-	.periods_max =	  8,
-	.fifo_size =	    0,
-};
-
-/* Conventional and unconventional sample rate supported */
-static unsigned int supported_sample_rates[] = {
-	8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
-};
-
-/* Add supported codecs for compress capture path */
-static uint32_t supported_compr_capture_codecs[] = {
-	SND_AUDIOCODEC_AMRWB
-};
-
-static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
-	.count = ARRAY_SIZE(supported_sample_rates),
-	.list = supported_sample_rates,
-	.mask = 0,
-};
-
-static bool msm_compr_capture_codecs(uint32_t req_codec)
-{
-	int i;
-
-	pr_debug("%s req_codec:%d\n", __func__, req_codec);
-	if (req_codec == 0)
-		return false;
-	for (i = 0; i < ARRAY_SIZE(supported_compr_capture_codecs); i++) {
-		if (req_codec == supported_compr_capture_codecs[i])
-			return true;
-	}
-	return false;
-}
-
-static void compr_event_handler(uint32_t opcode,
-		uint32_t token, uint32_t *payload, void *priv)
-{
-	struct compr_audio *compr = priv;
-	struct msm_audio *prtd = &compr->prtd;
-	struct snd_pcm_substream *substream = prtd->substream;
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct audio_aio_write_param param;
-	struct audio_aio_read_param read_param;
-	struct audio_buffer *buf = NULL;
-	phys_addr_t temp;
-	struct output_meta_data_st output_meta_data;
-	uint32_t *ptrmem = (uint32_t *)payload;
-	int i = 0;
-	int time_stamp_flag = 0;
-	int buffer_length = 0;
-	int stop_playback = 0;
-
-	pr_debug("%s opcode =%08x\n", __func__, opcode);
-	switch (opcode) {
-	case ASM_DATA_EVENT_WRITE_DONE_V2: {
-		uint32_t *ptrmem = (uint32_t *)&param;
-
-		pr_debug("ASM_DATA_EVENT_WRITE_DONE\n");
-		pr_debug("Buffer Consumed = 0x%08x\n", *ptrmem);
-		prtd->pcm_irq_pos += prtd->pcm_count;
-		if (atomic_read(&prtd->start))
-			snd_pcm_period_elapsed(substream);
-		else
-			if (substream->timer_running)
-				snd_timer_interrupt(substream->timer, 1);
-		atomic_inc(&prtd->out_count);
-		wake_up(&the_locks.write_wait);
-		if (!atomic_read(&prtd->start)) {
-			atomic_set(&prtd->pending_buffer, 1);
-			break;
-		}
-		atomic_set(&prtd->pending_buffer, 0);
-
-		/*
-		 * check for underrun
-		 */
-		snd_pcm_stream_lock_irq(substream);
-		if (runtime->status->hw_ptr >= runtime->control->appl_ptr) {
-			runtime->render_flag |= SNDRV_RENDER_STOPPED;
-			stop_playback = 1;
-		}
-		snd_pcm_stream_unlock_irq(substream);
-
-		if (stop_playback) {
-			pr_err("underrun! render stopped\n");
-			break;
-		}
-
-		buf = prtd->audio_client->port[IN].buf;
-		pr_debug("%s:writing %d bytes of buffer[%d] to dsp 2\n",
-				__func__, prtd->pcm_count, prtd->out_head);
-		temp = buf[0].phys + (prtd->out_head * prtd->pcm_count);
-		pr_debug("%s:writing buffer[%d] from 0x%pK\n",
-			__func__, prtd->out_head, &temp);
-
-		if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
-			time_stamp_flag = SET_TIMESTAMP;
-		else
-			time_stamp_flag = NO_TIMESTAMP;
-		memcpy(&output_meta_data, (char *)(buf->data +
-			prtd->out_head * prtd->pcm_count),
-			COMPRE_OUTPUT_METADATA_SIZE);
-
-		buffer_length = output_meta_data.frame_size;
-		pr_debug("meta_data_length: %d, frame_length: %d\n",
-			 output_meta_data.meta_data_length,
-			 output_meta_data.frame_size);
-		pr_debug("timestamp_msw: %d, timestamp_lsw: %d\n",
-			 output_meta_data.timestamp_msw,
-			 output_meta_data.timestamp_lsw);
-		if (buffer_length == 0) {
-			pr_debug("Received a zero length buffer-break out");
-			break;
-		}
-		param.paddr = temp + output_meta_data.meta_data_length;
-		param.len = buffer_length;
-		param.msw_ts = output_meta_data.timestamp_msw;
-		param.lsw_ts = output_meta_data.timestamp_lsw;
-		param.flags = time_stamp_flag;
-		param.uid = prtd->session_id;
-		for (i = 0; i < sizeof(struct audio_aio_write_param)/4;
-					i++, ++ptrmem)
-			pr_debug("cmd[%d]=0x%08x\n", i, *ptrmem);
-		if (q6asm_async_write(prtd->audio_client,
-					&param) < 0)
-			pr_err("%s:q6asm_async_write failed\n",
-				__func__);
-		else
-			prtd->out_head =
-				(prtd->out_head + 1) & (runtime->periods - 1);
-		break;
-	}
-	case ASM_DATA_EVENT_RENDERED_EOS:
-		pr_debug("ASM_DATA_CMDRSP_EOS\n");
-		if (atomic_read(&prtd->eos)) {
-			pr_debug("ASM_DATA_CMDRSP_EOS wake up\n");
-			prtd->cmd_ack = 1;
-			wake_up(&the_locks.eos_wait);
-			atomic_set(&prtd->eos, 0);
-		}
-		break;
-	case ASM_DATA_EVENT_READ_DONE_V2: {
-		pr_debug("ASM_DATA_EVENT_READ_DONE\n");
-		pr_debug("buf = %pK, data = 0x%X, *data = %pK,\n"
-			 "prtd->pcm_irq_pos = %d\n",
-				prtd->audio_client->port[OUT].buf,
-			 *(uint32_t *)prtd->audio_client->port[OUT].buf->data,
-				prtd->audio_client->port[OUT].buf->data,
-				prtd->pcm_irq_pos);
-
-		memcpy(prtd->audio_client->port[OUT].buf->data +
-			   prtd->pcm_irq_pos, (ptrmem + READDONE_IDX_SIZE),
-			   COMPRE_CAPTURE_HEADER_SIZE);
-		pr_debug("buf = %pK, updated data = 0x%X, *data = %pK\n",
-				prtd->audio_client->port[OUT].buf,
-			*(uint32_t *)(prtd->audio_client->port[OUT].buf->data +
-				prtd->pcm_irq_pos),
-				prtd->audio_client->port[OUT].buf->data);
-		if (!atomic_read(&prtd->start))
-			break;
-		pr_debug("frame size=%d, buffer = 0x%X\n",
-				ptrmem[READDONE_IDX_SIZE],
-				ptrmem[READDONE_IDX_BUFADD_LSW]);
-		if (ptrmem[READDONE_IDX_SIZE] > COMPRE_CAPTURE_MAX_FRAME_SIZE) {
-			pr_err("Frame length exceeded the max length");
-			break;
-		}
-		buf = prtd->audio_client->port[OUT].buf;
-
-		pr_debug("pcm_irq_pos=%d, buf[0].phys = 0x%pK\n",
-				prtd->pcm_irq_pos, &buf[0].phys);
-		read_param.len = prtd->pcm_count - COMPRE_CAPTURE_HEADER_SIZE;
-		read_param.paddr = buf[0].phys +
-			prtd->pcm_irq_pos + COMPRE_CAPTURE_HEADER_SIZE;
-		prtd->pcm_irq_pos += prtd->pcm_count;
-
-		if (atomic_read(&prtd->start))
-			snd_pcm_period_elapsed(substream);
-
-		q6asm_async_read(prtd->audio_client, &read_param);
-		break;
-	}
-	case APR_BASIC_RSP_RESULT: {
-		switch (payload[0]) {
-		case ASM_SESSION_CMD_RUN_V2: {
-			if (substream->stream
-				!= SNDRV_PCM_STREAM_PLAYBACK) {
-				atomic_set(&prtd->start, 1);
-				break;
-			}
-			if (!atomic_read(&prtd->pending_buffer))
-				break;
-			pr_debug("%s: writing %d bytes of buffer[%d] to dsp\n",
-				__func__, prtd->pcm_count, prtd->out_head);
-			buf = prtd->audio_client->port[IN].buf;
-			pr_debug("%s: writing buffer[%d] from 0x%pK head %d count %d\n",
-				__func__, prtd->out_head, &buf[0].phys,
-				prtd->pcm_count, prtd->out_head);
-			if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
-				time_stamp_flag = SET_TIMESTAMP;
-			else
-				time_stamp_flag = NO_TIMESTAMP;
-			memcpy(&output_meta_data, (char *)(buf->data +
-				prtd->out_head * prtd->pcm_count),
-				COMPRE_OUTPUT_METADATA_SIZE);
-			buffer_length = output_meta_data.frame_size;
-			pr_debug("meta_data_length: %d, frame_length: %d\n",
-				 output_meta_data.meta_data_length,
-				 output_meta_data.frame_size);
-			pr_debug("timestamp_msw: %d, timestamp_lsw: %d\n",
-				 output_meta_data.timestamp_msw,
-				 output_meta_data.timestamp_lsw);
-			param.paddr = buf[prtd->out_head].phys
-					+ output_meta_data.meta_data_length;
-			param.len = buffer_length;
-			param.msw_ts = output_meta_data.timestamp_msw;
-			param.lsw_ts = output_meta_data.timestamp_lsw;
-			param.flags = time_stamp_flag;
-			param.uid = prtd->session_id;
-			param.metadata_len = COMPRE_OUTPUT_METADATA_SIZE;
-			if (q6asm_async_write(prtd->audio_client,
-						&param) < 0)
-				pr_err("%s:q6asm_async_write failed\n",
-					__func__);
-			else
-				prtd->out_head =
-					(prtd->out_head + 1)
-					& (runtime->periods - 1);
-			atomic_set(&prtd->pending_buffer, 0);
-		}
-			break;
-		case ASM_STREAM_CMD_FLUSH:
-			pr_debug("ASM_STREAM_CMD_FLUSH\n");
-			prtd->cmd_ack = 1;
-			wake_up(&the_locks.flush_wait);
-			break;
-		default:
-			break;
-		}
-		break;
-	}
-	default:
-		pr_debug("Not Supported Event opcode[0x%x]\n", opcode);
-		break;
-	}
-}
-
-static int msm_compr_send_ddp_cfg(struct audio_client *ac,
-					struct snd_dec_ddp *ddp)
-{
-	int i, rc;
-
-	pr_debug("%s\n", __func__);
-
-	if (ddp->params_length / 2 > SND_DEC_DDP_MAX_PARAMS) {
-		pr_err("%s: Invalid number of params %u, max allowed %u\n",
-			__func__, ddp->params_length / 2,
-			SND_DEC_DDP_MAX_PARAMS);
-		return -EINVAL;
-	}
-
-	for (i = 0; i < ddp->params_length/2; i++) {
-		rc = q6asm_ds1_set_endp_params(ac, ddp->params_id[i],
-						ddp->params_value[i]);
-		if (rc) {
-			pr_err("sending params_id: %d failed\n",
-				ddp->params_id[i]);
-			return rc;
-		}
-	}
-	return 0;
-}
-
-static int msm_compr_playback_prepare(struct snd_pcm_substream *substream)
-{
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct compr_audio *compr = runtime->private_data;
-	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
-	struct msm_audio *prtd = &compr->prtd;
-	struct snd_pcm_hw_params *params;
-	struct asm_aac_cfg aac_cfg;
-	uint16_t bits_per_sample = 16;
-	int ret;
-
-	struct asm_softpause_params softpause = {
-		.enable = SOFT_PAUSE_ENABLE,
-		.period = SOFT_PAUSE_PERIOD,
-		.step = SOFT_PAUSE_STEP,
-		.rampingcurve = SOFT_PAUSE_CURVE_LINEAR,
-	};
-	struct asm_softvolume_params softvol = {
-		.period = SOFT_VOLUME_PERIOD,
-		.step = SOFT_VOLUME_STEP,
-		.rampingcurve = SOFT_VOLUME_CURVE_LINEAR,
-	};
-
-	pr_debug("%s\n", __func__);
-
-	params = &soc_prtd->dpcm[substream->stream].hw_params;
-	if (runtime->format == SNDRV_PCM_FORMAT_S24_LE)
-		bits_per_sample = 24;
-
-	ret = q6asm_open_write_v2(prtd->audio_client,
-			compr->codec, bits_per_sample);
-	if (ret < 0) {
-		pr_err("%s: Session out open failed\n",
-				__func__);
-		return -ENOMEM;
-	}
-	msm_pcm_routing_reg_phy_stream(
-			soc_prtd->dai_link->id,
-			prtd->audio_client->perf_mode,
-			prtd->session_id,
-			substream->stream);
-	/*
-	 * the number of channels are required to call volume api
-	 * accoridngly. So, get channels from hw params
-	 */
-	if ((params_channels(params) > 0) &&
-			(params_periods(params) <= runtime->hw.channels_max))
-		prtd->channel_mode = params_channels(params);
-
-	ret = q6asm_set_softpause(prtd->audio_client, &softpause);
-	if (ret < 0)
-		pr_err("%s: Send SoftPause Param failed ret=%d\n",
-				__func__, ret);
-	ret = q6asm_set_softvolume(prtd->audio_client, &softvol);
-	if (ret < 0)
-		pr_err("%s: Send SoftVolume Param failed ret=%d\n",
-				__func__, ret);
-
-	ret = q6asm_set_io_mode(prtd->audio_client,
-			(COMPRESSED_IO | ASYNC_IO_MODE));
-	if (ret < 0) {
-		pr_err("%s: Set IO mode failed\n", __func__);
-		return -ENOMEM;
-	}
-
-	prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream);
-	prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
-	prtd->pcm_irq_pos = 0;
-	/* rate and channels are sent to audio driver */
-	prtd->samp_rate = runtime->rate;
-	prtd->channel_mode = runtime->channels;
-	prtd->out_head = 0;
-	atomic_set(&prtd->out_count, runtime->periods);
-
-	if (prtd->enabled)
-		return 0;
-
-	switch (compr->info.codec_param.codec.id) {
-	case SND_AUDIOCODEC_MP3:
-		/* No media format block for mp3 */
-		break;
-	case SND_AUDIOCODEC_AAC:
-		pr_debug("%s: SND_AUDIOCODEC_AAC\n", __func__);
-		memset(&aac_cfg, 0x0, sizeof(struct asm_aac_cfg));
-		aac_cfg.aot = AAC_ENC_MODE_EAAC_P;
-		aac_cfg.format = 0x03;
-		aac_cfg.ch_cfg = runtime->channels;
-		aac_cfg.sample_rate =  runtime->rate;
-		ret = q6asm_media_format_block_aac(prtd->audio_client,
-					&aac_cfg);
-		if (ret < 0)
-			pr_err("%s: CMD Format block failed\n", __func__);
-		break;
-	case SND_AUDIOCODEC_AC3: {
-		struct snd_dec_ddp *ddp =
-				&compr->info.codec_param.codec.options.ddp;
-		pr_debug("%s: SND_AUDIOCODEC_AC3\n", __func__);
-		ret = msm_compr_send_ddp_cfg(prtd->audio_client, ddp);
-		if (ret < 0)
-			pr_err("%s: DDP CMD CFG failed\n", __func__);
-		break;
-	}
-	case SND_AUDIOCODEC_EAC3: {
-		struct snd_dec_ddp *ddp =
-				&compr->info.codec_param.codec.options.ddp;
-		pr_debug("%s: SND_AUDIOCODEC_EAC3\n", __func__);
-		ret = msm_compr_send_ddp_cfg(prtd->audio_client, ddp);
-		if (ret < 0)
-			pr_err("%s: DDP CMD CFG failed\n", __func__);
-		break;
-	}
-	default:
-		return -EINVAL;
-	}
-
-	prtd->enabled = 1;
-	prtd->cmd_ack = 0;
-	prtd->cmd_interrupt = 0;
-
-	return 0;
-}
-
-static int msm_compr_capture_prepare(struct snd_pcm_substream *substream)
-{
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct compr_audio *compr = runtime->private_data;
-	struct msm_audio *prtd = &compr->prtd;
-	struct audio_buffer *buf = prtd->audio_client->port[OUT].buf;
-	struct snd_codec *codec = &compr->info.codec_param.codec;
-	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
-	struct audio_aio_read_param read_param;
-	uint16_t bits_per_sample = 16;
-	int ret = 0;
-	int i;
-
-	prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream);
-	prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
-	prtd->pcm_irq_pos = 0;
-
-	if (runtime->format == SNDRV_PCM_FORMAT_S24_LE)
-		bits_per_sample = 24;
-
-	if (!msm_compr_capture_codecs(
-				compr->info.codec_param.codec.id)) {
-		/*
-		 * request codec invalid or not supported,
-		 * use default compress format
-		 */
-		compr->info.codec_param.codec.id =
-			SND_AUDIOCODEC_AMRWB;
-	}
-	switch (compr->info.codec_param.codec.id) {
-	case SND_AUDIOCODEC_AMRWB:
-		pr_debug("q6asm_open_read(FORMAT_AMRWB)\n");
-		ret = q6asm_open_read(prtd->audio_client,
-				FORMAT_AMRWB);
-		if (ret < 0) {
-			pr_err("%s: compressed Session out open failed\n",
-					__func__);
-			return -ENOMEM;
-		}
-		pr_debug("msm_pcm_routing_reg_phy_stream\n");
-		msm_pcm_routing_reg_phy_stream(
-				soc_prtd->dai_link->id,
-				prtd->audio_client->perf_mode,
-				prtd->session_id, substream->stream);
-		break;
-	default:
-		pr_debug("q6asm_open_read_compressed(COMPRESSED_META_DATA_MODE)\n");
-		/*
-		 * ret = q6asm_open_read_compressed(prtd->audio_client,
-		 * MAX_NUM_FRAMES_PER_BUFFER,
-		 * COMPRESSED_META_DATA_MODE);
-		 */
-			ret = -EINVAL;
-			break;
-	}
-
-	if (ret < 0) {
-		pr_err("%s: compressed Session out open failed\n",
-				__func__);
-		return -ENOMEM;
-	}
-
-	ret = q6asm_set_io_mode(prtd->audio_client,
-		(COMPRESSED_IO | ASYNC_IO_MODE));
-		if (ret < 0) {
-			pr_err("%s: Set IO mode failed\n", __func__);
-				return -ENOMEM;
-		}
-
-	if (!msm_compr_capture_codecs(codec->id)) {
-		/*
-		 * request codec invalid or not supported,
-		 * use default compress format
-		 */
-		codec->id = SND_AUDIOCODEC_AMRWB;
-	}
-	/* rate and channels are sent to audio driver */
-	prtd->samp_rate = runtime->rate;
-	prtd->channel_mode = runtime->channels;
-
-	if (prtd->enabled)
-		return ret;
-	read_param.len = prtd->pcm_count;
-
-	switch (codec->id) {
-	case SND_AUDIOCODEC_AMRWB:
-		pr_debug("SND_AUDIOCODEC_AMRWB\n");
-		ret = q6asm_enc_cfg_blk_amrwb(prtd->audio_client,
-			MAX_NUM_FRAMES_PER_BUFFER,
-			/*
-			 * use fixed band mode and dtx mode
-			 * band mode - 23.85 kbps
-			 */
-			AMR_WB_BAND_MODE,
-			/* dtx mode - disable */
-			AMR_WB_DTX_MODE);
-		if (ret < 0)
-			pr_err("%s: CMD Format block failed: %d\n",
-				__func__, ret);
-		break;
-	default:
-		pr_debug("No config for codec %d\n", codec->id);
-	}
-	pr_debug("%s: Samp_rate = %d, Channel = %d, pcm_size = %d,\n"
-			 "pcm_count = %d, periods = %d\n",
-			 __func__, prtd->samp_rate, prtd->channel_mode,
-			 prtd->pcm_size, prtd->pcm_count, runtime->periods);
-
-	for (i = 0; i < runtime->periods; i++) {
-		read_param.uid = i;
-		switch (codec->id) {
-		case SND_AUDIOCODEC_AMRWB:
-			read_param.len = prtd->pcm_count
-					- COMPRE_CAPTURE_HEADER_SIZE;
-			read_param.paddr = buf[i].phys
-					+ COMPRE_CAPTURE_HEADER_SIZE;
-			pr_debug("Push buffer [%d] to DSP, paddr: %pK, vaddr: %pK\n",
-					i, &read_param.paddr,
-					buf[i].data);
-			q6asm_async_read(prtd->audio_client, &read_param);
-			break;
-		default:
-			read_param.paddr = buf[i].phys;
-			/* q6asm_async_read_compressed(prtd->audio_client,
-			 * &read_param);
-			 */
-			pr_debug("%s: To add support for read compressed\n",
-								__func__);
-			ret = -EINVAL;
-			break;
-		}
-	}
-	prtd->periods = runtime->periods;
-
-	prtd->enabled = 1;
-
-	return ret;
-}
-
-static int msm_compr_trigger(struct snd_pcm_substream *substream, int cmd)
-{
-	int ret = 0;
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
-	struct compr_audio *compr = runtime->private_data;
-	struct msm_audio *prtd = &compr->prtd;
-
-	pr_debug("%s\n", __func__);
-	switch (cmd) {
-	case SNDRV_PCM_TRIGGER_START:
-		prtd->pcm_irq_pos = 0;
-
-		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
-			if (!msm_compr_capture_codecs(
-				compr->info.codec_param.codec.id)) {
-				/*
-				 * request codec invalid or not supported,
-				 * use default compress format
-				 */
-				compr->info.codec_param.codec.id =
-				SND_AUDIOCODEC_AMRWB;
-			}
-			switch (compr->info.codec_param.codec.id) {
-			case SND_AUDIOCODEC_AMRWB:
-				break;
-			default:
-				msm_pcm_routing_reg_psthr_stream(
-					soc_prtd->dai_link->id,
-					prtd->session_id, substream->stream);
-				break;
-			}
-		}
-		atomic_set(&prtd->pending_buffer, 1);
-		/* fallthrough */
-	case SNDRV_PCM_TRIGGER_RESUME:
-	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-		pr_debug("%s: Trigger start\n", __func__);
-		q6asm_run_nowait(prtd->audio_client, 0, 0, 0);
-		atomic_set(&prtd->start, 1);
-		break;
-	case SNDRV_PCM_TRIGGER_STOP:
-		pr_debug("SNDRV_PCM_TRIGGER_STOP\n");
-		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
-			switch (compr->info.codec_param.codec.id) {
-			case SND_AUDIOCODEC_AMRWB:
-				break;
-			default:
-				msm_pcm_routing_reg_psthr_stream(
-					soc_prtd->dai_link->id,
-					prtd->session_id, substream->stream);
-				break;
-			}
-		}
-		atomic_set(&prtd->start, 0);
-		runtime->render_flag &= ~SNDRV_RENDER_STOPPED;
-		break;
-	case SNDRV_PCM_TRIGGER_SUSPEND:
-	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-		pr_debug("SNDRV_PCM_TRIGGER_PAUSE\n");
-		q6asm_cmd_nowait(prtd->audio_client, CMD_PAUSE);
-		atomic_set(&prtd->start, 0);
-		runtime->render_flag &= ~SNDRV_RENDER_STOPPED;
-		break;
-	default:
-		ret = -EINVAL;
-		break;
-	}
-
-	return ret;
-}
-
-static void populate_codec_list(struct compr_audio *compr,
-		struct snd_pcm_runtime *runtime)
-{
-	pr_debug("%s\n", __func__);
-	/* MP3 Block */
-	compr->info.compr_cap.num_codecs = 5;
-	compr->info.compr_cap.min_fragment_size = runtime->hw.period_bytes_min;
-	compr->info.compr_cap.max_fragment_size = runtime->hw.period_bytes_max;
-	compr->info.compr_cap.min_fragments = runtime->hw.periods_min;
-	compr->info.compr_cap.max_fragments = runtime->hw.periods_max;
-	compr->info.compr_cap.codecs[0] = SND_AUDIOCODEC_MP3;
-	compr->info.compr_cap.codecs[1] = SND_AUDIOCODEC_AAC;
-	compr->info.compr_cap.codecs[2] = SND_AUDIOCODEC_AC3;
-	compr->info.compr_cap.codecs[3] = SND_AUDIOCODEC_EAC3;
-	compr->info.compr_cap.codecs[4] = SND_AUDIOCODEC_AMRWB;
-	/* Add new codecs here */
-}
-
-static int msm_compr_open(struct snd_pcm_substream *substream)
-{
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct compr_audio *compr;
-	struct msm_audio *prtd;
-	int ret = 0;
-
-	pr_debug("%s\n", __func__);
-	compr = kzalloc(sizeof(struct compr_audio), GFP_KERNEL);
-	if (compr == NULL) {
-		pr_err("Failed to allocate memory for msm_audio\n");
-		return -ENOMEM;
-	}
-	prtd = &compr->prtd;
-	prtd->substream = substream;
-	runtime->render_flag = SNDRV_DMA_MODE;
-	prtd->audio_client = q6asm_audio_client_alloc(
-				(app_cb)compr_event_handler, compr);
-	if (!prtd->audio_client) {
-		pr_info("%s: Could not allocate memory\n", __func__);
-		kfree(prtd);
-		return -ENOMEM;
-	}
-
-	prtd->audio_client->perf_mode = false;
-	pr_info("%s: session ID %d\n", __func__, prtd->audio_client->session);
-
-	prtd->session_id = prtd->audio_client->session;
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-		runtime->hw = msm_compr_hardware_playback;
-		prtd->cmd_ack = 1;
-	} else {
-		runtime->hw = msm_compr_hardware_capture;
-	}
-
-
-	ret = snd_pcm_hw_constraint_list(runtime, 0,
-			SNDRV_PCM_HW_PARAM_RATE,
-			&constraints_sample_rates);
-	if (ret < 0)
-		pr_info("snd_pcm_hw_constraint_list failed\n");
-	/* Ensure that buffer size is a multiple of period size */
-	ret = snd_pcm_hw_constraint_integer(runtime,
-			    SNDRV_PCM_HW_PARAM_PERIODS);
-	if (ret < 0)
-		pr_info("snd_pcm_hw_constraint_integer failed\n");
-
-	prtd->dsp_cnt = 0;
-	atomic_set(&prtd->pending_buffer, 1);
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		compr->codec = FORMAT_MP3;
-	populate_codec_list(compr, runtime);
-	runtime->private_data = compr;
-	atomic_set(&prtd->eos, 0);
-	return 0;
-}
-
-static int compressed_set_volume(struct msm_audio *prtd, uint32_t volume)
-{
-	int rc = 0;
-	int avg_vol = 0;
-	int lgain = (volume >> 16) & 0xFFFF;
-	int rgain = volume & 0xFFFF;
-
-	if (prtd && prtd->audio_client) {
-		pr_debug("%s: channels %d volume 0x%x\n", __func__,
-			prtd->channel_mode, volume);
-		if ((prtd->channel_mode == 2) &&
-			(lgain != rgain)) {
-			pr_debug("%s: call q6asm_set_lrgain\n", __func__);
-			rc = q6asm_set_lrgain(prtd->audio_client, lgain, rgain);
-		} else {
-			avg_vol = (lgain + rgain)/2;
-			pr_debug("%s: call q6asm_set_volume\n", __func__);
-			rc = q6asm_set_volume(prtd->audio_client, avg_vol);
-		}
-		if (rc < 0) {
-			pr_err("%s: Send Volume command failed rc=%d\n",
-				__func__, rc);
-		}
-	}
-	return rc;
-}
-
-static int msm_compr_playback_close(struct snd_pcm_substream *substream)
-{
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
-	struct compr_audio *compr = runtime->private_data;
-	struct msm_audio *prtd = &compr->prtd;
-	int dir = 0;
-
-	pr_debug("%s\n", __func__);
-
-	dir = IN;
-	atomic_set(&prtd->pending_buffer, 0);
-
-	prtd->pcm_irq_pos = 0;
-	q6asm_cmd(prtd->audio_client, CMD_CLOSE);
-	q6asm_audio_client_buf_free_contiguous(dir,
-				prtd->audio_client);
-		msm_pcm_routing_dereg_phy_stream(
-			soc_prtd->dai_link->id,
-			SNDRV_PCM_STREAM_PLAYBACK);
-	q6asm_audio_client_free(prtd->audio_client);
-	kfree(prtd);
-	return 0;
-}
-
-static int msm_compr_capture_close(struct snd_pcm_substream *substream)
-{
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
-	struct compr_audio *compr = runtime->private_data;
-	struct msm_audio *prtd = &compr->prtd;
-	int dir = OUT;
-
-	pr_debug("%s\n", __func__);
-	atomic_set(&prtd->pending_buffer, 0);
-	q6asm_cmd(prtd->audio_client, CMD_CLOSE);
-	q6asm_audio_client_buf_free_contiguous(dir,
-				prtd->audio_client);
-	msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->id,
-				SNDRV_PCM_STREAM_CAPTURE);
-	q6asm_audio_client_free(prtd->audio_client);
-	kfree(prtd);
-	return 0;
-}
-
-static int msm_compr_close(struct snd_pcm_substream *substream)
-{
-	int ret = 0;
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		ret = msm_compr_playback_close(substream);
-	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
-		ret = msm_compr_capture_close(substream);
-	return ret;
-}
-
-static int msm_compr_prepare(struct snd_pcm_substream *substream)
-{
-	int ret = 0;
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		ret = msm_compr_playback_prepare(substream);
-	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
-		ret = msm_compr_capture_prepare(substream);
-	return ret;
-}
-
-static snd_pcm_uframes_t msm_compr_pointer(struct snd_pcm_substream *substream)
-{
-
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct compr_audio *compr = runtime->private_data;
-	struct msm_audio *prtd = &compr->prtd;
-
-	if (prtd->pcm_irq_pos >= prtd->pcm_size)
-		prtd->pcm_irq_pos = 0;
-
-	pr_debug("%s: pcm_irq_pos = %d, pcm_size = %d, sample_bits = %d,\n"
-			 "frame_bits = %d\n", __func__, prtd->pcm_irq_pos,
-			 prtd->pcm_size, runtime->sample_bits,
-			 runtime->frame_bits);
-	return bytes_to_frames(runtime, (prtd->pcm_irq_pos));
-}
-
-static int msm_compr_mmap(struct snd_pcm_substream *substream,
-				struct vm_area_struct *vma)
-{
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct msm_audio *prtd = runtime->private_data;
-	struct audio_client *ac = prtd->audio_client;
-	struct audio_port_data *apd = ac->port;
-	struct audio_buffer *ab;
-	int dir = -1;
-
-	prtd->mmap_flag = 1;
-	runtime->render_flag = SNDRV_NON_DMA_MODE;
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		dir = IN;
-	else
-		dir = OUT;
-	ab = &(apd[dir].buf[0]);
-
-	return msm_audio_ion_mmap(ab, vma);
-}
-
-static int msm_compr_hw_params(struct snd_pcm_substream *substream,
-				struct snd_pcm_hw_params *params)
-{
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct compr_audio *compr = runtime->private_data;
-	struct msm_audio *prtd = &compr->prtd;
-	struct snd_dma_buffer *dma_buf = &substream->dma_buffer;
-	struct audio_buffer *buf;
-	int dir, ret;
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		dir = IN;
-	else
-		dir = OUT;
-	/* Modifying kernel hardware params based on userspace config */
-	if (params_periods(params) > 0 &&
-		(params_periods(params) != runtime->hw.periods_max)) {
-		runtime->hw.periods_max = params_periods(params);
-	}
-	if (params_period_bytes(params) > 0 &&
-		(params_period_bytes(params) != runtime->hw.period_bytes_min)) {
-		runtime->hw.period_bytes_min = params_period_bytes(params);
-	}
-	runtime->hw.buffer_bytes_max =
-			runtime->hw.period_bytes_min * runtime->hw.periods_max;
-	pr_debug("allocate %zd buffers each of size %d\n",
-		runtime->hw.period_bytes_min,
-		runtime->hw.periods_max);
-	ret = q6asm_audio_client_buf_alloc_contiguous(dir,
-			prtd->audio_client,
-			runtime->hw.period_bytes_min,
-			runtime->hw.periods_max);
-	if (ret < 0) {
-		pr_err("Audio Start: Buffer Allocation failed rc = %d\n",
-						ret);
-		return -ENOMEM;
-	}
-	buf = prtd->audio_client->port[dir].buf;
-
-	dma_buf->dev.type = SNDRV_DMA_TYPE_DEV;
-	dma_buf->dev.dev = substream->pcm->card->dev;
-	dma_buf->private_data = NULL;
-	dma_buf->area = buf[0].data;
-	dma_buf->addr =  buf[0].phys;
-	dma_buf->bytes = runtime->hw.buffer_bytes_max;
-
-	pr_debug("%s: buf[%pK]dma_buf->area[%pK]dma_buf->addr[%pK]\n"
-		 "dma_buf->bytes[%zd]\n", __func__,
-		 (void *)buf, (void *)dma_buf->area,
-		 &dma_buf->addr, dma_buf->bytes);
-	if (!dma_buf->area)
-		return -ENOMEM;
-
-	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
-	return 0;
-}
-
-static int msm_compr_ioctl_shared(struct snd_pcm_substream *substream,
-		unsigned int cmd, void *arg)
-{
-	int rc = 0;
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct compr_audio *compr = runtime->private_data;
-	struct msm_audio *prtd = &compr->prtd;
-	uint64_t timestamp;
-	uint64_t temp;
-
-	switch (cmd) {
-	case SNDRV_COMPRESS_TSTAMP: {
-		struct snd_compr_tstamp *tstamp;
-
-		pr_debug("SNDRV_COMPRESS_TSTAMP\n");
-		tstamp = arg;
-		memset(tstamp, 0x0, sizeof(*tstamp));
-		rc = q6asm_get_session_time(prtd->audio_client, &timestamp);
-		if (rc < 0) {
-			pr_err("%s: Get Session Time return value =%lld\n",
-				__func__, timestamp);
-			return -EAGAIN;
-		}
-		temp = (timestamp * 2 * runtime->channels);
-		temp = temp * (runtime->rate/1000);
-		temp = div_u64(temp, 1000);
-		tstamp->sampling_rate = runtime->rate;
-		tstamp->timestamp = timestamp;
-		pr_debug("%s: bytes_consumed:,timestamp = %lld,\n",
-						__func__,
-			tstamp->timestamp);
-		return 0;
-	}
-	case SNDRV_COMPRESS_GET_CAPS: {
-		struct snd_compr_caps *caps;
-
-		caps = arg;
-		memset(caps, 0, sizeof(*caps));
-		pr_debug("SNDRV_COMPRESS_GET_CAPS\n");
-		memcpy(caps, &compr->info.compr_cap, sizeof(*caps));
-		return 0;
-	}
-	case SNDRV_COMPRESS_SET_PARAMS:
-		pr_debug("SNDRV_COMPRESS_SET_PARAMS:\n");
-		memcpy(&compr->info.codec_param, (void *) arg,
-			sizeof(struct snd_compr_params));
-		switch (compr->info.codec_param.codec.id) {
-		case SND_AUDIOCODEC_MP3:
-			/* For MP3 we dont need any other parameter */
-			pr_debug("SND_AUDIOCODEC_MP3\n");
-			compr->codec = FORMAT_MP3;
-			break;
-		case SND_AUDIOCODEC_AAC:
-			pr_debug("SND_AUDIOCODEC_AAC\n");
-			compr->codec = FORMAT_MPEG4_AAC;
-			break;
-		case SND_AUDIOCODEC_AC3: {
-			char params_value[MAX_AC3_PARAM_SIZE];
-			int *params_value_data = (int *)params_value;
-			/* 36 is the max param length for ddp */
-			int i;
-			struct snd_dec_ddp *ddp =
-				&compr->info.codec_param.codec.options.ddp;
-			uint32_t params_length = 0;
-
-			memset(params_value, 0, MAX_AC3_PARAM_SIZE);
-			/* check integer overflow */
-			if (ddp->params_length > UINT_MAX/sizeof(int)) {
-				pr_err("%s: Integer overflow ddp->params_length %d\n",
-				__func__, ddp->params_length);
-				return -EINVAL;
-			}
-			params_length = ddp->params_length*sizeof(int);
-			if (params_length > MAX_AC3_PARAM_SIZE) {
-				/*MAX is 36*sizeof(int) this should not happen*/
-				pr_err("%s: params_length(%d) is greater than %zd\n",
-				__func__, params_length, MAX_AC3_PARAM_SIZE);
-				return -EINVAL;
-			}
-			pr_debug("SND_AUDIOCODEC_AC3\n");
-			compr->codec = FORMAT_AC3;
-			pr_debug("params_length: %d\n", ddp->params_length);
-			for (i = 0; i < params_length/sizeof(int); i++)
-				pr_debug("params_value[%d]: %x\n", i,
-					params_value_data[i]);
-			for (i = 0; i < ddp->params_length/2; i++) {
-				ddp->params_id[i] = params_value_data[2*i];
-				ddp->params_value[i] = params_value_data[2*i+1];
-			}
-			if (atomic_read(&prtd->start)) {
-				rc = msm_compr_send_ddp_cfg(prtd->audio_client,
-								ddp);
-				if (rc < 0)
-					pr_err("%s: DDP CMD CFG failed\n",
-						__func__);
-			}
-			break;
-		}
-		case SND_AUDIOCODEC_EAC3: {
-			char params_value[MAX_AC3_PARAM_SIZE];
-			int *params_value_data = (int *)params_value;
-			/* 36 is the max param length for ddp */
-			int i;
-			struct snd_dec_ddp *ddp =
-				&compr->info.codec_param.codec.options.ddp;
-			uint32_t params_length = 0;
-
-			memset(params_value, 0, MAX_AC3_PARAM_SIZE);
-			/* check integer overflow */
-			if (ddp->params_length > UINT_MAX/sizeof(int)) {
-				pr_err("%s: Integer overflow ddp->params_length %d\n",
-				__func__, ddp->params_length);
-				return -EINVAL;
-			}
-			params_length = ddp->params_length*sizeof(int);
-			if (params_length > MAX_AC3_PARAM_SIZE) {
-				/* MAX is 36*sizeof(int); this should not happen */
-				pr_err("%s: params_length(%d) is greater than %zd\n",
-				__func__, params_length, MAX_AC3_PARAM_SIZE);
-				return -EINVAL;
-			}
-			pr_debug("SND_AUDIOCODEC_EAC3\n");
-			compr->codec = FORMAT_EAC3;
-			pr_debug("params_length: %d\n", ddp->params_length);
-			for (i = 0; i < params_length/sizeof(int); i++)
-				pr_debug("params_value[%d]: %x\n", i,
-					params_value_data[i]);
-			for (i = 0; i < ddp->params_length/2; i++) {
-				ddp->params_id[i] = params_value_data[2*i];
-				ddp->params_value[i] = params_value_data[2*i+1];
-			}
-			if (atomic_read(&prtd->start)) {
-				rc = msm_compr_send_ddp_cfg(prtd->audio_client,
-								ddp);
-				if (rc < 0)
-					pr_err("%s: DDP CMD CFG failed\n",
-						__func__);
-			}
-			break;
-		}
-		default:
-			pr_debug("FORMAT_LINEAR_PCM\n");
-			compr->codec = FORMAT_LINEAR_PCM;
-			break;
-		}
-		return 0;
-	case SNDRV_PCM_IOCTL1_RESET:
-		pr_debug("SNDRV_PCM_IOCTL1_RESET\n");
-		/* Flush only when session is started during CAPTURE,
-		 * while PLAYBACK has no such restriction.
-		 */
-		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ||
-			  (substream->stream == SNDRV_PCM_STREAM_CAPTURE &&
-						atomic_read(&prtd->start))) {
-			if (atomic_read(&prtd->eos)) {
-				prtd->cmd_interrupt = 1;
-				wake_up(&the_locks.eos_wait);
-				atomic_set(&prtd->eos, 0);
-			}
-
-			/* An unlikely race condition is possible between FLUSH
-			 * and DRAIN if cmd_ack is set by flush and reset by drain.
-			 */
-			prtd->cmd_ack = 0;
-			rc = q6asm_cmd(prtd->audio_client, CMD_FLUSH);
-			if (rc < 0) {
-				pr_err("%s: flush cmd failed rc=%d\n",
-					__func__, rc);
-				return rc;
-			}
-			rc = wait_event_timeout(the_locks.flush_wait,
-				prtd->cmd_ack, 5 * HZ);
-			if (!rc)
-				pr_err("Flush cmd timeout\n");
-			prtd->pcm_irq_pos = 0;
-		}
-		break;
-	case SNDRV_COMPRESS_DRAIN:
-		pr_debug("%s: SNDRV_COMPRESS_DRAIN\n", __func__);
-		if (atomic_read(&prtd->pending_buffer)) {
-			pr_debug("%s: no pending writes, drain would block\n",
-			 __func__);
-			return -EWOULDBLOCK;
-		}
-
-		atomic_set(&prtd->eos, 1);
-		atomic_set(&prtd->pending_buffer, 0);
-		prtd->cmd_ack = 0;
-		q6asm_cmd_nowait(prtd->audio_client, CMD_EOS);
-		/* Wait indefinitely for DRAIN. Flush can also signal this. */
-		rc = wait_event_interruptible(the_locks.eos_wait,
-			(prtd->cmd_ack || prtd->cmd_interrupt));
-
-		if (rc < 0)
-			pr_err("EOS cmd interrupted\n");
-		pr_debug("%s: SNDRV_COMPRESS_DRAIN  out of wait\n", __func__);
-
-		if (prtd->cmd_interrupt)
-			rc = -EINTR;
-
-		prtd->cmd_interrupt = 0;
-		return rc;
-	default:
-		break;
-	}
-	return snd_pcm_lib_ioctl(substream, cmd, arg);
-}
-#ifdef CONFIG_COMPAT
-struct snd_enc_wma32 {
-	u32 super_block_align; /* WMA Type-specific data */
-	u32 encodeopt1;
-	u32 encodeopt2;
-};
-
-struct snd_enc_vorbis32 {
-	s32 quality;
-	u32 managed;
-	u32 max_bit_rate;
-	u32 min_bit_rate;
-	u32 downmix;
-};
-
-struct snd_enc_real32 {
-	u32 quant_bits;
-	u32 start_region;
-	u32 num_regions;
-};
-
-struct snd_enc_flac32 {
-	u32 num;
-	u32 gain;
-};
-
-struct snd_enc_generic32 {
-	u32 bw;	/* encoder bandwidth */
-	s32 reserved[15];
-};
-struct snd_dec_ddp32 {
-	u32 params_length;
-	u32 params_id[18];
-	u32 params_value[18];
-};
-
-union snd_codec_options32 {
-	struct snd_enc_wma32 wma;
-	struct snd_enc_vorbis32 vorbis;
-	struct snd_enc_real32 real;
-	struct snd_enc_flac32 flac;
-	struct snd_enc_generic32 generic;
-	struct snd_dec_ddp32 ddp;
-};
-
-struct snd_codec32 {
-	u32 id;
-	u32 ch_in;
-	u32 ch_out;
-	u32 sample_rate;
-	u32 bit_rate;
-	u32 rate_control;
-	u32 profile;
-	u32 level;
-	u32 ch_mode;
-	u32 format;
-	u32 align;
-	union snd_codec_options32 options;
-	u32 reserved[3];
-};
-
-struct snd_compressed_buffer32 {
-	u32 fragment_size;
-	u32 fragments;
-};
-
-struct snd_compr_params32 {
-	struct snd_compressed_buffer32 buffer;
-	struct snd_codec32 codec;
-	u8 no_wake_mode;
-};
-
-struct snd_compr_caps32 {
-	u32 num_codecs;
-	u32 direction;
-	u32 min_fragment_size;
-	u32 max_fragment_size;
-	u32 min_fragments;
-	u32 max_fragments;
-	u32 codecs[MAX_NUM_CODECS];
-	u32 reserved[11];
-};
-struct snd_compr_tstamp32 {
-	u32 byte_offset;
-	u32 copied_total;
-	compat_ulong_t pcm_frames;
-	compat_ulong_t pcm_io_frames;
-	u32 sampling_rate;
-	compat_u64 timestamp;
-};
-enum {
-	SNDRV_COMPRESS_TSTAMP32 = _IOR('C', 0x20, struct snd_compr_tstamp32),
-	SNDRV_COMPRESS_GET_CAPS32 = _IOWR('C', 0x10, struct snd_compr_caps32),
-	SNDRV_COMPRESS_SET_PARAMS32 =
-	_IOW('C', 0x12, struct snd_compr_params32),
-};
-static int msm_compr_compat_ioctl(struct snd_pcm_substream *substream,
-		unsigned int cmd, void *arg)
-{
-	int err = 0;
-
-	switch (cmd) {
-	case SNDRV_COMPRESS_TSTAMP32: {
-		struct snd_compr_tstamp tstamp;
-		struct snd_compr_tstamp32 tstamp32;
-
-		memset(&tstamp, 0, sizeof(tstamp));
-		memset(&tstamp32, 0, sizeof(tstamp32));
-		cmd = SNDRV_COMPRESS_TSTAMP;
-		err = msm_compr_ioctl_shared(substream, cmd, &tstamp);
-		if (err) {
-			pr_err("%s: COMPRESS_TSTAMP failed rc %d\n",
-			__func__, err);
-			goto bail_out;
-		}
-		tstamp32.byte_offset = tstamp.byte_offset;
-		tstamp32.copied_total = tstamp.copied_total;
-		tstamp32.pcm_frames = tstamp.pcm_frames;
-		tstamp32.pcm_io_frames = tstamp.pcm_io_frames;
-		tstamp32.sampling_rate = tstamp.sampling_rate;
-		tstamp32.timestamp = tstamp.timestamp;
-		if (copy_to_user(arg, &tstamp32, sizeof(tstamp32))) {
-			pr_err("%s: copytouser failed COMPRESS_TSTAMP32\n",
-			__func__);
-			err = -EFAULT;
-		}
-		break;
-	}
-	case SNDRV_COMPRESS_GET_CAPS32: {
-		struct snd_compr_caps caps;
-		struct snd_compr_caps32 caps32;
-		u32 i;
-
-		memset(&caps, 0, sizeof(caps));
-		memset(&caps32, 0, sizeof(caps32));
-		cmd = SNDRV_COMPRESS_GET_CAPS;
-		err = msm_compr_ioctl_shared(substream, cmd, &caps);
-		if (err) {
-			pr_err("%s: GET_CAPS failed rc %d\n",
-			__func__, err);
-			goto bail_out;
-		}
-		pr_debug("SNDRV_COMPRESS_GET_CAPS_32\n");
-		if (!err && caps.num_codecs >= MAX_NUM_CODECS) {
-			pr_err("%s: Invalid number of codecs\n", __func__);
-			err = -EINVAL;
-			goto bail_out;
-		}
-		caps32.direction = caps.direction;
-		caps32.max_fragment_size = caps.max_fragment_size;
-		caps32.max_fragments = caps.max_fragments;
-		caps32.min_fragment_size = caps.min_fragment_size;
-		caps32.num_codecs = caps.num_codecs;
-		for (i = 0; i < caps.num_codecs; i++)
-			caps32.codecs[i] = caps.codecs[i];
-		if (copy_to_user(arg, &caps32, sizeof(caps32))) {
-			pr_err("%s: copytouser failed COMPRESS_GETCAPS32\n",
-			__func__);
-			err = -EFAULT;
-		}
-		break;
-	}
-	case SNDRV_COMPRESS_SET_PARAMS32: {
-		struct snd_compr_params32 params32;
-		struct snd_compr_params params;
-
-		memset(&params32, 0, sizeof(params32));
-		memset(&params, 0, sizeof(params));
-		cmd = SNDRV_COMPRESS_SET_PARAMS;
-		if (copy_from_user(&params32, arg, sizeof(params32))) {
-			pr_err("%s: copyfromuser failed SET_PARAMS32\n",
-			__func__);
-			err = -EFAULT;
-			goto bail_out;
-		}
-		params.no_wake_mode = params32.no_wake_mode;
-		params.codec.id = params32.codec.id;
-		params.codec.ch_in = params32.codec.ch_in;
-		params.codec.ch_out = params32.codec.ch_out;
-		params.codec.sample_rate = params32.codec.sample_rate;
-		params.codec.bit_rate = params32.codec.bit_rate;
-		params.codec.rate_control = params32.codec.rate_control;
-		params.codec.profile = params32.codec.profile;
-		params.codec.level = params32.codec.level;
-		params.codec.ch_mode = params32.codec.ch_mode;
-		params.codec.format = params32.codec.format;
-		params.codec.align = params32.codec.align;
-
-		switch (params.codec.id) {
-		case SND_AUDIOCODEC_WMA:
-		case SND_AUDIOCODEC_WMA_PRO:
-			params.codec.options.wma.encodeopt1 =
-			params32.codec.options.wma.encodeopt1;
-			params.codec.options.wma.encodeopt2 =
-			params32.codec.options.wma.encodeopt2;
-			params.codec.options.wma.super_block_align =
-			params32.codec.options.wma.super_block_align;
-		break;
-		case SND_AUDIOCODEC_VORBIS:
-			params.codec.options.vorbis.downmix =
-			params32.codec.options.vorbis.downmix;
-			params.codec.options.vorbis.managed =
-			params32.codec.options.vorbis.managed;
-			params.codec.options.vorbis.max_bit_rate =
-			params32.codec.options.vorbis.max_bit_rate;
-			params.codec.options.vorbis.min_bit_rate =
-			params32.codec.options.vorbis.min_bit_rate;
-			params.codec.options.vorbis.quality =
-			params32.codec.options.vorbis.quality;
-		break;
-		case SND_AUDIOCODEC_REAL:
-			params.codec.options.real.num_regions =
-			params32.codec.options.real.num_regions;
-			params.codec.options.real.quant_bits =
-			params32.codec.options.real.quant_bits;
-			params.codec.options.real.start_region =
-			params32.codec.options.real.start_region;
-		break;
-		case SND_AUDIOCODEC_FLAC:
-			params.codec.options.flac.gain =
-			params32.codec.options.flac.gain;
-			params.codec.options.flac.num =
-			params32.codec.options.flac.num;
-		break;
-		case SND_AUDIOCODEC_DTS:
-		case SND_AUDIOCODEC_DTS_PASS_THROUGH:
-		case SND_AUDIOCODEC_DTS_LBR:
-		case SND_AUDIOCODEC_DTS_LBR_PASS_THROUGH:
-		case SND_AUDIOCODEC_DTS_TRANSCODE_LOOPBACK:
-		break;
-		case SND_AUDIOCODEC_AC3:
-		case SND_AUDIOCODEC_EAC3:
-			params.codec.options.ddp.params_length =
-			params32.codec.options.ddp.params_length;
-			memcpy(params.codec.options.ddp.params_value,
-			params32.codec.options.ddp.params_value,
-			sizeof(params32.codec.options.ddp.params_value));
-			memcpy(params.codec.options.ddp.params_id,
-			params32.codec.options.ddp.params_id,
-			sizeof(params32.codec.options.ddp.params_id));
-		break;
-		default:
-			params.codec.options.generic.bw =
-			params32.codec.options.generic.bw;
-		break;
-		}
-		if (!err)
-			err = msm_compr_ioctl_shared(substream, cmd, &params);
-		break;
-	}
-	default:
-		err = msm_compr_ioctl_shared(substream, cmd, arg);
-	}
-bail_out:
-	return err;
-
-}
-#endif
-static int msm_compr_ioctl(struct snd_pcm_substream *substream,
-		unsigned int cmd, void *arg)
-{
-	int err = 0;
-
-	if (!substream) {
-		pr_err("%s: Invalid params\n", __func__);
-		return -EINVAL;
-	}
-	pr_debug("%s called with cmd = %d\n", __func__, cmd);
-	switch (cmd) {
-	case SNDRV_COMPRESS_TSTAMP: {
-		struct snd_compr_tstamp tstamp;
-
-		if (!arg) {
-			pr_err("%s: Invalid params Tstamp\n", __func__);
-			return -EINVAL;
-		}
-		err = msm_compr_ioctl_shared(substream, cmd, &tstamp);
-		if (err)
-			pr_err("%s: COMPRESS_TSTAMP failed rc %d\n",
-			__func__, err);
-		if (!err && copy_to_user(arg, &tstamp, sizeof(tstamp))) {
-			pr_err("%s: copytouser failed COMPRESS_TSTAMP\n",
-			__func__);
-			err = -EFAULT;
-		}
-		break;
-	}
-	case SNDRV_COMPRESS_GET_CAPS: {
-		struct snd_compr_caps cap;
-
-		if (!arg) {
-			pr_err("%s: Invalid params getcaps\n", __func__);
-			return -EINVAL;
-		}
-		pr_debug("SNDRV_COMPRESS_GET_CAPS\n");
-		err = msm_compr_ioctl_shared(substream, cmd, &cap);
-		if (err)
-			pr_err("%s: GET_CAPS failed rc %d\n",
-			__func__, err);
-		if (!err && copy_to_user(arg, &cap, sizeof(cap))) {
-			pr_err("%s: copytouser failed GET_CAPS\n",
-			__func__);
-			err = -EFAULT;
-		}
-		break;
-	}
-	case SNDRV_COMPRESS_SET_PARAMS: {
-		struct snd_compr_params params;
-
-		if (!arg) {
-			pr_err("%s: Invalid params setparam\n", __func__);
-			return -EINVAL;
-		}
-		if (copy_from_user(&params, arg,
-			sizeof(struct snd_compr_params))) {
-			pr_err("%s: copy_from_user failed SET_PARAMS\n",
-				__func__);
-			return -EFAULT;
-		}
-		err = msm_compr_ioctl_shared(substream, cmd, &params);
-		if (err)
-			pr_err("%s: SET_PARAMS failed rc %d\n",
-			__func__, err);
-		break;
-	}
-	default:
-		err = msm_compr_ioctl_shared(substream, cmd, arg);
-	}
-	return err;
-}
-
-static int msm_compr_restart(struct snd_pcm_substream *substream)
-{
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct compr_audio *compr = runtime->private_data;
-	struct msm_audio *prtd = &compr->prtd;
-	struct audio_aio_write_param param;
-	struct audio_buffer *buf = NULL;
-	struct output_meta_data_st output_meta_data;
-	int time_stamp_flag = 0;
-	int buffer_length = 0;
-
-	pr_debug("%s, trigger restart\n", __func__);
-
-	if (runtime->render_flag & SNDRV_RENDER_STOPPED) {
-		buf = prtd->audio_client->port[IN].buf;
-		pr_debug("%s:writing %d bytes of buffer[%d] to dsp 2\n",
-				__func__, prtd->pcm_count, prtd->out_head);
-		pr_debug("%s:writing buffer[%d] from 0x%08x\n",
-				__func__, prtd->out_head,
-				((unsigned int)buf[0].phys
-				+ (prtd->out_head * prtd->pcm_count)));
-
-		if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
-			time_stamp_flag = SET_TIMESTAMP;
-		else
-			time_stamp_flag = NO_TIMESTAMP;
-		memcpy(&output_meta_data, (char *)(buf->data +
-			prtd->out_head * prtd->pcm_count),
-			COMPRE_OUTPUT_METADATA_SIZE);
-
-		buffer_length = output_meta_data.frame_size;
-		pr_debug("meta_data_length: %d, frame_length: %d\n",
-			 output_meta_data.meta_data_length,
-			 output_meta_data.frame_size);
-		pr_debug("timestamp_msw: %d, timestamp_lsw: %d\n",
-			 output_meta_data.timestamp_msw,
-			 output_meta_data.timestamp_lsw);
-
-		param.paddr = (unsigned long)buf[0].phys
-				+ (prtd->out_head * prtd->pcm_count)
-				+ output_meta_data.meta_data_length;
-		param.len = buffer_length;
-		param.msw_ts = output_meta_data.timestamp_msw;
-		param.lsw_ts = output_meta_data.timestamp_lsw;
-		param.flags = time_stamp_flag;
-		param.uid = prtd->session_id;
-		if (q6asm_async_write(prtd->audio_client,
-					&param) < 0)
-			pr_err("%s:q6asm_async_write failed\n",
-				__func__);
-		else
-			prtd->out_head =
-				(prtd->out_head + 1) & (runtime->periods - 1);
-
-		runtime->render_flag &= ~SNDRV_RENDER_STOPPED;
-		return 0;
-	}
-	return 0;
-}
-
-static int msm_compr_volume_ctl_put(struct snd_kcontrol *kcontrol,
-				    struct snd_ctl_elem_value *ucontrol)
-{
-	int rc = 0;
-	struct snd_pcm_volume *vol = snd_kcontrol_chip(kcontrol);
-	struct snd_pcm_substream *substream =
-			 vol->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
-	struct msm_audio *prtd;
-	int volume = ucontrol->value.integer.value[0];
-
-	pr_debug("%s: volume : %x\n", __func__, volume);
-	if (!substream)
-		return -ENODEV;
-	if (!substream->runtime)
-		return 0;
-	prtd = substream->runtime->private_data;
-	if (prtd)
-		rc = compressed_set_volume(prtd, volume);
-
-	return rc;
-}
-
-static int msm_compr_volume_ctl_get(struct snd_kcontrol *kcontrol,
-				  struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_pcm_volume *vol = snd_kcontrol_chip(kcontrol);
-	struct snd_pcm_substream *substream =
-			 vol->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
-	struct msm_audio *prtd;
-
-	pr_debug("%s\n", __func__);
-	if (!substream)
-		return -ENODEV;
-	if (!substream->runtime)
-		return 0;
-	prtd = substream->runtime->private_data;
-	if (prtd)
-		ucontrol->value.integer.value[0] = prtd->volume;
-	return 0;
-}
-
-static int msm_compr_add_controls(struct snd_soc_pcm_runtime *rtd)
-{
-	int ret = 0;
-	struct snd_pcm *pcm = rtd->pcm;
-	struct snd_pcm_volume *volume_info;
-	struct snd_kcontrol *kctl;
-
-	dev_dbg(rtd->dev, "%s, Volume cntrl add\n", __func__);
-	ret = snd_pcm_add_volume_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
-				      NULL, 1, rtd->dai_link->id,
-				      &volume_info);
-	if (ret < 0)
-		return ret;
-	kctl = volume_info->kctl;
-	kctl->put = msm_compr_volume_ctl_put;
-	kctl->get = msm_compr_volume_ctl_get;
-	kctl->tlv.p = compr_rx_vol_gain;
-	return 0;
-}
-
-static const struct snd_pcm_ops msm_compr_ops = {
-	.open	   = msm_compr_open,
-	.hw_params	= msm_compr_hw_params,
-	.close	  = msm_compr_close,
-	.ioctl	  = msm_compr_ioctl,
-	.prepare	= msm_compr_prepare,
-	.trigger	= msm_compr_trigger,
-	.pointer	= msm_compr_pointer,
-	.mmap		= msm_compr_mmap,
-	.restart	= msm_compr_restart,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl   = msm_compr_compat_ioctl,
-#endif
-};
-
-static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
-{
-	struct snd_card *card = rtd->card->snd_card;
-	int ret = 0;
-
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
-
-	ret = msm_compr_add_controls(rtd);
-	if (ret)
-		pr_err("%s, kctl add failed\n", __func__);
-	return ret;
-}
-
-static struct snd_soc_platform_driver msm_soc_platform = {
-	.ops		= &msm_compr_ops,
-	.pcm_new	= msm_asoc_pcm_new,
-};
-
-static int msm_compr_probe(struct platform_device *pdev)
-{
-
-	dev_info(&pdev->dev, "%s: dev name %s\n",
-			 __func__, dev_name(&pdev->dev));
-
-	return snd_soc_register_platform(&pdev->dev,
-				   &msm_soc_platform);
-}
-
-static int msm_compr_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_platform(&pdev->dev);
-	return 0;
-}
-
-static const struct of_device_id msm_compr_dt_match[] = {
-	{.compatible = "qcom,msm-compr-dsp"},
-	{}
-};
-MODULE_DEVICE_TABLE(of, msm_compr_dt_match);
-
-static struct platform_driver msm_compr_driver = {
-	.driver = {
-		.name = "msm-compr-dsp",
-		.owner = THIS_MODULE,
-		.of_match_table = msm_compr_dt_match,
-	},
-	.probe = msm_compr_probe,
-	.remove = msm_compr_remove,
-};
-
-static int __init msm_soc_platform_init(void)
-{
-	init_waitqueue_head(&the_locks.enable_wait);
-	init_waitqueue_head(&the_locks.eos_wait);
-	init_waitqueue_head(&the_locks.write_wait);
-	init_waitqueue_head(&the_locks.read_wait);
-	init_waitqueue_head(&the_locks.flush_wait);
-
-	return platform_driver_register(&msm_compr_driver);
-}
-module_init(msm_soc_platform_init);
-
-static void __exit msm_soc_platform_exit(void)
-{
-	platform_driver_unregister(&msm_compr_driver);
-}
-module_exit(msm_soc_platform_exit);
-
-MODULE_DESCRIPTION("PCM module platform driver");
-MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.h b/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.h
deleted file mode 100644
index d6e3ec6..0000000
--- a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2012, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef _MSM_COMPR_H
-#define _MSM_COMPR_H
-#include <sound/apr_audio-v2.h>
-#include <sound/q6asm-v2.h>
-#include <sound/compress_params.h>
-#include <sound/compress_offload.h>
-#include <sound/compress_driver.h>
-
-#include "msm-pcm-q6-v2.h"
-
-struct compr_info {
-	struct snd_compr_caps compr_cap;
-	struct snd_compr_codec_caps codec_caps;
-	struct snd_compr_params codec_param;
-};
-
-struct compr_audio {
-	struct msm_audio prtd;
-	struct compr_info info;
-	uint32_t codec;
-};
-
-#endif /*_MSM_COMPR_H*/