Merge "sched/fair: Don't let tasks slip away from gold to silver cluster"
diff --git a/Documentation/accounting/psi.txt b/Documentation/accounting/psi.txt
index b8ca28b..4fb40fe 100644
--- a/Documentation/accounting/psi.txt
+++ b/Documentation/accounting/psi.txt
@@ -63,6 +63,110 @@
 which wouldn't necessarily make a dent in the time averages, or to
 average trends over custom time frames.
 
+Monitoring for pressure thresholds
+==================================
+
+Users can register triggers and use poll() to be woken up when resource
+pressure exceeds certain thresholds.
+
+A trigger describes the maximum cumulative stall time over a specific
+time window, e.g. 100ms of total stall time within any 500ms window to
+generate a wakeup event.
+
+To register a trigger, the user has to open the psi interface file under
+/proc/pressure/ representing the resource to be monitored and write the
+desired threshold and time window. The open file descriptor should be
+used to wait for trigger events using select(), poll() or epoll().
+The following format is used:
+
+<some|full> <stall amount in us> <time window in us>
+
+For example, writing "some 150000 1000000" into /proc/pressure/memory
+would add a 150ms threshold for partial memory stall measured within a
+1sec time window. Writing "full 50000 1000000" into /proc/pressure/io
+would add a 50ms threshold for full io stall measured within a 1sec time window.
+
+Triggers can be set on more than one psi metric, and more than one trigger
+for the same psi metric can be specified. However, each trigger needs its own
+file descriptor so that it can be polled separately from the others;
+therefore a separate open() syscall should be made for each trigger, even
+when opening the same psi interface file.
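+
+For example, two triggers on the same psi interface file could be set up
+as follows (a minimal sketch, error handling omitted):
+
+  int fd1 = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
+  int fd2 = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
+
+  /* each trigger is tied to its own file descriptor */
+  write(fd1, "some 150000 1000000", strlen("some 150000 1000000") + 1);
+  write(fd2, "full 50000 1000000", strlen("full 50000 1000000") + 1);
+  /* poll() fd1 and fd2 independently; closing an fd removes its trigger */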
+
+Monitors activate only when the system enters a stall state for the monitored
+psi metric and deactivate upon exit from the stall state. While the system is
+in the stall state, psi signal growth is monitored at a rate of 10 times per
+tracking window.
+
+The kernel accepts window sizes ranging from 500ms to 10s, therefore the
+minimum monitoring update interval is 50ms and the maximum is 1s. The lower
+limit prevents overly frequent polling; the upper limit is chosen as a point
+beyond which monitors are most likely not needed and psi averages can be
+used instead.
+
+When activated, a psi monitor stays active for at least the duration of one
+tracking window to avoid repeated activations/deactivations while the system
+is bouncing in and out of the stall state.
+
+Notifications to userspace are rate-limited to one per tracking window.
+
+The trigger will de-register when the file descriptor used to define the
+trigger is closed.
+
+Userspace monitor usage example
+===============================
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <poll.h>
+#include <string.h>
+#include <unistd.h>
+
+/*
+ * Monitor memory partial stall with 1s tracking window size
+ * and 150ms threshold.
+ */
+int main() {
+	const char trig[] = "some 150000 1000000";
+	struct pollfd fds;
+	int n;
+
+	fds.fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
+	if (fds.fd < 0) {
+		printf("/proc/pressure/memory open error: %s\n",
+			strerror(errno));
+		return 1;
+	}
+	fds.events = POLLPRI;
+
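+	/*
+	 * Writing the trigger string registers the trigger; it stays
+	 * active until this file descriptor is closed.
+	 */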
+	if (write(fds.fd, trig, strlen(trig) + 1) < 0) {
+		printf("/proc/pressure/memory write error: %s\n",
+			strerror(errno));
+		return 1;
+	}
+
+	printf("waiting for events...\n");
+	while (1) {
+		n = poll(&fds, 1, -1);
+		if (n < 0) {
+			printf("poll error: %s\n", strerror(errno));
+			return 1;
+		}
+		if (fds.revents & POLLERR) {
+			printf("got POLLERR, event source is gone\n");
+			return 0;
+		}
+		if (fds.revents & POLLPRI) {
+			printf("event triggered!\n");
+		} else {
+			printf("unknown event received: 0x%x\n", fds.revents);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
 Cgroup2 interface
 =================
 
@@ -71,3 +175,6 @@
 into cgroups. Each subdirectory in the cgroupfs mountpoint contains
 cpu.pressure, memory.pressure, and io.pressure files; the format is
 the same as the /proc/pressure/ files.
+
+Per-cgroup psi monitors can be specified and used the same way as
+system-wide ones.
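+
+For example (a minimal sketch; the cgroup name and cgroup2 mount point are
+illustrative):
+
+  int fd = open("/sys/fs/cgroup/mygroup/memory.pressure",
+		O_RDWR | O_NONBLOCK);
+
+  /* same trigger format and poll() usage as for the /proc/pressure/ files */
+  write(fd, "some 150000 1000000", strlen("some 150000 1000000") + 1);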
diff --git a/Documentation/devicetree/bindings/arm/msm/wil6210.txt b/Documentation/devicetree/bindings/arm/msm/wil6210.txt
index 23e4bd5..acc1915 100644
--- a/Documentation/devicetree/bindings/arm/msm/wil6210.txt
+++ b/Documentation/devicetree/bindings/arm/msm/wil6210.txt
@@ -9,11 +9,6 @@
 Required properties:
 
 - compatible: "qcom,wil6210"
-- qcom,smmu-support: Boolean flag indicating whether PCIe has SMMU support
-- qcom,smmu-s1-en: Boolean flag indicating whether SMMU stage1 should be enabled
-- qcom,smmu-fast-map: Boolean flag indicating whether SMMU fast mapping should be enabled
-- qcom,smmu-coherent: Boolean flag indicating SMMU dma and page table coherency
-- qcom,smmu-mapping: specifies the base address and size of SMMU space
 - qcom,pcie-parent: phandle for the PCIe root complex to which 11ad card is connected
 - Refer to "Documentation/devicetree/bindings/arm/msm/msm_bus.txt" for
   the below optional properties:
@@ -29,6 +24,7 @@
 - qcom,use-ext-supply: Boolean flag to indicate if 11ad SIP uses external power supply
 - vdd-supply: phandle to 11ad VDD regulator node
 - vddio-supply: phandle to 11ad VDDIO regulator node
+- vdd-ldo-supply: phandle to 11ad VDD LDO regulator node
 - qcom,use-ext-clocks: Boolean flag to indicate if 11ad SIP uses external clocks
 - clocks	    : List of phandle and clock specifier pairs
 - clock-names       : List of clock input name strings sorted in the same
@@ -39,11 +35,6 @@
 Example:
 	wil6210: qcom,wil6210 {
 		compatible = "qcom,wil6210";
-		qcom,smmu-support;
-		qcom,smmu-s1-en;
-		qcom,smmu-fast-map;
-		qcom,smmu-coherent;
-		qcom,smmu-mapping = <0x20000000 0xe0000000>;
 		qcom,pcie-parent = <&pcie1>;
 		qcom,wigig-en = <&tlmm 94 0>;
 		qcom,wigig-dc = <&tlmm 81 0>;
@@ -56,6 +47,7 @@
 		qcom,use-ext-supply;
 		vdd-supply= <&pm8998_s7>;
 		vddio-supply= <&pm8998_s5>;
+		vdd-ldo-supply = <&pm8150_l15>;
 		qcom,use-ext-clocks;
 		clocks = <&clock_gcc clk_rf_clk3>,
 			 <&clock_gcc clk_rf_clk3_pin>;
@@ -63,3 +55,32 @@
 		qcom,keep-radio-on-during-sleep;
 	};
 
+A wil6210 client node under the PCIe RP node is needed for SMMU initialization
+by the PCI framework when devices are discovered.
+
+Required properties:
+
+- qcom,iommu-dma-addr-pool: specifies the base address and size of SMMU space
+- qcom,iommu-dma: defines the SMMU mode - bypass/fastmap/disabled
+- qcom,iommu-pagetable: indicates SMMU dma and page table coherency
+
+Example:
+&pcie1_rp {
+	#address-cells = <5>;
+	#size-cells = <0>;
+
+	wil6210_pci: wil6210_pci {
+		reg = <0 0 0 0 0>;
+
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		qcom,iommu-group = <&wil6210_pci_iommu_group>;
+
+		wil6210_pci_iommu_group: wil6210_pci_iommu_group {
+				qcom,iommu-dma-addr-pool = <0x20000000 0xe0000000>;
+				qcom,iommu-dma = "fastmap";
+				qcom,iommu-pagetable = "coherent";
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/clock/qcom,gpucc.txt b/Documentation/devicetree/bindings/clock/qcom,gpucc.txt
index 58c4e29..05d1a9a 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gpucc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,gpucc.txt
@@ -4,7 +4,7 @@
 Required properties :
 - compatible: shall contain one of the following:
 		"qcom,gpucc-kona"
-		"qcom,gpucc-lito".
+		"qcom,lito-gpucc".
 - reg: shall contain base register offset and size.
 - reg-names: names of registers listed in the same order as in the reg property.
 		Must contain "cc_base".
diff --git a/Documentation/devicetree/bindings/clock/qcom,npucc.txt b/Documentation/devicetree/bindings/clock/qcom,npucc.txt
index 50977a0..cd5eb76 100644
--- a/Documentation/devicetree/bindings/clock/qcom,npucc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,npucc.txt
@@ -2,7 +2,8 @@
 -----------------------------------------------------------------
 
 Required properties :
-- compatible:		Should be "qcom,npucc-kona".
+- compatible:		Should be "qcom,npucc-kona" or
+			"qcom,lito-npucc".
 - reg:			Shall contain base register addresses and sizes.
 - reg-names:		Names of the register bases listed in the same order as
 			in the reg property.  Shall include: "cc", "qdsp6ss",
diff --git a/Documentation/devicetree/bindings/display/msm/sde-dp.txt b/Documentation/devicetree/bindings/display/msm/sde-dp.txt
index a17b738..7881230 100644
--- a/Documentation/devicetree/bindings/display/msm/sde-dp.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde-dp.txt
@@ -100,11 +100,14 @@
 - compatible:			Must be "qcom,msm-ext-disp"
 - qcom,dp-low-power-hw-hpd:	Low power hardware HPD feature enable control node
 - qcom,phy-version:		Phy version
+- qcom,pn-swap-lane-map:	P/N swap configuration of each lane
 - pinctrl-names:		List of names to assign mdss pin states defined in pinctrl device node
 				Refer to pinctrl-bindings.txt
 - pinctrl-<0..n>:		Lists phandles each pointing to the pin configuration node within a pin
 				controller. These pin configurations are installed in the pinctrl
 				device node. Refer to pinctrl-bindings.txt
+- qcom,max-lclk-frequency-khz:	An integer specifying the maximum link clock in kHz supported by Display Port.
+- qcom,mst-fixed-topology-ports: u32 values of the MST output ports to reserve, starting from one
 
 [Optional child nodes]: These nodes are for devices which are
 dependent on msm_ext_disp. If msm_ext_disp is disabled then
diff --git a/Documentation/devicetree/bindings/gpu/adreno.txt b/Documentation/devicetree/bindings/gpu/adreno.txt
index 330f890..196d93b 100644
--- a/Documentation/devicetree/bindings/gpu/adreno.txt
+++ b/Documentation/devicetree/bindings/gpu/adreno.txt
@@ -38,11 +38,17 @@
 				above this powerlevel isense clock is at working frequency.
 
 Bus Scaling Data:
-- qcom,msm-bus,name: String property to describe the name of the 3D graphics processor.
-- qcom,msm-bus,num-cases: This is the the number of Bus Scaling use cases defined in the vectors property.
-- qcom,msm-bus,active-only: A boolean flag indicating if it is active only.
-- qcom,msm-bus,num-paths: This represents the number of paths in each Bus Scaling Usecase.
-- qcom,msm-bus,vectors-KBps: A series of 4 cell properties, format of which is:
+- qcom,gpu-bus-table:		Defines a bus voting table with the below properties. Multiple bus
+				voting tables can be defined for a given platform based on the type of DDR system.
+
+Properties:
+- compatible:			Must be "qcom,gpu-bus-table". Additionally, "qcom,gpu-bus-table-ddr" must also
+				be provided, with the DDR type value (integer) appended to the string.
+- qcom,msm-bus,name:		String property to describe the name of the 3D graphics processor.
+- qcom,msm-bus,num-cases:	This is the number of Bus Scaling use cases defined in the vectors property.
+- qcom,msm-bus,active-only:	A boolean flag indicating if it is active only.
+- qcom,msm-bus,num-paths: 	This represents the number of paths in each Bus Scaling Usecase.
+- qcom,msm-bus,vectors-KBps:	A series of 4 cell properties, format of which is:
 					<src dst ab ib>, <src dst ab ib>, // For Bus Scaling Usecase 1
 					<src dst ab ib>, <src dst ab ib>, // For Bus Scaling Usecase 2
 					<..  ..  .. ..>, <..  ..  .. ..>; // For Bus Scaling Usecase n
@@ -171,8 +177,7 @@
 				certain protected registers and also pass to the user as
 				a property.
 - qcom,l2pc-cpu-mask:
-				Disables L2PC on masked CPUs when any of Graphics
-				rendering thread is running on masked CPUs.
+				Disables L2PC on masked CPUs when any Graphics
+				rendering thread is running on masked CPUs.
 				Bit 0 is for CPU-0, bit 1 is for CPU-1...
 
 - qcom,l2pc-update-queue:
@@ -339,14 +344,17 @@
 				"mem_iface_clk", "alt_mem_iface_clk";
 
 		/* Bus Scale Settings */
-		qcom,msm-bus,name = "grp3d";
-		qcom,msm-bus,num-cases = <4>;
-		qcom,msm-bus,num-paths = <1>;
-		qcom,msm-bus,vectors-KBps =
-			<26 512 0 0>,
-			<26 512 0 1600000>,
-			<26 512 0 3200000>,
-			<26 512 0 4264000>;
+		qcom,gpu-bus-table {
+			compatible = "qcom,gpu-bus-table", "qcom,gpu-bus-table-ddr7";
+			qcom,msm-bus,name = "grp3d";
+			qcom,msm-bus,num-cases = <4>;
+			qcom,msm-bus,num-paths = <1>;
+			qcom,msm-bus,vectors-KBps =
+				<26 512 0 0>,
+				<26 512 0 1600000>,
+				<26 512 0 3200000>,
+				<26 512 0 4264000>;
+		};
 
 		/* GDSC oxili regulators */
 		vdd-supply = <&gdsc_oxili_gx>;
diff --git a/Documentation/devicetree/bindings/media/video/msm-vidc.txt b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
index f31ced7..2f948e8 100644
--- a/Documentation/devicetree/bindings/media/video/msm-vidc.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
@@ -120,17 +120,15 @@
   boot/dts/include/dt-bindings/msm/msm-bus-ids.h for list of acceptable slaves
 
 Optional properties:
-- qcom,bus-governor : governor to use when scaling bus, generally any commonly
-  found devfreq governor might be used.  In addition to those governors, the
-  custom Venus governors, "msm-vidc-ddr" or "msm-vidc-llcc" are also
-  acceptable values.
-  In the absence of this property the "performance" governor is used.
-- qcom,bus-rage-kbps : an array of two items (<min max>) that indicate the
+- qcom,bus-range-kbps : an array of two items (<min max>) that indicate the
   minimum and maximum acceptable votes for the bus.
   In the absence of this property <0 INT_MAX> is used.
 - qcom,ubwc-10bit : UBWC 10 bit content has different bus requirements,
   this tag will be used to pick the appropriate bus as per the session profile
   as shown below in example.
+- qcom,mode : Type of BW calculations to use.
+		"performance" - Use highest valid BW vote.
+		"venus-ddr", "venus-llcc" - Calculate for DDR, LLCC path.
 
 Memory Heaps
 ============
diff --git a/Documentation/devicetree/bindings/net/qrtr-fifo-xprt.txt b/Documentation/devicetree/bindings/net/qrtr-fifo-xprt.txt
new file mode 100644
index 0000000..69debce
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/qrtr-fifo-xprt.txt
@@ -0,0 +1,25 @@
+Qualcomm Technologies, Inc. IPC Router FIFO Transport
+
+Required properties:
+- compatible:	should be "qcom,ipcr-fifo-xprt"
+- reg:		the irq register to raise an interrupt
+- interrupts:	the receiving interrupt line
+- qcom,ipc-shm:	reference to shared memory phandle
+
+Example:
+
+	fifo_vipc_irq@176 {
+		compatible = "qcom,ipcr-fifo-xprt";
+		reg = <0x176>;
+		interrupts = <0x0 0x142 0x1>;
+		qcom,ipc-shm = <&ipc-shm>;
+	};
+
+	ipc-shm: shared-buffer@85af7000 {
+		compatible = "qcom,hypervisor-shared-memory";
+		phandle = <0x1e4>;
+		reg = <0x0 0x85af7000 0x0 0x9000>;
+		label = "ipc_shm";
+		qcom,tx-is-first;
+	};
+
diff --git a/Documentation/devicetree/bindings/thermal/qti-qmi-sensor.txt b/Documentation/devicetree/bindings/thermal/qti-qmi-sensor.txt
index adb382b..12cf027 100644
--- a/Documentation/devicetree/bindings/thermal/qti-qmi-sensor.txt
+++ b/Documentation/devicetree/bindings/thermal/qti-qmi-sensor.txt
@@ -33,16 +33,17 @@
 		are the only acceptable sensor names,
 		1. pa
 		2. pa1
-		3. qfe_pa0
-		4. qfe_wtr0
-		5. modem_tsens
-		6. qfe_mmw0
-		7. qfe_mmw1
-		8. qfe_mmw2
-		9. qfe_mmw3
-		10. xo_therm
-		11. qfe_pa_mdm
-		12. qfe_pa_wtr
+		3. pa2
+		4. qfe_pa0
+		5. qfe_wtr0
+		6. modem_tsens
+		7. qfe_mmw0
+		8. qfe_mmw1
+		9. qfe_mmw2
+		10. qfe_mmw3
+		11. xo_therm
+		12. qfe_pa_mdm
+		13. qfe_pa_wtr
 
 Example:
 
diff --git a/arch/arm64/boot/dts/qcom/kona-coresight.dtsi b/arch/arm64/boot/dts/qcom/kona-coresight.dtsi
index 6b80be3..7c9b06a 100644
--- a/arch/arm64/boot/dts/qcom/kona-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-coresight.dtsi
@@ -3436,4 +3436,13 @@
 			};
 		};
 	};
+
+	hwevent {
+		compatible = "qcom,coresight-hwevent";
+
+		coresight-name = "coresight-hwevent";
+		coresight-csr = <&csr>;
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/kona-mhi.dtsi b/arch/arm64/boot/dts/qcom/kona-mhi.dtsi
index 8cd7629..c2cb9ec 100644
--- a/arch/arm64/boot/dts/qcom/kona-mhi.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-mhi.dtsi
@@ -456,10 +456,31 @@
 				mhi,chan-type = <3>;
 			};
 
+			mhi_chan@105 {
+				reg = <105>;
+				label = "IP_HW_MHIP_0";
+				mhi,event-ring = <10>;
+				mhi,chan-dir = <1>;
+				mhi,data-type = <3>;
+				mhi,ee = <0x4>;
+				mhi,offload-chan;
+			};
+
+			mhi_chan@106 {
+				reg = <106>;
+				label = "IP_HW_MHIP_0";
+				mhi,event-ring = <11>;
+				mhi,chan-dir = <2>;
+				mhi,data-type = <3>;
+				mhi,ee = <0x4>;
+				mhi,offload-chan;
+				mhi,lpm-notify;
+			};
+
 			mhi_chan@107 {
 				reg = <107>;
 				label = "IP_HW_MHIP_1";
-				mhi,event-ring = <10>;
+				mhi,event-ring = <12>;
 				mhi,chan-dir = <1>;
 				mhi,data-type = <3>;
 				mhi,ee = <0x4>;
@@ -469,7 +490,7 @@
 			mhi_chan@108 {
 				reg = <108>;
 				label = "IP_HW_MHIP_1";
-				mhi,event-ring = <11>;
+				mhi,event-ring = <13>;
 				mhi,chan-dir = <2>;
 				mhi,data-type = <3>;
 				mhi,ee = <0x4>;
@@ -583,7 +604,7 @@
 				mhi,num-elements = <0>;
 				mhi,intmod = <0>;
 				mhi,msi = <0>;
-				mhi,chan = <107>;
+				mhi,chan = <105>;
 				mhi,priority = <1>;
 				mhi,brstmode = <3>;
 				mhi,hw-ev;
@@ -595,6 +616,30 @@
 				mhi,num-elements = <0>;
 				mhi,intmod = <0>;
 				mhi,msi = <0>;
+				mhi,chan = <106>;
+				mhi,priority = <1>;
+				mhi,brstmode = <3>;
+				mhi,hw-ev;
+				mhi,client-manage;
+				mhi,offload;
+			};
+
+			mhi_event@12 {
+				mhi,num-elements = <0>;
+				mhi,intmod = <0>;
+				mhi,msi = <0>;
+				mhi,chan = <107>;
+				mhi,priority = <1>;
+				mhi,brstmode = <3>;
+				mhi,hw-ev;
+				mhi,client-manage;
+				mhi,offload;
+			};
+
+			mhi_event@13 {
+				mhi,num-elements = <0>;
+				mhi,intmod = <0>;
+				mhi,msi = <0>;
 				mhi,chan = <108>;
 				mhi,priority = <1>;
 				mhi,brstmode = <3>;
diff --git a/arch/arm64/boot/dts/qcom/kona-smp2p.dtsi b/arch/arm64/boot/dts/qcom/kona-smp2p.dtsi
index b3f8d88..6f28b7c 100644
--- a/arch/arm64/boot/dts/qcom/kona-smp2p.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-smp2p.dtsi
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
  */
 
 #include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -51,6 +51,17 @@
 			interrupt-controller;
 			#interrupt-cells = <2>;
 		};
+
+		sleepstate_smp2p_out: sleepstate-out {
+			qcom,entry-name = "sleepstate";
+			#qcom,smem-state-cells = <1>;
+		};
+
+		sleepstate_smp2p_in: qcom,sleepstate-in {
+			qcom,entry-name = "sleepstate_see";
+			interrupt-controller;
+			#interrupt-cells = <2>;
+		};
 	};
 
 	qcom,smp2p-cdsp {
diff --git a/arch/arm64/boot/dts/qcom/kona-thermal.dtsi b/arch/arm64/boot/dts/qcom/kona-thermal.dtsi
index 64fba05..9102f4f 100644
--- a/arch/arm64/boot/dts/qcom/kona-thermal.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-thermal.dtsi
@@ -50,6 +50,103 @@
 	};
 };
 
+&soc {
+	qmi-tmd-devices {
+		compatible = "qcom,qmi-cooling-devices";
+
+		modem {
+			qcom,instance-id = <0x64>;
+
+			modem_pa: modem_pa {
+				qcom,qmi-dev-name = "pa";
+				#cooling-cells = <2>;
+			};
+
+			modem_proc: modem_proc {
+				qcom,qmi-dev-name = "modem";
+				#cooling-cells = <2>;
+			};
+
+			modem_current: modem_current {
+				qcom,qmi-dev-name = "modem_current";
+				#cooling-cells = <2>;
+			};
+
+			modem_skin: modem_skin {
+				qcom,qmi-dev-name = "modem_skin";
+				#cooling-cells = <2>;
+			};
+
+			modem_skin0: modem_skin0 {
+				qcom,qmi-dev-name = "modem_skin0";
+				#cooling-cells = <2>;
+			};
+
+			modem_skin1: modem_skin1 {
+				qcom,qmi-dev-name = "modem_skin1";
+				#cooling-cells = <2>;
+			};
+
+			modem_skin2: modem_skin2 {
+				qcom,qmi-dev-name = "modem_skin2";
+				#cooling-cells = <2>;
+			};
+
+			modem_skin3: modem_skin3 {
+				qcom,qmi-dev-name = "modem_skin3";
+				#cooling-cells = <2>;
+			};
+
+			modem_mmw0: modem_mmw0 {
+				qcom,qmi-dev-name = "mmw0";
+				#cooling-cells = <2>;
+			};
+
+			modem_mmw1: modem_mmw1 {
+				qcom,qmi-dev-name = "mmw1";
+				#cooling-cells = <2>;
+			};
+
+			modem_mmw2: modem_mmw2 {
+				qcom,qmi-dev-name = "mmw2";
+				#cooling-cells = <2>;
+			};
+
+			modem_mmw3: modem_mmw3 {
+				qcom,qmi-dev-name = "mmw3";
+				#cooling-cells = <2>;
+			};
+
+			modem_bcl: modem_bcl {
+				qcom,qmi-dev-name = "vbatt_low";
+				#cooling-cells = <2>;
+			};
+		};
+	};
+
+	qmi_sensor: qmi-ts-sensors {
+		compatible = "qcom,qmi-sensors";
+		#thermal-sensor-cells = <1>;
+
+		modem {
+			qcom,instance-id = <100>;
+			qcom,qmi-sensor-names = "pa",
+						"pa_1",
+						"pa_2",
+						"qfe_pa0",
+						"qfe_wtr0",
+						"modem_tsens",
+						"qfe_mmw0",
+						"qfe_mmw1",
+						"qfe_mmw2",
+						"qfe_mmw3",
+						"xo_therm",
+						"qfe_pa_mdm",
+						"qfe_pa_wtr";
+		};
+	};
+};
+
 &thermal_zones {
 	aoss0-usr {
 		polling-delay-passive = <0>;
@@ -854,4 +951,60 @@
 			};
 		};
 	};
+
+	modem-pa0-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&qmi_sensor 100>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	modem-pa1-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&qmi_sensor 101>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	modem-modem-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&qmi_sensor 105>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	modem-skin-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&qmi_sensor 110>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/kona-vidc.dtsi b/arch/arm64/boot/dts/qcom/kona-vidc.dtsi
index 95bc7d5..674f53a 100644
--- a/arch/arm64/boot/dts/qcom/kona-vidc.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-vidc.dtsi
@@ -49,9 +49,8 @@
 			label = "cnoc";
 			qcom,bus-master = <MSM_BUS_MASTER_AMPSS_M0>;
 			qcom,bus-slave = <MSM_BUS_SLAVE_VENUS_CFG>;
-			qcom,bus-governor = "performance";
+			qcom,mode = "performance";
 			qcom,bus-range-kbps = <762 762>;
-			operating-points-v2 = <&venus_bus_cnoc_bw_table>;
 		};
 
 		venus_bus_ddr {
@@ -59,9 +58,8 @@
 			label = "venus-ddr";
 			qcom,bus-master = <MSM_BUS_MASTER_LLCC>;
 			qcom,bus-slave = <MSM_BUS_SLAVE_EBI_CH0>;
-			qcom,bus-governor = "msm-vidc-ddr";
+			qcom,mode = "venus-ddr";
 			qcom,bus-range-kbps = <762 6533000>;
-			operating-points-v2 = <&suspendable_ddr_bw_opp_table>;
 		};
 
 		venus_bus_llcc {
@@ -69,9 +67,8 @@
 			label = "venus-llcc";
 			qcom,bus-master = <MSM_BUS_MASTER_VIDEO_P0>;
 			qcom,bus-slave = <MSM_BUS_SLAVE_LLCC>;
-			qcom,bus-governor = "msm-vidc-llcc";
+			qcom,mode = "venus-llcc";
 			qcom,bus-range-kbps = <2288 6533000>;
-			operating-points-v2 = <&suspendable_llcc_bw_opp_table>;
 		};
 
 		/* MMUs */
diff --git a/arch/arm64/boot/dts/qcom/kona.dtsi b/arch/arm64/boot/dts/qcom/kona.dtsi
index 72dd390..48519d1 100644
--- a/arch/arm64/boot/dts/qcom/kona.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona.dtsi
@@ -974,11 +974,6 @@
 		operating-points-v2 = <&keepalive_opp_table>;
 	};
 
-	venus_bus_cnoc_bw_table: bus-cnoc-bw-table {
-		compatible = "operating-points-v2";
-		BW_OPP_ENTRY( 200, 4);
-	};
-
 	llcc_bw_opp_table: llcc-bw-opp-table {
 		compatible = "operating-points-v2";
 		BW_OPP_ENTRY(  150, 16); /*  2288 MB/s */
@@ -1867,9 +1862,11 @@
 		lanes-per-direction = <2>;
 
 		clock-names = "ref_clk_src",
+			"ref_clk",
 			"ref_aux_clk";
 		clocks = <&clock_rpmh RPMH_CXO_CLK>,
-			<&clock_gcc GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK>;
+			<&clock_gcc GCC_UFS_1X_CLKREF_EN>,
+			<&clock_gcc GCC_UFS_PHY_PHY_AUX_CLK>;
 
 		status = "disabled";
 	};
@@ -1896,11 +1893,11 @@
 			"rx_lane0_sync_clk",
 			"rx_lane1_sync_clk";
 		clocks =
-			<&clock_gcc GCC_UFS_PHY_AXI_HW_CTL_CLK>,
-			<&clock_gcc GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK>,
+			<&clock_gcc GCC_UFS_PHY_AXI_CLK>,
+			<&clock_gcc GCC_AGGRE_UFS_PHY_AXI_CLK>,
 			<&clock_gcc GCC_UFS_PHY_AHB_CLK>,
-			<&clock_gcc GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK>,
-			<&clock_gcc GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK>,
+			<&clock_gcc GCC_UFS_PHY_UNIPRO_CORE_CLK>,
+			<&clock_gcc GCC_UFS_PHY_ICE_CORE_CLK>,
 			<&clock_rpmh RPMH_CXO_CLK>,
 			<&clock_gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>,
 			<&clock_gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>,
@@ -1910,7 +1907,7 @@
 			<0 0>,
 			<0 0>,
 			<37500000 300000000>,
-			<37500000 300000000>,
+			<75000000 300000000>,
 			<0 0>,
 			<0 0>,
 			<0 0>,
@@ -2237,6 +2234,13 @@
 		memory-region = <&pil_ipa_fw_mem>;
 	};
 
+	qcom,ipa-mpm {
+		compatible = "qcom,ipa-mpm";
+		qcom,mhi-chdb-base = <0x64300300>;
+		qcom,mhi-erdb-base = <0x64300700>;
+		qcom,iova-mapping = <0x10000000 0x1FFFFFFF>;
+	};
+
 	ipa_hw: qcom,ipa@1e00000 {
 		compatible = "qcom,ipa";
 		mboxes = <&qmp_aop 0>;
@@ -2709,6 +2713,14 @@
 		memory-region = <&pil_npu_mem>;
 	};
 
+	qcom,smp2p_sleepstate {
+		compatible = "qcom,smp2p-sleepstate";
+		qcom,smem-states = <&sleepstate_smp2p_out 0>;
+		interrupt-parent = <&sleepstate_smp2p_in>;
+		interrupts = <0 0>;
+		interrupt-names = "smp2p-sleepstate-in";
+	};
+
 	qcom,msm-cdsp-loader {
 		compatible = "qcom,cdsp-loader";
 		qcom,proc-img-to-load = "cdsp";
@@ -3389,6 +3401,13 @@
 				mhi,brstmode = <2>;
 			};
 		};
+
+		mhi_devices {
+			mhi_qrtr {
+				mhi,chan = "IPCR";
+				qcom,net-id = <0>;
+			};
+		};
 	};
 };
 
diff --git a/arch/arm64/boot/dts/qcom/lito-gdsc.dtsi b/arch/arm64/boot/dts/qcom/lito-gdsc.dtsi
index dfb2644..1e822bb 100644
--- a/arch/arm64/boot/dts/qcom/lito-gdsc.dtsi
+++ b/arch/arm64/boot/dts/qcom/lito-gdsc.dtsi
@@ -139,7 +139,7 @@
 
 	/* NPU GDSC */
 	npu_core_gdsc: qcom,gdsc@9981004 {
-		compatible = "regulator-fixed";
+		compatible = "qcom,gdsc";
 		reg = <0x9981004 0x4>;
 		regulator-name = "npu_core_gdsc";
 		status = "disabled";
diff --git a/arch/arm64/boot/dts/qcom/lito-rumi.dtsi b/arch/arm64/boot/dts/qcom/lito-rumi.dtsi
index 9cb6c48..075d5d5 100644
--- a/arch/arm64/boot/dts/qcom/lito-rumi.dtsi
+++ b/arch/arm64/boot/dts/qcom/lito-rumi.dtsi
@@ -58,6 +58,11 @@
 	clock-output-names = "rpmh_clocks";
 };
 
+&aopcc {
+	compatible = "qcom,dummycc";
+	clock-output-names = "qdss_clocks";
+};
+
 &usb0 {
 	dwc3@a600000 {
 		usb-phy = <&usb_emu_phy>, <&usb_nop_phy>;
diff --git a/arch/arm64/boot/dts/qcom/lito.dtsi b/arch/arm64/boot/dts/qcom/lito.dtsi
index 609691a..0862654 100644
--- a/arch/arm64/boot/dts/qcom/lito.dtsi
+++ b/arch/arm64/boot/dts/qcom/lito.dtsi
@@ -395,70 +395,70 @@
 			reg = <0x0 0x81e00000 0x0 0x2600000>;
 		};
 
-		modem_wlan_mem: modem_wlan_region@86000000 {
+		modem_wlan_mem: modem_wlan_region@87400000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x0 0x86000000 0x0 0xbe00000>;
+			reg = <0x0 0x87400000 0x0 0xc900000>;
 		};
 
-		pil_camera_mem: pil_camera_region@91e00000 {
+		pil_camera_mem: pil_camera_region@93d00000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x0 0x91e00000 0x0 0x500000>;
+			reg = <0x0 0x93d00000 0x0 0x500000>;
 		};
 
-		pil_npu_mem: pil_npu_region@92300000 {
+		pil_npu_mem: pil_npu_region@94200000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x0 0x92300000 0x0 0x500000>;
+			reg = <0x0 0x94200000 0x0 0x500000>;
 		};
 
-		pil_video_mem: pil_video_region@92800000 {
+		pil_video_mem: pil_video_region@94700000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x0 0x92800000 0x0 0x500000>;
+			reg = <0x0 0x94700000 0x0 0x500000>;
 		};
 
-		pil_cvp_mem: pil_cvp_region@92d00000 {
+		pil_cvp_mem: pil_cvp_region@94c00000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x0 0x92d00000 0x0 0x500000>;
+			reg = <0x0 0x94c00000 0x0 0x500000>;
 		};
 
-		pil_cdsp_mem: pil_cdsp_region@93200000 {
+		pil_cdsp_mem: pil_cdsp_region@95100000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x0 0x93200000 0x0 0x1e00000>;
+			reg = <0x0 0x95100000 0x0 0x1e00000>;
 		};
 
-		pil_adsp_mem: pil_adsp_region@95000000 {
+		pil_adsp_mem: pil_adsp_region@96f00000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x0 0x95000000 0x0 0x2800000>;
+			reg = <0x0 0x96f00000 0x0 0x2800000>;
 		};
 
-		pil_wlan_fw_mem: pil_wlan_fw_region@97800000 {
+		pil_wlan_fw_mem: pil_wlan_fw_region@99700000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x0 0x97800000 0x0 0x200000>;
+			reg = <0x0 0x99700000 0x0 0x200000>;
 		};
 
-		pil_ipa_fw_mem: pil_ipa_fw_region@97a00000 {
+		pil_ipa_fw_mem: pil_ipa_fw_region@99900000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x0 0x97a00000 0x0 0x10000>;
+			reg = <0x0 0x99900000 0x0 0x10000>;
 		};
 
-		pil_ipa_gsi_mem: pil_ipa_gsi_region@97a10000 {
+		pil_ipa_gsi_mem: pil_ipa_gsi_region@99910000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x0 0x97a10000 0x0 0x5000>;
+			reg = <0x0 0x99910000 0x0 0x5400>;
 		};
 
-		pil_gpu_mem: pil_gpu_region@97a15000 {
+		pil_gpu_mem: pil_gpu_region@99915400 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x0 0x97a15000 0x0 0x2000>;
+			reg = <0x0 0x99915400 0x0 0x2000>;
 		};
 
 		qseecom_mem: qseecom_mem_region@9e000000 {
@@ -472,6 +472,7 @@
 			no-map;
 			reg = <0x0 0x9f400000 0x0 0xc00000>;
 		};
+
 		cdsp_mem: cdsp_region {
 			compatible = "shared-dma-pool";
 			alloc-ranges = <0x0 0x00000000 0x0 0xffffffff>;
@@ -851,8 +852,9 @@
 	};
 
 	aopcc: qcom,aopclk {
-		compatible = "qcom,dummycc";
-		clock-output-names = "qdss_clocks";
+		compatible = "qcom,aop-qmp-clk";
+		mboxes = <&qmp_aop 0>;
+		mbox-names = "qdss_clk";
 		#clock-cells = <1>;
 	};
 
@@ -901,11 +903,24 @@
 	};
 
 	gpucc: qcom,gpucc {
-		compatible = "qcom,gpucc-lito", "syscon";
+		compatible = "qcom,lito-gpucc", "syscon";
 		reg = <0x3d90000 0x9000>;
 		reg-names = "cc_base";
 		vdd_cx-supply = <&VDD_CX_LEVEL>;
 		vdd_mx-supply = <&VDD_MX_LEVEL>;
+		clocks = <&rpmhcc RPMH_CXO_CLK>;
+		clock-names = "xo";
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+	npucc: qcom,npucc {
+		compatible = "qcom,lito-npucc", "syscon";
+		reg = <0x9980000 0x10000>,
+			<0x9800000 0x10000>,
+			<0x9810000 0x10000>;
+		reg-names = "cc", "qdsp6ss", "qdsp6ss_pll";
+		vdd_cx-supply = <&VDD_CX_LEVEL>;
 		#clock-cells = <1>;
 		#reset-cells = <1>;
 	};
@@ -1036,13 +1051,6 @@
 		status = "disabled";
 	};
 
-	npucc: qcom,npucc {
-		compatible = "qcom,dummycc";
-		clock-output-names = "npucc_clocks";
-		#clock-cells = <1>;
-		#reset-cells = <1>;
-	};
-
 	apps_rsc: rsc@18200000 {
 		label = "apps_rsc";
 		compatible = "qcom,rpmh-rsc";
diff --git a/arch/arm64/boot/dts/qcom/pm8009.dtsi b/arch/arm64/boot/dts/qcom/pm8009.dtsi
index bfa5cf0..4b74708 100644
--- a/arch/arm64/boot/dts/qcom/pm8009.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8009.dtsi
@@ -29,10 +29,10 @@
 		pm8009_gpios: pinctrl@c000 {
 			compatible = "qcom,spmi-gpio";
 			reg = <0xc000 0x400>;
-			interrupts = <0x0 0xc0 0 IRQ_TYPE_NONE>,
-				     <0x0 0xc1 0 IRQ_TYPE_NONE>,
-				     <0x0 0xc2 0 IRQ_TYPE_NONE>,
-				     <0x0 0xc3 0 IRQ_TYPE_NONE>;
+			interrupts = <0xa 0xc0 0 IRQ_TYPE_NONE>,
+				     <0xa 0xc1 0 IRQ_TYPE_NONE>,
+				     <0xa 0xc2 0 IRQ_TYPE_NONE>,
+				     <0xa 0xc3 0 IRQ_TYPE_NONE>;
 			interrupt-names = "pm8009_gpio1", "pm8009_gpio2",
 					  "pm8009_gpio3", "pm8009_gpio4";
 			gpio-controller;
diff --git a/arch/arm64/boot/dts/qcom/pm8150.dtsi b/arch/arm64/boot/dts/qcom/pm8150.dtsi
index 29495c1..10161b4 100644
--- a/arch/arm64/boot/dts/qcom/pm8150.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8150.dtsi
@@ -76,14 +76,15 @@
 			interrupts = <0x0 0xc0 0x0 IRQ_TYPE_NONE>,
 					<0x0 0xc2 0x0 IRQ_TYPE_NONE>,
 					<0x0 0xc5 0x0 IRQ_TYPE_NONE>,
+					<0x0 0xc6 0x0 IRQ_TYPE_NONE>,
 					<0x0 0xc8 0x0 IRQ_TYPE_NONE>,
 					<0x0 0xc9 0x0 IRQ_TYPE_NONE>;
 			interrupt-names = "pm8150_gpio1", "pm8150_gpio3",
-					"pm8150_gpio6", "pm8150_gpio9",
-					"pm8150_gpio10";
+					"pm8150_gpio6", "pm8150_gpio7",
+					"pm8150_gpio9", "pm8150_gpio10";
 			gpio-controller;
 			#gpio-cells = <2>;
-			qcom,gpios-disallowed = <2 4 5 7 8>;
+			qcom,gpios-disallowed = <2 4 5 8>;
 		};
 
 		pm8150_rtc: qcom,pm8150_rtc {
diff --git a/arch/arm64/boot/dts/qcom/pm8150b.dtsi b/arch/arm64/boot/dts/qcom/pm8150b.dtsi
index c427d85..1b87f43 100644
--- a/arch/arm64/boot/dts/qcom/pm8150b.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8150b.dtsi
@@ -61,17 +61,19 @@
 					<0x2 0xc1 0x0 IRQ_TYPE_NONE>,
 					<0x2 0xc4 0x0 IRQ_TYPE_NONE>,
 					<0x2 0xc5 0x0 IRQ_TYPE_NONE>,
+					<0x2 0xc6 0x0 IRQ_TYPE_NONE>,
 					<0x2 0xc7 0x0 IRQ_TYPE_NONE>,
 					<0x2 0xc8 0x0 IRQ_TYPE_NONE>,
 					<0x2 0xc9 0x0 IRQ_TYPE_NONE>,
 					<0x2 0xcb 0x0 IRQ_TYPE_NONE>;
 			interrupt-names = "pm8150b_gpio1", "pm8150b_gpio2",
 					"pm8150b_gpio5", "pm8150b_gpio6",
-					"pm8150b_gpio8", "pm8150b_gpio9",
-					"pm8150b_gpio10", "pm8150b_gpio12";
+					"pm8150b_gpio7", "pm8150b_gpio8",
+					"pm8150b_gpio9", "pm8150b_gpio10",
+					"pm8150b_gpio12";
 			gpio-controller;
 			#gpio-cells = <2>;
-			qcom,gpios-disallowed = <3 4 7 11>;
+			qcom,gpios-disallowed = <3 4 11>;
 		};
 
 		pm8150b_charger: qcom,qpnp-smb5 {
diff --git a/arch/arm64/boot/dts/qcom/pm8150l.dtsi b/arch/arm64/boot/dts/qcom/pm8150l.dtsi
index da4e4e5..186c86f 100644
--- a/arch/arm64/boot/dts/qcom/pm8150l.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8150l.dtsi
@@ -55,18 +55,18 @@
 					<0x4 0xc3 0x0 IRQ_TYPE_NONE>,
 					<0x4 0xc4 0x0 IRQ_TYPE_NONE>,
 					<0x4 0xc5 0x0 IRQ_TYPE_NONE>,
+					<0x4 0xc6 0x0 IRQ_TYPE_NONE>,
 					<0x4 0xc7 0x0 IRQ_TYPE_NONE>,
-					<0x4 0xc8 0x0 IRQ_TYPE_NONE>,
 					<0x4 0xc9 0x0 IRQ_TYPE_NONE>,
 					<0x4 0xca 0x0 IRQ_TYPE_NONE>;
 			interrupt-names = "pm8150l_gpio1", "pm8150l_gpio3",
 					"pm8150l_gpio4", "pm8150l_gpio5",
-					"pm8150l_gpio6", "pm8150l_gpio8",
-					"pm8150l_gpio9", "pm8150l_gpio10",
+					"pm8150l_gpio6", "pm8150l_gpio7",
+					"pm8150l_gpio8", "pm8150l_gpio10",
 					"pm8150l_gpio11";
 			gpio-controller;
 			#gpio-cells = <2>;
-			qcom,gpios-disallowed = <2 7 12>;
+			qcom,gpios-disallowed = <2 9 12>;
 		};
 
 		pm8150l_vadc: vadc@3100 {
diff --git a/arch/arm64/configs/vendor/kona-perf_defconfig b/arch/arm64/configs/vendor/kona-perf_defconfig
index 2fdb64d..cb6da38 100644
--- a/arch/arm64/configs/vendor/kona-perf_defconfig
+++ b/arch/arm64/configs/vendor/kona-perf_defconfig
@@ -552,6 +552,7 @@
 CONFIG_QSEE_IPC_IRQ_BRIDGE=y
 CONFIG_QCOM_GLINK=y
 CONFIG_QCOM_GLINK_PKT=y
+CONFIG_QCOM_SMP2P_SLEEPSTATE=y
 CONFIG_QCOM_QDSS_BRIDGE=y
 CONFIG_MSM_CDSP_LOADER=y
 CONFIG_QCOM_SMCINVOKE=y
diff --git a/arch/arm64/configs/vendor/kona_defconfig b/arch/arm64/configs/vendor/kona_defconfig
index 2210bec..626e4ad 100644
--- a/arch/arm64/configs/vendor/kona_defconfig
+++ b/arch/arm64/configs/vendor/kona_defconfig
@@ -571,6 +571,7 @@
 CONFIG_QSEE_IPC_IRQ_BRIDGE=y
 CONFIG_QCOM_GLINK=y
 CONFIG_QCOM_GLINK_PKT=y
+CONFIG_QCOM_SMP2P_SLEEPSTATE=y
 CONFIG_QCOM_QDSS_BRIDGE=y
 CONFIG_MSM_CDSP_LOADER=y
 CONFIG_QCOM_SMCINVOKE=y
@@ -650,6 +651,7 @@
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_DEBUG_INFO=y
 CONFIG_PAGE_OWNER=y
+CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
 CONFIG_DEBUG_SECTION_MISMATCH=y
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
diff --git a/arch/arm64/configs/vendor/lito-perf_defconfig b/arch/arm64/configs/vendor/lito-perf_defconfig
index 870c6ce..c26e79a 100644
--- a/arch/arm64/configs/vendor/lito-perf_defconfig
+++ b/arch/arm64/configs/vendor/lito-perf_defconfig
@@ -369,6 +369,7 @@
 CONFIG_SM_CAMCC_LITO=y
 CONFIG_SM_DISPCC_LITO=y
 CONFIG_SM_GPUCC_LITO=y
+CONFIG_SM_NPUCC_LITO=y
 CONFIG_HWSPINLOCK=y
 CONFIG_HWSPINLOCK_QCOM=y
 CONFIG_MAILBOX=y
diff --git a/arch/arm64/configs/vendor/lito_defconfig b/arch/arm64/configs/vendor/lito_defconfig
index 4c43eb8..d22eb76 100644
--- a/arch/arm64/configs/vendor/lito_defconfig
+++ b/arch/arm64/configs/vendor/lito_defconfig
@@ -377,6 +377,7 @@
 CONFIG_SM_CAMCC_LITO=y
 CONFIG_SM_DISPCC_LITO=y
 CONFIG_SM_GPUCC_LITO=y
+CONFIG_SM_NPUCC_LITO=y
 CONFIG_HWSPINLOCK=y
 CONFIG_HWSPINLOCK_QCOM=y
 CONFIG_MAILBOX=y
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 19ba6f4..7616eab 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -4619,7 +4619,7 @@
 
 	for (n = 0; ; n++) {
 		rrate = clk_hw_round_rate(hw, rate + 1);
-		if (!rate) {
+		if (!rrate) {
 			pr_err("clk_round_rate failed for %s\n",
 							core->name);
 			goto err_derive_device_list;
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 3d7bbea..f6c8d1a 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -392,3 +392,12 @@
 	 Support for the graphics clock controller on Qualcomm Technologies, Inc.
 	 LITO devices.
 	 Say Y if you want to support graphics controller devices.
+
+config SM_NPUCC_LITO
+	tristate "LITO NPU Clock Controller"
+	depends on COMMON_CLK_QCOM
+	help
+	 Support for the NPU clock controller on Qualcomm Technologies, Inc.
+	 LITO devices.
+	 Say Y if you want to enable use of the Network Processing Unit in
+	 order to speed up certain types of calculations.
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 8fce145..afa72d2 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -55,6 +55,7 @@
 obj-$(CONFIG_SM_CAMCC_LITO) += camcc-lito.o
 obj-$(CONFIG_SM_DISPCC_LITO) += dispcc-lito.o
 obj-$(CONFIG_SM_GCC_LITO) += gcc-lito.o
+obj-$(CONFIG_SM_NPUCC_LITO) += npucc-lito.o
 obj-$(CONFIG_SM_VIDEOCC_LITO) += videocc-lito.o
 obj-$(CONFIG_SM_GPUCC_LITO) += gpucc-lito.o
 obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o
diff --git a/drivers/clk/qcom/camcc-lito.c b/drivers/clk/qcom/camcc-lito.c
index 10842bb..014be22 100644
--- a/drivers/clk/qcom/camcc-lito.c
+++ b/drivers/clk/qcom/camcc-lito.c
@@ -178,8 +178,8 @@
 	{ P_CORE_BI_PLL_TEST_SE, 7 },
 };
 
-static const char * const cam_cc_parent_names_9[] = {
-	"bi_tcxo",
+static const char * const cam_cc_parent_names_9_ao[] = {
+	"bi_tcxo_ao",
 	"core_bi_pll_test_se",
 };
 
@@ -198,7 +198,7 @@
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002261,
 	.config_ctl_hi1_val = 0x029A699C,
-	.user_ctl_val = 0x00003101,
+	.user_ctl_val = 0x00000001,
 	.user_ctl_hi_val = 0x00000805,
 	.user_ctl_hi1_val = 0x00000000,
 };
@@ -274,7 +274,7 @@
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002261,
 	.config_ctl_hi1_val = 0x029A699C,
-	.user_ctl_val = 0x00000101,
+	.user_ctl_val = 0x00000001,
 	.user_ctl_hi_val = 0x00000805,
 	.user_ctl_hi1_val = 0x00000000,
 };
@@ -403,7 +403,7 @@
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002261,
 	.config_ctl_hi1_val = 0x029A699C,
-	.user_ctl_val = 0x00000101,
+	.user_ctl_val = 0x00000001,
 	.user_ctl_hi_val = 0x00000805,
 	.user_ctl_hi1_val = 0x00000000,
 };
@@ -458,7 +458,7 @@
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002261,
 	.config_ctl_hi1_val = 0x029A699C,
-	.user_ctl_val = 0x00000101,
+	.user_ctl_val = 0x00000001,
 	.user_ctl_hi_val = 0x00000805,
 	.user_ctl_hi1_val = 0x00000000,
 };
@@ -1242,7 +1242,7 @@
 	.freq_tbl = ftbl_cam_cc_xo_clk_src,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_xo_clk_src",
-		.parent_names = cam_cc_parent_names_9,
+		.parent_names = cam_cc_parent_names_9_ao,
 		.num_parents = 2,
 		.ops = &clk_rcg2_ops,
 	},
diff --git a/drivers/clk/qcom/dispcc-lito.c b/drivers/clk/qcom/dispcc-lito.c
index 48833b46..b275e730 100644
--- a/drivers/clk/qcom/dispcc-lito.c
+++ b/drivers/clk/qcom/dispcc-lito.c
@@ -82,6 +82,11 @@
 	"core_bi_pll_test_se",
 };
 
+static const char * const disp_cc_parent_names_2_ao[] = {
+	"bi_tcxo_ao",
+	"core_bi_pll_test_se",
+};
+
 static const struct parent_map disp_cc_parent_map_3[] = {
 	{ P_BI_TCXO, 0 },
 	{ P_DISP_CC_PLL0_OUT_MAIN, 1 },
@@ -581,7 +586,7 @@
 	.freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "disp_cc_xo_clk_src",
-		.parent_names = disp_cc_parent_names_2,
+		.parent_names = disp_cc_parent_names_2_ao,
 		.num_parents = 2,
 		.ops = &clk_rcg2_ops,
 	},
diff --git a/drivers/clk/qcom/gpucc-lito.c b/drivers/clk/qcom/gpucc-lito.c
index 92c2f19..53dff76 100644
--- a/drivers/clk/qcom/gpucc-lito.c
+++ b/drivers/clk/qcom/gpucc-lito.c
@@ -404,9 +404,18 @@
 static int gpu_cc_lito_probe(struct platform_device *pdev)
 {
 	struct regmap *regmap;
+	struct clk *clk;
 	unsigned int value, mask;
 	int ret;
 
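+	/*
+	 * The "xo" clock must be available before registering the clock
+	 * controller; return the error (possibly -EPROBE_DEFER) otherwise.
+	 */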
+	clk = clk_get(&pdev->dev, "xo");
+	if (IS_ERR(clk)) {
+		if (PTR_ERR(clk) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get xo clock handle\n");
+		return PTR_ERR(clk);
+	}
+	clk_put(clk);
+
 	regmap = qcom_cc_map(pdev, &gpu_cc_lito_desc);
 	if (IS_ERR(regmap))
 		return PTR_ERR(regmap);
diff --git a/drivers/clk/qcom/npucc-lito.c b/drivers/clk/qcom/npucc-lito.c
new file mode 100644
index 0000000..330266d
--- /dev/null
+++ b/drivers/clk/qcom/npucc-lito.c
@@ -0,0 +1,1118 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,npucc-lito.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "reset.h"
+#include "vdd-level.h"
+
+static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
+
+#define HM0_CRC_SID_FSM_CTRL		0x11A0
+#define CRC_SID_FSM_CTRL_SETTING	0x800000
+#define HM0_CRC_MND_CFG			0x11A4
+#define CRC_MND_CFG_SETTING		0x15011
+
+enum {
+	P_BI_TCXO,
+	P_CORE_BI_PLL_TEST_SE,
+	P_GCC_NPU_GPLL0_CLK,
+	P_GCC_NPU_GPLL0_DIV_CLK,
+	P_NPU_CC_PLL0_OUT_EVEN,
+	P_NPU_CC_PLL1_OUT_EVEN,
+	P_NPU_Q6SS_PLL_OUT_MAIN,
+	P_NPU_CC_CRC_DIV,
+};
+
+static const struct parent_map npu_cc_parent_map_0[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_NPU_CC_PLL1_OUT_EVEN, 1 },
+	{ P_NPU_CC_PLL0_OUT_EVEN, 2 },
+	{ P_GCC_NPU_GPLL0_CLK, 4 },
+	{ P_GCC_NPU_GPLL0_DIV_CLK, 5 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const npu_cc_parent_names_0[] = {
+	"bi_tcxo",
+	"npu_cc_pll1_out_even",
+	"npu_cc_pll0_out_even",
+	"gcc_npu_gpll0_clk_src",
+	"gcc_npu_gpll0_div_clk_src",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map npu_cc_parent_map_0_crc[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_NPU_CC_PLL1_OUT_EVEN, 1 },
+	{ P_NPU_CC_CRC_DIV, 2 },
+	{ P_GCC_NPU_GPLL0_CLK, 4 },
+	{ P_GCC_NPU_GPLL0_DIV_CLK, 5 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const npu_cc_parent_names_0_crc[] = {
+	"bi_tcxo",
+	"npu_cc_pll1_out_even",
+	"npu_cc_crc_div",
+	"gcc_npu_gpll0_clk_src",
+	"gcc_npu_gpll0_div_clk_src",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map npu_cc_parent_map_1[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const npu_cc_parent_names_1[] = {
+	"bi_tcxo",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map npu_cc_parent_map_2[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_NPU_Q6SS_PLL_OUT_MAIN, 1 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const npu_cc_parent_names_2[] = {
+	"bi_tcxo",
+	"npu_q6ss_pll",
+	"core_bi_pll_test_se",
+};
+
+static struct pll_vco lucid_vco[] = {
+	{ 249600000, 2000000000, 0 },
+};
+
+static const u32 crc_reg_offset[] = {
+	HM0_CRC_MND_CFG, HM0_CRC_SID_FSM_CTRL,
+};
+
+static const u32 crc_reg_val[] = {
+	CRC_MND_CFG_SETTING, CRC_SID_FSM_CTRL_SETTING,
+};
+
+static struct alpha_pll_config npu_cc_pll0_config = {
+	.l = 0x14,
+	.cal_l = 0x44,
+	.alpha = 0xD555,
+	.config_ctl_val = 0x20485699,
+	.config_ctl_hi_val = 0x00002261,
+	.config_ctl_hi1_val = 0x029A699C,
+	.user_ctl_val = 0x00000000,
+	.user_ctl_hi_val = 0x00000805,
+	.user_ctl_hi1_val = 0x00000000,
+	.custom_reg_offset = crc_reg_offset,
+	.custom_reg_val = crc_reg_val,
+	.num_custom_reg = ARRAY_SIZE(crc_reg_offset),
+};
+
+static struct clk_alpha_pll npu_cc_pll0 = {
+	.offset = 0x0,
+	.vco_table = lucid_vco,
+	.num_vco = ARRAY_SIZE(lucid_vco),
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+	.config = &npu_cc_pll0_config,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_pll0",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_lucid_ops,
+			.vdd_class = &vdd_cx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 615000000,
+				[VDD_LOW] = 1066000000,
+				[VDD_LOW_L1] = 1600000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_npu_cc_pll0_out_even[] = {
+	{ 0x0, 1 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv npu_cc_pll0_out_even = {
+	.offset = 0x0,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_npu_cc_pll0_out_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_npu_cc_pll0_out_even),
+	.width = 4,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "npu_cc_pll0_out_even",
+		.parent_names = (const char *[]){ "npu_cc_pll0" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_alpha_pll_postdiv_lucid_ops,
+	},
+};
+
+static struct alpha_pll_config npu_cc_pll1_config = {
+	.l = 0xF,
+	.cal_l = 0x44,
+	.alpha = 0xA000,
+	.config_ctl_val = 0x20485699,
+	.config_ctl_hi_val = 0x00002261,
+	.config_ctl_hi1_val = 0x029A699C,
+	.user_ctl_val = 0x00000000,
+	.user_ctl_hi_val = 0x00000805,
+	.user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll npu_cc_pll1 = {
+	.offset = 0x400,
+	.vco_table = lucid_vco,
+	.num_vco = ARRAY_SIZE(lucid_vco),
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+	.config = &npu_cc_pll1_config,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_pll1",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_lucid_ops,
+			.vdd_class = &vdd_cx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 615000000,
+				[VDD_LOW] = 1066000000,
+				[VDD_LOW_L1] = 1600000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_npu_cc_pll1_out_even[] = {
+	{ 0x0, 1 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv npu_cc_pll1_out_even = {
+	.offset = 0x400,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_npu_cc_pll1_out_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_npu_cc_pll1_out_even),
+	.width = 4,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "npu_cc_pll1_out_even",
+		.parent_names = (const char *[]){ "npu_cc_pll1" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_alpha_pll_postdiv_lucid_ops,
+	},
+};
+
+static const struct alpha_pll_config npu_q6ss_pll_config = {
+	.l = 0xD,
+	.cal_l = 0x44,
+	.alpha = 0x555,
+	.config_ctl_val = 0x20485699,
+	.config_ctl_hi_val = 0x00002261,
+	.config_ctl_hi1_val = 0x029A699C,
+	.user_ctl_val = 0x00000000,
+	.user_ctl_hi_val = 0x00000805,
+	.user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll npu_q6ss_pll = {
+	.offset = 0x0,
+	.vco_table = lucid_vco,
+	.num_vco = ARRAY_SIZE(lucid_vco),
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_q6ss_pll",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_lucid_ops,
+			.vdd_class = &vdd_cx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 615000000,
+				[VDD_LOW] = 1066000000,
+				[VDD_LOW_L1] = 1600000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
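+/*
+ * Fixed divide-by-2 of npu_cc_pll0_out_even, selected as the
+ * P_NPU_CC_CRC_DIV source of the cal_hm0 RCG.
+ */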
+static struct clk_fixed_factor npu_cc_crc_div = {
+	.mult = 1,
+	.div = 2,
+	.hw.init = &(struct clk_init_data){
+		.name = "npu_cc_crc_div",
+		.parent_names = (const char *[]){ "npu_cc_pll0_out_even" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_npu_cc_cal_hm0_clk_src[] = {
+	F(200000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
+	F(300000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
+	F(518400000, P_NPU_CC_CRC_DIV, 1, 0, 0),
+	F(633600000, P_NPU_CC_CRC_DIV, 1, 0, 0),
+	F(825600000, P_NPU_CC_CRC_DIV, 1, 0, 0),
+	F(1000000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 npu_cc_cal_hm0_clk_src = {
+	.cmd_rcgr = 0x1100,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = npu_cc_parent_map_0_crc,
+	.freq_tbl = ftbl_npu_cc_cal_hm0_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "npu_cc_cal_hm0_clk_src",
+		.parent_names = npu_cc_parent_names_0_crc,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_MIN] = 200000000,
+			[VDD_LOWER] = 300000000,
+			[VDD_LOW] = 518400000,
+			[VDD_LOW_L1] = 633600000,
+			[VDD_NOMINAL] = 825600000,
+			[VDD_HIGH] = 1000000000},
+	},
+};
+
+static const struct freq_tbl ftbl_npu_cc_core_clk_src[] = {
+	F(60000000, P_GCC_NPU_GPLL0_DIV_CLK, 5, 0, 0),
+	F(100000000, P_GCC_NPU_GPLL0_DIV_CLK, 3, 0, 0),
+	F(200000000, P_GCC_NPU_GPLL0_CLK, 3, 0, 0),
+	F(333333333, P_NPU_CC_PLL1_OUT_EVEN, 4.5, 0, 0),
+	F(428571429, P_NPU_CC_PLL1_OUT_EVEN, 3.5, 0, 0),
+	F(500000000, P_NPU_CC_PLL1_OUT_EVEN, 3, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 npu_cc_core_clk_src = {
+	.cmd_rcgr = 0x1010,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = npu_cc_parent_map_0,
+	.freq_tbl = ftbl_npu_cc_core_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "npu_cc_core_clk_src",
+		.parent_names = npu_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_MIN] = 60000000,
+			[VDD_LOWER] = 100000000,
+			[VDD_LOW] = 200000000,
+			[VDD_LOW_L1] = 333333333,
+			[VDD_NOMINAL] = 428571429,
+			[VDD_HIGH] = 500000000},
+	},
+};
+
+static const struct freq_tbl ftbl_npu_cc_lmh_clk_src[] = {
+	F(60000000, P_GCC_NPU_GPLL0_DIV_CLK, 5, 0, 0),
+	F(100000000, P_GCC_NPU_GPLL0_DIV_CLK, 3, 0, 0),
+	F(200000000, P_GCC_NPU_GPLL0_CLK, 3, 0, 0),
+	F(214285714, P_NPU_CC_PLL1_OUT_EVEN, 7, 0, 0),
+	F(300000000, P_NPU_CC_PLL1_OUT_EVEN, 5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 npu_cc_lmh_clk_src = {
+	.cmd_rcgr = 0x1060,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = npu_cc_parent_map_0,
+	.freq_tbl = ftbl_npu_cc_lmh_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "npu_cc_lmh_clk_src",
+		.parent_names = npu_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_MIN] = 60000000,
+			[VDD_LOWER] = 100000000,
+			[VDD_LOW] = 200000000,
+			[VDD_LOW_L1] = 214285714,
+			[VDD_NOMINAL] = 300000000},
+	},
+};
+
+static const struct freq_tbl ftbl_npu_cc_xo_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 npu_cc_xo_clk_src = {
+	.cmd_rcgr = 0x1400,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = npu_cc_parent_map_1,
+	.freq_tbl = ftbl_npu_cc_xo_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "npu_cc_xo_clk_src",
+		.parent_names = npu_cc_parent_names_1,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_npu_dsp_core_clk_src[] = {
+	F(250000000, P_NPU_Q6SS_PLL_OUT_MAIN, 1, 0, 0),
+	F(300000000, P_NPU_Q6SS_PLL_OUT_MAIN, 1, 0, 0),
+	F(400000000, P_NPU_Q6SS_PLL_OUT_MAIN, 1, 0, 0),
+	F(500000000, P_NPU_Q6SS_PLL_OUT_MAIN, 1, 0, 0),
+	F(660000000, P_NPU_Q6SS_PLL_OUT_MAIN, 1, 0, 0),
+	F(800000000, P_NPU_Q6SS_PLL_OUT_MAIN, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 npu_dsp_core_clk_src = {
+	.cmd_rcgr = 0x28,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = npu_cc_parent_map_2,
+	.freq_tbl = ftbl_npu_dsp_core_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "npu_dsp_core_clk_src",
+		.parent_names = npu_cc_parent_names_2,
+		.num_parents = 3,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_MIN] = 250000000,
+			[VDD_LOWER] = 300000000,
+			[VDD_LOW] = 400000000,
+			[VDD_LOW_L1] = 500000000,
+			[VDD_NOMINAL] = 660000000,
+			[VDD_HIGH] = 800000000},
+	},
+};
+
+static struct clk_branch npu_cc_bto_core_clk = {
+	.halt_reg = 0x10dc,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x10dc,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_bto_core_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_xo_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_bwmon_clk = {
+	.halt_reg = 0x10d8,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x10d8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_bwmon_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_xo_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_cal_hm0_cdc_clk = {
+	.halt_reg = 0x1098,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1098,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_cal_hm0_cdc_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_cal_hm0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_cal_hm0_clk = {
+	.halt_reg = 0x1110,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1110,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_cal_hm0_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_cal_hm0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_cal_hm0_dpm_ip_clk = {
+	.halt_reg = 0x109c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x109c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_cal_hm0_dpm_ip_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_cal_hm0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_cal_hm0_perf_cnt_clk = {
+	.halt_reg = 0x10a0,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x10a0,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_cal_hm0_perf_cnt_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_cal_hm0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_core_clk = {
+	.halt_reg = 0x1030,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1030,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_core_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_core_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_dl_dpm_clk = {
+	.halt_reg = 0x1238,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1238,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_dl_dpm_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_lmh_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_dl_llm_clk = {
+	.halt_reg = 0x1234,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1234,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_dl_llm_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_lmh_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_dpm_clk = {
+	.halt_reg = 0x107c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x107c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_dpm_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_lmh_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_dpm_temp_clk = {
+	.halt_reg = 0x10c4,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x10c4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_dpm_temp_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_dpm_xo_clk = {
+	.halt_reg = 0x1094,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1094,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_dpm_xo_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_xo_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_dsp_ahbm_clk = {
+	.halt_reg = 0x1214,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1214,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_dsp_ahbm_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_core_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_dsp_ahbs_clk = {
+	.halt_reg = 0x1210,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x1210,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_dsp_ahbs_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_core_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_dsp_axi_clk = {
+	.halt_reg = 0x121c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x121c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_dsp_axi_clk",
+			.parent_names = (const char *[]){
+				"gcc_npu_noc_axi_clk"
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_dsp_bwmon_ahb_clk = {
+	.halt_reg = 0x1218,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1218,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_dsp_bwmon_ahb_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_core_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_dsp_bwmon_clk = {
+	.halt_reg = 0x1224,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1224,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_dsp_bwmon_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_xo_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_isense_clk = {
+	.halt_reg = 0x1078,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1078,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_isense_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_lmh_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_llm_clk = {
+	.halt_reg = 0x1074,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1074,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_llm_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_lmh_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_llm_curr_clk = {
+	.halt_reg = 0x10d4,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x10d4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_llm_curr_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_llm_temp_clk = {
+	.halt_reg = 0x10c8,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x10c8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_llm_temp_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_llm_xo_clk = {
+	.halt_reg = 0x1090,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1090,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_llm_xo_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_xo_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_noc_ahb_clk = {
+	.halt_reg = 0x10c0,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x10c0,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_noc_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_noc_axi_clk = {
+	.halt_reg = 0x10b8,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x10b8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_noc_axi_clk",
+			.parent_names = (const char *[]){
+				"gcc_npu_noc_axi_clk"
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_noc_dma_clk = {
+	.halt_reg = 0x10b0,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x10b0,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_noc_dma_clk",
+			.parent_names = (const char *[]){
+				"gcc_npu_noc_dma_clk"
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_rsc_xo_clk = {
+	.halt_reg = 0x10e0,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x10e0,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_rsc_xo_clk",
+			.parent_names = (const char *[]){
+				"npu_cc_xo_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_s2p_clk = {
+	.halt_reg = 0x10cc,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x10cc,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_s2p_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch npu_cc_xo_clk = {
+	.halt_reg = 0x1410,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1410,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "npu_cc_xo_clk",
+			.flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_regmap *npu_cc_lito_clocks[] = {
+	[NPU_CC_BTO_CORE_CLK] = &npu_cc_bto_core_clk.clkr,
+	[NPU_CC_BWMON_CLK] = &npu_cc_bwmon_clk.clkr,
+	[NPU_CC_CAL_HM0_CDC_CLK] = &npu_cc_cal_hm0_cdc_clk.clkr,
+	[NPU_CC_CAL_HM0_CLK] = &npu_cc_cal_hm0_clk.clkr,
+	[NPU_CC_CAL_HM0_CLK_SRC] = &npu_cc_cal_hm0_clk_src.clkr,
+	[NPU_CC_CAL_HM0_DPM_IP_CLK] = &npu_cc_cal_hm0_dpm_ip_clk.clkr,
+	[NPU_CC_CAL_HM0_PERF_CNT_CLK] = &npu_cc_cal_hm0_perf_cnt_clk.clkr,
+	[NPU_CC_CORE_CLK] = &npu_cc_core_clk.clkr,
+	[NPU_CC_CORE_CLK_SRC] = &npu_cc_core_clk_src.clkr,
+	[NPU_CC_DL_DPM_CLK] = &npu_cc_dl_dpm_clk.clkr,
+	[NPU_CC_DL_LLM_CLK] = &npu_cc_dl_llm_clk.clkr,
+	[NPU_CC_DPM_CLK] = &npu_cc_dpm_clk.clkr,
+	[NPU_CC_DPM_TEMP_CLK] = &npu_cc_dpm_temp_clk.clkr,
+	[NPU_CC_DPM_XO_CLK] = &npu_cc_dpm_xo_clk.clkr,
+	[NPU_CC_DSP_AHBM_CLK] = &npu_cc_dsp_ahbm_clk.clkr,
+	[NPU_CC_DSP_AHBS_CLK] = &npu_cc_dsp_ahbs_clk.clkr,
+	[NPU_CC_DSP_AXI_CLK] = &npu_cc_dsp_axi_clk.clkr,
+	[NPU_CC_DSP_BWMON_AHB_CLK] = &npu_cc_dsp_bwmon_ahb_clk.clkr,
+	[NPU_CC_DSP_BWMON_CLK] = &npu_cc_dsp_bwmon_clk.clkr,
+	[NPU_CC_ISENSE_CLK] = &npu_cc_isense_clk.clkr,
+	[NPU_CC_LLM_CLK] = &npu_cc_llm_clk.clkr,
+	[NPU_CC_LLM_CURR_CLK] = &npu_cc_llm_curr_clk.clkr,
+	[NPU_CC_LLM_TEMP_CLK] = &npu_cc_llm_temp_clk.clkr,
+	[NPU_CC_LLM_XO_CLK] = &npu_cc_llm_xo_clk.clkr,
+	[NPU_CC_LMH_CLK_SRC] = &npu_cc_lmh_clk_src.clkr,
+	[NPU_CC_NOC_AHB_CLK] = &npu_cc_noc_ahb_clk.clkr,
+	[NPU_CC_NOC_AXI_CLK] = &npu_cc_noc_axi_clk.clkr,
+	[NPU_CC_NOC_DMA_CLK] = &npu_cc_noc_dma_clk.clkr,
+	[NPU_CC_PLL0] = &npu_cc_pll0.clkr,
+	[NPU_CC_PLL0_OUT_EVEN] = &npu_cc_pll0_out_even.clkr,
+	[NPU_CC_PLL1] = &npu_cc_pll1.clkr,
+	[NPU_CC_PLL1_OUT_EVEN] = &npu_cc_pll1_out_even.clkr,
+	[NPU_CC_RSC_XO_CLK] = &npu_cc_rsc_xo_clk.clkr,
+	[NPU_CC_S2P_CLK] = &npu_cc_s2p_clk.clkr,
+	[NPU_CC_XO_CLK] = &npu_cc_xo_clk.clkr,
+	[NPU_CC_XO_CLK_SRC] = &npu_cc_xo_clk_src.clkr,
+};
+
+static struct clk_regmap *npu_qdsp6ss_lito_clocks[] = {
+	[NPU_DSP_CORE_CLK_SRC] = &npu_dsp_core_clk_src.clkr,
+};
+
+static struct clk_regmap *npu_qdsp6ss_pll_lito_clocks[] = {
+	[NPU_Q6SS_PLL] = &npu_q6ss_pll.clkr,
+};
+
+static const struct qcom_reset_map npu_cc_lito_resets[] = {
+	[NPU_CC_CAL_HM0_BCR] = { 0x10f0 },
+	[NPU_CC_CAL_HM1_BCR] = { 0x1130 },
+	[NPU_CC_CORE_BCR] = { 0x1000 },
+	[NPU_CC_DPM_TEMP_CLK_ARES] = { 0x10c4, 2 },
+	[NPU_CC_LLM_TEMP_CLK_ARES] = { 0x10c8, 2 },
+	[NPU_CC_LLM_CURR_CLK_ARES] = { 0x10d4, 2 },
+	[NPU_CC_DSP_BCR] = { 0x1200 },
+};
+
+static const struct regmap_config npu_cc_lito_regmap_config = {
+	.name = "cc",
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = 0xa060,
+	.fast_io = true,
+};
+
+static const struct regmap_config npu_qdsp6ss_lito_regmap_config = {
+	.name = "qdsp6ss",
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = 0x203c,
+	.fast_io = true,
+};
+
+static const struct regmap_config npu_qdsp6ss_pll_lito_regmap_config = {
+	.name = "qdsp6ss_pll",
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = 0x50,
+	.fast_io = true,
+};
+
+static const struct qcom_cc_desc npu_cc_lito_desc = {
+	.config = &npu_cc_lito_regmap_config,
+	.clks = npu_cc_lito_clocks,
+	.num_clks = ARRAY_SIZE(npu_cc_lito_clocks),
+	.resets = npu_cc_lito_resets,
+	.num_resets = ARRAY_SIZE(npu_cc_lito_resets),
+};
+
+static const struct qcom_cc_desc npu_qdsp6ss_lito_desc = {
+	.config = &npu_qdsp6ss_lito_regmap_config,
+	.clks = npu_qdsp6ss_lito_clocks,
+	.num_clks = ARRAY_SIZE(npu_qdsp6ss_lito_clocks),
+};
+
+static const struct qcom_cc_desc npu_qdsp6ss_pll_lito_desc = {
+	.config = &npu_qdsp6ss_pll_lito_regmap_config,
+	.clks = npu_qdsp6ss_pll_lito_clocks,
+	.num_clks = ARRAY_SIZE(npu_qdsp6ss_pll_lito_clocks),
+};
+
+static const struct of_device_id npu_cc_lito_match_table[] = {
+	{ .compatible = "qcom,lito-npucc" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, npu_cc_lito_match_table);
+
+static int npu_clocks_lito_probe(struct platform_device *pdev,
+					const struct qcom_cc_desc *desc)
+{
+	struct regmap *regmap;
+	struct resource *res;
+	void __iomem *base;
+	int ret;
+
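+	/*
+	 * Each sub-block (cc, qdsp6ss, qdsp6ss_pll) maps its own register
+	 * region; the memory resource is looked up by the regmap config name.
+	 */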
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							desc->config->name);
+	base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	regmap = devm_regmap_init_mmio(&pdev->dev, base, desc->config);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	if (!strcmp("cc", desc->config->name)) {
+		clk_lucid_pll_configure(&npu_cc_pll0, regmap,
+					&npu_cc_pll0_config);
+		clk_lucid_pll_configure(&npu_cc_pll1, regmap,
+					&npu_cc_pll1_config);
+
+		/* Register the fixed factor clock for CRC divider */
+		ret = devm_clk_hw_register(&pdev->dev, &npu_cc_crc_div.hw);
+		if (ret) {
+			dev_err(&pdev->dev, "Failed to register CRC divider clock, ret=%d\n",
+									ret);
+			return ret;
+		}
+	} else if (!strcmp("qdsp6ss_pll", desc->config->name)) {
+		clk_lucid_pll_configure(&npu_q6ss_pll, regmap,
+						&npu_q6ss_pll_config);
+	}
+
+	return qcom_cc_really_probe(pdev, desc, regmap);
+}
+
+static int npu_cc_lito_probe(struct platform_device *pdev)
+{
+	int ret;
+
+	vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx");
+	if (IS_ERR(vdd_cx.regulator[0])) {
+		ret = PTR_ERR(vdd_cx.regulator[0]);
+		if (ret != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get vdd_cx regulator, ret=%d\n",
+									ret);
+		return ret;
+	}
+
+	ret = npu_clocks_lito_probe(pdev, &npu_cc_lito_desc);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "npu_cc clock registration failed, ret=%d\n",
+									ret);
+		return ret;
+	}
+
+	ret = npu_clocks_lito_probe(pdev, &npu_qdsp6ss_lito_desc);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "npu_qdsp6ss clock registration failed, ret=%d\n",
+									ret);
+		return ret;
+	}
+
+	ret = npu_clocks_lito_probe(pdev, &npu_qdsp6ss_pll_lito_desc);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "npu_qdsp6ss_pll clock registration failed, ret=%d\n",
+			ret);
+		return ret;
+	}
+
+	dev_info(&pdev->dev, "Registered NPU_CC clocks\n");
+
+	return 0;
+}
+
+static struct platform_driver npu_cc_lito_driver = {
+	.probe = npu_cc_lito_probe,
+	.driver = {
+		.name = "lito_npucc",
+		.of_match_table = npu_cc_lito_match_table,
+	},
+};
+
+static int __init npu_cc_lito_init(void)
+{
+	return platform_driver_register(&npu_cc_lito_driver);
+}
+subsys_initcall(npu_cc_lito_init);
+
+static void __exit npu_cc_lito_exit(void)
+{
+	platform_driver_unregister(&npu_cc_lito_driver);
+}
+module_exit(npu_cc_lito_exit);
+
+MODULE_DESCRIPTION("QTI NPU_CC LITO Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:npu_cc-lito");
diff --git a/drivers/clk/qcom/videocc-lito.c b/drivers/clk/qcom/videocc-lito.c
index 030ea60..5a72853 100644
--- a/drivers/clk/qcom/videocc-lito.c
+++ b/drivers/clk/qcom/videocc-lito.c
@@ -71,8 +71,8 @@
 	{ P_CORE_BI_PLL_TEST_SE, 7 },
 };
 
-static const char * const video_cc_parent_names_2[] = {
-	"bi_tcxo",
+static const char * const video_cc_parent_names_2_ao[] = {
+	"bi_tcxo_ao",
 	"core_bi_pll_test_se",
 };
 
@@ -183,7 +183,7 @@
 	.freq_tbl = ftbl_video_cc_xo_clk_src,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "video_cc_xo_clk_src",
-		.parent_names = video_cc_parent_names_2,
+		.parent_names = video_cc_parent_names_2_ao,
 		.num_parents = 2,
 		.ops = &clk_rcg2_ops,
 	},
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index d708472..6794d60 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -51,10 +51,6 @@
 				     int id,
 				     struct drm_dp_payload *payload);
 
-static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
-				  struct drm_dp_mst_port *port,
-				  int offset, int size, u8 *bytes);
-
 static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
 				     struct drm_dp_mst_branch *mstb);
 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
@@ -439,6 +435,7 @@
 	if (idx > raw->curlen)
 		goto fail_len;
 	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
+	idx++;
 	if (idx > raw->curlen)
 		goto fail_len;
 
@@ -1402,7 +1399,6 @@
 	return false;
 }
 
-#if 0
 static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
 {
 	struct drm_dp_sideband_msg_req_body req;
@@ -1415,7 +1411,6 @@
 
 	return 0;
 }
-#endif
 
 static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
 				    bool up, u8 *msg, int len)
@@ -1981,30 +1976,65 @@
 }
 EXPORT_SYMBOL(drm_dp_update_payload_part2);
 
-#if 0 /* unused as of yet */
-static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
+int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
 				 struct drm_dp_mst_port *port,
-				 int offset, int size)
+				 int offset, int size, u8 *bytes)
 {
 	int len;
+	int ret;
 	struct drm_dp_sideband_msg_tx *txmsg;
+	struct drm_dp_mst_branch *mstb;
+
+	memset(bytes, 0, size);
+
+	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+	if (!mstb)
+		return -EINVAL;
 
 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
-	if (!txmsg)
-		return -ENOMEM;
+	if (!txmsg) {
+		ret = -ENOMEM;
+		goto fail_put;
+	}
 
-	len = build_dpcd_read(txmsg, port->port_num, 0, 8);
-	txmsg->dst = port->parent;
+	len = build_dpcd_read(txmsg, port->port_num, offset, size);
+	txmsg->dst = mstb;
 
 	drm_dp_queue_down_tx(mgr, txmsg);
+	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
+	if (ret <= 0) {
+		DRM_ERROR("dpcd read failed\n");
+		goto fail_free_msg;
+	}
 
-	return 0;
+	if (txmsg->reply.reply_type == 1) {
+		DRM_ERROR("dpcd read nack received\n");
+		ret = -EINVAL;
+		goto fail_free_msg;
+	}
+
+	if (port->port_num != txmsg->reply.u.remote_dpcd_read_ack.port_number) {
+		DRM_ERROR("got incorrect port in response\n");
+		ret = -EINVAL;
+		goto fail_free_msg;
+	}
+
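+	/* never copy more than the sink actually returned */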
+	if (size > txmsg->reply.u.remote_dpcd_read_ack.num_bytes)
+		size = txmsg->reply.u.remote_dpcd_read_ack.num_bytes;
+
+	memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, size);
+
+fail_free_msg:
+	kfree(txmsg);
+fail_put:
+	drm_dp_put_mst_branch_device(mstb);
+	return ret;
 }
-#endif
+EXPORT_SYMBOL(drm_dp_send_dpcd_read);
 
-static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
-				  struct drm_dp_mst_port *port,
-				  int offset, int size, u8 *bytes)
+int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
+			   struct drm_dp_mst_port *port,
+			   int offset, int size, u8 *bytes)
 {
 	int len;
 	int ret;
@@ -2038,6 +2068,7 @@
 	drm_dp_put_mst_branch_device(mstb);
 	return ret;
 }
+EXPORT_SYMBOL(drm_dp_send_dpcd_write);
 
 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
 {
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index dc85ccc..3d15cd9 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -882,6 +882,30 @@
 			0xe4);
 }
 
+static void dp_catalog_ctrl_lane_pnswap(struct dp_catalog_ctrl *ctrl,
+						u8 ln_pnswap)
+{
+	struct dp_catalog_private *catalog;
+	struct dp_io_data *io_data;
+	u32 cfg0, cfg1;
+
+	catalog = dp_catalog_get_priv(ctrl);
+
+	cfg0 = 0x0a;
+	cfg1 = 0x0a;
+
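+	/* P/N swap bits 0/1 program PHY lane block tx0, bits 2/3 program tx1 */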
+	cfg0 |= ((ln_pnswap >> 0) & 0x1) << 0;
+	cfg0 |= ((ln_pnswap >> 1) & 0x1) << 2;
+	cfg1 |= ((ln_pnswap >> 2) & 0x1) << 0;
+	cfg1 |= ((ln_pnswap >> 3) & 0x1) << 2;
+
+	io_data = catalog->io.dp_ln_tx0;
+	dp_write(catalog->exe_mode, io_data, TXn_TX_POL_INV, cfg0);
+
+	io_data = catalog->io.dp_ln_tx1;
+	dp_write(catalog->exe_mode, io_data, TXn_TX_POL_INV, cfg1);
+}
+
 static void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog_ctrl *ctrl,
 						bool enable)
 {
@@ -2509,6 +2533,7 @@
 		.state_ctrl     = dp_catalog_ctrl_state_ctrl,
 		.config_ctrl    = dp_catalog_ctrl_config_ctrl,
 		.lane_mapping   = dp_catalog_ctrl_lane_mapping,
+		.lane_pnswap    = dp_catalog_ctrl_lane_pnswap,
 		.mainlink_ctrl  = dp_catalog_ctrl_mainlink_ctrl,
 		.set_pattern    = dp_catalog_ctrl_set_pattern,
 		.reset          = dp_catalog_ctrl_reset,
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
index 78aec713..85ed209 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.h
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.h
@@ -93,6 +93,7 @@
 	void (*config_ctrl)(struct dp_catalog_ctrl *ctrl, u8 ln_cnt);
 	void (*lane_mapping)(struct dp_catalog_ctrl *ctrl, bool flipped,
 				char *lane_map);
+	void (*lane_pnswap)(struct dp_catalog_ctrl *ctrl, u8 ln_pnswap);
 	void (*mainlink_ctrl)(struct dp_catalog_ctrl *ctrl, bool enable);
 	void (*set_pattern)(struct dp_catalog_ctrl *ctrl, u32 pattern);
 	void (*reset)(struct dp_catalog_ctrl *ctrl);
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog_v420.c b/drivers/gpu/drm/msm/dp/dp_catalog_v420.c
index d5eebb4..51fa987 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog_v420.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog_v420.c
@@ -266,6 +266,30 @@
 	}
 }
 
+static void dp_catalog_ctrl_lane_pnswap_v420(struct dp_catalog_ctrl *ctrl,
+						u8 ln_pnswap)
+{
+	struct dp_catalog_private_v420 *catalog;
+	struct dp_io_data *io_data;
+	u32 cfg0, cfg1;
+
+	catalog = dp_catalog_get_priv_v420(ctrl);
+
+	cfg0 = 0x0a;
+	cfg1 = 0x0a;
+
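+	/* P/N swap bits 0/1 -> tx0, bits 2/3 -> tx1 */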
+	cfg0 |= ((ln_pnswap >> 0) & 0x1) << 0;
+	cfg0 |= ((ln_pnswap >> 1) & 0x1) << 2;
+	cfg1 |= ((ln_pnswap >> 2) & 0x1) << 0;
+	cfg1 |= ((ln_pnswap >> 3) & 0x1) << 2;
+
+	io_data = catalog->io->dp_ln_tx0;
+	dp_write(catalog->exe_mode, io_data, TXn_TX_POL_INV_V420, cfg0);
+
+	io_data = catalog->io->dp_ln_tx1;
+	dp_write(catalog->exe_mode, io_data, TXn_TX_POL_INV_V420, cfg1);
+}
+
 static void dp_catalog_put_v420(struct dp_catalog *catalog)
 {
 	struct dp_catalog_private_v420 *catalog_priv;
@@ -316,6 +340,7 @@
 	catalog->panel.config_msa  = dp_catalog_panel_config_msa_v420;
 	catalog->ctrl.phy_lane_cfg = dp_catalog_ctrl_phy_lane_cfg_v420;
 	catalog->ctrl.update_vx_px = dp_catalog_ctrl_update_vx_px_v420;
+	catalog->ctrl.lane_pnswap = dp_catalog_ctrl_lane_pnswap_v420;
 
 	/* Set the default execution mode to hardware mode */
 	dp_catalog_set_exe_mode_v420(catalog, "hw");
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index 75a2f16..d84417e 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -161,6 +161,8 @@
 	if (enable) {
 		ctrl->catalog->lane_mapping(ctrl->catalog, ctrl->orientation,
 						ctrl->parser->l_map);
+		ctrl->catalog->lane_pnswap(ctrl->catalog,
+						ctrl->parser->l_pnswap);
 		ctrl->catalog->mst_config(ctrl->catalog, ctrl->mst_mode);
 		ctrl->catalog->config_ctrl(ctrl->catalog,
 				ctrl->link->link_params.lane_count);
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c
index e5f1b3e..e581303 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.c
+++ b/drivers/gpu/drm/msm/dp/dp_debug.c
@@ -11,7 +11,6 @@
 #include "dp_power.h"
 #include "dp_catalog.h"
 #include "dp_aux.h"
-#include "dp_ctrl.h"
 #include "dp_debug.h"
 #include "drm_connector.h"
 #include "sde_connector.h"
@@ -41,6 +40,8 @@
 	struct device *dev;
 	struct dp_debug dp_debug;
 	struct dp_parser *parser;
+	struct dp_ctrl *ctrl;
+	struct mutex lock;
 };
 
 static int dp_debug_get_edid_buf(struct dp_debug_private *debug)
@@ -90,6 +91,8 @@
 	if (!debug)
 		return -ENODEV;
 
+	mutex_lock(&debug->lock);
+
 	if (*ppos)
 		goto bail;
 
@@ -161,6 +164,7 @@
 	 */
 	pr_info("[%s]\n", edid ? "SET" : "CLEAR");
 
+	mutex_unlock(&debug->lock);
 	return rc;
 }
 
@@ -180,6 +184,8 @@
 	if (!debug)
 		return -ENODEV;
 
+	mutex_lock(&debug->lock);
+
 	if (*ppos)
 		goto bail;
 
@@ -260,6 +266,7 @@
 	} else
 		debug->aux->dpcd_updated(debug->aux);
 
+	mutex_unlock(&debug->lock);
 	return rc;
 }
 
@@ -747,7 +754,7 @@
 		const char __user *user_buff, size_t count, loff_t *ppos)
 {
 	struct dp_debug_private *debug = file->private_data;
-	char *buf;
+	char buf[SZ_32];
 	size_t len = 0;
 
 	if (!debug)
@@ -757,7 +764,9 @@
 		return 0;
 
 	len = min_t(size_t, count, SZ_32 - 1);
-	buf = memdup_user(user_buff, len);
+	if (copy_from_user(buf, user_buff, len))
+		goto end;
+
 	buf[len] = '\0';
 
 	if (sscanf(buf, "%3s", debug->exe_mode) != 1)
@@ -1437,6 +1446,7 @@
 
 		if (dp_debug_get_dpcd_buf(debug)) {
 			devm_kfree(debug->dev, debug->edid);
+			debug->edid = NULL;
 			return;
 		}
 
@@ -1444,6 +1454,9 @@
 		debug->aux->set_sim_mode(debug->aux, true,
 			debug->edid, debug->dpcd);
 	} else {
+		debug->aux->abort(debug->aux);
+		debug->ctrl->abort(debug->ctrl);
+
 		debug->aux->set_sim_mode(debug->aux, false, NULL, NULL);
 		debug->dp_debug.sim_mode = false;
 
@@ -1482,6 +1495,8 @@
 	if (*ppos)
 		return 0;
 
+	mutex_lock(&debug->lock);
+
 	/* Leave room for termination char */
 	len = min_t(size_t, count, SZ_8 - 1);
 	if (copy_from_user(buf, user_buff, len))
@@ -1494,6 +1509,7 @@
 
 	dp_debug_set_sim_mode(debug, sim);
 end:
+	mutex_unlock(&debug->lock);
 	return len;
 }
 
@@ -1941,6 +1957,14 @@
 		       DEBUG_NAME, rc);
 	}
 
+	file = debugfs_create_u32("max_lclk_khz", 0644, dir,
+			&debug->parser->max_lclk_khz);
+	if (IS_ERR_OR_NULL(file)) {
+		rc = PTR_ERR(file);
+		pr_err("[%s] debugfs max_lclk_khz failed, rc=%d\n",
+		       DEBUG_NAME, rc);
+	}
+
 	return 0;
 
 error_remove_dir:
@@ -1972,7 +1996,9 @@
 
 	debug = container_of(dp_debug, struct dp_debug_private, dp_debug);
 
+	mutex_lock(&debug->lock);
 	dp_debug_set_sim_mode(debug, false);
+	mutex_unlock(&debug->lock);
 }
 
 struct dp_debug *dp_debug_get(struct dp_debug_in *in)
@@ -1981,7 +2007,8 @@
 	struct dp_debug_private *debug;
 	struct dp_debug *dp_debug;
 
-	if (!in->dev || !in->panel || !in->hpd || !in->link || !in->catalog) {
+	if (!in->dev || !in->panel || !in->hpd || !in->link ||
+	    !in->catalog || !in->ctrl) {
 		pr_err("invalid input\n");
 		rc = -EINVAL;
 		goto error;
@@ -2002,12 +2029,15 @@
 	debug->connector = in->connector;
 	debug->catalog = in->catalog;
 	debug->parser = in->parser;
+	debug->ctrl = in->ctrl;
 
 	dp_debug = &debug->dp_debug;
 	dp_debug->vdisplay = 0;
 	dp_debug->hdisplay = 0;
 	dp_debug->vrefresh = 0;
 
+	mutex_init(&debug->lock);
+
 	rc = dp_debug_init(dp_debug);
 	if (rc) {
 		devm_kfree(in->dev, debug);
@@ -2059,6 +2089,8 @@
 
 	dp_debug_deinit(dp_debug);
 
+	mutex_destroy(&debug->lock);
+
 	if (debug->edid)
 		devm_kfree(debug->dev, debug->edid);
 
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h
index dfbc652..11b890e 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.h
+++ b/drivers/gpu/drm/msm/dp/dp_debug.h
@@ -7,6 +7,7 @@
 #define _DP_DEBUG_H_
 
 #include "dp_panel.h"
+#include "dp_ctrl.h"
 #include "dp_link.h"
 #include "dp_usbpd.h"
 #include "dp_aux.h"
@@ -63,6 +64,7 @@
 	struct drm_connector **connector;
 	struct dp_catalog *catalog;
 	struct dp_parser *parser;
+	struct dp_ctrl *ctrl;
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index bdd2478..b326a50 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -103,6 +103,8 @@
 
 	u32 tot_dsc_blks_in_use;
 
+	bool process_hpd_connect;
+
 	struct notifier_block usb_nb;
 };
 
@@ -111,11 +113,6 @@
 	{}
 };
 
-static bool dp_display_framework_ready(struct dp_display_private *dp)
-{
-	return dp->dp_display.post_open ? false : true;
-}
-
 static inline bool dp_display_is_hdcp_enabled(struct dp_display_private *dp)
 {
 	return dp->link->hdcp_status.hdcp_version && dp->hdcp.ops;
@@ -236,16 +233,64 @@
 		struct sde_hdcp_ops *ops = dev->ops;
 		void *fd = dev->fd;
 
-		if (!fd || !ops || (dp->hdcp.source_cap & dev->ver))
+		if (!fd || !ops)
 			continue;
 
-		if (ops->feature_supported(fd))
+		if (ops->set_mode && ops->set_mode(fd, dp->mst.mst_active))
+			continue;
+
+		if (!(dp->hdcp.source_cap & dev->ver) &&
+				ops->feature_supported &&
+				ops->feature_supported(fd))
 			dp->hdcp.source_cap |= dev->ver;
 	}
 
 	dp_display_update_hdcp_status(dp, false);
 }
 
+static void dp_display_hdcp_register_streams(struct dp_display_private *dp)
+{
+	int rc;
+	size_t i;
+	struct sde_hdcp_ops *ops = dp->hdcp.ops;
+	void *data = dp->hdcp.data;
+
+	if (dp_display_is_ready(dp) && dp->mst.mst_active && ops &&
+			ops->register_streams) {
+		struct stream_info streams[DP_STREAM_MAX];
+		int index = 0;
+
+		pr_debug("Registering all active panel streams with HDCP\n");
+		for (i = DP_STREAM_0; i < DP_STREAM_MAX; i++) {
+			if (!dp->active_panels[i])
+				continue;
+			streams[index].stream_id = i;
+			streams[index].virtual_channel =
+				dp->active_panels[i]->vcpi;
+			index++;
+		}
+
+		if (index > 0) {
+			rc = ops->register_streams(data, index, streams);
+			if (rc)
+				pr_err("failed to register streams. rc = %d\n",
+					rc);
+		}
+	}
+}
+
+static void dp_display_hdcp_deregister_stream(struct dp_display_private *dp,
+		enum dp_stream_id stream_id)
+{
+	if (dp->hdcp.ops->deregister_streams) {
+		struct stream_info stream = {stream_id,
+				dp->active_panels[stream_id]->vcpi};
+
+		pr_debug("Deregistering stream within HDCP library\n");
+		dp->hdcp.ops->deregister_streams(dp->hdcp.data, 1, &stream);
+	}
+}
+
 static void dp_display_hdcp_cb_work(struct work_struct *work)
 {
 	struct dp_display_private *dp;
@@ -255,12 +300,21 @@
 	void *data;
 	int rc = 0;
 	u32 hdcp_auth_state;
+	u8 sink_status = 0;
 
 	dp = container_of(dw, struct dp_display_private, hdcp_cb_work);
 
 	if (!dp->power_on || !dp->is_connected || atomic_read(&dp->aborted))
 		return;
 
+	drm_dp_dpcd_readb(dp->aux->drm_aux, DP_SINK_STATUS, &sink_status);
+	sink_status &= (DP_RECEIVE_PORT_0_STATUS | DP_RECEIVE_PORT_1_STATUS);
+	if (sink_status < 1) {
+		pr_debug("Sink not synchronized. Queuing again then exiting\n");
+		queue_delayed_work(dp->wq, &dp->hdcp_cb_work, HZ);
+		return;
+	}
+
 	status = &dp->link->hdcp_status;
 
 	if (status->hdcp_state == HDCP_STATE_INACTIVE) {
@@ -268,6 +322,11 @@
 		dp_display_update_hdcp_info(dp);
 
 		if (dp_display_is_hdcp_enabled(dp)) {
+			if (dp->hdcp.ops && dp->hdcp.ops->on &&
+					dp->hdcp.ops->on(dp->hdcp.data)) {
+				dp_display_update_hdcp_status(dp, true);
+				return;
+			}
 			status->hdcp_state = HDCP_STATE_AUTHENTICATING;
 		} else {
 			dp_display_update_hdcp_status(dp, true);
@@ -294,11 +353,18 @@
 
 	switch (status->hdcp_state) {
 	case HDCP_STATE_AUTHENTICATING:
+		dp_display_hdcp_register_streams(dp);
 		if (dp->hdcp.ops && dp->hdcp.ops->authenticate)
 			rc = dp->hdcp.ops->authenticate(data);
 		break;
 	case HDCP_STATE_AUTH_FAIL:
 		if (dp_display_is_ready(dp) && dp->power_on) {
+			if (ops && ops->on && ops->on(data)) {
+				dp_display_update_hdcp_status(dp, true);
+				return;
+			}
+			dp_display_hdcp_register_streams(dp);
+			status->hdcp_state = HDCP_STATE_AUTHENTICATING;
 			if (ops && ops->reauthenticate) {
 				rc = ops->reauthenticate(data);
 				if (rc)
@@ -309,6 +375,7 @@
 		}
 		break;
 	default:
+		dp_display_hdcp_register_streams(dp);
 		break;
 	}
 }
@@ -502,36 +569,6 @@
 			envp);
 }
 
-static void dp_display_post_open(struct dp_display *dp_display)
-{
-	struct drm_connector *connector;
-	struct dp_display_private *dp;
-
-	if (!dp_display) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	dp = container_of(dp_display, struct dp_display_private, dp_display);
-	if (IS_ERR_OR_NULL(dp)) {
-		pr_err("invalid params\n");
-		return;
-	}
-
-	connector = dp->dp_display.base_connector;
-
-	if (!connector) {
-		pr_err("base connector not set\n");
-		return;
-	}
-
-	/* if cable is already connected, send notification */
-	if (dp->hpd->hpd_high)
-		queue_work(dp->wq, &dp->connect_work);
-	else
-		dp_display->post_open = NULL;
-}
-
 static int dp_display_send_hpd_notification(struct dp_display_private *dp)
 {
 	int ret = 0;
@@ -541,6 +578,8 @@
 
 	if (!dp->mst.mst_active)
 		dp->dp_display.is_sst_connected = hpd;
+	else
+		dp->dp_display.is_sst_connected = false;
 
 	reinit_completion(&dp->notification_comp);
 	dp_display_send_hpd_event(dp);
@@ -551,9 +590,6 @@
 	if (!dp->mst.mst_active && (dp->power_on == hpd))
 		goto skip_wait;
 
-	if (!dp_display_framework_ready(dp))
-		goto skip_wait;
-
 	if (!wait_for_completion_timeout(&dp->notification_comp,
 						HZ * 5)) {
 		pr_warn("%s timeout\n", hpd ? "connect" : "disconnect");
@@ -571,30 +607,47 @@
 	dp->panel->mst_state = state;
 }
 
-static void dp_display_process_mst_hpd_high(struct dp_display_private *dp)
+static void dp_display_process_mst_hpd_high(struct dp_display_private *dp,
+						bool mst_probe)
 {
 	bool is_mst_receiver;
 	struct dp_mst_hpd_info info;
+	int ret;
 
-	if (dp->parser->has_mst && dp->mst.drm_registered) {
-		DP_MST_DEBUG("mst_hpd_high work\n");
+	if (!dp->parser->has_mst || !dp->mst.drm_registered) {
+		DP_MST_DEBUG("mst not enabled. has_mst:%d, registered:%d\n",
+				dp->parser->has_mst, dp->mst.drm_registered);
+		return;
+	}
 
+	DP_MST_DEBUG("mst_hpd_high work. mst_probe:%d\n", mst_probe);
+
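+	/*
+	 * This is called twice per connect: first (mst_probe == false) to
+	 * enable MST on the sink, then (mst_probe == true) to notify the
+	 * MST manager once the main link is up.
+	 */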
+	if (!dp->mst.mst_active) {
 		is_mst_receiver = dp->panel->read_mst_cap(dp->panel);
 
-		if (is_mst_receiver && !dp->mst.mst_active) {
-
-			/* clear sink mst state */
-			drm_dp_dpcd_writeb(dp->aux->drm_aux, DP_MSTM_CTRL, 0);
-
-			dp_display_update_mst_state(dp, true);
-
-			info.mst_protocol = dp->parser->has_mst_sideband;
-			info.mst_port_cnt = dp->debug->mst_port_cnt;
-			info.edid = dp->debug->get_edid(dp->debug);
-
-			if (dp->mst.cbs.hpd)
-				dp->mst.cbs.hpd(&dp->dp_display, true, &info);
+		if (!is_mst_receiver) {
+			DP_MST_DEBUG("sink doesn't support mst\n");
+			return;
 		}
+
+		/* clear sink mst state */
+		drm_dp_dpcd_writeb(dp->aux->drm_aux, DP_MSTM_CTRL, 0);
+
+		ret = drm_dp_dpcd_writeb(dp->aux->drm_aux, DP_MSTM_CTRL,
+				 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
+		if (ret < 0) {
+			pr_err("sink mst enablement failed\n");
+			return;
+		}
+
+		dp_display_update_mst_state(dp, true);
+	} else if (dp->mst.mst_active && mst_probe) {
+		info.mst_protocol = dp->parser->has_mst_sideband;
+		info.mst_port_cnt = dp->debug->mst_port_cnt;
+		info.edid = dp->debug->get_edid(dp->debug);
+
+		if (dp->mst.cbs.hpd)
+			dp->mst.cbs.hpd(&dp->dp_display, true, &info);
 	}
 
 	DP_MST_DEBUG("mst_hpd_high. mst_active:%d\n", dp->mst.mst_active);
@@ -648,7 +701,16 @@
 
 static int dp_display_process_hpd_high(struct dp_display_private *dp)
 {
-	int rc = 0;
+	int rc = -EINVAL;
+
+	mutex_lock(&dp->session_lock);
+
+	if (dp->is_connected) {
+		pr_debug("dp already connected, skipping hpd high\n");
+		mutex_unlock(&dp->session_lock);
+		rc = -EISCONN;
+		goto end;
+	}
 
 	dp->is_connected = true;
 
@@ -671,25 +733,32 @@
 	 * ETIMEDOUT --> cable may have been removed
 	 * ENOTCONN --> no downstream device connected
 	 */
-	if (rc == -ETIMEDOUT || rc == -ENOTCONN)
+	if (rc == -ETIMEDOUT || rc == -ENOTCONN) {
+		dp->is_connected = false;
 		goto end;
+	}
 
 	dp->link->process_request(dp->link);
 	dp->panel->handle_sink_request(dp->panel);
 
-	dp_display_process_mst_hpd_high(dp);
+	dp_display_process_mst_hpd_high(dp, false);
 
-	mutex_lock(&dp->session_lock);
 	rc = dp->ctrl->on(dp->ctrl, dp->mst.mst_active,
 				dp->panel->fec_en, false);
 	if (rc) {
-		mutex_unlock(&dp->session_lock);
+		dp->is_connected = false;
 		goto end;
 	}
+
+	dp->process_hpd_connect = false;
+
+	dp_display_process_mst_hpd_high(dp, true);
+end:
 	mutex_unlock(&dp->session_lock);
 
-	dp_display_send_hpd_notification(dp);
-end:
+	if (!rc)
+		dp_display_send_hpd_notification(dp);
+
 	return rc;
 }
 
@@ -715,6 +784,7 @@
 	int rc = 0;
 
 	dp->is_connected = false;
+	dp->process_hpd_connect = false;
 
 	dp_display_process_mst_hpd_low(dp);
 
@@ -755,11 +825,15 @@
 			goto end;
 	}
 
+	mutex_lock(&dp->session_lock);
 	dp_display_host_init(dp);
 
 	/* check for hpd high */
 	if (dp->hpd->hpd_high)
 		queue_work(dp->wq, &dp->connect_work);
+	else
+		dp->process_hpd_connect = true;
+	mutex_unlock(&dp->session_lock);
 end:
 	return rc;
 }
@@ -793,8 +867,10 @@
 {
 	int idx;
 	struct dp_panel *dp_panel;
+	struct dp_link_hdcp_status *status = &dp->link->hdcp_status;
 
-	if (dp_display_is_hdcp_enabled(dp)) {
+	if (dp_display_is_hdcp_enabled(dp) &&
+			status->hdcp_state != HDCP_STATE_INACTIVE) {
 		cancel_delayed_work_sync(&dp->hdcp_cb_work);
 		if (dp->hdcp.ops->off)
 			dp->hdcp.ops->off(dp->hdcp.data);
@@ -878,18 +954,12 @@
 		goto end;
 	}
 
-	/*
-	 * In case cable/dongle is disconnected during adb shell stop,
-	 * reset psm_enabled flag to false since it is no more needed
-	 */
-	if (dp->dp_display.post_open)
-		dp->debug->psm_enabled = false;
-
-	if (dp->debug->psm_enabled)
+	mutex_lock(&dp->session_lock);
+	if (dp->debug->psm_enabled && dp->core_initialized)
 		dp->link->psm_config(dp->link, &dp->panel->link_info, true);
+	mutex_unlock(&dp->session_lock);
 
 	dp_display_disconnect_sync(dp);
-	dp->dp_display.post_open = NULL;
 
 	if (!dp->debug->sim_mode && !dp->parser->no_aux_switch
 	    && !dp->parser->gpio_aux_switch)
@@ -936,11 +1006,19 @@
 	struct dp_display_private *dp = container_of(work,
 			struct dp_display_private, attention_work);
 
-	if (dp->debug->mst_hpd_sim)
-		goto mst_attention;
+	mutex_lock(&dp->session_lock);
 
-	if (dp->link->process_request(dp->link))
+	if (dp->debug->mst_hpd_sim || !dp->core_initialized) {
+		mutex_unlock(&dp->session_lock);
+		goto mst_attention;
+	}
+
+	if (dp->link->process_request(dp->link)) {
+		mutex_unlock(&dp->session_lock);
 		goto cp_irq;
+	}
+
+	mutex_unlock(&dp->session_lock);
 
 	if (dp->link->sink_request & DS_PORT_STATUS_CHANGED) {
 		if (dp_display_is_sink_count_zero(dp)) {
@@ -997,16 +1075,16 @@
 		return -ENODEV;
 	}
 
-	pr_debug("hpd_irq:%d, hpd_high:%d, power_on:%d\n",
+	pr_debug("hpd_irq:%d, hpd_high:%d, power_on:%d, is_connected:%d\n",
 			dp->hpd->hpd_irq, dp->hpd->hpd_high,
-			dp->power_on);
+			dp->power_on, dp->is_connected);
 
 	if (!dp->hpd->hpd_high)
 		dp_display_disconnect_sync(dp);
 	else if ((dp->hpd->hpd_irq && dp->core_initialized) ||
 			dp->debug->mst_hpd_sim)
 		queue_work(dp->wq, &dp->attention_work);
-	else if (!dp->power_on)
+	else if (dp->process_hpd_connect || !dp->is_connected)
 		queue_work(dp->wq, &dp->connect_work);
 	else
 		pr_debug("ignored\n");
@@ -1228,6 +1306,7 @@
 	debug_in.connector = &dp->dp_display.base_connector;
 	debug_in.catalog = dp->catalog;
 	debug_in.parser = dp->parser;
+	debug_in.ctrl = dp->ctrl;
 
 	dp->debug = dp_debug_get(&debug_in);
 	if (IS_ERR(dp->debug)) {
@@ -1399,7 +1478,7 @@
 
 static int dp_display_set_stream_info(struct dp_display *dp_display,
 			void *panel, u32 strm_id, u32 start_slot,
-			u32 num_slots, u32 pbn)
+			u32 num_slots, u32 pbn, int vcpi)
 {
 	int rc = 0;
 	struct dp_panel *dp_panel;
@@ -1432,7 +1511,7 @@
 	if (panel) {
 		dp_panel = panel;
 		dp_panel->set_stream_info(dp_panel, strm_id, start_slot,
-				num_slots, pbn);
+				num_slots, pbn, vcpi);
 	}
 
 	mutex_unlock(&dp->session_lock);
@@ -1539,8 +1618,6 @@
 	cancel_delayed_work_sync(&dp->hdcp_cb_work);
 	queue_delayed_work(dp->wq, &dp->hdcp_cb_work, HZ);
 end:
-	/* clear framework event notifier */
-	dp_display->post_open = NULL;
 	dp->aux->state |= DP_STATE_CTRL_POWERED_ON;
 
 	complete_all(&dp->notification_comp);
@@ -1552,7 +1629,9 @@
 {
 	struct dp_display_private *dp;
 	struct dp_panel *dp_panel = panel;
+	struct dp_link_hdcp_status *status;
 	int rc = 0;
+	size_t i;
 
 	if (!dp_display || !panel) {
 		pr_err("invalid input\n");
@@ -1563,19 +1642,35 @@
 
 	mutex_lock(&dp->session_lock);
 
+	status = &dp->link->hdcp_status;
+
 	if (!dp->power_on) {
 		pr_debug("stream already powered off, return\n");
 		goto end;
 	}
 
-	if (dp_display_is_hdcp_enabled(dp)) {
-		cancel_delayed_work_sync(&dp->hdcp_cb_work);
+	if (dp_display_is_hdcp_enabled(dp) &&
+			status->hdcp_state != HDCP_STATE_INACTIVE) {
+		flush_delayed_work(&dp->hdcp_cb_work);
+		if (dp->mst.mst_active) {
+			dp_display_hdcp_deregister_stream(dp,
+				dp_panel->stream_id);
+			for (i = DP_STREAM_0; i < DP_STREAM_MAX; i++) {
+				if (i != dp_panel->stream_id &&
+						dp->active_panels[i]) {
+					pr_debug("Streams are still active. Skip disabling HDCP\n");
+					goto stream;
+				}
+			}
+		}
+
 		if (dp->hdcp.ops->off)
 			dp->hdcp.ops->off(dp->hdcp.data);
 
 		dp_display_update_hdcp_status(dp, true);
 	}
 
+stream:
 	if (dp_panel->audio_supported)
 		dp_panel->audio->off(dp_panel->audio);
 
@@ -1689,14 +1784,6 @@
 		dp->link->psm_config(dp->link, &dp->panel->link_info, true);
 		dp->debug->psm_enabled = true;
 
-		/*
-		 * In case of framework reboot, the DP off sequence is executed
-		 * without any notification from driver. Initialize post_open
-		 * callback to notify DP connection once framework restarts.
-		 */
-		dp_display->post_open = dp_display_post_open;
-		dp->dp_display.is_sst_connected = false;
-
 		dp->ctrl->off(dp->ctrl);
 		dp_display_host_deinit(dp);
 	}
@@ -2259,6 +2346,77 @@
 	return 0;
 }
 
+static int dp_display_mst_connector_update_link_info(
+			struct dp_display *dp_display,
+			struct drm_connector *connector)
+{
+	int rc = 0;
+	struct sde_connector *sde_conn;
+	struct dp_panel *dp_panel;
+	struct dp_display_private *dp;
+
+	if (!dp_display || !connector) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+	if (!dp->mst.drm_registered) {
+		pr_debug("drm mst not registered\n");
+		return -EPERM;
+	}
+
+	sde_conn = to_sde_connector(connector);
+	if (!sde_conn->drv_panel) {
+		pr_err("invalid panel for connector:%d\n", connector->base.id);
+		return -EINVAL;
+	}
+
+	dp_panel = sde_conn->drv_panel;
+
+	memcpy(dp_panel->dpcd, dp->panel->dpcd,
+			DP_RECEIVER_CAP_SIZE + 1);
+	memcpy(dp_panel->dsc_dpcd, dp->panel->dsc_dpcd,
+			DP_RECEIVER_DSC_CAP_SIZE + 1);
+	memcpy(&dp_panel->link_info, &dp->panel->link_info,
+			sizeof(dp_panel->link_info));
+
+	DP_MST_DEBUG("dp mst connector:%d link info updated\n",
+			connector->base.id);
+
+	return rc;
+}
+
+static int dp_display_mst_get_fixed_topology_port(
+			struct dp_display *dp_display,
+			u32 strm_id, u32 *port_num)
+{
+	struct dp_display_private *dp;
+	u32 port;
+
+	if (!dp_display) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	if (strm_id >= DP_STREAM_MAX) {
+		pr_err("invalid stream id:%u\n", strm_id);
+		return -EINVAL;
+	}
+
+	dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+	port = dp->parser->mst_fixed_port[strm_id];
+
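+	/* a zero or out-of-range entry means no fixed port is configured */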
+	if (!port || port > 255)
+		return -ENOENT;
+
+	if (port_num)
+		*port_num = port;
+
+	return 0;
+}
+
 static int dp_display_get_mst_caps(struct dp_display *dp_display,
 			struct dp_mst_caps *mst_caps)
 {
@@ -2332,7 +2490,7 @@
 	g_dp_display->unprepare     = dp_display_unprepare;
 	g_dp_display->request_irq   = dp_request_irq;
 	g_dp_display->get_debug     = dp_get_debug;
-	g_dp_display->post_open     = dp_display_post_open;
+	g_dp_display->post_open     = NULL;
 	g_dp_display->post_init     = dp_display_post_init;
 	g_dp_display->config_hdr    = dp_display_config_hdr;
 	g_dp_display->mst_install   = dp_display_mst_install;
@@ -2342,12 +2500,16 @@
 					dp_display_mst_connector_uninstall;
 	g_dp_display->mst_connector_update_edid =
 					dp_display_mst_connector_update_edid;
+	g_dp_display->mst_connector_update_link_info =
+				dp_display_mst_connector_update_link_info;
 	g_dp_display->get_mst_caps = dp_display_get_mst_caps;
 	g_dp_display->set_stream_info = dp_display_set_stream_info;
 	g_dp_display->update_pps = dp_display_update_pps;
 	g_dp_display->convert_to_dp_mode = dp_display_convert_to_dp_mode;
 	g_dp_display->mst_get_connector_info =
 					dp_display_mst_get_connector_info;
+	g_dp_display->mst_get_fixed_topology_port =
+					dp_display_mst_get_fixed_topology_port;
 
 	rc = component_add(&pdev->dev, &dp_display_comp_ops);
 	if (rc) {
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index 410cee7..fe332af 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -100,13 +100,18 @@
 	int (*mst_connector_update_edid)(struct dp_display *dp_display,
 			struct drm_connector *connector,
 			struct edid *edid);
+	int (*mst_connector_update_link_info)(struct dp_display *dp_display,
+			struct drm_connector *connector);
 	int (*mst_get_connector_info)(struct dp_display *dp_display,
 			struct drm_connector *connector,
 			struct dp_mst_connector *mst_conn);
+	int (*mst_get_fixed_topology_port)(struct dp_display *dp_display,
+			u32 strm_id, u32 *port_num);
 	int (*get_mst_caps)(struct dp_display *dp_display,
 			struct dp_mst_caps *mst_caps);
 	int (*set_stream_info)(struct dp_display *dp_display, void *panel,
-			u32 strm_id, u32 start_slot, u32 num_slots, u32 pbn);
+			u32 strm_id, u32 start_slot, u32 num_slots, u32 pbn,
+			int vcpi);
 	void (*convert_to_dp_mode)(struct dp_display *dp_display, void *panel,
 			const struct drm_display_mode *drm_mode,
 			struct dp_display_mode *dp_mode);
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 9b3bb24..b3b116a 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -114,7 +114,7 @@
 	}
 
 	/* for SST force stream id, start slot and total slots to 0 */
-	dp->set_stream_info(dp, bridge->dp_panel, 0, 0, 0, 0);
+	dp->set_stream_info(dp, bridge->dp_panel, 0, 0, 0, 0, 0);
 
 	rc = dp->enable(dp, bridge->dp_panel);
 	if (rc) {
diff --git a/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c b/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c
index 3dd0fa1..f71c25e 100644
--- a/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c
+++ b/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c
@@ -51,7 +51,6 @@
 	u8 rx_status;
 	char abort_mask;
 
-	bool cp_irq_done;
 	bool polling;
 };
 
@@ -66,6 +65,25 @@
 	struct dp_hdcp2p2_int_set *int_set;
 };
 
+static inline int dp_hdcp2p2_valid_handle(struct dp_hdcp2p2_ctrl *ctrl)
+{
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	if (!ctrl->lib_ctx) {
+		pr_err("HDCP library needs to be acquired\n");
+		return -EINVAL;
+	}
+
+	if (!ctrl->lib) {
+		pr_err("invalid lib ops data\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
 static inline bool dp_hdcp2p2_is_valid_state(struct dp_hdcp2p2_ctrl *ctrl)
 {
 	enum hdcp_transport_wakeup_cmd cmd;
@@ -174,6 +192,7 @@
 	if (dp_hdcp2p2_copy_buf(ctrl, data))
 		goto exit;
 
+	ctrl->polling = false;
 	switch (data->cmd) {
 	case HDCP_TRANSPORT_CMD_STATUS_SUCCESS:
 		atomic_set(&ctrl->auth_state, HDCP_STATE_AUTHENTICATED);
@@ -216,38 +235,77 @@
 	atomic_set(&ctrl->auth_state, HDCP_STATE_INACTIVE);
 }
 
+static int dp_hdcp2p2_register(void *input, bool mst_enabled)
+{
+	int rc;
+	enum sde_hdcp_2x_device_type device_type;
+	struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input;
+
+	rc = dp_hdcp2p2_valid_handle(ctrl);
+	if (rc)
+		return rc;
+
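+	/* select the HDCP 2.x transmitter type based on the MST session state */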
+	if (mst_enabled)
+		device_type = HDCP_TXMTR_DP_MST;
+	else
+		device_type = HDCP_TXMTR_DP;
+
+	return sde_hdcp_2x_enable(ctrl->lib_ctx, device_type);
+}
+
+static int dp_hdcp2p2_on(void *input)
+{
+	int rc = 0;
+	struct dp_hdcp2p2_ctrl *ctrl = input;
+	struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_INVALID};
+
+	rc = dp_hdcp2p2_valid_handle(ctrl);
+	if (rc)
+		return rc;
+
+	cdata.cmd = HDCP_2X_CMD_START;
+	cdata.context = ctrl->lib_ctx;
+	rc = ctrl->lib->wakeup(&cdata);
+	if (rc)
+		pr_err("Unable to start the HDCP 2.2 library (%d)\n", rc);
+
+	return rc;
+}
+
 static void dp_hdcp2p2_off(void *input)
 {
+	int rc;
 	struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input;
-	struct hdcp_transport_wakeup_data cdata = {
-					HDCP_TRANSPORT_CMD_AUTHENTICATE};
+	struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_INVALID};
 
-	if (!ctrl) {
-		pr_err("invalid input\n");
+	rc = dp_hdcp2p2_valid_handle(ctrl);
+	if (rc)
 		return;
-	}
 
-	if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
-		pr_err("hdcp is off\n");
-		return;
+	if (atomic_read(&ctrl->auth_state) != HDCP_STATE_AUTH_FAIL) {
+		cdata.cmd = HDCP_2X_CMD_STOP;
+		cdata.context = ctrl->lib_ctx;
+		dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
 	}
 
 	dp_hdcp2p2_set_interrupts(ctrl, false);
 
 	dp_hdcp2p2_reset(ctrl);
 
-	cdata.context = input;
-	dp_hdcp2p2_wakeup(&cdata);
-
 	kthread_park(ctrl->thread);
+
+	sde_hdcp_2x_disable(ctrl->lib_ctx);
 }
 
 static int dp_hdcp2p2_authenticate(void *input)
 {
+	int rc;
 	struct dp_hdcp2p2_ctrl *ctrl = input;
 	struct hdcp_transport_wakeup_data cdata = {
 					HDCP_TRANSPORT_CMD_AUTHENTICATE};
-	int rc = 0;
+	rc = dp_hdcp2p2_valid_handle(ctrl);
+	if (rc)
+		return rc;
 
 	dp_hdcp2p2_set_interrupts(ctrl, true);
 
@@ -370,44 +428,34 @@
 
 static bool dp_hdcp2p2_feature_supported(void *input)
 {
+	int rc;
 	struct dp_hdcp2p2_ctrl *ctrl = input;
 	struct sde_hdcp_2x_ops *lib = NULL;
 	bool supported = false;
 
-	if (!ctrl) {
-		pr_err("invalid input\n");
-		goto end;
-	}
+	rc = dp_hdcp2p2_valid_handle(ctrl);
+	if (rc)
+		return supported;
 
 	lib = ctrl->lib;
-	if (!lib) {
-		pr_err("invalid lib ops data\n");
-		goto end;
-	}
-
 	if (lib->feature_supported)
 		supported = lib->feature_supported(
 			ctrl->lib_ctx);
-end:
+
 	return supported;
 }
 
 static void dp_hdcp2p2_force_encryption(void *data, bool enable)
 {
+	int rc;
 	struct dp_hdcp2p2_ctrl *ctrl = data;
 	struct sde_hdcp_2x_ops *lib = NULL;
 
-	if (!ctrl) {
-		pr_err("invalid input\n");
+	rc = dp_hdcp2p2_valid_handle(ctrl);
+	if (rc)
 		return;
-	}
 
 	lib = ctrl->lib;
-	if (!lib) {
-		pr_err("invalid lib ops data\n");
-		return;
-	}
-
 	if (lib->force_encryption)
 		lib->force_encryption(ctrl->lib_ctx, enable);
 }
@@ -493,26 +541,12 @@
 		return;
 	}
 
-	if (ctrl->rx_status) {
-		if (!ctrl->cp_irq_done) {
-			pr_debug("waiting for CP_IRQ\n");
-			ctrl->polling = true;
-			return;
-		}
-
-		if (ctrl->rx_status & ctrl->sink_rx_status) {
-			ctrl->cp_irq_done = false;
-			ctrl->sink_rx_status = 0;
-			ctrl->rx_status = 0;
-		}
-	}
-
 	dp_hdcp2p2_get_msg_from_sink(ctrl);
 }
 
 static void dp_hdcp2p2_link_check(struct dp_hdcp2p2_ctrl *ctrl)
 {
-	int rc = 0;
+	int rc = 0, retries = 10;
 	struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_INVALID};
 
 	if (!ctrl) {
@@ -545,6 +579,11 @@
 		goto exit;
 	}
 
+	/* wait for polling to start, within the spec-allowed timeout */
+	while (!ctrl->polling && retries--)
+		msleep(20);
+
+	/* check if sink has made a message available */
 	if (ctrl->polling && (ctrl->sink_rx_status & ctrl->rx_status)) {
 		ctrl->sink_rx_status = 0;
 		ctrl->rx_status = 0;
@@ -552,26 +591,19 @@
 		dp_hdcp2p2_get_msg_from_sink(ctrl);
 
 		ctrl->polling = false;
-	} else {
-		ctrl->cp_irq_done = true;
 	}
 exit:
 	if (rc)
 		dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
 }
 
-static void dp_hdcp2p2_manage_session(struct dp_hdcp2p2_ctrl *ctrl)
+static void dp_hdcp2p2_start_auth(struct dp_hdcp2p2_ctrl *ctrl)
 {
-	struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_INVALID};
-
+	struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_START_AUTH};
+
 	cdata.context = ctrl->lib_ctx;
 
 	if (atomic_read(&ctrl->auth_state) == HDCP_STATE_AUTHENTICATING)
-		cdata.cmd = HDCP_2X_CMD_START;
-	else
-		cdata.cmd = HDCP_2X_CMD_STOP;
-
-	dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
+		dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
 }
 
 static int dp_hdcp2p2_read_rx_status(struct dp_hdcp2p2_ctrl *ctrl,
@@ -617,34 +649,31 @@
 
 static int dp_hdcp2p2_cp_irq(void *input)
 {
-	int rc = 0;
+	int rc;
 	struct dp_hdcp2p2_ctrl *ctrl = input;
 
-	if (!ctrl) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
+	rc = dp_hdcp2p2_valid_handle(ctrl);
+	if (rc)
+		return rc;
 
 	if (atomic_read(&ctrl->auth_state) == HDCP_STATE_AUTH_FAIL ||
 		atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
 		pr_err("invalid hdcp state\n");
-		rc = -EINVAL;
-		goto error;
+		return -EINVAL;
 	}
 
 	ctrl->sink_rx_status = 0;
 	rc = dp_hdcp2p2_read_rx_status(ctrl, &ctrl->sink_rx_status);
 	if (rc) {
 		pr_err("failed to read rx status\n");
-		goto error;
+		return rc;
 	}
 
 	pr_debug("sink_rx_status=0x%x\n", ctrl->sink_rx_status);
 
 	if (!ctrl->sink_rx_status) {
 		pr_debug("not a hdcp 2.2 irq\n");
-		rc = -EINVAL;
-		goto error;
+		return -EINVAL;
 	}
 
 
@@ -652,8 +681,6 @@
 	wake_up(&ctrl->wait_q);
 
 	return 0;
-error:
-	return rc;
 }
 
 static int dp_hdcp2p2_isr(void *input)
@@ -721,6 +748,51 @@
 	return false;
 }
 
+static int dp_hdcp2p2_change_streams(struct dp_hdcp2p2_ctrl *ctrl,
+		struct sde_hdcp_2x_wakeup_data *cdata)
+{
+	if (!ctrl || cdata->num_streams == 0 || !cdata->streams) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	if (!ctrl->lib_ctx) {
+		pr_err("HDCP library needs to be acquired\n");
+		return -EINVAL;
+	}
+
+	if (!ctrl->lib) {
+		pr_err("invalid lib ops data\n");
+		return -EINVAL;
+	}
+
+	cdata->context = ctrl->lib_ctx;
+	return ctrl->lib->wakeup(cdata);
+}
+
+static int dp_hdcp2p2_register_streams(void *input, u8 num_streams,
+			struct stream_info *streams)
+{
+	struct dp_hdcp2p2_ctrl *ctrl = input;
+	struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_OPEN_STREAMS};
+
+	cdata.streams = streams;
+	cdata.num_streams = num_streams;
+	return dp_hdcp2p2_change_streams(ctrl, &cdata);
+}
+
+static int dp_hdcp2p2_deregister_streams(void *input, u8 num_streams,
+			struct stream_info *streams)
+{
+	struct dp_hdcp2p2_ctrl *ctrl = input;
+	struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_CLOSE_STREAMS};
+
+	cdata.streams = streams;
+	cdata.num_streams = num_streams;
+	return dp_hdcp2p2_change_streams(ctrl, &cdata);
+}
+
 void sde_dp_hdcp2p2_deinit(void *input)
 {
 	struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input;
@@ -731,9 +803,13 @@
 		return;
 	}
 
-	cdata.cmd = HDCP_2X_CMD_STOP;
-	cdata.context = ctrl->lib_ctx;
-	dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
+	if (atomic_read(&ctrl->auth_state) != HDCP_STATE_AUTH_FAIL) {
+		cdata.cmd = HDCP_2X_CMD_STOP;
+		cdata.context = ctrl->lib_ctx;
+		dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
+	}
+
+	sde_hdcp_2x_deregister(ctrl->lib_ctx);
 
 	kthread_stop(ctrl->thread);
 
@@ -769,7 +845,10 @@
 			dp_hdcp2p2_send_msg(ctrl);
 			break;
 		case HDCP_TRANSPORT_CMD_RECV_MESSAGE:
-			dp_hdcp2p2_recv_msg(ctrl);
+			if (ctrl->rx_status)
+				ctrl->polling = true;
+			else
+				dp_hdcp2p2_recv_msg(ctrl);
 			break;
 		case HDCP_TRANSPORT_CMD_STATUS_SUCCESS:
 			dp_hdcp2p2_send_auth_status(ctrl);
@@ -779,16 +858,13 @@
 			dp_hdcp2p2_send_auth_status(ctrl);
 			break;
 		case HDCP_TRANSPORT_CMD_LINK_POLL:
-			if (ctrl->cp_irq_done)
-				dp_hdcp2p2_recv_msg(ctrl);
-			else
-				ctrl->polling = true;
+			ctrl->polling = true;
 			break;
 		case HDCP_TRANSPORT_CMD_LINK_CHECK:
 			dp_hdcp2p2_link_check(ctrl);
 			break;
 		case HDCP_TRANSPORT_CMD_AUTHENTICATE:
-			dp_hdcp2p2_manage_session(ctrl);
+			dp_hdcp2p2_start_auth(ctrl);
 			break;
 		default:
 			break;
@@ -809,8 +885,12 @@
 		.feature_supported = dp_hdcp2p2_feature_supported,
 		.force_encryption = dp_hdcp2p2_force_encryption,
 		.sink_support = dp_hdcp2p2_supported,
+		.set_mode = dp_hdcp2p2_register,
+		.on = dp_hdcp2p2_on,
 		.off = dp_hdcp2p2_off,
 		.cp_irq = dp_hdcp2p2_cp_irq,
+		.register_streams = dp_hdcp2p2_register_streams,
+		.deregister_streams = dp_hdcp2p2_deregister_streams,
 	};
 
 	static struct hdcp_transport_ops client_ops = {
@@ -865,7 +945,6 @@
 	register_data.hdcp_data = &ctrl->lib_ctx;
 	register_data.client_ops = &client_ops;
 	register_data.ops = &hdcp2x_ops;
-	register_data.device_type = HDCP_TXMTR_DP;
 	register_data.client_data = ctrl;
 
 	rc = sde_hdcp_2x_register(&register_data);
diff --git a/drivers/gpu/drm/msm/dp/dp_mst_drm.c b/drivers/gpu/drm/msm/dp/dp_mst_drm.c
index f528485..508c6dc 100644
--- a/drivers/gpu/drm/msm/dp/dp_mst_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_mst_drm.c
@@ -21,8 +21,8 @@
 #include "dp_drm.h"
 
 #define DP_MST_DEBUG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
+#define DP_MST_INFO_LOG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
 
-#define MAX_DP_MST_STREAMS		2
 #define MAX_DP_MST_DRM_ENCODERS		2
 #define MAX_DP_MST_DRM_BRIDGES		2
 #define HPD_STRING_SIZE			30
@@ -93,12 +93,18 @@
 	struct drm_display_mode drm_mode;
 	struct dp_display_mode dp_mode;
 	struct drm_connector *connector;
+	struct drm_connector *old_connector;
 	void *dp_panel;
+	void *old_dp_panel;
 
 	int vcpi;
 	int pbn;
 	int num_slots;
 	int start_slot;
+
+	u32 fixed_port_num;
+	bool fixed_port_added;
+	struct drm_connector *fixed_connector;
 };
 
 struct dp_mst_private {
@@ -111,6 +117,7 @@
 	struct dp_mst_sim_mode simulator;
 	struct mutex mst_lock;
 	enum dp_drv_state state;
+	bool mst_session_state;
 };
 
 struct dp_mst_encoder_info_cache {
@@ -167,10 +174,13 @@
 			mutex_lock(&mstb->mgr->lock);
 			list_del(&port->next);
 			mutex_unlock(&mstb->mgr->lock);
-			return;
+			goto put_port;
 		}
 		(*mstb->mgr->cbs->register_connector)(port->connector);
 	}
+
+put_port:
+	kref_put(&port->kref, NULL);
 }
 
 static void dp_mst_sim_link_probe_work(struct work_struct *work)
@@ -525,7 +535,8 @@
 
 		mst->dp_display->set_stream_info(mst->dp_display,
 				dp_bridge->dp_panel,
-				dp_bridge->id, start_slot, num_slots, pbn);
+				dp_bridge->id, start_slot, num_slots, pbn,
+				dp_bridge->vcpi);
 
 		pr_info("bridge:%d vcpi:%d start_slot:%d num_slots:%d, pbn:%d\n",
 			dp_bridge->id, dp_bridge->vcpi,
@@ -550,7 +561,8 @@
 
 		mst->dp_display->set_stream_info(mst->dp_display,
 				mst_bridge->dp_panel,
-				mst_bridge->id, start_slot, num_slots, pbn);
+				mst_bridge->id, start_slot, num_slots, pbn,
+				mst_bridge->vcpi);
 	}
 }
 
@@ -672,8 +684,6 @@
 	struct dp_display *dp;
 	struct dp_mst_private *mst;
 
-	DP_MST_DEBUG("enter\n");
-
 	if (!drm_bridge) {
 		pr_err("Invalid params\n");
 		return;
@@ -682,6 +692,9 @@
 	bridge = to_dp_mst_bridge(drm_bridge);
 	dp = bridge->display;
 
+	bridge->old_connector = NULL;
+	bridge->old_dp_panel = NULL;
+
 	if (!bridge->connector) {
 		pr_err("Invalid connector\n");
 		return;
@@ -718,7 +731,14 @@
 		_dp_mst_bridge_pre_enable_part2(bridge);
 	}
 
-	DP_MST_DEBUG("mst bridge [%d] pre enable complete\n", bridge->id);
+	DP_MST_INFO_LOG("mode: id(%d) mode(%s), refresh(%d)\n",
+			bridge->id, bridge->drm_mode.name,
+			bridge->drm_mode.vrefresh);
+	DP_MST_INFO_LOG("dsc: id(%d) dsc(%d)\n", bridge->id,
+			bridge->dp_mode.timing.comp_info.comp_ratio);
+	DP_MST_INFO_LOG("channel: id(%d) vcpi(%d) start(%d) tot(%d)\n",
+			bridge->id, bridge->vcpi, bridge->start_slot,
+			bridge->num_slots);
 end:
 	mutex_unlock(&mst->mst_lock);
 }
@@ -729,8 +749,6 @@
 	struct dp_mst_bridge *bridge;
 	struct dp_display *dp;
 
-	DP_MST_DEBUG("enter\n");
-
 	if (!drm_bridge) {
 		pr_err("Invalid params\n");
 		return;
@@ -751,7 +769,8 @@
 		return;
 	}
 
-	DP_MST_DEBUG("mst bridge [%d] post enable complete\n", bridge->id);
+	DP_MST_INFO_LOG("mst bridge [%d] post enable complete\n",
+			bridge->id);
 }
 
 static void dp_mst_bridge_disable(struct drm_bridge *drm_bridge)
@@ -761,8 +780,6 @@
 	struct dp_display *dp;
 	struct dp_mst_private *mst;
 
-	DP_MST_DEBUG("enter\n");
-
 	if (!drm_bridge) {
 		pr_err("Invalid params\n");
 		return;
@@ -791,7 +808,7 @@
 
 	_dp_mst_bridge_pre_disable_part2(bridge);
 
-	DP_MST_DEBUG("mst bridge [%d] disable complete\n", bridge->id);
+	DP_MST_INFO_LOG("mst bridge [%d] disable complete\n", bridge->id);
 
 	mutex_unlock(&mst->mst_lock);
 }
@@ -803,8 +820,6 @@
 	struct dp_display *dp;
 	struct dp_mst_private *mst;
 
-	DP_MST_DEBUG("enter\n");
-
 	if (!drm_bridge) {
 		pr_err("Invalid params\n");
 		return;
@@ -832,12 +847,17 @@
 	/* maintain the connector to encoder link during suspend/resume */
 	if (mst->state != PM_SUSPEND) {
 		/* Disconnect the connector and panel info from bridge */
+		mst->mst_bridge[bridge->id].old_connector =
+				mst->mst_bridge[bridge->id].connector;
+		mst->mst_bridge[bridge->id].old_dp_panel =
+				mst->mst_bridge[bridge->id].dp_panel;
 		mst->mst_bridge[bridge->id].connector = NULL;
 		mst->mst_bridge[bridge->id].dp_panel = NULL;
 		mst->mst_bridge[bridge->id].encoder_active_sts = false;
 	}
 
-	DP_MST_DEBUG("mst bridge [%d] post disable complete\n", bridge->id);
+	DP_MST_INFO_LOG("mst bridge [%d] post disable complete\n",
+			bridge->id);
 }
 
 static void dp_mst_bridge_mode_set(struct drm_bridge *drm_bridge,
@@ -856,13 +876,21 @@
 
 	bridge = to_dp_mst_bridge(drm_bridge);
 	if (!bridge->connector) {
-		pr_err("Invalid connector\n");
-		return;
+		if (!bridge->old_connector) {
+			pr_err("Invalid connector\n");
+			return;
+		}
+		bridge->connector = bridge->old_connector;
+		bridge->old_connector = NULL;
 	}
 
 	if (!bridge->dp_panel) {
-		pr_err("Invalid dp_panel\n");
-		return;
+		if (!bridge->old_dp_panel) {
+			pr_err("Invalid dp_panel\n");
+			return;
+		}
+		bridge->dp_panel = bridge->old_dp_panel;
+		bridge->old_dp_panel = NULL;
 	}
 
 	dp = bridge->display;
@@ -877,6 +905,10 @@
 
 /* DP MST Bridge APIs */
 
+static struct drm_connector *
+dp_mst_drm_fixed_connector_init(struct dp_display *dp_display,
+				struct drm_encoder *encoder);
+
 static const struct drm_bridge_funcs dp_mst_bridge_ops = {
 	.attach       = dp_mst_bridge_attach,
 	.mode_fixup   = dp_mst_bridge_mode_fixup,
@@ -944,6 +976,23 @@
 
 	DP_MST_DEBUG("mst drm bridge init. bridge id:%d\n", i);
 
+	/*
+	 * If fixed topology port is defined, connector will be created
+	 * immediately.
+	 */
+	rc = display->mst_get_fixed_topology_port(display, bridge->id,
+			&bridge->fixed_port_num);
+	if (!rc) {
+		bridge->fixed_connector =
+			dp_mst_drm_fixed_connector_init(display,
+				bridge->encoder);
+		if (bridge->fixed_connector == NULL) {
+			pr_err("failed to create fixed connector\n");
+			rc = -ENOMEM;
+			goto end;
+		}
+	}
+
 	return 0;
 
 end:
@@ -1136,7 +1185,8 @@
 	}
 
 	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
-		if (!mst->mst_bridge[i].encoder_active_sts) {
+		if (!mst->mst_bridge[i].encoder_active_sts &&
+			!mst->mst_bridge[i].fixed_connector) {
 			mst->mst_bridge[i].encoder_active_sts = true;
 			mst->mst_bridge[i].connector = connector;
 			mst->mst_bridge[i].dp_panel = conn->drv_panel;
@@ -1343,6 +1393,7 @@
 
 	if (!connector) {
 		pr_err("mst sde_connector_init failed\n");
+		drm_modeset_unlock_all(dev);
 		return connector;
 	}
 
@@ -1350,6 +1401,7 @@
 	if (rc) {
 		pr_err("mst connector install failed\n");
 		sde_connector_destroy(connector);
+		drm_modeset_unlock_all(dev);
 		return NULL;
 	}
 
@@ -1372,7 +1424,7 @@
 	/* unlock connector and make it accessible */
 	drm_modeset_unlock_all(dev);
 
-	DP_MST_DEBUG("add mst connector:%d\n", connector->base.id);
+	DP_MST_INFO_LOG("add mst connector id:%d\n", connector->base.id);
 
 	return connector;
 }
@@ -1383,7 +1435,8 @@
 
 	connector->status = connector->funcs->detect(connector, false);
 
-	DP_MST_DEBUG("register mst connector:%d\n", connector->base.id);
+	DP_MST_INFO_LOG("register mst connector id:%d\n",
+			connector->base.id);
 	drm_connector_register(connector);
 }
 
@@ -1392,12 +1445,297 @@
 {
 	DP_MST_DEBUG("enter\n");
 
-	DP_MST_DEBUG("destroy mst connector:%d\n", connector->base.id);
+	DP_MST_INFO_LOG("destroy mst connector id:%d\n", connector->base.id);
 
 	drm_connector_unregister(connector);
 	drm_connector_put(connector);
 }
 
+static enum drm_connector_status
+dp_mst_fixed_connector_detect(struct drm_connector *connector, bool force,
+			void *display)
+{
+	struct dp_display *dp_display = display;
+	struct dp_mst_private *mst = dp_display->dp_mst_prv_info;
+	int i;
+
+	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
+		if (mst->mst_bridge[i].fixed_connector != connector)
+			continue;
+
+		if (!mst->mst_bridge[i].fixed_port_added)
+			break;
+
+		return dp_mst_connector_detect(connector, force, display);
+	}
+
+	return connector_status_disconnected;
+}
+
+static struct drm_encoder *
+dp_mst_fixed_atomic_best_encoder(struct drm_connector *connector,
+			void *display, struct drm_connector_state *state)
+{
+	struct dp_display *dp_display = display;
+	struct dp_mst_private *mst = dp_display->dp_mst_prv_info;
+	struct sde_connector *conn = to_sde_connector(connector);
+	struct drm_encoder *enc = NULL;
+	u32 i;
+
+	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
+		if (mst->mst_bridge[i].connector == connector) {
+			enc = mst->mst_bridge[i].encoder;
+			goto end;
+		}
+	}
+
+	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
+		if (mst->mst_bridge[i].fixed_connector == connector) {
+			mst->mst_bridge[i].encoder_active_sts = true;
+			mst->mst_bridge[i].connector = connector;
+			mst->mst_bridge[i].dp_panel = conn->drv_panel;
+			enc = mst->mst_bridge[i].encoder;
+			break;
+		}
+	}
+
+end:
+	if (enc)
+		DP_MST_DEBUG("mst connector:%d atomic best encoder:%d\n",
+			connector->base.id, i);
+	else
+		DP_MST_DEBUG("mst connector:%d atomic best encoder failed\n",
+				connector->base.id);
+
+	return enc;
+}
+
+static u32 dp_mst_find_fixed_port_num(struct drm_dp_mst_branch *mstb,
+		struct drm_dp_mst_port *target)
+{
+	struct drm_dp_mst_port *port;
+	u32 port_num = 0;
+
+	/*
+	 * search through reversed order of adding sequence, so the port number
+	 * will be unique once topology is fixed
+	 */
+	list_for_each_entry_reverse(port, &mstb->ports, next) {
+		if (port->mstb)
+			port_num += dp_mst_find_fixed_port_num(port->mstb,
+						target);
+		else if (!port->input) {
+			++port_num;
+			if (port == target)
+				break;
+		}
+	}
+
+	return port_num;
+}
+
+static struct drm_connector *
+dp_mst_find_fixed_connector(struct dp_mst_private *dp_mst,
+		struct drm_dp_mst_port *port)
+{
+	struct dp_display *dp_display = dp_mst->dp_display;
+	struct drm_connector *connector = NULL;
+	struct sde_connector *c_conn;
+	u32 port_num;
+	int i;
+
+	mutex_lock(&port->mgr->lock);
+	port_num = dp_mst_find_fixed_port_num(port->mgr->mst_primary, port);
+	mutex_unlock(&port->mgr->lock);
+
+	if (!port_num)
+		return NULL;
+
+	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
+		if (dp_mst->mst_bridge[i].fixed_port_num == port_num) {
+			connector = dp_mst->mst_bridge[i].fixed_connector;
+			c_conn = to_sde_connector(connector);
+			c_conn->mst_port = port;
+			dp_display->mst_connector_update_link_info(dp_display,
+					connector);
+			dp_mst->mst_bridge[i].fixed_port_added = true;
+			DP_MST_DEBUG("found fixed connector %d\n",
+					DRMID(connector));
+			break;
+		}
+	}
+
+	return connector;
+}
+
+static int
+dp_mst_find_first_available_encoder_idx(struct dp_mst_private *dp_mst)
+{
+	int enc_idx = MAX_DP_MST_DRM_BRIDGES;
+	int i;
+
+	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
+		if (!dp_mst->mst_bridge[i].fixed_connector) {
+			enc_idx = i;
+			break;
+		}
+	}
+
+	return enc_idx;
+}
+
+static struct drm_connector *
+dp_mst_add_fixed_connector(struct drm_dp_mst_topology_mgr *mgr,
+		struct drm_dp_mst_port *port, const char *pathprop)
+{
+	struct dp_mst_private *dp_mst;
+	struct drm_device *dev;
+	struct dp_display *dp_display;
+	struct drm_connector *connector;
+	int i, enc_idx;
+
+	DP_MST_DEBUG("enter\n");
+
+	dp_mst = container_of(mgr, struct dp_mst_private, mst_mgr);
+
+	dp_display = dp_mst->dp_display;
+	dev = dp_display->drm_dev;
+
+	if (port->input || port->mstb) {
+		enc_idx = MAX_DP_MST_DRM_BRIDGES;
+	} else {
+		/* if port is already reserved, return immediately */
+		connector = dp_mst_find_fixed_connector(dp_mst, port);
+		if (connector != NULL)
+			return connector;
+
+		/* first available bridge index for non-reserved port */
+		enc_idx = dp_mst_find_first_available_encoder_idx(dp_mst);
+	}
+
+	/* add normal connector */
+	connector = dp_mst_add_connector(mgr, port, pathprop);
+	if (!connector) {
+		DP_MST_DEBUG("failed to add connector\n");
+		return NULL;
+	}
+
+	drm_modeset_lock_all(dev);
+
+	/* clear encoder list */
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
+		connector->encoder_ids[i] = 0;
+
+	/* re-attach encoders from first available encoders */
+	for (i = enc_idx; i < MAX_DP_MST_DRM_BRIDGES; i++)
+		drm_connector_attach_encoder(connector,
+				dp_mst->mst_bridge[i].encoder);
+
+	drm_modeset_unlock_all(dev);
+
+	DP_MST_DEBUG("add mst connector:%d\n", connector->base.id);
+
+	return connector;
+}
+
+static void dp_mst_register_fixed_connector(struct drm_connector *connector)
+{
+	struct sde_connector *c_conn = to_sde_connector(connector);
+	struct dp_display *dp_display = c_conn->display;
+	struct dp_mst_private *dp_mst = dp_display->dp_mst_prv_info;
+	int i;
+
+	DP_MST_DEBUG("enter\n");
+
+	/* skip connector registered for fixed topology ports */
+	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
+		if (dp_mst->mst_bridge[i].fixed_connector == connector) {
+			DP_MST_DEBUG("found fixed connector %d\n",
+					DRMID(connector));
+			return;
+		}
+	}
+
+	dp_mst_register_connector(connector);
+}
+
+static void dp_mst_destroy_fixed_connector(struct drm_dp_mst_topology_mgr *mgr,
+					   struct drm_connector *connector)
+{
+	struct dp_mst_private *dp_mst;
+	int i;
+
+	DP_MST_DEBUG("enter\n");
+
+	dp_mst = container_of(mgr, struct dp_mst_private, mst_mgr);
+
+	/* skip connector destroy for fixed topology ports */
+	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
+		if (dp_mst->mst_bridge[i].fixed_connector == connector) {
+			dp_mst->mst_bridge[i].fixed_port_added = false;
+			DP_MST_DEBUG("destroy fixed connector %d\n",
+					DRMID(connector));
+			return;
+		}
+	}
+
+	dp_mst_destroy_connector(mgr, connector);
+}
+
+static struct drm_connector *
+dp_mst_drm_fixed_connector_init(struct dp_display *dp_display,
+			struct drm_encoder *encoder)
+{
+	static const struct sde_connector_ops dp_mst_connector_ops = {
+		.post_init  = NULL,
+		.detect     = dp_mst_fixed_connector_detect,
+		.get_modes  = dp_mst_connector_get_modes,
+		.mode_valid = dp_mst_connector_mode_valid,
+		.get_info   = dp_mst_connector_get_info,
+		.get_mode_info  = dp_mst_connector_get_mode_info,
+		.atomic_best_encoder = dp_mst_fixed_atomic_best_encoder,
+		.atomic_check = dp_mst_connector_atomic_check,
+		.config_hdr = dp_mst_connector_config_hdr,
+		.pre_destroy = dp_mst_connector_pre_destroy,
+	};
+	struct drm_device *dev;
+	struct drm_connector *connector;
+	int rc;
+
+	DP_MST_DEBUG("enter\n");
+
+	dev = dp_display->drm_dev;
+
+	connector = sde_connector_init(dev,
+				encoder,
+				NULL,
+				dp_display,
+				&dp_mst_connector_ops,
+				DRM_CONNECTOR_POLL_HPD,
+				DRM_MODE_CONNECTOR_DisplayPort);
+
+	if (!connector) {
+		pr_err("mst sde_connector_init failed\n");
+		return NULL;
+	}
+
+	rc = dp_display->mst_connector_install(dp_display, connector);
+	if (rc) {
+		pr_err("mst connector install failed\n");
+		sde_connector_destroy(connector);
+		return NULL;
+	}
+
+	drm_object_attach_property(&connector->base,
+			dev->mode_config.path_property, 0);
+	drm_object_attach_property(&connector->base,
+			dev->mode_config.tile_property, 0);
+
+	DP_MST_DEBUG("add mst fixed connector:%d\n", connector->base.id);
+
+	return connector;
+}
+
 static void dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
 {
 	struct dp_mst_private *mst = container_of(mgr, struct dp_mst_private,
@@ -1411,7 +1749,7 @@
 
 	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
 
-	DP_MST_DEBUG("mst hot plug event\n");
+	DP_MST_INFO_LOG("mst hot plug event\n");
 }
 
 static void dp_mst_hpd_event_notify(struct dp_mst_private *mst, bool hpd_status)
@@ -1432,7 +1770,7 @@
 
 	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
 
-	DP_MST_DEBUG("%s finished\n", __func__);
+	DP_MST_INFO_LOG("%s finished\n", __func__);
 }
 
 /* DP Driver Callback OPs */
@@ -1444,7 +1782,9 @@
 	struct dp_display *dp = dp_display;
 	struct dp_mst_private *mst = dp->dp_mst_prv_info;
 
-	DP_MST_DEBUG("enter:\n");
+	mutex_lock(&mst->mst_lock);
+	mst->mst_session_state = hpd_status;
+	mutex_unlock(&mst->mst_lock);
 
 	if (!hpd_status)
 		rc = mst->mst_fw_cbs->topology_mgr_set_mst(&mst->mst_mgr,
@@ -1466,9 +1806,7 @@
 
 	dp_mst_hpd_event_notify(mst, hpd_status);
 
-	DP_MST_DEBUG("mst display hpd:%d, rc:%d\n", hpd_status, rc);
-
-	DP_MST_DEBUG("exit:\n");
+	DP_MST_INFO_LOG("mst display hpd:%d, rc:%d\n", hpd_status, rc);
 }
 
 static void dp_mst_display_hpd_irq(void *dp_display,
@@ -1477,26 +1815,29 @@
 	int rc;
 	struct dp_display *dp = dp_display;
 	struct dp_mst_private *mst = dp->dp_mst_prv_info;
-	u8 esi[14], idx;
+	u8 esi[14];
 	unsigned int esi_res = DP_SINK_COUNT_ESI + 1;
 	bool handled;
 
-	DP_MST_DEBUG("enter:\n");
-
 	if (info->mst_hpd_sim) {
 		dp_mst_hotplug(&mst->mst_mgr);
 		return;
 	}
 
+	if (!mst->mst_session_state) {
+		pr_err("mst_hpd_irq received before mst session start\n");
+		return;
+	}
+
 	rc = drm_dp_dpcd_read(mst->caps.drm_aux, DP_SINK_COUNT_ESI,
 		esi, 14);
 	if (rc != 14) {
-		pr_err("dpcd sync status read failed, rlen=%d\n", rc);
-		goto end;
+		pr_err("dpcd sink status read failed, rlen=%d\n", rc);
+		return;
 	}
 
-	for (idx = 0; idx < 14; idx++)
-		DP_MST_DEBUG("mst irq: esi[%d]: 0x%x\n", idx, esi[idx]);
+	DP_MST_DEBUG("mst irq: esi1[0x%x] esi2[0x%x] esi3[%x]\n",
+			esi[1], esi[2], esi[3]);
 
 	rc = drm_dp_mst_hpd_irq(&mst->mst_mgr, esi, &handled);
 
@@ -1509,9 +1850,6 @@
 	}
 
 	DP_MST_DEBUG("mst display hpd_irq handled:%d rc:%d\n", handled, rc);
-
-end:
-	DP_MST_DEBUG("exit:\n");
 }
 
 static void dp_mst_set_state(void *dp_display, enum dp_drv_state mst_state)
@@ -1525,6 +1863,7 @@
 	}
 
 	mst->state = mst_state;
+	DP_MST_INFO_LOG("mst power state:%d\n", mst_state);
 }
 
 /* DP MST APIs */
@@ -1542,6 +1881,13 @@
 	.hotplug = dp_mst_hotplug,
 };
 
+static const struct drm_dp_mst_topology_cbs dp_mst_fixed_drm_cbs = {
+	.add_connector = dp_mst_add_fixed_connector,
+	.register_connector = dp_mst_register_fixed_connector,
+	.destroy_connector = dp_mst_destroy_fixed_connector,
+	.hotplug = dp_mst_hotplug,
+};
+
 static void dp_mst_sim_init(struct dp_mst_private *mst)
 {
 	INIT_WORK(&mst->simulator.probe_work, dp_mst_sim_link_probe_work);
@@ -1606,7 +1952,11 @@
 	}
 	memset(&dp_mst_enc_cache, 0, sizeof(dp_mst_enc_cache));
 
-	DP_MST_DEBUG("dp drm mst topology manager init completed\n");
+	/* choose fixed callback function if fixed topology is found */
+	if (!dp_display->mst_get_fixed_topology_port(dp_display, 0, NULL))
+		dp_mst.mst_mgr.cbs = &dp_mst_fixed_drm_cbs;
+
+	DP_MST_INFO_LOG("dp drm mst topology manager init completed\n");
 
 	return ret;
 
@@ -1637,6 +1987,6 @@
 
 	mutex_destroy(&mst->mst_lock);
 
-	DP_MST_DEBUG("dp drm mst topology manager deinit completed\n");
+	DP_MST_INFO_LOG("dp drm mst topology manager deinit completed\n");
 }
 
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index eca7909..d98ebcf 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -1530,12 +1530,14 @@
 	struct dp_dsc_slices_per_line *rec;
 	int slice_width;
 	u32 ppr = dp_mode->timing.pixel_clk_khz/1000;
+	int max_slice_width;
 
 	comp_info->dsc_info.slice_per_pkt = 0;
 	for (i = 0; i < ARRAY_SIZE(slice_per_line_tbl); i++) {
 		rec = &slice_per_line_tbl[i];
 		if ((ppr > rec->min_ppr) && (ppr <= rec->max_ppr)) {
 			comp_info->dsc_info.slice_per_pkt = rec->num_slices;
+			i++;
 			break;
 		}
 	}
@@ -1543,9 +1545,21 @@
 	if (comp_info->dsc_info.slice_per_pkt == 0)
 		return -EINVAL;
 
+	max_slice_width = dp_panel->dsc_dpcd[12] * 320;
 	slice_width = (dp_mode->timing.h_active /
 				comp_info->dsc_info.slice_per_pkt);
 
+	while (slice_width >= max_slice_width) {
+		if (i == ARRAY_SIZE(slice_per_line_tbl))
+			return -EINVAL;
+
+		rec = &slice_per_line_tbl[i];
+		comp_info->dsc_info.slice_per_pkt = rec->num_slices;
+		slice_width = (dp_mode->timing.h_active /
+				comp_info->dsc_info.slice_per_pkt);
+		i++;
+	}
+
 	comp_info->dsc_info.block_pred_enable =
 			dp_panel->sink_dsc_caps.block_pred_en;
 	comp_info->dsc_info.vbr_enable = 0;
@@ -1657,8 +1671,8 @@
 	panel->minor = link_info->revision & 0x0f;
 	pr_debug("version: %d.%d\n", panel->major, panel->minor);
 
-	link_info->rate =
-		drm_dp_bw_code_to_link_rate(dp_panel->dpcd[DP_MAX_LINK_RATE]);
+	link_info->rate = min_t(unsigned long, panel->parser->max_lclk_khz,
+		drm_dp_bw_code_to_link_rate(dp_panel->dpcd[DP_MAX_LINK_RATE]));
 	pr_debug("link_rate=%d\n", link_info->rate);
 
 	link_info->num_lanes = dp_panel->dpcd[DP_MAX_LANE_COUNT] &
@@ -2305,13 +2319,14 @@
 
 static int dp_panel_set_stream_info(struct dp_panel *dp_panel,
 		enum dp_stream_id stream_id, u32 ch_start_slot,
-			u32 ch_tot_slots, u32 pbn)
+			u32 ch_tot_slots, u32 pbn, int vcpi)
 {
 	if (!dp_panel || stream_id > DP_STREAM_MAX) {
 		pr_err("invalid input. stream_id: %d\n", stream_id);
 		return -EINVAL;
 	}
 
+	dp_panel->vcpi = vcpi;
 	dp_panel->stream_id = stream_id;
 	dp_panel->channel_start_slot = ch_start_slot;
 	dp_panel->channel_total_slots = ch_tot_slots;
@@ -2376,7 +2391,7 @@
 	if (!panel->custom_edid && dp_panel->edid_ctrl->edid)
 		sde_free_edid((void **)&dp_panel->edid_ctrl);
 
-	dp_panel_set_stream_info(dp_panel, DP_STREAM_MAX, 0, 0, 0);
+	dp_panel_set_stream_info(dp_panel, DP_STREAM_MAX, 0, 0, 0, 0);
 	memset(&dp_panel->pinfo, 0, sizeof(dp_panel->pinfo));
 	memset(&hdr->hdr_meta, 0, sizeof(hdr->hdr_meta));
 	panel->panel_on = false;
@@ -2931,6 +2946,8 @@
 	if (in->base_panel) {
 		memcpy(dp_panel->dpcd, in->base_panel->dpcd,
 				DP_RECEIVER_CAP_SIZE + 1);
+		memcpy(dp_panel->dsc_dpcd, in->base_panel->dsc_dpcd,
+				DP_RECEIVER_DSC_CAP_SIZE + 1);
 		memcpy(&dp_panel->link_info, &in->base_panel->link_info,
 				sizeof(dp_panel->link_info));
 		dp_panel->mst_state = in->base_panel->mst_state;
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index 90d5346..dc96090 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -110,6 +110,7 @@
 	 * Client sets the stream id value using set_stream_id interface.
 	 */
 	enum dp_stream_id stream_id;
+	int vcpi;
 
 	u32 channel_start_slot;
 	u32 channel_total_slots;
@@ -154,7 +155,7 @@
 
 	int (*set_stream_info)(struct dp_panel *dp_panel,
 			enum dp_stream_id stream_id, u32 ch_start_slot,
-			u32 ch_tot_slots, u32 pbn);
+			u32 ch_tot_slots, u32 pbn, int vcpi);
 
 	int (*read_sink_status)(struct dp_panel *dp_panel, u8 *sts, u32 size);
 	int (*update_edid)(struct dp_panel *dp_panel, struct edid *edid);
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.c b/drivers/gpu/drm/msm/dp/dp_parser.c
index b0a6d24..bc4369d 100644
--- a/drivers/gpu/drm/msm/dp/dp_parser.c
+++ b/drivers/gpu/drm/msm/dp/dp_parser.c
@@ -151,11 +151,22 @@
 			parser->l_map[i] = data[i];
 	}
 
+	data = of_get_property(of_node, "qcom,pn-swap-lane-map", &len);
+	if (data && (len == DP_MAX_PHY_LN)) {
+		for (i = 0; i < len; i++)
+			parser->l_pnswap |= (data[i] & 0x01) << i;
+	}
+
 	rc = of_property_read_u32(of_node,
 		"qcom,max-pclk-frequency-khz", &parser->max_pclk_khz);
 	if (rc)
 		parser->max_pclk_khz = DP_MAX_PIXEL_CLK_KHZ;
 
+	rc = of_property_read_u32(of_node,
+		"qcom,max-lclk-frequency-khz", &parser->max_lclk_khz);
+	if (rc)
+		parser->max_lclk_khz = DP_MAX_LINK_CLK_KHZ;
+
 	return 0;
 }
 
@@ -692,6 +703,7 @@
 static int dp_parser_mst(struct dp_parser *parser)
 {
 	struct device *dev = &parser->pdev->dev;
+	int i;
 
 	parser->has_mst = of_property_read_bool(dev->of_node,
 			"qcom,mst-enable");
@@ -699,6 +711,12 @@
 
 	pr_debug("mst parsing successful. mst:%d\n", parser->has_mst);
 
+	for (i = 0; i < MAX_DP_MST_STREAMS; i++) {
+		of_property_read_u32_index(dev->of_node,
+				"qcom,mst-fixed-topology-ports", i,
+				&parser->mst_fixed_port[i]);
+	}
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.h b/drivers/gpu/drm/msm/dp/dp_parser.h
index 7fb90c9..9caa1a7 100644
--- a/drivers/gpu/drm/msm/dp/dp_parser.h
+++ b/drivers/gpu/drm/msm/dp/dp_parser.h
@@ -11,6 +11,8 @@
 #define DP_LABEL "MDSS DP DISPLAY"
 #define AUX_CFG_LEN	10
 #define DP_MAX_PIXEL_CLK_KHZ	675000
+#define DP_MAX_LINK_CLK_KHZ	810000
+#define MAX_DP_MST_STREAMS	2
 
 enum dp_pm_type {
 	DP_CORE_PM,
@@ -181,6 +183,9 @@
  * @mp: gpio, regulator and clock related data
  * @pinctrl: pin-control related data
  * @disp_data: controller's display related data
+ * @l_pnswap: P/N swap status on each lane
+ * @max_pclk_khz: maximum pixel clock supported for the platform
+ * @max_lclk_khz: maximum link clock supported for the platform
  * @hw_cfg: DP HW specific settings
  * @has_mst: MST feature enable status
  * @has_mst_sideband: MST sideband feature enable status
@@ -191,6 +196,7 @@
  * @max_dp_dsc_blks: maximum DSC blks for DP interface
  * @max_dp_dsc_input_width_pixs: Maximum input width for DSC block
  * @has_widebus: widebus (2PPC) feature eanble status
+ * @mst_fixed_port: mst port_num reserved for fixed topology
  * @parse: function to be called by client to parse device tree.
  * @get_io: function to be called by client to get io data.
  * @get_io_buf: function to be called by client to get io buffers.
@@ -205,8 +211,10 @@
 	struct dp_display_data disp_data;
 
 	u8 l_map[4];
+	u8 l_pnswap;
 	struct dp_aux_cfg aux_cfg[AUX_CFG_LEN];
 	u32 max_pclk_khz;
+	u32 max_lclk_khz;
 	struct dp_hw_cfg hw_cfg;
 	bool has_mst;
 	bool has_mst_sideband;
@@ -218,6 +226,7 @@
 	u32 max_dp_dsc_blks;
 	u32 max_dp_dsc_input_width_pixs;
 	bool lphw_hpd;
+	u32 mst_fixed_port[MAX_DP_MST_STREAMS];
 
 	int (*parse)(struct dp_parser *parser);
 	struct dp_io_data *(*get_io)(struct dp_parser *parser, char *name);
diff --git a/drivers/gpu/drm/msm/dp/dp_reg.h b/drivers/gpu/drm/msm/dp/dp_reg.h
index 5089f0c..7f9391d 100644
--- a/drivers/gpu/drm/msm/dp/dp_reg.h
+++ b/drivers/gpu/drm/msm/dp/dp_reg.h
@@ -351,12 +351,14 @@
 
 #define TXn_TX_EMP_POST1_LVL			(0x000C)
 #define TXn_TX_DRV_LVL				(0x001C)
+#define TXn_TX_POL_INV				(0x0064)
 
 #define DP_PHY_AUX_INTERRUPT_MASK_V420		(0x0054)
 #define DP_PHY_AUX_INTERRUPT_CLEAR_V420		(0x0058)
 #define DP_PHY_AUX_INTERRUPT_STATUS_V420	(0x00D8)
 #define DP_PHY_SPARE0_V420			(0x00C8)
 #define TXn_TX_DRV_LVL_V420			(0x0014)
+#define TXn_TX_POL_INV_V420			(0x005C)
 
 #define QSERDES_COM_BIAS_EN_CLKBUFLR_EN		(0x004)
 
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index ae2ce71..15ad347 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -172,7 +172,7 @@
 {
 	struct dsi_display *dsi_display = display;
 	struct dsi_panel *panel;
-	u32 bl_scale, bl_scale_ad;
+	u32 bl_scale, bl_scale_sv;
 	u64 bl_temp;
 	int rc = 0;
 
@@ -193,12 +193,11 @@
 	bl_scale = panel->bl_config.bl_scale;
 	bl_temp = bl_lvl * bl_scale / MAX_BL_SCALE_LEVEL;
 
-	bl_scale_ad = panel->bl_config.bl_scale_ad;
-	bl_temp = (u32)bl_temp * bl_scale_ad / MAX_AD_BL_SCALE_LEVEL;
+	bl_scale_sv = panel->bl_config.bl_scale_sv;
+	bl_temp = (u32)bl_temp * bl_scale_sv / MAX_SV_BL_SCALE_LEVEL;
 
-	pr_debug("bl_scale = %u, bl_scale_ad = %u, bl_lvl = %u\n",
-		bl_scale, bl_scale_ad, (u32)bl_temp);
-
+	pr_debug("bl_scale = %u, bl_scale_sv = %u, bl_lvl = %u\n",
+		bl_scale, bl_scale_sv, (u32)bl_temp);
 	rc = dsi_display_clk_ctrl(dsi_display->dsi_clk_handle,
 			DSI_CORE_CLK, DSI_CLK_ON);
 	if (rc) {
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index 5e9d3ac..730a2c2 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -2032,7 +2032,7 @@
 	}
 
 	panel->bl_config.bl_scale = MAX_BL_SCALE_LEVEL;
-	panel->bl_config.bl_scale_ad = MAX_AD_BL_SCALE_LEVEL;
+	panel->bl_config.bl_scale_sv = MAX_SV_BL_SCALE_LEVEL;
 
 	rc = utils->read_u32(utils->data, "qcom,mdss-dsi-bl-min-level", &val);
 	if (rc) {
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
index 8d9cfea..a2dcebb 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
@@ -23,7 +23,7 @@
 
 #define MAX_BL_LEVEL 4096
 #define MAX_BL_SCALE_LEVEL 1024
-#define MAX_AD_BL_SCALE_LEVEL 65535
+#define MAX_SV_BL_SCALE_LEVEL 65535
 #define DSI_CMD_PPS_SIZE 135
 
 #define DSI_MODE_MAX 5
@@ -90,7 +90,7 @@
 	u32 brightness_max_level;
 	u32 bl_level;
 	u32 bl_scale;
-	u32 bl_scale_ad;
+	u32 bl_scale_sv;
 
 	int en_gpio;
 	/* PWM params */
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 567852b..9a36012 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -87,8 +87,7 @@
 	int conn_cnt = 0;
 
 	if (msm_is_mode_seamless(&crtc_state->mode) ||
-		msm_is_mode_seamless_vrr(&crtc_state->adjusted_mode) ||
-		msm_is_mode_seamless_poms(&crtc_state->adjusted_mode))
+		msm_is_mode_seamless_vrr(&crtc_state->adjusted_mode))
 		return true;
 
 	if (msm_is_mode_seamless_dms(&crtc_state->adjusted_mode) && !enable)
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 706157d..b8de212 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -189,7 +189,7 @@
 	CONNECTOR_PROP_DST_H,
 	CONNECTOR_PROP_ROI_V1,
 	CONNECTOR_PROP_BL_SCALE,
-	CONNECTOR_PROP_AD_BL_SCALE,
+	CONNECTOR_PROP_SV_BL_SCALE,
 
 	/* enum/bitmask properties */
 	CONNECTOR_PROP_TOPOLOGY_NAME,
@@ -249,18 +249,6 @@
 };
 
 /**
- * enum panel_mode - panel operation mode
- * @MSM_DISPLAY_VIDEO_MODE: video mode panel
- * @MSM_DISPLAY_CMD_MODE:   Command mode panel
- * @MODE_MAX:
- */
-enum panel_op_mode {
-	MSM_DISPLAY_VIDEO_MODE = 0,
-	MSM_DISPLAY_CMD_MODE,
-	MSM_DISPLAY_MODE_MAX,
-};
-
-/**
  * enum msm_event_wait - type of HW events to wait for
  * @MSM_ENC_COMMIT_DONE - wait for the driver to flush the registers to HW
  * @MSM_ENC_TX_COMPLETE - wait for the HW to transfer the frame to panel
@@ -496,7 +484,7 @@
 struct msm_display_info {
 	int intf_type;
 	uint32_t capabilities;
-	enum panel_op_mode curr_panel_mode;
+
 	uint32_t num_of_h_tiles;
 	uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY];
 
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 3fce5ec..e062b07 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -38,8 +38,6 @@
 #define MSM_MODE_FLAG_SEAMLESS_DMS			(1<<2)
 /* Request to switch the fps */
 #define MSM_MODE_FLAG_SEAMLESS_VRR			(1<<3)
-/* Request to switch the panel mode */
-#define MSM_MODE_FLAG_SEAMLESS_POMS			(1<<4)
 
 /* As there are different display controller blocks depending on the
  * snapdragon version, the kms support is split out and the appropriate
@@ -214,13 +212,6 @@
 		: false;
 }
 
-static inline bool msm_is_mode_seamless_poms(
-		const struct drm_display_mode *mode)
-{
-	return mode ? (mode->private_flags & MSM_MODE_FLAG_SEAMLESS_POMS)
-		: false;
-}
-
 static inline bool msm_needs_vblank_pre_modeset(
 		const struct drm_display_mode *mode)
 {
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c
index 48f97a0..b380481 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.c
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c
@@ -2160,7 +2160,7 @@
 static void sde_cp_notify_ad_event(struct drm_crtc *crtc_drm, void *arg)
 {
 	uint32_t input_bl = 0, output_bl = 0;
-	uint32_t scale = MAX_AD_BL_SCALE_LEVEL;
+	uint32_t scale = MAX_SV_BL_SCALE_LEVEL;
 	struct sde_hw_mixer *hw_lm = NULL;
 	struct sde_hw_dspp *hw_dspp = NULL;
 	u32 num_mixers;
@@ -2208,7 +2208,7 @@
 	if (!input_bl || input_bl < output_bl)
 		return;
 
-	scale = (output_bl * MAX_AD_BL_SCALE_LEVEL) / input_bl;
+	scale = (output_bl * MAX_SV_BL_SCALE_LEVEL) / input_bl;
 	event.length = sizeof(u32);
 	event.type = DRM_EVENT_AD_BACKLIGHT;
 	msm_mode_object_event_notify(&crtc_drm->base, crtc_drm->dev,
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index c3be2fb..d87a981 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -539,13 +539,13 @@
 	else
 		bl_config->bl_scale = c_conn->bl_scale;
 
-	if (c_conn->bl_scale_ad > MAX_AD_BL_SCALE_LEVEL)
-		bl_config->bl_scale_ad = MAX_AD_BL_SCALE_LEVEL;
+	if (c_conn->bl_scale_sv > MAX_SV_BL_SCALE_LEVEL)
+		bl_config->bl_scale_sv = MAX_SV_BL_SCALE_LEVEL;
 	else
-		bl_config->bl_scale_ad = c_conn->bl_scale_ad;
+		bl_config->bl_scale_sv = c_conn->bl_scale_sv;
 
-	SDE_DEBUG("bl_scale = %u, bl_scale_ad = %u, bl_level = %u\n",
-		bl_config->bl_scale, bl_config->bl_scale_ad,
+	SDE_DEBUG("bl_scale = %u, bl_scale_sv = %u, bl_level = %u\n",
+		bl_config->bl_scale, bl_config->bl_scale_sv,
 		bl_config->bl_level);
 	rc = c_conn->ops.set_backlight(&c_conn->base,
 			dsi_display, bl_config->bl_level);
@@ -615,7 +615,7 @@
 			mutex_unlock(&c_conn->lock);
 			break;
 		case CONNECTOR_PROP_BL_SCALE:
-		case CONNECTOR_PROP_AD_BL_SCALE:
+		case CONNECTOR_PROP_SV_BL_SCALE:
 			_sde_connector_update_bl_scale(c_conn);
 			break;
 		case CONNECTOR_PROP_HDR_METADATA:
@@ -1257,7 +1257,7 @@
 		if (rc)
 			SDE_ERROR_CONN(c_conn, "invalid roi_v1, rc: %d\n", rc);
 		break;
-	/* CONNECTOR_PROP_BL_SCALE and CONNECTOR_PROP_AD_BL_SCALE are
+	/* CONNECTOR_PROP_BL_SCALE and CONNECTOR_PROP_SV_BL_SCALE are
 	 * color-processing properties. These two properties require
 	 * special handling since they don't quite fit the current standard
 	 * atomic set property framework.
@@ -1266,8 +1266,8 @@
 		c_conn->bl_scale = val;
 		c_conn->bl_scale_dirty = true;
 		break;
-	case CONNECTOR_PROP_AD_BL_SCALE:
-		c_conn->bl_scale_ad = val;
+	case CONNECTOR_PROP_SV_BL_SCALE:
+		c_conn->bl_scale_sv = val;
 		c_conn->bl_scale_dirty = true;
 		break;
 	case CONNECTOR_PROP_HDR_METADATA:
@@ -2249,13 +2249,13 @@
 		0x0, 0, MAX_BL_SCALE_LEVEL, MAX_BL_SCALE_LEVEL,
 		CONNECTOR_PROP_BL_SCALE);
 
-	msm_property_install_range(&c_conn->property_info, "ad_bl_scale",
-		0x0, 0, MAX_AD_BL_SCALE_LEVEL, MAX_AD_BL_SCALE_LEVEL,
-		CONNECTOR_PROP_AD_BL_SCALE);
+	msm_property_install_range(&c_conn->property_info, "sv_bl_scale",
+		0x0, 0, MAX_SV_BL_SCALE_LEVEL, MAX_SV_BL_SCALE_LEVEL,
+		CONNECTOR_PROP_SV_BL_SCALE);
 
 	c_conn->bl_scale_dirty = false;
 	c_conn->bl_scale = MAX_BL_SCALE_LEVEL;
-	c_conn->bl_scale_ad = MAX_AD_BL_SCALE_LEVEL;
+	c_conn->bl_scale_sv = MAX_SV_BL_SCALE_LEVEL;
 
 	/* enum/bitmask properties */
 	msm_property_install_enum(&c_conn->property_info, "topology_name",
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index a1bd65e..0db872f 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -372,7 +372,7 @@
  * @esd_status_check: Flag to indicate if ESD thread is scheduled or not
  * @bl_scale_dirty: Flag to indicate PP BL scale value(s) is changed
  * @bl_scale: BL scale value for ABA feature
- * @bl_scale_ad: BL scale value for AD feature
+ * @bl_scale_sv: BL scale value for sunlight visibility feature
  * @unset_bl_level: BL level that needs to be set later
  * @allow_bl_update: Flag to indicate if BL update is allowed currently or not
  * @qsync_mode: Cached Qsync mode, 0=disabled, 1=continuous mode
@@ -423,7 +423,7 @@
 
 	bool bl_scale_dirty;
 	u32 bl_scale;
-	u32 bl_scale_ad;
+	u32 bl_scale_sv;
 	u32 unset_bl_level;
 	bool allow_bl_update;
 
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 3e25c97..93a1f0b 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -1824,8 +1824,8 @@
 	 */
 	drm_for_each_encoder_mask(encoder, crtc->dev,
 			crtc->state->encoder_mask) {
-		post_commit |= sde_encoder_check_curr_mode(encoder,
-						MSM_DISPLAY_VIDEO_MODE);
+		post_commit |= sde_encoder_check_mode(encoder,
+						MSM_DISPLAY_CAP_VID_MODE);
 	}
 
 	SDE_DEBUG("crtc%d: secure_level %d old_valid_fb %d post_commit %d\n",
@@ -3164,8 +3164,8 @@
 	_sde_crtc_dest_scaler_setup(crtc);
 
 	/* cancel the idle notify delayed work */
-	if (sde_encoder_check_curr_mode(sde_crtc->mixers[0].encoder,
-					MSM_DISPLAY_VIDEO_MODE) &&
+	if (sde_encoder_check_mode(sde_crtc->mixers[0].encoder,
+					MSM_DISPLAY_CAP_VID_MODE) &&
 		kthread_cancel_delayed_work_sync(&sde_crtc->idle_notify_work))
 		SDE_DEBUG("idle notify work cancelled\n");
 
@@ -3281,9 +3281,8 @@
 	_sde_crtc_wait_for_fences(crtc);
 
 	/* schedule the idle notify delayed work */
-	if (idle_time && sde_encoder_check_curr_mode(
-						sde_crtc->mixers[0].encoder,
-						MSM_DISPLAY_VIDEO_MODE)) {
+	if (idle_time && sde_encoder_check_mode(sde_crtc->mixers[0].encoder,
+						MSM_DISPLAY_CAP_VID_MODE)) {
 		kthread_queue_delayed_work(&event_thread->worker,
 					&sde_crtc->idle_notify_work,
 					msecs_to_jiffies(idle_time));
@@ -4374,8 +4373,8 @@
 
 	drm_for_each_encoder_mask(encoder, crtc->dev,
 			crtc->state->encoder_mask) {
-		is_video_mode |= sde_encoder_check_curr_mode(encoder,
-			MSM_DISPLAY_VIDEO_MODE);
+		is_video_mode |= sde_encoder_check_mode(encoder,
+						MSM_DISPLAY_CAP_VID_MODE);
 	}
 
 	/*
@@ -5060,9 +5059,8 @@
 	cstate = to_sde_crtc_state(state);
 
 	drm_for_each_encoder_mask(encoder, crtc->dev, state->encoder_mask) {
-		if (sde_encoder_check_curr_mode(encoder,
-						MSM_DISPLAY_VIDEO_MODE))
-			is_vid = true;
+		is_vid |= sde_encoder_check_mode(encoder,
+						MSM_DISPLAY_CAP_VID_MODE);
 		if (is_vid)
 			break;
 	}
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 4973c8c..f28a0a2 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -170,8 +170,6 @@
  * @te_source:		vsync source pin information
  * @num_phys_encs:	Actual number of physical encoders contained.
  * @phys_encs:		Container of physical encoders managed.
- * @phys_vid_encs:		Video physical encoders for panel mode switch.
- * @phys_cmd_encs:		Command physical encoders for panel mode switch.
  * @cur_master:		Pointer to the current master in this mode. Optimization
  *			Only valid after enable. Cleared as disable.
  * @hw_pp		Handle to the pingpong blocks used for the display. No.
@@ -238,8 +236,6 @@
 
 	unsigned int num_phys_encs;
 	struct sde_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
-	struct sde_encoder_phys *phys_vid_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
-	struct sde_encoder_phys *phys_cmd_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
 	struct sde_encoder_phys *cur_master;
 	struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
 	struct sde_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
@@ -711,16 +707,8 @@
 	sde_rsc_client_destroy(sde_enc->rsc_client);
 
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		struct sde_encoder_phys *phys;
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
 
-		phys = sde_enc->phys_vid_encs[i];
-		if (phys && phys->ops.destroy) {
-			phys->ops.destroy(phys);
-			--sde_enc->num_phys_encs;
-			sde_enc->phys_encs[i] = NULL;
-		}
-
-		phys = sde_enc->phys_cmd_encs[i];
 		if (phys && phys->ops.destroy) {
 			phys->ops.destroy(phys);
 			--sde_enc->num_phys_encs;
@@ -1713,7 +1701,7 @@
 		return;
 	}
 
-	if (sde_encoder_check_curr_mode(&sde_enc->base, MSM_DISPLAY_CMD_MODE)) {
+	if (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) {
 		if (is_dummy)
 			vsync_source = SDE_VSYNC_SOURCE_WD_TIMER_0 -
 					sde_enc->te_source;
@@ -1918,9 +1906,9 @@
 	if (sde_encoder_in_clone_mode(drm_enc) || !disp_info->is_primary ||
 			  (disp_info->is_primary && qsync_mode))
 		rsc_state = enable ? SDE_RSC_CLK_STATE : SDE_RSC_IDLE_STATE;
-	else if (sde_encoder_check_curr_mode(drm_enc, MSM_DISPLAY_CMD_MODE))
+	else if (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
 		rsc_state = enable ? SDE_RSC_CMD_STATE : SDE_RSC_IDLE_STATE;
-	else if (sde_encoder_check_curr_mode(drm_enc, MSM_DISPLAY_VIDEO_MODE))
+	else if (disp_info->capabilities & MSM_DISPLAY_CAP_VID_MODE)
 		rsc_state = enable ? SDE_RSC_VID_STATE : SDE_RSC_IDLE_STATE;
 
 	SDE_EVT32(rsc_state, qsync_mode);
@@ -2058,15 +2046,14 @@
 	struct sde_kms *sde_kms;
 	struct sde_encoder_virt *sde_enc;
 	int rc;
-	bool is_cmd_mode = false, is_primary;
+	bool is_cmd_mode, is_primary;
 
 	sde_enc = to_sde_encoder_virt(drm_enc);
 	priv = drm_enc->dev->dev_private;
 	sde_kms = to_sde_kms(priv->kms);
 
-	if (sde_encoder_check_curr_mode(drm_enc, MSM_DISPLAY_CMD_MODE))
-		is_cmd_mode = true;
-
+	is_cmd_mode = sde_enc->disp_info.capabilities &
+			MSM_DISPLAY_CAP_CMD_MODE;
 	is_primary = sde_enc->disp_info.is_primary;
 
 	SDE_DEBUG_ENC(sde_enc, "enable:%d\n", enable);
@@ -2642,8 +2629,9 @@
 	}
 	sde_enc = to_sde_encoder_virt(drm_enc);
 	priv = drm_enc->dev->dev_private;
-	if (sde_encoder_check_curr_mode(&sde_enc->base, MSM_DISPLAY_VIDEO_MODE))
-		is_vid_mode = true;
+	is_vid_mode = sde_enc->disp_info.capabilities &
+						MSM_DISPLAY_CAP_VID_MODE;
+
 	/*
 	 * when idle_pc is not supported, process only KICKOFF, STOP and MODESET
 	 * events and return early for other events (ie wb display).
@@ -2702,32 +2690,6 @@
 	return ret;
 }
 
-static void sde_encoder_virt_mode_switch(enum sde_intf_mode intf_mode,
-					struct sde_encoder_virt *sde_enc,
-					struct drm_display_mode *adj_mode)
-{
-	int i = 0;
-
-	if (intf_mode == INTF_MODE_CMD) {
-		for (i = 0; i < sde_enc->num_phys_encs; i++)
-			sde_enc->phys_encs[i] = sde_enc->phys_vid_encs[i];
-		sde_enc->disp_info.curr_panel_mode = MSM_DISPLAY_VIDEO_MODE;
-		SDE_DEBUG_ENC(sde_enc, "switch to video physical encoder\n");
-		SDE_EVT32(DRMID(&sde_enc->base), intf_mode,
-			msm_is_mode_seamless_poms(adj_mode),
-			SDE_EVTLOG_FUNC_CASE1);
-	}
-	if (intf_mode == INTF_MODE_VIDEO) {
-		for (i = 0; i < sde_enc->num_phys_encs; i++)
-			sde_enc->phys_encs[i] = sde_enc->phys_cmd_encs[i];
-		sde_enc->disp_info.curr_panel_mode = MSM_DISPLAY_CMD_MODE;
-		SDE_EVT32(DRMID(&sde_enc->base), intf_mode,
-			msm_is_mode_seamless_poms(adj_mode),
-			SDE_EVTLOG_FUNC_CASE2);
-		SDE_DEBUG_ENC(sde_enc, "switch to command physical encoder\n");
-	}
-}
-
 static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
 				      struct drm_display_mode *mode,
 				      struct drm_display_mode *adj_mode)
@@ -2741,8 +2703,6 @@
 	struct sde_connector *sde_conn = NULL;
 	struct sde_rm_hw_iter dsc_iter, pp_iter;
 	struct sde_rm_hw_request request_hw;
-	enum sde_intf_mode intf_mode;
-
 	int i = 0, ret;
 
 	if (!drm_enc) {
@@ -2799,11 +2759,6 @@
 			return;
 		}
 	}
-	intf_mode = sde_encoder_get_intf_mode(drm_enc);
-
-	/* Switch pysical encoder */
-	if (msm_is_mode_seamless_poms(adj_mode))
-		sde_encoder_virt_mode_switch(intf_mode, sde_enc, adj_mode);
 
 	/* release resources before seamless mode change */
 	if (msm_is_mode_seamless_dms(adj_mode)) {
@@ -3210,8 +3165,8 @@
 				phys->ops.enable(phys);
 		}
 
-		if (sde_enc->misr_enable  && phys->ops.setup_misr &&
-		(sde_encoder_check_curr_mode(drm_enc, MSM_DISPLAY_VIDEO_MODE)))
+		if (sde_enc->misr_enable && (sde_enc->disp_info.capabilities &
+		     MSM_DISPLAY_CAP_VID_MODE) && phys->ops.setup_misr)
 			phys->ops.setup_misr(phys, true,
 						sde_enc->misr_frame_count);
 	}
@@ -3568,8 +3523,7 @@
 {
 	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
 	unsigned int i;
-	bool trigger = true;
-	bool is_cmd_mode = false;
+	bool trigger = true, is_cmd_mode;
 	enum sde_rm_topology_name topology = SDE_RM_TOPOLOGY_NONE;
 
 	if (!drm_enc || !sde_enc->cur_master) {
@@ -3580,8 +3534,8 @@
 
 	sde_enc->crtc_frame_event_cb_data.connector =
 				sde_enc->cur_master->connector;
-	if (sde_encoder_check_curr_mode(drm_enc, MSM_DISPLAY_CMD_MODE))
-		is_cmd_mode = true;
+	is_cmd_mode = sde_enc->disp_info.capabilities &
+					MSM_DISPLAY_CAP_CMD_MODE;
 
 	if (event & (SDE_ENCODER_FRAME_EVENT_DONE
 			| SDE_ENCODER_FRAME_EVENT_ERROR
@@ -3900,8 +3854,8 @@
 		return;
 	}
 
-	if (sde_encoder_check_curr_mode(&sde_enc->base, MSM_DISPLAY_VIDEO_MODE))
-		is_vid_mode = true;
+	is_vid_mode = sde_enc->disp_info.capabilities &
+					MSM_DISPLAY_CAP_VID_MODE;
 
 	/* don't perform flush/start operations for slave encoders */
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
@@ -4108,7 +4062,7 @@
 	}
 }
 
-bool sde_encoder_check_curr_mode(struct drm_encoder *drm_enc, u32 mode)
+bool sde_encoder_check_mode(struct drm_encoder *drm_enc, u32 mode)
 {
 	struct sde_encoder_virt *sde_enc;
 	struct msm_display_info *disp_info;
@@ -4121,7 +4075,7 @@
 	sde_enc = to_sde_encoder_virt(drm_enc);
 	disp_info = &sde_enc->disp_info;
 
-	return (disp_info->curr_panel_mode == mode);
+	return (disp_info->capabilities & mode);
 }
 
 void sde_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
@@ -4155,9 +4109,8 @@
 
 			/* update only for command mode primary ctl */
 			if ((phys == sde_enc->cur_master) &&
-				(sde_encoder_check_curr_mode(drm_enc,
-					MSM_DISPLAY_CMD_MODE))
-				&& ctl->ops.trigger_pending)
+			   (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
+			    && ctl->ops.trigger_pending)
 				ctl->ops.trigger_pending(ctl);
 		}
 	}
@@ -4638,7 +4591,7 @@
 
 
 	if (sde_enc->cur_master && sde_enc->cur_master->connector &&
-	    sde_encoder_check_curr_mode(drm_enc, MSM_DISPLAY_CMD_MODE))
+	    disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
 		sde_enc->frame_trigger_mode = sde_connector_get_property(
 			sde_enc->cur_master->connector->state,
 			CONNECTOR_PROP_CMD_FRAME_TRIGGER_MODE);
@@ -5166,12 +5119,11 @@
 }
 
 static int sde_encoder_virt_add_phys_encs(
-		struct msm_display_info *disp_info,
+		u32 display_caps,
 		struct sde_encoder_virt *sde_enc,
 		struct sde_enc_phys_init_params *params)
 {
 	struct sde_encoder_phys *enc = NULL;
-	u32 display_caps = disp_info->capabilities;
 
 	SDE_DEBUG_ENC(sde_enc, "\n");
 
@@ -5195,7 +5147,8 @@
 			return !enc ? -EINVAL : PTR_ERR(enc);
 		}
 
-		sde_enc->phys_vid_encs[sde_enc->num_phys_encs] = enc;
+		sde_enc->phys_encs[sde_enc->num_phys_encs] = enc;
+		++sde_enc->num_phys_encs;
 	}
 
 	if (display_caps & MSM_DISPLAY_CAP_CMD_MODE) {
@@ -5206,18 +5159,11 @@
 				PTR_ERR(enc));
 			return !enc ? -EINVAL : PTR_ERR(enc);
 		}
-		sde_enc->phys_cmd_encs[sde_enc->num_phys_encs] = enc;
+
+		sde_enc->phys_encs[sde_enc->num_phys_encs] = enc;
+		++sde_enc->num_phys_encs;
 	}
 
-	if (disp_info->curr_panel_mode == MSM_DISPLAY_VIDEO_MODE)
-		sde_enc->phys_encs[sde_enc->num_phys_encs] =
-			sde_enc->phys_vid_encs[sde_enc->num_phys_encs];
-	else
-		sde_enc->phys_encs[sde_enc->num_phys_encs] =
-			sde_enc->phys_cmd_encs[sde_enc->num_phys_encs];
-
-	++sde_enc->num_phys_encs;
-
 	return 0;
 }
 
@@ -5366,7 +5312,7 @@
 						&phys_params);
 			else
 				ret = sde_encoder_virt_add_phys_encs(
-						disp_info,
+						disp_info->capabilities,
 						sde_enc,
 						&phys_params);
 			if (ret)
@@ -5376,19 +5322,12 @@
 	}
 
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		struct sde_encoder_phys *vid_phys = sde_enc->phys_vid_encs[i];
-		struct sde_encoder_phys *cmd_phys = sde_enc->phys_cmd_encs[i];
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
 
-		if (vid_phys) {
-			atomic_set(&vid_phys->vsync_cnt, 0);
-			atomic_set(&vid_phys->underrun_cnt, 0);
+		if (phys) {
+			atomic_set(&phys->vsync_cnt, 0);
+			atomic_set(&phys->underrun_cnt, 0);
 		}
-
-		if (cmd_phys) {
-			atomic_set(&cmd_phys->vsync_cnt, 0);
-			atomic_set(&cmd_phys->underrun_cnt, 0);
-		}
-
 	}
 	mutex_unlock(&sde_enc->enc_lock);
 
@@ -5463,7 +5402,7 @@
 		sde_enc->rsc_client = NULL;
 	}
 
-	if (disp_info->curr_panel_mode == MSM_DISPLAY_CMD_MODE) {
+	if (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) {
 		ret = _sde_encoder_input_handler(sde_enc);
 		if (ret)
 			SDE_ERROR(
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
index 83c43d5..a03ec6d 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -201,12 +201,12 @@
 bool sde_encoder_is_dsc_merge(struct drm_encoder *drm_enc);
 
 /**
- * sde_encoder_check_curr_mode - check if given mode is supported or not
+ * sde_encoder_check_mode - check if given mode is supported or not
  * @drm_enc: Pointer to drm encoder object
  * @mode: Mode to be checked
  * @Return: true if it is cmd mode
  */
-bool sde_encoder_check_curr_mode(struct drm_encoder *drm_enc, u32 mode);
+bool sde_encoder_check_mode(struct drm_encoder *drm_enc, u32 mode);
 
 /**
  * sde_encoder_init - initialize virtual encoder object
diff --git a/drivers/gpu/drm/msm/sde_hdcp.h b/drivers/gpu/drm/msm/sde_hdcp.h
index 03178ca..5c1dc4a 100644
--- a/drivers/gpu/drm/msm/sde_hdcp.h
+++ b/drivers/gpu/drm/msm/sde_hdcp.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2012, 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012, 2014-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef __SDE_HDCP_H__
@@ -13,12 +13,15 @@
 #include <linux/debugfs.h>
 #include <linux/of_device.h>
 #include <linux/i2c.h>
+#include <linux/list.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
 #include <linux/hdcp_qseecom.h>
 #include "sde_kms.h"
 
+#define MAX_STREAM_COUNT 2
+
 enum sde_hdcp_client_id {
 	HDCP_CLIENT_HDMI,
 	HDCP_CLIENT_DP,
@@ -38,6 +41,18 @@
 	HDCP_VERSION_MAX = BIT(2),
 };
 
+struct stream_info {
+	u8 stream_id;
+	u8 virtual_channel;
+};
+
+struct sde_hdcp_stream {
+	struct list_head list;
+	u8 stream_id;
+	u8 virtual_channel;
+	u32 stream_handle;
+};
+
 struct sde_hdcp_init_data {
 	struct device *msm_hdcp_dev;
 	struct dss_io_data *core_io;
@@ -67,7 +82,13 @@
 	bool (*feature_supported)(void *input);
 	void (*force_encryption)(void *input, bool enable);
 	bool (*sink_support)(void *input);
+	int (*set_mode)(void *input, bool mst_enabled);
+	int (*on)(void *input);
 	void (*off)(void *hdcp_ctrl);
+	int (*register_streams)(void *input, u8 num_streams,
+			struct stream_info *streams);
+	int (*deregister_streams)(void *input, u8 num_streams,
+			struct stream_info *streams);
 };
 
 static inline const char *sde_hdcp_state_name(enum sde_hdcp_state hdcp_state)
diff --git a/drivers/gpu/drm/msm/sde_hdcp_2x.c b/drivers/gpu/drm/msm/sde_hdcp_2x.c
index c72ab1b..f578e09 100644
--- a/drivers/gpu/drm/msm/sde_hdcp_2x.c
+++ b/drivers/gpu/drm/msm/sde_hdcp_2x.c
@@ -64,6 +64,10 @@
 	atomic_t hdcp_off;
 	enum sde_hdcp_2x_device_type device_type;
 	u8 min_enc_level;
+	struct list_head stream_handles;
+	u8 stream_count;
+	struct stream_info *streams;
+	u8 num_streams;
 
 	struct task_struct *thread;
 	struct completion response_completion;
@@ -315,6 +319,8 @@
 
 static void sde_hdcp_2x_clean(struct sde_hdcp_2x_ctrl *hdcp)
 {
+	struct list_head *element;
+	struct sde_hdcp_stream *stream_entry;
 	struct hdcp_transport_wakeup_data cdata = {HDCP_TRANSPORT_CMD_INVALID};
 
 	hdcp->authenticated = false;
@@ -322,10 +328,20 @@
 	cdata.context = hdcp->client_data;
 	cdata.cmd = HDCP_TRANSPORT_CMD_STATUS_FAILED;
 
-	if (!atomic_read(&hdcp->hdcp_off))
-		sde_hdcp_2x_wakeup_client(hdcp, &cdata);
+	while (!list_empty(&hdcp->stream_handles)) {
+		element = hdcp->stream_handles.next;
+		list_del(element);
 
-	atomic_set(&hdcp->hdcp_off, 1);
+		stream_entry = list_entry(element, struct sde_hdcp_stream,
+			list);
+		hdcp2_close_stream(hdcp->hdcp2_ctx,
+			stream_entry->stream_handle);
+		kzfree(stream_entry);
+		hdcp->stream_count--;
+	}
+
+	if (!atomic_xchg(&hdcp->hdcp_off, 1))
+		sde_hdcp_2x_wakeup_client(hdcp, &cdata);
 
 	hdcp2_app_comm(hdcp->hdcp2_ctx, HDCP2_CMD_STOP, &hdcp->app_data);
 }
@@ -477,19 +493,26 @@
 static void sde_hdcp_2x_init(struct sde_hdcp_2x_ctrl *hdcp)
 {
 	int rc;
-
 	rc = hdcp2_app_comm(hdcp->hdcp2_ctx, HDCP2_CMD_START, &hdcp->app_data);
 	if (rc)
-		goto exit;
+		sde_hdcp_2x_clean(hdcp);
+}
 
-	pr_debug("[tz]: %s\n", sde_hdcp_2x_message_name(
-		hdcp->app_data.response.data[0]));
+static void sde_hdcp_2x_start_auth(struct sde_hdcp_2x_ctrl *hdcp)
+{
+	int rc;
+
+	rc = hdcp2_app_comm(hdcp->hdcp2_ctx, HDCP2_CMD_START_AUTH,
+		&hdcp->app_data);
+	if (rc) {
+		sde_hdcp_2x_clean(hdcp);
+		return;
+	}
+
+	pr_debug("message received from TZ: %s\n",
+		 sde_hdcp_2x_message_name(hdcp->app_data.response.data[0]));
 
 	sde_hdcp_2x_send_message(hdcp);
-
-	return;
-exit:
-	sde_hdcp_2x_clean(hdcp);
 }
 
 static void sde_hdcp_2x_timeout(struct sde_hdcp_2x_ctrl *hdcp)
@@ -539,7 +562,8 @@
 		goto exit;
 	}
 
-	if (hdcp->device_type == HDCP_TXMTR_DP) {
+	if (hdcp->device_type == HDCP_TXMTR_DP ||
+			hdcp->device_type == HDCP_TXMTR_DP_MST) {
 		msg[0] = hdcp->last_msg;
 		message_id_bytes = 1;
 	}
@@ -625,6 +649,147 @@
 		sde_hdcp_2x_clean(hdcp);
 }
 
+static struct list_head *sde_hdcp_2x_stream_present(
+		struct sde_hdcp_2x_ctrl *hdcp, u8 stream_id, u8 virtual_channel)
+{
+	struct sde_hdcp_stream *stream_entry;
+	struct list_head *entry;
+	bool present = false;
+
+	list_for_each(entry, &hdcp->stream_handles) {
+		stream_entry = list_entry(entry,
+			struct sde_hdcp_stream, list);
+		if (stream_entry->virtual_channel == virtual_channel &&
+				stream_entry->stream_id == stream_id) {
+			present = true;
+			break;
+		}
+	}
+
+	if (!present)
+		entry = NULL;
+	return entry;
+}
+
+static void sde_hdcp_2x_open_stream(struct sde_hdcp_2x_ctrl *hdcp)
+{
+	int rc;
+	size_t iterations, i;
+	u8 stream_id;
+	u8 virtual_channel;
+	u32 stream_handle = 0;
+	bool query_streams = false;
+
+	if (!hdcp->streams) {
+		pr_err("Array of streams to register is NULL\n");
+		return;
+	}
+
+	iterations = min(hdcp->num_streams, (u8)(MAX_STREAM_COUNT));
+
+	for (i = 0; i < iterations; i++) {
+		if (hdcp->stream_count == MAX_STREAM_COUNT) {
+			pr_debug("Registered the maximum amount of streams\n");
+			break;
+		}
+
+		stream_id = hdcp->streams[i].stream_id;
+		virtual_channel = hdcp->streams[i].virtual_channel;
+
+		pr_debug("Opening stream %d, virtual channel %d\n",
+			stream_id, virtual_channel);
+
+		if (sde_hdcp_2x_stream_present(hdcp, stream_id,
+				virtual_channel)) {
+			pr_debug("Stream %d, virtual channel %d already open\n",
+				stream_id, virtual_channel);
+			continue;
+		}
+
+		rc = hdcp2_open_stream(hdcp->hdcp2_ctx, virtual_channel,
+				stream_id, &stream_handle);
+		if (rc) {
+			pr_err("Unable to open stream %d, virtual channel %d\n",
+				stream_id, virtual_channel);
+		} else {
+			struct sde_hdcp_stream *stream =
+				kzalloc(sizeof(struct sde_hdcp_stream),
+					GFP_KERNEL);
+			if (!stream)
+				break;
+
+			INIT_LIST_HEAD(&stream->list);
+			stream->stream_handle = stream_handle;
+			stream->stream_id = stream_id;
+			stream->virtual_channel = virtual_channel;
+
+			list_add(&stream->list, &hdcp->stream_handles);
+			hdcp->stream_count++;
+
+			query_streams = true;
+		}
+	}
+
+	if (query_streams && hdcp->authenticated)
+		sde_hdcp_2x_query_stream(hdcp);
+}
+
+static void sde_hdcp_2x_close_stream(struct sde_hdcp_2x_ctrl *hdcp)
+{
+	int rc;
+	size_t iterations, i;
+	u8 stream_id;
+	u8 virtual_channel;
+	struct list_head *entry;
+	struct sde_hdcp_stream *stream_entry;
+	bool query_streams = false;
+
+	if (!hdcp->streams) {
+		pr_err("Array of streams to register is NULL\n");
+		return;
+	}
+
+	iterations = min(hdcp->num_streams, (u8)(MAX_STREAM_COUNT));
+
+	for (i = 0; i < iterations; i++) {
+		if (hdcp->stream_count == 0) {
+			pr_debug("No streams are currently registered\n");
+			return;
+		}
+
+		stream_id = hdcp->streams[i].stream_id;
+		virtual_channel = hdcp->streams[i].virtual_channel;
+
+		pr_debug("Closing stream %d, virtual channel %d\n",
+			stream_id, virtual_channel);
+
+		entry = sde_hdcp_2x_stream_present(hdcp, stream_id,
+			virtual_channel);
+
+		if (!entry) {
+			pr_err("Unable to find stream %d, virtual channel %d\n"
+				, stream_id, virtual_channel);
+			continue;
+		}
+
+		stream_entry = list_entry(entry, struct sde_hdcp_stream,
+			list);
+
+		rc = hdcp2_close_stream(hdcp->hdcp2_ctx,
+			stream_entry->stream_handle);
+		if (rc)
+			pr_err("Unable to close stream %d, virtual channel %d\n"
+				, stream_id, virtual_channel);
+		hdcp->stream_count--;
+		list_del(entry);
+		kzfree(stream_entry);
+		query_streams = true;
+	}
+
+	if (query_streams && hdcp->authenticated)
+		sde_hdcp_2x_query_stream(hdcp);
+}
+
 /** sde_hdcp_2x_wakeup() - wakeup the module to execute a requested command
  * @data: data required for executing corresponding command.
  *
@@ -648,6 +813,8 @@
 	hdcp->timeout_left = data->timeout;
 	hdcp->total_message_length = data->total_message_length;
 	hdcp->min_enc_level = data->min_enc_level;
+	hdcp->streams = data->streams;
+	hdcp->num_streams = data->num_streams;
 
 	if (!completion_done(&hdcp->response_completion))
 		complete_all(&hdcp->response_completion);
@@ -709,6 +876,9 @@
 		case HDCP_2X_CMD_STOP:
 			sde_hdcp_2x_clean(hdcp);
 			break;
+		case HDCP_2X_CMD_START_AUTH:
+			sde_hdcp_2x_start_auth(hdcp);
+			break;
 		case HDCP_2X_CMD_MSG_SEND_SUCCESS:
 			sde_hdcp_2x_msg_sent(hdcp);
 			break;
@@ -733,6 +903,12 @@
 			}
 			sde_hdcp_2x_query_stream(hdcp);
 			break;
+		case HDCP_2X_CMD_OPEN_STREAMS:
+			sde_hdcp_2x_open_stream(hdcp);
+			break;
+		case HDCP_2X_CMD_CLOSE_STREAMS:
+			sde_hdcp_2x_close_stream(hdcp);
+			break;
 		default:
 			break;
 		}
@@ -777,16 +953,14 @@
 		goto unlock;
 	}
 
+	INIT_LIST_HEAD(&hdcp->stream_handles);
 	hdcp->client_data = data->client_data;
 	hdcp->client_ops = data->client_ops;
-	hdcp->device_type = data->device_type;
-
-	hdcp->hdcp2_ctx = hdcp2_init(hdcp->device_type);
 
 	INIT_KFIFO(hdcp->cmd_q);
 
 	init_waitqueue_head(&hdcp->wait_q);
-	atomic_set(&hdcp->hdcp_off, 0);
+	atomic_set(&hdcp->hdcp_off, 1);
 
 	init_completion(&hdcp->response_completion);
 
@@ -811,6 +985,40 @@
 	return rc;
 }
 
+int sde_hdcp_2x_enable(void *data, enum sde_hdcp_2x_device_type device_type)
+{
+	int rc =  0;
+	struct sde_hdcp_2x_ctrl *hdcp = data;
+
+	if (!hdcp)
+		return -EINVAL;
+
+	if (hdcp->hdcp2_ctx) {
+		pr_debug("HDCP library context already acquired\n");
+		return 0;
+	}
+
+	hdcp->device_type = device_type;
+	hdcp->hdcp2_ctx = hdcp2_init(hdcp->device_type);
+	if (!hdcp->hdcp2_ctx) {
+		pr_err("Unable to acquire HDCP library handle\n");
+		return -ENOMEM;
+	}
+
+	return rc;
+}
+
+void sde_hdcp_2x_disable(void *data)
+{
+	struct sde_hdcp_2x_ctrl *hdcp = data;
+
+	if (!hdcp->hdcp2_ctx)
+		return;
+
+	hdcp2_deinit(hdcp->hdcp2_ctx);
+	hdcp->hdcp2_ctx = NULL;
+}
+
 void sde_hdcp_2x_deregister(void *data)
 {
 	struct sde_hdcp_2x_ctrl *hdcp = data;
@@ -818,7 +1026,7 @@
 	if (!hdcp)
 		return;
 
+	sde_hdcp_2x_disable(data);
 	kthread_stop(hdcp->thread);
-	hdcp2_deinit(hdcp->hdcp2_ctx);
 	kzfree(hdcp);
 }
diff --git a/drivers/gpu/drm/msm/sde_hdcp_2x.h b/drivers/gpu/drm/msm/sde_hdcp_2x.h
index 47247e4..cfcd7ce 100644
--- a/drivers/gpu/drm/msm/sde_hdcp_2x.h
+++ b/drivers/gpu/drm/msm/sde_hdcp_2x.h
@@ -15,8 +15,9 @@
 /**
  * enum sde_hdcp_2x_wakeup_cmd - commands for interacting with HDCP driver
  * @HDCP_2X_CMD_INVALID:           initialization value
- * @HDCP_2X_CMD_START:             start authentication
- * @HDCP_2X_CMD_STOP:              stop authentication
+ * @HDCP_2X_CMD_START:             start HDCP driver
+ * @HDCP_2X_CMD_START_AUTH:        start authentication
+ * @HDCP_2X_CMD_STOP:              stop HDCP driver
  * @HDCP_2X_CMD_MSG_SEND_SUCCESS:  sending message to sink succeeded
  * @HDCP_2X_CMD_MSG_SEND_FAILED:   sending message to sink failed
  * @HDCP_2X_CMD_MSG_SEND_TIMEOUT:  sending message to sink timed out
@@ -26,10 +27,13 @@
  * @HDCP_2X_CMD_QUERY_STREAM_TYPE: start content stream processing
  * @HDCP_2X_CMD_LINK_FAILED:       link failure notification
  * @HDCP_2X_CMD_MIN_ENC_LEVEL:     trigger minimum encryption level change
+ * @HDCP_2X_CMD_OPEN_STREAMS:      open a virtual channel
+ * @HDCP_2X_CMD_CLOSE_STREAMS:     close a virtual channel
  */
 enum sde_hdcp_2x_wakeup_cmd {
 	HDCP_2X_CMD_INVALID,
 	HDCP_2X_CMD_START,
+	HDCP_2X_CMD_START_AUTH,
 	HDCP_2X_CMD_STOP,
 	HDCP_2X_CMD_MSG_SEND_SUCCESS,
 	HDCP_2X_CMD_MSG_SEND_FAILED,
@@ -40,6 +44,8 @@
 	HDCP_2X_CMD_QUERY_STREAM_TYPE,
 	HDCP_2X_CMD_LINK_FAILED,
 	HDCP_2X_CMD_MIN_ENC_LEVEL,
+	HDCP_2X_CMD_OPEN_STREAMS,
+	HDCP_2X_CMD_CLOSE_STREAMS,
 };
 
 /**
@@ -66,16 +72,19 @@
 
 enum sde_hdcp_2x_device_type {
 	HDCP_TXMTR_HDMI = 0x8001,
-	HDCP_TXMTR_DP = 0x8002
+	HDCP_TXMTR_DP = 0x8002,
+	HDCP_TXMTR_DP_MST = 0x8003
 };
 
 /**
  * struct sde_hdcp_2x_lib_wakeup_data - command and data send to HDCP driver
- * @cmd:       command type
- * @context:   void pointer to the HDCP driver instance
- * @buf:       message received from the sink
- * @buf_len:   length of message received from the sink
- * @timeout:   time out value for timed transactions
+ * @cmd:                       command type
+ * @context:                   void pointer to the HDCP driver instance
+ * @buf:                       message received from the sink
+ * @buf_len:                   length of message received from the sink
+ * @timeout:                   time out value for timed transactions
+ * @streams:                   list indicating which streams need adjustment
+ * @num_streams:               number of entries in streams
  */
 struct sde_hdcp_2x_wakeup_data {
 	enum sde_hdcp_2x_wakeup_cmd cmd;
@@ -83,6 +92,8 @@
 	uint32_t total_message_length;
 	uint32_t timeout;
 	u8 min_enc_level;
+	struct stream_info *streams;
+	u8 num_streams;
 };
 
 /**
@@ -151,6 +162,10 @@
 		return TO_STR(HDCP_2X_CMD_MSG_RECV_TIMEOUT);
 	case HDCP_2X_CMD_QUERY_STREAM_TYPE:
 		return TO_STR(HDCP_2X_CMD_QUERY_STREAM_TYPE);
+	case HDCP_2X_CMD_OPEN_STREAMS:
+		return TO_STR(HDCP_2X_CMD_OPEN_STREAMS);
+	case HDCP_2X_CMD_CLOSE_STREAMS:
+		return TO_STR(HDCP_2X_CMD_CLOSE_STREAMS);
 	default:
 		return "UNKNOWN";
 	}
@@ -190,12 +205,13 @@
 struct sde_hdcp_2x_register_data {
 	struct hdcp_transport_ops *client_ops;
 	struct sde_hdcp_2x_ops *ops;
-	enum sde_hdcp_2x_device_type device_type;
 	void *client_data;
 	void **hdcp_data;
 };
 
 /* functions for the HDCP 2.2 state machine module */
 int sde_hdcp_2x_register(struct sde_hdcp_2x_register_data *data);
+int sde_hdcp_2x_enable(void *data, enum sde_hdcp_2x_device_type device_type);
+void sde_hdcp_2x_disable(void *data);
 void sde_hdcp_2x_deregister(void *data);
 #endif
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index b9c24ac..345a2b5 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2008-2019, The Linux Foundation. All rights reserved.
  */
 #include <linux/module.h>
 #include <linux/fb.h>
@@ -26,6 +26,8 @@
 #include <linux/ion.h>
 #include <asm/cacheflush.h>
 #include <uapi/linux/sched/types.h>
+#include <linux/of_fdt.h>
+#include <linux/msm-bus.h>
 
 #include "kgsl.h"
 #include "kgsl_debugfs.h"
@@ -1354,6 +1356,33 @@
 	spin_unlock(&entry->priv->mem_lock);
 }
 
+struct msm_bus_scale_pdata *kgsl_get_bus_scale_table(struct kgsl_device *device)
+{
+	struct device_node *child = NULL, *parent;
+	char str[24];
+
+	parent = device->pdev->dev.of_node;
+
+	snprintf(str, sizeof(str), "qcom,gpu-bus-table-ddr%d",
+		of_fdt_get_ddrtype());
+
+	child = of_find_compatible_node(parent, NULL, str);
+
+	/* Go with the first bus table node */
+	if (child == NULL)
+		child = of_find_compatible_node(parent, NULL,
+			"qcom,gpu-bus-table");
+
+	if (child) {
+		struct msm_bus_scale_pdata *data = msm_bus_pdata_from_node(
+					device->pdev, child);
+		of_node_put(child);
+		return data;
+	}
+
+	return msm_bus_cl_get_pdata(device->pdev);
+}
+
 /**
  * kgsl_mem_entry_set_pend() - Set the pending free flag of a memory entry
  * @entry - The memory entry
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 4afd45b..d6784b1 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2002,2007-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2002,2007-2019, The Linux Foundation. All rights reserved.
  */
 #ifndef __KGSL_DEVICE_H
 #define __KGSL_DEVICE_H
@@ -915,6 +915,21 @@
 	void *priv);
 
 /**
+ * kgsl_get_bus_scale_table() - Get the bus scaling table from devicetree
+ * @device: kgsl device handle
+ *
+ * This function tries to find the correct bus table data in the device
+ * tree based on the underlying DDR type. If no matching child node is
+ * found, it falls back to the first child node containing the bus scaling
+ * data. If no child node is found at all, it uses the current device node,
+ * expecting the bus scaling data to be provided as its properties.
+ *
+ * Return: Pointer to the structure containing the parsed bus scaling data
+ */
+struct msm_bus_scale_pdata *kgsl_get_bus_scale_table(
+	struct kgsl_device *device);
+
+/**
  * struct kgsl_pwr_limit - limit structure for each client
  * @node: Local list node for the limits list
  * @level: requested power level
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index ffb52fc..19bb72c 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -1111,9 +1111,9 @@
 
 static int gmu_gpu_bw_probe(struct kgsl_device *device, struct gmu_device *gmu)
 {
-	struct msm_bus_scale_pdata *bus_scale_table;
+	struct msm_bus_scale_pdata *bus_scale_table =
+		kgsl_get_bus_scale_table(device);
 
-	bus_scale_table = msm_bus_cl_get_pdata(device->pdev);
 	if (bus_scale_table == NULL) {
 		dev_err(&gmu->pdev->dev, "dt: cannot get bus table\n");
 		return -ENODEV;
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index e7c51d6..7c76a8a 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/export.h>
@@ -2044,7 +2044,7 @@
 	struct device_node *gpubw_dev_node = NULL;
 	struct platform_device *p2dev;
 
-	bus_scale_table = msm_bus_cl_get_pdata(device->pdev);
+	bus_scale_table = kgsl_get_bus_scale_table(device);
 	if (bus_scale_table == NULL)
 		return -EINVAL;
 
diff --git a/drivers/hwtracing/coresight/coresight-hwevent.c b/drivers/hwtracing/coresight/coresight-hwevent.c
index 1af5fb9..51e5b6f 100644
--- a/drivers/hwtracing/coresight/coresight-hwevent.c
+++ b/drivers/hwtracing/coresight/coresight-hwevent.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, 2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/kernel.h>
@@ -170,8 +170,8 @@
 	drvdata->nr_hmux = of_property_count_strings(node,
 						     "reg-names");
 
-	if (!drvdata->nr_hmux)
-		return -ENODEV;
+	if (drvdata->nr_hmux < 0)
+		drvdata->nr_hmux = 0;
 
 	if (drvdata->nr_hmux > 0) {
 		drvdata->hmux = devm_kzalloc(drvdata->dev, drvdata->nr_hmux *
@@ -191,9 +191,8 @@
 				return -ENODEV;
 			drvdata->hmux[i].start = res->start;
 			drvdata->hmux[i].end = res->end;
+
 		}
-	} else {
-		return drvdata->nr_hmux;
 	}
 
 	return 0;
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index b3dae10d..2ccb9ce 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -3435,6 +3435,13 @@
 					num_cmd_buf--;
 				goto rel_cmd_buf;
 			}
+			if ((len <= cmd_desc[i].offset) ||
+				(cmd_desc[i].size < cmd_desc[i].length) ||
+				((len - cmd_desc[i].offset) <
+				cmd_desc[i].length)) {
+				CAM_ERR(CAM_ICP, "Invalid offset or length");
+				goto rel_cmd_buf;
+			}
 			cpu_addr = cpu_addr + cmd_desc[i].offset;
 		}
 	}
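
The added check rejects any command descriptor whose offset or length would run past the mapped buffer, and it only evaluates len - offset after confirming the offset is inside the buffer, so the unsigned subtraction cannot wrap. A standalone sketch of the same validation (illustrative names, not driver API):

	#include <stdbool.h>
	#include <stddef.h>

	/*
	 * Illustrative only: accept a descriptor iff [offset, offset + want)
	 * fits inside a mapped buffer of mapped_len bytes, without letting
	 * the unsigned subtraction wrap around.
	 */
	static bool desc_in_bounds(size_t mapped_len, size_t offset,
				   size_t declared_size, size_t want)
	{
		if (offset >= mapped_len)
			return false;	/* offset already past the buffer */
		if (declared_size < want)
			return false;	/* descriptor claims less than it uses */
		if (mapped_len - offset < want)
			return false;	/* tail too small; safe since offset < mapped_len */
		return true;
	}
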
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
index 981d7bba..864b37e 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
@@ -41,6 +41,7 @@
 	link->last_flush_id = 0;
 	link->initial_sync_req = -1;
 	link->in_msync_mode = false;
+	link->retry_cnt = 0;
 }
 
 void cam_req_mgr_handle_core_shutdown(void)
@@ -167,6 +168,46 @@
 }
 
 /**
+ * __cam_req_mgr_notify_error_on_link()
+ *
+ * @brief : Notify userspace when the maximum number of retry
+ *          attempts to apply the same req is exceeded
+ * @link  : link on which the req could not be applied
+ *
+ */
+static int __cam_req_mgr_notify_error_on_link(
+	struct cam_req_mgr_core_link    *link)
+{
+	struct cam_req_mgr_core_session *session = NULL;
+	struct cam_req_mgr_message       msg;
+	int rc = 0;
+
+	session = (struct cam_req_mgr_core_session *)link->parent;
+
+	CAM_ERR(CAM_CRM,
+		"Notifying userspace to trigger recovery on link 0x%x for session %d",
+		link->link_hdl, session->session_hdl);
+
+	memset(&msg, 0, sizeof(msg));
+
+	msg.session_hdl = session->session_hdl;
+	msg.u.err_msg.error_type = CAM_REQ_MGR_ERROR_TYPE_RECOVERY;
+	msg.u.err_msg.request_id = 0;
+	msg.u.err_msg.link_hdl   = link->link_hdl;
+
+	rc = cam_req_mgr_notify_message(&msg,
+		V4L_EVENT_CAM_REQ_MGR_ERROR,
+		V4L_EVENT_CAM_REQ_MGR_EVENT);
+
+	if (rc)
+		CAM_ERR(CAM_CRM,
+			"Error in notifying recovery for session %d link 0x%x rc %d",
+			session->session_hdl, link->link_hdl, rc);
+
+	return rc;
+}
+
+/**
  * __cam_req_mgr_traverse()
  *
  * @brief    : Traverse through pd tables, it will internally cover all linked
@@ -1092,7 +1133,20 @@
 	if (rc < 0) {
 		/* Apply req failed retry at next sof */
 		slot->status = CRM_SLOT_STATUS_REQ_PENDING;
+
+		link->retry_cnt++;
+		if (link->retry_cnt == MAXIMUM_RETRY_ATTEMPTS) {
+			CAM_DBG(CAM_CRM,
+				"Max retry attempts reached on link[0x%x] for req [%lld]",
+				link->link_hdl,
+				in_q->slot[in_q->rd_idx].req_id);
+			__cam_req_mgr_notify_error_on_link(link);
+			link->retry_cnt = 0;
+		}
 	} else {
+		if (link->retry_cnt)
+			link->retry_cnt = 0;
+
 		link->trigger_mask |= trigger;
 
 		CAM_DBG(CAM_CRM, "Applied req[%lld] on link[%x] success",
@@ -1342,7 +1396,7 @@
 	memset(&msg, 0, sizeof(msg));
 
 	msg.session_hdl = session->session_hdl;
-	msg.u.err_msg.error_type = CAM_REQ_MGR_ERROR_TYPE_DEVICE;
+	msg.u.err_msg.error_type = CAM_REQ_MGR_ERROR_TYPE_RECOVERY;
 	msg.u.err_msg.request_id = 0;
 	msg.u.err_msg.link_hdl   = link->link_hdl;
 
@@ -1586,6 +1640,7 @@
 	link->req.in_q = NULL;
 	i = link - g_links;
 	CAM_DBG(CAM_CRM, "free link index %d", i);
+	cam_req_mgr_core_link_reset(link);
 	atomic_set(&g_links[i].is_used, 0);
 }
 
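
The retry logic above leaves a failed apply pending for the next SOF, escalates to userspace only after MAXIMUM_RETRY_ATTEMPTS consecutive failures, and clears the counter as soon as an apply succeeds. A self-contained sketch of that bounded-retry-then-notify pattern (demo_* names are invented):

	#define DEMO_MAX_RETRY_ATTEMPTS 3	/* mirrors MAXIMUM_RETRY_ATTEMPTS */

	struct demo_link {
		unsigned int retry_cnt;
		unsigned int fail_budget;	/* stub: how many applies still fail */
	};

	static int demo_apply_req(struct demo_link *link)
	{
		if (link->fail_budget) {
			link->fail_budget--;
			return -1;		/* pretend the apply failed */
		}
		return 0;
	}

	static void demo_notify_error(struct demo_link *link)
	{
		/* stand-in for __cam_req_mgr_notify_error_on_link() */
		(void)link;
	}

	static void demo_apply_with_retry(struct demo_link *link)
	{
		if (demo_apply_req(link) < 0) {
			if (++link->retry_cnt == DEMO_MAX_RETRY_ATTEMPTS) {
				demo_notify_error(link);	/* ask userspace to recover */
				link->retry_cnt = 0;
			}
			return;		/* request stays pending for the next trigger */
		}

		link->retry_cnt = 0;	/* success clears the failure streak */
	}
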
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
index 8f07b3b..9a6acbc 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
@@ -29,6 +29,8 @@
 
 #define MAXIMUM_LINKS_PER_SESSION  4
 
+#define MAXIMUM_RETRY_ATTEMPTS 3
+
 /**
  * enum crm_workq_task_type
  * @codes: to identify which type of task is present
@@ -310,6 +312,8 @@
  * @in_msync_mode        : Flag to determine if a link is in master-slave mode
  * @initial_sync_req     : The initial req which is required to sync with the
  *                         other link
+ * @retry_cnt            : Counter that tracks the number of attempts to
+ *                         apply the same req
  */
 struct cam_req_mgr_core_link {
 	int32_t                              link_hdl;
@@ -336,6 +340,7 @@
 	bool                                 initial_skip;
 	bool                                 in_msync_mode;
 	int64_t                              initial_sync_req;
+	uint32_t                             retry_cnt;
 };
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
index 412932d..02a7a1f 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
@@ -102,7 +102,7 @@
 	}
 
 	if ((packet->kmd_cmd_buf_index < 0) ||
-		(packet->kmd_cmd_buf_index > packet->num_cmd_buf)) {
+		(packet->kmd_cmd_buf_index >= packet->num_cmd_buf)) {
 		CAM_ERR(CAM_UTIL, "Invalid kmd buf index: %d",
 			packet->kmd_cmd_buf_index);
 		return -EINVAL;
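
The changed comparison fixes an off-by-one: with num_cmd_buf entries the valid indices are 0 through num_cmd_buf - 1, so an index equal to num_cmd_buf must be rejected as well. In isolation:

	/*
	 * Illustrative only: an index into an array of 'count' entries is
	 * valid iff 0 <= idx < count, so idx == count is rejected along
	 * with negative values.
	 */
	static int demo_validate_index(int idx, int count)
	{
		if (idx < 0 || idx >= count)
			return -1;
		return 0;
	}
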
diff --git a/drivers/media/platform/msm/cvp/cvp_hfi.c b/drivers/media/platform/msm/cvp/cvp_hfi.c
index a1469fa..ba93c30 100644
--- a/drivers/media/platform/msm/cvp/cvp_hfi.c
+++ b/drivers/media/platform/msm/cvp/cvp_hfi.c
@@ -87,6 +87,118 @@
 		.buf_num = HFI_DS_BUF_NUM,
 		.resp = HAL_NO_RESP,
 	},
+	{
+		.size = HFI_OF_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_CV_TME_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_TME_CONFIG_CMD_DONE,
+	},
+	{
+		.size = HFI_OF_FRAME_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_CV_TME_FRAME,
+		.buf_offset = HFI_OF_BUFFERS_OFFSET,
+		.buf_num = HFI_OF_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
+	{
+		.size = HFI_ODT_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_CV_ODT_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_ODT_CONFIG_CMD_DONE,
+	},
+	{
+		.size = HFI_ODT_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_CV_ODT_FRAME,
+		.buf_offset = HFI_ODT_BUFFERS_OFFSET,
+		.buf_num = HFI_ODT_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
+	{
+		.size = HFI_OD_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_CV_OD_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_OD_CONFIG_CMD_DONE,
+	},
+	{
+		.size = HFI_OD_FRAME_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_CV_OD_FRAME,
+		.buf_offset = HFI_OD_BUFFERS_OFFSET,
+		.buf_num = HFI_OD_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
+	{
+		.size = HFI_NCC_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_NCC_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_NCC_CONFIG_CMD_DONE,
+	},
+	{
+		.size = HFI_NCC_FRAME_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_NCC_FRAME,
+		.buf_offset = HFI_NCC_BUFFERS_OFFSET,
+		.buf_num = HFI_NCC_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
+	{
+		.size = HFI_ICA_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_ICA_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_ICA_CONFIG_CMD_DONE,
+	},
+	{
+		.size = HFI_ICA_FRAME_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_ICA_FRAME,
+		.buf_offset = HFI_ICA_BUFFERS_OFFSET,
+		.buf_num = HFI_ICA_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
+	{
+		.size = HFI_HCD_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_HCD_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_HCD_CONFIG_CMD_DONE,
+	},
+	{
+		.size = HFI_HCD_FRAME_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_HCD_FRAME,
+		.buf_offset = HFI_HCD_BUFFERS_OFFSET,
+		.buf_num = HFI_HCD_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
+	{
+		.size = HFI_DCM_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_DCM_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_DCM_CONFIG_CMD_DONE,
+	},
+	{
+		.size = HFI_DCM_FRAME_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_DCM_FRAME,
+		.buf_offset = HFI_DCM_BUFFERS_OFFSET,
+		.buf_num = HFI_DCM_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
+	{
+		.size = HFI_PYS_HCD_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_PYS_HCD_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_PYS_HCD_CONFIG_CMD_DONE,
+	},
+	{
+		.size = HFI_PYS_HCD_FRAME_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_PYS_HCD_FRAME,
+		.buf_offset = HFI_PYS_HCD_BUFFERS_OFFSET,
+		.buf_num = HFI_PYS_HCD_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
 };
 
 static struct hal_device_data hal_ctxt;
@@ -204,6 +316,17 @@
 	return -EINVAL;
 }
 
+int get_signal_from_pkt_type(unsigned int type)
+{
+	int i, pkt_num = ARRAY_SIZE(cvp_hfi_defs);
+
+	for (i = 0; i < pkt_num; i++)
+		if (cvp_hfi_defs[i].type == type)
+			return cvp_hfi_defs[i].resp;
+
+	return -EINVAL;
+}
+
 static void __dump_packet(u8 *packet, enum cvp_msg_prio log_level)
 {
 	u32 c = 0, packet_size = *(u32 *)packet;
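
get_signal_from_pkt_type() above is a linear scan of cvp_hfi_defs[] that maps an HFI packet type to the HAL signal the response handler should raise, or -EINVAL for unknown types; the response-handler change later in this series uses it to replace a hard-coded switch. A user-space style sketch of that table-driven lookup, with invented example entries:

	#include <errno.h>
	#include <stdio.h>

	struct demo_pkt_def {
		unsigned int type;	/* HFI command type */
		int resp;		/* signal raised when its response arrives */
	};

	/* Invented sample table; cvp_hfi_defs[] carries the real entries. */
	static const struct demo_pkt_def demo_defs[] = {
		{ 0x1001, 100 },	/* e.g. a *_CONFIG_CMD_DONE signal */
		{ 0x1002, 0 },		/* e.g. a HAL_NO_RESP-style marker */
	};

	static int demo_signal_from_type(unsigned int type)
	{
		unsigned int i;

		for (i = 0; i < sizeof(demo_defs) / sizeof(demo_defs[0]); i++)
			if (demo_defs[i].type == type)
				return demo_defs[i].resp;

		return -EINVAL;		/* unknown packet type */
	}

	int main(void)
	{
		printf("0x1001 -> %d\n", demo_signal_from_type(0x1001));
		printf("0xdead -> %d\n", demo_signal_from_type(0xdead));
		return 0;
	}
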
diff --git a/drivers/media/platform/msm/cvp/cvp_hfi.h b/drivers/media/platform/msm/cvp/cvp_hfi.h
index b2aa67a..00c7062c 100644
--- a/drivers/media/platform/msm/cvp/cvp_hfi.h
+++ b/drivers/media/platform/msm/cvp/cvp_hfi.h
@@ -272,10 +272,6 @@
 	(HFI_CMD_SESSION_CVP_START + 0x00B)
 #define  HFI_CMD_SESSION_CVP_DFS_CONFIG\
 	(HFI_CMD_SESSION_CVP_START + 0x00C)
-#define  HFI_CMD_SESSION_CVP_TME_FRAME\
-	(HFI_CMD_SESSION_CVP_START + 0x00D)
-#define  HFI_CMD_SESSION_CVP_TME_CONFIG\
-	(HFI_CMD_SESSION_CVP_START + 0x00E)
 #define  HFI_CMD_SESSION_CVP_FTEXT\
 	(HFI_CMD_SESSION_CVP_START + 0x00F)
 
@@ -292,12 +288,10 @@
 	(HFI_CMD_SESSION_CVP_START + 0x014)
 
 /* ===========USECASE OPERATIONS===============*/
-#define HFI_CMD_SESSION_CVP_ODT\
-	(HFI_CMD_SESSION_CVP_START + 0x015)
-#define HFI_CMD_SESSION_CVP_PYS_HCD_FRAME\
-	(HFI_CMD_SESSION_CVP_START + 0x016)
-#define HFI_CMD_SESSION_CVP_PYS_HCD_CONFIG\
-	(HFI_CMD_SESSION_CVP_START + 0x017)
+#define  HFI_CMD_SESSION_CVP_DCM_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x034)
+#define  HFI_CMD_SESSION_CVP_DCM_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x035)
 
 #define  HFI_CMD_SESSION_CVP_DME_CONFIG\
 	(HFI_CMD_SESSION_CVP_START + 0x039)
@@ -306,8 +300,30 @@
 #define  HFI_CMD_SESSION_CVP_DME_FRAME\
 	(HFI_CMD_SESSION_CVP_START + 0x03A)
 
+#define  HFI_CMD_SESSION_CVP_CV_TME_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x047)
+#define  HFI_CMD_SESSION_CVP_CV_TME_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x048)
+#define  HFI_CMD_SESSION_CVP_CV_OD_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x049)
+#define  HFI_CMD_SESSION_CVP_CV_OD_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x04A)
+#define  HFI_CMD_SESSION_CVP_CV_ODT_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x04B)
+#define  HFI_CMD_SESSION_CVP_CV_ODT_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x04C)
+
 #define  HFI_CMD_SESSION_CVP_SET_PERSIST_BUFFERS\
 	(HFI_CMD_SESSION_CVP_START + 0x04D)
+#define HFI_CMD_SESSION_CVP_PYS_HCD_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x050)
+#define HFI_CMD_SESSION_CVP_PYS_HCD_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x051)
+#define  HFI_CMD_SESSION_CVP_ICA_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x100)
+#define  HFI_CMD_SESSION_CVP_ICA_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x101)
+
 
 #define HFI_MSG_SYS_OX_START			\
 (HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + HFI_MSG_START_OFFSET + 0x0000)
diff --git a/drivers/media/platform/msm/cvp/cvp_hfi_api.h b/drivers/media/platform/msm/cvp/cvp_hfi_api.h
index 7801764..dd21c40 100644
--- a/drivers/media/platform/msm/cvp/cvp_hfi_api.h
+++ b/drivers/media/platform/msm/cvp/cvp_hfi_api.h
@@ -76,6 +76,45 @@
 #define HFI_DS_BUFFERS_OFFSET	48
 #define HFI_DS_BUF_NUM	3
 
+#define HFI_OF_CONFIG_CMD_SIZE 34
+#define HFI_OF_FRAME_CMD_SIZE 24
+#define HFI_OF_BUFFERS_OFFSET 8
+#define HFI_OF_BUF_NUM 8
+
+#define HFI_ODT_CONFIG_CMD_SIZE 23
+#define HFI_ODT_FRAME_CMD_SIZE 33
+#define HFI_ODT_BUFFERS_OFFSET 11
+#define HFI_ODT_BUF_NUM 11
+
+#define HFI_OD_CONFIG_CMD_SIZE 24
+#define HFI_OD_FRAME_CMD_SIZE 12
+#define HFI_OD_BUFFERS_OFFSET 6
+#define HFI_OD_BUF_NUM 3
+
+#define HFI_NCC_CONFIG_CMD_SIZE 47
+#define HFI_NCC_FRAME_CMD_SIZE 22
+#define HFI_NCC_BUFFERS_OFFSET 8
+#define HFI_NCC_BUF_NUM 7
+
+#define HFI_ICA_CONFIG_CMD_SIZE 127
+#define HFI_ICA_FRAME_CMD_SIZE 14
+#define HFI_ICA_BUFFERS_OFFSET 6
+#define HFI_ICA_BUF_NUM 4
+
+#define HFI_HCD_CONFIG_CMD_SIZE 46
+#define HFI_HCD_FRAME_CMD_SIZE 18
+#define HFI_HCD_BUFFERS_OFFSET 12
+#define HFI_HCD_BUF_NUM 3
+
+#define HFI_DCM_CONFIG_CMD_SIZE 20
+#define HFI_DCM_FRAME_CMD_SIZE 19
+#define HFI_DCM_BUFFERS_OFFSET 9
+#define HFI_DCM_BUF_NUM 5
+
+#define HFI_PYS_HCD_CONFIG_CMD_SIZE 661
+#define HFI_PYS_HCD_FRAME_CMD_SIZE 86
+#define HFI_PYS_HCD_BUFFERS_OFFSET 6
+#define HFI_PYS_HCD_BUF_NUM 36
 
 enum cvp_status {
 	CVP_ERR_NONE = 0x0,
@@ -1135,6 +1174,14 @@
 	HAL_SESSION_DME_CONFIG_CMD_DONE,
 	HAL_SESSION_DME_BASIC_CONFIG_CMD_DONE,
 	HAL_SESSION_DME_FRAME_CMD_DONE,
+	HAL_SESSION_TME_CONFIG_CMD_DONE,
+	HAL_SESSION_ODT_CONFIG_CMD_DONE,
+	HAL_SESSION_OD_CONFIG_CMD_DONE,
+	HAL_SESSION_NCC_CONFIG_CMD_DONE,
+	HAL_SESSION_ICA_CONFIG_CMD_DONE,
+	HAL_SESSION_HCD_CONFIG_CMD_DONE,
+	HAL_SESSION_DCM_CONFIG_CMD_DONE,
+	HAL_SESSION_PYS_HCD_CONFIG_CMD_DONE,
 	HAL_SESSION_PERSIST_CMD_DONE,
 	HAL_SESSION_PROPERTY_INFO,
 	HAL_SESSION_ERROR,
@@ -1501,6 +1548,7 @@
 			struct hfi_device *hdev);
 
 int get_pkt_index(struct cvp_hal_session_cmd_pkt *hdr);
+int get_signal_from_pkt_type(unsigned int type);
 extern const struct msm_cvp_hfi_defs cvp_hfi_defs[];
 
 #endif /*__CVP_HFI_API_H__ */
diff --git a/drivers/media/platform/msm/cvp/hfi_packetization.c b/drivers/media/platform/msm/cvp/hfi_packetization.c
index d8b7cc3..2a2a567 100644
--- a/drivers/media/platform/msm/cvp/hfi_packetization.c
+++ b/drivers/media/platform/msm/cvp/hfi_packetization.c
@@ -414,7 +414,12 @@
 		return -EINVAL;
 
 	def_idx = get_pkt_index(ptr);
-	if (def_idx < 0)
+	if (def_idx < 0 && ptr->size < MAX_HFI_PKT_SIZE * sizeof(u32)) {
+		memcpy(out_pkt, in_pkt, ptr->size);
+		return 0;
+	}
+
+	if (ptr->size > MAX_HFI_PKT_SIZE * sizeof(unsigned int))
 		goto error_hfi_packet;
 
 	if (cvp_hfi_defs[def_idx].type != ptr->packet_type)
diff --git a/drivers/media/platform/msm/cvp/hfi_response_handler.c b/drivers/media/platform/msm/cvp/hfi_response_handler.c
index a6874bb..1c40c16 100644
--- a/drivers/media/platform/msm/cvp/hfi_response_handler.c
+++ b/drivers/media/platform/msm/cvp/hfi_response_handler.c
@@ -662,6 +662,7 @@
 	struct msm_cvp_cb_info *info)
 {
 	struct msm_cvp_cb_cmd_done cmd_done = {0};
+	int signal;
 
 	if (!pkt) {
 		dprintk(CVP_ERR, "%s: invalid param\n", __func__);
@@ -681,21 +682,14 @@
 		"%s: device_id=%d status=%d, sessionid=%x config=%x\n",
 		__func__, device_id, cmd_done.status,
 		cmd_done.session_id, pkt->op_conf_id);
-	switch (pkt->op_conf_id) {
-	case HFI_CMD_SESSION_CVP_DFS_CONFIG:
-	info->response_type = HAL_SESSION_DFS_CONFIG_CMD_DONE;
-		break;
-	case HFI_CMD_SESSION_CVP_DME_CONFIG:
-		info->response_type = HAL_SESSION_DME_CONFIG_CMD_DONE;
-		break;
-	case HFI_CMD_SESSION_CVP_DME_BASIC_CONFIG:
-		info->response_type = HAL_SESSION_DME_BASIC_CONFIG_CMD_DONE;
-		break;
-	default:
+
+	signal = get_signal_from_pkt_type(pkt->op_conf_id);
+	if (signal < 0) {
 		dprintk(CVP_ERR, "%s Invalid op config id\n", __func__);
 		return -EINVAL;
 	}
 
+	info->response_type = signal;
 	info->response.cmd = cmd_done;
 	return 0;
 }
@@ -998,6 +992,7 @@
 	default:
 		dprintk(CVP_DBG, "Unable to parse message: %#x\n",
 				msg_hdr->packet);
+		pkt_func = (pkt_func_def)hfi_process_session_cvp_msg;
 		break;
 	}
 
diff --git a/drivers/media/platform/msm/cvp/msm_cvp.c b/drivers/media/platform/msm/cvp/msm_cvp.c
index a6afeb6..e1dd50c 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp.c
@@ -278,13 +278,15 @@
 
 static int msm_cvp_session_process_hfi(
 	struct msm_cvp_inst *inst,
-	struct cvp_kmd_hfi_packet *in_pkt)
+	struct cvp_kmd_hfi_packet *in_pkt,
+	unsigned int in_offset,
+	unsigned int in_buf_num)
 {
 	int i, pkt_idx, rc = 0;
 	struct hfi_device *hdev;
 	struct msm_cvp_internal_buffer *cbuf;
 	struct buf_desc *buf_ptr;
-	unsigned int offset, buf_num;
+	unsigned int offset, buf_num, signal;
 
 	if (!inst || !inst->core || !in_pkt) {
 		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
@@ -297,10 +299,23 @@
 		dprintk(CVP_ERR, "%s incorrect packet %d, %x\n", __func__,
 				in_pkt->pkt_data[0],
 				in_pkt->pkt_data[1]);
-		return pkt_idx;
+		offset = in_offset;
+		buf_num = in_buf_num;
+		signal = HAL_NO_RESP;
+	} else {
+		offset = cvp_hfi_defs[pkt_idx].buf_offset;
+		buf_num = cvp_hfi_defs[pkt_idx].buf_num;
+		signal = cvp_hfi_defs[pkt_idx].resp;
 	}
-	offset = cvp_hfi_defs[pkt_idx].buf_offset;
-	buf_num = cvp_hfi_defs[pkt_idx].buf_num;
+
+	if (in_offset && in_buf_num) {
+		if (offset != in_offset || buf_num != in_buf_num) {
+			dprintk(CVP_ERR, "%s incorrect offset and num %d, %d\n",
+					__func__, in_offset, in_buf_num);
+			offset = in_offset;
+			buf_num = in_buf_num;
+		}
+	}
 
 	if (offset != 0 && buf_num != 0) {
 		buf_ptr = (struct buf_desc *)&in_pkt->pkt_data[offset];
@@ -330,16 +345,15 @@
 			__func__, in_pkt->pkt_data[0], in_pkt->pkt_data[1]);
 	}
 
-	if (cvp_hfi_defs[pkt_idx].resp != HAL_NO_RESP) {
-		rc = wait_for_sess_signal_receipt(inst,
-			cvp_hfi_defs[pkt_idx].resp);
+	if (signal != HAL_NO_RESP) {
+		rc = wait_for_sess_signal_receipt(inst, signal);
 		if (rc)
 			dprintk(CVP_ERR,
 				"%s: wait for signal failed, rc %d %d, %x %d\n",
 				__func__, rc,
 				in_pkt->pkt_data[0],
 				in_pkt->pkt_data[1],
-				cvp_hfi_defs[pkt_idx].resp);
+				signal);
 
 	}
 
@@ -761,7 +775,8 @@
 		struct cvp_kmd_hfi_packet *in_pkt =
 			(struct cvp_kmd_hfi_packet *)&arg->data.hfi_pkt;
 
-		rc = msm_cvp_session_process_hfi(inst, in_pkt);
+		rc = msm_cvp_session_process_hfi(inst, in_pkt,
+				arg->buf_offset, arg->buf_num);
 		break;
 	}
 	case CVP_KMD_HFI_DFS_FRAME_CMD_RESPONSE:
diff --git a/drivers/media/platform/msm/cvp/msm_v4l2_private.c b/drivers/media/platform/msm/cvp/msm_v4l2_private.c
index a578d8f55..6656957 100644
--- a/drivers/media/platform/msm/cvp/msm_v4l2_private.c
+++ b/drivers/media/platform/msm/cvp/msm_v4l2_private.c
@@ -20,7 +20,7 @@
 		return -EFAULT;
 
 	if (get_pkt_index(pkt_hdr) < 0) {
-		dprintk(CVP_DBG, "user mode provides incorrect hfi\n");
+		dprintk(CVP_ERR, "user mode provides incorrect hfi\n");
 		goto set_default_pkt_hdr;
 	}
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 74c3eec..7b0edfc 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -3551,7 +3551,7 @@
 	int rc = 0;
 	struct internal_buf *binfo = NULL;
 	u32 smem_flags = SMEM_UNCACHED, buffer_size, num_buffers, hfi_fmt;
-	struct hal_buffer_requirements *output_buf, *extradata_buf;
+	struct hal_buffer_requirements *output_buf;
 	unsigned int i;
 	struct hfi_device *hdev;
 	struct hfi_buffer_size_minimum b;
@@ -3566,6 +3566,17 @@
 		return 0;
 	}
 
+	/* Set DPB buffer count to firmware */
+	rc = msm_comm_set_buffer_count(inst,
+			output_buf->buffer_count_min,
+			output_buf->buffer_count_min,
+			HAL_BUFFER_OUTPUT);
+	if (rc) {
+		dprintk(VIDC_ERR, "%s: failed to set bufreqs(%#x)\n",
+			__func__, buffer_type);
+		return -EINVAL;
+	}
+
 	/* For DPB buffers, Always use FW count */
 	num_buffers = output_buf->buffer_count_min;
 	hfi_fmt = msm_comm_convert_color_fmt(inst->clk_data.dpb_fourcc);
@@ -3585,12 +3596,15 @@
 		inst->session, HFI_PROPERTY_PARAM_BUFFER_SIZE_MINIMUM,
 		&b, sizeof(b));
 
-	extradata_buf = get_buff_req_buffer(inst, HAL_BUFFER_EXTRADATA_OUTPUT);
-	if (extradata_buf) {
+	if (inst->bufq[CAPTURE_PORT].num_planes == 1 ||
+		!inst->bufq[CAPTURE_PORT].plane_sizes[1]) {
 		dprintk(VIDC_DBG,
-			"extradata: num = %d, size = %d\n",
-			extradata_buf->buffer_count_actual,
-			extradata_buf->buffer_size);
+			"This extradata buffer not required, buffer_type: %x\n",
+			buffer_type);
+	} else {
+		dprintk(VIDC_DBG,
+			"extradata: num = 1, size = %d\n",
+			inst->bufq[CAPTURE_PORT].plane_sizes[1]);
 		inst->dpb_extra_binfo = NULL;
 		inst->dpb_extra_binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
 		if (!inst->dpb_extra_binfo) {
@@ -3599,17 +3613,13 @@
 			goto fail_kzalloc;
 		}
 		rc = msm_comm_smem_alloc(inst,
-			extradata_buf->buffer_size, 1, smem_flags,
+			inst->bufq[CAPTURE_PORT].plane_sizes[1], 1, smem_flags,
 			buffer_type, 0, &inst->dpb_extra_binfo->smem);
 		if (rc) {
 			dprintk(VIDC_ERR,
 				"Failed to allocate output memory\n");
 			goto err_no_mem;
 		}
-	} else {
-		dprintk(VIDC_DBG,
-			"This extradata buffer not required, buffer_type: %x\n",
-			buffer_type);
 	}
 
 	if (inst->flags & VIDC_SECURE)
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_dyn_gov.c b/drivers/media/platform/msm/vidc/msm_vidc_dyn_gov.c
index 5287569..f144070 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_dyn_gov.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_dyn_gov.c
@@ -3,28 +3,21 @@
  * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  */
 
-
-#include <linux/module.h>
-#include "governor.h"
+#include "msm_vidc_debug.h"
 #include "fixedpoint.h"
 #include "msm_vidc_internal.h"
-#include "msm_vidc_debug.h"
 #include "vidc_hfi_api.h"
 #define COMPRESSION_RATIO_MAX 5
 
-enum governor_mode {
-	GOVERNOR_DDR,
-	GOVERNOR_LLCC,
-};
-
-struct governor {
-	enum governor_mode mode;
-	struct devfreq_governor devfreq_gov;
+enum vidc_bus_type {
+	PERF,
+	DDR,
+	LLCC,
 };
 
 /*
- * Minimum dimensions that the governor is willing to calculate
- * bandwidth for.  This means that anything bandwidth(0, 0) ==
+ * Minimum dimensions for which to calculate bandwidth.
+ * This means that anything bandwidth(0, 0) ==
  * bandwidth(BASELINE_DIMENSIONS.width, BASELINE_DIMENSIONS.height)
  */
 static const struct {
@@ -34,15 +27,6 @@
 	.height = 720,
 };
 
-/*
- * These are hardcoded AB values that the governor votes for in certain
- * situations, where a certain bus frequency is desired.  It isn't exactly
- * scalable since different platforms have different bus widths, but we'll
- * deal with that in the future.
- */
-const unsigned long NOMINAL_BW_MBPS = 6000 /* ideally 320 Mhz */,
-	SVS_BW_MBPS = 2000 /* ideally 100 Mhz */;
-
 /* converts Mbps to bps (the "b" part can be bits or bytes based on context) */
 #define kbps(__mbps) ((__mbps) * 1000)
 #define bps(__mbps) (kbps(__mbps) * 1000)
@@ -207,6 +191,16 @@
 	},
 };
 
+static u32 get_type_frm_name(char *name)
+{
+	if (!strcmp(name, "venus-llcc"))
+		return LLCC;
+	else if (!strcmp(name, "venus-ddr"))
+		return DDR;
+	else
+		return PERF;
+}
+
 static struct lut const *__lut(int width, int height, int fps)
 {
 	int frame_size = height * width, c = 0;
@@ -277,21 +271,21 @@
 }
 
 static unsigned long __calculate_vpe(struct vidc_bus_vote_data *d,
-		enum governor_mode gm)
+		enum vidc_bus_type type)
 {
 	return 0;
 }
 
 static unsigned long __calculate_cvp(struct vidc_bus_vote_data *d,
-		enum governor_mode gm)
+		enum vidc_bus_type type)
 {
 	unsigned long ret = 0;
 
-	switch (gm) {
-	case GOVERNOR_DDR:
+	switch (type) {
+	case DDR:
 		ret = d->ddr_bw;
 		break;
-	case GOVERNOR_LLCC:
+	case LLCC:
 		ret = d->sys_cache_bw;
 		break;
 	default:
@@ -332,7 +326,7 @@
 }
 
 static unsigned long __calculate_decoder(struct vidc_bus_vote_data *d,
-		enum governor_mode gm)
+		enum vidc_bus_type type)
 {
 	/*
 	 * XXX: Don't fool around with any of the hardcoded numbers unless you
@@ -573,11 +567,11 @@
 		__dump(dump, ARRAY_SIZE(dump));
 	}
 
-	switch (gm) {
-	case GOVERNOR_DDR:
+	switch (type) {
+	case DDR:
 		ret = kbps(fp_round(ddr.total));
 		break;
-	case GOVERNOR_LLCC:
+	case LLCC:
 		ret = kbps(fp_round(llc.total));
 		break;
 	default:
@@ -588,7 +582,7 @@
 }
 
 static unsigned long __calculate_encoder(struct vidc_bus_vote_data *d,
-		enum governor_mode gm)
+		enum vidc_bus_type type)
 {
 	/*
 	 * XXX: Don't fool around with any of the hardcoded numbers unless you
@@ -870,11 +864,11 @@
 		__dump(dump, ARRAY_SIZE(dump));
 	}
 
-	switch (gm) {
-	case GOVERNOR_DDR:
+	switch (type) {
+	case DDR:
 		ret = kbps(fp_round(ddr.total));
 		break;
-	case GOVERNOR_LLCC:
+	case LLCC:
 		ret = kbps(fp_round(llc.total));
 		break;
 	default:
@@ -885,41 +879,37 @@
 }
 
 static unsigned long __calculate(struct vidc_bus_vote_data *d,
-		enum governor_mode gm)
+		enum vidc_bus_type type)
 {
-	unsigned long (*calc[])(struct vidc_bus_vote_data *,
-			enum governor_mode) = {
-		[HAL_VIDEO_DOMAIN_VPE] = __calculate_vpe,
-		[HAL_VIDEO_DOMAIN_ENCODER] = __calculate_encoder,
-		[HAL_VIDEO_DOMAIN_DECODER] = __calculate_decoder,
-		[HAL_VIDEO_DOMAIN_CVP] = __calculate_cvp,
-	};
+	unsigned long value = 0;
 
-	if (d->domain >= ARRAY_SIZE(calc)) {
-		dprintk(VIDC_ERR, "%s: invalid domain %d\n",
-			__func__, d->domain);
-		return 0;
+	switch (d->domain) {
+	case HAL_VIDEO_DOMAIN_VPE:
+		value = __calculate_vpe(d, type);
+		break;
+	case HAL_VIDEO_DOMAIN_ENCODER:
+		value = __calculate_encoder(d, type);
+		break;
+	case HAL_VIDEO_DOMAIN_DECODER:
+		value = __calculate_decoder(d, type);
+		break;
+	case HAL_VIDEO_DOMAIN_CVP:
+		value = __calculate_cvp(d, type);
+		break;
+	default:
+		dprintk(VIDC_ERR, "Unknown Domain");
 	}
-	return calc[d->domain](d, gm);
+
+	return value;
 }
 
-
-static int __get_target_freq(struct devfreq *dev, unsigned long *freq)
+unsigned long __calc_bw(struct bus_info *bus,
+				struct msm_vidc_gov_data *vidc_data)
 {
 	unsigned long ab_kbps = 0, c = 0;
-	struct devfreq_dev_status stats = {0};
-	struct msm_vidc_gov_data *vidc_data = NULL;
-	struct governor *gov = NULL;
+	enum vidc_bus_type type;
 
-	if (!dev || !freq)
-		return -EINVAL;
-
-	gov = container_of(dev->governor,
-			struct governor, devfreq_gov);
-	dev->profile->get_dev_status(dev->dev.parent, &stats);
-	vidc_data = (struct msm_vidc_gov_data *)stats.private_data;
-
-	if (!vidc_data || !vidc_data->data_count)
+	if (!vidc_data || !vidc_data->data_count || !vidc_data->data)
 		goto exit;
 
 	for (c = 0; c < vidc_data->data_count; ++c) {
@@ -929,85 +919,12 @@
 		}
 	}
 
+	type = get_type_frm_name(bus->name);
+
 	for (c = 0; c < vidc_data->data_count; ++c)
-		ab_kbps += __calculate(&vidc_data->data[c], gov->mode);
+		ab_kbps += __calculate(&vidc_data->data[c], type);
 
 exit:
-	*freq = clamp(ab_kbps, dev->min_freq, dev->max_freq ?
-		dev->max_freq : UINT_MAX);
-	trace_msm_vidc_perf_bus_vote(gov->devfreq_gov.name, *freq);
-	return 0;
+	trace_msm_vidc_perf_bus_vote(bus->name, ab_kbps);
+	return ab_kbps;
 }
-
-static int __event_handler(struct devfreq *devfreq, unsigned int event,
-		void *data)
-{
-	int rc = 0;
-
-	if (!devfreq)
-		return -EINVAL;
-
-	switch (event) {
-	case DEVFREQ_GOV_START:
-	case DEVFREQ_GOV_RESUME:
-	case DEVFREQ_GOV_SUSPEND:
-		mutex_lock(&devfreq->lock);
-		rc = update_devfreq(devfreq);
-		mutex_unlock(&devfreq->lock);
-		break;
-	}
-
-	return rc;
-}
-
-static struct governor governors[] = {
-	{
-		.mode = GOVERNOR_DDR,
-		.devfreq_gov = {
-			.name = "msm-vidc-ddr",
-			.get_target_freq = __get_target_freq,
-			.event_handler = __event_handler,
-		},
-	},
-	{
-		.mode = GOVERNOR_LLCC,
-		.devfreq_gov = {
-			.name = "msm-vidc-llcc",
-			.get_target_freq = __get_target_freq,
-			.event_handler = __event_handler,
-		},
-	},
-};
-
-static int __init msm_vidc_bw_gov_init(void)
-{
-	int c = 0, rc = 0;
-
-	for (c = 0; c < ARRAY_SIZE(governors); ++c) {
-		dprintk(VIDC_DBG, "Adding governor %s\n",
-				governors[c].devfreq_gov.name);
-
-		rc = devfreq_add_governor(&governors[c].devfreq_gov);
-		if (rc) {
-			dprintk(VIDC_ERR, "Error adding governor %s: %d\n",
-				governors[c].devfreq_gov.name, rc);
-			break;
-		}
-	}
-
-	return rc;
-}
-module_init(msm_vidc_bw_gov_init);
-
-static void __exit msm_vidc_bw_gov_exit(void)
-{
-	int c = 0;
-
-	for (c = 0; c < ARRAY_SIZE(governors); ++c) {
-		dprintk(VIDC_DBG, "Removing governor %s\n",
-				governors[c].devfreq_gov.name);
-		devfreq_remove_governor(&governors[c].devfreq_gov);
-	}
-}
-module_exit(msm_vidc_bw_gov_exit);
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
index aa68062..e738949 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
@@ -420,23 +420,12 @@
 		goto err_bus;
 	}
 
-	rc = of_property_read_string(dev->of_node, "qcom,bus-governor",
-			&bus->governor);
-	if (rc) {
-		rc = 0;
-		dprintk(VIDC_DBG,
-				"'qcom,bus-governor' not found, default to performance governor\n");
-		bus->governor = PERF_GOV;
-	}
+	rc = of_property_read_string(dev->of_node, "qcom,mode",
+			&bus->mode);
 
-	if (!strcmp(bus->governor, PERF_GOV))
+	if (!rc && !strcmp(bus->mode, PERF_GOV))
 		bus->is_prfm_gov_used = true;
 
-	if (of_find_property(dev->of_node, "operating-points-v2", NULL))
-		bus->has_freq_table = true;
-	else
-		bus->has_freq_table = false;
-
 	rc = of_property_read_u32_array(dev->of_node, "qcom,bus-range-kbps",
 			range, ARRAY_SIZE(range));
 	if (rc) {
@@ -452,8 +441,8 @@
 
 	buses->count++;
 	bus->dev = dev;
-	dprintk(VIDC_DBG, "Found bus %s [%d->%d] with governor %s\n",
-			bus->name, bus->master, bus->slave, bus->governor);
+	dprintk(VIDC_DBG, "Found bus %s [%d->%d] with mode %s\n",
+			bus->name, bus->master, bus->slave, bus->mode);
 err_bus:
 	return rc;
 }
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_resources.h b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
index f134212..234ee9d 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_resources.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
@@ -6,7 +6,6 @@
 #ifndef __MSM_VIDC_RESOURCES_H__
 #define __MSM_VIDC_RESOURCES_H__
 
-#include <linux/devfreq.h>
 #include <linux/platform_device.h>
 #include "msm_vidc.h"
 #include <linux/soc/qcom/llcc-qcom.h>
@@ -94,13 +93,10 @@
 	int master;
 	int slave;
 	unsigned int range[2];
-	const char *governor;
 	struct device *dev;
-	struct devfreq_dev_profile devfreq_prof;
-	struct devfreq *devfreq;
 	struct msm_bus_client_handle *client;
 	bool is_prfm_gov_used;
-	bool has_freq_table;
+	const char *mode;
 };
 
 struct bus_set {
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 0ca6fa3..95330d3 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -8,7 +8,6 @@
 #include <linux/clk/qcom.h>
 #include <linux/coresight-stm.h>
 #include <linux/delay.h>
-#include <linux/devfreq.h>
 #include <linux/hash.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
@@ -984,84 +983,24 @@
 		dprintk(VIDC_ERR, "Failed to restore threshold values\n");
 }
 
-static int __devfreq_target(struct device *devfreq_dev,
-		unsigned long *freq, u32 flags)
+static int __vote_bandwidth(struct bus_info *bus,
+		unsigned long *freq)
 {
 	int rc = 0;
 	uint64_t ab = 0;
-	struct bus_info *bus = NULL, *temp = NULL;
-	struct venus_hfi_device *device = dev_get_drvdata(devfreq_dev);
 
-	venus_hfi_for_each_bus(device, temp) {
-		if (temp->dev == devfreq_dev) {
-			bus = temp;
-			break;
-		}
-	}
-
-	if (!bus) {
-		rc = -EBADHANDLE;
-		goto err_unknown_device;
-	}
-
-	/*
-	 * Clamp for all non zero frequencies. This clamp is necessary to stop
-	 * devfreq driver from spamming - Couldn't update frequency - logs, if
-	 * the scaled ab value is not part of the frequency table.
-	 */
 	if (*freq)
 		*freq = clamp_t(typeof(*freq), *freq, bus->range[0],
 				bus->range[1]);
 
-	/* we expect governors to provide values in kBps form, convert to Bps */
+	/* Bus Driver expects values in Bps */
 	ab = *freq * 1000;
-	rc = msm_bus_scale_update_bw(bus->client, ab, 0);
-	if (rc) {
-		dprintk(VIDC_ERR, "Failed voting bus %s to ab %llu\n: %d",
-				bus->name, ab, rc);
-		goto err_unknown_device;
-	}
-
 	dprintk(VIDC_PROF, "Voting bus %s to ab %llu\n", bus->name, ab);
+	rc = msm_bus_scale_update_bw(bus->client, ab, 0);
+	if (rc)
+		dprintk(VIDC_ERR, "Failed voting bus %s to ab %llu, rc=%d\n",
+				bus->name, ab, rc);
 
-	return 0;
-err_unknown_device:
-	return rc;
-}
-
-static int __devfreq_get_status(struct device *devfreq_dev,
-		struct devfreq_dev_status *stat)
-{
-	int rc = 0;
-	struct bus_info *bus = NULL, *temp = NULL;
-	struct venus_hfi_device *device = dev_get_drvdata(devfreq_dev);
-
-	venus_hfi_for_each_bus(device, temp) {
-		if (temp->dev == devfreq_dev) {
-			bus = temp;
-			break;
-		}
-	}
-
-	if (!bus) {
-		rc = -EBADHANDLE;
-		goto err_unknown_device;
-	}
-
-	*stat = (struct devfreq_dev_status) {
-		.private_data = &device->bus_vote,
-		/*
-		 * Put in dummy place holder values for upstream govs, our
-		 * custom gov only needs .private_data.  We should fill this in
-		 * properly if we can actually measure busy_time accurately
-		 * (which we can't at the moment)
-		 */
-		.total_time = 1,
-		.busy_time = 1,
-		.current_frequency = 0,
-	};
-
-err_unknown_device:
 	return rc;
 }
 
@@ -1069,18 +1008,19 @@
 {
 	int rc = 0;
 	struct bus_info *bus = NULL;
+	unsigned long freq = 0, zero = 0;
 
 	kfree(device->bus_vote.data);
 	device->bus_vote.data = NULL;
 	device->bus_vote.data_count = 0;
 
 	venus_hfi_for_each_bus(device, bus) {
-		unsigned long zero = 0;
-
-		if (!bus->is_prfm_gov_used)
-			rc = devfreq_suspend_device(bus->devfreq);
+		if (!bus->is_prfm_gov_used) {
+			freq = __calc_bw(bus, &device->bus_vote);
+			rc = __vote_bandwidth(bus, &freq);
+		}
 		else
-			rc = __devfreq_target(bus->dev, &zero, 0);
+			rc = __vote_bandwidth(bus, &zero);
 
 		if (rc)
 			goto err_unknown_device;
@@ -1096,6 +1036,7 @@
 	int rc = 0;
 	struct bus_info *bus = NULL;
 	struct vidc_bus_vote_data *new_data = NULL;
+	unsigned long freq = 0;
 
 	if (!num_data) {
 		dprintk(VIDC_DBG, "No vote data available\n");
@@ -1118,15 +1059,18 @@
 	device->bus_vote.data_count = num_data;
 
 	venus_hfi_for_each_bus(device, bus) {
-		if (bus && bus->devfreq) {
+		if (bus) {
 			if (!bus->is_prfm_gov_used) {
-				rc = devfreq_resume_device(bus->devfreq);
-				if (rc)
-					goto err_no_mem;
+				freq = __calc_bw(bus, &device->bus_vote);
 			} else {
-				bus->devfreq->nb.notifier_call(
-					&bus->devfreq->nb, 0, NULL);
+				freq = bus->range[1];
+				dprintk(VIDC_ERR, "%s %s perf Vote %u\n",
+						__func__, bus->name,
+						bus->range[1]);
 			}
+			rc = __vote_bandwidth(bus, &freq);
+		} else {
+			dprintk(VIDC_ERR, "No BUS to Vote\n");
 		}
 	}
 
@@ -3891,10 +3835,6 @@
 	device->bus_vote = DEFAULT_BUS_VOTE;
 
 	venus_hfi_for_each_bus_reverse(device, bus) {
-		devfreq_remove_device(bus->devfreq);
-		bus->devfreq = NULL;
-		dev_set_drvdata(bus->dev, NULL);
-
 		msm_bus_scale_unregister(bus->client);
 		bus->client = NULL;
 	}
@@ -3909,41 +3849,14 @@
 		return -EINVAL;
 
 	venus_hfi_for_each_bus(device, bus) {
-		struct devfreq_dev_profile profile = {
-			.initial_freq = 0,
-			.polling_ms = INT_MAX,
-			.freq_table = NULL,
-			.max_state = 0,
-			.target = __devfreq_target,
-			.get_dev_status = __devfreq_get_status,
-			.exit = NULL,
-			/*.get_cur_greq = NULL,*/
-		};
-
-		if (!strcmp(bus->governor, "msm-vidc-llcc")) {
+		if (!strcmp(bus->mode, "msm-vidc-llcc")) {
 			if (msm_vidc_syscache_disable) {
 				dprintk(VIDC_DBG,
 					 "Skipping LLC bus init %s: %s\n",
-				bus->name, bus->governor);
+				bus->name, bus->mode);
 				continue;
 			}
 		}
-
-		/*
-		 * This is stupid, but there's no other easy way to get a hold
-		 * of struct bus_info in venus_hfi_devfreq_*()
-		 */
-		WARN(dev_get_drvdata(bus->dev), "%s's drvdata already set\n",
-				dev_name(bus->dev));
-		dev_set_drvdata(bus->dev, device);
-
-		if (bus->has_freq_table) {
-			rc = dev_pm_opp_of_add_table(bus->dev);
-			if (rc)
-				dprintk(VIDC_ERR, "Failed to add %s OPP table",
-						bus->name);
-		}
-
 		bus->client = msm_bus_scale_register(bus->master, bus->slave,
 				bus->name, false);
 		if (IS_ERR_OR_NULL(bus->client)) {
@@ -3954,25 +3867,6 @@
 			bus->client = NULL;
 			goto err_add_dev;
 		}
-
-		bus->devfreq_prof = profile;
-		bus->devfreq = devfreq_add_device(bus->dev,
-				&bus->devfreq_prof, bus->governor, NULL);
-		if (IS_ERR_OR_NULL(bus->devfreq)) {
-			rc = PTR_ERR(bus->devfreq) ?
-				PTR_ERR(bus->devfreq) : -EBADHANDLE;
-			dprintk(VIDC_ERR,
-					"Failed to add devfreq device for bus %s and governor %s: %d\n",
-					bus->name, bus->governor, rc);
-			bus->devfreq = NULL;
-			goto err_add_dev;
-		}
-
-		/*
-		 * Devfreq starts monitoring immediately, since we are just
-		 * initializing stuff at this point, force it to suspend
-		 */
-		devfreq_suspend_device(bus->devfreq);
 	}
 
 	return 0;
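
__vote_bandwidth() above clamps any non-zero kBps request into the bus range read from devicetree and multiplies by 1000 because the bus driver expects Bps; buses marked is_prfm_gov_used simply vote the top of their range (or zero when unvoting). A compact sketch of the clamp-and-convert step (illustrative, not driver API):

	/*
	 * Illustrative only: clamp a non-zero kBps request into [lo, hi],
	 * then convert to Bps for the bus driver.
	 */
	static unsigned long long demo_kbps_to_bus_vote(unsigned long req_kbps,
							unsigned long lo_kbps,
							unsigned long hi_kbps)
	{
		if (req_kbps) {
			if (req_kbps < lo_kbps)
				req_kbps = lo_kbps;
			else if (req_kbps > hi_kbps)
				req_kbps = hi_kbps;
		}

		return (unsigned long long)req_kbps * 1000;
	}
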
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.h b/drivers/media/platform/msm/vidc/venus_hfi.h
index ecea88b..a361a23 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.h
+++ b/drivers/media/platform/msm/vidc/venus_hfi.h
@@ -42,6 +42,9 @@
 #define VIDC_MAX_SUBCACHES 4
 #define VIDC_MAX_SUBCACHE_SIZE 52
 
+extern unsigned long __calc_bw(struct bus_info *bus,
+			struct msm_vidc_gov_data *vidc_data);
+
 struct hfi_queue_table_header {
 	u32 qtbl_version;
 	u32 qtbl_size;
diff --git a/drivers/misc/hdcp_qseecom.c b/drivers/misc/hdcp_qseecom.c
index 53947b5..6de29bc 100644
--- a/drivers/misc/hdcp_qseecom.c
+++ b/drivers/misc/hdcp_qseecom.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt)	"[hdcp-qseecom] %s: " fmt, __func__
@@ -1044,11 +1044,7 @@
 	}
 
 	rc = handle->tx_init(handle);
-	if (rc)
-		goto error;
 
-	if (!handle->legacy_app)
-		rc = hdcp2_app_start_auth(handle);
 error:
 	return rc;
 }
@@ -1188,6 +1184,7 @@
 	pr_err("failed, rc=%d\n", rc);
 	return rc;
 }
+EXPORT_SYMBOL(hdcp2_force_encryption);
 
 static int hdcp2_app_query_stream(struct hdcp2_handle *handle)
 {
@@ -1236,6 +1233,9 @@
 	case HDCP2_CMD_START:
 		rc = hdcp2_app_start(handle);
 		break;
+	case HDCP2_CMD_START_AUTH:
+		rc = hdcp2_app_start_auth(handle);
+		break;
 	case HDCP2_CMD_PROCESS_MSG:
 		rc = hdcp2_app_process_msg(handle);
 		break;
@@ -1268,6 +1268,7 @@
 error:
 	return rc;
 }
+EXPORT_SYMBOL(hdcp2_app_comm);
 
 static int hdcp2_open_stream_helper(struct hdcp2_handle *handle,
 		uint8_t vc_payload_id,
@@ -1322,6 +1323,7 @@
 	return hdcp2_open_stream_helper(handle, vc_payload_id, stream_number,
 		stream_id);
 }
+EXPORT_SYMBOL(hdcp2_open_stream);
 
 static int hdcp2_close_stream_helper(struct hdcp2_handle *handle,
 		uint32_t stream_id)
@@ -1368,6 +1370,7 @@
 
 	return hdcp2_close_stream_helper(handle, stream_id);
 }
+EXPORT_SYMBOL(hdcp2_close_stream);
 
 void *hdcp2_init(u32 device_type)
 {
@@ -1382,11 +1385,13 @@
 error:
 	return handle;
 }
+EXPORT_SYMBOL(hdcp2_init);
 
 void hdcp2_deinit(void *ctx)
 {
 	kzfree(ctx);
 }
+EXPORT_SYMBOL(hdcp2_deinit);
 
 void *hdcp1_init(void)
 {
diff --git a/drivers/net/wireless/cnss2/genl.c b/drivers/net/wireless/cnss2/genl.c
index ecc6eb5..5a7fb1f5 100644
--- a/drivers/net/wireless/cnss2/genl.c
+++ b/drivers/net/wireless/cnss2/genl.c
@@ -143,11 +143,11 @@
 	genlmsg_end(skb, msg_header);
 	ret = genlmsg_multicast(&cnss_genl_family, skb, 0, 0, GFP_KERNEL);
 	if (ret < 0)
-		goto fail;
+		cnss_pr_err("Fail to send genl msg: %d\n", ret);
 
 	return ret;
 fail:
-	cnss_pr_err("genl msg send fail: %d\n", ret);
+	cnss_pr_err("Fail to generate genl msg: %d\n", ret);
 	if (skb)
 		nlmsg_free(skb);
 	return ret;
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 2653abf..e12f274 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -361,32 +361,36 @@
 		map += out_size;
 		map_len -= out_size;
 	}
-	if (match) {
-		/* Get the irqdomain-map-pass-thru property (optional) */
-		pass = of_get_property(cur, pass_name, NULL);
-		if (!pass)
-			pass = dummy_pass;
 
-		/*
-		 * Successfully parsed a irqdomain-map translation; copy new
-		 * specifier into the out structure, keeping the
-		 * bits specified in irqdomain-map-pass-thru.
-		 */
-		match_array = map - out_size;
-		for (i = 0; i < out_size; i++) {
-			__be32 val = *(map - out_size + i);
-
-			out->param[i] = in->param[i];
-			if (i < in_size) {
-				val &= ~pass[i];
-				val |= cpu_to_be32(out->param[i]) & pass[i];
-			}
-
-			out->param[i] = be32_to_cpu(val);
-		}
-		out->param_count = in_size = out_size;
-		out->fwnode = of_node_to_fwnode(new);
+	if (!match) {
+		ret = -EINVAL;
+		goto put;
 	}
+
+	/* Get the irqdomain-map-pass-thru property (optional) */
+	pass = of_get_property(cur, pass_name, NULL);
+	if (!pass)
+		pass = dummy_pass;
+
+	/*
+	 * Successfully parsed a irqdomain-map translation; copy new
+	 * specifier into the out structure, keeping the
+	 * bits specified in irqdomain-map-pass-thru.
+	 */
+	match_array = map - out_size;
+	for (i = 0; i < out_size; i++) {
+		__be32 val = *(map - out_size + i);
+
+		out->param[i] = in->param[i];
+		if (i < in_size) {
+			val &= ~pass[i];
+			val |= cpu_to_be32(out->param[i]) & pass[i];
+		}
+
+		out->param[i] = be32_to_cpu(val);
+	}
+	out->param_count = in_size = out_size;
+	out->fwnode = of_node_to_fwnode(new);
 put:
 	of_node_put(cur);
 	of_node_put(new);
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
index 11c3717..b3e28e6 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
@@ -2435,6 +2435,16 @@
 	} else
 		spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
 
+	if (teth_prot == IPA_USB_RMNET) {
+		IPA_USB_DBG("USB suspend resetting dma mode\n");
+		result = ipa_mpm_reset_dma_mode(IPA_CLIENT_USB_PROD,
+			IPA_CLIENT_MHI_PRIME_RMNET_CONS);
+		if (result) {
+			IPA_USB_ERR("failed to reset dma mode\n");
+			goto bad_params;
+		}
+	}
+
 	result = ipa_usb_xdci_dismiss_channels(ul_clnt_hdl, dl_clnt_hdl,
 			teth_prot);
 	if (result)
@@ -2709,6 +2719,16 @@
 		goto bad_params;
 	}
 
+	if (teth_prot == IPA_USB_RMNET) {
+		IPA_USB_DBG("USB suspend resetting dma mode\n");
+		result = ipa_mpm_reset_dma_mode(IPA_CLIENT_USB_PROD,
+			IPA_CLIENT_MHI_PRIME_RMNET_CONS);
+		if (result) {
+			IPA_USB_ERR("failed to reset dma mode\n");
+			goto bad_params;
+		}
+	}
+
 	/* Stop UL channel & suspend DL/DPL EP */
 	result = ipa3_xdci_suspend(ul_clnt_hdl, dl_clnt_hdl,
 		true,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index ccd5655..192fced 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -2751,6 +2751,8 @@
 int ipa_mpm_notify_wan_state(void);
 int ipa_mpm_mhip_ul_data_stop(enum ipa_usb_teth_prot xdci_teth_prot);
 int ipa3_is_mhip_offload_enabled(void);
+int ipa_mpm_reset_dma_mode(enum ipa_client_type src_pipe,
+	enum ipa_client_type dst_pipe);
 #else
 static inline int ipa_mpm_mhip_xdci_pipe_enable(
 	enum ipa_usb_teth_prot prot)
@@ -2775,6 +2777,12 @@
 {
 	return 0;
 }
+static inline int ipa_mpm_reset_dma_mode(enum ipa_client_type src_pipe,
+	enum ipa_client_type dst_pipe)
+{
+	return 0;
+}
+
 #endif /* CONFIG_IPA3_MHI_PRIME_MANAGER */
 
 #endif /* _IPA3_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
index 3bb865d..c8ced3a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
@@ -2154,9 +2154,6 @@
 
 	switch (mhip_client) {
 	case IPA_MPM_MHIP_USB_RMNET:
-		ipa_mpm_reset_dma_mode(IPA_CLIENT_USB_PROD,
-			IPA_CLIENT_MHI_PRIME_RMNET_CONS);
-		break;
 	case IPA_MPM_MHIP_TETH:
 	case IPA_MPM_MHIP_USB_DPL:
 		IPA_MPM_DBG("Teth Disconnecting for prot %d\n", mhip_client);
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index b9043d5..46441e1 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -2534,6 +2534,9 @@
 		ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ)
 		/* Android platform loads uC */
 		ipa3_qmi_service_init(QMI_IPA_PLATFORM_TYPE_MSM_ANDROID_V01);
+	else if (ipa3_ctx->ipa_config_is_mhi)
+		/* LE MHI platform */
+		ipa3_qmi_service_init(QMI_IPA_PLATFORM_TYPE_LE_MHI_V01);
 	else
 		/* LE platform not loads uC */
 		ipa3_qmi_service_init(QMI_IPA_PLATFORM_TYPE_LE_V01);
diff --git a/drivers/platform/msm/msm_11ad/msm_11ad.c b/drivers/platform/msm/msm_11ad/msm_11ad.c
index e2c362c..54c7fc7 100644
--- a/drivers/platform/msm/msm_11ad/msm_11ad.c
+++ b/drivers/platform/msm/msm_11ad/msm_11ad.c
@@ -26,9 +26,6 @@
 #include "wil_platform.h"
 #include "msm_11ad.h"
 
-#define SMMU_BASE	0x20000000 /* Device address range base */
-#define SMMU_SIZE	((SZ_1G * 4ULL) - SMMU_BASE)
-
 #define WIGIG_ENABLE_DELAY	50
 
 #define WIGIG_SUBSYS_NAME	"WIGIG"
@@ -39,9 +36,12 @@
 #define VDD_MIN_UV	1028000
 #define VDD_MAX_UV	1028000
 #define VDD_MAX_UA	575000
-#define VDDIO_MIN_UV	1950000
+#define VDDIO_MIN_UV	1824000
 #define VDDIO_MAX_UV	2040000
 #define VDDIO_MAX_UA	70300
+#define VDD_LDO_MIN_UV	1800000
+#define VDD_LDO_MAX_UV	1800000
+#define VDD_LDO_MAX_UA	100000
 
 #define WIGIG_MIN_CPU_BOOST_KBPS	150000
 
@@ -92,15 +92,6 @@
 	struct pci_saved_state *golden_state;
 	struct msm_pcie_register_event pci_event;
 
-	/* SMMU */
-	bool use_smmu; /* have SMMU enabled? */
-	int smmu_s1_en;
-	int smmu_fast_map;
-	int smmu_coherent;
-	struct dma_iommu_mapping *mapping;
-	u32 smmu_base;
-	u32 smmu_size;
-
 	/* bus frequency scaling */
 	struct msm_bus_scale_pdata *bus_scale;
 	u32 msm_bus_handle;
@@ -122,8 +113,9 @@
 	/* external vregs and clocks */
 	struct msm11ad_vreg vdd;
 	struct msm11ad_vreg vddio;
-	struct msm11ad_clk rf_clk3;
-	struct msm11ad_clk rf_clk3_pin;
+	struct msm11ad_vreg vdd_ldo;
+	struct msm11ad_clk rf_clk;
+	struct msm11ad_clk rf_clk_pin;
 
 	/* cpu boost support */
 	bool use_cpu_boost;
@@ -256,8 +248,18 @@
 	ctx->vddio.min_uV = VDDIO_MIN_UV;
 	ctx->vddio.max_uA = VDDIO_MAX_UA;
 
+	rc = msm_11ad_init_vreg(dev, &ctx->vdd_ldo, "vdd-ldo");
+	if (rc)
+		goto vdd_ldo_fail;
+
+	ctx->vdd_ldo.max_uV = VDD_LDO_MAX_UV;
+	ctx->vdd_ldo.min_uV = VDD_LDO_MIN_UV;
+	ctx->vdd_ldo.max_uA = VDD_LDO_MAX_UA;
+
 	return rc;
 
+vdd_ldo_fail:
+	msm_11ad_release_vreg(dev, &ctx->vddio);
 vddio_fail:
 	msm_11ad_release_vreg(dev, &ctx->vdd);
 out:
@@ -266,6 +268,7 @@
 
 static void msm_11ad_release_vregs(struct msm11ad_ctx *ctx)
 {
+	msm_11ad_release_vreg(ctx->dev, &ctx->vdd_ldo);
 	msm_11ad_release_vreg(ctx->dev, &ctx->vdd);
 	msm_11ad_release_vreg(ctx->dev, &ctx->vddio);
 }
@@ -381,8 +384,14 @@
 	if (rc)
 		goto vddio_fail;
 
+	rc = msm_11ad_enable_vreg(ctx, &ctx->vdd_ldo);
+	if (rc)
+		goto vdd_ldo_fail;
+
 	return rc;
 
+vdd_ldo_fail:
+	msm_11ad_disable_vreg(ctx, &ctx->vddio);
 vddio_fail:
 	msm_11ad_disable_vreg(ctx, &ctx->vdd);
 out:
@@ -391,10 +400,11 @@
 
 static int msm_11ad_disable_vregs(struct msm11ad_ctx *ctx)
 {
-	if (!ctx->vdd.reg && !ctx->vddio.reg)
+	if (!ctx->vdd.reg && !ctx->vddio.reg && !ctx->vdd_ldo.reg)
 		goto out;
 
 	/* ignore errors on disable vreg */
+	msm_11ad_disable_vreg(ctx, &ctx->vdd_ldo);
 	msm_11ad_disable_vreg(ctx, &ctx->vdd);
 	msm_11ad_disable_vreg(ctx, &ctx->vddio);
 
@@ -446,13 +456,13 @@
 {
 	int rc;
 
-	rc = msm_11ad_enable_clk(ctx, &ctx->rf_clk3);
+	rc = msm_11ad_enable_clk(ctx, &ctx->rf_clk);
 	if (rc)
 		return rc;
 
-	rc = msm_11ad_enable_clk(ctx, &ctx->rf_clk3_pin);
+	rc = msm_11ad_enable_clk(ctx, &ctx->rf_clk_pin);
 	if (rc)
-		msm_11ad_disable_clk(ctx, &ctx->rf_clk3);
+		msm_11ad_disable_clk(ctx, &ctx->rf_clk);
 
 	return rc;
 }
@@ -461,22 +471,22 @@
 {
 	int rc;
 	struct device *dev = ctx->dev;
-	int rf_clk3_pin_idx;
+	int rf_clk_pin_idx;
 
 	if (!of_property_read_bool(dev->of_node, "qcom,use-ext-clocks"))
 		return 0;
 
-	rc = msm_11ad_init_clk(dev, &ctx->rf_clk3, "rf_clk3_clk");
+	rc = msm_11ad_init_clk(dev, &ctx->rf_clk, "rf_clk_clk");
 	if (rc)
 		return rc;
 
-	rf_clk3_pin_idx = of_property_match_string(dev->of_node, "clock-names",
-						   "rf_clk3_pin_clk");
-	if (rf_clk3_pin_idx >= 0) {
-		rc = msm_11ad_init_clk(dev, &ctx->rf_clk3_pin,
-				       "rf_clk3_pin_clk");
+	rf_clk_pin_idx = of_property_match_string(dev->of_node, "clock-names",
+						   "rf_clk_pin_clk");
+	if (rf_clk_pin_idx >= 0) {
+		rc = msm_11ad_init_clk(dev, &ctx->rf_clk_pin,
+				       "rf_clk_pin_clk");
 		if (rc)
-			msm_11ad_release_clk(ctx->dev, &ctx->rf_clk3);
+			msm_11ad_release_clk(ctx->dev, &ctx->rf_clk);
 	}
 
 	return rc;
@@ -484,14 +494,14 @@
 
 static void msm_11ad_release_clocks(struct msm11ad_ctx *ctx)
 {
-	msm_11ad_release_clk(ctx->dev, &ctx->rf_clk3_pin);
-	msm_11ad_release_clk(ctx->dev, &ctx->rf_clk3);
+	msm_11ad_release_clk(ctx->dev, &ctx->rf_clk_pin);
+	msm_11ad_release_clk(ctx->dev, &ctx->rf_clk);
 }
 
 static void msm_11ad_disable_clocks(struct msm11ad_ctx *ctx)
 {
-	msm_11ad_disable_clk(ctx, &ctx->rf_clk3_pin);
-	msm_11ad_disable_clk(ctx, &ctx->rf_clk3);
+	msm_11ad_disable_clk(ctx, &ctx->rf_clk_pin);
+	msm_11ad_disable_clk(ctx, &ctx->rf_clk);
 }
 
 static int msm_11ad_turn_device_power_off(struct msm11ad_ctx *ctx)
@@ -769,86 +779,6 @@
 	return rc;
 }
 
-static int msm_11ad_smmu_init(struct msm11ad_ctx *ctx)
-{
-	int atomic_ctx = 1;
-	int rc;
-	int force_pt_coherent = 1;
-	int smmu_bypass = !ctx->smmu_s1_en;
-
-	if (!ctx->use_smmu)
-		return 0;
-
-	dev_info(ctx->dev, "Initialize SMMU, bypass=%d, fastmap=%d, coherent=%d\n",
-		 smmu_bypass, ctx->smmu_fast_map, ctx->smmu_coherent);
-
-	ctx->mapping = __depr_arm_iommu_create_mapping(&platform_bus_type,
-						ctx->smmu_base, ctx->smmu_size);
-	if (IS_ERR_OR_NULL(ctx->mapping)) {
-		rc = PTR_ERR(ctx->mapping) ?: -ENODEV;
-		dev_err(ctx->dev, "Failed to create IOMMU mapping (%d)\n", rc);
-		return rc;
-	}
-
-	rc = iommu_domain_set_attr(ctx->mapping->domain,
-				   DOMAIN_ATTR_ATOMIC,
-				   &atomic_ctx);
-	if (rc) {
-		dev_err(ctx->dev, "Set atomic attribute to SMMU failed (%d)\n",
-			rc);
-		goto release_mapping;
-	}
-
-	if (smmu_bypass) {
-		rc = iommu_domain_set_attr(ctx->mapping->domain,
-					   DOMAIN_ATTR_S1_BYPASS,
-					   &smmu_bypass);
-		if (rc) {
-			dev_err(ctx->dev, "Set bypass attribute to SMMU failed (%d)\n",
-				rc);
-			goto release_mapping;
-		}
-	} else {
-		/* Set dma-coherent and page table coherency */
-		if (ctx->smmu_coherent) {
-			arch_setup_dma_ops(&ctx->pcidev->dev, 0, 0, NULL, true);
-			rc = iommu_domain_set_attr(ctx->mapping->domain,
-				   DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT,
-				   &force_pt_coherent);
-			if (rc) {
-				dev_err(ctx->dev,
-					"Set SMMU PAGE_TABLE_FORCE_COHERENT attr failed (%d)\n",
-					rc);
-				goto release_mapping;
-			}
-		}
-
-		if (ctx->smmu_fast_map) {
-			rc = iommu_domain_set_attr(ctx->mapping->domain,
-						   DOMAIN_ATTR_FAST,
-						   &ctx->smmu_fast_map);
-			if (rc) {
-				dev_err(ctx->dev, "Set fast attribute to SMMU failed (%d)\n",
-					rc);
-				goto release_mapping;
-			}
-		}
-	}
-
-	rc = __depr_arm_iommu_attach_device(&ctx->pcidev->dev, ctx->mapping);
-	if (rc) {
-		dev_err(ctx->dev, "arm_iommu_attach_device failed (%d)\n", rc);
-		goto release_mapping;
-	}
-	dev_dbg(ctx->dev, "attached to IOMMU\n");
-
-	return 0;
-release_mapping:
-	__depr_arm_iommu_release_mapping(ctx->mapping);
-	ctx->mapping = NULL;
-	return rc;
-}
-
 static int msm_11ad_ssr_shutdown(const struct subsys_desc *subsys,
 				 bool force_stop)
 {
@@ -1091,7 +1021,6 @@
 	struct device_node *of_node = dev->of_node;
 	struct device_node *rc_node;
 	struct pci_dev *pcidev = NULL;
-	u32 smmu_mapping[2];
 	int rc, i;
 	bool pcidev_found = false;
 	struct msm_pcie_register_event *pci_event;
@@ -1118,7 +1047,6 @@
 	 *	qcom,msm-bus,vectors-KBps =
 	 *		<100 512 0 0>,
 	 *		<100 512 600000 800000>;
-	 *	qcom,smmu-support;
 	 *};
 	 * rc_node stands for "qcom,pcie", selected entries:
 	 * cell-index = <1>; (ctx->rc_index)
@@ -1149,7 +1077,6 @@
 		dev_err(ctx->dev, "Parent PCIE device index not found\n");
 		return -EINVAL;
 	}
-	ctx->use_smmu = of_property_read_bool(of_node, "qcom,smmu-support");
 	ctx->keep_radio_on_during_sleep = of_property_read_bool(of_node,
 		"qcom,keep-radio-on-during-sleep");
 	ctx->bus_scale = msm_bus_cl_get_pdata(pdev);
@@ -1158,28 +1085,6 @@
 		return -EINVAL;
 	}
 
-	ctx->smmu_s1_en = of_property_read_bool(of_node, "qcom,smmu-s1-en");
-	if (ctx->smmu_s1_en) {
-		ctx->smmu_fast_map = of_property_read_bool(
-						of_node, "qcom,smmu-fast-map");
-		ctx->smmu_coherent = of_property_read_bool(
-						of_node, "qcom,smmu-coherent");
-	}
-	rc = of_property_read_u32_array(dev->of_node, "qcom,smmu-mapping",
-			smmu_mapping, 2);
-	if (rc) {
-		dev_err(ctx->dev,
-			"Failed to read base/size smmu addresses %d, fallback to default\n",
-			rc);
-		ctx->smmu_base = SMMU_BASE;
-		ctx->smmu_size = SMMU_SIZE;
-	} else {
-		ctx->smmu_base = smmu_mapping[0];
-		ctx->smmu_size = smmu_mapping[1];
-	}
-	dev_dbg(ctx->dev, "smmu_base=0x%x smmu_sise=0x%x\n",
-		ctx->smmu_base, ctx->smmu_size);
-
 	/*== execute ==*/
 	/* turn device on */
 	rc = msm_11ad_init_vregs(ctx);
@@ -1310,10 +1215,9 @@
 		 "  gpio_dc = %d\n"
 		 "  sleep_clk_en = %d\n"
 		 "  rc_index = %d\n"
-		 "  use_smmu = %d\n"
 		 "  pcidev = %pK\n"
 		 "}\n", ctx, ctx->gpio_en, ctx->gpio_dc, ctx->sleep_clk_en,
-		 ctx->rc_index, ctx->use_smmu, ctx->pcidev);
+		 ctx->rc_index, ctx->pcidev);
 
 	platform_set_drvdata(pdev, ctx);
 	device_disable_async_suspend(&pcidev->dev);
@@ -1543,12 +1447,6 @@
 		ctx->msm_bus_handle = 0;
 	}
 
-	if (ctx->use_smmu) {
-		__depr_arm_iommu_detach_device(&ctx->pcidev->dev);
-		__depr_arm_iommu_release_mapping(ctx->mapping);
-		ctx->mapping = NULL;
-	}
-
 	memset(&ctx->rops, 0, sizeof(ctx->rops));
 	ctx->wil_handle = NULL;
 
@@ -1587,12 +1485,12 @@
 		break;
 	case WIL_PLATFORM_EVT_PRE_RESET:
 		/*
-		 * Enable rf_clk3 clock before resetting the device to ensure
+		 * Enable rf_clk clock before resetting the device to ensure
 		 * stable ref clock during the device reset
 		 */
 		if (ctx->features &
 		    BIT(WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL)) {
-			rc = msm_11ad_enable_clk(ctx, &ctx->rf_clk3);
+			rc = msm_11ad_enable_clk(ctx, &ctx->rf_clk);
 			if (rc) {
 				dev_err(ctx->dev,
 					"failed to enable clk, rc %d\n", rc);
@@ -1602,12 +1500,12 @@
 		break;
 	case WIL_PLATFORM_EVT_FW_RDY:
 		/*
-		 * Disable rf_clk3 clock after the device is up to allow
+		 * Disable rf_clk clock after the device is up to allow
 		 * the device to control it via its GPIO for power saving
 		 */
 		if (ctx->features &
 		    BIT(WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL))
-			msm_11ad_disable_clk(ctx, &ctx->rf_clk3);
+			msm_11ad_disable_clk(ctx, &ctx->rf_clk);
 
 		/*
 		 * Save golden config space for pci linkdown recovery.
@@ -1659,6 +1557,10 @@
 {
 	struct pci_dev *pcidev = to_pci_dev(dev);
 	struct msm11ad_ctx *ctx = pcidev2ctx(pcidev);
+	struct iommu_domain *domain;
+	int bypass = 0;
+	int fastmap = 0;
+	int coherent = 0;
 
 	if (!ctx) {
 		pr_err("Context not found for pcidev %pK\n", pcidev);
@@ -1673,11 +1575,19 @@
 		return NULL;
 	}
 	dev_info(ctx->dev, "msm_bus handle 0x%x\n", ctx->msm_bus_handle);
-	/* smmu */
-	if (msm_11ad_smmu_init(ctx)) {
-		msm_bus_scale_unregister_client(ctx->msm_bus_handle);
-		ctx->msm_bus_handle = 0;
-		return NULL;
+
+	domain = iommu_get_domain_for_dev(&pcidev->dev);
+	if (domain) {
+		iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS, &bypass);
+		iommu_domain_get_attr(domain, DOMAIN_ATTR_FAST, &fastmap);
+		iommu_domain_get_attr(domain,
+				      DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT,
+				      &coherent);
+
+		dev_info(ctx->dev, "SMMU initialized, bypass=%d, fastmap=%d, coherent=%d\n",
+			 bypass, fastmap, coherent);
+	} else {
+		dev_warn(ctx->dev, "Unable to get iommu domain\n");
 	}
 
 	/* subsystem restart */
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 345ccf3..a867f71 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -595,6 +595,16 @@
 	  This enable the userspace clients to read and write to
 	  some glink packets channel.
 
+config QCOM_SMP2P_SLEEPSTATE
+	bool "SMP2P Sleepstate notifier"
+	depends on QCOM_SMP2P
+	help
+	  When this option is enabled, notifications of power state changes
+	  on the local processor are sent to remote processors through the
+	  smp2p framework. The driver can also receive notifications from a
+	  remote processor to prevent the local processor from suspending.
+
 config QCOM_QDSS_BRIDGE
 	bool "Configure bridge driver for QTI/Qualcomm Technologies, Inc. MDM"
 	depends on MHI_BUS
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index f171fe6..1dc9f25 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -65,6 +65,7 @@
 obj-$(CONFIG_QCOM_FSA4480_I2C) += fsa4480-i2c.o
 obj-$(CONFIG_QCOM_GLINK) += glink_probe.o
 obj-$(CONFIG_QCOM_GLINK_PKT) += glink_pkt.o
+obj-$(CONFIG_QCOM_SMP2P_SLEEPSTATE) += smp2p_sleepstate.o
 obj-$(CONFIG_QCOM_QDSS_BRIDGE) += qdss_bridge.o
 obj-$(CONFIG_MSM_QBT_HANDLER) += qbt_handler.o
 obj-$(CONFIG_QSEE_IPC_IRQ) += qsee_ipc_irq.o
diff --git a/drivers/soc/qcom/qdss_bridge.c b/drivers/soc/qcom/qdss_bridge.c
index d5a1a97..2d1e465 100644
--- a/drivers/soc/qcom/qdss_bridge.c
+++ b/drivers/soc/qcom/qdss_bridge.c
@@ -18,6 +18,7 @@
 #include <linux/mhi.h>
 #include <linux/usb/usb_qdss.h>
 #include <linux/of.h>
+#include <linux/delay.h>
 #include "qdss_bridge.h"
 
 #define MODULE_NAME "qdss_bridge"
@@ -142,6 +143,20 @@
 	return NULL;
 }
 
+static int qdss_check_entry(struct qdss_bridge_drvdata *drvdata)
+{
+	struct qdss_buf_tbl_lst *entry;
+
+	list_for_each_entry(entry, &drvdata->buf_tbl, link) {
+		if (atomic_read(&entry->available) == 0)
+			return 1;
+	}
+
+	return 0;
+}
 
 static void qdss_del_buf_tbl_entry(struct qdss_bridge_drvdata *drvdata,
 				void *buf)
@@ -808,9 +823,11 @@
 			spin_unlock_bh(&drvdata->lock);
 			if (drvdata->usb_ch && drvdata->usb_ch->priv_usb)
 				usb_qdss_close(drvdata->usb_ch);
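+			/* wait for all outstanding read buffers to return */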
+			do {
+				msleep(20);
+			} while (qdss_check_entry(drvdata));
 		}
 		mhi_ch_close(drvdata);
-
 	} else
 		spin_unlock_bh(&drvdata->lock);
 
diff --git a/drivers/soc/qcom/qmi_encdec.c b/drivers/soc/qcom/qmi_encdec.c
index 3aaab71..331d67f 100644
--- a/drivers/soc/qcom/qmi_encdec.c
+++ b/drivers/soc/qcom/qmi_encdec.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2015, 2019 The Linux Foundation. All rights reserved.
  * Copyright (C) 2017 Linaro Ltd.
  */
 #include <linux/slab.h>
@@ -534,8 +534,8 @@
 		decoded_bytes += rc;
 	}
 
-	if (string_len > temp_ei->elem_len) {
-		pr_err("%s: String len %d > Max Len %d\n",
+	if (string_len >= temp_ei->elem_len) {
+		pr_err("%s: String len %d >= Max Len %d\n",
 		       __func__, string_len, temp_ei->elem_len);
 		return -ETOOSMALL;
 	} else if (string_len > tlv_len) {
diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c
index 4400f51..a391dae 100644
--- a/drivers/soc/qcom/qmi_interface.c
+++ b/drivers/soc/qcom/qmi_interface.c
@@ -347,6 +347,13 @@
 
 	ret = wait_for_completion_timeout(&txn->completion, timeout);
 
+	mutex_lock(&txn->lock);
+	if (txn->result == -ENETRESET) {
+		mutex_unlock(&txn->lock);
+		return txn->result;
+	}
+	mutex_unlock(&txn->lock);
+
 	mutex_lock(&qmi->txn_lock);
 	mutex_lock(&txn->lock);
 	idr_remove(&qmi->txns, txn->id);
@@ -446,17 +453,18 @@
 	if (IS_ERR(sock))
 		return;
 
-	mutex_lock(&qmi->sock_lock);
-	sock_release(qmi->sock);
-	qmi->sock = NULL;
-	mutex_unlock(&qmi->sock_lock);
-
 	qmi_recv_del_server(qmi, -1, -1);
 
 	if (qmi->ops.net_reset)
 		qmi->ops.net_reset(qmi);
 
 	mutex_lock(&qmi->sock_lock);
+	/* qmi_handle_release() has already started */
+	if (!qmi->sock) {
+		mutex_unlock(&qmi->sock_lock);
+		sock_release(sock);
+		return;
+	}
+	sock_release(qmi->sock);
 	qmi->sock = sock;
 	qmi->sq = sq;
 	mutex_unlock(&qmi->sock_lock);
@@ -570,16 +578,21 @@
 
 static void qmi_data_ready(struct sock *sk)
 {
-	struct qmi_handle *qmi = sk->sk_user_data;
+	struct qmi_handle *qmi = NULL;
 
 	/*
 	 * This will be NULL if we receive data while being in
 	 * qmi_handle_release()
 	 */
-	if (!qmi)
+	read_lock_bh(&sk->sk_callback_lock);
+	qmi = sk->sk_user_data;
+	if (!qmi) {
+		read_unlock_bh(&sk->sk_callback_lock);
 		return;
+	}
 
 	queue_work(qmi->wq, &qmi->work);
+	read_unlock_bh(&sk->sk_callback_lock);
 }
 
 static struct socket *qmi_sock_create(struct qmi_handle *qmi,
@@ -602,6 +615,7 @@
 	sock->sk->sk_user_data = qmi;
 	sock->sk->sk_data_ready = qmi_data_ready;
 	sock->sk->sk_error_report = qmi_data_ready;
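+	/* Bound blocking sends to 10 seconds */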
+	sock->sk->sk_sndtimeo = HZ * 10;
 
 	return sock;
 }
@@ -682,21 +696,35 @@
  */
 void qmi_handle_release(struct qmi_handle *qmi)
 {
-	struct socket *sock = qmi->sock;
+	struct socket *sock;
 	struct qmi_service *svc, *tmp;
-
-	sock->sk->sk_user_data = NULL;
-	cancel_work_sync(&qmi->work);
-
-	qmi_recv_del_server(qmi, -1, -1);
+	struct qmi_txn *txn;
+	int txn_id;
 
 	mutex_lock(&qmi->sock_lock);
+	sock = qmi->sock;
+	write_lock_bh(&sock->sk->sk_callback_lock);
+	sock->sk->sk_user_data = NULL;
+	write_unlock_bh(&sock->sk->sk_callback_lock);
 	sock_release(sock);
 	qmi->sock = NULL;
 	mutex_unlock(&qmi->sock_lock);
 
+	cancel_work_sync(&qmi->work);
+
+	qmi_recv_del_server(qmi, -1, -1);
+
 	destroy_workqueue(qmi->wq);
 
+	mutex_lock(&qmi->txn_lock);
+	idr_for_each_entry(&qmi->txns, txn, txn_id) {
+		mutex_lock(&txn->lock);
+		idr_remove(&qmi->txns, txn->id);
+		txn->result = -ENETRESET;
+		complete(&txn->completion);
+		mutex_unlock(&txn->lock);
+	}
+	mutex_unlock(&qmi->txn_lock);
 	idr_destroy(&qmi->txns);
 
 	kfree(qmi->recv_buf);
@@ -761,7 +789,7 @@
 	if (qmi->sock) {
 		ret = kernel_sendmsg(qmi->sock, &msghdr, &iv, 1, len);
 		if (ret < 0)
-			pr_err("failed to send QMI message\n");
+			pr_info("failed to send QMI message %d\n", ret);
 	} else {
 		ret = -EPIPE;
 	}
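
With the qmi_interface.c change above, qmi_txn_wait() can now complete with
-ENETRESET when qmi_handle_release() tears down the handle while transactions
are still pending. A minimal, hypothetical caller sketch of how a client might
handle that result; my_req/my_resp, the *_ei tables, MY_MSG_ID and
MY_MSG_MAX_LEN are illustrative placeholders, not part of this patch:

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/soc/qcom/qmi.h>

/* Dummy message types and encode/decode tables, for illustration only. */
struct my_req { u8 dummy; };
struct my_resp { struct qmi_response_type_v01 resp; };
static struct qmi_elem_info my_req_ei[] = { {} };
static struct qmi_elem_info my_resp_ei[] = { {} };
#define MY_MSG_ID	0x0021
#define MY_MSG_MAX_LEN	64

static int my_send_and_wait(struct qmi_handle *qmi, struct sockaddr_qrtr *sq)
{
	struct my_resp resp = {};
	struct my_req req = {};
	struct qmi_txn txn;
	int ret;

	ret = qmi_txn_init(qmi, &txn, my_resp_ei, &resp);
	if (ret < 0)
		return ret;

	ret = qmi_send_request(qmi, sq, &txn, MY_MSG_ID, MY_MSG_MAX_LEN,
			       my_req_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		return ret;
	}

	ret = qmi_txn_wait(&txn, 5 * HZ);
	if (ret == -ENETRESET)	/* handle torn down while we were waiting */
		return ret;

	return ret < 0 ? ret : 0;
}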
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
index b8379f1..b8585d1 100644
--- a/drivers/soc/qcom/smp2p.c
+++ b/drivers/soc/qcom/smp2p.c
@@ -26,6 +26,7 @@
 #include <linux/soc/qcom/smem.h>
 #include <linux/soc/qcom/smem_state.h>
 #include <linux/spinlock.h>
+#include <linux/pm_wakeup.h>
 
 #include <linux/ipc_logging.h>
 
@@ -160,6 +161,7 @@
 	struct regmap *ipc_regmap;
 	int ipc_offset;
 	int ipc_bit;
+	struct wakeup_source ws;
 
 	struct mbox_client mbox_client;
 	struct mbox_chan *mbox_chan;
@@ -297,6 +299,14 @@
 	}
 }
 
+static irqreturn_t qcom_smp2p_isr(int irq, void *data)
+{
+	struct qcom_smp2p *smp2p = data;
+
+	__pm_stay_awake(&smp2p->ws);
+	return IRQ_WAKE_THREAD;
+}
+
 /**
  * qcom_smp2p_intr() - interrupt handler for incoming notifications
  * @irq:	unused
@@ -321,7 +331,7 @@
 		if (IS_ERR(in)) {
 			dev_err(smp2p->dev,
 				"Unable to acquire remote smp2p item\n");
-			return IRQ_HANDLED;
+			goto out;
 		}
 
 		smp2p->in = in;
@@ -340,6 +350,8 @@
 			qcom_smp2p_do_ssr_ack(smp2p);
 	}
 
+out:
+	__pm_relax(&smp2p->ws);
 	return IRQ_HANDLED;
 }
 
@@ -636,12 +648,13 @@
 			list_add(&entry->node, &smp2p->outbound);
 		}
 	}
+	wakeup_source_init(&smp2p->ws, "smp2p");
 
 	/* Kick the outgoing edge after allocating entries */
 	qcom_smp2p_kick(smp2p);
 
 	ret = devm_request_threaded_irq(&pdev->dev, smp2p->irq,
-					NULL, qcom_smp2p_intr,
+					qcom_smp2p_isr, qcom_smp2p_intr,
 					IRQF_NO_SUSPEND | IRQF_ONESHOT,
 					"smp2p", (void *)smp2p);
 	if (ret) {
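
The primary handler added here takes a wakeup source in hard-IRQ context and
the threaded handler releases it once the notification has been processed, so
the system cannot suspend between the interrupt and its handling. A generic
sketch of the same pattern under hypothetical demo_* names (not from this
patch):

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

struct demo_ctx {
	struct wakeup_source ws;	/* set up with wakeup_source_init() */
};

static irqreturn_t demo_hardirq(int irq, void *data)
{
	struct demo_ctx *ctx = data;

	/* Block suspend until the threaded handler has finished */
	__pm_stay_awake(&ctx->ws);
	return IRQ_WAKE_THREAD;
}

static irqreturn_t demo_thread_fn(int irq, void *data)
{
	struct demo_ctx *ctx = data;

	/* ... handle the notification ... */
	__pm_relax(&ctx->ws);
	return IRQ_HANDLED;
}

static int demo_request(struct device *dev, int irq, struct demo_ctx *ctx)
{
	wakeup_source_init(&ctx->ws, "demo");
	return devm_request_threaded_irq(dev, irq, demo_hardirq, demo_thread_fn,
					 IRQF_NO_SUSPEND | IRQF_ONESHOT,
					 "demo", ctx);
}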
diff --git a/drivers/soc/qcom/smp2p_sleepstate.c b/drivers/soc/qcom/smp2p_sleepstate.c
new file mode 100644
index 0000000..f090415
--- /dev/null
+++ b/drivers/soc/qcom/smp2p_sleepstate.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
+ */
+#include <linux/module.h>
+#include <linux/suspend.h>
+#include <linux/platform_device.h>
+#include <linux/soc/qcom/smem_state.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/of_irq.h>
+#include <linux/of.h>
+#include <linux/pm_wakeup.h>
+
+#define PROC_AWAKE_ID 12 /* 12th bit */
+#define AWAKE_BIT BIT(PROC_AWAKE_ID)
+static struct qcom_smem_state *state;
+static struct wakeup_source notify_ws;
+
+/**
+ * sleepstate_pm_notifier() - PM notifier callback function.
+ * @nb:		Pointer to the notifier block.
+ * @event:	Suspend state event from PM module.
+ * @unused:	Null pointer from PM module.
+ *
+ * This function is registered as a callback to receive notifications
+ * from the PM module about system suspend state changes.
+ */
+static int sleepstate_pm_notifier(struct notifier_block *nb,
+				  unsigned long event, void *unused)
+{
+	switch (event) {
+	case PM_SUSPEND_PREPARE:
+		qcom_smem_state_update_bits(state, AWAKE_BIT, 0);
+		break;
+
+	case PM_POST_SUSPEND:
+		qcom_smem_state_update_bits(state, AWAKE_BIT, AWAKE_BIT);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block sleepstate_pm_nb = {
+	.notifier_call = sleepstate_pm_notifier,
+	.priority = INT_MAX,
+};
+
+static irqreturn_t smp2p_sleepstate_handler(int irq, void *ctxt)
+{
+	__pm_wakeup_event(&notify_ws, 200);
+	return IRQ_HANDLED;
+}
+
+static int smp2p_sleepstate_probe(struct platform_device *pdev)
+{
+	int ret;
+	int irq;
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+
+	state = qcom_smem_state_get(&pdev->dev, 0, &ret);
+	if (IS_ERR(state))
+		return PTR_ERR(state);
+	qcom_smem_state_update_bits(state, AWAKE_BIT, AWAKE_BIT);
+
+	ret = register_pm_notifier(&sleepstate_pm_nb);
+	if (ret) {
+		dev_err(dev, "%s: power state notif error %d\n", __func__, ret);
+		return ret;
+	}
+	wakeup_source_init(&notify_ws, "smp2p-sleepstate");
+
+	irq = of_irq_get_byname(node, "smp2p-sleepstate-in");
+	if (irq <= 0) {
+		dev_err(dev, "failed to get irq for smp2p_sleep_state\n");
+		ret = -EPROBE_DEFER;
+		goto err;
+	}
+	dev_dbg(dev, "got smp2p-sleepstate-in irq %d\n", irq);
+	ret = devm_request_threaded_irq(dev, irq, NULL,
+					smp2p_sleepstate_handler,
+					IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+					"smp2p_sleepstate", dev);
+	if (ret) {
+		dev_err(dev, "failed to register smp2p threaded irq %d\n", irq);
+		goto err;
+	}
+	return 0;
+err:
+	wakeup_source_trash(&notify_ws);
+	unregister_pm_notifier(&sleepstate_pm_nb);
+	return ret;
+}
+
+static const struct of_device_id smp2p_slst_match_table[] = {
+	{.compatible = "qcom,smp2p-sleepstate"},
+	{},
+};
+
+static struct platform_driver smp2p_sleepstate_driver = {
+	.probe = smp2p_sleepstate_probe,
+	.driver = {
+		.name = "smp2p_sleepstate",
+		.of_match_table = smp2p_slst_match_table,
+	},
+};
+
+static int __init smp2p_sleepstate_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&smp2p_sleepstate_driver);
+	if (ret) {
+		pr_err("%s: register failed %d\n", __func__, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+module_init(smp2p_sleepstate_init);
+MODULE_DESCRIPTION("SMP2P SLEEP STATE");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spmi/spmi-pmic-arb-debug.c b/drivers/spmi/spmi-pmic-arb-debug.c
index 83ef9ed..31f72ea 100644
--- a/drivers/spmi/spmi-pmic-arb-debug.c
+++ b/drivers/spmi/spmi-pmic-arb-debug.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. */
 
 #include <linux/delay.h>
 #include <linux/err.h>
@@ -328,17 +328,7 @@
 	},
 };
 
-int __init spmi_pmic_arb_debug_init(void)
-{
-	return platform_driver_register(&spmi_pmic_arb_debug_driver);
-}
-arch_initcall(spmi_pmic_arb_debug_init);
-
-static void __exit spmi_pmic_arb_debug_exit(void)
-{
-	platform_driver_unregister(&spmi_pmic_arb_debug_driver);
-}
-module_exit(spmi_pmic_arb_debug_exit);
+module_platform_driver(spmi_pmic_arb_debug_driver);
 
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("platform:spmi_pmic_arb_debug");
diff --git a/drivers/thermal/qcom/qmi_sensors.c b/drivers/thermal/qcom/qmi_sensors.c
index 25e9850..98259a8 100644
--- a/drivers/thermal/qcom/qmi_sensors.c
+++ b/drivers/thermal/qcom/qmi_sensors.c
@@ -32,6 +32,7 @@
 enum qmi_ts_sensor {
 	QMI_TS_PA,
 	QMI_TS_PA_1,
+	QMI_TS_PA_2,
 	QMI_TS_QFE_PA_0,
 	QMI_TS_QFE_WTR_0,
 	QMI_TS_MODEM_MODEM,
@@ -75,6 +76,7 @@
 static char sensor_clients[QMI_TS_MAX_NR][QMI_CLIENT_NAME_LENGTH] = {
 	{"pa"},
 	{"pa_1"},
+	{"pa_2"},
 	{"qfe_pa0"},
 	{"qfe_wtr0"},
 	{"modem_tsens"},
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 7f78d26..a72efa0 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -634,4 +634,12 @@
 int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
 				 struct drm_dp_mst_port *port, bool power_up);
 
+int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
+			   struct drm_dp_mst_port *port,
+			   int offset, int size, u8 *bytes);
+
+int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
+			  struct drm_dp_mst_port *port,
+			  int offset, int size, u8 *bytes);
+
 #endif
diff --git a/include/dt-bindings/clock/qcom,npucc-lito.h b/include/dt-bindings/clock/qcom,npucc-lito.h
index 8097f95..f405d84 100644
--- a/include/dt-bindings/clock/qcom,npucc-lito.h
+++ b/include/dt-bindings/clock/qcom,npucc-lito.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. */
 
 #ifndef _DT_BINDINGS_CLK_QCOM_NPU_CC_LITO_H
 #define _DT_BINDINGS_CLK_QCOM_NPU_CC_LITO_H
@@ -8,51 +8,46 @@
 #define NPU_CC_PLL0_OUT_EVEN					1
 #define NPU_CC_PLL1						2
 #define NPU_CC_PLL1_OUT_EVEN					3
-#define NPU_Q6SS_PLL						4
-#define NPU_CC_CAL_HM0_CLK					5
-#define NPU_CC_CAL_HM0_CLK_SRC					6
-#define NPU_CC_CAL_HM0_DPM_IP_CLK				7
-#define NPU_CC_CAL_HM0_PERF_CNT_CLK				8
-#define NPU_CC_CAL_HM1_CDC_CLK					9
-#define NPU_CC_CAL_HM1_CLK					10
-#define NPU_CC_CAL_HM1_CLK_SRC					11
-#define NPU_CC_CAL_HM1_DPM_IP_CLK				12
-#define NPU_CC_CAL_HM1_PERF_CNT_CLK				13
-#define NPU_CC_CORE_CLK						14
-#define NPU_CC_CORE_CLK_SRC					15
-#define NPU_CC_DL_DPM_CLK					16
-#define NPU_CC_DL_LLM_CLK					17
-#define NPU_CC_DPM_CLK						18
-#define NPU_CC_DPM_TEMP_CLK					19
-#define NPU_CC_DPM_XO_CLK					20
-#define NPU_CC_DSP_AHBM_CLK					21
-#define NPU_CC_DSP_AHBS_CLK					22
-#define NPU_CC_DSP_AXI_CLK					23
-#define NPU_CC_DSP_BWMON_AHB_CLK				24
-#define NPU_CC_DSP_BWMON_CLK					25
-#define NPU_CC_ISENSE_CLK					26
-#define NPU_CC_LLM_CLK						27
-#define NPU_CC_LLM_CURR_CLK					28
-#define NPU_CC_LLM_TEMP_CLK					29
-#define NPU_CC_LLM_XO_CLK					30
-#define NPU_CC_LMH_CLK_SRC					31
-#define NPU_CC_NOC_AHB_CLK					32
-#define NPU_CC_NOC_AXI_CLK					33
-#define NPU_CC_NOC_DMA_CLK					34
-#define NPU_CC_AON_CLK						35
-#define NPU_CC_ATB_CLK						36
-#define NPU_CC_BTO_CORE_CLK					37
-#define NPU_CC_BWMON_CLK					38
-#define NPU_CC_RSC_XO_CLK					39
-#define NPU_CC_S2P_CLK						40
-#define NPU_CC_XO_CLK						41
-#define NPU_CC_XO_CLK_SRC					42
-#define NPU_DSP_CORE_CLK_SRC					43
-#define NPU_CC_CAL_HM0_CDC_CLK					44
+#define NPU_CC_CAL_HM0_CLK					4
+#define NPU_CC_CAL_HM0_CLK_SRC					5
+#define NPU_CC_CAL_HM0_DPM_IP_CLK				6
+#define NPU_CC_CAL_HM0_PERF_CNT_CLK				7
+#define NPU_CC_CAL_HM0_CDC_CLK					8
+#define NPU_CC_CORE_CLK						9
+#define NPU_CC_CORE_CLK_SRC					10
+#define NPU_CC_DL_DPM_CLK					11
+#define NPU_CC_DL_LLM_CLK					12
+#define NPU_CC_DPM_CLK						13
+#define NPU_CC_DPM_TEMP_CLK					14
+#define NPU_CC_DPM_XO_CLK					15
+#define NPU_CC_DSP_AHBM_CLK					16
+#define NPU_CC_DSP_AHBS_CLK					17
+#define NPU_CC_DSP_AXI_CLK					18
+#define NPU_CC_DSP_BWMON_AHB_CLK				19
+#define NPU_CC_DSP_BWMON_CLK					20
+#define NPU_CC_ISENSE_CLK					21
+#define NPU_CC_LLM_CLK						22
+#define NPU_CC_LLM_CURR_CLK					23
+#define NPU_CC_LLM_TEMP_CLK					24
+#define NPU_CC_LLM_XO_CLK					25
+#define NPU_CC_LMH_CLK_SRC					26
+#define NPU_CC_NOC_AHB_CLK					27
+#define NPU_CC_NOC_AXI_CLK					28
+#define NPU_CC_NOC_DMA_CLK					29
+#define NPU_CC_BTO_CORE_CLK					30
+#define NPU_CC_BWMON_CLK					31
+#define NPU_CC_RSC_XO_CLK					32
+#define NPU_CC_S2P_CLK						33
+#define NPU_CC_XO_CLK						34
+#define NPU_CC_XO_CLK_SRC					35
+#define NPU_DSP_CORE_CLK_SRC					36
+#define NPU_Q6SS_PLL						37
 
 #define NPU_CC_CAL_HM0_BCR					0
 #define NPU_CC_CAL_HM1_BCR					1
 #define NPU_CC_CORE_BCR						2
 #define NPU_CC_DSP_BCR						3
-
+#define NPU_CC_DPM_TEMP_CLK_ARES				4
+#define NPU_CC_LLM_TEMP_CLK_ARES				5
+#define NPU_CC_LLM_CURR_CLK_ARES				6
 #endif
diff --git a/include/linux/hdcp_qseecom.h b/include/linux/hdcp_qseecom.h
index 96b24a1..34ffb9f 100644
--- a/include/linux/hdcp_qseecom.h
+++ b/include/linux/hdcp_qseecom.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef __HDCP_QSEECOM_H
@@ -11,6 +11,7 @@
 
 enum hdcp2_app_cmd {
 	HDCP2_CMD_START,
+	HDCP2_CMD_START_AUTH,
 	HDCP2_CMD_STOP,
 	HDCP2_CMD_PROCESS_MSG,
 	HDCP2_CMD_TIMEOUT,
@@ -35,6 +36,8 @@
 	switch (cmd) {
 	case HDCP2_CMD_START:
 		return HDCP_QSEECOM_ENUM_STR(HDCP2_CMD_START);
+	case HDCP2_CMD_START_AUTH:
+		return HDCP_QSEECOM_ENUM_STR(HDCP2_CMD_START_AUTH);
 	case HDCP2_CMD_STOP:
 		return HDCP_QSEECOM_ENUM_STR(HDCP2_CMD_STOP);
 	case HDCP2_CMD_PROCESS_MSG:
diff --git a/include/linux/psi.h b/include/linux/psi.h
index 7006008..af892c2 100644
--- a/include/linux/psi.h
+++ b/include/linux/psi.h
@@ -4,6 +4,7 @@
 #include <linux/jump_label.h>
 #include <linux/psi_types.h>
 #include <linux/sched.h>
+#include <linux/poll.h>
 
 struct seq_file;
 struct css_set;
@@ -26,6 +27,13 @@
 int psi_cgroup_alloc(struct cgroup *cgrp);
 void psi_cgroup_free(struct cgroup *cgrp);
 void cgroup_move_task(struct task_struct *p, struct css_set *to);
+
+struct psi_trigger *psi_trigger_create(struct psi_group *group,
+			char *buf, size_t nbytes, enum psi_res res);
+void psi_trigger_replace(void **trigger_ptr, struct psi_trigger *t);
+
+__poll_t psi_trigger_poll(void **trigger_ptr, struct file *file,
+			poll_table *wait);
 #endif
 
 #else /* CONFIG_PSI */
diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h
index 4d1c1f6..07aaf9b 100644
--- a/include/linux/psi_types.h
+++ b/include/linux/psi_types.h
@@ -1,8 +1,11 @@
 #ifndef _LINUX_PSI_TYPES_H
 #define _LINUX_PSI_TYPES_H
 
+#include <linux/kthread.h>
 #include <linux/seqlock.h>
 #include <linux/types.h>
+#include <linux/kref.h>
+#include <linux/wait.h>
 
 #ifdef CONFIG_PSI
 
@@ -44,6 +47,12 @@
 	NR_PSI_STATES = 6,
 };
 
+enum psi_aggregators {
+	PSI_AVGS = 0,
+	PSI_POLL,
+	NR_PSI_AGGREGATORS,
+};
+
 struct psi_group_cpu {
 	/* 1st cacheline updated by the scheduler */
 
@@ -65,7 +74,55 @@
 	/* 2nd cacheline updated by the aggregator */
 
 	/* Delta detection against the sampling buckets */
-	u32 times_prev[NR_PSI_STATES] ____cacheline_aligned_in_smp;
+	u32 times_prev[NR_PSI_AGGREGATORS][NR_PSI_STATES]
+			____cacheline_aligned_in_smp;
+};
+
+/* PSI growth tracking window */
+struct psi_window {
+	/* Window size in ns */
+	u64 size;
+
+	/* Start time of the current window in ns */
+	u64 start_time;
+
+	/* Value at the start of the window */
+	u64 start_value;
+
+	/* Value growth in the previous window */
+	u64 prev_growth;
+};
+
+struct psi_trigger {
+	/* PSI state being monitored by the trigger */
+	enum psi_states state;
+
+	/* User-specified threshold in ns */
+	u64 threshold;
+
+	/* List node inside triggers list */
+	struct list_head node;
+
+	/* Backpointer needed during trigger destruction */
+	struct psi_group *group;
+
+	/* Wait queue for polling */
+	wait_queue_head_t event_wait;
+
+	/* Pending event flag */
+	int event;
+
+	/* Tracking window */
+	struct psi_window win;
+
+	/*
+	 * Time last event was generated. Used for rate-limiting
+	 * events to one per window
+	 */
+	u64 last_event_time;
+
+	/* Refcounting to prevent premature destruction */
+	struct kref refcount;
 };
 
 struct psi_group {
@@ -79,11 +136,32 @@
 	u64 avg_total[NR_PSI_STATES - 1];
 	u64 avg_last_update;
 	u64 avg_next_update;
+
+	/* Aggregator work control */
 	struct delayed_work avgs_work;
 
 	/* Total stall times and sampled pressure averages */
-	u64 total[NR_PSI_STATES - 1];
+	u64 total[NR_PSI_AGGREGATORS][NR_PSI_STATES - 1];
 	unsigned long avg[NR_PSI_STATES - 1][3];
+
+	/* Monitor work control */
+	atomic_t poll_scheduled;
+	struct kthread_worker __rcu *poll_kworker;
+	struct kthread_delayed_work poll_work;
+
+	/* Protects data used by the monitor */
+	struct mutex trigger_lock;
+
+	/* Configured polling triggers */
+	struct list_head triggers;
+	u32 nr_triggers[NR_PSI_STATES - 1];
+	u32 poll_states;
+	u64 poll_min_period;
+
+	/* Total stall times at the start of monitor activation */
+	u64 polling_total[NR_PSI_STATES - 1];
+	u64 polling_next_update;
+	u64 polling_until;
 };
 
 #else /* CONFIG_PSI */
diff --git a/include/uapi/linux/ipa_qmi_service_v01.h b/include/uapi/linux/ipa_qmi_service_v01.h
index 9b9448e..f773d96 100644
--- a/include/uapi/linux/ipa_qmi_service_v01.h
+++ b/include/uapi/linux/ipa_qmi_service_v01.h
@@ -104,11 +104,16 @@
 	QMI_IPA_PLATFORM_TYPE_MSM_WINDOWS_V01 = 4,
 	/*  Platform identifier -	MSM device with Windows HLOS */
 	QMI_IPA_PLATFORM_TYPE_MSM_QNX_V01 = 5,
 	/*  Platform identifier -	MSM device with QNX HLOS */
+	QMI_IPA_PLATFORM_TYPE_LE_MHI_V01 = 6,
+	/* Platform identifier - MDM device with LE HLOS, MHI data router */
 	IPA_PLATFORM_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
 	/* To force a 32 bit signed enum.  Do not change or use */
 };
 
+#define QMI_IPA_PLATFORM_TYPE_LE_MHI_V01 \
+			QMI_IPA_PLATFORM_TYPE_LE_MHI_V01
+
 struct ipa_hdr_tbl_info_type_v01 {
 	uint32_t modem_offset_start;
 	/*	Offset from the start of IPA Shared memory from which
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index d3c7b7a..772c9aa 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -618,6 +618,7 @@
 enum ipa_wlan_fw_ssr_event {
 	WLAN_FWR_SSR_BEFORE_SHUTDOWN = BRIDGE_VLAN_MAPPING_MAX,
 	IPA_WLAN_FW_SSR_EVENT_MAX,
+#define IPA_WLAN_FW_SSR_EVENT_MAX IPA_WLAN_FW_SSR_EVENT_MAX
 };
 
 enum ipa_gsb_event {
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 4d631cb..eba5cab 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -3430,7 +3430,65 @@
 {
 	return psi_show(seq, &seq_css(seq)->cgroup->psi, PSI_CPU);
 }
-#endif
+
+static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf,
+					  size_t nbytes, enum psi_res res)
+{
+	struct psi_trigger *new;
+	struct cgroup *cgrp;
+
+	cgrp = cgroup_kn_lock_live(of->kn, false);
+	if (!cgrp)
+		return -ENODEV;
+
+	cgroup_get(cgrp);
+	cgroup_kn_unlock(of->kn);
+
+	new = psi_trigger_create(&cgrp->psi, buf, nbytes, res);
+	if (IS_ERR(new)) {
+		cgroup_put(cgrp);
+		return PTR_ERR(new);
+	}
+
+	psi_trigger_replace(&of->priv, new);
+
+	cgroup_put(cgrp);
+
+	return nbytes;
+}
+
+static ssize_t cgroup_io_pressure_write(struct kernfs_open_file *of,
+					  char *buf, size_t nbytes,
+					  loff_t off)
+{
+	return cgroup_pressure_write(of, buf, nbytes, PSI_IO);
+}
+
+static ssize_t cgroup_memory_pressure_write(struct kernfs_open_file *of,
+					  char *buf, size_t nbytes,
+					  loff_t off)
+{
+	return cgroup_pressure_write(of, buf, nbytes, PSI_MEM);
+}
+
+static ssize_t cgroup_cpu_pressure_write(struct kernfs_open_file *of,
+					  char *buf, size_t nbytes,
+					  loff_t off)
+{
+	return cgroup_pressure_write(of, buf, nbytes, PSI_CPU);
+}
+
+static __poll_t cgroup_pressure_poll(struct kernfs_open_file *of,
+					  poll_table *pt)
+{
+	return psi_trigger_poll(&of->priv, of->file, pt);
+}
+
+static void cgroup_pressure_release(struct kernfs_open_file *of)
+{
+	psi_trigger_replace(&of->priv, NULL);
+}
+#endif /* CONFIG_PSI */
 
 static int cgroup_file_open(struct kernfs_open_file *of)
 {
@@ -4584,18 +4642,27 @@
 		.name = "io.pressure",
 		.flags = CFTYPE_NOT_ON_ROOT,
 		.seq_show = cgroup_io_pressure_show,
+		.write = cgroup_io_pressure_write,
+		.poll = cgroup_pressure_poll,
+		.release = cgroup_pressure_release,
 	},
 	{
 		.name = "memory.pressure",
 		.flags = CFTYPE_NOT_ON_ROOT,
 		.seq_show = cgroup_memory_pressure_show,
+		.write = cgroup_memory_pressure_write,
+		.poll = cgroup_pressure_poll,
+		.release = cgroup_pressure_release,
 	},
 	{
 		.name = "cpu.pressure",
 		.flags = CFTYPE_NOT_ON_ROOT,
 		.seq_show = cgroup_cpu_pressure_show,
+		.write = cgroup_cpu_pressure_write,
+		.poll = cgroup_pressure_poll,
+		.release = cgroup_pressure_release,
 	},
-#endif
+#endif /* CONFIG_PSI */
 	{ }	/* terminate */
 };
 
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index a2b3d9d..e521950 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -1376,6 +1376,10 @@
 int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
 {
 	data = data->parent_data;
+
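+	/* The parent chip opted out of irq_set_wake(); report success */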
+	if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
+		return 0;
+
 	if (data->chip->irq_set_wake)
 		return data->chip->irq_set_wake(data, on);
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 465a1a7..2fb2a69 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7194,6 +7194,24 @@
 	return min(util, capacity_orig_of(cpu));
 }
 
+#ifdef CONFIG_SCHED_WALT
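+/*
+ * Estimate @cpu's WALT-based utilization if @p were running on @dst_cpu:
+ * remove @p's contribution when it would migrate off @cpu, add it when a
+ * waking @p would land on @cpu, and clamp to the CPU's original capacity.
+ */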
+static inline unsigned long
+cpu_util_next_walt(int cpu, struct task_struct *p, int dst_cpu)
+{
+	unsigned long util =
+			cpu_rq(cpu)->walt_stats.cumulative_runnable_avg_scaled;
+	bool queued = task_on_rq_queued(p);
+
+	if (unlikely(queued && task_cpu(p) == cpu && dst_cpu != cpu))
+		util = max_t(long, util - task_util(p), 0);
+	else if (task_cpu(p) != cpu && dst_cpu == cpu &&
+						p->state == TASK_WAKING)
+		util += task_util(p);
+
+	return min_t(unsigned long, util, capacity_orig_of(cpu));
+}
+#endif
+
 /*
  * compute_energy(): Estimates the energy that would be consumed if @p was
  * migrated to @dst_cpu. compute_energy() predicts what will be the utilization
@@ -7220,9 +7238,13 @@
 		 * by compute_energy().
 		 */
 		for_each_cpu_and(cpu, perf_domain_span(pd), cpu_online_mask) {
+#ifdef CONFIG_SCHED_WALT
+			util = cpu_util_next_walt(cpu, p, dst_cpu);
+#else
 			util = cpu_util_next(cpu, p, dst_cpu);
 			util += cpu_util_rt(cpu_rq(cpu));
 			util = schedutil_energy_util(cpu, util);
+#endif
 			max_util = max(util, max_util);
 			sum_util += util;
 		}
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index 1b99eef..e88918e 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -4,6 +4,9 @@
  * Copyright (c) 2018 Facebook, Inc.
  * Author: Johannes Weiner <hannes@cmpxchg.org>
  *
+ * Polling support by Suren Baghdasaryan <surenb@google.com>
+ * Copyright (c) 2018 Google, Inc.
+ *
  * When CPU, memory and IO are contended, tasks experience delays that
  * reduce throughput and introduce latencies into the workload. Memory
  * and IO contention, in addition, can cause a full loss of forward
@@ -129,9 +132,13 @@
 #include <linux/seq_file.h>
 #include <linux/proc_fs.h>
 #include <linux/seqlock.h>
+#include <linux/uaccess.h>
 #include <linux/cgroup.h>
 #include <linux/module.h>
 #include <linux/sched.h>
+#include <linux/ctype.h>
+#include <linux/file.h>
+#include <linux/poll.h>
 #include <linux/psi.h>
 #include "sched.h"
 
@@ -156,6 +163,11 @@
 #define EXP_60s		1981		/* 1/exp(2s/60s) */
 #define EXP_300s	2034		/* 1/exp(2s/300s) */
 
+/* PSI trigger definitions */
+#define WINDOW_MIN_US 500000	/* Min window size is 500ms */
+#define WINDOW_MAX_US 10000000	/* Max window size is 10s */
+#define UPDATES_PER_WINDOW 10	/* 10 updates per window */
+
 /* Sampling frequency in nanoseconds */
 static u64 psi_period __read_mostly;
 
@@ -176,6 +188,17 @@
 	group->avg_next_update = sched_clock() + psi_period;
 	INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
 	mutex_init(&group->avgs_lock);
+	/* Init trigger-related members */
+	atomic_set(&group->poll_scheduled, 0);
+	mutex_init(&group->trigger_lock);
+	INIT_LIST_HEAD(&group->triggers);
+	memset(group->nr_triggers, 0, sizeof(group->nr_triggers));
+	group->poll_states = 0;
+	group->poll_min_period = U32_MAX;
+	memset(group->polling_total, 0, sizeof(group->polling_total));
+	group->polling_next_update = ULLONG_MAX;
+	group->polling_until = 0;
+	rcu_assign_pointer(group->poll_kworker, NULL);
 }
 
 void __init psi_init(void)
@@ -210,7 +233,8 @@
 	}
 }
 
-static void get_recent_times(struct psi_group *group, int cpu, u32 *times,
+static void get_recent_times(struct psi_group *group, int cpu,
+			     enum psi_aggregators aggregator, u32 *times,
 			     u32 *pchanged_states)
 {
 	struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
@@ -245,8 +269,8 @@
 		if (state_mask & (1 << s))
 			times[s] += now - state_start;
 
-		delta = times[s] - groupc->times_prev[s];
-		groupc->times_prev[s] = times[s];
+		delta = times[s] - groupc->times_prev[aggregator][s];
+		groupc->times_prev[aggregator][s] = times[s];
 
 		times[s] = delta;
 		if (delta)
@@ -274,7 +298,9 @@
 	avg[2] = calc_load(avg[2], EXP_300s, pct);
 }
 
-static void collect_percpu_times(struct psi_group *group, u32 *pchanged_states)
+static void collect_percpu_times(struct psi_group *group,
+				 enum psi_aggregators aggregator,
+				 u32 *pchanged_states)
 {
 	u64 deltas[NR_PSI_STATES - 1] = { 0, };
 	unsigned long nonidle_total = 0;
@@ -295,7 +321,7 @@
 		u32 nonidle;
 		u32 cpu_changed_states;
 
-		get_recent_times(group, cpu, times,
+		get_recent_times(group, cpu, aggregator, times,
 				&cpu_changed_states);
 		changed_states |= cpu_changed_states;
 
@@ -320,7 +346,8 @@
 
 	/* total= */
 	for (s = 0; s < NR_PSI_STATES - 1; s++)
-		group->total[s] += div_u64(deltas[s], max(nonidle_total, 1UL));
+		group->total[aggregator][s] +=
+				div_u64(deltas[s], max(nonidle_total, 1UL));
 
 	if (pchanged_states)
 		*pchanged_states = changed_states;
@@ -352,7 +379,7 @@
 	for (s = 0; s < NR_PSI_STATES - 1; s++) {
 		u32 sample;
 
-		sample = group->total[s] - group->avg_total[s];
+		sample = group->total[PSI_AVGS][s] - group->avg_total[s];
 		/*
 		 * Due to the lockless sampling of the time buckets,
 		 * recorded time deltas can slip into the next period,
@@ -394,7 +421,7 @@
 
 	now = sched_clock();
 
-	collect_percpu_times(group, &changed_states);
+	collect_percpu_times(group, PSI_AVGS, &changed_states);
 	nonidle = changed_states & (1 << PSI_NONIDLE);
 	/*
 	 * If there is task activity, periodically fold the per-cpu
@@ -414,6 +441,187 @@
 	mutex_unlock(&group->avgs_lock);
 }
 
+/* Trigger tracking window manipulations */
+static void window_reset(struct psi_window *win, u64 now, u64 value,
+			 u64 prev_growth)
+{
+	win->start_time = now;
+	win->start_value = value;
+	win->prev_growth = prev_growth;
+}
+
+/*
+ * PSI growth tracking window update and growth calculation routine.
+ *
+ * This approximates a sliding tracking window by interpolating
+ * partially elapsed windows using historical growth data from the
+ * previous intervals. This minimizes memory requirements (by not storing
+ * all the intermediate values in the previous window) and simplifies
+ * the calculations. It works well because the PSI signal changes only in
+ * the positive direction and, over relatively small window sizes, its
+ * growth is close to linear.
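+ *
+ * Worked example (illustrative numbers): with a 1s window, 250ms elapsed
+ * in the current window, 30ms of new stall so far and 80ms of growth in
+ * the previous window, the reported growth is
+ *
+ *   30ms + 80ms * (1000 - 250) / 1000 = 90ms
+ *
+ * i.e. the fraction of the previous window still covered by the sliding
+ * window contributes proportionally.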
+ */
+static u64 window_update(struct psi_window *win, u64 now, u64 value)
+{
+	u64 elapsed;
+	u64 growth;
+
+	elapsed = now - win->start_time;
+	growth = value - win->start_value;
+	/*
+	 * After each tracking window passes win->start_value and
+	 * win->start_time get reset and win->prev_growth stores
+	 * the average per-window growth of the previous window.
+	 * win->prev_growth is then used to interpolate additional
+	 * growth from the previous window assuming it was linear.
+	 */
+	if (elapsed > win->size)
+		window_reset(win, now, value, growth);
+	else {
+		u32 remaining;
+
+		remaining = win->size - elapsed;
+		growth += div_u64(win->prev_growth * remaining, win->size);
+	}
+
+	return growth;
+}
+
+static void init_triggers(struct psi_group *group, u64 now)
+{
+	struct psi_trigger *t;
+
+	list_for_each_entry(t, &group->triggers, node)
+		window_reset(&t->win, now,
+				group->total[PSI_POLL][t->state], 0);
+	memcpy(group->polling_total, group->total[PSI_POLL],
+		   sizeof(group->polling_total));
+	group->polling_next_update = now + group->poll_min_period;
+}
+
+static u64 update_triggers(struct psi_group *group, u64 now)
+{
+	struct psi_trigger *t;
+	bool new_stall = false;
+	u64 *total = group->total[PSI_POLL];
+
+	/*
+	 * On subsequent updates, calculate growth deltas and let
+	 * watchers know when their specified thresholds are exceeded.
+	 */
+	list_for_each_entry(t, &group->triggers, node) {
+		u64 growth;
+
+		/* Check for stall activity */
+		if (group->polling_total[t->state] == total[t->state])
+			continue;
+
+		/*
+		 * Multiple triggers might be looking at the same state,
+		 * remember to update group->polling_total[] once we've
+		 * been through all of them. Also remember to extend the
+		 * polling time if we see new stall activity.
+		 */
+		new_stall = true;
+
+		/* Calculate growth since last update */
+		growth = window_update(&t->win, now, total[t->state]);
+		if (growth < t->threshold)
+			continue;
+
+		/* Limit event signaling to once per window */
+		if (now < t->last_event_time + t->win.size)
+			continue;
+
+		/* Generate an event */
+		if (cmpxchg(&t->event, 0, 1) == 0)
+			wake_up_interruptible(&t->event_wait);
+		t->last_event_time = now;
+	}
+
+	if (new_stall)
+		memcpy(group->polling_total, total,
+				sizeof(group->polling_total));
+
+	return now + group->poll_min_period;
+}
+
+/*
+ * Schedule polling if it's not already scheduled. It's safe to call even from
+ * hotpath because even though kthread_queue_delayed_work takes worker->lock
+ * spinlock that spinlock is never contended due to poll_scheduled atomic
+ * preventing such competition.
+ */
+static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay)
+{
+	struct kthread_worker *kworker;
+
+	/* Do not reschedule if already scheduled */
+	if (atomic_cmpxchg(&group->poll_scheduled, 0, 1) != 0)
+		return;
+
+	rcu_read_lock();
+
+	kworker = rcu_dereference(group->poll_kworker);
+	/*
+	 * kworker might be NULL in case psi_trigger_destroy races with
+	 * psi_task_change (hotpath) which can't use locks
+	 */
+	if (likely(kworker))
+		kthread_queue_delayed_work(kworker, &group->poll_work, delay);
+	else
+		atomic_set(&group->poll_scheduled, 0);
+
+	rcu_read_unlock();
+}
+
+static void psi_poll_work(struct kthread_work *work)
+{
+	struct kthread_delayed_work *dwork;
+	struct psi_group *group;
+	u32 changed_states;
+	u64 now;
+
+	dwork = container_of(work, struct kthread_delayed_work, work);
+	group = container_of(dwork, struct psi_group, poll_work);
+
+	atomic_set(&group->poll_scheduled, 0);
+
+	mutex_lock(&group->trigger_lock);
+
+	now = sched_clock();
+
+	collect_percpu_times(group, PSI_POLL, &changed_states);
+
+	if (changed_states & group->poll_states) {
+		/* Initialize trigger windows when entering polling mode */
+		if (now > group->polling_until)
+			init_triggers(group, now);
+
+		/*
+		 * Keep the monitor active for at least the duration of the
+		 * minimum tracking window as long as monitor states are
+		 * changing.
+		 */
+		group->polling_until = now +
+			group->poll_min_period * UPDATES_PER_WINDOW;
+	}
+
+	if (now > group->polling_until) {
+		group->polling_next_update = ULLONG_MAX;
+		goto out;
+	}
+
+	if (now >= group->polling_next_update)
+		group->polling_next_update = update_triggers(group, now);
+
+	psi_schedule_poll_work(group,
+		nsecs_to_jiffies(group->polling_next_update - now) + 1);
+
+out:
+	mutex_unlock(&group->trigger_lock);
+}
+
 static void record_times(struct psi_group_cpu *groupc, int cpu,
 			 bool memstall_tick)
 {
@@ -460,8 +668,8 @@
 		groupc->times[PSI_NONIDLE] += delta;
 }
 
-static void psi_group_change(struct psi_group *group, int cpu,
-			     unsigned int clear, unsigned int set)
+static u32 psi_group_change(struct psi_group *group, int cpu,
+			    unsigned int clear, unsigned int set)
 {
 	struct psi_group_cpu *groupc;
 	unsigned int t, m;
@@ -507,6 +715,8 @@
 	groupc->state_mask = state_mask;
 
 	write_seqcount_end(&groupc->seq);
+
+	return state_mask;
 }
 
 static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
@@ -567,7 +777,11 @@
 		wake_clock = false;
 
 	while ((group = iterate_groups(task, &iter))) {
-		psi_group_change(group, cpu, clear, set);
+		u32 state_mask = psi_group_change(group, cpu, clear, set);
+
+		if (state_mask & group->poll_states)
+			psi_schedule_poll_work(group, 1);
+
 		if (wake_clock && !delayed_work_pending(&group->avgs_work))
 			schedule_delayed_work(&group->avgs_work, PSI_FREQ);
 	}
@@ -668,6 +882,8 @@
 
 	cancel_delayed_work_sync(&cgroup->psi.avgs_work);
 	free_percpu(cgroup->psi.pcpu);
+	/* All triggers must be removed by now */
+	WARN_ONCE(cgroup->psi.poll_states, "psi: trigger leak\n");
 }
 
 /**
@@ -731,7 +947,7 @@
 	/* Update averages before reporting them */
 	mutex_lock(&group->avgs_lock);
 	now = sched_clock();
-	collect_percpu_times(group, NULL);
+	collect_percpu_times(group, PSI_AVGS, NULL);
 	if (now >= group->avg_next_update)
 		group->avg_next_update = update_averages(group, now);
 	mutex_unlock(&group->avgs_lock);
@@ -743,7 +959,8 @@
 
 		for (w = 0; w < 3; w++)
 			avg[w] = group->avg[res * 2 + full][w];
-		total = div_u64(group->total[res * 2 + full], NSEC_PER_USEC);
+		total = div_u64(group->total[PSI_AVGS][res * 2 + full],
+				NSEC_PER_USEC);
 
 		seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
 			   full ? "full" : "some",
@@ -786,25 +1003,270 @@
 	return single_open(file, psi_cpu_show, NULL);
 }
 
+struct psi_trigger *psi_trigger_create(struct psi_group *group,
+			char *buf, size_t nbytes, enum psi_res res)
+{
+	struct psi_trigger *t;
+	enum psi_states state;
+	u32 threshold_us;
+	u32 window_us;
+
+	if (static_branch_likely(&psi_disabled))
+		return ERR_PTR(-EOPNOTSUPP);
+
+	if (sscanf(buf, "some %u %u", &threshold_us, &window_us) == 2)
+		state = PSI_IO_SOME + res * 2;
+	else if (sscanf(buf, "full %u %u", &threshold_us, &window_us) == 2)
+		state = PSI_IO_FULL + res * 2;
+	else
+		return ERR_PTR(-EINVAL);
+
+	if (state >= PSI_NONIDLE)
+		return ERR_PTR(-EINVAL);
+
+	if (window_us < WINDOW_MIN_US ||
+		window_us > WINDOW_MAX_US)
+		return ERR_PTR(-EINVAL);
+
+	/* Check threshold */
+	if (threshold_us == 0 || threshold_us > window_us)
+		return ERR_PTR(-EINVAL);
+
+	t = kmalloc(sizeof(*t), GFP_KERNEL);
+	if (!t)
+		return ERR_PTR(-ENOMEM);
+
+	t->group = group;
+	t->state = state;
+	t->threshold = threshold_us * NSEC_PER_USEC;
+	t->win.size = window_us * NSEC_PER_USEC;
+	window_reset(&t->win, 0, 0, 0);
+
+	t->event = 0;
+	t->last_event_time = 0;
+	init_waitqueue_head(&t->event_wait);
+	kref_init(&t->refcount);
+
+	mutex_lock(&group->trigger_lock);
+
+	if (!rcu_access_pointer(group->poll_kworker)) {
+		struct sched_param param = {
+			.sched_priority = MAX_RT_PRIO - 1,
+		};
+		struct kthread_worker *kworker;
+
+		kworker = kthread_create_worker(0, "psimon");
+		if (IS_ERR(kworker)) {
+			kfree(t);
+			mutex_unlock(&group->trigger_lock);
+			return ERR_CAST(kworker);
+		}
+		sched_setscheduler(kworker->task, SCHED_FIFO, &param);
+		kthread_init_delayed_work(&group->poll_work,
+				psi_poll_work);
+		rcu_assign_pointer(group->poll_kworker, kworker);
+	}
+
+	list_add(&t->node, &group->triggers);
+	group->poll_min_period = min(group->poll_min_period,
+		div_u64(t->win.size, UPDATES_PER_WINDOW));
+	group->nr_triggers[t->state]++;
+	group->poll_states |= (1 << t->state);
+
+	mutex_unlock(&group->trigger_lock);
+
+	return t;
+}
+
+static void psi_trigger_destroy(struct kref *ref)
+{
+	struct psi_trigger *t = container_of(ref, struct psi_trigger, refcount);
+	struct psi_group *group = t->group;
+	struct kthread_worker *kworker_to_destroy = NULL;
+
+	if (static_branch_likely(&psi_disabled))
+		return;
+
+	/*
+	 * Wakeup waiters to stop polling. Can happen if cgroup is deleted
+	 * from under a polling process.
+	 */
+	wake_up_interruptible(&t->event_wait);
+
+	mutex_lock(&group->trigger_lock);
+
+	if (!list_empty(&t->node)) {
+		struct psi_trigger *tmp;
+		u64 period = ULLONG_MAX;
+
+		list_del(&t->node);
+		group->nr_triggers[t->state]--;
+		if (!group->nr_triggers[t->state])
+			group->poll_states &= ~(1 << t->state);
+		/* reset min update period for the remaining triggers */
+		list_for_each_entry(tmp, &group->triggers, node)
+			period = min(period, div_u64(tmp->win.size,
+					UPDATES_PER_WINDOW));
+		group->poll_min_period = period;
+		/* Destroy poll_kworker when the last trigger is destroyed */
+		if (group->poll_states == 0) {
+			group->polling_until = 0;
+			kworker_to_destroy = rcu_dereference_protected(
+					group->poll_kworker,
+					lockdep_is_held(&group->trigger_lock));
+			rcu_assign_pointer(group->poll_kworker, NULL);
+		}
+	}
+
+	mutex_unlock(&group->trigger_lock);
+
+	/*
+	 * Wait for both *trigger_ptr from psi_trigger_replace and
+	 * poll_kworker RCUs to complete their read-side critical sections
+	 * before destroying the trigger and optionally the poll_kworker
+	 */
+	synchronize_rcu();
+	/*
+	 * Destroy the kworker after releasing trigger_lock to prevent a
+	 * deadlock while waiting for psi_poll_work to acquire trigger_lock
+	 */
+	if (kworker_to_destroy) {
+		kthread_cancel_delayed_work_sync(&group->poll_work);
+		kthread_destroy_worker(kworker_to_destroy);
+	}
+	kfree(t);
+}
+
+void psi_trigger_replace(void **trigger_ptr, struct psi_trigger *new)
+{
+	struct psi_trigger *old = *trigger_ptr;
+
+	if (static_branch_likely(&psi_disabled))
+		return;
+
+	rcu_assign_pointer(*trigger_ptr, new);
+	if (old)
+		kref_put(&old->refcount, psi_trigger_destroy);
+}
+
+__poll_t psi_trigger_poll(void **trigger_ptr,
+				struct file *file, poll_table *wait)
+{
+	__poll_t ret = DEFAULT_POLLMASK;
+	struct psi_trigger *t;
+
+	if (static_branch_likely(&psi_disabled))
+		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
+
+	rcu_read_lock();
+
+	t = rcu_dereference(*(void __rcu __force **)trigger_ptr);
+	if (!t) {
+		rcu_read_unlock();
+		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
+	}
+	kref_get(&t->refcount);
+
+	rcu_read_unlock();
+
+	poll_wait(file, &t->event_wait, wait);
+
+	if (cmpxchg(&t->event, 1, 0) == 1)
+		ret |= EPOLLPRI;
+
+	kref_put(&t->refcount, psi_trigger_destroy);
+
+	return ret;
+}
+
+static ssize_t psi_write(struct file *file, const char __user *user_buf,
+			 size_t nbytes, enum psi_res res)
+{
+	char buf[32];
+	size_t buf_size;
+	struct seq_file *seq;
+	struct psi_trigger *new;
+
+	if (static_branch_likely(&psi_disabled))
+		return -EOPNOTSUPP;
+
+	if (!nbytes)
+		return -EINVAL;
+
+	buf_size = min(nbytes, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size - 1] = '\0';
+
+	new = psi_trigger_create(&psi_system, buf, nbytes, res);
+	if (IS_ERR(new))
+		return PTR_ERR(new);
+
+	seq = file->private_data;
+	/* Take seq->lock to protect seq->private from concurrent writes */
+	mutex_lock(&seq->lock);
+	psi_trigger_replace(&seq->private, new);
+	mutex_unlock(&seq->lock);
+
+	return nbytes;
+}
+
+static ssize_t psi_io_write(struct file *file, const char __user *user_buf,
+			    size_t nbytes, loff_t *ppos)
+{
+	return psi_write(file, user_buf, nbytes, PSI_IO);
+}
+
+static ssize_t psi_memory_write(struct file *file, const char __user *user_buf,
+				size_t nbytes, loff_t *ppos)
+{
+	return psi_write(file, user_buf, nbytes, PSI_MEM);
+}
+
+static ssize_t psi_cpu_write(struct file *file, const char __user *user_buf,
+			     size_t nbytes, loff_t *ppos)
+{
+	return psi_write(file, user_buf, nbytes, PSI_CPU);
+}
+
+static __poll_t psi_fop_poll(struct file *file, poll_table *wait)
+{
+	struct seq_file *seq = file->private_data;
+
+	return psi_trigger_poll(&seq->private, file, wait);
+}
+
+static int psi_fop_release(struct inode *inode, struct file *file)
+{
+	struct seq_file *seq = file->private_data;
+
+	psi_trigger_replace(&seq->private, NULL);
+	return single_release(inode, file);
+}
+
 static const struct file_operations psi_io_fops = {
 	.open           = psi_io_open,
 	.read           = seq_read,
 	.llseek         = seq_lseek,
-	.release        = single_release,
+	.write          = psi_io_write,
+	.poll           = psi_fop_poll,
+	.release        = psi_fop_release,
 };
 
 static const struct file_operations psi_memory_fops = {
 	.open           = psi_memory_open,
 	.read           = seq_read,
 	.llseek         = seq_lseek,
-	.release        = single_release,
+	.write          = psi_memory_write,
+	.poll           = psi_fop_poll,
+	.release        = psi_fop_release,
 };
 
 static const struct file_operations psi_cpu_fops = {
 	.open           = psi_cpu_open,
 	.read           = seq_read,
 	.llseek         = seq_lseek,
-	.release        = single_release,
+	.write          = psi_cpu_write,
+	.poll           = psi_fop_poll,
+	.release        = psi_fop_release,
 };
 
 static int __init psi_proc_init(void)
diff --git a/net/qrtr/Kconfig b/net/qrtr/Kconfig
index ee794d5..8b9d3c2 100644
--- a/net/qrtr/Kconfig
+++ b/net/qrtr/Kconfig
@@ -47,4 +47,12 @@
 	  transport provides bulk endpoints to facilitate sending and receiving
 	  IPC Router data packets.
 
+config QRTR_FIFO
+	tristate "FIFO IPC Router channels"
+	help
+	  Say Y here to support FIFO based IPC Router channels. The FIFO
+	  transport layer enables IPC Router communication between two
+	  virtual machines. The shared memory between the virtual machines
+	  is allocated by the hypervisor, and remote VMs are signalled
+	  through virtualized interrupts.
+
 endif # QRTR
diff --git a/net/qrtr/Makefile b/net/qrtr/Makefile
index d3c3a19..cae5493 100644
--- a/net/qrtr/Makefile
+++ b/net/qrtr/Makefile
@@ -9,3 +9,6 @@
 
 obj-$(CONFIG_QRTR_USB) += qrtr-usb.o
 qrtr-usb-y      := usb.o
+
+obj-$(CONFIG_QRTR_FIFO) += qrtr-fifo.o
+qrtr-fifo-y	:= fifo.o
diff --git a/net/qrtr/fifo.c b/net/qrtr/fifo.c
new file mode 100644
index 0000000..0a494a6
--- /dev/null
+++ b/net/qrtr/fifo.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/sched.h>
+#include <microvisor/microvisor.h>
+
+#include "qrtr.h"
+
+#define FIFO_MAGIC_KEY	0x24495043 /* "$IPC" */
+#define FIFO_SIZE	0x4000
+#define FIFO_0_START	0x1000
+#define FIFO_1_START	(FIFO_0_START + FIFO_SIZE)
+#define FIFO_MAGIC_IDX	0x0
+#define TAIL_0_IDX	0x1
+#define HEAD_0_IDX	0x2
+#define TAIL_1_IDX	0x3
+#define HEAD_1_IDX	0x4
+
+struct fifo_pipe {
+	__le32 *tail;
+	__le32 *head;
+
+	void *fifo;
+	size_t length;
+};
+
+/**
+ * struct qrtr_fifo_xprt - qrtr FIFO transport structure
+ * @ep: qrtr endpoint specific info.
+ * @tx_pipe: TX FIFO specific info.
+ * @rx_pipe: RX FIFO specific info.
+ * @fifo_base: Base of the shared FIFO.
+ * @fifo_size: FIFO Size.
+ * @tx_fifo_idx: TX FIFO index.
+ * @kcap: Register info to raise irq to other VM.
+ */
+struct qrtr_fifo_xprt {
+	struct qrtr_endpoint ep;
+	struct fifo_pipe tx_pipe;
+	struct fifo_pipe rx_pipe;
+	void *fifo_base;
+	size_t fifo_size;
+	int tx_fifo_idx;
+	okl4_kcap_t kcap;
+};
+
+static void qrtr_fifo_raise_virq(struct qrtr_fifo_xprt *xprtp);
+
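+/*
+ * head/tail accounting used by the pipe helpers below; worked example
+ * with illustrative numbers: length = 0x4000, head = 0x0100, tail = 0x3f00
+ * (the writer has wrapped around) gives 0x4000 - 0x3f00 + 0x0100 = 0x0200
+ * readable bytes.
+ */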
+static size_t fifo_rx_avail(struct fifo_pipe *pipe)
+{
+	u32 head;
+	u32 tail;
+
+	head = le32_to_cpu(*pipe->head);
+	tail = le32_to_cpu(*pipe->tail);
+
+	if (head < tail)
+		return pipe->length - tail + head;
+
+	return head - tail;
+}
+
+static void fifo_rx_peak(struct fifo_pipe *pipe,
+			 void *data, unsigned int offset, size_t count)
+{
+	size_t len;
+	u32 tail;
+
+	tail = le32_to_cpu(*pipe->tail);
+	tail += offset;
+	if (tail >= pipe->length)
+		tail -= pipe->length;
+
+	len = min_t(size_t, count, pipe->length - tail);
+	if (len)
+		memcpy_fromio(data, pipe->fifo + tail, len);
+
+	if (len != count)
+		memcpy_fromio(data + len, pipe->fifo, (count - len));
+}
+
+static void fifo_rx_advance(struct fifo_pipe *pipe, size_t count)
+{
+	u32 tail;
+
+	tail = le32_to_cpu(*pipe->tail);
+
+	tail += count;
+	if (tail > pipe->length)
+		tail -= pipe->length;
+
+	*pipe->tail = cpu_to_le32(tail);
+}
+
+static size_t fifo_tx_avail(struct fifo_pipe *pipe)
+{
+	u32 head;
+	u32 tail;
+	u32 avail;
+
+	head = le32_to_cpu(*pipe->head);
+	tail = le32_to_cpu(*pipe->tail);
+
+	if (tail <= head)
+		avail = pipe->length - head + tail;
+	else
+		avail = tail - head;
+
+	return avail;
+}
+
+static void fifo_tx_write(struct fifo_pipe *pipe,
+			  const void *data, size_t count)
+{
+	size_t len;
+	u32 head;
+
+	head = le32_to_cpu(*pipe->head);
+
+	len = min_t(size_t, count, pipe->length - head);
+	if (len)
+		memcpy_toio(pipe->fifo + head, data, len);
+
+	if (len != count)
+		memcpy_toio(pipe->fifo, data + len, count - len);
+
+	head += count;
+	if (head >= pipe->length)
+		head -= pipe->length;
+
+	/* Ensure ordering of fifo and head update */
+	wmb();
+
+	*pipe->head = cpu_to_le32(head);
+}
+
+/* from qrtr to FIFO */
+static int xprt_write(struct qrtr_endpoint *ep, struct sk_buff *skb)
+{
+	struct qrtr_fifo_xprt *xprtp;
+	int rc;
+
+	xprtp = container_of(ep, struct qrtr_fifo_xprt, ep);
+
+	rc = skb_linearize(skb);
+	if (rc) {
+		kfree_skb(skb);
+		return rc;
+	}
+
+	if (fifo_tx_avail(&xprtp->tx_pipe) < skb->len) {
+		pr_err("No Space in FIFO\n");
+		return -EAGAIN;
+	}
+
+	fifo_tx_write(&xprtp->tx_pipe, skb->data, skb->len);
+	kfree_skb(skb);
+
+	qrtr_fifo_raise_virq(xprtp);
+
+	return 0;
+}
+
+static void xprt_read_data(struct qrtr_fifo_xprt *xprtp)
+{
+	int rc;
+	u32 hdr[8];
+	void *data;
+	size_t pkt_len;
+	size_t rx_avail;
+	size_t hdr_len = sizeof(hdr);
+
+	while (fifo_rx_avail(&xprtp->rx_pipe)) {
+		fifo_rx_peak(&xprtp->rx_pipe, &hdr, 0, hdr_len);
+		pkt_len = qrtr_peek_pkt_size((void *)&hdr);
+		if ((int)pkt_len < 0) {
+			pr_err("invalid pkt_len %zu\n", pkt_len);
+			break;
+		}
+
+		data = kzalloc(pkt_len, GFP_ATOMIC);
+		if (!data)
+			break;
+
+		rx_avail = fifo_rx_avail(&xprtp->rx_pipe);
+		if (rx_avail < pkt_len) {
+			pr_err_ratelimited("Not FULL pkt in FIFO %zu %zu\n",
+					   rx_avail, pkt_len);
+			break;
+		}
+
+		fifo_rx_peak(&xprtp->rx_pipe, data, 0, pkt_len);
+		fifo_rx_advance(&xprtp->rx_pipe, pkt_len);
+
+		rc = qrtr_endpoint_post(&xprtp->ep, data, pkt_len);
+		if (rc == -EINVAL)
+			pr_err("invalid ipcrouter packet\n");
+		kfree(data);
+		data = NULL;
+	}
+}
+
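+/* Signal the remote VM by raising an OKL4 virtual interrupt via kcap. */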
+static void qrtr_fifo_raise_virq(struct qrtr_fifo_xprt *xprtp)
+{
+	okl4_error_t err;
+	unsigned long payload = 0xffff;
+
+	err = _okl4_sys_vinterrupt_raise(xprtp->kcap, payload);
+	if (err != OKL4_OK)
+		pr_err("failed to raise virq, err %d\n", err);
+}
+
+static irqreturn_t qrtr_fifo_virq_handler(int irq, void *dev_id)
+{
+	xprt_read_data((struct qrtr_fifo_xprt *)dev_id);
+	return IRQ_HANDLED;
+}
+
+/**
+ * qrtr_fifo_config_init() - initialize the shared FIFO descriptors
+ *
+ * @xprtp: FIFO XPRT structure whose pipes are to be set up.
+ *
+ * Writes the FIFO magic key into the shared descriptor region and points
+ * the TX and RX pipes at their head/tail indices and data areas, depending
+ * on which FIFO bank this side transmits on.
+ */
+static void qrtr_fifo_config_init(struct qrtr_fifo_xprt *xprtp)
+{
+	__le32 *descs;
+
+	descs = xprtp->fifo_base;
+	descs[FIFO_MAGIC_IDX] = FIFO_MAGIC_KEY;
+
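+	/* tx_fifo_idx ("qcom,tx-is-first" in DT) selects whether this side
+	 * transmits on the first or the second FIFO bank; the other bank
+	 * is used for receive.
+	 */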
+	if (xprtp->tx_fifo_idx) {
+		xprtp->tx_pipe.tail = &descs[TAIL_0_IDX];
+		xprtp->tx_pipe.head = &descs[HEAD_0_IDX];
+		xprtp->tx_pipe.fifo = xprtp->fifo_base + FIFO_0_START;
+		xprtp->tx_pipe.length = FIFO_SIZE;
+
+		xprtp->rx_pipe.tail = &descs[TAIL_1_IDX];
+		xprtp->rx_pipe.head = &descs[HEAD_1_IDX];
+		xprtp->rx_pipe.fifo = xprtp->fifo_base + FIFO_1_START;
+		xprtp->rx_pipe.length = FIFO_SIZE;
+	} else {
+		xprtp->tx_pipe.tail = &descs[TAIL_1_IDX];
+		xprtp->tx_pipe.head = &descs[HEAD_1_IDX];
+		xprtp->tx_pipe.fifo = xprtp->fifo_base + FIFO_1_START;
+		xprtp->tx_pipe.length = FIFO_SIZE;
+
+		xprtp->rx_pipe.tail = &descs[TAIL_0_IDX];
+		xprtp->rx_pipe.head = &descs[HEAD_0_IDX];
+		xprtp->rx_pipe.fifo = xprtp->fifo_base + FIFO_0_START;
+		xprtp->rx_pipe.length = FIFO_SIZE;
+	}
+
+	/* Reset respective index */
+	*xprtp->tx_pipe.head = 0;
+	*xprtp->rx_pipe.tail = 0;
+}
+
+/**
+ * qrtr_fifo_xprt_probe() - Probe a FIFO xprt
+ *
+ * @pdev: Platform device corresponding to the FIFO xprt.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when the underlying device tree driver registers
+ * a platform device mapped to a FIFO transport.
+ */
+static int qrtr_fifo_xprt_probe(struct platform_device *pdev)
+{
+	int irq;
+	int ret;
+	struct resource *r;
+	struct device *parent;
+	struct qrtr_fifo_xprt *xprtp;
+	struct device_node *ipc_irq_np;
+	struct device_node *ipc_shm_np;
+	struct platform_device *ipc_shm_dev;
+
+	xprtp = devm_kzalloc(&pdev->dev, sizeof(*xprtp), GFP_KERNEL);
+	if (!xprtp)
+		return -ENOMEM;
+
+	parent = &pdev->dev;
+	ipc_irq_np = parent->of_node;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return -ENODEV;
+
+	ret = devm_request_irq(parent, irq, qrtr_fifo_virq_handler,
+			       IRQF_TRIGGER_RISING, dev_name(parent),
+			       xprtp);
+	if (ret < 0)
+		return -ENODEV;
+
+	/* this kcap is required to raise VIRQ */
+	ret = of_property_read_u32(ipc_irq_np, "reg", &xprtp->kcap);
+	if (ret < 0)
+		return -ENODEV;
+
+	ipc_shm_np = of_parse_phandle(ipc_irq_np, "qcom,ipc-shm", 0);
+	if (!ipc_shm_np)
+		return -ENODEV;
+
+	ipc_shm_dev = of_find_device_by_node(ipc_shm_np);
+	if (!ipc_shm_dev) {
+		of_node_put(ipc_shm_np);
+		return -ENODEV;
+	}
+
+	r = platform_get_resource(ipc_shm_dev, IORESOURCE_MEM, 0);
+	if (!r) {
+		pr_err("failed to get shared FIFO\n");
+		of_node_put(ipc_shm_np);
+		return -ENODEV;
+	}
+
+	xprtp->tx_fifo_idx = of_property_read_bool(ipc_shm_np,
+						   "qcom,tx-is-first");
+	of_node_put(ipc_shm_np);
+
+	xprtp->fifo_size = resource_size(r);
+	xprtp->fifo_base = devm_ioremap_nocache(&pdev->dev, r->start,
+						resource_size(r));
+	if (!xprtp->fifo_base) {
+		pr_err("ioremap_nocache() failed\n");
+		return -ENOMEM;
+	}
+	qrtr_fifo_config_init(xprtp);
+
+	xprtp->ep.xmit = xprt_write;
+	ret = qrtr_endpoint_register(&xprtp->ep, QRTR_EP_NID_AUTO);
+	if (ret)
+		return ret;
+
+	if (fifo_rx_avail(&xprtp->rx_pipe))
+		xprt_read_data(xprtp);
+
+	return 0;
+}
+
+static const struct of_device_id qrtr_fifo_xprt_match_table[] = {
+	{ .compatible = "qcom,ipcr-fifo-xprt" },
+	{},
+};
+
+static struct platform_driver qrtr_fifo_xprt_driver = {
+	.probe = qrtr_fifo_xprt_probe,
+	.driver = {
+		.name = "qcom_fifo_qrtr",
+		.of_match_table = qrtr_fifo_xprt_match_table,
+	 },
+};
+
+static int __init qrtr_fifo_xprt_init(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&qrtr_fifo_xprt_driver);
+	if (rc) {
+		pr_err("driver register failed %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+module_init(qrtr_fifo_xprt_init);
+MODULE_DESCRIPTION("QTI IPC-router FIFO XPRT");
+MODULE_LICENSE("GPL v2");
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index 6c32eb9..5c3d455 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -530,14 +530,12 @@
 	hdr->type = cpu_to_le32(type);
 	hdr->src_node_id = cpu_to_le32(from->sq_node);
 	hdr->src_port_id = cpu_to_le32(from->sq_port);
-	if (to->sq_port == QRTR_PORT_CTRL) {
+	if (to->sq_node == QRTR_NODE_BCAST)
 		hdr->dst_node_id = cpu_to_le32(node->nid);
-		hdr->dst_port_id = cpu_to_le32(QRTR_NODE_BCAST);
-	} else {
+	else
 		hdr->dst_node_id = cpu_to_le32(to->sq_node);
-		hdr->dst_port_id = cpu_to_le32(to->sq_port);
-	}
 
+	hdr->dst_port_id = cpu_to_le32(to->sq_port);
 	hdr->size = cpu_to_le32(len);
 	hdr->confirm_rx = !!confirm_rx;
 
@@ -780,49 +778,44 @@
 static struct qrtr_sock *qrtr_port_lookup(int port);
 static void qrtr_port_put(struct qrtr_sock *ipc);
 
-static bool qrtr_must_forward(u32 src_nid, u32 dst_nid, u32 type)
+static bool qrtr_must_forward(struct qrtr_node *src,
+			      struct qrtr_node *dst, u32 type)
 {
-	struct qrtr_node *dst;
-	struct qrtr_node *src;
-	bool ret = false;
-
-	if (src_nid == qrtr_local_nid)
+	/* No node structure is maintained for the local processor,
+	 * so src is NULL in that case.
+	 */
+	if (!src)
 		return true;
 
-	if (type == QRTR_TYPE_HELLO || type == QRTR_TYPE_RESUME_TX)
-		return ret;
+	if (!dst)
+		return false;
 
-	dst = qrtr_node_lookup(dst_nid);
-	src = qrtr_node_lookup(src_nid);
-	if (!dst || !src)
-		goto out;
-	if (dst == src)
-		goto out;
-	if (dst->nid == QRTR_EP_NID_AUTO)
-		goto out;
+	if (type == QRTR_TYPE_HELLO || type == QRTR_TYPE_RESUME_TX)
+		return false;
+
+	if (dst == src || dst->nid == QRTR_EP_NID_AUTO)
+		return false;
 
 	if (abs(dst->net_id - src->net_id) > 1)
-		ret = true;
+		return true;
 
-out:
-	qrtr_node_release(dst);
-	qrtr_node_release(src);
-
-	return ret;
+	return false;
 }
 
 static void qrtr_fwd_ctrl_pkt(struct sk_buff *skb)
 {
 	struct qrtr_node *node;
+	struct qrtr_node *src;
 	struct qrtr_cb *cb = (struct qrtr_cb *)skb->cb;
 
+	src = qrtr_node_lookup(cb->src_node);
 	down_read(&qrtr_node_lock);
 	list_for_each_entry(node, &qrtr_all_epts, item) {
 		struct sockaddr_qrtr from;
 		struct sockaddr_qrtr to;
 		struct sk_buff *skbn;
 
-		if (!qrtr_must_forward(cb->src_node, node->nid, cb->type))
+		if (!qrtr_must_forward(src, node, cb->type))
 			continue;
 
 		skbn = skb_clone(skb, GFP_KERNEL);
@@ -840,6 +833,7 @@
 		qrtr_node_enqueue(node, skbn, cb->type, &from, &to, 0);
 	}
 	up_read(&qrtr_node_lock);
+	qrtr_node_release(src);
 }
 
 static void qrtr_fwd_pkt(struct sk_buff *skb, struct qrtr_cb *cb)
@@ -992,7 +986,7 @@
 	struct sk_buff *skb;
 
 	list_for_each_entry(dst, &qrtr_all_epts, item) {
-		if (!qrtr_must_forward(nid, dst->nid, QRTR_TYPE_DEL_PROC))
+		if (!qrtr_must_forward(src, dst, QRTR_TYPE_DEL_PROC))
 			continue;
 
 		skb = qrtr_alloc_ctrl_packet(&pkt);
@@ -1199,7 +1193,8 @@
 
 		sock_hold(&ipc->sk);
 		ipc->sk.sk_err = ENETRESET;
-		ipc->sk.sk_error_report(&ipc->sk);
+		if (ipc->sk.sk_error_report)
+			ipc->sk.sk_error_report(&ipc->sk);
 		sock_put(&ipc->sk);
 	}
 }
@@ -1300,7 +1295,8 @@
 	if (sk && sk->sk_err == ENETRESET) {
 		sock_hold(sk);
 		sk->sk_err = ENETRESET;
-		sk->sk_error_report(sk);
+		if (sk->sk_error_report)
+			sk->sk_error_report(sk);
 		sock_put(sk);
 		kfree_skb(skb);
 		return 0;
@@ -1355,6 +1351,7 @@
 	struct sock *sk = sock->sk;
 	struct qrtr_ctrl_pkt *pkt;
 	struct qrtr_node *node;
+	struct qrtr_node *srv_node;
 	struct sk_buff *skb;
 	size_t plen;
 	u32 type = QRTR_TYPE_DATA;
@@ -1392,6 +1389,7 @@
 	}
 
 	node = NULL;
+	srv_node = NULL;
 	if (addr->sq_node == QRTR_NODE_BCAST) {
 		enqueue_fn = qrtr_bcast_enqueue;
 		if (addr->sq_port != QRTR_PORT_CTRL) {
@@ -1445,11 +1443,14 @@
 
 		/* drop new server cmds that are not forwardable to dst node*/
 		pkt = (struct qrtr_ctrl_pkt *)skb->data;
-		if (!qrtr_must_forward(pkt->server.node, addr->sq_node, type)) {
+		srv_node = qrtr_node_lookup(pkt->server.node);
+		if (!qrtr_must_forward(srv_node, node, type)) {
 			rc = 0;
 			kfree_skb(skb);
+			qrtr_node_release(srv_node);
 			goto out_node;
 		}
+		qrtr_node_release(srv_node);
 	}
 
 	rc = enqueue_fn(node, skb, type, &ipc->us, addr, msg->msg_flags);