Merge "ARM: dts: msm: Add hwevent node for Kona"
diff --git a/Documentation/ABI/testing/procfs-concurrent_time b/Documentation/ABI/testing/procfs-concurrent_time
new file mode 100644
index 0000000..55b4142
--- /dev/null
+++ b/Documentation/ABI/testing/procfs-concurrent_time
@@ -0,0 +1,16 @@
+What:		/proc/uid_concurrent_active_time
+Date:		December 2018
+Contact:	Connor O'Brien <connoro@google.com>
+Description:
+	The /proc/uid_concurrent_active_time file displays aggregated cputime
+	numbers for each uid, broken down by the total number of cores that were
+	active while the uid's task was running.
+
+What:		/proc/uid_concurrent_policy_time
+Date:		December 2018
+Contact:	Connor O'Brien <connoro@google.com>
+Description:
+	The /proc/uid_concurrent_policy_time file displays aggregated cputime
+	numbers for each uid, broken down based on the cpufreq policy
+	of the core used by the uid's task and the number of cores associated
+	with that policy that were active while the uid's task was running.
diff --git a/Documentation/accounting/psi.txt b/Documentation/accounting/psi.txt
index b8ca28b..4fb40fe 100644
--- a/Documentation/accounting/psi.txt
+++ b/Documentation/accounting/psi.txt
@@ -63,6 +63,110 @@
 which wouldn't necessarily make a dent in the time averages, or to
 average trends over custom time frames.
 
+Monitoring for pressure thresholds
+==================================
+
+Users can register triggers and use poll() to be woken up when resource
+pressure exceeds certain thresholds.
+
+A trigger describes the maximum cumulative stall time over a specific
+time window, e.g. 100ms of total stall time within any 500ms window to
+generate a wakeup event.
+
+To register a trigger user has to open psi interface file under
+/proc/pressure/ representing the resource to be monitored and write the
+desired threshold and time window. The open file descriptor should be
+used to wait for trigger events using select(), poll() or epoll().
+The following format is used:
+
+<some|full> <stall amount in us> <time window in us>
+
+For example writing "some 150000 1000000" into /proc/pressure/memory
+would add 150ms threshold for partial memory stall measured within
+1sec time window. Writing "full 50000 1000000" into /proc/pressure/io
+would add 50ms threshold for full io stall measured within 1sec time window.
+
+Triggers can be set on more than one psi metric and more than one trigger
+for the same psi metric can be specified. However for each trigger a separate
+file descriptor is required to be able to poll it separately from others,
+therefore for each trigger a separate open() syscall should be made even
+when opening the same psi interface file.
+
+Monitors activate only when system enters stall state for the monitored
+psi metric and deactivate upon exit from the stall state. While system is
+in the stall state psi signal growth is monitored at a rate of 10 times per
+tracking window.
+
+The kernel accepts window sizes ranging from 500ms to 10s, therefore min
+monitoring update interval is 50ms and max is 1s. Min limit is set to
+prevent overly frequent polling. Max limit is chosen as a high enough number
+after which monitors are most likely not needed and psi averages can be used
+instead.
+
+When activated, psi monitor stays active for at least the duration of one
+tracking window to avoid repeated activations/deactivations when system is
+bouncing in and out of the stall state.
+
+Notifications to the userspace are rate-limited to one per tracking window.
+
+The trigger will de-register when the file descriptor used to define the
+trigger is closed.
+
+Userspace monitor usage example
+===============================
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <poll.h>
+#include <string.h>
+#include <unistd.h>
+
+/*
+ * Monitor memory partial stall with 1s tracking window size
+ * and 150ms threshold.
+ */
+int main() {
+	const char trig[] = "some 150000 1000000";
+	struct pollfd fds;
+	int n;
+
+	fds.fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
+	if (fds.fd < 0) {
+		printf("/proc/pressure/memory open error: %s\n",
+			strerror(errno));
+		return 1;
+	}
+	fds.events = POLLPRI;
+
+	if (write(fds.fd, trig, strlen(trig) + 1) < 0) {
+		printf("/proc/pressure/memory write error: %s\n",
+			strerror(errno));
+		return 1;
+	}
+
+	printf("waiting for events...\n");
+	while (1) {
+		n = poll(&fds, 1, -1);
+		if (n < 0) {
+			printf("poll error: %s\n", strerror(errno));
+			return 1;
+		}
+		if (fds.revents & POLLERR) {
+			printf("got POLLERR, event source is gone\n");
+			return 0;
+		}
+		if (fds.revents & POLLPRI) {
+			printf("event triggered!\n");
+		} else {
+			printf("unknown event received: 0x%x\n", fds.revents);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
 Cgroup2 interface
 =================
 
@@ -71,3 +175,6 @@
 into cgroups. Each subdirectory in the cgroupfs mountpoint contains
 cpu.pressure, memory.pressure, and io.pressure files; the format is
 the same as the /proc/pressure/ files.
+
+Per-cgroup psi monitors can be specified and used the same way as
+system-wide ones.
diff --git a/Documentation/devicetree/bindings/arm/msm/proxy-client.txt b/Documentation/devicetree/bindings/arm/msm/proxy-client.txt
new file mode 100644
index 0000000..29cfaf9
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/proxy-client.txt
@@ -0,0 +1,34 @@
+Bus Proxy Client Bindings
+
+Bus proxy client provides means to cast proxy bandwidth votes during bootup
+which are removed at the end of boot. This feature can be used in situations
+where a shared resource can be scaled between several possible performance
+levels and hardware requires that it be at a high level at the beginning of
+boot before the client has probed and voted for required bandwidth.
+
+Required properties:
+- compatible:			Must be "qcom,bus-proxy-client".
+
+Optional properties:
+- qcom,msm-bus,name:		String representing the client-name.
+- qcom,msm-bus,num-cases:	Total number of usecases.
+- qcom,msm-bus,active-only:	Boolean context flag for requests in active or
+				dual (active & sleep) context.
+- qcom,msm-bus,num-paths:	Total number of master-slave pairs.
+- qcom,msm-bus,vectors-KBps:	Arrays of unsigned integers representing:
+				master-id, slave-id, arbitrated bandwidth
+				in KBps, instantaneous bandwidth in KBps.
+
+Example:
+
+	qcom,proxy-client {
+		compatible = "qcom,bus-proxy-client";
+		qcom,msm-bus,name = "proxy_client";
+		qcom,msm-bus,num-cases = <3>;
+		qcom,msm-bus,num-paths = <2>;
+		qcom,msm-bus,active-only;
+		qcom,msm-bus,vectors-KBps =
+			<22 512 0 0>, <23 512 0 0>,
+			<22 512 0 6400000>, <23 512 0 6400000>,
+			<22 512 0 6400000>, <23 512 0 6400000>;
+	};
diff --git a/Documentation/devicetree/bindings/arm/msm/sleepstate-smp2p.txt b/Documentation/devicetree/bindings/arm/msm/sleepstate-smp2p.txt
new file mode 100644
index 0000000..d82d521
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/sleepstate-smp2p.txt
@@ -0,0 +1,19 @@
+Qualcomm Technologies, Inc. SMSM Point-to-Point (SMP2P) Sleepstate driver
+
+Required properties:
+- compatible : should be one of the following:
+  "qcom,smp2p-sleepstate"
+- qcom,smem-states : the relevant outgoing smp2p entry
+- interrupt-parent: specifies the phandle to the parent interrupt controller
+  this one is cascaded from
+- interrupts: specifies the interrupt number, the irq line to be used
+- interrupt-names: Interrupt name string, must be "smp2p-sleepstate-in"
+
+Example:
+qcom,smp2p_sleepstate {
+	compatible = "qcom,smp2p-sleepstate";
+	qcom,smem-states = <&sleepstate_smp2p_out 0>;
+	interrupt-parent = <&sleepstate_smp2p_in>;
+	interrupts = <0 0>;
+	interrupt-names = "smp2p-sleepstate-in";
+};
diff --git a/Documentation/devicetree/bindings/arm/msm/wil6210.txt b/Documentation/devicetree/bindings/arm/msm/wil6210.txt
index 23e4bd5..acc1915 100644
--- a/Documentation/devicetree/bindings/arm/msm/wil6210.txt
+++ b/Documentation/devicetree/bindings/arm/msm/wil6210.txt
@@ -9,11 +9,6 @@
 Required properties:
 
 - compatible: "qcom,wil6210"
-- qcom,smmu-support: Boolean flag indicating whether PCIe has SMMU support
-- qcom,smmu-s1-en: Boolean flag indicating whether SMMU stage1 should be enabled
-- qcom,smmu-fast-map: Boolean flag indicating whether SMMU fast mapping should be enabled
-- qcom,smmu-coherent: Boolean flag indicating SMMU dma and page table coherency
-- qcom,smmu-mapping: specifies the base address and size of SMMU space
 - qcom,pcie-parent: phandle for the PCIe root complex to which 11ad card is connected
 - Refer to "Documentation/devicetree/bindings/arm/msm/msm_bus.txt" for
   the below optional properties:
@@ -29,6 +24,7 @@
 - qcom,use-ext-supply: Boolean flag to indicate if 11ad SIP uses external power supply
 - vdd-supply: phandle to 11ad VDD regulator node
 - vddio-supply: phandle to 11ad VDDIO regulator node
+- vdd-ldo-supply: phandle to 11ad VDD LDO regulator node
 - qcom,use-ext-clocks: Boolean flag to indicate if 11ad SIP uses external clocks
 - clocks	    : List of phandle and clock specifier pairs
 - clock-names       : List of clock input name strings sorted in the same
@@ -39,11 +35,6 @@
 Example:
 	wil6210: qcom,wil6210 {
 		compatible = "qcom,wil6210";
-		qcom,smmu-support;
-		qcom,smmu-s1-en;
-		qcom,smmu-fast-map;
-		qcom,smmu-coherent;
-		qcom,smmu-mapping = <0x20000000 0xe0000000>;
 		qcom,pcie-parent = <&pcie1>;
 		qcom,wigig-en = <&tlmm 94 0>;
 		qcom,wigig-dc = <&tlmm 81 0>;
@@ -56,6 +47,7 @@
 		qcom,use-ext-supply;
 		vdd-supply= <&pm8998_s7>;
 		vddio-supply= <&pm8998_s5>;
+		vdd-ldo-supply = <&pm8150_l15>;
 		qcom,use-ext-clocks;
 		clocks = <&clock_gcc clk_rf_clk3>,
 			 <&clock_gcc clk_rf_clk3_pin>;
@@ -63,3 +55,32 @@
 		qcom,keep-radio-on-during-sleep;
 	};
 
+Wil6210 client node under PCIe RP node needed for SMMU initialization by
+PCI framework when devices are discovered.
+
+Required properties:
+
+- qcom,iommu-dma-addr-pool: specifies the base address and size of SMMU space
+- qcom,iommu-dma: define the SMMU mode - bypass/fastmap/disabled
+- qcom,iommu-pagetable: indicating SMMU dma and page table coherency
+
+Example:
+&pcie1_rp {
+	#address-cells = <5>;
+	#size-cells = <0>;
+
+	wil6210_pci: wil6210_pci {
+		reg = <0 0 0 0 0>;
+
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		qcom,iommu-group = <&wil6210_pci_iommu_group>;
+
+		wil6210_pci_iommu_group: wil6210_pci_iommu_group {
+				qcom,iommu-dma-addr-pool = <0x20000000 0xe0000000>;
+				qcom,iommu-dma = "fastmap";
+				qcom,iommu-pagetable = "coherent";
+		};
+       };
+};
diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmh-clk.txt b/Documentation/devicetree/bindings/clock/qcom,rpmh-clk.txt
index 4f91bba..3283ff0 100644
--- a/Documentation/devicetree/bindings/clock/qcom,rpmh-clk.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,rpmh-clk.txt
@@ -9,6 +9,7 @@
 - compatible : Shall contain one of the following:
 		"qcom,kona-rpmh-clk",
 		"qcom,sdm845-rpmh-clk"
+		"qcom,lito-rpmh-clk"
 
 - #clock-cells : must contain 1
 
diff --git a/Documentation/devicetree/bindings/display/msm/sde-dp.txt b/Documentation/devicetree/bindings/display/msm/sde-dp.txt
index a17b738..7881230 100644
--- a/Documentation/devicetree/bindings/display/msm/sde-dp.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde-dp.txt
@@ -100,11 +100,14 @@
 - compatible:			Must be "qcom,msm-ext-disp"
 - qcom,dp-low-power-hw-hpd:	Low power hardware HPD feature enable control node
 - qcom,phy-version:		Phy version
+- qcom,pn-swap-lane-map:	P/N swap configuration of each lane
 - pinctrl-names:		List of names to assign mdss pin states defined in pinctrl device node
 				Refer to pinctrl-bindings.txt
 - pinctrl-<0..n>:		Lists phandles each pointing to the pin configuration node within a pin
 				controller. These pin configurations are installed in the pinctrl
 				device node. Refer to pinctrl-bindings.txt
+- qcom,max-lclk-frequency-khz:	An integer specifying the max. link clock in KHz supported by Display Port.
+- qcom,mst-fixed-topology-ports: u32 values of which MST output port to reserve, start from one
 
 [Optional child nodes]: These nodes are for devices which are
 dependent on msm_ext_disp. If msm_ext_disp is disabled then
diff --git a/Documentation/devicetree/bindings/gpu/adreno.txt b/Documentation/devicetree/bindings/gpu/adreno.txt
index 330f890..196d93b 100644
--- a/Documentation/devicetree/bindings/gpu/adreno.txt
+++ b/Documentation/devicetree/bindings/gpu/adreno.txt
@@ -38,11 +38,17 @@
 				above this powerlevel isense clock is at working frequency.
 
 Bus Scaling Data:
-- qcom,msm-bus,name: String property to describe the name of the 3D graphics processor.
-- qcom,msm-bus,num-cases: This is the the number of Bus Scaling use cases defined in the vectors property.
-- qcom,msm-bus,active-only: A boolean flag indicating if it is active only.
-- qcom,msm-bus,num-paths: This represents the number of paths in each Bus Scaling Usecase.
-- qcom,msm-bus,vectors-KBps: A series of 4 cell properties, format of which is:
+- qcom,gpu-bus-table:		Defines a bus voting table with the below properties. Multiple sets of bus
+				voting tables can be defined for given platform based on the type of ddr system.
+
+Properties:
+- compatible:			Must be "qcom,gpu-bus-table". Additionally, "qcom,gpu-bus-table-ddr" must also
+				be provided, with the ddr type value(integer) appended to the string.
+- qcom,msm-bus,name:		String property to describe the name of the 3D graphics processor.
+- qcom,msm-bus,num-cases:	This is the number of Bus Scaling use cases defined in the vectors property.
+- qcom,msm-bus,active-only:	A boolean flag indicating if it is active only.
+- qcom,msm-bus,num-paths: 	This represents the number of paths in each Bus Scaling Usecase.
+- qcom,msm-bus,vectors-KBps:	A series of 4 cell properties, format of which is:
 					<src dst ab ib>, <src dst ab ib>, // For Bus Scaling Usecase 1
 					<src dst ab ib>, <src dst ab ib>, // For Bus Scaling Usecase 2
 					<..  ..  .. ..>, <..  ..  .. ..>; // For Bus Scaling Usecase n
@@ -171,8 +177,7 @@
 				certain protected registers and also pass to the user as
 				a property.
 - qcom,l2pc-cpu-mask:
-				Disables L2PC on masked CPUs when any of Graphics
-				rendering thread is running on masked CPUs.
+				Disables L2PC on masked CPUs when any Graphics rendering thread is running on masked CPUs.
 				Bit 0 is for CPU-0, bit 1 is for CPU-1...
 
 - qcom,l2pc-update-queue:
@@ -339,14 +344,17 @@
 				"mem_iface_clk", "alt_mem_iface_clk";
 
 		/* Bus Scale Settings */
-		qcom,msm-bus,name = "grp3d";
-		qcom,msm-bus,num-cases = <4>;
-		qcom,msm-bus,num-paths = <1>;
-		qcom,msm-bus,vectors-KBps =
-			<26 512 0 0>,
-			<26 512 0 1600000>,
-			<26 512 0 3200000>,
-			<26 512 0 4264000>;
+		qcom,gpu-bus-table {
+			compatible="qcom,gpu-bus-table","qcom,gpu-bus-table-ddr7";
+			qcom,msm-bus,name = "grp3d";
+			qcom,msm-bus,num-cases = <4>;
+			qcom,msm-bus,num-paths = <1>;
+			qcom,msm-bus,vectors-KBps =
+				<26 512 0 0>,
+				<26 512 0 1600000>,
+				<26 512 0 3200000>,
+				<26 512 0 4264000>;
+		};
 
 		/* GDSC oxili regulators */
 		vdd-supply = <&gdsc_oxili_gx>;
diff --git a/Documentation/devicetree/bindings/media/video/msm-vidc.txt b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
index f31ced7..2f948e8 100644
--- a/Documentation/devicetree/bindings/media/video/msm-vidc.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
@@ -120,17 +120,15 @@
   boot/dts/include/dt-bindings/msm/msm-bus-ids.h for list of acceptable slaves
 
 Optional properties:
-- qcom,bus-governor : governor to use when scaling bus, generally any commonly
-  found devfreq governor might be used.  In addition to those governors, the
-  custom Venus governors, "msm-vidc-ddr" or "msm-vidc-llcc" are also
-  acceptable values.
-  In the absence of this property the "performance" governor is used.
-- qcom,bus-rage-kbps : an array of two items (<min max>) that indicate the
+- qcom,bus-range-kbps : an array of two items (<min max>) that indicate the
   minimum and maximum acceptable votes for the bus.
   In the absence of this property <0 INT_MAX> is used.
 - qcom,ubwc-10bit : UBWC 10 bit content has different bus requirements,
   this tag will be used to pick the appropriate bus as per the session profile
   as shown below in example.
+- qcom,mode : Type of BW calculations to use.
+		"performance" - Use highest valid BW vote.
+		"venus-ddr", "venus-llcc" - Calculate for DDR, LLCC path.
 
 Memory Heaps
 ============
diff --git a/Documentation/devicetree/bindings/net/qrtr-fifo-xprt.txt b/Documentation/devicetree/bindings/net/qrtr-fifo-xprt.txt
new file mode 100644
index 0000000..69debce
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/qrtr-fifo-xprt.txt
@@ -0,0 +1,25 @@
+Qualcomm Technologies, Inc. IPC Router FIFO Transport
+
+Required properties:
+- compatible:	should be "qcom,ipcr-fifo-xprt"
+- reg:		the irq register to raise an interrupt
+- interrupts:	the receiving interrupt line
+- qcom,ipc-shm:	reference to shared memory phandle
+
+Example:
+
+	fifo_vipc_irq@176 {
+		compatible = "qcom,ipcr-fifo-xprt";
+		reg = <0x176>;
+		interrupts = <0x0 0x142 0x1>;
+		qcom,ipc-shm = <&ipc-shm>;
+	};
+
+	ipc-shm: shared-buffer@85af7000 {
+		compatible = "qcom,hypervisor-shared-memory";
+		phandle = <0x1e4>;
+		reg = <0x0 0x85af7000 0x0 0x9000>;
+		label = "ipc_shm";
+		qcom,tx-is-first;
+	};
+
diff --git a/Documentation/devicetree/bindings/pci/pci-msm.txt b/Documentation/devicetree/bindings/pci/pci-msm.txt
index e9d411a..362b19e 100644
--- a/Documentation/devicetree/bindings/pci/pci-msm.txt
+++ b/Documentation/devicetree/bindings/pci/pci-msm.txt
@@ -310,6 +310,11 @@
 	Value type: <u32>
 	Definition: Offset from PCIe PHY base to check if PCIe PHY status
 
+- qcom,phy-status-bit:
+	Usage: required
+	Value type: <u32>
+	Definition: BIT to check PCIe PHY status
+
 - qcom,phy-power-down-offset:
 	Usage: required
 	Value type: <u32>
@@ -468,6 +473,7 @@
 
 		qcom,pcie-phy-ver = <0x2101>; /* v2 version 1.01 */
 		qcom,phy-status-offset = <0x814>;
+		qcom,phy-status-bit = <6>;
 		qcom,phy-power-down-offset = <0x840>;
 		qcom,phy-sequence = <0x0840 0x03 0x0
 				0x0094 0x08 0x0
diff --git a/Documentation/devicetree/bindings/platform/msm/ipa.txt b/Documentation/devicetree/bindings/platform/msm/ipa.txt
index 1ba6974..625e5d8 100644
--- a/Documentation/devicetree/bindings/platform/msm/ipa.txt
+++ b/Documentation/devicetree/bindings/platform/msm/ipa.txt
@@ -59,6 +59,8 @@
                                 a pipe reset via the IPA uC is required
 - qcom,ipa-wdi2:		Boolean context flag to indicate whether
 				using wdi-2.0 or not
+- qcom,ipa-wdi3-over-gsi:       Boolean context flag to indicate whether
+                                using wdi-3.0 or not
 - qcom,bandwidth-vote-for-ipa:	Boolean context flag to indicate whether
 				ipa clock voting is done by bandwidth
 				voting via msm-bus-scale driver or not
diff --git a/Documentation/devicetree/bindings/platform/msm/qcom-geni-se.txt b/Documentation/devicetree/bindings/platform/msm/qcom-geni-se.txt
index 7da95f8..a42f491 100644
--- a/Documentation/devicetree/bindings/platform/msm/qcom-geni-se.txt
+++ b/Documentation/devicetree/bindings/platform/msm/qcom-geni-se.txt
@@ -13,6 +13,9 @@
 
 Optional properties:
 - qcom,iommu-s1-bypass:	Boolean flag to bypass IOMMU stage 1 translation.
+- qcom,msm-bus,num-paths: Number of paths to put vote for.
+- qcom,msm-bus,vectors-bus-ids: Master and slave Endpoint IDs for DDR
+				and Corex/2x paths.
 
 Optional subnodes:
 qcom,iommu_qupv3_geni_se_cb:	Child node representing the QUPV3 context
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen4.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen4.txt
index ac8fec3..e37bbb7 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen4.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen4.txt
@@ -435,6 +435,30 @@
 		    resolution of monotonic SOC under CAPACITY_RAW property
 		    during charging in the scale of 0-10000.
 
+- qcom,soc-scale-mode-en
+	Usage:      optional
+	Value type: <boolean>
+	Definition: A boolean property that when specified will enable scaling
+		    of the SOC linearly, based on the filtered battery voltage
+		    after crossing below a Vbatt threshold.
+
+- qcom,soc-scale-vbatt-mv
+	Usage:      optional
+	Value type: <u32>
+	Definition: Threshold voltage to decide when SOC should
+		    be scaled based on filtered voltage when
+		    qcom,soc-scale-mode-en is specified. If this
+		    is not specified, then the default value is 3400.
+		    Unit is in mV.
+
+- qcom,soc-scale-time-ms
+	Usage:      optional
+	Value type: <u32>
+	Definition: Timer value for doing SOC calculation based on
+		    filtered voltage when qcom,soc-scale-mode-en is
+		    specified. If this is not specified, then the
+		    default value is 10000. Unit is in ms.
+
 ==========================================================
 Second Level Nodes - Peripherals managed by FG Gen4 driver
 ==========================================================
diff --git a/Documentation/devicetree/bindings/qbt_handler/qbt_handler.txt b/Documentation/devicetree/bindings/qbt_handler/qbt_handler.txt
new file mode 100644
index 0000000..168aa24
--- /dev/null
+++ b/Documentation/devicetree/bindings/qbt_handler/qbt_handler.txt
@@ -0,0 +1,35 @@
+Qualcomm Technologies, Inc. QBT_HANDLER Specific Bindings
+
+QBT is a fingerprint sensor ASIC capable of performing fingerprint image scans
+and detecting finger presence on the sensor using programmable firmware.
+
+=======================
+Required Node Structure
+=======================
+
+- compatible
+  Usage:      required
+  Value type: <string>
+  Definition: "qcom,qbt-handler".
+
+- qcom,ipc-gpio
+  Usage:      required
+  Value type: <phandle>
+  Definition: phandle for GPIO to be used for IPC.
+
+- qcom,finger-detect-gpio
+  Usage:      required
+  Value type: <phandle>
+  Definition: phandle for GPIO to be used for finger detect.
+
+=======
+Example
+=======
+
+qcom,qbt_handler {
+                compatible = "qcom,qbt-handler";
+                qcom,ipc-gpio = <&tlmm 23 0>;
+                pinctrl-names = "default";
+                pinctrl-0 = <&key_home_default>;
+                qcom,finger-detect-gpio = <&pm8150_gpios 1 0>;
+};
diff --git a/Documentation/devicetree/bindings/sound/wcd_codec.txt b/Documentation/devicetree/bindings/sound/wcd_codec.txt
index bb5f74f..db50c09 100644
--- a/Documentation/devicetree/bindings/sound/wcd_codec.txt
+++ b/Documentation/devicetree/bindings/sound/wcd_codec.txt
@@ -8,6 +8,7 @@
 	soundwire core registers.
  - clock-names : clock names defined for WSA macro
  - clocks : clock handles defined for WSA  macro
+ - qcom,default-clk-id: Default clk ID used for WSA macro
  - qcom,wsa-swr-gpios: phandle for SWR data and clock GPIOs of WSA macro
  - qcom,wsa-bcl-pmic-params: u8 array of PMIC ID, SID and PPID in same order
 			 required to be configured to receive interrupts
@@ -24,6 +25,7 @@
 		<&clock_audio_wsa_2 0>;
 		qcom,wsa-swr-gpios = &wsa_swr_gpios;
 		qcom,wsa-bcl-pmic-params = /bits/ 8 <0x00 0x00 0x1E>;
+		qcom,default-clk-id = <TX_CORE_CLK>;
 		swr_0: wsa_swr_master {
 			compatible = "qcom,swr-mstr";
 			wsa881x_1: wsa881x@20170212 {
@@ -43,6 +45,7 @@
 	soundwire core registers.
  - clock-names : clock names defined for VA macro
  - clocks : clock handles defined for VA macro
+ - qcom,default-clk-id: Default clk ID used for VA macro
  - va-vdd-micb-supply: phandle of mic bias supply's regulator device tree node
  - qcom,va-vdd-micb-voltage: mic bias supply's voltage level min and max in mV
  - qcom,va-vdd-micb-current: mic bias supply's max current in mA
@@ -61,6 +64,7 @@
 		reg = <0x0C490000 0x0>;
 		clock-names = "va_core_clk";
 		clocks = <&clock_audio_va 0>;
+		qcom,default-clk-id = <TX_CORE_CLK>;
 		va-vdd-micb-supply = <&S4A>;
 		qcom,va-vdd-micb-voltage = <1800000 1800000>;
 		qcom,va-vdd-micb-current = <11200>;
@@ -78,6 +82,7 @@
 	soundwire core registers.
  - clock-names : clock names defined for RX macro
  - clocks : clock handles defined for RX macro
+ - qcom,default-clk-id: Default clk ID used for RX macro
  - qcom,rx-swr-gpios: phandle for SWR data and clock GPIOs of RX macro
  - qcom,rx_mclk_mode_muxsel: register address for RX macro MCLK mode mux select
  - qcom,rx-bcl-pmic-params: u8 array of PMIC ID, SID and PPID in same order
@@ -96,6 +101,7 @@
 		qcom,rx-swr-gpios = <&rx_swr_gpios>;
 		qcom,rx_mclk_mode_muxsel = <0x62C25020>;
 		qcom,rx-bcl-pmic-params = /bits/ 8 <0x00 0x00 0x1E>;
+		qcom,default-clk-id = <TX_CORE_CLK>;
 		swr_1: rx_swr_master {
 			compatible = "qcom,swr-mstr";
 			wcd938x_rx_slave: wcd938x-rx-slave {
@@ -220,3 +226,35 @@
 	qcom,cdc-on-demand-supplies = "cdc-vdd-buck",
 				      "cdc-vdd-mic-bias";
 };
+
+Bolero Clock Resource Manager
+
+Required Properties:
+ - compatible = "qcom,bolero-clk-rsc-mngr";
+ - qcom,fs-gen-sequence: Register sequence for fs clock generation
+ - clock-names : clock names defined for WSA macro
+ - clocks : clock handles defined for WSA  macro
+
+Optional Properties:
+ - qcom,rx_mclk_mode_muxsel: register address for RX macro MCLK mode mux select
+ - qcom,wsa_mclk_mode_muxsel: register address for WSA macro MCLK mux select
+ - qcom,va_mclk_mode_muxsel: register address for VA macro MCLK mode mux select
+
+Example:
+&bolero {
+	bolero-clock-rsc-manager {
+		compatible = "qcom,bolero-clk-rsc-mngr";
+		qcom,fs-gen-sequence = <0x3000 0x1>,
+				<0x3004 0x1>, <0x3080 0x2>;
+		qcom,rx_mclk_mode_muxsel = <0x033240D8>;
+		qcom,wsa_mclk_mode_muxsel = <0x033220D8>;
+		qcom,va_mclk_mode_muxsel = <0x033A0000>;
+		clock-names = "tx_core_clk", "tx_npl_clk", "rx_core_clk",
+			"rx_npl_clk", "wsa_core_clk", "wsa_npl_clk",
+			"va_core_clk", "va_npl_clk";
+		clocks = <&clock_audio_tx_1 0>, <&clock_audio_tx_2 0>,
+			<&clock_audio_rx_1 0>, <&clock_audio_rx_2 0>,
+			<&clock_audio_wsa_1 0>, <&clock_audio_wsa_2 0>,
+			<&clock_audio_va_1 0>, <&clock_audio_va_2 0>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/thermal/qti-qmi-sensor.txt b/Documentation/devicetree/bindings/thermal/qti-qmi-sensor.txt
index adb382b..12cf027 100644
--- a/Documentation/devicetree/bindings/thermal/qti-qmi-sensor.txt
+++ b/Documentation/devicetree/bindings/thermal/qti-qmi-sensor.txt
@@ -33,16 +33,17 @@
 		are the only acceptable sensor names,
 		1. pa
 		2. pa1
-		3. qfe_pa0
-		4. qfe_wtr0
-		5. modem_tsens
-		6. qfe_mmw0
-		7. qfe_mmw1
-		8. qfe_mmw2
-		9. qfe_mmw3
-		10. xo_therm
-		11. qfe_pa_mdm
-		12. qfe_pa_wtr
+		3. pa2
+		4. qfe_pa0
+		5. qfe_wtr0
+		6. modem_tsens
+		7. qfe_mmw0
+		8. qfe_mmw1
+		9. qfe_mmw2
+		10. qfe_mmw3
+		11. xo_therm
+		12. qfe_pa_mdm
+		13. qfe_pa_wtr
 
 Example:
 
diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt
index eef7d9d..f205898 100644
--- a/Documentation/filesystems/overlayfs.txt
+++ b/Documentation/filesystems/overlayfs.txt
@@ -102,6 +102,29 @@
 such as metadata and extended attributes are reported for the upper
 directory only.  These attributes of the lower directory are hidden.
 
+credentials
+-----------
+
+By default, all access to the upper, lower and work directories is
+performed with the recorded mounter's MAC and DAC credentials.  The
+incoming accesses are checked against the caller's credentials.
+
+In the case where caller MAC or DAC credentials do not overlap, a
+use case available in older versions of the driver, the
+override_creds mount flag can be turned off and help when the use
+pattern has caller with legitimate credentials where the mounter
+does not.  Several unintended side effects will occur though.  The
+caller without certain key capabilities or lower privilege will not
+always be able to delete files or directories, create nodes, or
+search some restricted directories.  The ability to search and read
+a directory entry is spotty as a result of the cache mechanism not
+retesting the credentials because of the assumption, a privileged
+caller can fill cache, then a lower privilege can read the directory
+cache.  The uneven security model where cache, upperdir and workdir
+are opened at privilege, but accessed without creating a form of
+privilege escalation, should only be used with strict understanding
+of the side effects and of the security policies.
+
 whiteouts and opaque directories
 --------------------------------
 
diff --git a/Documentation/power/energy-model.txt b/Documentation/power/energy-model.txt
new file mode 100644
index 0000000..5a23c6f
--- /dev/null
+++ b/Documentation/power/energy-model.txt
@@ -0,0 +1,169 @@
+                           ====================
+                           Energy Model of CPUs
+                           ====================
+
+1. Overview
+-----------
+
+The Energy Model (EM) framework serves as an interface between drivers knowing
+the power consumed by CPUs at various performance levels, and the kernel
+subsystems willing to use that information to make energy-aware decisions.
+
+The source of the information about the power consumed by CPUs can vary greatly
+from one platform to another. These power costs can be estimated using
+devicetree data in some cases. In others, the firmware will know better.
+Alternatively, userspace might be best positioned. And so on. In order to avoid
+having each and every client subsystem re-implement support for each and every
+possible source of information on its own, the EM framework intervenes as an
+abstraction layer which standardizes the format of power cost tables in the
+kernel, hence enabling to avoid redundant work.
+
+The figure below depicts an example of drivers (Arm-specific here, but the
+approach is applicable to any architecture) providing power costs to the EM
+framework, and interested clients reading the data from it.
+
+       +---------------+  +-----------------+  +---------------+
+       | Thermal (IPA) |  | Scheduler (EAS) |  |     Other     |
+       +---------------+  +-----------------+  +---------------+
+               |                   | em_pd_energy()    |
+               |                   | em_cpu_get()      |
+               +---------+         |         +---------+
+                         |         |         |
+                         v         v         v
+                        +---------------------+
+                        |    Energy Model     |
+                        |     Framework       |
+                        +---------------------+
+                           ^       ^       ^
+                           |       |       | em_register_perf_domain()
+                +----------+       |       +---------+
+                |                  |                 |
+        +---------------+  +---------------+  +--------------+
+        |  cpufreq-dt   |  |   arm_scmi    |  |    Other     |
+        +---------------+  +---------------+  +--------------+
+                ^                  ^                 ^
+                |                  |                 |
+        +--------------+   +---------------+  +--------------+
+        | Device Tree  |   |   Firmware    |  |      ?       |
+        +--------------+   +---------------+  +--------------+
+
+The EM framework manages power cost tables per 'performance domain' in the
+system. A performance domain is a group of CPUs whose performance is scaled
+together. Performance domains generally have a 1-to-1 mapping with CPUFreq
+policies. All CPUs in a performance domain are required to have the same
+micro-architecture. CPUs in different performance domains can have different
+micro-architectures.
+
+
+2. Core APIs
+------------
+
+  2.1 Config options
+
+CONFIG_ENERGY_MODEL must be enabled to use the EM framework.
+
+
+  2.2 Registration of performance domains
+
+Drivers are expected to register performance domains into the EM framework by
+calling the following API:
+
+  int em_register_perf_domain(cpumask_t *span, unsigned int nr_states,
+			      struct em_data_callback *cb);
+
+Drivers must specify the CPUs of the performance domains using the cpumask
+argument, and provide a callback function returning <frequency, power> tuples
+for each capacity state. The callback function provided by the driver is free
+to fetch data from any relevant location (DT, firmware, ...), and by any means
+deemed necessary. See Section 3. for an example of driver implementing this
+callback, and kernel/power/energy_model.c for further documentation on this
+API.
+
+
+  2.3 Accessing performance domains
+
+Subsystems interested in the energy model of a CPU can retrieve it using the
+em_cpu_get() API. The energy model tables are allocated once upon creation of
+the performance domains, and kept in memory untouched.
+
+The energy consumed by a performance domain can be estimated using the
+em_pd_energy() API. The estimation is performed assuming that the schedutil
+CPUfreq governor is in use.
+
+More details about the above APIs can be found in include/linux/energy_model.h.
+
+
+3. Example driver
+-----------------
+
+This section provides a simple example of a CPUFreq driver registering a
+performance domain in the Energy Model framework using the (fake) 'foo'
+protocol. The driver implements an est_power() function to be provided to the
+EM framework.
+
+ -> drivers/cpufreq/foo_cpufreq.c
+
+01	static int est_power(unsigned long *mW, unsigned long *KHz, int cpu)
+02	{
+03		long freq, power;
+04
+05		/* Use the 'foo' protocol to ceil the frequency */
+06		freq = foo_get_freq_ceil(cpu, *KHz);
+07		if (freq < 0)
+08			return freq;
+09
+10		/* Estimate the power cost for the CPU at the relevant freq. */
+11		power = foo_estimate_power(cpu, freq);
+12		if (power < 0)
+13			return power;
+14
+15		/* Return the values to the EM framework */
+16		*mW = power;
+17		*KHz = freq;
+18
+19		return 0;
+20	}
+21
+22	static int foo_cpufreq_init(struct cpufreq_policy *policy)
+23	{
+24		struct em_data_callback em_cb = EM_DATA_CB(est_power);
+25		int nr_opp, ret;
+26
+27		/* Do the actual CPUFreq init work ... */
+28		ret = do_foo_cpufreq_init(policy);
+29		if (ret)
+30			return ret;
+31
+32		/* Find the number of OPPs for this policy */
+33		nr_opp = foo_get_nr_opp(policy);
+34
+35		/* And register the new performance domain */
+36		em_register_perf_domain(policy->cpus, nr_opp, &em_cb);
+37
+38	        return 0;
+39	}
+
+
+4. Support for legacy Energy Models (DEPRECATED)
+------------------------------------------------
+
+The Android kernel version 4.14 and before used a different type of EM for EAS,
+referred to as the 'legacy' EM. The legacy EM relies on the out-of-tree
+'sched-energy-costs' devicetree bindings to provide the kernel with power costs.
+The usage of such bindings in Android has now been DEPRECATED in favour of the
+mainline equivalents.
+
+The currently supported alternatives to populate the EM include:
+ - using a firmware-based solution such as Arm SCMI (supported in
+   drivers/cpufreq/scmi-cpufreq.c);
+ - using the 'dynamic-power-coefficient' devicetree binding together with
+   PM_OPP. See the of_dev_pm_opp_get_cpu_power() helper in PM_OPP, and the
+   reference implementation in drivers/cpufreq/cpufreq-dt.c.
+
+In order to ease the transition to the new EM format, Android 4.19 also provides
+a compatibility driver able to load a legacy EM from DT into the EM framework.
+*** Please note that THIS FEATURE WILL NOT BE AVAILABLE in future Android
+kernels, and as such it must be considered only as a temporary workaround. ***
+
+If you know what you're doing and still want to use this driver, you need to set
+CONFIG_LEGACY_ENERGY_MODEL_DT=y in your kernel configuration to enable it.
diff --git a/Documentation/scheduler/sched-energy.txt b/Documentation/scheduler/sched-energy.txt
new file mode 100644
index 0000000..197d81f
--- /dev/null
+++ b/Documentation/scheduler/sched-energy.txt
@@ -0,0 +1,425 @@
+			   =======================
+			   Energy Aware Scheduling
+			   =======================
+
+1. Introduction
+---------------
+
+Energy Aware Scheduling (or EAS) gives the scheduler the ability to predict
+the impact of its decisions on the energy consumed by CPUs. EAS relies on an
+Energy Model (EM) of the CPUs to select an energy efficient CPU for each task,
+with a minimal impact on throughput. This document aims at providing an
+introduction on how EAS works, what are the main design decisions behind it, and
+details what is needed to get it to run.
+
+Before going any further, please note that at the time of writing:
+
+   /!\ EAS does not support platforms with symmetric CPU topologies /!\
+
+EAS operates only on heterogeneous CPU topologies (such as Arm big.LITTLE)
+because this is where the potential for saving energy through scheduling is
+the highest.
+
+The actual EM used by EAS is _not_ maintained by the scheduler, but by a
+dedicated framework. For details about this framework and what it provides,
+please refer to its documentation (see Documentation/power/energy-model.txt).
+
+
+2. Background and Terminology
+-----------------------------
+
+To make it clear from the start:
+ - energy = [joule] (resource like a battery on powered devices)
+ - power = energy/time = [joule/second] = [watt]
+
+The goal of EAS is to minimize energy, while still getting the job done. That
+is, we want to maximize:
+
+	performance [inst/s]
+	--------------------
+	    power [W]
+
+which is equivalent to minimizing:
+
+	energy [J]
+	-----------
+	instruction
+
+while still getting 'good' performance. It is essentially an alternative
+optimization objective to the current performance-only objective for the
+scheduler. This alternative considers two objectives: energy-efficiency and
+performance.
+
+The idea behind introducing an EM is to allow the scheduler to evaluate the
+implications of its decisions rather than blindly applying energy-saving
+techniques that may have positive effects only on some platforms. At the same
+time, the EM must be as simple as possible to minimize the scheduler latency
+impact.
+
+In short, EAS changes the way CFS tasks are assigned to CPUs. When it is time
+for the scheduler to decide where a task should run (during wake-up), the EM
+is used to break the tie between several good CPU candidates and pick the one
+that is predicted to yield the best energy consumption without harming the
+system's throughput. The predictions made by EAS rely on specific elements of
+knowledge about the platform's topology, which include the 'capacity' of CPUs,
+and their respective energy costs.
+
+
+3. Topology information
+-----------------------
+
+EAS (as well as the rest of the scheduler) uses the notion of 'capacity' to
+differentiate CPUs with different computing throughput. The 'capacity' of a CPU
+represents the amount of work it can absorb when running at its highest
+frequency compared to the most capable CPU of the system. Capacity values are
+normalized in a 1024 range, and are comparable with the utilization signals of
+tasks and CPUs computed by the Per-Entity Load Tracking (PELT) mechanism. Thanks
+to capacity and utilization values, EAS is able to estimate how big/busy a
+task/CPU is, and to take this into consideration when evaluating performance vs
+energy trade-offs. The capacity of CPUs is provided via arch-specific code
+through the arch_scale_cpu_capacity() callback.
+
+The rest of platform knowledge used by EAS is directly read from the Energy
+Model (EM) framework. The EM of a platform is composed of a power cost table
+per 'performance domain' in the system (see Documentation/power/energy-model.txt
+for further details about performance domains).
+
+The scheduler manages references to the EM objects in the topology code when the
+scheduling domains are built, or re-built. For each root domain (rd), the
+scheduler maintains a singly linked list of all performance domains intersecting
+the current rd->span. Each node in the list contains a pointer to a struct
+em_perf_domain as provided by the EM framework.
+
+The lists are attached to the root domains in order to cope with exclusive
+cpuset configurations. Since the boundaries of exclusive cpusets do not
+necessarily match those of performance domains, the lists of different root
+domains can contain duplicate elements.
+
+Example 1.
+    Let us consider a platform with 12 CPUs, split in 3 performance domains
+    (pd0, pd4 and pd8), organized as follows:
+
+	          CPUs:   0 1 2 3 4 5 6 7 8 9 10 11
+	          PDs:   |--pd0--|--pd4--|---pd8---|
+	          RDs:   |----rd1----|-----rd2-----|
+
+    Now, consider that userspace decided to split the system with two
+    exclusive cpusets, hence creating two independent root domains, each
+    containing 6 CPUs. The two root domains are denoted rd1 and rd2 in the
+    above figure. Since pd4 intersects with both rd1 and rd2, it will be
+    present in the linked list '->pd' attached to each of them:
+       * rd1->pd: pd0 -> pd4
+       * rd2->pd: pd4 -> pd8
+
+    Please note that the scheduler will create two duplicate list nodes for
+    pd4 (one for each list). However, both just hold a pointer to the same
+    shared data structure of the EM framework.
+
+Since the access to these lists can happen concurrently with hotplug and other
+things, they are protected by RCU, like the rest of topology structures
+manipulated by the scheduler.
+
+EAS also maintains a static key (sched_energy_present) which is enabled when at
+least one root domain meets all conditions for EAS to start. Those conditions
+are summarized in Section 6.
+
+
+4. Energy-Aware task placement
+------------------------------
+
+EAS overrides the CFS task wake-up balancing code. It uses the EM of the
+platform and the PELT signals to choose an energy-efficient target CPU during
+wake-up balance. When EAS is enabled, select_task_rq_fair() calls
+find_energy_efficient_cpu() to do the placement decision. This function looks
+for the CPU with the highest spare capacity (CPU capacity - CPU utilization) in
+each performance domain since it is the one which will allow us to keep the
+frequency the lowest. Then, the function checks if placing the task there could
+save energy compared to leaving it on prev_cpu, i.e. the CPU where the task ran
+in its previous activation.
+
+find_energy_efficient_cpu() uses compute_energy() to estimate what will be the
+energy consumed by the system if the waking task was migrated. compute_energy()
+looks at the current utilization landscape of the CPUs and adjusts it to
+'simulate' the task migration. The EM framework provides the em_pd_energy() API
+which computes the expected energy consumption of each performance domain for
+the given utilization landscape.
+
+An example of energy-optimized task placement decision is detailed below.
+
+Example 2.
+    Let us consider a (fake) platform with 2 independent performance domains
+    composed of two CPUs each. CPU0 and CPU1 are little CPUs; CPU2 and CPU3
+    are big.
+
+    The scheduler must decide where to place a task P whose util_avg = 200
+    and prev_cpu = 0.
+
+    The current utilization landscape of the CPUs is depicted on the graph
+    below. CPUs 0-3 have a util_avg of 400, 100, 600 and 500 respectively.
+    Each performance domain has three Operating Performance Points (OPPs).
+    The CPU capacity and power cost associated with each OPP is listed in
+    the Energy Model table. The util_avg of P is shown on the figures
+    below as 'PP'.
+
+    CPU util.
+      1024                 - - - - - - -              Energy Model
+                                               +-----------+-------------+
+                                               |  Little   |     Big     |
+       768                 =============       +-----+-----+------+------+
+                                               | Cap | Pwr | Cap  | Pwr  |
+                                               +-----+-----+------+------+
+       512  ===========    - ##- - - - -       | 170 | 50  | 512  | 400  |
+                             ##     ##         | 341 | 150 | 768  | 800  |
+       341  -PP - - - -      ##     ##         | 512 | 300 | 1024 | 1700 |
+             PP              ##     ##         +-----+-----+------+------+
+       170  -## - - - -      ##     ##
+             ##     ##       ##     ##
+           ------------    -------------
+            CPU0   CPU1     CPU2   CPU3
+
+      Current OPP: =====       Other OPP: - - -     util_avg (100 each): ##
+
+
+    find_energy_efficient_cpu() will first look for the CPUs with the
+    maximum spare capacity in the two performance domains. In this example,
+    CPU1 and CPU3. Then it will estimate the energy of the system if P was
+    placed on either of them, and check if that would save some energy
+    compared to leaving P on CPU0. EAS assumes that OPPs follow utilization
+    (which is coherent with the behaviour of the schedutil CPUFreq
+    governor, see Section 6. for more details on this topic).
+
+    Case 1. P is migrated to CPU1
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+      1024                 - - - - - - -
+
+                                            Energy calculation:
+       768                 =============     * CPU0: 200 / 341 * 150 = 88
+                                             * CPU1: 300 / 341 * 150 = 131
+                                             * CPU2: 600 / 768 * 800 = 625
+       512  - - - - - -    - ##- - - - -     * CPU3: 500 / 768 * 800 = 520
+                             ##     ##          => total_energy = 1364
+       341  ===========      ##     ##
+                    PP       ##     ##
+       170  -## - - PP-      ##     ##
+             ##     ##       ##     ##
+           ------------    -------------
+            CPU0   CPU1     CPU2   CPU3
+
+
+    Case 2. P is migrated to CPU3
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+      1024                 - - - - - - -
+
+                                            Energy calculation:
+       768                 =============     * CPU0: 200 / 341 * 150 = 88
+                                             * CPU1: 100 / 341 * 150 = 43
+                                    PP       * CPU2: 600 / 768 * 800 = 625
+       512  - - - - - -    - ##- - -PP -     * CPU3: 700 / 768 * 800 = 729
+                             ##     ##          => total_energy = 1485
+       341  ===========      ##     ##
+                             ##     ##
+       170  -## - - - -      ##     ##
+             ##     ##       ##     ##
+           ------------    -------------
+            CPU0   CPU1     CPU2   CPU3
+
+
+    Case 3. P stays on prev_cpu / CPU 0
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+      1024                 - - - - - - -
+
+                                            Energy calculation:
+       768                 =============     * CPU0: 400 / 512 * 300 = 234
+                                             * CPU1: 100 / 512 * 300 = 58
+                                             * CPU2: 600 / 768 * 800 = 625
+       512  ===========    - ##- - - - -     * CPU3: 500 / 768 * 800 = 520
+                             ##     ##          => total_energy = 1437
+       341  -PP - - - -      ##     ##
+             PP              ##     ##
+       170  -## - - - -      ##     ##
+             ##     ##       ##     ##
+           ------------    -------------
+            CPU0   CPU1     CPU2   CPU3
+
+
+    From these calculations, Case 1 has the lowest total energy. So CPU 1
+    is the best candidate from an energy-efficiency standpoint.
+
+Big CPUs are generally more power hungry than the little ones and are thus used
+mainly when a task doesn't fit the littles. However, little CPUs aren't always
+necessarily more energy-efficient than big CPUs. For some systems, the high OPPs
+of the little CPUs can be less energy-efficient than the lowest OPPs of the
+bigs, for example. So, if the little CPUs happen to have enough utilization at
+a specific point in time, a small task waking up at that moment could be better
+off executing on the big side in order to save energy, even though it would fit
+on the little side.
+
+And even in the case where all OPPs of the big CPUs are less energy-efficient
+than those of the little, using the big CPUs for a small task might still, under
+specific conditions, save energy. Indeed, placing a task on a little CPU can
+result in raising the OPP of the entire performance domain, and that will
+increase the cost of the tasks already running there. If the waking task is
+placed on a big CPU, its own execution cost might be higher than if it was
+running on a little, but it won't impact the other tasks of the little CPUs
+which will keep running at a lower OPP. So, when considering the total energy
+consumed by CPUs, the extra cost of running that one task on a big core can be
+smaller than the cost of raising the OPP on the little CPUs for all the other
+tasks.
+
+The examples above would be nearly impossible to get right in a generic way, and
+for all platforms, without knowing the cost of running at different OPPs on all
+CPUs of the system. Thanks to its EM-based design, EAS should cope with them
+correctly without too many troubles. However, in order to ensure a minimal
+impact on throughput for high-utilization scenarios, EAS also implements another
+mechanism called 'over-utilization'.
+
+
+5. Over-utilization
+-------------------
+
+From a general standpoint, the use-cases where EAS can help the most are those
+involving a light/medium CPU utilization. Whenever long CPU-bound tasks are
+being run, they will require all of the available CPU capacity, and there isn't
+much that can be done by the scheduler to save energy without severely harming
+throughput. In order to avoid hurting performance with EAS, CPUs are flagged as
+'over-utilized' as soon as they are used at more than 80% of their compute
+capacity. As long as no CPUs are over-utilized in a root domain, load balancing
+is disabled and EAS overrides the wake-up balancing code. EAS is likely to load
+the most energy efficient CPUs of the system more than the others if that can be
+done without harming throughput. So, the load-balancer is disabled to prevent
+it from breaking the energy-efficient task placement found by EAS. It is safe to
+do so when the system isn't overutilized since being below the 80% tipping point
+implies that:
+
+    a. there is some idle time on all CPUs, so the utilization signals used by
+       EAS are likely to accurately represent the 'size' of the various tasks
+       in the system;
+    b. all tasks should already be provided with enough CPU capacity,
+       regardless of their nice values;
+    c. since there is spare capacity all tasks must be blocking/sleeping
+       regularly and balancing at wake-up is sufficient.
+
+As soon as one CPU goes above the 80% tipping point, at least one of the three
+assumptions above becomes incorrect. In this scenario, the 'overutilized' flag
+is raised for the entire root domain, EAS is disabled, and the load-balancer is
+re-enabled. By doing so, the scheduler falls back onto load-based algorithms for
+wake-up and load balance under CPU-bound conditions. This provides a better
+respect of the nice values of tasks.
+
+Since the notion of overutilization largely relies on detecting whether or not
+there is some idle time in the system, the CPU capacity 'stolen' by higher
+(than CFS) scheduling classes (as well as IRQ) must be taken into account. As
+such, the detection of overutilization accounts for the capacity used not only
+by CFS tasks, but also by the other scheduling classes and IRQ.
+
+
+6. Dependencies and requirements for EAS
+----------------------------------------
+
+Energy Aware Scheduling depends on the CPUs of the system having specific
+hardware properties and on other features of the kernel being enabled. This
+section lists these dependencies and provides hints as to how they can be met.
+
+
+  6.1 - Asymmetric CPU topology
+
+As mentioned in the introduction, EAS is only supported on platforms with
+asymmetric CPU topologies for now. This requirement is checked at run-time by
+looking for the presence of the SD_ASYM_CPUCAPACITY flag when the scheduling
+domains are built.
+
+The flag is set/cleared automatically by the scheduler topology code whenever
+there are CPUs with different capacities in a root domain. The capacities of
+CPUs are provided by arch-specific code through the arch_scale_cpu_capacity()
+callback. As an example, arm and arm64 share an implementation of this callback
+which uses a combination of CPUFreq data and device-tree bindings to compute the
+capacity of CPUs (see drivers/base/arch_topology.c for more details).
+
+So, in order to use EAS on your platform your architecture must implement the
+arch_scale_cpu_capacity() callback, and some of the CPUs must have a lower
+capacity than others.
+
+Please note that EAS is not fundamentally incompatible with SMP, but no
+significant savings on SMP platforms have been observed yet. This restriction
+could be amended in the future if proven otherwise.
+
+
+  6.2 - Energy Model presence
+
+EAS uses the EM of a platform to estimate the impact of scheduling decisions on
+energy. So, your platform must provide power cost tables to the EM framework in
+order to make EAS start. To do so, please refer to documentation of the
+independent EM framework in Documentation/power/energy-model.txt.
+
+Please also note that the scheduling domains need to be re-built after the
+EM has been registered in order to start EAS.
+
+
+  6.3 - Energy Model complexity
+
+The task wake-up path is very latency-sensitive. When the EM of a platform is
+too complex (too many CPUs, too many performance domains, too many performance
+states, ...), the cost of using it in the wake-up path can become prohibitive.
+The energy-aware wake-up algorithm has a complexity of:
+
+	C = Nd * (Nc + Ns)
+
+with: Nd the number of performance domains; Nc the number of CPUs; and Ns the
+total number of OPPs (ex: for two perf. domains with 4 OPPs each, Ns = 8).
+
+A complexity check is performed at the root domain level, when scheduling
+domains are built. EAS will not start on a root domain if its C happens to be
+higher than the completely arbitrary EM_MAX_COMPLEXITY threshold (2048 at the
+time of writing).
+
+If you really want to use EAS but the complexity of your platform's Energy
+Model is too high to be used with a single root domain, you're left with only
+two possible options:
+
+    1. split your system into separate, smaller, root domains using exclusive
+       cpusets and enable EAS locally on each of them. This option has the
+       benefit to work out of the box but the drawback of preventing load
+       balance between root domains, which can result in an unbalanced system
+       overall;
+    2. submit patches to reduce the complexity of the EAS wake-up algorithm,
+       hence enabling it to cope with larger EMs in reasonable time.
+
+
+  6.4 - Schedutil governor
+
+EAS tries to predict at which OPP the CPUs will be running in the close future
+in order to estimate their energy consumption. To do so, it is assumed that OPPs
+of CPUs follow their utilization.
+
+Although it is very difficult to provide hard guarantees regarding the accuracy
+of this assumption in practice (because the hardware might not do what it is
+told to do, for example), schedutil as opposed to other CPUFreq governors at
+least _requests_ frequencies calculated using the utilization signals.
+Consequently, the only sane governor to use together with EAS is schedutil,
+because it is the only one providing some degree of consistency between
+frequency requests and energy predictions.
+
+Using EAS with any other governor than schedutil is not supported.
+
+
+  6.5 Scale-invariant utilization signals
+
+In order to make accurate prediction across CPUs and for all performance
+states, EAS needs frequency-invariant and CPU-invariant PELT signals. These can
+be obtained using the architecture-defined arch_scale{cpu,freq}_capacity()
+callbacks.
+
+Using EAS on a platform that doesn't implement these two callbacks is not
+supported.
+
+
+  6.6 Multithreading (SMT)
+
+EAS in its current form is SMT unaware and is not able to leverage
+multithreaded hardware to save energy. EAS considers threads as independent
+CPUs, which can actually be counter-productive for both performance and energy.
+
+EAS on SMT is not supported.
diff --git a/Makefile b/Makefile
index bc15999..277bce1 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 26
+SUBLEVEL = 30
 EXTRAVERSION =
 NAME = "People's Front"
 
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index 8da87fe..99e6d89 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -340,7 +340,7 @@
 /*
  * __ffs: Similar to ffs, but zero based (0-31)
  */
-static inline __attribute__ ((const)) int __ffs(unsigned long word)
+static inline __attribute__ ((const)) unsigned long __ffs(unsigned long word)
 {
 	if (!word)
 		return word;
@@ -400,9 +400,9 @@
 /*
  * __ffs: Similar to ffs, but zero based (0-31)
  */
-static inline __attribute__ ((const)) int __ffs(unsigned long x)
+static inline __attribute__ ((const)) unsigned long __ffs(unsigned long x)
 {
-	int n;
+	unsigned long n;
 
 	asm volatile(
 	"	ffs.f	%0, %1		\n"  /* 0:31; 31(Z) if src 0 */
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index e8d9fb4..5c66633 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -18,6 +18,8 @@
 #include <asm/arcregs.h>
 #include <asm/irqflags.h>
 
+#define ARC_PATH_MAX	256
+
 /*
  * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25)
  *   -Prints 3 regs per line and a CR.
@@ -58,11 +60,12 @@
 	print_reg_file(&(cregs->r13), 13);
 }
 
-static void print_task_path_n_nm(struct task_struct *tsk, char *buf)
+static void print_task_path_n_nm(struct task_struct *tsk)
 {
 	char *path_nm = NULL;
 	struct mm_struct *mm;
 	struct file *exe_file;
+	char buf[ARC_PATH_MAX];
 
 	mm = get_task_mm(tsk);
 	if (!mm)
@@ -72,7 +75,7 @@
 	mmput(mm);
 
 	if (exe_file) {
-		path_nm = file_path(exe_file, buf, 255);
+		path_nm = file_path(exe_file, buf, ARC_PATH_MAX-1);
 		fput(exe_file);
 	}
 
@@ -80,10 +83,9 @@
 	pr_info("Path: %s\n", !IS_ERR(path_nm) ? path_nm : "?");
 }
 
-static void show_faulting_vma(unsigned long address, char *buf)
+static void show_faulting_vma(unsigned long address)
 {
 	struct vm_area_struct *vma;
-	char *nm = buf;
 	struct mm_struct *active_mm = current->active_mm;
 
 	/* can't use print_vma_addr() yet as it doesn't check for
@@ -96,8 +98,11 @@
 	 * if the container VMA is not found
 	 */
 	if (vma && (vma->vm_start <= address)) {
+		char buf[ARC_PATH_MAX];
+		char *nm = "?";
+
 		if (vma->vm_file) {
-			nm = file_path(vma->vm_file, buf, PAGE_SIZE - 1);
+			nm = file_path(vma->vm_file, buf, ARC_PATH_MAX-1);
 			if (IS_ERR(nm))
 				nm = "?";
 		}
@@ -173,13 +178,8 @@
 {
 	struct task_struct *tsk = current;
 	struct callee_regs *cregs;
-	char *buf;
 
-	buf = (char *)__get_free_page(GFP_KERNEL);
-	if (!buf)
-		return;
-
-	print_task_path_n_nm(tsk, buf);
+	print_task_path_n_nm(tsk);
 	show_regs_print_info(KERN_INFO);
 
 	show_ecr_verbose(regs);
@@ -189,7 +189,7 @@
 		(void *)regs->blink, (void *)regs->ret);
 
 	if (user_mode(regs))
-		show_faulting_vma(regs->ret, buf); /* faulting code, not data */
+		show_faulting_vma(regs->ret); /* faulting code, not data */
 
 	pr_info("[STAT32]: 0x%08lx", regs->status32);
 
@@ -221,8 +221,6 @@
 	cregs = (struct callee_regs *)current->thread.callee_reg;
 	if (cregs)
 		show_callee_regs(cregs);
-
-	free_page((unsigned long)buf);
 }
 
 void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
index 27a1ee2..94efca7 100644
--- a/arch/arm/boot/dts/exynos3250.dtsi
+++ b/arch/arm/boot/dts/exynos3250.dtsi
@@ -168,6 +168,9 @@
 			interrupt-controller;
 			#interrupt-cells = <3>;
 			interrupt-parent = <&gic>;
+			clock-names = "clkout8";
+			clocks = <&cmu CLK_FIN_PLL>;
+			#clock-cells = <1>;
 		};
 
 		mipi_phy: video-phy {
diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
index a09e46c..00820d2 100644
--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
@@ -49,7 +49,7 @@
 	};
 
 	emmc_pwrseq: pwrseq {
-		pinctrl-0 = <&sd1_cd>;
+		pinctrl-0 = <&emmc_rstn>;
 		pinctrl-names = "default";
 		compatible = "mmc-pwrseq-emmc";
 		reset-gpios = <&gpk1 2 GPIO_ACTIVE_LOW>;
@@ -161,12 +161,6 @@
 	cpu0-supply = <&buck2_reg>;
 };
 
-/* RSTN signal for eMMC */
-&sd1_cd {
-	samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
-	samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
-};
-
 &pinctrl_1 {
 	gpio_power_key: power_key {
 		samsung,pins = "gpx1-3";
@@ -184,6 +178,11 @@
 		samsung,pins = "gpx3-7";
 		samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
 	};
+
+	emmc_rstn: emmc-rstn {
+		samsung,pins = "gpk1-2";
+		samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+	};
 };
 
 &ehci {
diff --git a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
index 2f4f408..27214e6 100644
--- a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
+++ b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
@@ -334,7 +334,7 @@
 			buck8_reg: BUCK8 {
 				regulator-name = "vdd_1.8v_ldo";
 				regulator-min-microvolt = <800000>;
-				regulator-max-microvolt = <1500000>;
+				regulator-max-microvolt = <2000000>;
 				regulator-always-on;
 				regulator-boot-on;
 			};
diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
index 844caa3..50083ce 100644
--- a/arch/arm/boot/dts/imx6sx.dtsi
+++ b/arch/arm/boot/dts/imx6sx.dtsi
@@ -462,7 +462,7 @@
 			};
 
 			gpt: gpt@2098000 {
-				compatible = "fsl,imx6sx-gpt", "fsl,imx31-gpt";
+				compatible = "fsl,imx6sx-gpt", "fsl,imx6dl-gpt";
 				reg = <0x02098000 0x4000>;
 				interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clks IMX6SX_CLK_GPT_BUS>,
diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi
index 0d9faf1..a86b890 100644
--- a/arch/arm/boot/dts/meson.dtsi
+++ b/arch/arm/boot/dts/meson.dtsi
@@ -263,7 +263,7 @@
 			compatible = "amlogic,meson6-dwmac", "snps,dwmac";
 			reg = <0xc9410000 0x10000
 			       0xc1108108 0x4>;
-			interrupts = <GIC_SPI 8 IRQ_TYPE_EDGE_RISING>;
+			interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "macirq";
 			status = "disabled";
 		};
diff --git a/arch/arm/boot/dts/meson8b-odroidc1.dts b/arch/arm/boot/dts/meson8b-odroidc1.dts
index ef3177d3..8fdeeff 100644
--- a/arch/arm/boot/dts/meson8b-odroidc1.dts
+++ b/arch/arm/boot/dts/meson8b-odroidc1.dts
@@ -125,7 +125,6 @@
 		/* Realtek RTL8211F (0x001cc916) */
 		eth_phy: ethernet-phy@0 {
 			reg = <0>;
-			eee-broken-1000t;
 			interrupt-parent = <&gpio_intc>;
 			/* GPIOH_3 */
 			interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
@@ -172,8 +171,7 @@
 		cap-sd-highspeed;
 		disable-wp;
 
-		cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
-		cd-inverted;
+		cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
 
 		vmmc-supply = <&tflash_vdd>;
 		vqmmc-supply = <&tf_io>;
diff --git a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
index f585361..6ac02be 100644
--- a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
+++ b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
@@ -206,8 +206,7 @@
 		cap-sd-highspeed;
 		disable-wp;
 
-		cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
-		cd-inverted;
+		cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
 
 		vmmc-supply = <&vcc_3v3>;
 	};
diff --git a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
index ddc7a7b..f57acf8 100644
--- a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
+++ b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
@@ -105,7 +105,7 @@
 			interrupts-extended = <
 				&cpcap 15 0 &cpcap 14 0 &cpcap 28 0 &cpcap 19 0
 				&cpcap 18 0 &cpcap 17 0 &cpcap 16 0 &cpcap 49 0
-				&cpcap 48 1
+				&cpcap 48 0
 			>;
 			interrupt-names =
 				"id_ground", "id_float", "se0conn", "vbusvld",
diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi
index 0d9b853..e142e6c 100644
--- a/arch/arm/boot/dts/omap3-n950-n9.dtsi
+++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi
@@ -370,6 +370,19 @@
 		compatible = "ti,omap2-onenand";
 		reg = <0 0 0x20000>;	/* CS0, offset 0, IO size 128K */
 
+		/*
+		 * These timings are based on CONFIG_OMAP_GPMC_DEBUG=y reported
+		 * bootloader set values when booted with v4.19 using both N950
+		 * and N9 devices (OneNAND Manufacturer: Samsung):
+		 *
+		 *   gpmc cs0 before gpmc_cs_program_settings:
+		 *   cs0 GPMC_CS_CONFIG1: 0xfd001202
+		 *   cs0 GPMC_CS_CONFIG2: 0x00181800
+		 *   cs0 GPMC_CS_CONFIG3: 0x00030300
+		 *   cs0 GPMC_CS_CONFIG4: 0x18001804
+		 *   cs0 GPMC_CS_CONFIG5: 0x03171d1d
+		 *   cs0 GPMC_CS_CONFIG6: 0x97080000
+		 */
 		gpmc,sync-read;
 		gpmc,sync-write;
 		gpmc,burst-length = <16>;
@@ -379,26 +392,27 @@
 		gpmc,device-width = <2>;
 		gpmc,mux-add-data = <2>;
 		gpmc,cs-on-ns = <0>;
-		gpmc,cs-rd-off-ns = <87>;
-		gpmc,cs-wr-off-ns = <87>;
+		gpmc,cs-rd-off-ns = <122>;
+		gpmc,cs-wr-off-ns = <122>;
 		gpmc,adv-on-ns = <0>;
-		gpmc,adv-rd-off-ns = <10>;
-		gpmc,adv-wr-off-ns = <10>;
-		gpmc,oe-on-ns = <15>;
-		gpmc,oe-off-ns = <87>;
+		gpmc,adv-rd-off-ns = <15>;
+		gpmc,adv-wr-off-ns = <15>;
+		gpmc,oe-on-ns = <20>;
+		gpmc,oe-off-ns = <122>;
 		gpmc,we-on-ns = <0>;
-		gpmc,we-off-ns = <87>;
-		gpmc,rd-cycle-ns = <112>;
-		gpmc,wr-cycle-ns = <112>;
-		gpmc,access-ns = <81>;
+		gpmc,we-off-ns = <122>;
+		gpmc,rd-cycle-ns = <148>;
+		gpmc,wr-cycle-ns = <148>;
+		gpmc,access-ns = <117>;
 		gpmc,page-burst-access-ns = <15>;
 		gpmc,bus-turnaround-ns = <0>;
 		gpmc,cycle2cycle-delay-ns = <0>;
 		gpmc,wait-monitoring-ns = <0>;
-		gpmc,clk-activation-ns = <5>;
-		gpmc,wr-data-mux-bus-ns = <30>;
-		gpmc,wr-access-ns = <81>;
-		gpmc,sync-clk-ps = <15000>;
+		gpmc,clk-activation-ns = <10>;
+		gpmc,wr-data-mux-bus-ns = <40>;
+		gpmc,wr-access-ns = <117>;
+
+		gpmc,sync-clk-ps = <15000>; /* TBC; where did this value come from? */
 
 		/*
 		 * MTD partition table corresponding to Nokia's MeeGo 1.2
diff --git a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
index 5d23667..25540b7 100644
--- a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
+++ b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
@@ -53,7 +53,7 @@
 
 	aliases {
 		serial0 = &uart0;
-		/* ethernet0 is the H3 emac, defined in sun8i-h3.dtsi */
+		ethernet0 = &emac;
 		ethernet1 = &sdiowifi;
 	};
 
diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c
index ed36dca..f519199 100644
--- a/arch/arm/plat-pxa/ssp.c
+++ b/arch/arm/plat-pxa/ssp.c
@@ -190,8 +190,6 @@
 	if (ssp == NULL)
 		return -ENODEV;
 
-	iounmap(ssp->mmio_base);
-
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	release_mem_region(res->start, resource_size(res));
 
@@ -201,7 +199,6 @@
 	list_del(&ssp->node);
 	mutex_unlock(&ssp_lock);
 
-	kfree(ssp);
 	return 0;
 }
 
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
index f4964be..e80a792 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
@@ -118,6 +118,7 @@
 		reset-gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
 		clocks = <&pmic>;
 		clock-names = "ext_clock";
+		post-power-on-delay-ms = <10>;
 		power-off-delay-us = <10>;
 	};
 
@@ -300,7 +301,6 @@
 
 		dwmmc_0: dwmmc0@f723d000 {
 			cap-mmc-highspeed;
-			mmc-hs200-1_8v;
 			non-removable;
 			bus-width = <0x8>;
 			vmmc-supply = <&ldo19>;
diff --git a/arch/arm64/boot/dts/qcom/kona-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/kona-audio-overlay.dtsi
index 8e81f00..657fd87 100644
--- a/arch/arm64/boot/dts/qcom/kona-audio-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-audio-overlay.dtsi
@@ -4,12 +4,28 @@
  */
 
 #include <dt-bindings/clock/qcom,audio-ext-clk.h>
+#include <dt-bindings/sound/qcom,bolero-clk-rsc.h>
 #include <dt-bindings/sound/audio-codec-port-types.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include "kona-va-bolero.dtsi"
 
 &bolero {
 	qcom,num-macros = <4>;
+	bolero-clk-rsc-mngr {
+		compatible = "qcom,bolero-clk-rsc-mngr";
+		qcom,fs-gen-sequence = <0x3000 0x1>,
+					<0x3004 0x1>, <0x3080 0x2>;
+	qcom,rx_mclk_mode_muxsel = <0x033240D8>;
+	qcom,wsa_mclk_mode_muxsel = <0x033220D8>;
+	qcom,va_mclk_mode_muxsel = <0x033A0000>;
+	clock-names = "tx_core_clk", "tx_npl_clk", "rx_core_clk", "rx_npl_clk",
+		 "wsa_core_clk", "wsa_npl_clk", "va_core_clk", "va_npl_clk";
+	clocks = <&clock_audio_tx_1 0>, <&clock_audio_tx_2 0>,
+		<&clock_audio_rx_1 0>, <&clock_audio_rx_2 0>,
+		<&clock_audio_wsa_1 0>, <&clock_audio_wsa_2 0>,
+		<&clock_audio_va_1 0>, <&clock_audio_va_2 0>;
+	};
+
 	tx_macro: tx-macro@3220000 {
 		compatible = "qcom,tx-macro";
 		reg = <0x3220000 0x0>;
@@ -55,6 +71,7 @@
 		qcom,rx-swr-gpios = <&rx_swr_gpios>;
 		qcom,rx_mclk_mode_muxsel = <0x033240D8>;
 		qcom,rx-bcl-pmic-params = /bits/ 8 <0x00 0x00 0x1E>;
+		qcom,default-clk-id = <TX_CORE_CLK>;
 		swr1: rx_swr_master {
 			compatible = "qcom,swr-mstr";
 			#address-cells = <2>;
@@ -87,6 +104,7 @@
 			 <&clock_audio_wsa_2 0>;
 		qcom,wsa-swr-gpios = <&wsa_swr_gpios>;
 		qcom,wsa-bcl-pmic-params = /bits/ 8 <0x00 0x00 0x1E>;
+		qcom,default-clk-id = <TX_CORE_CLK>;
 		swr0: wsa_swr_master {
 			compatible = "qcom,swr-mstr";
 			#address-cells = <2>;
@@ -390,4 +408,20 @@
 		qcom,codec-lpass-clk-id = <0x30D>;
 		#clock-cells = <1>;
 	};
+
+	clock_audio_va_1: va_core_clk {
+		compatible = "qcom,audio-ref-clk";
+		qcom,codec-ext-clk-src = <AUDIO_LPASS_MCLK>;
+		qcom,codec-lpass-ext-clk-freq = <19200000>;
+		qcom,codec-lpass-clk-id = <0x30B>;
+		#clock-cells = <1>;
+	};
+
+	clock_audio_va_2: va_npl_clk {
+		compatible = "qcom,audio-ref-clk";
+		qcom,codec-ext-clk-src = <AUDIO_LPASS_MCLK_8>;
+		qcom,codec-lpass-ext-clk-freq = <19200000>;
+		qcom,codec-lpass-clk-id = <0x310>;
+		#clock-cells = <1>;
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/kona-audio.dtsi b/arch/arm64/boot/dts/qcom/kona-audio.dtsi
index 929168b..430def3 100644
--- a/arch/arm64/boot/dts/qcom/kona-audio.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-audio.dtsi
@@ -38,6 +38,10 @@
 			compatible = "qcom,bolero-codec";
 			clock-names = "lpass_core_hw_vote";
 			clocks = <&lpass_core_hw_vote 0>;
+			bolero-clk-rsc-mngr {
+				compatible = "qcom,bolero-clk-rsc-mngr";
+			};
+
 			tx_macro: tx-macro@3220000 {
 				swr2: tx_swr_master {
 				};
diff --git a/arch/arm64/boot/dts/qcom/kona-camera-sensor-cdp.dtsi b/arch/arm64/boot/dts/qcom/kona-camera-sensor-cdp.dtsi
index c2b1fdd..434c601 100644
--- a/arch/arm64/boot/dts/qcom/kona-camera-sensor-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-camera-sensor-cdp.dtsi
@@ -41,6 +41,54 @@
 		rgltr-load-current = <100000>;
 	};
 
+	actuator_rear_aux: qcom,actuator1 {
+		cell-index = <1>;
+		compatible = "qcom,actuator";
+		cci-master = <0>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2856000>;
+		rgltr-max-voltage = <3104000>;
+		rgltr-load-current = <100000>;
+	};
+
+	actuator_triple_wide: qcom,actuator4 {
+		cell-index = <4>;
+		compatible = "qcom,actuator";
+		cci-master = <0>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2856000>;
+		rgltr-max-voltage = <3104000>;
+		rgltr-load-current = <100000>;
+	};
+
+	actuator_triple_tele: qcom,actuator5 {
+		cell-index = <5>;
+		compatible = "qcom,actuator";
+		cci-master = <0>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2856000>;
+		rgltr-max-voltage = <3104000>;
+		rgltr-load-current = <100000>;
+	};
+
+	actuator_triple_uw: qcom,actuator6 {
+		cell-index = <6>;
+		compatible = "qcom,actuator";
+		cci-master = <1>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2856000>;
+		rgltr-max-voltage = <3104000>;
+		rgltr-load-current = <100000>;
+	};
+
 	eeprom_rear: qcom,eeprom0 {
 		cell-index = <0>;
 		compatible = "qcom,eeprom";
@@ -54,7 +102,7 @@
 		rgltr-cntrl-support;
 		rgltr-min-voltage = <0 2800000 1104000 0 2856000>;
 		rgltr-max-voltage = <0 3000000 1104000 0 3104000>;
-		rgltr-load-current = <0 80000 1200000 0 0>;
+		rgltr-load-current = <0 80000 1200000 0 100000>;
 		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk0_active
@@ -91,7 +139,7 @@
 		rgltr-cntrl-support;
 		rgltr-min-voltage = <0 2800000 1200000 0 2856000>;
 		rgltr-max-voltage = <0 3000000 1200000 0 3104000>;
-		rgltr-load-current = <0 80000 1200000 0 0>;
+		rgltr-load-current = <0 80000 1200000 0 100000>;
 		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk1_active
@@ -115,6 +163,117 @@
 		clock-rates = <24000000>;
 	};
 
+	eeprom_triple_wide: qcom,eeprom4 {
+		cell-index = <4>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&pm8009_l7>;
+		cam_vana-supply = <&pm8009_l5>;
+		cam_vdig-supply = <&pm8009_l1>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 2800000 1104000 0 2856000>;
+		rgltr-max-voltage = <0 3000000 1104000 0 3104000>;
+		rgltr-load-current = <0 80000 1200000 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_active
+				&cam_sensor_active_rear>;
+		pinctrl-1 = <&cam_sensor_mclk0_suspend
+				&cam_sensor_suspend_rear>;
+		gpios = <&tlmm 94 0>,
+			<&tlmm 93 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK0",
+					"CAM_RESET0";
+		sensor-position = <0>;
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	eeprom_triple_tele: qcom,eeprom5 {
+		cell-index = <1>;
+		compatible = "qcom,eeprom";
+		cam_vdig-supply = <&pm8009_l2>;
+		cam_vio-supply = <&pm8009_l7>;
+		cam_vana-supply = <&pm8009_l6>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 2800000 1200000 0 2856000>;
+		rgltr-max-voltage = <0 3000000 1200000 0 3104000>;
+		rgltr-load-current = <0 80000 1200000 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk1_active
+				 &cam_sensor_active_rear_aux>;
+		pinctrl-1 = <&cam_sensor_mclk1_suspend
+				 &cam_sensor_suspend_rear_aux>;
+		gpios = <&tlmm 95 0>,
+			<&tlmm 92 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
+					"CAM_RESET1";
+		sensor-position = <0>;
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	eeprom_triple_uw: qcom,eeprom6 {
+		cell-index = <6>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&pm8009_l7>;
+		cam_vana-supply = <&pm8009_l6>;
+		cam_vdig-supply = <&pm8009_l3>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 2800000 1056000 0 2856000>;
+		rgltr-max-voltage = <0 3000000 1056000 0 3104000>;
+		rgltr-load-current = <0 80000 1200000 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk2_active
+				 &cam_sensor_active_rst2>;
+		pinctrl-1 = <&cam_sensor_mclk2_suspend
+				 &cam_sensor_suspend_rst2>;
+		gpios = <&tlmm 96 0>,
+			<&tlmm 78 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK2",
+					"CAM_RESET2";
+		sensor-position = <1>;
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
 	qcom,cam-sensor0 {
 		cell-index = <0>;
 		compatible = "qcom,cam-sensor";
@@ -123,8 +282,8 @@
 		sensor-position-pitch = <0>;
 		sensor-position-yaw = <180>;
 		actuator-src = <&actuator_rear>;
-		eeprom-src = <&eeprom_rear>;
 		led-flash-src = <&led_flash_rear>;
+		eeprom-src = <&eeprom_rear>;
 		cam_vio-supply = <&pm8009_l7>;
 		cam_bob-supply = <&pm8150a_bob>;
 		cam_vana-supply = <&pm8009_l5>;
@@ -201,6 +360,137 @@
 		clock-cntl-level = "turbo";
 		clock-rates = <24000000>;
 	};
+
+	qcom,cam-sensor4 {
+		cell-index = <4>;
+		compatible = "qcom,cam-sensor";
+		csiphy-sd-index = <0>;
+		sensor-position-roll = <90>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <180>;
+		actuator-src = <&actuator_triple_wide>;
+		led-flash-src = <&led_flash_rear>;
+		eeprom-src = <&eeprom_triple_wide>;
+		cam_vio-supply = <&pm8009_l7>;
+		cam_bob-supply = <&pm8150a_bob>;
+		cam_vana-supply = <&pm8009_l5>;
+		cam_vdig-supply = <&pm8009_l1>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_bob";
+		rgltr-cntrl-support;
+		pwm-switch;
+		rgltr-min-voltage = <0 2800000 1104000 0 3008000>;
+		rgltr-max-voltage = <0 3000000 1104000 0 3960000>;
+		rgltr-load-current = <0 80000 1200000 0 2000000>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_active
+				&cam_sensor_active_rear>;
+		pinctrl-1 = <&cam_sensor_mclk0_suspend
+				&cam_sensor_suspend_rear>;
+		gpios = <&tlmm 94 0>,
+			<&tlmm 93 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK0",
+					"CAM_RESET0";
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor5 {
+		cell-index = <5>;
+		compatible = "qcom,cam-sensor";
+		csiphy-sd-index = <1>;
+		sensor-position-roll = <90>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <180>;
+		eeprom-src = <&eeprom_triple_tele>;
+		actuator-src = <&actuator_triple_tele>;
+		led-flash-src = <&led_flash_rear_aux>;
+		cam_bob-supply = <&pm8150a_bob>;
+		cam_vdig-supply = <&pm8009_l2>;
+		cam_vio-supply = <&pm8009_l7>;
+		cam_vana-supply = <&pm8009_l6>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_bob";
+		rgltr-cntrl-support;
+		pwm-switch;
+		rgltr-min-voltage = <0 2800000 1200000 0 3008000>;
+		rgltr-max-voltage = <0 3000000 1200000 0 3960000>;
+		rgltr-load-current = <0 80000 1200000 0 2000000>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk1_active
+				&cam_sensor_active_rear_aux>;
+		pinctrl-1 = <&cam_sensor_mclk1_suspend
+				&cam_sensor_suspend_rear_aux>;
+		gpios = <&tlmm 95 0>,
+			<&tlmm 92 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
+					"CAM_RESET1";
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor6 {
+		cell-index = <6>;
+		compatible = "qcom,cam-sensor";
+		csiphy-sd-index = <2>;
+		sensor-position-roll = <270>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <0>;
+		eeprom-src = <&eeprom_triple_uw>;
+		actuator-src = <&actuator_triple_uw>;
+		cam_vio-supply = <&pm8009_l7>;
+		cam_bob-supply = <&pm8150a_bob>;
+		cam_vana-supply = <&pm8009_l6>;
+		cam_vdig-supply = <&pm8009_l3>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_bob";
+		rgltr-cntrl-support;
+		pwm-switch;
+		rgltr-min-voltage = <0 2800000 1056000 0 3008000>;
+		rgltr-max-voltage = <0 3000000 1056000 0 3960000>;
+		rgltr-load-current = <0 80000 1200000 0 2000000>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk2_active
+				 &cam_sensor_active_rst2>;
+		pinctrl-1 = <&cam_sensor_mclk2_suspend
+				 &cam_sensor_suspend_rst2>;
+		gpios = <&tlmm 96 0>,
+			<&tlmm 78 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK2",
+					"CAM_RESET2";
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
 };
 
 &cam_cci1 {
@@ -217,13 +507,13 @@
 		rgltr-cntrl-support;
 		rgltr-min-voltage = <0 2800000 1056000 0 2856000>;
 		rgltr-max-voltage = <0 3000000 1056000 0 3104000>;
-		rgltr-load-current = <0 80000 1200000 0 0>;
+		rgltr-load-current = <0 80000 1200000 0 100000>;
 		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk2_active
-				 &cam_sensor_active_front>;
+				 &cam_sensor_active_rst2>;
 		pinctrl-1 = <&cam_sensor_mclk2_suspend
-				 &cam_sensor_suspend_front>;
+				 &cam_sensor_suspend_rst2>;
 		gpios = <&tlmm 96 0>,
 			<&tlmm 78 0>;
 		gpio-reset = <1>;
@@ -245,7 +535,7 @@
 		cell-index = <2>;
 		compatible = "qcom,cam-sensor";
 		csiphy-sd-index = <2>;
-		sensor-position-roll = <90>;
+		sensor-position-roll = <270>;
 		sensor-position-pitch = <0>;
 		sensor-position-yaw = <0>;
 		eeprom-src = <&eeprom_front>;
@@ -264,9 +554,9 @@
 		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk2_active
-				 &cam_sensor_active_front>;
+				 &cam_sensor_active_rst2>;
 		pinctrl-1 = <&cam_sensor_mclk2_suspend
-				 &cam_sensor_suspend_front>;
+				 &cam_sensor_suspend_rst2>;
 		gpios = <&tlmm 96 0>,
 			<&tlmm 78 0>;
 		gpio-reset = <1>;
@@ -321,4 +611,3 @@
 		clock-rates = <24000000>;
 	};
 };
-
diff --git a/arch/arm64/boot/dts/qcom/kona-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/kona-camera-sensor-mtp.dtsi
index 65cdd1b..120fcc1 100644
--- a/arch/arm64/boot/dts/qcom/kona-camera-sensor-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-camera-sensor-mtp.dtsi
@@ -41,6 +41,54 @@
 		rgltr-load-current = <100000>;
 	};
 
+	actuator_rear_aux: qcom,actuator1 {
+		cell-index = <1>;
+		compatible = "qcom,actuator";
+		cci-master = <0>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2856000>;
+		rgltr-max-voltage = <3104000>;
+		rgltr-load-current = <100000>;
+	};
+
+	actuator_triple_wide: qcom,actuator4 {
+		cell-index = <4>;
+		compatible = "qcom,actuator";
+		cci-master = <0>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2856000>;
+		rgltr-max-voltage = <3104000>;
+		rgltr-load-current = <100000>;
+	};
+
+	actuator_triple_tele: qcom,actuator5 {
+		cell-index = <5>;
+		compatible = "qcom,actuator";
+		cci-master = <0>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2856000>;
+		rgltr-max-voltage = <3104000>;
+		rgltr-load-current = <100000>;
+	};
+
+	actuator_triple_uw: qcom,actuator6 {
+		cell-index = <6>;
+		compatible = "qcom,actuator";
+		cci-master = <1>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2856000>;
+		rgltr-max-voltage = <3104000>;
+		rgltr-load-current = <100000>;
+	};
+
 	eeprom_rear: qcom,eeprom0 {
 		cell-index = <0>;
 		compatible = "qcom,eeprom";
@@ -54,7 +102,7 @@
 		rgltr-cntrl-support;
 		rgltr-min-voltage = <0 2800000 1104000 0 2856000>;
 		rgltr-max-voltage = <0 3000000 1104000 0 3104000>;
-		rgltr-load-current = <0 80000 1200000 0 0>;
+		rgltr-load-current = <0 80000 1200000 0 100000>;
 		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk0_active
@@ -91,6 +139,80 @@
 		rgltr-cntrl-support;
 		rgltr-min-voltage = <0 2800000 1200000 0 2856000>;
 		rgltr-max-voltage = <0 3000000 1200000 0 3104000>;
+		rgltr-load-current = <0 80000 1200000 0 100000>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk1_active
+				 &cam_sensor_active_rear_aux>;
+		pinctrl-1 = <&cam_sensor_mclk1_suspend
+				 &cam_sensor_suspend_rear_aux>;
+		gpios = <&tlmm 95 0>,
+			<&tlmm 92 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
+					"CAM_RESET1";
+		sensor-position = <0>;
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	eeprom_triple_wide: qcom,eeprom4 {
+		cell-index = <4>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&pm8009_l7>;
+		cam_vana-supply = <&pm8009_l5>;
+		cam_vdig-supply = <&pm8009_l1>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 2800000 1104000 0 2856000>;
+		rgltr-max-voltage = <0 3000000 1104000 0 3104000>;
+		rgltr-load-current = <0 80000 1200000 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_active
+				&cam_sensor_active_rear>;
+		pinctrl-1 = <&cam_sensor_mclk0_suspend
+				&cam_sensor_suspend_rear>;
+		gpios = <&tlmm 94 0>,
+			<&tlmm 93 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK0",
+					"CAM_RESET0";
+		sensor-position = <0>;
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	eeprom_triple_tele: qcom,eeprom5 {
+		cell-index = <1>;
+		compatible = "qcom,eeprom";
+		cam_vdig-supply = <&pm8009_l2>;
+		cam_vio-supply = <&pm8009_l7>;
+		cam_vana-supply = <&pm8009_l6>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 2800000 1200000 0 2856000>;
+		rgltr-max-voltage = <0 3000000 1200000 0 3104000>;
 		rgltr-load-current = <0 80000 1200000 0 0>;
 		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
@@ -107,6 +229,43 @@
 					"CAM_RESET1";
 		sensor-position = <0>;
 		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	eeprom_triple_uw: qcom,eeprom6 {
+		cell-index = <6>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&pm8009_l7>;
+		cam_vana-supply = <&pm8009_l6>;
+		cam_vdig-supply = <&pm8009_l3>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 2800000 1056000 0 2856000>;
+		rgltr-max-voltage = <0 3000000 1056000 0 3104000>;
+		rgltr-load-current = <0 80000 1200000 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk2_active
+				 &cam_sensor_active_rst2>;
+		pinctrl-1 = <&cam_sensor_mclk2_suspend
+				 &cam_sensor_suspend_rst2>;
+		gpios = <&tlmm 96 0>,
+			<&tlmm 78 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK2",
+					"CAM_RESET2";
+		sensor-position = <1>;
+		sensor-mode = <0>;
 		cci-master = <1>;
 		status = "ok";
 		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
@@ -201,6 +360,137 @@
 		clock-cntl-level = "turbo";
 		clock-rates = <24000000>;
 	};
+
+	qcom,cam-sensor4 {
+		cell-index = <4>;
+		compatible = "qcom,cam-sensor";
+		csiphy-sd-index = <0>;
+		sensor-position-roll = <90>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <180>;
+		actuator-src = <&actuator_triple_wide>;
+		led-flash-src = <&led_flash_rear>;
+		eeprom-src = <&eeprom_triple_wide>;
+		cam_vio-supply = <&pm8009_l7>;
+		cam_bob-supply = <&pm8150a_bob>;
+		cam_vana-supply = <&pm8009_l5>;
+		cam_vdig-supply = <&pm8009_l1>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_bob";
+		rgltr-cntrl-support;
+		pwm-switch;
+		rgltr-min-voltage = <0 2800000 1104000 0 3008000>;
+		rgltr-max-voltage = <0 3000000 1104000 0 3960000>;
+		rgltr-load-current = <0 80000 1200000 0 2000000>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_active
+				&cam_sensor_active_rear>;
+		pinctrl-1 = <&cam_sensor_mclk0_suspend
+				&cam_sensor_suspend_rear>;
+		gpios = <&tlmm 94 0>,
+			<&tlmm 93 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK0",
+					"CAM_RESET0";
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor5 {
+		cell-index = <5>;
+		compatible = "qcom,cam-sensor";
+		csiphy-sd-index = <1>;
+		sensor-position-roll = <90>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <180>;
+		eeprom-src = <&eeprom_triple_tele>;
+		actuator-src = <&actuator_triple_tele>;
+		led-flash-src = <&led_flash_rear_aux>;
+		cam_bob-supply = <&pm8150a_bob>;
+		cam_vdig-supply = <&pm8009_l2>;
+		cam_vio-supply = <&pm8009_l7>;
+		cam_vana-supply = <&pm8009_l6>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_bob";
+		rgltr-cntrl-support;
+		pwm-switch;
+		rgltr-min-voltage = <0 2800000 1200000 0 3008000>;
+		rgltr-max-voltage = <0 3000000 1200000 0 3960000>;
+		rgltr-load-current = <0 80000 1200000 0 2000000>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk1_active
+				&cam_sensor_active_rear_aux>;
+		pinctrl-1 = <&cam_sensor_mclk1_suspend
+				&cam_sensor_suspend_rear_aux>;
+		gpios = <&tlmm 95 0>,
+			<&tlmm 92 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
+					"CAM_RESET1";
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor6 {
+		cell-index = <6>;
+		compatible = "qcom,cam-sensor";
+		csiphy-sd-index = <2>;
+		sensor-position-roll = <270>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <0>;
+		eeprom-src = <&eeprom_triple_uw>;
+		actuator-src = <&actuator_triple_uw>;
+		cam_vio-supply = <&pm8009_l7>;
+		cam_bob-supply = <&pm8150a_bob>;
+		cam_vana-supply = <&pm8009_l6>;
+		cam_vdig-supply = <&pm8009_l3>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_bob";
+		rgltr-cntrl-support;
+		pwm-switch;
+		rgltr-min-voltage = <0 2800000 1056000 0 3008000>;
+		rgltr-max-voltage = <0 3000000 1056000 0 3960000>;
+		rgltr-load-current = <0 80000 1200000 0 2000000>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk2_active
+				 &cam_sensor_active_rst2>;
+		pinctrl-1 = <&cam_sensor_mclk2_suspend
+				 &cam_sensor_suspend_rst2>;
+		gpios = <&tlmm 96 0>,
+			<&tlmm 78 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK2",
+					"CAM_RESET2";
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
 };
 
 &cam_cci1 {
@@ -217,13 +507,13 @@
 		rgltr-cntrl-support;
 		rgltr-min-voltage = <0 2800000 1056000 0 2856000>;
 		rgltr-max-voltage = <0 3000000 1056000 0 3104000>;
-		rgltr-load-current = <0 80000 1200000 0 0>;
+		rgltr-load-current = <0 80000 1200000 0 100000>;
 		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk2_active
-				 &cam_sensor_active_front>;
+				 &cam_sensor_active_rst2>;
 		pinctrl-1 = <&cam_sensor_mclk2_suspend
-				 &cam_sensor_suspend_front>;
+				 &cam_sensor_suspend_rst2>;
 		gpios = <&tlmm 96 0>,
 			<&tlmm 78 0>;
 		gpio-reset = <1>;
@@ -264,9 +554,9 @@
 		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk2_active
-				 &cam_sensor_active_front>;
+				 &cam_sensor_active_rst2>;
 		pinctrl-1 = <&cam_sensor_mclk2_suspend
-				 &cam_sensor_suspend_front>;
+				 &cam_sensor_suspend_rst2>;
 		gpios = <&tlmm 96 0>,
 			<&tlmm 78 0>;
 		gpio-reset = <1>;
diff --git a/arch/arm64/boot/dts/qcom/kona-camera-sensor-qrd.dtsi b/arch/arm64/boot/dts/qcom/kona-camera-sensor-qrd.dtsi
index 9348b4c..01f5771 100644
--- a/arch/arm64/boot/dts/qcom/kona-camera-sensor-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-camera-sensor-qrd.dtsi
@@ -54,7 +54,7 @@
 		rgltr-cntrl-support;
 		rgltr-min-voltage = <0 2800000 1104000 0 2856000>;
 		rgltr-max-voltage = <0 3000000 1104000 0 3104000>;
-		rgltr-load-current = <0 80000 1200000 0 0>;
+		rgltr-load-current = <0 80000 1200000 0 100000>;
 		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk0_active
@@ -91,7 +91,7 @@
 		rgltr-cntrl-support;
 		rgltr-min-voltage = <0 2800000 1200000 0 2856000>;
 		rgltr-max-voltage = <0 3000000 1200000 0 3104000>;
-		rgltr-load-current = <0 80000 1200000 0 0>;
+		rgltr-load-current = <0 80000 1200000 0 100000>;
 		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk1_active
@@ -217,13 +217,13 @@
 		rgltr-cntrl-support;
 		rgltr-min-voltage = <0 2800000 1056000 0 2856000>;
 		rgltr-max-voltage = <0 3000000 1056000 0 3104000>;
-		rgltr-load-current = <0 80000 1200000 0 0>;
+		rgltr-load-current = <0 80000 1200000 0 100000>;
 		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk2_active
-				 &cam_sensor_active_front>;
+				 &cam_sensor_active_rst2>;
 		pinctrl-1 = <&cam_sensor_mclk2_suspend
-				 &cam_sensor_suspend_front>;
+				 &cam_sensor_suspend_rst2>;
 		gpios = <&tlmm 96 0>,
 			<&tlmm 78 0>;
 		gpio-reset = <1>;
@@ -264,9 +264,9 @@
 		gpio-no-mux = <0>;
 		pinctrl-names = "cam_default", "cam_suspend";
 		pinctrl-0 = <&cam_sensor_mclk2_active
-				 &cam_sensor_active_front>;
+				 &cam_sensor_active_rst2>;
 		pinctrl-1 = <&cam_sensor_mclk2_suspend
-				 &cam_sensor_suspend_front>;
+				 &cam_sensor_suspend_rst2>;
 		gpios = <&tlmm 96 0>,
 			<&tlmm 78 0>;
 		gpio-reset = <1>;
diff --git a/arch/arm64/boot/dts/qcom/kona-cdp.dtsi b/arch/arm64/boot/dts/qcom/kona-cdp.dtsi
index 3199342..391ed85 100644
--- a/arch/arm64/boot/dts/qcom/kona-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-cdp.dtsi
@@ -114,6 +114,13 @@
 			linux,can-disable;
 		};
 	};
+
+	qcom,qbt_handler {
+		compatible = "qcom,qbt-handler";
+		qcom,ipc-gpio = <&tlmm 23 0>;
+		qcom,finger-detect-gpio = <&pm8150_gpios 1 0>;
+		status = "disabled";
+	};
 };
 
 &qupv3_se13_i2c {
diff --git a/arch/arm64/boot/dts/qcom/kona-coresight.dtsi b/arch/arm64/boot/dts/qcom/kona-coresight.dtsi
index 49dc08d..7c9b06a 100644
--- a/arch/arm64/boot/dts/qcom/kona-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-coresight.dtsi
@@ -623,7 +623,7 @@
 				     <13 64>,
 				     <15 32>,
 				     <16 64>,
-				     <17 64>,
+				     <17 32>,
 				     <18 64>,
 				     <20 64>,
 				     <21 64>,
diff --git a/arch/arm64/boot/dts/qcom/kona-cvp.dtsi b/arch/arm64/boot/dts/qcom/kona-cvp.dtsi
index d1c93ab..7092ba6 100644
--- a/arch/arm64/boot/dts/qcom/kona-cvp.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-cvp.dtsi
@@ -20,16 +20,17 @@
 
 		/* Clocks */
 		clock-names =  "gcc_video_axi0",
-			"gcc_video_axi1", "cvp_clk";
+			"gcc_video_axi1", "cvp_clk", "core_clk";
 		clocks = <&clock_gcc GCC_VIDEO_AXI0_CLK>,
 			<&clock_gcc GCC_VIDEO_AXI1_CLK>,
-			<&clock_videocc VIDEO_CC_MVS1C_CLK>;
+			<&clock_videocc VIDEO_CC_MVS1C_CLK>,
+			<&clock_videocc VIDEO_CC_MVS1_CLK>;
 		qcom,proxy-clock-names = "gcc_video_axi0", "gcc_video_axi1",
-			"cvp_clk";
+			"cvp_clk", "core_clk";
 
-		qcom,clock-configs = <0x0 0x0 0x1>;
-		qcom,allowed-clock-rates = <403000000 520000000
-			549000000 666000000 800000000>;
+		qcom,clock-configs = <0x0 0x0 0x1 0x1>;
+		qcom,allowed-clock-rates = <239999999 338000000
+			366000000 444000000>;
 
 		/* Buses */
 		bus_cnoc {
diff --git a/arch/arm64/boot/dts/qcom/kona-gpu.dtsi b/arch/arm64/boot/dts/qcom/kona-gpu.dtsi
index 155e294..69df65a 100644
--- a/arch/arm64/boot/dts/qcom/kona-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-gpu.dtsi
@@ -232,7 +232,6 @@
 			"gpu_cc_ahb";
 
 		qcom,secure_align_mask = <0xfff>;
-		qcom,global_pt;
 		qcom,retention;
 		qcom,hyp_secure_alloc;
 
diff --git a/arch/arm64/boot/dts/qcom/kona-mhi.dtsi b/arch/arm64/boot/dts/qcom/kona-mhi.dtsi
index a70b183..c2cb9ec 100644
--- a/arch/arm64/boot/dts/qcom/kona-mhi.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-mhi.dtsi
@@ -456,10 +456,31 @@
 				mhi,chan-type = <3>;
 			};
 
+			mhi_chan@105 {
+				reg = <105>;
+				label = "IP_HW_MHIP_0";
+				mhi,event-ring = <10>;
+				mhi,chan-dir = <1>;
+				mhi,data-type = <3>;
+				mhi,ee = <0x4>;
+				mhi,offload-chan;
+			};
+
+			mhi_chan@106 {
+				reg = <106>;
+				label = "IP_HW_MHIP_0";
+				mhi,event-ring = <11>;
+				mhi,chan-dir = <2>;
+				mhi,data-type = <3>;
+				mhi,ee = <0x4>;
+				mhi,offload-chan;
+				mhi,lpm-notify;
+			};
+
 			mhi_chan@107 {
 				reg = <107>;
 				label = "IP_HW_MHIP_1";
-				mhi,event-ring = <10>;
+				mhi,event-ring = <12>;
 				mhi,chan-dir = <1>;
 				mhi,data-type = <3>;
 				mhi,ee = <0x4>;
@@ -469,7 +490,7 @@
 			mhi_chan@108 {
 				reg = <108>;
 				label = "IP_HW_MHIP_1";
-				mhi,event-ring = <11>;
+				mhi,event-ring = <13>;
 				mhi,chan-dir = <2>;
 				mhi,data-type = <3>;
 				mhi,ee = <0x4>;
@@ -583,7 +604,7 @@
 				mhi,num-elements = <0>;
 				mhi,intmod = <0>;
 				mhi,msi = <0>;
-				mhi,chan = <107>;
+				mhi,chan = <105>;
 				mhi,priority = <1>;
 				mhi,brstmode = <3>;
 				mhi,hw-ev;
@@ -595,6 +616,30 @@
 				mhi,num-elements = <0>;
 				mhi,intmod = <0>;
 				mhi,msi = <0>;
+				mhi,chan = <106>;
+				mhi,priority = <1>;
+				mhi,brstmode = <3>;
+				mhi,hw-ev;
+				mhi,client-manage;
+				mhi,offload;
+			};
+
+			mhi_event@12 {
+				mhi,num-elements = <0>;
+				mhi,intmod = <0>;
+				mhi,msi = <0>;
+				mhi,chan = <107>;
+				mhi,priority = <1>;
+				mhi,brstmode = <3>;
+				mhi,hw-ev;
+				mhi,client-manage;
+				mhi,offload;
+			};
+
+			mhi_event@13 {
+				mhi,num-elements = <0>;
+				mhi,intmod = <0>;
+				mhi,msi = <0>;
 				mhi,chan = <108>;
 				mhi,priority = <1>;
 				mhi,brstmode = <3>;
@@ -623,6 +668,15 @@
 				mhi,rsc-parent = <&mhi_netdev_0>;
 			};
 
+			mhi_qdss_dev_0 {
+				mhi,chan = "QDSS";
+				mhi,default-channel;
+			};
+
+			mhi_qdss_dev_1 {
+				mhi,chan = "IP_HW_QDSS";
+			};
+
 			mhi_qrtr {
 				mhi,chan = "IPCR";
 				qcom,net-id = <3>;
diff --git a/arch/arm64/boot/dts/qcom/kona-mtp.dtsi b/arch/arm64/boot/dts/qcom/kona-mtp.dtsi
index 932f194..67ae102 100644
--- a/arch/arm64/boot/dts/qcom/kona-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-mtp.dtsi
@@ -103,6 +103,14 @@
 			linux,can-disable;
 		};
 	};
+
+	qcom,qbt_handler {
+		compatible = "qcom,qbt-handler";
+		qcom,ipc-gpio = <&tlmm 23 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&key_home_default>;
+		qcom,finger-detect-gpio = <&pm8150_gpios 1 0>;
+	};
 };
 
 &qupv3_se13_i2c {
diff --git a/arch/arm64/boot/dts/qcom/kona-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/kona-pinctrl.dtsi
index fbde9a3..417c45b 100644
--- a/arch/arm64/boot/dts/qcom/kona-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-pinctrl.dtsi
@@ -2267,8 +2267,8 @@
 			};
 		};
 
-		cam_sensor_active_front: cam_sensor_active_front {
-			/* RESET FRONT */
+		cam_sensor_active_rst2: cam_sensor_active_rst2 {
+			/* RESET 2 */
 			mux {
 				pins = "gpio78";
 				function = "gpio";
@@ -2281,8 +2281,8 @@
 			};
 		};
 
-		cam_sensor_suspend_front: cam_sensor_suspend_front {
-			/* RESET FRONT */
+		cam_sensor_suspend_rst2: cam_sensor_suspend_rst2 {
+			/* RESET 2 */
 			mux {
 				pins = "gpio78";
 				function = "gpio";
diff --git a/arch/arm64/boot/dts/qcom/kona-qrd.dtsi b/arch/arm64/boot/dts/qcom/kona-qrd.dtsi
index 29b02de..c08d2ba 100644
--- a/arch/arm64/boot/dts/qcom/kona-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-qrd.dtsi
@@ -195,6 +195,14 @@
 			linux,can-disable;
 		};
 	};
+
+	qcom,qbt_handler {
+		compatible = "qcom,qbt-handler";
+		qcom,ipc-gpio = <&tlmm 23 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&key_home_default>;
+		qcom,finger-detect-gpio = <&pm8150_gpios 1 0>;
+	};
 };
 
 &vreg_hap_boost {
@@ -458,6 +466,25 @@
 	qcom,platform-reset-gpio = <&tlmm 75 0>;
 };
 
+&dsi_sw43404_amoled_video {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <1023>;
+	qcom,mdss-brightness-max-level = <255>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_sw43404_amoled_fhd_plus_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <1023>;
+	qcom,mdss-brightness-max-level = <255>;
+	qcom,platform-te-gpio = <&tlmm 66 0>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
 &sde_dsi {
 	qcom,dsi-default-panel = <&dsi_sw43404_amoled_cmd>;
 };
diff --git a/arch/arm64/boot/dts/qcom/kona-regulators.dtsi b/arch/arm64/boot/dts/qcom/kona-regulators.dtsi
index 81c7876..233b71a 100644
--- a/arch/arm64/boot/dts/qcom/kona-regulators.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-regulators.dtsi
@@ -315,9 +315,12 @@
 			<RPMH_REGULATOR_MODE_LPM
 			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 10000>;
+		proxy-supply = <&pm8150_l14>;
 		L14A: pm8150_l14: regulator-pm8150-l14 {
 			regulator-name = "pm8150_l14";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
+			qcom,proxy-consumer-enable;
+			qcom,proxy-consumer-current = <62000>;
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1880000>;
 			qcom,init-voltage = <1800000>;
@@ -429,7 +432,6 @@
 				= <RPMH_REGULATOR_LEVEL_MAX>;
 			qcom,init-voltage-level
 				= <RPMH_REGULATOR_LEVEL_LOW_SVS>;
-			regulator-always-on;
 		};
 
 		VDD_MMCX_LEVEL_AO: S4C_LEVEL_AO:
@@ -684,9 +686,12 @@
 			<RPMH_REGULATOR_MODE_LPM
 			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 10000>;
+		proxy-supply = <&pm8150a_l11>;
 		L11C: pm8150a_l11: regulator-pm8150a-l11 {
 			regulator-name = "pm8150a_l11";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
+			qcom,proxy-consumer-enable;
+			qcom,proxy-consumer-current = <857000>;
 			regulator-min-microvolt = <3104000>;
 			regulator-max-microvolt = <3304000>;
 			qcom,init-voltage = <3104000>;
diff --git a/arch/arm64/boot/dts/qcom/kona-sde.dtsi b/arch/arm64/boot/dts/qcom/kona-sde.dtsi
index 6158a8e..e28a2c5 100644
--- a/arch/arm64/boot/dts/qcom/kona-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-sde.dtsi
@@ -345,7 +345,6 @@
 			<&clock_gcc GCC_USB3_PRIM_PHY_PIPE_CLK>,
 			<&clock_dispcc DISP_CC_MDSS_DP_LINK_CLK>,
 			<&clock_dispcc DISP_CC_MDSS_DP_LINK_INTF_CLK>,
-			<&clock_dispcc DISP_CC_MDSS_DP_CRYPTO_CLK>,
 			<&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK_SRC>,
 			<&mdss_dp_pll DP_VCO_DIVIDED_CLK_SRC_MUX>,
 			<&clock_dispcc DISP_CC_MDSS_DP_PIXEL1_CLK_SRC>,
@@ -354,7 +353,7 @@
 			<&clock_dispcc DISP_CC_MDSS_DP_PIXEL1_CLK>;
 		clock-names = "core_aux_clk", "core_usb_ref_clk_src",
 			"core_usb_pipe_clk", "link_clk", "link_iface_clk",
-			"crypto_clk", "pixel_clk_rcg", "pixel_parent",
+			"pixel_clk_rcg", "pixel_parent",
 			"pixel1_clk_rcg", "pixel1_parent",
 			"strm0_pixel_clk", "strm1_pixel_clk";
 
@@ -480,6 +479,7 @@
 		      <0x0aeb8000 0x3000>;
 		reg-names = "mdp_phys",
 			"rot_vbif_phys";
+		status = "disabled";
 
 		#list-cells = <1>;
 
diff --git a/arch/arm64/boot/dts/qcom/kona-smp2p.dtsi b/arch/arm64/boot/dts/qcom/kona-smp2p.dtsi
index b3f8d88..6f28b7c 100644
--- a/arch/arm64/boot/dts/qcom/kona-smp2p.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-smp2p.dtsi
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
  */
 
 #include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -51,6 +51,17 @@
 			interrupt-controller;
 			#interrupt-cells = <2>;
 		};
+
+		sleepstate_smp2p_out: sleepstate-out {
+			qcom,entry-name = "sleepstate";
+			#qcom,smem-state-cells = <1>;
+		};
+
+		sleepstate_smp2p_in: qcom,sleepstate-in {
+			qcom,entry-name = "sleepstate_see";
+			interrupt-controller;
+			#interrupt-cells = <2>;
+		};
 	};
 
 	qcom,smp2p-cdsp {
diff --git a/arch/arm64/boot/dts/qcom/kona-thermal.dtsi b/arch/arm64/boot/dts/qcom/kona-thermal.dtsi
index 64fba05..9102f4f 100644
--- a/arch/arm64/boot/dts/qcom/kona-thermal.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-thermal.dtsi
@@ -50,6 +50,103 @@
 	};
 };
 
+&soc {
+	qmi-tmd-devices {
+		compatible = "qcom,qmi-cooling-devices";
+
+		modem {
+			qcom,instance-id = <0x64>;
+
+			modem_pa: modem_pa {
+				qcom,qmi-dev-name = "pa";
+				#cooling-cells = <2>;
+			};
+
+			modem_proc: modem_proc {
+				qcom,qmi-dev-name = "modem";
+				#cooling-cells = <2>;
+			};
+
+			modem_current: modem_current {
+				qcom,qmi-dev-name = "modem_current";
+				#cooling-cells = <2>;
+			};
+
+			modem_skin: modem_skin {
+				qcom,qmi-dev-name = "modem_skin";
+				#cooling-cells = <2>;
+			};
+
+			modem_skin0: modem_skin0 {
+				qcom,qmi-dev-name = "modem_skin0";
+				#cooling-cells = <2>;
+			};
+
+			modem_skin1: modem_skin1 {
+				qcom,qmi-dev-name = "modem_skin1";
+				#cooling-cells = <2>;
+			};
+
+			modem_skin2: modem_skin2 {
+				qcom,qmi-dev-name = "modem_skin2";
+				#cooling-cells = <2>;
+			};
+
+			modem_skin3: modem_skin3 {
+				qcom,qmi-dev-name = "modem_skin3";
+				#cooling-cells = <2>;
+			};
+
+			modem_mmw0: modem_mmw0 {
+				qcom,qmi-dev-name = "mmw0";
+				#cooling-cells = <2>;
+			};
+
+			modem_mmw1: modem_mmw1 {
+				qcom,qmi-dev-name = "mmw1";
+				#cooling-cells = <2>;
+			};
+
+			modem_mmw2: modem_mmw2 {
+				qcom,qmi-dev-name = "mmw2";
+				#cooling-cells = <2>;
+			};
+
+			modem_mmw3: modem_mmw3 {
+				qcom,qmi-dev-name = "mmw3";
+				#cooling-cells = <2>;
+			};
+
+			modem_bcl: modem_bcl {
+				qcom,qmi-dev-name = "vbatt_low";
+				#cooling-cells = <2>;
+			};
+		};
+	};
+
+	qmi_sensor: qmi-ts-sensors {
+		compatible = "qcom,qmi-sensors";
+		#thermal-sensor-cells = <1>;
+
+		modem {
+			qcom,instance-id = <100>;
+			qcom,qmi-sensor-names = "pa",
+						"pa_1",
+						"pa_2",
+						"qfe_pa0",
+						"qfe_wtr0",
+						"modem_tsens",
+						"qfe_mmw0",
+						"qfe_mmw1",
+						"qfe_mmw2",
+						"qfe_mmw3",
+						"xo_therm",
+						"qfe_pa_mdm",
+						"qfe_pa_wtr";
+		};
+	};
+};
+
 &thermal_zones {
 	aoss0-usr {
 		polling-delay-passive = <0>;
@@ -854,4 +951,60 @@
 			};
 		};
 	};
+
+	modem-pa0-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&qmi_sensor 100>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	modem-pa1-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&qmi_sensor 101>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	modem-modem-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&qmi_sensor 105>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	modem-skin-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&qmi_sensor 110>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/kona-va-bolero.dtsi b/arch/arm64/boot/dts/qcom/kona-va-bolero.dtsi
index 3ddc27e..11c09eb 100644
--- a/arch/arm64/boot/dts/qcom/kona-va-bolero.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-va-bolero.dtsi
@@ -8,23 +8,14 @@
 		compatible = "qcom,va-macro";
 		reg = <0x3370000 0x0>;
 		clock-names = "va_core_clk";
-		clocks = <&clock_audio_va 0>;
+		clocks = <&clock_audio_va_1 0>;
 		va-vdd-micb-supply = <&S4A>;
 		qcom,va-vdd-micb-voltage = <1800000 1800000>;
 		qcom,va-vdd-micb-current = <11200>;
 		qcom,va-dmic-sample-rate = <4800000>;
 		qcom,va-clk-mux-select = <1>;
 		qcom,va-island-mode-muxsel = <0x033A0000>;
-	};
-};
-
-&soc {
-	clock_audio_va: va_core_clk  {
-		compatible = "qcom,audio-ref-clk";
-		qcom,codec-ext-clk-src = <AUDIO_LPASS_MCLK>;
-		qcom,codec-lpass-ext-clk-freq = <19200000>;
-		qcom,codec-lpass-clk-id = <0x30B>;
-		#clock-cells = <1>;
+		qcom,default-clk-id = <TX_CORE_CLK>;
 	};
 };
 
diff --git a/arch/arm64/boot/dts/qcom/kona-vidc.dtsi b/arch/arm64/boot/dts/qcom/kona-vidc.dtsi
index 67f8dbe..674f53a 100644
--- a/arch/arm64/boot/dts/qcom/kona-vidc.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-vidc.dtsi
@@ -26,16 +26,15 @@
 		vcodec-supply = <&mvs0_gdsc>;
 
 		/* Clocks */
-		clock-names = "gcc_video_axi0", "ahb_clk",
+		clock-names = "gcc_video_axi0",
 			"core_clk", "vcodec_clk";
 		clocks = <&clock_gcc GCC_VIDEO_AXI0_CLK>,
-			<&clock_videocc VIDEO_CC_AHB_CLK>,
 			<&clock_videocc VIDEO_CC_MVS0C_CLK>,
 			<&clock_videocc VIDEO_CC_MVS0_CLK>;
-		qcom,proxy-clock-names = "gcc_video_axi0", "ahb_clk",
+		qcom,proxy-clock-names = "gcc_video_axi0",
 					"core_clk", "vcodec_clk";
 		/* Mask: Bit0: Clock Scaling, Bit1: Mem Retention*/
-		qcom,clock-configs = <0x0 0x0 0x1 0x1>;
+		qcom,clock-configs = <0x0 0x1 0x1>;
 		qcom,allowed-clock-rates = <239999999 338000000
 						366000000 444000000>;
 		resets = <&clock_gcc GCC_VIDEO_AXI0_CLK_ARES>,
@@ -50,9 +49,8 @@
 			label = "cnoc";
 			qcom,bus-master = <MSM_BUS_MASTER_AMPSS_M0>;
 			qcom,bus-slave = <MSM_BUS_SLAVE_VENUS_CFG>;
-			qcom,bus-governor = "performance";
+			qcom,mode = "performance";
 			qcom,bus-range-kbps = <762 762>;
-			operating-points-v2 = <&venus_bus_cnoc_bw_table>;
 		};
 
 		venus_bus_ddr {
@@ -60,9 +58,8 @@
 			label = "venus-ddr";
 			qcom,bus-master = <MSM_BUS_MASTER_LLCC>;
 			qcom,bus-slave = <MSM_BUS_SLAVE_EBI_CH0>;
-			qcom,bus-governor = "msm-vidc-ddr";
+			qcom,mode = "venus-ddr";
 			qcom,bus-range-kbps = <762 6533000>;
-			operating-points-v2 = <&suspendable_ddr_bw_opp_table>;
 		};
 
 		venus_bus_llcc {
@@ -70,9 +67,8 @@
 			label = "venus-llcc";
 			qcom,bus-master = <MSM_BUS_MASTER_VIDEO_P0>;
 			qcom,bus-slave = <MSM_BUS_SLAVE_LLCC>;
-			qcom,bus-governor = "msm-vidc-llcc";
+			qcom,mode = "venuc-llcc";
 			qcom,bus-range-kbps = <2288 6533000>;
-			operating-points-v2 = <&suspendable_llcc_bw_opp_table>;
 		};
 
 		/* MMUs */
diff --git a/arch/arm64/boot/dts/qcom/kona.dtsi b/arch/arm64/boot/dts/qcom/kona.dtsi
index 4106d92..48519d1 100644
--- a/arch/arm64/boot/dts/qcom/kona.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona.dtsi
@@ -627,7 +627,7 @@
 			alloc-ranges = <0x0 0x00000000 0x0 0xffffffff>;
 			reusable;
 			alignment = <0x0 0x400000>;
-			size = <0x0 0x800000>;
+			size = <0x0 0xc00000>;
 		};
 
 		qseecom_mem: qseecom_region {
@@ -945,6 +945,18 @@
 		};
 	};
 
+	bus_proxy_client: qcom,bus_proxy_client {
+		compatible = "qcom,bus-proxy-client";
+		qcom,msm-bus,name = "bus-proxy-client";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <2>;
+		qcom,msm-bus,vectors-KBps =
+			<22 512 0 0>, <23 512 0 0>,
+			<22 512 1500000 1500000>, <23 512 1500000 1500000>;
+		qcom,msm-bus,active-only;
+		status = "ok";
+	};
+
 	keepalive_opp_table: keepalive-opp-table {
 		compatible = "operating-points-v2";
 		opp-1 {
@@ -962,11 +974,6 @@
 		operating-points-v2 = <&keepalive_opp_table>;
 	};
 
-	venus_bus_cnoc_bw_table: bus-cnoc-bw-table {
-		compatible = "operating-points-v2";
-		BW_OPP_ENTRY( 200, 4);
-	};
-
 	llcc_bw_opp_table: llcc-bw-opp-table {
 		compatible = "operating-points-v2";
 		BW_OPP_ENTRY(  150, 16); /*  2288 MB/s */
@@ -1077,23 +1084,30 @@
 
 	npu_npu_ddr_bwmon: qcom,npu-npu-ddr-bwmon@60300 {
 		compatible = "qcom,bimc-bwmon4";
-		reg = <0x00060300 0x300>, <0x00060400 0x200>;
+		reg = <0x00060400 0x300>, <0x00060300 0x200>;
 		reg-names = "base", "global_base";
-		interrupts = <GIC_SPI 476 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>;
 		qcom,mport = <0>;
 		qcom,hw-timer-hz = <19200000>;
 		qcom,target-dev = <&npu_npu_ddr_bw>;
 		qcom,count-unit = <0x10000>;
 	};
 
-	npu_npu_ddr_bwmon_dsp: qcom,npu-npu-ddr-bwmoni_dsp@70200 {
+	npudsp_npu_ddr_bw: qcom,npudsp-npu-ddr-bw {
+		compatible = "qcom,devbw";
+		governor = "performance";
+		qcom,src-dst-ports = <MSM_BUS_MASTER_NPU MSM_BUS_SLAVE_EBI_CH0>;
+		operating-points-v2 = <&suspendable_ddr_bw_opp_table>;
+	};
+
+	npudsp_npu_ddr_bwmon: qcom,npudsp-npu-ddr-bwmon@70200 {
 		compatible = "qcom,bimc-bwmon4";
-		reg = <0x00070200 0x300>, <0x00070300 0x200>;
+		reg = <0x00070300 0x300>, <0x00070200 0x200>;
 		reg-names = "base", "global_base";
 		interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
 		qcom,mport = <0>;
 		qcom,hw-timer-hz = <19200000>;
-		qcom,target-dev = <&npu_npu_ddr_bw>;
+		qcom,target-dev = <&npudsp_npu_ddr_bw>;
 		qcom,count-unit = <0x10000>;
 	};
 
@@ -2220,8 +2234,16 @@
 		memory-region = <&pil_ipa_fw_mem>;
 	};
 
+	qcom,ipa-mpm {
+		compatible = "qcom,ipa-mpm";
+		qcom,mhi-chdb-base = <0x64300300>;
+		qcom,mhi-erdb-base = <0x64300700>;
+		qcom,iova-mapping = <0x10000000 0x1FFFFFFF>;
+	};
+
 	ipa_hw: qcom,ipa@1e00000 {
 		compatible = "qcom,ipa";
+		mboxes = <&qmp_aop 0>;
 		reg =
 			<0x1e00000 0x84000>,
 			<0x1e04000 0x23000>;
@@ -2691,6 +2713,14 @@
 		memory-region = <&pil_npu_mem>;
 	};
 
+	qcom,smp2p_sleepstate {
+		compatible = "qcom,smp2p-sleepstate";
+		qcom,smem-states = <&sleepstate_smp2p_out 0>;
+		interrupt-parent = <&sleepstate_smp2p_in>;
+		interrupts = <0 0>;
+		interrupt-names = "smp2p-sleepstate-in";
+	};
+
 	qcom,msm-cdsp-loader {
 		compatible = "qcom,cdsp-loader";
 		qcom,proc-img-to-load = "cdsp";
@@ -2909,13 +2939,11 @@
 		compatible = "qcom,pil-tz-generic";
 		reg = <0x5c00000 0x4000>;
 
-		vdd_cx-supply = <&VDD_CX_LEVEL>;
+		vdd_cx-supply = <&L11A_LEVEL>;
 		qcom,vdd_cx-uV-uA = <RPMH_REGULATOR_LEVEL_TURBO 0>;
-		vdd_mx-supply = <&VDD_MX_LEVEL>;
+		vdd_mx-supply = <&L4A_LEVEL>;
 		qcom,vdd_mx-uV-uA = <RPMH_REGULATOR_LEVEL_TURBO 0>;
-
 		qcom,proxy-reg-names = "vdd_cx", "vdd_mx";
-		qcom,keep-proxy-regs-on;
 
 		clocks = <&clock_rpmh RPMH_CXO_CLK>;
 		clock-names = "xo";
@@ -2930,6 +2958,7 @@
 		status = "ok";
 		memory-region = <&pil_slpi_mem>;
 		qcom,complete-ramdump;
+		qcom,signal-aop;
 
 		/* Inputs from ssc */
 		interrupts-extended = <&pdc 9 IRQ_TYPE_LEVEL_HIGH>,
@@ -2948,6 +2977,7 @@
 		qcom,smem-states = <&dsps_smp2p_out 0>;
 		qcom,smem-state-names = "qcom,force-stop";
 
+		mboxes = <&qmp_aop 0>;
 		mbox-names = "slpi-pil";
 	};
 
@@ -3371,6 +3401,13 @@
 				mhi,brstmode = <2>;
 			};
 		};
+
+		mhi_devices {
+			mhi_qrtr {
+				mhi,chan = "IPCR";
+				qcom,net-id = <0>;
+			};
+		};
 	};
 };
 
diff --git a/arch/arm64/boot/dts/qcom/lito-gdsc.dtsi b/arch/arm64/boot/dts/qcom/lito-gdsc.dtsi
index f093a93..dfb2644 100644
--- a/arch/arm64/boot/dts/qcom/lito-gdsc.dtsi
+++ b/arch/arm64/boot/dts/qcom/lito-gdsc.dtsi
@@ -107,7 +107,7 @@
 	};
 
 	gpu_cx_gdsc: qcom,gdsc@3d9106c {
-		compatible = "regulator-fixed";
+		compatible = "qcom,gdsc";
 		reg = <0x3d9106c 0x4>;
 		regulator-name = "gpu_cx_gdsc";
 		hw-ctrl-addr = <&gpu_cx_hw_ctrl>;
@@ -128,7 +128,7 @@
 	};
 
 	gpu_gx_gdsc: qcom,gdsc@3d9100c {
-		compatible = "regulator-fixed";
+		compatible = "qcom,gdsc";
 		reg = <0x3d9100c 0x4>;
 		regulator-name = "gpu_gx_gdsc";
 		domain-addr = <&gpu_gx_domain_addr>;
diff --git a/arch/arm64/boot/dts/qcom/lito-ion.dtsi b/arch/arm64/boot/dts/qcom/lito-ion.dtsi
new file mode 100644
index 0000000..e68f421
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/lito-ion.dtsi
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+&soc {
+	qcom,ion {
+		compatible = "qcom,msm-ion";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		system_heap: qcom,ion-heap@25 {
+			reg = <25>;
+			qcom,ion-heap-type = "SYSTEM";
+		};
+
+		system_secure_heap: qcom,ion-heap@9 {
+			reg = <9>;
+			qcom,ion-heap-type = "SYSTEM_SECURE";
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/lito-rumi.dtsi b/arch/arm64/boot/dts/qcom/lito-rumi.dtsi
index 093f3d6..9cb6c48 100644
--- a/arch/arm64/boot/dts/qcom/lito-rumi.dtsi
+++ b/arch/arm64/boot/dts/qcom/lito-rumi.dtsi
@@ -35,6 +35,27 @@
 	usb_nop_phy: usb_nop_phy {
 		compatible = "usb-nop-xceiv";
 	};
+
+	cxo: bi_tcxo {
+		compatible = "fixed-factor-clock";
+		clocks = <&xo_board>;
+		clock-mult = <1>;
+		clock-div = <2>;
+		#clock-cells = <0>;
+	};
+
+	cxo_a: bi_tcxo_ao {
+		compatible = "fixed-factor-clock";
+		clocks = <&xo_board>;
+		clock-mult = <1>;
+		clock-div = <2>;
+		#clock-cells = <0>;
+	};
+};
+
+&rpmhcc {
+	compatible = "qcom,dummycc";
+	clock-output-names = "rpmh_clocks";
 };
 
 &usb0 {
diff --git a/arch/arm64/boot/dts/qcom/lito-usb.dtsi b/arch/arm64/boot/dts/qcom/lito-usb.dtsi
index 7cc8b61..6cc33c0 100644
--- a/arch/arm64/boot/dts/qcom/lito-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/lito-usb.dtsi
@@ -12,7 +12,9 @@
 		reg = <0x0a600000 0x100000>;
 		reg-names = "core_base";
 
-		qcom,iommu-dma = "disabled";
+		iommus = <&apps_smmu 0xE0 0x0>;
+		qcom,iommu-dma = "atomic";
+		qcom,iommu-dma-addr-pool = <0x90000000 0x60000000>;
 		#address-cells = <1>;
 		#size-cells = <1>;
 		ranges;
diff --git a/arch/arm64/boot/dts/qcom/lito.dtsi b/arch/arm64/boot/dts/qcom/lito.dtsi
index e1e8b2c..609691a 100644
--- a/arch/arm64/boot/dts/qcom/lito.dtsi
+++ b/arch/arm64/boot/dts/qcom/lito.dtsi
@@ -850,28 +850,6 @@
 		};
 	};
 
-	cxo: bi_tcxo {
-		compatible = "fixed-factor-clock";
-		clocks = <&xo_board>;
-		clock-mult = <1>;
-		clock-div = <2>;
-		#clock-cells = <0>;
-	};
-
-	cxo_a: bi_tcxo_ao {
-		compatible = "fixed-factor-clock";
-		clocks = <&xo_board>;
-		clock-mult = <1>;
-		clock-div = <2>;
-		#clock-cells = <0>;
-	};
-
-	rpmhcc: qcom,rpmhclk {
-		compatible = "qcom,dummycc";
-		clock-output-names = "rpmh_clocks";
-		#clock-cells = <1>;
-	};
-
 	aopcc: qcom,aopclk {
 		compatible = "qcom,dummycc";
 		clock-output-names = "qdss_clocks";
@@ -912,7 +890,7 @@
 	};
 
 	dispcc: qcom,dispcc {
-		compatible = "qcom,lito-dispcc";
+		compatible = "qcom,lito-dispcc", "syscon";
 		reg = <0xaf00000 0x20000>;
 		reg-names = "cc_base";
 		clock-names = "cfg_ahb_clk";
@@ -922,6 +900,16 @@
 		#reset-cells = <1>;
 	};
 
+	gpucc: qcom,gpucc {
+		compatible = "qcom,gpucc-lito", "syscon";
+		reg = <0x3d90000 0x9000>;
+		reg-names = "cc_base";
+		vdd_cx-supply = <&VDD_CX_LEVEL>;
+		vdd_mx-supply = <&VDD_MX_LEVEL>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
 	ufsphy_mem: ufsphy_mem@1d87000 {
 		reg = <0x1d87000 0xe00>; /* PHY regs */
 		reg-names = "phy_mem";
@@ -1055,13 +1043,6 @@
 		#reset-cells = <1>;
 	};
 
-	gpucc: qcom,gpucc {
-		compatible = "qcom,dummycc";
-		clock-output-names = "gpucc_clocks";
-		#clock-cells = <1>;
-		#reset-cells = <1>;
-	};
-
 	apps_rsc: rsc@18200000 {
 		label = "apps_rsc";
 		compatible = "qcom,rpmh-rsc";
@@ -1082,6 +1063,11 @@
 		system_pm {
 			compatible = "qcom,system-pm";
 		};
+
+		rpmhcc: qcom,rpmhclk {
+			compatible = "qcom,lito-rpmh-clk";
+			#clock-cells = <1>;
+		};
 	};
 
 	disp_rsc: rsc@af20000 {
@@ -1469,6 +1455,7 @@
 #include "lito-regulators.dtsi"
 #include "lito-smp2p.dtsi"
 #include "lito-usb.dtsi"
+#include "lito-ion.dtsi"
 
 &ufs_phy_gdsc {
 	status = "ok";
@@ -1536,10 +1523,13 @@
 };
 
 &gpu_cx_gdsc {
+	parent-supply = <&VDD_CX_LEVEL>;
 	status = "ok";
 };
 
 &gpu_gx_gdsc {
+	parent-supply = <&VDD_GFX_LEVEL>;
+	vdd_parent-supply = <&VDD_GFX_LEVEL>;
 	status = "ok";
 };
 
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi
index 2862a0f..20f1a38 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi
@@ -419,22 +419,27 @@
 	kgsl_iommu_test_device {
 		compatible = "iommu-debug-test";
 		iommus = <&kgsl_smmu 0x7 0>;
+		qcom,iommu-dma = "disabled";
 	};
 
 	kgsl_iommu_coherent_test_device {
+		status = "disabled";
 		compatible = "iommu-debug-test";
 		iommus = <&kgsl_smmu 0x9 0>;
+		qcom,iommu-dma = "disabled";
 		dma-coherent;
 	};
 
 	apps_iommu_test_device {
 		compatible = "iommu-debug-test";
 		iommus = <&apps_smmu 0x21 0>;
+		qcom,iommu-dma = "disabled";
 	};
 
 	apps_iommu_coherent_test_device {
 		compatible = "iommu-debug-test";
 		iommus = <&apps_smmu 0x23 0>;
+		qcom,iommu-dma = "disabled";
 		dma-coherent;
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index cd3865e..8c86c41 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -399,7 +399,7 @@
 		};
 
 		intc: interrupt-controller@9bc0000 {
-			compatible = "arm,gic-v3";
+			compatible = "qcom,msm8996-gic-v3", "arm,gic-v3";
 			#interrupt-cells = <3>;
 			interrupt-controller;
 			#redistributor-regions = <1>;
diff --git a/arch/arm64/boot/dts/qcom/pm8009.dtsi b/arch/arm64/boot/dts/qcom/pm8009.dtsi
index bfa5cf0..4b74708 100644
--- a/arch/arm64/boot/dts/qcom/pm8009.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8009.dtsi
@@ -29,10 +29,10 @@
 		pm8009_gpios: pinctrl@c000 {
 			compatible = "qcom,spmi-gpio";
 			reg = <0xc000 0x400>;
-			interrupts = <0x0 0xc0 0 IRQ_TYPE_NONE>,
-				     <0x0 0xc1 0 IRQ_TYPE_NONE>,
-				     <0x0 0xc2 0 IRQ_TYPE_NONE>,
-				     <0x0 0xc3 0 IRQ_TYPE_NONE>;
+			interrupts = <0xa 0xc0 0 IRQ_TYPE_NONE>,
+				     <0xa 0xc1 0 IRQ_TYPE_NONE>,
+				     <0xa 0xc2 0 IRQ_TYPE_NONE>,
+				     <0xa 0xc3 0 IRQ_TYPE_NONE>;
 			interrupt-names = "pm8009_gpio1", "pm8009_gpio2",
 					  "pm8009_gpio3", "pm8009_gpio4";
 			gpio-controller;
diff --git a/arch/arm64/boot/dts/qcom/pm8150.dtsi b/arch/arm64/boot/dts/qcom/pm8150.dtsi
index 29495c1..10161b4 100644
--- a/arch/arm64/boot/dts/qcom/pm8150.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8150.dtsi
@@ -76,14 +76,15 @@
 			interrupts = <0x0 0xc0 0x0 IRQ_TYPE_NONE>,
 					<0x0 0xc2 0x0 IRQ_TYPE_NONE>,
 					<0x0 0xc5 0x0 IRQ_TYPE_NONE>,
+					<0x0 0xc6 0x0 IRQ_TYPE_NONE>,
 					<0x0 0xc8 0x0 IRQ_TYPE_NONE>,
 					<0x0 0xc9 0x0 IRQ_TYPE_NONE>;
 			interrupt-names = "pm8150_gpio1", "pm8150_gpio3",
-					"pm8150_gpio6", "pm8150_gpio9",
-					"pm8150_gpio10";
+					"pm8150_gpio6", "pm8150_gpio7",
+					"pm8150_gpio9", "pm8150_gpio10";
 			gpio-controller;
 			#gpio-cells = <2>;
-			qcom,gpios-disallowed = <2 4 5 7 8>;
+			qcom,gpios-disallowed = <2 4 5 8>;
 		};
 
 		pm8150_rtc: qcom,pm8150_rtc {
diff --git a/arch/arm64/boot/dts/qcom/pm8150b.dtsi b/arch/arm64/boot/dts/qcom/pm8150b.dtsi
index c427d85..1b87f43 100644
--- a/arch/arm64/boot/dts/qcom/pm8150b.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8150b.dtsi
@@ -61,17 +61,19 @@
 					<0x2 0xc1 0x0 IRQ_TYPE_NONE>,
 					<0x2 0xc4 0x0 IRQ_TYPE_NONE>,
 					<0x2 0xc5 0x0 IRQ_TYPE_NONE>,
+					<0x2 0xc6 0x0 IRQ_TYPE_NONE>,
 					<0x2 0xc7 0x0 IRQ_TYPE_NONE>,
 					<0x2 0xc8 0x0 IRQ_TYPE_NONE>,
 					<0x2 0xc9 0x0 IRQ_TYPE_NONE>,
 					<0x2 0xcb 0x0 IRQ_TYPE_NONE>;
 			interrupt-names = "pm8150b_gpio1", "pm8150b_gpio2",
 					"pm8150b_gpio5", "pm8150b_gpio6",
-					"pm8150b_gpio8", "pm8150b_gpio9",
-					"pm8150b_gpio10", "pm8150b_gpio12";
+					"pm8150b_gpio7", "pm8150b_gpio8",
+					"pm8150b_gpio9", "pm8150b_gpio10",
+					"pm8150b_gpio12";
 			gpio-controller;
 			#gpio-cells = <2>;
-			qcom,gpios-disallowed = <3 4 7 11>;
+			qcom,gpios-disallowed = <3 4 11>;
 		};
 
 		pm8150b_charger: qcom,qpnp-smb5 {
diff --git a/arch/arm64/boot/dts/qcom/pm8150l.dtsi b/arch/arm64/boot/dts/qcom/pm8150l.dtsi
index 240764c..186c86f 100644
--- a/arch/arm64/boot/dts/qcom/pm8150l.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8150l.dtsi
@@ -55,18 +55,18 @@
 					<0x4 0xc3 0x0 IRQ_TYPE_NONE>,
 					<0x4 0xc4 0x0 IRQ_TYPE_NONE>,
 					<0x4 0xc5 0x0 IRQ_TYPE_NONE>,
+					<0x4 0xc6 0x0 IRQ_TYPE_NONE>,
 					<0x4 0xc7 0x0 IRQ_TYPE_NONE>,
-					<0x4 0xc8 0x0 IRQ_TYPE_NONE>,
 					<0x4 0xc9 0x0 IRQ_TYPE_NONE>,
 					<0x4 0xca 0x0 IRQ_TYPE_NONE>;
 			interrupt-names = "pm8150l_gpio1", "pm8150l_gpio3",
 					"pm8150l_gpio4", "pm8150l_gpio5",
-					"pm8150l_gpio6", "pm8150l_gpio8",
-					"pm8150l_gpio9", "pm8150l_gpio10",
+					"pm8150l_gpio6", "pm8150l_gpio7",
+					"pm8150l_gpio8", "pm8150l_gpio10",
 					"pm8150l_gpio11";
 			gpio-controller;
 			#gpio-cells = <2>;
-			qcom,gpios-disallowed = <2 7 12>;
+			qcom,gpios-disallowed = <2 9 12>;
 		};
 
 		pm8150l_vadc: vadc@3100 {
@@ -362,6 +362,14 @@
 			};
 		};
 
+		pm8150l_pwm: qcom,pwms@bc00 {
+			compatible = "qcom,pwm-lpg";
+			reg = <0xbc00 0x200>;
+			reg-names = "lpg-base";
+			#pwm-cells = <2>;
+			qcom,num-lpg-channels = <2>;
+		};
+
 		pm8150l_rgb_led: qcom,leds@d000 {
 			compatible = "qcom,tri-led";
 			reg = <0xd000 0x100>;
diff --git a/arch/arm64/boot/dts/renesas/r8a7796.dtsi b/arch/arm64/boot/dts/renesas/r8a7796.dtsi
index cbd35c0..33cb028 100644
--- a/arch/arm64/boot/dts/renesas/r8a7796.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a7796.dtsi
@@ -1161,6 +1161,9 @@
 				 <&cpg CPG_CORE R8A7796_CLK_S3D1>,
 				 <&scif_clk>;
 			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac1 0x13>, <&dmac1 0x12>,
+			       <&dmac2 0x13>, <&dmac2 0x12>;
+			dma-names = "tx", "rx", "tx", "rx";
 			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
 			resets = <&cpg 310>;
 			status = "disabled";
diff --git a/arch/arm64/boot/dts/renesas/r8a77965.dtsi b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
index 0cd4446..f60f08b 100644
--- a/arch/arm64/boot/dts/renesas/r8a77965.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
@@ -951,6 +951,9 @@
 				 <&cpg CPG_CORE R8A77965_CLK_S3D1>,
 				 <&scif_clk>;
 			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac1 0x13>, <&dmac1 0x12>,
+			       <&dmac2 0x13>, <&dmac2 0x12>;
+			dma-names = "tx", "rx", "tx", "rx";
 			power-domains = <&sysc R8A77965_PD_ALWAYS_ON>;
 			resets = <&cpg 310>;
 			status = "disabled";
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
index eb5e8bd..8954c8c 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
@@ -101,6 +101,7 @@
 	sdio_pwrseq: sdio_pwrseq {
 		compatible = "mmc-pwrseq-simple";
 		reset-gpios = <&gpio 7 GPIO_ACTIVE_LOW>; /* WIFI_EN */
+		post-power-on-delay-ms = <10>;
 	};
 };
 
diff --git a/arch/arm64/configs/cuttlefish_defconfig b/arch/arm64/configs/cuttlefish_defconfig
index d1ea96f..87a22fa 100644
--- a/arch/arm64/configs/cuttlefish_defconfig
+++ b/arch/arm64/configs/cuttlefish_defconfig
@@ -58,6 +58,7 @@
 CONFIG_CPU_IDLE=y
 CONFIG_ARM_CPUIDLE=y
 CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TIMES=y
 CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=y
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
@@ -124,6 +125,7 @@
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
 CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_CT=y
 CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
 CONFIG_NETFILTER_XT_TARGET_MARK=y
 CONFIG_NETFILTER_XT_TARGET_NFLOG=y
@@ -229,6 +231,7 @@
 CONFIG_PPP_MPPE=y
 CONFIG_PPTP=y
 CONFIG_PPPOL2TP=y
+CONFIG_USB_RTL8152=y
 CONFIG_USB_USBNET=y
 # CONFIG_USB_NET_AX8817X is not set
 # CONFIG_USB_NET_AX88179_178A is not set
@@ -299,6 +302,12 @@
 CONFIG_DRM_VIRTIO_GPU=y
 CONFIG_SOUND=y
 CONFIG_SND=y
+CONFIG_SND_HRTIMER=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_VERBOSE_PROCFS is not set
+# CONFIG_SND_DRIVERS is not set
+CONFIG_SND_INTEL8X0=y
+# CONFIG_SND_USB is not set
 CONFIG_HIDRAW=y
 CONFIG_UHID=y
 CONFIG_HID_A4TECH=y
diff --git a/arch/arm64/configs/vendor/kona-perf_defconfig b/arch/arm64/configs/vendor/kona-perf_defconfig
index d70999a..cb6da38 100644
--- a/arch/arm64/configs/vendor/kona-perf_defconfig
+++ b/arch/arm64/configs/vendor/kona-perf_defconfig
@@ -442,6 +442,8 @@
 CONFIG_USB_CONFIGFS_F_CCID=y
 CONFIG_USB_CONFIGFS_F_QDSS=y
 CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
 CONFIG_TYPEC=y
 CONFIG_USB_PD_POLICY=y
 CONFIG_QPNP_USB_PDPHY=y
@@ -514,6 +516,7 @@
 CONFIG_OVERRIDE_MEMORY_LIMIT=y
 CONFIG_QCOM_CPUSS_DUMP=y
 CONFIG_QCOM_RUN_QUEUE_STATS=y
+CONFIG_MSM_QBT_HANDLER=y
 CONFIG_QCOM_IPCC=y
 CONFIG_QCOM_LLCC=y
 CONFIG_QCOM_KONA_LLCC=y
@@ -549,6 +552,7 @@
 CONFIG_QSEE_IPC_IRQ_BRIDGE=y
 CONFIG_QCOM_GLINK=y
 CONFIG_QCOM_GLINK_PKT=y
+CONFIG_QCOM_SMP2P_SLEEPSTATE=y
 CONFIG_QCOM_QDSS_BRIDGE=y
 CONFIG_MSM_CDSP_LOADER=y
 CONFIG_QCOM_SMCINVOKE=y
diff --git a/arch/arm64/configs/vendor/kona_defconfig b/arch/arm64/configs/vendor/kona_defconfig
index 9a5e940..626e4ad 100644
--- a/arch/arm64/configs/vendor/kona_defconfig
+++ b/arch/arm64/configs/vendor/kona_defconfig
@@ -453,6 +453,8 @@
 CONFIG_USB_CONFIGFS_F_CCID=y
 CONFIG_USB_CONFIGFS_F_QDSS=y
 CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
 CONFIG_TYPEC=y
 CONFIG_USB_PD_POLICY=y
 CONFIG_QPNP_USB_PDPHY=y
@@ -493,6 +495,7 @@
 CONFIG_IPA_WDI_UNIFIED_API=y
 CONFIG_RMNET_IPA3=y
 CONFIG_RNDIS_IPA=y
+CONFIG_IPA3_MHI_PRIME_MANAGER=y
 CONFIG_IPA_UT=y
 CONFIG_MSM_11AD=m
 CONFIG_USB_BAM=y
@@ -529,6 +532,7 @@
 CONFIG_OVERRIDE_MEMORY_LIMIT=y
 CONFIG_QCOM_CPUSS_DUMP=y
 CONFIG_QCOM_RUN_QUEUE_STATS=y
+CONFIG_MSM_QBT_HANDLER=y
 CONFIG_QCOM_IPCC=y
 CONFIG_QCOM_LLCC=y
 CONFIG_QCOM_KONA_LLCC=y
@@ -567,6 +571,7 @@
 CONFIG_QSEE_IPC_IRQ_BRIDGE=y
 CONFIG_QCOM_GLINK=y
 CONFIG_QCOM_GLINK_PKT=y
+CONFIG_QCOM_SMP2P_SLEEPSTATE=y
 CONFIG_QCOM_QDSS_BRIDGE=y
 CONFIG_MSM_CDSP_LOADER=y
 CONFIG_QCOM_SMCINVOKE=y
@@ -646,6 +651,7 @@
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_DEBUG_INFO=y
 CONFIG_PAGE_OWNER=y
+CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
 CONFIG_DEBUG_SECTION_MISMATCH=y
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
diff --git a/arch/arm64/configs/vendor/lito-perf_defconfig b/arch/arm64/configs/vendor/lito-perf_defconfig
new file mode 100644
index 0000000..870c6ce
--- /dev/null
+++ b/arch/arm64/configs/vendor/lito-perf_defconfig
@@ -0,0 +1,473 @@
+CONFIG_LOCALVERSION="-perf"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_BPF=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+# CONFIG_FHANDLE is not set
+CONFIG_KALLSYMS_ALL=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB_FREELIST_RANDOM=y
+CONFIG_SLAB_FREELIST_HARDENED=y
+CONFIG_PROFILING=y
+# CONFIG_ZONE_DMA32 is not set
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_LITO=y
+CONFIG_PCI=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_HZ_100=y
+CONFIG_SECCOMP=y
+# CONFIG_UNMAP_KERNEL_AT_EL0 is not set
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+# CONFIG_ARM64_VHE is not set
+CONFIG_RANDOMIZE_BASE=y
+# CONFIG_EFI is not set
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_CPU_IDLE=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_ARM_QCOM_CPUFREQ_HW=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_PANIC_ON_REFCOUNT_ERROR=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_CFQ_GROUP_IOSCHED=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_CMA=y
+CONFIG_ZSMALLOC=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_L2TP=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_CLS_BPF=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_QRTR=y
+CONFIG_QRTR_SMD=y
+CONFIG_BT=y
+CONFIG_CFG80211=y
+CONFIG_RFKILL=y
+CONFIG_FW_LOADER_USER_HELPER=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_DMA_CMA=y
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_UID_SYS_STATS=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_RMNET=y
+CONFIG_PHYLIB=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPTP=y
+CONFIG_PPPOL2TP=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+CONFIG_SERIAL_MSM_GENI=y
+CONFIG_TTY_PRINTK=y
+CONFIG_HW_RANDOM=y
+CONFIG_DIAG_CHAR=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_SPI=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SPMI=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_PINCTRL_LITO=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_DEVFREQ_THERMAL=y
+CONFIG_QCOM_SPMI_TEMP_ALARM=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_STUB=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_ADV_DEBUG=y
+CONFIG_VIDEO_FIXED_MINOR_RANGES=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_DRM=y
+CONFIG_DRM_MSM_REGISTER_LOGGING=y
+CONFIG_FB_VIRTUAL=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_SOC=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_LINK_LAYER_TEST=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_QCOM_EMU_PHY=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=900
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_TEST=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_EDAC=y
+CONFIG_EDAC_KRYO_ARM64=y
+CONFIG_EDAC_KRYO_ARM64_PANIC_ON_UE=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_PM8XXX=y
+CONFIG_DMADEVICES=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ION=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_QCOM_GENI_SE=y
+# CONFIG_QCOM_A53PLL is not set
+CONFIG_QCOM_CLK_RPMH=y
+CONFIG_MSM_CLK_AOP_QMP=y
+CONFIG_SM_GCC_LITO=y
+CONFIG_SM_VIDEOCC_LITO=y
+CONFIG_SM_CAMCC_LITO=y
+CONFIG_SM_DISPCC_LITO=y
+CONFIG_SM_GPUCC_LITO=y
+CONFIG_HWSPINLOCK=y
+CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_MAILBOX=y
+CONFIG_MSM_QMP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_ARM_SMMU=y
+CONFIG_QCOM_LAZY_MAPPING=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_RPMSG_CHAR=y
+CONFIG_RPMSG_QCOM_GLINK_SMEM=y
+CONFIG_RPMSG_QCOM_GLINK_SPSS=y
+CONFIG_QCOM_COMMAND_DB=y
+CONFIG_QCOM_CPUSS_DUMP=y
+CONFIG_QCOM_RUN_QUEUE_STATS=y
+CONFIG_QCOM_IPCC=y
+CONFIG_QCOM_LLCC=y
+CONFIG_QCOM_LITO_LLCC=y
+CONFIG_QCOM_QMI_HELPERS=y
+CONFIG_QCOM_RPMH=y
+CONFIG_QCOM_SMEM=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_SMP2P=y
+CONFIG_SETUP_SSR_NOTIF_TIMEOUTS=y
+CONFIG_SSR_SYSMON_NOTIF_TIMEOUT=20000
+CONFIG_SSR_SUBSYS_NOTIF_TIMEOUT=20000
+CONFIG_PANIC_ON_SSR_NOTIF_TIMEOUT=y
+CONFIG_QCOM_SECURE_BUFFER=y
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_SYSMON_QMI_COMM=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_QCOM_EUD=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QCOM_GLINK=y
+CONFIG_QCOM_GLINK_PKT=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_PM=y
+CONFIG_QTI_RPM_STATS_LOG=y
+CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_DEVFREQ_GOV_PASSIVE=y
+CONFIG_ARM_QCOM_DEVFREQ_FW=y
+CONFIG_IIO=y
+CONFIG_PWM=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_QCOM_PDC=y
+CONFIG_PHY_XGENE=y
+CONFIG_RAS=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_SLIMBUS=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_HARDENED_USERCOPY_PAGESPAN=y
+CONFIG_FORTIFY_SOURCE=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCE=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_PANIC_TIMEOUT=-1
+CONFIG_SCHEDSTATS=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_IPC_LOGGING=y
+CONFIG_DEBUG_ALIGN_RODATA=y
+CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
diff --git a/arch/arm64/configs/vendor/lito_defconfig b/arch/arm64/configs/vendor/lito_defconfig
index 5d85d6d..4c43eb8 100644
--- a/arch/arm64/configs/vendor/lito_defconfig
+++ b/arch/arm64/configs/vendor/lito_defconfig
@@ -371,6 +371,7 @@
 CONFIG_QCOM_GENI_SE=y
 # CONFIG_QCOM_A53PLL is not set
 CONFIG_QCOM_CLK_RPMH=y
+CONFIG_MSM_CLK_AOP_QMP=y
 CONFIG_SM_GCC_LITO=y
 CONFIG_SM_VIDEOCC_LITO=y
 CONFIG_SM_CAMCC_LITO=y
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index b5a367d..30bb137 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -478,13 +478,13 @@
 	    addr < (unsigned long)__entry_text_end) ||
 	    (addr >= (unsigned long)__idmap_text_start &&
 	    addr < (unsigned long)__idmap_text_end) ||
+	    (addr >= (unsigned long)__hyp_text_start &&
+	    addr < (unsigned long)__hyp_text_end) ||
 	    !!search_exception_tables(addr))
 		return true;
 
 	if (!is_kernel_in_hyp_mode()) {
-		if ((addr >= (unsigned long)__hyp_text_start &&
-		    addr < (unsigned long)__hyp_text_end) ||
-		    (addr >= (unsigned long)__hyp_idmap_text_start &&
+		if ((addr >= (unsigned long)__hyp_idmap_text_start &&
 		    addr < (unsigned long)__hyp_idmap_text_end))
 			return true;
 	}
diff --git a/arch/mips/bcm63xx/dev-enet.c b/arch/mips/bcm63xx/dev-enet.c
index 07b4c65..8e73d65 100644
--- a/arch/mips/bcm63xx/dev-enet.c
+++ b/arch/mips/bcm63xx/dev-enet.c
@@ -70,6 +70,8 @@
 
 static int shared_device_registered;
 
+static u64 enet_dmamask = DMA_BIT_MASK(32);
+
 static struct resource enet0_res[] = {
 	{
 		.start		= -1, /* filled at runtime */
@@ -99,6 +101,8 @@
 	.resource	= enet0_res,
 	.dev		= {
 		.platform_data = &enet0_pd,
+		.dma_mask = &enet_dmamask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 	},
 };
 
@@ -131,6 +135,8 @@
 	.resource	= enet1_res,
 	.dev		= {
 		.platform_data = &enet1_pd,
+		.dma_mask = &enet_dmamask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 	},
 };
 
@@ -157,6 +163,8 @@
 	.resource	= enetsw_res,
 	.dev		= {
 		.platform_data = &enetsw_pd,
+		.dma_mask = &enet_dmamask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 	},
 };
 
diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts
index 50cff3c..4f7b1fa 100644
--- a/arch/mips/boot/dts/ingenic/ci20.dts
+++ b/arch/mips/boot/dts/ingenic/ci20.dts
@@ -76,7 +76,7 @@
 	status = "okay";
 
 	pinctrl-names = "default";
-	pinctrl-0 = <&pins_uart2>;
+	pinctrl-0 = <&pins_uart3>;
 };
 
 &uart4 {
@@ -196,9 +196,9 @@
 		bias-disable;
 	};
 
-	pins_uart2: uart2 {
-		function = "uart2";
-		groups = "uart2-data", "uart2-hwflow";
+	pins_uart3: uart3 {
+		function = "uart3";
+		groups = "uart3-data", "uart3-hwflow";
 		bias-disable;
 	};
 
diff --git a/arch/mips/kernel/cmpxchg.c b/arch/mips/kernel/cmpxchg.c
index 0b9535b..6b2a4a9 100644
--- a/arch/mips/kernel/cmpxchg.c
+++ b/arch/mips/kernel/cmpxchg.c
@@ -54,10 +54,9 @@
 unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
 			      unsigned long new, unsigned int size)
 {
-	u32 mask, old32, new32, load32;
+	u32 mask, old32, new32, load32, load;
 	volatile u32 *ptr32;
 	unsigned int shift;
-	u8 load;
 
 	/* Check that ptr is naturally aligned */
 	WARN_ON((unsigned long)ptr & (size - 1));
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index ba150c75..85b6c60 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -52,6 +52,7 @@
 void __init init_IRQ(void)
 {
 	int i;
+	unsigned int order = get_order(IRQ_STACK_SIZE);
 
 	for (i = 0; i < NR_IRQS; i++)
 		irq_set_noprobe(i);
@@ -62,8 +63,7 @@
 	arch_init_irq();
 
 	for_each_possible_cpu(i) {
-		int irq_pages = IRQ_STACK_SIZE / PAGE_SIZE;
-		void *s = (void *)__get_free_pages(GFP_KERNEL, irq_pages);
+		void *s = (void *)__get_free_pages(GFP_KERNEL, order);
 
 		irq_stack[i] = s;
 		pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i,
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index d4f7fd4..85522c1 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -371,7 +371,7 @@
 static int get_frame_info(struct mips_frame_info *info)
 {
 	bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
-	union mips_instruction insn, *ip, *ip_end;
+	union mips_instruction insn, *ip;
 	const unsigned int max_insns = 128;
 	unsigned int last_insn_size = 0;
 	unsigned int i;
@@ -384,10 +384,9 @@
 	if (!ip)
 		goto err;
 
-	ip_end = (void *)ip + info->func_size;
-
-	for (i = 0; i < max_insns && ip < ip_end; i++) {
+	for (i = 0; i < max_insns; i++) {
 		ip = (void *)ip + last_insn_size;
+
 		if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
 			insn.word = ip->halfword[0] << 16;
 			last_insn_size = 2;
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
index 252c009..9bda82ed 100644
--- a/arch/mips/net/ebpf_jit.c
+++ b/arch/mips/net/ebpf_jit.c
@@ -1818,7 +1818,7 @@
 
 	/* Update the icache */
 	flush_icache_range((unsigned long)ctx.target,
-			   (unsigned long)(ctx.target + ctx.idx * sizeof(u32)));
+			   (unsigned long)&ctx.target[ctx.idx]);
 
 	if (bpf_jit_enable > 1)
 		/* Dump JIT code */
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index 3fe4af8..c23578a 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -22,7 +22,7 @@
  * This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
-#define TASK_UNMAPPED_BASE	PAGE_ALIGN(TASK_SIZE >> 1)
+#define TASK_UNMAPPED_BASE	PAGE_ALIGN(TASK_SIZE / 3)
 
 #define STACK_TOP		TASK_SIZE
 #define STACK_TOP_MAX		STACK_TOP
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index b2d26d9..9713d4e 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -186,7 +186,7 @@
 	BUG_ON(mem_size == 0);
 
 	set_max_mapnr(PFN_DOWN(mem_size));
-	max_low_pfn = memblock_end_of_DRAM();
+	max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());
 
 #ifdef CONFIG_BLK_DEV_INITRD
 	setup_initrd();
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 58a522f..200a4b3 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -29,7 +29,8 @@
 	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
 
 #ifdef CONFIG_ZONE_DMA32
-	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, max_low_pfn));
+	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G,
+			(unsigned long) PFN_PHYS(max_low_pfn)));
 #endif
 	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 6403789..f105ae8 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -600,6 +600,14 @@
 	leal	TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax
 	movl	%eax, %cr3
 3:
+	/* Set EFER.LME=1 as a precaution in case hypervisor pulls the rug */
+	pushl	%ecx
+	movl	$MSR_EFER, %ecx
+	rdmsr
+	btsl	$_EFER_LME, %eax
+	wrmsr
+	popl	%ecx
+
 	/* Enable PAE and LA57 (if required) paging modes */
 	movl	$X86_CR4_PAE, %eax
 	cmpl	$0, %edx
diff --git a/arch/x86/boot/compressed/pgtable.h b/arch/x86/boot/compressed/pgtable.h
index 91f7563..6ff7e81 100644
--- a/arch/x86/boot/compressed/pgtable.h
+++ b/arch/x86/boot/compressed/pgtable.h
@@ -6,7 +6,7 @@
 #define TRAMPOLINE_32BIT_PGTABLE_OFFSET	0
 
 #define TRAMPOLINE_32BIT_CODE_OFFSET	PAGE_SIZE
-#define TRAMPOLINE_32BIT_CODE_SIZE	0x60
+#define TRAMPOLINE_32BIT_CODE_SIZE	0x70
 
 #define TRAMPOLINE_32BIT_STACK_END	TRAMPOLINE_32BIT_SIZE
 
diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
index 9e21573..f8debf7 100644
--- a/arch/x86/boot/compressed/pgtable_64.c
+++ b/arch/x86/boot/compressed/pgtable_64.c
@@ -1,5 +1,7 @@
+#include <linux/efi.h>
 #include <asm/e820/types.h>
 #include <asm/processor.h>
+#include <asm/efi.h>
 #include "pgtable.h"
 #include "../string.h"
 
@@ -37,9 +39,10 @@
 
 static unsigned long find_trampoline_placement(void)
 {
-	unsigned long bios_start, ebda_start;
+	unsigned long bios_start = 0, ebda_start = 0;
 	unsigned long trampoline_start;
 	struct boot_e820_entry *entry;
+	char *signature;
 	int i;
 
 	/*
@@ -47,8 +50,18 @@
 	 * This code is based on reserve_bios_regions().
 	 */
 
-	ebda_start = *(unsigned short *)0x40e << 4;
-	bios_start = *(unsigned short *)0x413 << 10;
+	/*
+	 * EFI systems may not provide legacy ROM. The memory may not be mapped
+	 * at all.
+	 *
+	 * Only look for values in the legacy ROM on non-EFI systems.
+	 */
+	signature = (char *)&boot_params->efi_info.efi_loader_signature;
+	if (strncmp(signature, EFI32_LOADER_SIGNATURE, 4) &&
+	    strncmp(signature, EFI64_LOADER_SIGNATURE, 4)) {
+		ebda_start = *(unsigned short *)0x40e << 4;
+		bios_start = *(unsigned short *)0x413 << 10;
+	}
 
 	if (bios_start < BIOS_START_MIN || bios_start > BIOS_START_MAX)
 		bios_start = BIOS_START_MAX;
diff --git a/arch/x86/configs/x86_64_cuttlefish_defconfig b/arch/x86/configs/x86_64_cuttlefish_defconfig
index 8229cf8..4b536d0 100644
--- a/arch/x86/configs/x86_64_cuttlefish_defconfig
+++ b/arch/x86/configs/x86_64_cuttlefish_defconfig
@@ -58,6 +58,7 @@
 # CONFIG_ACPI_FAN is not set
 # CONFIG_ACPI_THERMAL is not set
 # CONFIG_X86_PM_TIMER is not set
+CONFIG_CPU_FREQ_TIMES=y
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
 CONFIG_X86_ACPI_CPUFREQ=y
 CONFIG_PCI_MSI=y
@@ -96,6 +97,7 @@
 CONFIG_NET_IPVTI=y
 CONFIG_INET_ESP=y
 # CONFIG_INET_XFRM_MODE_BEET is not set
+CONFIG_INET_UDP_DIAG=y
 CONFIG_INET_DIAG_DESTROY=y
 CONFIG_TCP_CONG_ADVANCED=y
 # CONFIG_TCP_CONG_BIC is not set
@@ -128,6 +130,7 @@
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
 CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_CT=y
 CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
 CONFIG_NETFILTER_XT_TARGET_MARK=y
 CONFIG_NETFILTER_XT_TARGET_NFLOG=y
@@ -234,6 +237,7 @@
 CONFIG_PPP_BSDCOMP=y
 CONFIG_PPP_DEFLATE=y
 CONFIG_PPP_MPPE=y
+CONFIG_USB_RTL8152=y
 CONFIG_USB_USBNET=y
 # CONFIG_USB_NET_AX8817X is not set
 # CONFIG_USB_NET_AX88179_178A is not set
@@ -311,6 +315,12 @@
 CONFIG_DRM_VIRTIO_GPU=y
 CONFIG_SOUND=y
 CONFIG_SND=y
+CONFIG_SND_HRTIMER=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_VERBOSE_PROCFS is not set
+# CONFIG_SND_DRIVERS is not set
+CONFIG_SND_INTEL8X0=y
+# CONFIG_SND_USB is not set
 CONFIG_HIDRAW=y
 CONFIG_UHID=y
 CONFIG_HID_A4TECH=y
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index c04a881..a415543 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1970,7 +1970,7 @@
  */
 static void free_fake_cpuc(struct cpu_hw_events *cpuc)
 {
-	kfree(cpuc->shared_regs);
+	intel_cpuc_finish(cpuc);
 	kfree(cpuc);
 }
 
@@ -1982,14 +1982,11 @@
 	cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
 	if (!cpuc)
 		return ERR_PTR(-ENOMEM);
-
-	/* only needed, if we have extra_regs */
-	if (x86_pmu.extra_regs) {
-		cpuc->shared_regs = allocate_shared_regs(cpu);
-		if (!cpuc->shared_regs)
-			goto error;
-	}
 	cpuc->is_fake = 1;
+
+	if (intel_cpuc_prepare(cpuc, cpu))
+		goto error;
+
 	return cpuc;
 error:
 	free_fake_cpuc(cpuc);
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index fbd7551..12453cf 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -1995,6 +1995,39 @@
 	intel_pmu_enable_all(added);
 }
 
+static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
+{
+	u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;
+
+	if (cpuc->tfa_shadow != val) {
+		cpuc->tfa_shadow = val;
+		wrmsrl(MSR_TSX_FORCE_ABORT, val);
+	}
+}
+
+static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
+{
+	/*
+	 * We're going to use PMC3, make sure TFA is set before we touch it.
+	 */
+	if (cntr == 3 && !cpuc->is_fake)
+		intel_set_tfa(cpuc, true);
+}
+
+static void intel_tfa_pmu_enable_all(int added)
+{
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+	/*
+	 * If we find PMC3 is no longer used when we enable the PMU, we can
+	 * clear TFA.
+	 */
+	if (!test_bit(3, cpuc->active_mask))
+		intel_set_tfa(cpuc, false);
+
+	intel_pmu_enable_all(added);
+}
+
 static inline u64 intel_pmu_get_status(void)
 {
 	u64 status;
@@ -2653,6 +2686,35 @@
 }
 
 static struct event_constraint *
+dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
+{
+	WARN_ON_ONCE(!cpuc->constraint_list);
+
+	if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
+		struct event_constraint *cx;
+
+		/*
+		 * grab pre-allocated constraint entry
+		 */
+		cx = &cpuc->constraint_list[idx];
+
+		/*
+		 * initialize dynamic constraint
+		 * with static constraint
+		 */
+		*cx = *c;
+
+		/*
+		 * mark constraint as dynamic
+		 */
+		cx->flags |= PERF_X86_EVENT_DYNAMIC;
+		c = cx;
+	}
+
+	return c;
+}
+
+static struct event_constraint *
 intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
 			   int idx, struct event_constraint *c)
 {
@@ -2682,27 +2744,7 @@
 	 * only needed when constraint has not yet
 	 * been cloned (marked dynamic)
 	 */
-	if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
-		struct event_constraint *cx;
-
-		/*
-		 * grab pre-allocated constraint entry
-		 */
-		cx = &cpuc->constraint_list[idx];
-
-		/*
-		 * initialize dynamic constraint
-		 * with static constraint
-		 */
-		*cx = *c;
-
-		/*
-		 * mark constraint as dynamic, so we
-		 * can free it later on
-		 */
-		cx->flags |= PERF_X86_EVENT_DYNAMIC;
-		c = cx;
-	}
+	c = dyn_constraint(cpuc, c, idx);
 
 	/*
 	 * From here on, the constraint is dynamic.
@@ -3229,6 +3271,26 @@
 	return c;
 }
 
+static bool allow_tsx_force_abort = true;
+
+static struct event_constraint *
+tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+			  struct perf_event *event)
+{
+	struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
+
+	/*
+	 * Without TFA we must not use PMC3.
+	 */
+	if (!allow_tsx_force_abort && test_bit(3, c->idxmsk) && idx >= 0) {
+		c = dyn_constraint(cpuc, c, idx);
+		c->idxmsk64 &= ~(1ULL << 3);
+		c->weight--;
+	}
+
+	return c;
+}
+
 /*
  * Broadwell:
  *
@@ -3282,7 +3344,7 @@
 	return x86_event_sysfs_show(page, config, event);
 }
 
-struct intel_shared_regs *allocate_shared_regs(int cpu)
+static struct intel_shared_regs *allocate_shared_regs(int cpu)
 {
 	struct intel_shared_regs *regs;
 	int i;
@@ -3314,23 +3376,24 @@
 	return c;
 }
 
-static int intel_pmu_cpu_prepare(int cpu)
-{
-	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
 
+int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
+{
 	if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
 		cpuc->shared_regs = allocate_shared_regs(cpu);
 		if (!cpuc->shared_regs)
 			goto err;
 	}
 
-	if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
+	if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) {
 		size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
 
-		cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
+		cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
 		if (!cpuc->constraint_list)
 			goto err_shared_regs;
+	}
 
+	if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
 		cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
 		if (!cpuc->excl_cntrs)
 			goto err_constraint_list;
@@ -3352,6 +3415,11 @@
 	return -ENOMEM;
 }
 
+static int intel_pmu_cpu_prepare(int cpu)
+{
+	return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
+}
+
 static void flip_smm_bit(void *data)
 {
 	unsigned long set = *(unsigned long *)data;
@@ -3423,9 +3491,8 @@
 	}
 }
 
-static void free_excl_cntrs(int cpu)
+static void free_excl_cntrs(struct cpu_hw_events *cpuc)
 {
-	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
 	struct intel_excl_cntrs *c;
 
 	c = cpuc->excl_cntrs;
@@ -3433,9 +3500,10 @@
 		if (c->core_id == -1 || --c->refcnt == 0)
 			kfree(c);
 		cpuc->excl_cntrs = NULL;
-		kfree(cpuc->constraint_list);
-		cpuc->constraint_list = NULL;
 	}
+
+	kfree(cpuc->constraint_list);
+	cpuc->constraint_list = NULL;
 }
 
 static void intel_pmu_cpu_dying(int cpu)
@@ -3443,9 +3511,8 @@
 	fini_debug_store_on_cpu(cpu);
 }
 
-static void intel_pmu_cpu_dead(int cpu)
+void intel_cpuc_finish(struct cpu_hw_events *cpuc)
 {
-	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
 	struct intel_shared_regs *pc;
 
 	pc = cpuc->shared_regs;
@@ -3455,7 +3522,12 @@
 		cpuc->shared_regs = NULL;
 	}
 
-	free_excl_cntrs(cpu);
+	free_excl_cntrs(cpuc);
+}
+
+static void intel_pmu_cpu_dead(int cpu)
+{
+	intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
 }
 
 static void intel_pmu_sched_task(struct perf_event_context *ctx,
@@ -3917,8 +3989,11 @@
        NULL
 };
 
+static DEVICE_BOOL_ATTR(allow_tsx_force_abort, 0644, allow_tsx_force_abort);
+
 static struct attribute *intel_pmu_attrs[] = {
 	&dev_attr_freeze_on_smi.attr,
+	NULL, /* &dev_attr_allow_tsx_force_abort.attr.attr */
 	NULL,
 };
 
@@ -4374,6 +4449,15 @@
 		x86_pmu.cpu_events = get_hsw_events_attrs();
 		intel_pmu_pebs_data_source_skl(
 			boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
+
+		if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
+			x86_pmu.flags |= PMU_FL_TFA;
+			x86_pmu.get_event_constraints = tfa_get_event_constraints;
+			x86_pmu.enable_all = intel_tfa_pmu_enable_all;
+			x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
+			intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr.attr;
+		}
+
 		pr_cont("Skylake events, ");
 		name = "skylake";
 		break;
@@ -4515,7 +4599,7 @@
 	hardlockup_detector_perf_restart();
 
 	for_each_online_cpu(c)
-		free_excl_cntrs(c);
+		free_excl_cntrs(&per_cpu(cpu_hw_events, c));
 
 	cpus_read_unlock();
 	pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 0ee3a44..42a3628 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -243,6 +243,11 @@
 	int excl_thread_id; /* 0 or 1 */
 
 	/*
+	 * SKL TSX_FORCE_ABORT shadow
+	 */
+	u64				tfa_shadow;
+
+	/*
 	 * AMD specific bits
 	 */
 	struct amd_nb			*amd_nb;
@@ -679,6 +684,7 @@
 #define PMU_FL_EXCL_CNTRS	0x4 /* has exclusive counter requirements  */
 #define PMU_FL_EXCL_ENABLED	0x8 /* exclusive counter active */
 #define PMU_FL_PEBS_ALL		0x10 /* all events are valid PEBS events */
+#define PMU_FL_TFA		0x20 /* deal with TSX force abort */
 
 #define EVENT_VAR(_id)  event_attr_##_id
 #define EVENT_PTR(_id) &event_attr_##_id.attr.attr
@@ -887,7 +893,8 @@
 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
 			  struct perf_event *event);
 
-struct intel_shared_regs *allocate_shared_regs(int cpu);
+extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
+extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);
 
 int intel_pmu_init(void);
 
@@ -1023,9 +1030,13 @@
 	return 0;
 }
 
-static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
+static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
 {
-	return NULL;
+	return 0;
+}
+
+static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc)
+{
 }
 
 static inline int is_ht_workaround_enabled(void)
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 89a048c..7b31ee5 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -340,6 +340,7 @@
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
 #define X86_FEATURE_AVX512_4VNNIW	(18*32+ 2) /* AVX-512 Neural Network Instructions */
 #define X86_FEATURE_AVX512_4FMAPS	(18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_TSX_FORCE_ABORT	(18*32+13) /* "" TSX_FORCE_ABORT */
 #define X86_FEATURE_PCONFIG		(18*32+18) /* Intel PCONFIG */
 #define X86_FEATURE_SPEC_CTRL		(18*32+26) /* "" Speculation Control (IBRS + IBPB) */
 #define X86_FEATURE_INTEL_STIBP		(18*32+27) /* "" Single Thread Indirect Branch Predictors */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 1f9de76..f14ca0b 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -629,6 +629,12 @@
 
 #define MSR_IA32_TSC_DEADLINE		0x000006E0
 
+
+#define MSR_TSX_FORCE_ABORT		0x0000010F
+
+#define MSR_TFA_RTM_FORCE_ABORT_BIT	0
+#define MSR_TFA_RTM_FORCE_ABORT		BIT_ULL(MSR_TFA_RTM_FORCE_ABORT_BIT)
+
 /* P4/Xeon+ specific */
 #define MSR_IA32_MCG_EAX		0x00000180
 #define MSR_IA32_MCG_EBX		0x00000181
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index b99d497..0b6352a 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -7,7 +7,11 @@
 #endif
 
 #ifdef CONFIG_KASAN
+#ifdef CONFIG_KASAN_EXTRA
+#define KASAN_STACK_ORDER 2
+#else
 #define KASAN_STACK_ORDER 1
+#endif
 #else
 #define KASAN_STACK_ORDER 0
 #endif
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index aae77eb..4111edb 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -293,8 +293,7 @@
 		__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);	\
 		break;							\
 	case 8:								\
-		__put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval,	\
-				   errret);				\
+		__put_user_asm_u64(x, ptr, retval, errret);		\
 		break;							\
 	default:							\
 		__put_user_bad();					\
@@ -440,8 +439,10 @@
 #define __put_user_nocheck(x, ptr, size)			\
 ({								\
 	int __pu_err;						\
+	__typeof__(*(ptr)) __pu_val;				\
+	__pu_val = x;						\
 	__uaccess_begin();					\
-	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT);	\
+	__put_user_size(__pu_val, (ptr), (size), __pu_err, -EFAULT);\
 	__uaccess_end();					\
 	__builtin_expect(__pu_err, 0);				\
 })
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 7654feb..652e7ff 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -313,14 +313,13 @@
 	struct apic_chip_data *apicd = apic_chip_data(irqd);
 	int vector, cpu;
 
-	cpumask_and(vector_searchmask, vector_searchmask, affmsk);
-	cpu = cpumask_first(vector_searchmask);
-	if (cpu >= nr_cpu_ids)
-		return -EINVAL;
+	cpumask_and(vector_searchmask, dest, affmsk);
+
 	/* set_affinity might call here for nothing */
 	if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask))
 		return 0;
-	vector = irq_matrix_alloc_managed(vector_matrix, cpu);
+	vector = irq_matrix_alloc_managed(vector_matrix, vector_searchmask,
+					  &cpu);
 	trace_vector_alloc_managed(irqd->irq, vector, vector);
 	if (vector < 0)
 		return vector;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index eeea634..6a25278 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -818,11 +818,9 @@
 static void init_amd_zn(struct cpuinfo_x86 *c)
 {
 	set_cpu_cap(c, X86_FEATURE_ZEN);
-	/*
-	 * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
-	 * all up to and including B1.
-	 */
-	if (c->x86_model <= 1 && c->x86_stepping <= 1)
+
+	/* Fix erratum 1076: CPB feature bit not being set in CPUID. */
+	if (!cpu_has(c, X86_FEATURE_CPB))
 		set_cpu_cap(c, X86_FEATURE_CPB);
 }
 
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 07b5fc0..a4e7e10 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -707,7 +707,7 @@
 	if (!p) {
 		return ret;
 	} else {
-		if (boot_cpu_data.microcode == p->patch_id)
+		if (boot_cpu_data.microcode >= p->patch_id)
 			return ret;
 
 		ret = UCODE_NEW;
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index 278cd07..9490a28 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -167,6 +167,9 @@
 	struct efi_info *current_ei = &boot_params.efi_info;
 	struct efi_info *ei = &params->efi_info;
 
+	if (!efi_enabled(EFI_RUNTIME_SERVICES))
+		return 0;
+
 	if (!current_ei->efi_memmap_size)
 		return 0;
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ee8f8d7..b475419 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3399,6 +3399,14 @@
 	kvm_mmu_reset_context(&svm->vcpu);
 	kvm_mmu_load(&svm->vcpu);
 
+	/*
+	 * Drop what we picked up for L2 via svm_complete_interrupts() so it
+	 * doesn't end up in L1.
+	 */
+	svm->vcpu.arch.nmi_injected = false;
+	kvm_clear_exception_queue(&svm->vcpu);
+	kvm_clear_interrupt_queue(&svm->vcpu);
+
 	return 0;
 }
 
@@ -4485,25 +4493,14 @@
 		kvm_lapic_reg_write(apic, APIC_ICR, icrl);
 		break;
 	case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
-		int i;
-		struct kvm_vcpu *vcpu;
-		struct kvm *kvm = svm->vcpu.kvm;
 		struct kvm_lapic *apic = svm->vcpu.arch.apic;
 
 		/*
-		 * At this point, we expect that the AVIC HW has already
-		 * set the appropriate IRR bits on the valid target
-		 * vcpus. So, we just need to kick the appropriate vcpu.
+		 * Update ICR high and low, then emulate sending IPI,
+		 * which is handled when writing APIC_ICR.
 		 */
-		kvm_for_each_vcpu(i, vcpu, kvm) {
-			bool m = kvm_apic_match_dest(vcpu, apic,
-						     icrl & KVM_APIC_SHORT_MASK,
-						     GET_APIC_DEST_FIELD(icrh),
-						     icrl & KVM_APIC_DEST_MASK);
-
-			if (m && !avic_vcpu_is_running(vcpu))
-				kvm_vcpu_wake_up(vcpu);
-		}
+		kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
+		kvm_lapic_reg_write(apic, APIC_ICR, icrl);
 		break;
 	}
 	case AVIC_IPI_FAILURE_INVALID_TARGET:
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index 7ae3686..c9faf34 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -157,8 +157,8 @@
 	pmd = pmd_offset(pud, ppd->vaddr);
 	if (pmd_none(*pmd)) {
 		pte = ppd->pgtable_area;
-		memset(pte, 0, sizeof(pte) * PTRS_PER_PTE);
-		ppd->pgtable_area += sizeof(pte) * PTRS_PER_PTE;
+		memset(pte, 0, sizeof(*pte) * PTRS_PER_PTE);
+		ppd->pgtable_area += sizeof(*pte) * PTRS_PER_PTE;
 		set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte)));
 	}
 
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 13f4485..bd372e8 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -641,6 +641,22 @@
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334c, quirk_no_aersid);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334d, quirk_no_aersid);
 
+static void quirk_intel_th_dnv(struct pci_dev *dev)
+{
+	struct resource *r = &dev->resource[4];
+
+	/*
+	 * Denverton reports 2k of RTIT_BAR (intel_th resource 4), which
+	 * appears to be 4 MB in reality.
+	 */
+	if (r->end == r->start + 0x7ff) {
+		r->start = 0;
+		r->end   = 0x3fffff;
+		r->flags |= IORESOURCE_UNSET;
+	}
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x19e1, quirk_intel_th_dnv);
+
 #ifdef CONFIG_PHYS_ADDR_T_64BIT
 
 #define AMD_141b_MMIO_BASE(x)	(0x80 + (x) * 0x8)
diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig
index 11fed6c..b593816 100644
--- a/arch/xtensa/configs/smp_lx200_defconfig
+++ b/arch/xtensa/configs/smp_lx200_defconfig
@@ -33,6 +33,7 @@
 CONFIG_HOTPLUG_CPU=y
 # CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set
 # CONFIG_PCI is not set
+CONFIG_VECTORS_OFFSET=0x00002000
 CONFIG_XTENSA_PLATFORM_XTFPGA=y
 CONFIG_CMDLINE_BOOL=y
 CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=96M@0"
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index 9053a56..5bd38ea 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -280,12 +280,13 @@
 
 	movi	a2, cpu_start_ccount
 1:
+	memw
 	l32i	a3, a2, 0
 	beqi	a3, 0, 1b
 	movi	a3, 0
 	s32i	a3, a2, 0
-	memw
 1:
+	memw
 	l32i	a3, a2, 0
 	beqi	a3, 0, 1b
 	wsr	a3, ccount
@@ -321,11 +322,13 @@
 	rsr	a0, prid
 	neg	a2, a0
 	movi	a3, cpu_start_id
+	memw
 	s32i	a2, a3, 0
 #if XCHAL_DCACHE_IS_WRITEBACK
 	dhwbi	a3, 0
 #endif
 1:
+	memw
 	l32i	a2, a3, 0
 	dhi	a3, 0
 	bne	a2, a0, 1b
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 4bb6813..5a0e0bd 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -320,8 +320,8 @@
 
 		/* Stack layout: sp-4: ra, sp-3: sp' */
 
-		pc = MAKE_PC_FROM_RA(*(unsigned long*)sp - 4, sp);
-		sp = *(unsigned long *)sp - 3;
+		pc = MAKE_PC_FROM_RA(SPILL_SLOT(sp, 0), sp);
+		sp = SPILL_SLOT(sp, 1);
 	} while (count++ < 16);
 	return 0;
 }
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
index 932d646..be1f280 100644
--- a/arch/xtensa/kernel/smp.c
+++ b/arch/xtensa/kernel/smp.c
@@ -83,7 +83,7 @@
 {
 	unsigned i;
 
-	for (i = 0; i < max_cpus; ++i)
+	for_each_possible_cpu(i)
 		set_cpu_present(i, true);
 }
 
@@ -96,6 +96,11 @@
 	pr_info("%s: Core Count = %d\n", __func__, ncpus);
 	pr_info("%s: Core Id = %d\n", __func__, core_id);
 
+	if (ncpus > NR_CPUS) {
+		ncpus = NR_CPUS;
+		pr_info("%s: limiting core count by %d\n", __func__, ncpus);
+	}
+
 	for (i = 0; i < ncpus; ++i)
 		set_cpu_possible(i, true);
 }
@@ -195,9 +200,11 @@
 	int i;
 
 #ifdef CONFIG_HOTPLUG_CPU
-	cpu_start_id = cpu;
-	system_flush_invalidate_dcache_range(
-			(unsigned long)&cpu_start_id, sizeof(cpu_start_id));
+	WRITE_ONCE(cpu_start_id, cpu);
+	/* Pairs with the third memw in the cpu_restart */
+	mb();
+	system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id,
+					     sizeof(cpu_start_id));
 #endif
 	smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);
 
@@ -206,18 +213,21 @@
 			ccount = get_ccount();
 		while (!ccount);
 
-		cpu_start_ccount = ccount;
+		WRITE_ONCE(cpu_start_ccount, ccount);
 
-		while (time_before(jiffies, timeout)) {
+		do {
+			/*
+			 * Pairs with the first two memws in the
+			 * .Lboot_secondary.
+			 */
 			mb();
-			if (!cpu_start_ccount)
-				break;
-		}
+			ccount = READ_ONCE(cpu_start_ccount);
+		} while (ccount && time_before(jiffies, timeout));
 
-		if (cpu_start_ccount) {
+		if (ccount) {
 			smp_call_function_single(0, mx_cpu_stop,
-					(void *)cpu, 1);
-			cpu_start_ccount = 0;
+						 (void *)cpu, 1);
+			WRITE_ONCE(cpu_start_ccount, 0);
 			return -EIO;
 		}
 	}
@@ -237,6 +247,7 @@
 	pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
 			__func__, cpu, idle, start_info.stack);
 
+	init_completion(&cpu_running);
 	ret = boot_secondary(cpu, idle);
 	if (ret == 0) {
 		wait_for_completion_timeout(&cpu_running,
@@ -298,8 +309,10 @@
 	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
 	while (time_before(jiffies, timeout)) {
 		system_invalidate_dcache_range((unsigned long)&cpu_start_id,
-				sizeof(cpu_start_id));
-		if (cpu_start_id == -cpu) {
+					       sizeof(cpu_start_id));
+		/* Pairs with the second memw in the cpu_restart */
+		mb();
+		if (READ_ONCE(cpu_start_id) == -cpu) {
 			platform_cpu_kill(cpu);
 			return;
 		}
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index fd524a5..378186b 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -89,7 +89,7 @@
 		container_of(evt, struct ccount_timer, evt);
 
 	if (timer->irq_enabled) {
-		disable_irq(evt->irq);
+		disable_irq_nosync(evt->irq);
 		timer->irq_enabled = 0;
 	}
 	return 0;
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index 19923f8..b154e05 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -72,6 +72,7 @@
 #include <linux/sched/loadavg.h>
 #include <linux/sched/signal.h>
 #include <trace/events/block.h>
+#include <linux/blk-mq.h>
 #include "blk-rq-qos.h"
 #include "blk-stat.h"
 
@@ -568,6 +569,9 @@
 		return;
 
 	enabled = blk_iolatency_enabled(iolat->blkiolat);
+	if (!enabled)
+		return;
+
 	while (blkg && blkg->parent) {
 		iolat = blkg_to_lat(blkg);
 		if (!iolat) {
@@ -577,7 +581,7 @@
 		rqw = &iolat->rq_wait;
 
 		atomic_dec(&rqw->inflight);
-		if (!enabled || iolat->min_lat_nsec == 0)
+		if (iolat->min_lat_nsec == 0)
 			goto next;
 		iolatency_record_time(iolat, &bio->bi_issue, now,
 				      issue_as_root);
@@ -721,10 +725,13 @@
 	return 0;
 }
 
-static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
+/*
+ * return 1 for enabling iolatency, return -1 for disabling iolatency, otherwise
+ * return 0.
+ */
+static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
 {
 	struct iolatency_grp *iolat = blkg_to_lat(blkg);
-	struct blk_iolatency *blkiolat = iolat->blkiolat;
 	u64 oldval = iolat->min_lat_nsec;
 
 	iolat->min_lat_nsec = val;
@@ -733,9 +740,10 @@
 				    BLKIOLATENCY_MAX_WIN_SIZE);
 
 	if (!oldval && val)
-		atomic_inc(&blkiolat->enabled);
+		return 1;
 	if (oldval && !val)
-		atomic_dec(&blkiolat->enabled);
+		return -1;
+	return 0;
 }
 
 static void iolatency_clear_scaling(struct blkcg_gq *blkg)
@@ -768,6 +776,7 @@
 	u64 lat_val = 0;
 	u64 oldval;
 	int ret;
+	int enable = 0;
 
 	ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
 	if (ret)
@@ -803,7 +812,12 @@
 	blkg = ctx.blkg;
 	oldval = iolat->min_lat_nsec;
 
-	iolatency_set_min_lat_nsec(blkg, lat_val);
+	enable = iolatency_set_min_lat_nsec(blkg, lat_val);
+	if (enable) {
+		WARN_ON_ONCE(!blk_get_queue(blkg->q));
+		blkg_get(blkg);
+	}
+
 	if (oldval != iolat->min_lat_nsec) {
 		iolatency_clear_scaling(blkg);
 	}
@@ -811,6 +825,24 @@
 	ret = 0;
 out:
 	blkg_conf_finish(&ctx);
+	if (ret == 0 && enable) {
+		struct iolatency_grp *tmp = blkg_to_lat(blkg);
+		struct blk_iolatency *blkiolat = tmp->blkiolat;
+
+		blk_mq_freeze_queue(blkg->q);
+
+		if (enable == 1)
+			atomic_inc(&blkiolat->enabled);
+		else if (enable == -1)
+			atomic_dec(&blkiolat->enabled);
+		else
+			WARN_ON_ONCE(1);
+
+		blk_mq_unfreeze_queue(blkg->q);
+
+		blkg_put(blkg);
+		blk_put_queue(blkg->q);
+	}
 	return ret ?: nbytes;
 }
 
@@ -910,8 +942,14 @@
 {
 	struct iolatency_grp *iolat = pd_to_lat(pd);
 	struct blkcg_gq *blkg = lat_to_blkg(iolat);
+	struct blk_iolatency *blkiolat = iolat->blkiolat;
+	int ret;
 
-	iolatency_set_min_lat_nsec(blkg, 0);
+	ret = iolatency_set_min_lat_nsec(blkg, 0);
+	if (ret == 1)
+		atomic_inc(&blkiolat->enabled);
+	if (ret == -1)
+		atomic_dec(&blkiolat->enabled);
 	iolatency_clear_scaling(blkg);
 }
 
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 6495abf..68fcda4 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -358,6 +358,7 @@
  * @min_priority:         minimum scheduling priority
  *                        (invariant after initialized)
  * @inherit_rt:           inherit RT scheduling policy from caller
+ * @txn_security_ctx:     require sender's security context
  *                        (invariant after initialized)
  * @async_todo:           list of async work items
  *                        (protected by @proc->inner_lock)
@@ -397,6 +398,7 @@
 		u8 sched_policy:2;
 		u8 inherit_rt:1;
 		u8 accept_fds:1;
+		u8 txn_security_ctx:1;
 		u8 min_priority;
 	};
 	bool has_async_transaction;
@@ -654,6 +656,7 @@
 	struct binder_priority	saved_priority;
 	bool    set_priority_called;
 	kuid_t	sender_euid;
+	binder_uintptr_t security_ctx;
 	/**
 	 * @lock:  protects @from, @to_proc, and @to_thread
 	 *
@@ -1363,6 +1366,7 @@
 	node->min_priority = to_kernel_prio(node->sched_policy, priority);
 	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
 	node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
+	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
 	spin_lock_init(&node->lock);
 	INIT_LIST_HEAD(&node->work.entry);
 	INIT_LIST_HEAD(&node->async_todo);
@@ -2900,6 +2904,8 @@
 	binder_size_t last_fixup_min_off = 0;
 	struct binder_context *context = proc->context;
 	int t_debug_id = atomic_inc_return(&binder_last_id);
+	char *secctx = NULL;
+	u32 secctx_sz = 0;
 
 	e = binder_transaction_log_add(&binder_transaction_log);
 	e->debug_id = t_debug_id;
@@ -3123,6 +3129,20 @@
 		t->priority = target_proc->default_priority;
 	}
 
+	if (target_node && target_node->txn_security_ctx) {
+		u32 secid;
+
+		security_task_getsecid(proc->tsk, &secid);
+		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
+		if (ret) {
+			return_error = BR_FAILED_REPLY;
+			return_error_param = ret;
+			return_error_line = __LINE__;
+			goto err_get_secctx_failed;
+		}
+		extra_buffers_size += ALIGN(secctx_sz, sizeof(u64));
+	}
+
 	trace_binder_transaction(reply, t, target_node);
 
 	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
@@ -3139,6 +3159,19 @@
 		t->buffer = NULL;
 		goto err_binder_alloc_buf_failed;
 	}
+	if (secctx) {
+		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
+				    ALIGN(tr->offsets_size, sizeof(void *)) +
+				    ALIGN(extra_buffers_size, sizeof(void *)) -
+				    ALIGN(secctx_sz, sizeof(u64));
+		char *kptr = t->buffer->data + buf_offset;
+
+		t->security_ctx = (uintptr_t)kptr +
+		    binder_alloc_get_user_buffer_offset(&target_proc->alloc);
+		memcpy(kptr, secctx, secctx_sz);
+		security_release_secctx(secctx, secctx_sz);
+		secctx = NULL;
+	}
 	t->buffer->debug_id = t->debug_id;
 	t->buffer->transaction = t;
 	t->buffer->target_node = target_node;
@@ -3409,6 +3442,9 @@
 	t->buffer->transaction = NULL;
 	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
 err_binder_alloc_buf_failed:
+	if (secctx)
+		security_release_secctx(secctx, secctx_sz);
+err_get_secctx_failed:
 	kfree(tcomplete);
 	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
 err_alloc_tcomplete_failed:
@@ -4055,11 +4091,13 @@
 
 	while (1) {
 		uint32_t cmd;
-		struct binder_transaction_data tr;
+		struct binder_transaction_data_secctx tr;
+		struct binder_transaction_data *trd = &tr.transaction_data;
 		struct binder_work *w = NULL;
 		struct list_head *list = NULL;
 		struct binder_transaction *t = NULL;
 		struct binder_thread *t_from;
+		size_t trsize = sizeof(*trd);
 
 		binder_inner_proc_lock(proc);
 		if (!binder_worklist_empty_ilocked(&thread->todo))
@@ -4255,41 +4293,47 @@
 			struct binder_node *target_node = t->buffer->target_node;
 			struct binder_priority node_prio;
 
-			tr.target.ptr = target_node->ptr;
-			tr.cookie =  target_node->cookie;
+			trd->target.ptr = target_node->ptr;
+			trd->cookie =  target_node->cookie;
 			node_prio.sched_policy = target_node->sched_policy;
 			node_prio.prio = target_node->min_priority;
 			binder_transaction_priority(current, t, node_prio,
 						    target_node->inherit_rt);
 			cmd = BR_TRANSACTION;
 		} else {
-			tr.target.ptr = 0;
-			tr.cookie = 0;
+			trd->target.ptr = 0;
+			trd->cookie = 0;
 			cmd = BR_REPLY;
 		}
-		tr.code = t->code;
-		tr.flags = t->flags;
-		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
+		trd->code = t->code;
+		trd->flags = t->flags;
+		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
 
 		t_from = binder_get_txn_from(t);
 		if (t_from) {
 			struct task_struct *sender = t_from->proc->tsk;
 
-			tr.sender_pid = task_tgid_nr_ns(sender,
-							task_active_pid_ns(current));
+			trd->sender_pid =
+				task_tgid_nr_ns(sender,
+						task_active_pid_ns(current));
 		} else {
-			tr.sender_pid = 0;
+			trd->sender_pid = 0;
 		}
 
-		tr.data_size = t->buffer->data_size;
-		tr.offsets_size = t->buffer->offsets_size;
-		tr.data.ptr.buffer = (binder_uintptr_t)
+		trd->data_size = t->buffer->data_size;
+		trd->offsets_size = t->buffer->offsets_size;
+		trd->data.ptr.buffer = (binder_uintptr_t)
 			((uintptr_t)t->buffer->data +
 			binder_alloc_get_user_buffer_offset(&proc->alloc));
-		tr.data.ptr.offsets = tr.data.ptr.buffer +
+		trd->data.ptr.offsets = trd->data.ptr.buffer +
 					ALIGN(t->buffer->data_size,
 					    sizeof(void *));
 
+		tr.secctx = t->security_ctx;
+		if (t->security_ctx) {
+			cmd = BR_TRANSACTION_SEC_CTX;
+			trsize = sizeof(tr);
+		}
 		if (put_user(cmd, (uint32_t __user *)ptr)) {
 			if (t_from)
 				binder_thread_dec_tmpref(t_from);
@@ -4300,7 +4344,7 @@
 			return -EFAULT;
 		}
 		ptr += sizeof(uint32_t);
-		if (copy_to_user(ptr, &tr, sizeof(tr))) {
+		if (copy_to_user(ptr, &tr, trsize)) {
 			if (t_from)
 				binder_thread_dec_tmpref(t_from);
 
@@ -4309,7 +4353,7 @@
 
 			return -EFAULT;
 		}
-		ptr += sizeof(tr);
+		ptr += trsize;
 
 		trace_binder_transaction_received(t);
 		binder_stat_br(proc, thread, cmd);
@@ -4317,16 +4361,18 @@
 			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
 			     proc->pid, thread->pid,
 			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
-			     "BR_REPLY",
+				(cmd == BR_TRANSACTION_SEC_CTX) ?
+				     "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
 			     t->debug_id, t_from ? t_from->proc->pid : 0,
 			     t_from ? t_from->pid : 0, cmd,
 			     t->buffer->data_size, t->buffer->offsets_size,
-			     (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
+			     (u64)trd->data.ptr.buffer,
+			     (u64)trd->data.ptr.offsets);
 
 		if (t_from)
 			binder_thread_dec_tmpref(t_from);
 		t->buffer->allow_user_free = 1;
-		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
+		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
 			binder_inner_proc_lock(thread->proc);
 			t->to_parent = thread->transaction_stack;
 			t->to_thread = thread;
@@ -4671,7 +4717,8 @@
 	return ret;
 }
 
-static int binder_ioctl_set_ctx_mgr(struct file *filp)
+static int binder_ioctl_set_ctx_mgr(struct file *filp,
+				    struct flat_binder_object *fbo)
 {
 	int ret = 0;
 	struct binder_proc *proc = filp->private_data;
@@ -4700,7 +4747,7 @@
 	} else {
 		context->binder_context_mgr_uid = curr_euid;
 	}
-	new_node = binder_new_node(proc, NULL);
+	new_node = binder_new_node(proc, fbo);
 	if (!new_node) {
 		ret = -ENOMEM;
 		goto out;
@@ -4823,8 +4870,20 @@
 		binder_inner_proc_unlock(proc);
 		break;
 	}
+	case BINDER_SET_CONTEXT_MGR_EXT: {
+		struct flat_binder_object fbo;
+
+		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
+			ret = -EINVAL;
+			goto err;
+		}
+		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
+		if (ret)
+			goto err;
+		break;
+	}
 	case BINDER_SET_CONTEXT_MGR:
-		ret = binder_ioctl_set_ctx_mgr(filp);
+		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
 		if (ret)
 			goto err;
 		break;
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 467c43c..e378af5 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -984,9 +984,9 @@
 			drv->remove(dev);
 
 		device_links_driver_cleanup(dev);
-		dma_deconfigure(dev);
 
 		devres_release_all(dev);
+		dma_deconfigure(dev);
 		dev->driver = NULL;
 		dev_set_drvdata(dev, NULL);
 		if (dev->pm_domain && dev->pm_domain->dismiss)
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 7f9ea8e..1342f8e 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -544,10 +544,9 @@
 					    hdev->bus);
 
 	if (!btrtl_dev->ic_info) {
-		rtl_dev_err(hdev, "rtl: unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x",
+		rtl_dev_info(hdev, "rtl: unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x",
 			    lmp_subver, hci_rev, hci_ver);
-		ret = -EINVAL;
-		goto err_free;
+		return btrtl_dev;
 	}
 
 	if (btrtl_dev->ic_info->has_rom_version) {
@@ -602,6 +601,11 @@
 	 * standard btusb. Once that firmware is uploaded, the subver changes
 	 * to a different value.
 	 */
+	if (!btrtl_dev->ic_info) {
+		rtl_dev_info(hdev, "rtl: assuming no firmware upload needed\n");
+		return 0;
+	}
+
 	switch (btrtl_dev->ic_info->lmp_subver) {
 	case RTL_ROM_LMP_8723A:
 	case RTL_ROM_LMP_3499:
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 35c50ac..f94f335 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -3185,7 +3185,7 @@
 		return err;
 
 	snprintf(strpid, PID_SIZE, "%d", current->pid);
-	buf_size = strlen(current->comm) + strlen(strpid) + 1;
+	buf_size = strlen(current->comm) + strlen("_") + strlen(strpid) + 1;
 	VERIFY(err, NULL != (fl->debug_buf = kzalloc(buf_size, GFP_KERNEL)));
 	if (err) {
 		kfree(fl);
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index c0a5b1f..4ccc39e 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -32,6 +32,7 @@
 #include <linux/wait.h>
 #include <linux/init.h>
 #include <linux/fs.h>
+#include <linux/nospec.h>
 
 #include <asm/io.h>
 #include <linux/uaccess.h>
@@ -386,7 +387,11 @@
 	TicCard = st_loc.tic_des_from_pc;	/* tic number to send            */
 	IndexCard = NumCard - 1;
 
-	if((NumCard < 1) || (NumCard > MAX_BOARD) || !apbs[IndexCard].RamIO)
+	if (IndexCard >= MAX_BOARD)
+		return -EINVAL;
+	IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
+
+	if (!apbs[IndexCard].RamIO)
 		return -EINVAL;
 
 #ifdef DEBUG
@@ -697,6 +702,7 @@
 	unsigned char IndexCard;
 	void __iomem *pmem;
 	int ret = 0;
+	static int warncount = 10;
 	volatile unsigned char byte_reset_it;
 	struct st_ram_io *adgl;
 	void __user *argp = (void __user *)arg;
@@ -711,16 +717,12 @@
 	mutex_lock(&ac_mutex);	
 	IndexCard = adgl->num_card-1;
 	 
-	if(cmd != 6 && ((IndexCard >= MAX_BOARD) || !apbs[IndexCard].RamIO)) {
-		static int warncount = 10;
-		if (warncount) {
-			printk( KERN_WARNING "APPLICOM driver IOCTL, bad board number %d\n",(int)IndexCard+1);
-			warncount--;
-		}
-		kfree(adgl);
-		mutex_unlock(&ac_mutex);
-		return -EINVAL;
-	}
+	if (cmd != 6 && IndexCard >= MAX_BOARD)
+		goto err;
+	IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
+
+	if (cmd != 6 && !apbs[IndexCard].RamIO)
+		goto err;
 
 	switch (cmd) {
 		
@@ -838,5 +840,16 @@
 	kfree(adgl);
 	mutex_unlock(&ac_mutex);
 	return 0;
+
+err:
+	if (warncount) {
+		pr_warn("APPLICOM driver IOCTL, bad board number %d\n",
+			(int)IndexCard + 1);
+		warncount--;
+	}
+	kfree(adgl);
+	mutex_unlock(&ac_mutex);
+	return -EINVAL;
+
 }
 
diff --git a/drivers/char/diag/diagfwd_socket.c b/drivers/char/diag/diagfwd_socket.c
index 8d216d4..27c1f64 100644
--- a/drivers/char/diag/diagfwd_socket.c
+++ b/drivers/char/diag/diagfwd_socket.c
@@ -712,7 +712,12 @@
 		if (err || pkt_len < 0)
 			break;
 		spin_lock_irqsave(&info->lock, flags);
-		info->data_ready--;
+		if (info->data_ready > 0) {
+			info->data_ready--;
+		} else {
+			spin_unlock_irqrestore(&info->lock, flags);
+			break;
+		}
 		spin_unlock_irqrestore(&info->lock, flags);
 		read_len = kernel_recvmsg(info->hdl, &read_msg, &iov, 1,
 					  pkt_len, MSG_DONTWAIT);
@@ -808,7 +813,13 @@
 		}
 
 		spin_lock_irqsave(&info->lock, flags);
-		info->data_ready--;
+		if (info->data_ready > 0) {
+			info->data_ready--;
+		} else {
+			spin_unlock_irqrestore(&info->lock, flags);
+			mutex_unlock(&info->socket_info_mutex);
+			break;
+		}
 		spin_unlock_irqrestore(&info->lock, flags);
 
 		read_len = kernel_recvmsg(info->hdl, &read_msg, &iov, 1,
diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c
index decffb3..a738af8 100644
--- a/drivers/clk/clk-versaclock5.c
+++ b/drivers/clk/clk-versaclock5.c
@@ -262,8 +262,10 @@
 
 		if (vc5->clk_mux_ins == VC5_MUX_IN_XIN)
 			src = VC5_PRIM_SRC_SHDN_EN_XTAL;
-		if (vc5->clk_mux_ins == VC5_MUX_IN_CLKIN)
+		else if (vc5->clk_mux_ins == VC5_MUX_IN_CLKIN)
 			src = VC5_PRIM_SRC_SHDN_EN_CLKIN;
+		else /* Invalid; should have been caught by vc5_probe() */
+			return -EINVAL;
 	}
 
 	return regmap_update_bits(vc5->regmap, VC5_PRIM_SRC_SHDN, mask, src);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 1cb305f..7616eab 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -3275,7 +3275,7 @@
 	seq_printf(s, "\"protect_count\": %d,", c->protect_count);
 	seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
 	seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
-	seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
+	seq_printf(s, "\"phase\": %d,", clk_core_get_phase(c));
 	seq_printf(s, "\"duty_cycle\": %u",
 		   clk_core_get_scaled_duty_cycle(c, 100000));
 }
@@ -4619,7 +4619,7 @@
 
 	for (n = 0; ; n++) {
 		rrate = clk_hw_round_rate(hw, rate + 1);
-		if (!rate) {
+		if (!rrate) {
 			pr_err("clk_round_rate failed for %s\n",
 							core->name);
 			goto err_derive_device_list;
diff --git a/drivers/clk/qcom/camcc-kona.c b/drivers/clk/qcom/camcc-kona.c
index 912f372..25873aa 100644
--- a/drivers/clk/qcom/camcc-kona.c
+++ b/drivers/clk/qcom/camcc-kona.c
@@ -280,6 +280,9 @@
 	.config_ctl_val = 0x08200920,
 	.config_ctl_hi_val = 0x05008011,
 	.config_ctl_hi1_val = 0x00000000,
+	.test_ctl_val = 0x00010000,
+	.test_ctl_hi_val = 0x00000000,
+	.test_ctl_hi1_val = 0x00000000,
 	.user_ctl_val = 0x00000100,
 	.user_ctl_hi_val = 0x00000000,
 	.user_ctl_hi1_val = 0x00000000,
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index a0521d1..5fc3c80 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -161,7 +161,8 @@
 
 /* ZONDA PLL specific offsets */
 #define ZONDA_PLL_OUT_MASK	0x9
-
+#define ZONDA_STAY_IN_CFA	BIT(16)
+#define ZONDA_PLL_FREQ_LOCK_DET	BIT(29)
 
 #define pll_alpha_width(p)					\
 		((PLL_ALPHA_VAL_U(p) - PLL_ALPHA_VAL(p) == 4) ?	\
@@ -216,6 +217,9 @@
 #define wait_for_pll_enable_lock(pll) \
 	wait_for_pll(pll, PLL_LOCK_DET, 0, "enable")
 
+#define wait_for_zonda_pll_freq_lock(pll) \
+	wait_for_pll(pll, ZONDA_PLL_FREQ_LOCK_DET, 0, "freq enable")
+
 #define wait_for_pll_disable(pll) \
 	wait_for_pll(pll, PLL_ACTIVE_FLAG, 1, "disable")
 
@@ -879,8 +883,7 @@
 				config->test_ctl_hi1_val);
 
 	regmap_update_bits(regmap, PLL_MODE(pll),
-			 PLL_UPDATE_BYPASS,
-			 PLL_UPDATE_BYPASS);
+			 PLL_BYPASSNL, 0);
 
 	/* Disable PLL output */
 	regmap_update_bits(regmap, PLL_MODE(pll),
@@ -900,7 +903,7 @@
 static int clk_zonda_pll_enable(struct clk_hw *hw)
 {
 	struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
-	u32 val;
+	u32 val, test_ctl_val;
 	int ret;
 
 	ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
@@ -937,7 +940,15 @@
 	regmap_write(pll->clkr.regmap, PLL_OPMODE(pll),
 						PLL_OPMODE_RUN);
 
-	ret = wait_for_pll_enable_lock(pll);
+	ret = regmap_read(pll->clkr.regmap, PLL_TEST_CTL(pll), &test_ctl_val);
+	if (ret)
+		return ret;
+
+	/* If cfa mode then poll for freq lock */
+	if (test_ctl_val & ZONDA_STAY_IN_CFA)
+		ret = wait_for_zonda_pll_freq_lock(pll);
+	else
+		ret = wait_for_pll_enable_lock(pll);
 	if (ret)
 		return ret;
 
@@ -1001,6 +1012,7 @@
 {
 	struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
 	unsigned long rrate;
+	u32 test_ctl_val;
 	u32 l;
 	u64 a;
 	int ret;
@@ -1022,7 +1034,16 @@
 	/* Wait before polling for the frequency latch */
 	udelay(5);
 
-	ret = wait_for_pll_enable_lock(pll);
+	/* Read stay in cfa mode */
+	ret = regmap_read(pll->clkr.regmap, PLL_TEST_CTL(pll), &test_ctl_val);
+	if (ret)
+		return ret;
+
+	/* If cfa mode then poll for freq lock */
+	if (test_ctl_val & ZONDA_STAY_IN_CFA)
+		ret = wait_for_zonda_pll_freq_lock(pll);
+	else
+		ret = wait_for_pll_enable_lock(pll);
 	if (ret)
 		return ret;
 
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index d7f3b9e..55f6a3b 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -281,6 +281,33 @@
 	.num_clks = ARRAY_SIZE(kona_rpmh_clocks),
 };
 
+DEFINE_CLK_RPMH_ARC(lito, bi_tcxo, bi_tcxo_ao, "xo.lvl", 0x3, 2);
+DEFINE_CLK_RPMH_VRM(lito, ln_bb_clk3, ln_bb_clk3_ao, "lnbclka3", 2);
+DEFINE_CLK_RPMH_VRM(lito, rf_clk1, rf_clk1_ao, "rfclkd1", 1);
+DEFINE_CLK_RPMH_VRM(lito, rf_clk2, rf_clk2_ao, "rfclkd2", 1);
+DEFINE_CLK_RPMH_VRM(lito, rf_clk3, rf_clk3_ao, "rfclkd3", 1);
+DEFINE_CLK_RPMH_VRM(lito, rf_clk4, rf_clk4_ao, "rfclkd4", 1);
+
+static struct clk_hw *lito_rpmh_clocks[] = {
+	[RPMH_CXO_CLK]		= &lito_bi_tcxo.hw,
+	[RPMH_CXO_CLK_A]	= &lito_bi_tcxo_ao.hw,
+	[RPMH_LN_BB_CLK3]	= &lito_ln_bb_clk3.hw,
+	[RPMH_LN_BB_CLK3_A]	= &lito_ln_bb_clk3_ao.hw,
+	[RPMH_RF_CLK1]		= &lito_rf_clk1.hw,
+	[RPMH_RF_CLK1_A]	= &lito_rf_clk1_ao.hw,
+	[RPMH_RF_CLK2]		= &lito_rf_clk2.hw,
+	[RPMH_RF_CLK2_A]	= &lito_rf_clk2_ao.hw,
+	[RPMH_RF_CLK3]		= &lito_rf_clk3.hw,
+	[RPMH_RF_CLK3_A]	= &lito_rf_clk3_ao.hw,
+	[RPMH_RF_CLK4]		= &lito_rf_clk4.hw,
+	[RPMH_RF_CLK4_A]	= &lito_rf_clk4_ao.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_lito = {
+	.clks = lito_rpmh_clocks,
+	.num_clks = ARRAY_SIZE(lito_rpmh_clocks),
+};
+
 static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec,
 					 void *data)
 {
@@ -358,6 +385,7 @@
 static const struct of_device_id clk_rpmh_match_table[] = {
 	{ .compatible = "qcom,sdm845-rpmh-clk", .data = &clk_rpmh_sdm845},
 	{ .compatible = "qcom,kona-rpmh-clk", .data = &clk_rpmh_kona},
+	{ .compatible = "qcom,lito-rpmh-clk", .data = &clk_rpmh_lito},
 	{ }
 };
 MODULE_DEVICE_TABLE(of, clk_rpmh_match_table);
diff --git a/drivers/clk/qcom/debugcc-kona.c b/drivers/clk/qcom/debugcc-kona.c
index 7d71211..4947287 100644
--- a/drivers/clk/qcom/debugcc-kona.c
+++ b/drivers/clk/qcom/debugcc-kona.c
@@ -93,8 +93,6 @@
 	"disp_cc_mdss_byte1_intf_clk",
 	"disp_cc_mdss_dp_aux1_clk",
 	"disp_cc_mdss_dp_aux_clk",
-	"disp_cc_mdss_dp_crypto1_clk",
-	"disp_cc_mdss_dp_crypto_clk",
 	"disp_cc_mdss_dp_link1_clk",
 	"disp_cc_mdss_dp_link1_intf_clk",
 	"disp_cc_mdss_dp_link_clk",
@@ -431,10 +429,6 @@
 			0x25, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
 		{ "disp_cc_mdss_dp_aux_clk", 0x56, 2, DISP_CC,
 			0x20, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
-		{ "disp_cc_mdss_dp_crypto1_clk", 0x56, 2, DISP_CC,
-			0x24, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
-		{ "disp_cc_mdss_dp_crypto_clk", 0x56, 2, DISP_CC,
-			0x1D, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
 		{ "disp_cc_mdss_dp_link1_clk", 0x56, 2, DISP_CC,
 			0x22, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
 		{ "disp_cc_mdss_dp_link1_intf_clk", 0x56, 2, DISP_CC,
diff --git a/drivers/clk/qcom/dispcc-kona.c b/drivers/clk/qcom/dispcc-kona.c
index 46592a4..3835bb7 100644
--- a/drivers/clk/qcom/dispcc-kona.c
+++ b/drivers/clk/qcom/dispcc-kona.c
@@ -422,60 +422,6 @@
 	},
 };
 
-static const struct freq_tbl ftbl_disp_cc_mdss_dp_crypto1_clk_src[] = {
-	F( 108000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
-	F( 180000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
-	F( 360000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
-	F( 540000, P_DP_PHY_PLL_LINK_CLK,   3,   0,   0),
-	{ }
-};
-
-static struct clk_rcg2 disp_cc_mdss_dp_crypto1_clk_src = {
-	.cmd_rcgr = 0x2228,
-	.mnd_width = 0,
-	.hid_width = 5,
-	.parent_map = disp_cc_parent_map_0,
-	.freq_tbl = ftbl_disp_cc_mdss_dp_crypto1_clk_src,
-	.clkr.hw.init = &(struct clk_init_data){
-		.name = "disp_cc_mdss_dp_crypto1_clk_src",
-		.parent_names = disp_cc_parent_names_0,
-		.num_parents = 8,
-		.flags = CLK_GET_RATE_NOCACHE,
-		.ops = &clk_rcg2_ops,
-		.vdd_class = &vdd_mm,
-		.num_rate_max = VDD_NUM,
-		.rate_max = (unsigned long[VDD_NUM]) {
-			[VDD_MIN] = 12800,
-			[VDD_LOWER] = 108000,
-			[VDD_LOW] = 180000,
-			[VDD_LOW_L1] = 360000,
-			[VDD_NOMINAL] = 540000},
-	},
-};
-
-static struct clk_rcg2 disp_cc_mdss_dp_crypto_clk_src = {
-	.cmd_rcgr = 0x2194,
-	.mnd_width = 0,
-	.hid_width = 5,
-	.parent_map = disp_cc_parent_map_0,
-	.freq_tbl = ftbl_disp_cc_mdss_dp_crypto1_clk_src,
-	.clkr.hw.init = &(struct clk_init_data){
-		.name = "disp_cc_mdss_dp_crypto_clk_src",
-		.parent_names = disp_cc_parent_names_0,
-		.num_parents = 8,
-		.flags = CLK_GET_RATE_NOCACHE,
-		.ops = &clk_rcg2_ops,
-		.vdd_class = &vdd_mm,
-		.num_rate_max = VDD_NUM,
-		.rate_max = (unsigned long[VDD_NUM]) {
-			[VDD_MIN] = 12800,
-			[VDD_LOWER] = 108000,
-			[VDD_LOW] = 180000,
-			[VDD_LOW_L1] = 360000,
-			[VDD_NOMINAL] = 540000},
-	},
-};
-
 static const struct freq_tbl ftbl_disp_cc_mdss_dp_link1_clk_src[] = {
 	F( 162000, P_DP_PHY_PLL_LINK_CLK,   1,   0,   0),
 	F( 270000, P_DP_PHY_PLL_LINK_CLK,   1,   0,   0),
@@ -997,42 +943,6 @@
 	},
 };
 
-static struct clk_branch disp_cc_mdss_dp_crypto1_clk = {
-	.halt_reg = 0x2064,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0x2064,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "disp_cc_mdss_dp_crypto1_clk",
-			.parent_names = (const char *[]){
-				"disp_cc_mdss_dp_crypto1_clk_src",
-			},
-			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
-static struct clk_branch disp_cc_mdss_dp_crypto_clk = {
-	.halt_reg = 0x2048,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0x2048,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "disp_cc_mdss_dp_crypto_clk",
-			.parent_names = (const char *[]){
-				"disp_cc_mdss_dp_crypto_clk_src",
-			},
-			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
 static struct clk_branch disp_cc_mdss_dp_link1_clk = {
 	.halt_reg = 0x205c,
 	.halt_check = BRANCH_HALT,
@@ -1493,11 +1403,6 @@
 	[DISP_CC_MDSS_DP_AUX1_CLK_SRC] = &disp_cc_mdss_dp_aux1_clk_src.clkr,
 	[DISP_CC_MDSS_DP_AUX_CLK] = &disp_cc_mdss_dp_aux_clk.clkr,
 	[DISP_CC_MDSS_DP_AUX_CLK_SRC] = &disp_cc_mdss_dp_aux_clk_src.clkr,
-	[DISP_CC_MDSS_DP_CRYPTO1_CLK] = &disp_cc_mdss_dp_crypto1_clk.clkr,
-	[DISP_CC_MDSS_DP_CRYPTO1_CLK_SRC] =
-		&disp_cc_mdss_dp_crypto1_clk_src.clkr,
-	[DISP_CC_MDSS_DP_CRYPTO_CLK] = &disp_cc_mdss_dp_crypto_clk.clkr,
-	[DISP_CC_MDSS_DP_CRYPTO_CLK_SRC] = &disp_cc_mdss_dp_crypto_clk_src.clkr,
 	[DISP_CC_MDSS_DP_LINK1_CLK] = &disp_cc_mdss_dp_link1_clk.clkr,
 	[DISP_CC_MDSS_DP_LINK1_CLK_SRC] = &disp_cc_mdss_dp_link1_clk_src.clkr,
 	[DISP_CC_MDSS_DP_LINK1_DIV_CLK_SRC] =
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index fa1a196..3bf11a6 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -131,8 +131,8 @@
 	"core_bi_pll_test_se",
 };
 
-static const char * const gcc_parent_names_7[] = {
-	"bi_tcxo",
+static const char * const gcc_parent_names_7_ao[] = {
+	"bi_tcxo_ao",
 	"gpll0",
 	"gpll0_out_even",
 	"core_bi_pll_test_se",
@@ -144,6 +144,12 @@
 	"core_bi_pll_test_se",
 };
 
+static const char * const gcc_parent_names_8_ao[] = {
+	"bi_tcxo_ao",
+	"gpll0",
+	"core_bi_pll_test_se",
+};
+
 static const struct parent_map gcc_parent_map_10[] = {
 	{ P_BI_TCXO, 0 },
 	{ P_GPLL0_OUT_MAIN, 1 },
@@ -226,7 +232,7 @@
 	.freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_cpuss_ahb_clk_src",
-		.parent_names = gcc_parent_names_7,
+		.parent_names = gcc_parent_names_7_ao,
 		.num_parents = 4,
 		.ops = &clk_rcg2_ops,
 	},
@@ -245,7 +251,7 @@
 	.freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_cpuss_rbcpr_clk_src",
-		.parent_names = gcc_parent_names_8,
+		.parent_names = gcc_parent_names_8_ao,
 		.num_parents = 3,
 		.ops = &clk_rcg2_ops,
 	},
diff --git a/drivers/clk/qcom/gpucc-kona.c b/drivers/clk/qcom/gpucc-kona.c
index b46e269..8d00c6b 100644
--- a/drivers/clk/qcom/gpucc-kona.c
+++ b/drivers/clk/qcom/gpucc-kona.c
@@ -26,6 +26,12 @@
 #include "reset.h"
 #include "vdd-level.h"
 
+#define CX_GMU_CBCR_SLEEP_SHIFT	4
+#define CX_GMU_CBCR_SLEEP_MASK	GENMASK(7, 4)
+#define CX_GMU_CBCR_WAKE_SHIFT	8
+#define CX_GMU_CBCR_WAKE_MASK	GENMASK(11, 8)
+
+
 static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
 static DEFINE_VDD_REGULATORS(vdd_mx, VDD_NUM, 1, vdd_corner);
 
@@ -419,6 +425,7 @@
 {
 	struct regmap *regmap;
 	struct clk *clk;
+	unsigned int value, mask;
 	int i, ret;
 
 	regmap = qcom_cc_map(pdev, &gpu_cc_kona_desc);
@@ -449,6 +456,12 @@
 			return PTR_ERR(clk);
 	}
 
+	/* Recommended WAKEUP/SLEEP settings for the gpu_cc_cx_gmu_clk */
+	mask = CX_GMU_CBCR_SLEEP_MASK | CX_GMU_CBCR_WAKE_MASK;
+	value = 0xf << CX_GMU_CBCR_WAKE_SHIFT | 0xf << CX_GMU_CBCR_SLEEP_SHIFT;
+	regmap_update_bits(regmap, gpu_cc_cx_gmu_clk.clkr.enable_reg,
+							mask, value);
+
 	ret = qcom_cc_really_probe(pdev, &gpu_cc_kona_desc, regmap);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to register GPU CC clocks\n");
diff --git a/drivers/clk/qcom/mdss/mdss-pll.c b/drivers/clk/qcom/mdss/mdss-pll.c
index c0ad6bf..7b95b10 100644
--- a/drivers/clk/qcom/mdss/mdss-pll.c
+++ b/drivers/clk/qcom/mdss/mdss-pll.c
@@ -229,7 +229,7 @@
 
 	label = of_get_property(pdev->dev.of_node, "label", NULL);
 	if (!label)
-		pr_info("%d: MDSS pll label not specified\n");
+		pr_info("MDSS pll label not specified\n");
 	else
 		pr_info("MDSS pll label = %s\n", label);
 
diff --git a/drivers/clk/qcom/mdss/mdss_pll_trace.h b/drivers/clk/qcom/mdss/mdss_pll_trace.h
index fd193bf..cf46c7f 100644
--- a/drivers/clk/qcom/mdss/mdss_pll_trace.h
+++ b/drivers/clk/qcom/mdss/mdss_pll_trace.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #if !defined(_MDSS_PLL_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
@@ -13,7 +13,7 @@
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM mdss_pll
 #undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE mdss_pll_trace
+#define TRACE_INCLUDE_FILE ../../drivers/clk/qcom/mdss/mdss_pll_trace
 
 
 TRACE_EVENT(mdss_pll_lock_start,
diff --git a/drivers/clk/qcom/videocc-kona.c b/drivers/clk/qcom/videocc-kona.c
index a85a880..8698403 100644
--- a/drivers/clk/qcom/videocc-kona.c
+++ b/drivers/clk/qcom/videocc-kona.c
@@ -87,9 +87,9 @@
 };
 
 static const struct alpha_pll_config video_pll0_config = {
-	.l = 0x14,
+	.l = 0x25,
 	.cal_l = 0x44,
-	.alpha = 0xD555,
+	.alpha = 0x8000,
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002261,
 	.config_ctl_hi1_val = 0x029A699C,
@@ -121,9 +121,9 @@
 };
 
 static const struct alpha_pll_config video_pll1_config = {
-	.l = 0x14,
+	.l = 0x29,
 	.cal_l = 0x44,
-	.alpha = 0xD555,
+	.alpha = 0xFAAA,
 	.config_ctl_val = 0x20485699,
 	.config_ctl_hi_val = 0x00002261,
 	.config_ctl_hi1_val = 0x029A699C,
@@ -227,12 +227,10 @@
 };
 
 static const struct freq_tbl ftbl_video_cc_mvs0_clk_src[] = {
-	F(400000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
 	F(720000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
 	F(1014000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
 	F(1098000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
 	F(1332000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
-	F(1599000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
 	{ }
 };
 
@@ -252,22 +250,18 @@
 		.vdd_class = &vdd_mm,
 		.num_rate_max = VDD_NUM,
 		.rate_max = (unsigned long[VDD_NUM]) {
-			[VDD_MIN] = 400000000,
 			[VDD_LOWER] = 720000000,
 			[VDD_LOW] = 1014000000,
 			[VDD_LOW_L1] = 1098000000,
-			[VDD_NOMINAL] = 1332000000,
-			[VDD_HIGH] = 1599000000},
+			[VDD_NOMINAL] = 1332000000},
 	},
 };
 
 static const struct freq_tbl ftbl_video_cc_mvs1_clk_src[] = {
-	F(400000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0),
 	F(806000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0),
 	F(1040000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0),
 	F(1098000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0),
 	F(1332000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0),
-	F(1599000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0),
 	{ }
 };
 
@@ -287,12 +281,10 @@
 		.vdd_class = &vdd_mm,
 		.num_rate_max = VDD_NUM,
 		.rate_max = (unsigned long[VDD_NUM]) {
-			[VDD_MIN] = 400000000,
 			[VDD_LOWER] = 806000000,
 			[VDD_LOW] = 1040000000,
 			[VDD_LOW_L1] = 1098000000,
-			[VDD_NOMINAL] = 1332000000,
-			[VDD_HIGH] = 1599000000},
+			[VDD_NOMINAL] = 1332000000},
 	},
 };
 
@@ -316,7 +308,7 @@
 		.vdd_class = &vdd_mm,
 		.num_rate_max = VDD_NUM,
 		.rate_max = (unsigned long[VDD_NUM]) {
-			[VDD_MIN] = 32000},
+			[VDD_LOWER] = 32000},
 	},
 };
 
diff --git a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
index 269d359..edc31bb 100644
--- a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
+++ b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
@@ -133,9 +133,11 @@
 	struct tegra_dfll_soc_data *soc;
 
 	soc = tegra_dfll_unregister(pdev);
-	if (IS_ERR(soc))
+	if (IS_ERR(soc)) {
 		dev_err(&pdev->dev, "failed to unregister DFLL: %ld\n",
 			PTR_ERR(soc));
+		return PTR_ERR(soc);
+	}
 
 	tegra_cvb_remove_opp_table(soc->dev, soc->cvb, soc->max_freq);
 
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index ccfb4d9..079f0be 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -367,8 +367,10 @@
 	num_dividers = i;
 
 	tmp = kcalloc(valid_div + 1, sizeof(*tmp), GFP_KERNEL);
-	if (!tmp)
+	if (!tmp) {
+		*table = ERR_PTR(-ENOMEM);
 		return -ENOMEM;
+	}
 
 	valid_div = 0;
 	*width = 0;
@@ -403,6 +405,7 @@
 {
 	struct clk_omap_divider *div;
 	struct clk_omap_reg *reg;
+	int ret;
 
 	if (!setup)
 		return NULL;
@@ -422,6 +425,12 @@
 		div->flags |= CLK_DIVIDER_POWER_OF_TWO;
 
 	div->table = _get_div_table_from_setup(setup, &div->width);
+	if (IS_ERR(div->table)) {
+		ret = PTR_ERR(div->table);
+		kfree(div);
+		return ERR_PTR(ret);
+	}
+
 
 	div->shift = setup->bit_shift;
 	div->latch = -EINVAL;
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index ed5e424..ad48fd5 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -250,6 +250,7 @@
 {
 	struct cn_msg *msg;
 	struct proc_event *ev;
+	struct task_struct *parent;
 	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
 
 	if (atomic_read(&proc_event_num_listeners) < 1)
@@ -262,8 +263,14 @@
 	ev->what = PROC_EVENT_COREDUMP;
 	ev->event_data.coredump.process_pid = task->pid;
 	ev->event_data.coredump.process_tgid = task->tgid;
-	ev->event_data.coredump.parent_pid = task->real_parent->pid;
-	ev->event_data.coredump.parent_tgid = task->real_parent->tgid;
+
+	rcu_read_lock();
+	if (pid_alive(task)) {
+		parent = rcu_dereference(task->real_parent);
+		ev->event_data.coredump.parent_pid = parent->pid;
+		ev->event_data.coredump.parent_tgid = parent->tgid;
+	}
+	rcu_read_unlock();
 
 	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
 	msg->ack = 0; /* not used */
@@ -276,6 +283,7 @@
 {
 	struct cn_msg *msg;
 	struct proc_event *ev;
+	struct task_struct *parent;
 	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
 
 	if (atomic_read(&proc_event_num_listeners) < 1)
@@ -290,8 +298,14 @@
 	ev->event_data.exit.process_tgid = task->tgid;
 	ev->event_data.exit.exit_code = task->exit_code;
 	ev->event_data.exit.exit_signal = task->exit_signal;
-	ev->event_data.exit.parent_pid = task->real_parent->pid;
-	ev->event_data.exit.parent_tgid = task->real_parent->tgid;
+
+	rcu_read_lock();
+	if (pid_alive(task)) {
+		parent = rcu_dereference(task->real_parent);
+		ev->event_data.exit.parent_pid = parent->pid;
+		ev->event_data.exit.parent_tgid = parent->tgid;
+	}
+	rcu_read_unlock();
 
 	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
 	msg->ack = 0; /* not used */
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index d5b1ec6..5279839 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -358,7 +358,7 @@
 		}
 
 		cpufreq_stats_record_transition(policy, freqs->new);
-		cpufreq_times_record_transition(freqs);
+		cpufreq_times_record_transition(policy, freqs->new);
 		policy->cur = freqs->new;
 	}
 }
@@ -555,13 +555,13 @@
  *                          SYSFS INTERFACE                          *
  *********************************************************************/
 static ssize_t show_boost(struct kobject *kobj,
-				 struct attribute *attr, char *buf)
+			  struct kobj_attribute *attr, char *buf)
 {
 	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
 }
 
-static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
-				  const char *buf, size_t count)
+static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
+			   const char *buf, size_t count)
 {
 	int ret, enable;
 
@@ -1869,9 +1869,15 @@
 unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
 					unsigned int target_freq)
 {
+	int ret;
+
 	target_freq = clamp_val(target_freq, policy->min, policy->max);
 
-	return cpufreq_driver->fast_switch(policy, target_freq);
+	ret = cpufreq_driver->fast_switch(policy, target_freq);
+	if (ret)
+		cpufreq_times_record_transition(policy, ret);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
 
@@ -2278,7 +2284,6 @@
 		ret = cpufreq_start_governor(policy);
 		if (!ret) {
 			pr_debug("cpufreq: governor change\n");
-			sched_cpufreq_governor_change(policy, old_gov);
 			return 0;
 		}
 		cpufreq_exit_governor(policy);
diff --git a/drivers/cpufreq/cpufreq_times.c b/drivers/cpufreq/cpufreq_times.c
index a43eeee..2883d67 100644
--- a/drivers/cpufreq/cpufreq_times.c
+++ b/drivers/cpufreq/cpufreq_times.c
@@ -32,11 +32,17 @@
 static DEFINE_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */
 static DEFINE_SPINLOCK(uid_lock); /* uid_hash_table */
 
+struct concurrent_times {
+	atomic64_t active[NR_CPUS];
+	atomic64_t policy[NR_CPUS];
+};
+
 struct uid_entry {
 	uid_t uid;
 	unsigned int max_state;
 	struct hlist_node hash;
 	struct rcu_head rcu;
+	struct concurrent_times *concurrent_times;
 	u64 time_in_state[0];
 };
 
@@ -87,6 +93,7 @@
 static struct uid_entry *find_or_register_uid_locked(uid_t uid)
 {
 	struct uid_entry *uid_entry, *temp;
+	struct concurrent_times *times;
 	unsigned int max_state = READ_ONCE(next_offset);
 	size_t alloc_size = sizeof(*uid_entry) + max_state *
 		sizeof(uid_entry->time_in_state[0]);
@@ -115,9 +122,15 @@
 	uid_entry = kzalloc(alloc_size, GFP_ATOMIC);
 	if (!uid_entry)
 		return NULL;
+	times = kzalloc(sizeof(*times), GFP_ATOMIC);
+	if (!times) {
+		kfree(uid_entry);
+		return NULL;
+	}
 
 	uid_entry->uid = uid;
 	uid_entry->max_state = max_state;
+	uid_entry->concurrent_times = times;
 
 	hash_add_rcu(uid_hash_table, &uid_entry->hash, uid);
 
@@ -180,10 +193,12 @@
 
 static void *uid_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-	(*pos)++;
+	do {
+		(*pos)++;
 
-	if (*pos >= HASH_SIZE(uid_hash_table))
-		return NULL;
+		if (*pos >= HASH_SIZE(uid_hash_table))
+			return NULL;
+	} while (hlist_empty(&uid_hash_table[*pos]));
 
 	return &uid_hash_table[*pos];
 }
@@ -207,7 +222,8 @@
 				if (freqs->freq_table[i] ==
 				    CPUFREQ_ENTRY_INVALID)
 					continue;
-				seq_printf(m, " %d", freqs->freq_table[i]);
+				seq_put_decimal_ull(m, " ",
+						    freqs->freq_table[i]);
 			}
 		}
 		seq_putc(m, '\n');
@@ -216,13 +232,16 @@
 	rcu_read_lock();
 
 	hlist_for_each_entry_rcu(uid_entry, (struct hlist_head *)v, hash) {
-		if (uid_entry->max_state)
-			seq_printf(m, "%d:", uid_entry->uid);
+		if (uid_entry->max_state) {
+			seq_put_decimal_ull(m, "", uid_entry->uid);
+			seq_putc(m, ':');
+		}
 		for (i = 0; i < uid_entry->max_state; ++i) {
+			u64 time;
 			if (freq_index_invalid(i))
 				continue;
-			seq_printf(m, " %lu", (unsigned long)nsec_to_clock_t(
-					   uid_entry->time_in_state[i]));
+			time = nsec_to_clock_t(uid_entry->time_in_state[i]);
+			seq_put_decimal_ull(m, " ", time);
 		}
 		if (uid_entry->max_state)
 			seq_putc(m, '\n');
@@ -232,6 +251,86 @@
 	return 0;
 }
 
+static int concurrent_time_seq_show(struct seq_file *m, void *v,
+	atomic64_t *(*get_times)(struct concurrent_times *))
+{
+	struct uid_entry *uid_entry;
+	int i, num_possible_cpus = num_possible_cpus();
+
+	rcu_read_lock();
+
+	hlist_for_each_entry_rcu(uid_entry, (struct hlist_head *)v, hash) {
+		atomic64_t *times = get_times(uid_entry->concurrent_times);
+
+		seq_put_decimal_ull(m, "", (u64)uid_entry->uid);
+		seq_putc(m, ':');
+
+		for (i = 0; i < num_possible_cpus; ++i) {
+			u64 time = nsec_to_clock_t(atomic64_read(&times[i]));
+
+			seq_put_decimal_ull(m, " ", time);
+		}
+		seq_putc(m, '\n');
+	}
+
+	rcu_read_unlock();
+
+	return 0;
+}
+
+static inline atomic64_t *get_active_times(struct concurrent_times *times)
+{
+	return times->active;
+}
+
+static int concurrent_active_time_seq_show(struct seq_file *m, void *v)
+{
+	if (v == uid_hash_table) {
+		seq_put_decimal_ull(m, "cpus: ", num_possible_cpus());
+		seq_putc(m, '\n');
+	}
+
+	return concurrent_time_seq_show(m, v, get_active_times);
+}
+
+static inline atomic64_t *get_policy_times(struct concurrent_times *times)
+{
+	return times->policy;
+}
+
+static int concurrent_policy_time_seq_show(struct seq_file *m, void *v)
+{
+	int i;
+	struct cpu_freqs *freqs, *last_freqs = NULL;
+
+	if (v == uid_hash_table) {
+		int cnt = 0;
+
+		for_each_possible_cpu(i) {
+			freqs = all_freqs[i];
+			if (!freqs)
+				continue;
+			if (freqs != last_freqs) {
+				if (last_freqs) {
+					seq_put_decimal_ull(m, ": ", cnt);
+					seq_putc(m, ' ');
+					cnt = 0;
+				}
+				seq_put_decimal_ull(m, "policy", i);
+
+				last_freqs = freqs;
+			}
+			cnt++;
+		}
+		if (last_freqs) {
+			seq_put_decimal_ull(m, ": ", cnt);
+			seq_putc(m, '\n');
+		}
+	}
+
+	return concurrent_time_seq_show(m, v, get_policy_times);
+}
+
 void cpufreq_task_times_init(struct task_struct *p)
 {
 	unsigned long flags;
@@ -326,11 +425,16 @@
 {
 	unsigned long flags;
 	unsigned int state;
+	unsigned int active_cpu_cnt = 0;
+	unsigned int policy_cpu_cnt = 0;
+	unsigned int policy_first_cpu;
 	struct uid_entry *uid_entry;
 	struct cpu_freqs *freqs = all_freqs[task_cpu(p)];
+	struct cpufreq_policy *policy;
 	uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
+	int cpu = 0;
 
-	if (!freqs || p->flags & PF_EXITING)
+	if (!freqs || is_idle_task(p) || p->flags & PF_EXITING)
 		return;
 
 	state = freqs->offset + READ_ONCE(freqs->last_index);
@@ -346,6 +450,42 @@
 	if (uid_entry && state < uid_entry->max_state)
 		uid_entry->time_in_state[state] += cputime;
 	spin_unlock_irqrestore(&uid_lock, flags);
+
+	rcu_read_lock();
+	uid_entry = find_uid_entry_rcu(uid);
+	if (!uid_entry) {
+		rcu_read_unlock();
+		return;
+	}
+
+	for_each_possible_cpu(cpu)
+		if (!idle_cpu(cpu))
+			++active_cpu_cnt;
+
+	atomic64_add(cputime,
+		     &uid_entry->concurrent_times->active[active_cpu_cnt - 1]);
+
+	policy = cpufreq_cpu_get(task_cpu(p));
+	if (!policy) {
+		/*
+		 * This CPU may have just come up and not have a cpufreq policy
+		 * yet.
+		 */
+		rcu_read_unlock();
+		return;
+	}
+
+	for_each_cpu(cpu, policy->related_cpus)
+		if (!idle_cpu(cpu))
+			++policy_cpu_cnt;
+
+	policy_first_cpu = cpumask_first(policy->related_cpus);
+	cpufreq_cpu_put(policy);
+
+	atomic64_add(cputime,
+		     &uid_entry->concurrent_times->policy[policy_first_cpu +
+							  policy_cpu_cnt - 1]);
+	rcu_read_unlock();
 }
 
 void cpufreq_times_create_policy(struct cpufreq_policy *policy)
@@ -387,6 +527,14 @@
 		all_freqs[cpu] = freqs;
 }
 
+static void uid_entry_reclaim(struct rcu_head *rcu)
+{
+	struct uid_entry *uid_entry = container_of(rcu, struct uid_entry, rcu);
+
+	kfree(uid_entry->concurrent_times);
+	kfree(uid_entry);
+}
+
 void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end)
 {
 	struct uid_entry *uid_entry;
@@ -400,7 +548,7 @@
 			hash, uid_start) {
 			if (uid_start == uid_entry->uid) {
 				hash_del_rcu(&uid_entry->hash);
-				kfree_rcu(uid_entry, rcu);
+				call_rcu(&uid_entry->rcu, uid_entry_reclaim);
 			}
 		}
 	}
@@ -408,24 +556,17 @@
 	spin_unlock_irqrestore(&uid_lock, flags);
 }
 
-void cpufreq_times_record_transition(struct cpufreq_freqs *freq)
+void cpufreq_times_record_transition(struct cpufreq_policy *policy,
+	unsigned int new_freq)
 {
 	int index;
-	struct cpu_freqs *freqs = all_freqs[freq->cpu];
-	struct cpufreq_policy *policy;
-
+	struct cpu_freqs *freqs = all_freqs[policy->cpu];
 	if (!freqs)
 		return;
 
-	policy = cpufreq_cpu_get(freq->cpu);
-	if (!policy)
-		return;
-
-	index = cpufreq_frequency_table_get_index(policy, freq->new);
+	index = cpufreq_frequency_table_get_index(policy, new_freq);
 	if (index >= 0)
 		WRITE_ONCE(freqs->last_index, index);
-
-	cpufreq_cpu_put(policy);
 }
 
 static const struct seq_operations uid_time_in_state_seq_ops = {
@@ -453,11 +594,55 @@
 	.release	= seq_release,
 };
 
+static const struct seq_operations concurrent_active_time_seq_ops = {
+	.start = uid_seq_start,
+	.next = uid_seq_next,
+	.stop = uid_seq_stop,
+	.show = concurrent_active_time_seq_show,
+};
+
+static int concurrent_active_time_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &concurrent_active_time_seq_ops);
+}
+
+static const struct file_operations concurrent_active_time_fops = {
+	.open		= concurrent_active_time_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static const struct seq_operations concurrent_policy_time_seq_ops = {
+	.start = uid_seq_start,
+	.next = uid_seq_next,
+	.stop = uid_seq_stop,
+	.show = concurrent_policy_time_seq_show,
+};
+
+static int concurrent_policy_time_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &concurrent_policy_time_seq_ops);
+}
+
+static const struct file_operations concurrent_policy_time_fops = {
+	.open		= concurrent_policy_time_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
 static int __init cpufreq_times_init(void)
 {
 	proc_create_data("uid_time_in_state", 0444, NULL,
 			 &uid_time_in_state_fops, NULL);
 
+	proc_create_data("uid_concurrent_active_time", 0444, NULL,
+			 &concurrent_active_time_fops, NULL);
+
+	proc_create_data("uid_concurrent_policy_time", 0444, NULL,
+			 &concurrent_policy_time_fops, NULL);
+
 	return 0;
 }
 
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b6a1aad..a005711 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -833,7 +833,7 @@
 /************************** sysfs begin ************************/
 #define show_one(file_name, object)					\
 	static ssize_t show_##file_name					\
-	(struct kobject *kobj, struct attribute *attr, char *buf)	\
+	(struct kobject *kobj, struct kobj_attribute *attr, char *buf)	\
 	{								\
 		return sprintf(buf, "%u\n", global.object);		\
 	}
@@ -842,7 +842,7 @@
 static int intel_pstate_update_status(const char *buf, size_t size);
 
 static ssize_t show_status(struct kobject *kobj,
-			   struct attribute *attr, char *buf)
+			   struct kobj_attribute *attr, char *buf)
 {
 	ssize_t ret;
 
@@ -853,7 +853,7 @@
 	return ret;
 }
 
-static ssize_t store_status(struct kobject *a, struct attribute *b,
+static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
 			    const char *buf, size_t count)
 {
 	char *p = memchr(buf, '\n', count);
@@ -867,7 +867,7 @@
 }
 
 static ssize_t show_turbo_pct(struct kobject *kobj,
-				struct attribute *attr, char *buf)
+				struct kobj_attribute *attr, char *buf)
 {
 	struct cpudata *cpu;
 	int total, no_turbo, turbo_pct;
@@ -893,7 +893,7 @@
 }
 
 static ssize_t show_num_pstates(struct kobject *kobj,
-				struct attribute *attr, char *buf)
+				struct kobj_attribute *attr, char *buf)
 {
 	struct cpudata *cpu;
 	int total;
@@ -914,7 +914,7 @@
 }
 
 static ssize_t show_no_turbo(struct kobject *kobj,
-			     struct attribute *attr, char *buf)
+			     struct kobj_attribute *attr, char *buf)
 {
 	ssize_t ret;
 
@@ -936,7 +936,7 @@
 	return ret;
 }
 
-static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
+static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
 			      const char *buf, size_t count)
 {
 	unsigned int input;
@@ -983,7 +983,7 @@
 	return count;
 }
 
-static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
+static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
 				  const char *buf, size_t count)
 {
 	unsigned int input;
@@ -1013,7 +1013,7 @@
 	return count;
 }
 
-static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
+static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
 				  const char *buf, size_t count)
 {
 	unsigned int input;
@@ -1045,12 +1045,13 @@
 }
 
 static ssize_t show_hwp_dynamic_boost(struct kobject *kobj,
-				struct attribute *attr, char *buf)
+				struct kobj_attribute *attr, char *buf)
 {
 	return sprintf(buf, "%u\n", hwp_boost);
 }
 
-static ssize_t store_hwp_dynamic_boost(struct kobject *a, struct attribute *b,
+static ssize_t store_hwp_dynamic_boost(struct kobject *a,
+				       struct kobj_attribute *b,
 				       const char *buf, size_t count)
 {
 	unsigned int input;
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index ed3b785..0dece83 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -30,4 +30,15 @@
 	  WARNING: improper use of this can result in deadlocking kernel
 	  drivers from userspace. Intended for test and debug only.
 
+config DEBUG_DMA_BUF_REF
+	bool "DEBUG Reference Count"
+	depends on STACKDEPOT
+	depends on DMA_SHARED_BUFFER
+	default n
+	help
+	  Save stack traces for every call to dma_buf_get and dma_buf_put, to
+	  help debug memory leaks. Potential leaks may be found by manually
+	  matching the get/put call stacks.  This feature consumes extra memory
+	  in order to save the stack traces using STACKDEPOT.
+
 endmenu
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index c33bf88..dcbc33f 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,3 +1,4 @@
 obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o
 obj-$(CONFIG_SYNC_FILE)		+= sync_file.o
 obj-$(CONFIG_SW_SYNC)		+= sw_sync.o sync_debug.o
+obj-$(CONFIG_DEBUG_DMA_BUF_REF)	+= dma-buf-ref.o
diff --git a/drivers/dma-buf/dma-buf-ref.c b/drivers/dma-buf/dma-buf-ref.c
new file mode 100644
index 0000000..6298574
--- /dev/null
+++ b/drivers/dma-buf/dma-buf-ref.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/dma-buf.h>
+#include <linux/slab.h>
+#include <linux/stackdepot.h>
+#include <linux/stacktrace.h>
+#include <linux/seq_file.h>
+
+#define DMA_BUF_STACK_DEPTH (16)
+
+struct dma_buf_ref {
+	struct list_head list;
+	depot_stack_handle_t handle;
+	int count;
+};
+
+void dma_buf_ref_init(struct dma_buf *dmabuf)
+{
+	INIT_LIST_HEAD(&dmabuf->refs);
+}
+
+void dma_buf_ref_destroy(struct dma_buf *dmabuf)
+{
+	struct dma_buf_ref *r, *n;
+
+	mutex_lock(&dmabuf->lock);
+	list_for_each_entry_safe(r, n, &dmabuf->refs, list) {
+		list_del(&r->list);
+		kfree(r);
+	}
+	mutex_unlock(&dmabuf->lock);
+}
+
+static void dma_buf_ref_insert_handle(struct dma_buf *dmabuf,
+				      depot_stack_handle_t handle,
+				      int count)
+{
+	struct dma_buf_ref *r;
+
+	mutex_lock(&dmabuf->lock);
+	list_for_each_entry(r, &dmabuf->refs, list) {
+		if (r->handle == handle) {
+			r->count += count;
+			goto out;
+		}
+	}
+
+	r = kzalloc(sizeof(*r), GFP_KERNEL);
+	if (!r)
+		goto out;
+
+	INIT_LIST_HEAD(&r->list);
+	r->handle = handle;
+	r->count = count;
+	list_add(&r->list, &dmabuf->refs);
+
+out:
+	mutex_unlock(&dmabuf->lock);
+}
+
+void dma_buf_ref_mod(struct dma_buf *dmabuf, int nr)
+{
+	unsigned long entries[DMA_BUF_STACK_DEPTH];
+	struct stack_trace trace = {
+		.nr_entries = 0,
+		.entries = entries,
+		.max_entries = DMA_BUF_STACK_DEPTH,
+		.skip = 1
+	};
+	depot_stack_handle_t handle;
+
+	save_stack_trace(&trace);
+	if (trace.nr_entries != 0 &&
+	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
+		trace.nr_entries--;
+
+	handle = depot_save_stack(&trace, GFP_KERNEL);
+	if (!handle)
+		return;
+
+	dma_buf_ref_insert_handle(dmabuf, handle, nr);
+}
+
+/**
+ * Called with dmabuf->lock held
+ */
+int dma_buf_ref_show(struct seq_file *s, struct dma_buf *dmabuf)
+{
+	char *buf;
+	struct dma_buf_ref *ref;
+	int count = 0;
+	struct stack_trace trace;
+
+	buf = (void *)__get_free_page(GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	list_for_each_entry(ref, &dmabuf->refs, list) {
+		count += ref->count;
+
+		seq_printf(s, "References: %d\n", ref->count);
+		depot_fetch_stack(ref->handle, &trace);
+		snprint_stack_trace(buf, PAGE_SIZE, &trace, 0);
+		seq_puts(s, buf);
+		seq_putc(s, '\n');
+	}
+
+	seq_printf(s, "Total references: %d\n\n\n", count);
+	free_page((unsigned long)buf);
+
+	return 0;
+}
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 5b0c24f..db82aae 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -36,6 +36,9 @@
 #include <linux/mm.h>
 #include <linux/kernel.h>
 #include <linux/atomic.h>
+#include <linux/sched/signal.h>
+#include <linux/fdtable.h>
+#include <linux/list_sort.h>
 
 #include <uapi/linux/dma-buf.h>
 
@@ -48,6 +51,19 @@
 	struct mutex lock;
 };
 
+struct dma_info {
+	struct dma_buf *dmabuf;
+	struct list_head head;
+};
+
+struct dma_proc {
+	char name[TASK_COMM_LEN];
+	pid_t pid;
+	size_t size;
+	struct list_head dma_bufs;
+	struct list_head head;
+};
+
 static struct dma_buf_list db_list;
 
 static int dma_buf_release(struct inode *inode, struct file *file)
@@ -71,12 +87,14 @@
 	 */
 	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
 
-	dmabuf->ops->release(dmabuf);
-
 	mutex_lock(&db_list.lock);
 	list_del(&dmabuf->list_node);
 	mutex_unlock(&db_list.lock);
 
+	dmabuf->ops->release(dmabuf);
+
+	dma_buf_ref_destroy(dmabuf);
+
 	if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
 		reservation_object_fini(dmabuf->resv);
 
@@ -457,6 +475,7 @@
 	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
 	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
 	dmabuf->name = bufname;
+	dmabuf->ktime = ktime_get();
 
 	if (!resv) {
 		resv = (struct reservation_object *)&dmabuf[1];
@@ -477,6 +496,9 @@
 	mutex_init(&dmabuf->lock);
 	INIT_LIST_HEAD(&dmabuf->attachments);
 
+	dma_buf_ref_init(dmabuf);
+	dma_buf_ref_mod(dmabuf, 1);
+
 	mutex_lock(&db_list.lock);
 	list_add(&dmabuf->list_node, &db_list.head);
 	mutex_unlock(&db_list.lock);
@@ -538,6 +560,7 @@
 		fput(file);
 		return ERR_PTR(-EINVAL);
 	}
+	dma_buf_ref_mod(file->private_data, 1);
 
 	return file->private_data;
 }
@@ -558,6 +581,7 @@
 	if (WARN_ON(!dmabuf || !dmabuf->file))
 		return;
 
+	dma_buf_ref_mod(dmabuf, -1);
 	fput(dmabuf->file);
 }
 EXPORT_SYMBOL_GPL(dma_buf_put);
@@ -1203,6 +1227,8 @@
 		seq_printf(s, "Total %d devices attached\n\n",
 				attach_count);
 
+		dma_buf_ref_show(s, buf_obj);
+
 		count++;
 		size += buf_obj->size;
 		mutex_unlock(&buf_obj->lock);
@@ -1226,6 +1252,157 @@
 	.release        = single_release,
 };
 
+static bool list_contains(struct list_head *list, struct dma_buf *info)
+{
+	struct dma_info *curr;
+
+	list_for_each_entry(curr, list, head)
+		if (curr->dmabuf == info)
+			return true;
+
+	return false;
+}
+
+static int get_dma_info(const void *data, struct file *file, unsigned int n)
+{
+	struct dma_proc *dma_proc;
+	struct dma_info *dma_info;
+
+	dma_proc = (struct dma_proc *)data;
+	if (!is_dma_buf_file(file))
+		return 0;
+
+	if (list_contains(&dma_proc->dma_bufs, file->private_data))
+		return 0;
+
+	dma_info = kzalloc(sizeof(*dma_info), GFP_ATOMIC);
+	if (!dma_info)
+		return -ENOMEM;
+
+	get_file(file);
+	dma_info->dmabuf = file->private_data;
+	dma_proc->size += dma_info->dmabuf->size / SZ_1K;
+	list_add(&dma_info->head, &dma_proc->dma_bufs);
+	return 0;
+}
+
+static void write_proc(struct seq_file *s, struct dma_proc *proc)
+{
+	struct dma_info *tmp;
+
+	seq_printf(s, "\n%s (PID %d) size: %zu\nDMA Buffers:\n",
+		proc->name, proc->pid, proc->size);
+	seq_printf(s, "%-8s\t%-8s\t%-8s\n",
+		"Name", "Size (KB)", "Time Alive (sec)");
+
+	list_for_each_entry(tmp, &proc->dma_bufs, head) {
+		struct dma_buf *dmabuf = tmp->dmabuf;
+		ktime_t elapmstime = ktime_ms_delta(ktime_get(), dmabuf->ktime);
+
+		elapmstime = ktime_divns(elapmstime, MSEC_PER_SEC);
+		seq_printf(s, "%-8s\t%-8zu\t%-8lld\n",
+				dmabuf->name,
+				dmabuf->size / SZ_1K,
+				(long long)elapmstime);
+	}
+}
+
+static void free_proc(struct dma_proc *proc)
+{
+	struct dma_info *tmp, *n;
+
+	list_for_each_entry_safe(tmp, n, &proc->dma_bufs, head) {
+		dma_buf_put(tmp->dmabuf);
+		list_del(&tmp->head);
+		kfree(tmp);
+	}
+	kfree(proc);
+}
+
+static int dmacmp(void *unused, struct list_head *a, struct list_head *b)
+{
+	struct dma_info *a_buf, *b_buf;
+
+	a_buf = list_entry(a, struct dma_info, head);
+	b_buf = list_entry(b, struct dma_info, head);
+	return b_buf->dmabuf->size - a_buf->dmabuf->size;
+}
+
+static int proccmp(void *unused, struct list_head *a, struct list_head *b)
+{
+	struct dma_proc *a_proc, *b_proc;
+
+	a_proc = list_entry(a, struct dma_proc, head);
+	b_proc = list_entry(b, struct dma_proc, head);
+	return b_proc->size - a_proc->size;
+}
+
+static int dma_procs_debug_show(struct seq_file *s, void *unused)
+{
+	struct task_struct *task, *thread;
+	struct files_struct *files;
+	int ret = 0;
+	struct dma_proc *tmp, *n;
+	LIST_HEAD(plist);
+
+	read_lock(&tasklist_lock);
+	for_each_process(task) {
+		struct files_struct *group_leader_files = NULL;
+
+		tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
+		if (!tmp) {
+			ret = -ENOMEM;
+			read_unlock(&tasklist_lock);
+			goto mem_err;
+		}
+		INIT_LIST_HEAD(&tmp->dma_bufs);
+		for_each_thread(task, thread) {
+			task_lock(thread);
+			if (unlikely(!group_leader_files))
+				group_leader_files = task->group_leader->files;
+			files = thread->files;
+			if (files && (group_leader_files != files ||
+				      thread == task->group_leader))
+				ret = iterate_fd(files, 0, get_dma_info, tmp);
+			task_unlock(thread);
+		}
+		if (ret || list_empty(&tmp->dma_bufs))
+			goto skip;
+		list_sort(NULL, &tmp->dma_bufs, dmacmp);
+		get_task_comm(tmp->name, task);
+		tmp->pid = task->tgid;
+		list_add(&tmp->head, &plist);
+		continue;
+skip:
+		free_proc(tmp);
+	}
+	read_unlock(&tasklist_lock);
+
+	list_sort(NULL, &plist, proccmp);
+	list_for_each_entry(tmp, &plist, head)
+		write_proc(s, tmp);
+
+	ret = 0;
+mem_err:
+	list_for_each_entry_safe(tmp, n, &plist, head) {
+		list_del(&tmp->head);
+		free_proc(tmp);
+	}
+	return ret;
+}
+
+static int dma_procs_debug_open(struct inode *f_inode, struct file *file)
+{
+	return single_open(file, dma_procs_debug_show, NULL);
+}
+
+static const struct file_operations dma_procs_debug_fops = {
+	.open           = dma_procs_debug_open,
+	.read           = seq_read,
+	.llseek         = seq_lseek,
+	.release        = single_release
+};
+
 static struct dentry *dma_buf_debugfs_dir;
 
 static int dma_buf_init_debugfs(void)
@@ -1246,6 +1423,17 @@
 		debugfs_remove_recursive(dma_buf_debugfs_dir);
 		dma_buf_debugfs_dir = NULL;
 		err = PTR_ERR(d);
+		return err;
+	}
+
+	d = debugfs_create_file("dmaprocs", 0444, dma_buf_debugfs_dir,
+				NULL, &dma_procs_debug_fops);
+
+	if (IS_ERR(d)) {
+		pr_debug("dma_buf: debugfs: failed to create node dmaprocs\n");
+		debugfs_remove_recursive(dma_buf_debugfs_dir);
+		dma_buf_debugfs_dir = NULL;
+		err = PTR_ERR(d);
 	}
 
 	return err;
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 4bf7256..a75b95f 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -203,6 +203,7 @@
 	u32				save_cim;
 	u32				save_cnda;
 	u32				save_cndc;
+	u32				irq_status;
 	unsigned long			status;
 	struct tasklet_struct		tasklet;
 	struct dma_slave_config		sconfig;
@@ -1580,8 +1581,8 @@
 	struct at_xdmac_desc	*desc;
 	u32			error_mask;
 
-	dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n",
-		 __func__, atchan->status);
+	dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
+		__func__, atchan->irq_status);
 
 	error_mask = AT_XDMAC_CIS_RBEIS
 		     | AT_XDMAC_CIS_WBEIS
@@ -1589,15 +1590,15 @@
 
 	if (at_xdmac_chan_is_cyclic(atchan)) {
 		at_xdmac_handle_cyclic(atchan);
-	} else if ((atchan->status & AT_XDMAC_CIS_LIS)
-		   || (atchan->status & error_mask)) {
+	} else if ((atchan->irq_status & AT_XDMAC_CIS_LIS)
+		   || (atchan->irq_status & error_mask)) {
 		struct dma_async_tx_descriptor  *txd;
 
-		if (atchan->status & AT_XDMAC_CIS_RBEIS)
+		if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
 			dev_err(chan2dev(&atchan->chan), "read bus error!!!");
-		if (atchan->status & AT_XDMAC_CIS_WBEIS)
+		if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
 			dev_err(chan2dev(&atchan->chan), "write bus error!!!");
-		if (atchan->status & AT_XDMAC_CIS_ROIS)
+		if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
 			dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
 
 		spin_lock_bh(&atchan->lock);
@@ -1652,7 +1653,7 @@
 			atchan = &atxdmac->chan[i];
 			chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
 			chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
-			atchan->status = chan_status & chan_imr;
+			atchan->irq_status = chan_status & chan_imr;
 			dev_vdbg(atxdmac->dma.dev,
 				 "%s: chan%d: imr=0x%x, status=0x%x\n",
 				 __func__, i, chan_imr, chan_status);
@@ -1666,7 +1667,7 @@
 				 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
 				 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
 
-			if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
+			if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
 				at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
 
 			tasklet_schedule(&atchan->tasklet);
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index aa1712b..7b7fba0 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -642,11 +642,9 @@
 			srcs[i] = um->addr[i] + src_off;
 			ret = dma_mapping_error(dev->dev, um->addr[i]);
 			if (ret) {
-				dmaengine_unmap_put(um);
 				result("src mapping error", total_tests,
 				       src_off, dst_off, len, ret);
-				failed_tests++;
-				continue;
+				goto error_unmap_continue;
 			}
 			um->to_cnt++;
 		}
@@ -661,11 +659,9 @@
 					       DMA_BIDIRECTIONAL);
 			ret = dma_mapping_error(dev->dev, dsts[i]);
 			if (ret) {
-				dmaengine_unmap_put(um);
 				result("dst mapping error", total_tests,
 				       src_off, dst_off, len, ret);
-				failed_tests++;
-				continue;
+				goto error_unmap_continue;
 			}
 			um->bidi_cnt++;
 		}
@@ -693,12 +689,10 @@
 		}
 
 		if (!tx) {
-			dmaengine_unmap_put(um);
 			result("prep error", total_tests, src_off,
 			       dst_off, len, ret);
 			msleep(100);
-			failed_tests++;
-			continue;
+			goto error_unmap_continue;
 		}
 
 		done->done = false;
@@ -707,12 +701,10 @@
 		cookie = tx->tx_submit(tx);
 
 		if (dma_submit_error(cookie)) {
-			dmaengine_unmap_put(um);
 			result("submit error", total_tests, src_off,
 			       dst_off, len, ret);
 			msleep(100);
-			failed_tests++;
-			continue;
+			goto error_unmap_continue;
 		}
 		dma_async_issue_pending(chan);
 
@@ -725,16 +717,14 @@
 			dmaengine_unmap_put(um);
 			result("test timed out", total_tests, src_off, dst_off,
 			       len, 0);
-			failed_tests++;
-			continue;
+			goto error_unmap_continue;
 		} else if (status != DMA_COMPLETE) {
 			dmaengine_unmap_put(um);
 			result(status == DMA_ERROR ?
 			       "completion error status" :
 			       "completion busy status", total_tests, src_off,
 			       dst_off, len, ret);
-			failed_tests++;
-			continue;
+			goto error_unmap_continue;
 		}
 
 		dmaengine_unmap_put(um);
@@ -779,6 +769,12 @@
 			verbose_result("test passed", total_tests, src_off,
 				       dst_off, len, 0);
 		}
+
+		continue;
+
+error_unmap_continue:
+		dmaengine_unmap_put(um);
+		failed_tests++;
 	}
 	ktime = ktime_sub(ktime_get(), ktime);
 	ktime = ktime_sub(ktime, comparetime);
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 6bc8e66..c51462f 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -542,6 +542,7 @@
 	case ISCSI_BOOT_TGT_NIC_ASSOC:
 	case ISCSI_BOOT_TGT_CHAP_TYPE:
 		rc = S_IRUGO;
+		break;
 	case ISCSI_BOOT_TGT_NAME:
 		if (tgt->tgt_name_len)
 			rc = S_IRUGO;
diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c
index 2c22836..4596fde1 100644
--- a/drivers/gnss/sirf.c
+++ b/drivers/gnss/sirf.c
@@ -310,30 +310,26 @@
 			ret = -ENODEV;
 			goto err_put_device;
 		}
+
+		ret = regulator_enable(data->vcc);
+		if (ret)
+			goto err_put_device;
+
+		/* Wait for chip to boot into hibernate mode. */
+		msleep(SIRF_BOOT_DELAY);
 	}
 
 	if (data->wakeup) {
 		ret = gpiod_to_irq(data->wakeup);
 		if (ret < 0)
-			goto err_put_device;
-
+			goto err_disable_vcc;
 		data->irq = ret;
 
-		ret = devm_request_threaded_irq(dev, data->irq, NULL,
-				sirf_wakeup_handler,
+		ret = request_threaded_irq(data->irq, NULL, sirf_wakeup_handler,
 				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
 				"wakeup", data);
 		if (ret)
-			goto err_put_device;
-	}
-
-	if (data->on_off) {
-		ret = regulator_enable(data->vcc);
-		if (ret)
-			goto err_put_device;
-
-		/* Wait for chip to boot into hibernate mode */
-		msleep(SIRF_BOOT_DELAY);
+			goto err_disable_vcc;
 	}
 
 	if (IS_ENABLED(CONFIG_PM)) {
@@ -342,7 +338,7 @@
 	} else {
 		ret = sirf_runtime_resume(dev);
 		if (ret < 0)
-			goto err_disable_vcc;
+			goto err_free_irq;
 	}
 
 	ret = gnss_register_device(gdev);
@@ -356,6 +352,9 @@
 		pm_runtime_disable(dev);
 	else
 		sirf_runtime_suspend(dev);
+err_free_irq:
+	if (data->wakeup)
+		free_irq(data->irq, data);
 err_disable_vcc:
 	if (data->on_off)
 		regulator_disable(data->vcc);
@@ -376,6 +375,9 @@
 	else
 		sirf_runtime_suspend(&serdev->dev);
 
+	if (data->wakeup)
+		free_irq(data->irq, data);
+
 	if (data->on_off)
 		regulator_disable(data->vcc);
 
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index d4ad6d0..7e09ce7 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -259,6 +259,7 @@
 	struct vf610_gpio_port *port;
 	struct resource *iores;
 	struct gpio_chip *gc;
+	int i;
 	int ret;
 
 	port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
@@ -298,6 +299,10 @@
 	if (ret < 0)
 		return ret;
 
+	/* Mask all GPIO interrupts */
+	for (i = 0; i < gc->ngpio; i++)
+		vf610_gpio_writel(0, port->base + PORT_PCR(i));
+
 	/* Clear the interrupt status register for all GPIO's */
 	vf610_gpio_writel(~0, port->base + PORT_ISFR);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 7b4e657a..c3df75a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1443,7 +1443,8 @@
 		effective_mode &= ~S_IWUSR;
 
 	if ((adev->flags & AMD_IS_APU) &&
-	    (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
+	    (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
+	     attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
 	     attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
 	     attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
 		return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 1c5d97f..8dcf622 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -37,6 +37,7 @@
 #include "amdgpu_display.h"
 #include <drm/amdgpu_drm.h>
 #include <linux/dma-buf.h>
+#include <linux/dma-fence-array.h>
 
 static const struct dma_buf_ops amdgpu_dmabuf_ops;
 
@@ -188,6 +189,48 @@
 	return ERR_PTR(ret);
 }
 
+static int
+__reservation_object_make_exclusive(struct reservation_object *obj)
+{
+	struct dma_fence **fences;
+	unsigned int count;
+	int r;
+
+	if (!reservation_object_get_list(obj)) /* no shared fences to convert */
+		return 0;
+
+	r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
+	if (r)
+		return r;
+
+	if (count == 0) {
+		/* Now that was unexpected. */
+	} else if (count == 1) {
+		reservation_object_add_excl_fence(obj, fences[0]);
+		dma_fence_put(fences[0]);
+		kfree(fences);
+	} else {
+		struct dma_fence_array *array;
+
+		array = dma_fence_array_create(count, fences,
+					       dma_fence_context_alloc(1), 0,
+					       false);
+		if (!array)
+			goto err_fences_put;
+
+		reservation_object_add_excl_fence(obj, &array->base);
+		dma_fence_put(&array->base);
+	}
+
+	return 0;
+
+err_fences_put:
+	while (count--)
+		dma_fence_put(fences[count]);
+	kfree(fences);
+	return -ENOMEM;
+}
+
 /**
  * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
  * @dma_buf: shared DMA buffer
@@ -219,16 +262,16 @@
 
 	if (attach->dev->driver != adev->dev->driver) {
 		/*
-		 * Wait for all shared fences to complete before we switch to future
-		 * use of exclusive fence on this prime shared bo.
+		 * We only create shared fences for internal use, but importers
+		 * of the dmabuf rely on exclusive fences for implicitly
+		 * tracking write hazards. As any of the current fences may
+		 * correspond to a write, we need to convert all existing
+		 * fences on the reservation object into a single exclusive
+		 * fence.
 		 */
-		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
-							true, false,
-							MAX_SCHEDULE_TIMEOUT);
-		if (unlikely(r < 0)) {
-			DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
+		r = __reservation_object_make_exclusive(bo->tbo.resv);
+		if (r)
 			goto error_unreserve;
-		}
 	}
 
 	/* pin buffer into GTT */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 6a84526..49fe508 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -3011,14 +3011,15 @@
 			 struct amdgpu_task_info *task_info)
 {
 	struct amdgpu_vm *vm;
+	unsigned long flags;
 
-	spin_lock(&adev->vm_manager.pasid_lock);
+	spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
 
 	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
 	if (vm)
 		*task_info = vm->task_info;
 
-	spin_unlock(&adev->vm_manager.pasid_lock);
+	spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
index 16b1a9c..743d3c9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
@@ -32,6 +32,7 @@
 #include "vega10_pptable.h"
 
 #define NUM_DSPCLK_LEVELS 8
+#define VEGA10_ENGINECLOCK_HARDMAX 198000
 
 static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable,
 		enum phm_platform_caps cap)
@@ -258,7 +259,26 @@
 		struct pp_hwmgr *hwmgr,
 		const ATOM_Vega10_POWERPLAYTABLE *powerplay_table)
 {
-	hwmgr->platform_descriptor.overdriveLimit.engineClock =
+	const ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table =
+			(const ATOM_Vega10_GFXCLK_Dependency_Table *)
+			(((unsigned long) powerplay_table) +
+			le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset));
+	bool is_acg_enabled = false;
+	ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_v2;
+
+	if (gfxclk_dep_table->ucRevId == 1) {
+		patom_record_v2 =
+			(ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries;
+		is_acg_enabled =
+			(bool)patom_record_v2[gfxclk_dep_table->ucNumEntries-1].ucACGEnable;
+	}
+
+	if (powerplay_table->ulMaxODEngineClock > VEGA10_ENGINECLOCK_HARDMAX &&
+		!is_acg_enabled)
+		hwmgr->platform_descriptor.overdriveLimit.engineClock =
+			VEGA10_ENGINECLOCK_HARDMAX;
+	else
+		hwmgr->platform_descriptor.overdriveLimit.engineClock =
 			le32_to_cpu(powerplay_table->ulMaxODEngineClock);
 	hwmgr->platform_descriptor.overdriveLimit.memoryClock =
 			le32_to_cpu(powerplay_table->ulMaxODMemoryClock);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 23397c0..94f5c364 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1564,6 +1564,15 @@
 	    old_plane_state->crtc != new_plane_state->crtc)
 		return -EINVAL;
 
+	/*
+	 * FIXME: Since prepare_fb and cleanup_fb are always called on
+	 * the new_plane_state for async updates we need to block framebuffer
+	 * changes. This prevents use of a fb that's been cleaned up and
+	 * double cleanups from occuring.
+	 */
+	if (old_plane_state->fb != new_plane_state->fb)
+		return -EINVAL;
+
 	funcs = plane->helper_private;
 	if (!funcs->atomic_async_update)
 		return -EINVAL;
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index d708472..6794d60 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -51,10 +51,6 @@
 				     int id,
 				     struct drm_dp_payload *payload);
 
-static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
-				  struct drm_dp_mst_port *port,
-				  int offset, int size, u8 *bytes);
-
 static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
 				     struct drm_dp_mst_branch *mstb);
 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
@@ -439,6 +435,7 @@
 	if (idx > raw->curlen)
 		goto fail_len;
 	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
+	idx++;
 	if (idx > raw->curlen)
 		goto fail_len;
 
@@ -1402,7 +1399,6 @@
 	return false;
 }
 
-#if 0
 static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
 {
 	struct drm_dp_sideband_msg_req_body req;
@@ -1415,7 +1411,6 @@
 
 	return 0;
 }
-#endif
 
 static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
 				    bool up, u8 *msg, int len)
@@ -1981,30 +1976,65 @@
 }
 EXPORT_SYMBOL(drm_dp_update_payload_part2);
 
-#if 0 /* unused as of yet */
-static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
+int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
 				 struct drm_dp_mst_port *port,
-				 int offset, int size)
+				 int offset, int size, u8 *bytes)
 {
 	int len;
+	int ret;
 	struct drm_dp_sideband_msg_tx *txmsg;
+	struct drm_dp_mst_branch *mstb;
+
+	memset(bytes, 0, size);
+
+	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+	if (!mstb)
+		return -EINVAL;
 
 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
-	if (!txmsg)
-		return -ENOMEM;
+	if (!txmsg) {
+		ret = -ENOMEM;
+		goto fail_put;
+	}
 
-	len = build_dpcd_read(txmsg, port->port_num, 0, 8);
-	txmsg->dst = port->parent;
+	len = build_dpcd_read(txmsg, port->port_num, offset, size);
+	txmsg->dst = mstb;
 
 	drm_dp_queue_down_tx(mgr, txmsg);
+	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
+	if (ret <= 0) {
+		DRM_ERROR("dpcd read failed\n");
+		goto fail_free_msg;
+	}
 
-	return 0;
+	if (txmsg->reply.reply_type == 1) {
+		DRM_ERROR("dpcd read nack received\n");
+		ret = -EINVAL;
+		goto fail_free_msg;
+	}
+
+	if (port->port_num != txmsg->reply.u.remote_dpcd_read_ack.port_number) {
+		DRM_ERROR("got incorrect port in response\n");
+		ret = -EINVAL;
+		goto fail_free_msg;
+	}
+
+	if (size > txmsg->reply.u.remote_dpcd_read_ack.num_bytes)
+		size = txmsg->reply.u.remote_dpcd_read_ack.num_bytes;
+
+	memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, size);
+
+fail_free_msg:
+	kfree(txmsg);
+fail_put:
+	drm_dp_put_mst_branch_device(mstb);
+	return ret;
 }
-#endif
+EXPORT_SYMBOL(drm_dp_send_dpcd_read);
 
-static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
-				  struct drm_dp_mst_port *port,
-				  int offset, int size, u8 *bytes)
+int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
+			   struct drm_dp_mst_port *port,
+			   int offset, int size, u8 *bytes)
 {
 	int len;
 	int ret;
@@ -2038,6 +2068,7 @@
 	drm_dp_put_mst_branch_device(mstb);
 	return ret;
 }
+EXPORT_SYMBOL(drm_dp_send_dpcd_write);
 
 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
 {
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index bbb8126..9acb9df 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -896,7 +896,7 @@
 	np = dev_pm_opp_get_of_node(opp);
 
 	if (np) {
-		of_property_read_u32(np, "qcom,level", &val);
+		of_property_read_u32(np, "opp-level", &val);
 		of_node_put(np);
 	}
 
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index dc85ccc..3d15cd9 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -882,6 +882,30 @@
 			0xe4);
 }
 
+static void dp_catalog_ctrl_lane_pnswap(struct dp_catalog_ctrl *ctrl,
+						u8 ln_pnswap)
+{
+	struct dp_catalog_private *catalog;
+	struct dp_io_data *io_data;
+	u32 cfg0, cfg1;
+
+	catalog = dp_catalog_get_priv(ctrl);
+
+	cfg0 = 0x0a;
+	cfg1 = 0x0a;
+
+	cfg0 |= ((ln_pnswap >> 0) & 0x1) << 0;
+	cfg0 |= ((ln_pnswap >> 1) & 0x1) << 2;
+	cfg1 |= ((ln_pnswap >> 2) & 0x1) << 0;
+	cfg1 |= ((ln_pnswap >> 3) & 0x1) << 2;
+
+	io_data = catalog->io.dp_ln_tx0;
+	dp_write(catalog->exe_mode, io_data, TXn_TX_POL_INV, cfg0);
+
+	io_data = catalog->io.dp_ln_tx1;
+	dp_write(catalog->exe_mode, io_data, TXn_TX_POL_INV, cfg1);
+}
+
 static void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog_ctrl *ctrl,
 						bool enable)
 {
@@ -2509,6 +2533,7 @@
 		.state_ctrl     = dp_catalog_ctrl_state_ctrl,
 		.config_ctrl    = dp_catalog_ctrl_config_ctrl,
 		.lane_mapping   = dp_catalog_ctrl_lane_mapping,
+		.lane_pnswap    = dp_catalog_ctrl_lane_pnswap,
 		.mainlink_ctrl  = dp_catalog_ctrl_mainlink_ctrl,
 		.set_pattern    = dp_catalog_ctrl_set_pattern,
 		.reset          = dp_catalog_ctrl_reset,
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
index 78aec713..85ed209 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.h
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.h
@@ -93,6 +93,7 @@
 	void (*config_ctrl)(struct dp_catalog_ctrl *ctrl, u8 ln_cnt);
 	void (*lane_mapping)(struct dp_catalog_ctrl *ctrl, bool flipped,
 				char *lane_map);
+	void (*lane_pnswap)(struct dp_catalog_ctrl *ctrl, u8 ln_pnswap);
 	void (*mainlink_ctrl)(struct dp_catalog_ctrl *ctrl, bool enable);
 	void (*set_pattern)(struct dp_catalog_ctrl *ctrl, u32 pattern);
 	void (*reset)(struct dp_catalog_ctrl *ctrl);
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog_v420.c b/drivers/gpu/drm/msm/dp/dp_catalog_v420.c
index d5eebb4..51fa987 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog_v420.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog_v420.c
@@ -266,6 +266,30 @@
 	}
 }
 
+static void dp_catalog_ctrl_lane_pnswap_v420(struct dp_catalog_ctrl *ctrl,
+						u8 ln_pnswap)
+{
+	struct dp_catalog_private_v420 *catalog;
+	struct dp_io_data *io_data;
+	u32 cfg0, cfg1;
+
+	catalog = dp_catalog_get_priv_v420(ctrl);
+
+	cfg0 = 0x0a;
+	cfg1 = 0x0a;
+
+	cfg0 |= ((ln_pnswap >> 0) & 0x1) << 0;
+	cfg0 |= ((ln_pnswap >> 1) & 0x1) << 2;
+	cfg1 |= ((ln_pnswap >> 2) & 0x1) << 0;
+	cfg1 |= ((ln_pnswap >> 3) & 0x1) << 2;
+
+	io_data = catalog->io->dp_ln_tx0;
+	dp_write(catalog->exe_mode, io_data, TXn_TX_POL_INV_V420, cfg0);
+
+	io_data = catalog->io->dp_ln_tx1;
+	dp_write(catalog->exe_mode, io_data, TXn_TX_POL_INV_V420, cfg1);
+}
+
 static void dp_catalog_put_v420(struct dp_catalog *catalog)
 {
 	struct dp_catalog_private_v420 *catalog_priv;
@@ -316,6 +340,7 @@
 	catalog->panel.config_msa  = dp_catalog_panel_config_msa_v420;
 	catalog->ctrl.phy_lane_cfg = dp_catalog_ctrl_phy_lane_cfg_v420;
 	catalog->ctrl.update_vx_px = dp_catalog_ctrl_update_vx_px_v420;
+	catalog->ctrl.lane_pnswap = dp_catalog_ctrl_lane_pnswap_v420;
 
 	/* Set the default execution mode to hardware mode */
 	dp_catalog_set_exe_mode_v420(catalog, "hw");
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index 811ba98..d84417e 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -161,6 +161,8 @@
 	if (enable) {
 		ctrl->catalog->lane_mapping(ctrl->catalog, ctrl->orientation,
 						ctrl->parser->l_map);
+		ctrl->catalog->lane_pnswap(ctrl->catalog,
+						ctrl->parser->l_pnswap);
 		ctrl->catalog->mst_config(ctrl->catalog, ctrl->mst_mode);
 		ctrl->catalog->config_ctrl(ctrl->catalog,
 				ctrl->link->link_params.lane_count);
@@ -1161,7 +1163,7 @@
 
 	ctrl->mst_mode = mst_mode;
 	ctrl->fec_mode = fec_mode;
-	rate = ctrl->panel->get_optimal_link_rate(ctrl->panel);
+	rate = ctrl->panel->link_info.rate;
 
 	if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
 		pr_debug("using phy test link parameters\n");
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c
index e5f1b3e..e581303 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.c
+++ b/drivers/gpu/drm/msm/dp/dp_debug.c
@@ -11,7 +11,6 @@
 #include "dp_power.h"
 #include "dp_catalog.h"
 #include "dp_aux.h"
-#include "dp_ctrl.h"
 #include "dp_debug.h"
 #include "drm_connector.h"
 #include "sde_connector.h"
@@ -41,6 +40,8 @@
 	struct device *dev;
 	struct dp_debug dp_debug;
 	struct dp_parser *parser;
+	struct dp_ctrl *ctrl;
+	struct mutex lock;
 };
 
 static int dp_debug_get_edid_buf(struct dp_debug_private *debug)
@@ -90,6 +91,8 @@
 	if (!debug)
 		return -ENODEV;
 
+	mutex_lock(&debug->lock);
+
 	if (*ppos)
 		goto bail;
 
@@ -161,6 +164,7 @@
 	 */
 	pr_info("[%s]\n", edid ? "SET" : "CLEAR");
 
+	mutex_unlock(&debug->lock);
 	return rc;
 }
 
@@ -180,6 +184,8 @@
 	if (!debug)
 		return -ENODEV;
 
+	mutex_lock(&debug->lock);
+
 	if (*ppos)
 		goto bail;
 
@@ -260,6 +266,7 @@
 	} else
 		debug->aux->dpcd_updated(debug->aux);
 
+	mutex_unlock(&debug->lock);
 	return rc;
 }
 
@@ -747,7 +754,7 @@
 		const char __user *user_buff, size_t count, loff_t *ppos)
 {
 	struct dp_debug_private *debug = file->private_data;
-	char *buf;
+	char buf[SZ_32];
 	size_t len = 0;
 
 	if (!debug)
@@ -757,7 +764,9 @@
 		return 0;
 
 	len = min_t(size_t, count, SZ_32 - 1);
-	buf = memdup_user(user_buff, len);
+	if (copy_from_user(buf, user_buff, len))
+		goto end;
+
 	buf[len] = '\0';
 
 	if (sscanf(buf, "%3s", debug->exe_mode) != 1)
@@ -1437,6 +1446,7 @@
 
 		if (dp_debug_get_dpcd_buf(debug)) {
 			devm_kfree(debug->dev, debug->edid);
+			debug->edid = NULL;
 			return;
 		}
 
@@ -1444,6 +1454,9 @@
 		debug->aux->set_sim_mode(debug->aux, true,
 			debug->edid, debug->dpcd);
 	} else {
+		debug->aux->abort(debug->aux);
+		debug->ctrl->abort(debug->ctrl);
+
 		debug->aux->set_sim_mode(debug->aux, false, NULL, NULL);
 		debug->dp_debug.sim_mode = false;
 
@@ -1482,6 +1495,8 @@
 	if (*ppos)
 		return 0;
 
+	mutex_lock(&debug->lock);
+
 	/* Leave room for termination char */
 	len = min_t(size_t, count, SZ_8 - 1);
 	if (copy_from_user(buf, user_buff, len))
@@ -1494,6 +1509,7 @@
 
 	dp_debug_set_sim_mode(debug, sim);
 end:
+	mutex_unlock(&debug->lock);
 	return len;
 }
 
@@ -1941,6 +1957,14 @@
 		       DEBUG_NAME, rc);
 	}
 
+	file = debugfs_create_u32("max_lclk_khz", 0644, dir,
+			&debug->parser->max_lclk_khz);
+	if (IS_ERR_OR_NULL(file)) {
+		rc = PTR_ERR(file);
+		pr_err("[%s] debugfs max_lclk_khz failed, rc=%d\n",
+		       DEBUG_NAME, rc);
+	}
+
 	return 0;
 
 error_remove_dir:
@@ -1972,7 +1996,9 @@
 
 	debug = container_of(dp_debug, struct dp_debug_private, dp_debug);
 
+	mutex_lock(&debug->lock);
 	dp_debug_set_sim_mode(debug, false);
+	mutex_unlock(&debug->lock);
 }
 
 struct dp_debug *dp_debug_get(struct dp_debug_in *in)
@@ -1981,7 +2007,8 @@
 	struct dp_debug_private *debug;
 	struct dp_debug *dp_debug;
 
-	if (!in->dev || !in->panel || !in->hpd || !in->link || !in->catalog) {
+	if (!in->dev || !in->panel || !in->hpd || !in->link ||
+	    !in->catalog || !in->ctrl) {
 		pr_err("invalid input\n");
 		rc = -EINVAL;
 		goto error;
@@ -2002,12 +2029,15 @@
 	debug->connector = in->connector;
 	debug->catalog = in->catalog;
 	debug->parser = in->parser;
+	debug->ctrl = in->ctrl;
 
 	dp_debug = &debug->dp_debug;
 	dp_debug->vdisplay = 0;
 	dp_debug->hdisplay = 0;
 	dp_debug->vrefresh = 0;
 
+	mutex_init(&debug->lock);
+
 	rc = dp_debug_init(dp_debug);
 	if (rc) {
 		devm_kfree(in->dev, debug);
@@ -2059,6 +2089,8 @@
 
 	dp_debug_deinit(dp_debug);
 
+	mutex_destroy(&debug->lock);
+
 	if (debug->edid)
 		devm_kfree(debug->dev, debug->edid);
 
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h
index dfbc652..11b890e 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.h
+++ b/drivers/gpu/drm/msm/dp/dp_debug.h
@@ -7,6 +7,7 @@
 #define _DP_DEBUG_H_
 
 #include "dp_panel.h"
+#include "dp_ctrl.h"
 #include "dp_link.h"
 #include "dp_usbpd.h"
 #include "dp_aux.h"
@@ -63,6 +64,7 @@
 	struct drm_connector **connector;
 	struct dp_catalog *catalog;
 	struct dp_parser *parser;
+	struct dp_ctrl *ctrl;
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index f2ef730..b326a50 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -103,6 +103,8 @@
 
 	u32 tot_dsc_blks_in_use;
 
+	bool process_hpd_connect;
+
 	struct notifier_block usb_nb;
 };
 
@@ -111,11 +113,6 @@
 	{}
 };
 
-static bool dp_display_framework_ready(struct dp_display_private *dp)
-{
-	return dp->dp_display.post_open ? false : true;
-}
-
 static inline bool dp_display_is_hdcp_enabled(struct dp_display_private *dp)
 {
 	return dp->link->hdcp_status.hdcp_version && dp->hdcp.ops;
@@ -236,16 +233,64 @@
 		struct sde_hdcp_ops *ops = dev->ops;
 		void *fd = dev->fd;
 
-		if (!fd || !ops || (dp->hdcp.source_cap & dev->ver))
+		if (!fd || !ops)
 			continue;
 
-		if (ops->feature_supported(fd))
+		if (ops->set_mode && ops->set_mode(fd, dp->mst.mst_active))
+			continue;
+
+		if (!(dp->hdcp.source_cap & dev->ver) &&
+				ops->feature_supported &&
+				ops->feature_supported(fd))
 			dp->hdcp.source_cap |= dev->ver;
 	}
 
 	dp_display_update_hdcp_status(dp, false);
 }
 
+static void dp_display_hdcp_register_streams(struct dp_display_private *dp)
+{
+	int rc;
+	size_t i;
+	struct sde_hdcp_ops *ops = dp->hdcp.ops;
+	void *data = dp->hdcp.data;
+
+	if (dp_display_is_ready(dp) && dp->mst.mst_active && ops &&
+			ops->register_streams){
+		struct stream_info streams[DP_STREAM_MAX];
+		int index = 0;
+
+		pr_debug("Registering all active panel streams with HDCP\n");
+		for (i = DP_STREAM_0; i < DP_STREAM_MAX; i++) {
+			if (!dp->active_panels[i])
+				continue;
+			streams[index].stream_id = i;
+			streams[index].virtual_channel =
+				dp->active_panels[i]->vcpi;
+			index++;
+		}
+
+		if (index > 0) {
+			rc = ops->register_streams(data, index, streams);
+			if (rc)
+				pr_err("failed to register streams. rc = %d\n",
+					rc);
+		}
+	}
+}
+
+static void dp_display_hdcp_deregister_stream(struct dp_display_private *dp,
+		enum dp_stream_id stream_id)
+{
+	if (dp->hdcp.ops->deregister_streams) {
+		struct stream_info stream = {stream_id,
+				dp->active_panels[stream_id]->vcpi};
+
+		pr_debug("Deregistering stream within HDCP library\n");
+		dp->hdcp.ops->deregister_streams(dp->hdcp.data, 1, &stream);
+	}
+}
+
 static void dp_display_hdcp_cb_work(struct work_struct *work)
 {
 	struct dp_display_private *dp;
@@ -255,12 +300,21 @@
 	void *data;
 	int rc = 0;
 	u32 hdcp_auth_state;
+	u8 sink_status = 0;
 
 	dp = container_of(dw, struct dp_display_private, hdcp_cb_work);
 
 	if (!dp->power_on || !dp->is_connected || atomic_read(&dp->aborted))
 		return;
 
+	drm_dp_dpcd_readb(dp->aux->drm_aux, DP_SINK_STATUS, &sink_status);
+	sink_status &= (DP_RECEIVE_PORT_0_STATUS | DP_RECEIVE_PORT_1_STATUS);
+	if (sink_status < 1) {
+		pr_debug("Sink not synchronized. Queuing again then exiting\n");
+		queue_delayed_work(dp->wq, &dp->hdcp_cb_work, HZ);
+		return;
+	}
+
 	status = &dp->link->hdcp_status;
 
 	if (status->hdcp_state == HDCP_STATE_INACTIVE) {
@@ -268,6 +322,11 @@
 		dp_display_update_hdcp_info(dp);
 
 		if (dp_display_is_hdcp_enabled(dp)) {
+			if (dp->hdcp.ops && dp->hdcp.ops->on &&
+					dp->hdcp.ops->on(dp->hdcp.data)) {
+				dp_display_update_hdcp_status(dp, true);
+				return;
+			}
 			status->hdcp_state = HDCP_STATE_AUTHENTICATING;
 		} else {
 			dp_display_update_hdcp_status(dp, true);
@@ -294,11 +353,18 @@
 
 	switch (status->hdcp_state) {
 	case HDCP_STATE_AUTHENTICATING:
+		dp_display_hdcp_register_streams(dp);
 		if (dp->hdcp.ops && dp->hdcp.ops->authenticate)
 			rc = dp->hdcp.ops->authenticate(data);
 		break;
 	case HDCP_STATE_AUTH_FAIL:
 		if (dp_display_is_ready(dp) && dp->power_on) {
+			if (ops && ops->on && ops->on(data)) {
+				dp_display_update_hdcp_status(dp, true);
+				return;
+			}
+			dp_display_hdcp_register_streams(dp);
+			status->hdcp_state = HDCP_STATE_AUTHENTICATING;
 			if (ops && ops->reauthenticate) {
 				rc = ops->reauthenticate(data);
 				if (rc)
@@ -309,6 +375,7 @@
 		}
 		break;
 	default:
+		dp_display_hdcp_register_streams(dp);
 		break;
 	}
 }
@@ -502,36 +569,6 @@
 			envp);
 }
 
-static void dp_display_post_open(struct dp_display *dp_display)
-{
-	struct drm_connector *connector;
-	struct dp_display_private *dp;
-
-	if (!dp_display) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	dp = container_of(dp_display, struct dp_display_private, dp_display);
-	if (IS_ERR_OR_NULL(dp)) {
-		pr_err("invalid params\n");
-		return;
-	}
-
-	connector = dp->dp_display.base_connector;
-
-	if (!connector) {
-		pr_err("base connector not set\n");
-		return;
-	}
-
-	/* if cable is already connected, send notification */
-	if (dp->hpd->hpd_high)
-		queue_work(dp->wq, &dp->connect_work);
-	else
-		dp_display->post_open = NULL;
-}
-
 static int dp_display_send_hpd_notification(struct dp_display_private *dp)
 {
 	int ret = 0;
@@ -541,6 +578,8 @@
 
 	if (!dp->mst.mst_active)
 		dp->dp_display.is_sst_connected = hpd;
+	else
+		dp->dp_display.is_sst_connected = false;
 
 	reinit_completion(&dp->notification_comp);
 	dp_display_send_hpd_event(dp);
@@ -551,9 +590,6 @@
 	if (!dp->mst.mst_active && (dp->power_on == hpd))
 		goto skip_wait;
 
-	if (!dp_display_framework_ready(dp))
-		goto skip_wait;
-
 	if (!wait_for_completion_timeout(&dp->notification_comp,
 						HZ * 5)) {
 		pr_warn("%s timeout\n", hpd ? "connect" : "disconnect");
@@ -571,30 +607,47 @@
 	dp->panel->mst_state = state;
 }
 
-static void dp_display_process_mst_hpd_high(struct dp_display_private *dp)
+static void dp_display_process_mst_hpd_high(struct dp_display_private *dp,
+						bool mst_probe)
 {
 	bool is_mst_receiver;
 	struct dp_mst_hpd_info info;
+	int ret;
 
-	if (dp->parser->has_mst && dp->mst.drm_registered) {
-		DP_MST_DEBUG("mst_hpd_high work\n");
+	if (!dp->parser->has_mst || !dp->mst.drm_registered) {
+		DP_MST_DEBUG("mst not enabled. has_mst:%d, registered:%d\n",
+				dp->parser->has_mst, dp->mst.drm_registered);
+		return;
+	}
 
+	DP_MST_DEBUG("mst_hpd_high work. mst_probe:%d\n", mst_probe);
+
+	if (!dp->mst.mst_active) {
 		is_mst_receiver = dp->panel->read_mst_cap(dp->panel);
 
-		if (is_mst_receiver && !dp->mst.mst_active) {
-
-			/* clear sink mst state */
-			drm_dp_dpcd_writeb(dp->aux->drm_aux, DP_MSTM_CTRL, 0);
-
-			dp_display_update_mst_state(dp, true);
-
-			info.mst_protocol = dp->parser->has_mst_sideband;
-			info.mst_port_cnt = dp->debug->mst_port_cnt;
-			info.edid = dp->debug->get_edid(dp->debug);
-
-			if (dp->mst.cbs.hpd)
-				dp->mst.cbs.hpd(&dp->dp_display, true, &info);
+		if (!is_mst_receiver) {
+			DP_MST_DEBUG("sink doesn't support mst\n");
+			return;
 		}
+
+		/* clear sink mst state */
+		drm_dp_dpcd_writeb(dp->aux->drm_aux, DP_MSTM_CTRL, 0);
+
+		ret = drm_dp_dpcd_writeb(dp->aux->drm_aux, DP_MSTM_CTRL,
+				 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
+		if (ret < 0) {
+			pr_err("sink mst enablement failed\n");
+			return;
+		}
+
+		dp_display_update_mst_state(dp, true);
+	} else if (dp->mst.mst_active && mst_probe) {
+		info.mst_protocol = dp->parser->has_mst_sideband;
+		info.mst_port_cnt = dp->debug->mst_port_cnt;
+		info.edid = dp->debug->get_edid(dp->debug);
+
+		if (dp->mst.cbs.hpd)
+			dp->mst.cbs.hpd(&dp->dp_display, true, &info);
 	}
 
 	DP_MST_DEBUG("mst_hpd_high. mst_active:%d\n", dp->mst.mst_active);
@@ -648,7 +701,16 @@
 
 static int dp_display_process_hpd_high(struct dp_display_private *dp)
 {
-	int rc = 0;
+	int rc = -EINVAL;
+
+	mutex_lock(&dp->session_lock);
+
+	if (dp->is_connected) {
+		pr_debug("dp already connected, skipping hpd high\n");
+		mutex_unlock(&dp->session_lock);
+		rc = -EISCONN;
+		goto end;
+	}
 
 	dp->is_connected = true;
 
@@ -671,25 +733,32 @@
 	 * ETIMEDOUT --> cable may have been removed
 	 * ENOTCONN --> no downstream device connected
 	 */
-	if (rc == -ETIMEDOUT || rc == -ENOTCONN)
+	if (rc == -ETIMEDOUT || rc == -ENOTCONN) {
+		dp->is_connected = false;
 		goto end;
+	}
 
 	dp->link->process_request(dp->link);
 	dp->panel->handle_sink_request(dp->panel);
 
-	dp_display_process_mst_hpd_high(dp);
+	dp_display_process_mst_hpd_high(dp, false);
 
-	mutex_lock(&dp->session_lock);
 	rc = dp->ctrl->on(dp->ctrl, dp->mst.mst_active,
 				dp->panel->fec_en, false);
 	if (rc) {
-		mutex_unlock(&dp->session_lock);
+		dp->is_connected = false;
 		goto end;
 	}
+
+	dp->process_hpd_connect = false;
+
+	dp_display_process_mst_hpd_high(dp, true);
+end:
 	mutex_unlock(&dp->session_lock);
 
-	dp_display_send_hpd_notification(dp);
-end:
+	if (!rc)
+		dp_display_send_hpd_notification(dp);
+
 	return rc;
 }
 
@@ -715,6 +784,7 @@
 	int rc = 0;
 
 	dp->is_connected = false;
+	dp->process_hpd_connect = false;
 
 	dp_display_process_mst_hpd_low(dp);
 
@@ -755,11 +825,15 @@
 			goto end;
 	}
 
+	mutex_lock(&dp->session_lock);
 	dp_display_host_init(dp);
 
 	/* check for hpd high */
 	if (dp->hpd->hpd_high)
 		queue_work(dp->wq, &dp->connect_work);
+	else
+		dp->process_hpd_connect = true;
+	mutex_unlock(&dp->session_lock);
 end:
 	return rc;
 }
@@ -793,8 +867,10 @@
 {
 	int idx;
 	struct dp_panel *dp_panel;
+	struct dp_link_hdcp_status *status = &dp->link->hdcp_status;
 
-	if (dp_display_is_hdcp_enabled(dp)) {
+	if (dp_display_is_hdcp_enabled(dp) &&
+			status->hdcp_state != HDCP_STATE_INACTIVE) {
 		cancel_delayed_work_sync(&dp->hdcp_cb_work);
 		if (dp->hdcp.ops->off)
 			dp->hdcp.ops->off(dp->hdcp.data);
@@ -878,18 +954,12 @@
 		goto end;
 	}
 
-	/*
-	 * In case cable/dongle is disconnected during adb shell stop,
-	 * reset psm_enabled flag to false since it is no more needed
-	 */
-	if (dp->dp_display.post_open)
-		dp->debug->psm_enabled = false;
-
-	if (dp->debug->psm_enabled)
+	mutex_lock(&dp->session_lock);
+	if (dp->debug->psm_enabled && dp->core_initialized)
 		dp->link->psm_config(dp->link, &dp->panel->link_info, true);
+	mutex_unlock(&dp->session_lock);
 
 	dp_display_disconnect_sync(dp);
-	dp->dp_display.post_open = NULL;
 
 	if (!dp->debug->sim_mode && !dp->parser->no_aux_switch
 	    && !dp->parser->gpio_aux_switch)
@@ -936,11 +1006,19 @@
 	struct dp_display_private *dp = container_of(work,
 			struct dp_display_private, attention_work);
 
-	if (dp->debug->mst_hpd_sim)
-		goto mst_attention;
+	mutex_lock(&dp->session_lock);
 
-	if (dp->link->process_request(dp->link))
+	if (dp->debug->mst_hpd_sim || !dp->core_initialized) {
+		mutex_unlock(&dp->session_lock);
+		goto mst_attention;
+	}
+
+	if (dp->link->process_request(dp->link)) {
+		mutex_unlock(&dp->session_lock);
 		goto cp_irq;
+	}
+
+	mutex_unlock(&dp->session_lock);
 
 	if (dp->link->sink_request & DS_PORT_STATUS_CHANGED) {
 		if (dp_display_is_sink_count_zero(dp)) {
@@ -997,16 +1075,16 @@
 		return -ENODEV;
 	}
 
-	pr_debug("hpd_irq:%d, hpd_high:%d, power_on:%d\n",
+	pr_debug("hpd_irq:%d, hpd_high:%d, power_on:%d, is_connected:%d\n",
 			dp->hpd->hpd_irq, dp->hpd->hpd_high,
-			dp->power_on);
+			dp->power_on, dp->is_connected);
 
 	if (!dp->hpd->hpd_high)
 		dp_display_disconnect_sync(dp);
 	else if ((dp->hpd->hpd_irq && dp->core_initialized) ||
 			dp->debug->mst_hpd_sim)
 		queue_work(dp->wq, &dp->attention_work);
-	else if (!dp->power_on)
+	else if (dp->process_hpd_connect || !dp->is_connected)
 		queue_work(dp->wq, &dp->connect_work);
 	else
 		pr_debug("ignored\n");
@@ -1228,6 +1306,7 @@
 	debug_in.connector = &dp->dp_display.base_connector;
 	debug_in.catalog = dp->catalog;
 	debug_in.parser = dp->parser;
+	debug_in.ctrl = dp->ctrl;
 
 	dp->debug = dp_debug_get(&debug_in);
 	if (IS_ERR(dp->debug)) {
@@ -1310,7 +1389,6 @@
 	const u32 num_components = 3, default_bpp = 24;
 	struct dp_display_private *dp;
 	struct dp_panel *dp_panel;
-	u32 rc;
 
 	if (!dp_display || !panel) {
 		pr_err("invalid input\n");
@@ -1335,14 +1413,7 @@
 			mode->timing.bpp, mode->timing.pixel_clk_khz);
 
 	dp_panel->pinfo = mode->timing;
-	rc = dp_panel->init(dp_panel);
-
-	if (rc == -EAGAIN) {
-		dp->ctrl->off(dp->ctrl);
-		dp->ctrl->on(dp->ctrl, dp->mst.mst_active,
-			dp->panel->fec_en, false);
-	}
-
+	dp_panel->init(dp_panel);
 	mutex_unlock(&dp->session_lock);
 
 	return 0;
@@ -1407,7 +1478,7 @@
 
 static int dp_display_set_stream_info(struct dp_display *dp_display,
 			void *panel, u32 strm_id, u32 start_slot,
-			u32 num_slots, u32 pbn)
+			u32 num_slots, u32 pbn, int vcpi)
 {
 	int rc = 0;
 	struct dp_panel *dp_panel;
@@ -1440,7 +1511,7 @@
 	if (panel) {
 		dp_panel = panel;
 		dp_panel->set_stream_info(dp_panel, strm_id, start_slot,
-				num_slots, pbn);
+				num_slots, pbn, vcpi);
 	}
 
 	mutex_unlock(&dp->session_lock);
@@ -1547,8 +1618,6 @@
 	cancel_delayed_work_sync(&dp->hdcp_cb_work);
 	queue_delayed_work(dp->wq, &dp->hdcp_cb_work, HZ);
 end:
-	/* clear framework event notifier */
-	dp_display->post_open = NULL;
 	dp->aux->state |= DP_STATE_CTRL_POWERED_ON;
 
 	complete_all(&dp->notification_comp);
@@ -1560,7 +1629,9 @@
 {
 	struct dp_display_private *dp;
 	struct dp_panel *dp_panel = panel;
+	struct dp_link_hdcp_status *status;
 	int rc = 0;
+	size_t i;
 
 	if (!dp_display || !panel) {
 		pr_err("invalid input\n");
@@ -1571,19 +1642,35 @@
 
 	mutex_lock(&dp->session_lock);
 
+	status = &dp->link->hdcp_status;
+
 	if (!dp->power_on) {
 		pr_debug("stream already powered off, return\n");
 		goto end;
 	}
 
-	if (dp_display_is_hdcp_enabled(dp)) {
-		cancel_delayed_work_sync(&dp->hdcp_cb_work);
+	if (dp_display_is_hdcp_enabled(dp) &&
+			status->hdcp_state != HDCP_STATE_INACTIVE) {
+		flush_delayed_work(&dp->hdcp_cb_work);
+		if (dp->mst.mst_active) {
+			dp_display_hdcp_deregister_stream(dp,
+				dp_panel->stream_id);
+			for (i = DP_STREAM_0; i < DP_STREAM_MAX; i++) {
+				if (i != dp_panel->stream_id &&
+						dp->active_panels[i]) {
+					pr_debug("Streams are still active. Skip disabling HDCP\n");
+					goto stream;
+				}
+			}
+		}
+
 		if (dp->hdcp.ops->off)
 			dp->hdcp.ops->off(dp->hdcp.data);
 
 		dp_display_update_hdcp_status(dp, true);
 	}
 
+stream:
 	if (dp_panel->audio_supported)
 		dp_panel->audio->off(dp_panel->audio);
 
@@ -1697,14 +1784,6 @@
 		dp->link->psm_config(dp->link, &dp->panel->link_info, true);
 		dp->debug->psm_enabled = true;
 
-		/*
-		 * In case of framework reboot, the DP off sequence is executed
-		 * without any notification from driver. Initialize post_open
-		 * callback to notify DP connection once framework restarts.
-		 */
-		dp_display->post_open = dp_display_post_open;
-		dp->dp_display.is_sst_connected = false;
-
 		dp->ctrl->off(dp->ctrl);
 		dp_display_host_deinit(dp);
 	}
@@ -2267,6 +2346,77 @@
 	return 0;
 }
 
+static int dp_display_mst_connector_update_link_info(
+			struct dp_display *dp_display,
+			struct drm_connector *connector)
+{
+	int rc = 0;
+	struct sde_connector *sde_conn;
+	struct dp_panel *dp_panel;
+	struct dp_display_private *dp;
+
+	if (!dp_display || !connector) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+	if (!dp->mst.drm_registered) {
+		pr_debug("drm mst not registered\n");
+		return -EPERM;
+	}
+
+	sde_conn = to_sde_connector(connector);
+	if (!sde_conn->drv_panel) {
+		pr_err("invalid panel for connector:%d\n", connector->base.id);
+		return -EINVAL;
+	}
+
+	dp_panel = sde_conn->drv_panel;
+
+	memcpy(dp_panel->dpcd, dp->panel->dpcd,
+			DP_RECEIVER_CAP_SIZE + 1);
+	memcpy(dp_panel->dsc_dpcd, dp->panel->dsc_dpcd,
+			DP_RECEIVER_DSC_CAP_SIZE + 1);
+	memcpy(&dp_panel->link_info, &dp->panel->link_info,
+			sizeof(dp_panel->link_info));
+
+	DP_MST_DEBUG("dp mst connector:%d link info updated\n", connector->base.id);
+
+	return rc;
+}
+
+static int dp_display_mst_get_fixed_topology_port(
+			struct dp_display *dp_display,
+			u32 strm_id, u32 *port_num)
+{
+	struct dp_display_private *dp;
+	u32 port;
+
+	if (!dp_display) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	if (strm_id >= DP_STREAM_MAX) {
+		pr_err("invalid stream id:%d\n", strm_id);
+		return -EINVAL;
+	}
+
+	dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+	port = dp->parser->mst_fixed_port[strm_id];
+
+	if (!port || port > 255)
+		return -ENOENT;
+
+	if (port_num)
+		*port_num = port;
+
+	return 0;
+}
+
 static int dp_display_get_mst_caps(struct dp_display *dp_display,
 			struct dp_mst_caps *mst_caps)
 {
@@ -2340,7 +2490,7 @@
 	g_dp_display->unprepare     = dp_display_unprepare;
 	g_dp_display->request_irq   = dp_request_irq;
 	g_dp_display->get_debug     = dp_get_debug;
-	g_dp_display->post_open     = dp_display_post_open;
+	g_dp_display->post_open     = NULL;
 	g_dp_display->post_init     = dp_display_post_init;
 	g_dp_display->config_hdr    = dp_display_config_hdr;
 	g_dp_display->mst_install   = dp_display_mst_install;
@@ -2350,12 +2500,16 @@
 					dp_display_mst_connector_uninstall;
 	g_dp_display->mst_connector_update_edid =
 					dp_display_mst_connector_update_edid;
+	g_dp_display->mst_connector_update_link_info =
+				dp_display_mst_connector_update_link_info;
 	g_dp_display->get_mst_caps = dp_display_get_mst_caps;
 	g_dp_display->set_stream_info = dp_display_set_stream_info;
 	g_dp_display->update_pps = dp_display_update_pps;
 	g_dp_display->convert_to_dp_mode = dp_display_convert_to_dp_mode;
 	g_dp_display->mst_get_connector_info =
 					dp_display_mst_get_connector_info;
+	g_dp_display->mst_get_fixed_topology_port =
+					dp_display_mst_get_fixed_topology_port;
 
 	rc = component_add(&pdev->dev, &dp_display_comp_ops);
 	if (rc) {
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index 410cee7..fe332af 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -100,13 +100,18 @@
 	int (*mst_connector_update_edid)(struct dp_display *dp_display,
 			struct drm_connector *connector,
 			struct edid *edid);
+	int (*mst_connector_update_link_info)(struct dp_display *dp_display,
+			struct drm_connector *connector);
 	int (*mst_get_connector_info)(struct dp_display *dp_display,
 			struct drm_connector *connector,
 			struct dp_mst_connector *mst_conn);
+	int (*mst_get_fixed_topology_port)(struct dp_display *dp_display,
+			u32 strm_id, u32 *port_num);
 	int (*get_mst_caps)(struct dp_display *dp_display,
 			struct dp_mst_caps *mst_caps);
 	int (*set_stream_info)(struct dp_display *dp_display, void *panel,
-			u32 strm_id, u32 start_slot, u32 num_slots, u32 pbn);
+			u32 strm_id, u32 start_slot, u32 num_slots, u32 pbn,
+			int vcpi);
 	void (*convert_to_dp_mode)(struct dp_display *dp_display, void *panel,
 			const struct drm_display_mode *drm_mode,
 			struct dp_display_mode *dp_mode);
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 9b3bb24..b3b116a 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -114,7 +114,7 @@
 	}
 
 	/* for SST force stream id, start slot and total slots to 0 */
-	dp->set_stream_info(dp, bridge->dp_panel, 0, 0, 0, 0);
+	dp->set_stream_info(dp, bridge->dp_panel, 0, 0, 0, 0, 0);
 
 	rc = dp->enable(dp, bridge->dp_panel);
 	if (rc) {
diff --git a/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c b/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c
index 3dd0fa1..f71c25e 100644
--- a/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c
+++ b/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c
@@ -51,7 +51,6 @@
 	u8 rx_status;
 	char abort_mask;
 
-	bool cp_irq_done;
 	bool polling;
 };
 
@@ -66,6 +65,25 @@
 	struct dp_hdcp2p2_int_set *int_set;
 };
 
+static inline int dp_hdcp2p2_valid_handle(struct dp_hdcp2p2_ctrl *ctrl)
+{
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	if (!ctrl->lib_ctx) {
+		pr_err("HDCP library needs to be acquired\n");
+		return -EINVAL;
+	}
+
+	if (!ctrl->lib) {
+		pr_err("invalid lib ops data\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
 static inline bool dp_hdcp2p2_is_valid_state(struct dp_hdcp2p2_ctrl *ctrl)
 {
 	enum hdcp_transport_wakeup_cmd cmd;
@@ -174,6 +192,7 @@
 	if (dp_hdcp2p2_copy_buf(ctrl, data))
 		goto exit;
 
+	ctrl->polling = false;
 	switch (data->cmd) {
 	case HDCP_TRANSPORT_CMD_STATUS_SUCCESS:
 		atomic_set(&ctrl->auth_state, HDCP_STATE_AUTHENTICATED);
@@ -216,38 +235,77 @@
 	atomic_set(&ctrl->auth_state, HDCP_STATE_INACTIVE);
 }
 
+static int dp_hdcp2p2_register(void *input, bool mst_enabled)
+{
+	int rc;
+	enum sde_hdcp_2x_device_type device_type;
+	struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input;
+
+	rc = dp_hdcp2p2_valid_handle(ctrl);
+	if (rc)
+		return rc;
+
+	if (mst_enabled)
+		device_type = HDCP_TXMTR_DP_MST;
+	else
+		device_type = HDCP_TXMTR_DP;
+
+	return sde_hdcp_2x_enable(ctrl->lib_ctx, device_type);
+}
+
+static int dp_hdcp2p2_on(void *input)
+{
+	int rc = 0;
+	struct dp_hdcp2p2_ctrl *ctrl = input;
+	struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_INVALID};
+
+	rc = dp_hdcp2p2_valid_handle(ctrl);
+	if (rc)
+		return rc;
+
+	cdata.cmd = HDCP_2X_CMD_START;
+	cdata.context = ctrl->lib_ctx;
+	rc = ctrl->lib->wakeup(&cdata);
+	if (rc)
+		pr_err("Unable to start the HDCP 2.2 library (%d)\n", rc);
+
+	return rc;
+}
+
 static void dp_hdcp2p2_off(void *input)
 {
+	int rc;
 	struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input;
-	struct hdcp_transport_wakeup_data cdata = {
-					HDCP_TRANSPORT_CMD_AUTHENTICATE};
+	struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_INVALID};
 
-	if (!ctrl) {
-		pr_err("invalid input\n");
+	rc = dp_hdcp2p2_valid_handle(ctrl);
+	if (rc)
 		return;
-	}
 
-	if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
-		pr_err("hdcp is off\n");
-		return;
+	if (atomic_read(&ctrl->auth_state) != HDCP_STATE_AUTH_FAIL) {
+		cdata.cmd = HDCP_2X_CMD_STOP;
+		cdata.context = ctrl->lib_ctx;
+		dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
 	}
 
 	dp_hdcp2p2_set_interrupts(ctrl, false);
 
 	dp_hdcp2p2_reset(ctrl);
 
-	cdata.context = input;
-	dp_hdcp2p2_wakeup(&cdata);
-
 	kthread_park(ctrl->thread);
+
+	sde_hdcp_2x_disable(ctrl->lib_ctx);
 }
 
 static int dp_hdcp2p2_authenticate(void *input)
 {
+	int rc;
 	struct dp_hdcp2p2_ctrl *ctrl = input;
 	struct hdcp_transport_wakeup_data cdata = {
 					HDCP_TRANSPORT_CMD_AUTHENTICATE};
-	int rc = 0;
+	rc = dp_hdcp2p2_valid_handle(ctrl);
+	if (rc)
+		return rc;
 
 	dp_hdcp2p2_set_interrupts(ctrl, true);
 
@@ -370,44 +428,34 @@
 
 static bool dp_hdcp2p2_feature_supported(void *input)
 {
+	int rc;
 	struct dp_hdcp2p2_ctrl *ctrl = input;
 	struct sde_hdcp_2x_ops *lib = NULL;
 	bool supported = false;
 
-	if (!ctrl) {
-		pr_err("invalid input\n");
-		goto end;
-	}
+	rc = dp_hdcp2p2_valid_handle(ctrl);
+	if (rc)
+		return supported;
 
 	lib = ctrl->lib;
-	if (!lib) {
-		pr_err("invalid lib ops data\n");
-		goto end;
-	}
-
 	if (lib->feature_supported)
 		supported = lib->feature_supported(
 			ctrl->lib_ctx);
-end:
+
 	return supported;
 }
 
 static void dp_hdcp2p2_force_encryption(void *data, bool enable)
 {
+	int rc;
 	struct dp_hdcp2p2_ctrl *ctrl = data;
 	struct sde_hdcp_2x_ops *lib = NULL;
 
-	if (!ctrl) {
-		pr_err("invalid input\n");
+	rc = dp_hdcp2p2_valid_handle(ctrl);
+	if (rc)
 		return;
-	}
 
 	lib = ctrl->lib;
-	if (!lib) {
-		pr_err("invalid lib ops data\n");
-		return;
-	}
-
 	if (lib->force_encryption)
 		lib->force_encryption(ctrl->lib_ctx, enable);
 }
@@ -493,26 +541,12 @@
 		return;
 	}
 
-	if (ctrl->rx_status) {
-		if (!ctrl->cp_irq_done) {
-			pr_debug("waiting for CP_IRQ\n");
-			ctrl->polling = true;
-			return;
-		}
-
-		if (ctrl->rx_status & ctrl->sink_rx_status) {
-			ctrl->cp_irq_done = false;
-			ctrl->sink_rx_status = 0;
-			ctrl->rx_status = 0;
-		}
-	}
-
 	dp_hdcp2p2_get_msg_from_sink(ctrl);
 }
 
 static void dp_hdcp2p2_link_check(struct dp_hdcp2p2_ctrl *ctrl)
 {
-	int rc = 0;
+	int rc = 0, retries = 10;
 	struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_INVALID};
 
 	if (!ctrl) {
@@ -545,6 +579,11 @@
 		goto exit;
 	}
 
+	/* wait for polling to start till spec allowed timeout */
+	while (!ctrl->polling && retries--)
+		msleep(20);
+
+	/* check if sink has made a message available */
 	if (ctrl->polling && (ctrl->sink_rx_status & ctrl->rx_status)) {
 		ctrl->sink_rx_status = 0;
 		ctrl->rx_status = 0;
@@ -552,26 +591,19 @@
 		dp_hdcp2p2_get_msg_from_sink(ctrl);
 
 		ctrl->polling = false;
-	} else {
-		ctrl->cp_irq_done = true;
 	}
 exit:
 	if (rc)
 		dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
 }
 
-static void dp_hdcp2p2_manage_session(struct dp_hdcp2p2_ctrl *ctrl)
+static void dp_hdcp2p2_start_auth(struct dp_hdcp2p2_ctrl *ctrl)
 {
-	struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_INVALID};
-
+	struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_START_AUTH};
 	cdata.context = ctrl->lib_ctx;
 
 	if (atomic_read(&ctrl->auth_state) == HDCP_STATE_AUTHENTICATING)
-		cdata.cmd = HDCP_2X_CMD_START;
-	else
-		cdata.cmd = HDCP_2X_CMD_STOP;
-
-	dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
+		dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
 }
 
 static int dp_hdcp2p2_read_rx_status(struct dp_hdcp2p2_ctrl *ctrl,
@@ -617,34 +649,31 @@
 
 static int dp_hdcp2p2_cp_irq(void *input)
 {
-	int rc = 0;
+	int rc;
 	struct dp_hdcp2p2_ctrl *ctrl = input;
 
-	if (!ctrl) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
+	rc = dp_hdcp2p2_valid_handle(ctrl);
+	if (rc)
+		return rc;
 
 	if (atomic_read(&ctrl->auth_state) == HDCP_STATE_AUTH_FAIL ||
 		atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
 		pr_err("invalid hdcp state\n");
-		rc = -EINVAL;
-		goto error;
+		return -EINVAL;
 	}
 
 	ctrl->sink_rx_status = 0;
 	rc = dp_hdcp2p2_read_rx_status(ctrl, &ctrl->sink_rx_status);
 	if (rc) {
 		pr_err("failed to read rx status\n");
-		goto error;
+		return rc;
 	}
 
 	pr_debug("sink_rx_status=0x%x\n", ctrl->sink_rx_status);
 
 	if (!ctrl->sink_rx_status) {
 		pr_debug("not a hdcp 2.2 irq\n");
-		rc = -EINVAL;
-		goto error;
+		return -EINVAL;
 	}
 
 
@@ -652,8 +681,6 @@
 	wake_up(&ctrl->wait_q);
 
 	return 0;
-error:
-	return rc;
 }
 
 static int dp_hdcp2p2_isr(void *input)
@@ -721,6 +748,51 @@
 	return false;
 }
 
+static int dp_hdcp2p2_change_streams(struct dp_hdcp2p2_ctrl *ctrl,
+		struct sde_hdcp_2x_wakeup_data *cdata)
+{
+	if (!ctrl || cdata->num_streams == 0 || !cdata->streams) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	if (!ctrl->lib_ctx) {
+		pr_err("HDCP library needs to be acquired\n");
+		return -EINVAL;
+	}
+
+	if (!ctrl->lib) {
+		pr_err("invalid lib ops data\n");
+		return -EINVAL;
+	}
+
+	cdata->context = ctrl->lib_ctx;
+	return ctrl->lib->wakeup(cdata);
+}
+
+
+static int dp_hdcp2p2_register_streams(void *input, u8 num_streams,
+			struct stream_info *streams)
+{
+	struct dp_hdcp2p2_ctrl *ctrl = input;
+	struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_OPEN_STREAMS};
+
+	cdata.streams = streams;
+	cdata.num_streams = num_streams;
+	return dp_hdcp2p2_change_streams(ctrl, &cdata);
+}
+
+static int dp_hdcp2p2_deregister_streams(void *input, u8 num_streams,
+			struct stream_info *streams)
+{
+	struct dp_hdcp2p2_ctrl *ctrl = input;
+	struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_CLOSE_STREAMS};
+
+	cdata.streams = streams;
+	cdata.num_streams = num_streams;
+	return dp_hdcp2p2_change_streams(ctrl, &cdata);
+}
+
 void sde_dp_hdcp2p2_deinit(void *input)
 {
 	struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input;
@@ -731,9 +803,13 @@
 		return;
 	}
 
-	cdata.cmd = HDCP_2X_CMD_STOP;
-	cdata.context = ctrl->lib_ctx;
-	dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
+	if (atomic_read(&ctrl->auth_state) != HDCP_STATE_AUTH_FAIL) {
+		cdata.cmd = HDCP_2X_CMD_STOP;
+		cdata.context = ctrl->lib_ctx;
+		dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
+	}
+
+	sde_hdcp_2x_deregister(ctrl->lib_ctx);
 
 	kthread_stop(ctrl->thread);
 
@@ -769,7 +845,10 @@
 			dp_hdcp2p2_send_msg(ctrl);
 			break;
 		case HDCP_TRANSPORT_CMD_RECV_MESSAGE:
-			dp_hdcp2p2_recv_msg(ctrl);
+			if (ctrl->rx_status)
+				ctrl->polling = true;
+			else
+				dp_hdcp2p2_recv_msg(ctrl);
 			break;
 		case HDCP_TRANSPORT_CMD_STATUS_SUCCESS:
 			dp_hdcp2p2_send_auth_status(ctrl);
@@ -779,16 +858,13 @@
 			dp_hdcp2p2_send_auth_status(ctrl);
 			break;
 		case HDCP_TRANSPORT_CMD_LINK_POLL:
-			if (ctrl->cp_irq_done)
-				dp_hdcp2p2_recv_msg(ctrl);
-			else
-				ctrl->polling = true;
+			ctrl->polling = true;
 			break;
 		case HDCP_TRANSPORT_CMD_LINK_CHECK:
 			dp_hdcp2p2_link_check(ctrl);
 			break;
 		case HDCP_TRANSPORT_CMD_AUTHENTICATE:
-			dp_hdcp2p2_manage_session(ctrl);
+			dp_hdcp2p2_start_auth(ctrl);
 			break;
 		default:
 			break;
@@ -809,8 +885,12 @@
 		.feature_supported = dp_hdcp2p2_feature_supported,
 		.force_encryption = dp_hdcp2p2_force_encryption,
 		.sink_support = dp_hdcp2p2_supported,
+		.set_mode = dp_hdcp2p2_register,
+		.on = dp_hdcp2p2_on,
 		.off = dp_hdcp2p2_off,
 		.cp_irq = dp_hdcp2p2_cp_irq,
+		.register_streams = dp_hdcp2p2_register_streams,
+		.deregister_streams = dp_hdcp2p2_deregister_streams,
 	};
 
 	static struct hdcp_transport_ops client_ops = {
@@ -865,7 +945,6 @@
 	register_data.hdcp_data = &ctrl->lib_ctx;
 	register_data.client_ops = &client_ops;
 	register_data.ops = &hdcp2x_ops;
-	register_data.device_type = HDCP_TXMTR_DP;
 	register_data.client_data = ctrl;
 
 	rc = sde_hdcp_2x_register(&register_data);
diff --git a/drivers/gpu/drm/msm/dp/dp_mst_drm.c b/drivers/gpu/drm/msm/dp/dp_mst_drm.c
index f528485..508c6dc 100644
--- a/drivers/gpu/drm/msm/dp/dp_mst_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_mst_drm.c
@@ -21,8 +21,8 @@
 #include "dp_drm.h"
 
 #define DP_MST_DEBUG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
+#define DP_MST_INFO_LOG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
 
-#define MAX_DP_MST_STREAMS		2
 #define MAX_DP_MST_DRM_ENCODERS		2
 #define MAX_DP_MST_DRM_BRIDGES		2
 #define HPD_STRING_SIZE			30
@@ -93,12 +93,18 @@
 	struct drm_display_mode drm_mode;
 	struct dp_display_mode dp_mode;
 	struct drm_connector *connector;
+	struct drm_connector *old_connector;
 	void *dp_panel;
+	void *old_dp_panel;
 
 	int vcpi;
 	int pbn;
 	int num_slots;
 	int start_slot;
+
+	u32 fixed_port_num;
+	bool fixed_port_added;
+	struct drm_connector *fixed_connector;
 };
 
 struct dp_mst_private {
@@ -111,6 +117,7 @@
 	struct dp_mst_sim_mode simulator;
 	struct mutex mst_lock;
 	enum dp_drv_state state;
+	bool mst_session_state;
 };
 
 struct dp_mst_encoder_info_cache {
@@ -167,10 +174,13 @@
 			mutex_lock(&mstb->mgr->lock);
 			list_del(&port->next);
 			mutex_unlock(&mstb->mgr->lock);
-			return;
+			goto put_port;
 		}
 		(*mstb->mgr->cbs->register_connector)(port->connector);
 	}
+
+put_port:
+	kref_put(&port->kref, NULL);
 }
 
 static void dp_mst_sim_link_probe_work(struct work_struct *work)
@@ -525,7 +535,8 @@
 
 		mst->dp_display->set_stream_info(mst->dp_display,
 				dp_bridge->dp_panel,
-				dp_bridge->id, start_slot, num_slots, pbn);
+				dp_bridge->id, start_slot, num_slots, pbn,
+				dp_bridge->vcpi);
 
 		pr_info("bridge:%d vcpi:%d start_slot:%d num_slots:%d, pbn:%d\n",
 			dp_bridge->id, dp_bridge->vcpi,
@@ -550,7 +561,8 @@
 
 		mst->dp_display->set_stream_info(mst->dp_display,
 				mst_bridge->dp_panel,
-				mst_bridge->id, start_slot, num_slots, pbn);
+				mst_bridge->id, start_slot, num_slots, pbn,
+				mst_bridge->vcpi);
 	}
 }
 
@@ -672,8 +684,6 @@
 	struct dp_display *dp;
 	struct dp_mst_private *mst;
 
-	DP_MST_DEBUG("enter\n");
-
 	if (!drm_bridge) {
 		pr_err("Invalid params\n");
 		return;
@@ -682,6 +692,9 @@
 	bridge = to_dp_mst_bridge(drm_bridge);
 	dp = bridge->display;
 
+	bridge->old_connector = NULL;
+	bridge->old_dp_panel = NULL;
+
 	if (!bridge->connector) {
 		pr_err("Invalid connector\n");
 		return;
@@ -718,7 +731,14 @@
 		_dp_mst_bridge_pre_enable_part2(bridge);
 	}
 
-	DP_MST_DEBUG("mst bridge [%d] pre enable complete\n", bridge->id);
+	DP_MST_INFO_LOG("mode: id(%d) mode(%s), refresh(%d)\n",
+			bridge->id, bridge->drm_mode.name,
+			bridge->drm_mode.vrefresh);
+	DP_MST_INFO_LOG("dsc: id(%d) dsc(%d)\n", bridge->id,
+			bridge->dp_mode.timing.comp_info.comp_ratio);
+	DP_MST_INFO_LOG("channel: id(%d) vcpi(%d) start(%d) tot(%d)\n",
+			bridge->id, bridge->vcpi, bridge->start_slot,
+			bridge->num_slots);
 end:
 	mutex_unlock(&mst->mst_lock);
 }
@@ -729,8 +749,6 @@
 	struct dp_mst_bridge *bridge;
 	struct dp_display *dp;
 
-	DP_MST_DEBUG("enter\n");
-
 	if (!drm_bridge) {
 		pr_err("Invalid params\n");
 		return;
@@ -751,7 +769,8 @@
 		return;
 	}
 
-	DP_MST_DEBUG("mst bridge [%d] post enable complete\n", bridge->id);
+	DP_MST_INFO_LOG("mst bridge [%d] post enable complete\n",
+			bridge->id);
 }
 
 static void dp_mst_bridge_disable(struct drm_bridge *drm_bridge)
@@ -761,8 +780,6 @@
 	struct dp_display *dp;
 	struct dp_mst_private *mst;
 
-	DP_MST_DEBUG("enter\n");
-
 	if (!drm_bridge) {
 		pr_err("Invalid params\n");
 		return;
@@ -791,7 +808,7 @@
 
 	_dp_mst_bridge_pre_disable_part2(bridge);
 
-	DP_MST_DEBUG("mst bridge [%d] disable complete\n", bridge->id);
+	DP_MST_INFO_LOG("mst bridge [%d] disable complete\n", bridge->id);
 
 	mutex_unlock(&mst->mst_lock);
 }
@@ -803,8 +820,6 @@
 	struct dp_display *dp;
 	struct dp_mst_private *mst;
 
-	DP_MST_DEBUG("enter\n");
-
 	if (!drm_bridge) {
 		pr_err("Invalid params\n");
 		return;
@@ -832,12 +847,17 @@
 	/* maintain the connector to encoder link during suspend/resume */
 	if (mst->state != PM_SUSPEND) {
 		/* Disconnect the connector and panel info from bridge */
+		mst->mst_bridge[bridge->id].old_connector =
+				mst->mst_bridge[bridge->id].connector;
+		mst->mst_bridge[bridge->id].old_dp_panel =
+				mst->mst_bridge[bridge->id].dp_panel;
 		mst->mst_bridge[bridge->id].connector = NULL;
 		mst->mst_bridge[bridge->id].dp_panel = NULL;
 		mst->mst_bridge[bridge->id].encoder_active_sts = false;
 	}
 
-	DP_MST_DEBUG("mst bridge [%d] post disable complete\n", bridge->id);
+	DP_MST_INFO_LOG("mst bridge [%d] post disable complete\n",
+			bridge->id);
 }
 
 static void dp_mst_bridge_mode_set(struct drm_bridge *drm_bridge,
@@ -856,13 +876,21 @@
 
 	bridge = to_dp_mst_bridge(drm_bridge);
 	if (!bridge->connector) {
-		pr_err("Invalid connector\n");
-		return;
+		if (!bridge->old_connector) {
+			pr_err("Invalid connector\n");
+			return;
+		}
+		bridge->connector = bridge->old_connector;
+		bridge->old_connector = NULL;
 	}
 
 	if (!bridge->dp_panel) {
-		pr_err("Invalid dp_panel\n");
-		return;
+		if (!bridge->old_dp_panel) {
+			pr_err("Invalid dp_panel\n");
+			return;
+		}
+		bridge->dp_panel = bridge->old_dp_panel;
+		bridge->old_dp_panel = NULL;
 	}
 
 	dp = bridge->display;
@@ -877,6 +905,10 @@
 
 /* DP MST Bridge APIs */
 
+static struct drm_connector *
+dp_mst_drm_fixed_connector_init(struct dp_display *dp_display,
+				struct drm_encoder *encoder);
+
 static const struct drm_bridge_funcs dp_mst_bridge_ops = {
 	.attach       = dp_mst_bridge_attach,
 	.mode_fixup   = dp_mst_bridge_mode_fixup,
@@ -944,6 +976,23 @@
 
 	DP_MST_DEBUG("mst drm bridge init. bridge id:%d\n", i);
 
+	/*
+	 * If fixed topology port is defined, connector will be created
+	 * immediately.
+	 */
+	rc = display->mst_get_fixed_topology_port(display, bridge->id,
+			&bridge->fixed_port_num);
+	if (!rc) {
+		bridge->fixed_connector =
+			dp_mst_drm_fixed_connector_init(display,
+				bridge->encoder);
+		if (bridge->fixed_connector == NULL) {
+			pr_err("failed to create fixed connector\n");
+			rc = -ENOMEM;
+			goto end;
+		}
+	}
+
 	return 0;
 
 end:
@@ -1136,7 +1185,8 @@
 	}
 
 	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
-		if (!mst->mst_bridge[i].encoder_active_sts) {
+		if (!mst->mst_bridge[i].encoder_active_sts &&
+			!mst->mst_bridge[i].fixed_connector) {
 			mst->mst_bridge[i].encoder_active_sts = true;
 			mst->mst_bridge[i].connector = connector;
 			mst->mst_bridge[i].dp_panel = conn->drv_panel;
@@ -1343,6 +1393,7 @@
 
 	if (!connector) {
 		pr_err("mst sde_connector_init failed\n");
+		drm_modeset_unlock_all(dev);
 		return connector;
 	}
 
@@ -1350,6 +1401,7 @@
 	if (rc) {
 		pr_err("mst connector install failed\n");
 		sde_connector_destroy(connector);
+		drm_modeset_unlock_all(dev);
 		return NULL;
 	}
 
@@ -1372,7 +1424,7 @@
 	/* unlock connector and make it accessible */
 	drm_modeset_unlock_all(dev);
 
-	DP_MST_DEBUG("add mst connector:%d\n", connector->base.id);
+	DP_MST_INFO_LOG("add mst connector id:%d\n", connector->base.id);
 
 	return connector;
 }
@@ -1383,7 +1435,8 @@
 
 	connector->status = connector->funcs->detect(connector, false);
 
-	DP_MST_DEBUG("register mst connector:%d\n", connector->base.id);
+	DP_MST_INFO_LOG("register mst connector id:%d\n",
+			connector->base.id);
 	drm_connector_register(connector);
 }
 
@@ -1392,12 +1445,297 @@
 {
 	DP_MST_DEBUG("enter\n");
 
-	DP_MST_DEBUG("destroy mst connector:%d\n", connector->base.id);
+	DP_MST_INFO_LOG("destroy mst connector id:%d\n", connector->base.id);
 
 	drm_connector_unregister(connector);
 	drm_connector_put(connector);
 }
 
+static enum drm_connector_status
+dp_mst_fixed_connector_detect(struct drm_connector *connector, bool force,
+			void *display)
+{
+	struct dp_display *dp_display = display;
+	struct dp_mst_private *mst = dp_display->dp_mst_prv_info;
+	int i;
+
+	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
+		if (mst->mst_bridge[i].fixed_connector != connector)
+			continue;
+
+		if (!mst->mst_bridge[i].fixed_port_added)
+			break;
+
+		return dp_mst_connector_detect(connector, force, display);
+	}
+
+	return connector_status_disconnected;
+}
+
+static struct drm_encoder *
+dp_mst_fixed_atomic_best_encoder(struct drm_connector *connector,
+			void *display, struct drm_connector_state *state)
+{
+	struct dp_display *dp_display = display;
+	struct dp_mst_private *mst = dp_display->dp_mst_prv_info;
+	struct sde_connector *conn = to_sde_connector(connector);
+	struct drm_encoder *enc = NULL;
+	u32 i;
+
+	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
+		if (mst->mst_bridge[i].connector == connector) {
+			enc = mst->mst_bridge[i].encoder;
+			goto end;
+		}
+	}
+
+	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
+		if (mst->mst_bridge[i].fixed_connector == connector) {
+			mst->mst_bridge[i].encoder_active_sts = true;
+			mst->mst_bridge[i].connector = connector;
+			mst->mst_bridge[i].dp_panel = conn->drv_panel;
+			enc = mst->mst_bridge[i].encoder;
+			break;
+		}
+	}
+
+end:
+	if (enc)
+		DP_MST_DEBUG("mst connector:%d atomic best encoder:%d\n",
+			connector->base.id, i);
+	else
+		DP_MST_DEBUG("mst connector:%d atomic best encoder failed\n",
+				connector->base.id);
+
+	return enc;
+}
+
+static u32 dp_mst_find_fixed_port_num(struct drm_dp_mst_branch *mstb,
+		struct drm_dp_mst_port *target)
+{
+	struct drm_dp_mst_port *port;
+	u32 port_num = 0;
+
+	/*
+	 * search through reversed order of adding sequence, so the port number
+	 * will be unique once topology is fixed
+	 */
+	list_for_each_entry_reverse(port, &mstb->ports, next) {
+		if (port->mstb)
+			port_num += dp_mst_find_fixed_port_num(port->mstb,
+						target);
+		else if (!port->input) {
+			++port_num;
+			if (port == target)
+				break;
+		}
+	}
+
+	return port_num;
+}
+
+static struct drm_connector *
+dp_mst_find_fixed_connector(struct dp_mst_private *dp_mst,
+		struct drm_dp_mst_port *port)
+{
+	struct dp_display *dp_display = dp_mst->dp_display;
+	struct drm_connector *connector = NULL;
+	struct sde_connector *c_conn;
+	u32 port_num;
+	int i;
+
+	mutex_lock(&port->mgr->lock);
+	port_num = dp_mst_find_fixed_port_num(port->mgr->mst_primary, port);
+	mutex_unlock(&port->mgr->lock);
+
+	if (!port_num)
+		return NULL;
+
+	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
+		if (dp_mst->mst_bridge[i].fixed_port_num == port_num) {
+			connector = dp_mst->mst_bridge[i].fixed_connector;
+			c_conn = to_sde_connector(connector);
+			c_conn->mst_port = port;
+			dp_display->mst_connector_update_link_info(dp_display,
+					connector);
+			dp_mst->mst_bridge[i].fixed_port_added = true;
+			DP_MST_DEBUG("found fixed connector %d\n",
+					DRMID(connector));
+			break;
+		}
+	}
+
+	return connector;
+}
+
+static int
+dp_mst_find_first_available_encoder_idx(struct dp_mst_private *dp_mst)
+{
+	int enc_idx = MAX_DP_MST_DRM_BRIDGES;
+	int i;
+
+	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
+		if (!dp_mst->mst_bridge[i].fixed_connector) {
+			enc_idx = i;
+			break;
+		}
+	}
+
+	return enc_idx;
+}
+
+static struct drm_connector *
+dp_mst_add_fixed_connector(struct drm_dp_mst_topology_mgr *mgr,
+		struct drm_dp_mst_port *port, const char *pathprop)
+{
+	struct dp_mst_private *dp_mst;
+	struct drm_device *dev;
+	struct dp_display *dp_display;
+	struct drm_connector *connector;
+	int i, enc_idx;
+
+	DP_MST_DEBUG("enter\n");
+
+	dp_mst = container_of(mgr, struct dp_mst_private, mst_mgr);
+
+	dp_display = dp_mst->dp_display;
+	dev = dp_display->drm_dev;
+
+	if (port->input || port->mstb)
+		enc_idx = MAX_DP_MST_DRM_BRIDGES;
+	else {
+		/* if port is already reserved, return immediately */
+		connector = dp_mst_find_fixed_connector(dp_mst, port);
+		if (connector != NULL)
+			return connector;
+
+		/* first available bridge index for non-reserved port */
+		enc_idx = dp_mst_find_first_available_encoder_idx(dp_mst);
+	}
+
+	/* add normal connector */
+	connector = dp_mst_add_connector(mgr, port, pathprop);
+	if (!connector) {
+		DP_MST_DEBUG("failed to add connector\n");
+		return NULL;
+	}
+
+	drm_modeset_lock_all(dev);
+
+	/* clear encoder list */
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
+		connector->encoder_ids[i] = 0;
+
+	/* re-attach encoders from first available encoders */
+	for (i = enc_idx; i < MAX_DP_MST_DRM_BRIDGES; i++)
+		drm_connector_attach_encoder(connector,
+				dp_mst->mst_bridge[i].encoder);
+
+	drm_modeset_unlock_all(dev);
+
+	DP_MST_DEBUG("add mst connector:%d\n", connector->base.id);
+
+	return connector;
+}
+
+static void dp_mst_register_fixed_connector(struct drm_connector *connector)
+{
+	struct sde_connector *c_conn = to_sde_connector(connector);
+	struct dp_display *dp_display = c_conn->display;
+	struct dp_mst_private *dp_mst = dp_display->dp_mst_prv_info;
+	int i;
+
+	DP_MST_DEBUG("enter\n");
+
+	/* skip connector registered for fixed topology ports */
+	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
+		if (dp_mst->mst_bridge[i].fixed_connector == connector) {
+			DP_MST_DEBUG("found fixed connector %d\n",
+					DRMID(connector));
+			return;
+		}
+	}
+
+	dp_mst_register_connector(connector);
+}
+
+static void dp_mst_destroy_fixed_connector(struct drm_dp_mst_topology_mgr *mgr,
+					   struct drm_connector *connector)
+{
+	struct dp_mst_private *dp_mst;
+	int i;
+
+	DP_MST_DEBUG("enter\n");
+
+	dp_mst = container_of(mgr, struct dp_mst_private, mst_mgr);
+
+	/* skip connector destroy for fixed topology ports */
+	for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
+		if (dp_mst->mst_bridge[i].fixed_connector == connector) {
+			dp_mst->mst_bridge[i].fixed_port_added = false;
+			DP_MST_DEBUG("destroy fixed connector %d\n",
+					DRMID(connector));
+			return;
+		}
+	}
+
+	dp_mst_destroy_connector(mgr, connector);
+}
+
+static struct drm_connector *
+dp_mst_drm_fixed_connector_init(struct dp_display *dp_display,
+			struct drm_encoder *encoder)
+{
+	static const struct sde_connector_ops dp_mst_connector_ops = {
+		.post_init  = NULL,
+		.detect     = dp_mst_fixed_connector_detect,
+		.get_modes  = dp_mst_connector_get_modes,
+		.mode_valid = dp_mst_connector_mode_valid,
+		.get_info   = dp_mst_connector_get_info,
+		.get_mode_info  = dp_mst_connector_get_mode_info,
+		.atomic_best_encoder = dp_mst_fixed_atomic_best_encoder,
+		.atomic_check = dp_mst_connector_atomic_check,
+		.config_hdr = dp_mst_connector_config_hdr,
+		.pre_destroy = dp_mst_connector_pre_destroy,
+	};
+	struct drm_device *dev;
+	struct drm_connector *connector;
+	int rc;
+
+	DP_MST_DEBUG("enter\n");
+
+	dev = dp_display->drm_dev;
+
+	connector = sde_connector_init(dev,
+				encoder,
+				NULL,
+				dp_display,
+				&dp_mst_connector_ops,
+				DRM_CONNECTOR_POLL_HPD,
+				DRM_MODE_CONNECTOR_DisplayPort);
+
+	if (!connector) {
+		pr_err("mst sde_connector_init failed\n");
+		return NULL;
+	}
+
+	rc = dp_display->mst_connector_install(dp_display, connector);
+	if (rc) {
+		pr_err("mst connector install failed\n");
+		sde_connector_destroy(connector);
+		return NULL;
+	}
+
+	drm_object_attach_property(&connector->base,
+			dev->mode_config.path_property, 0);
+	drm_object_attach_property(&connector->base,
+			dev->mode_config.tile_property, 0);
+
+	DP_MST_DEBUG("add mst fixed connector:%d\n", connector->base.id);
+
+	return connector;
+}
+
 static void dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
 {
 	struct dp_mst_private *mst = container_of(mgr, struct dp_mst_private,
@@ -1411,7 +1749,7 @@
 
 	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
 
-	DP_MST_DEBUG("mst hot plug event\n");
+	DP_MST_INFO_LOG("mst hot plug event\n");
 }
 
 static void dp_mst_hpd_event_notify(struct dp_mst_private *mst, bool hpd_status)
@@ -1432,7 +1770,7 @@
 
 	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
 
-	DP_MST_DEBUG("%s finished\n", __func__);
+	DP_MST_INFO_LOG("%s finished\n", __func__);
 }
 
 /* DP Driver Callback OPs */
@@ -1444,7 +1782,9 @@
 	struct dp_display *dp = dp_display;
 	struct dp_mst_private *mst = dp->dp_mst_prv_info;
 
-	DP_MST_DEBUG("enter:\n");
+	mutex_lock(&mst->mst_lock);
+	mst->mst_session_state = hpd_status;
+	mutex_unlock(&mst->mst_lock);
 
 	if (!hpd_status)
 		rc = mst->mst_fw_cbs->topology_mgr_set_mst(&mst->mst_mgr,
@@ -1466,9 +1806,7 @@
 
 	dp_mst_hpd_event_notify(mst, hpd_status);
 
-	DP_MST_DEBUG("mst display hpd:%d, rc:%d\n", hpd_status, rc);
-
-	DP_MST_DEBUG("exit:\n");
+	DP_MST_INFO_LOG("mst display hpd:%d, rc:%d\n", hpd_status, rc);
 }
 
 static void dp_mst_display_hpd_irq(void *dp_display,
@@ -1477,26 +1815,29 @@
 	int rc;
 	struct dp_display *dp = dp_display;
 	struct dp_mst_private *mst = dp->dp_mst_prv_info;
-	u8 esi[14], idx;
+	u8 esi[14];
 	unsigned int esi_res = DP_SINK_COUNT_ESI + 1;
 	bool handled;
 
-	DP_MST_DEBUG("enter:\n");
-
 	if (info->mst_hpd_sim) {
 		dp_mst_hotplug(&mst->mst_mgr);
 		return;
 	}
 
+	if (!mst->mst_session_state) {
+		pr_err("mst_hpd_irq received before mst session start\n");
+		return;
+	}
+
 	rc = drm_dp_dpcd_read(mst->caps.drm_aux, DP_SINK_COUNT_ESI,
 		esi, 14);
 	if (rc != 14) {
-		pr_err("dpcd sync status read failed, rlen=%d\n", rc);
-		goto end;
+		pr_err("dpcd sink status read failed, rlen=%d\n", rc);
+		return;
 	}
 
-	for (idx = 0; idx < 14; idx++)
-		DP_MST_DEBUG("mst irq: esi[%d]: 0x%x\n", idx, esi[idx]);
+	DP_MST_DEBUG("mst irq: esi1[0x%x] esi2[0x%x] esi3[%x]\n",
+			esi[1], esi[2], esi[3]);
 
 	rc = drm_dp_mst_hpd_irq(&mst->mst_mgr, esi, &handled);
 
@@ -1509,9 +1850,6 @@
 	}
 
 	DP_MST_DEBUG("mst display hpd_irq handled:%d rc:%d\n", handled, rc);
-
-end:
-	DP_MST_DEBUG("exit:\n");
 }
 
 static void dp_mst_set_state(void *dp_display, enum dp_drv_state mst_state)
@@ -1525,6 +1863,7 @@
 	}
 
 	mst->state = mst_state;
+	DP_MST_INFO_LOG("mst power state:%d\n", mst_state);
 }
 
 /* DP MST APIs */
@@ -1542,6 +1881,13 @@
 	.hotplug = dp_mst_hotplug,
 };
 
+static const struct drm_dp_mst_topology_cbs dp_mst_fixed_drm_cbs = {
+	.add_connector = dp_mst_add_fixed_connector,
+	.register_connector = dp_mst_register_fixed_connector,
+	.destroy_connector = dp_mst_destroy_fixed_connector,
+	.hotplug = dp_mst_hotplug,
+};
+
 static void dp_mst_sim_init(struct dp_mst_private *mst)
 {
 	INIT_WORK(&mst->simulator.probe_work, dp_mst_sim_link_probe_work);
@@ -1606,7 +1952,11 @@
 	}
 	memset(&dp_mst_enc_cache, 0, sizeof(dp_mst_enc_cache));
 
-	DP_MST_DEBUG("dp drm mst topology manager init completed\n");
+	/* choose fixed callback function if fixed topology is found */
+	if (!dp_display->mst_get_fixed_topology_port(dp_display, 0, NULL))
+		dp_mst.mst_mgr.cbs = &dp_mst_fixed_drm_cbs;
+
+	DP_MST_INFO_LOG("dp drm mst topology manager init completed\n");
 
 	return ret;
 
@@ -1637,6 +1987,6 @@
 
 	mutex_destroy(&mst->mst_lock);
 
-	DP_MST_DEBUG("dp drm mst topology manager deinit completed\n");
+	DP_MST_INFO_LOG("dp drm mst topology manager deinit completed\n");
 }
 
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 15413f6..d98ebcf 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -74,9 +74,6 @@
 	u8 spd_product_description[16];
 	u8 major;
 	u8 minor;
-	u32 bpp;
-	u32 active_pclk;
-	u32 optimal_link_rate;
 };
 
 static const struct dp_panel_info fail_safe = {
@@ -1533,12 +1530,14 @@
 	struct dp_dsc_slices_per_line *rec;
 	int slice_width;
 	u32 ppr = dp_mode->timing.pixel_clk_khz/1000;
+	int max_slice_width;
 
 	comp_info->dsc_info.slice_per_pkt = 0;
 	for (i = 0; i < ARRAY_SIZE(slice_per_line_tbl); i++) {
 		rec = &slice_per_line_tbl[i];
 		if ((ppr > rec->min_ppr) && (ppr <= rec->max_ppr)) {
 			comp_info->dsc_info.slice_per_pkt = rec->num_slices;
+			i++;
 			break;
 		}
 	}
@@ -1546,9 +1545,21 @@
 	if (comp_info->dsc_info.slice_per_pkt == 0)
 		return -EINVAL;
 
+	max_slice_width = dp_panel->dsc_dpcd[12] * 320;
 	slice_width = (dp_mode->timing.h_active /
 				comp_info->dsc_info.slice_per_pkt);
 
+	while (slice_width >= max_slice_width) {
+		if (i == ARRAY_SIZE(slice_per_line_tbl))
+			return -EINVAL;
+
+		rec = &slice_per_line_tbl[i];
+		comp_info->dsc_info.slice_per_pkt = rec->num_slices;
+		slice_width = (dp_mode->timing.h_active /
+				comp_info->dsc_info.slice_per_pkt);
+		i++;
+	}
+
 	comp_info->dsc_info.block_pred_enable =
 			dp_panel->sink_dsc_caps.block_pred_en;
 	comp_info->dsc_info.vbr_enable = 0;
@@ -1660,8 +1671,8 @@
 	panel->minor = link_info->revision & 0x0f;
 	pr_debug("version: %d.%d\n", panel->major, panel->minor);
 
-	link_info->rate =
-		drm_dp_bw_code_to_link_rate(dp_panel->dpcd[DP_MAX_LINK_RATE]);
+	link_info->rate = min_t(unsigned long, panel->parser->max_lclk_khz,
+		drm_dp_bw_code_to_link_rate(dp_panel->dpcd[DP_MAX_LINK_RATE]));
 	pr_debug("link_rate=%d\n", link_info->rate);
 
 	link_info->num_lanes = dp_panel->dpcd[DP_MAX_LANE_COUNT] &
@@ -1767,50 +1778,12 @@
 	return 0;
 }
 
-static u32 dp_panel_get_optimal_link_rate(struct dp_panel *dp_panel)
-{
-	struct dp_panel_private *panel;
-	u32 lrate, rate = 0;
-
-	if (!dp_panel) {
-		pr_err("invalid input\n");
-		goto end;
-	}
-
-	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
-
-	/*
-	 * As MST can support multiple streams,
-	 * do not optimize the link rate for MST.
-	 */
-	if (panel->dp_panel.mst_state) {
-		rate = panel->dp_panel.link_info.rate;
-		goto end;
-	}
-
-	lrate = ((panel->active_pclk / panel->dp_panel.link_info.num_lanes) *
-			panel->bpp) / 8;
-
-	if (lrate <= DP_LINK_RATE_RBR)
-		rate = DP_LINK_RATE_RBR;
-	else if (lrate <= DP_LINK_RATE_HBR)
-		rate = DP_LINK_RATE_HBR;
-	else if (lrate <= DP_LINK_RATE_HBR2)
-		rate = DP_LINK_RATE_HBR2;
-	else
-		rate = DP_LINK_RATE_HBR3;
-end:
-	panel->optimal_link_rate = rate;
-	return rate;
-}
-
 static int dp_panel_read_edid(struct dp_panel *dp_panel,
 	struct drm_connector *connector)
 {
 	int ret = 0;
 	struct dp_panel_private *panel;
 	struct edid *edid;
-	struct drm_display_mode *mode;
 
 	if (!dp_panel) {
 		pr_err("invalid input\n");
@@ -1831,16 +1804,6 @@
 		ret = -EINVAL;
 		goto end;
 	}
-
-	mutex_lock(&connector->dev->mode_config.mutex);
-	_sde_edid_update_modes(connector, dp_panel->edid_ctrl);
-	mutex_unlock(&connector->dev->mode_config.mutex);
-
-	mode = list_first_entry(&connector->probed_modes,
-				 struct drm_display_mode, head);
-
-	panel->bpp = connector->display_info.bpc * 3;
-	panel->active_pclk = mode->clock;
 end:
 	edid = dp_panel->edid_ctrl->edid;
 	dp_panel->audio_supported = drm_detect_monitor_audio(edid);
@@ -2356,13 +2319,14 @@
 
 static int dp_panel_set_stream_info(struct dp_panel *dp_panel,
 		enum dp_stream_id stream_id, u32 ch_start_slot,
-			u32 ch_tot_slots, u32 pbn)
+			u32 ch_tot_slots, u32 pbn, int vcpi)
 {
 	if (!dp_panel || stream_id > DP_STREAM_MAX) {
 		pr_err("invalid input. stream_id: %d\n", stream_id);
 		return -EINVAL;
 	}
 
+	dp_panel->vcpi = vcpi;
 	dp_panel->stream_id = stream_id;
 	dp_panel->channel_start_slot = ch_start_slot;
 	dp_panel->channel_total_slots = ch_tot_slots;
@@ -2376,7 +2340,6 @@
 	int rc = 0;
 	struct dp_panel_private *panel;
 	struct dp_panel_info *pinfo;
-	u32 current_link_rate;
 
 	if (!dp_panel) {
 		pr_err("invalid input\n");
@@ -2400,13 +2363,6 @@
 		pinfo->refresh_rate, pinfo->bpp, pinfo->pixel_clk_khz,
 		panel->link->link_params.bw_code,
 		panel->link->link_params.lane_count);
-
-	panel->active_pclk = pinfo->pixel_clk_khz;
-	current_link_rate = panel->optimal_link_rate;
-	dp_panel_get_optimal_link_rate(dp_panel);
-
-	if (panel->optimal_link_rate != current_link_rate)
-		rc = -EAGAIN;
 end:
 	return rc;
 }
@@ -2435,7 +2391,7 @@
 	if (!panel->custom_edid && dp_panel->edid_ctrl->edid)
 		sde_free_edid((void **)&dp_panel->edid_ctrl);
 
-	dp_panel_set_stream_info(dp_panel, DP_STREAM_MAX, 0, 0, 0);
+	dp_panel_set_stream_info(dp_panel, DP_STREAM_MAX, 0, 0, 0, 0);
 	memset(&dp_panel->pinfo, 0, sizeof(dp_panel->pinfo));
 	memset(&hdr->hdr_meta, 0, sizeof(hdr->hdr_meta));
 	panel->panel_on = false;
@@ -2971,32 +2927,36 @@
 		goto error;
 	}
 
-	dp_panel = &panel->dp_panel;
-
-	if (in->base_panel) {
-		struct dp_panel_private *base_panel_priv =
-			container_of(in->base_panel,
-				struct dp_panel_private, dp_panel);
-
-		memcpy(panel, base_panel_priv, sizeof(*panel));
-
-		goto update;
-	}
-
 	panel->dev = in->dev;
 	panel->aux = in->aux;
 	panel->catalog = in->catalog;
 	panel->link = in->link;
 	panel->parser = in->parser;
 
+	dp_panel = &panel->dp_panel;
 	dp_panel->max_bw_code = DP_LINK_BW_8_1;
 	dp_panel->spd_enabled = true;
 	memcpy(panel->spd_vendor_name, vendor_name, (sizeof(u8) * 8));
 	memcpy(panel->spd_product_description, product_desc, (sizeof(u8) * 16));
+	dp_panel->connector = in->connector;
 
 	dp_panel->dsc_feature_enable = panel->parser->dsc_feature_enable;
 	dp_panel->fec_feature_enable = panel->parser->fec_feature_enable;
 
+	if (in->base_panel) {
+		memcpy(dp_panel->dpcd, in->base_panel->dpcd,
+				DP_RECEIVER_CAP_SIZE + 1);
+		memcpy(dp_panel->dsc_dpcd, in->base_panel->dsc_dpcd,
+				DP_RECEIVER_DSC_CAP_SIZE + 1);
+		memcpy(&dp_panel->link_info, &in->base_panel->link_info,
+				sizeof(dp_panel->link_info));
+		dp_panel->mst_state = in->base_panel->mst_state;
+		dp_panel->widebus_en = in->base_panel->widebus_en;
+		dp_panel->fec_en = in->base_panel->fec_en;
+		dp_panel->dsc_en = in->base_panel->dsc_en;
+		dp_panel->fec_overhead_fp = in->base_panel->fec_overhead_fp;
+	}
+
 	dp_panel->init = dp_panel_init_panel_info;
 	dp_panel->deinit = dp_panel_deinit_panel_info;
 	dp_panel->hw_cfg = dp_panel_hw_cfg;
@@ -3017,9 +2977,7 @@
 	dp_panel->read_mst_cap = dp_panel_read_mst_cap;
 	dp_panel->convert_to_dp_mode = dp_panel_convert_to_dp_mode;
 	dp_panel->update_pps = dp_panel_update_pps;
-	dp_panel->get_optimal_link_rate = dp_panel_get_optimal_link_rate;
-update:
-	dp_panel->connector = in->connector;
+
 	sde_conn = to_sde_connector(dp_panel->connector);
 	sde_conn->drv_panel = dp_panel;
 
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index a3473ec..dc96090 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -18,11 +18,6 @@
 #define DP_RECEIVER_DSC_CAP_SIZE    15
 #define DP_RECEIVER_FEC_STATUS_SIZE 3
 
-#define DP_LINK_RATE_RBR  162000
-#define DP_LINK_RATE_HBR  270000
-#define DP_LINK_RATE_HBR2 540000
-#define DP_LINK_RATE_HBR3 810000
-
 /*
  * A source initiated power down flag is set
  * when the DP is powered off while physical
@@ -115,6 +110,7 @@
 	 * Client sets the stream id value using set_stream_id interface.
 	 */
 	enum dp_stream_id stream_id;
+	int vcpi;
 
 	u32 channel_start_slot;
 	u32 channel_total_slots;
@@ -159,7 +155,7 @@
 
 	int (*set_stream_info)(struct dp_panel *dp_panel,
 			enum dp_stream_id stream_id, u32 ch_start_slot,
-			u32 ch_tot_slots, u32 pbn);
+			u32 ch_tot_slots, u32 pbn, int vcpi);
 
 	int (*read_sink_status)(struct dp_panel *dp_panel, u8 *sts, u32 size);
 	int (*update_edid)(struct dp_panel *dp_panel, struct edid *edid);
@@ -168,7 +164,6 @@
 		const struct drm_display_mode *drm_mode,
 		struct dp_display_mode *dp_mode);
 	void (*update_pps)(struct dp_panel *dp_panel, char *pps_cmd);
-	u32 (*get_optimal_link_rate)(struct dp_panel *dp_panel);
 };
 
 struct dp_tu_calc_input {
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.c b/drivers/gpu/drm/msm/dp/dp_parser.c
index b0a6d24..bc4369d 100644
--- a/drivers/gpu/drm/msm/dp/dp_parser.c
+++ b/drivers/gpu/drm/msm/dp/dp_parser.c
@@ -151,11 +151,22 @@
 			parser->l_map[i] = data[i];
 	}
 
+	data = of_get_property(of_node, "qcom,pn-swap-lane-map", &len);
+	if (data && (len == DP_MAX_PHY_LN)) {
+		for (i = 0; i < len; i++)
+			parser->l_pnswap |= (data[i] & 0x01) << i;
+	}
+
 	rc = of_property_read_u32(of_node,
 		"qcom,max-pclk-frequency-khz", &parser->max_pclk_khz);
 	if (rc)
 		parser->max_pclk_khz = DP_MAX_PIXEL_CLK_KHZ;
 
+	rc = of_property_read_u32(of_node,
+		"qcom,max-lclk-frequency-khz", &parser->max_lclk_khz);
+	if (rc)
+		parser->max_lclk_khz = DP_MAX_LINK_CLK_KHZ;
+
 	return 0;
 }
 
@@ -692,6 +703,7 @@
 static int dp_parser_mst(struct dp_parser *parser)
 {
 	struct device *dev = &parser->pdev->dev;
+	int i;
 
 	parser->has_mst = of_property_read_bool(dev->of_node,
 			"qcom,mst-enable");
@@ -699,6 +711,12 @@
 
 	pr_debug("mst parsing successful. mst:%d\n", parser->has_mst);
 
+	for (i = 0; i < MAX_DP_MST_STREAMS; i++) {
+		of_property_read_u32_index(dev->of_node,
+				"qcom,mst-fixed-topology-ports", i,
+				&parser->mst_fixed_port[i]);
+	}
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.h b/drivers/gpu/drm/msm/dp/dp_parser.h
index 7fb90c9..9caa1a7 100644
--- a/drivers/gpu/drm/msm/dp/dp_parser.h
+++ b/drivers/gpu/drm/msm/dp/dp_parser.h
@@ -11,6 +11,8 @@
 #define DP_LABEL "MDSS DP DISPLAY"
 #define AUX_CFG_LEN	10
 #define DP_MAX_PIXEL_CLK_KHZ	675000
+#define DP_MAX_LINK_CLK_KHZ	810000
+#define MAX_DP_MST_STREAMS	2
 
 enum dp_pm_type {
 	DP_CORE_PM,
@@ -181,6 +183,9 @@
  * @mp: gpio, regulator and clock related data
  * @pinctrl: pin-control related data
  * @disp_data: controller's display related data
+ * @l_pnswap: P/N swap status on each lane
+ * @max_pclk_khz: maximum pixel clock supported for the platform
+ * @max_lclk_khz: maximum link clock supported for the platform
  * @hw_cfg: DP HW specific settings
  * @has_mst: MST feature enable status
  * @has_mst_sideband: MST sideband feature enable status
@@ -191,6 +196,7 @@
  * @max_dp_dsc_blks: maximum DSC blks for DP interface
  * @max_dp_dsc_input_width_pixs: Maximum input width for DSC block
  * @has_widebus: widebus (2PPC) feature eanble status
+ * @mst_fixed_port: mst port_num reserved for fixed topology
  * @parse: function to be called by client to parse device tree.
  * @get_io: function to be called by client to get io data.
  * @get_io_buf: function to be called by client to get io buffers.
@@ -205,8 +211,10 @@
 	struct dp_display_data disp_data;
 
 	u8 l_map[4];
+	u8 l_pnswap;
 	struct dp_aux_cfg aux_cfg[AUX_CFG_LEN];
 	u32 max_pclk_khz;
+	u32 max_lclk_khz;
 	struct dp_hw_cfg hw_cfg;
 	bool has_mst;
 	bool has_mst_sideband;
@@ -218,6 +226,7 @@
 	u32 max_dp_dsc_blks;
 	u32 max_dp_dsc_input_width_pixs;
 	bool lphw_hpd;
+	u32 mst_fixed_port[MAX_DP_MST_STREAMS];
 
 	int (*parse)(struct dp_parser *parser);
 	struct dp_io_data *(*get_io)(struct dp_parser *parser, char *name);
diff --git a/drivers/gpu/drm/msm/dp/dp_reg.h b/drivers/gpu/drm/msm/dp/dp_reg.h
index 5089f0c..7f9391d 100644
--- a/drivers/gpu/drm/msm/dp/dp_reg.h
+++ b/drivers/gpu/drm/msm/dp/dp_reg.h
@@ -351,12 +351,14 @@
 
 #define TXn_TX_EMP_POST1_LVL			(0x000C)
 #define TXn_TX_DRV_LVL				(0x001C)
+#define TXn_TX_POL_INV				(0x0064)
 
 #define DP_PHY_AUX_INTERRUPT_MASK_V420		(0x0054)
 #define DP_PHY_AUX_INTERRUPT_CLEAR_V420		(0x0058)
 #define DP_PHY_AUX_INTERRUPT_STATUS_V420	(0x00D8)
 #define DP_PHY_SPARE0_V420			(0x00C8)
 #define TXn_TX_DRV_LVL_V420			(0x0014)
+#define TXn_TX_POL_INV_V420			(0x005C)
 
 #define QSERDES_COM_BIAS_EN_CLKBUFLR_EN		(0x004)
 
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index ae2ce71..15ad347 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -172,7 +172,7 @@
 {
 	struct dsi_display *dsi_display = display;
 	struct dsi_panel *panel;
-	u32 bl_scale, bl_scale_ad;
+	u32 bl_scale, bl_scale_sv;
 	u64 bl_temp;
 	int rc = 0;
 
@@ -193,12 +193,11 @@
 	bl_scale = panel->bl_config.bl_scale;
 	bl_temp = bl_lvl * bl_scale / MAX_BL_SCALE_LEVEL;
 
-	bl_scale_ad = panel->bl_config.bl_scale_ad;
-	bl_temp = (u32)bl_temp * bl_scale_ad / MAX_AD_BL_SCALE_LEVEL;
+	bl_scale_sv = panel->bl_config.bl_scale_sv;
+	bl_temp = (u32)bl_temp * bl_scale_sv / MAX_SV_BL_SCALE_LEVEL;
 
-	pr_debug("bl_scale = %u, bl_scale_ad = %u, bl_lvl = %u\n",
-		bl_scale, bl_scale_ad, (u32)bl_temp);
-
+	pr_debug("bl_scale = %u, bl_scale_sv = %u, bl_lvl = %u\n",
+		bl_scale, bl_scale_sv, (u32)bl_temp);
 	rc = dsi_display_clk_ctrl(dsi_display->dsi_clk_handle,
 			DSI_CORE_CLK, DSI_CLK_ON);
 	if (rc) {
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index 5e9d3ac..730a2c2 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -2032,7 +2032,7 @@
 	}
 
 	panel->bl_config.bl_scale = MAX_BL_SCALE_LEVEL;
-	panel->bl_config.bl_scale_ad = MAX_AD_BL_SCALE_LEVEL;
+	panel->bl_config.bl_scale_sv = MAX_SV_BL_SCALE_LEVEL;
 
 	rc = utils->read_u32(utils->data, "qcom,mdss-dsi-bl-min-level", &val);
 	if (rc) {
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
index 8d9cfea..a2dcebb 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
@@ -23,7 +23,7 @@
 
 #define MAX_BL_LEVEL 4096
 #define MAX_BL_SCALE_LEVEL 1024
-#define MAX_AD_BL_SCALE_LEVEL 65535
+#define MAX_SV_BL_SCALE_LEVEL 65535
 #define DSI_CMD_PPS_SIZE 135
 
 #define DSI_MODE_MAX 5
@@ -90,7 +90,7 @@
 	u32 brightness_max_level;
 	u32 bl_level;
 	u32 bl_scale;
-	u32 bl_scale_ad;
+	u32 bl_scale_sv;
 
 	int en_gpio;
 	/* PWM params */
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index e8da71d..9a36012 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  * Copyright (C) 2014 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index ab26608..b36f62a 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -241,7 +241,8 @@
 		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
 	if (!res) {
-		dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
+		dev_dbg(&pdev->dev, "failed to get memory resource: %s\n",
+									name);
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -270,7 +271,7 @@
 		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
 	if (!res) {
-		dev_err(&pdev->dev, "failed to get memory resource: %s\n",
+		dev_dbg(&pdev->dev, "failed to get memory resource: %s\n",
 									name);
 		return 0;
 	}
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index d60eb34..b8de212 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -189,7 +189,7 @@
 	CONNECTOR_PROP_DST_H,
 	CONNECTOR_PROP_ROI_V1,
 	CONNECTOR_PROP_BL_SCALE,
-	CONNECTOR_PROP_AD_BL_SCALE,
+	CONNECTOR_PROP_SV_BL_SCALE,
 
 	/* enum/bitmask properties */
 	CONNECTOR_PROP_TOPOLOGY_NAME,
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index f7a0ede..d4cc5ce 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -115,7 +115,9 @@
 		char *fptr = &fifo->buf[fifo->head];
 		int n;
 
-		wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0);
+		wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0 || !rd->open);
+		if (!rd->open)
+			return;
 
 		/* Note that smp_load_acquire() is not strictly required
 		 * as CIRC_SPACE_TO_END() does not access the tail more
@@ -213,7 +215,10 @@
 static int rd_release(struct inode *inode, struct file *file)
 {
 	struct msm_rd_state *rd = inode->i_private;
+
 	rd->open = false;
+	wake_up_all(&rd->fifo_event);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c
index 48f97a0..b380481 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.c
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c
@@ -2160,7 +2160,7 @@
 static void sde_cp_notify_ad_event(struct drm_crtc *crtc_drm, void *arg)
 {
 	uint32_t input_bl = 0, output_bl = 0;
-	uint32_t scale = MAX_AD_BL_SCALE_LEVEL;
+	uint32_t scale = MAX_SV_BL_SCALE_LEVEL;
 	struct sde_hw_mixer *hw_lm = NULL;
 	struct sde_hw_dspp *hw_dspp = NULL;
 	u32 num_mixers;
@@ -2208,7 +2208,7 @@
 	if (!input_bl || input_bl < output_bl)
 		return;
 
-	scale = (output_bl * MAX_AD_BL_SCALE_LEVEL) / input_bl;
+	scale = (output_bl * MAX_SV_BL_SCALE_LEVEL) / input_bl;
 	event.length = sizeof(u32);
 	event.type = DRM_EVENT_AD_BACKLIGHT;
 	msm_mode_object_event_notify(&crtc_drm->base, crtc_drm->dev,
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index fce411b..d87a981 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -539,13 +539,13 @@
 	else
 		bl_config->bl_scale = c_conn->bl_scale;
 
-	if (c_conn->bl_scale_ad > MAX_AD_BL_SCALE_LEVEL)
-		bl_config->bl_scale_ad = MAX_AD_BL_SCALE_LEVEL;
+	if (c_conn->bl_scale_sv > MAX_SV_BL_SCALE_LEVEL)
+		bl_config->bl_scale_sv = MAX_SV_BL_SCALE_LEVEL;
 	else
-		bl_config->bl_scale_ad = c_conn->bl_scale_ad;
+		bl_config->bl_scale_sv = c_conn->bl_scale_sv;
 
-	SDE_DEBUG("bl_scale = %u, bl_scale_ad = %u, bl_level = %u\n",
-		bl_config->bl_scale, bl_config->bl_scale_ad,
+	SDE_DEBUG("bl_scale = %u, bl_scale_sv = %u, bl_level = %u\n",
+		bl_config->bl_scale, bl_config->bl_scale_sv,
 		bl_config->bl_level);
 	rc = c_conn->ops.set_backlight(&c_conn->base,
 			dsi_display, bl_config->bl_level);
@@ -615,7 +615,7 @@
 			mutex_unlock(&c_conn->lock);
 			break;
 		case CONNECTOR_PROP_BL_SCALE:
-		case CONNECTOR_PROP_AD_BL_SCALE:
+		case CONNECTOR_PROP_SV_BL_SCALE:
 			_sde_connector_update_bl_scale(c_conn);
 			break;
 		case CONNECTOR_PROP_HDR_METADATA:
@@ -1189,8 +1189,7 @@
 	struct sde_connector *c_conn;
 	struct sde_connector_state *c_state;
 	int idx, rc;
-	uint64_t fence_user_fd;
-	uint64_t __user prev_user_fd;
+	uint64_t fence_fd;
 
 	if (!connector || !state || !property) {
 		SDE_ERROR("invalid argument(s), conn %pK, state %pK, prp %pK\n",
@@ -1233,42 +1232,23 @@
 		if (!val)
 			goto end;
 
-		rc = copy_from_user(&prev_user_fd, (void __user *)val,
-				sizeof(uint64_t));
+		/*
+		 * update the offset to a timeline for commit completion
+		 */
+		rc = sde_fence_create(c_conn->retire_fence, &fence_fd, 1);
 		if (rc) {
-			SDE_ERROR("copy from user failed rc:%d\n", rc);
-			rc = -EFAULT;
+			SDE_ERROR("fence create failed rc:%d\n", rc);
 			goto end;
 		}
 
-		/*
-		 * client is expected to reset the property to -1 before
-		 * requesting for the retire fence
-		 */
-		if (prev_user_fd == -1) {
-			/*
-			 * update the offset to a timeline for
-			 * commit completion
-			 */
-			rc = sde_fence_create(c_conn->retire_fence,
-						&fence_user_fd, 1);
-			if (rc) {
-				SDE_ERROR("fence create failed rc:%d\n", rc);
-				goto end;
-			}
-
-			rc = copy_to_user((uint64_t __user *)(uintptr_t)val,
-					&fence_user_fd, sizeof(uint64_t));
-			if (rc) {
-				SDE_ERROR("copy to user failed rc:%d\n", rc);
-				/*
-				 * fence will be released with timeline
-				 * update
-				 */
-				put_unused_fd(fence_user_fd);
-				rc = -EFAULT;
-				goto end;
-			}
+		rc = copy_to_user((uint64_t __user *)(uintptr_t)val, &fence_fd,
+			sizeof(uint64_t));
+		if (rc) {
+			SDE_ERROR("copy to user failed rc:%d\n", rc);
+			/* fence will be released with timeline update */
+			put_unused_fd(fence_fd);
+			rc = -EFAULT;
+			goto end;
 		}
 		break;
 	case CONNECTOR_PROP_ROI_V1:
@@ -1277,7 +1257,7 @@
 		if (rc)
 			SDE_ERROR_CONN(c_conn, "invalid roi_v1, rc: %d\n", rc);
 		break;
-	/* CONNECTOR_PROP_BL_SCALE and CONNECTOR_PROP_AD_BL_SCALE are
+	/* CONNECTOR_PROP_BL_SCALE and CONNECTOR_PROP_SV_BL_SCALE are
 	 * color-processing properties. These two properties require
 	 * special handling since they don't quite fit the current standard
 	 * atomic set property framework.
@@ -1286,8 +1266,8 @@
 		c_conn->bl_scale = val;
 		c_conn->bl_scale_dirty = true;
 		break;
-	case CONNECTOR_PROP_AD_BL_SCALE:
-		c_conn->bl_scale_ad = val;
+	case CONNECTOR_PROP_SV_BL_SCALE:
+		c_conn->bl_scale_sv = val;
 		c_conn->bl_scale_dirty = true;
 		break;
 	case CONNECTOR_PROP_HDR_METADATA:
@@ -2269,13 +2249,13 @@
 		0x0, 0, MAX_BL_SCALE_LEVEL, MAX_BL_SCALE_LEVEL,
 		CONNECTOR_PROP_BL_SCALE);
 
-	msm_property_install_range(&c_conn->property_info, "ad_bl_scale",
-		0x0, 0, MAX_AD_BL_SCALE_LEVEL, MAX_AD_BL_SCALE_LEVEL,
-		CONNECTOR_PROP_AD_BL_SCALE);
+	msm_property_install_range(&c_conn->property_info, "sv_bl_scale",
+		0x0, 0, MAX_SV_BL_SCALE_LEVEL, MAX_SV_BL_SCALE_LEVEL,
+		CONNECTOR_PROP_SV_BL_SCALE);
 
 	c_conn->bl_scale_dirty = false;
 	c_conn->bl_scale = MAX_BL_SCALE_LEVEL;
-	c_conn->bl_scale_ad = MAX_AD_BL_SCALE_LEVEL;
+	c_conn->bl_scale_sv = MAX_SV_BL_SCALE_LEVEL;
 
 	/* enum/bitmask properties */
 	msm_property_install_enum(&c_conn->property_info, "topology_name",
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index a1bd65e..0db872f 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -372,7 +372,7 @@
  * @esd_status_check: Flag to indicate if ESD thread is scheduled or not
  * @bl_scale_dirty: Flag to indicate PP BL scale value(s) is changed
  * @bl_scale: BL scale value for ABA feature
- * @bl_scale_ad: BL scale value for AD feature
+ * @bl_scale_sv: BL scale value for sunlight visibility feature
  * @unset_bl_level: BL level that needs to be set later
  * @allow_bl_update: Flag to indicate if BL update is allowed currently or not
  * @qsync_mode: Cached Qsync mode, 0=disabled, 1=continuous mode
@@ -423,7 +423,7 @@
 
 	bool bl_scale_dirty;
 	u32 bl_scale;
-	u32 bl_scale_ad;
+	u32 bl_scale_sv;
 	u32 unset_bl_level;
 	bool allow_bl_update;
 
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 6ad0612c..93a1f0b 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -5102,8 +5102,7 @@
 	struct sde_crtc *sde_crtc;
 	struct sde_crtc_state *cstate;
 	int idx, ret;
-	uint64_t fence_user_fd;
-	uint64_t __user prev_user_fd;
+	uint64_t fence_fd;
 
 	if (!crtc || !state || !property) {
 		SDE_ERROR("invalid argument(s)\n");
@@ -5163,34 +5162,19 @@
 		if (!val)
 			goto exit;
 
-		ret = copy_from_user(&prev_user_fd, (void __user *)val,
-				sizeof(uint64_t));
+		ret = _sde_crtc_get_output_fence(crtc, state, &fence_fd);
 		if (ret) {
-			SDE_ERROR("copy from user failed rc:%d\n", ret);
-			ret = -EFAULT;
+			SDE_ERROR("fence create failed rc:%d\n", ret);
 			goto exit;
 		}
 
-		/*
-		 * client is expected to reset the property to -1 before
-		 * requesting for the release fence
-		 */
-		if (prev_user_fd == -1) {
-			ret = _sde_crtc_get_output_fence(crtc, state,
-					&fence_user_fd);
-			if (ret) {
-				SDE_ERROR("fence create failed rc:%d\n", ret);
-				goto exit;
-			}
-
-			ret = copy_to_user((uint64_t __user *)(uintptr_t)val,
-					&fence_user_fd, sizeof(uint64_t));
-			if (ret) {
-				SDE_ERROR("copy to user failed rc:%d\n", ret);
-				put_unused_fd(fence_user_fd);
-				ret = -EFAULT;
-				goto exit;
-			}
+		ret = copy_to_user((uint64_t __user *)(uintptr_t)val, &fence_fd,
+				sizeof(uint64_t));
+		if (ret) {
+			SDE_ERROR("copy to user failed rc:%d\n", ret);
+			put_unused_fd(fence_fd);
+			ret = -EFAULT;
+			goto exit;
 		}
 		break;
 	default:
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 2a029c8..f28a0a2 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -223,6 +223,7 @@
  * @elevated_ahb_vote:		increase AHB bus speed for the first frame
  *				after power collapse
  * @pm_qos_cpu_req:		pm_qos request for cpu frequency
+ * @mode_info:                  stores the current mode information
  */
 struct sde_encoder_virt {
 	struct drm_encoder base;
@@ -282,6 +283,7 @@
 	bool recovery_events_enabled;
 	bool elevated_ahb_vote;
 	struct pm_qos_request pm_qos_cpu_req;
+	struct msm_mode_info mode_info;
 };
 
 #define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base)
@@ -340,67 +342,16 @@
 	pm_qos_remove_request(&sde_enc->pm_qos_cpu_req);
 }
 
-static struct drm_connector_state *_sde_encoder_get_conn_state(
-		struct drm_encoder *drm_enc)
-{
-	struct msm_drm_private *priv;
-	struct sde_kms *sde_kms;
-	struct list_head *connector_list;
-	struct drm_connector *conn_iter;
-
-	if (!drm_enc) {
-		SDE_ERROR("invalid argument\n");
-		return NULL;
-	}
-
-	priv = drm_enc->dev->dev_private;
-	sde_kms = to_sde_kms(priv->kms);
-	connector_list = &sde_kms->dev->mode_config.connector_list;
-
-	list_for_each_entry(conn_iter, connector_list, head)
-		if (conn_iter->encoder == drm_enc)
-			return conn_iter->state;
-
-	return NULL;
-}
-
-static int _sde_encoder_get_mode_info(struct drm_encoder *drm_enc,
-		struct msm_mode_info *mode_info)
-{
-	struct drm_connector_state *conn_state;
-
-	if (!drm_enc || !mode_info) {
-		SDE_ERROR("invalid arguments\n");
-		return -EINVAL;
-	}
-
-	conn_state = _sde_encoder_get_conn_state(drm_enc);
-	if (!conn_state) {
-		SDE_ERROR("invalid connector state for the encoder: %d\n",
-			drm_enc->base.id);
-		return -EINVAL;
-	}
-
-	return sde_connector_get_mode_info(conn_state, mode_info);
-}
-
 static bool _sde_encoder_is_dsc_enabled(struct drm_encoder *drm_enc)
 {
+	struct sde_encoder_virt *sde_enc;
 	struct msm_compression_info *comp_info;
-	struct msm_mode_info mode_info;
-	int rc = 0;
 
 	if (!drm_enc)
 		return false;
 
-	rc = _sde_encoder_get_mode_info(drm_enc, &mode_info);
-	if (rc) {
-		SDE_ERROR("failed to get mode info, enc: %d\n",
-			drm_enc->base.id);
-		return false;
-	}
-
-	comp_info = &mode_info.comp_info;
+	sde_enc  = to_sde_encoder_virt(drm_enc);
+	comp_info = &sde_enc->mode_info.comp_info;
 
 	return (comp_info->comp_type == MSM_DISPLAY_COMPRESSION_DSC);
 }
@@ -712,8 +663,7 @@
 		struct drm_connector_state *conn_state)
 {
 	struct sde_encoder_virt *sde_enc = NULL;
-	struct msm_mode_info mode_info;
-	int rc, i = 0;
+	int i = 0;
 
 	if (!hw_res || !drm_enc || !conn_state) {
 		SDE_ERROR("invalid argument(s), drm_enc %d, res %d, state %d\n",
@@ -735,18 +685,8 @@
 			phys->ops.get_hw_resources(phys, hw_res, conn_state);
 	}
 
-	/**
-	 * NOTE: Do not use sde_encoder_get_mode_info here as this function is
-	 * called from atomic_check phase. Use the below API to get mode
-	 * information of the temporary conn_state passed.
-	 */
-	rc = sde_connector_get_mode_info(conn_state, &mode_info);
-	if (rc) {
-		SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
-		return;
-	}
-
-	hw_res->topology = mode_info.topology;
+	sde_connector_get_mode_info(conn_state, &sde_enc->mode_info);
+	hw_res->topology = sde_enc->mode_info.topology;
 	hw_res->is_primary = sde_enc->disp_info.is_primary;
 }
 
@@ -1334,27 +1274,20 @@
 	struct sde_hw_dsc *hw_dsc = sde_enc->hw_dsc[0];
 	struct sde_encoder_phys *enc_master = sde_enc->cur_master;
 	const struct sde_rect *roi = &sde_enc->cur_conn_roi;
-	struct msm_mode_info mode_info;
 	struct msm_display_dsc_info *dsc = NULL;
 	struct sde_hw_ctl *hw_ctl;
 	struct sde_ctl_dsc_cfg cfg;
-	int rc;
 
 	if (hw_dsc == NULL || hw_pp == NULL || !enc_master) {
 		SDE_ERROR_ENC(sde_enc, "invalid params for DSC\n");
 		return -EINVAL;
 	}
 
-	rc = _sde_encoder_get_mode_info(&sde_enc->base, &mode_info);
-	if (rc) {
-		SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
-		return -EINVAL;
-	}
 
 	hw_ctl = enc_master->hw_ctl;
 
 	memset(&cfg, 0, sizeof(cfg));
-	dsc = &mode_info.comp_info.dsc_info;
+	dsc = &sde_enc->mode_info.comp_info.dsc_info;
 	_sde_encoder_dsc_update_pic_dim(dsc, roi->w, roi->h);
 
 	this_frame_slices = roi->w / dsc->slice_width;
@@ -1407,11 +1340,10 @@
 	struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
 	struct sde_hw_pingpong *hw_dsc_pp[MAX_CHANNELS_PER_ENC];
 	struct msm_display_dsc_info dsc[MAX_CHANNELS_PER_ENC];
-	struct msm_mode_info mode_info;
 	bool half_panel_partial_update;
 	struct sde_hw_ctl *hw_ctl = enc_master->hw_ctl;
 	struct sde_ctl_dsc_cfg cfg;
-	int i, rc;
+	int i;
 
 	memset(&cfg, 0, sizeof(cfg));
 
@@ -1426,12 +1358,6 @@
 		}
 	}
 
-	rc = _sde_encoder_get_mode_info(&sde_enc->base, &mode_info);
-	if (rc) {
-		SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
-		return -EINVAL;
-	}
-
 	half_panel_partial_update =
 			hweight_long(params->affected_displays) == 1;
 
@@ -1441,8 +1367,8 @@
 	if (enc_master->intf_mode == INTF_MODE_VIDEO)
 		dsc_common_mode |= DSC_MODE_VIDEO;
 
-	memcpy(&dsc[0], &mode_info.comp_info.dsc_info, sizeof(dsc[0]));
-	memcpy(&dsc[1], &mode_info.comp_info.dsc_info, sizeof(dsc[1]));
+	memcpy(&dsc[0], &sde_enc->mode_info.comp_info.dsc_info, sizeof(dsc[0]));
+	memcpy(&dsc[1], &sde_enc->mode_info.comp_info.dsc_info, sizeof(dsc[1]));
 
 	/*
 	 * Since both DSC use same pic dimension, set same pic dimension
@@ -1530,11 +1456,10 @@
 	struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
 	struct sde_hw_pingpong *hw_dsc_pp[MAX_CHANNELS_PER_ENC];
 	struct msm_display_dsc_info *dsc = NULL;
-	struct msm_mode_info mode_info;
 	bool half_panel_partial_update;
 	struct sde_hw_ctl *hw_ctl = enc_master->hw_ctl;
 	struct sde_ctl_dsc_cfg cfg;
-	int i, rc;
+	int i;
 
 	memset(&cfg, 0, sizeof(cfg));
 
@@ -1549,13 +1474,7 @@
 		}
 	}
 
-	rc = _sde_encoder_get_mode_info(&sde_enc->base, &mode_info);
-	if (rc) {
-		SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
-		return -EINVAL;
-	}
-
-	dsc = &mode_info.comp_info.dsc_info;
+	dsc = &sde_enc->mode_info.comp_info.dsc_info;
 
 	half_panel_partial_update =
 			hweight_long(params->affected_displays) == 1;
@@ -1720,9 +1639,8 @@
 	struct sde_kms *sde_kms;
 	struct sde_hw_mdp *hw_mdptop;
 	struct drm_encoder *drm_enc;
-	struct msm_mode_info mode_info;
 	struct sde_encoder_virt *sde_enc;
-	int i, rc = 0;
+	int i;
 
 	sde_enc = to_sde_encoder_virt(phys_enc->parent);
 
@@ -1752,18 +1670,12 @@
 		return;
 	}
 
-	rc = _sde_encoder_get_mode_info(drm_enc, &mode_info);
-	if (rc) {
-		SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
-		return;
-	}
-
 	if (hw_mdptop->ops.setup_vsync_source) {
 		for (i = 0; i < sde_enc->num_phys_encs; i++)
 			vsync_cfg.ppnumber[i] = sde_enc->hw_pp[i]->idx;
 
 		vsync_cfg.pp_count = sde_enc->num_phys_encs;
-		vsync_cfg.frame_rate = mode_info.frame_rate;
+		vsync_cfg.frame_rate = sde_enc->mode_info.frame_rate;
 		vsync_cfg.vsync_source = vsync_source;
 		vsync_cfg.is_dummy = is_dummy;
 
@@ -1955,9 +1867,8 @@
 	struct sde_rsc_cmd_config *rsc_config;
 	int ret, prefill_lines;
 	struct msm_display_info *disp_info;
-	struct msm_mode_info mode_info;
+	struct msm_mode_info *mode_info;
 	int wait_vblank_crtc_id = SDE_RSC_INVALID_CRTC_ID;
-	int rc = 0;
 	u32 qsync_mode = 0;
 
 	if (!drm_enc || !drm_enc->dev) {
@@ -1966,6 +1877,8 @@
 	}
 
 	sde_enc = to_sde_encoder_virt(drm_enc);
+	mode_info = &sde_enc->mode_info;
+
 	crtc = sde_enc->crtc;
 
 	if (!sde_enc->crtc) {
@@ -1980,12 +1893,6 @@
 		return 0;
 	}
 
-	rc = _sde_encoder_get_mode_info(drm_enc, &mode_info);
-	if (rc) {
-		SDE_ERROR_ENC(sde_enc, "failed to mode info\n");
-		return 0;
-	}
-
 	/**
 	 * only primary command mode panel without Qsync can request CMD state.
 	 * all other panels/displays can request for VID state including
@@ -2006,19 +1913,19 @@
 
 	SDE_EVT32(rsc_state, qsync_mode);
 
-	prefill_lines = mode_info.prefill_lines;
+	prefill_lines = mode_info->prefill_lines;
 
 	/* compare specific items and reconfigure the rsc */
-	if ((rsc_config->fps != mode_info.frame_rate) ||
-	    (rsc_config->vtotal != mode_info.vtotal) ||
+	if ((rsc_config->fps != mode_info->frame_rate) ||
+	    (rsc_config->vtotal != mode_info->vtotal) ||
 	    (rsc_config->prefill_lines != prefill_lines) ||
-	    (rsc_config->jitter_numer != mode_info.jitter_numer) ||
-	    (rsc_config->jitter_denom != mode_info.jitter_denom)) {
-		rsc_config->fps = mode_info.frame_rate;
-		rsc_config->vtotal = mode_info.vtotal;
+	    (rsc_config->jitter_numer != mode_info->jitter_numer) ||
+	    (rsc_config->jitter_denom != mode_info->jitter_denom)) {
+		rsc_config->fps = mode_info->frame_rate;
+		rsc_config->vtotal = mode_info->vtotal;
 		rsc_config->prefill_lines = prefill_lines;
-		rsc_config->jitter_numer = mode_info.jitter_numer;
-		rsc_config->jitter_denom = mode_info.jitter_denom;
+		rsc_config->jitter_numer = mode_info->jitter_numer;
+		rsc_config->jitter_denom = mode_info->jitter_denom;
 		sde_enc->rsc_state_init = false;
 	}
 
@@ -3166,7 +3073,6 @@
 	int i, ret = 0;
 	struct msm_compression_info *comp_info = NULL;
 	struct drm_display_mode *cur_mode = NULL;
-	struct msm_mode_info mode_info;
 	struct msm_display_info *disp_info;
 
 	if (!drm_enc) {
@@ -3181,16 +3087,10 @@
 		return;
 	}
 
-	ret = _sde_encoder_get_mode_info(drm_enc, &mode_info);
-	if (ret) {
-		SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
-		return;
-	}
-
 	if (drm_enc->crtc && !sde_enc->crtc)
 		sde_enc->crtc = drm_enc->crtc;
 
-	comp_info = &mode_info.comp_info;
+	comp_info = &sde_enc->mode_info.comp_info;
 	cur_mode = &sde_enc->base.crtc->state->adjusted_mode;
 
 	SDE_DEBUG_ENC(sde_enc, "\n");
@@ -3244,7 +3144,7 @@
 
 		phys->comp_type = comp_info->comp_type;
 		phys->comp_ratio = comp_info->comp_ratio;
-		phys->wide_bus_en = mode_info.wide_bus_en;
+		phys->wide_bus_en = sde_enc->mode_info.wide_bus_en;
 		phys->frame_trigger_mode = sde_enc->frame_trigger_mode;
 		if (phys->comp_type == MSM_DISPLAY_COMPRESSION_DSC) {
 			phys->dsc_extra_pclk_cycle_cnt =
@@ -3372,6 +3272,7 @@
 	 * outstanding events and timers have been completed
 	 */
 	sde_enc->crtc = NULL;
+	memset(&sde_enc->mode_info, 0, sizeof(sde_enc->mode_info));
 
 	SDE_DEBUG_ENC(sde_enc, "encoder disabled\n");
 
@@ -4219,11 +4120,10 @@
 static void _sde_encoder_setup_dither(struct sde_encoder_phys *phys)
 {
 	void *dither_cfg;
-	int ret = 0, rc, i = 0;
+	int ret = 0, i = 0;
 	size_t len = 0;
 	enum sde_rm_topology_name topology;
 	struct drm_encoder *drm_enc;
-	struct msm_mode_info mode_info;
 	struct msm_display_dsc_info *dsc = NULL;
 	struct sde_encoder_virt *sde_enc;
 	struct sde_hw_pingpong *hw_pp;
@@ -4239,13 +4139,7 @@
 
 	drm_enc = phys->parent;
 	sde_enc = to_sde_encoder_virt(drm_enc);
-	rc = _sde_encoder_get_mode_info(&sde_enc->base, &mode_info);
-	if (rc) {
-		SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
-		return;
-	}
-
-	dsc = &mode_info.comp_info.dsc_info;
+	dsc = &sde_enc->mode_info.comp_info.dsc_info;
 	/* disable dither for 10 bpp or 10bpc dsc config */
 	if (dsc->bpp == 10 || dsc->bpc == 10) {
 		phys->hw_pp->ops.setup_dither(phys->hw_pp, NULL, 0);
@@ -5596,22 +5490,16 @@
 
 u32 sde_encoder_get_fps(struct drm_encoder *drm_enc)
 {
-	struct msm_mode_info mode_info;
-	int rc;
+	struct sde_encoder_virt *sde_enc;
 
 	if (!drm_enc) {
 		SDE_ERROR("invalid encoder\n");
 		return 0;
 	}
 
-	rc = _sde_encoder_get_mode_info(drm_enc, &mode_info);
-	if (rc) {
-		SDE_ERROR_ENC(to_sde_encoder_virt(drm_enc),
-			"failed to get mode info\n");
-		return 0;
-	}
+	sde_enc = to_sde_encoder_virt(drm_enc);
 
-	return mode_info.frame_rate;
+	return sde_enc->mode_info.frame_rate;
 }
 
 enum sde_intf_mode sde_encoder_get_intf_mode(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.c b/drivers/gpu/drm/msm/sde/sde_fence.c
index 0fdc41b..dc9cc77 100644
--- a/drivers/gpu/drm/msm/sde/sde_fence.c
+++ b/drivers/gpu/drm/msm/sde/sde_fence.c
@@ -351,6 +351,7 @@
 	list_for_each_entry(fc, &ctx->fence_list_head, fence_list) {
 		if (trigger_value == fc->base.seqno) {
 			fd = fc->fd;
+			*val = fd;
 			break;
 		}
 	}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index 31dcfa8..738ecbb 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -65,7 +65,8 @@
 #define MAX_DOWNSCALE_RATIO		4
 #define SSPP_UNITY_SCALE		1
 
-#define MAX_DOWNSCALE_RATIO_INLINE_ROT_RT_DEFAULT	2
+#define MAX_DOWNSCALE_RATIO_INLINE_ROT_RT_NUMERATOR	11
+#define MAX_DOWNSCALE_RATIO_INLINE_ROT_RT_DENOMINATOR	5
 #define MAX_DOWNSCALE_RATIO_INLINE_ROT_NRT_DEFAULT	4
 #define MAX_PRE_ROT_HEIGHT_INLINE_ROT_DEFAULT	1088
 
@@ -1143,8 +1144,10 @@
 	if (IS_SDE_INLINE_ROT_REV_100(sde_cfg->true_inline_rot_rev)) {
 		set_bit(SDE_SSPP_TRUE_INLINE_ROT_V1, &sspp->features);
 		sblk->in_rot_format_list = sde_cfg->inline_rot_formats;
-		sblk->in_rot_maxdwnscale_rt =
-			sde_cfg->true_inline_dwnscale_rt;
+		sblk->in_rot_maxdwnscale_rt_num =
+			sde_cfg->true_inline_dwnscale_rt_num;
+		sblk->in_rot_maxdwnscale_rt_denom =
+			sde_cfg->true_inline_dwnscale_rt_denom;
 		sblk->in_rot_maxdwnscale_nrt =
 			sde_cfg->true_inline_dwnscale_nrt;
 		sblk->in_rot_maxheight =
@@ -3778,6 +3781,8 @@
 		sde_cfg->sui_ns_allowed = true;
 		sde_cfg->sui_misr_supported = true;
 		sde_cfg->sui_block_xin_mask = 0x3F71;
+		sde_cfg->has_sui_blendstage = true;
+		sde_cfg->has_qos_fl_nocalc = true;
 		sde_cfg->has_3d_merge_reset = true;
 		clear_bit(MDSS_INTR_LTM_0_INTR, sde_cfg->mdss_irqs);
 		clear_bit(MDSS_INTR_LTM_1_INTR, sde_cfg->mdss_irqs);
@@ -3806,6 +3811,8 @@
 		sde_cfg->sui_misr_supported = true;
 		sde_cfg->has_decimation = true;
 		sde_cfg->sui_block_xin_mask = 0x2EE1;
+		sde_cfg->has_sui_blendstage = true;
+		sde_cfg->has_qos_fl_nocalc = true;
 		sde_cfg->has_3d_merge_reset = true;
 		clear_bit(MDSS_INTR_LTM_0_INTR, sde_cfg->mdss_irqs);
 		clear_bit(MDSS_INTR_LTM_1_INTR, sde_cfg->mdss_irqs);
@@ -3823,6 +3830,8 @@
 		sde_cfg->sui_ns_allowed = true;
 		sde_cfg->sui_misr_supported = true;
 		sde_cfg->sui_block_xin_mask = 0xE71;
+		sde_cfg->has_sui_blendstage = true;
+		sde_cfg->has_qos_fl_nocalc = true;
 		sde_cfg->has_3d_merge_reset = true;
 	} else if (IS_KONA_TARGET(hw_rev)) {
 		sde_cfg->has_cwb_support = true;
@@ -3836,6 +3845,8 @@
 		sde_cfg->sui_ns_allowed = true;
 		sde_cfg->sui_misr_supported = true;
 		sde_cfg->sui_block_xin_mask = 0x3F71;
+		sde_cfg->has_sui_blendstage = true;
+		sde_cfg->has_qos_fl_nocalc = true;
 		sde_cfg->has_3d_merge_reset = true;
 		clear_bit(MDSS_INTR_AD4_0_INTR, sde_cfg->mdss_irqs);
 		clear_bit(MDSS_INTR_AD4_1_INTR, sde_cfg->mdss_irqs);
@@ -3844,8 +3855,10 @@
 		set_bit(SDE_MDP_DHDR_MEMPOOL, &sde_cfg->mdp[0].features);
 		sde_cfg->has_vig_p010 = true;
 		sde_cfg->true_inline_rot_rev = SDE_INLINE_ROT_VERSION_1_0_0;
-		sde_cfg->true_inline_dwnscale_rt =
-			MAX_DOWNSCALE_RATIO_INLINE_ROT_RT_DEFAULT;
+		sde_cfg->true_inline_dwnscale_rt_num =
+			MAX_DOWNSCALE_RATIO_INLINE_ROT_RT_NUMERATOR;
+		sde_cfg->true_inline_dwnscale_rt_denom =
+			MAX_DOWNSCALE_RATIO_INLINE_ROT_RT_DENOMINATOR;
 		sde_cfg->true_inline_dwnscale_nrt =
 			MAX_DOWNSCALE_RATIO_INLINE_ROT_NRT_DEFAULT;
 		sde_cfg->true_inline_prefill_fudge_lines = 2;
@@ -3875,16 +3888,10 @@
 	if (!sde_cfg)
 		return -EINVAL;
 
-	if (IS_SM8150_TARGET(hw_rev) || IS_SM6150_TARGET(hw_rev) ||
-			IS_SDMMAGPIE_TARGET(hw_rev)) {
+	if (sde_cfg->has_sui_blendstage)
 		sde_cfg->sui_supported_blendstage =
 			sde_cfg->max_mixer_blendstages - SDE_STAGE_0;
 
-		for (i = 0; i < sde_cfg->sspp_count; i++)
-			set_bit(SDE_PERF_SSPP_QOS_FL_NOCALC,
-					&sde_cfg->sspp[i].perf_features);
-	}
-
 	for (i = 0; i < sde_cfg->sspp_count; i++) {
 		if (sde_cfg->sspp[i].sblk) {
 			max_horz_deci = max(max_horz_deci,
@@ -3893,6 +3900,10 @@
 				sde_cfg->sspp[i].sblk->maxvdeciexp);
 		}
 
+		if (sde_cfg->has_qos_fl_nocalc)
+			set_bit(SDE_PERF_SSPP_QOS_FL_NOCALC,
+				&sde_cfg->sspp[i].perf_features);
+
 		/*
 		 * set sec-ui blocked SSPP feature flag based on blocked
 		 * xin-mask if sec-ui-misr feature is enabled;
@@ -3959,8 +3970,10 @@
 			kfree(sde_cfg->vbif[i].qos_tbl[j].priority_lvl);
 	}
 
-	for (i = 0; i < SDE_QOS_LUT_USAGE_MAX; i++)
+	for (i = 0; i < SDE_QOS_LUT_USAGE_MAX; i++) {
+		kfree(sde_cfg->perf.sfe_lut_tbl[i].entries);
 		kfree(sde_cfg->perf.qos_lut_tbl[i].entries);
+	}
 
 	kfree(sde_cfg->dma_formats);
 	kfree(sde_cfg->cursor_formats);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index 2f7c781..7d25092c 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -593,7 +593,10 @@
  * @format_list: Pointer to list of supported formats
  * @virt_format_list: Pointer to list of supported formats for virtual planes
  * @in_rot_format_list: Pointer to list of supported formats for inline rotation
- * @in_rot_maxdwnscale_rt: max downscale ratio for inline rotation rt clients
+ * @in_rot_maxdwnscale_rt_num: max downscale ratio for inline rotation
+ *                                 rt clients - numerator
+ * @in_rot_maxdwnscale_rt_denom: max downscale ratio for inline rotation
+ *                                 rt clients - denominator
  * @in_rot_maxdwnscale_nrt: max downscale ratio for inline rotation nrt clients
  * @in_rot_maxheight: max pre rotated height for inline rotation
  * @in_rot_prefill_fudge_lines: prefill fudge lines for inline rotation
@@ -630,7 +633,8 @@
 	const struct sde_format_extended *format_list;
 	const struct sde_format_extended *virt_format_list;
 	const struct sde_format_extended *in_rot_format_list;
-	u32 in_rot_maxdwnscale_rt;
+	u32 in_rot_maxdwnscale_rt_num;
+	u32 in_rot_maxdwnscale_rt_denom;
 	u32 in_rot_maxdwnscale_nrt;
 	u32 in_rot_maxheight;
 	u32 in_rot_prefill_fudge_lines;
@@ -1186,7 +1190,10 @@
  * @vbif_qos_nlvl      number of vbif QoS priority level
  * @ts_prefill_rev     prefill traffic shaper feature revision
  * @true_inline_rot_rev	inline rotator feature revision
- * @true_inline_dwnscale_rt    true inline rotator downscale ratio for rt
+ * @true_inline_dwnscale_rt_num    true inline rotator downscale ratio for rt
+ *                                       - numerator
+ * @true_inline_dwnscale_rt_denom    true inline rot downscale ratio for rt
+ *                                       - denominator
  * @true_inline_dwnscale_nrt    true inline rotator downscale ratio for nrt
  * @true_inline_prefill_fudge_lines    true inline rotator prefill fudge lines
  * @true_inline_prefill_lines_nv12    true inline prefill lines for nv12 format
@@ -1197,6 +1204,7 @@
  * @has_qsync	       Supports qsync feature
  * @has_3d_merge_reset Supports 3D merge reset
  * @has_decimation     Supports decimation
+ * @has_qos_fl_nocalc  flag to indicate QoS fill level needs no calculation
  * @sc_cfg: system cache configuration
  * @uidle_cfg		Settings for uidle feature
  * @sui_misr_supported  indicate if secure-ui-misr is supported
@@ -1208,6 +1216,7 @@
  * @sui_ns_allowed      flag to indicate non-secure context banks are allowed
  *                         during secure-ui session
  * @sui_supported_blendstage  secure-ui supported blendstage
+ * @has_sui_blendstage  flag to indicate secure-ui has a blendstage restriction
  * @has_cursor    indicates if hardware cursor is supported
  * @has_vig_p010  indicates if vig pipe supports p010 format
  * @inline_rot_formats	formats supported by the inline rotator feature
@@ -1242,7 +1251,8 @@
 	u32 vbif_qos_nlvl;
 	u32 ts_prefill_rev;
 	u32 true_inline_rot_rev;
-	u32 true_inline_dwnscale_rt;
+	u32 true_inline_dwnscale_rt_num;
+	u32 true_inline_dwnscale_rt_denom;
 	u32 true_inline_dwnscale_nrt;
 	u32 true_inline_prefill_fudge_lines;
 	u32 true_inline_prefill_lines_nv12;
@@ -1253,6 +1263,7 @@
 	bool has_qsync;
 	bool has_3d_merge_reset;
 	bool has_decimation;
+	bool has_qos_fl_nocalc;
 
 	struct sde_sc_cfg sc_cfg;
 
@@ -1263,6 +1274,7 @@
 	u32 sec_sid_mask[MAX_BLOCKS];
 	u32 sui_ns_allowed;
 	u32 sui_supported_blendstage;
+	bool has_sui_blendstage;
 
 	bool has_hdr;
 	bool has_hdr_plus;
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index c5d9098..219ee07 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -2499,9 +2499,14 @@
 	}
 
 	crtc_state->active = true;
-	drm_atomic_set_crtc_for_connector(conn_state, enc->crtc);
+	ret = drm_atomic_set_crtc_for_connector(conn_state, enc->crtc);
+	if (ret)
+		SDE_ERROR("error %d setting the crtc\n", ret);
 
-	drm_atomic_commit(state);
+	ret = drm_atomic_commit(state);
+	if (ret)
+		SDE_ERROR("Error %d doing the atomic commit\n", ret);
+
 end:
 	if (state)
 		drm_atomic_state_put(state);
@@ -2962,7 +2967,7 @@
 
 	node = of_find_node_by_name(parent, node_name);
 	if (!node) {
-		SDE_ERROR("failed to find node %s\n", node_name);
+		SDE_DEBUG("failed to find node %s\n", node_name);
 		return -EINVAL;
 	}
 
@@ -2982,7 +2987,7 @@
 
 	data->num_splash_displays = num_displays;
 
-	pr_info("splash mem num_regions:%d\n", num_regions);
+	SDE_DEBUG("splash mem num_regions:%d\n", num_regions);
 	if (num_displays > num_regions) {
 		share_splash_mem = true;
 		pr_info(":%d displays share same splash buf\n", num_displays);
@@ -3015,7 +3020,7 @@
 			data->splash_display[i].splash = &data->splash_mem[0];
 		}
 
-		pr_info("splash mem for disp:%d add:%lx size:%x\n", (i + 1),
+		SDE_DEBUG("splash mem for disp:%d add:%lx size:%x\n", (i + 1),
 				splash_display->splash->splash_buf_base,
 				splash_display->splash->splash_buf_size);
 	}
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 47bfa93..eff1a2d 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -1645,14 +1645,16 @@
 		 */
 		rotation ^= (DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y);
 
-		if (!psde->pipe_sblk->in_rot_maxdwnscale_rt ||
+		if (!psde->pipe_sblk->in_rot_maxdwnscale_rt_num ||
+			!psde->pipe_sblk->in_rot_maxdwnscale_rt_denom ||
 			!psde->pipe_sblk->in_rot_maxdwnscale_nrt ||
 			!psde->pipe_sblk->in_rot_maxheight ||
 			!psde->pipe_sblk->in_rot_format_list ||
 			!(psde->features & BIT(SDE_SSPP_TRUE_INLINE_ROT_V1))) {
 			SDE_ERROR_PLANE(psde,
-				"wrong config rt:%d nrt:%d fmt:%d h:%d 0x%x\n",
-				!psde->pipe_sblk->in_rot_maxdwnscale_rt,
+			    "wrong config rt:%d/%d nrt:%d fmt:%d h:%d 0x%x\n",
+				!psde->pipe_sblk->in_rot_maxdwnscale_rt_num,
+				!psde->pipe_sblk->in_rot_maxdwnscale_rt_denom,
 				!psde->pipe_sblk->in_rot_maxdwnscale_nrt,
 				!psde->pipe_sblk->in_rot_format_list,
 				!psde->pipe_sblk->in_rot_maxheight,
@@ -2312,6 +2314,8 @@
 		uint32_t hor_req_pixels, hor_fetch_pixels;
 		uint32_t vert_req_pixels, vert_fetch_pixels;
 		uint32_t src_w_tmp, src_h_tmp;
+		uint32_t scaler_w, scaler_h;
+		bool rot;
 
 		/* re-use color plane 1's config for plane 2 */
 		if (i == 2)
@@ -2361,20 +2365,27 @@
 		}
 
 		/*
+		 * swap the scaler src width & height for inline-rotation 90
+		 * comparison with Pixel-Extension, as PE is based on
+		 * pre-rotation and QSEED is based on post-rotation
+		 */
+		rot = pstate->rotation & DRM_MODE_ROTATE_90;
+		scaler_w = rot ? pstate->scaler3_cfg.src_height[i]
+				    : pstate->scaler3_cfg.src_width[i];
+		scaler_h = rot ? pstate->scaler3_cfg.src_width[i]
+				    : pstate->scaler3_cfg.src_height[i];
+		/*
 		 * Alpha plane can only be scaled using bilinear or pixel
 		 * repeat/drop, src_width and src_height are only specified
 		 * for Y and UV plane
 		 */
-		if (i != 3 &&
-			(hor_req_pixels != pstate->scaler3_cfg.src_width[i] ||
-			vert_req_pixels != pstate->scaler3_cfg.src_height[i])) {
+		if (i != 3 && (hor_req_pixels != scaler_w ||
+					vert_req_pixels != scaler_h)) {
 			SDE_ERROR_PLANE(psde,
-				"roi[%d] %d/%d, scaler src %dx%d, src %dx%d\n",
+			    "roi[%d] roi:%dx%d scaler:%dx%d src:%dx%d rot:%d\n",
 				i, pstate->pixel_ext.roi_w[i],
 				pstate->pixel_ext.roi_h[i],
-				pstate->scaler3_cfg.src_width[i],
-				pstate->scaler3_cfg.src_height[i],
-				src_w, src_h);
+				scaler_w, scaler_h, src_w, src_h, rot);
 			return -EINVAL;
 		}
 
@@ -2410,7 +2421,8 @@
 	int ret = 0;
 	uint32_t deci_w, deci_h, src_deci_w, src_deci_h;
 	uint32_t scaler_src_w, scaler_src_h;
-	uint32_t max_upscale, max_downscale, max_linewidth;
+	uint32_t max_downscale_num, max_downscale_denom;
+	uint32_t max_upscale, max_linewidth;
 	bool inline_rotation, rt_client;
 	struct drm_crtc *crtc;
 
@@ -2439,14 +2451,20 @@
 	else
 		rt_client = true;
 
+	max_downscale_denom = 1;
 	/* inline rotation RT clients have a different max downscaling limit */
 	if (inline_rotation) {
-		if (rt_client)
-			max_downscale = psde->pipe_sblk->in_rot_maxdwnscale_rt;
-		else
-			max_downscale = psde->pipe_sblk->in_rot_maxdwnscale_nrt;
+		if (rt_client) {
+			max_downscale_num =
+				psde->pipe_sblk->in_rot_maxdwnscale_rt_num;
+			max_downscale_denom =
+				psde->pipe_sblk->in_rot_maxdwnscale_rt_denom;
+		} else {
+			max_downscale_num =
+				psde->pipe_sblk->in_rot_maxdwnscale_nrt;
+		}
 	} else {
-		max_downscale = psde->pipe_sblk->maxdwnscale;
+		max_downscale_num = psde->pipe_sblk->maxdwnscale;
 	}
 
 	/* decimation validation */
@@ -2479,8 +2497,10 @@
 	/* check max scaler capability */
 	else if (((scaler_src_w * max_upscale) < dst->w) ||
 		((scaler_src_h * max_upscale) < dst->h) ||
-		((dst->w * max_downscale) < scaler_src_w) ||
-		((dst->h * max_downscale) < scaler_src_h)) {
+		(((dst->w * max_downscale_num) / max_downscale_denom)
+			< scaler_src_w) ||
+		(((dst->h * max_downscale_num) / max_downscale_denom)
+			< scaler_src_h)) {
 		SDE_ERROR_PLANE(psde,
 			"too much scaling requested %ux%u->%ux%u rot:%d\n",
 			scaler_src_w, scaler_src_h, dst->w, dst->h,
@@ -3545,8 +3565,16 @@
 		const struct sde_format_extended *inline_rot_fmt_list;
 
 		sde_kms_info_add_keyint(info, "true_inline_rot_rev", 1);
-		sde_kms_info_add_keyint(info, "true_inline_dwnscale_rt",
-			psde->pipe_sblk->in_rot_maxdwnscale_rt);
+		sde_kms_info_add_keyint(info,
+			"true_inline_dwnscale_rt",
+			(int) (psde->pipe_sblk->in_rot_maxdwnscale_rt_num /
+				psde->pipe_sblk->in_rot_maxdwnscale_rt_denom));
+		sde_kms_info_add_keyint(info,
+				"true_inline_dwnscale_rt_numerator",
+				psde->pipe_sblk->in_rot_maxdwnscale_rt_num);
+		sde_kms_info_add_keyint(info,
+				"true_inline_dwnscale_rt_denominator",
+				psde->pipe_sblk->in_rot_maxdwnscale_rt_denom);
 		sde_kms_info_add_keyint(info, "true_inline_dwnscale_nrt",
 			psde->pipe_sblk->in_rot_maxdwnscale_nrt);
 		sde_kms_info_add_keyint(info, "true_inline_max_height",
@@ -4276,10 +4304,14 @@
 				&psde->debugfs_default_scale);
 
 	if (cfg->features & BIT(SDE_SSPP_TRUE_INLINE_ROT_V1)) {
-		debugfs_create_u32("in_rot_max_downscale_rt",
+		debugfs_create_u32("in_rot_max_downscale_rt_num",
 			0600,
 			psde->debugfs_root,
-			(u32 *) &psde->pipe_sblk->in_rot_maxdwnscale_rt);
+			(u32 *) &psde->pipe_sblk->in_rot_maxdwnscale_rt_num);
+		debugfs_create_u32("in_rot_max_downscale_rt_denom",
+			0600,
+			psde->debugfs_root,
+			(u32 *) &psde->pipe_sblk->in_rot_maxdwnscale_rt_denom);
 		debugfs_create_u32("in_rot_max_downscale_nrt",
 			0600,
 			psde->debugfs_root,
diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c
index 9a64dd9..ec019fb 100644
--- a/drivers/gpu/drm/msm/sde_dbg.c
+++ b/drivers/gpu/drm/msm/sde_dbg.c
@@ -4413,7 +4413,7 @@
 
 		snprintf(debug_name, sizeof(debug_name), "%s_reg",
 				blk_base->name);
-		debugfs_create_file(debug_name, 0600, debugfs_root, blk_base,
+		debugfs_create_file(debug_name, 0400, debugfs_root, blk_base,
 				&sde_reg_fops);
 	}
 
diff --git a/drivers/gpu/drm/msm/sde_hdcp.h b/drivers/gpu/drm/msm/sde_hdcp.h
index 03178ca..5c1dc4a 100644
--- a/drivers/gpu/drm/msm/sde_hdcp.h
+++ b/drivers/gpu/drm/msm/sde_hdcp.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2012, 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012, 2014-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef __SDE_HDCP_H__
@@ -13,12 +13,15 @@
 #include <linux/debugfs.h>
 #include <linux/of_device.h>
 #include <linux/i2c.h>
+#include <linux/list.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
 #include <linux/hdcp_qseecom.h>
 #include "sde_kms.h"
 
+#define MAX_STREAM_COUNT 2
+
 enum sde_hdcp_client_id {
 	HDCP_CLIENT_HDMI,
 	HDCP_CLIENT_DP,
@@ -38,6 +41,18 @@
 	HDCP_VERSION_MAX = BIT(2),
 };
 
+struct stream_info {
+	u8 stream_id;
+	u8 virtual_channel;
+};
+
+struct sde_hdcp_stream {
+	struct list_head list;
+	u8 stream_id;
+	u8 virtual_channel;
+	u32 stream_handle;
+};
+
 struct sde_hdcp_init_data {
 	struct device *msm_hdcp_dev;
 	struct dss_io_data *core_io;
@@ -67,7 +82,13 @@
 	bool (*feature_supported)(void *input);
 	void (*force_encryption)(void *input, bool enable);
 	bool (*sink_support)(void *input);
+	int (*set_mode)(void *input, bool mst_enabled);
+	int (*on)(void *input);
 	void (*off)(void *hdcp_ctrl);
+	int (*register_streams)(void *input, u8 num_streams,
+			struct stream_info *streams);
+	int (*deregister_streams)(void *input, u8 num_streams,
+			struct stream_info *streams);
 };
 
 static inline const char *sde_hdcp_state_name(enum sde_hdcp_state hdcp_state)
diff --git a/drivers/gpu/drm/msm/sde_hdcp_2x.c b/drivers/gpu/drm/msm/sde_hdcp_2x.c
index 54dfc8f..f578e09 100644
--- a/drivers/gpu/drm/msm/sde_hdcp_2x.c
+++ b/drivers/gpu/drm/msm/sde_hdcp_2x.c
@@ -64,6 +64,10 @@
 	atomic_t hdcp_off;
 	enum sde_hdcp_2x_device_type device_type;
 	u8 min_enc_level;
+	struct list_head stream_handles;
+	u8 stream_count;
+	struct stream_info *streams;
+	u8 num_streams;
 
 	struct task_struct *thread;
 	struct completion response_completion;
@@ -315,6 +319,8 @@
 
 static void sde_hdcp_2x_clean(struct sde_hdcp_2x_ctrl *hdcp)
 {
+	struct list_head *element;
+	struct sde_hdcp_stream *stream_entry;
 	struct hdcp_transport_wakeup_data cdata = {HDCP_TRANSPORT_CMD_INVALID};
 
 	hdcp->authenticated = false;
@@ -322,10 +328,20 @@
 	cdata.context = hdcp->client_data;
 	cdata.cmd = HDCP_TRANSPORT_CMD_STATUS_FAILED;
 
-	if (!atomic_read(&hdcp->hdcp_off))
-		sde_hdcp_2x_wakeup_client(hdcp, &cdata);
+	while (!list_empty(&hdcp->stream_handles)) {
+		element = hdcp->stream_handles.next;
+		list_del(element);
 
-	atomic_set(&hdcp->hdcp_off, 1);
+		stream_entry = list_entry(element, struct sde_hdcp_stream,
+			list);
+		hdcp2_close_stream(hdcp->hdcp2_ctx,
+			stream_entry->stream_handle);
+		kzfree(stream_entry);
+		hdcp->stream_count--;
+	}
+
+	if (!atomic_xchg(&hdcp->hdcp_off, 1))
+		sde_hdcp_2x_wakeup_client(hdcp, &cdata);
 
 	hdcp2_app_comm(hdcp->hdcp2_ctx, HDCP2_CMD_STOP, &hdcp->app_data);
 }
@@ -333,20 +349,17 @@
 static u8 sde_hdcp_2x_stream_type(u8 min_enc_level)
 {
 	u8 stream_type = 0;
-	u8 const hdcp_min_enc_level_0 = 0, hdcp_min_enc_level_1 = 1,
-	   hdcp_min_enc_level_2 = 2;
-	u8 const stream_type_0 = 0, stream_type_1 = 1;
 
 	switch (min_enc_level) {
-	case hdcp_min_enc_level_0:
-	case hdcp_min_enc_level_1:
-		stream_type = stream_type_0;
+	case 0:
+	case 1:
+		stream_type = 0;
 		break;
-	case hdcp_min_enc_level_2:
-		stream_type = stream_type_1;
+	case 2:
+		stream_type = 1;
 		break;
 	default:
-		stream_type = stream_type_0;
+		stream_type = 0;
 		break;
 	}
 
@@ -480,19 +493,26 @@
 static void sde_hdcp_2x_init(struct sde_hdcp_2x_ctrl *hdcp)
 {
 	int rc;
-
 	rc = hdcp2_app_comm(hdcp->hdcp2_ctx, HDCP2_CMD_START, &hdcp->app_data);
 	if (rc)
-		goto exit;
+		sde_hdcp_2x_clean(hdcp);
+}
 
-	pr_debug("[tz]: %s\n", sde_hdcp_2x_message_name(
-		hdcp->app_data.response.data[0]));
+static void sde_hdcp_2x_start_auth(struct sde_hdcp_2x_ctrl *hdcp)
+{
+	int rc;
+
+	rc = hdcp2_app_comm(hdcp->hdcp2_ctx, HDCP2_CMD_START_AUTH,
+		&hdcp->app_data);
+	if (rc) {
+		sde_hdcp_2x_clean(hdcp);
+		return;
+	}
+
+	pr_debug("message received from TZ: %s\n",
+		 sde_hdcp_2x_message_name(hdcp->app_data.response.data[0]));
 
 	sde_hdcp_2x_send_message(hdcp);
-
-	return;
-exit:
-	sde_hdcp_2x_clean(hdcp);
 }
 
 static void sde_hdcp_2x_timeout(struct sde_hdcp_2x_ctrl *hdcp)
@@ -542,7 +562,8 @@
 		goto exit;
 	}
 
-	if (hdcp->device_type == HDCP_TXMTR_DP) {
+	if (hdcp->device_type == HDCP_TXMTR_DP ||
+			hdcp->device_type == HDCP_TXMTR_DP_MST) {
 		msg[0] = hdcp->last_msg;
 		message_id_bytes = 1;
 	}
@@ -628,6 +649,147 @@
 		sde_hdcp_2x_clean(hdcp);
 }
 
+static struct list_head *sde_hdcp_2x_stream_present(
+		struct sde_hdcp_2x_ctrl *hdcp, u8 stream_id, u8 virtual_channel)
+{
+	struct sde_hdcp_stream *stream_entry;
+	struct list_head *entry;
+	bool present = false;
+
+	list_for_each(entry, &hdcp->stream_handles) {
+		stream_entry = list_entry(entry,
+			struct sde_hdcp_stream, list);
+		if (stream_entry->virtual_channel == virtual_channel &&
+				stream_entry->stream_id == stream_id) {
+			present = true;
+			break;
+		}
+	}
+
+	if (!present)
+		entry = NULL;
+	return entry;
+}
+
+static void sde_hdcp_2x_open_stream(struct sde_hdcp_2x_ctrl *hdcp)
+{
+	int rc;
+	size_t iterations, i;
+	u8 stream_id;
+	u8 virtual_channel;
+	u32 stream_handle = 0;
+	bool query_streams = false;
+
+	if (!hdcp->streams) {
+		pr_err("Array of streams to register is NULL\n");
+		return;
+	}
+
+	iterations = min(hdcp->num_streams, (u8)(MAX_STREAM_COUNT));
+
+	for (i  = 0; i < iterations; i++) {
+		if (hdcp->stream_count == MAX_STREAM_COUNT) {
+			pr_debug("Registered the maximum amount of streams\n");
+			break;
+		}
+
+		stream_id = hdcp->streams[i].stream_id;
+		virtual_channel = hdcp->streams[i].virtual_channel;
+
+		pr_debug("Opening stream %d, virtual channel %d\n",
+			stream_id, virtual_channel);
+
+		if (sde_hdcp_2x_stream_present(hdcp, stream_id,
+				virtual_channel)) {
+			pr_debug("Stream %d, virtual channel %d already open\n",
+				stream_id, virtual_channel);
+			continue;
+		}
+
+		rc = hdcp2_open_stream(hdcp->hdcp2_ctx, virtual_channel,
+				stream_id, &stream_handle);
+		if (rc) {
+			pr_err("Unable to open stream %d, virtual channel %d\n",
+				stream_id, virtual_channel);
+		} else {
+			struct sde_hdcp_stream *stream =
+				kzalloc(sizeof(struct sde_hdcp_stream),
+					GFP_KERNEL);
+			if (!stream)
+				break;
+
+			INIT_LIST_HEAD(&stream->list);
+			stream->stream_handle = stream_handle;
+			stream->stream_id = stream_id;
+			stream->virtual_channel = virtual_channel;
+
+			list_add(&stream->list, &hdcp->stream_handles);
+			hdcp->stream_count++;
+
+			query_streams = true;
+		}
+	}
+
+	if (query_streams && hdcp->authenticated)
+		sde_hdcp_2x_query_stream(hdcp);
+}
+
+static void sde_hdcp_2x_close_stream(struct sde_hdcp_2x_ctrl *hdcp)
+{
+	int rc;
+	size_t iterations, i;
+	u8 stream_id;
+	u8 virtual_channel;
+	struct list_head *entry;
+	struct sde_hdcp_stream *stream_entry;
+	bool query_streams = false;
+
+	if (!hdcp->streams) {
+		pr_err("Array of streams to register is NULL\n");
+		return;
+	}
+
+	iterations = min(hdcp->num_streams, (u8)(MAX_STREAM_COUNT));
+
+	for (i = 0; i < iterations; i++) {
+		if (hdcp->stream_count == 0) {
+			pr_debug("No streams are currently registered\n");
+			return;
+		}
+
+		stream_id = hdcp->streams[i].stream_id;
+		virtual_channel = hdcp->streams[i].virtual_channel;
+
+		pr_debug("Closing stream %d, virtual channel %d\n",
+			stream_id, virtual_channel);
+
+		entry = sde_hdcp_2x_stream_present(hdcp, stream_id,
+			virtual_channel);
+
+		if (!entry) {
+			pr_err("Unable to find stream %d, virtual channel %d\n"
+				, stream_id, virtual_channel);
+			continue;
+		}
+
+		stream_entry = list_entry(entry, struct sde_hdcp_stream,
+			list);
+
+		rc = hdcp2_close_stream(hdcp->hdcp2_ctx,
+			stream_entry->stream_handle);
+		if (rc)
+			pr_err("Unable to close stream %d, virtual channel %d\n"
+				, stream_id, virtual_channel);
+		hdcp->stream_count--;
+		list_del(entry);
+		kzfree(stream_entry);
+		query_streams = true;
+	}
+
+	if (query_streams && hdcp->authenticated)
+		sde_hdcp_2x_query_stream(hdcp);
+}
+
 /** sde_hdcp_2x_wakeup() - wakeup the module to execute a requested command
  * @data: data required for executing corresponding command.
  *
@@ -651,6 +813,8 @@
 	hdcp->timeout_left = data->timeout;
 	hdcp->total_message_length = data->total_message_length;
 	hdcp->min_enc_level = data->min_enc_level;
+	hdcp->streams = data->streams;
+	hdcp->num_streams = data->num_streams;
 
 	if (!completion_done(&hdcp->response_completion))
 		complete_all(&hdcp->response_completion);
@@ -712,6 +876,9 @@
 		case HDCP_2X_CMD_STOP:
 			sde_hdcp_2x_clean(hdcp);
 			break;
+		case HDCP_2X_CMD_START_AUTH:
+			sde_hdcp_2x_start_auth(hdcp);
+			break;
 		case HDCP_2X_CMD_MSG_SEND_SUCCESS:
 			sde_hdcp_2x_msg_sent(hdcp);
 			break;
@@ -736,6 +903,12 @@
 			}
 			sde_hdcp_2x_query_stream(hdcp);
 			break;
+		case HDCP_2X_CMD_OPEN_STREAMS:
+			sde_hdcp_2x_open_stream(hdcp);
+			break;
+		case HDCP_2X_CMD_CLOSE_STREAMS:
+			sde_hdcp_2x_close_stream(hdcp);
+			break;
 		default:
 			break;
 		}
@@ -780,16 +953,14 @@
 		goto unlock;
 	}
 
+	INIT_LIST_HEAD(&hdcp->stream_handles);
 	hdcp->client_data = data->client_data;
 	hdcp->client_ops = data->client_ops;
-	hdcp->device_type = data->device_type;
-
-	hdcp->hdcp2_ctx = hdcp2_init(hdcp->device_type);
 
 	INIT_KFIFO(hdcp->cmd_q);
 
 	init_waitqueue_head(&hdcp->wait_q);
-	atomic_set(&hdcp->hdcp_off, 0);
+	atomic_set(&hdcp->hdcp_off, 1);
 
 	init_completion(&hdcp->response_completion);
 
@@ -814,6 +985,40 @@
 	return rc;
 }
 
+int sde_hdcp_2x_enable(void *data, enum sde_hdcp_2x_device_type device_type)
+{
+	int rc =  0;
+	struct sde_hdcp_2x_ctrl *hdcp = data;
+
+	if (!hdcp)
+		return  -EINVAL;
+
+	if (hdcp->hdcp2_ctx) {
+		pr_debug("HDCP library context already acquired\n");
+		return 0;
+	}
+
+	hdcp->device_type = device_type;
+	hdcp->hdcp2_ctx = hdcp2_init(hdcp->device_type);
+	if (!hdcp->hdcp2_ctx) {
+		pr_err("Unable to acquire HDCP library handle\n");
+		return -ENOMEM;
+	}
+
+	return rc;
+}
+
+void sde_hdcp_2x_disable(void *data)
+{
+	struct sde_hdcp_2x_ctrl *hdcp = data;
+
+	if (!hdcp->hdcp2_ctx)
+		return;
+
+	hdcp2_deinit(hdcp->hdcp2_ctx);
+	hdcp->hdcp2_ctx = NULL;
+}
+
 void sde_hdcp_2x_deregister(void *data)
 {
 	struct sde_hdcp_2x_ctrl *hdcp = data;
@@ -821,7 +1026,7 @@
 	if (!hdcp)
 		return;
 
+	sde_hdcp_2x_disable(data);
 	kthread_stop(hdcp->thread);
-	hdcp2_deinit(hdcp->hdcp2_ctx);
 	kzfree(hdcp);
 }
diff --git a/drivers/gpu/drm/msm/sde_hdcp_2x.h b/drivers/gpu/drm/msm/sde_hdcp_2x.h
index 47247e4..cfcd7ce 100644
--- a/drivers/gpu/drm/msm/sde_hdcp_2x.h
+++ b/drivers/gpu/drm/msm/sde_hdcp_2x.h
@@ -15,8 +15,9 @@
 /**
  * enum sde_hdcp_2x_wakeup_cmd - commands for interacting with HDCP driver
  * @HDCP_2X_CMD_INVALID:           initialization value
- * @HDCP_2X_CMD_START:             start authentication
- * @HDCP_2X_CMD_STOP:              stop authentication
+ * @HDCP_2X_CMD_START:             start HDCP driver
+ * @HDCP_2X_CMD_START_AUTH:        start authentication
+ * @HDCP_2X_CMD_STOP:              stop HDCP driver
  * @HDCP_2X_CMD_MSG_SEND_SUCCESS:  sending message to sink succeeded
  * @HDCP_2X_CMD_MSG_SEND_FAILED:   sending message to sink failed
  * @HDCP_2X_CMD_MSG_SEND_TIMEOUT:  sending message to sink timed out
@@ -26,10 +27,13 @@
  * @HDCP_2X_CMD_QUERY_STREAM_TYPE: start content stream processing
  * @HDCP_2X_CMD_LINK_FAILED:       link failure notification
  * @HDCP_2X_CMD_MIN_ENC_LEVEL:     trigger minimum encryption level change
+ * @HDCP_2X_CMD_OPEN_STREAMS:      open one or more virtual channels
+ * @HDCP_2X_CMD_CLOSE_STREAMS:     close one or more virtual channels
  */
 enum sde_hdcp_2x_wakeup_cmd {
 	HDCP_2X_CMD_INVALID,
 	HDCP_2X_CMD_START,
+	HDCP_2X_CMD_START_AUTH,
 	HDCP_2X_CMD_STOP,
 	HDCP_2X_CMD_MSG_SEND_SUCCESS,
 	HDCP_2X_CMD_MSG_SEND_FAILED,
@@ -40,6 +44,8 @@
 	HDCP_2X_CMD_QUERY_STREAM_TYPE,
 	HDCP_2X_CMD_LINK_FAILED,
 	HDCP_2X_CMD_MIN_ENC_LEVEL,
+	HDCP_2X_CMD_OPEN_STREAMS,
+	HDCP_2X_CMD_CLOSE_STREAMS,
 };
 
 /**
@@ -66,16 +72,19 @@
 
 enum sde_hdcp_2x_device_type {
 	HDCP_TXMTR_HDMI = 0x8001,
-	HDCP_TXMTR_DP = 0x8002
+	HDCP_TXMTR_DP = 0x8002,
+	HDCP_TXMTR_DP_MST = 0x8003
 };
 
 /**
  * struct sde_hdcp_2x_lib_wakeup_data - command and data send to HDCP driver
- * @cmd:       command type
- * @context:   void pointer to the HDCP driver instance
- * @buf:       message received from the sink
- * @buf_len:   length of message received from the sink
- * @timeout:   time out value for timed transactions
+ * @cmd:                       command type
+ * @context:                   void pointer to the HDCP driver instance
+ * @buf:                       message received from the sink
+ * @buf_len:                   length of message received from the sink
+ * @timeout:                   time out value for timed transactions
+ * @streams:                   list indicating which streams need adjustment
+ * @num_streams:               number of entries in streams
  */
 struct sde_hdcp_2x_wakeup_data {
 	enum sde_hdcp_2x_wakeup_cmd cmd;
@@ -83,6 +92,8 @@
 	uint32_t total_message_length;
 	uint32_t timeout;
 	u8 min_enc_level;
+	struct stream_info *streams;
+	u8 num_streams;
 };
 
 /**
@@ -151,6 +162,10 @@
 		return TO_STR(HDCP_2X_CMD_MSG_RECV_TIMEOUT);
 	case HDCP_2X_CMD_QUERY_STREAM_TYPE:
 		return TO_STR(HDCP_2X_CMD_QUERY_STREAM_TYPE);
+	case HDCP_2X_CMD_OPEN_STREAMS:
+		return TO_STR(HDCP_2X_CMD_OPEN_STREAMS);
+	case HDCP_2X_CMD_CLOSE_STREAMS:
+		return TO_STR(HDCP_2X_CMD_CLOSE_STREAMS);
 	default:
 		return "UNKNOWN";
 	}
@@ -190,12 +205,13 @@
 struct sde_hdcp_2x_register_data {
 	struct hdcp_transport_ops *client_ops;
 	struct sde_hdcp_2x_ops *ops;
-	enum sde_hdcp_2x_device_type device_type;
 	void *client_data;
 	void **hdcp_data;
 };
 
 /* functions for the HDCP 2.2 state machine module */
 int sde_hdcp_2x_register(struct sde_hdcp_2x_register_data *data);
+int sde_hdcp_2x_enable(void *data, enum sde_hdcp_2x_device_type device_type);
+void sde_hdcp_2x_disable(void *data);
 void sde_hdcp_2x_deregister(void *data);
 #endif
diff --git a/drivers/gpu/drm/msm/sde_power_handle.h b/drivers/gpu/drm/msm/sde_power_handle.h
index cada0fb..d14441c 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.h
+++ b/drivers/gpu/drm/msm/sde_power_handle.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _SDE_POWER_HANDLE_H_
@@ -14,8 +14,8 @@
 #define SDE_POWER_HANDLE_ENABLE_NRT_BUS_IB_QUOTA	0
 #define SDE_POWER_HANDLE_DISABLE_BUS_IB_QUOTA	0
 
-#define SDE_POWER_HANDLE_CONT_SPLASH_BUS_IB_QUOTA	1800000000
-#define SDE_POWER_HANDLE_CONT_SPLASH_BUS_AB_QUOTA	1800000000
+#define SDE_POWER_HANDLE_CONT_SPLASH_BUS_IB_QUOTA	3000000000
+#define SDE_POWER_HANDLE_CONT_SPLASH_BUS_AB_QUOTA	3000000000
 
 #include <linux/sde_io_util.h>
 #include <soc/qcom/cx_ipeak.h>
diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c
index 725d34f..cf218c6 100644
--- a/drivers/gpu/drm/msm/sde_rsc.c
+++ b/drivers/gpu/drm/msm/sde_rsc.c
@@ -94,7 +94,7 @@
 		pr_err("invalid rsc index\n");
 		return ERR_PTR(-EINVAL);
 	} else if (!rsc_prv_list[rsc_index]) {
-		pr_err("rsc not probed yet or not available\n");
+		pr_debug("rsc not probed yet or not available\n");
 		return NULL;
 	}
 
@@ -250,7 +250,7 @@
 		pr_err("invalid rsc index:%d\n", rsc_index);
 		return false;
 	} else if (!rsc_prv_list[rsc_index]) {
-		pr_err("rsc idx:%d not probed yet or not available\n",
+		pr_debug("rsc idx:%d not probed yet or not available\n",
 								rsc_index);
 		return false;
 	}
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index d587779..a97294a 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -5676,7 +5676,7 @@
 	u16 data_offset, size;
 	u8 frev, crev;
 	struct ci_power_info *pi;
-	enum pci_bus_speed speed_cap;
+	enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
 	struct pci_dev *root = rdev->pdev->bus->self;
 	int ret;
 
@@ -5685,7 +5685,8 @@
 		return -ENOMEM;
 	rdev->pm.dpm.priv = pi;
 
-	speed_cap = pcie_get_speed_cap(root);
+	if (!pci_is_root_bus(rdev->pdev->bus))
+		speed_cap = pcie_get_speed_cap(root);
 	if (speed_cap == PCI_SPEED_UNKNOWN) {
 		pi->sys_pcie_mask = 0;
 	} else {
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 8fb60b3..0a785ef 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -6899,7 +6899,7 @@
 	struct ni_power_info *ni_pi;
 	struct si_power_info *si_pi;
 	struct atom_clock_dividers dividers;
-	enum pci_bus_speed speed_cap;
+	enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
 	struct pci_dev *root = rdev->pdev->bus->self;
 	int ret;
 
@@ -6911,7 +6911,8 @@
 	eg_pi = &ni_pi->eg;
 	pi = &eg_pi->rv7xx;
 
-	speed_cap = pcie_get_speed_cap(root);
+	if (!pci_is_root_bus(rdev->pdev->bus))
+		speed_cap = pcie_get_speed_cap(root);
 	if (speed_cap == PCI_SPEED_UNKNOWN) {
 		si_pi->sys_pcie_mask = 0;
 	} else {
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 061d2e0..416da53 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -92,6 +92,8 @@
 	val = readl(hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
 	val &= ~SUN4I_HDMI_VID_CTRL_ENABLE;
 	writel(val, hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
+
+	clk_disable_unprepare(hdmi->tmds_clk);
 }
 
 static void sun4i_hdmi_enable(struct drm_encoder *encoder)
@@ -102,6 +104,8 @@
 
 	DRM_DEBUG_DRIVER("Enabling the HDMI Output\n");
 
+	clk_prepare_enable(hdmi->tmds_clk);
+
 	sun4i_hdmi_setup_avi_infoframes(hdmi, mode);
 	val |= SUN4I_HDMI_PKT_CTRL_TYPE(0, SUN4I_HDMI_PKT_AVI);
 	val |= SUN4I_HDMI_PKT_CTRL_TYPE(1, SUN4I_HDMI_PKT_END);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 3fb084f..8c31c9a 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -672,6 +672,7 @@
 			return PTR_ERR(tcon->sclk0);
 		}
 	}
+	clk_prepare_enable(tcon->sclk0);
 
 	if (tcon->quirks->has_channel_1) {
 		tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
@@ -686,6 +687,7 @@
 
 static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon)
 {
+	clk_disable_unprepare(tcon->sclk0);
 	clk_disable_unprepare(tcon->clk);
 }
 
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index 8819898..3ddb7f2 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -967,6 +967,7 @@
 #define A6XX_GMU_RPMH_HYST_CTRL			0x1F8E9
 #define A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE    0x1F8EC
 #define A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG      0x1F900
+#define A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP     0x1F901
 #define A6XX_GMU_BOOT_KMD_LM_HANDSHAKE		0x1F9F0
 #define A6XX_GMU_LLM_GLM_SLEEP_CTRL		0x1F957
 #define A6XX_GMU_LLM_GLM_SLEEP_STATUS		0x1F958
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index c2f7a04..9cc556a 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -452,7 +452,8 @@
 		.major = 5,
 		.minor = 0,
 		.patchid = 0,
-		.features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_GPMU,
+		.features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_GPMU |
+			ADRENO_IOCOHERENT,
 		.sqefw_name = "a650_sqe.fw",
 		.zap_name = "a650_zap",
 		.gpudev = &adreno_a6xx_gpudev,
diff --git a/drivers/gpu/msm/adreno_a6xx_gmu.c b/drivers/gpu/msm/adreno_a6xx_gmu.c
index 5f308c3..2dc05a2 100644
--- a/drivers/gpu/msm/adreno_a6xx_gmu.c
+++ b/drivers/gpu/msm/adreno_a6xx_gmu.c
@@ -325,6 +325,10 @@
 
 	kgsl_regwrite(device, A6XX_GMU_CX_GMU_WFI_CONFIG, 0x0);
 
+	/* Set the log wptr index */
+	gmu_core_regwrite(device, A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP,
+			gmu->log_wptr_retention);
+
 	/* Bring GMU out of reset */
 	gmu_core_regwrite(device, A6XX_GMU_CM3_SYSRESET, 0);
 	if (timed_poll_check(device,
@@ -423,6 +427,9 @@
 	/* Make sure M3 is in reset before going on */
 	wmb();
 
+	gmu_core_regread(device, A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP,
+			&gmu->log_wptr_retention);
+
 	/* RSC sleep sequence is different on v1 */
 	if (adreno_is_a630v1(adreno_dev))
 		gmu_core_regwrite(device, A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0 +
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index b9c24ac..345a2b5 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2008-2019, The Linux Foundation. All rights reserved.
  */
 #include <linux/module.h>
 #include <linux/fb.h>
@@ -26,6 +26,8 @@
 #include <linux/ion.h>
 #include <asm/cacheflush.h>
 #include <uapi/linux/sched/types.h>
+#include <linux/of_fdt.h>
+#include <linux/msm-bus.h>
 
 #include "kgsl.h"
 #include "kgsl_debugfs.h"
@@ -1354,6 +1356,33 @@
 	spin_unlock(&entry->priv->mem_lock);
 }
 
+struct msm_bus_scale_pdata *kgsl_get_bus_scale_table(struct kgsl_device *device)
+{
+	struct device_node *child = NULL, *parent;
+	char str[24];
+
+	parent = device->pdev->dev.of_node;
+
+	snprintf(str, sizeof(str), "qcom,gpu-bus-table-ddr%d",
+		of_fdt_get_ddrtype());
+
+	child = of_find_compatible_node(parent, NULL, str);
+
+	/* Go with the first bus table node */
+	if (child == NULL)
+		child = of_find_compatible_node(parent, NULL,
+			"qcom,gpu-bus-table");
+
+	if (child) {
+		struct msm_bus_scale_pdata *data = msm_bus_pdata_from_node(
+					device->pdev, child);
+		of_node_put(child);
+		return data;
+	}
+
+	return msm_bus_cl_get_pdata(device->pdev);
+}
+
 /**
  * kgsl_mem_entry_set_pend() - Set the pending free flag of a memory entry
  * @entry - The memory entry
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 4afd45b..d6784b1 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2002,2007-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2002,2007-2019, The Linux Foundation. All rights reserved.
  */
 #ifndef __KGSL_DEVICE_H
 #define __KGSL_DEVICE_H
@@ -915,6 +915,21 @@
 	void *priv);
 
 /**
+ * kgsl_get_bus_scale_table() - Get the bus scaling table from devicetree
+ * @device: kgsl device handle
+ *
+ * This function will try to find the correct bus table data from the device
+ * tree based on the underlying ddr type. If no matching child is found,
+ * it will fallback to the first child node containing the bus scaling data.
+ * If no child is found, it will pass the current device node, hoping that
+ * bus scaling data is provided as properties of the current device node.
+ *
+ * Return: Pointer to the structure containing the parsed bus scaling data
+ */
+struct msm_bus_scale_pdata *kgsl_get_bus_scale_table(
+	struct kgsl_device *device);
+
+/**
  * struct kgsl_pwr_limit - limit structure for each client
  * @node: Local list node for the limits list
  * @level: requested power level
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index 37e6f5f..19bb72c 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -22,9 +22,6 @@
 #undef MODULE_PARAM_PREFIX
 #define MODULE_PARAM_PREFIX "kgsl."
 
-#define GMU_CONTEXT_USER		0
-#define GMU_CONTEXT_KERNEL		1
-
 #define GMU_CM3_CFG_NONMASKINTR_SHIFT    9
 
 struct gmu_iommu_context {
@@ -363,11 +360,11 @@
 }
 
 /*
- * gmu_kmem_close() - free all kernel memory allocated for GMU and detach GMU
+ * gmu_memory_close() - free all memory allocated for GMU and detach GMU
  * from IOMMU context banks.
  * @gmu: Pointer to GMU device
  */
-static void gmu_kmem_close(struct gmu_device *gmu)
+static void gmu_memory_close(struct gmu_device *gmu)
 {
 	int i;
 	struct gmu_memdesc *md;
@@ -395,19 +392,14 @@
 		clear_bit(i, &gmu->kmem_bitmap);
 	}
 
-	/* Detach the device from SMMU context bank */
-	iommu_detach_device(ctx->domain, ctx->dev);
+	for (i = 0; i < ARRAY_SIZE(gmu_ctx); i++) {
+		ctx = &gmu_ctx[i];
 
-	/* free kernel mem context */
-	iommu_domain_free(ctx->domain);
-}
-
-static void gmu_memory_close(struct gmu_device *gmu)
-{
-	gmu_kmem_close(gmu);
-	/* Free user memory context */
-	iommu_domain_free(gmu_ctx[GMU_CONTEXT_USER].domain);
-
+		if (ctx->domain) {
+			iommu_detach_device(ctx->domain, ctx->dev);
+			iommu_domain_free(ctx->domain);
+		}
+	}
 }
 
 static enum gmu_mem_type gmu_get_blk_memtype(struct gmu_device *gmu,
@@ -459,38 +451,31 @@
 {
 	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	int ret;
 
 	/* Allocates & maps memory for HFI */
-	gmu->hfi_mem = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL, 0,
-			HFIMEM_SIZE, (IOMMU_READ | IOMMU_WRITE));
-	if (IS_ERR(gmu->hfi_mem)) {
-		ret = PTR_ERR(gmu->hfi_mem);
-		goto err_ret;
-	}
+	if (IS_ERR_OR_NULL(gmu->hfi_mem))
+		gmu->hfi_mem = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL, 0,
+				HFIMEM_SIZE, (IOMMU_READ | IOMMU_WRITE));
+	if (IS_ERR(gmu->hfi_mem))
+		return PTR_ERR(gmu->hfi_mem);
 
 	/* Allocates & maps GMU crash dump memory */
 	if (adreno_is_a630(adreno_dev) || adreno_is_a615_family(adreno_dev)) {
-		gmu->dump_mem = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL, 0,
-				DUMPMEM_SIZE, (IOMMU_READ | IOMMU_WRITE));
-		if (IS_ERR(gmu->dump_mem)) {
-			ret = PTR_ERR(gmu->dump_mem);
-			goto err_ret;
-		}
+		if (IS_ERR_OR_NULL(gmu->dump_mem))
+			gmu->dump_mem = allocate_gmu_kmem(gmu,
+					GMU_NONCACHED_KERNEL, 0,
+					DUMPMEM_SIZE,
+					(IOMMU_READ | IOMMU_WRITE));
+		if (IS_ERR(gmu->dump_mem))
+			return PTR_ERR(gmu->dump_mem);
 	}
 
 	/* GMU master log */
-	gmu->gmu_log = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL, 0,
-			LOGMEM_SIZE, (IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
-	if (IS_ERR(gmu->gmu_log)) {
-		ret = PTR_ERR(gmu->gmu_log);
-		goto err_ret;
-	}
-
-	return 0;
-err_ret:
-	gmu_memory_close(gmu);
-	return ret;
+	if (IS_ERR_OR_NULL(gmu->gmu_log))
+		gmu->gmu_log = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL, 0,
+				LOGMEM_SIZE,
+				(IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
+	return PTR_ERR_OR_ZERO(gmu->gmu_log);
 }
 
 /*
@@ -1126,9 +1111,9 @@
 
 static int gmu_gpu_bw_probe(struct kgsl_device *device, struct gmu_device *gmu)
 {
-	struct msm_bus_scale_pdata *bus_scale_table;
+	struct msm_bus_scale_pdata *bus_scale_table =
+		kgsl_get_bus_scale_table(device);
 
-	bus_scale_table = msm_bus_cl_get_pdata(device->pdev);
 	if (bus_scale_table == NULL) {
 		dev_err(&gmu->pdev->dev, "dt: cannot get bus table\n");
 		return -ENODEV;
diff --git a/drivers/gpu/msm/kgsl_gmu.h b/drivers/gpu/msm/kgsl_gmu.h
index e5845b7..e57a844 100644
--- a/drivers/gpu/msm/kgsl_gmu.h
+++ b/drivers/gpu/msm/kgsl_gmu.h
@@ -93,6 +93,11 @@
 	GMU_MEM_TYPE_MAX,
 };
 
+enum gmu_context_index {
+	GMU_CONTEXT_USER = 0,
+	GMU_CONTEXT_KERNEL,
+};
+
 /**
  * struct gmu_memdesc - Gmu shared memory object descriptor
  * @hostptr: Kernel virtual address
@@ -108,7 +113,7 @@
 	phys_addr_t physaddr;
 	uint64_t size;
 	enum gmu_mem_type mem_type;
-	uint32_t ctx_idx;
+	enum gmu_context_index ctx_idx;
 };
 
 struct gmu_bw_votes {
@@ -172,6 +177,7 @@
  * @idle_level: Minimal GPU idle power level
  * @fault_count: GMU fault count
  * @mailbox: Messages to AOP for ACD enable/disable go through this
+ * @log_wptr_retention: Store the log wptr offset on slumber
  */
 struct gmu_device {
 	struct {
@@ -214,6 +220,7 @@
 	struct gmu_memdesc kmem_entries[GMU_KERNEL_ENTRIES];
 	unsigned long kmem_bitmap;
 	const struct gmu_vma_entry *vma;
+	unsigned int log_wptr_retention;
 };
 
 struct gmu_memdesc *gmu_get_memdesc(struct gmu_device *gmu,
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index e7c51d6..7c76a8a 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/export.h>
@@ -2044,7 +2044,7 @@
 	struct device_node *gpubw_dev_node = NULL;
 	struct platform_device *p2dev;
 
-	bus_scale_table = msm_bus_cl_get_pdata(device->pdev);
+	bus_scale_table = kgsl_get_bus_scale_table(device);
 	if (bus_scale_table == NULL)
 		return -EINVAL;
 
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index e123112..293142d 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -167,8 +167,9 @@
 {
 	struct stp_master *master;
 	size_t size;
+	unsigned long align = sizeof(unsigned long);
 
-	size = ALIGN(stm->data->sw_nchannels, 8) / 8;
+	size = ALIGN(stm->data->sw_nchannels, align) / align;
 	size += sizeof(struct stp_master);
 	master = kzalloc(size, GFP_ATOMIC);
 	if (!master)
@@ -218,8 +219,8 @@
 	bitmap_release_region(&master->chan_map[0], output->channel,
 			      ilog2(output->nr_chans));
 
-	output->nr_chans = 0;
 	master->nr_free += output->nr_chans;
+	output->nr_chans = 0;
 }
 
 /*
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 65d06a8..2ac8609 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1498,8 +1498,7 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM
-static int omap_i2c_runtime_suspend(struct device *dev)
+static int __maybe_unused omap_i2c_runtime_suspend(struct device *dev)
 {
 	struct omap_i2c_dev *omap = dev_get_drvdata(dev);
 
@@ -1525,7 +1524,7 @@
 	return 0;
 }
 
-static int omap_i2c_runtime_resume(struct device *dev)
+static int __maybe_unused omap_i2c_runtime_resume(struct device *dev)
 {
 	struct omap_i2c_dev *omap = dev_get_drvdata(dev);
 
@@ -1540,20 +1539,18 @@
 }
 
 static const struct dev_pm_ops omap_i2c_pm_ops = {
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+				      pm_runtime_force_resume)
 	SET_RUNTIME_PM_OPS(omap_i2c_runtime_suspend,
 			   omap_i2c_runtime_resume, NULL)
 };
-#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops)
-#else
-#define OMAP_I2C_PM_OPS NULL
-#endif /* CONFIG_PM */
 
 static struct platform_driver omap_i2c_driver = {
 	.probe		= omap_i2c_probe,
 	.remove		= omap_i2c_remove,
 	.driver		= {
 		.name	= "omap_i2c",
-		.pm	= OMAP_I2C_PM_OPS,
+		.pm	= &omap_i2c_pm_ops,
 		.of_match_table = of_match_ptr(omap_i2c_of_match),
 	},
 };
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 70d39fc..54eb695 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -980,7 +980,6 @@
 	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
 		wc.ex.imm_data = packet->ohdr->u.ud.imm_data;
 		wc.wc_flags = IB_WC_WITH_IMM;
-		tlen -= sizeof(u32);
 	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
 		wc.ex.imm_data = 0;
 		wc.wc_flags = 0;
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index f8d029a..bce2b5c 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -513,7 +513,6 @@
 	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
 		wc.ex.imm_data = ohdr->u.ud.imm_data;
 		wc.wc_flags = IB_WC_WITH_IMM;
-		tlen -= sizeof(u32);
 	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
 		wc.ex.imm_data = 0;
 		wc.wc_flags = 0;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 1abe3c6..b22d02c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -248,7 +248,6 @@
 	struct list_head     list;
 	struct net_device   *dev;
 	struct ipoib_neigh  *neigh;
-	struct ipoib_path   *path;
 	struct ipoib_tx_buf *tx_ring;
 	unsigned int	     tx_head;
 	unsigned int	     tx_tail;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 0428e01e..aa9dcfc 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1312,7 +1312,6 @@
 
 	neigh->cm = tx;
 	tx->neigh = neigh;
-	tx->path = path;
 	tx->dev = dev;
 	list_add(&tx->list, &priv->cm.start_list);
 	set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
@@ -1371,7 +1370,7 @@
 				neigh->daddr + QPN_AND_OPTIONS_OFFSET);
 			goto free_neigh;
 		}
-		memcpy(&pathrec, &p->path->pathrec, sizeof(pathrec));
+		memcpy(&pathrec, &path->pathrec, sizeof(pathrec));
 
 		spin_unlock_irqrestore(&priv->lock, flags);
 		netif_tx_unlock_bh(dev);
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 225ae69..628ef61 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1337,6 +1337,7 @@
 	{ "ELAN0000", 0 },
 	{ "ELAN0100", 0 },
 	{ "ELAN0600", 0 },
+	{ "ELAN0601", 0 },
 	{ "ELAN0602", 0 },
 	{ "ELAN0605", 0 },
 	{ "ELAN0608", 0 },
diff --git a/drivers/input/tablet/wacom_serial4.c b/drivers/input/tablet/wacom_serial4.c
index 38bfaca..150f9ee 100644
--- a/drivers/input/tablet/wacom_serial4.c
+++ b/drivers/input/tablet/wacom_serial4.c
@@ -187,6 +187,7 @@
 	MODEL_DIGITIZER_II	= 0x5544, /* UD */
 	MODEL_GRAPHIRE		= 0x4554, /* ET */
 	MODEL_PENPARTNER	= 0x4354, /* CT */
+	MODEL_ARTPAD_II		= 0x4B54, /* KT */
 };
 
 static void wacom_handle_model_response(struct wacom *wacom)
@@ -245,6 +246,7 @@
 		wacom->flags = F_HAS_STYLUS2 | F_HAS_SCROLLWHEEL;
 		break;
 
+	case MODEL_ARTPAD_II:
 	case MODEL_DIGITIZER_II:
 		wacom->dev->name = "Wacom Digitizer II";
 		wacom->dev->id.version = MODEL_DIGITIZER_II;
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 34c9aa7..27500ab 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1929,16 +1929,13 @@
 
 static void do_detach(struct iommu_dev_data *dev_data)
 {
+	struct protection_domain *domain = dev_data->domain;
 	struct amd_iommu *iommu;
 	u16 alias;
 
 	iommu = amd_iommu_rlookup_table[dev_data->devid];
 	alias = dev_data->alias;
 
-	/* decrease reference counters */
-	dev_data->domain->dev_iommu[iommu->index] -= 1;
-	dev_data->domain->dev_cnt                 -= 1;
-
 	/* Update data structures */
 	dev_data->domain = NULL;
 	list_del(&dev_data->list);
@@ -1948,6 +1945,16 @@
 
 	/* Flush the DTE entry */
 	device_flush_dte(dev_data);
+
+	/* Flush IOTLB */
+	domain_flush_tlb_pde(domain);
+
+	/* Wait for the flushes to finish */
+	domain_flush_complete(domain);
+
+	/* decrease reference counters - needs to happen after the flushes */
+	domain->dev_iommu[iommu->index] -= 1;
+	domain->dev_cnt                 -= 1;
 }
 
 /*
@@ -2555,13 +2562,13 @@
 			bus_addr  = address + s->dma_address + (j << PAGE_SHIFT);
 			iommu_unmap_page(domain, bus_addr, PAGE_SIZE);
 
-			if (--mapped_pages)
+			if (--mapped_pages == 0)
 				goto out_free_iova;
 		}
 	}
 
 out_free_iova:
-	free_iova_fast(&dma_dom->iovad, address, npages);
+	free_iova_fast(&dma_dom->iovad, address >> PAGE_SHIFT, npages);
 
 out_err:
 	return 0;
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index c6cbfee..d3936b3 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -3021,14 +3021,6 @@
 	return ret;
 }
 
-static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
-{
-	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-
-	if (smmu_domain->tlb_ops)
-		smmu_domain->tlb_ops->tlb_sync(smmu_domain);
-}
-
 #define MAX_MAP_SG_BATCH_SIZE (SZ_4M)
 static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
 			   struct scatterlist *sg, unsigned int nents, int prot)
@@ -4000,8 +3992,6 @@
 	.map			= arm_smmu_map,
 	.unmap			= arm_smmu_unmap,
 	.map_sg			= arm_smmu_map_sg,
-	.flush_iotlb_all	= arm_smmu_iotlb_sync,
-	.iotlb_sync		= arm_smmu_iotlb_sync,
 	.iova_to_phys		= arm_smmu_iova_to_phys,
 	.iova_to_phys_hard	= arm_smmu_iova_to_phys_hard,
 	.add_device		= arm_smmu_add_device,
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
index 2b165b3..4378f2c 100644
--- a/drivers/iommu/iommu-debug.c
+++ b/drivers/iommu/iommu-debug.c
@@ -240,7 +240,7 @@
 
 static struct iommu_debug_attr secure_attr = {
 	.dma_type = 0,
-	.vmid = VMID_CP_CAMERA,
+	.vmid = VMID_CP_PIXEL,
 };
 
 static int iommu_debug_set_attrs(struct iommu_debug_device *ddev,
@@ -1417,20 +1417,10 @@
 				       size_t count, loff_t *offset)
 {
 	struct iommu_debug_device *ddev = file->private_data;
-	char c[2];
+	char buf[100];
 
-	if (*offset)
-		return 0;
-
-	c[0] = ddev->domain ? '1' : '0';
-	c[1] = '\n';
-	if (copy_to_user(ubuf, &c, 2)) {
-		pr_err_ratelimited("copy_to_user failed\n");
-		return -EFAULT;
-	}
-	*offset = 1;		/* non-zero means we're done */
-
-	return 2;
+	snprintf(buf, sizeof(buf), "%d\n", ddev->domain ? 1 : 0);
+	return simple_read_from_buffer(ubuf, count, offset, buf, strlen(buf));
 }
 
 static const struct file_operations iommu_debug_dma_attach_fops = {
@@ -1444,9 +1434,7 @@
 					       size_t count, loff_t *offset)
 {
 	char buf[100];
-	ssize_t retval;
-	size_t buflen;
-	int buf_len = sizeof(buf);
+	size_t buf_len = sizeof(buf);
 
 	if (*offset)
 		return 0;
@@ -1458,16 +1446,7 @@
 	else
 		snprintf(buf, buf_len, "0x%pK\n", test_virt_addr);
 
-	buflen = strlen(buf);
-	if (copy_to_user(ubuf, buf, buflen)) {
-		pr_err_ratelimited("Couldn't copy_to_user\n");
-		retval = -EFAULT;
-	} else {
-		*offset = 1;	/* non-zero means we're done */
-		retval = buflen;
-	}
-
-	return retval;
+	return simple_read_from_buffer(ubuf, count, offset, buf, strlen(buf));
 }
 
 static const struct file_operations iommu_debug_test_virt_addr_fops = {
@@ -1512,8 +1491,6 @@
 	struct iommu_debug_device *ddev = file->private_data;
 	uint64_t pte;
 	char buf[100];
-	ssize_t retval;
-	size_t buflen;
 
 	if (kptr_restrict != 0) {
 		pr_err_ratelimited("kptr_restrict needs to be disabled.\n");
@@ -1538,18 +1515,8 @@
 		strlcpy(buf, "FAIL\n", sizeof(buf));
 	else
 		snprintf(buf, sizeof(buf), "pte=%016llx\n", pte);
-
-	buflen = strlen(buf);
-	if (copy_to_user(ubuf, buf, buflen)) {
-		pr_err_ratelimited("Couldn't copy_to_user\n");
-		retval = -EFAULT;
-	} else {
-		*offset = 1;	/* non-zero means we're done */
-		retval = buflen;
-	}
-
 	mutex_unlock(&ddev->state_lock);
-	return retval;
+	return simple_read_from_buffer(ubuf, count, offset, buf, strlen(buf));
 }
 
 static const struct file_operations iommu_debug_pte_fops = {
@@ -1582,8 +1549,6 @@
 	struct iommu_debug_device *ddev = file->private_data;
 	phys_addr_t phys;
 	char buf[100];
-	ssize_t retval;
-	size_t buflen;
 
 	if (kptr_restrict != 0) {
 		pr_err_ratelimited("kptr_restrict needs to be disabled.\n");
@@ -1611,18 +1576,8 @@
 	} else {
 		snprintf(buf, 100, "%pa\n", &phys);
 	}
-
-	buflen = strlen(buf);
-	if (copy_to_user(ubuf, buf, buflen)) {
-		pr_err_ratelimited("Couldn't copy_to_user\n");
-		retval = -EFAULT;
-	} else {
-		*offset = 1;	/* non-zero means we're done */
-		retval = buflen;
-	}
-
 	mutex_unlock(&ddev->state_lock);
-	return retval;
+	return simple_read_from_buffer(ubuf, count, offset, buf, strlen(buf));
 }
 
 static const struct file_operations iommu_debug_atos_fops = {
@@ -1637,8 +1592,6 @@
 	struct iommu_debug_device *ddev = file->private_data;
 	phys_addr_t phys;
 	char buf[100];
-	ssize_t retval;
-	size_t buflen;
 
 	if (kptr_restrict != 0) {
 		pr_err_ratelimited("kptr_restrict needs to be disabled.\n");
@@ -1662,18 +1615,8 @@
 		strlcpy(buf, "FAIL\n", sizeof(buf));
 	else
 		snprintf(buf, sizeof(buf), "%pa\n", &phys);
-
-	buflen = strlen(buf);
-	if (copy_to_user(ubuf, buf, buflen)) {
-		pr_err_ratelimited("Couldn't copy_to_user\n");
-		retval = -EFAULT;
-	} else {
-		*offset = 1;	/* non-zero means we're done */
-		retval = buflen;
-	}
-
 	mutex_unlock(&ddev->state_lock);
-	return retval;
+	return simple_read_from_buffer(ubuf, count, offset, buf, strlen(buf));
 }
 
 static const struct file_operations iommu_debug_dma_atos_fops = {
@@ -1875,8 +1818,6 @@
 {
 	struct iommu_debug_device *ddev = file->private_data;
 	char buf[100];
-	ssize_t retval;
-	size_t buflen;
 	dma_addr_t iova;
 
 	if (*offset)
@@ -1886,17 +1827,7 @@
 
 	iova = ddev->iova;
 	snprintf(buf, sizeof(buf), "%pa\n", &iova);
-
-	buflen = strlen(buf);
-	if (copy_to_user(ubuf, buf, buflen)) {
-		pr_err_ratelimited("Couldn't copy_to_user\n");
-		retval = -EFAULT;
-	} else {
-		*offset = 1;	/* non-zero means we're done */
-		retval = buflen;
-	}
-
-	return retval;
+	return simple_read_from_buffer(ubuf, count, offset, buf, strlen(buf));
 }
 
 static const struct file_operations iommu_debug_dma_map_fops = {
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 4c2246f..15579cb 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1581,6 +1581,9 @@
 		nr_irqs /= 2;
 	} while (nr_irqs > 0);
 
+	if (!nr_irqs)
+		err = -ENOSPC;
+
 	if (err)
 		goto out;
 
@@ -1951,6 +1954,29 @@
 		   get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
 }
 
+static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
+{
+	u32 count = 1000000;	/* 1s! */
+	bool clean;
+	u64 val;
+
+	val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
+	val &= ~GICR_VPENDBASER_Valid;
+	gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+
+	do {
+		val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
+		clean = !(val & GICR_VPENDBASER_Dirty);
+		if (!clean) {
+			count--;
+			cpu_relax();
+			udelay(1);
+		}
+	} while (!clean && count);
+
+	return val;
+}
+
 static void its_cpu_init_lpis(void)
 {
 	void __iomem *rbase = gic_data_rdist_rd_base();
@@ -2024,6 +2050,30 @@
 	val |= GICR_CTLR_ENABLE_LPIS;
 	writel_relaxed(val, rbase + GICR_CTLR);
 
+	if (gic_rdists->has_vlpis) {
+		void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+
+		/*
+		 * It's possible for CPU to receive VLPIs before it is
+		 * scheduled as a vPE, especially for the first CPU, and the
+		 * VLPI with INTID larger than 2^(IDbits+1) will be considered
+		 * as out of range and dropped by GIC.
+		 * So we initialize IDbits to known value to avoid VLPI drop.
+		 */
+		val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
+		pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
+			smp_processor_id(), val);
+		gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
+
+		/*
+		 * Also clear Valid bit of GICR_VPENDBASER, in case some
+		 * ancient programming gets left in and has possibility of
+		 * corrupting memory.
+		 */
+		val = its_clear_vpend_valid(vlpi_base);
+		WARN_ON(val & GICR_VPENDBASER_Dirty);
+	}
+
 	/* Make sure the GIC has seen the above */
 	dsb(sy);
 }
@@ -2644,26 +2694,11 @@
 static void its_vpe_deschedule(struct its_vpe *vpe)
 {
 	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
-	u32 count = 1000000;	/* 1s! */
-	bool clean;
 	u64 val;
 
-	/* We're being scheduled out */
-	val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
-	val &= ~GICR_VPENDBASER_Valid;
-	gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+	val = its_clear_vpend_valid(vlpi_base);
 
-	do {
-		val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
-		clean = !(val & GICR_VPENDBASER_Dirty);
-		if (!clean) {
-			count--;
-			cpu_relax();
-			udelay(1);
-		}
-	} while (!clean && count);
-
-	if (unlikely(!clean && !count)) {
+	if (unlikely(val & GICR_VPENDBASER_Dirty)) {
 		pr_err_ratelimited("ITS virtual pending table not cleaning\n");
 		vpe->idai = false;
 		vpe->pending_last = true;
diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c
index ad70e7c..fbfa7ff 100644
--- a/drivers/irqchip/irq-gic-v3-mbi.c
+++ b/drivers/irqchip/irq-gic-v3-mbi.c
@@ -24,7 +24,7 @@
 	unsigned long		*bm;
 };
 
-static struct mutex		mbi_lock;
+static DEFINE_MUTEX(mbi_lock);
 static phys_addr_t		mbi_phys_base;
 static struct mbi_range		*mbi_ranges;
 static unsigned int		mbi_range_nr;
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
index 25f32e1..3496b61 100644
--- a/drivers/irqchip/irq-mmp.c
+++ b/drivers/irqchip/irq-mmp.c
@@ -34,6 +34,9 @@
 #define SEL_INT_PENDING		(1 << 6)
 #define SEL_INT_NUM_MASK	0x3f
 
+#define MMP2_ICU_INT_ROUTE_PJ4_IRQ	(1 << 5)
+#define MMP2_ICU_INT_ROUTE_PJ4_FIQ	(1 << 6)
+
 struct icu_chip_data {
 	int			nr_irqs;
 	unsigned int		virq_base;
@@ -190,7 +193,8 @@
 static const struct mmp_intc_conf mmp2_conf = {
 	.conf_enable	= 0x20,
 	.conf_disable	= 0x0,
-	.conf_mask	= 0x7f,
+	.conf_mask	= MMP2_ICU_INT_ROUTE_PJ4_IRQ |
+			  MMP2_ICU_INT_ROUTE_PJ4_FIQ,
 };
 
 static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 7033a28..9df1334 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -4630,7 +4630,6 @@
 	atomic_inc(&r10_bio->remaining);
 	read_bio->bi_next = NULL;
 	generic_make_request(read_bio);
-	sector_nr += nr_sectors;
 	sectors_done += nr_sectors;
 	if (sector_nr <= last)
 		goto read_more;
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index b3dae10d..2ccb9ce 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -3435,6 +3435,13 @@
 					num_cmd_buf--;
 				goto rel_cmd_buf;
 			}
+			if ((len <= cmd_desc[i].offset) ||
+				(cmd_desc[i].size < cmd_desc[i].length) ||
+				((len - cmd_desc[i].offset) <
+				cmd_desc[i].length)) {
+				CAM_ERR(CAM_ICP, "Invalid offset or length");
+				goto rel_cmd_buf;
+			}
 			cpu_addr = cpu_addr + cmd_desc[i].offset;
 		}
 	}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index f5e612c..3b3eb8e 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -36,7 +36,7 @@
 	(CAM_ISP_PACKET_META_GENERIC_BLOB_COMMON + 1)
 
 #define CAM_ISP_GENERIC_BLOB_TYPE_MAX               \
-	(CAM_ISP_GENERIC_BLOB_TYPE_UBWC_CONFIG_V2 + 1)
+	(CAM_ISP_GENERIC_BLOB_TYPE_IFE_CORE_CONFIG + 1)
 
 static uint32_t blob_type_hw_cmd_map[CAM_ISP_GENERIC_BLOB_TYPE_MAX] = {
 	CAM_ISP_HW_CMD_GET_HFR_UPDATE,
@@ -3600,6 +3600,54 @@
 	return rc;
 }
 
+static int cam_isp_blob_core_cfg_update(
+	uint32_t                               blob_type,
+	struct cam_isp_generic_blob_info      *blob_info,
+	struct cam_isp_core_config            *core_config,
+	struct cam_hw_prepare_update_args     *prepare)
+{
+	struct cam_ife_hw_mgr_ctx             *ctx = NULL;
+	struct cam_ife_hw_mgr_res             *hw_mgr_res;
+	struct cam_hw_intf                    *hw_intf;
+	uint64_t                               clk_rate = 0;
+	int                                    rc = -EINVAL, i;
+	struct cam_vfe_core_config_args        vfe_core_config;
+
+	ctx = prepare->ctxt_to_hw_map;
+
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			clk_rate = 0;
+			if (!hw_mgr_res->hw_res[i] ||
+				hw_mgr_res->res_id != CAM_ISP_HW_VFE_IN_CAMIF)
+				continue;
+
+			hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
+			if (hw_intf && hw_intf->hw_ops.process_cmd) {
+				vfe_core_config.node_res =
+					hw_mgr_res->hw_res[i];
+
+				memcpy(&vfe_core_config.core_config,
+					core_config,
+					sizeof(struct cam_isp_core_config));
+
+				rc = hw_intf->hw_ops.process_cmd(
+					hw_intf->hw_priv,
+					CAM_ISP_HW_CMD_CORE_CONFIG,
+					&vfe_core_config,
+					sizeof(
+					struct cam_vfe_core_config_args));
+				if (rc)
+					CAM_ERR(CAM_ISP, "Core cfg parse fail");
+			} else {
+				CAM_WARN(CAM_ISP, "NULL hw_intf!");
+			}
+		}
+	}
+
+	return rc;
+}
+
 static int cam_isp_blob_clock_update(
 	uint32_t                               blob_type,
 	struct cam_isp_generic_blob_info      *blob_info,
@@ -3939,6 +3987,16 @@
 			CAM_ERR(CAM_ISP, "FS Update Failed rc: %d", rc);
 	}
 		break;
+	case CAM_ISP_GENERIC_BLOB_TYPE_IFE_CORE_CONFIG: {
+		struct cam_isp_core_config *core_config =
+			(struct cam_isp_core_config *)blob_data;
+
+		rc = cam_isp_blob_core_cfg_update(blob_type, blob_info,
+			core_config, prepare);
+		if (rc)
+			CAM_ERR(CAM_ISP, "Core cfg update fail: %d", rc);
+	}
+		break;
 
 	default:
 		CAM_WARN(CAM_ISP, "Invalid blob type %d", blob_type);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
index 3bdd4fb..a3321d6 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
@@ -98,6 +98,7 @@
 	CAM_ISP_HW_CMD_FE_UPDATE_IN_RD,
 	CAM_ISP_HW_CMD_FE_UPDATE_BUS_RD,
 	CAM_ISP_HW_CMD_UBWC_UPDATE_V2,
+	CAM_ISP_HW_CMD_CORE_CONFIG,
 	CAM_ISP_HW_CMD_MAX,
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
index c331a55..1c1f867 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
@@ -183,6 +183,17 @@
 };
 
 /*
+ * struct cam_vfe_core_config_args:
+ *
+ * @node_res:                Resource to apply the core config on
+ * @core_config:             Core config for IFE
+ */
+struct cam_vfe_core_config_args {
+	struct cam_isp_resource_node      *node_res;
+	struct cam_isp_core_config         core_config;
+};
+
+/*
  * struct cam_vfe_bw_update_args:
  *
  * @node_res:             Resource to get the BW
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
index 485da8b..3c8a7e2 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
@@ -581,6 +581,7 @@
 	case CAM_ISP_HW_CMD_CLOCK_UPDATE:
 	case CAM_ISP_HW_CMD_BW_UPDATE:
 	case CAM_ISP_HW_CMD_BW_CONTROL:
+	case CAM_ISP_HW_CMD_CORE_CONFIG:
 		rc = core_info->vfe_top->hw_ops.process_cmd(
 			core_info->vfe_top->top_priv, cmd_type, cmd_args,
 			arg_size);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h
index 2dddd9d..e855a54 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h
@@ -105,6 +105,7 @@
 	.diag_config              = 0x00000064,
 	.diag_sensor_status_0     = 0x00000068,
 	.diag_sensor_status_1     = 0x00000098,
+	.bus_overflow_status      = 0x0000AA68,
 };
 
 static struct cam_vfe_camif_lite_ver3_reg vfe480_camif_rdi[3] = {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite48x.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite48x.h
index 9ebeb55..c9d66ed 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite48x.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite48x.h
@@ -47,6 +47,7 @@
 	.reg_update_cmd           = 0x00000020,
 	.diag_config              = 0x00000050,
 	.diag_sensor_status_0     = 0x00000054,
+	.bus_overflow_status      = 0x00001A68,
 };
 
 static struct cam_vfe_camif_lite_ver3_reg vfe48x_camif_rdi[4] = {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c
index 20ad20f..6d5a514 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c
@@ -155,6 +155,9 @@
 	uint32_t                         dual_hw_alternate_vfe_id;
 	struct list_head                 vfe_out_list;
 
+	uint32_t                         is_master;
+	uint32_t                         is_dual;
+
 	uint32_t                         format;
 	uint32_t                         max_width;
 	uint32_t                         max_height;
@@ -1663,25 +1666,34 @@
 		if (rsrc_data->is_master) {
 			val = cam_io_r_mb(common_data->mem_base +
 				common_data->common_reg->comp_cfg_0);
+
 			val |= (0x1 << (rsrc_data->comp_grp_type + 14));
+
 			cam_io_w_mb(val, common_data->mem_base +
 				common_data->common_reg->comp_cfg_0);
 
 			val = cam_io_r_mb(common_data->mem_base +
 				common_data->common_reg->comp_cfg_1);
+
 			val |= (0x1 << rsrc_data->comp_grp_type);
+
 			cam_io_w_mb(val, common_data->mem_base +
 				common_data->common_reg->comp_cfg_1);
 		} else {
 			val = cam_io_r_mb(common_data->mem_base +
 				common_data->common_reg->comp_cfg_0);
+
 			val |= (0x1 << rsrc_data->comp_grp_type);
+			val |= (0x1 << (rsrc_data->comp_grp_type + 14));
+
 			cam_io_w_mb(val, common_data->mem_base +
 				common_data->common_reg->comp_cfg_0);
 
 			val = cam_io_r_mb(common_data->mem_base +
 				common_data->common_reg->comp_cfg_1);
+
 			val |= (0x1 << rsrc_data->comp_grp_type);
+
 			cam_io_w_mb(val, common_data->mem_base +
 				common_data->common_reg->comp_cfg_1);
 		}
@@ -1967,6 +1979,9 @@
 		return rc;
 	}
 
+	rsrc_data->is_dual = out_acquire_args->is_dual;
+	rsrc_data->is_master = out_acquire_args->is_master;
+
 	cam_vfe_bus_ver3_add_wm_to_comp_grp(rsrc_data->comp_grp,
 		client_done_mask);
 
@@ -2087,6 +2102,9 @@
 	rc = cam_vfe_bus_ver3_start_comp_grp(rsrc_data->comp_grp,
 		bus_irq_reg_mask);
 
+	if (rsrc_data->is_dual && !rsrc_data->is_master)
+		goto end;
+
 	vfe_out->irq_handle = cam_irq_controller_subscribe_irq(
 		common_data->bus_irq_controller,
 		CAM_IRQ_PRIORITY_1,
@@ -2130,6 +2148,7 @@
 		}
 	}
 
+end:
 	vfe_out->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
 	return rc;
 }
@@ -3105,9 +3124,9 @@
 		CAM_ERR(CAM_ISP, "num_wm %d h_init 0x%x",
 			vfe_out_data->num_wm,
 			ubwc_generic_plane_cfg->h_init);
-		if ((!wm_data->is_dual) && ((wm_data->ubwc_meta_cfg !=
-			ubwc_generic_plane_cfg->meta_stride) ||
-			!wm_data->init_cfg_done)) {
+		if (wm_data->ubwc_meta_cfg !=
+			ubwc_generic_plane_cfg->meta_stride ||
+			!wm_data->init_cfg_done) {
 			wm_data->ubwc_meta_cfg =
 				ubwc_generic_plane_cfg->meta_stride;
 			wm_data->ubwc_updated = true;
@@ -3166,45 +3185,6 @@
 	return rc;
 }
 
-static uint32_t cam_vfe_bus_ver3_convert_bytes_to_pixels(uint32_t packer_fmt,
-	uint32_t width)
-{
-	int pixels = 0;
-
-	switch (packer_fmt) {
-	case PACKER_FMT_VER3_PLAIN_128:
-		pixels = width / 16;
-		break;
-	case PACKER_FMT_VER3_PLAIN_8:
-	case PACKER_FMT_VER3_PLAIN_8_ODD_EVEN:
-	case PACKER_FMT_VER3_PLAIN_8_LSB_MSB_10:
-	case PACKER_FMT_VER3_PLAIN_8_LSB_MSB_10_ODD_EVEN:
-		pixels = width;
-		break;
-	case PACKER_FMT_VER3_PLAIN_16_10BPP:
-	case PACKER_FMT_VER3_PLAIN_16_12BPP:
-	case PACKER_FMT_VER3_PLAIN_16_14BPP:
-	case PACKER_FMT_VER3_PLAIN_16_16BPP:
-		pixels = width / 2;
-		break;
-	case PACKER_FMT_VER3_PLAIN_32:
-		pixels = width / 4;
-		break;
-	case PACKER_FMT_VER3_PLAIN_64:
-		pixels = width / 8;
-		break;
-	case PACKER_FMT_VER3_TP_10:
-		pixels = width * 3 / 4;
-		break;
-	case PACKER_FMT_VER3_MAX:
-	default:
-		CAM_ERR(CAM_ISP, "Invalid packer cfg 0x%x", packer_fmt);
-		break;
-	}
-
-	return pixels;
-}
-
 static int cam_vfe_bus_ver3_update_stripe_cfg(void *priv, void *cmd_args,
 	uint32_t arg_size)
 {
@@ -3238,8 +3218,7 @@
 		wm_data = vfe_out_data->wm_res[i]->res_priv;
 		stripe_config = (struct cam_isp_dual_stripe_config  *)
 			&stripe_args->dual_cfg->stripes[ports_plane_idx + i];
-		wm_data->width = cam_vfe_bus_ver3_convert_bytes_to_pixels(
-			wm_data->pack_fmt, stripe_config->width);
+		wm_data->width = stripe_config->width;
 		wm_data->offset = stripe_config->offset;
 		CAM_DBG(CAM_ISP, "id:%x WM:%d width:0x%x offset:%x",
 			stripe_args->res->res_id, wm_data->index,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.c
index 322ef84..7dc1f83 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.c
@@ -26,6 +26,7 @@
 	struct cam_vfe_camif_lite_ver3_reg_data     *reg_data;
 	struct cam_hw_soc_info                      *soc_info;
 	enum cam_isp_hw_sync_mode                    sync_mode;
+	struct cam_vfe_camif_common_cfg              cam_common_cfg;
 
 	cam_hw_mgr_event_cb_func                     event_cb;
 	void                                        *priv;
@@ -266,8 +267,11 @@
 		rsrc_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
 		val |= (1 << rsrc_data->reg_data->extern_reg_update_shift);
 
-	if (camif_lite_res->res_id == CAM_ISP_HW_VFE_IN_PDLIB)
+	if (camif_lite_res->res_id == CAM_ISP_HW_VFE_IN_PDLIB) {
 		val |= (1 << rsrc_data->reg_data->operating_mode_shift);
+		val |= (rsrc_data->cam_common_cfg.input_mux_sel_pdaf & 0x1) <<
+			CAM_SHIFT_TOP_CORE_CFG_MUXSEL_PDAF;
+	}
 
 	cam_io_w_mb(val, rsrc_data->mem_base +
 		rsrc_data->common_reg->core_cfg_0);
@@ -383,6 +387,39 @@
 	return rc;
 }
 
+static int cam_vfe_camif_lite_ver3_core_config(
+	struct cam_isp_resource_node *rsrc_node, void *cmd_args)
+{
+	struct cam_vfe_mux_camif_lite_data *camif_lite_priv;
+	struct cam_vfe_core_config_args *vfe_core_cfg =
+		(struct cam_vfe_core_config_args *)cmd_args;
+
+	camif_lite_priv =
+		(struct cam_vfe_mux_camif_lite_data *)rsrc_node->res_priv;
+	camif_lite_priv->cam_common_cfg.vid_ds16_r2pd =
+		vfe_core_cfg->core_config.vid_ds16_r2pd;
+	camif_lite_priv->cam_common_cfg.vid_ds4_r2pd =
+		vfe_core_cfg->core_config.vid_ds4_r2pd;
+	camif_lite_priv->cam_common_cfg.disp_ds16_r2pd =
+		vfe_core_cfg->core_config.disp_ds16_r2pd;
+	camif_lite_priv->cam_common_cfg.disp_ds4_r2pd =
+		vfe_core_cfg->core_config.disp_ds4_r2pd;
+	camif_lite_priv->cam_common_cfg.dsp_streaming_tap_point =
+		vfe_core_cfg->core_config.dsp_streaming_tap_point;
+	camif_lite_priv->cam_common_cfg.ihist_src_sel =
+		vfe_core_cfg->core_config.ihist_src_sel;
+	camif_lite_priv->cam_common_cfg.hdr_be_src_sel =
+		vfe_core_cfg->core_config.hdr_be_src_sel;
+	camif_lite_priv->cam_common_cfg.hdr_bhist_src_sel =
+		vfe_core_cfg->core_config.hdr_bhist_src_sel;
+	camif_lite_priv->cam_common_cfg.input_mux_sel_pdaf =
+		vfe_core_cfg->core_config.input_mux_sel_pdaf;
+	camif_lite_priv->cam_common_cfg.input_mux_sel_pp =
+		vfe_core_cfg->core_config.input_mux_sel_pp;
+
+	return 0;
+}
+
 static int cam_vfe_camif_lite_process_cmd(
 	struct cam_isp_resource_node *rsrc_node,
 	uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
@@ -399,6 +436,9 @@
 		rc = cam_vfe_camif_lite_get_reg_update(rsrc_node, cmd_args,
 			arg_size);
 		break;
+	case CAM_ISP_HW_CMD_CORE_CONFIG:
+		rc = cam_vfe_camif_lite_ver3_core_config(rsrc_node, cmd_args);
+		break;
 	default:
 		CAM_ERR(CAM_ISP,
 			"unsupported process command:%d", cmd_type);
@@ -408,6 +448,115 @@
 	return rc;
 }
 
+static void cam_vfe_camif_lite_print_status(uint32_t val,
+	uint32_t violation_status, int ret, bool is_ife_lite)
+{
+	uint32_t violation_mask = 0x3F00;
+
+	if (is_ife_lite) {
+
+		if (ret == CAM_VFE_IRQ_STATUS_OVERFLOW) {
+			if (val & 0x100)
+				CAM_INFO(CAM_ISP, "RDI3 FRAME DROP");
+
+			if (val & 0x80)
+				CAM_INFO(CAM_ISP, "RDI2 FRAME DROP");
+
+			if (val & 0x40)
+				CAM_INFO(CAM_ISP, "RDI1 FRAME DROP");
+
+			if (val & 0x20)
+				CAM_INFO(CAM_ISP, "RDI0 FRAME DROP");
+
+			if (val & 0x8)
+				CAM_INFO(CAM_ISP, "RDI3 OVERFLOW");
+
+			if (val & 0x4)
+				CAM_INFO(CAM_ISP, "RDI2 OVERFLOW");
+
+			if (val & 0x2)
+				CAM_INFO(CAM_ISP, "RDI1 OVERFLOW");
+
+			if (val & 0x1)
+				CAM_INFO(CAM_ISP, "RDI0 OVERFLOW");
+		}
+
+		if (ret == CAM_VFE_IRQ_STATUS_VIOLATION) {
+
+			if (val & 0x800)
+				CAM_INFO(CAM_ISP, "RDI3 CAMIF VIOLATION");
+
+			if (val & 0x400)
+				CAM_INFO(CAM_ISP, "RDI2 CAMIF VIOLATION");
+
+			if (val & 0x200)
+				CAM_INFO(CAM_ISP, "RDI1 CAMIF VIOLATION");
+
+			if (val & 0x100)
+				CAM_INFO(CAM_ISP, "RDI0 CAMIF VIOLATION");
+		}
+	} else {
+
+		if (ret == CAM_VFE_IRQ_STATUS_OVERFLOW) {
+			if (val & 0x200000)
+				CAM_INFO(CAM_ISP, "RDI2 FRAME DROP");
+
+			if (val & 0x400000)
+				CAM_INFO(CAM_ISP, "RDI1 FRAME DROP");
+
+			if (val & 0x800000)
+				CAM_INFO(CAM_ISP, "RDI0 FRAME DROP");
+
+			if (val & 0x1000000)
+				CAM_INFO(CAM_ISP, "PD PIPE FRAME DROP");
+
+			if (val & 0x8000000)
+				CAM_INFO(CAM_ISP, "RDI2 OVERFLOW");
+
+			if (val & 0x10000000)
+				CAM_INFO(CAM_ISP, "RDI1 OVERFLOW");
+
+			if (val & 0x20000000)
+				CAM_INFO(CAM_ISP, "RDI0 OVERFLOW");
+
+			if (val & 0x40000000)
+				CAM_INFO(CAM_ISP, "PD PIPE OVERFLOW");
+		}
+
+		if (ret == CAM_VFE_IRQ_STATUS_VIOLATION) {
+			if (val & 0x02000)
+				CAM_INFO(CAM_ISP, "PD CAMIF VIOLATION");
+
+			if (val & 0x04000)
+				CAM_INFO(CAM_ISP, "PD VIOLATION");
+
+			if (val & 0x08000)
+				CAM_INFO(CAM_ISP, "LCR CAMIF VIOLATION");
+
+			if (val & 0x010000)
+				CAM_INFO(CAM_ISP, "LCR VIOLATION");
+
+			if (val & 0x020000)
+				CAM_INFO(CAM_ISP, "RDI0 CAMIF VIOLATION");
+
+			if (val & 0x040000)
+				CAM_INFO(CAM_ISP, "RDI1 CAMIF VIOLATION");
+
+			if (val & 0x080000)
+				CAM_INFO(CAM_ISP, "RDI2 CAMIF VIOLATION");
+		}
+
+		if (violation_mask & violation_status)
+			CAM_INFO(CAM_ISP, "LCR VIOLATION, module = %d",
+				violation_mask & violation_status);
+
+		violation_mask = 0x0F0000;
+		if (violation_mask & violation_status)
+			CAM_INFO(CAM_ISP, "PD Violation, module = %d",
+				violation_mask & violation_status);
+	}
+}
+
 static int cam_vfe_camif_lite_handle_irq_top_half(uint32_t evt_id,
 	struct cam_irq_th_payload *th_payload)
 {
@@ -450,14 +599,15 @@
 	void                                    *handler_priv,
 	void                                    *evt_payload_priv)
 {
-	int                                      ret = CAM_VFE_IRQ_STATUS_MAX;
-	struct cam_isp_resource_node            *camif_lite_node;
-	struct cam_vfe_mux_camif_lite_data      *camif_lite_priv;
-	struct cam_vfe_top_irq_evt_payload      *payload;
-	struct cam_isp_hw_event_info             evt_info;
-	uint32_t                                 irq_status0;
-	uint32_t                                 irq_status1;
-	uint32_t                                 irq_status2;
+	int ret = CAM_VFE_IRQ_STATUS_MAX;
+	struct cam_isp_resource_node *camif_lite_node;
+	struct cam_vfe_mux_camif_lite_data *camif_lite_priv;
+	struct cam_vfe_top_irq_evt_payload *payload;
+	struct cam_isp_hw_event_info evt_info;
+	uint32_t irq_status[CAM_IFE_IRQ_REGISTERS_MAX];
+	int i = 0;
+	bool is_ife_lite = true;
+	uint32_t val = 0;
 
 	if (!handler_priv || !evt_payload_priv) {
 		CAM_ERR(CAM_ISP, "Invalid params");
@@ -468,9 +618,8 @@
 	camif_lite_priv = camif_lite_node->res_priv;
 	payload         = evt_payload_priv;
 
-	irq_status0     = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
-	irq_status1     = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS1];
-	irq_status2     = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS2];
+	for (i = 0; i < CAM_IFE_IRQ_REGISTERS_MAX; i++)
+		irq_status[i] = payload->irq_reg_val[i];
 
 	evt_info.hw_idx   = camif_lite_node->hw_intf->hw_idx;
 	evt_info.res_id   = camif_lite_node->res_id;
@@ -478,9 +627,16 @@
 
 	CAM_DBG(CAM_ISP,
 		"irq_status_0 = 0x%x, irq_status_1 = 0x%x, irq_status_2 = 0x%x",
-		irq_status0, irq_status1, irq_status2);
+		irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS0],
+		irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS1],
+		irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS2]);
 
-	if (irq_status1 & camif_lite_priv->reg_data->sof_irq_mask) {
+	if (strnstr(camif_lite_priv->soc_info->compatible, "lite",
+		strlen(camif_lite_priv->soc_info->compatible)) == NULL)
+		is_ife_lite = false;
+
+	if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS1]
+		& camif_lite_priv->reg_data->sof_irq_mask) {
 		CAM_DBG(CAM_ISP, "Received SOF");
 		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
 
@@ -489,7 +645,8 @@
 				CAM_ISP_HW_EVENT_SOF, (void *)&evt_info);
 	}
 
-	if (irq_status1 & camif_lite_priv->reg_data->epoch0_irq_mask) {
+	if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS1]
+		& camif_lite_priv->reg_data->epoch0_irq_mask) {
 		CAM_DBG(CAM_ISP, "Received EPOCH");
 		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
 
@@ -498,7 +655,8 @@
 				CAM_ISP_HW_EVENT_EPOCH, (void *)&evt_info);
 	}
 
-	if (irq_status1 & camif_lite_priv->reg_data->eof_irq_mask) {
+	if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS1]
+		& camif_lite_priv->reg_data->eof_irq_mask) {
 		CAM_DBG(CAM_ISP, "Received EOF\n");
 		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
 
@@ -507,7 +665,8 @@
 				CAM_ISP_HW_EVENT_EOF, (void *)&evt_info);
 	}
 
-	if (irq_status0 & camif_lite_priv->reg_data->error_irq_mask0) {
+	if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS0]
+		& camif_lite_priv->reg_data->error_irq_mask0) {
 		CAM_DBG(CAM_ISP, "Received VFE Overflow ERROR\n");
 
 		evt_info.err_type = CAM_VFE_IRQ_STATUS_OVERFLOW;
@@ -516,10 +675,54 @@
 			camif_lite_priv->event_cb(camif_lite_priv->priv,
 				CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info);
 
+		val = cam_io_r(camif_lite_priv->mem_base +
+			camif_lite_priv->common_reg->bus_overflow_status);
+
+		if (is_ife_lite && val) {
+
+			if (val & 0x01)
+				CAM_INFO(CAM_ISP,
+					"RDI0 bus overflow");
+
+			if (val & 0x02)
+				CAM_INFO(CAM_ISP,
+					"RDI1 bus overflow");
+
+			if (val & 0x04)
+				CAM_INFO(CAM_ISP,
+					"RDI2 bus overflow");
+
+			if (val & 0x08)
+				CAM_INFO(CAM_ISP,
+					"RDI3 bus overflow");
+		}
+
+		if (!is_ife_lite && val) {
+
+			if (val & 0x0800)
+				CAM_INFO(CAM_ISP, "CAMIF PD bus overflow");
+
+			if (val & 0x0400000)
+				CAM_INFO(CAM_ISP, "LCR bus overflow");
+
+			if (val & 0x0800000)
+				CAM_INFO(CAM_ISP, "RDI0 bus overflow");
+
+			if (val & 0x01000000)
+				CAM_INFO(CAM_ISP, "RDI1 bus overflow");
+
+			if (val & 0x02000000)
+				CAM_INFO(CAM_ISP, "RDI2 bus overflow");
+		}
+
 		ret = CAM_VFE_IRQ_STATUS_OVERFLOW;
+		cam_vfe_camif_lite_print_status(
+			irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS0],
+			irq_status[CAM_IFE_IRQ_VIOLATION_STATUS], ret,
+			is_ife_lite);
 	}
 
-	if (irq_status2 & camif_lite_priv->reg_data->error_irq_mask2) {
+	if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS2]) {
 		CAM_DBG(CAM_ISP, "Received CAMIF Lite Violation ERROR\n");
 
 		evt_info.err_type = CAM_VFE_IRQ_STATUS_VIOLATION;
@@ -529,6 +732,10 @@
 				CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info);
 
 		ret = CAM_VFE_IRQ_STATUS_VIOLATION;
+		cam_vfe_camif_lite_print_status(
+			irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS2],
+			irq_status[CAM_IFE_IRQ_VIOLATION_STATUS], ret,
+			is_ife_lite);
 	}
 
 	cam_vfe_camif_lite_put_evt_payload(camif_lite_priv, &payload);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c
index f54ad2e..058482b 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c
@@ -20,7 +20,6 @@
 #include "cam_cpas_api.h"
 
 #define CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX 2
-#define CAM_VFE_CAMIF_VER3_CORE_CFG_0_DEFAULT 0x78002800
 
 struct cam_vfe_mux_camif_ver3_data {
 	void __iomem                                *mem_base;
@@ -29,6 +28,7 @@
 	struct cam_vfe_top_ver3_reg_offset_common   *common_reg;
 	struct cam_vfe_camif_ver3_reg_data          *reg_data;
 	struct cam_hw_soc_info                      *soc_info;
+	struct cam_vfe_camif_common_cfg             cam_common_cfg;
 
 	cam_hw_mgr_event_cb_func             event_cb;
 	void                                *priv;
@@ -394,11 +394,6 @@
 	val = cam_io_r_mb(rsrc_data->mem_base +
 		rsrc_data->common_reg->core_cfg_0);
 
-	/* Programming to default value must be removed once uapis have been
-	 * updated to receive this programming from userspace.
-	 */
-	val |= CAM_VFE_CAMIF_VER3_CORE_CFG_0_DEFAULT;
-
 	/* AF stitching by hw disabled by default
 	 * PP CAMIF currently operates only in offline mode
 	 */
@@ -419,6 +414,25 @@
 		(rsrc_data->sync_mode == CAM_ISP_HW_SYNC_MASTER))
 		val |= (1 << rsrc_data->reg_data->dual_ife_pix_en_shift);
 
+	val |= (~rsrc_data->cam_common_cfg.vid_ds16_r2pd & 0x1) <<
+		CAM_SHIFT_TOP_CORE_CFG_VID_DS16_R2PD;
+	val |= (~rsrc_data->cam_common_cfg.vid_ds4_r2pd & 0x1) <<
+		CAM_SHIFT_TOP_CORE_CFG_VID_DS4_R2PD;
+	val |= (~rsrc_data->cam_common_cfg.disp_ds16_r2pd & 0x1) <<
+		CAM_SHIFT_TOP_CORE_CFG_DISP_DS16_R2PD;
+	val |= (~rsrc_data->cam_common_cfg.disp_ds4_r2pd & 0x1) <<
+		CAM_SHIFT_TOP_CORE_CFG_DISP_DS4_R2PD;
+	val |= (rsrc_data->cam_common_cfg.dsp_streaming_tap_point & 0x3) <<
+		CAM_SHIFT_TOP_CORE_CFG_DSP_STREAMING;
+	val |= (rsrc_data->cam_common_cfg.ihist_src_sel & 0x1) <<
+		CAM_SHIFT_TOP_CORE_CFG_STATS_IHIST;
+	val |= (rsrc_data->cam_common_cfg.hdr_be_src_sel & 0x1) <<
+		CAM_SHIFT_TOP_CORE_CFG_STATS_HDR_BE;
+	val |= (rsrc_data->cam_common_cfg.hdr_bhist_src_sel & 0x1) <<
+		CAM_SHIFT_TOP_CORE_CFG_STATS_HDR_BHIST;
+	val |= (rsrc_data->cam_common_cfg.input_mux_sel_pp & 0x3) <<
+		CAM_SHIFT_TOP_CORE_CFG_INPUTMUX_PP;
+
 	cam_io_w_mb(val, rsrc_data->mem_base +
 		rsrc_data->common_reg->core_cfg_0);
 
@@ -676,6 +690,39 @@
 	return rc;
 }
 
+static int cam_vfe_camif_ver3_core_config(
+	struct cam_isp_resource_node *rsrc_node, void *cmd_args)
+{
+	struct cam_vfe_mux_camif_ver3_data *camif_priv;
+	struct cam_vfe_core_config_args *vfe_core_cfg =
+		(struct cam_vfe_core_config_args *)cmd_args;
+
+	camif_priv =
+		(struct cam_vfe_mux_camif_ver3_data *)rsrc_node->res_priv;
+	camif_priv->cam_common_cfg.vid_ds16_r2pd =
+		vfe_core_cfg->core_config.vid_ds16_r2pd;
+	camif_priv->cam_common_cfg.vid_ds4_r2pd =
+		vfe_core_cfg->core_config.vid_ds4_r2pd;
+	camif_priv->cam_common_cfg.disp_ds16_r2pd =
+		vfe_core_cfg->core_config.disp_ds16_r2pd;
+	camif_priv->cam_common_cfg.disp_ds4_r2pd =
+		vfe_core_cfg->core_config.disp_ds4_r2pd;
+	camif_priv->cam_common_cfg.dsp_streaming_tap_point =
+		vfe_core_cfg->core_config.dsp_streaming_tap_point;
+	camif_priv->cam_common_cfg.ihist_src_sel =
+		vfe_core_cfg->core_config.ihist_src_sel;
+	camif_priv->cam_common_cfg.hdr_be_src_sel =
+		vfe_core_cfg->core_config.hdr_be_src_sel;
+	camif_priv->cam_common_cfg.hdr_bhist_src_sel =
+		vfe_core_cfg->core_config.hdr_bhist_src_sel;
+	camif_priv->cam_common_cfg.input_mux_sel_pdaf =
+		vfe_core_cfg->core_config.input_mux_sel_pdaf;
+	camif_priv->cam_common_cfg.input_mux_sel_pp =
+		vfe_core_cfg->core_config.input_mux_sel_pp;
+
+	return 0;
+}
+
 static int cam_vfe_camif_ver3_sof_irq_debug(
 	struct cam_isp_resource_node *rsrc_node, void *cmd_args)
 {
@@ -718,6 +765,9 @@
 	case CAM_ISP_HW_CMD_SOF_IRQ_DEBUG:
 		rc = cam_vfe_camif_ver3_sof_irq_debug(rsrc_node, cmd_args);
 		break;
+	case CAM_ISP_HW_CMD_CORE_CONFIG:
+		rc = cam_vfe_camif_ver3_core_config(rsrc_node, cmd_args);
+		break;
 	case CAM_ISP_HW_CMD_SET_CAMIF_DEBUG:
 		camif_priv = (struct cam_vfe_mux_camif_ver3_data *)
 			rsrc_node->res_priv;
@@ -732,6 +782,262 @@
 	return rc;
 }
 
+static void cam_vfe_camif_ver3_print_status(uint32_t val,
+	uint32_t violation_status, int ret)
+{
+	uint32_t violation_mask = 0x3F;
+	uint32_t module_id;
+
+	if (ret == CAM_VFE_IRQ_STATUS_OVERFLOW) {
+		if (val & 0x0200)
+			CAM_INFO(CAM_ISP, "DSP OVERFLOW");
+
+		if (val & 0x2000000)
+			CAM_INFO(CAM_ISP, "PIXEL PIPE FRAME DROP");
+
+		if (val & 0x80000000)
+			CAM_INFO(CAM_ISP, "PIXEL PIPE OVERFLOW");
+	}
+
+	if (ret == CAM_VFE_IRQ_STATUS_VIOLATION) {
+
+		if (val & 0x080)
+			CAM_INFO(CAM_ISP, "DSP IFE PROTOCOL VIOLATION");
+
+		if (val & 0x0100)
+			CAM_INFO(CAM_ISP, "IFE DSP TX PROTOCOL VIOLATION");
+
+		if (val & 0x0200)
+			CAM_INFO(CAM_ISP, "DSP IFE RX PROTOCOL VIOLATION");
+
+		if (val & 0x0400)
+			CAM_INFO(CAM_ISP, "PP PREPROCESS VIOLATION");
+
+		if (val & 0x0800)
+			CAM_INFO(CAM_ISP, "PP CAMIF VIOLATION");
+
+		if (val & 0x01000)
+			CAM_INFO(CAM_ISP, "PP VIOLATION");
+
+		if (val & 0x0100000)
+			CAM_INFO(CAM_ISP,
+				"DSP_TX_VIOLATION:overflow on DSP interface TX path FIFO");
+
+		if (val & 0x0200000)
+			CAM_INFO(CAM_ISP,
+			"DSP_RX_VIOLATION:overflow on DSP interface RX path FIFO");
+
+		if (val & 0x10000000)
+			CAM_INFO(CAM_ISP, "DSP ERROR VIOLATION");
+
+		if (val & 0x20000000)
+			CAM_INFO(CAM_ISP,
+				"DIAG VIOLATION: HBI is less than the minimum required HBI");
+	}
+
+	if (violation_mask & violation_status) {
+		CAM_INFO(CAM_ISP, "PP VIOLATION, module = %d",
+			violation_mask & violation_status);
+		module_id = violation_mask & violation_status;
+		switch (module_id) {
+		case 0:
+			CAM_INFO(CAM_ISP, "Demux");
+			break;
+		case 1:
+			CAM_INFO(CAM_ISP,
+				"CHROMA_UP");
+			break;
+		case 2:
+			CAM_INFO(CAM_ISP,
+				"PEDESTAL");
+			break;
+		case 3:
+			CAM_INFO(CAM_ISP,
+				"LINEARIZATION");
+			break;
+		case 4:
+			CAM_INFO(CAM_ISP,
+				"BPC_PDPC");
+			break;
+		case 5:
+			CAM_INFO(CAM_ISP,
+				"HDR_BINCORRECT");
+			break;
+		case 6:
+			CAM_INFO(CAM_ISP, "ABF");
+			break;
+		case 7:
+			CAM_INFO(CAM_ISP, "LSC");
+			break;
+		case 8:
+			CAM_INFO(CAM_ISP, "DEMOSAIC");
+			break;
+		case 9:
+			CAM_INFO(CAM_ISP,
+				"COLOR_CORRECT");
+			break;
+		case 10:
+			CAM_INFO(CAM_ISP, "GTM");
+			break;
+		case 11:
+			CAM_INFO(CAM_ISP, "GLUT");
+			break;
+		case 12:
+			CAM_INFO(CAM_ISP,
+				"COLOR_XFORM");
+			break;
+		case 13:
+			CAM_INFO(CAM_ISP,
+				"CROP_RND_CLAMP_PIXEL_RAW_OUT");
+			break;
+		case 14:
+			CAM_INFO(CAM_ISP,
+				"DOWNSCALE_MN_Y_FD_OUT");
+			break;
+		case 15:
+			CAM_INFO(CAM_ISP,
+				"DOWNSCALE_MN_C_FD_OUT");
+			break;
+		case 16:
+			CAM_INFO(CAM_ISP,
+				"CROP_RND_CLAMP_POST_DOWNSCALE_MN_Y_FD_OUT");
+			break;
+		case 17:
+			CAM_INFO(CAM_ISP,
+				"CROP_RND_CLAMP_POST_DOWNSCALE_MN_C_FD_OUT");
+			break;
+		case 18:
+			CAM_INFO(CAM_ISP,
+				"DOWNSCALE_MN_Y_DISP_OUT");
+			break;
+		case 19:
+			CAM_INFO(CAM_ISP,
+				"DOWNSCALE_MN_C_DISP_OUT");
+			break;
+		case 20:
+			CAM_INFO(CAM_ISP,
+				"CROP_RND_CLAMP_POST_DOWNSCALE_MN_Y_DISP_OUT");
+			break;
+		case 21:
+			CAM_INFO(CAM_ISP,
+				"CROP_RND_CLAMP_POST_DOWNSCALE_MN_C_DISP_OUT");
+			break;
+		case 22:
+			CAM_INFO(CAM_ISP,
+				"DOWNSCALE_4TO1_Y_DISP_DS4_OUT");
+			break;
+		case 23:
+			CAM_INFO(CAM_ISP,
+				"DOWNSCALE_4TO1_C_DISP_DS4_OUT");
+			break;
+		case 24:
+			CAM_INFO(CAM_ISP,
+				"CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_Y_DISP_DS4_OUT");
+			break;
+		case 25:
+			CAM_INFO(CAM_ISP,
+				"CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_C_DISP_DS4_OUT");
+			break;
+		case 26:
+			CAM_INFO(CAM_ISP,
+				"DOWNSCALE_4TO1_Y_DISP_DS16_OUT");
+			break;
+		case 27:
+			CAM_INFO(CAM_ISP,
+				"DOWNSCALE_4TO1_C_DISP_DS16_OUT");
+			break;
+		case 28:
+			CAM_INFO(CAM_ISP,
+				"CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_Y_DISP_DS16_OUT");
+			break;
+		case 29:
+			CAM_INFO(CAM_ISP,
+				"CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_C_DISP_DS16_OUT");
+			break;
+		case 30:
+			CAM_INFO(CAM_ISP,
+				"DOWNSCALE_MN_Y_VID_OUT");
+			break;
+		case 31:
+			CAM_INFO(CAM_ISP,
+				"DOWNSCALE_MN_C_VID_OUT");
+			break;
+		case 32:
+			CAM_INFO(CAM_ISP,
+				"CROP_RND_CLAMP_POST_DOWNSCALE_MN_Y_VID_OUT");
+			break;
+		case 33:
+			CAM_INFO(CAM_ISP,
+				"CROP_RND_CLAMP_POST_DOWNSCALE_MN_C_VID_OUT");
+			break;
+		case 34:
+			CAM_INFO(CAM_ISP, "DSX_Y_VID_OUT");
+			break;
+		case 35:
+			CAM_INFO(CAM_ISP, "DSX_C_VID_OUT");
+			break;
+		case 36:
+			CAM_INFO(CAM_ISP,
+				"CROP_RND_CLAMP_POST_DSX_Y_VID_OUT");
+			break;
+		case 37:
+			CAM_INFO(CAM_ISP,
+				"CROP_RND_CLAMP_POST_DSX_C_VID_OUT");
+			break;
+		case 38:
+			CAM_INFO(CAM_ISP,
+				"DOWNSCALE_4TO1_Y_VID_DS16_OUT");
+			break;
+		case 39:
+			CAM_INFO(CAM_ISP,
+				"DOWNSCALE_4TO1_C_VID_DS16_OUT");
+			break;
+		case 40:
+			CAM_INFO(CAM_ISP,
+				"CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_Y_VID_DS16_OUT");
+			break;
+		case 41:
+			CAM_INFO(CAM_ISP,
+				"CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_C_VID_DS16_OUT");
+			break;
+		case 42:
+			CAM_INFO(CAM_ISP, "BLS");
+			break;
+		case 43:
+			CAM_INFO(CAM_ISP, "STATS_TINTLESS_BG");
+			break;
+		case 44:
+			CAM_INFO(CAM_ISP, "STATS_HDR_BHIST");
+			break;
+		case 45:
+			CAM_INFO(CAM_ISP, "STATS_HDR_BE");
+			break;
+		case 46:
+			CAM_INFO(CAM_ISP, "STATS_AWB_BG");
+			break;
+		case 47:
+			CAM_INFO(CAM_ISP, "STATS_BHIST");
+			break;
+		case 48:
+			CAM_INFO(CAM_ISP, "STATS_BAF");
+			break;
+		case 49:
+			CAM_INFO(CAM_ISP, "STATS_RS");
+			break;
+		case 50:
+			CAM_INFO(CAM_ISP, "STATS_CS");
+			break;
+		case 51:
+			CAM_INFO(CAM_ISP, "STATS_IHIST");
+			break;
+		default:
+			CAM_ERR(CAM_ISP,
+				"Invalid Module ID:%d", module_id);
+			break;
+		}
+	}
+}
+
 static int cam_vfe_camif_ver3_handle_irq_top_half(uint32_t evt_id,
 	struct cam_irq_th_payload *th_payload)
 {
@@ -773,15 +1079,14 @@
 static int cam_vfe_camif_ver3_handle_irq_bottom_half(void *handler_priv,
 	void *evt_payload_priv)
 {
-	int                                   ret = CAM_VFE_IRQ_STATUS_ERR;
-	struct cam_isp_resource_node         *camif_node;
-	struct cam_vfe_mux_camif_ver3_data   *camif_priv;
-	struct cam_vfe_top_irq_evt_payload   *payload;
-	struct cam_isp_hw_event_info          evt_info;
-	uint32_t                              irq_status0;
-	uint32_t                              irq_status1;
-	uint32_t                              irq_status2;
-	uint32_t                              val;
+	int ret = CAM_VFE_IRQ_STATUS_ERR;
+	struct cam_isp_resource_node *camif_node;
+	struct cam_vfe_mux_camif_ver3_data *camif_priv;
+	struct cam_vfe_top_irq_evt_payload *payload;
+	struct cam_isp_hw_event_info evt_info;
+	uint32_t irq_status[CAM_IFE_IRQ_REGISTERS_MAX];
+	uint32_t val;
+	int i = 0;
 
 	if (!handler_priv || !evt_payload_priv) {
 		CAM_ERR(CAM_ISP,
@@ -793,15 +1098,15 @@
 	camif_node = handler_priv;
 	camif_priv = camif_node->res_priv;
 	payload = evt_payload_priv;
-	irq_status0 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
-	irq_status1 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS1];
-	irq_status2 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS2];
+	for (i = 0; i < CAM_IFE_IRQ_REGISTERS_MAX; i++)
+		irq_status[i] = payload->irq_reg_val[i];
 
 	evt_info.hw_idx   = camif_node->hw_intf->hw_idx;
 	evt_info.res_id   = camif_node->res_id;
 	evt_info.res_type = camif_node->res_type;
 
-	if (irq_status1 & camif_priv->reg_data->sof_irq_mask) {
+	if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS1]
+		& camif_priv->reg_data->sof_irq_mask) {
 		if ((camif_priv->enable_sof_irq_debug) &&
 			(camif_priv->irq_debug_cnt <=
 			CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX)) {
@@ -824,7 +1129,8 @@
 		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
 	}
 
-	if (irq_status1 & camif_priv->reg_data->epoch0_irq_mask) {
+	if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS1]
+		& camif_priv->reg_data->epoch0_irq_mask) {
 		CAM_DBG(CAM_ISP, "Received EPOCH");
 
 		if (camif_priv->event_cb)
@@ -834,7 +1140,8 @@
 		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
 	}
 
-	if (irq_status1 & camif_priv->reg_data->eof_irq_mask) {
+	if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS1]
+		& camif_priv->reg_data->eof_irq_mask) {
 		CAM_DBG(CAM_ISP, "Received EOF");
 
 		if (camif_priv->event_cb)
@@ -844,29 +1151,104 @@
 		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
 	}
 
-	if (irq_status0 & camif_priv->reg_data->error_irq_mask0) {
-		CAM_ERR(CAM_ISP, "Received VFE Overflow ERROR\n");
-
-		evt_info.err_type = CAM_VFE_IRQ_STATUS_OVERFLOW;
+	if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS0]
+		& camif_priv->reg_data->error_irq_mask0) {
+		CAM_ERR(CAM_ISP, "VFE Overflow");
 
 		if (camif_priv->event_cb)
 			camif_priv->event_cb(camif_priv->priv,
 				CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info);
 
+		val = cam_io_r(camif_priv->mem_base +
+			camif_priv->common_reg->bus_overflow_status);
+
+		if (val) {
+
+			if (val & 0x01)
+				CAM_INFO(CAM_ISP, "VID Y 1:1 bus overflow");
+
+			if (val & 0x02)
+				CAM_INFO(CAM_ISP, "VID C 1:1 bus overflow");
+
+			if (val & 0x04)
+				CAM_INFO(CAM_ISP, "VID YC 4:1 bus overflow");
+
+			if (val & 0x08)
+				CAM_INFO(CAM_ISP, "VID YC 16:1 bus overflow");
+
+			if (val & 0x010)
+				CAM_INFO(CAM_ISP, "DISP Y 1:1 bus overflow");
+
+			if (val & 0x020)
+				CAM_INFO(CAM_ISP, "DISP C 1:1 bus overflow");
+
+			if (val & 0x040)
+				CAM_INFO(CAM_ISP, "DISP YC 4:1 bus overflow");
+
+			if (val & 0x080)
+				CAM_INFO(CAM_ISP, "DISP YC 16:1 bus overflow");
+
+			if (val & 0x0100)
+				CAM_INFO(CAM_ISP, "FD Y bus overflow");
+
+			if (val & 0x0200)
+				CAM_INFO(CAM_ISP, "FD C bus overflow");
+
+			if (val & 0x0400)
+				CAM_INFO(CAM_ISP,
+				"PIXEL RAW DUMP bus overflow");
+
+			if (val & 0x01000)
+				CAM_INFO(CAM_ISP, "STATS HDR BE bus overflow");
+
+			if (val & 0x02000)
+				CAM_INFO(CAM_ISP,
+				"STATS HDR BHIST bus overflow");
+
+			if (val & 0x04000)
+				CAM_INFO(CAM_ISP,
+				"STATS TINTLESS BG bus overflow");
+
+			if (val & 0x08000)
+				CAM_INFO(CAM_ISP, "STATS AWB BG bus overflow");
+
+			if (val & 0x010000)
+				CAM_INFO(CAM_ISP, "STATS BHIST bus overflow");
+
+			if (val & 0x020000)
+				CAM_INFO(CAM_ISP, "STATS RS bus overflow");
+
+			if (val & 0x040000)
+				CAM_INFO(CAM_ISP, "STATS CS bus overflow");
+
+			if (val & 0x080000)
+				CAM_INFO(CAM_ISP, "STATS IHIST bus overflow");
+
+			if (val & 0x0100000)
+				CAM_INFO(CAM_ISP, "STATS BAF bus overflow");
+
+			if (val & 0x0200000)
+				CAM_INFO(CAM_ISP, "PDAF bus overflow");
+		}
+
 		ret = CAM_VFE_IRQ_STATUS_OVERFLOW;
+		cam_vfe_camif_ver3_print_status(
+			irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS0],
+			irq_status[CAM_IFE_IRQ_VIOLATION_STATUS], ret);
 		cam_vfe_camif_ver3_reg_dump(camif_node->res_priv);
 	}
 
-	if (irq_status2 & camif_priv->reg_data->error_irq_mask2) {
-		CAM_ERR(CAM_ISP, "Received CAMIF Violation ERROR\n");
-
-		evt_info.err_type = CAM_VFE_IRQ_STATUS_VIOLATION;
+	if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS2]) {
+		CAM_ERR(CAM_ISP, "VFE Violation");
 
 		if (camif_priv->event_cb)
 			camif_priv->event_cb(camif_priv->priv,
 				CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info);
 
 		ret = CAM_VFE_IRQ_STATUS_VIOLATION;
+		cam_vfe_camif_ver3_print_status(
+			irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS2],
+			irq_status[CAM_IFE_IRQ_VIOLATION_STATUS], ret);
 		cam_vfe_camif_ver3_reg_dump(camif_node->res_priv);
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.c
index 3f0799e..955cbf0 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.c
@@ -350,6 +350,19 @@
 	return rc;
 }
 
+static int cam_vfe_core_config_control(
+	struct cam_vfe_top_ver3_priv *top_priv,
+	 void *cmd_args, uint32_t arg_size)
+{
+	struct cam_vfe_core_config_args  *core_config = cmd_args;
+
+	if (core_config->node_res->process_cmd)
+		return core_config->node_res->process_cmd(core_config->node_res,
+			CAM_ISP_HW_CMD_CORE_CONFIG, cmd_args, arg_size);
+
+	return -EINVAL;
+}
+
 static int cam_vfe_top_ver3_bw_control(
 	struct cam_vfe_top_ver3_priv *top_priv,
 	 void *cmd_args, uint32_t arg_size)
@@ -713,6 +726,9 @@
 	case CAM_ISP_HW_CMD_BW_CONTROL:
 		rc = cam_vfe_top_ver3_bw_control(top_priv, cmd_args, arg_size);
 		break;
+	case CAM_ISP_HW_CMD_CORE_CONFIG:
+		rc = cam_vfe_core_config_control(top_priv, cmd_args, arg_size);
+		break;
 	default:
 		rc = -EINVAL;
 		CAM_ERR(CAM_ISP, "Error, Invalid cmd:%d", cmd_type);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.h
index a83048d..1ae8e5d 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.h
@@ -12,6 +12,17 @@
 
 #define CAM_VFE_TOP_VER3_MUX_MAX     6
 
+#define CAM_SHIFT_TOP_CORE_CFG_MUXSEL_PDAF       31
+#define CAM_SHIFT_TOP_CORE_CFG_VID_DS16_R2PD     30
+#define CAM_SHIFT_TOP_CORE_CFG_VID_DS4_R2PD      29
+#define CAM_SHIFT_TOP_CORE_CFG_DISP_DS16_R2PD    28
+#define CAM_SHIFT_TOP_CORE_CFG_DISP_DS4_R2PD     27
+#define CAM_SHIFT_TOP_CORE_CFG_DSP_STREAMING     25
+#define CAM_SHIFT_TOP_CORE_CFG_STATS_IHIST       10
+#define CAM_SHIFT_TOP_CORE_CFG_STATS_HDR_BE       9
+#define CAM_SHIFT_TOP_CORE_CFG_STATS_HDR_BHIST    8
+#define CAM_SHIFT_TOP_CORE_CFG_INPUTMUX_PP        5
+
 struct cam_vfe_top_ver3_reg_offset_common {
 	uint32_t hw_version;
 	uint32_t titan_version;
@@ -36,6 +47,20 @@
 	uint32_t diag_config;
 	uint32_t diag_sensor_status_0;
 	uint32_t diag_sensor_status_1;
+	uint32_t bus_overflow_status;
+};
+
+struct cam_vfe_camif_common_cfg {
+	uint32_t     vid_ds16_r2pd;
+	uint32_t     vid_ds4_r2pd;
+	uint32_t     disp_ds16_r2pd;
+	uint32_t     disp_ds4_r2pd;
+	uint32_t     dsp_streaming_tap_point;
+	uint32_t     ihist_src_sel;
+	uint32_t     hdr_be_src_sel;
+	uint32_t     hdr_bhist_src_sel;
+	uint32_t     input_mux_sel_pdaf;
+	uint32_t     input_mux_sel_pp;
 };
 
 struct cam_vfe_top_ver3_hw_info {
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
index 981d7bba..864b37e 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
@@ -41,6 +41,7 @@
 	link->last_flush_id = 0;
 	link->initial_sync_req = -1;
 	link->in_msync_mode = false;
+	link->retry_cnt = 0;
 }
 
 void cam_req_mgr_handle_core_shutdown(void)
@@ -167,6 +168,46 @@
 }
 
 /**
+ * __cam_req_mgr_notify_error_on_link()
+ *
+ * @brief : Notify userspace on exceeding max retry
+ *          attempts to apply same req
+ * @link  : link on which the req could not be applied
+ *
+ */
+static int __cam_req_mgr_notify_error_on_link(
+	struct cam_req_mgr_core_link    *link)
+{
+	struct cam_req_mgr_core_session *session = NULL;
+	struct cam_req_mgr_message       msg;
+	int rc = 0;
+
+	session = (struct cam_req_mgr_core_session *)link->parent;
+
+	CAM_ERR(CAM_CRM,
+		"Notifying userspace to trigger recovery on link 0x%x for session %d",
+		link->link_hdl, session->session_hdl);
+
+	memset(&msg, 0, sizeof(msg));
+
+	msg.session_hdl = session->session_hdl;
+	msg.u.err_msg.error_type = CAM_REQ_MGR_ERROR_TYPE_RECOVERY;
+	msg.u.err_msg.request_id = 0;
+	msg.u.err_msg.link_hdl   = link->link_hdl;
+
+	rc = cam_req_mgr_notify_message(&msg,
+		V4L_EVENT_CAM_REQ_MGR_ERROR,
+		V4L_EVENT_CAM_REQ_MGR_EVENT);
+
+	if (rc)
+		CAM_ERR(CAM_CRM,
+			"Error in notifying recovery for session %d link 0x%x rc %d",
+			session->session_hdl, link->link_hdl, rc);
+
+	return rc;
+}
+
+/**
  * __cam_req_mgr_traverse()
  *
  * @brief    : Traverse through pd tables, it will internally cover all linked
@@ -1092,7 +1133,20 @@
 	if (rc < 0) {
 		/* Apply req failed retry at next sof */
 		slot->status = CRM_SLOT_STATUS_REQ_PENDING;
+
+		link->retry_cnt++;
+		if (link->retry_cnt == MAXIMUM_RETRY_ATTEMPTS) {
+			CAM_DBG(CAM_CRM,
+				"Max retry attempts reached on link[0x%x] for req [%lld]",
+				link->link_hdl,
+				in_q->slot[in_q->rd_idx].req_id);
+			__cam_req_mgr_notify_error_on_link(link);
+			link->retry_cnt = 0;
+		}
 	} else {
+		if (link->retry_cnt)
+			link->retry_cnt = 0;
+
 		link->trigger_mask |= trigger;
 
 		CAM_DBG(CAM_CRM, "Applied req[%lld] on link[%x] success",
@@ -1342,7 +1396,7 @@
 	memset(&msg, 0, sizeof(msg));
 
 	msg.session_hdl = session->session_hdl;
-	msg.u.err_msg.error_type = CAM_REQ_MGR_ERROR_TYPE_DEVICE;
+	msg.u.err_msg.error_type = CAM_REQ_MGR_ERROR_TYPE_RECOVERY;
 	msg.u.err_msg.request_id = 0;
 	msg.u.err_msg.link_hdl   = link->link_hdl;
 
@@ -1586,6 +1640,7 @@
 	link->req.in_q = NULL;
 	i = link - g_links;
 	CAM_DBG(CAM_CRM, "free link index %d", i);
+	cam_req_mgr_core_link_reset(link);
 	atomic_set(&g_links[i].is_used, 0);
 }
 
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
index 8f07b3b..9a6acbc 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
@@ -29,6 +29,8 @@
 
 #define MAXIMUM_LINKS_PER_SESSION  4
 
+#define MAXIMUM_RETRY_ATTEMPTS 3
+
 /**
  * enum crm_workq_task_type
  * @codes: to identify which type of task is present
@@ -310,6 +312,8 @@
  * @in_msync_mode        : Flag to determine if a link is in master-slave mode
  * @initial_sync_req     : The initial req which is required to sync with the
  *                         other link
+ * @retry_cnt            : Counter that tracks number of attempts to apply
+ *                         the same req
  */
 struct cam_req_mgr_core_link {
 	int32_t                              link_hdl;
@@ -336,6 +340,7 @@
 	bool                                 initial_skip;
 	bool                                 in_msync_mode;
 	int64_t                              initial_sync_req;
+	uint32_t                             retry_cnt;
 };
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
index 412932d..02a7a1f 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
@@ -102,7 +102,7 @@
 	}
 
 	if ((packet->kmd_cmd_buf_index < 0) ||
-		(packet->kmd_cmd_buf_index > packet->num_cmd_buf)) {
+		(packet->kmd_cmd_buf_index >= packet->num_cmd_buf)) {
 		CAM_ERR(CAM_UTIL, "Invalid kmd buf index: %d",
 			packet->kmd_cmd_buf_index);
 		return -EINVAL;
diff --git a/drivers/media/platform/msm/cvp/Makefile b/drivers/media/platform/msm/cvp/Makefile
index 2525565..0d80860 100644
--- a/drivers/media/platform/msm/cvp/Makefile
+++ b/drivers/media/platform/msm/cvp/Makefile
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
 ccflags-y += -I$(srctree)/drivers/media/platform/msm/cvp/
+ccflags-y += -I$(srctree)/drivers/media/platform/msm/synx/
 
 msm-cvp-objs := msm_v4l2_cvp.o \
 				msm_v4l2_private.o \
diff --git a/drivers/media/platform/msm/cvp/cvp_hfi.c b/drivers/media/platform/msm/cvp/cvp_hfi.c
index 65a108d..ba93c30 100644
--- a/drivers/media/platform/msm/cvp/cvp_hfi.c
+++ b/drivers/media/platform/msm/cvp/cvp_hfi.c
@@ -37,6 +37,170 @@
 #define REG_ADDR_OFFSET_BITMASK	0x000FFFFF
 #define QDSS_IOVA_START 0x80001000
 
+const struct msm_cvp_hfi_defs cvp_hfi_defs[] = {
+	{
+		.size = HFI_DFS_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_DFS_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_DFS_CONFIG_CMD_DONE,
+	},
+	{
+		.size = HFI_DFS_FRAME_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_DFS_FRAME,
+		.buf_offset = HFI_DFS_FRAME_BUFFERS_OFFSET,
+		.buf_num = HFI_DFS_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
+	{
+		.size = HFI_DME_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_DME_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_DME_CONFIG_CMD_DONE,
+	},
+	{
+		.size = HFI_DME_BASIC_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_DME_BASIC_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_DME_BASIC_CONFIG_CMD_DONE,
+	},
+	{
+		.size = HFI_DME_FRAME_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_DME_FRAME,
+		.buf_offset = HFI_DME_FRAME_BUFFERS_OFFSET,
+		.buf_num = HFI_DME_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
+	{
+		.size = HFI_PERSIST_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_SET_PERSIST_BUFFERS,
+		.buf_offset = HFI_PERSIST_BUFFERS_OFFSET,
+		.buf_num = HFI_PERSIST_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
+	{
+		.size = HFI_DS_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_DS,
+		.buf_offset = HFI_DS_BUFFERS_OFFSET,
+		.buf_num = HFI_DS_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
+	{
+		.size = HFI_OF_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_CV_TME_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_TME_CONFIG_CMD_DONE,
+	},
+	{
+		.size = HFI_OF_FRAME_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_CV_TME_FRAME,
+		.buf_offset = HFI_OF_BUFFERS_OFFSET,
+		.buf_num = HFI_OF_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
+	{
+		.size = HFI_ODT_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_CV_ODT_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_ODT_CONFIG_CMD_DONE,
+	},
+	{
+		.size = HFI_ODT_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_CV_ODT_FRAME,
+		.buf_offset = HFI_ODT_BUFFERS_OFFSET,
+		.buf_num = HFI_ODT_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
+	{
+		.size = HFI_OD_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_CV_OD_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_OD_CONFIG_CMD_DONE,
+	},
+	{
+		.size = HFI_OD_FRAME_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_CV_OD_FRAME,
+		.buf_offset = HFI_OD_BUFFERS_OFFSET,
+		.buf_num = HFI_OD_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
+	{
+		.size = HFI_NCC_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_NCC_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_NCC_CONFIG_CMD_DONE,
+	},
+	{
+		.size = HFI_NCC_FRAME_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_NCC_FRAME,
+		.buf_offset = HFI_NCC_BUFFERS_OFFSET,
+		.buf_num = HFI_NCC_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
+	{
+		.size = HFI_ICA_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_ICA_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_ICA_CONFIG_CMD_DONE,
+	},
+	{
+		.size = HFI_ICA_FRAME_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_ICA_FRAME,
+		.buf_offset = HFI_ICA_BUFFERS_OFFSET,
+		.buf_num = HFI_ICA_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
+	{
+		.size = HFI_HCD_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_HCD_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_HCD_CONFIG_CMD_DONE,
+	},
+	{
+		.size = HFI_HCD_FRAME_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_HCD_FRAME,
+		.buf_offset = HFI_HCD_BUFFERS_OFFSET,
+		.buf_num = HFI_HCD_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
+	{
+		.size = HFI_DCM_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_DCM_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_DCM_CONFIG_CMD_DONE,
+	},
+	{
+		.size = HFI_DCM_FRAME_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_DCM_FRAME,
+		.buf_offset = HFI_DCM_BUFFERS_OFFSET,
+		.buf_num = HFI_DCM_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
+	{
+		.size = HFI_PYS_HCD_CONFIG_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_PYS_HCD_CONFIG,
+		.buf_offset = 0,
+		.buf_num = 0,
+		.resp = HAL_SESSION_PYS_HCD_CONFIG_CMD_DONE,
+	},
+	{
+		.size = HFI_PYS_HCD_FRAME_CMD_SIZE,
+		.type = HFI_CMD_SESSION_CVP_PYS_HCD_FRAME,
+		.buf_offset = HFI_PYS_HCD_BUFFERS_OFFSET,
+		.buf_num = HFI_PYS_HCD_BUF_NUM,
+		.resp = HAL_NO_RESP,
+	},
+};
+
 static struct hal_device_data hal_ctxt;
 
 #define TZBSP_MEM_PROTECT_VIDEO_VAR 0x8
@@ -72,7 +236,7 @@
 	.data_count = 0,
 };
 
-const int cvp_max_packets = 1000;
+const int cvp_max_packets = 32;
 
 static void venus_hfi_pm_handler(struct work_struct *work);
 static DECLARE_DELAYED_WORK(venus_hfi_pm_work, venus_hfi_pm_handler);
@@ -100,19 +264,12 @@
 static int __power_collapse(struct venus_hfi_device *device, bool force);
 static int venus_hfi_noc_error_info(void *dev);
 
-static void interrupt_init_vpu4(struct venus_hfi_device *device);
 static void interrupt_init_vpu5(struct venus_hfi_device *device);
 static void setup_dsp_uc_memmap_vpu5(struct venus_hfi_device *device);
 static void clock_config_on_enable_vpu5(struct venus_hfi_device *device);
 
 static int __set_ubwc_config(struct venus_hfi_device *device);
 
-struct venus_hfi_vpu_ops cvp_vpu4_ops = {
-	.interrupt_init = interrupt_init_vpu4,
-	.setup_dsp_uc_memmap = NULL,
-	.clock_config_on_enable = NULL,
-};
-
 struct venus_hfi_vpu_ops cvp_vpu5_ops = {
 	.interrupt_init = interrupt_init_vpu5,
 	.setup_dsp_uc_memmap = setup_dsp_uc_memmap_vpu5,
@@ -147,6 +304,29 @@
 
 #define ROW_SIZE 32
 
+int get_pkt_index(struct cvp_hal_session_cmd_pkt *hdr)
+{
+	int i, pkt_num = ARRAY_SIZE(cvp_hfi_defs);
+
+	for (i = 0; i < pkt_num; i++)
+		if ((cvp_hfi_defs[i].size*sizeof(unsigned int) == hdr->size) &&
+			(cvp_hfi_defs[i].type == hdr->packet_type))
+			return i;
+
+	return -EINVAL;
+}
+
+int get_signal_from_pkt_type(unsigned int type)
+{
+	int i, pkt_num = ARRAY_SIZE(cvp_hfi_defs);
+
+	for (i = 0; i < pkt_num; i++)
+		if (cvp_hfi_defs[i].type == type)
+			return cvp_hfi_defs[i].resp;
+
+	return -EINVAL;
+}
+
 static void __dump_packet(u8 *packet, enum cvp_msg_prio log_level)
 {
 	u32 c = 0, packet_size = *(u32 *)packet;
@@ -165,96 +345,6 @@
 	}
 }
 
-static void __sim_modify_cmd_packet(u8 *packet, struct venus_hfi_device *device)
-{
-	struct hfi_cmd_sys_session_init_packet *sys_init;
-	struct hal_session *session = NULL;
-	u8 i;
-	phys_addr_t fw_bias = 0;
-
-	if (!device || !packet) {
-		dprintk(CVP_ERR, "Invalid Param\n");
-		return;
-	} else if (!device->hal_data->firmware_base
-			|| is_iommu_present(device->res)) {
-		return;
-	}
-
-	fw_bias = device->hal_data->firmware_base;
-	sys_init = (struct hfi_cmd_sys_session_init_packet *)packet;
-
-	session = __get_session(device, sys_init->session_id);
-	if (!session) {
-		dprintk(CVP_DBG, "%s :Invalid session id: %x\n",
-				__func__, sys_init->session_id);
-		return;
-	}
-
-	switch (sys_init->packet_type) {
-	case HFI_CMD_SESSION_EMPTY_BUFFER:
-		if (session->is_decoder) {
-			struct hfi_cmd_session_empty_buffer_compressed_packet
-			*pkt = (struct
-			hfi_cmd_session_empty_buffer_compressed_packet
-			*) packet;
-			pkt->packet_buffer -= fw_bias;
-		} else {
-			struct
-			hfi_cmd_session_empty_buffer_uncompressed_plane0_packet
-			*pkt = (struct
-			hfi_cmd_session_empty_buffer_uncompressed_plane0_packet
-			*) packet;
-			pkt->packet_buffer -= fw_bias;
-		}
-		break;
-	case HFI_CMD_SESSION_FILL_BUFFER:
-	{
-		struct hfi_cmd_session_fill_buffer_packet *pkt =
-			(struct hfi_cmd_session_fill_buffer_packet *)packet;
-		pkt->packet_buffer -= fw_bias;
-		break;
-	}
-	case HFI_CMD_SESSION_SET_BUFFERS:
-	{
-		struct hfi_cmd_session_set_buffers_packet *pkt =
-			(struct hfi_cmd_session_set_buffers_packet *)packet;
-		if (pkt->buffer_type == HFI_BUFFER_OUTPUT ||
-			pkt->buffer_type == HFI_BUFFER_OUTPUT2) {
-			struct hfi_buffer_info *buff;
-
-			buff = (struct hfi_buffer_info *) pkt->rg_buffer_info;
-			buff->buffer_addr -= fw_bias;
-			if (buff->extra_data_addr >= fw_bias)
-				buff->extra_data_addr -= fw_bias;
-		} else {
-			for (i = 0; i < pkt->num_buffers; i++)
-				pkt->rg_buffer_info[i] -= fw_bias;
-		}
-		break;
-	}
-	case HFI_CMD_SESSION_RELEASE_BUFFERS:
-	{
-		struct hfi_cmd_session_release_buffer_packet *pkt =
-			(struct hfi_cmd_session_release_buffer_packet *)packet;
-
-		if (pkt->buffer_type == HFI_BUFFER_OUTPUT ||
-			pkt->buffer_type == HFI_BUFFER_OUTPUT2) {
-			struct hfi_buffer_info *buff;
-
-			buff = (struct hfi_buffer_info *) pkt->rg_buffer_info;
-			buff->buffer_addr -= fw_bias;
-			buff->extra_data_addr -= fw_bias;
-		} else {
-			for (i = 0; i < pkt->num_buffers; i++)
-				pkt->rg_buffer_info[i] -= fw_bias;
-		}
-		break;
-	}
-	default:
-		break;
-	}
-}
-
 static int __dsp_send_hfi_queue(struct venus_hfi_device *device)
 {
 	int rc;
@@ -382,87 +472,16 @@
 	return rc;
 }
 
-static int __session_pause(struct venus_hfi_device *device,
-		struct hal_session *session)
-{
-	int rc = 0;
-
-	/* ignore if session paused already */
-	if (session->flags & SESSION_PAUSE)
-		return 0;
-
-	session->flags |= SESSION_PAUSE;
-	dprintk(CVP_DBG, "%s: cvp session %x paused\n", __func__,
-		hash32_ptr(session));
-
-	return rc;
-}
-
-static int __session_resume(struct venus_hfi_device *device,
-		struct hal_session *session)
-{
-	int rc = 0;
-
-	/* ignore if session already resumed */
-	if (!(session->flags & SESSION_PAUSE))
-		return 0;
-
-	session->flags &= ~SESSION_PAUSE;
-	dprintk(CVP_DBG, "%s: cvp session %x resumed\n", __func__,
-		hash32_ptr(session));
-
-	rc = __resume(device);
-	if (rc) {
-		dprintk(CVP_ERR, "%s: resume failed\n", __func__);
-		goto exit;
-	}
-
-	if (device->dsp_flags & DSP_SUSPEND) {
-		dprintk(CVP_ERR, "%s: dsp not resumed\n", __func__);
-		rc = -EINVAL;
-		goto exit;
-	}
-
-exit:
-	return rc;
-}
-
 static int venus_hfi_session_pause(void *sess)
 {
-	int rc;
-	struct hal_session *session = sess;
-	struct venus_hfi_device *device;
-
-	if (!session || !session->device) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-	device = session->device;
-
-	mutex_lock(&device->lock);
-	rc = __session_pause(device, session);
-	mutex_unlock(&device->lock);
-
-	return rc;
+	dprintk(CVP_ERR, "Deprecated function %s\n", __func__);
+	return -EINVAL;
 }
 
 static int venus_hfi_session_resume(void *sess)
 {
-	int rc;
-	struct hal_session *session = sess;
-	struct venus_hfi_device *device;
-
-	if (!session || !session->device) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-	device = session->device;
-
-	mutex_lock(&device->lock);
-	rc = __session_resume(device, session);
-	mutex_unlock(&device->lock);
-
-	return rc;
+	dprintk(CVP_ERR, "Deprecated function %s\n", __func__);
+	return -EINVAL;
 }
 
 static int __acquire_regulator(struct regulator_info *rinfo,
@@ -622,71 +641,6 @@
 	return 0;
 }
 
-static void __hal_sim_modify_msg_packet(u8 *packet,
-					struct venus_hfi_device *device)
-{
-	struct hfi_msg_sys_session_init_done_packet *init_done;
-	struct hal_session *session = NULL;
-	phys_addr_t fw_bias = 0;
-
-	if (!device || !packet) {
-		dprintk(CVP_ERR, "Invalid Param\n");
-		return;
-	} else if (!device->hal_data->firmware_base
-			|| is_iommu_present(device->res)) {
-		return;
-	}
-
-	fw_bias = device->hal_data->firmware_base;
-	init_done = (struct hfi_msg_sys_session_init_done_packet *)packet;
-	session = __get_session(device, init_done->session_id);
-
-	if (!session) {
-		dprintk(CVP_DBG, "%s: Invalid session id: %x\n",
-				__func__, init_done->session_id);
-		return;
-	}
-
-	switch (init_done->packet_type) {
-	case HFI_MSG_SESSION_FILL_BUFFER_DONE:
-		if (session->is_decoder) {
-			struct
-			hfi_msg_session_fbd_uncompressed_plane0_packet
-			*pkt_uc = (struct
-			hfi_msg_session_fbd_uncompressed_plane0_packet
-			*) packet;
-			pkt_uc->packet_buffer += fw_bias;
-		} else {
-			struct
-			hfi_msg_session_fill_buffer_done_compressed_packet
-			*pkt = (struct
-			hfi_msg_session_fill_buffer_done_compressed_packet
-			*) packet;
-			pkt->packet_buffer += fw_bias;
-		}
-		break;
-	case HFI_MSG_SESSION_EMPTY_BUFFER_DONE:
-	{
-		struct hfi_msg_session_empty_buffer_done_packet *pkt =
-		(struct hfi_msg_session_empty_buffer_done_packet *)packet;
-		pkt->packet_buffer += fw_bias;
-		break;
-	}
-	case HFI_MSG_SESSION_GET_SEQUENCE_HEADER_DONE:
-	{
-		struct
-		hfi_msg_session_get_sequence_header_done_packet
-		*pkt =
-		(struct hfi_msg_session_get_sequence_header_done_packet *)
-		packet;
-		pkt->sequence_header += fw_bias;
-		break;
-	}
-	default:
-		break;
-	}
-}
-
 static int __read_queue(struct cvp_iface_q_info *qinfo, u8 *packet,
 		u32 *pb_tx_req_is_set)
 {
@@ -1292,7 +1246,7 @@
 			}
 
 			trace_msm_cvp_perf_clock_scale(cl->name, freq);
-			dprintk(CVP_PROF, "Scaling clock %s to %u\n",
+			dprintk(CVP_DBG, "Scaling clock %s to %u\n",
 					cl->name, freq);
 		}
 	}
@@ -1377,7 +1331,6 @@
 		goto err_q_null;
 	}
 
-	__sim_modify_cmd_packet((u8 *)pkt, device);
 	if (__resume(device)) {
 		dprintk(CVP_ERR, "%s: Power on failed\n", __func__);
 		goto err_q_write;
@@ -1446,7 +1399,6 @@
 	}
 
 	if (!__read_queue(q_info, (u8 *)pkt, &tx_req_is_set)) {
-		__hal_sim_modify_msg_packet((u8 *)pkt, device);
 		if (tx_req_is_set)
 			__write_register(device, CVP_CPU_CS_H2ASOFTINT, 1);
 		rc = 0;
@@ -2543,7 +2495,7 @@
 		goto err_create_pkt;
 	}
 
-	dprintk(CVP_INFO, "set buffers: %#x\n", buffer_info->buffer_type);
+	dprintk(CVP_DBG, "set buffers: %#x\n", buffer_info->buffer_type);
 	if (__iface_cmdq_write(session->device, &pkt))
 		rc = -ENOTEMPTY;
 
@@ -2596,56 +2548,26 @@
 static int venus_hfi_session_start(void *session)
 {
 	dprintk(CVP_ERR, "Deprecated function %s\n", __func__);
-		return -EINVAL;
+	return -EINVAL;
 }
 
 static int venus_hfi_session_continue(void *session)
 {
-	struct hal_session *sess;
-	struct venus_hfi_device *device;
-	int rc = 0;
-
-	if (!session) {
-		dprintk(CVP_ERR, "Invalid Params %s\n", __func__);
-		return -EINVAL;
-	}
-
-	sess = session;
-	device = sess->device;
-
-	mutex_lock(&device->lock);
-	rc = __send_session_cmd(sess, HFI_CMD_SESSION_CONTINUE);
-	mutex_unlock(&device->lock);
-
-	return rc;
+	dprintk(CVP_ERR, "Deprecated function %s\n", __func__);
+	return -EINVAL;
 }
 
 static int venus_hfi_session_stop(void *session)
 {
-	struct hal_session *sess;
-	struct venus_hfi_device *device;
-	int rc = 0;
-
-	if (!session) {
-		dprintk(CVP_ERR, "Invalid Params %s\n", __func__);
-		return -EINVAL;
-	}
-
-	sess = session;
-	device = sess->device;
-
-	mutex_lock(&device->lock);
-	rc = __send_session_cmd(sess, HFI_CMD_SESSION_STOP);
-	mutex_unlock(&device->lock);
-
-	return rc;
+	dprintk(CVP_ERR, "Deprecated function %s\n", __func__);
+	return -EINVAL;
 }
 
-static int venus_hfi_session_cvp_dme_config(void *sess,
-		struct msm_cvp_internal_dmeconfig *dme_config)
+static int venus_hfi_session_send(void *sess,
+		struct cvp_kmd_hfi_packet *in_pkt)
 {
 	int rc = 0;
-	struct hfi_cmd_session_cvp_dme_config_packet pkt;
+	struct cvp_kmd_hfi_packet pkt;
 	struct hal_session *session = sess;
 	struct venus_hfi_device *device;
 
@@ -2659,174 +2581,26 @@
 
 	if (!__is_session_valid(device, session, __func__)) {
 		rc = -EINVAL;
-		goto dme_config_err;
+		goto err_send_pkt;
 	}
-	rc = call_hfi_pkt_op(device, session_cvp_dme_config,
-			&pkt, session, dme_config);
+	rc = call_hfi_pkt_op(device, session_send,
+			&pkt, session, in_pkt);
 	if (rc) {
 		dprintk(CVP_ERR,
-				"Session get buf req: failed to create pkt\n");
-		goto dme_config_err;
-	}
-
-	if (__iface_cmdq_write(session->device, &pkt))
-		rc = -ENOTEMPTY;
-	dprintk(CVP_DBG, "%s: calling __iface_cmdq_write\n", __func__);
-dme_config_err:
-	mutex_unlock(&device->lock);
-	return rc;
-}
-
-static int venus_hfi_session_cvp_dme_frame(void *sess,
-				struct msm_cvp_internal_dmeframe *dme_frame)
-{
-	int rc = 0;
-	struct hfi_cmd_session_cvp_dme_frame_packet pkt;
-	struct hal_session *session = sess;
-	struct venus_hfi_device *device;
-
-	if (!session || !session->device) {
-		dprintk(CVP_ERR, "invalid session");
-		return -ENODEV;
-	}
-
-	device = session->device;
-	mutex_lock(&device->lock);
-
-	if (!__is_session_valid(device, session, __func__)) {
-		rc = -EINVAL;
-		goto dme_frame_err;
-	}
-	rc = call_hfi_pkt_op(device, session_cvp_dme_frame,
-			&pkt, session, dme_frame);
-	if (rc) {
-		dprintk(CVP_ERR,
-				"Session get buf req: failed to create pkt\n");
-		goto dme_frame_err;
-	}
-
-	if (__iface_cmdq_write(session->device, &pkt))
-		rc = -ENOTEMPTY;
-	dprintk(CVP_DBG, "%s: done calling __iface_cmdq_write\n", __func__);
-dme_frame_err:
-	mutex_unlock(&device->lock);
-	return rc;
-}
-
-
-static int venus_hfi_session_cvp_persist(void *sess,
-		struct msm_cvp_internal_persist_cmd *pbuf_cmd)
-{
-	int rc = 0;
-	struct hfi_cmd_session_cvp_persist_packet pkt;
-	struct hal_session *session = sess;
-	struct venus_hfi_device *device;
-
-	if (!session || !session->device) {
-		dprintk(CVP_ERR, "invalid session");
-		return -ENODEV;
-	}
-
-	device = session->device;
-	mutex_lock(&device->lock);
-
-	if (!__is_session_valid(device, session, __func__)) {
-		rc = -EINVAL;
-		goto persist_err;
-	}
-	rc = call_hfi_pkt_op(device, session_cvp_persist,
-			&pkt, session, pbuf_cmd);
-	if (rc) {
-		dprintk(CVP_ERR,
-				"Failed to create persist pkt\n");
-		goto persist_err;
+				"failed to create pkt\n");
+		goto err_send_pkt;
 	}
 
 	if (__iface_cmdq_write(session->device, &pkt))
 		rc = -ENOTEMPTY;
 
-	dprintk(CVP_DBG, "%s: done calling __iface_cmdq_write\n", __func__);
-persist_err:
-	mutex_unlock(&device->lock);
-	return rc;
-}
-
-static int venus_hfi_session_cvp_dfs_config(void *sess,
-		struct msm_cvp_internal_dfsconfig *dfs_config)
-{
-	int rc = 0;
-	struct hfi_cmd_session_cvp_dfs_config_packet pkt;
-	struct hal_session *session = sess;
-	struct venus_hfi_device *device;
-
-	if (!session || !session->device) {
-		dprintk(CVP_ERR, "invalid session");
-		return -ENODEV;
-	}
-
-	device = session->device;
-	mutex_lock(&device->lock);
-
-	if (!__is_session_valid(device, session, __func__)) {
-		rc = -EINVAL;
-		goto err_create_pkt;
-	}
-	rc = call_hfi_pkt_op(device, session_cvp_dfs_config,
-			&pkt, session, dfs_config);
-	if (rc) {
-		dprintk(CVP_ERR,
-				"Session get buf req: failed to create pkt\n");
-		goto err_create_pkt;
-	}
-
-	dprintk(CVP_DBG, "%s: calling __iface_cmdq_write\n", __func__);
-	if (__iface_cmdq_write(session->device, &pkt))
-		rc = -ENOTEMPTY;
-	dprintk(CVP_DBG, "%s: done calling __iface_cmdq_write\n", __func__);
-err_create_pkt:
+err_send_pkt:
 	mutex_unlock(&device->lock);
 	return rc;
 
 	return rc;
 }
 
-static int venus_hfi_session_cvp_dfs_frame(void *sess,
-				struct msm_cvp_internal_dfsframe *dfs_frame)
-{
-	int rc = 0;
-	struct hfi_cmd_session_cvp_dfs_frame_packet pkt;
-	struct hal_session *session = sess;
-	struct venus_hfi_device *device;
-
-	if (!session || !session->device) {
-		dprintk(CVP_ERR, "invalid session");
-		return -ENODEV;
-	}
-
-	device = session->device;
-	mutex_lock(&device->lock);
-
-	if (!__is_session_valid(device, session, __func__)) {
-		rc = -EINVAL;
-		goto err_create_pkt;
-	}
-	rc = call_hfi_pkt_op(device, session_cvp_dfs_frame,
-			&pkt, session, dfs_frame);
-	if (rc) {
-		dprintk(CVP_ERR,
-				"Session get buf req: failed to create pkt\n");
-		goto err_create_pkt;
-	}
-
-	dprintk(CVP_DBG, "%s: calling __iface_cmdq_write\n", __func__);
-	if (__iface_cmdq_write(session->device, &pkt))
-		rc = -ENOTEMPTY;
-	dprintk(CVP_DBG, "%s: done calling __iface_cmdq_write\n", __func__);
-err_create_pkt:
-	mutex_unlock(&device->lock);
-	return rc;
-}
-
 static int venus_hfi_session_get_buf_req(void *sess)
 {
 	struct hfi_cmd_session_get_property_packet pkt;
@@ -2863,35 +2637,8 @@
 
 static int venus_hfi_session_flush(void *sess, enum hal_flush flush_mode)
 {
-	struct hfi_cmd_session_flush_packet pkt;
-	int rc = 0;
-	struct hal_session *session = sess;
-	struct venus_hfi_device *device;
-
-	if (!session || !session->device) {
-		dprintk(CVP_ERR, "invalid session");
-		return -ENODEV;
-	}
-
-	device = session->device;
-	mutex_lock(&device->lock);
-
-	if (!__is_session_valid(device, session, __func__)) {
-		rc = -EINVAL;
-		goto err_create_pkt;
-	}
-	rc = call_hfi_pkt_op(device, session_flush,
-			&pkt, session, flush_mode);
-	if (rc) {
-		dprintk(CVP_ERR, "Session flush: failed to create pkt\n");
-		goto err_create_pkt;
-	}
-
-	if (__iface_cmdq_write(session->device, &pkt))
-		rc = -ENOTEMPTY;
-err_create_pkt:
-	mutex_unlock(&device->lock);
-	return rc;
+	dprintk(CVP_ERR, "Deprecated function %s\n", __func__);
+	return -EINVAL;
 }
 
 static int __check_core_registered(struct hal_device_data core,
@@ -3284,6 +3031,7 @@
 	case HAL_SESSION_UNREGISTER_BUFFER_DONE:
 	case HAL_SESSION_DFS_CONFIG_CMD_DONE:
 	case HAL_SESSION_DME_CONFIG_CMD_DONE:
+	case HAL_SESSION_DME_BASIC_CONFIG_CMD_DONE:
 	case HAL_SESSION_DFS_FRAME_CMD_DONE:
 	case HAL_SESSION_DME_FRAME_CMD_DONE:
 	case HAL_SESSION_PERSIST_CMD_DONE:
@@ -3357,6 +3105,9 @@
 					"Corrupt/unknown packet found, discarding\n");
 			--packet_count;
 			continue;
+		} else if (info->response_type == HAL_NO_RESP) {
+			--packet_count;
+			continue;
 		}
 
 		/* Process the packet types that we're interested in */
@@ -4014,30 +3765,14 @@
 	dprintk(CVP_DBG, "Enabling regulators\n");
 
 	venus_hfi_for_each_regulator(device, rinfo) {
-		if (rinfo->has_hw_power_collapse) {
-			rc = regulator_set_mode(rinfo->regulator,
-				REGULATOR_MODE_FAST);
-			if (rc) {
-				dprintk(CVP_ERR,
-					"Failed to enable hwctrl%s: %d\n",
-						rinfo->name, rc);
-				goto err_reg_enable_failed;
-			}
-			dprintk(CVP_DBG, "Enabled regulator %s hw ctrl\n",
-					rinfo->name);
-
-		} else {
-			rc = regulator_enable(rinfo->regulator);
-			if (rc) {
-				dprintk(CVP_ERR,
-						"Failed to enable %s: %d\n",
-						rinfo->name, rc);
-				goto err_reg_enable_failed;
-			}
-
-			dprintk(CVP_DBG, "Enabled regulator %s\n",
-					rinfo->name);
+		rc = regulator_enable(rinfo->regulator);
+		if (rc) {
+			dprintk(CVP_ERR, "Failed to enable %s: %d\n",
+					rinfo->name, rc);
+			goto err_reg_enable_failed;
 		}
+
+		dprintk(CVP_DBG, "Enabled regulator %s\n", rinfo->name);
 		c++;
 	}
 
@@ -4096,10 +3831,6 @@
 		CVP_WRAPPER_INTR_MASK, mask_val);
 }
 
-static void interrupt_init_vpu4(struct venus_hfi_device *device)
-{
-}
-
 static void setup_dsp_uc_memmap_vpu5(struct venus_hfi_device *device)
 {
 	/* initialize DSP QTBL & UCREGION with CPU queues */
@@ -4493,85 +4224,9 @@
 	return rc;
 }
 
-static void __noc_error_info(struct venus_hfi_device *device, u32 core_num)
-{
-	u32 vcodec_core_video_noc_base_offs, val;
-
-	if (!device) {
-		dprintk(CVP_ERR, "%s: null device\n", __func__);
-		return;
-	}
-	if (!core_num) {
-		vcodec_core_video_noc_base_offs =
-			VCODEC_CORE0_VIDEO_NOC_BASE_OFFS;
-	} else if (core_num == 1) {
-		vcodec_core_video_noc_base_offs =
-			VCODEC_CORE1_VIDEO_NOC_BASE_OFFS;
-	} else {
-		dprintk(CVP_ERR, "%s: invalid core_num %u\n",
-			__func__, core_num);
-		return;
-	}
-
-	val = __read_register(device, vcodec_core_video_noc_base_offs +
-			VCODEC_COREX_VIDEO_NOC_ERR_SWID_LOW_OFFS);
-	dprintk(CVP_ERR, "CORE%d_NOC_ERR_SWID_LOW:     %#x\n", core_num, val);
-	val = __read_register(device, vcodec_core_video_noc_base_offs +
-			VCODEC_COREX_VIDEO_NOC_ERR_SWID_HIGH_OFFS);
-	dprintk(CVP_ERR, "CORE%d_NOC_ERR_SWID_HIGH:    %#x\n", core_num, val);
-	val = __read_register(device, vcodec_core_video_noc_base_offs +
-			VCODEC_COREX_VIDEO_NOC_ERR_MAINCTL_LOW_OFFS);
-	dprintk(CVP_ERR, "CORE%d_NOC_ERR_MAINCTL_LOW:  %#x\n", core_num, val);
-	val = __read_register(device, vcodec_core_video_noc_base_offs +
-			VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG0_LOW_OFFS);
-	dprintk(CVP_ERR, "CORE%d_NOC_ERR_ERRLOG0_LOW:  %#x\n", core_num, val);
-	val = __read_register(device, vcodec_core_video_noc_base_offs +
-			VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG0_HIGH_OFFS);
-	dprintk(CVP_ERR, "CORE%d_NOC_ERR_ERRLOG0_HIGH: %#x\n", core_num, val);
-	val = __read_register(device, vcodec_core_video_noc_base_offs +
-			VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG1_LOW_OFFS);
-	dprintk(CVP_ERR, "CORE%d_NOC_ERR_ERRLOG1_LOW:  %#x\n", core_num, val);
-	val = __read_register(device, vcodec_core_video_noc_base_offs +
-			VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG1_HIGH_OFFS);
-	dprintk(CVP_ERR, "CORE%d_NOC_ERR_ERRLOG1_HIGH: %#x\n", core_num, val);
-	val = __read_register(device, vcodec_core_video_noc_base_offs +
-			VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG2_LOW_OFFS);
-	dprintk(CVP_ERR, "CORE%d_NOC_ERR_ERRLOG2_LOW:  %#x\n", core_num, val);
-	val = __read_register(device, vcodec_core_video_noc_base_offs +
-			VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG2_HIGH_OFFS);
-	dprintk(CVP_ERR, "CORE%d_NOC_ERR_ERRLOG2_HIGH: %#x\n", core_num, val);
-	val = __read_register(device, vcodec_core_video_noc_base_offs +
-			VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG3_LOW_OFFS);
-	dprintk(CVP_ERR, "CORE%d_NOC_ERR_ERRLOG3_LOW:  %#x\n", core_num, val);
-	val = __read_register(device, vcodec_core_video_noc_base_offs +
-			VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG3_HIGH_OFFS);
-	dprintk(CVP_ERR, "CORE%d_NOC_ERR_ERRLOG3_HIGH: %#x\n", core_num, val);
-}
-
 static int venus_hfi_noc_error_info(void *dev)
 {
-	struct venus_hfi_device *device;
-	const u32 core0 = 0, core1 = 1;
-
-	if (!dev) {
-		dprintk(CVP_ERR, "%s: null device\n", __func__);
-		return -EINVAL;
-	}
-	device = dev;
-
-	mutex_lock(&device->lock);
-	dprintk(CVP_ERR, "%s: non error information\n", __func__);
-
-	if (__read_register(device, VCODEC_CORE0_VIDEO_NOC_BASE_OFFS +
-			VCODEC_COREX_VIDEO_NOC_ERR_ERRVLD_LOW_OFFS))
-		__noc_error_info(device, core0);
-
-	if (__read_register(device, VCODEC_CORE1_VIDEO_NOC_BASE_OFFS +
-			VCODEC_COREX_VIDEO_NOC_ERR_ERRVLD_LOW_OFFS))
-		__noc_error_info(device, core1);
-
-	mutex_unlock(&device->lock);
-
+	dprintk(CVP_ERR, "%s not supported yet!\n", __func__);
 	return 0;
 }
 
@@ -4598,10 +4253,7 @@
 
 void __init_cvp_ops(struct venus_hfi_device *device)
 {
-	if (device->res->vpu_ver == VPU_VERSION_4)
-		device->vpu_ops = &cvp_vpu4_ops;
-	else
-		device->vpu_ops = &cvp_vpu5_ops;
+	device->vpu_ops = &cvp_vpu5_ops;
 }
 
 static struct venus_hfi_device *__add_device(u32 device_id,
@@ -4674,6 +4326,8 @@
 	return hdevice;
 
 err_cleanup:
+	if (hdevice->venus_pm_workq)
+		destroy_workqueue(hdevice->venus_pm_workq);
 	if (hdevice->cvp_workq)
 		destroy_workqueue(hdevice->cvp_workq);
 	kfree(hdevice->response_pkt);
@@ -4743,11 +4397,7 @@
 	hdev->session_continue = venus_hfi_session_continue;
 	hdev->session_stop = venus_hfi_session_stop;
 	hdev->session_get_buf_req = venus_hfi_session_get_buf_req;
-	hdev->session_cvp_dfs_config = venus_hfi_session_cvp_dfs_config;
-	hdev->session_cvp_dfs_frame = venus_hfi_session_cvp_dfs_frame;
-	hdev->session_cvp_dme_config = venus_hfi_session_cvp_dme_config;
-	hdev->session_cvp_dme_frame = venus_hfi_session_cvp_dme_frame;
-	hdev->session_cvp_persist = venus_hfi_session_cvp_persist;
+	hdev->session_send = venus_hfi_session_send;
 	hdev->session_flush = venus_hfi_session_flush;
 	hdev->session_set_property = venus_hfi_session_set_property;
 	hdev->session_get_property = venus_hfi_session_get_property;
diff --git a/drivers/media/platform/msm/cvp/cvp_hfi.h b/drivers/media/platform/msm/cvp/cvp_hfi.h
index 8a3de61..00c7062c 100644
--- a/drivers/media/platform/msm/cvp/cvp_hfi.h
+++ b/drivers/media/platform/msm/cvp/cvp_hfi.h
@@ -80,26 +80,6 @@
 #define HFI_INDEX_EXTRADATA_OUTPUT_CROP		0x0700000F
 #define HFI_INDEX_EXTRADATA_ASPECT_RATIO	0x7F100003
 
-struct hfi_buffer_alloc_mode {
-	u32 buffer_type;
-	u32 buffer_mode;
-};
-
-
-struct hfi_index_extradata_config {
-	int enable;
-	u32 index_extra_data_id;
-};
-
-struct hfi_extradata_header {
-	u32 size;
-	u32 version;
-	u32 port_index;
-	u32 type;
-	u32 data_size;
-	u8 rg_data[1];
-};
-
 #define HFI_INTERLACE_FRAME_PROGRESSIVE					0x01
 #define HFI_INTERLACE_INTERLEAVE_FRAME_TOPFIELDFIRST	0x02
 #define HFI_INTERLACE_INTERLEAVE_FRAME_BOTTOMFIELDFIRST	0x04
@@ -222,60 +202,6 @@
 #define HFI_PROPERTY_CONFIG_VPE_OX_START				\
 	(HFI_DOMAIN_BASE_VPE + HFI_ARCH_OX_OFFSET + 0x8000)
 
-struct hfi_batch_info {
-	u32 input_batch_count;
-	u32 output_batch_count;
-};
-
-struct hfi_buffer_count_actual {
-	u32 buffer_type;
-	u32 buffer_count_actual;
-	u32 buffer_count_min_host;
-};
-
-struct hfi_buffer_size_minimum {
-	u32 buffer_type;
-	u32 buffer_size;
-};
-
-struct hfi_buffer_requirements {
-	u32 buffer_type;
-	u32 buffer_size;
-	u32 buffer_region_size;
-	u32 buffer_count_min;
-	u32 buffer_count_min_host;
-	u32 buffer_count_actual;
-	u32 contiguous;
-	u32 buffer_alignment;
-};
-
-struct hfi_data_payload {
-	u32 size;
-	u8 rg_data[1];
-};
-
-struct hfi_enable_picture {
-	u32 picture_type;
-};
-
-struct hfi_mb_error_map {
-	u32 error_map_size;
-	u8 rg_error_map[1];
-};
-
-struct hfi_metadata_pass_through {
-	int enable;
-	u32 size;
-};
-
-struct hfi_multi_view_select {
-	u32 view_index;
-};
-
-struct hfi_hybrid_hierp {
-	u32 layers;
-};
-
 #define HFI_PRIORITY_LOW		10
 #define HFI_PRIOIRTY_MEDIUM		20
 #define HFI_PRIORITY_HIGH		30
@@ -292,13 +218,6 @@
 #define HFI_RATE_CONTROL_MBR_VFR	(HFI_OX_BASE + 0x7)
 #define HFI_RATE_CONTROL_CQ		(HFI_OX_BASE + 0x8)
 
-
-struct hfi_uncompressed_plane_actual_constraints_info {
-	u32 buffer_type;
-	u32 num_planes;
-	struct hfi_uncompressed_plane_constraints rg_plane_format[1];
-};
-
 #define HFI_CMD_SYS_OX_START		\
 (HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + HFI_CMD_START_OFFSET + 0x0000)
 #define HFI_CMD_SYS_SESSION_ABORT	(HFI_CMD_SYS_OX_START + 0x001)
@@ -353,10 +272,6 @@
 	(HFI_CMD_SESSION_CVP_START + 0x00B)
 #define  HFI_CMD_SESSION_CVP_DFS_CONFIG\
 	(HFI_CMD_SESSION_CVP_START + 0x00C)
-#define  HFI_CMD_SESSION_CVP_TME_FRAME\
-	(HFI_CMD_SESSION_CVP_START + 0x00D)
-#define  HFI_CMD_SESSION_CVP_TME_CONFIG\
-	(HFI_CMD_SESSION_CVP_START + 0x00E)
 #define  HFI_CMD_SESSION_CVP_FTEXT\
 	(HFI_CMD_SESSION_CVP_START + 0x00F)
 
@@ -373,20 +288,42 @@
 	(HFI_CMD_SESSION_CVP_START + 0x014)
 
 /* ===========USECASE OPERATIONS===============*/
-#define HFI_CMD_SESSION_CVP_ODT\
-	(HFI_CMD_SESSION_CVP_START + 0x015)
-#define HFI_CMD_SESSION_CVP_PYS_HCD_FRAME\
-	(HFI_CMD_SESSION_CVP_START + 0x016)
-#define HFI_CMD_SESSION_CVP_PYS_HCD_CONFIG\
-	(HFI_CMD_SESSION_CVP_START + 0x017)
+#define  HFI_CMD_SESSION_CVP_DCM_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x034)
+#define  HFI_CMD_SESSION_CVP_DCM_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x035)
 
 #define  HFI_CMD_SESSION_CVP_DME_CONFIG\
 	(HFI_CMD_SESSION_CVP_START + 0x039)
+#define  HFI_CMD_SESSION_CVP_DME_BASIC_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x03B)
 #define  HFI_CMD_SESSION_CVP_DME_FRAME\
 	(HFI_CMD_SESSION_CVP_START + 0x03A)
 
+#define  HFI_CMD_SESSION_CVP_CV_TME_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x047)
+#define  HFI_CMD_SESSION_CVP_CV_TME_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x048)
+#define  HFI_CMD_SESSION_CVP_CV_OD_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x049)
+#define  HFI_CMD_SESSION_CVP_CV_OD_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x04A)
+#define  HFI_CMD_SESSION_CVP_CV_ODT_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x04B)
+#define  HFI_CMD_SESSION_CVP_CV_ODT_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x04C)
+
 #define  HFI_CMD_SESSION_CVP_SET_PERSIST_BUFFERS\
 	(HFI_CMD_SESSION_CVP_START + 0x04D)
+#define HFI_CMD_SESSION_CVP_PYS_HCD_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x050)
+#define HFI_CMD_SESSION_CVP_PYS_HCD_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x051)
+#define  HFI_CMD_SESSION_CVP_ICA_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x100)
+#define  HFI_CMD_SESSION_CVP_ICA_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x101)
+
 
 #define HFI_MSG_SYS_OX_START			\
 (HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + HFI_MSG_START_OFFSET + 0x0000)
@@ -449,37 +386,12 @@
 #define CVP_IFACEQ_VAR_LARGE_PKT_SIZE 512
 #define CVP_IFACEQ_VAR_HUGE_PKT_SIZE  (1024*12)
 
-
-struct hfi_cmd_sys_session_abort_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-};
-
 struct hfi_cmd_sys_ping_packet {
 	u32 size;
 	u32 packet_type;
 	u32 client_data;
 };
 
-struct hfi_cmd_session_load_resources_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-};
-
-struct hfi_cmd_session_start_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-};
-
-struct hfi_cmd_session_stop_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-};
-
 struct hfi_cmd_session_empty_buffer_compressed_packet {
 	u32 size;
 	u32 packet_type;
@@ -517,24 +429,6 @@
 	u32 rgData[1];
 };
 
-struct hfi_cmd_session_empty_buffer_uncompressed_plane1_packet {
-	u32 flags;
-	u32 alloc_len;
-	u32 filled_len;
-	u32 offset;
-	u32 packet_buffer2;
-	u32 rgData[1];
-};
-
-struct hfi_cmd_session_empty_buffer_uncompressed_plane2_packet {
-	u32 flags;
-	u32 alloc_len;
-	u32 filled_len;
-	u32 offset;
-	u32 packet_buffer3;
-	u32 rgData[1];
-};
-
 struct hfi_cmd_session_fill_buffer_packet {
 	u32 size;
 	u32 packet_type;
@@ -556,18 +450,6 @@
 	u32 flush_type;
 };
 
-struct hfi_cmd_session_suspend_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-};
-
-struct hfi_cmd_session_resume_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-};
-
 struct hfi_cmd_session_get_property_packet {
 	u32 size;
 	u32 packet_type;
@@ -581,42 +463,6 @@
 	u32 buf_size[HFI_MAX_PLANES];
 };
 
-struct hfi_cmd_session_hdr {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-};
-
-struct hfi_cmd_session_cvp_dfs_config_packet {
-	u32 cvp_internal_dfs_config[CVP_DFS_CONFIG_CMD_SIZE];
-};
-
-struct hfi_cmd_session_cvp_dfs_frame_packet {
-	u32 cvp_dfs_frame[CVP_DFS_FRAME_BUFFERS_OFFSET];
-	u32 left_view_buffer_addr;
-	u32 left_view_buffer_size;
-	u32 right_view_buffer_addr;
-	u32 right_view_buffer_size;
-	u32 disparity_map_buffer_addr;
-	u32 disparity_map_buffer_size;
-	u32 occlusion_mask_buffer_addr;
-	u32 occlusion_mask_buffer_size;
-};
-
-struct hfi_cmd_session_cvp_dme_config_packet {
-	u32 cvp_internal_dme_config[CVP_DME_CONFIG_CMD_SIZE];
-};
-
-struct hfi_cmd_session_cvp_dme_frame_packet {
-	u32 cvp_dme_frame[CVP_DME_FRAME_BUFFERS_OFFSET];
-	struct buf_desc bufs[8];
-};
-
-struct hfi_cmd_session_cvp_persist_packet {
-	u32 cvp_persist_frame[CVP_PERSIST_BUFFERS_OFFSET];
-	struct buf_desc bufs[CVP_PSRSIST_BUF_NUM];
-};
-
 struct hfi_cmd_session_release_buffer_packet {
 	u32 size;
 	u32 packet_type;
@@ -655,279 +501,6 @@
 	u32 rg_property_data[1];
 };
 
-struct hfi_msg_session_load_resources_done_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-	u32 error_type;
-};
-
-struct hfi_msg_session_start_done_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-	u32 error_type;
-};
-
-struct hfi_msg_session_stop_done_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-	u32 error_type;
-};
-
-struct hfi_msg_session_suspend_done_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-	u32 error_type;
-};
-
-struct hfi_msg_session_resume_done_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-	u32 error_type;
-};
-
-struct hfi_msg_session_flush_done_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-	u32 error_type;
-	u32 flush_type;
-};
-
-struct hfi_ubwc_cr_stats_info_type {
-	u32 cr_stats_info0;
-	u32 cr_stats_info1;
-	u32 cr_stats_info2;
-	u32 cr_stats_info3;
-	u32 cr_stats_info4;
-	u32 cr_stats_info5;
-	u32 cr_stats_info6;
-};
-
-struct hfi_frame_cr_stats_type {
-	u32 frame_index;
-	struct hfi_ubwc_cr_stats_info_type ubwc_stats_info;
-	u32 complexity_number;
-};
-
-struct hfi_msg_session_empty_buffer_done_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-	u32 error_type;
-	u32 offset;
-	u32 filled_len;
-	u32 input_tag;
-	u32 packet_buffer;
-	u32 extra_data_buffer;
-	u32 flags;
-	struct hfi_frame_cr_stats_type ubwc_cr_stats;
-	u32 rgData[0];
-};
-
-struct hfi_msg_session_fill_buffer_done_compressed_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-	u32 time_stamp_hi;
-	u32 time_stamp_lo;
-	u32 error_type;
-	u32 flags;
-	u32 mark_target;
-	u32 mark_data;
-	u32 stats;
-	u32 offset;
-	u32 alloc_len;
-	u32 filled_len;
-	u32 input_tag;
-	u32 output_tag;
-	u32 picture_type;
-	u32 packet_buffer;
-	u32 extra_data_buffer;
-	u32 rgData[0];
-};
-
-struct hfi_msg_session_fbd_uncompressed_plane0_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-	u32 stream_id;
-	u32 view_id;
-	u32 error_type;
-	u32 time_stamp_hi;
-	u32 time_stamp_lo;
-	u32 flags;
-	u32 mark_target;
-	u32 mark_data;
-	u32 stats;
-	u32 alloc_len;
-	u32 filled_len;
-	u32 offset;
-	u32 frame_width;
-	u32 frame_height;
-	u32 start_x_coord;
-	u32 start_y_coord;
-	u32 input_tag;
-	u32 input_tag2;
-	u32 output_tag;
-	u32 picture_type;
-	u32 packet_buffer;
-	u32 extra_data_buffer;
-	u32 rgData[0];
-};
-
-struct hfi_msg_session_fill_buffer_done_uncompressed_plane1_packet {
-	u32 flags;
-	u32 alloc_len;
-	u32 filled_len;
-	u32 offset;
-	u32 packet_buffer2;
-	u32 rgData[0];
-};
-
-struct hfi_msg_session_fill_buffer_done_uncompressed_plane2_packet {
-	u32 flags;
-	u32 alloc_len;
-	u32 filled_len;
-	u32 offset;
-	u32 packet_buffer3;
-	u32 rgData[0];
-};
-
-struct hfi_msg_session_property_info_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-	u32 num_properties;
-	u32 rg_property_data[1];
-};
-
-struct hfi_msg_session_release_resources_done_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-	u32 error_type;
-};
-
-struct hfi_msg_session_release_buffers_done_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-	u32 error_type;
-	u32 num_buffers;
-	u32 rg_buffer_info[1];
-};
-
-struct hfi_extradata_mb_quantization_payload {
-	u8 rg_mb_qp[1];
-};
-
-struct hfi_extradata_timestamp_payload {
-	u32 time_stamp_low;
-	u32 time_stamp_high;
-};
-
-
-struct hfi_extradata_s3d_frame_packing_payload {
-	u32 fpa_id;
-	int cancel_flag;
-	u32 fpa_type;
-	int quin_cunx_flag;
-	u32 content_interprtation_type;
-	int spatial_flipping_flag;
-	int frame0_flipped_flag;
-	int field_views_flag;
-	int current_frame_isFrame0_flag;
-	int frame0_self_contained_flag;
-	int frame1_self_contained_flag;
-	u32 frame0_graid_pos_x;
-	u32 frame0_graid_pos_y;
-	u32 frame1_graid_pos_x;
-	u32 frame1_graid_pos_y;
-	u32 fpa_reserved_byte;
-	u32 fpa_repetition_period;
-	int fpa_extension_flag;
-};
-
-struct hfi_extradata_interlace_video_payload {
-	u32 format;
-};
-
-struct hfi_conceal_color_type {
-	u32 value_8bit;
-	u32 value_10bit;
-};
-
-struct hfi_extradata_num_concealed_mb_payload {
-	u32 num_mb_concealed;
-};
-
-struct hfi_extradata_sliceinfo {
-	u32 offset_in_stream;
-	u32 slice_length;
-};
-
-struct hfi_extradata_multislice_info_payload {
-	u32 num_slices;
-	struct hfi_extradata_sliceinfo rg_slice_info[1];
-};
-
-struct hfi_index_extradata_input_crop_payload {
-	u32 size;
-	u32 version;
-	u32 port_index;
-	u32 left;
-	u32 top;
-	u32 width;
-	u32 height;
-};
-
-struct hfi_index_extradata_output_crop_payload {
-	u32 size;
-	u32 version;
-	u32 port_index;
-	u32 left;
-	u32 top;
-	u32 display_width;
-	u32 display_height;
-	u32 width;
-	u32 height;
-};
-
-struct hfi_index_extradata_digital_zoom_payload {
-	u32 size;
-	u32 version;
-	u32 port_index;
-	int width;
-	int height;
-};
-
-struct hfi_index_extradata_aspect_ratio_payload {
-	u32 size;
-	u32 version;
-	u32 port_index;
-	u32 aspect_width;
-	u32 aspect_height;
-};
-
-struct hfi_extradata_frame_type_payload {
-	u32 frame_rate;
-};
-
-struct hfi_extradata_recovery_point_sei_payload {
-	u32 flag;
-};
-
-struct hfi_cmd_session_continue_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-};
-
 enum session_flags {
 	SESSION_PAUSE = BIT(1),
 };
diff --git a/drivers/media/platform/msm/cvp/cvp_hfi_api.h b/drivers/media/platform/msm/cvp/cvp_hfi_api.h
index dae783a..dd21c40 100644
--- a/drivers/media/platform/msm/cvp/cvp_hfi_api.h
+++ b/drivers/media/platform/msm/cvp/cvp_hfi_api.h
@@ -13,6 +13,7 @@
 #include <linux/hash.h>
 #include "msm_cvp_core.h"
 #include "msm_cvp_resources.h"
+#include "cvp_hfi_helper.h"
 
 #define CONTAINS(__a, __sz, __t) (\
 	(__t >= __a) && \
@@ -56,6 +57,65 @@
 /* 16 encoder and 16 decoder sessions */
 #define CVP_MAX_SESSIONS               32
 
+#define HFI_DFS_CONFIG_CMD_SIZE	38
+#define HFI_DFS_FRAME_CMD_SIZE	16
+#define HFI_DFS_FRAME_BUFFERS_OFFSET 8
+#define HFI_DFS_BUF_NUM 4
+
+#define HFI_DME_CONFIG_CMD_SIZE	194
+#define HFI_DME_BASIC_CONFIG_CMD_SIZE	51
+#define HFI_DME_FRAME_CMD_SIZE	28
+#define HFI_DME_FRAME_BUFFERS_OFFSET 12
+#define HFI_DME_BUF_NUM 8
+
+#define HFI_PERSIST_CMD_SIZE	11
+#define HFI_PERSIST_BUFFERS_OFFSET 7
+#define HFI_PERSIST_BUF_NUM     2
+
+#define HFI_DS_CMD_SIZE	54
+#define HFI_DS_BUFFERS_OFFSET	48
+#define HFI_DS_BUF_NUM	3
+
+#define HFI_OF_CONFIG_CMD_SIZE 34
+#define HFI_OF_FRAME_CMD_SIZE 24
+#define HFI_OF_BUFFERS_OFFSET 8
+#define HFI_OF_BUF_NUM 8
+
+#define HFI_ODT_CONFIG_CMD_SIZE 23
+#define HFI_ODT_FRAME_CMD_SIZE 33
+#define HFI_ODT_BUFFERS_OFFSET 11
+#define HFI_ODT_BUF_NUM 11
+
+#define HFI_OD_CONFIG_CMD_SIZE 24
+#define HFI_OD_FRAME_CMD_SIZE 12
+#define HFI_OD_BUFFERS_OFFSET 6
+#define HFI_OD_BUF_NUM 3
+
+#define HFI_NCC_CONFIG_CMD_SIZE 47
+#define HFI_NCC_FRAME_CMD_SIZE 22
+#define HFI_NCC_BUFFERS_OFFSET 8
+#define HFI_NCC_BUF_NUM 7
+
+#define HFI_ICA_CONFIG_CMD_SIZE 127
+#define HFI_ICA_FRAME_CMD_SIZE 14
+#define HFI_ICA_BUFFERS_OFFSET 6
+#define HFI_ICA_BUF_NUM 4
+
+#define HFI_HCD_CONFIG_CMD_SIZE 46
+#define HFI_HCD_FRAME_CMD_SIZE 18
+#define HFI_HCD_BUFFERS_OFFSET 12
+#define HFI_HCD_BUF_NUM 3
+
+#define HFI_DCM_CONFIG_CMD_SIZE 20
+#define HFI_DCM_FRAME_CMD_SIZE 19
+#define HFI_DCM_BUFFERS_OFFSET 9
+#define HFI_DCM_BUF_NUM 5
+
+#define HFI_PYS_HCD_CONFIG_CMD_SIZE 661
+#define HFI_PYS_HCD_FRAME_CMD_SIZE 86
+#define HFI_PYS_HCD_BUFFERS_OFFSET 6
+#define HFI_PYS_HCD_BUF_NUM 36
+
 enum cvp_status {
 	CVP_ERR_NONE = 0x0,
 	CVP_ERR_FAIL = 0x80000000,
@@ -1082,7 +1142,7 @@
 #define IS_HAL_SESSION_CMD(cmd) ((cmd) >= HAL_SESSION_EVENT_CHANGE && \
 		(cmd) <= HAL_SESSION_ERROR)
 enum hal_command_response {
-	/* SYSTEM COMMANDS_DONE*/
+	HAL_NO_RESP,
 	HAL_SYS_INIT_DONE,
 	HAL_SYS_SET_RESOURCE_DONE,
 	HAL_SYS_RELEASE_RESOURCE_DONE,
@@ -1112,7 +1172,16 @@
 	HAL_SESSION_DFS_CONFIG_CMD_DONE,
 	HAL_SESSION_DFS_FRAME_CMD_DONE,
 	HAL_SESSION_DME_CONFIG_CMD_DONE,
+	HAL_SESSION_DME_BASIC_CONFIG_CMD_DONE,
 	HAL_SESSION_DME_FRAME_CMD_DONE,
+	HAL_SESSION_TME_CONFIG_CMD_DONE,
+	HAL_SESSION_ODT_CONFIG_CMD_DONE,
+	HAL_SESSION_OD_CONFIG_CMD_DONE,
+	HAL_SESSION_NCC_CONFIG_CMD_DONE,
+	HAL_SESSION_ICA_CONFIG_CMD_DONE,
+	HAL_SESSION_HCD_CONFIG_CMD_DONE,
+	HAL_SESSION_DCM_CONFIG_CMD_DONE,
+	HAL_SESSION_PYS_HCD_CONFIG_CMD_DONE,
 	HAL_SESSION_PERSIST_CMD_DONE,
 	HAL_SESSION_PROPERTY_INFO,
 	HAL_SESSION_ERROR,
@@ -1252,6 +1321,7 @@
 	enum cvp_status status;
 	u32 size;
 	union {
+		struct hfi_msg_session_hdr msg_hdr;
 		struct cvp_resource_hdr resource_hdr;
 		struct cvp_buffer_addr_info buffer_addr_info;
 		struct cvp_frame_plane_config frame_plane_config;
@@ -1411,73 +1481,17 @@
 	(((q) && (q)->op) ? ((q)->op(args)) : 0)
 
 /* DFS related structures */
-struct msm_cvp_internal_dfsconfig {
-	struct list_head list;
-	struct msm_smem smem;
-	struct msm_cvp_dfs_config dfs_config;
-};
-
 struct	buf_desc {
 	u32 fd;
 	u32 size;
 };
 
-/**
- * struct msm_cvp_dfs_frame_kmd - argument passed with VIDIOC_CVP_CMD
- * @cvp_dfs_frame:                parameters for DFS frame command
- * @left_view_buffer_fd:          fd for left view buffer
- * @left_view_buffer_size:        size for left view buffer
- * @right_view_buffer_fd:         fd for right view buffer
- * @right_view_buffer_size:       size for right view buffer
- * @disparity_map_buffer_fd:      fd for disparity map buffer
- * @disparity_map_buffer_size:    size for disparity map buffer
- * @occlusion_mask_buffer_fd:     fd for occlusion mask buffer
- * @occlusion_mask_buffer_size:   size for occlusion mask buffer
- */
-
-struct msm_cvp_dfs_frame_kmd {
-	unsigned int cvp_dfs_frame[CVP_DFS_FRAME_BUFFERS_OFFSET];
-	unsigned int left_view_buffer_fd;
-	unsigned int left_view_buffer_size;
-	unsigned int right_view_buffer_fd;
-	unsigned int right_view_buffer_size;
-	unsigned int disparity_map_buffer_fd;
-	unsigned int disparity_map_buffer_size;
-	unsigned int occlusion_mask_buffer_fd;
-	unsigned int occlusion_mask_buffer_size;
-};
-
-
-struct msm_cvp_internal_dfsframe {
-	struct list_head list;
-	struct msm_cvp_dfs_frame_kmd dfs_frame;
-};
-
-/* DME related structures */
-struct msm_cvp_internal_dmeconfig {
-	struct list_head list;
-	struct msm_smem smem;
-	struct msm_cvp_dme_config dme_config;
-};
-
-struct msm_cvp_dme_frame_kmd {
-	unsigned int cvp_dme_frame[CVP_DME_FRAME_BUFFERS_OFFSET];
-	struct buf_desc bufs[CVP_DME_BUF_NUM];
-};
-
-struct msm_cvp_internal_dmeframe {
-	struct list_head list;
-	struct msm_cvp_dme_frame_kmd dme_frame;
-};
-
-struct msm_cvp_persist_kmd {
-	unsigned int cvp_pcmd[CVP_PERSIST_BUFFERS_OFFSET];
-	struct buf_desc bufs[CVP_PSRSIST_BUF_NUM];
-};
-
-struct msm_cvp_internal_persist_cmd {
-	struct list_head list;
-	struct msm_cvp_persist_kmd persist_cmd;
+struct msm_cvp_hfi_defs {
+	unsigned int size;
+	unsigned int type;
+	unsigned int buf_offset;
+	unsigned int buf_num;
+	enum hal_command_response resp;
 };
 
 struct hfi_device {
@@ -1502,18 +1516,8 @@
 	int (*session_start)(void *sess);
 	int (*session_continue)(void *sess);
 	int (*session_stop)(void *sess);
-	int (*session_cvp_operation_config)(void *sess,
-		struct cvp_frame_data *input_frame);
-	int (*session_cvp_dfs_config)(void *sess,
-		struct msm_cvp_internal_dfsconfig *dfs_config);
-	int (*session_cvp_dfs_frame)(void *sess,
-		struct msm_cvp_internal_dfsframe *dfs_frame);
-	int (*session_cvp_dme_config)(void *sess,
-		struct msm_cvp_internal_dmeconfig *dme_config);
-	int (*session_cvp_dme_frame)(void *sess,
-		struct msm_cvp_internal_dmeframe *dme_frame);
-	int (*session_cvp_persist)(void *sess,
-		struct msm_cvp_internal_persist_cmd *pbuf_cmd);
+	int (*session_send)(void *sess,
+		struct cvp_kmd_hfi_packet *in_pkt);
 	int (*session_get_buf_req)(void *sess);
 	int (*session_flush)(void *sess, enum hal_flush flush_mode);
 	int (*session_set_property)(void *sess, enum hal_property ptype,
@@ -1542,9 +1546,9 @@
 		hfi_cmd_response_callback callback);
 void cvp_hfi_deinitialize(enum msm_cvp_hfi_type hfi_type,
 			struct hfi_device *hdev);
-u32 cvp_get_hfi_domain(enum hal_domain hal_domain);
-u32 cvp_get_hfi_codec(enum hal_video_codec hal_codec);
-enum hal_domain cvp_get_hal_domain(u32 hfi_domain);
-enum hal_video_codec cvp_get_hal_codec(u32 hfi_codec);
+
+int get_pkt_index(struct cvp_hal_session_cmd_pkt *hdr);
+int get_signal_from_pkt_type(unsigned int type);
+extern const struct msm_cvp_hfi_defs cvp_hfi_defs[];
 
 #endif /*__CVP_HFI_API_H__ */
diff --git a/drivers/media/platform/msm/cvp/cvp_hfi_helper.h b/drivers/media/platform/msm/cvp/cvp_hfi_helper.h
index e11a85b..4707ec1 100644
--- a/drivers/media/platform/msm/cvp/cvp_hfi_helper.h
+++ b/drivers/media/platform/msm/cvp/cvp_hfi_helper.h
@@ -1020,7 +1020,7 @@
 	u32 buffer_idx;
 };
 
-struct hfi_msg_session_cvp_release_buffers_done_packet {
+struct hfi_msg_session_hdr {
 	u32 size;
 	u32 packet_type;
 	u32 session_id;
diff --git a/drivers/media/platform/msm/cvp/hfi_packetization.c b/drivers/media/platform/msm/cvp/hfi_packetization.c
index 86da057..2a2a567 100644
--- a/drivers/media/platform/msm/cvp/hfi_packetization.c
+++ b/drivers/media/platform/msm/cvp/hfi_packetization.c
@@ -15,222 +15,6 @@
  * sensible index.
  */
 
-static int entropy_mode[] = {
-	[ilog2(HAL_H264_ENTROPY_CAVLC)] = HFI_H264_ENTROPY_CAVLC,
-	[ilog2(HAL_H264_ENTROPY_CABAC)] = HFI_H264_ENTROPY_CABAC,
-};
-
-static int statistics_mode[] = {
-	[ilog2(HAL_STATISTICS_MODE_DEFAULT)] = HFI_STATISTICS_MODE_DEFAULT,
-	[ilog2(HAL_STATISTICS_MODE_1)] = HFI_STATISTICS_MODE_1,
-	[ilog2(HAL_STATISTICS_MODE_2)] = HFI_STATISTICS_MODE_2,
-	[ilog2(HAL_STATISTICS_MODE_3)] = HFI_STATISTICS_MODE_3,
-};
-
-static int color_format[] = {
-	[ilog2(HAL_COLOR_FORMAT_MONOCHROME)] = HFI_COLOR_FORMAT_MONOCHROME,
-	[ilog2(HAL_COLOR_FORMAT_NV12)] = HFI_COLOR_FORMAT_NV12,
-	[ilog2(HAL_COLOR_FORMAT_NV21)] = HFI_COLOR_FORMAT_NV21,
-	[ilog2(HAL_COLOR_FORMAT_NV12_4x4TILE)] = HFI_COLOR_FORMAT_NV12_4x4TILE,
-	[ilog2(HAL_COLOR_FORMAT_NV21_4x4TILE)] = HFI_COLOR_FORMAT_NV21_4x4TILE,
-	[ilog2(HAL_COLOR_FORMAT_YUYV)] = HFI_COLOR_FORMAT_YUYV,
-	[ilog2(HAL_COLOR_FORMAT_YVYU)] = HFI_COLOR_FORMAT_YVYU,
-	[ilog2(HAL_COLOR_FORMAT_UYVY)] = HFI_COLOR_FORMAT_UYVY,
-	[ilog2(HAL_COLOR_FORMAT_VYUY)] = HFI_COLOR_FORMAT_VYUY,
-	[ilog2(HAL_COLOR_FORMAT_RGB565)] = HFI_COLOR_FORMAT_RGB565,
-	[ilog2(HAL_COLOR_FORMAT_BGR565)] = HFI_COLOR_FORMAT_BGR565,
-	[ilog2(HAL_COLOR_FORMAT_RGB888)] = HFI_COLOR_FORMAT_RGB888,
-	[ilog2(HAL_COLOR_FORMAT_BGR888)] = HFI_COLOR_FORMAT_BGR888,
-	/* UBWC Color formats*/
-	[ilog2(HAL_COLOR_FORMAT_NV12_UBWC)] =  HFI_COLOR_FORMAT_NV12_UBWC,
-	[ilog2(HAL_COLOR_FORMAT_NV12_TP10_UBWC)] =
-			HFI_COLOR_FORMAT_YUV420_TP10_UBWC,
-	/*P010 10bit format*/
-	[ilog2(HAL_COLOR_FORMAT_P010)] =  HFI_COLOR_FORMAT_P010,
-	[ilog2(HAL_COLOR_FORMAT_NV12_512)] = HFI_COLOR_FORMAT_NV12,
-};
-
-static int nal_type[] = {
-	[ilog2(HAL_NAL_FORMAT_STARTCODES)] = HFI_NAL_FORMAT_STARTCODES,
-	[ilog2(HAL_NAL_FORMAT_ONE_NAL_PER_BUFFER)] =
-		HFI_NAL_FORMAT_ONE_NAL_PER_BUFFER,
-	[ilog2(HAL_NAL_FORMAT_ONE_BYTE_LENGTH)] =
-		HFI_NAL_FORMAT_ONE_BYTE_LENGTH,
-	[ilog2(HAL_NAL_FORMAT_TWO_BYTE_LENGTH)] =
-		HFI_NAL_FORMAT_TWO_BYTE_LENGTH,
-	[ilog2(HAL_NAL_FORMAT_FOUR_BYTE_LENGTH)] =
-		HFI_NAL_FORMAT_FOUR_BYTE_LENGTH,
-};
-
-static inline int hal_to_hfi_type(int property, int hal_type)
-{
-	if (hal_type <= 0 || roundup_pow_of_two(hal_type) != hal_type) {
-		/*
-		 * Not a power of 2, it's not going
-		 * to be in any of the tables anyway
-		 */
-		return -EINVAL;
-	}
-
-	if (hal_type)
-		hal_type = ilog2(hal_type);
-
-	switch (property) {
-	case HAL_PARAM_VENC_H264_ENTROPY_CONTROL:
-		return (hal_type >= ARRAY_SIZE(entropy_mode)) ?
-			-ENOTSUPP : entropy_mode[hal_type];
-	case HAL_PARAM_UNCOMPRESSED_FORMAT_SELECT:
-		return (hal_type >= ARRAY_SIZE(color_format)) ?
-			-ENOTSUPP : color_format[hal_type];
-	case HAL_PARAM_NAL_STREAM_FORMAT_SELECT:
-		return (hal_type >= ARRAY_SIZE(nal_type)) ?
-			-ENOTSUPP : nal_type[hal_type];
-	case HAL_PARAM_VENC_MBI_STATISTICS_MODE:
-		return (hal_type >= ARRAY_SIZE(statistics_mode)) ?
-			-ENOTSUPP : statistics_mode[hal_type];
-	default:
-		return -ENOTSUPP;
-	}
-}
-
-enum hal_domain cvp_get_hal_domain(u32 hfi_domain)
-{
-	enum hal_domain hal_domain = 0;
-
-	switch (hfi_domain) {
-	case HFI_VIDEO_DOMAIN_VPE:
-		hal_domain = HAL_VIDEO_DOMAIN_VPE;
-		break;
-	case HFI_VIDEO_DOMAIN_ENCODER:
-		hal_domain = HAL_VIDEO_DOMAIN_ENCODER;
-		break;
-	case HFI_VIDEO_DOMAIN_DECODER:
-		hal_domain = HAL_VIDEO_DOMAIN_DECODER;
-		break;
-	case HFI_VIDEO_DOMAIN_CVP:
-		hal_domain = HAL_VIDEO_DOMAIN_CVP;
-		break;
-	default:
-		dprintk(CVP_ERR, "%s: invalid domain %x\n",
-			__func__, hfi_domain);
-		hal_domain = 0;
-		break;
-	}
-	return hal_domain;
-}
-
-enum hal_video_codec cvp_get_hal_codec(u32 hfi_codec)
-{
-	enum hal_video_codec hal_codec = 0;
-
-	switch (hfi_codec) {
-	case HFI_VIDEO_CODEC_H264:
-		hal_codec = HAL_VIDEO_CODEC_H264;
-		break;
-	case HFI_VIDEO_CODEC_MPEG1:
-		hal_codec = HAL_VIDEO_CODEC_MPEG1;
-		break;
-	case HFI_VIDEO_CODEC_MPEG2:
-		hal_codec = HAL_VIDEO_CODEC_MPEG2;
-		break;
-	case HFI_VIDEO_CODEC_VP8:
-		hal_codec = HAL_VIDEO_CODEC_VP8;
-		break;
-	case HFI_VIDEO_CODEC_HEVC:
-		hal_codec = HAL_VIDEO_CODEC_HEVC;
-		break;
-	case HFI_VIDEO_CODEC_VP9:
-		hal_codec = HAL_VIDEO_CODEC_VP9;
-		break;
-	case HFI_VIDEO_CODEC_TME:
-		hal_codec = HAL_VIDEO_CODEC_TME;
-		break;
-	case HFI_VIDEO_CODEC_CVP:
-		hal_codec = HAL_VIDEO_CODEC_CVP;
-		break;
-	default:
-		dprintk(CVP_INFO, "%s: invalid codec 0x%x\n",
-			__func__, hfi_codec);
-		hal_codec = 0;
-		break;
-	}
-	return hal_codec;
-}
-
-u32 cvp_get_hfi_domain(enum hal_domain hal_domain)
-{
-	u32 hfi_domain;
-
-	switch (hal_domain) {
-	case HAL_VIDEO_DOMAIN_VPE:
-		hfi_domain = HFI_VIDEO_DOMAIN_VPE;
-		break;
-	case HAL_VIDEO_DOMAIN_ENCODER:
-		hfi_domain = HFI_VIDEO_DOMAIN_ENCODER;
-		break;
-	case HAL_VIDEO_DOMAIN_DECODER:
-		hfi_domain = HFI_VIDEO_DOMAIN_DECODER;
-		break;
-	case HAL_VIDEO_DOMAIN_CVP:
-		hfi_domain = HFI_VIDEO_DOMAIN_CVP;
-		break;
-	default:
-		dprintk(CVP_ERR, "%s: invalid domain 0x%x\n",
-			__func__, hal_domain);
-		hfi_domain = 0;
-		break;
-	}
-	return hfi_domain;
-}
-
-u32 cvp_get_hfi_codec(enum hal_video_codec hal_codec)
-{
-	u32 hfi_codec = 0;
-
-	switch (hal_codec) {
-	case HAL_VIDEO_CODEC_H264:
-		hfi_codec = HFI_VIDEO_CODEC_H264;
-		break;
-	case HAL_VIDEO_CODEC_MPEG1:
-		hfi_codec = HFI_VIDEO_CODEC_MPEG1;
-		break;
-	case HAL_VIDEO_CODEC_MPEG2:
-		hfi_codec = HFI_VIDEO_CODEC_MPEG2;
-		break;
-	case HAL_VIDEO_CODEC_VP8:
-		hfi_codec = HFI_VIDEO_CODEC_VP8;
-		break;
-	case HAL_VIDEO_CODEC_HEVC:
-		hfi_codec = HFI_VIDEO_CODEC_HEVC;
-		break;
-	case HAL_VIDEO_CODEC_VP9:
-		hfi_codec = HFI_VIDEO_CODEC_VP9;
-		break;
-	case HAL_VIDEO_CODEC_TME:
-		hfi_codec = HFI_VIDEO_CODEC_TME;
-		break;
-	case HAL_VIDEO_CODEC_CVP:
-		hfi_codec = HFI_VIDEO_CODEC_CVP;
-		break;
-	default:
-		dprintk(CVP_INFO, "%s: invalid codec 0x%x\n",
-			__func__, hal_codec);
-		hfi_codec = 0;
-		break;
-	}
-	return hfi_codec;
-}
-
-static void create_pkt_enable(void *pkt, u32 type, bool enable)
-{
-	u32 *pkt_header = pkt;
-	u32 *pkt_type = &pkt_header[0];
-	struct hfi_enable *hfi_enable = (struct hfi_enable *)&pkt_header[1];
-
-	*pkt_type = type;
-	hfi_enable->enable = enable;
-}
-
 int cvp_create_pkt_cmd_sys_init(struct hfi_cmd_sys_init_packet *pkt,
 			   u32 arch_type)
 {
@@ -465,6 +249,7 @@
 	hfi->bank_spreading = ubwc_config->bank_spreading;
 	hfi->override_bit_info.bank_spreading_override =
 		ubwc_config->override_bit_info.bank_spreading_override;
+	hfi->size = sizeof(struct hfi_cmd_sys_set_ubwc_config_packet_type);
 
 	return rc;
 }
@@ -551,135 +336,6 @@
 	return buffer;
 }
 
-static int get_hfi_extradata_index(enum hal_extradata_id index)
-{
-	int ret = 0;
-
-	switch (index) {
-	case HAL_EXTRADATA_INTERLACE_VIDEO:
-		ret = HFI_PROPERTY_PARAM_VDEC_INTERLACE_VIDEO_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_TIMESTAMP:
-		ret = HFI_PROPERTY_PARAM_VDEC_TIMESTAMP_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_S3D_FRAME_PACKING:
-		ret = HFI_PROPERTY_PARAM_S3D_FRAME_PACKING_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_FRAME_RATE:
-		ret = HFI_PROPERTY_PARAM_VDEC_FRAME_RATE_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_PANSCAN_WINDOW:
-		ret = HFI_PROPERTY_PARAM_VDEC_PANSCAN_WNDW_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_RECOVERY_POINT_SEI:
-		ret = HFI_PROPERTY_PARAM_VDEC_RECOVERY_POINT_SEI_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_NUM_CONCEALED_MB:
-		ret = HFI_PROPERTY_PARAM_VDEC_NUM_CONCEALED_MB;
-		break;
-	case HAL_EXTRADATA_ASPECT_RATIO:
-	case HAL_EXTRADATA_OUTPUT_CROP:
-		ret = HFI_PROPERTY_PARAM_INDEX_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_MPEG2_SEQDISP:
-		ret = HFI_PROPERTY_PARAM_VDEC_MPEG2_SEQDISP_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_STREAM_USERDATA:
-		ret = HFI_PROPERTY_PARAM_VDEC_STREAM_USERDATA_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_FRAME_QP:
-		ret = HFI_PROPERTY_PARAM_VDEC_FRAME_QP_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_LTR_INFO:
-		ret = HFI_PROPERTY_PARAM_VENC_LTR_INFO;
-		break;
-	case HAL_EXTRADATA_ROI_QP:
-		ret = HFI_PROPERTY_PARAM_VENC_ROI_QP_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_MASTERING_DISPLAY_COLOUR_SEI:
-		ret =
-		HFI_PROPERTY_PARAM_VDEC_MASTERING_DISPLAY_COLOUR_SEI_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI:
-		ret = HFI_PROPERTY_PARAM_VDEC_CONTENT_LIGHT_LEVEL_SEI_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_VUI_DISPLAY_INFO:
-		ret = HFI_PROPERTY_PARAM_VUI_DISPLAY_INFO_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_VPX_COLORSPACE:
-		ret = HFI_PROPERTY_PARAM_VDEC_VPX_COLORSPACE_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_UBWC_CR_STATS_INFO:
-		ret = HFI_PROPERTY_PARAM_VDEC_UBWC_CR_STAT_INFO_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_HDR10PLUS_METADATA:
-		ret = HFI_PROPERTY_PARAM_VENC_HDR10PLUS_METADATA_EXTRADATA;
-		break;
-	default:
-		dprintk(CVP_WARN, "Extradata index not found: %d\n", index);
-		break;
-	}
-	return ret;
-}
-
-static int get_hfi_extradata_id(enum hal_extradata_id index)
-{
-	int ret = 0;
-
-	switch (index) {
-	case HAL_EXTRADATA_ASPECT_RATIO:
-		ret = MSM_CVP_EXTRADATA_ASPECT_RATIO;
-		break;
-	case HAL_EXTRADATA_OUTPUT_CROP:
-		ret = MSM_CVP_EXTRADATA_OUTPUT_CROP;
-		break;
-	default:
-		ret = get_hfi_extradata_index(index);
-		break;
-	}
-	return ret;
-}
-
-static u32 get_hfi_ltr_mode(enum ltr_mode ltr_mode_type)
-{
-	u32 ltrmode;
-
-	switch (ltr_mode_type) {
-	case HAL_LTR_MODE_DISABLE:
-		ltrmode = HFI_LTR_MODE_DISABLE;
-		break;
-	case HAL_LTR_MODE_MANUAL:
-		ltrmode = HFI_LTR_MODE_MANUAL;
-		break;
-	default:
-		dprintk(CVP_ERR, "Invalid ltr mode: %#x\n",
-			ltr_mode_type);
-		ltrmode = HFI_LTR_MODE_DISABLE;
-		break;
-	}
-	return ltrmode;
-}
-
-static u32 get_hfi_work_mode(enum hal_work_mode work_mode)
-{
-	u32 hfi_work_mode;
-
-	switch (work_mode) {
-	case CVP_WORK_MODE_1:
-		hfi_work_mode = HFI_WORKMODE_1;
-		break;
-	case CVP_WORK_MODE_2:
-		hfi_work_mode = HFI_WORKMODE_2;
-		break;
-	default:
-		dprintk(CVP_ERR, "Invalid work mode: %#x\n",
-			work_mode);
-		hfi_work_mode = HFI_WORKMODE_2;
-		break;
-	}
-	return hfi_work_mode;
-}
-
 int cvp_create_pkt_cmd_session_set_buffers(
 		struct hfi_cmd_session_cvp_set_buffers_packet *pkt,
 		struct hal_session *session,
@@ -727,113 +383,6 @@
 	return 0;
 }
 
-int cvp_create_pkt_cmd_session_etb_decoder(
-	struct hfi_cmd_session_empty_buffer_compressed_packet *pkt,
-	struct hal_session *session, struct cvp_frame_data *input_frame)
-{
-	int rc = 0;
-
-	if (!pkt || !session)
-		return -EINVAL;
-
-	pkt->size =
-		sizeof(struct hfi_cmd_session_empty_buffer_compressed_packet);
-	pkt->packet_type = HFI_CMD_SESSION_EMPTY_BUFFER;
-	pkt->session_id = hash32_ptr(session);
-	pkt->time_stamp_hi = upper_32_bits(input_frame->timestamp);
-	pkt->time_stamp_lo = lower_32_bits(input_frame->timestamp);
-	pkt->flags = input_frame->flags;
-	pkt->mark_target = input_frame->mark_target;
-	pkt->mark_data = input_frame->mark_data;
-	pkt->offset = input_frame->offset;
-	pkt->alloc_len = input_frame->alloc_len;
-	pkt->filled_len = input_frame->filled_len;
-	pkt->input_tag = input_frame->clnt_data;
-	pkt->packet_buffer = (u32)input_frame->device_addr;
-
-	trace_msm_v4l2_cvp_buffer_event_start("ETB",
-		input_frame->device_addr, input_frame->timestamp,
-		input_frame->alloc_len, input_frame->filled_len,
-		input_frame->offset);
-
-	if (!pkt->packet_buffer)
-		rc = -EINVAL;
-	return rc;
-}
-
-int cvp_create_pkt_cmd_session_etb_encoder(
-	struct hfi_cmd_session_empty_buffer_uncompressed_plane0_packet *pkt,
-	struct hal_session *session, struct cvp_frame_data *input_frame)
-{
-	int rc = 0;
-
-	if (!pkt || !session)
-		return -EINVAL;
-
-	pkt->size = sizeof(struct
-		hfi_cmd_session_empty_buffer_uncompressed_plane0_packet);
-	pkt->packet_type = HFI_CMD_SESSION_EMPTY_BUFFER;
-	pkt->session_id = hash32_ptr(session);
-	pkt->view_id = 0;
-	pkt->time_stamp_hi = upper_32_bits(input_frame->timestamp);
-	pkt->time_stamp_lo = lower_32_bits(input_frame->timestamp);
-	pkt->flags = input_frame->flags;
-	pkt->mark_target = input_frame->mark_target;
-	pkt->mark_data = input_frame->mark_data;
-	pkt->offset = input_frame->offset;
-	pkt->alloc_len = input_frame->alloc_len;
-	pkt->filled_len = input_frame->filled_len;
-	pkt->input_tag = input_frame->clnt_data;
-	pkt->packet_buffer = (u32)input_frame->device_addr;
-	pkt->extra_data_buffer = (u32)input_frame->extradata_addr;
-
-	trace_msm_v4l2_cvp_buffer_event_start("ETB",
-		input_frame->device_addr, input_frame->timestamp,
-		input_frame->alloc_len, input_frame->filled_len,
-		input_frame->offset);
-
-	if (!pkt->packet_buffer)
-		rc = -EINVAL;
-	return rc;
-}
-
-int cvp_create_pkt_cmd_session_ftb(
-		struct hfi_cmd_session_fill_buffer_packet *pkt,
-		struct hal_session *session,
-		struct cvp_frame_data *output_frame)
-{
-	int rc = 0;
-
-	if (!pkt || !session || !output_frame)
-		return -EINVAL;
-
-	pkt->size = sizeof(struct hfi_cmd_session_fill_buffer_packet);
-	pkt->packet_type = HFI_CMD_SESSION_FILL_BUFFER;
-	pkt->session_id = hash32_ptr(session);
-
-	if (output_frame->buffer_type == HAL_BUFFER_OUTPUT)
-		pkt->stream_id = 0;
-	else if (output_frame->buffer_type == HAL_BUFFER_OUTPUT2)
-		pkt->stream_id = 1;
-
-	if (!output_frame->device_addr)
-		return -EINVAL;
-
-	pkt->packet_buffer = (u32)output_frame->device_addr;
-	pkt->extra_data_buffer = (u32)output_frame->extradata_addr;
-	pkt->alloc_len = output_frame->alloc_len;
-	pkt->filled_len = output_frame->filled_len;
-	pkt->offset = output_frame->offset;
-	pkt->rgData[0] = output_frame->extradata_size;
-
-	trace_msm_v4l2_cvp_buffer_event_start("FTB",
-		output_frame->device_addr, output_frame->timestamp,
-		output_frame->alloc_len, output_frame->filled_len,
-		output_frame->offset);
-
-	return rc;
-}
-
 int cvp_create_pkt_cmd_session_get_buf_req(
 		struct hfi_cmd_session_get_property_packet *pkt,
 		struct hal_session *session)
@@ -852,197 +401,45 @@
 	return rc;
 }
 
-int cvp_create_pkt_cmd_session_cvp_dfs_config(
-		struct hfi_cmd_session_cvp_dfs_config_packet *pkt,
+int cvp_create_pkt_cmd_session_send(
+		struct cvp_kmd_hfi_packet *out_pkt,
 		struct hal_session *session,
-		struct msm_cvp_internal_dfsconfig *dfs_config)
+		struct cvp_kmd_hfi_packet *in_pkt)
 {
-	struct hfi_cmd_session_hdr *ptr =
-		(struct hfi_cmd_session_hdr *)pkt;
+	int def_idx;
+	struct cvp_hal_session_cmd_pkt *ptr =
+		(struct cvp_hal_session_cmd_pkt *)in_pkt;
 
-	if (!pkt || !session)
+	if (!out_pkt || !in_pkt || !session)
 		return -EINVAL;
 
-	memcpy(pkt, &dfs_config->dfs_config,
-		CVP_DFS_CONFIG_CMD_SIZE*sizeof(unsigned int));
-
-	if (ptr->size != CVP_DFS_CONFIG_CMD_SIZE*sizeof(unsigned int))
-		goto error_dfs_config;
-
-	if (ptr->packet_type != HFI_CMD_SESSION_CVP_DFS_CONFIG)
-		goto error_dfs_config;
-
-	if (ptr->session_id != hash32_ptr(session))
-		goto error_dfs_config;
-
-	return 0;
-
-error_dfs_config:
-	dprintk(CVP_ERR, "%s: size=%d type=%d sessionid=%d\n",
-		__func__, ptr->size, ptr->packet_type, ptr->session_id);
-
-	return -EINVAL;
-}
-
-
-int cvp_create_pkt_cmd_session_cvp_dfs_frame(
-		struct hfi_cmd_session_cvp_dfs_frame_packet *pkt,
-		struct hal_session *session,
-		struct msm_cvp_internal_dfsframe *dfs_frame)
-{
-	struct hfi_cmd_session_hdr *ptr =
-		(struct hfi_cmd_session_hdr *)pkt;
-
-	if (!pkt || !session)
-		return -EINVAL;
-
-	memcpy(pkt, &dfs_frame->dfs_frame,
-		CVP_DFS_FRAME_CMD_SIZE*sizeof(unsigned int));
-
-	if (ptr->size != CVP_DFS_FRAME_CMD_SIZE*sizeof(unsigned int))
-		goto error_dfs_frame;
-
-	if (ptr->packet_type != HFI_CMD_SESSION_CVP_DFS_FRAME)
-		goto error_dfs_frame;
-
-	if (ptr->session_id != hash32_ptr(session))
-		goto error_dfs_frame;
-
-
-	return 0;
-
-error_dfs_frame:
-	dprintk(CVP_ERR, "%s: size=%d type=%d sessionid=%d\n",
-		__func__, ptr->size, ptr->packet_type, ptr->session_id);
-
-	return -EINVAL;
-}
-
-int cvp_create_pkt_cmd_session_cvp_dme_config(
-		struct hfi_cmd_session_cvp_dme_config_packet *pkt,
-		struct hal_session *session,
-		struct msm_cvp_internal_dmeconfig *dme_config)
-{
-	struct hfi_cmd_session_hdr *ptr =
-		(struct hfi_cmd_session_hdr *)pkt;
-
-	if (!pkt || !session)
-		return -EINVAL;
-
-	memcpy(pkt, &dme_config->dme_config,
-		CVP_DME_CONFIG_CMD_SIZE*sizeof(unsigned int));
-
-	if (ptr->size != CVP_DME_CONFIG_CMD_SIZE*sizeof(unsigned int))
-		goto error_dme_config;
-
-	if (ptr->packet_type != HFI_CMD_SESSION_CVP_DME_CONFIG)
-		goto error_dme_config;
-
-	if (ptr->session_id != hash32_ptr(session))
-		goto error_dme_config;
-
-	return 0;
-
-error_dme_config:
-	dprintk(CVP_ERR, "%s: size=%d type=%d sessionid=%d\n",
-		__func__, ptr->size, ptr->packet_type, ptr->session_id);
-
-	return -EINVAL;
-}
-
-
-int cvp_create_pkt_cmd_session_cvp_dme_frame(
-		struct hfi_cmd_session_cvp_dme_frame_packet *pkt,
-		struct hal_session *session,
-		struct msm_cvp_internal_dmeframe *dme_frame)
-{
-	struct hfi_cmd_session_hdr *ptr =
-		(struct hfi_cmd_session_hdr *)pkt;
-
-	if (!pkt || !session)
-		return -EINVAL;
-
-	memcpy(pkt, &dme_frame->dme_frame,
-		CVP_DME_FRAME_CMD_SIZE*sizeof(unsigned int));
-
-	if (ptr->size != CVP_DME_FRAME_CMD_SIZE*sizeof(unsigned int))
-		goto error_dme_frame;
-
-	if (ptr->packet_type != HFI_CMD_SESSION_CVP_DME_FRAME)
-		goto error_dme_frame;
-
-	if (ptr->session_id != hash32_ptr(session))
-		goto error_dme_frame;
-
-	return 0;
-
-error_dme_frame:
-	dprintk(CVP_ERR, "%s: size=%d type=%d sessionid=%d\n",
-		__func__, ptr->size, ptr->packet_type, ptr->session_id);
-
-	return -EINVAL;
-}
-
-int cvp_create_pckt_cmd_session_cvp_persist(
-		struct hfi_cmd_session_cvp_persist_packet *pkt,
-		struct hal_session *session,
-		struct msm_cvp_internal_persist_cmd *pbuf_cmd)
-{
-	struct hfi_cmd_session_hdr *ptr =
-		(struct hfi_cmd_session_hdr *)pkt;
-
-	if (!pkt || !session)
-		return -EINVAL;
-
-	memcpy(pkt, &pbuf_cmd->persist_cmd,
-		CVP_PERSIST_CMD_SIZE*sizeof(unsigned int));
-
-	if (ptr->size != CVP_PERSIST_CMD_SIZE*sizeof(unsigned int))
-		goto error_persist;
-
-	if (ptr->packet_type != HFI_CMD_SESSION_CVP_SET_PERSIST_BUFFERS)
-		goto error_persist;
-
-	if (ptr->session_id != hash32_ptr(session))
-		goto error_persist;
-
-	return 0;
-
-error_persist:
-	dprintk(CVP_ERR, "%s: size=%d type=%d sessionid=%d\n",
-		__func__, ptr->size, ptr->packet_type, ptr->session_id);
-
-	return -EINVAL;
-
-}
-
-
-int cvp_create_pkt_cmd_session_flush(struct hfi_cmd_session_flush_packet *pkt,
-			struct hal_session *session, enum hal_flush flush_mode)
-{
-	int rc = 0;
-
-	if (!pkt || !session)
-		return -EINVAL;
-
-	pkt->size = sizeof(struct hfi_cmd_session_flush_packet);
-	pkt->packet_type = HFI_CMD_SESSION_FLUSH;
-	pkt->session_id = hash32_ptr(session);
-	switch (flush_mode) {
-	case HAL_FLUSH_INPUT:
-		pkt->flush_type = HFI_FLUSH_INPUT;
-		break;
-	case HAL_FLUSH_OUTPUT:
-		pkt->flush_type = HFI_FLUSH_OUTPUT;
-		break;
-	case HAL_FLUSH_ALL:
-		pkt->flush_type = HFI_FLUSH_ALL;
-		break;
-	default:
-		dprintk(CVP_ERR, "Invalid flush mode: %#x\n", flush_mode);
-		return -EINVAL;
+	def_idx = get_pkt_index(ptr);
+	if (def_idx < 0 && ptr->size < MAX_HFI_PKT_SIZE * sizeof(u32)) {
+		memcpy(out_pkt, in_pkt, ptr->size);
+		return 0;
 	}
-	return rc;
+
+	if (ptr->size > MAX_HFI_PKT_SIZE * sizeof(unsigned int))
+		goto error_hfi_packet;
+
+	if (cvp_hfi_defs[def_idx].type != ptr->packet_type)
+		goto error_hfi_packet;
+
+	if ((cvp_hfi_defs[def_idx].size*sizeof(unsigned int)) != ptr->size)
+		goto error_hfi_packet;
+
+	if (ptr->session_id != hash32_ptr(session))
+		goto error_hfi_packet;
+
+	memcpy(out_pkt, in_pkt, ptr->size);
+
+	return 0;
+
+error_hfi_packet:
+	dprintk(CVP_ERR, "%s incorrect packet: size=%d type=%d sessionid=%d\n",
+		__func__, ptr->size, ptr->packet_type, ptr->session_id);
+
+	return -EINVAL;
 }
 
 int cvp_create_pkt_cmd_session_get_property(
@@ -1060,1076 +457,10 @@
 		struct hal_session *session,
 		enum hal_property ptype, void *pdata)
 {
-	int rc = 0;
-
-	if (!pkt || !session)
-		return -EINVAL;
-
-	pkt->size = sizeof(struct hfi_cmd_session_set_property_packet);
-	pkt->packet_type = HFI_CMD_SESSION_SET_PROPERTY;
-	pkt->session_id = hash32_ptr(session);
-	pkt->num_properties = 1;
-
-	dprintk(CVP_DBG, "Setting HAL Property = 0x%x\n", ptype);
-
-	switch (ptype) {
-	case HAL_CONFIG_FRAME_RATE:
-	{
-		u32 buffer_type;
-		struct hfi_frame_rate *hfi;
-		struct hal_frame_rate *prop = (struct hal_frame_rate *) pdata;
-
-		pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_FRAME_RATE;
-		hfi = (struct hfi_frame_rate *) &pkt->rg_property_data[1];
-		buffer_type = get_hfi_buffer(prop->buffer_type);
-		if (buffer_type)
-			hfi->buffer_type = buffer_type;
-		else
-			return -EINVAL;
-
-		hfi->frame_rate = prop->frame_rate;
-		pkt->size += sizeof(struct hfi_frame_rate);
-		break;
-	}
-	case HAL_CONFIG_OPERATING_RATE:
-	{
-		struct hfi_operating_rate *hfi;
-		struct hal_operating_rate *prop =
-			(struct hal_operating_rate *) pdata;
-
-		pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_OPERATING_RATE;
-		hfi = (struct hfi_operating_rate *) &pkt->rg_property_data[1];
-		hfi->operating_rate = prop->operating_rate;
-		pkt->size += sizeof(struct hfi_operating_rate);
-		break;
-	}
-	case HAL_PARAM_UNCOMPRESSED_FORMAT_SELECT:
-	{
-		u32 buffer_type;
-		struct hfi_uncompressed_format_select *hfi;
-		struct hal_uncompressed_format_select *prop =
-			(struct hal_uncompressed_format_select *) pdata;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT;
-
-		hfi = (struct hfi_uncompressed_format_select *)
-					&pkt->rg_property_data[1];
-		buffer_type = get_hfi_buffer(prop->buffer_type);
-		if (buffer_type)
-			hfi->buffer_type = buffer_type;
-		else
-			return -EINVAL;
-		hfi->format = hal_to_hfi_type(
-				HAL_PARAM_UNCOMPRESSED_FORMAT_SELECT,
-				prop->format);
-		pkt->size += sizeof(struct hfi_uncompressed_format_select);
-		break;
-	}
-	case HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO:
-	{
-		struct hfi_uncompressed_plane_actual_constraints_info *hfi;
-		struct hal_uncompressed_plane_actual_constraints_info *prop =
-		(struct hal_uncompressed_plane_actual_constraints_info *) pdata;
-		u32 buffer_type;
-		u32 num_plane = prop->num_planes;
-		u32 hfi_pkt_size =
-			2 * sizeof(u32)
-			+ num_plane
-			* sizeof(struct hal_uncompressed_plane_constraints);
-
-		pkt->rg_property_data[0] =
-		HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO;
-
-		hfi = (struct hfi_uncompressed_plane_actual_constraints_info *)
-					&pkt->rg_property_data[1];
-		buffer_type = get_hfi_buffer(prop->buffer_type);
-		if (buffer_type)
-			hfi->buffer_type = buffer_type;
-		else
-			return -EINVAL;
-
-		hfi->num_planes = prop->num_planes;
-		memcpy(hfi->rg_plane_format, prop->rg_plane_format,
-			hfi->num_planes
-			*sizeof(struct hal_uncompressed_plane_constraints));
-		pkt->size += hfi_pkt_size;
-		break;
-	}
-	case HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO:
-		break;
-	case HAL_PARAM_FRAME_SIZE:
-	{
-		struct hfi_frame_size *hfi;
-		struct hal_frame_size *prop = (struct hal_frame_size *) pdata;
-		u32 buffer_type;
-
-		pkt->rg_property_data[0] = HFI_PROPERTY_PARAM_FRAME_SIZE;
-		hfi = (struct hfi_frame_size *) &pkt->rg_property_data[1];
-		buffer_type = get_hfi_buffer(prop->buffer_type);
-		if (buffer_type)
-			hfi->buffer_type = buffer_type;
-		else
-			return -EINVAL;
-
-		hfi->height = prop->height;
-		hfi->width = prop->width;
-		pkt->size += sizeof(struct hfi_frame_size);
-		break;
-	}
-	case HAL_CONFIG_REALTIME:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-			HFI_PROPERTY_CONFIG_REALTIME,
-			(((struct hal_enable *) pdata)->enable));
-		pkt->size += sizeof(u32);
-		break;
-	}
-	case HAL_PARAM_BUFFER_COUNT_ACTUAL:
-	{
-		struct hfi_buffer_count_actual *hfi;
-		struct hal_buffer_count_actual *prop =
-			(struct hal_buffer_count_actual *) pdata;
-		u32 buffer_type;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL;
-		hfi = (struct hfi_buffer_count_actual *)
-			&pkt->rg_property_data[1];
-		hfi->buffer_count_actual = prop->buffer_count_actual;
-		hfi->buffer_count_min_host = prop->buffer_count_min_host;
-
-		buffer_type = get_hfi_buffer(prop->buffer_type);
-		if (buffer_type)
-			hfi->buffer_type = buffer_type;
-		else
-			return -EINVAL;
-
-		pkt->size += sizeof(struct hfi_buffer_count_actual);
-
-		break;
-	}
-	case HAL_PARAM_NAL_STREAM_FORMAT_SELECT:
-	{
-		struct hfi_nal_stream_format_select *hfi;
-		struct hal_nal_stream_format_select *prop =
-			(struct hal_nal_stream_format_select *)pdata;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT;
-		hfi = (struct hfi_nal_stream_format_select *)
-			&pkt->rg_property_data[1];
-		dprintk(CVP_DBG, "data is :%d\n",
-				prop->nal_stream_format_select);
-		hfi->nal_stream_format_select = hal_to_hfi_type(
-				HAL_PARAM_NAL_STREAM_FORMAT_SELECT,
-				prop->nal_stream_format_select);
-		pkt->size += sizeof(struct hfi_nal_stream_format_select);
-		break;
-	}
-	case HAL_PARAM_VDEC_OUTPUT_ORDER:
-	{
-		int *data = (int *) pdata;
-
-		pkt->rg_property_data[0] =
-				HFI_PROPERTY_PARAM_VDEC_OUTPUT_ORDER;
-		switch (*data) {
-		case HAL_OUTPUT_ORDER_DECODE:
-			pkt->rg_property_data[1] = HFI_OUTPUT_ORDER_DECODE;
-			break;
-		case HAL_OUTPUT_ORDER_DISPLAY:
-			pkt->rg_property_data[1] = HFI_OUTPUT_ORDER_DISPLAY;
-			break;
-		default:
-			dprintk(CVP_ERR, "invalid output order: %#x\n",
-						  *data);
-			break;
-		}
-		pkt->size += sizeof(u32);
-		break;
-	}
-	case HAL_PARAM_VDEC_PICTURE_TYPE_DECODE:
-	{
-		struct hfi_enable_picture *hfi;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VDEC_PICTURE_TYPE_DECODE;
-		hfi = (struct hfi_enable_picture *) &pkt->rg_property_data[1];
-		hfi->picture_type =
-			((struct hfi_enable_picture *)pdata)->picture_type;
-		pkt->size += sizeof(u32);
-		break;
-	}
-	case HAL_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-			HFI_PROPERTY_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO,
-			((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(u32);
-		break;
-	}
-	case HAL_PARAM_VDEC_MULTI_STREAM:
-	{
-		struct hfi_multi_stream *hfi;
-		struct hal_multi_stream *prop =
-			(struct hal_multi_stream *) pdata;
-		u32 buffer_type;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM;
-		hfi = (struct hfi_multi_stream *) &pkt->rg_property_data[1];
-
-		buffer_type = get_hfi_buffer(prop->buffer_type);
-		if (buffer_type)
-			hfi->buffer_type = buffer_type;
-		else
-			return -EINVAL;
-		hfi->enable = prop->enable;
-		pkt->size += sizeof(struct hfi_multi_stream);
-		break;
-	}
-	case HAL_CONFIG_VDEC_MB_ERROR_MAP_REPORTING:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-			HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP_REPORTING,
-			((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(u32);
-		break;
-	}
-	case HAL_PARAM_VDEC_SYNC_FRAME_DECODE:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-			HFI_PROPERTY_PARAM_VDEC_THUMBNAIL_MODE,
-			((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(u32);
-		break;
-	}
-	case HAL_PARAM_SECURE:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-			  HFI_PROPERTY_PARAM_SECURE_SESSION,
-			  ((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(u32);
-		break;
-	}
-	case HAL_PARAM_VENC_SYNC_FRAME_SEQUENCE_HEADER:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-			HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER,
-			((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(u32);
-		break;
-	}
-	case HAL_CONFIG_VENC_REQUEST_IFRAME:
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME;
-		break;
-	case HAL_CONFIG_HEIC_FRAME_QUALITY:
-	{
-		struct hfi_heic_frame_quality *hfi;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_CONFIG_HEIC_FRAME_QUALITY;
-		hfi =
-		(struct hfi_heic_frame_quality *) &pkt->rg_property_data[1];
-		hfi->frame_quality =
-			((struct hal_heic_frame_quality *)pdata)->frame_quality;
-		pkt->size += sizeof(u32) +
-			sizeof(struct hfi_heic_frame_quality);
-		break;
-	}
-	case HAL_CONFIG_HEIC_GRID_ENABLE:
-	{
-		struct hfi_heic_grid_enable *hfi;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_CONFIG_HEIC_GRID_ENABLE;
-		hfi = (struct hfi_heic_grid_enable *) &pkt->rg_property_data[1];
-		hfi->grid_enable =
-			((struct hal_heic_grid_enable *)pdata)->grid_enable;
-		pkt->size += sizeof(u32) + sizeof(struct hfi_heic_grid_enable);
-		break;
-	}
-	case HAL_CONFIG_VENC_TARGET_BITRATE:
-	{
-		struct hfi_bitrate *hfi;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE;
-		hfi = (struct hfi_bitrate *) &pkt->rg_property_data[1];
-		hfi->bit_rate = ((struct hal_bitrate *)pdata)->bit_rate;
-		hfi->layer_id = ((struct hal_bitrate *)pdata)->layer_id;
-		pkt->size += sizeof(struct hfi_bitrate);
-		break;
-	}
-	case HAL_PARAM_PROFILE_LEVEL_CURRENT:
-	{
-		struct hfi_profile_level *hfi;
-		struct hal_profile_level *prop =
-			(struct hal_profile_level *) pdata;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT;
-		hfi = (struct hfi_profile_level *)
-			&pkt->rg_property_data[1];
-
-		/* There is an assumption here that HAL level is same as
-		 * HFI level
-		 */
-		hfi->level = prop->level;
-		hfi->profile = prop->profile;
-		if (hfi->profile <= 0) {
-			hfi->profile = HFI_H264_PROFILE_HIGH;
-			dprintk(CVP_WARN,
-					"Profile %d not supported, falling back to high\n",
-					prop->profile);
-		}
-
-		pkt->size += sizeof(struct hfi_profile_level);
-		break;
-	}
-	case HAL_PARAM_VENC_H264_ENTROPY_CONTROL:
-	{
-		struct hfi_h264_entropy_control *hfi;
-		struct hal_h264_entropy_control *prop =
-			(struct hal_h264_entropy_control *) pdata;
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL;
-		hfi = (struct hfi_h264_entropy_control *)
-			&pkt->rg_property_data[1];
-		hfi->entropy_mode = hal_to_hfi_type(
-		   HAL_PARAM_VENC_H264_ENTROPY_CONTROL,
-		   prop->entropy_mode);
-
-		hfi->cabac_model = HFI_H264_CABAC_MODEL_0;
-		pkt->size += sizeof(struct hfi_h264_entropy_control);
-		break;
-	}
-	case HAL_PARAM_VENC_RATE_CONTROL:
-	{
-		u32 *rc;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_RATE_CONTROL;
-		rc = (u32 *)pdata;
-		switch ((enum hal_rate_control) *rc) {
-		case HAL_RATE_CONTROL_OFF:
-			pkt->rg_property_data[1] = HFI_RATE_CONTROL_OFF;
-			break;
-		case HAL_RATE_CONTROL_CBR:
-			pkt->rg_property_data[1] = HFI_RATE_CONTROL_CBR_CFR;
-			break;
-		case HAL_RATE_CONTROL_VBR:
-			pkt->rg_property_data[1] = HFI_RATE_CONTROL_VBR_CFR;
-			break;
-		case HAL_RATE_CONTROL_MBR:
-			pkt->rg_property_data[1] = HFI_RATE_CONTROL_MBR_CFR;
-			break;
-		case HAL_RATE_CONTROL_CBR_VFR:
-			pkt->rg_property_data[1] = HFI_RATE_CONTROL_CBR_VFR;
-			break;
-		case HAL_RATE_CONTROL_MBR_VFR:
-			pkt->rg_property_data[1] = HFI_RATE_CONTROL_MBR_VFR;
-			break;
-		case HAL_RATE_CONTROL_CQ:
-			pkt->rg_property_data[1] = HFI_RATE_CONTROL_CQ;
-			break;
-		default:
-			dprintk(CVP_ERR,
-					"Invalid Rate control setting: %pK\n",
-					pdata);
-			break;
-		}
-		pkt->size += sizeof(u32);
-		break;
-	}
-	case HAL_PARAM_VENC_H264_DEBLOCK_CONTROL:
-	{
-		struct hfi_h264_db_control *hfi;
-		struct hal_h264_db_control *prop =
-			(struct hal_h264_db_control *) pdata;
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_H264_DEBLOCK_CONTROL;
-		hfi = (struct hfi_h264_db_control *) &pkt->rg_property_data[1];
-		switch (prop->mode) {
-		case HAL_H264_DB_MODE_DISABLE:
-			hfi->mode = HFI_H264_DB_MODE_DISABLE;
-			break;
-		case HAL_H264_DB_MODE_SKIP_SLICE_BOUNDARY:
-			hfi->mode = HFI_H264_DB_MODE_SKIP_SLICE_BOUNDARY;
-			break;
-		case HAL_H264_DB_MODE_ALL_BOUNDARY:
-			hfi->mode = HFI_H264_DB_MODE_ALL_BOUNDARY;
-			break;
-		default:
-			dprintk(CVP_ERR, "Invalid deblocking mode: %#x\n",
-						  prop->mode);
-			break;
-		}
-		hfi->slice_alpha_offset = prop->slice_alpha_offset;
-		hfi->slice_beta_offset = prop->slice_beta_offset;
-		pkt->size += sizeof(struct hfi_h264_db_control);
-		break;
-	}
-	case HAL_CONFIG_VENC_FRAME_QP:
-	{
-		struct hfi_quantization *hfi;
-		struct hal_quantization *hal_quant =
-			(struct hal_quantization *) pdata;
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_CONFIG_VENC_FRAME_QP;
-		hfi = (struct hfi_quantization *) &pkt->rg_property_data[1];
-		hfi->qp_packed = hal_quant->qpi | hal_quant->qpp << 8 |
-			hal_quant->qpb << 16;
-		hfi->layer_id = hal_quant->layer_id;
-		hfi->enable = hal_quant->enable;
-		pkt->size += sizeof(struct hfi_quantization);
-		break;
-	}
-	case HAL_PARAM_VENC_SESSION_QP_RANGE:
-	{
-		struct hfi_quantization_range *hfi;
-		struct hal_quantization_range *hal_range =
-			(struct hal_quantization_range *) pdata;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE;
-		hfi = (struct hfi_quantization_range *)
-				&pkt->rg_property_data[1];
-
-		/*
-		 * When creating the packet, pack the qp value as
-		 * 0xbbppii, where ii = qp range for I-frames,
-		 * pp = qp range for P-frames, etc.
-		 */
-		hfi->min_qp.qp_packed = hal_range->qpi_min |
-			hal_range->qpp_min << 8 |
-			hal_range->qpb_min << 16;
-		hfi->max_qp.qp_packed = hal_range->qpi_max |
-			hal_range->qpp_max << 8 |
-			hal_range->qpb_max << 16;
-		hfi->max_qp.layer_id = hal_range->layer_id;
-		hfi->min_qp.layer_id = hal_range->layer_id;
-
-		pkt->size += sizeof(struct hfi_quantization_range);
-		break;
-	}
-	case HAL_CONFIG_VENC_INTRA_PERIOD:
-	{
-		struct hfi_intra_period *hfi;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD;
-		hfi = (struct hfi_intra_period *) &pkt->rg_property_data[1];
-		memcpy(hfi, (struct hfi_intra_period *) pdata,
-				sizeof(struct hfi_intra_period));
-		pkt->size += sizeof(struct hfi_intra_period);
-
-		if (hfi->bframes) {
-			struct hfi_enable *hfi_enable;
-			u32 *prop_type;
-
-			prop_type = (u32 *)((u8 *)&pkt->rg_property_data[0] +
-				sizeof(u32) + sizeof(struct hfi_intra_period));
-			*prop_type =  HFI_PROPERTY_PARAM_VENC_ADAPTIVE_B;
-			hfi_enable = (struct hfi_enable *)(prop_type + 1);
-			hfi_enable->enable = true;
-			pkt->num_properties = 2;
-			pkt->size += sizeof(struct hfi_enable) + sizeof(u32);
-		}
-		break;
-	}
-	case HAL_CONFIG_VENC_IDR_PERIOD:
-	{
-		struct hfi_idr_period *hfi;
-
-		pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_VENC_IDR_PERIOD;
-		hfi = (struct hfi_idr_period *) &pkt->rg_property_data[1];
-		hfi->idr_period = ((struct hfi_idr_period *) pdata)->idr_period;
-		pkt->size += sizeof(u32);
-		break;
-	}
-	case HAL_PARAM_VENC_ADAPTIVE_B:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-			HFI_PROPERTY_PARAM_VENC_ADAPTIVE_B,
-			((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(struct hfi_enable);
-		break;
-	}
-	case HAL_PARAM_VDEC_CONCEAL_COLOR:
-	{
-		struct hfi_conceal_color *hfi;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VDEC_CONCEAL_COLOR;
-		hfi = (struct hfi_conceal_color *) &pkt->rg_property_data[1];
-		if (hfi) {
-			hfi->conceal_color_8bit =
-				((struct hfi_conceal_color *) pdata)->
-				conceal_color_8bit;
-			hfi->conceal_color_10bit =
-				((struct hfi_conceal_color *) pdata)->
-				conceal_color_10bit;
-		}
-		pkt->size += sizeof(struct hfi_conceal_color);
-		break;
-	}
-	case HAL_PARAM_VPE_ROTATION:
-	{
-		struct hfi_vpe_rotation_type *hfi;
-		struct hal_vpe_rotation *prop =
-			(struct hal_vpe_rotation *) pdata;
-		pkt->rg_property_data[0] = HFI_PROPERTY_PARAM_VPE_ROTATION;
-		hfi = (struct hfi_vpe_rotation_type *)&pkt->rg_property_data[1];
-		switch (prop->rotate) {
-		case 0:
-			hfi->rotation = HFI_ROTATE_NONE;
-			break;
-		case 90:
-			hfi->rotation = HFI_ROTATE_90;
-			break;
-		case 180:
-			hfi->rotation = HFI_ROTATE_180;
-			break;
-		case 270:
-			hfi->rotation = HFI_ROTATE_270;
-			break;
-		default:
-			dprintk(CVP_ERR, "Invalid rotation setting: %#x\n",
-				prop->rotate);
-			rc = -EINVAL;
-			break;
-		}
-		switch (prop->flip) {
-		case HAL_FLIP_NONE:
-			hfi->flip = HFI_FLIP_NONE;
-			break;
-		case HAL_FLIP_HORIZONTAL:
-			hfi->flip = HFI_FLIP_HORIZONTAL;
-			break;
-		case HAL_FLIP_VERTICAL:
-			hfi->flip = HFI_FLIP_VERTICAL;
-			break;
-		case HAL_FLIP_BOTH:
-			hfi->flip = HFI_FLIP_HORIZONTAL | HFI_FLIP_VERTICAL;
-			break;
-		default:
-			dprintk(CVP_ERR, "Invalid flip setting: %#x\n",
-				prop->flip);
-			rc = -EINVAL;
-			break;
-		}
-		pkt->size += sizeof(struct hfi_vpe_rotation_type);
-		break;
-	}
-	case HAL_PARAM_VENC_INTRA_REFRESH:
-	{
-		struct hfi_intra_refresh *hfi;
-		struct hal_intra_refresh *prop =
-			(struct hal_intra_refresh *) pdata;
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH;
-		hfi = (struct hfi_intra_refresh *) &pkt->rg_property_data[1];
-		hfi->mbs = 0;
-		switch (prop->mode) {
-		case HAL_INTRA_REFRESH_NONE:
-			hfi->mode = HFI_INTRA_REFRESH_NONE;
-			break;
-		case HAL_INTRA_REFRESH_CYCLIC:
-			hfi->mode = HFI_INTRA_REFRESH_CYCLIC;
-			hfi->mbs = prop->ir_mbs;
-			break;
-		case HAL_INTRA_REFRESH_RANDOM:
-			hfi->mode = HFI_INTRA_REFRESH_RANDOM;
-			hfi->mbs = prop->ir_mbs;
-			break;
-		default:
-			dprintk(CVP_ERR,
-					"Invalid intra refresh setting: %#x\n",
-					prop->mode);
-			break;
-		}
-		pkt->size += sizeof(struct hfi_intra_refresh);
-		break;
-	}
-	case HAL_PARAM_VENC_MULTI_SLICE_CONTROL:
-	{
-		struct hfi_multi_slice_control *hfi;
-		struct hal_multi_slice_control *prop =
-			(struct hal_multi_slice_control *) pdata;
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_CONTROL;
-		hfi = (struct hfi_multi_slice_control *)
-			&pkt->rg_property_data[1];
-		switch (prop->multi_slice) {
-		case HAL_MULTI_SLICE_OFF:
-			hfi->multi_slice = HFI_MULTI_SLICE_OFF;
-			break;
-		case HAL_MULTI_SLICE_BY_MB_COUNT:
-			hfi->multi_slice = HFI_MULTI_SLICE_BY_MB_COUNT;
-			break;
-		case HAL_MULTI_SLICE_BY_BYTE_COUNT:
-			hfi->multi_slice = HFI_MULTI_SLICE_BY_BYTE_COUNT;
-			break;
-		default:
-			dprintk(CVP_ERR, "Invalid slice settings: %#x\n",
-				prop->multi_slice);
-			break;
-		}
-		hfi->slice_size = prop->slice_size;
-		pkt->size += sizeof(struct
-					hfi_multi_slice_control);
-		break;
-	}
-	case HAL_PARAM_INDEX_EXTRADATA:
-	{
-		struct hfi_index_extradata_config *hfi;
-		struct hal_extradata_enable *extra = pdata;
-		int id = 0;
-
-		pkt->rg_property_data[0] =
-			get_hfi_extradata_index(extra->index);
-		hfi = (struct hfi_index_extradata_config *)
-			&pkt->rg_property_data[1];
-		hfi->enable = extra->enable;
-		id = get_hfi_extradata_id(extra->index);
-		if (id)
-			hfi->index_extra_data_id = id;
-		else {
-			dprintk(CVP_WARN,
-				"Failed to find extradata id: %d\n",
-				id);
-			rc = -EINVAL;
-		}
-		pkt->size += sizeof(struct hfi_index_extradata_config);
-		break;
-	}
-	case HAL_PARAM_VENC_SLICE_DELIVERY_MODE:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-				HFI_PROPERTY_PARAM_VENC_SLICE_DELIVERY_MODE,
-				((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(struct hfi_enable);
-		break;
-	}
-	case HAL_PARAM_VENC_VUI_TIMING_INFO:
-	{
-		struct hfi_vui_timing_info *hfi;
-		struct hal_vui_timing_info *timing_info = pdata;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_VUI_TIMING_INFO;
-
-		hfi = (struct hfi_vui_timing_info *)&pkt->rg_property_data[1];
-		hfi->enable = timing_info->enable;
-		hfi->fixed_frame_rate = timing_info->fixed_frame_rate;
-		hfi->time_scale = timing_info->time_scale;
-
-		pkt->size += sizeof(struct hfi_vui_timing_info);
-		break;
-	}
-	case HAL_PARAM_VENC_GENERATE_AUDNAL:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-				HFI_PROPERTY_PARAM_VENC_GENERATE_AUDNAL,
-				((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(struct hfi_enable);
-		break;
-	}
-	case HAL_PARAM_VENC_PRESERVE_TEXT_QUALITY:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-				HFI_PROPERTY_PARAM_VENC_PRESERVE_TEXT_QUALITY,
-				((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(struct hfi_enable);
-		break;
-	}
-	case HAL_PARAM_VENC_LTRMODE:
-	{
-		struct hfi_ltr_mode *hfi;
-		struct hal_ltr_mode *hal = pdata;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_LTRMODE;
-		hfi = (struct hfi_ltr_mode *) &pkt->rg_property_data[1];
-		hfi->ltr_mode = get_hfi_ltr_mode(hal->mode);
-		hfi->ltr_count = hal->count;
-		hfi->trust_mode = hal->trust_mode;
-		pkt->size += sizeof(struct hfi_ltr_mode);
-		break;
-	}
-	case HAL_CONFIG_VENC_USELTRFRAME:
-	{
-		struct hfi_ltr_use *hfi;
-		struct hal_ltr_use *hal = pdata;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_CONFIG_VENC_USELTRFRAME;
-		hfi = (struct hfi_ltr_use *) &pkt->rg_property_data[1];
-		hfi->frames = hal->frames;
-		hfi->ref_ltr = hal->ref_ltr;
-		hfi->use_constrnt = hal->use_constraint;
-		pkt->size += sizeof(struct hfi_ltr_use);
-		break;
-	}
-	case HAL_CONFIG_VENC_MARKLTRFRAME:
-	{
-		struct hfi_ltr_mark *hfi;
-		struct hal_ltr_mark *hal = pdata;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_CONFIG_VENC_MARKLTRFRAME;
-		hfi = (struct hfi_ltr_mark *) &pkt->rg_property_data[1];
-		hfi->mark_frame = hal->mark_frame;
-		pkt->size += sizeof(struct hfi_ltr_mark);
-		break;
-	}
-	case HAL_PARAM_VENC_HIER_P_MAX_ENH_LAYERS:
-	{
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_HIER_P_MAX_NUM_ENH_LAYER;
-		pkt->rg_property_data[1] = *(u32 *)pdata;
-		pkt->size += sizeof(u32);
-		break;
-	}
-	case HAL_CONFIG_VENC_HIER_P_NUM_FRAMES:
-	{
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_CONFIG_VENC_HIER_P_ENH_LAYER;
-		pkt->rg_property_data[1] = *(u32 *)pdata;
-		pkt->size += sizeof(u32);
-		break;
-	}
-	case HAL_PARAM_VENC_DISABLE_RC_TIMESTAMP:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-				HFI_PROPERTY_PARAM_VENC_DISABLE_RC_TIMESTAMP,
-				((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(struct hfi_enable);
-		break;
-	}
-	case HAL_PARAM_VPE_COLOR_SPACE_CONVERSION:
-	{
-		struct hfi_vpe_color_space_conversion *hfi = NULL;
-		struct hal_vpe_color_space_conversion *hal = pdata;
-
-		pkt->rg_property_data[0] =
-				HFI_PROPERTY_PARAM_VPE_COLOR_SPACE_CONVERSION;
-		hfi = (struct hfi_vpe_color_space_conversion *)
-			&pkt->rg_property_data[1];
-
-		hfi->input_color_primaries = hal->input_color_primaries;
-		if (hal->custom_matrix_enabled)
-			/* Bit Mask to enable all custom values */
-			hfi->custom_matrix_enabled = 0x7;
-		else
-			hfi->custom_matrix_enabled = 0x0;
-		memcpy(hfi->csc_matrix, hal->csc_matrix,
-				sizeof(hfi->csc_matrix));
-		memcpy(hfi->csc_bias, hal->csc_bias, sizeof(hfi->csc_bias));
-		memcpy(hfi->csc_limit, hal->csc_limit, sizeof(hfi->csc_limit));
-		pkt->size += sizeof(struct hfi_vpe_color_space_conversion);
-		break;
-	}
-	case HAL_PARAM_VENC_VPX_ERROR_RESILIENCE_MODE:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-			HFI_PROPERTY_PARAM_VENC_VPX_ERROR_RESILIENCE_MODE,
-			((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(struct hfi_enable);
-		break;
-	}
-	case HAL_CONFIG_VENC_PERF_MODE:
-	{
-		u32 hfi_perf_mode = 0;
-		enum hal_perf_mode hal_perf_mode = *(enum hal_perf_mode *)pdata;
-
-		switch (hal_perf_mode) {
-		case HAL_PERF_MODE_POWER_SAVE:
-			hfi_perf_mode = HFI_VENC_PERFMODE_POWER_SAVE;
-			break;
-		case HAL_PERF_MODE_POWER_MAX_QUALITY:
-			hfi_perf_mode = HFI_VENC_PERFMODE_MAX_QUALITY;
-			break;
-		default:
-			return -ENOTSUPP;
-		}
-
-		pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_VENC_PERF_MODE;
-		pkt->rg_property_data[1] = hfi_perf_mode;
-		pkt->size += sizeof(u32);
-		break;
-	}
-	case HAL_PARAM_VENC_HIER_P_HYBRID_MODE:
-	{
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_HIER_P_HYBRID_MODE;
-		pkt->rg_property_data[1] =
-			((struct hfi_hybrid_hierp *)pdata)->layers ?: 0xFF;
-		pkt->size += sizeof(u32) +
-			sizeof(struct hfi_hybrid_hierp);
-		break;
-	}
-	case HAL_PARAM_VENC_MBI_STATISTICS_MODE:
-	{
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_MBI_DUMPING;
-		pkt->rg_property_data[1] = hal_to_hfi_type(
-			HAL_PARAM_VENC_MBI_STATISTICS_MODE,
-				*(u32 *)pdata);
-		pkt->size += sizeof(u32);
-		break;
-	}
-	case HAL_CONFIG_VENC_BASELAYER_PRIORITYID:
-	{
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_CONFIG_VENC_BASELAYER_PRIORITYID;
-		pkt->rg_property_data[1] = *(u32 *)pdata;
-		pkt->size += sizeof(u32);
-		break;
-	}
-	case HAL_PROPERTY_PARAM_VENC_ASPECT_RATIO:
-	{
-		struct hfi_aspect_ratio *hfi = NULL;
-		struct hal_aspect_ratio *hal = pdata;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_ASPECT_RATIO;
-		hfi = (struct hfi_aspect_ratio *)
-			&pkt->rg_property_data[1];
-		memcpy(hfi, hal,
-			sizeof(struct hfi_aspect_ratio));
-		pkt->size += sizeof(struct hfi_aspect_ratio);
-		break;
-	}
-	case HAL_PARAM_VENC_BITRATE_TYPE:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-			HFI_PROPERTY_PARAM_VENC_BITRATE_TYPE,
-			((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(struct hfi_enable);
-		break;
-	}
-	case HAL_PARAM_VENC_H264_TRANSFORM_8x8:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-			HFI_PROPERTY_PARAM_VENC_H264_8X8_TRANSFORM,
-			((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(struct hfi_enable);
-		break;
-	}
-	case HAL_PARAM_VENC_VIDEO_SIGNAL_INFO:
-	{
-		struct hal_video_signal_info *hal = pdata;
-		struct hfi_video_signal_metadata *signal_info =
-			(struct hfi_video_signal_metadata *)
-			&pkt->rg_property_data[1];
-
-		signal_info->enable = true;
-		signal_info->video_format = MSM_CVP_NTSC;
-		signal_info->video_full_range = hal->full_range;
-		signal_info->color_description = MSM_CVP_COLOR_DESC_PRESENT;
-		signal_info->color_primaries = hal->color_space;
-		signal_info->transfer_characteristics = hal->transfer_chars;
-		signal_info->matrix_coeffs = hal->matrix_coeffs;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_VIDEO_SIGNAL_INFO;
-		pkt->size += sizeof(*signal_info);
-		break;
-	}
-	case HAL_PARAM_VENC_IFRAMESIZE_TYPE:
-	{
-		enum hal_iframesize_type hal =
-			*(enum hal_iframesize_type *)pdata;
-		struct hfi_iframe_size *hfi = (struct hfi_iframe_size *)
-			&pkt->rg_property_data[1];
-
-		switch (hal) {
-		case HAL_IFRAMESIZE_TYPE_DEFAULT:
-			hfi->type = HFI_IFRAME_SIZE_DEFAULT;
-			break;
-		case HAL_IFRAMESIZE_TYPE_MEDIUM:
-			hfi->type = HFI_IFRAME_SIZE_MEDIUM;
-			break;
-		case HAL_IFRAMESIZE_TYPE_HUGE:
-			hfi->type = HFI_IFRAME_SIZE_HIGH;
-			break;
-		case HAL_IFRAMESIZE_TYPE_UNLIMITED:
-			hfi->type = HFI_IFRAME_SIZE_UNLIMITED;
-			break;
-		default:
-			return -ENOTSUPP;
-		}
-		pkt->rg_property_data[0] = HFI_PROPERTY_PARAM_VENC_IFRAMESIZE;
-		pkt->size += sizeof(struct hfi_iframe_size);
-		break;
-	}
-	case HAL_PARAM_BUFFER_SIZE_MINIMUM:
-	{
-		struct hfi_buffer_size_minimum *hfi;
-		struct hal_buffer_size_minimum *prop =
-			(struct hal_buffer_size_minimum *) pdata;
-		u32 buffer_type;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_BUFFER_SIZE_MINIMUM;
-
-		hfi = (struct hfi_buffer_size_minimum *)
-			&pkt->rg_property_data[1];
-		hfi->buffer_size = prop->buffer_size;
-
-		buffer_type = get_hfi_buffer(prop->buffer_type);
-		if (buffer_type)
-			hfi->buffer_type = buffer_type;
-		else
-			return -EINVAL;
-
-		pkt->size += sizeof(struct hfi_buffer_size_minimum);
-		break;
-	}
-	case HAL_PARAM_SYNC_BASED_INTERRUPT:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-			HFI_PROPERTY_PARAM_SYNC_BASED_INTERRUPT,
-			((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(struct hfi_enable);
-		break;
-	}
-	case HAL_PARAM_VENC_LOW_LATENCY:
-	{
-		struct hfi_enable *hfi;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_LOW_LATENCY_MODE;
-		hfi = (struct hfi_enable *) &pkt->rg_property_data[1];
-		hfi->enable = ((struct hal_enable *) pdata)->enable;
-		pkt->size += sizeof(u32);
-		break;
-	}
-	case HAL_CONFIG_VENC_BLUR_RESOLUTION:
-	{
-		struct hfi_frame_size *hfi;
-		struct hal_frame_size *prop = (struct hal_frame_size *) pdata;
-		u32 buffer_type;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_CONFIG_VENC_BLUR_FRAME_SIZE;
-		hfi = (struct hfi_frame_size *) &pkt->rg_property_data[1];
-		buffer_type = get_hfi_buffer(prop->buffer_type);
-		if (buffer_type)
-			hfi->buffer_type = buffer_type;
-		else
-			return -EINVAL;
-
-		hfi->height = prop->height;
-		hfi->width = prop->width;
-		pkt->size += sizeof(struct hfi_frame_size);
-		break;
-	}
-	case HAL_PARAM_VIDEO_CORES_USAGE:
-	{
-		struct hal_videocores_usage_info *hal = pdata;
-		struct hfi_videocores_usage_type *core_info =
-			(struct hfi_videocores_usage_type *)
-			&pkt->rg_property_data[1];
-
-		core_info->video_core_enable_mask = hal->video_core_enable_mask;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE;
-		pkt->size += sizeof(*core_info);
-		break;
-	}
-	case HAL_PARAM_VIDEO_WORK_MODE:
-	{
-		struct hal_video_work_mode *hal = pdata;
-		struct hfi_video_work_mode *work_mode =
-			(struct hfi_video_work_mode *)
-			&pkt->rg_property_data[1];
-
-		work_mode->video_work_mode = get_hfi_work_mode(
-						hal->video_work_mode);
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_WORK_MODE;
-		pkt->size += sizeof(*work_mode);
-		break;
-	}
-	case HAL_PARAM_VIDEO_WORK_ROUTE:
-	{
-		struct hal_video_work_route *hal = pdata;
-		struct hfi_video_work_route *prop =
-			(struct hfi_video_work_route *)
-			&pkt->rg_property_data[1];
-		prop->video_work_route =
-			hal->video_work_route;
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_WORK_ROUTE;
-		pkt->size += sizeof(*prop);
-		break;
-	}
-	case HAL_PARAM_VENC_HDR10_PQ_SEI:
-	{
-		struct hfi_hdr10_pq_sei *hfi;
-		struct hal_hdr10_pq_sei *prop =
-			(struct hal_hdr10_pq_sei *) pdata;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_HDR10_PQ_SEI;
-		hfi = (struct hfi_hdr10_pq_sei *)
-			&pkt->rg_property_data[1];
-
-		memcpy(hfi, prop, sizeof(*hfi));
-		pkt->size += sizeof(struct hfi_hdr10_pq_sei);
-		break;
-	}
-	case HAL_CONFIG_VENC_VBV_HRD_BUF_SIZE:
-	{
-		struct hfi_vbv_hdr_buf_size *hfi;
-		struct hal_vbv_hdr_buf_size *prop =
-			(struct hal_vbv_hdr_buf_size *) pdata;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_CONFIG_VENC_VBV_HRD_BUF_SIZE;
-		hfi = (struct hfi_vbv_hdr_buf_size *)
-			&pkt->rg_property_data[1];
-
-		hfi->vbv_hdr_buf_size = prop->vbv_hdr_buf_size;
-		pkt->size += sizeof(struct hfi_vbv_hdr_buf_size);
-		break;
-	}
-	/* FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET */
-	case HAL_CONFIG_BUFFER_REQUIREMENTS:
-	case HAL_CONFIG_PRIORITY:
-	case HAL_CONFIG_BATCH_INFO:
-	case HAL_PARAM_METADATA_PASS_THROUGH:
-	case HAL_SYS_IDLE_INDICATOR:
-	case HAL_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED:
-	case HAL_PARAM_INTERLACE_FORMAT_SUPPORTED:
-	case HAL_PARAM_CHROMA_SITE:
-	case HAL_PARAM_PROPERTIES_SUPPORTED:
-	case HAL_PARAM_PROFILE_LEVEL_SUPPORTED:
-	case HAL_PARAM_CAPABILITY_SUPPORTED:
-	case HAL_PARAM_NAL_STREAM_FORMAT_SUPPORTED:
-	case HAL_PARAM_MULTI_VIEW_FORMAT:
-	case HAL_PARAM_MAX_SEQUENCE_HEADER_SIZE:
-	case HAL_PARAM_CODEC_SUPPORTED:
-	case HAL_PARAM_VDEC_MULTI_VIEW_SELECT:
-	case HAL_PARAM_VDEC_MB_QUANTIZATION:
-	case HAL_PARAM_VDEC_NUM_CONCEALED_MB:
-	case HAL_PARAM_VDEC_H264_ENTROPY_SWITCHING:
-	case HAL_CONFIG_BUFFER_COUNT_ACTUAL:
-	case HAL_CONFIG_VDEC_MULTI_STREAM:
-	case HAL_PARAM_VENC_MULTI_SLICE_INFO:
-	case HAL_CONFIG_VENC_TIMESTAMP_SCALE:
-	default:
-		dprintk(CVP_ERR, "DEFAULT: Calling %#x\n", ptype);
-		rc = -ENOTSUPP;
-		break;
-	}
-	return rc;
+	/* Currently no set property is supported */
+	dprintk(CVP_ERR, "%s cmd:%#x not supported\n", __func__,
+			ptype);
+	return -EINVAL;
 }
 
 static int get_hfi_ssr_type(enum hal_ssr_trigger_type type)
@@ -2180,22 +511,6 @@
 	return 0;
 }
 
-int cvp_create_pkt_cmd_session_sync_process(
-		struct hfi_cmd_session_sync_process_packet *pkt,
-		struct hal_session *session)
-{
-	if (!pkt || !session)
-		return -EINVAL;
-
-	*pkt = (struct hfi_cmd_session_sync_process_packet) {0};
-	pkt->size = sizeof(*pkt);
-	pkt->packet_type = HFI_CMD_SESSION_SYNC;
-	pkt->session_id = hash32_ptr(session);
-	pkt->sync_id = 0;
-
-	return 0;
-}
-
 static struct hfi_packetization_ops hfi_default = {
 	.sys_init = cvp_create_pkt_cmd_sys_init,
 	.sys_pc_prep = cvp_create_pkt_cmd_sys_pc_prep,
@@ -2214,23 +529,10 @@
 		cvp_create_pkt_cmd_session_set_buffers,
 	.session_release_buffers =
 		cvp_create_pkt_cmd_session_release_buffers,
-	.session_etb_decoder = cvp_create_pkt_cmd_session_etb_decoder,
-	.session_etb_encoder = cvp_create_pkt_cmd_session_etb_encoder,
-	.session_ftb = cvp_create_pkt_cmd_session_ftb,
 	.session_get_buf_req = cvp_create_pkt_cmd_session_get_buf_req,
-	.session_flush = cvp_create_pkt_cmd_session_flush,
 	.session_get_property = cvp_create_pkt_cmd_session_get_property,
 	.session_set_property = cvp_create_pkt_cmd_session_set_property,
-	.session_cvp_dfs_config =
-		cvp_create_pkt_cmd_session_cvp_dfs_config,
-	.session_cvp_dfs_frame =
-		cvp_create_pkt_cmd_session_cvp_dfs_frame,
-	.session_cvp_dme_config =
-		cvp_create_pkt_cmd_session_cvp_dme_config,
-	.session_cvp_dme_frame =
-		cvp_create_pkt_cmd_session_cvp_dme_frame,
-	.session_cvp_persist =
-		cvp_create_pckt_cmd_session_cvp_persist,
+	.session_send = cvp_create_pkt_cmd_session_send,
 };
 
 struct hfi_packetization_ops *cvp_hfi_get_pkt_ops_handle(
diff --git a/drivers/media/platform/msm/cvp/hfi_packetization.h b/drivers/media/platform/msm/cvp/hfi_packetization.h
index aac76dc..47af610 100644
--- a/drivers/media/platform/msm/cvp/hfi_packetization.h
+++ b/drivers/media/platform/msm/cvp/hfi_packetization.h
@@ -79,26 +79,10 @@
 	int (*session_sync_process)(
 		struct hfi_cmd_session_sync_process_packet *pkt,
 		struct hal_session *session);
-	int (*session_cvp_dfs_config)(
-			struct hfi_cmd_session_cvp_dfs_config_packet *pkt,
+	int (*session_send)(
+			struct cvp_kmd_hfi_packet *out_pkt,
 			struct hal_session *session,
-			struct msm_cvp_internal_dfsconfig *dfs_config);
-	int (*session_cvp_dfs_frame)(
-			struct hfi_cmd_session_cvp_dfs_frame_packet *pkt,
-			struct hal_session *session,
-			struct msm_cvp_internal_dfsframe *dfs_frame);
-	int (*session_cvp_dme_config)(
-			struct hfi_cmd_session_cvp_dme_config_packet *pkt,
-			struct hal_session *session,
-			struct msm_cvp_internal_dmeconfig *dme_config);
-	int (*session_cvp_dme_frame)(
-			struct hfi_cmd_session_cvp_dme_frame_packet *pkt,
-			struct hal_session *session,
-			struct msm_cvp_internal_dmeframe *dme_frame);
-	int (*session_cvp_persist)(
-			struct hfi_cmd_session_cvp_persist_packet *pkt,
-			struct hal_session *session,
-			struct msm_cvp_internal_persist_cmd *pbuf_cmd);
+			struct cvp_kmd_hfi_packet *in_pkt);
 };
 
 struct hfi_packetization_ops *cvp_hfi_get_pkt_ops_handle(
diff --git a/drivers/media/platform/msm/cvp/hfi_response_handler.c b/drivers/media/platform/msm/cvp/hfi_response_handler.c
index d0ea374..1c40c16 100644
--- a/drivers/media/platform/msm/cvp/hfi_response_handler.c
+++ b/drivers/media/platform/msm/cvp/hfi_response_handler.c
@@ -14,6 +14,9 @@
 #include "cvp_hfi_io.h"
 #include "msm_cvp_debug.h"
 #include "cvp_hfi.h"
+#include "msm_cvp_common.h"
+
+extern struct msm_cvp_drv *cvp_driver;
 
 static enum cvp_status hfi_map_err_status(u32 hfi_err)
 {
@@ -80,201 +83,6 @@
 	return cvp_err;
 }
 
-static int get_hal_pixel_depth(u32 hfi_bit_depth)
-{
-	switch (hfi_bit_depth) {
-	case HFI_BITDEPTH_8: return MSM_CVP_BIT_DEPTH_8;
-	case HFI_BITDEPTH_9:
-	case HFI_BITDEPTH_10: return MSM_CVP_BIT_DEPTH_10;
-	}
-	dprintk(CVP_ERR, "Unsupported bit depth: %d\n", hfi_bit_depth);
-	return MSM_CVP_BIT_DEPTH_UNSUPPORTED;
-}
-
-static int hfi_process_sess_evt_seq_changed(u32 device_id,
-		struct hfi_msg_event_notify_packet *pkt,
-		struct msm_cvp_cb_info *info)
-{
-	struct msm_cvp_cb_event event_notify = {0};
-	int num_properties_changed;
-	struct hfi_frame_size *frame_sz;
-	struct hfi_profile_level *profile_level;
-	struct hfi_bit_depth *pixel_depth;
-	struct hfi_pic_struct *pic_struct;
-	struct hfi_buffer_requirements *buf_req;
-	struct hfi_index_extradata_input_crop_payload *crop_info;
-	u32 entropy_mode = 0;
-	u8 *data_ptr;
-	int prop_id;
-	int luma_bit_depth, chroma_bit_depth;
-	struct hfi_colour_space *colour_info;
-
-	if (sizeof(struct hfi_msg_event_notify_packet) > pkt->size) {
-		dprintk(CVP_ERR,
-				"hal_process_session_init_done: bad_pkt_size\n");
-		return -E2BIG;
-	}
-
-	event_notify.device_id = device_id;
-	event_notify.session_id = (void *)(uintptr_t)pkt->session_id;
-	event_notify.status = CVP_ERR_NONE;
-	num_properties_changed = pkt->event_data2;
-	switch (pkt->event_data1) {
-	case HFI_EVENT_DATA_SEQUENCE_CHANGED_SUFFICIENT_BUFFER_RESOURCES:
-		event_notify.hal_event_type =
-			HAL_EVENT_SEQ_CHANGED_SUFFICIENT_RESOURCES;
-		break;
-	case HFI_EVENT_DATA_SEQUENCE_CHANGED_INSUFFICIENT_BUFFER_RESOURCES:
-		event_notify.hal_event_type =
-			HAL_EVENT_SEQ_CHANGED_INSUFFICIENT_RESOURCES;
-		break;
-	default:
-		break;
-	}
-
-	if (num_properties_changed) {
-		data_ptr = (u8 *) &pkt->rg_ext_event_data[0];
-		do {
-			prop_id = (int) *((u32 *)data_ptr);
-			switch (prop_id) {
-			case HFI_PROPERTY_PARAM_FRAME_SIZE:
-				data_ptr = data_ptr + sizeof(u32);
-				frame_sz =
-					(struct hfi_frame_size *) data_ptr;
-				event_notify.width = frame_sz->width;
-				event_notify.height = frame_sz->height;
-				dprintk(CVP_DBG, "height: %d width: %d\n",
-					frame_sz->height, frame_sz->width);
-				data_ptr +=
-					sizeof(struct hfi_frame_size);
-				break;
-			case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT:
-				data_ptr = data_ptr + sizeof(u32);
-				profile_level =
-					(struct hfi_profile_level *) data_ptr;
-				event_notify.profile = profile_level->profile;
-				event_notify.level = profile_level->level;
-				dprintk(CVP_DBG, "profile: %d level: %d\n",
-					profile_level->profile,
-					profile_level->level);
-				data_ptr +=
-					sizeof(struct hfi_profile_level);
-				break;
-			case HFI_PROPERTY_PARAM_VDEC_PIXEL_BITDEPTH:
-				data_ptr = data_ptr + sizeof(u32);
-				pixel_depth = (struct hfi_bit_depth *) data_ptr;
-				/*
-				 * Luma and chroma can have different bitdepths.
-				 * Driver should rely on luma and chroma
-				 * bitdepth for determining output bitdepth
-				 * type.
-				 *
-				 * pixel_depth->bitdepth will include luma
-				 * bitdepth info in bits 0..15 and chroma
-				 * bitdept in bits 16..31.
-				 */
-				luma_bit_depth = get_hal_pixel_depth(
-					pixel_depth->bit_depth &
-					GENMASK(15, 0));
-				chroma_bit_depth = get_hal_pixel_depth(
-					(pixel_depth->bit_depth &
-					GENMASK(31, 16)) >> 16);
-				if (luma_bit_depth == MSM_CVP_BIT_DEPTH_10 ||
-					chroma_bit_depth ==
-						MSM_CVP_BIT_DEPTH_10)
-					event_notify.bit_depth =
-						MSM_CVP_BIT_DEPTH_10;
-				else
-					event_notify.bit_depth = luma_bit_depth;
-				dprintk(CVP_DBG,
-					"bitdepth(%d), luma_bit_depth(%d), chroma_bit_depth(%d)\n",
-					event_notify.bit_depth, luma_bit_depth,
-					chroma_bit_depth);
-				data_ptr += sizeof(struct hfi_bit_depth);
-				break;
-			case HFI_PROPERTY_PARAM_VDEC_PIC_STRUCT:
-				data_ptr = data_ptr + sizeof(u32);
-				pic_struct = (struct hfi_pic_struct *) data_ptr;
-				event_notify.pic_struct =
-					pic_struct->progressive_only;
-				dprintk(CVP_DBG,
-					"Progressive only flag: %d\n",
-						pic_struct->progressive_only);
-				data_ptr +=
-					sizeof(struct hfi_pic_struct);
-				break;
-			case HFI_PROPERTY_PARAM_VDEC_COLOUR_SPACE:
-				data_ptr = data_ptr + sizeof(u32);
-				colour_info =
-					(struct hfi_colour_space *) data_ptr;
-				event_notify.colour_space =
-					colour_info->colour_space;
-				dprintk(CVP_DBG,
-					"Colour space value is: %d\n",
-						colour_info->colour_space);
-				data_ptr +=
-					sizeof(struct hfi_colour_space);
-				break;
-			case HFI_PROPERTY_CONFIG_VDEC_ENTROPY:
-				data_ptr = data_ptr + sizeof(u32);
-				entropy_mode = *(u32 *)data_ptr;
-				event_notify.entropy_mode = entropy_mode;
-				dprintk(CVP_DBG,
-					"Entropy Mode: 0x%x\n", entropy_mode);
-				data_ptr +=
-					sizeof(u32);
-				break;
-			case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS:
-				data_ptr = data_ptr + sizeof(u32);
-				buf_req =
-					(struct hfi_buffer_requirements *)
-						data_ptr;
-				event_notify.capture_buf_count =
-					buf_req->buffer_count_min;
-				dprintk(CVP_DBG,
-					"Capture Count : 0x%x\n",
-						event_notify.capture_buf_count);
-				data_ptr +=
-					sizeof(struct hfi_buffer_requirements);
-				break;
-			case HFI_INDEX_EXTRADATA_INPUT_CROP:
-				data_ptr = data_ptr + sizeof(u32);
-				crop_info = (struct
-				hfi_index_extradata_input_crop_payload *)
-						data_ptr;
-				event_notify.crop_data.left = crop_info->left;
-				event_notify.crop_data.top = crop_info->top;
-				event_notify.crop_data.width = crop_info->width;
-				event_notify.crop_data.height =
-					crop_info->height;
-				dprintk(CVP_DBG,
-					"CROP info : Left = %d Top = %d\n",
-						crop_info->left,
-						crop_info->top);
-				dprintk(CVP_DBG,
-					"CROP info : Width = %d Height = %d\n",
-						crop_info->width,
-						crop_info->height);
-				data_ptr +=
-					sizeof(struct
-					hfi_index_extradata_input_crop_payload);
-				break;
-			default:
-				dprintk(CVP_ERR,
-					"%s cmd: %#x not supported\n",
-					__func__, prop_id);
-				break;
-			}
-			num_properties_changed--;
-		} while (num_properties_changed > 0);
-	}
-
-	info->response_type = HAL_SESSION_EVENT_CHANGE;
-	info->response.event = event_notify;
-
-	return 0;
-}
-
 static int hfi_process_evt_release_buffer_ref(u32 device_id,
 		struct hfi_msg_event_notify_packet *pkt,
 		struct msm_cvp_cb_info *info)
@@ -374,9 +182,9 @@
 		return hfi_process_session_error(device_id, pkt, info);
 
 	case HFI_EVENT_SESSION_SEQUENCE_CHANGED:
-		dprintk(CVP_INFO, "HFI_EVENT_SESSION_SEQUENCE_CHANGED[%#x]\n",
+		dprintk(CVP_WARN, "HFI_EVENT_SESSION_SEQUENCE_CHANGED [%#x]\n",
 			pkt->session_id);
-		return hfi_process_sess_evt_seq_changed(device_id, pkt, info);
+		return 0;
 
 	case HFI_EVENT_RELEASE_BUFFER_REFERENCE:
 		dprintk(CVP_INFO, "HFI_EVENT_RELEASE_BUFFER_REFERENCE[%#x]\n",
@@ -823,12 +631,12 @@
 
 
 static int hfi_process_session_rel_buf_done(u32 device_id,
-		struct hfi_msg_session_cvp_release_buffers_done_packet *pkt,
+		struct hfi_msg_session_hdr *pkt,
 		struct msm_cvp_cb_info *info)
 {
 	struct msm_cvp_cb_cmd_done cmd_done = {0};
 	unsigned int pkt_size =
-		sizeof(struct hfi_msg_session_cvp_release_buffers_done_packet);
+		sizeof(struct hfi_msg_session_hdr);
 
 	if (!pkt || pkt->size < pkt_size) {
 		dprintk(CVP_ERR, "bad packet/packet size %d\n",
@@ -854,6 +662,7 @@
 	struct msm_cvp_cb_info *info)
 {
 	struct msm_cvp_cb_cmd_done cmd_done = {0};
+	int signal;
 
 	if (!pkt) {
 		dprintk(CVP_ERR, "%s: invalid param\n", __func__);
@@ -873,18 +682,14 @@
 		"%s: device_id=%d status=%d, sessionid=%x config=%x\n",
 		__func__, device_id, cmd_done.status,
 		cmd_done.session_id, pkt->op_conf_id);
-	switch (pkt->op_conf_id) {
-	case HFI_CMD_SESSION_CVP_DFS_CONFIG:
-	info->response_type = HAL_SESSION_DFS_CONFIG_CMD_DONE;
-		break;
-	case HFI_CMD_SESSION_CVP_DME_CONFIG:
-		info->response_type = HAL_SESSION_DME_CONFIG_CMD_DONE;
-		break;
-	default:
+
+	signal = get_signal_from_pkt_type(pkt->op_conf_id);
+	if (signal < 0) {
 		dprintk(CVP_ERR, "%s Invalid op config id\n", __func__);
 		return -EINVAL;
 	}
 
+	info->response_type = signal;
 	info->response.cmd = cmd_done;
 	return 0;
 }
@@ -918,6 +723,84 @@
 	return 0;
 }
 
+static struct msm_cvp_inst *cvp_get_inst_from_id(struct msm_cvp_core *core,
+	void *session_id)
+{
+	struct msm_cvp_inst *inst = NULL;
+	bool match = false;
+
+	if (!core || !session_id)
+		return NULL;
+
+	mutex_lock(&core->lock);
+	list_for_each_entry(inst, &core->instances, list) {
+		if (hash32_ptr(inst->session) == (unsigned int)session_id) {
+			match = true;
+			break;
+		}
+	}
+
+	inst = match ? inst : NULL;
+	mutex_unlock(&core->lock);
+
+	return inst;
+
+}
+
+static int hfi_process_session_cvp_msg(u32 device_id,
+	struct hfi_msg_session_hdr *pkt,
+	struct msm_cvp_cb_info *info)
+{
+	struct session_msg *sess_msg;
+	struct msm_cvp_inst *inst = NULL;
+	struct msm_cvp_core *core;
+	void *session_id;
+
+	if (!pkt) {
+		dprintk(CVP_ERR, "%s: invalid param\n", __func__);
+		return -EINVAL;
+	} else if (pkt->size > MAX_HFI_PKT_SIZE * sizeof(unsigned int)) {
+		dprintk(CVP_ERR, "%s: bad_pkt_size %d\n", __func__, pkt->size);
+		return -E2BIG;
+	}
+	session_id = (void *)(uintptr_t)pkt->session_id;
+	core = list_first_entry(&cvp_driver->cores, struct msm_cvp_core, list);
+	inst = cvp_get_inst_from_id(core, session_id);
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s: invalid session\n", __func__);
+		return -EINVAL;
+	}
+
+	sess_msg = kmem_cache_alloc(inst->session_queue.msg_cache, GFP_KERNEL);
+	if (sess_msg == NULL) {
+		dprintk(CVP_ERR, "%s runs out msg cache memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	memcpy(&sess_msg->pkt, pkt, sizeof(struct hfi_msg_session_hdr));
+
+	spin_lock(&inst->session_queue.lock);
+	if (inst->session_queue.msg_count >= MAX_NUM_MSGS_PER_SESSION) {
+		dprintk(CVP_ERR, "Reached session queue size limit\n");
+		goto error_handle_msg;
+	}
+	list_add_tail(&sess_msg->node, &inst->session_queue.msgs);
+	inst->session_queue.msg_count++;
+	spin_unlock(&inst->session_queue.lock);
+
+	wake_up_all(&inst->session_queue.wq);
+
+	info->response_type = HAL_NO_RESP;
+
+	return 0;
+
+error_handle_msg:
+	spin_unlock(&inst->session_queue.lock);
+	kmem_cache_free(inst->session_queue.msg_cache, sess_msg);
+	return -ENOMEM;
+}
+
 static int hfi_process_session_cvp_dme(u32 device_id,
 	struct hfi_msg_session_cvp_dme_packet_type *pkt,
 	struct msm_cvp_cb_info *info)
@@ -927,9 +810,8 @@
 	if (!pkt) {
 		dprintk(CVP_ERR, "%s: invalid param\n", __func__);
 		return -EINVAL;
-	} else if (pkt->size < sizeof(*pkt)) {
-		dprintk(CVP_ERR,
-				"%s: bad_pkt_size\n", __func__);
+	} else if (pkt->size > sizeof(*pkt)) {
+		dprintk(CVP_ERR, "%s: bad_pkt_size %d\n", __func__, pkt->size);
 		return -E2BIG;
 	}
 
@@ -1030,7 +912,7 @@
 				"%s: bad_pkt_size\n", __func__);
 		return -E2BIG;
 	} else if (!pkt->num_properties) {
-		dprintk(CVP_ERR,
+		dprintk(CVP_WARN,
 				"%s: no_properties\n", __func__);
 		return -EINVAL;
 	}
@@ -1065,7 +947,7 @@
 		return -EINVAL;
 	}
 
-	dprintk(CVP_DBG, "Parse response %#x\n", msg_hdr->packet);
+	dprintk(CVP_DBG, "Received HFI MSG with type %d\n", msg_hdr->packet);
 	switch (msg_hdr->packet) {
 	case HFI_MSG_EVENT_NOTIFY:
 		pkt_func = (pkt_func_def)hfi_process_event_notify;
@@ -1092,30 +974,25 @@
 		pkt_func = (pkt_func_def)hfi_process_session_abort_done;
 		break;
 	case HFI_MSG_SESSION_CVP_OPERATION_CONFIG:
-		dprintk(CVP_DBG,
-			"Received HFI_MSG_SESSION_CVP_OPERATION_CONFIG from firmware");
 		pkt_func =
 			(pkt_func_def)hfi_process_session_cvp_operation_config;
 		break;
 	case HFI_MSG_SESSION_CVP_DFS:
-		dprintk(CVP_DBG,
-			"Received HFI_MSG_SESSION_CVP_DFS from firmware");
 		pkt_func = (pkt_func_def)hfi_process_session_cvp_dfs;
 		break;
 	case HFI_MSG_SESSION_CVP_DME:
-		dprintk(CVP_DBG,
-			"Received HFI_MSG_SESSION_CVP_DME from firmware");
 		pkt_func = (pkt_func_def)hfi_process_session_cvp_dme;
 		break;
 	case HFI_MSG_SESSION_CVP_SET_PERSIST_BUFFERS:
-		dprintk(CVP_DBG,
-			"Received HFI_MSG_SESSION_CVP_PERSIST from firmware");
 		pkt_func = (pkt_func_def)hfi_process_session_cvp_persist;
 		break;
-
+	case HFI_MSG_SESSION_CVP_DS:
+		pkt_func = (pkt_func_def)hfi_process_session_cvp_msg;
+		break;
 	default:
 		dprintk(CVP_DBG, "Unable to parse message: %#x\n",
 				msg_hdr->packet);
+		pkt_func = (pkt_func_def)hfi_process_session_cvp_msg;
 		break;
 	}
 
diff --git a/drivers/media/platform/msm/cvp/msm_cvp.c b/drivers/media/platform/msm/cvp/msm_cvp.c
index 6aaef05..e1dd50c 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp.c
@@ -4,6 +4,7 @@
  */
 
 #include "msm_cvp.h"
+#include <synx_api.h>
 
 #define MSM_CVP_NOMINAL_CYCLES		(444 * 1000 * 1000)
 #define MSM_CVP_UHD60E_VPSS_CYCLES	(111 * 1000 * 1000)
@@ -13,8 +14,17 @@
 #define MAX_CVP_ISE_CYCLES		(MSM_CVP_NOMINAL_CYCLES - \
 		MSM_CVP_UHD60E_ISE_CYCLES)
 
+struct msm_cvp_fence_thread_data {
+	struct msm_cvp_inst *inst;
+	unsigned int device_id;
+	struct cvp_kmd_hfi_fence_packet in_fence_pkt;
+	unsigned int arg_type;
+};
+
+static struct msm_cvp_fence_thread_data fence_thread_data;
+
 static void print_client_buffer(u32 tag, const char *str,
-		struct msm_cvp_inst *inst, struct msm_cvp_buffer *cbuf)
+		struct msm_cvp_inst *inst, struct cvp_kmd_buffer *cbuf)
 {
 	if (!(tag & msm_cvp_debug) || !inst || !cbuf)
 		return;
@@ -42,13 +52,13 @@
 {
 	enum hal_buffer buftype = HAL_BUFFER_NONE;
 
-	if (type == MSM_CVP_BUFTYPE_INPUT)
+	if (type == CVP_KMD_BUFTYPE_INPUT)
 		buftype = HAL_BUFFER_INPUT;
-	else if (type == MSM_CVP_BUFTYPE_OUTPUT)
+	else if (type == CVP_KMD_BUFTYPE_OUTPUT)
 		buftype = HAL_BUFFER_OUTPUT;
-	else if (type == MSM_CVP_BUFTYPE_INTERNAL_1)
+	else if (type == CVP_KMD_BUFTYPE_INTERNAL_1)
 		buftype = HAL_BUFFER_INTERNAL_SCRATCH_1;
-	else if (type == MSM_CVP_BUFTYPE_INTERNAL_2)
+	else if (type == CVP_KMD_BUFTYPE_INTERNAL_2)
 		buftype = HAL_BUFFER_INTERNAL_SCRATCH_1;
 	else
 		dprintk(CVP_ERR, "%s: unknown buffer type %#x\n",
@@ -87,7 +97,7 @@
 }
 
 static int msm_cvp_get_session_info(struct msm_cvp_inst *inst,
-		struct msm_cvp_session_info *session)
+		struct cvp_kmd_session_info *session)
 {
 	int rc = 0;
 
@@ -133,366 +143,11 @@
 	return 0;
 }
 
-/* DFS feature system call handling */
-static int msm_cvp_session_cvp_dfs_config(
-	struct msm_cvp_inst *inst,
-	struct msm_cvp_dfs_config *dfs_config)
-{
-	int rc = 0;
-	struct hfi_device *hdev;
-	struct msm_cvp_internal_dfsconfig internal_dfs_config;
-
-	dprintk(CVP_DBG, "%s:: Enter inst = %pK\n", __func__, inst);
-
-	if (!inst || !inst->core || !dfs_config) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-	hdev = inst->core->device;
-	memcpy(&internal_dfs_config.dfs_config.cvp_dfs_config,
-		dfs_config,	sizeof(struct msm_cvp_dfs_config));
-
-	rc = call_hfi_op(hdev, session_cvp_dfs_config,
-			(void *)inst->session, &internal_dfs_config);
-	if (!rc) {
-		rc = wait_for_sess_signal_receipt(inst,
-			HAL_SESSION_DFS_CONFIG_CMD_DONE);
-		if (rc)
-			dprintk(CVP_ERR,
-				"%s: wait for signal failed, rc %d\n",
-				__func__, rc);
-	} else {
-		dprintk(CVP_ERR,
-			"%s: Failed in call_hfi_op for session_cvp_dfs_config\n",
-			__func__);
-	}
-	return rc;
-}
-
-static int msm_cvp_session_cvp_dfs_frame(
-	struct msm_cvp_inst *inst,
-	struct msm_cvp_dfs_frame *dfs_frame)
-{
-	int rc = 0;
-	struct hfi_device *hdev;
-	struct msm_cvp_internal_dfsframe internal_dfs_frame;
-	struct msm_cvp_dfs_frame_kmd *dest_ptr = &internal_dfs_frame.dfs_frame;
-	struct msm_cvp_dfs_frame_kmd src_frame;
-	struct msm_cvp_internal_buffer *cbuf;
-
-	dprintk(CVP_DBG, "%s:: Enter inst = %pK\n", __func__, inst);
-
-	if (!inst || !inst->core || !dfs_frame) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-	src_frame = *(struct msm_cvp_dfs_frame_kmd *)dfs_frame;
-	hdev = inst->core->device;
-	memset(&internal_dfs_frame, 0,
-		sizeof(struct msm_cvp_internal_dfsframe));
-
-	memcpy(&internal_dfs_frame.dfs_frame, dfs_frame,
-		CVP_DFS_FRAME_CMD_SIZE*sizeof(unsigned int));
-
-	rc = msm_cvp_session_get_iova_addr(inst, cbuf,
-			src_frame.left_view_buffer_fd,
-			src_frame.left_view_buffer_size,
-			&dest_ptr->left_view_buffer_fd,
-			&dest_ptr->left_view_buffer_size);
-	if (rc) {
-		dprintk(CVP_ERR, "%s:: left buffer not registered. rc=%d\n",
-			__func__, rc);
-		return rc;
-	}
-
-	rc = msm_cvp_session_get_iova_addr(inst, cbuf,
-			src_frame.right_view_buffer_fd,
-			src_frame.right_view_buffer_size,
-			&dest_ptr->right_view_buffer_fd,
-			&dest_ptr->right_view_buffer_size);
-	if (rc) {
-		dprintk(CVP_ERR, "%s:: right buffer not registered. rc=%d\n",
-			__func__, rc);
-		return rc;
-	}
-
-	rc = msm_cvp_session_get_iova_addr(inst, cbuf,
-			src_frame.disparity_map_buffer_fd,
-			src_frame.disparity_map_buffer_size,
-			&dest_ptr->disparity_map_buffer_fd,
-			&dest_ptr->disparity_map_buffer_size);
-	if (rc) {
-		dprintk(CVP_ERR, "%s:: disparity map not registered. rc=%d\n",
-			__func__, rc);
-		return rc;
-	}
-
-	rc = msm_cvp_session_get_iova_addr(inst, cbuf,
-			src_frame.occlusion_mask_buffer_fd,
-			src_frame.occlusion_mask_buffer_size,
-			&dest_ptr->occlusion_mask_buffer_fd,
-			&dest_ptr->occlusion_mask_buffer_size);
-	if (rc) {
-		dprintk(CVP_ERR, "%s:: occlusion mask not registered. rc=%d\n",
-			__func__, rc);
-		return rc;
-	}
-
-	rc = call_hfi_op(hdev, session_cvp_dfs_frame,
-			(void *)inst->session, &internal_dfs_frame);
-
-	if (rc) {
-		dprintk(CVP_ERR,
-			"%s: Failed in call_hfi_op for session_cvp_dfs_frame\n",
-			__func__);
-	}
-
-	return rc;
-}
-
-static int msm_cvp_session_cvp_dfs_frame_response(
-	struct msm_cvp_inst *inst,
-	struct msm_cvp_dfs_frame *dfs_frame)
-{
-	int rc = 0;
-
-	dprintk(CVP_DBG, "%s:: Enter inst = %pK\n", __func__, inst);
-
-	if (!inst || !inst->core || !dfs_frame) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-	rc = wait_for_sess_signal_receipt(inst,
-			HAL_SESSION_DFS_FRAME_CMD_DONE);
-	if (rc)
-		dprintk(CVP_ERR,
-			"%s: wait for signal failed, rc %d\n",
-			__func__, rc);
-	return rc;
-}
-
-/* DME feature system call handling */
-static int msm_cvp_session_cvp_dme_config(
-	struct msm_cvp_inst *inst,
-	struct msm_cvp_dme_config *dme_config)
-{
-	int rc = 0;
-	struct hfi_device *hdev;
-	struct msm_cvp_internal_dmeconfig internal_dme_config;
-
-	dprintk(CVP_DBG, "%s:: Enter inst = %d", __func__, inst);
-
-	if (!inst || !inst->core || !dme_config) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-	hdev = inst->core->device;
-	memcpy(&internal_dme_config.dme_config.cvp_dme_config,
-		dme_config, sizeof(struct msm_cvp_dme_config));
-
-	rc = call_hfi_op(hdev, session_cvp_dme_config,
-			(void *)inst->session, &internal_dme_config);
-	if (!rc) {
-		rc = wait_for_sess_signal_receipt(inst,
-			HAL_SESSION_DME_CONFIG_CMD_DONE);
-		if (rc)
-			dprintk(CVP_ERR,
-				"%s: wait for signal failed, rc %d\n",
-				__func__, rc);
-	} else {
-		dprintk(CVP_ERR, "%s Failed in call_hfi_op\n", __func__);
-	}
-	return rc;
-}
-
-static int msm_cvp_session_cvp_dme_frame(
-	struct msm_cvp_inst *inst,
-	struct msm_cvp_dme_frame *dme_frame)
-{
-	int i, rc = 0;
-	struct hfi_device *hdev;
-	struct msm_cvp_internal_dmeframe internal_dme_frame;
-	struct msm_cvp_dme_frame_kmd *dest_ptr = &internal_dme_frame.dme_frame;
-	struct msm_cvp_dme_frame_kmd src_frame;
-	struct msm_cvp_internal_buffer *cbuf;
-
-	dprintk(CVP_DBG, "%s:: Enter inst = %d", __func__, inst);
-
-	if (!inst || !inst->core || !dme_frame) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-	src_frame = *(struct msm_cvp_dme_frame_kmd *)dme_frame;
-	hdev = inst->core->device;
-	memset(&internal_dme_frame, 0,
-		sizeof(struct msm_cvp_internal_dmeframe));
-
-	memcpy(&internal_dme_frame.dme_frame, dme_frame,
-		CVP_DME_FRAME_CMD_SIZE*sizeof(unsigned int));
-
-	for (i = 0; i < CVP_DME_BUF_NUM; i++) {
-		if (!src_frame.bufs[i].fd) {
-			dest_ptr->bufs[i].fd = src_frame.bufs[i].fd;
-			dest_ptr->bufs[i].size = src_frame.bufs[i].size;
-			continue;
-		}
-
-		rc = msm_cvp_session_get_iova_addr(inst, cbuf,
-				src_frame.bufs[i].fd,
-				src_frame.bufs[i].size,
-				&dest_ptr->bufs[i].fd,
-				&dest_ptr->bufs[i].size);
-		if (rc) {
-			dprintk(CVP_ERR,
-				"%s: %d buffer not registered. rc=%d\n",
-				__func__, i, rc);
-			return rc;
-		}
-
-	}
-
-	rc = call_hfi_op(hdev, session_cvp_dme_frame,
-			(void *)inst->session, &internal_dme_frame);
-
-	if (rc) {
-		dprintk(CVP_ERR,
-			"%s:: Failed in call_hfi_op\n",
-			__func__);
-	}
-
-	return rc;
-}
-
-static int msm_cvp_session_cvp_persist(
-	struct msm_cvp_inst *inst,
-	struct msm_cvp_persist_buf *pbuf_cmd)
-{
-	int i, rc = 0;
-	struct hfi_device *hdev;
-	struct msm_cvp_internal_persist_cmd internal_pcmd;
-	struct msm_cvp_persist_kmd *dest_ptr = &internal_pcmd.persist_cmd;
-	struct msm_cvp_persist_kmd src_frame;
-	struct msm_cvp_internal_buffer *cbuf;
-
-	dprintk(CVP_DBG, "%s:: Enter inst = %d", __func__, inst);
-
-	if (!inst || !inst->core || !pbuf_cmd) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-	src_frame = *(struct msm_cvp_persist_kmd *)pbuf_cmd;
-	hdev = inst->core->device;
-	memset(&internal_pcmd, 0,
-		sizeof(struct msm_cvp_internal_persist_cmd));
-
-	memcpy(&internal_pcmd.persist_cmd, pbuf_cmd,
-		CVP_PERSIST_CMD_SIZE*sizeof(unsigned int));
-
-	for (i = 0; i < CVP_PSRSIST_BUF_NUM; i++) {
-		if (!src_frame.bufs[i].fd) {
-			dest_ptr->bufs[i].fd = src_frame.bufs[i].fd;
-			dest_ptr->bufs[i].size = src_frame.bufs[i].size;
-			continue;
-		}
-
-		rc = msm_cvp_session_get_iova_addr(inst, cbuf,
-				src_frame.bufs[i].fd,
-				src_frame.bufs[i].size,
-				&dest_ptr->bufs[i].fd,
-				&dest_ptr->bufs[i].size);
-		if (rc) {
-			dprintk(CVP_ERR,
-				"%s:: %d buffer not registered. rc=%d\n",
-				__func__, i, rc);
-			return rc;
-		}
-	}
-
-	rc = call_hfi_op(hdev, session_cvp_persist,
-			(void *)inst->session, &internal_pcmd);
-
-	if (rc)
-		dprintk(CVP_ERR, "%s: Failed in call_hfi_op\n", __func__);
-
-	return rc;
-}
-
-static int msm_cvp_session_cvp_dme_frame_response(
-	struct msm_cvp_inst *inst,
-	struct msm_cvp_dme_frame *dme_frame)
-{
-	int rc = 0;
-
-	dprintk(CVP_DBG, "%s:: Enter inst = %d", __func__, inst);
-
-	if (!inst || !inst->core || !dme_frame) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-	rc = wait_for_sess_signal_receipt(inst,
-			HAL_SESSION_DME_FRAME_CMD_DONE);
-	if (rc)
-		dprintk(CVP_ERR,
-			"%s: wait for signal failed, rc %d\n",
-			__func__, rc);
-	return rc;
-}
-
-static int msm_cvp_session_cvp_persist_response(
-	struct msm_cvp_inst *inst,
-	struct msm_cvp_persist_buf *pbuf_cmd)
-{
-	int rc = 0;
-
-	dprintk(CVP_DBG, "%s:: Enter inst = %d", __func__, inst);
-
-	if (!inst || !inst->core || !pbuf_cmd) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-	rc = wait_for_sess_signal_receipt(inst,
-			HAL_SESSION_PERSIST_CMD_DONE);
-	if (rc)
-		dprintk(CVP_ERR,
-			"%s: wait for signal failed, rc %d\n",
-			__func__, rc);
-	return rc;
-}
-
-
-
-static int msm_cvp_send_cmd(struct msm_cvp_inst *inst,
-		struct msm_cvp_send_cmd *send_cmd)
-{
-	dprintk(CVP_ERR, "%s: UMD gave a deprecated cmd", __func__);
-
-	return 0;
-}
-
-static int msm_cvp_request_power(struct msm_cvp_inst *inst,
-		struct msm_cvp_request_power *power)
-{
-	int rc = 0;
-
-	if (!inst || !power) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-
-	dprintk(CVP_DBG,
-		"%s: clock_cycles_a %d, clock_cycles_b %d, ddr_bw %d sys_cache_bw %d\n",
-		__func__, power->clock_cycles_a, power->clock_cycles_b,
-		power->ddr_bw, power->sys_cache_bw);
-
-	return rc;
-}
-
-static int msm_cvp_register_buffer(struct msm_cvp_inst *inst,
-		struct msm_cvp_buffer *buf)
+static int msm_cvp_map_buf(struct msm_cvp_inst *inst,
+	struct cvp_kmd_buffer *buf)
 {
 	int rc = 0;
 	bool found;
-	struct hfi_device *hdev;
 	struct msm_cvp_internal_buffer *cbuf;
 	struct hal_session *session;
 
@@ -502,13 +157,6 @@
 	}
 
 	session = (struct hal_session *)inst->session;
-	if (!session) {
-		dprintk(CVP_ERR, "%s: invalid session\n", __func__);
-		return -EINVAL;
-	}
-	hdev = inst->core->device;
-	print_client_buffer(CVP_DBG, "register", inst, buf);
-
 	mutex_lock(&inst->cvpbufs.lock);
 	found = false;
 	list_for_each_entry(cbuf, &inst->cvpbufs.list, list) {
@@ -533,7 +181,7 @@
 	list_add_tail(&cbuf->list, &inst->cvpbufs.list);
 	mutex_unlock(&inst->cvpbufs.lock);
 
-	memcpy(&cbuf->buf, buf, sizeof(struct msm_cvp_buffer));
+	memcpy(&cbuf->buf, buf, sizeof(struct cvp_kmd_buffer));
 	cbuf->smem.buffer_type = get_hal_buftype(__func__, buf->type);
 	cbuf->smem.fd = buf->fd;
 	cbuf->smem.offset = buf->offset;
@@ -555,7 +203,6 @@
 			goto exit;
 		}
 	}
-
 	return rc;
 
 exit:
@@ -570,8 +217,442 @@
 	return rc;
 }
 
+static bool _cvp_msg_pending(struct msm_cvp_inst *inst,
+			struct cvp_session_queue *sq,
+			struct session_msg **msg)
+{
+	struct session_msg *mptr = NULL;
+	bool result = false;
+
+	spin_lock(&sq->lock);
+	if (!kref_read(&inst->kref)) {
+		/* The session is being deleted */
+		spin_unlock(&sq->lock);
+		*msg = NULL;
+		return true;
+	}
+	result = list_empty(&sq->msgs);
+	if (!result) {
+		mptr = list_first_entry(&sq->msgs, struct session_msg, node);
+		list_del_init(&mptr->node);
+		sq->msg_count--;
+	}
+	spin_unlock(&sq->lock);
+	*msg = mptr;
+	return !result;
+}
+
+
+static int msm_cvp_session_receive_hfi(struct msm_cvp_inst *inst,
+			struct cvp_kmd_hfi_packet *out_pkt)
+{
+	unsigned long wait_time;
+	struct session_msg *msg = NULL;
+	struct cvp_session_queue *sq;
+
+	if (!inst) {
+		dprintk(CVP_ERR, "%s invalid session\n", __func__);
+		return -EINVAL;
+	}
+
+	sq = &inst->session_queue;
+
+	wait_time = msecs_to_jiffies(CVP_MAX_WAIT_TIME);
+
+	if (wait_event_timeout(sq->wq,
+		_cvp_msg_pending(inst, sq, &msg), wait_time) == 0) {
+		dprintk(CVP_ERR, "session queue wait timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	if (msg == NULL) {
+		dprintk(CVP_ERR, "%s: session is deleted, no msg\n", __func__);
+		return -EINVAL;
+	}
+
+	memcpy(out_pkt, &msg->pkt, sizeof(struct hfi_msg_session_hdr));
+	kmem_cache_free(inst->session_queue.msg_cache, msg);
+
+	return 0;
+}
+
+static int msm_cvp_session_process_hfi(
+	struct msm_cvp_inst *inst,
+	struct cvp_kmd_hfi_packet *in_pkt,
+	unsigned int in_offset,
+	unsigned int in_buf_num)
+{
+	int i, pkt_idx, rc = 0;
+	struct hfi_device *hdev;
+	struct msm_cvp_internal_buffer *cbuf = NULL;
+	struct buf_desc *buf_ptr;
+	unsigned int offset, buf_num, signal;
+
+	if (!inst || !inst->core || !in_pkt) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+	hdev = inst->core->device;
+
+	pkt_idx = get_pkt_index((struct cvp_hal_session_cmd_pkt *)in_pkt);
+	if (pkt_idx < 0) {
+		dprintk(CVP_ERR, "%s incorrect packet %d, %x\n", __func__,
+				in_pkt->pkt_data[0],
+				in_pkt->pkt_data[1]);
+		offset = in_offset;
+		buf_num = in_buf_num;
+		signal = HAL_NO_RESP;
+	} else {
+		offset = cvp_hfi_defs[pkt_idx].buf_offset;
+		buf_num = cvp_hfi_defs[pkt_idx].buf_num;
+		signal = cvp_hfi_defs[pkt_idx].resp;
+	}
+
+	if (in_offset && in_buf_num) {
+		if (offset != in_offset || buf_num != in_buf_num) {
+			dprintk(CVP_ERR, "%s incorrect offset and num %d, %d\n",
+					__func__, in_offset, in_buf_num);
+			offset = in_offset;
+			buf_num = in_buf_num;
+		}
+	}
+
+	if (offset != 0 && buf_num != 0) {
+		buf_ptr = (struct buf_desc *)&in_pkt->pkt_data[offset];
+
+		for (i = 0; i < buf_num; i++) {
+			if (!buf_ptr[i].fd)
+				continue;
+
+			rc = msm_cvp_session_get_iova_addr(inst, cbuf,
+						buf_ptr[i].fd,
+						buf_ptr[i].size,
+						&buf_ptr[i].fd,
+						&buf_ptr[i].size);
+			if (rc) {
+				dprintk(CVP_ERR,
+					"%s: buf %d unregistered. rc=%d\n",
+					__func__, i, rc);
+				return rc;
+			}
+		}
+	}
+	rc = call_hfi_op(hdev, session_send,
+			(void *)inst->session, in_pkt);
+	if (rc) {
+		dprintk(CVP_ERR,
+			"%s: Failed in call_hfi_op %d, %x\n",
+			__func__, in_pkt->pkt_data[0], in_pkt->pkt_data[1]);
+	}
+
+	if (signal != HAL_NO_RESP) {
+		rc = wait_for_sess_signal_receipt(inst, signal);
+		if (rc)
+			dprintk(CVP_ERR,
+				"%s: wait for signal failed, rc %d %d, %x %d\n",
+				__func__, rc,
+				in_pkt->pkt_data[0],
+				in_pkt->pkt_data[1],
+				signal);
+
+	}
+
+	return rc;
+}
+
+static int msm_cvp_thread_fence_run(void *data)
+{
+	int i, pkt_idx, rc = 0;
+	unsigned long timeout_ms = 1000;
+	int synx_obj;
+	struct hfi_device *hdev;
+	struct msm_cvp_fence_thread_data *fence_thread_data;
+	struct cvp_kmd_hfi_fence_packet *in_fence_pkt;
+	struct cvp_kmd_hfi_packet *in_pkt;
+	struct msm_cvp_inst *inst;
+	int *fence;
+	struct msm_cvp_internal_buffer *cbuf = NULL;
+	struct buf_desc *buf_ptr;
+	unsigned int offset, buf_num;
+
+	if (!data) {
+		dprintk(CVP_ERR, "%s Wrong input data %pK\n", __func__, data);
+		do_exit(-EINVAL);
+	}
+
+	fence_thread_data = data;
+	inst = cvp_get_inst(get_cvp_core(fence_thread_data->device_id),
+				(void *)fence_thread_data->inst);
+	if (!inst) {
+		dprintk(CVP_ERR, "%s Wrong inst %pK\n", __func__, inst);
+		do_exit(-EINVAL);
+	}
+	in_fence_pkt = (struct cvp_kmd_hfi_fence_packet *)
+					&fence_thread_data->in_fence_pkt;
+	in_pkt = (struct cvp_kmd_hfi_packet *)(in_fence_pkt);
+	fence = (int *)(in_fence_pkt->fence_data);
+	hdev = inst->core->device;
+
+	pkt_idx = get_pkt_index((struct cvp_hal_session_cmd_pkt *)in_pkt);
+	if (pkt_idx < 0) {
+		dprintk(CVP_ERR, "%s incorrect packet %d, %x\n", __func__,
+			in_pkt->pkt_data[0],
+			in_pkt->pkt_data[1]);
+		do_exit(pkt_idx);
+	}
+
+	offset = cvp_hfi_defs[pkt_idx].buf_offset;
+	buf_num = cvp_hfi_defs[pkt_idx].buf_num;
+
+	if (offset != 0 && buf_num != 0) {
+		buf_ptr = (struct buf_desc *)&in_pkt->pkt_data[offset];
+
+		for (i = 0; i < buf_num; i++) {
+			if (!buf_ptr[i].fd)
+				continue;
+
+			rc = msm_cvp_session_get_iova_addr(inst, cbuf,
+				buf_ptr[i].fd,
+				buf_ptr[i].size,
+				&buf_ptr[i].fd,
+				&buf_ptr[i].size);
+			if (rc) {
+				dprintk(CVP_ERR,
+					"%s: buf %d unregistered. rc=%d\n",
+					__func__, i, rc);
+				do_exit(rc);
+			}
+		}
+	}
+
+	//wait on synx before signaling HFI
+	switch (fence_thread_data->arg_type) {
+	case CVP_KMD_HFI_DME_FRAME_FENCE_CMD:
+	{
+		for (i = 0; i < HFI_DME_BUF_NUM-1; i++) {
+			if (fence[(i<<1)]) {
+				rc = synx_import(fence[(i<<1)],
+					fence[((i<<1)+1)], &synx_obj);
+				if (rc) {
+					dprintk(CVP_ERR,
+						"%s: synx_import failed\n",
+						__func__);
+					do_exit(rc);
+				}
+				rc = synx_wait(synx_obj, timeout_ms);
+				if (rc) {
+					dprintk(CVP_ERR,
+						"%s: synx_wait failed\n",
+						__func__);
+					do_exit(rc);
+				}
+				rc = synx_release(synx_obj);
+				if (rc) {
+					dprintk(CVP_ERR,
+						"%s: synx_release failed\n",
+						__func__);
+					do_exit(rc);
+				}
+			}
+		}
+
+		rc = call_hfi_op(hdev, session_send,
+				(void *)inst->session, in_pkt);
+		if (rc) {
+			dprintk(CVP_ERR,
+				"%s: Failed in call_hfi_op %d, %x\n",
+				__func__, in_pkt->pkt_data[0],
+				in_pkt->pkt_data[1]);
+			do_exit(rc);
+		}
+
+		rc = wait_for_sess_signal_receipt(inst,
+				HAL_SESSION_DME_FRAME_CMD_DONE);
+		if (rc)	{
+			dprintk(CVP_ERR, "%s: wait for signal failed, rc %d\n",
+			__func__, rc);
+			do_exit(rc);
+		}
+		rc = synx_import(fence[((HFI_DME_BUF_NUM-1)<<1)],
+				fence[((HFI_DME_BUF_NUM-1)<<1)+1],
+				&synx_obj);
+		if (rc) {
+			dprintk(CVP_ERR, "%s: synx_import failed\n", __func__);
+			do_exit(rc);
+		}
+		rc = synx_signal(synx_obj, SYNX_STATE_SIGNALED_SUCCESS);
+		if (rc) {
+			dprintk(CVP_ERR, "%s: synx_signal failed\n", __func__);
+			do_exit(rc);
+		}
+		if (synx_get_status(synx_obj) != SYNX_STATE_SIGNALED_SUCCESS) {
+			dprintk(CVP_ERR, "%s: synx_get_status failed\n",
+					__func__);
+			do_exit(rc);
+		}
+		rc = synx_release(synx_obj);
+		if (rc) {
+			dprintk(CVP_ERR, "%s: synx_release failed\n", __func__);
+			do_exit(rc);
+		}
+		break;
+	}
+	default:
+		dprintk(CVP_ERR, "%s: unknown hfi cmd type 0x%x\n",
+			__func__, fence_thread_data->arg_type);
+		rc = -EINVAL;
+		do_exit(rc);
+		break;
+	}
+
+	do_exit(0);
+}
+
+static int msm_cvp_session_process_hfifence(
+	struct msm_cvp_inst *inst,
+	struct cvp_kmd_arg *arg)
+{
+	static int thread_num;
+	struct task_struct *thread;
+	int rc = 0;
+	char thread_fence_name[32];
+
+	dprintk(CVP_DBG, "%s:: Enter inst = %pK\n", __func__, inst);
+	if (!inst || !inst->core || !arg) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	thread_num = thread_num + 1;
+	fence_thread_data.inst = inst;
+	fence_thread_data.device_id = (unsigned int)inst->core->id;
+	memcpy(&fence_thread_data.in_fence_pkt, &arg->data.hfi_fence_pkt,
+				sizeof(struct cvp_kmd_hfi_fence_packet));
+	fence_thread_data.arg_type = arg->type;
+	snprintf(thread_fence_name, sizeof(thread_fence_name),
+				"thread_fence_%d", thread_num);
+	thread = kthread_run(msm_cvp_thread_fence_run,
+			&fence_thread_data, thread_fence_name);
+
+	return rc;
+}
+
+static int msm_cvp_session_cvp_dfs_frame_response(
+	struct msm_cvp_inst *inst,
+	struct cvp_kmd_hfi_packet *dfs_frame)
+{
+	int rc = 0;
+
+	dprintk(CVP_DBG, "%s:: Enter inst = %pK\n", __func__, inst);
+
+	if (!inst || !inst->core || !dfs_frame) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+	rc = wait_for_sess_signal_receipt(inst,
+			HAL_SESSION_DFS_FRAME_CMD_DONE);
+	if (rc)
+		dprintk(CVP_ERR,
+			"%s: wait for signal failed, rc %d\n",
+			__func__, rc);
+	return rc;
+}
+
+static int msm_cvp_session_cvp_dme_frame_response(
+	struct msm_cvp_inst *inst,
+	struct cvp_kmd_hfi_packet *dme_frame)
+{
+	int rc = 0;
+
+	dprintk(CVP_DBG, "%s:: Enter inst = %pK\n", __func__, inst);
+
+	if (!inst || !inst->core || !dme_frame) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+	rc = wait_for_sess_signal_receipt(inst,
+			HAL_SESSION_DME_FRAME_CMD_DONE);
+	if (rc)
+		dprintk(CVP_ERR,
+			"%s: wait for signal failed, rc %d\n",
+			__func__, rc);
+	return rc;
+}
+
+static int msm_cvp_session_cvp_persist_response(
+	struct msm_cvp_inst *inst,
+	struct cvp_kmd_hfi_packet *pbuf_cmd)
+{
+	int rc = 0;
+
+	dprintk(CVP_DBG, "%s:: Enter inst = %pK\n", __func__, inst);
+
+	if (!inst || !inst->core || !pbuf_cmd) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+	rc = wait_for_sess_signal_receipt(inst,
+			HAL_SESSION_PERSIST_CMD_DONE);
+	if (rc)
+		dprintk(CVP_ERR,
+			"%s: wait for signal failed, rc %d\n",
+			__func__, rc);
+	return rc;
+}
+
+
+
+static int msm_cvp_send_cmd(struct msm_cvp_inst *inst,
+		struct cvp_kmd_send_cmd *send_cmd)
+{
+	dprintk(CVP_ERR, "%s: UMD gave a deprecated cmd", __func__);
+
+	return 0;
+}
+
+static int msm_cvp_request_power(struct msm_cvp_inst *inst,
+		struct cvp_kmd_request_power *power)
+{
+	int rc = 0;
+
+	if (!inst || !power) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	dprintk(CVP_DBG,
+		"%s: clock_cycles_a %d, clock_cycles_b %d, ddr_bw %d sys_cache_bw %d\n",
+		__func__, power->clock_cycles_a, power->clock_cycles_b,
+		power->ddr_bw, power->sys_cache_bw);
+
+	return rc;
+}
+
+static int msm_cvp_register_buffer(struct msm_cvp_inst *inst,
+		struct cvp_kmd_buffer *buf)
+{
+	struct hfi_device *hdev;
+	struct hal_session *session;
+
+	if (!inst || !inst->core || !buf) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	session = (struct hal_session *)inst->session;
+	if (!session) {
+		dprintk(CVP_ERR, "%s: invalid session\n", __func__);
+		return -EINVAL;
+	}
+	hdev = inst->core->device;
+	print_client_buffer(CVP_DBG, "register", inst, buf);
+
+	return msm_cvp_map_buf(inst, buf);
+
+}
+
 static int msm_cvp_unregister_buffer(struct msm_cvp_inst *inst,
-		struct msm_cvp_buffer *buf)
+		struct cvp_kmd_buffer *buf)
 {
 	int rc = 0;
 	bool found;
@@ -626,7 +707,7 @@
 	return rc;
 }
 
-int msm_cvp_handle_syscall(struct msm_cvp_inst *inst, struct msm_cvp_arg *arg)
+int msm_cvp_handle_syscall(struct msm_cvp_inst *inst, struct cvp_kmd_arg *arg)
 {
 	int rc = 0;
 
@@ -637,112 +718,96 @@
 	dprintk(CVP_DBG, "%s:: arg->type = %x", __func__, arg->type);
 
 	switch (arg->type) {
-	case MSM_CVP_GET_SESSION_INFO:
+	case CVP_KMD_GET_SESSION_INFO:
 	{
-		struct msm_cvp_session_info *session =
-			(struct msm_cvp_session_info *)&arg->data.session;
+		struct cvp_kmd_session_info *session =
+			(struct cvp_kmd_session_info *)&arg->data.session;
 
 		rc = msm_cvp_get_session_info(inst, session);
 		break;
 	}
-	case MSM_CVP_REQUEST_POWER:
+	case CVP_KMD_REQUEST_POWER:
 	{
-		struct msm_cvp_request_power *power =
-			(struct msm_cvp_request_power *)&arg->data.req_power;
+		struct cvp_kmd_request_power *power =
+			(struct cvp_kmd_request_power *)&arg->data.req_power;
 
 		rc = msm_cvp_request_power(inst, power);
 		break;
 	}
-	case MSM_CVP_REGISTER_BUFFER:
+	case CVP_KMD_REGISTER_BUFFER:
 	{
-		struct msm_cvp_buffer *buf =
-			(struct msm_cvp_buffer *)&arg->data.regbuf;
+		struct cvp_kmd_buffer *buf =
+			(struct cvp_kmd_buffer *)&arg->data.regbuf;
 
 		rc = msm_cvp_register_buffer(inst, buf);
 		break;
 	}
-	case MSM_CVP_UNREGISTER_BUFFER:
+	case CVP_KMD_UNREGISTER_BUFFER:
 	{
-		struct msm_cvp_buffer *buf =
-			(struct msm_cvp_buffer *)&arg->data.unregbuf;
+		struct cvp_kmd_buffer *buf =
+			(struct cvp_kmd_buffer *)&arg->data.unregbuf;
 
 		rc = msm_cvp_unregister_buffer(inst, buf);
 		break;
 	}
-	case MSM_CVP_HFI_SEND_CMD:
+	case CVP_KMD_HFI_SEND_CMD:
 	{
-		//struct msm_cvp_buffer *buf =
-		//(struct msm_cvp_buffer *)&arg->data.unregbuf;
-		struct msm_cvp_send_cmd *send_cmd =
-			(struct msm_cvp_send_cmd *)&arg->data.send_cmd;
+		struct cvp_kmd_send_cmd *send_cmd =
+			(struct cvp_kmd_send_cmd *)&arg->data.send_cmd;
 
 		rc = msm_cvp_send_cmd(inst, send_cmd);
 		break;
 	}
-	case MSM_CVP_HFI_DFS_CONFIG_CMD:
+	case CVP_KMD_RECEIVE_MSG_PKT:
 	{
-		struct msm_cvp_dfs_config *dfs_config =
-			(struct msm_cvp_dfs_config *)&arg->data.dfs_config;
-
-		rc = msm_cvp_session_cvp_dfs_config(inst, dfs_config);
+		struct cvp_kmd_hfi_packet *out_pkt =
+			(struct cvp_kmd_hfi_packet *)&arg->data.hfi_pkt;
+		rc = msm_cvp_session_receive_hfi(inst, out_pkt);
 		break;
 	}
-	case MSM_CVP_HFI_DFS_FRAME_CMD:
+	case CVP_KMD_SEND_CMD_PKT:
+	case CVP_KMD_HFI_DFS_CONFIG_CMD:
+	case CVP_KMD_HFI_DFS_FRAME_CMD:
+	case CVP_KMD_HFI_DME_CONFIG_CMD:
+	case CVP_KMD_HFI_DME_FRAME_CMD:
+	case CVP_KMD_HFI_PERSIST_CMD:
 	{
-		struct msm_cvp_dfs_frame *dfs_frame =
-			(struct msm_cvp_dfs_frame *)&arg->data.dfs_frame;
+		struct cvp_kmd_hfi_packet *in_pkt =
+			(struct cvp_kmd_hfi_packet *)&arg->data.hfi_pkt;
 
-		rc = msm_cvp_session_cvp_dfs_frame(inst, dfs_frame);
+		rc = msm_cvp_session_process_hfi(inst, in_pkt,
+				arg->buf_offset, arg->buf_num);
 		break;
 	}
-	case MSM_CVP_HFI_DFS_FRAME_CMD_RESPONSE:
+	case CVP_KMD_HFI_DFS_FRAME_CMD_RESPONSE:
 	{
-		struct msm_cvp_dfs_frame *dfs_frame =
-			(struct msm_cvp_dfs_frame *)&arg->data.dfs_frame;
+		struct cvp_kmd_hfi_packet *dfs_frame =
+			(struct cvp_kmd_hfi_packet *)&arg->data.hfi_pkt;
 
 		rc = msm_cvp_session_cvp_dfs_frame_response(inst, dfs_frame);
 		break;
 	}
-	case MSM_CVP_HFI_DME_CONFIG_CMD:
+	case CVP_KMD_HFI_DME_FRAME_CMD_RESPONSE:
 	{
-		struct msm_cvp_dme_config *dme_config =
-			(struct msm_cvp_dme_config *)&arg->data.dme_config;
+		struct cvp_kmd_hfi_packet *dme_frame =
+			(struct cvp_kmd_hfi_packet *)&arg->data.hfi_pkt;
 
-		rc = msm_cvp_session_cvp_dme_config(inst, dme_config);
+		rc = msm_cvp_session_cvp_dme_frame_response(inst, dme_frame);
 		break;
 	}
-	case MSM_CVP_HFI_DME_FRAME_CMD:
+	case CVP_KMD_HFI_PERSIST_CMD_RESPONSE:
 	{
-		struct msm_cvp_dme_frame *dme_frame =
-			(struct msm_cvp_dme_frame *)&arg->data.dme_frame;
-
-		rc = msm_cvp_session_cvp_dme_frame(inst, dme_frame);
-		break;
-	}
-	case MSM_CVP_HFI_DME_FRAME_CMD_RESPONSE:
-	{
-		struct msm_cvp_dme_frame *dmeframe =
-			(struct msm_cvp_dme_frame *)&arg->data.dme_frame;
-
-		rc = msm_cvp_session_cvp_dme_frame_response(inst, dmeframe);
-		break;
-	}
-	case MSM_CVP_HFI_PERSIST_CMD:
-	{
-		struct msm_cvp_persist_buf *pbuf_cmd =
-			(struct msm_cvp_persist_buf *)&arg->data.pbuf_cmd;
-
-		rc = msm_cvp_session_cvp_persist(inst, pbuf_cmd);
-		break;
-	}
-	case MSM_CVP_HFI_PERSIST_CMD_RESPONSE:
-	{
-		struct msm_cvp_persist_buf *pbuf_cmd =
-			(struct msm_cvp_persist_buf *)&arg->data.pbuf_cmd;
+		struct cvp_kmd_hfi_packet *pbuf_cmd =
+			(struct cvp_kmd_hfi_packet *)&arg->data.hfi_pkt;
 
 		rc = msm_cvp_session_cvp_persist_response(inst, pbuf_cmd);
 		break;
 	}
+	case CVP_KMD_HFI_DME_FRAME_FENCE_CMD:
+	{
+		rc = msm_cvp_session_process_hfifence(inst, arg);
+		break;
+	}
 	default:
 		dprintk(CVP_ERR, "%s: unknown arg type 0x%x\n",
 				__func__, arg->type);
@@ -753,65 +818,6 @@
 	return rc;
 }
 
-static struct msm_cvp_ctrl msm_cvp_ctrls[] = {
-	{
-		.id = V4L2_CID_MPEG_VIDC_VIDEO_SECURE,
-		.name = "Secure mode",
-		.type = V4L2_CTRL_TYPE_BUTTON,
-		.minimum = 0,
-		.maximum = 1,
-		.default_value = 0,
-		.step = 1,
-		.menu_skip_mask = 0,
-		.qmenu = NULL,
-	},
-};
-
-int msm_cvp_control_init(struct msm_cvp_inst *inst,
-		const struct v4l2_ctrl_ops *ctrl_ops)
-{
-	return msm_cvp_comm_ctrl_init(inst, msm_cvp_ctrls,
-		ARRAY_SIZE(msm_cvp_ctrls), ctrl_ops);
-}
-
-int msm_cvp_session_pause(struct msm_cvp_inst *inst)
-{
-	int rc;
-	struct hfi_device *hdev;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-	hdev = inst->core->device;
-
-	rc = call_hfi_op(hdev, session_pause, (void *)inst->session);
-	if (rc)
-		dprintk(CVP_ERR, "%s: failed to pause inst %pK (%#x)\n",
-			__func__, inst, hash32_ptr(inst->session));
-
-	return rc;
-}
-
-int msm_cvp_session_resume(struct msm_cvp_inst *inst)
-{
-	int rc;
-	struct hfi_device *hdev;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-	hdev = inst->core->device;
-
-	rc = call_hfi_op(hdev, session_resume, (void *)inst->session);
-	if (rc)
-		dprintk(CVP_ERR, "%s: failed to resume inst %pK (%#x)\n",
-			__func__, inst, hash32_ptr(inst->session));
-
-	return rc;
-}
-
 int msm_cvp_session_deinit(struct msm_cvp_inst *inst)
 {
 	int rc = 0;
@@ -830,7 +836,7 @@
 
 	mutex_lock(&inst->cvpbufs.lock);
 	list_for_each_entry_safe(cbuf, temp, &inst->cvpbufs.list, list) {
-		print_cvp_internal_buffer(CVP_ERR, "unregistered", inst, cbuf);
+		print_cvp_internal_buffer(CVP_DBG, "unregistered", inst, cbuf);
 		rc = msm_cvp_smem_unmap_dma_buf(inst, &cbuf->smem);
 		if (rc)
 			dprintk(CVP_ERR, "%s: unmap failed\n", __func__);
diff --git a/drivers/media/platform/msm/cvp/msm_cvp.h b/drivers/media/platform/msm/cvp/msm_cvp.h
index 6bcb799..8b95b8a 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp.h
@@ -11,11 +11,7 @@
 #include "msm_cvp_clocks.h"
 #include "msm_cvp_debug.h"
 #include "msm_cvp_dsp.h"
-int msm_cvp_handle_syscall(struct msm_cvp_inst *inst, struct msm_cvp_arg *arg);
+int msm_cvp_handle_syscall(struct msm_cvp_inst *inst, struct cvp_kmd_arg *arg);
 int msm_cvp_session_init(struct msm_cvp_inst *inst);
 int msm_cvp_session_deinit(struct msm_cvp_inst *inst);
-int msm_cvp_session_pause(struct msm_cvp_inst *inst);
-int msm_cvp_session_resume(struct msm_cvp_inst *inst);
-int msm_cvp_control_init(struct msm_cvp_inst *inst,
-		const struct v4l2_ctrl_ops *ctrl_ops);
 #endif
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_clocks.c b/drivers/media/platform/msm/cvp/msm_cvp_clocks.c
index aba906b..f22576b 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_clocks.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_clocks.c
@@ -14,18 +14,9 @@
 #define MSM_CVP_MIN_UBWC_COMPRESSION_RATIO (1 << 16)
 #define MSM_CVP_MAX_UBWC_COMPRESSION_RATIO (5 << 16)
 
-static unsigned long msm_cvp_calc_freq_ar50(struct msm_cvp_inst *inst,
-	u32 filled_len);
-static int msm_cvp_decide_work_mode_ar50(struct msm_cvp_inst *inst);
 static unsigned long msm_cvp_calc_freq(struct msm_cvp_inst *inst,
 	u32 filled_len);
 
-struct msm_cvp_core_ops cvp_core_ops_vpu4 = {
-	.calc_freq = msm_cvp_calc_freq_ar50,
-	.decide_work_route = NULL,
-	.decide_work_mode = msm_cvp_decide_work_mode_ar50,
-};
-
 struct msm_cvp_core_ops cvp_core_ops_vpu5 = {
 	.calc_freq = msm_cvp_calc_freq,
 	.decide_work_route = msm_cvp_decide_work_route,
@@ -75,23 +66,6 @@
 	return compression_ratio;
 }
 
-int msm_cvp_get_mbs_per_frame(struct msm_cvp_inst *inst)
-{
-	int height, width;
-
-	if (!inst->in_reconfig) {
-		height = max(inst->prop.height[CAPTURE_PORT],
-			inst->prop.height[OUTPUT_PORT]);
-		width = max(inst->prop.width[CAPTURE_PORT],
-			inst->prop.width[OUTPUT_PORT]);
-	} else {
-		height = inst->reconfig_height;
-		width = inst->reconfig_width;
-	}
-
-	return NUM_MBS_PER_FRAME(height, width);
-}
-
 static int msm_cvp_get_fps(struct msm_cvp_inst *inst)
 {
 	int fps;
@@ -105,94 +79,6 @@
 	return fps;
 }
 
-void cvp_update_recon_stats(struct msm_cvp_inst *inst,
-	struct recon_stats_type *recon_stats)
-{
-	struct recon_buf *binfo;
-	u32 CR = 0, CF = 0;
-	u32 frame_size;
-
-	CR = get_ubwc_compression_ratio(recon_stats->ubwc_stats_info);
-
-	frame_size = (msm_cvp_get_mbs_per_frame(inst) / (32 * 8) * 3) / 2;
-
-	if (frame_size)
-		CF = recon_stats->complexity_number / frame_size;
-	else
-		CF = MSM_CVP_MAX_UBWC_COMPLEXITY_FACTOR;
-
-	mutex_lock(&inst->reconbufs.lock);
-	list_for_each_entry(binfo, &inst->reconbufs.list, list) {
-		if (binfo->buffer_index ==
-				recon_stats->buffer_index) {
-			binfo->CR = CR;
-			binfo->CF = CF;
-		}
-	}
-	mutex_unlock(&inst->reconbufs.lock);
-}
-
-static int fill_dynamic_stats(struct msm_cvp_inst *inst,
-	struct cvp_bus_vote_data *vote_data)
-{
-	struct recon_buf *binfo, *nextb;
-	struct cvp_input_cr_data *temp, *next;
-	u32 min_cf = MSM_CVP_MAX_UBWC_COMPLEXITY_FACTOR, max_cf = 0;
-	u32 min_input_cr = MSM_CVP_MAX_UBWC_COMPRESSION_RATIO,
-		max_input_cr = 0;
-	u32 min_cr = MSM_CVP_MAX_UBWC_COMPRESSION_RATIO, max_cr = 0;
-
-	mutex_lock(&inst->reconbufs.lock);
-	list_for_each_entry_safe(binfo, nextb, &inst->reconbufs.list, list) {
-		if (binfo->CR) {
-			min_cr = min(min_cr, binfo->CR);
-			max_cr = max(max_cr, binfo->CR);
-		}
-		if (binfo->CF) {
-			min_cf = min(min_cf, binfo->CF);
-			max_cf = max(max_cf, binfo->CF);
-		}
-	}
-	mutex_unlock(&inst->reconbufs.lock);
-
-	mutex_lock(&inst->input_crs.lock);
-	list_for_each_entry_safe(temp, next, &inst->input_crs.list, list) {
-		min_input_cr = min(min_input_cr, temp->input_cr);
-		max_input_cr = max(max_input_cr, temp->input_cr);
-	}
-	mutex_unlock(&inst->input_crs.lock);
-
-	/* Sanitize CF values from HW . */
-	max_cf = min_t(u32, max_cf, MSM_CVP_MAX_UBWC_COMPLEXITY_FACTOR);
-	min_cf = max_t(u32, min_cf, MSM_CVP_MIN_UBWC_COMPLEXITY_FACTOR);
-	max_cr = min_t(u32, max_cr, MSM_CVP_MAX_UBWC_COMPRESSION_RATIO);
-	min_cr = max_t(u32, min_cr, MSM_CVP_MIN_UBWC_COMPRESSION_RATIO);
-	max_input_cr = min_t(u32,
-		max_input_cr, MSM_CVP_MAX_UBWC_COMPRESSION_RATIO);
-	min_input_cr = max_t(u32,
-		min_input_cr, MSM_CVP_MIN_UBWC_COMPRESSION_RATIO);
-
-	vote_data->compression_ratio = min_cr;
-	vote_data->complexity_factor = max_cf;
-	vote_data->input_cr = min_input_cr;
-	vote_data->use_dpb_read = false;
-
-	/* Check if driver can vote for lower bus BW */
-	if (inst->clk_data.load < inst->clk_data.load_norm) {
-		vote_data->compression_ratio = max_cr;
-		vote_data->complexity_factor = min_cf;
-		vote_data->input_cr = max_input_cr;
-		vote_data->use_dpb_read = true;
-	}
-
-	dprintk(CVP_PROF,
-		"Input CR = %d Recon CR = %d Complexity Factor = %d\n",
-			vote_data->input_cr, vote_data->compression_ratio,
-			vote_data->complexity_factor);
-
-	return 0;
-}
-
 int msm_cvp_comm_vote_bus(struct msm_cvp_core *core)
 {
 	int rc = 0, vote_data_count = 0, i = 0;
@@ -207,6 +93,8 @@
 	}
 
-	if (!core->resources.bus_devfreq_on)
+	if (!core->resources.bus_devfreq_on) {
+		dprintk(CVP_WARN, "%s is not enabled for CVP!\n", __func__);
 		return 0;
+	}
 
 	hdev = core->device;
@@ -283,8 +170,6 @@
 		vote_data[i].lcu_size = (codec == V4L2_PIX_FMT_HEVC ||
 				codec == V4L2_PIX_FMT_VP9) ? 32 : 16;
 		vote_data[i].b_frames_enabled = false;
-			//msm_cvp_comm_g_ctrl_for_id(inst,
-				//V4L2_CID_MPEG_VIDC_VIDEO_NUM_B_FRAMES) != 0;
 
 		vote_data[i].fps = msm_cvp_get_fps(inst);
 		if (inst->session_type == MSM_CVP_ENCODER) {
@@ -306,21 +191,14 @@
 
 		if (msm_cvp_comm_get_stream_output_mode(inst) ==
 				HAL_VIDEO_DECODER_PRIMARY) {
-			vote_data[i].color_formats[0] =
-				msm_cvp_comm_get_hal_uncompressed(
-				inst->clk_data.opb_fourcc);
+			vote_data[i].color_formats[0] = HAL_UNUSED_COLOR;
 			vote_data[i].num_formats = 1;
 		} else {
-			vote_data[i].color_formats[0] =
-				msm_cvp_comm_get_hal_uncompressed(
-				inst->clk_data.dpb_fourcc);
-			vote_data[i].color_formats[1] =
-				msm_cvp_comm_get_hal_uncompressed(
-				inst->clk_data.opb_fourcc);
+			vote_data[i].color_formats[0] = HAL_UNUSED_COLOR;
+			vote_data[i].color_formats[1] = HAL_UNUSED_COLOR;
 			vote_data[i].num_formats = 2;
 		}
 		vote_data[i].work_mode = inst->clk_data.work_mode;
-		fill_dynamic_stats(inst, &vote_data[i]);
 
 		if (core->resources.sys_cache_res_set)
 			vote_data[i].use_sys_cache = true;
@@ -455,21 +333,6 @@
 	mutex_unlock(&inst->freqs.lock);
 }
 
-void msm_cvp_clear_freq_entry(struct msm_cvp_inst *inst,
-	u32 device_addr)
-{
-	struct cvp_freq_data *temp, *next;
-
-	mutex_lock(&inst->freqs.lock);
-	list_for_each_entry_safe(temp, next, &inst->freqs.list, list) {
-		if (temp->device_addr == device_addr)
-			temp->freq = 0;
-	}
-	mutex_unlock(&inst->freqs.lock);
-
-	inst->clk_data.buffer_counter++;
-}
-
 static unsigned long msm_cvp_max_freq(struct msm_cvp_core *core)
 {
 	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
@@ -494,122 +357,6 @@
 	mutex_unlock(&inst->freqs.lock);
 }
 
-void msm_cvp_comm_free_input_cr_table(struct msm_cvp_inst *inst)
-{
-	struct cvp_input_cr_data *temp, *next;
-
-	mutex_lock(&inst->input_crs.lock);
-	list_for_each_entry_safe(temp, next, &inst->input_crs.list, list) {
-		list_del(&temp->list);
-		kfree(temp);
-	}
-	INIT_LIST_HEAD(&inst->input_crs.list);
-	mutex_unlock(&inst->input_crs.lock);
-}
-
-void msm_cvp_comm_update_input_cr(struct msm_cvp_inst *inst,
-	u32 index, u32 cr)
-{
-	struct cvp_input_cr_data *temp, *next;
-	bool found = false;
-
-	mutex_lock(&inst->input_crs.lock);
-	list_for_each_entry_safe(temp, next, &inst->input_crs.list, list) {
-		if (temp->index == index) {
-			temp->input_cr = cr;
-			found = true;
-			break;
-		}
-	}
-
-	if (!found) {
-		temp = kzalloc(sizeof(*temp), GFP_KERNEL);
-		if (!temp)  {
-			dprintk(CVP_WARN, "%s: malloc failure.\n", __func__);
-			goto exit;
-		}
-		temp->index = index;
-		temp->input_cr = cr;
-		list_add_tail(&temp->list, &inst->input_crs.list);
-	}
-exit:
-	mutex_unlock(&inst->input_crs.lock);
-}
-
-static unsigned long msm_cvp_calc_freq_ar50(struct msm_cvp_inst *inst,
-	u32 filled_len)
-{
-	unsigned long freq = 0;
-	unsigned long vpp_cycles = 0, vsp_cycles = 0;
-	u32 vpp_cycles_per_mb;
-	u32 mbs_per_second;
-	struct msm_cvp_core *core = NULL;
-	int i = 0;
-	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
-	u64 rate = 0, fps;
-	struct clock_data *dcvs = NULL;
-
-	core = inst->core;
-	dcvs = &inst->clk_data;
-
-	mbs_per_second = msm_cvp_comm_get_inst_load_per_core(inst,
-		LOAD_CALC_NO_QUIRKS);
-
-	fps = msm_cvp_get_fps(inst);
-
-	/*
-	 * Calculate vpp, vsp cycles separately for encoder and decoder.
-	 * Even though, most part is common now, in future it may change
-	 * between them.
-	 */
-
-	if (inst->session_type == MSM_CVP_ENCODER) {
-		vpp_cycles_per_mb = inst->flags & CVP_LOW_POWER ?
-			inst->clk_data.entry->low_power_cycles :
-			inst->clk_data.entry->vpp_cycles;
-
-		vpp_cycles = mbs_per_second * vpp_cycles_per_mb;
-
-		vsp_cycles = mbs_per_second * inst->clk_data.entry->vsp_cycles;
-
-		/* 10 / 7 is overhead factor */
-		vsp_cycles += (inst->clk_data.bitrate * 10) / 7;
-	} else if (inst->session_type == MSM_CVP_DECODER) {
-		vpp_cycles = mbs_per_second * inst->clk_data.entry->vpp_cycles;
-
-		vsp_cycles = mbs_per_second * inst->clk_data.entry->vsp_cycles;
-		/* 10 / 7 is overhead factor */
-		vsp_cycles += ((fps * filled_len * 8) * 10) / 7;
-
-	} else {
-		dprintk(CVP_ERR, "Unknown session type = %s\n", __func__);
-		return msm_cvp_max_freq(inst->core);
-	}
-
-	freq = max(vpp_cycles, vsp_cycles);
-
-	dprintk(CVP_DBG, "Update DCVS Load\n");
-	allowed_clks_tbl = core->resources.allowed_clks_tbl;
-	for (i = core->resources.allowed_clks_tbl_size - 1; i >= 0; i--) {
-		rate = allowed_clks_tbl[i].clock_rate;
-		if (rate >= freq)
-			break;
-	}
-
-	dcvs->load_norm = rate;
-	dcvs->load_low = i < (core->resources.allowed_clks_tbl_size - 1) ?
-		allowed_clks_tbl[i+1].clock_rate : dcvs->load_norm;
-	dcvs->load_high = i > 0 ? allowed_clks_tbl[i-1].clock_rate :
-		dcvs->load_norm;
-
-	msm_dcvs_print_dcvs_stats(dcvs);
-
-	dprintk(CVP_PROF, "%s Inst %pK : Filled Len = %d Freq = %lu\n",
-		__func__, inst, filled_len, freq);
-
-	return freq;
-}
-
 static unsigned long msm_cvp_calc_freq(struct msm_cvp_inst *inst,
 	u32 filled_len)
 {
@@ -717,7 +464,7 @@
 	 * keep checking from lowest to highest rate until
 	 * table rate >= requested rate
 	 */
-	for (i = core->resources.allowed_clks_tbl_size - 1; i >= 0; i--) {
+	for (i = 0; i < core->resources.allowed_clks_tbl_size;  i++) {
 		rate = allowed_clks_tbl[i].clock_rate;
 		if (rate >= freq_core_max)
 			break;
@@ -744,69 +491,6 @@
 	return rc;
 }
 
-int msm_cvp_validate_operating_rate(struct msm_cvp_inst *inst,
-	u32 operating_rate)
-{
-	struct msm_cvp_inst *temp;
-	struct msm_cvp_core *core;
-	unsigned long max_freq, freq_left, ops_left, load, cycles, freq = 0;
-	unsigned long mbs_per_second;
-	int rc = 0;
-	u32 curr_operating_rate = 0;
-
-	if (!inst || !inst->core) {
-		dprintk(CVP_ERR, "%s Invalid args\n", __func__);
-		return -EINVAL;
-	}
-	core = inst->core;
-	curr_operating_rate = inst->clk_data.operating_rate >> 16;
-
-	mutex_lock(&core->lock);
-	max_freq = msm_cvp_max_freq(core);
-	list_for_each_entry(temp, &core->instances, list) {
-		if (temp == inst ||
-				temp->state < MSM_CVP_START_DONE ||
-				temp->state >= MSM_CVP_RELEASE_RESOURCES_DONE)
-			continue;
-
-		freq += temp->clk_data.min_freq;
-	}
-
-	freq_left = max_freq - freq;
-
-	mbs_per_second = msm_cvp_comm_get_inst_load_per_core(inst,
-		LOAD_CALC_NO_QUIRKS);
-
-	cycles = inst->clk_data.entry->vpp_cycles;
-	if (inst->session_type == MSM_CVP_ENCODER)
-		cycles = inst->flags & CVP_LOW_POWER ?
-			inst->clk_data.entry->low_power_cycles :
-			cycles;
-
-	load = cycles * mbs_per_second;
-
-	ops_left = load ? (freq_left / load) : 0;
-
-	operating_rate = operating_rate >> 16;
-
-	if ((curr_operating_rate * (1 + ops_left)) >= operating_rate ||
-			msm_cvp_clock_voting ||
-			inst->clk_data.buffer_counter < DCVS_FTB_WINDOW) {
-		dprintk(CVP_DBG,
-			"Requestd operating rate is valid %u\n",
-			operating_rate);
-		rc = 0;
-	} else {
-		dprintk(CVP_DBG,
-			"Current load is high for requested settings. Cannot set operating rate to %u\n",
-			operating_rate);
-		rc = -EINVAL;
-	}
-	mutex_unlock(&core->lock);
-
-	return rc;
-}
-
 int msm_cvp_comm_scale_clocks(struct msm_cvp_inst *inst)
 {
 	struct msm_video_buffer *temp, *next;
@@ -821,8 +505,10 @@
 		return -EINVAL;
 	}
 
-	if (!inst->core->resources.bus_devfreq_on)
+	if (!inst->core->resources.bus_devfreq_on) {
+		dprintk(CVP_WARN, "%s is not enabled for CVP!\n", __func__);
 		return 0;
+	}
 
 	mutex_lock(&inst->registeredbufs.lock);
 	list_for_each_entry_safe(temp, next, &inst->registeredbufs.list, list) {
@@ -911,9 +597,6 @@
 
 int msm_cvp_comm_init_clocks_and_bus_data(struct msm_cvp_inst *inst)
 {
-	int rc = 0, j = 0;
-	int fourcc, count;
-
 	if (!inst || !inst->core) {
 		dprintk(CVP_ERR, "%s Invalid args: Inst = %pK\n",
 				__func__, inst);
@@ -925,115 +608,7 @@
 		return 0;
 	}
 
-	count = inst->core->resources.codec_data_count;
-	fourcc = inst->session_type == MSM_CVP_DECODER ?
-		inst->fmts[OUTPUT_PORT].fourcc :
-		inst->fmts[CAPTURE_PORT].fourcc;
-
-	for (j = 0; j < count; j++) {
-		if (inst->core->resources.codec_data[j].session_type ==
-				inst->session_type &&
-				inst->core->resources.codec_data[j].fourcc ==
-				fourcc) {
-			inst->clk_data.entry =
-				&inst->core->resources.codec_data[j];
-			break;
-		}
-	}
-
-	if (!inst->clk_data.entry) {
-		dprintk(CVP_ERR, "%s No match found\n", __func__);
-		rc = -EINVAL;
-	}
-
-	return rc;
-}
-
-void msm_cvp_clock_data_reset(struct msm_cvp_inst *inst)
-{
-	struct msm_cvp_core *core;
-	int i = 0, rc = 0;
-	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
-	u64 total_freq = 0, rate = 0, load;
-	int cycles;
-	struct clock_data *dcvs;
-	struct hal_buffer_requirements *buf_req;
-
-	dprintk(CVP_DBG, "Init DCVS Load\n");
-
-	if (!inst || !inst->core) {
-		dprintk(CVP_ERR, "%s Invalid args: Inst = %pK\n",
-			__func__, inst);
-		return;
-	}
-
-	core = inst->core;
-	dcvs = &inst->clk_data;
-	load = msm_cvp_comm_get_inst_load_per_core(inst, LOAD_CALC_NO_QUIRKS);
-	cycles = inst->clk_data.entry->vpp_cycles;
-	allowed_clks_tbl = core->resources.allowed_clks_tbl;
-	if (inst->session_type == MSM_CVP_ENCODER) {
-		cycles = inst->flags & CVP_LOW_POWER ?
-			inst->clk_data.entry->low_power_cycles :
-			cycles;
-
-		dcvs->buffer_type = HAL_BUFFER_INPUT;
-		dcvs->min_threshold =
-			msm_cvp_get_extra_buff_count(inst, HAL_BUFFER_INPUT);
-		buf_req = get_cvp_buff_req_buffer(inst, HAL_BUFFER_INPUT);
-		if (buf_req)
-			dcvs->max_threshold =
-				buf_req->buffer_count_actual -
-				buf_req->buffer_count_min_host + 2;
-		else
-			dprintk(CVP_ERR,
-				"%s: No bufer req for buffer type %x\n",
-				__func__, HAL_BUFFER_INPUT);
-
-	} else if (inst->session_type == MSM_CVP_DECODER) {
-		dcvs->buffer_type = msm_cvp_comm_get_hal_output_buffer(inst);
-		buf_req = get_cvp_buff_req_buffer(inst, dcvs->buffer_type);
-		if (buf_req)
-			dcvs->max_threshold =
-				buf_req->buffer_count_actual -
-				buf_req->buffer_count_min_host + 2;
-		else
-			dprintk(CVP_ERR,
-				"%s: No bufer req for buffer type %x\n",
-				__func__, dcvs->buffer_type);
-
-		dcvs->min_threshold =
-			msm_cvp_get_extra_buff_count(inst, dcvs->buffer_type);
-	} else {
-		dprintk(CVP_ERR, "%s: invalid session type %#x\n",
-			__func__, inst->session_type);
-		return;
-	}
-
-	total_freq = cycles * load;
-
-	for (i = core->resources.allowed_clks_tbl_size - 1; i >= 0; i--) {
-		rate = allowed_clks_tbl[i].clock_rate;
-		if (rate >= total_freq)
-			break;
-	}
-
-	dcvs->load = dcvs->load_norm = rate;
-
-	dcvs->load_low = i < (core->resources.allowed_clks_tbl_size - 1) ?
-		allowed_clks_tbl[i+1].clock_rate : dcvs->load_norm;
-	dcvs->load_high = i > 0 ? allowed_clks_tbl[i-1].clock_rate :
-		dcvs->load_norm;
-
-	inst->clk_data.buffer_counter = 0;
-
-	msm_dcvs_print_dcvs_stats(dcvs);
-
-	rc = msm_cvp_comm_scale_clocks_and_bus(inst);
-
-	if (rc)
-		dprintk(CVP_ERR, "%s Failed to scale Clocks and Bus\n",
-			__func__);
+	return 0;
 }
 
 static bool is_output_buffer(struct msm_cvp_inst *inst,
@@ -1093,340 +668,14 @@
 	return -EINVAL;
 }
 
-static int msm_cvp_decide_work_mode_ar50(struct msm_cvp_inst *inst)
-{
-	int rc = 0;
-	struct hfi_device *hdev;
-	struct hal_video_work_mode pdata;
-	struct hal_enable latency;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(CVP_ERR,
-			"%s Invalid args: Inst = %pK\n",
-			__func__, inst);
-		return -EINVAL;
-	}
-
-	hdev = inst->core->device;
-	if (inst->clk_data.low_latency_mode) {
-		pdata.video_work_mode = CVP_WORK_MODE_1;
-		goto decision_done;
-	}
-
-	if (inst->session_type == MSM_CVP_DECODER) {
-		pdata.video_work_mode = CVP_WORK_MODE_2;
-		switch (inst->fmts[OUTPUT_PORT].fourcc) {
-		case V4L2_PIX_FMT_MPEG2:
-			pdata.video_work_mode = CVP_WORK_MODE_1;
-			break;
-		case V4L2_PIX_FMT_H264:
-		case V4L2_PIX_FMT_HEVC:
-			if (inst->prop.height[OUTPUT_PORT] *
-				inst->prop.width[OUTPUT_PORT] <=
-					1280 * 720)
-				pdata.video_work_mode = CVP_WORK_MODE_1;
-			break;
-		}
-	} else if (inst->session_type == MSM_CVP_ENCODER)
-		pdata.video_work_mode = CVP_WORK_MODE_1;
-	else {
-		return -EINVAL;
-	}
-
-decision_done:
-
-	inst->clk_data.work_mode = pdata.video_work_mode;
-	rc = call_hfi_op(hdev, session_set_property,
-			(void *)inst->session, HAL_PARAM_VIDEO_WORK_MODE,
-			(void *)&pdata);
-	if (rc)
-		dprintk(CVP_WARN,
-				" Failed to configure Work Mode %pK\n", inst);
-
-	/* For WORK_MODE_1, set Low Latency mode by default to HW. */
-
-	if (inst->session_type == MSM_CVP_ENCODER &&
-			inst->clk_data.work_mode == CVP_WORK_MODE_1) {
-		latency.enable = true;
-		rc = call_hfi_op(hdev, session_set_property,
-			(void *)inst->session, HAL_PARAM_VENC_LOW_LATENCY,
-			(void *)&latency);
-	}
-
-	rc = msm_cvp_comm_scale_clocks_and_bus(inst);
-
-	return rc;
-}
-
 int msm_cvp_decide_work_mode(struct msm_cvp_inst *inst)
 {
 	return -EINVAL;
 }
 
-static inline int msm_cvp_power_save_mode_enable(struct msm_cvp_inst *inst,
-	bool enable)
-{
-	u32 rc = 0, mbs_per_frame;
-	u32 prop_id = 0;
-	void *pdata = NULL;
-	struct hfi_device *hdev = NULL;
-	enum hal_perf_mode venc_mode;
-	u32 rc_mode = 0;
-
-	hdev = inst->core->device;
-	if (inst->session_type != MSM_CVP_ENCODER) {
-		dprintk(CVP_DBG,
-			"%s : Not an encoder session. Nothing to do\n",
-				__func__);
-		return 0;
-	}
-	mbs_per_frame = msm_cvp_get_mbs_per_frame(inst);
-	if (mbs_per_frame > inst->core->resources.max_hq_mbs_per_frame ||
-		msm_cvp_get_fps(inst) > inst->core->resources.max_hq_fps) {
-		enable = true;
-	}
-	/* Power saving always disabled for CQ RC mode. */
-	rc_mode = msm_cvp_comm_g_ctrl_for_id(inst,
-		V4L2_CID_MPEG_VIDEO_BITRATE_MODE);
-	if (rc_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CQ)
-		enable = false;
-
-	prop_id = HAL_CONFIG_VENC_PERF_MODE;
-	venc_mode = enable ? HAL_PERF_MODE_POWER_SAVE :
-		HAL_PERF_MODE_POWER_MAX_QUALITY;
-	pdata = &venc_mode;
-	rc = call_hfi_op(hdev, session_set_property,
-			(void *)inst->session, prop_id, pdata);
-	if (rc) {
-		dprintk(CVP_ERR,
-			"%s: Failed to set power save mode for inst: %pK\n",
-			__func__, inst);
-		goto fail_power_mode_set;
-	}
-	inst->flags = enable ?
-		inst->flags | CVP_LOW_POWER :
-		inst->flags & ~CVP_LOW_POWER;
-
-	dprintk(CVP_PROF,
-		"Power Save Mode for inst: %pK Enable = %d\n", inst, enable);
-fail_power_mode_set:
-	return rc;
-}
-
-static int msm_cvp_move_core_to_power_save_mode(struct msm_cvp_core *core,
-	u32 core_id)
-{
-	struct msm_cvp_inst *inst = NULL;
-
-	dprintk(CVP_PROF, "Core %d : Moving all inst to LP mode\n", core_id);
-	mutex_lock(&core->lock);
-	list_for_each_entry(inst, &core->instances, list) {
-		if (inst->clk_data.core_id == core_id &&
-			inst->session_type == MSM_CVP_ENCODER)
-			msm_cvp_power_save_mode_enable(inst, true);
-	}
-	mutex_unlock(&core->lock);
-
-	return 0;
-}
-
-static u32 get_core_load(struct msm_cvp_core *core,
-	u32 core_id, bool lp_mode, bool real_time)
-{
-	struct msm_cvp_inst *inst = NULL;
-	u32 current_inst_mbs_per_sec = 0, load = 0;
-	bool real_time_mode = false;
-
-	mutex_lock(&core->lock);
-	list_for_each_entry(inst, &core->instances, list) {
-		u32 cycles, lp_cycles;
-
-		real_time_mode = inst->flags & CVP_REALTIME ? true : false;
-		if (!(inst->clk_data.core_id & core_id))
-			continue;
-		if (real_time_mode != real_time)
-			continue;
-		if (inst->session_type == MSM_CVP_DECODER) {
-			cycles = lp_cycles = inst->clk_data.entry->vpp_cycles;
-		} else if (inst->session_type == MSM_CVP_ENCODER) {
-			lp_mode |= inst->flags & CVP_LOW_POWER;
-			cycles = lp_mode ?
-				inst->clk_data.entry->low_power_cycles :
-				inst->clk_data.entry->vpp_cycles;
-		} else {
-			continue;
-		}
-		current_inst_mbs_per_sec =
-			msm_cvp_comm_get_inst_load_per_core(inst,
-			LOAD_CALC_NO_QUIRKS);
-		load += current_inst_mbs_per_sec * cycles /
-			inst->clk_data.work_route;
-	}
-	mutex_unlock(&core->lock);
-
-	return load;
-}
-
-int msm_cvp_decide_core_and_power_mode(
-	struct msm_cvp_inst *inst)
-{
-	int rc = 0, hier_mode = 0;
-	struct hfi_device *hdev;
-	struct msm_cvp_core *core;
-	unsigned long max_freq, lp_cycles = 0;
-	struct hal_videocores_usage_info core_info;
-	u32 core0_load = 0, core1_load = 0, core0_lp_load = 0,
-		core1_lp_load = 0;
-	u32 current_inst_load = 0, current_inst_lp_load = 0,
-		min_load = 0, min_lp_load = 0;
-	u32 min_core_id, min_lp_core_id;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(CVP_ERR,
-			"%s Invalid args: Inst = %pK\n",
-			__func__, inst);
-		return -EINVAL;
-	}
-
-	core = inst->core;
-	hdev = core->device;
-	max_freq = msm_cvp_max_freq(inst->core);
-	inst->clk_data.core_id = 0;
-
-	core0_load = get_core_load(core, CVP_CORE_ID_1, false, true);
-	core1_load = get_core_load(core, CVP_CORE_ID_2, false, true);
-	core0_lp_load = get_core_load(core, CVP_CORE_ID_1, true, true);
-	core1_lp_load = get_core_load(core, CVP_CORE_ID_2, true, true);
-
-	min_load = min(core0_load, core1_load);
-	min_core_id = core0_load < core1_load ?
-		CVP_CORE_ID_1 : CVP_CORE_ID_2;
-	min_lp_load = min(core0_lp_load, core1_lp_load);
-	min_lp_core_id = core0_lp_load < core1_lp_load ?
-		CVP_CORE_ID_1 : CVP_CORE_ID_2;
-
-	lp_cycles = inst->session_type == MSM_CVP_ENCODER ?
-			inst->clk_data.entry->low_power_cycles :
-			inst->clk_data.entry->vpp_cycles;
-	/*
-	 * Incase there is only 1 core enabled, mark it as the core
-	 * with min load. This ensures that this core is selected and
-	 * video session is set to run on the enabled core.
-	 */
-	if (inst->capability.max_video_cores.max <= CVP_CORE_ID_1) {
-		min_core_id = min_lp_core_id = CVP_CORE_ID_1;
-		min_load = core0_load;
-		min_lp_load = core0_lp_load;
-	}
-
-	current_inst_load =
-		(msm_cvp_comm_get_inst_load(inst, LOAD_CALC_NO_QUIRKS) *
-		inst->clk_data.entry->vpp_cycles)/inst->clk_data.work_route;
-
-	current_inst_lp_load = (msm_cvp_comm_get_inst_load(inst,
-		LOAD_CALC_NO_QUIRKS) * lp_cycles)/inst->clk_data.work_route;
-
-	dprintk(CVP_DBG, "Core 0 RT Load = %d Core 1 RT Load = %d\n",
-		 core0_load, core1_load);
-	dprintk(CVP_DBG, "Core 0 RT LP Load = %d\n",
-		core0_lp_load);
-	dprintk(CVP_DBG, "Core 1 RT LP Load = %d\n",
-		core1_lp_load);
-	dprintk(CVP_DBG, "Max Load = %lu\n", max_freq);
-	dprintk(CVP_DBG, "Current Load = %d Current LP Load = %d\n",
-		current_inst_load, current_inst_lp_load);
-
-	/* Hier mode can be normal HP or Hybrid HP. */
-
-	hier_mode = 0; // msm_cvp_comm_g_ctrl_for_id(inst,
-		// V4L2_CID_MPEG_VIDC_VIDEO_HIER_P_NUM_LAYERS);
-	hier_mode |= 0; //msm_cvp_comm_g_ctrl_for_id(inst,
-		//V4L2_CID_MPEG_VIDC_VIDEO_HYBRID_HIERP_MODE);
-
-	if (current_inst_load + min_load < max_freq) {
-		inst->clk_data.core_id = min_core_id;
-		dprintk(CVP_DBG,
-			"Selected normally : Core ID = %d\n",
-				inst->clk_data.core_id);
-		msm_cvp_power_save_mode_enable(inst, false);
-	} else if (current_inst_lp_load + min_load < max_freq) {
-		/* Move current instance to LP and return */
-		inst->clk_data.core_id = min_core_id;
-		dprintk(CVP_DBG,
-			"Selected by moving current to LP : Core ID = %d\n",
-				inst->clk_data.core_id);
-		msm_cvp_power_save_mode_enable(inst, true);
-
-	} else if (current_inst_lp_load + min_lp_load < max_freq) {
-		/* Move all instances to LP mode and return */
-		inst->clk_data.core_id = min_lp_core_id;
-		dprintk(CVP_DBG,
-			"Moved all inst's to LP: Core ID = %d\n",
-				inst->clk_data.core_id);
-		msm_cvp_move_core_to_power_save_mode(core, min_lp_core_id);
-	} else {
-		rc = -EINVAL;
-		dprintk(CVP_ERR,
-			"Sorry ... Core Can't support this load\n");
-		return rc;
-	}
-
-	core_info.video_core_enable_mask = inst->clk_data.core_id;
-	dprintk(CVP_DBG,
-		"Core Enable Mask %d\n", core_info.video_core_enable_mask);
-
-	rc = call_hfi_op(hdev, session_set_property,
-			(void *)inst->session,
-			HAL_PARAM_VIDEO_CORES_USAGE, &core_info);
-	if (rc)
-		dprintk(CVP_WARN,
-				" Failed to configure CORE ID %pK\n", inst);
-
-	rc = msm_cvp_comm_scale_clocks_and_bus(inst);
-
-	msm_cvp_print_core_status(core, CVP_CORE_ID_1);
-	msm_cvp_print_core_status(core, CVP_CORE_ID_2);
-
-	return rc;
-}
-
 void msm_cvp_init_core_clk_ops(struct msm_cvp_core *core)
 {
 	if (!core)
 		return;
-
-	if (core->platform_data->vpu_ver == VPU_VERSION_4)
-		core->core_ops = &cvp_core_ops_vpu4;
-	else
-		core->core_ops = &cvp_core_ops_vpu5;
-}
-
-void msm_cvp_print_core_status(struct msm_cvp_core *core, u32 core_id)
-{
-	struct msm_cvp_inst *inst = NULL;
-
-	dprintk(CVP_PROF, "Instances running on core %u", core_id);
-	mutex_lock(&core->lock);
-	list_for_each_entry(inst, &core->instances, list) {
-
-		if ((inst->clk_data.core_id != core_id) &&
-			(inst->clk_data.core_id != CVP_CORE_ID_3))
-			continue;
-
-		dprintk(CVP_PROF,
-			"inst %pK (%4ux%4u) to (%4ux%4u) %3u %s %s %s %s %lu\n",
-			inst,
-			inst->prop.width[OUTPUT_PORT],
-			inst->prop.height[OUTPUT_PORT],
-			inst->prop.width[CAPTURE_PORT],
-			inst->prop.height[CAPTURE_PORT],
-			inst->prop.fps,
-			inst->session_type == MSM_CVP_ENCODER ? "ENC" : "DEC",
-			inst->clk_data.work_mode == CVP_WORK_MODE_1 ?
-				"WORK_MODE_1" : "WORK_MODE_2",
-			inst->flags & CVP_LOW_POWER ? "LP" : "HQ",
-			inst->flags & CVP_REALTIME ? "RealTime" : "NonRTime",
-			inst->clk_data.min_freq);
-	}
-	mutex_unlock(&core->lock);
+	core->core_ops = &cvp_core_ops_vpu5;
 }
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_clocks.h b/drivers/media/platform/msm/cvp/msm_cvp_clocks.h
index a6f6e5f..9f20946 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_clocks.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp_clocks.h
@@ -14,28 +14,15 @@
 /* extra o/p buffers in case of decoder dcvs */
 #define DCVS_DEC_EXTRA_OUTPUT_BUFFERS 4
 
-void msm_cvp_clock_data_reset(struct msm_cvp_inst *inst);
-int msm_cvp_validate_operating_rate(struct msm_cvp_inst *inst,
-	u32 operating_rate);
 int msm_cvp_get_extra_buff_count(struct msm_cvp_inst *inst,
 	enum hal_buffer buffer_type);
 int msm_cvp_set_clocks(struct msm_cvp_core *core);
 int msm_cvp_comm_vote_bus(struct msm_cvp_core *core);
 int msm_cvp_dcvs_try_enable(struct msm_cvp_inst *inst);
-int msm_cvp_get_mbs_per_frame(struct msm_cvp_inst *inst);
 int msm_cvp_comm_scale_clocks_and_bus(struct msm_cvp_inst *inst);
 int msm_cvp_comm_init_clocks_and_bus_data(struct msm_cvp_inst *inst);
 void msm_cvp_comm_free_freq_table(struct msm_cvp_inst *inst);
 int msm_cvp_decide_work_route(struct msm_cvp_inst *inst);
 int msm_cvp_decide_work_mode(struct msm_cvp_inst *inst);
-int msm_cvp_decide_core_and_power_mode(struct msm_cvp_inst *inst);
-void msm_cvp_print_core_status(struct msm_cvp_core *core, u32 core_id);
-void msm_cvp_clear_freq_entry(struct msm_cvp_inst *inst,
-	u32 device_addr);
-void msm_cvp_comm_free_input_cr_table(struct msm_cvp_inst *inst);
-void msm_cvp_comm_update_input_cr(struct msm_cvp_inst *inst, u32 index,
-	u32 cr);
-void cvp_update_recon_stats(struct msm_cvp_inst *inst,
-	struct recon_stats_type *recon_stats);
 void msm_cvp_init_core_clk_ops(struct msm_cvp_core *core);
 #endif
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_common.c b/drivers/media/platform/msm/cvp/msm_cvp_common.c
index 4f68dad..dff1615 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_common.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_common.c
@@ -20,236 +20,7 @@
 	(__p >= __d)\
 )
 
-#define V4L2_EVENT_SEQ_CHANGED_SUFFICIENT \
-		V4L2_EVENT_MSM_CVP_PORT_SETTINGS_CHANGED_SUFFICIENT
-#define V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT \
-		V4L2_EVENT_MSM_CVP_PORT_SETTINGS_CHANGED_INSUFFICIENT
-#define V4L2_EVENT_RELEASE_BUFFER_REFERENCE \
-		V4L2_EVENT_MSM_CVP_RELEASE_BUFFER_REFERENCE
-#define L_MODE V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY
-
-const char *const mpeg_video_cvp_extradata[] = {
-	"Extradata none",
-	"Extradata MB Quantization",
-	"Extradata Interlace Video",
-	"Reserved",
-	"Reserved",
-	"Extradata timestamp",
-	"Extradata S3D Frame Packing",
-	"Extradata Frame Rate",
-	"Extradata Panscan Window",
-	"Extradata Recovery point SEI",
-	"Extradata Multislice info",
-	"Extradata number of concealed MB",
-	"Extradata metadata filler",
-	"Extradata input crop",
-	"Extradata digital zoom",
-	"Extradata aspect ratio",
-	"Extradata mpeg2 seqdisp",
-	"Extradata stream userdata",
-	"Extradata frame QP",
-	"Extradata frame bits info",
-	"Extradata LTR",
-	"Extradata macroblock metadata",
-	"Extradata VQZip SEI",
-	"Extradata HDR10+ Metadata",
-	"Extradata ROI QP",
-	"Extradata output crop",
-	"Extradata display colour SEI",
-	"Extradata light level SEI",
-	"Extradata PQ Info",
-	"Extradata display VUI",
-	"Extradata vpx color space",
-	"Extradata UBWC CR stats info",
-};
-
 static void handle_session_error(enum hal_command_response cmd, void *data);
-static void msm_cvp_print_running_insts(struct msm_cvp_core *core);
-
-int msm_cvp_comm_g_ctrl_for_id(struct msm_cvp_inst *inst, int id)
-{
-	int rc = 0;
-	struct v4l2_control ctrl = {
-		.id = id,
-	};
-
-	rc = msm_comm_g_ctrl(inst, &ctrl);
-	return rc ? rc : ctrl.value;
-}
-
-static struct v4l2_ctrl **get_super_cluster(struct msm_cvp_inst *inst,
-				int num_ctrls)
-{
-	int c = 0;
-	struct v4l2_ctrl **cluster = kmalloc(sizeof(struct v4l2_ctrl *) *
-			num_ctrls, GFP_KERNEL);
-
-	if (!cluster || !inst) {
-		kfree(cluster);
-		return NULL;
-	}
-
-	for (c = 0; c < num_ctrls; c++)
-		cluster[c] =  inst->ctrls[c];
-
-	return cluster;
-}
-
-int msm_cvp_comm_hal_to_v4l2(int id, int value)
-{
-	dprintk(CVP_WARN, "Unknown control (%x, %d)\n", id, value);
-	return -EINVAL;
-}
-
-int msm_cvp_comm_get_v4l2_profile(int fourcc, int profile)
-{
-	dprintk(CVP_DBG, "%s : Begin\n", __func__);
-	return -EINVAL;
-}
-
-int msm_cvp_comm_get_v4l2_level(int fourcc, int level)
-{
-	switch (fourcc) {
-	default:
-		dprintk(CVP_WARN, "Unknown codec id %x\n", fourcc);
-		return 0;
-	}
-}
-
-int msm_cvp_comm_ctrl_init(struct msm_cvp_inst *inst,
-		struct msm_cvp_ctrl *drv_ctrls, u32 num_ctrls,
-		const struct v4l2_ctrl_ops *ctrl_ops)
-{
-	int idx = 0;
-	struct v4l2_ctrl_config ctrl_cfg = {0};
-	int ret_val = 0;
-
-	if (!inst || !drv_ctrls || !ctrl_ops || !num_ctrls) {
-		dprintk(CVP_ERR, "%s - invalid input\n", __func__);
-		return -EINVAL;
-	}
-
-	inst->ctrls = kcalloc(num_ctrls, sizeof(struct v4l2_ctrl *),
-				GFP_KERNEL);
-	if (!inst->ctrls) {
-		dprintk(CVP_ERR, "%s - failed to allocate ctrl\n", __func__);
-		return -ENOMEM;
-	}
-
-	ret_val = v4l2_ctrl_handler_init(&inst->ctrl_handler, num_ctrls);
-
-	if (ret_val) {
-		dprintk(CVP_ERR, "CTRL ERR: Control handler init failed, %d\n",
-				inst->ctrl_handler.error);
-		return ret_val;
-	}
-
-	for (; idx < num_ctrls; idx++) {
-		struct v4l2_ctrl *ctrl = NULL;
-
-		if (IS_PRIV_CTRL(drv_ctrls[idx].id)) {
-			/*add private control*/
-			ctrl_cfg.def = drv_ctrls[idx].default_value;
-			ctrl_cfg.flags = 0;
-			ctrl_cfg.id = drv_ctrls[idx].id;
-			ctrl_cfg.max = drv_ctrls[idx].maximum;
-			ctrl_cfg.min = drv_ctrls[idx].minimum;
-			ctrl_cfg.menu_skip_mask =
-				drv_ctrls[idx].menu_skip_mask;
-			ctrl_cfg.name = drv_ctrls[idx].name;
-			ctrl_cfg.ops = ctrl_ops;
-			ctrl_cfg.step = drv_ctrls[idx].step;
-			ctrl_cfg.type = drv_ctrls[idx].type;
-			ctrl_cfg.qmenu = drv_ctrls[idx].qmenu;
-
-			ctrl = v4l2_ctrl_new_custom(&inst->ctrl_handler,
-					&ctrl_cfg, NULL);
-		} else {
-			if (drv_ctrls[idx].type == V4L2_CTRL_TYPE_MENU) {
-				ctrl = v4l2_ctrl_new_std_menu(
-					&inst->ctrl_handler,
-					ctrl_ops,
-					drv_ctrls[idx].id,
-					drv_ctrls[idx].maximum,
-					drv_ctrls[idx].menu_skip_mask,
-					drv_ctrls[idx].default_value);
-			} else {
-				ctrl = v4l2_ctrl_new_std(&inst->ctrl_handler,
-					ctrl_ops,
-					drv_ctrls[idx].id,
-					drv_ctrls[idx].minimum,
-					drv_ctrls[idx].maximum,
-					drv_ctrls[idx].step,
-					drv_ctrls[idx].default_value);
-			}
-		}
-
-		if (!ctrl) {
-			dprintk(CVP_ERR, "%s - invalid ctrl %s\n", __func__,
-				 drv_ctrls[idx].name);
-			return -EINVAL;
-		}
-
-		ret_val = inst->ctrl_handler.error;
-		if (ret_val) {
-			dprintk(CVP_ERR,
-				"Error adding ctrl (%s) to ctrl handle, %d\n",
-				drv_ctrls[idx].name, inst->ctrl_handler.error);
-			return ret_val;
-		}
-
-		ctrl->flags |= drv_ctrls[idx].flags;
-		inst->ctrls[idx] = ctrl;
-	}
-
-	/* Construct a super cluster of all controls */
-	inst->cluster = get_super_cluster(inst, num_ctrls);
-	if (!inst->cluster) {
-		dprintk(CVP_WARN,
-			"Failed to setup super cluster\n");
-		return -EINVAL;
-	}
-
-	v4l2_ctrl_cluster(num_ctrls, inst->cluster);
-
-	return ret_val;
-}
-
-int msm_cvp_comm_ctrl_deinit(struct msm_cvp_inst *inst)
-{
-	if (!inst) {
-		dprintk(CVP_ERR, "%s invalid parameters\n", __func__);
-		return -EINVAL;
-	}
-
-	kfree(inst->ctrls);
-	kfree(inst->cluster);
-	v4l2_ctrl_handler_free(&inst->ctrl_handler);
-
-	return 0;
-}
-
-int msm_cvp_comm_set_stream_output_mode(struct msm_cvp_inst *inst,
-		enum multi_stream mode)
-{
-	if (!inst) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-
-	if (!is_decode_session(inst)) {
-		dprintk(CVP_DBG, "%s: not a decode session %x\n",
-			__func__, hash32_ptr(inst->session));
-		return -EINVAL;
-	}
-
-	if (mode == HAL_VIDEO_DECODER_SECONDARY)
-		inst->stream_output_mode = HAL_VIDEO_DECODER_SECONDARY;
-	else
-		inst->stream_output_mode = HAL_VIDEO_DECODER_PRIMARY;
-
-	return 0;
-}
 
 enum multi_stream msm_cvp_comm_get_stream_output_mode(struct msm_cvp_inst *inst)
 {
@@ -268,79 +39,10 @@
 		return HAL_VIDEO_DECODER_PRIMARY;
 }
 
-static int msm_cvp_comm_get_mbs_per_sec(struct msm_cvp_inst *inst)
-{
-	int output_port_mbs, capture_port_mbs;
-	int fps;
-
-	output_port_mbs = inst->in_reconfig ?
-			NUM_MBS_PER_FRAME(inst->reconfig_width,
-				inst->reconfig_height) :
-			NUM_MBS_PER_FRAME(inst->prop.width[OUTPUT_PORT],
-				inst->prop.height[OUTPUT_PORT]);
-
-	capture_port_mbs = NUM_MBS_PER_FRAME(inst->prop.width[CAPTURE_PORT],
-		inst->prop.height[CAPTURE_PORT]);
-
-	if ((inst->clk_data.operating_rate >> 16) > inst->prop.fps)
-		fps = (inst->clk_data.operating_rate >> 16) ?
-			inst->clk_data.operating_rate >> 16 : 1;
-	else
-		fps = inst->prop.fps;
-
-	return max(output_port_mbs, capture_port_mbs) * fps;
-}
-
 int msm_cvp_comm_get_inst_load(struct msm_cvp_inst *inst,
 		enum load_calc_quirks quirks)
 {
-	int load = 0;
-
-	mutex_lock(&inst->lock);
-
-	if (!(inst->state >= MSM_CVP_OPEN_DONE &&
-		inst->state < MSM_CVP_STOP_DONE))
-		goto exit;
-
-	load = msm_cvp_comm_get_mbs_per_sec(inst);
-
-	if (is_thumbnail_session(inst)) {
-		if (quirks & LOAD_CALC_IGNORE_THUMBNAIL_LOAD)
-			load = 0;
-	}
-
-	if (is_turbo_session(inst)) {
-		if (!(quirks & LOAD_CALC_IGNORE_TURBO_LOAD))
-			load = inst->core->resources.max_load;
-	}
-
-	/*  Clock and Load calculations for REALTIME/NON-REALTIME
-	 *                        OPERATING RATE SET/NO OPERATING RATE SET
-	 *
-	 *                 | OPERATING RATE SET   | OPERATING RATE NOT SET |
-	 * ----------------|--------------------- |------------------------|
-	 * REALTIME        | load = res * op_rate |  load = res * fps      |
-	 *                 | clk  = res * op_rate |  clk  = res * fps      |
-	 * ----------------|----------------------|------------------------|
-	 * NON-REALTIME    | load = res * 1 fps   |  load = res * 1 fps    |
-	 *                 | clk  = res * op_rate |  clk  = res * fps      |
-	 * ----------------|----------------------|------------------------|
-	 */
-
-	if (!is_realtime_session(inst) &&
-		(quirks & LOAD_CALC_IGNORE_NON_REALTIME_LOAD)) {
-		if (!inst->prop.fps) {
-			dprintk(CVP_INFO, "instance:%pK fps = 0\n", inst);
-			load = 0;
-		} else {
-			load =
-			msm_cvp_comm_get_mbs_per_sec(inst)/inst->prop.fps;
-		}
-	}
-
-exit:
-	mutex_unlock(&inst->lock);
-	return load;
+	return 0;
 }
 
 int msm_cvp_comm_get_inst_load_per_core(struct msm_cvp_inst *inst,
@@ -354,29 +56,6 @@
 	return load;
 }
 
-int msm_cvp_comm_get_load(struct msm_cvp_core *core,
-	enum session_type type, enum load_calc_quirks quirks)
-{
-	struct msm_cvp_inst *inst = NULL;
-	int num_mbs_per_sec = 0;
-
-	if (!core) {
-		dprintk(CVP_ERR, "Invalid args: %pK\n", core);
-		return -EINVAL;
-	}
-
-	mutex_lock(&core->lock);
-	list_for_each_entry(inst, &core->instances, list) {
-		if (inst->session_type != type)
-			continue;
-
-		num_mbs_per_sec += msm_cvp_comm_get_inst_load(inst, quirks);
-	}
-	mutex_unlock(&core->lock);
-
-	return num_mbs_per_sec;
-}
-
 enum hal_domain get_cvp_hal_domain(int session_type)
 {
 	enum hal_domain domain;
@@ -411,11 +90,6 @@
 	return codec;
 }
 
-enum hal_uncompressed_format msm_cvp_comm_get_hal_uncompressed(int fourcc)
-{
-	return HAL_UNUSED_COLOR;
-}
-
 struct msm_cvp_core *get_cvp_core(int core_id)
 {
 	struct msm_cvp_core *core;
@@ -439,26 +113,6 @@
 	return NULL;
 }
 
-struct msm_cvp_format_constraint *msm_cvp_comm_get_pixel_fmt_constraints(
-	struct msm_cvp_format_constraint fmt[], int size, int fourcc)
-{
-	int i;
-
-	if (!fmt) {
-		dprintk(CVP_ERR, "Invalid inputs, fmt = %pK\n", fmt);
-		return NULL;
-	}
-	for (i = 0; i < size; i++) {
-		if (fmt[i].fourcc == fourcc)
-			break;
-	}
-	if (i == size) {
-		dprintk(CVP_INFO, "Format constraint not found.\n");
-		return NULL;
-	}
-	return &fmt[i];
-}
-
 struct buf_queue *msm_cvp_comm_get_vb2q(
 		struct msm_cvp_inst *inst, enum v4l2_buf_type type)
 {
@@ -679,7 +333,7 @@
 	list_for_each_safe(ptr, next, &inst->persistbufs.list) {
 		buf = list_entry(ptr, struct internal_buf, list);
 		if (address == buf->smem.device_addr) {
-			dprintk(CVP_DBG, "releasing persist: %x\n",
+			dprintk(CVP_DBG, "releasing persist: %#x\n",
 					buf->smem.device_addr);
 			buf_found = true;
 		}
@@ -687,7 +341,8 @@
 	mutex_unlock(&inst->persistbufs.lock);
 
 	if (!buf_found)
-		dprintk(CVP_ERR, "invalid buffer received from firmware");
+		dprintk(CVP_WARN, "invalid buffer %#x from firmware\n",
+				address);
 	if (IS_HAL_SESSION_CMD(cmd))
 		complete(&inst->completions[SESSION_MSG_INDEX(cmd)]);
 	else
@@ -820,54 +475,10 @@
 	handle_session_error(cmd, (void *)&response);
 }
 
-static void print_cap(const char *type,
-		struct hal_capability_supported *cap)
-{
-	dprintk(CVP_DBG,
-		"%-24s: %-8d %-8d %-8d\n",
-		type, cap->min, cap->max, cap->step_size);
-}
-
-//static int msm_cvp_comm_update_ctrl(struct msm_cvp_inst *inst,
-//	u32 id, struct hal_capability_supported *capability)
-//{
-//	struct v4l2_ctrl *ctrl = NULL;
-//	int rc = 0;
-//
-//	ctrl = v4l2_ctrl_find(&inst->ctrl_handler, id);
-//	if (ctrl) {
-//		v4l2_ctrl_modify_range(ctrl, capability->min,
-//				capability->max, ctrl->step,
-//				ctrl->default_value);
-//		dprintk(CVP_DBG,
-//			"%s: Updated Range = %lld --> %lld Def value = %lld\n",
-//			ctrl->name, ctrl->minimum, ctrl->maximum,
-//			ctrl->default_value);
-//	} else {
-//		dprintk(CVP_ERR,
-//			"Failed to find Conrol %d\n", id);
-//		rc = -EINVAL;
-//	}
-//
-//	return rc;
-//	}
-
-static void msm_cvp_comm_update_ctrl_limits(struct msm_cvp_inst *inst)
-{
-	//msm_cvp_comm_update_ctrl(inst,
-	//	V4L2_CID_MPEG_VIDC_VIDEO_FRAME_RATE,
-	//	&inst->capability.frame_rate);
-}
-
 static void handle_session_init_done(enum hal_command_response cmd, void *data)
 {
 	struct msm_cvp_cb_cmd_done *response = data;
 	struct msm_cvp_inst *inst = NULL;
-	struct msm_cvp_capability *capability = NULL;
-	struct hfi_device *hdev;
-	struct msm_cvp_core *core;
-	struct hal_profile_level *profile_level;
-	u32 i, codec;
 
 	if (!response) {
 		dprintk(CVP_ERR,
@@ -905,297 +516,13 @@
 		return;
 	}
 
-	core = inst->core;
-	hdev = inst->core->device;
-	codec = inst->session_type == MSM_CVP_DECODER ?
-			inst->fmts[OUTPUT_PORT].fourcc :
-			inst->fmts[CAPTURE_PORT].fourcc;
-
-	/* check if capabilities are available for this session */
-	for (i = 0; i < CVP_MAX_SESSIONS; i++) {
-		if (core->capabilities[i].codec ==
-				get_cvp_hal_codec(codec) &&
-			core->capabilities[i].domain ==
-				get_cvp_hal_domain(inst->session_type)) {
-			capability = &core->capabilities[i];
-			break;
-		}
-	}
-
-	if (capability) {
-		dprintk(CVP_DBG,
-			"%s: capabilities for codec 0x%x, domain %#x\n",
-			__func__, capability->codec, capability->domain);
-		memcpy(&inst->capability, capability,
-			sizeof(struct msm_cvp_capability));
-	} else {
-		dprintk(CVP_ERR,
-			"Watch out : Some property may fail inst %pK\n", inst);
-		dprintk(CVP_ERR,
-			"Caps N/A for codec 0x%x, domain %#x\n",
-			inst->capability.codec, inst->capability.domain);
-	}
-	inst->capability.pixelprocess_capabilities =
-		call_hfi_op(hdev, get_core_capabilities, hdev->hfi_device_data);
-
-	dprintk(CVP_DBG,
-		"Capability type : min      max      step size\n");
-	print_cap("width", &inst->capability.width);
-	print_cap("height", &inst->capability.height);
-	print_cap("mbs_per_frame", &inst->capability.mbs_per_frame);
-	print_cap("mbs_per_sec", &inst->capability.mbs_per_sec);
-	print_cap("frame_rate", &inst->capability.frame_rate);
-	print_cap("bitrate", &inst->capability.bitrate);
-	print_cap("peak_bitrate", &inst->capability.peakbitrate);
-	print_cap("scale_x", &inst->capability.scale_x);
-	print_cap("scale_y", &inst->capability.scale_y);
-	print_cap("hier_p", &inst->capability.hier_p);
-	print_cap("ltr_count", &inst->capability.ltr_count);
-	print_cap("bframe", &inst->capability.bframe);
-	print_cap("secure_output2_threshold",
-		&inst->capability.secure_output2_threshold);
-	print_cap("hier_b", &inst->capability.hier_b);
-	print_cap("lcu_size", &inst->capability.lcu_size);
-	print_cap("hier_p_hybrid", &inst->capability.hier_p_hybrid);
-	print_cap("mbs_per_sec_low_power",
-		&inst->capability.mbs_per_sec_power_save);
-	print_cap("extradata", &inst->capability.extradata);
-	print_cap("profile", &inst->capability.profile);
-	print_cap("level", &inst->capability.level);
-	print_cap("i_qp", &inst->capability.i_qp);
-	print_cap("p_qp", &inst->capability.p_qp);
-	print_cap("b_qp", &inst->capability.b_qp);
-	print_cap("rc_modes", &inst->capability.rc_modes);
-	print_cap("blur_width", &inst->capability.blur_width);
-	print_cap("blur_height", &inst->capability.blur_height);
-	print_cap("slice_delivery_mode", &inst->capability.slice_delivery_mode);
-	print_cap("slice_bytes", &inst->capability.slice_bytes);
-	print_cap("slice_mbs", &inst->capability.slice_mbs);
-	print_cap("secure", &inst->capability.secure);
-	print_cap("max_num_b_frames", &inst->capability.max_num_b_frames);
-	print_cap("max_video_cores", &inst->capability.max_video_cores);
-	print_cap("max_work_modes", &inst->capability.max_work_modes);
-	print_cap("ubwc_cr_stats", &inst->capability.ubwc_cr_stats);
-
-	dprintk(CVP_DBG, "profile count : %u\n",
-		inst->capability.profile_level.profile_count);
-	for (i = 0; i < inst->capability.profile_level.profile_count; i++) {
-		profile_level =
-			&inst->capability.profile_level.profile_level[i];
-		dprintk(CVP_DBG, "profile : %u\n", profile_level->profile);
-		dprintk(CVP_DBG, "level   : %u\n", profile_level->level);
-	}
-
-	signal_session_msg_receipt(cmd, inst);
-
-	/*
-	 * Update controls after informing session_init_done to avoid
-	 * timeouts.
-	 */
-
-	msm_cvp_comm_update_ctrl_limits(inst);
-	cvp_put_inst(inst);
+	dprintk(CVP_ERR, "%s Session type must be CVP\n", __func__);
+	return;
 }
 
 static void handle_event_change(enum hal_command_response cmd, void *data)
 {
-	struct msm_cvp_inst *inst = NULL;
-	struct msm_cvp_cb_event *event_notify = data;
-	int event = V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT;
-	struct v4l2_event seq_changed_event = {0};
-	int rc = 0;
-	struct hfi_device *hdev;
-	u32 *ptr = NULL;
-	struct hal_buffer_requirements *bufreq;
-	int extra_buff_count = 0;
-
-	if (!event_notify) {
-		dprintk(CVP_WARN, "Got an empty event from hfi\n");
-		return;
-	}
-
-	inst = cvp_get_inst(get_cvp_core(event_notify->device_id),
-			event_notify->session_id);
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(CVP_WARN, "Got a response for an inactive session\n");
-		goto err_bad_event;
-	}
-	hdev = inst->core->device;
-
-	switch (event_notify->hal_event_type) {
-	case HAL_EVENT_SEQ_CHANGED_SUFFICIENT_RESOURCES:
-		event = V4L2_EVENT_SEQ_CHANGED_SUFFICIENT;
-		break;
-	case HAL_EVENT_SEQ_CHANGED_INSUFFICIENT_RESOURCES:
-		event = V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT;
-		break;
-	case HAL_EVENT_RELEASE_BUFFER_REFERENCE:
-	{
-		struct msm_video_buffer *mbuf;
-		u32 planes[VIDEO_MAX_PLANES] = {0};
-
-		dprintk(CVP_DBG,
-			"%s: inst: %pK data_buffer: %x extradata_buffer: %x\n",
-			__func__, inst, event_notify->packet_buffer,
-			event_notify->extra_data_buffer);
-
-		planes[0] = event_notify->packet_buffer;
-		planes[1] = event_notify->extra_data_buffer;
-		mbuf = msm_cvp_comm_get_buffer_using_device_planes(inst,
-				V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, planes);
-		if (!mbuf || !kref_cvp_get_mbuf(inst, mbuf)) {
-			dprintk(CVP_ERR,
-				"%s: data_addr %x, extradata_addr %x not found\n",
-				__func__, planes[0], planes[1]);
-		} else {
-			//handle_release_buffer_reference(inst, mbuf);
-			kref_cvp_put_mbuf(mbuf);
-		}
-		goto err_bad_event;
-	}
-	default:
-		break;
-	}
-
-	/* Bit depth and pic struct changed event are combined into a single
-	 * event (insufficient event) for the userspace. Currently bitdepth
-	 * changes is only for HEVC and interlaced support is for all
-	 * codecs except HEVC
-	 * event data is now as follows:
-	 * u32 *ptr = seq_changed_event.u.data;
-	 * ptr[0] = height
-	 * ptr[1] = width
-	 * ptr[2] = bit depth
-	 * ptr[3] = pic struct (progressive or interlaced)
-	 * ptr[4] = colour space
-	 * ptr[5] = crop_data(top)
-	 * ptr[6] = crop_data(left)
-	 * ptr[7] = crop_data(height)
-	 * ptr[8] = crop_data(width)
-	 * ptr[9] = profile
-	 * ptr[10] = level
-	 */
-
-	inst->entropy_mode = event_notify->entropy_mode;
-	inst->profile = event_notify->profile;
-	inst->level = event_notify->level;
-	inst->prop.crop_info.left =
-		event_notify->crop_data.left;
-	inst->prop.crop_info.top =
-		event_notify->crop_data.top;
-	inst->prop.crop_info.height =
-		event_notify->crop_data.height;
-	inst->prop.crop_info.width =
-		event_notify->crop_data.width;
-	/* HW returns progressive_only flag in pic_struct. */
-	inst->pic_struct =
-		event_notify->pic_struct ?
-		MSM_CVP_PIC_STRUCT_PROGRESSIVE :
-		MSM_CVP_PIC_STRUCT_MAYBE_INTERLACED;
-
-	ptr = (u32 *)seq_changed_event.u.data;
-	ptr[0] = event_notify->height;
-	ptr[1] = event_notify->width;
-	ptr[2] = event_notify->bit_depth;
-	ptr[3] = event_notify->pic_struct;
-	ptr[4] = event_notify->colour_space;
-	ptr[5] = event_notify->crop_data.top;
-	ptr[6] = event_notify->crop_data.left;
-	ptr[7] = event_notify->crop_data.height;
-	ptr[8] = event_notify->crop_data.width;
-	ptr[9] = msm_cvp_comm_get_v4l2_profile(
-		inst->fmts[OUTPUT_PORT].fourcc,
-		event_notify->profile);
-	ptr[10] = msm_cvp_comm_get_v4l2_level(
-		inst->fmts[OUTPUT_PORT].fourcc,
-		event_notify->level);
-
-	dprintk(CVP_DBG,
-		"Event payload: height = %u width = %u profile = %u level = %u\n",
-			event_notify->height, event_notify->width,
-			ptr[9], ptr[10]);
-
-	dprintk(CVP_DBG,
-		"Event payload: bit_depth = %u pic_struct = %u colour_space = %u\n",
-		event_notify->bit_depth, event_notify->pic_struct,
-			event_notify->colour_space);
-
-	dprintk(CVP_DBG,
-		"Event payload: CROP top = %u left = %u Height = %u Width = %u\n",
-			event_notify->crop_data.top,
-			event_notify->crop_data.left,
-			event_notify->crop_data.height,
-			event_notify->crop_data.width);
-
-	mutex_lock(&inst->lock);
-	inst->in_reconfig = true;
-	inst->reconfig_height = event_notify->height;
-	inst->reconfig_width = event_notify->width;
-	inst->bit_depth = event_notify->bit_depth;
-
-	if (msm_cvp_comm_get_stream_output_mode(inst) ==
-			HAL_VIDEO_DECODER_SECONDARY) {
-		bufreq = get_cvp_buff_req_buffer(inst,
-				HAL_BUFFER_OUTPUT);
-		if (!bufreq) {
-			mutex_unlock(&inst->lock);
-			cvp_put_inst(inst);
-			return;
-		}
-
-		/* No need to add extra buffers to DPBs */
-		bufreq->buffer_count_min = event_notify->capture_buf_count;
-		bufreq->buffer_count_min_host = bufreq->buffer_count_min;
-
-		bufreq = get_cvp_buff_req_buffer(inst,
-				HAL_BUFFER_OUTPUT2);
-		if (!bufreq) {
-			mutex_unlock(&inst->lock);
-			cvp_put_inst(inst);
-			return;
-		}
-
-		extra_buff_count = msm_cvp_get_extra_buff_count(inst,
-						HAL_BUFFER_OUTPUT2);
-		bufreq->buffer_count_min = event_notify->capture_buf_count;
-		bufreq->buffer_count_min_host = bufreq->buffer_count_min +
-							extra_buff_count;
-	} else {
-
-		bufreq = get_cvp_buff_req_buffer(inst,
-				HAL_BUFFER_OUTPUT);
-		if (!bufreq) {
-			mutex_unlock(&inst->lock);
-			cvp_put_inst(inst);
-			return;
-		}
-
-		extra_buff_count = msm_cvp_get_extra_buff_count(inst,
-						HAL_BUFFER_OUTPUT);
-		bufreq->buffer_count_min = event_notify->capture_buf_count;
-		bufreq->buffer_count_min_host = bufreq->buffer_count_min +
-							extra_buff_count;
-	}
-	dprintk(CVP_DBG, "%s: buffer[%d] count: min %d min_host %d\n",
-		__func__, bufreq->buffer_type, bufreq->buffer_count_min,
-		bufreq->buffer_count_min_host);
-
-	mutex_unlock(&inst->lock);
-
-	rc = msm_cvp_check_session_supported(inst);
-	if (!rc) {
-		seq_changed_event.type = event;
-		v4l2_event_queue_fh(&inst->event_handler, &seq_changed_event);
-	} else if (rc == -ENOTSUPP) {
-		msm_cvp_queue_v4l2_event(inst,
-				V4L2_EVENT_MSM_CVP_HW_UNSUPPORTED);
-	} else if (rc == -EBUSY) {
-		msm_cvp_queue_v4l2_event(inst,
-				V4L2_EVENT_MSM_CVP_HW_OVERLOAD);
-	}
-
-err_bad_event:
-	cvp_put_inst(inst);
+	dprintk(CVP_WARN, "%s is not supported on CVP!\n", __func__);
 }
 
 static void handle_release_res_done(enum hal_command_response cmd, void *data)
@@ -1220,166 +547,9 @@
 	cvp_put_inst(inst);
 }
 
-void msm_cvp_comm_validate_output_buffers(struct msm_cvp_inst *inst)
-{
-	struct internal_buf *binfo;
-	u32 buffers_owned_by_driver = 0;
-	struct hal_buffer_requirements *output_buf;
-
-	output_buf = get_cvp_buff_req_buffer(inst, HAL_BUFFER_OUTPUT);
-
-	if (!output_buf) {
-		dprintk(CVP_DBG,
-			"This output buffer not required, buffer_type: %x\n",
-			HAL_BUFFER_OUTPUT);
-		return;
-	}
-	mutex_lock(&inst->outputbufs.lock);
-	if (list_empty(&inst->outputbufs.list)) {
-		dprintk(CVP_DBG, "%s: no OUTPUT buffers allocated\n",
-			__func__);
-		mutex_unlock(&inst->outputbufs.lock);
-		return;
-	}
-	list_for_each_entry(binfo, &inst->outputbufs.list, list) {
-		if (binfo->buffer_ownership != DRIVER) {
-			dprintk(CVP_DBG,
-				"This buffer is with FW %x\n",
-				binfo->smem.device_addr);
-			continue;
-		}
-		buffers_owned_by_driver++;
-	}
-	mutex_unlock(&inst->outputbufs.lock);
-
-	if (buffers_owned_by_driver != output_buf->buffer_count_actual) {
-		dprintk(CVP_WARN,
-			"OUTPUT Buffer count mismatch %d of %d\n",
-			buffers_owned_by_driver,
-			output_buf->buffer_count_actual);
-		msm_cvp_handle_hw_error(inst->core);
-	}
-}
-
-int msm_cvp_comm_queue_output_buffers(struct msm_cvp_inst *inst)
-{
-	struct internal_buf *binfo;
-	struct hfi_device *hdev;
-	struct cvp_frame_data frame_data = {0};
-	struct hal_buffer_requirements *output_buf, *extra_buf;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(CVP_ERR, "%s invalid parameters\n", __func__);
-		return -EINVAL;
-	}
-
-	hdev = inst->core->device;
-
-	output_buf = get_cvp_buff_req_buffer(inst, HAL_BUFFER_OUTPUT);
-	if (!output_buf) {
-		dprintk(CVP_DBG,
-			"This output buffer not required, buffer_type: %x\n",
-			HAL_BUFFER_OUTPUT);
-		return 0;
-	}
-	dprintk(CVP_DBG,
-		"output: num = %d, size = %d\n",
-		output_buf->buffer_count_actual,
-		output_buf->buffer_size);
-
-	extra_buf = get_cvp_buff_req_buffer(inst, HAL_BUFFER_EXTRADATA_OUTPUT);
-
-	mutex_lock(&inst->outputbufs.lock);
-	list_for_each_entry(binfo, &inst->outputbufs.list, list) {
-		if (binfo->buffer_ownership != DRIVER)
-			continue;
-		if (binfo->mark_remove)
-			continue;
-		frame_data.alloc_len = output_buf->buffer_size;
-		frame_data.filled_len = 0;
-		frame_data.offset = 0;
-		frame_data.device_addr = binfo->smem.device_addr;
-		frame_data.flags = 0;
-		frame_data.extradata_addr = binfo->smem.device_addr +
-		output_buf->buffer_size;
-		frame_data.buffer_type = HAL_BUFFER_OUTPUT;
-		frame_data.extradata_size = extra_buf ?
-			extra_buf->buffer_size : 0;
-		//rc = call_hfi_op(hdev, session_ftb,
-		//	(void *) inst->session, &frame_data);
-		binfo->buffer_ownership = FIRMWARE;
-	}
-	mutex_unlock(&inst->outputbufs.lock);
-
-	return 0;
-}
-
 static void handle_session_flush(enum hal_command_response cmd, void *data)
 {
-	struct msm_cvp_cb_cmd_done *response = data;
-	struct msm_cvp_inst *inst;
-	struct v4l2_event flush_event = {0};
-	u32 *ptr = NULL;
-	enum hal_flush flush_type;
-	int rc;
-
-	if (!response) {
-		dprintk(CVP_ERR, "Failed to get valid response for flush\n");
-		return;
-	}
-
-	inst = cvp_get_inst(get_cvp_core(response->device_id),
-			response->session_id);
-	if (!inst) {
-		dprintk(CVP_WARN, "Got a response for an inactive session\n");
-		return;
-	}
-
-	mutex_lock(&inst->flush_lock);
-	if (msm_cvp_comm_get_stream_output_mode(inst) ==
-			HAL_VIDEO_DECODER_SECONDARY) {
-
-		if (!(inst->fmts[OUTPUT_PORT].defer_outputs &&
-				inst->in_reconfig))
-			msm_cvp_comm_validate_output_buffers(inst);
-
-		if (!inst->in_reconfig) {
-			rc = msm_cvp_comm_queue_output_buffers(inst);
-			if (rc) {
-				dprintk(CVP_ERR,
-						"Failed to queue output buffers: %d\n",
-						rc);
-			}
-		}
-	}
-	inst->in_flush = false;
-	flush_event.type = V4L2_EVENT_MSM_CVP_FLUSH_DONE;
-	ptr = (u32 *)flush_event.u.data;
-
-	flush_type = response->data.flush_type;
-	switch (flush_type) {
-	case HAL_FLUSH_INPUT:
-		ptr[0] = V4L2_CMD_FLUSH_OUTPUT;
-		break;
-	case HAL_FLUSH_OUTPUT:
-		ptr[0] = V4L2_CMD_FLUSH_CAPTURE;
-		break;
-	case HAL_FLUSH_ALL:
-		ptr[0] |= V4L2_CMD_FLUSH_CAPTURE;
-		ptr[0] |= V4L2_CMD_FLUSH_OUTPUT;
-		break;
-	default:
-		dprintk(CVP_ERR, "Invalid flush type received!");
-		goto exit;
-	}
-
-	dprintk(CVP_DBG,
-		"Notify flush complete, flush_type: %x\n", flush_type);
-	v4l2_event_queue_fh(&inst->event_handler, &flush_event);
-
-exit:
-	mutex_unlock(&inst->flush_lock);
-	cvp_put_inst(inst);
+	dprintk(CVP_WARN, "%s is not supported on CVP!\n", __func__);
 }
 
 static void handle_session_error(enum hal_command_response cmd, void *data)
@@ -1572,102 +742,6 @@
 	cvp_put_inst(inst);
 }
 
-struct vb2_buffer *msm_cvp_comm_get_vb_using_video_buffer(
-		struct msm_cvp_inst *inst, struct msm_video_buffer *mbuf)
-{
-	u32 port = 0;
-	struct vb2_buffer *vb = NULL;
-	struct vb2_queue *q = NULL;
-	bool found = false;
-
-	if (mbuf->vvb.vb2_buf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
-		port = CAPTURE_PORT;
-	} else if (mbuf->vvb.vb2_buf.type ==
-			V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
-		port = OUTPUT_PORT;
-	} else {
-		dprintk(CVP_ERR, "%s: invalid type %d\n",
-			__func__, mbuf->vvb.vb2_buf.type);
-		return NULL;
-	}
-
-	mutex_lock(&inst->bufq[port].lock);
-	found = false;
-	q = &inst->bufq[port].vb2_bufq;
-	if (!q->streaming) {
-		dprintk(CVP_ERR, "port %d is not streaming", port);
-		goto unlock;
-	}
-	list_for_each_entry(vb, &q->queued_list, queued_entry) {
-		if (vb->state != VB2_BUF_STATE_ACTIVE)
-			continue;
-		if (msm_cvp_comm_compare_vb2_planes(inst, mbuf, vb)) {
-			found = true;
-			break;
-		}
-	}
-unlock:
-	mutex_unlock(&inst->bufq[port].lock);
-	if (!found) {
-		print_video_buffer(CVP_ERR, "vb2 not found for", inst, mbuf);
-		return NULL;
-	}
-
-	return vb;
-}
-
-int msm_cvp_comm_vb2_buffer_done(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf)
-{
-	struct vb2_buffer *vb2;
-	struct vb2_v4l2_buffer *vbuf;
-	u32 i, port;
-
-	if (!inst || !mbuf) {
-		dprintk(CVP_ERR, "%s: invalid params %pK %pK\n",
-			__func__, inst, mbuf);
-		return -EINVAL;
-	}
-
-	if (mbuf->vvb.vb2_buf.type ==
-			V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
-		port = CAPTURE_PORT;
-	else if (mbuf->vvb.vb2_buf.type ==
-			V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
-		port = OUTPUT_PORT;
-	else
-		return -EINVAL;
-
-	vb2 = msm_cvp_comm_get_vb_using_video_buffer(inst, mbuf);
-	if (!vb2)
-		return -EINVAL;
-
-	/*
-	 * access vb2 buffer under q->lock and if streaming only to
-	 * ensure the buffer was not free'd by vb2 framework while
-	 * we are accessing it here.
-	 */
-	mutex_lock(&inst->bufq[port].lock);
-	if (inst->bufq[port].vb2_bufq.streaming) {
-		vbuf = to_vb2_v4l2_buffer(vb2);
-		vbuf->flags = mbuf->vvb.flags;
-		vb2->timestamp = mbuf->vvb.vb2_buf.timestamp;
-		for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) {
-			vb2->planes[i].bytesused =
-				mbuf->vvb.vb2_buf.planes[i].bytesused;
-			vb2->planes[i].data_offset =
-				mbuf->vvb.vb2_buf.planes[i].data_offset;
-		}
-		vb2_buffer_done(vb2, VB2_BUF_STATE_DONE);
-	} else {
-		dprintk(CVP_ERR, "%s: port %d is not streaming\n",
-			__func__, port);
-	}
-	mutex_unlock(&inst->bufq[port].lock);
-
-	return 0;
-}
-
 static void handle_operation_config(enum hal_command_response cmd, void *data)
 {
 	dprintk(CVP_ERR,
@@ -1729,6 +803,7 @@
 	case HAL_SESSION_DFS_CONFIG_CMD_DONE:
 	case HAL_SESSION_DFS_FRAME_CMD_DONE:
 	case HAL_SESSION_DME_CONFIG_CMD_DONE:
+	case HAL_SESSION_DME_BASIC_CONFIG_CMD_DONE:
 	case HAL_SESSION_DME_FRAME_CMD_DONE:
 	case HAL_SESSION_PERSIST_CMD_DONE:
 		cvp_handle_session_cmd_done(cmd, data);
@@ -1800,37 +875,6 @@
 	return true;
 }
 
-bool cvp_is_batching_allowed(struct msm_cvp_inst *inst)
-{
-	bool allowed = false;
-
-	if (!inst || !inst->core)
-		return false;
-
-	/*
-	 * Enable decode batching based on below conditions
-	 * - platform supports batching
-	 * - decode session and H264/HEVC/VP9 format
-	 * - session resolution <= 1080p
-	 * - low latency not enabled
-	 * - not a thumbnail session
-	 * - UBWC color format
-	 */
-	if (inst->core->resources.decode_batching && is_decode_session(inst) &&
-		(inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_H264 ||
-		inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_HEVC ||
-		inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_VP9) &&
-		(msm_cvp_get_mbs_per_frame(inst) <=
-		NUM_MBS_PER_FRAME(MAX_DEC_BATCH_HEIGHT, MAX_DEC_BATCH_WIDTH)) &&
-		!inst->clk_data.low_latency_mode &&
-		!is_thumbnail_session(inst) &&
-		(inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_NV12_UBWC ||
-		inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_NV12_TP10_UBWC))
-		allowed = true;
-
-	return allowed;
-}
-
 static int msm_comm_session_abort(struct msm_cvp_inst *inst)
 {
 	int rc = 0, abort_completion = 0;
@@ -2098,96 +1142,6 @@
 	return rc;
 }
 
-static int msm_comm_init_buffer_count(struct msm_cvp_inst *inst)
-{
-	int extra_buff_count = 0;
-	struct hal_buffer_requirements *bufreq;
-	int rc = 0;
-	int port;
-
-	if (!is_decode_session(inst) && !is_encode_session(inst))
-		return 0;
-
-	if (is_decode_session(inst))
-		port = OUTPUT_PORT;
-	else
-		port = CAPTURE_PORT;
-
-	/* Update input buff counts */
-	bufreq = get_cvp_buff_req_buffer(inst, HAL_BUFFER_INPUT);
-	if (!bufreq)
-		return -EINVAL;
-
-	extra_buff_count = msm_cvp_get_extra_buff_count(inst,
-				HAL_BUFFER_INPUT);
-	bufreq->buffer_count_min = inst->fmts[port].input_min_count;
-	/* batching needs minimum batch size count of input buffers */
-	if (inst->core->resources.decode_batching &&
-		is_decode_session(inst) &&
-		bufreq->buffer_count_min < inst->batch.size)
-		bufreq->buffer_count_min = inst->batch.size;
-	bufreq->buffer_count_min_host = bufreq->buffer_count_actual =
-				bufreq->buffer_count_min + extra_buff_count;
-
-	dprintk(CVP_DBG, "%s: %x : input min %d min_host %d actual %d\n",
-		__func__, hash32_ptr(inst->session),
-		bufreq->buffer_count_min, bufreq->buffer_count_min_host,
-		bufreq->buffer_count_actual);
-
-	rc = msm_cvp_comm_set_buffer_count(inst,
-			bufreq->buffer_count_min,
-			bufreq->buffer_count_actual, HAL_BUFFER_INPUT);
-	if (rc) {
-		dprintk(CVP_ERR,
-			"%s: Failed to set in buffer count to FW\n",
-			__func__);
-		return -EINVAL;
-	}
-
-	bufreq = get_cvp_buff_req_buffer(inst, HAL_BUFFER_EXTRADATA_INPUT);
-	if (!bufreq)
-		return -EINVAL;
-
-	bufreq->buffer_count_min = inst->fmts[port].input_min_count;
-	bufreq->buffer_count_min_host = bufreq->buffer_count_actual =
-				bufreq->buffer_count_min + extra_buff_count;
-
-	/* Update output buff count */
-	bufreq = get_cvp_buff_req_buffer(inst, HAL_BUFFER_OUTPUT);
-	if (!bufreq)
-		return -EINVAL;
-
-	extra_buff_count = msm_cvp_get_extra_buff_count(inst,
-				HAL_BUFFER_OUTPUT);
-	bufreq->buffer_count_min = inst->fmts[port].output_min_count;
-	bufreq->buffer_count_min_host = bufreq->buffer_count_actual =
-		bufreq->buffer_count_min + extra_buff_count;
-
-	dprintk(CVP_DBG, "%s: %x : output min %d min_host %d actual %d\n",
-		__func__, hash32_ptr(inst->session),
-		bufreq->buffer_count_min, bufreq->buffer_count_min_host,
-		bufreq->buffer_count_actual);
-
-	rc = msm_cvp_comm_set_buffer_count(inst,
-		bufreq->buffer_count_min,
-		bufreq->buffer_count_actual, HAL_BUFFER_OUTPUT);
-	if (rc) {
-		dprintk(CVP_ERR,
-			"Failed to set out buffer count to FW\n");
-		return -EINVAL;
-	}
-
-	bufreq = get_cvp_buff_req_buffer(inst, HAL_BUFFER_EXTRADATA_OUTPUT);
-	if (!bufreq)
-		return -EINVAL;
-
-	bufreq->buffer_count_min = inst->fmts[port].output_min_count;
-	bufreq->buffer_count_min_host = bufreq->buffer_count_actual =
-		bufreq->buffer_count_min + extra_buff_count;
-
-	return 0;
-}
-
 static int msm_comm_session_init(int flipped_state,
 	struct msm_cvp_inst *inst)
 {
@@ -2233,180 +1187,12 @@
 		rc = -EINVAL;
 		goto exit;
 	}
-
-	rc = msm_comm_init_buffer_count(inst);
-	if (rc) {
-		dprintk(CVP_ERR, "Failed to initialize buff counts\n");
-		goto exit;
-	}
 	change_cvp_inst_state(inst, MSM_CVP_OPEN);
 
 exit:
 	return rc;
 }
 
-static void msm_cvp_print_running_insts(struct msm_cvp_core *core)
-{
-	struct msm_cvp_inst *temp;
-	int op_rate = 0;
-
-	dprintk(CVP_ERR, "Running instances:\n");
-	dprintk(CVP_ERR, "%4s|%4s|%4s|%4s|%4s|%4s\n",
-			"type", "w", "h", "fps", "opr", "prop");
-
-	mutex_lock(&core->lock);
-	list_for_each_entry(temp, &core->instances, list) {
-		if (temp->state >= MSM_CVP_OPEN_DONE &&
-				temp->state < MSM_CVP_STOP_DONE) {
-			char properties[4] = "";
-
-			if (is_thumbnail_session(temp))
-				strlcat(properties, "N", sizeof(properties));
-
-			if (is_turbo_session(temp))
-				strlcat(properties, "T", sizeof(properties));
-
-			if (is_realtime_session(temp))
-				strlcat(properties, "R", sizeof(properties));
-
-			if (temp->clk_data.operating_rate)
-				op_rate = temp->clk_data.operating_rate >> 16;
-			else
-				op_rate = temp->prop.fps;
-
-			dprintk(CVP_ERR, "%4d|%4d|%4d|%4d|%4d|%4s\n",
-					temp->session_type,
-					max(temp->prop.width[CAPTURE_PORT],
-						temp->prop.width[OUTPUT_PORT]),
-					max(temp->prop.height[CAPTURE_PORT],
-						temp->prop.height[OUTPUT_PORT]),
-					temp->prop.fps, op_rate, properties);
-		}
-	}
-	mutex_unlock(&core->lock);
-}
-
-static int msm_cvp_load_resources(int flipped_state,
-	struct msm_cvp_inst *inst)
-{
-	int rc = 0;
-	struct hfi_device *hdev;
-	int num_mbs_per_sec = 0, max_load_adj = 0;
-	struct msm_cvp_core *core;
-	enum load_calc_quirks quirks = LOAD_CALC_IGNORE_TURBO_LOAD |
-		LOAD_CALC_IGNORE_THUMBNAIL_LOAD |
-		LOAD_CALC_IGNORE_NON_REALTIME_LOAD;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(CVP_ERR, "%s invalid parameters\n", __func__);
-		return -EINVAL;
-	}
-	if (inst->state == MSM_CVP_CORE_INVALID) {
-		dprintk(CVP_ERR,
-			"%s: inst %pK is in invalid state\n", __func__, inst);
-		return -EINVAL;
-	}
-	if (IS_ALREADY_IN_STATE(flipped_state, MSM_CVP_LOAD_RESOURCES)) {
-		dprintk(CVP_INFO, "inst: %pK is already in state: %d\n",
-						inst, inst->state);
-		goto exit;
-	}
-	core = inst->core;
-
-	num_mbs_per_sec =
-		msm_cvp_comm_get_load(core, MSM_CVP_DECODER, quirks) +
-		msm_cvp_comm_get_load(core, MSM_CVP_ENCODER, quirks);
-
-	max_load_adj = core->resources.max_load +
-		inst->capability.mbs_per_frame.max;
-
-	if (num_mbs_per_sec > max_load_adj) {
-		dprintk(CVP_ERR, "HW is overloaded, needed: %d max: %d\n",
-			num_mbs_per_sec, max_load_adj);
-		msm_cvp_print_running_insts(core);
-		msm_cvp_comm_kill_session(inst);
-		return -EBUSY;
-	}
-
-	hdev = core->device;
-	dprintk(CVP_DBG, "%s: inst %pK\n", __func__, inst);
-	rc = call_hfi_op(hdev, session_load_res, (void *) inst->session);
-	if (rc) {
-		dprintk(CVP_ERR,
-			"Failed to send load resources\n");
-		goto exit;
-	}
-	change_cvp_inst_state(inst, MSM_CVP_LOAD_RESOURCES);
-exit:
-	return rc;
-}
-
-static int msm_cvp_start(int flipped_state, struct msm_cvp_inst *inst)
-{
-	int rc = 0;
-	struct hfi_device *hdev;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(CVP_ERR, "%s invalid parameters\n", __func__);
-		return -EINVAL;
-	}
-	if (inst->state == MSM_CVP_CORE_INVALID) {
-		dprintk(CVP_ERR,
-			"%s: inst %pK is in invalid\n", __func__, inst);
-		return -EINVAL;
-	}
-	if (IS_ALREADY_IN_STATE(flipped_state, MSM_CVP_START)) {
-		dprintk(CVP_INFO,
-			"inst: %pK is already in state: %d\n",
-			inst, inst->state);
-		goto exit;
-	}
-	hdev = inst->core->device;
-	dprintk(CVP_DBG, "%s: inst %pK\n", __func__, inst);
-	rc = call_hfi_op(hdev, session_start, (void *) inst->session);
-	if (rc) {
-		dprintk(CVP_ERR,
-			"Failed to send start\n");
-		goto exit;
-	}
-	change_cvp_inst_state(inst, MSM_CVP_START);
-exit:
-	return rc;
-}
-
-static int msm_cvp_stop(int flipped_state, struct msm_cvp_inst *inst)
-{
-	int rc = 0;
-	struct hfi_device *hdev;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(CVP_ERR, "%s invalid parameters\n", __func__);
-		return -EINVAL;
-	}
-	if (inst->state == MSM_CVP_CORE_INVALID) {
-		dprintk(CVP_ERR,
-			"%s: inst %pK is in invalid state\n", __func__, inst);
-		return -EINVAL;
-	}
-	if (IS_ALREADY_IN_STATE(flipped_state, MSM_CVP_STOP)) {
-		dprintk(CVP_INFO,
-			"inst: %pK is already in state: %d\n",
-			inst, inst->state);
-		goto exit;
-	}
-	hdev = inst->core->device;
-	dprintk(CVP_DBG, "%s: inst %pK\n", __func__, inst);
-	rc = call_hfi_op(hdev, session_stop, (void *) inst->session);
-	if (rc) {
-		dprintk(CVP_ERR, "%s: inst %pK session_stop failed\n",
-				__func__, inst);
-		goto exit;
-	}
-	change_cvp_inst_state(inst, MSM_CVP_STOP);
-exit:
-	return rc;
-}
-
 static int msm_comm_session_close(int flipped_state,
 			struct msm_cvp_inst *inst)
 {
@@ -2483,59 +1269,6 @@
 	return flipped_state;
 }
 
-int msm_cvp_comm_reset_bufreqs(struct msm_cvp_inst *inst,
-	enum hal_buffer buf_type)
-{
-	struct hal_buffer_requirements *bufreqs;
-
-	if (!inst) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-
-	bufreqs = get_cvp_buff_req_buffer(inst, buf_type);
-	if (!bufreqs) {
-		dprintk(CVP_ERR, "%s: invalid buf type %d\n",
-			__func__, buf_type);
-		return -EINVAL;
-	}
-	bufreqs->buffer_size = bufreqs->buffer_region_size =
-	bufreqs->buffer_count_min = bufreqs->buffer_count_min_host =
-	bufreqs->buffer_count_actual = bufreqs->contiguous =
-	bufreqs->buffer_alignment = 0;
-
-	return 0;
-}
-
-int msm_cvp_comm_copy_bufreqs(struct msm_cvp_inst *inst,
-	enum hal_buffer src_type, enum hal_buffer dst_type)
-{
-	struct hal_buffer_requirements *src_bufreqs;
-	struct hal_buffer_requirements *dst_bufreqs;
-
-	if (!inst) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-
-	src_bufreqs = get_cvp_buff_req_buffer(inst, src_type);
-	dst_bufreqs = get_cvp_buff_req_buffer(inst, dst_type);
-	if (!src_bufreqs || !dst_bufreqs) {
-		dprintk(CVP_ERR, "%s: invalid buf type: src %d dst %d\n",
-			__func__, src_type, dst_type);
-		return -EINVAL;
-	}
-	dst_bufreqs->buffer_size = src_bufreqs->buffer_size;
-	dst_bufreqs->buffer_region_size = src_bufreqs->buffer_region_size;
-	dst_bufreqs->buffer_count_min = src_bufreqs->buffer_count_min;
-	dst_bufreqs->buffer_count_min_host = src_bufreqs->buffer_count_min_host;
-	dst_bufreqs->buffer_count_actual = src_bufreqs->buffer_count_actual;
-	dst_bufreqs->contiguous = src_bufreqs->contiguous;
-	dst_bufreqs->buffer_alignment = src_bufreqs->buffer_alignment;
-
-	return 0;
-}
-
 struct hal_buffer_requirements *get_cvp_buff_req_buffer(
 		struct msm_cvp_inst *inst, enum hal_buffer buffer_type)
 {
@@ -2593,31 +1326,21 @@
 		if (rc || state <= get_flipped_state(inst->state, state))
 			break;
 	case MSM_CVP_LOAD_RESOURCES:
-		rc = msm_cvp_load_resources(flipped_state, inst);
-		if (rc || state <= get_flipped_state(inst->state, state))
-			break;
+		dprintk(CVP_WARN, "Deprecated state LOAD_RESOURCES\n");
 	case MSM_CVP_LOAD_RESOURCES_DONE:
+		dprintk(CVP_WARN, "Deprecated state LOAD_RESOURCES_DONE\n");
 	case MSM_CVP_START:
-		rc = msm_cvp_start(flipped_state, inst);
-		if (rc || state <= get_flipped_state(inst->state, state))
-			break;
+		dprintk(CVP_WARN, "Deprecated state START\n");
 	case MSM_CVP_START_DONE:
-		dprintk(CVP_ERR, "Deprecated HFI packet: START_DONE\n");
-			break;
+		dprintk(CVP_WARN, "Deprecated state START_DONE\n");
 	case MSM_CVP_STOP:
-		rc = msm_cvp_stop(flipped_state, inst);
-		if (rc || state <= get_flipped_state(inst->state, state))
-			break;
+		dprintk(CVP_WARN, "Deprecated state STOP\n");
 	case MSM_CVP_STOP_DONE:
-		rc = wait_for_state(inst, flipped_state, MSM_CVP_STOP_DONE,
-				HAL_SESSION_STOP_DONE);
-		if (rc || state <= get_flipped_state(inst->state, state))
-			break;
-		dprintk(CVP_DBG, "Moving to Stop Done state\n");
+		dprintk(CVP_WARN, "Deprecated state STOP_DONE\n");
 	case MSM_CVP_RELEASE_RESOURCES:
-		dprintk(CVP_ERR, "Deprecated state RELEASE_SOURCES\n");
+		dprintk(CVP_WARN, "Deprecated state RELEASE_SOURCES\n");
 	case MSM_CVP_RELEASE_RESOURCES_DONE:
-		dprintk(CVP_ERR, "Deprecated state RELEASE_SOURCES_DONE\n");
+		dprintk(CVP_WARN, "Deprecated state RELEASE_RESOURCES_DONE\n");
 	case MSM_CVP_CLOSE:
 		rc = msm_comm_session_close(flipped_state, inst);
 		if (rc || state <= get_flipped_state(inst->state, state))
@@ -2655,65 +1378,6 @@
 	return rc;
 }
 
-int msm_cvp_comm_cmd(void *instance, union msm_v4l2_cmd *cmd)
-{
-	return 0;
-}
-
-static void populate_frame_data(struct cvp_frame_data *data,
-		struct msm_video_buffer *mbuf, struct msm_cvp_inst *inst)
-{
-	u64 time_usec;
-	int extra_idx;
-	struct vb2_buffer *vb;
-	struct vb2_v4l2_buffer *vbuf;
-
-	if (!inst || !mbuf || !data) {
-		dprintk(CVP_ERR, "%s: invalid params %pK %pK %pK\n",
-			__func__, inst, mbuf, data);
-		return;
-	}
-
-	vb = &mbuf->vvb.vb2_buf;
-	vbuf = to_vb2_v4l2_buffer(vb);
-
-	time_usec = vb->timestamp;
-	do_div(time_usec, NSEC_PER_USEC);
-
-	data->alloc_len = vb->planes[0].length;
-	data->device_addr = mbuf->smem[0].device_addr;
-	data->timestamp = time_usec;
-	data->flags = 0;
-	data->clnt_data = data->device_addr;
-
-	if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
-		data->buffer_type = HAL_BUFFER_INPUT;
-		data->filled_len = vb->planes[0].bytesused;
-		data->offset = vb->planes[0].data_offset;
-
-		if (vbuf->flags & V4L2_BUF_FLAG_EOS)
-			data->flags |= HAL_BUFFERFLAG_EOS;
-
-		if (vbuf->flags & V4L2_BUF_FLAG_CODECCONFIG)
-			data->flags |= HAL_BUFFERFLAG_CODECCONFIG;
-
-		if (inst->session_type == MSM_CVP_DECODER) {
-			msm_cvp_comm_fetch_mark_data(&inst->etb_data, vb->index,
-				&data->mark_data, &data->mark_target);
-		}
-
-	} else if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
-		data->buffer_type = msm_cvp_comm_get_hal_output_buffer(inst);
-	}
-
-	extra_idx = EXTRADATA_IDX(vb->num_planes);
-	if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
-		data->extradata_addr = mbuf->smem[extra_idx].device_addr;
-		data->extradata_size = vb->planes[extra_idx].length;
-		data->flags |= HAL_BUFFERFLAG_EXTRADATA;
-	}
-}
-
 enum hal_buffer cvp_get_hal_buffer_type(unsigned int type,
 		unsigned int plane_num)
 {
@@ -2755,512 +1419,6 @@
 	return count;
 }
 
-static int num_pending_qbufs(struct msm_cvp_inst *inst, u32 type)
-{
-	int count = 0;
-	struct msm_video_buffer *mbuf;
-
-	if (!inst) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return 0;
-	}
-
-	mutex_lock(&inst->registeredbufs.lock);
-	list_for_each_entry(mbuf, &inst->registeredbufs.list, list) {
-		if (mbuf->vvb.vb2_buf.type != type)
-			continue;
-		/* Count only deferred buffers */
-		if (!(mbuf->flags & MSM_CVP_FLAG_DEFERRED))
-			continue;
-		count++;
-	}
-	mutex_unlock(&inst->registeredbufs.lock);
-
-	return count;
-}
-
-static int msm_comm_qbuf_to_hfi(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf)
-{
-	int rc = 0;
-	struct hfi_device *hdev;
-	enum msm_cvp_debugfs_event e = { 0 };
-	struct cvp_frame_data frame_data = {0};
-
-	if (!inst || !inst->core || !inst->core->device || !mbuf) {
-		dprintk(CVP_ERR, "%s: Invalid arguments\n", __func__);
-		return -EINVAL;
-	}
-	hdev = inst->core->device;
-
-	populate_frame_data(&frame_data, mbuf, inst);
-	/* mbuf is not deferred anymore */
-	mbuf->flags &= ~MSM_CVP_FLAG_DEFERRED;
-	mbuf->flags |= MSM_CVP_FLAG_QUEUED;
-	msm_cvp_debugfs_update(inst, e);
-
-//err_bad_input:
-	return rc;
-}
-
-int msm_cvp_comm_qbuf(struct msm_cvp_inst *inst, struct msm_video_buffer *mbuf)
-{
-	int rc = 0;
-
-	if (!inst || !mbuf) {
-		dprintk(CVP_ERR, "%s: Invalid arguments\n", __func__);
-		return -EINVAL;
-	}
-
-	if (inst->state == MSM_CVP_CORE_INVALID) {
-		dprintk(CVP_ERR, "%s: inst is in bad state\n", __func__);
-		return -EINVAL;
-	}
-
-	if (inst->state != MSM_CVP_START_DONE) {
-		mbuf->flags |= MSM_CVP_FLAG_DEFERRED;
-		print_video_buffer(CVP_DBG, "qbuf deferred", inst, mbuf);
-		return 0;
-	}
-
-	rc = msm_cvp_comm_scale_clocks_and_bus(inst);
-	if (rc)
-		dprintk(CVP_ERR, "%s: scale clocks failed\n", __func__);
-
-	print_video_buffer(CVP_DBG, "qbuf", inst, mbuf);
-	rc = msm_comm_qbuf_to_hfi(inst, mbuf);
-	if (rc)
-		dprintk(CVP_ERR, "%s: Failed qbuf to hfi: %d\n", __func__, rc);
-
-	return rc;
-}
-
-/*
- * msm_comm_qbuf_decode_batch - count the buffers which are not queued to
- *              firmware yet (count includes rbr pending buffers too) and
- *              queue the buffers at once if full batch count reached.
- *              Don't queue rbr pending buffers as they would be queued
- *              when rbr event arrived from firmware.
- */
-int msm_cvp_comm_qbuf_decode_batch(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf)
-{
-	int rc = 0;
-	u32 count = 0;
-	struct msm_video_buffer *buf;
-
-	if (!inst || !mbuf) {
-		dprintk(CVP_ERR, "%s: Invalid arguments\n", __func__);
-		return -EINVAL;
-	}
-
-	if (inst->state == MSM_CVP_CORE_INVALID) {
-		dprintk(CVP_ERR, "%s: inst is in bad state\n", __func__);
-		return -EINVAL;
-	}
-
-	if (inst->state != MSM_CVP_START_DONE) {
-		mbuf->flags |= MSM_CVP_FLAG_DEFERRED;
-		print_video_buffer(CVP_DBG, "qbuf deferred", inst, mbuf);
-		return 0;
-	}
-
-	/*
-	 * Don't defer buffers initially to avoid startup
-	 * latency increase due to batching
-	 */
-	if (inst->clk_data.buffer_counter > SKIP_BATCH_WINDOW) {
-		count = num_pending_qbufs(inst,
-			V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
-		if (count < inst->batch.size) {
-			print_video_buffer(CVP_DBG,
-				"batch-qbuf deferred", inst, mbuf);
-			return 0;
-		}
-	}
-
-	rc = msm_cvp_comm_scale_clocks_and_bus(inst);
-	if (rc)
-		dprintk(CVP_ERR, "%s: scale clocks failed\n", __func__);
-
-	mutex_lock(&inst->registeredbufs.lock);
-	list_for_each_entry(buf, &inst->registeredbufs.list, list) {
-		/* Don't queue if buffer is not CAPTURE_MPLANE */
-		if (buf->vvb.vb2_buf.type !=
-			V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
-			goto loop_end;
-		/* Don't queue if buffer is not a deferred buffer */
-		if (!(buf->flags & MSM_CVP_FLAG_DEFERRED))
-			goto loop_end;
-		/* Don't queue if RBR event is pending on this buffer */
-		if (buf->flags & MSM_CVP_FLAG_RBR_PENDING)
-			goto loop_end;
-
-		print_video_buffer(CVP_DBG, "batch-qbuf", inst, buf);
-		rc = msm_comm_qbuf_to_hfi(inst, buf);
-		if (rc) {
-			dprintk(CVP_ERR, "%s: Failed qbuf to hfi: %d\n",
-				__func__, rc);
-			break;
-		}
-loop_end:
-		/* Queue pending buffers till the current buffer only */
-		if (buf == mbuf)
-			break;
-	}
-	mutex_unlock(&inst->registeredbufs.lock);
-
-	return rc;
-}
-
-int msm_cvp_comm_try_get_prop(struct msm_cvp_inst *inst,
-	enum hal_property ptype, union hal_get_property *hprop)
-{
-	int rc = 0;
-	struct hfi_device *hdev;
-	struct getprop_buf *buf;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(CVP_ERR, "%s invalid parameters\n", __func__);
-		return -EINVAL;
-	}
-
-	hdev = inst->core->device;
-	mutex_lock(&inst->sync_lock);
-	if (inst->state < MSM_CVP_OPEN_DONE ||
-			inst->state >= MSM_CVP_CLOSE) {
-
-		/* No need to check inst->state == MSM_CVP_INVALID since
-		 * INVALID is > CLOSE_DONE. When core went to INVALID state,
-		 * we put all the active instances in INVALID. So > CLOSE_DONE
-		 * is enough check to have.
-		 */
-
-		dprintk(CVP_ERR,
-			"In Wrong state to call Buf Req: Inst %pK or Core %pK\n",
-				inst, inst->core);
-		rc = -EAGAIN;
-		mutex_unlock(&inst->sync_lock);
-		goto exit;
-	}
-	mutex_unlock(&inst->sync_lock);
-
-	switch (ptype) {
-	case HAL_PARAM_GET_BUFFER_REQUIREMENTS:
-		rc = call_hfi_op(hdev, session_get_buf_req, inst->session);
-		break;
-	default:
-		rc = -EAGAIN;
-		break;
-	}
-
-	if (rc) {
-		dprintk(CVP_ERR, "Can't query hardware for property: %d\n",
-				rc);
-		goto exit;
-	}
-
-	rc = wait_for_completion_timeout(&inst->completions[
-			SESSION_MSG_INDEX(HAL_SESSION_PROPERTY_INFO)],
-		msecs_to_jiffies(
-			inst->core->resources.msm_cvp_hw_rsp_timeout));
-	if (!rc) {
-		dprintk(CVP_ERR,
-			"%s: Wait interrupted or timed out [%pK]: %d\n",
-			__func__, inst,
-			SESSION_MSG_INDEX(HAL_SESSION_PROPERTY_INFO));
-		msm_cvp_comm_kill_session(inst);
-		rc = -ETIMEDOUT;
-		goto exit;
-	} else {
-		/* wait_for_completion_timeout returns jiffies before expiry */
-		rc = 0;
-	}
-
-	mutex_lock(&inst->pending_getpropq.lock);
-	if (!list_empty(&inst->pending_getpropq.list)) {
-		buf = list_first_entry(&inst->pending_getpropq.list,
-					struct getprop_buf, list);
-		*hprop = *(union hal_get_property *)buf->data;
-		kfree(buf->data);
-		list_del(&buf->list);
-		kfree(buf);
-	} else {
-		dprintk(CVP_ERR, "%s getprop list empty\n", __func__);
-		rc = -EINVAL;
-	}
-	mutex_unlock(&inst->pending_getpropq.lock);
-exit:
-	return rc;
-}
-
-int msm_cvp_comm_release_output_buffers(struct msm_cvp_inst *inst,
-	bool force_release)
-{
-	struct msm_smem *handle;
-	struct internal_buf *buf, *dummy;
-	struct cvp_buffer_addr_info buffer_info;
-	int rc = 0;
-	struct msm_cvp_core *core;
-	struct hfi_device *hdev;
-
-	if (!inst) {
-		dprintk(CVP_ERR,
-				"Invalid instance pointer = %pK\n", inst);
-		return -EINVAL;
-	}
-	mutex_lock(&inst->outputbufs.lock);
-	if (list_empty(&inst->outputbufs.list)) {
-		dprintk(CVP_DBG, "%s - No OUTPUT buffers allocated\n",
-			__func__);
-		mutex_unlock(&inst->outputbufs.lock);
-		return 0;
-	}
-	mutex_unlock(&inst->outputbufs.lock);
-
-	core = inst->core;
-	if (!core) {
-		dprintk(CVP_ERR,
-				"Invalid core pointer = %pK\n", core);
-		return -EINVAL;
-	}
-	hdev = core->device;
-	if (!hdev) {
-		dprintk(CVP_ERR, "Invalid device pointer = %pK\n", hdev);
-		return -EINVAL;
-	}
-	mutex_lock(&inst->outputbufs.lock);
-	list_for_each_entry_safe(buf, dummy, &inst->outputbufs.list, list) {
-		handle = &buf->smem;
-
-		if ((buf->buffer_ownership == FIRMWARE) && !force_release) {
-			dprintk(CVP_INFO, "DPB is with f/w. Can't free it\n");
-			/*
-			 * mark this buffer to avoid sending it to video h/w
-			 * again, this buffer belongs to old resolution and
-			 * it will be removed when video h/w returns it.
-			 */
-			buf->mark_remove = true;
-			continue;
-		}
-
-		buffer_info.buffer_size = handle->size;
-		buffer_info.buffer_type = buf->buffer_type;
-		buffer_info.num_buffers = 1;
-		buffer_info.align_device_addr = handle->device_addr;
-		if (inst->buffer_mode_set[CAPTURE_PORT] ==
-				HAL_BUFFER_MODE_STATIC) {
-			buffer_info.response_required = false;
-			rc = call_hfi_op(hdev, session_release_buffers,
-				(void *)inst->session, &buffer_info);
-			if (rc) {
-				dprintk(CVP_WARN,
-					"Rel output buf fail:%x, %d\n",
-					buffer_info.align_device_addr,
-					buffer_info.buffer_size);
-			}
-		}
-
-		list_del(&buf->list);
-		msm_cvp_comm_smem_free(inst, &buf->smem);
-		kfree(buf);
-	}
-
-	mutex_unlock(&inst->outputbufs.lock);
-	return rc;
-}
-
-static enum hal_buffer scratch_buf_sufficient(struct msm_cvp_inst *inst,
-				enum hal_buffer buffer_type)
-{
-	struct hal_buffer_requirements *bufreq = NULL;
-	struct internal_buf *buf;
-	int count = 0;
-
-	if (!inst) {
-		dprintk(CVP_ERR, "%s - invalid param\n", __func__);
-		goto not_sufficient;
-	}
-
-	bufreq = get_cvp_buff_req_buffer(inst, buffer_type);
-	if (!bufreq)
-		goto not_sufficient;
-
-	/* Check if current scratch buffers are sufficient */
-	mutex_lock(&inst->scratchbufs.lock);
-
-	list_for_each_entry(buf, &inst->scratchbufs.list, list) {
-		if (buf->buffer_type == buffer_type &&
-			buf->smem.size >= bufreq->buffer_size)
-			count++;
-	}
-	mutex_unlock(&inst->scratchbufs.lock);
-
-	if (count != bufreq->buffer_count_actual)
-		goto not_sufficient;
-
-	dprintk(CVP_DBG,
-		"Existing scratch buffer is sufficient for buffer type %#x\n",
-		buffer_type);
-
-	return buffer_type;
-
-not_sufficient:
-	return HAL_BUFFER_NONE;
-}
-
-int msm_cvp_comm_release_scratch_buffers(struct msm_cvp_inst *inst,
-					bool check_for_reuse)
-{
-	struct msm_smem *handle;
-	struct internal_buf *buf, *dummy;
-	struct cvp_buffer_addr_info buffer_info;
-	int rc = 0;
-	struct msm_cvp_core *core;
-	struct hfi_device *hdev;
-	enum hal_buffer sufficiency = HAL_BUFFER_NONE;
-
-	if (!inst) {
-		dprintk(CVP_ERR,
-				"Invalid instance pointer = %pK\n", inst);
-		return -EINVAL;
-	}
-	core = inst->core;
-	if (!core) {
-		dprintk(CVP_ERR,
-				"Invalid core pointer = %pK\n", core);
-		return -EINVAL;
-	}
-	hdev = core->device;
-	if (!hdev) {
-		dprintk(CVP_ERR, "Invalid device pointer = %pK\n", hdev);
-		return -EINVAL;
-	}
-
-	if (check_for_reuse) {
-		sufficiency |= scratch_buf_sufficient(inst,
-					HAL_BUFFER_INTERNAL_SCRATCH);
-
-		sufficiency |= scratch_buf_sufficient(inst,
-					HAL_BUFFER_INTERNAL_SCRATCH_1);
-
-		sufficiency |= scratch_buf_sufficient(inst,
-					HAL_BUFFER_INTERNAL_SCRATCH_2);
-	}
-
-	mutex_lock(&inst->scratchbufs.lock);
-	list_for_each_entry_safe(buf, dummy, &inst->scratchbufs.list, list) {
-		handle = &buf->smem;
-		buffer_info.buffer_size = handle->size;
-		buffer_info.buffer_type = buf->buffer_type;
-		buffer_info.num_buffers = 1;
-		buffer_info.align_device_addr = handle->device_addr;
-		buffer_info.response_required = true;
-		rc = call_hfi_op(hdev, session_release_buffers,
-				(void *)inst->session, &buffer_info);
-		if (!rc) {
-			mutex_unlock(&inst->scratchbufs.lock);
-			rc = wait_for_sess_signal_receipt(inst,
-				HAL_SESSION_RELEASE_BUFFER_DONE);
-			if (rc)
-				dprintk(CVP_WARN,
-					"%s: wait for signal failed, rc %d\n",
-					__func__, rc);
-			mutex_lock(&inst->scratchbufs.lock);
-		} else {
-			dprintk(CVP_WARN,
-				"Rel scrtch buf fail:%x, %d\n",
-				buffer_info.align_device_addr,
-				buffer_info.buffer_size);
-		}
-
-		/*If scratch buffers can be reused, do not free the buffers*/
-		if (sufficiency & buf->buffer_type)
-			continue;
-
-		list_del(&buf->list);
-		msm_cvp_comm_smem_free(inst, handle);
-		kfree(buf);
-	}
-
-	mutex_unlock(&inst->scratchbufs.lock);
-	return rc;
-}
-
-void msm_cvp_comm_release_eos_buffers(struct msm_cvp_inst *inst)
-{
-	struct eos_buf *buf, *next;
-
-	if (!inst) {
-		dprintk(CVP_ERR,
-			"Invalid instance pointer = %pK\n", inst);
-		return;
-	}
-
-	mutex_lock(&inst->eosbufs.lock);
-	list_for_each_entry_safe(buf, next, &inst->eosbufs.list, list) {
-		list_del(&buf->list);
-		msm_cvp_comm_smem_free(inst, &buf->smem);
-		kfree(buf);
-	}
-	INIT_LIST_HEAD(&inst->eosbufs.list);
-	mutex_unlock(&inst->eosbufs.lock);
-}
-
-
-int msm_cvp_comm_release_recon_buffers(struct msm_cvp_inst *inst)
-{
-	struct recon_buf *buf, *next;
-
-	if (!inst) {
-		dprintk(CVP_ERR,
-			"Invalid instance pointer = %pK\n", inst);
-		return -EINVAL;
-	}
-
-	mutex_lock(&inst->reconbufs.lock);
-	list_for_each_entry_safe(buf, next, &inst->reconbufs.list, list) {
-		list_del(&buf->list);
-		kfree(buf);
-	}
-	INIT_LIST_HEAD(&inst->reconbufs.list);
-	mutex_unlock(&inst->reconbufs.lock);
-
-	return 0;
-}
-
-int msm_cvp_comm_try_set_prop(struct msm_cvp_inst *inst,
-	enum hal_property ptype, void *pdata)
-{
-	int rc = 0;
-	struct hfi_device *hdev;
-
-	if (!inst) {
-		dprintk(CVP_ERR, "Invalid input: %pK\n", inst);
-		return -EINVAL;
-	}
-
-	if (!inst->core || !inst->core->device) {
-		dprintk(CVP_ERR, "%s invalid parameters\n", __func__);
-		return -EINVAL;
-	}
-	hdev = inst->core->device;
-
-	mutex_lock(&inst->sync_lock);
-	if (inst->state < MSM_CVP_OPEN_DONE || inst->state >= MSM_CVP_CLOSE) {
-		dprintk(CVP_ERR, "Not in proper state to set property\n");
-		rc = -EAGAIN;
-		goto exit;
-	}
-	rc = call_hfi_op(hdev, session_set_property, (void *)inst->session,
-			ptype, pdata);
-	if (rc)
-		dprintk(CVP_ERR, "Failed to set hal property for framesize\n");
-exit:
-	mutex_unlock(&inst->sync_lock);
-	return rc;
-}
-
 int msm_cvp_comm_set_buffer_count(struct msm_cvp_inst *inst,
 	int host_count, int act_count, enum hal_buffer type)
 {
@@ -3285,218 +1443,6 @@
 	return rc;
 }
 
-static void msm_comm_flush_in_invalid_state(struct msm_cvp_inst *inst)
-{
-	struct list_head *ptr, *next;
-	enum cvp_ports ports[] = {OUTPUT_PORT, CAPTURE_PORT};
-	int c = 0;
-
-	/* before flush ensure venus released all buffers */
-	msm_cvp_comm_try_state(inst, MSM_CVP_RELEASE_RESOURCES_DONE);
-
-	for (c = 0; c < ARRAY_SIZE(ports); ++c) {
-		enum cvp_ports port = ports[c];
-
-		mutex_lock(&inst->bufq[port].lock);
-		list_for_each_safe(ptr, next,
-				&inst->bufq[port].vb2_bufq.queued_list) {
-			struct vb2_buffer *vb = container_of(ptr,
-					struct vb2_buffer, queued_entry);
-			if (vb->state == VB2_BUF_STATE_ACTIVE) {
-				vb->planes[0].bytesused = 0;
-				print_cvp_vb2_buffer(CVP_ERR,
-					"flush in invalid", inst, vb);
-				vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
-			} else {
-				dprintk(CVP_WARN,
-					"%s VB is in state %d not in ACTIVE state\n"
-					, __func__, vb->state);
-			}
-		}
-		mutex_unlock(&inst->bufq[port].lock);
-	}
-	msm_cvp_queue_v4l2_event(inst, V4L2_EVENT_MSM_CVP_FLUSH_DONE);
-}
-
-int msm_cvp_comm_flush(struct msm_cvp_inst *inst, u32 flags)
-{
-	int i, rc =  0;
-	bool ip_flush = false;
-	bool op_flush = false;
-	struct msm_video_buffer *mbuf, *next;
-	struct msm_cvp_core *core;
-	struct hfi_device *hdev;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(CVP_ERR,
-				"Invalid params, inst %pK\n", inst);
-		return -EINVAL;
-	}
-	core = inst->core;
-	hdev = core->device;
-
-	ip_flush = flags & V4L2_CMD_FLUSH_OUTPUT;
-	op_flush = flags & V4L2_CMD_FLUSH_CAPTURE;
-
-	if (ip_flush && !op_flush) {
-		dprintk(CVP_WARN,
-			"Input only flush not supported, making it flush all\n");
-		op_flush = true;
-		return 0;
-	}
-
-	msm_cvp_clock_data_reset(inst);
-
-	if (inst->state == MSM_CVP_CORE_INVALID) {
-		dprintk(CVP_ERR,
-				"Core %pK and inst %pK are in bad state\n",
-					core, inst);
-		msm_comm_flush_in_invalid_state(inst);
-		return 0;
-	}
-
-	mutex_lock(&inst->flush_lock);
-	/* enable in flush */
-	inst->in_flush = true;
-
-	mutex_lock(&inst->registeredbufs.lock);
-	list_for_each_entry_safe(mbuf, next, &inst->registeredbufs.list, list) {
-		/* don't flush input buffers if input flush is not requested */
-		if (!ip_flush && mbuf->vvb.vb2_buf.type ==
-				V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
-			continue;
-
-		/* flush only deferred or rbr pending buffers */
-		if (!(mbuf->flags & MSM_CVP_FLAG_DEFERRED ||
-			mbuf->flags & MSM_CVP_FLAG_RBR_PENDING))
-			continue;
-
-		/*
-		 * flush buffers which are queued by client already,
-		 * the refcount will be two or more for those buffers.
-		 */
-		if (!(mbuf->smem[0].refcount >= 2))
-			continue;
-
-		print_video_buffer(CVP_DBG, "flush buf", inst, mbuf);
-		msm_cvp_comm_flush_video_buffer(inst, mbuf);
-
-		for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) {
-			if (msm_cvp_smem_unmap_dma_buf(inst, &mbuf->smem[i]))
-				print_video_buffer(CVP_ERR,
-					"dqbuf: unmap failed.", inst, mbuf);
-			if (msm_cvp_smem_unmap_dma_buf(inst, &mbuf->smem[i]))
-				print_video_buffer(CVP_ERR,
-					"dqbuf: unmap failed..", inst, mbuf);
-		}
-		if (!mbuf->smem[0].refcount) {
-			list_del(&mbuf->list);
-			kref_cvp_put_mbuf(mbuf);
-		} else {
-			/* buffer is no more a deferred buffer */
-			mbuf->flags &= ~MSM_CVP_FLAG_DEFERRED;
-		}
-	}
-	mutex_unlock(&inst->registeredbufs.lock);
-
-	hdev = inst->core->device;
-	if (ip_flush) {
-		dprintk(CVP_DBG, "Send flush on all ports to firmware\n");
-		rc = call_hfi_op(hdev, session_flush, inst->session,
-			HAL_FLUSH_ALL);
-	} else {
-		dprintk(CVP_DBG, "Send flush on output port to firmware\n");
-		rc = call_hfi_op(hdev, session_flush, inst->session,
-			HAL_FLUSH_OUTPUT);
-	}
-	mutex_unlock(&inst->flush_lock);
-	if (rc) {
-		dprintk(CVP_ERR,
-			"Sending flush to firmware failed, flush out all buffers\n");
-		msm_comm_flush_in_invalid_state(inst);
-		/* disable in_flush */
-		inst->in_flush = false;
-	}
-
-	return rc;
-}
-
-enum hal_extradata_id msm_cvp_comm_get_hal_extradata_index(
-	enum v4l2_mpeg_cvp_extradata index)
-{
-	int ret = 0;
-
-	switch (index) {
-	case V4L2_MPEG_CVP_EXTRADATA_NONE:
-		ret = HAL_EXTRADATA_NONE;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_INTERLACE_VIDEO:
-		ret = HAL_EXTRADATA_INTERLACE_VIDEO;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_TIMESTAMP:
-		ret = HAL_EXTRADATA_TIMESTAMP;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_S3D_FRAME_PACKING:
-		ret = HAL_EXTRADATA_S3D_FRAME_PACKING;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_FRAME_RATE:
-		ret = HAL_EXTRADATA_FRAME_RATE;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_PANSCAN_WINDOW:
-		ret = HAL_EXTRADATA_PANSCAN_WINDOW;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_RECOVERY_POINT_SEI:
-		ret = HAL_EXTRADATA_RECOVERY_POINT_SEI;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_NUM_CONCEALED_MB:
-		ret = HAL_EXTRADATA_NUM_CONCEALED_MB;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_ASPECT_RATIO:
-		ret = HAL_EXTRADATA_ASPECT_RATIO;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_MPEG2_SEQDISP:
-		ret = HAL_EXTRADATA_MPEG2_SEQDISP;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_STREAM_USERDATA:
-		ret = HAL_EXTRADATA_STREAM_USERDATA;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_FRAME_QP:
-		ret = HAL_EXTRADATA_FRAME_QP;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_LTR:
-		ret = HAL_EXTRADATA_LTR_INFO;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_ROI_QP:
-		ret = HAL_EXTRADATA_ROI_QP;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_OUTPUT_CROP:
-		ret = HAL_EXTRADATA_OUTPUT_CROP;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_DISPLAY_COLOUR_SEI:
-		ret = HAL_EXTRADATA_MASTERING_DISPLAY_COLOUR_SEI;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI:
-		ret = HAL_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_VUI_DISPLAY:
-		ret = HAL_EXTRADATA_VUI_DISPLAY_INFO;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_VPX_COLORSPACE:
-		ret = HAL_EXTRADATA_VPX_COLORSPACE;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_UBWC_CR_STATS_INFO:
-		ret = HAL_EXTRADATA_UBWC_CR_STATS_INFO;
-		break;
-	case V4L2_MPEG_CVP_EXTRADATA_HDR10PLUS_METADATA:
-		ret = HAL_EXTRADATA_HDR10PLUS_METADATA;
-		break;
-	default:
-		dprintk(CVP_WARN, "Extradata not found: %d\n", index);
-		break;
-	}
-	return ret;
-};
-
 int msm_cvp_noc_error_info(struct msm_cvp_core *core)
 {
 	struct hfi_device *hdev;
@@ -3569,196 +1515,6 @@
 	mutex_unlock(&core->lock);
 }
 
-static int msm_cvp_load_supported(struct msm_cvp_inst *inst)
-{
-	int num_mbs_per_sec = 0, max_load_adj = 0;
-	enum load_calc_quirks quirks = LOAD_CALC_IGNORE_TURBO_LOAD |
-		LOAD_CALC_IGNORE_THUMBNAIL_LOAD |
-		LOAD_CALC_IGNORE_NON_REALTIME_LOAD;
-
-	if (inst->state == MSM_CVP_OPEN_DONE) {
-		max_load_adj = inst->core->resources.max_load;
-		num_mbs_per_sec = msm_cvp_comm_get_load(inst->core,
-					MSM_CVP_DECODER, quirks);
-		num_mbs_per_sec += msm_cvp_comm_get_load(inst->core,
-					MSM_CVP_ENCODER, quirks);
-		if (num_mbs_per_sec > max_load_adj) {
-			dprintk(CVP_ERR,
-				"H/W is overloaded. needed: %d max: %d\n",
-				num_mbs_per_sec,
-				max_load_adj);
-			msm_cvp_print_running_insts(inst->core);
-			return -EBUSY;
-		}
-	}
-	return 0;
-}
-
-int msm_cvp_check_scaling_supported(struct msm_cvp_inst *inst)
-{
-	u32 x_min, x_max, y_min, y_max;
-	u32 input_height, input_width, output_height, output_width;
-
-	if (inst->grid_enable > 0) {
-		dprintk(CVP_DBG, "Skip scaling check for HEIC\n");
-		return 0;
-	}
-
-	input_height = inst->prop.height[OUTPUT_PORT];
-	input_width = inst->prop.width[OUTPUT_PORT];
-	output_height = inst->prop.height[CAPTURE_PORT];
-	output_width = inst->prop.width[CAPTURE_PORT];
-
-	if (!input_height || !input_width || !output_height || !output_width) {
-		dprintk(CVP_ERR,
-			"Invalid : Input height = %d width = %d",
-			input_height, input_width);
-		dprintk(CVP_ERR,
-			" output height = %d width = %d\n",
-			output_height, output_width);
-		return -ENOTSUPP;
-	}
-
-	if (!inst->capability.scale_x.min ||
-		!inst->capability.scale_x.max ||
-		!inst->capability.scale_y.min ||
-		!inst->capability.scale_y.max) {
-
-		if (input_width * input_height !=
-			output_width * output_height) {
-			dprintk(CVP_ERR,
-				"%s: scaling is not supported (%dx%d != %dx%d)\n",
-				__func__, input_width, input_height,
-				output_width, output_height);
-			return -ENOTSUPP;
-		}
-
-		dprintk(CVP_DBG, "%s: supported WxH = %dx%d\n",
-			__func__, input_width, input_height);
-		return 0;
-	}
-
-	x_min = (1<<16)/inst->capability.scale_x.min;
-	y_min = (1<<16)/inst->capability.scale_y.min;
-	x_max = inst->capability.scale_x.max >> 16;
-	y_max = inst->capability.scale_y.max >> 16;
-
-	if (input_height > output_height) {
-		if (input_height > x_min * output_height) {
-			dprintk(CVP_ERR,
-				"Unsupported height min height %d vs %d\n",
-				input_height / x_min, output_height);
-			return -ENOTSUPP;
-		}
-	} else {
-		if (output_height > x_max * input_height) {
-			dprintk(CVP_ERR,
-				"Unsupported height max height %d vs %d\n",
-				x_max * input_height, output_height);
-			return -ENOTSUPP;
-		}
-	}
-	if (input_width > output_width) {
-		if (input_width > y_min * output_width) {
-			dprintk(CVP_ERR,
-				"Unsupported width min width %d vs %d\n",
-				input_width / y_min, output_width);
-			return -ENOTSUPP;
-		}
-	} else {
-		if (output_width > y_max * input_width) {
-			dprintk(CVP_ERR,
-				"Unsupported width max width %d vs %d\n",
-				y_max * input_width, output_width);
-			return -ENOTSUPP;
-		}
-	}
-	return 0;
-}
-
-int msm_cvp_check_session_supported(struct msm_cvp_inst *inst)
-{
-	struct msm_cvp_capability *capability;
-	int rc = 0;
-	struct hfi_device *hdev;
-	struct msm_cvp_core *core;
-	u32 output_height, output_width, input_height, input_width;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(CVP_WARN, "%s: Invalid parameter\n", __func__);
-		return -EINVAL;
-	}
-	capability = &inst->capability;
-	hdev = inst->core->device;
-	core = inst->core;
-	rc = msm_cvp_load_supported(inst);
-	if (rc) {
-		dprintk(CVP_WARN,
-			"%s: Hardware is overloaded\n", __func__);
-		return rc;
-	}
-
-	if (!is_thermal_permissible(core)) {
-		dprintk(CVP_WARN,
-			"Thermal level critical, stop all active sessions!\n");
-		return -ENOTSUPP;
-	}
-
-	output_height = inst->prop.height[CAPTURE_PORT];
-	output_width = inst->prop.width[CAPTURE_PORT];
-	input_height = inst->prop.height[OUTPUT_PORT];
-	input_width = inst->prop.width[OUTPUT_PORT];
-
-	if (inst->session_type == MSM_CVP_ENCODER && (input_width % 2 != 0 ||
-			input_height % 2 != 0 || output_width % 2 != 0 ||
-			output_height % 2 != 0)) {
-		dprintk(CVP_ERR,
-			"Height and Width should be even numbers for NV12\n");
-		dprintk(CVP_ERR,
-			"Input WxH = (%u)x(%u), Output WxH = (%u)x(%u)\n",
-			input_width, input_height,
-			output_width, output_height);
-		rc = -ENOTSUPP;
-	}
-
-	output_height = ALIGN(inst->prop.height[CAPTURE_PORT], 16);
-	output_width = ALIGN(inst->prop.width[CAPTURE_PORT], 16);
-
-	if (!rc) {
-		if (output_width < capability->width.min ||
-			output_height < capability->height.min) {
-			dprintk(CVP_ERR,
-				"Unsupported WxH = (%u)x(%u), min supported is - (%u)x(%u)\n",
-				output_width,
-				output_height,
-				capability->width.min,
-				capability->height.min);
-			rc = -ENOTSUPP;
-		}
-		if (!rc && output_width > capability->width.max) {
-			dprintk(CVP_ERR,
-				"Unsupported width = %u supported max width = %u\n",
-				output_width,
-				capability->width.max);
-				rc = -ENOTSUPP;
-		}
-
-		if (!rc && output_height * output_width >
-			capability->width.max * capability->height.max) {
-			dprintk(CVP_ERR,
-			"Unsupported WxH = (%u)x(%u), max supported is - (%u)x(%u)\n",
-			output_width, output_height,
-			capability->width.max, capability->height.max);
-			rc = -ENOTSUPP;
-		}
-	}
-	if (rc) {
-		dprintk(CVP_ERR,
-			"%s: Resolution unsupported\n", __func__);
-	}
-	return rc;
-}
-
 void msm_cvp_comm_generate_session_error(struct msm_cvp_inst *inst)
 {
 	enum hal_command_response cmd = HAL_SESSION_ERROR;
@@ -3894,123 +1650,6 @@
 	mutex_unlock(&core->lock);
 }
 
-int msm_cvp_comm_set_color_format(struct msm_cvp_inst *inst,
-		enum hal_buffer buffer_type, int fourcc)
-{
-	struct hal_uncompressed_format_select hal_fmt = {0};
-	enum hal_uncompressed_format format = HAL_UNUSED_COLOR;
-	int rc = 0;
-	struct hfi_device *hdev;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(CVP_ERR, "%s - invalid param\n", __func__);
-		return -EINVAL;
-	}
-
-	hdev = inst->core->device;
-
-	format = msm_cvp_comm_get_hal_uncompressed(fourcc);
-	if (format == HAL_UNUSED_COLOR) {
-		dprintk(CVP_ERR, "Using unsupported colorformat %#x\n",
-				fourcc);
-		rc = -ENOTSUPP;
-		goto exit;
-	}
-
-	hal_fmt.buffer_type = buffer_type;
-	hal_fmt.format = format;
-
-	rc = call_hfi_op(hdev, session_set_property, inst->session,
-		HAL_PARAM_UNCOMPRESSED_FORMAT_SELECT, &hal_fmt);
-	if (rc)
-		dprintk(CVP_ERR,
-			"Failed to set input color format\n");
-	else
-		dprintk(CVP_DBG, "Setting uncompressed colorformat to %#x\n",
-				format);
-
-exit:
-	return rc;
-}
-
-int msm_cvp_comm_s_parm(struct msm_cvp_inst *inst, struct v4l2_streamparm *a)
-{
-	u32 property_id = 0;
-	u64 us_per_frame = 0;
-	void *pdata;
-	int rc = 0, fps = 0;
-	struct hal_frame_rate frame_rate;
-	struct hfi_device *hdev;
-
-	if (!inst || !inst->core || !inst->core->device || !a) {
-		dprintk(CVP_ERR, "%s invalid parameters\n", __func__);
-		return -EINVAL;
-	}
-
-	hdev = inst->core->device;
-	property_id = HAL_CONFIG_FRAME_RATE;
-
-	if (a->parm.output.timeperframe.denominator) {
-		switch (a->type) {
-		case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
-		case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
-			us_per_frame = a->parm.output.timeperframe.numerator *
-				(u64)USEC_PER_SEC;
-			do_div(us_per_frame,
-				a->parm.output.timeperframe.denominator);
-			break;
-		default:
-			dprintk(CVP_ERR,
-					"Scale clocks : Unknown buffer type %d\n",
-					a->type);
-			break;
-		}
-	}
-
-	if (!us_per_frame) {
-		dprintk(CVP_ERR,
-				"Failed to scale clocks : time between frames is 0\n");
-		rc = -EINVAL;
-		goto exit;
-	}
-
-	fps = us_per_frame > USEC_PER_SEC ?
-		0 : USEC_PER_SEC / (u32)us_per_frame;
-
-	if (fps % 15 == 14 || fps % 24 == 23)
-		fps = fps + 1;
-	else if ((fps > 1) && (fps % 24 == 1 || fps % 15 == 1))
-		fps = fps - 1;
-
-	if (fps < inst->capability.frame_rate.min ||
-			fps > inst->capability.frame_rate.max) {
-		dprintk(CVP_ERR,
-			"FPS is out of limits : fps = %d Min = %d, Max = %d\n",
-			fps, inst->capability.frame_rate.min,
-			inst->capability.frame_rate.max);
-		rc = -EINVAL;
-		goto exit;
-	}
-
-	dprintk(CVP_PROF, "reported fps changed for %pK: %d->%d\n",
-			inst, inst->prop.fps, fps);
-	inst->prop.fps = fps;
-	if (inst->session_type == MSM_CVP_ENCODER &&
-		get_cvp_hal_codec(inst->fmts[CAPTURE_PORT].fourcc) !=
-			HAL_VIDEO_CODEC_TME) {
-		frame_rate.frame_rate = inst->prop.fps * BIT(16);
-		frame_rate.buffer_type = HAL_BUFFER_OUTPUT;
-		pdata = &frame_rate;
-		rc = call_hfi_op(hdev, session_set_property,
-			inst->session, property_id, pdata);
-		if (rc)
-			dprintk(CVP_WARN,
-				"Failed to set frame rate %d\n", rc);
-	}
-exit:
-	return rc;
-}
-
 void msm_cvp_comm_print_inst_info(struct msm_cvp_inst *inst)
 {
 	struct msm_video_buffer *mbuf;
@@ -4029,15 +1668,6 @@
 	port = is_decode ? OUTPUT_PORT : CAPTURE_PORT;
 	is_secure = inst->flags & CVP_SECURE;
 	dprintk(CVP_ERR,
-			"%s session, %s, Codec type: %s HxW: %d x %d fps: %d bitrate: %d bit-depth: %s\n",
-			is_decode ? "Decode" : "Encode",
-			is_secure ? "Secure" : "Non-Secure",
-			inst->fmts[port].name,
-			inst->prop.height[port], inst->prop.width[port],
-			inst->prop.fps, inst->prop.bitrate,
-			!inst->bit_depth ? "8" : "10");
-
-	dprintk(CVP_ERR,
 			"---Buffer details for inst: %pK of type: %d---\n",
 			inst, inst->session_type);
 	mutex_lock(&inst->registeredbufs.lock);
@@ -4046,14 +1676,6 @@
 		print_video_buffer(CVP_ERR, "buf", inst, mbuf);
 	mutex_unlock(&inst->registeredbufs.lock);
 
-	mutex_lock(&inst->scratchbufs.lock);
-	dprintk(CVP_ERR, "scratch buffer list:\n");
-	list_for_each_entry(buf, &inst->scratchbufs.list, list)
-		dprintk(CVP_ERR, "type: %d addr: %x size: %u\n",
-				buf->buffer_type, buf->smem.device_addr,
-				buf->smem.size);
-	mutex_unlock(&inst->scratchbufs.lock);
-
 	mutex_lock(&inst->persistbufs.lock);
 	dprintk(CVP_ERR, "persist buffer list:\n");
 	list_for_each_entry(buf, &inst->persistbufs.list, list)
@@ -4061,74 +1683,6 @@
 				buf->buffer_type, buf->smem.device_addr,
 				buf->smem.size);
 	mutex_unlock(&inst->persistbufs.lock);
-
-	mutex_lock(&inst->outputbufs.lock);
-	dprintk(CVP_ERR, "dpb buffer list:\n");
-	list_for_each_entry(buf, &inst->outputbufs.list, list)
-		dprintk(CVP_ERR, "type: %d addr: %x size: %u\n",
-				buf->buffer_type, buf->smem.device_addr,
-				buf->smem.size);
-	mutex_unlock(&inst->outputbufs.lock);
-}
-
-int msm_cvp_comm_session_continue(void *instance)
-{
-	struct msm_cvp_inst *inst = instance;
-	int rc = 0;
-	struct hfi_device *hdev;
-
-	if (!inst || !inst->core || !inst->core->device)
-		return -EINVAL;
-	hdev = inst->core->device;
-	mutex_lock(&inst->lock);
-	if (inst->state >= MSM_CVP_RELEASE_RESOURCES_DONE ||
-			inst->state < MSM_CVP_START_DONE) {
-		dprintk(CVP_DBG,
-			"Inst %pK : Not in valid state to call %s\n",
-				inst, __func__);
-		goto sess_continue_fail;
-	}
-	dprintk(CVP_ERR,
-				"session_continue called in wrong state for decoder");
-
-sess_continue_fail:
-	mutex_unlock(&inst->lock);
-	return rc;
-}
-
-u32 cvp_get_frame_size_nv12(int plane, u32 height, u32 width)
-{
-	return VENUS_BUFFER_SIZE(COLOR_FMT_NV12, width, height);
-}
-
-u32 cvp_get_frame_size_nv12_ubwc(int plane, u32 height, u32 width)
-{
-	return VENUS_BUFFER_SIZE(COLOR_FMT_NV12_UBWC, width, height);
-}
-
-u32 cvp_get_frame_size_rgba(int plane, u32 height, u32 width)
-{
-	return VENUS_BUFFER_SIZE(COLOR_FMT_RGBA8888, width, height);
-}
-
-u32 cvp_get_frame_size_nv21(int plane, u32 height, u32 width)
-{
-	return VENUS_BUFFER_SIZE(COLOR_FMT_NV21, width, height);
-}
-
-u32 cvp_get_frame_size_tp10_ubwc(int plane, u32 height, u32 width)
-{
-	return VENUS_BUFFER_SIZE(COLOR_FMT_NV12_BPP10_UBWC, width, height);
-}
-
-u32 cvp_get_frame_size_p010(int plane, u32 height, u32 width)
-{
-	return VENUS_BUFFER_SIZE(COLOR_FMT_P010, width, height);
-}
-
-u32 cvp_get_frame_size_nv12_512(int plane, u32 height, u32 width)
-{
-	return VENUS_BUFFER_SIZE(COLOR_FMT_NV12_512, width, height);
 }
 
 void print_video_buffer(u32 tag, const char *str, struct msm_cvp_inst *inst,
@@ -4166,552 +1720,6 @@
 			vb2->planes[1].bytesused, mbuf->smem[1].refcount);
 }
 
-void print_cvp_vb2_buffer(u32 tag, const char *str, struct msm_cvp_inst *inst,
-		struct vb2_buffer *vb2)
-{
-	if (!(tag & msm_cvp_debug) || !inst || !vb2)
-		return;
-
-	if (vb2->num_planes == 1)
-		dprintk(tag,
-			"%s: %s: %x : idx %2d fd %d off %d size %d filled %d\n",
-			str, vb2->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
-			"OUTPUT" : "CAPTURE", hash32_ptr(inst->session),
-			vb2->index, vb2->planes[0].m.fd,
-			vb2->planes[0].data_offset, vb2->planes[0].length,
-			vb2->planes[0].bytesused);
-	else
-		dprintk(tag,
-			"%s: %s: %x : idx %2d fd %d off %d size %d filled %d, extradata: fd %d off %d size %d filled %d\n",
-			str, vb2->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
-			"OUTPUT" : "CAPTURE", hash32_ptr(inst->session),
-			vb2->index, vb2->planes[0].m.fd,
-			vb2->planes[0].data_offset, vb2->planes[0].length,
-			vb2->planes[0].bytesused, vb2->planes[1].m.fd,
-			vb2->planes[1].data_offset, vb2->planes[1].length,
-			vb2->planes[1].bytesused);
-}
-
-void print_cvp_v4l2_buffer(u32 tag, const char *str, struct msm_cvp_inst *inst,
-		struct v4l2_buffer *v4l2)
-{
-	if (!(tag & msm_cvp_debug) || !inst || !v4l2)
-		return;
-
-	if (v4l2->length == 1)
-		dprintk(tag,
-			"%s: %s: %x : idx %2d fd %d off %d size %d filled %d\n",
-			str, v4l2->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
-			"OUTPUT" : "CAPTURE", hash32_ptr(inst->session),
-			v4l2->index, v4l2->m.planes[0].m.fd,
-			v4l2->m.planes[0].data_offset,
-			v4l2->m.planes[0].length,
-			v4l2->m.planes[0].bytesused);
-	else
-		dprintk(tag,
-			"%s: %s: %x : idx %2d fd %d off %d size %d filled %d, extradata: fd %d off %d size %d filled %d\n",
-			str, v4l2->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
-			"OUTPUT" : "CAPTURE", hash32_ptr(inst->session),
-			v4l2->index, v4l2->m.planes[0].m.fd,
-			v4l2->m.planes[0].data_offset,
-			v4l2->m.planes[0].length,
-			v4l2->m.planes[0].bytesused,
-			v4l2->m.planes[1].m.fd,
-			v4l2->m.planes[1].data_offset,
-			v4l2->m.planes[1].length,
-			v4l2->m.planes[1].bytesused);
-}
-
-bool msm_cvp_comm_compare_vb2_plane(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf, struct vb2_buffer *vb2, u32 i)
-{
-	struct vb2_buffer *vb;
-
-	if (!inst || !mbuf || !vb2) {
-		dprintk(CVP_ERR, "%s: invalid params, %pK %pK %pK\n",
-			__func__, inst, mbuf, vb2);
-		return false;
-	}
-
-	vb = &mbuf->vvb.vb2_buf;
-	if (vb->planes[i].m.fd == vb2->planes[i].m.fd &&
-		vb->planes[i].length == vb2->planes[i].length) {
-		return true;
-	}
-
-	return false;
-}
-
-bool msm_cvp_comm_compare_vb2_planes(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf, struct vb2_buffer *vb2)
-{
-	int i = 0;
-	struct vb2_buffer *vb;
-
-	if (!inst || !mbuf || !vb2) {
-		dprintk(CVP_ERR, "%s: invalid params, %pK %pK %pK\n",
-			__func__, inst, mbuf, vb2);
-		return false;
-	}
-
-	vb = &mbuf->vvb.vb2_buf;
-
-	if (vb->num_planes != vb2->num_planes)
-		return false;
-
-	for (i = 0; i < vb->num_planes; i++) {
-		if (!msm_cvp_comm_compare_vb2_plane(inst, mbuf, vb2, i))
-			return false;
-	}
-
-	return true;
-}
-
-bool msm_cvp_comm_compare_dma_plane(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf, unsigned long *dma_planes, u32 i)
-{
-	if (!inst || !mbuf || !dma_planes) {
-		dprintk(CVP_ERR, "%s: invalid params, %pK %pK %pK\n",
-			__func__, inst, mbuf, dma_planes);
-		return false;
-	}
-
-	if ((unsigned long)mbuf->smem[i].dma_buf == dma_planes[i])
-		return true;
-
-	return false;
-}
-
-bool msm_cvp_comm_compare_dma_planes(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf, unsigned long *dma_planes)
-{
-	int i = 0;
-	struct vb2_buffer *vb;
-
-	if (!inst || !mbuf || !dma_planes) {
-		dprintk(CVP_ERR, "%s: invalid params, %pK %pK %pK\n",
-			__func__, inst, mbuf, dma_planes);
-		return false;
-	}
-
-	vb = &mbuf->vvb.vb2_buf;
-	for (i = 0; i < vb->num_planes; i++) {
-		if (!msm_cvp_comm_compare_dma_plane(inst, mbuf, dma_planes, i))
-			return false;
-	}
-
-	return true;
-}
-
-
-bool msm_cvp_comm_compare_device_plane(struct msm_video_buffer *mbuf,
-		u32 type, u32 *planes, u32 i)
-{
-	if (!mbuf || !planes) {
-		dprintk(CVP_ERR, "%s: invalid params, %pK %pK\n",
-			__func__, mbuf, planes);
-		return false;
-	}
-
-	if (mbuf->vvb.vb2_buf.type == type &&
-		mbuf->smem[i].device_addr == planes[i])
-		return true;
-
-	return false;
-}
-
-bool msm_cvp_comm_compare_device_planes(struct msm_video_buffer *mbuf,
-		u32 type, u32 *planes)
-{
-	int i = 0;
-
-	if (!mbuf || !planes)
-		return false;
-
-	for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) {
-		if (!msm_cvp_comm_compare_device_plane(mbuf, type, planes, i))
-			return false;
-	}
-
-	return true;
-}
-
-struct msm_video_buffer *msm_cvp_comm_get_buffer_using_device_planes(
-		struct msm_cvp_inst *inst, u32 type, u32 *planes)
-{
-	struct msm_video_buffer *mbuf;
-	bool found = false;
-
-	mutex_lock(&inst->registeredbufs.lock);
-	found = false;
-	list_for_each_entry(mbuf, &inst->registeredbufs.list, list) {
-		if (msm_cvp_comm_compare_device_planes(mbuf, type, planes)) {
-			found = true;
-			break;
-		}
-	}
-	mutex_unlock(&inst->registeredbufs.lock);
-	if (!found) {
-		dprintk(CVP_ERR,
-			"%s: data_addr %x, extradata_addr %x not found\n",
-			__func__, planes[0], planes[1]);
-		mbuf = NULL;
-	}
-
-	return mbuf;
-}
-
-int msm_cvp_comm_flush_video_buffer(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf)
-{
-	struct vb2_buffer *vb;
-	u32 port;
-
-	if (!inst || !mbuf) {
-		dprintk(CVP_ERR, "%s: invalid params %pK %pK\n",
-			__func__, inst, mbuf);
-		return -EINVAL;
-	}
-
-	vb = msm_cvp_comm_get_vb_using_video_buffer(inst, mbuf);
-	if (!vb) {
-		print_video_buffer(CVP_ERR,
-			"vb not found for buf", inst, mbuf);
-		return -EINVAL;
-	}
-
-	if (mbuf->vvb.vb2_buf.type ==
-			V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
-		port = CAPTURE_PORT;
-	else if (mbuf->vvb.vb2_buf.type ==
-			V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
-		port = OUTPUT_PORT;
-	else
-		return -EINVAL;
-
-	mutex_lock(&inst->bufq[port].lock);
-	if (inst->bufq[port].vb2_bufq.streaming) {
-		vb->planes[0].bytesused = 0;
-		vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
-	} else {
-		dprintk(CVP_ERR, "%s: port %d is not streaming\n",
-			__func__, port);
-	}
-	mutex_unlock(&inst->bufq[port].lock);
-
-	return 0;
-}
-
-int msm_cvp_comm_qbuf_cache_operations(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf)
-{
-	int rc = 0, i;
-	struct vb2_buffer *vb;
-	bool skip;
-
-	if (!inst || !mbuf) {
-		dprintk(CVP_ERR, "%s: invalid params %pK %pK\n",
-			__func__, inst, mbuf);
-		return -EINVAL;
-	}
-	vb = &mbuf->vvb.vb2_buf;
-
-	for (i = 0; i < vb->num_planes; i++) {
-		unsigned long offset, size;
-		enum smem_cache_ops cache_op;
-
-		skip = true;
-		if (inst->session_type == MSM_CVP_DECODER) {
-			if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
-				if (!i) { /* bitstream */
-					skip = false;
-					offset = vb->planes[i].data_offset;
-					size = vb->planes[i].bytesused;
-					cache_op = SMEM_CACHE_CLEAN_INVALIDATE;
-				}
-			} else if (vb->type ==
-					V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
-				if (!i) { /* yuv */
-					skip = false;
-					offset = 0;
-					size = vb->planes[i].length;
-					cache_op = SMEM_CACHE_INVALIDATE;
-				}
-			}
-		} else if (inst->session_type == MSM_CVP_ENCODER) {
-			if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
-				if (!i) { /* yuv */
-					skip = false;
-					offset = vb->planes[i].data_offset;
-					size = vb->planes[i].bytesused;
-					cache_op = SMEM_CACHE_CLEAN_INVALIDATE;
-				}
-			} else if (vb->type ==
-					V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
-				if (!i) { /* bitstream */
-					skip = false;
-					offset = 0;
-					size = vb->planes[i].length;
-					cache_op = SMEM_CACHE_INVALIDATE;
-				}
-			}
-		}
-
-		if (!skip) {
-			rc = msm_cvp_smem_cache_operations(
-					mbuf->smem[i].dma_buf,
-					cache_op, offset, size);
-			if (rc)
-				print_video_buffer(CVP_ERR,
-					"qbuf cache ops failed", inst, mbuf);
-		}
-	}
-
-	return rc;
-}
-
-int msm_cvp_comm_dqbuf_cache_operations(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf)
-{
-	int rc = 0, i;
-	struct vb2_buffer *vb;
-	bool skip;
-
-	if (!inst || !mbuf) {
-		dprintk(CVP_ERR, "%s: invalid params %pK %pK\n",
-			__func__, inst, mbuf);
-		return -EINVAL;
-	}
-	vb = &mbuf->vvb.vb2_buf;
-
-	for (i = 0; i < vb->num_planes; i++) {
-		unsigned long offset, size;
-		enum smem_cache_ops cache_op;
-
-		skip = true;
-		if (inst->session_type == MSM_CVP_DECODER) {
-			if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
-				/* bitstream and extradata */
-				/* we do not need cache operations */
-			} else if (vb->type ==
-					V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
-				if (!i) { /* yuv */
-					skip = false;
-					offset = vb->planes[i].data_offset;
-					size = vb->planes[i].bytesused;
-					cache_op = SMEM_CACHE_INVALIDATE;
-				}
-			}
-		} else if (inst->session_type == MSM_CVP_ENCODER) {
-			if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
-				/* yuv and extradata */
-				/* we do not need cache operations */
-			} else if (vb->type ==
-					V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
-				if (!i) { /* bitstream */
-					skip = false;
-					/*
-					 * Include vp8e header bytes as well
-					 * by making offset equal to zero
-					 */
-					offset = 0;
-					size = vb->planes[i].bytesused +
-						vb->planes[i].data_offset;
-					cache_op = SMEM_CACHE_INVALIDATE;
-				}
-			}
-		}
-
-		if (!skip) {
-			rc = msm_cvp_smem_cache_operations(
-					mbuf->smem[i].dma_buf,
-					cache_op, offset, size);
-			if (rc)
-				print_video_buffer(CVP_ERR,
-					"dqbuf cache ops failed", inst, mbuf);
-		}
-	}
-
-	return rc;
-}
-
-struct msm_video_buffer *msm_cvp_comm_get_video_buffer(
-		struct msm_cvp_inst *inst,
-		struct vb2_buffer *vb2)
-{
-	int rc = 0;
-	struct vb2_v4l2_buffer *vbuf;
-	struct vb2_buffer *vb;
-	unsigned long dma_planes[VB2_MAX_PLANES] = {0};
-	struct msm_video_buffer *mbuf;
-	bool found = false;
-	int i;
-
-	if (!inst || !vb2) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return NULL;
-	}
-
-	for (i = 0; i < vb2->num_planes; i++) {
-		/*
-		 * always compare dma_buf addresses which is guaranteed
-		 * to be same across the processes (duplicate fds).
-		 */
-		dma_planes[i] = (unsigned long)msm_cvp_smem_get_dma_buf(
-				vb2->planes[i].m.fd);
-		if (!dma_planes[i])
-			return NULL;
-		msm_cvp_smem_put_dma_buf((struct dma_buf *)dma_planes[i]);
-	}
-
-	mutex_lock(&inst->registeredbufs.lock);
-	/*
-	 * for encoder input, client may queue the same buffer with different
-	 * fd before driver returned old buffer to the client. This buffer
-	 * should be treated as new buffer Search the list with fd so that
-	 * it will be treated as new msm_video_buffer.
-	 */
-	if (is_encode_session(inst) && vb2->type ==
-			V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
-		list_for_each_entry(mbuf, &inst->registeredbufs.list, list) {
-			if (msm_cvp_comm_compare_vb2_planes(inst, mbuf, vb2)) {
-				found = true;
-				break;
-			}
-		}
-	} else {
-		list_for_each_entry(mbuf, &inst->registeredbufs.list, list) {
-			if (msm_cvp_comm_compare_dma_planes(inst, mbuf,
-					dma_planes)) {
-				found = true;
-				break;
-			}
-		}
-	}
-
-	if (!found) {
-		/* this is new vb2_buffer */
-		mbuf = kzalloc(sizeof(struct msm_video_buffer), GFP_KERNEL);
-		if (!mbuf) {
-			dprintk(CVP_ERR, "%s: alloc msm_video_buffer failed\n",
-				__func__);
-			rc = -ENOMEM;
-			goto exit;
-		}
-		kref_init(&mbuf->kref);
-	}
-
-	/* Initially assume all the buffer are going to be deferred */
-	mbuf->flags |= MSM_CVP_FLAG_DEFERRED;
-
-	vbuf = to_vb2_v4l2_buffer(vb2);
-	memcpy(&mbuf->vvb, vbuf, sizeof(struct vb2_v4l2_buffer));
-	vb = &mbuf->vvb.vb2_buf;
-
-	for (i = 0; i < vb->num_planes; i++) {
-		mbuf->smem[i].buffer_type =
-			cvp_get_hal_buffer_type(vb->type, i);
-		mbuf->smem[i].fd = vb->planes[i].m.fd;
-		mbuf->smem[i].offset = vb->planes[i].data_offset;
-		mbuf->smem[i].size = vb->planes[i].length;
-		rc = msm_cvp_smem_map_dma_buf(inst, &mbuf->smem[i]);
-		if (rc) {
-			dprintk(CVP_ERR, "%s: map failed.\n", __func__);
-			goto exit;
-		}
-		/* increase refcount as we get both fbd and rbr */
-		rc = msm_cvp_smem_map_dma_buf(inst, &mbuf->smem[i]);
-		if (rc) {
-			dprintk(CVP_ERR, "%s: map failed..\n", __func__);
-			goto exit;
-		}
-	}
-	/* dma cache operations need to be performed after dma_map */
-	msm_cvp_comm_qbuf_cache_operations(inst, mbuf);
-
-	/* add the new buffer to list */
-	if (!found)
-		list_add_tail(&mbuf->list, &inst->registeredbufs.list);
-
-	mutex_unlock(&inst->registeredbufs.lock);
-
-	/*
-	 * Return mbuf if decode batching is enabled as this buffer
-	 * may trigger queuing full batch to firmware, also this buffer
-	 * will not be queued to firmware while full batch queuing,
-	 * it will be queued when rbr event arrived from firmware.
-	 */
-	if (rc == -EEXIST && !inst->batch.enable)
-		return ERR_PTR(rc);
-
-	return mbuf;
-
-exit:
-	dprintk(CVP_ERR, "%s: rc %d\n", __func__, rc);
-	msm_cvp_comm_unmap_video_buffer(inst, mbuf);
-	if (!found)
-		kref_cvp_put_mbuf(mbuf);
-	mutex_unlock(&inst->registeredbufs.lock);
-
-	return ERR_PTR(rc);
-}
-
-void msm_cvp_comm_put_video_buffer(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf)
-{
-	struct msm_video_buffer *temp;
-	bool found = false;
-	int i = 0;
-
-	if (!inst || !mbuf) {
-		dprintk(CVP_ERR, "%s: invalid params %pK %pK\n",
-			__func__, inst, mbuf);
-		return;
-	}
-
-	mutex_lock(&inst->registeredbufs.lock);
-	/* check if mbuf was not removed by any chance */
-	list_for_each_entry(temp, &inst->registeredbufs.list, list) {
-		if (msm_cvp_comm_compare_vb2_planes(inst, mbuf,
-				&temp->vvb.vb2_buf)) {
-			found = true;
-			break;
-		}
-	}
-	if (!found) {
-		print_video_buffer(CVP_ERR, "buf was removed", inst, mbuf);
-		goto unlock;
-	}
-
-	print_video_buffer(CVP_DBG, "dqbuf", inst, mbuf);
-	for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) {
-		if (msm_cvp_smem_unmap_dma_buf(inst, &mbuf->smem[i]))
-			print_video_buffer(CVP_ERR,
-				"dqbuf: unmap failed.", inst, mbuf);
-
-		if (!(mbuf->vvb.flags & V4L2_BUF_FLAG_READONLY)) {
-			/* rbr won't come for this buffer */
-			if (msm_cvp_smem_unmap_dma_buf(inst, &mbuf->smem[i]))
-				print_video_buffer(CVP_ERR,
-					"dqbuf: unmap failed..", inst, mbuf);
-		} else {
-			/* RBR event expected */
-			mbuf->flags |= MSM_CVP_FLAG_RBR_PENDING;
-		}
-	}
-	/*
-	 * remove the entry if plane[0].refcount is zero else
-	 * don't remove as client queued same buffer that's why
-	 * plane[0].refcount is not zero
-	 */
-	if (!mbuf->smem[0].refcount) {
-		list_del(&mbuf->list);
-		kref_cvp_put_mbuf(mbuf);
-	}
-unlock:
-	mutex_unlock(&inst->registeredbufs.lock);
-}
-
 int msm_cvp_comm_unmap_video_buffer(struct msm_cvp_inst *inst,
 		struct msm_video_buffer *mbuf)
 {
@@ -4780,167 +1788,6 @@
 	return ret;
 }
 
-void msm_cvp_comm_store_mark_data(struct msm_cvp_list *data_list,
-		u32 index, u32 mark_data, u32 mark_target)
-{
-	struct msm_cvp_buf_data *pdata = NULL;
-	bool found = false;
-
-	if (!data_list) {
-		dprintk(CVP_ERR, "%s: invalid params %pK\n",
-			__func__, data_list);
-		return;
-	}
-
-	mutex_lock(&data_list->lock);
-	list_for_each_entry(pdata, &data_list->list, list) {
-		if (pdata->index == index) {
-			pdata->mark_data = mark_data;
-			pdata->mark_target = mark_target;
-			found = true;
-			break;
-		}
-	}
-
-	if (!found) {
-		pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
-		if (!pdata)  {
-			dprintk(CVP_WARN, "%s: malloc failure.\n", __func__);
-			goto exit;
-		}
-		pdata->index = index;
-		pdata->mark_data = mark_data;
-		pdata->mark_target = mark_target;
-		list_add_tail(&pdata->list, &data_list->list);
-	}
-
-exit:
-	mutex_unlock(&data_list->lock);
-}
-
-void msm_cvp_comm_fetch_mark_data(struct msm_cvp_list *data_list,
-		u32 index, u32 *mark_data, u32 *mark_target)
-{
-	struct msm_cvp_buf_data *pdata = NULL;
-
-	if (!data_list || !mark_data || !mark_target) {
-		dprintk(CVP_ERR, "%s: invalid params %pK %pK %pK\n",
-			__func__, data_list, mark_data, mark_target);
-		return;
-	}
-
-	*mark_data = *mark_target = 0;
-	mutex_lock(&data_list->lock);
-	list_for_each_entry(pdata, &data_list->list, list) {
-		if (pdata->index == index) {
-			*mark_data = pdata->mark_data;
-			*mark_target = pdata->mark_target;
-			/* clear after fetch */
-			pdata->mark_data = pdata->mark_target = 0;
-			break;
-		}
-	}
-	mutex_unlock(&data_list->lock);
-}
-
-int msm_cvp_comm_release_mark_data(struct msm_cvp_inst *inst)
-{
-	struct msm_cvp_buf_data *pdata, *next;
-
-	if (!inst) {
-		dprintk(CVP_ERR, "%s: invalid params %pK\n",
-			__func__, inst);
-		return -EINVAL;
-	}
-
-	mutex_lock(&inst->etb_data.lock);
-	list_for_each_entry_safe(pdata, next, &inst->etb_data.list, list) {
-		list_del(&pdata->list);
-		kfree(pdata);
-	}
-	mutex_unlock(&inst->etb_data.lock);
-
-	mutex_lock(&inst->fbd_data.lock);
-	list_for_each_entry_safe(pdata, next, &inst->fbd_data.list, list) {
-		list_del(&pdata->list);
-		kfree(pdata);
-	}
-	mutex_unlock(&inst->fbd_data.lock);
-
-	return 0;
-}
-
-int msm_cvp_comm_set_color_format_constraints(struct msm_cvp_inst *inst,
-		enum hal_buffer buffer_type,
-		struct msm_cvp_format_constraint *pix_constraint)
-{
-	struct hal_uncompressed_plane_actual_constraints_info
-		*pconstraint = NULL;
-	u32 num_planes = 2;
-	u32 size = 0;
-	int rc = 0;
-	struct hfi_device *hdev;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(CVP_ERR, "%s - invalid param\n", __func__);
-		return -EINVAL;
-	}
-
-	hdev = inst->core->device;
-
-	size = sizeof(buffer_type)
-			+ sizeof(u32)
-			+ num_planes
-			* sizeof(struct hal_uncompressed_plane_constraints);
-
-	pconstraint = kzalloc(size, GFP_KERNEL);
-	if (!pconstraint) {
-		dprintk(CVP_ERR, "No memory cannot alloc constrain\n");
-		rc = -ENOMEM;
-		goto exit;
-	}
-
-	pconstraint->buffer_type = buffer_type;
-	pconstraint->num_planes = pix_constraint->num_planes;
-	//set Y plan constraints
-	dprintk(CVP_INFO, "Set Y plan constraints.\n");
-	pconstraint->rg_plane_format[0].stride_multiples =
-			pix_constraint->y_stride_multiples;
-	pconstraint->rg_plane_format[0].max_stride =
-			pix_constraint->y_max_stride;
-	pconstraint->rg_plane_format[0].min_plane_buffer_height_multiple =
-			pix_constraint->y_min_plane_buffer_height_multiple;
-	pconstraint->rg_plane_format[0].buffer_alignment =
-			pix_constraint->y_buffer_alignment;
-
-	//set UV plan constraints
-	dprintk(CVP_INFO, "Set UV plan constraints.\n");
-	pconstraint->rg_plane_format[1].stride_multiples =
-			pix_constraint->uv_stride_multiples;
-	pconstraint->rg_plane_format[1].max_stride =
-			pix_constraint->uv_max_stride;
-	pconstraint->rg_plane_format[1].min_plane_buffer_height_multiple =
-			pix_constraint->uv_min_plane_buffer_height_multiple;
-	pconstraint->rg_plane_format[1].buffer_alignment =
-			pix_constraint->uv_buffer_alignment;
-
-	rc = call_hfi_op(hdev,
-			session_set_property,
-			inst->session,
-			HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO,
-			pconstraint);
-	if (rc)
-		dprintk(CVP_ERR,
-			"Failed to set input color format constraint\n");
-	else
-		dprintk(CVP_DBG, "Set color format constraint success\n");
-
-exit:
-	if (!pconstraint)
-		kfree(pconstraint);
-	return rc;
-}
-
 static int set_internal_buf_on_fw(struct msm_cvp_inst *inst,
 				enum hal_buffer buffer_type,
 				struct msm_smem *handle, bool reuse)
@@ -5031,7 +1878,7 @@
 
 
 /* Set ARP buffer for CVP firmware to handle concurrency */
-int cvp_comm_set_persist_buffers(struct msm_cvp_inst *inst)
+int cvp_comm_set_arp_buffers(struct msm_cvp_inst *inst)
 {
 	int rc = 0, idx = 0;
 	struct hal_buffer_requirements *internal_buf = NULL;
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_common.h b/drivers/media/platform/msm/cvp/msm_cvp_common.h
index f1cc762..240e43f 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_common.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp_common.h
@@ -8,30 +8,6 @@
 #define _MSM_CVP_COMMON_H_
 #include "msm_cvp_internal.h"
 
-#define MAX_DEC_BATCH_SIZE                     6
-#define MAX_DEC_BATCH_WIDTH                    1920
-#define MAX_DEC_BATCH_HEIGHT                   1088
-#define SKIP_BATCH_WINDOW                      100
-#define MIN_FRAME_QUALITY 0
-#define MAX_FRAME_QUALITY 100
-#define DEFAULT_FRAME_QUALITY 80
-#define FRAME_QUALITY_STEP 1
-#define HEIC_GRID_DIMENSION 512
-#define CBR_MB_LIMIT                           (((1280+15)/16)*((720+15)/16)*30)
-#define CBR_VFR_MB_LIMIT                       (((640+15)/16)*((480+15)/16)*30)
-
-struct vb2_buf_entry {
-	struct list_head list;
-	struct vb2_buffer *vb;
-};
-
-struct getprop_buf {
-	struct list_head list;
-	void *data;
-};
-
-extern const char *const mpeg_video_cvp_extradata[];
-
 enum load_calc_quirks {
 	LOAD_CALC_NO_QUIRKS = 0,
 	LOAD_CALC_IGNORE_TURBO_LOAD = 1 << 0,
@@ -39,26 +15,11 @@
 	LOAD_CALC_IGNORE_NON_REALTIME_LOAD = 1 << 2,
 };
 
-static inline bool is_turbo_session(struct msm_cvp_inst *inst)
-{
-	return !!(inst->flags & CVP_TURBO);
-}
-
 static inline bool is_thumbnail_session(struct msm_cvp_inst *inst)
 {
 	return !!(inst->flags & CVP_THUMBNAIL);
 }
 
-static inline bool is_low_power_session(struct msm_cvp_inst *inst)
-{
-	return !!(inst->flags & CVP_LOW_POWER);
-}
-
-static inline bool is_realtime_session(struct msm_cvp_inst *inst)
-{
-	return !!(inst->flags & CVP_REALTIME);
-}
-
 static inline bool is_decode_session(struct msm_cvp_inst *inst)
 {
 	return inst->session_type == MSM_CVP_DECODER;
@@ -69,28 +30,6 @@
 	return inst->session_type == MSM_CVP_ENCODER;
 }
 
-static inline bool is_primary_output_mode(struct msm_cvp_inst *inst)
-{
-	return inst->stream_output_mode == HAL_VIDEO_DECODER_PRIMARY;
-}
-
-static inline bool is_secondary_output_mode(struct msm_cvp_inst *inst)
-{
-	return inst->stream_output_mode == HAL_VIDEO_DECODER_SECONDARY;
-}
-
-static inline int msm_comm_g_ctrl(struct msm_cvp_inst *inst,
-		struct v4l2_control *ctrl)
-{
-	return v4l2_g_ctrl(&inst->ctrl_handler, ctrl);
-}
-
-static inline int msm_comm_s_ctrl(struct msm_cvp_inst *inst,
-		struct v4l2_control *ctrl)
-{
-	return v4l2_s_ctrl(NULL, &inst->ctrl_handler, ctrl);
-}
-bool cvp_is_batching_allowed(struct msm_cvp_inst *inst);
 enum hal_buffer cvp_get_hal_buffer_type(unsigned int type,
 		unsigned int plane_num);
 void cvp_put_inst(struct msm_cvp_inst *inst);
@@ -99,52 +38,21 @@
 void cvp_change_inst_state(struct msm_cvp_inst *inst,
 		enum instance_state state);
 struct msm_cvp_core *get_cvp_core(int core_id);
-struct msm_cvp_format_constraint *msm_cvp_comm_get_pixel_fmt_constraints(
-	struct msm_cvp_format_constraint fmt[], int size, int fourcc);
-int msm_cvp_comm_set_color_format_constraints(struct msm_cvp_inst *inst,
-		enum hal_buffer buffer_type,
-		struct msm_cvp_format_constraint *pix_constraint);
 struct buf_queue *msm_cvp_comm_get_vb2q(
 		struct msm_cvp_inst *inst, enum v4l2_buf_type type);
 int msm_cvp_comm_try_state(struct msm_cvp_inst *inst, int state);
-int msm_cvp_comm_try_set_prop(struct msm_cvp_inst *inst,
-	enum hal_property ptype, void *pdata);
-int msm_cvp_comm_try_get_prop(struct msm_cvp_inst *inst,
-	enum hal_property ptype, union hal_get_property *hprop);
 int msm_cvp_comm_set_buffer_count(struct msm_cvp_inst *inst,
 	int host_count, int act_count, enum hal_buffer type);
-int msm_cvp_comm_queue_output_buffers(struct msm_cvp_inst *inst);
-int msm_cvp_comm_qbuf(struct msm_cvp_inst *inst, struct msm_video_buffer *mbuf);
-void msm_cvp_comm_flush_dynamic_buffers(struct msm_cvp_inst *inst);
-int msm_cvp_comm_flush(struct msm_cvp_inst *inst, u32 flags);
-int msm_cvp_comm_release_scratch_buffers(struct msm_cvp_inst *inst,
-					bool check_for_reuse);
-int msm_cvp_comm_release_recon_buffers(struct msm_cvp_inst *inst);
-void msm_cvp_comm_release_eos_buffers(struct msm_cvp_inst *inst);
-int msm_cvp_comm_release_output_buffers(struct msm_cvp_inst *inst,
-	bool force_release);
-void msm_cvp_comm_validate_output_buffers(struct msm_cvp_inst *inst);
 int msm_cvp_comm_force_cleanup(struct msm_cvp_inst *inst);
 int msm_cvp_comm_suspend(int core_id);
-enum hal_extradata_id msm_cvp_comm_get_hal_extradata_index(
-	enum v4l2_mpeg_cvp_extradata index);
-int msm_cvp_comm_reset_bufreqs(struct msm_cvp_inst *inst,
-	enum hal_buffer buf_type);
-int msm_cvp_comm_copy_bufreqs(struct msm_cvp_inst *inst,
-	enum hal_buffer src_type, enum hal_buffer dst_type);
 struct hal_buffer_requirements *get_cvp_buff_req_buffer(
 			struct msm_cvp_inst *inst, u32 buffer_type);
-#define IS_PRIV_CTRL(idx) (\
-		(V4L2_CTRL_ID2WHICH(idx) == V4L2_CTRL_CLASS_MPEG) && \
-		V4L2_CTRL_DRIVER_PRIV(idx))
 void msm_cvp_comm_session_clean(struct msm_cvp_inst *inst);
 int msm_cvp_comm_kill_session(struct msm_cvp_inst *inst);
 void msm_cvp_comm_generate_session_error(struct msm_cvp_inst *inst);
 void msm_cvp_comm_generate_sys_error(struct msm_cvp_inst *inst);
 enum multi_stream msm_cvp_comm_get_stream_output_mode(
 		struct msm_cvp_inst *inst);
-int msm_cvp_comm_set_stream_output_mode(struct msm_cvp_inst *inst,
-		enum multi_stream mode);
 enum hal_buffer msm_cvp_comm_get_hal_output_buffer(struct msm_cvp_inst *inst);
 int msm_cvp_comm_smem_alloc(struct msm_cvp_inst *inst, size_t size, u32 align,
 		u32 flags, enum hal_buffer buffer_type, int map_kernel,
@@ -159,83 +67,19 @@
 			enum load_calc_quirks quirks);
 int msm_cvp_comm_get_inst_load_per_core(struct msm_cvp_inst *inst,
 			enum load_calc_quirks quirks);
-int msm_cvp_comm_get_load(struct msm_cvp_core *core,
-			enum session_type type, enum load_calc_quirks quirks);
-int msm_cvp_comm_set_color_format(struct msm_cvp_inst *inst,
-		enum hal_buffer buffer_type, int fourcc);
-int msm_comm_g_ctrl(struct msm_cvp_inst *inst, struct v4l2_control *ctrl);
-int msm_comm_s_ctrl(struct msm_cvp_inst *inst, struct v4l2_control *ctrl);
-int msm_cvp_comm_g_ctrl_for_id(struct msm_cvp_inst *inst, int id);
-int msm_cvp_comm_ctrl_init(struct msm_cvp_inst *inst,
-		struct msm_cvp_ctrl *drv_ctrls, u32 num_ctrls,
-		const struct v4l2_ctrl_ops *ctrl_ops);
-int msm_cvp_comm_ctrl_deinit(struct msm_cvp_inst *inst);
-void msm_cvp_comm_cleanup_internal_buffers(struct msm_cvp_inst *inst);
-int msm_cvp_comm_s_parm(struct msm_cvp_inst *inst, struct v4l2_streamparm *a);
-bool msm_cvp_comm_turbo_session(struct msm_cvp_inst *inst);
 void msm_cvp_comm_print_inst_info(struct msm_cvp_inst *inst);
-int msm_cvp_comm_hal_to_v4l2(int id, int value);
-int msm_cvp_comm_get_v4l2_profile(int fourcc, int profile);
-int msm_cvp_comm_get_v4l2_level(int fourcc, int level);
-int msm_cvp_comm_session_continue(void *instance);
-enum hal_uncompressed_format msm_cvp_comm_get_hal_uncompressed(int fourcc);
-u32 cvp_get_frame_size_nv12(int plane, u32 height, u32 width);
-u32 cvp_get_frame_size_nv12_512(int plane, u32 height, u32 width);
-u32 cvp_get_frame_size_nv12_ubwc(int plane, u32 height, u32 width);
-u32 cvp_get_frame_size_rgba(int plane, u32 height, u32 width);
-u32 cvp_get_frame_size_nv21(int plane, u32 height, u32 width);
-u32 cvp_get_frame_size_tp10_ubwc(int plane, u32 height, u32 width);
-u32 cvp_get_frame_size_p010(int plane, u32 height, u32 width);
-struct vb2_buffer *msm_cvp_comm_get_vb_using_video_buffer(
-		struct msm_cvp_inst *inst, struct msm_video_buffer *mbuf);
-struct msm_video_buffer *msm_cvp_comm_get_buffer_using_device_planes(
-		struct msm_cvp_inst *inst, u32 type, u32 *planes);
 struct msm_video_buffer *msm_cvp_comm_get_video_buffer(
 		struct msm_cvp_inst *inst, struct vb2_buffer *vb2);
-void msm_cvp_comm_put_video_buffer(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf);
-int msm_cvp_comm_vb2_buffer_done(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf);
-int msm_cvp_comm_flush_video_buffer(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf);
 int msm_cvp_comm_unmap_video_buffer(struct msm_cvp_inst *inst,
 		struct msm_video_buffer *mbuf);
-bool msm_cvp_comm_compare_dma_plane(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf,
-		unsigned long *dma_planes, u32 i);
-bool msm_cvp_comm_compare_dma_planes(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf, unsigned long *dma_planes);
-bool msm_cvp_comm_compare_vb2_plane(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf, struct vb2_buffer *vb2, u32 i);
-bool msm_cvp_comm_compare_vb2_planes(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf, struct vb2_buffer *vb2);
-bool msm_cvp_comm_compare_device_plane(struct msm_video_buffer *mbuf,
-		u32 type, u32 *planes, u32 i);
-bool msm_cvp_comm_compare_device_planes(struct msm_video_buffer *mbuf,
-		u32 type, u32 *planes);
-int msm_cvp_comm_qbuf_cache_operations(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf);
-int msm_cvp_comm_dqbuf_cache_operations(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf);
 void print_video_buffer(u32 tag, const char *str,
 		struct msm_cvp_inst *inst, struct msm_video_buffer *mbuf);
-void print_cvp_vb2_buffer(u32 tag, const char *str,
-		struct msm_cvp_inst *inst, struct vb2_buffer *vb2);
-void print_cvp_v4l2_buffer(u32 tag, const char *str,
-		struct msm_cvp_inst *inst, struct v4l2_buffer *v4l2);
 void kref_cvp_put_mbuf(struct msm_video_buffer *mbuf);
 bool kref_cvp_get_mbuf(struct msm_cvp_inst *inst,
 	struct msm_video_buffer *mbuf);
-void msm_cvp_comm_store_mark_data(struct msm_cvp_list *data_list,
-		u32 index, u32 mark_data, u32 mark_target);
-void msm_cvp_comm_fetch_mark_data(struct msm_cvp_list *data_list,
-		u32 index, u32 *mark_data, u32 *mark_target);
-int msm_cvp_comm_release_mark_data(struct msm_cvp_inst *inst);
-int msm_cvp_comm_qbuf_decode_batch(struct msm_cvp_inst *inst,
-		struct msm_video_buffer *mbuf);
 int msm_cvp_comm_num_queued_bufs(struct msm_cvp_inst *inst, u32 type);
 int wait_for_sess_signal_receipt(struct msm_cvp_inst *inst,
 	enum hal_command_response cmd);
-int cvp_comm_set_persist_buffers(struct msm_cvp_inst *inst);
+int cvp_comm_set_arp_buffers(struct msm_cvp_inst *inst);
 int cvp_comm_release_persist_buffers(struct msm_cvp_inst *inst);
 #endif
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_core.c b/drivers/media/platform/msm/cvp/msm_cvp_core.c
index 75de60d..167c0d1 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_core.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_core.c
@@ -18,9 +18,6 @@
 
 #define MAX_EVENTS 30
 
-static int try_get_ctrl(struct msm_cvp_inst *inst,
-	struct v4l2_ctrl *ctrl);
-
 static int get_poll_flags(void *instance)
 {
 	struct msm_cvp_inst *inst = instance;
@@ -75,48 +72,6 @@
 }
 EXPORT_SYMBOL(msm_cvp_poll);
 
-int msm_cvp_querycap(void *instance, struct v4l2_capability *cap)
-{
-	return -EINVAL;
-}
-EXPORT_SYMBOL(msm_cvp_querycap);
-
-int msm_cvp_enum_fmt(void *instance, struct v4l2_fmtdesc *f)
-{
-	struct msm_cvp_inst *inst = instance;
-
-	if (!inst || !f)
-		return -EINVAL;
-
-	return -EINVAL;
-}
-EXPORT_SYMBOL(msm_cvp_enum_fmt);
-
-int msm_cvp_query_ctrl(void *instance, struct v4l2_queryctrl *ctrl)
-{
-	return -EINVAL;
-}
-EXPORT_SYMBOL(msm_cvp_query_ctrl);
-
-int msm_cvp_s_fmt(void *instance, struct v4l2_format *f)
-{
-	int rc = 0;
-	struct msm_cvp_inst *inst = instance;
-
-	if (!inst || !f)
-		return -EINVAL;
-
-	dprintk(CVP_DBG,
-		"s_fmt: %x : type %d wxh %dx%d pixelfmt %#x num_planes %d size[0] %d size[1] %d in_reconfig %d\n",
-		hash32_ptr(inst->session), f->type,
-		f->fmt.pix_mp.width, f->fmt.pix_mp.height,
-		f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.num_planes,
-		f->fmt.pix_mp.plane_fmt[0].sizeimage,
-		f->fmt.pix_mp.plane_fmt[1].sizeimage, inst->in_reconfig);
-	return rc;
-}
-EXPORT_SYMBOL(msm_cvp_s_fmt);
-
 int msm_cvp_g_fmt(void *instance, struct v4l2_format *f)
 {
 	struct msm_cvp_inst *inst = instance;
@@ -129,10 +84,6 @@
 			"Invalid input, inst = %pK, format = %pK\n", inst, f);
 		return -EINVAL;
 	}
-	if (inst->in_reconfig) {
-		inst->prop.height[OUTPUT_PORT] = inst->reconfig_height;
-		inst->prop.width[OUTPUT_PORT] = inst->reconfig_width;
-	}
 
 	port = f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
 		OUTPUT_PORT : CAPTURE_PORT;
@@ -174,91 +125,11 @@
 			inst->prop.height[port]);
 	f->fmt.pix_mp.plane_fmt[0].sizeimage = VENUS_BUFFER_SIZE(color_format,
 			inst->prop.width[port], inst->prop.height[port]);
-
-	dprintk(CVP_DBG,
-		"g_fmt: %x : type %d wxh %dx%d pixelfmt %#x num_planes %d size[0] %d size[1] %d in_reconfig %d\n",
-		hash32_ptr(inst->session), f->type,
-		f->fmt.pix_mp.width, f->fmt.pix_mp.height,
-		f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.num_planes,
-		f->fmt.pix_mp.plane_fmt[0].sizeimage,
-		f->fmt.pix_mp.plane_fmt[1].sizeimage, inst->in_reconfig);
 exit:
 	return rc;
 }
 EXPORT_SYMBOL(msm_cvp_g_fmt);
 
-int msm_cvp_s_ctrl(void *instance, struct v4l2_control *control)
-{
-	struct msm_cvp_inst *inst = instance;
-
-	if (!inst || !control)
-		return -EINVAL;
-
-	return msm_comm_s_ctrl(instance, control);
-}
-EXPORT_SYMBOL(msm_cvp_s_ctrl);
-
-int msm_cvp_g_crop(void *instance, struct v4l2_crop *crop)
-{
-	return -EINVAL;
-}
-EXPORT_SYMBOL(msm_cvp_g_crop);
-
-int msm_cvp_g_ctrl(void *instance, struct v4l2_control *control)
-{
-	struct msm_cvp_inst *inst = instance;
-	struct v4l2_ctrl *ctrl = NULL;
-	int rc = 0;
-
-	if (!inst || !control)
-		return -EINVAL;
-
-	ctrl = v4l2_ctrl_find(&inst->ctrl_handler, control->id);
-	if (ctrl) {
-		rc = try_get_ctrl(inst, ctrl);
-		if (!rc)
-			control->value = ctrl->val;
-	}
-
-	return rc;
-}
-EXPORT_SYMBOL(msm_cvp_g_ctrl);
-
-int msm_cvp_g_ext_ctrl(void *instance, struct v4l2_ext_controls *control)
-{
-	struct msm_cvp_inst *inst = instance;
-	struct v4l2_ext_control *ext_control;
-	int i = 0, rc = 0;
-
-	if (!inst || !control)
-		return -EINVAL;
-
-	ext_control = control->controls;
-
-	for (i = 0; i < control->count; i++) {
-		switch (ext_control[i].id) {
-		default:
-			dprintk(CVP_ERR,
-				"This control %x is not supported yet\n",
-					ext_control[i].id);
-			break;
-		}
-	}
-	return rc;
-}
-EXPORT_SYMBOL(msm_cvp_g_ext_ctrl);
-
-int msm_cvp_s_ext_ctrl(void *instance, struct v4l2_ext_controls *control)
-{
-	struct msm_cvp_inst *inst = instance;
-
-	if (!inst || !control)
-		return -EINVAL;
-
-	return -EINVAL;
-}
-EXPORT_SYMBOL(msm_cvp_s_ext_ctrl);
-
 int msm_cvp_reqbufs(void *instance, struct v4l2_requestbuffers *b)
 {
 	struct msm_cvp_inst *inst = instance;
@@ -285,19 +156,6 @@
 }
 EXPORT_SYMBOL(msm_cvp_reqbufs);
 
-static bool valid_v4l2_buffer(struct v4l2_buffer *b,
-		struct msm_cvp_inst *inst)
-{
-	enum cvp_ports port =
-		!V4L2_TYPE_IS_MULTIPLANAR(b->type) ? MAX_PORT_NUM :
-		b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ? CAPTURE_PORT :
-		b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ? OUTPUT_PORT :
-								MAX_PORT_NUM;
-
-	return port != MAX_PORT_NUM &&
-		inst->bufq[port].num_planes == b->length;
-}
-
 int msm_cvp_release_buffer(void *instance, int type, unsigned int index)
 {
 	int rc = 0;
@@ -309,18 +167,6 @@
 		return -EINVAL;
 	}
 
-	if (!inst->in_reconfig &&
-		inst->state > MSM_CVP_LOAD_RESOURCES &&
-		inst->state < MSM_CVP_RELEASE_RESOURCES_DONE) {
-		rc = msm_cvp_comm_try_state(inst,
-			MSM_CVP_RELEASE_RESOURCES_DONE);
-		if (rc) {
-			dprintk(CVP_ERR,
-				"%s: Failed to move inst: %pK to rel res done\n",
-					__func__, inst);
-		}
-	}
-
 	mutex_lock(&inst->registeredbufs.lock);
 	list_for_each_entry_safe(mbuf, dummy, &inst->registeredbufs.list,
 			list) {
@@ -346,142 +192,6 @@
 }
 EXPORT_SYMBOL(msm_cvp_release_buffer);
 
-int msm_cvp_qbuf(void *instance, struct v4l2_buffer *b)
-{
-	struct msm_cvp_inst *inst = instance;
-	int rc = 0, i = 0;
-	struct buf_queue *q = NULL;
-
-	if (!inst || !inst->core || !b || !valid_v4l2_buffer(b, inst)) {
-		dprintk(CVP_ERR, "%s: invalid params, inst %pK\n",
-			__func__, inst);
-		return -EINVAL;
-	}
-
-	for (i = 0; i < b->length; i++) {
-		b->m.planes[i].m.fd = b->m.planes[i].reserved[0];
-		b->m.planes[i].data_offset = b->m.planes[i].reserved[1];
-	}
-
-	q = msm_cvp_comm_get_vb2q(inst, b->type);
-	if (!q) {
-		dprintk(CVP_ERR,
-			"Failed to find buffer queue for type = %d\n", b->type);
-		return -EINVAL;
-	}
-
-	mutex_lock(&q->lock);
-	rc = vb2_qbuf(&q->vb2_bufq, b);
-	mutex_unlock(&q->lock);
-	if (rc)
-		dprintk(CVP_ERR, "Failed to qbuf, %d\n", rc);
-
-	return rc;
-}
-EXPORT_SYMBOL(msm_cvp_qbuf);
-
-int msm_cvp_dqbuf(void *instance, struct v4l2_buffer *b)
-{
-	struct msm_cvp_inst *inst = instance;
-	int rc = 0, i = 0;
-	struct buf_queue *q = NULL;
-
-	if (!inst || !b || !valid_v4l2_buffer(b, inst)) {
-		dprintk(CVP_ERR, "%s: invalid params, inst %pK\n",
-			__func__, inst);
-		return -EINVAL;
-	}
-
-	q = msm_cvp_comm_get_vb2q(inst, b->type);
-	if (!q) {
-		dprintk(CVP_ERR,
-			"Failed to find buffer queue for type = %d\n", b->type);
-		return -EINVAL;
-	}
-
-	mutex_lock(&q->lock);
-	rc = vb2_dqbuf(&q->vb2_bufq, b, true);
-	mutex_unlock(&q->lock);
-	if (rc == -EAGAIN) {
-		return rc;
-	} else if (rc) {
-		dprintk(CVP_ERR, "Failed to dqbuf, %d\n", rc);
-		return rc;
-	}
-
-	for (i = 0; i < b->length; i++) {
-		b->m.planes[i].reserved[0] = b->m.planes[i].m.fd;
-		b->m.planes[i].reserved[1] = b->m.planes[i].data_offset;
-	}
-
-	return rc;
-}
-EXPORT_SYMBOL(msm_cvp_dqbuf);
-
-int msm_cvp_streamon(void *instance, enum v4l2_buf_type i)
-{
-	struct msm_cvp_inst *inst = instance;
-	int rc = 0;
-	struct buf_queue *q;
-
-	if (!inst)
-		return -EINVAL;
-
-	q = msm_cvp_comm_get_vb2q(inst, i);
-	if (!q) {
-		dprintk(CVP_ERR,
-			"Failed to find buffer queue for type = %d\n", i);
-		return -EINVAL;
-	}
-	dprintk(CVP_DBG, "Calling streamon\n");
-	mutex_lock(&q->lock);
-	rc = vb2_streamon(&q->vb2_bufq, i);
-	mutex_unlock(&q->lock);
-	if (rc) {
-		dprintk(CVP_ERR, "streamon failed on port: %d\n", i);
-		msm_cvp_comm_kill_session(inst);
-	}
-	return rc;
-}
-EXPORT_SYMBOL(msm_cvp_streamon);
-
-int msm_cvp_streamoff(void *instance, enum v4l2_buf_type i)
-{
-	struct msm_cvp_inst *inst = instance;
-	int rc = 0;
-	struct buf_queue *q;
-
-	if (!inst)
-		return -EINVAL;
-
-	q = msm_cvp_comm_get_vb2q(inst, i);
-	if (!q) {
-		dprintk(CVP_ERR,
-			"Failed to find buffer queue for type = %d\n", i);
-		return -EINVAL;
-	}
-
-	if (!inst->in_reconfig) {
-		dprintk(CVP_DBG, "%s: inst %pK release resources\n",
-			__func__, inst);
-		rc = msm_cvp_comm_try_state(inst,
-			MSM_CVP_RELEASE_RESOURCES_DONE);
-		if (rc)
-			dprintk(CVP_ERR,
-				"%s: inst %pK move to rel res done failed\n",
-				__func__, inst);
-	}
-
-	dprintk(CVP_DBG, "Calling streamoff\n");
-	mutex_lock(&q->lock);
-	rc = vb2_streamoff(&q->vb2_bufq, i);
-	mutex_unlock(&q->lock);
-	if (rc)
-		dprintk(CVP_ERR, "streamoff failed on port: %d\n", i);
-	return rc;
-}
-EXPORT_SYMBOL(msm_cvp_streamoff);
-
 int msm_cvp_enum_framesizes(void *instance, struct v4l2_frmsizeenum *fsize)
 {
 	struct msm_cvp_inst *inst = instance;
@@ -661,85 +371,18 @@
 
 static int msm_cvp_start_streaming(struct vb2_queue *q, unsigned int count)
 {
-	dprintk(CVP_ERR, "Invalid input, q = %pK\n", q);
+	dprintk(CVP_ERR, "Deprecated function %s\n", __func__);
 	return -EINVAL;
 }
 
 static void msm_cvp_stop_streaming(struct vb2_queue *q)
 {
-	dprintk(CVP_INFO, "%s: No streaming use case supported\n",
-		__func__);
-}
-
-static int msm_cvp_queue_buf(struct msm_cvp_inst *inst,
-		struct vb2_buffer *vb2)
-{
-	int rc = 0;
-	struct msm_video_buffer *mbuf;
-
-	if (!inst || !vb2) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-
-	mbuf = msm_cvp_comm_get_video_buffer(inst, vb2);
-	if (IS_ERR_OR_NULL(mbuf)) {
-		/*
-		 * if the buffer has RBR_PENDING flag (-EEXIST) then don't queue
-		 * it now, it will be queued via msm_cvp_comm_qbuf_rbr() as
-		 * part of RBR event processing.
-		 */
-		if (PTR_ERR(mbuf) == -EEXIST)
-			return 0;
-		dprintk(CVP_ERR, "%s: failed to get cvp-buf\n", __func__);
-		return -EINVAL;
-	}
-	if (!kref_cvp_get_mbuf(inst, mbuf)) {
-		dprintk(CVP_ERR, "%s: mbuf not found\n", __func__);
-		return -EINVAL;
-	}
-	rc = msm_cvp_comm_qbuf(inst, mbuf);
-	if (rc)
-		dprintk(CVP_ERR, "%s: failed qbuf\n", __func__);
-	kref_cvp_put_mbuf(mbuf);
-
-	return rc;
-}
-
-static int msm_cvp_queue_buf_batch(struct msm_cvp_inst *inst,
-		struct vb2_buffer *vb2)
-{
-	int rc;
-
-	if (!inst || !vb2) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-
-	rc = msm_cvp_queue_buf(inst, vb2);
-
-	return rc;
+	dprintk(CVP_ERR, "Deprecated function %s\n", __func__);
 }
 
 static void msm_cvp_buf_queue(struct vb2_buffer *vb2)
 {
-	int rc = 0;
-	struct msm_cvp_inst *inst = NULL;
-
-	inst = vb2_get_drv_priv(vb2->vb2_queue);
-	if (!inst) {
-		dprintk(CVP_ERR, "%s: invalid inst\n", __func__);
-		return;
-	}
-
-	if (inst->batch.enable)
-		rc = msm_cvp_queue_buf_batch(inst, vb2);
-	else
-		rc = msm_cvp_queue_buf(inst, vb2);
-	if (rc) {
-		print_cvp_vb2_buffer(CVP_ERR, "failed vb2-qbuf", inst, vb2);
-		msm_cvp_comm_generate_session_error(inst);
-	}
+	dprintk(CVP_ERR, "Deprecated function %s\n", __func__);
 }
 
 static const struct vb2_ops msm_cvp_vb2q_ops = {
@@ -816,21 +459,8 @@
 }
 EXPORT_SYMBOL(msm_cvp_unsubscribe_event);
 
-int msm_cvp_dqevent(void *inst, struct v4l2_event *event)
-{
-	int rc = 0;
-	struct msm_cvp_inst *cvp_inst = (struct msm_cvp_inst *)inst;
-
-	if (!inst || !event)
-		return -EINVAL;
-
-	rc = v4l2_event_dequeue(&cvp_inst->event_handler, event, false);
-	return rc;
-}
-EXPORT_SYMBOL(msm_cvp_dqevent);
-
 int msm_cvp_private(void *cvp_inst, unsigned int cmd,
-		struct msm_cvp_arg *arg)
+		struct cvp_kmd_arg *arg)
 {
 	int rc = 0;
 	struct msm_cvp_inst *inst = (struct msm_cvp_inst *)cvp_inst;
@@ -877,104 +507,38 @@
 	return overload;
 }
 
-static int msm_cvp_try_set_ctrl(void *instance, struct v4l2_ctrl *ctrl)
+static int _init_session_queue(struct msm_cvp_inst *inst)
 {
-	return -EINVAL;
-}
-
-static int msm_cvp_op_s_ctrl(struct v4l2_ctrl *ctrl)
-{
-
-	int rc = 0, c = 0;
-	struct msm_cvp_inst *inst;
-
-	if (!ctrl) {
-		dprintk(CVP_ERR, "%s invalid parameters for ctrl\n", __func__);
-		return -EINVAL;
+	spin_lock_init(&inst->session_queue.lock);
+	INIT_LIST_HEAD(&inst->session_queue.msgs);
+	inst->session_queue.msg_count = 0;
+	init_waitqueue_head(&inst->session_queue.wq);
+	inst->session_queue.msg_cache = KMEM_CACHE(session_msg, 0);
+	if (!inst->session_queue.msg_cache) {
+		dprintk(CVP_ERR, "Failed to allocate msg queue\n");
+		return -ENOMEM;
 	}
-
-	inst = container_of(ctrl->handler,
-		struct msm_cvp_inst, ctrl_handler);
-	if (!inst) {
-		dprintk(CVP_ERR, "%s invalid parameters for inst\n", __func__);
-		return -EINVAL;
-	}
-
-	for (c = 0; c < ctrl->ncontrols; ++c) {
-		if (ctrl->cluster[c]->is_new) {
-			rc = msm_cvp_try_set_ctrl(inst, ctrl->cluster[c]);
-			if (rc) {
-				dprintk(CVP_ERR, "Failed setting %x\n",
-					ctrl->cluster[c]->id);
-				break;
-			}
-		}
-	}
-	if (rc)
-		dprintk(CVP_ERR, "Failed setting control: Inst = %pK (%s)\n",
-				inst, v4l2_ctrl_get_name(ctrl->id));
-	return rc;
-}
-static int try_get_ctrl(struct msm_cvp_inst *inst, struct v4l2_ctrl *ctrl)
-{
-	switch (ctrl->id) {
-	default:
-		/*
-		 * Other controls aren't really volatile, shouldn't need to
-		 * modify ctrl->value
-		 */
-		break;
-	}
-
 	return 0;
 }
 
-static int msm_cvp_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+static void _deinit_session_queue(struct msm_cvp_inst *inst)
 {
-	int rc = 0, c = 0;
-	struct msm_cvp_inst *inst;
-	struct v4l2_ctrl *master;
+	struct session_msg *msg, *tmpmsg;
 
-	if (!ctrl) {
-		dprintk(CVP_ERR, "%s invalid parameters for ctrl\n", __func__);
-		return -EINVAL;
+	/* free all messages */
+	spin_lock(&inst->session_queue.lock);
+	list_for_each_entry_safe(msg, tmpmsg, &inst->session_queue.msgs, node) {
+		list_del_init(&msg->node);
+		kmem_cache_free(inst->session_queue.msg_cache, msg);
 	}
+	inst->session_queue.msg_count = 0;
+	spin_unlock(&inst->session_queue.lock);
 
-	inst = container_of(ctrl->handler,
-		struct msm_cvp_inst, ctrl_handler);
-	if (!inst) {
-		dprintk(CVP_ERR, "%s invalid parameters for inst\n", __func__);
-		return -EINVAL;
-	}
-	master = ctrl->cluster[0];
-	if (!master) {
-		dprintk(CVP_ERR, "%s invalid parameters for master\n",
-			__func__);
-		return -EINVAL;
-	}
+	wake_up_all(&inst->session_queue.wq);
 
-	for (c = 0; c < master->ncontrols; ++c) {
-		if (master->cluster[c]->flags & V4L2_CTRL_FLAG_VOLATILE) {
-			rc = try_get_ctrl(inst, master->cluster[c]);
-			if (rc) {
-				dprintk(CVP_ERR, "Failed getting %x\n",
-					master->cluster[c]->id);
-				return rc;
-			}
-		}
-	}
-	if (rc)
-		dprintk(CVP_ERR, "Failed getting control: Inst = %pK (%s)\n",
-				inst, v4l2_ctrl_get_name(ctrl->id));
-	return rc;
+	kmem_cache_destroy(inst->session_queue.msg_cache);
 }
 
-static const struct v4l2_ctrl_ops msm_cvp_ctrl_ops = {
-
-	.s_ctrl = msm_cvp_op_s_ctrl,
-	.g_volatile_ctrl = msm_cvp_op_g_volatile_ctrl,
-};
-
 void *msm_cvp_open(int core_id, int session_type)
 {
 	struct msm_cvp_inst *inst = NULL;
@@ -1010,19 +574,10 @@
 	mutex_init(&inst->lock);
 	mutex_init(&inst->flush_lock);
 
-	INIT_MSM_CVP_LIST(&inst->scratchbufs);
 	INIT_MSM_CVP_LIST(&inst->freqs);
-	INIT_MSM_CVP_LIST(&inst->input_crs);
 	INIT_MSM_CVP_LIST(&inst->persistbufs);
-	INIT_MSM_CVP_LIST(&inst->pending_getpropq);
-	INIT_MSM_CVP_LIST(&inst->outputbufs);
 	INIT_MSM_CVP_LIST(&inst->registeredbufs);
 	INIT_MSM_CVP_LIST(&inst->cvpbufs);
-	INIT_MSM_CVP_LIST(&inst->reconbufs);
-	INIT_MSM_CVP_LIST(&inst->eosbufs);
-	INIT_MSM_CVP_LIST(&inst->etb_data);
-	INIT_MSM_CVP_LIST(&inst->fbd_data);
-	INIT_MSM_CVP_LIST(&inst->dfs_config);
 
 	kref_init(&inst->kref);
 
@@ -1035,12 +590,6 @@
 	inst->clk_data.sys_cache_bw = 0;
 	inst->clk_data.bitrate = 0;
 	inst->clk_data.core_id = CVP_CORE_ID_DEFAULT;
-	inst->bit_depth = MSM_CVP_BIT_DEPTH_8;
-	inst->pic_struct = MSM_CVP_PIC_STRUCT_PROGRESSIVE;
-	inst->colour_space = MSM_CVP_BT601_6_525;
-	inst->profile = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE;
-	inst->level = V4L2_MPEG_VIDEO_H264_LEVEL_1_0;
-	inst->entropy_mode = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC;
 
 	for (i = SESSION_MSG_INDEX(SESSION_MSG_START);
 		i <= SESSION_MSG_INDEX(SESSION_MSG_END); i++) {
@@ -1049,11 +598,6 @@
 
 	if (session_type == MSM_CVP_CORE) {
 		msm_cvp_session_init(inst);
-		rc = msm_cvp_control_init(inst, &msm_cvp_ctrl_ops);
-	}
-	if (rc) {
-		dprintk(CVP_ERR, "Failed control initialization\n");
-		goto fail_bufq_capture;
 	}
 
 	rc = vb2_bufq_init(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
@@ -1077,6 +621,11 @@
 	list_add_tail(&inst->list, &core->instances);
 	mutex_unlock(&core->lock);
 
+
+	rc = _init_session_queue(inst);
+	if (rc)
+		goto fail_init;
+
 	rc = msm_cvp_comm_try_state(inst, MSM_CVP_CORE_INIT_DONE);
 	if (rc) {
 		dprintk(CVP_ERR,
@@ -1104,7 +653,7 @@
 				"Failed to move video instance to open done state\n");
 			goto fail_init;
 		}
-		rc = cvp_comm_set_persist_buffers(inst);
+		rc = cvp_comm_set_arp_buffers(inst);
 		if (rc) {
 			dprintk(CVP_ERR,
 				"Failed to set ARP buffers\n");
@@ -1115,6 +664,7 @@
 
 	return inst;
 fail_init:
+	_deinit_session_queue(inst);
 	mutex_lock(&core->lock);
 	list_del(&inst->list);
 	mutex_unlock(&core->lock);
@@ -1125,25 +675,16 @@
 fail_bufq_output:
 	vb2_queue_release(&inst->bufq[CAPTURE_PORT].vb2_bufq);
 fail_bufq_capture:
-	msm_cvp_comm_ctrl_deinit(inst);
 	mutex_destroy(&inst->sync_lock);
 	mutex_destroy(&inst->bufq[CAPTURE_PORT].lock);
 	mutex_destroy(&inst->bufq[OUTPUT_PORT].lock);
 	mutex_destroy(&inst->lock);
 	mutex_destroy(&inst->flush_lock);
 
-	DEINIT_MSM_CVP_LIST(&inst->scratchbufs);
 	DEINIT_MSM_CVP_LIST(&inst->persistbufs);
-	DEINIT_MSM_CVP_LIST(&inst->pending_getpropq);
-	DEINIT_MSM_CVP_LIST(&inst->outputbufs);
 	DEINIT_MSM_CVP_LIST(&inst->cvpbufs);
 	DEINIT_MSM_CVP_LIST(&inst->registeredbufs);
-	DEINIT_MSM_CVP_LIST(&inst->eosbufs);
 	DEINIT_MSM_CVP_LIST(&inst->freqs);
-	DEINIT_MSM_CVP_LIST(&inst->input_crs);
-	DEINIT_MSM_CVP_LIST(&inst->etb_data);
-	DEINIT_MSM_CVP_LIST(&inst->fbd_data);
-	DEINIT_MSM_CVP_LIST(&inst->dfs_config);
 
 	kfree(inst);
 	inst = NULL;
@@ -1155,7 +696,6 @@
 static void msm_cvp_cleanup_instance(struct msm_cvp_inst *inst)
 {
 	struct msm_video_buffer *temp, *dummy;
-	struct getprop_buf *temp_prop, *dummy_prop;
 
 	if (!inst) {
 		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
@@ -1174,46 +714,13 @@
 
 	msm_cvp_comm_free_freq_table(inst);
 
-	msm_cvp_comm_free_input_cr_table(inst);
-
-	if (msm_cvp_comm_release_scratch_buffers(inst, false))
-		dprintk(CVP_ERR,
-			"Failed to release scratch buffers\n");
-
-	if (msm_cvp_comm_release_recon_buffers(inst))
-		dprintk(CVP_ERR,
-			"Failed to release recon buffers\n");
-
 	if (cvp_comm_release_persist_buffers(inst))
 		dprintk(CVP_ERR,
 			"Failed to release persist buffers\n");
 
-	if (msm_cvp_comm_release_mark_data(inst))
-		dprintk(CVP_ERR,
-			"Failed to release mark_data buffers\n");
-
-	msm_cvp_comm_release_eos_buffers(inst);
-
-	if (msm_cvp_comm_release_output_buffers(inst, true))
-		dprintk(CVP_ERR,
-			"Failed to release output buffers\n");
-
+	/* cvp_comm_release_cvp_buffers cvpbufs */
 	if (inst->extradata_handle)
 		msm_cvp_comm_smem_free(inst, inst->extradata_handle);
-
-	mutex_lock(&inst->pending_getpropq.lock);
-	if (!list_empty(&inst->pending_getpropq.list)) {
-		dprintk(CVP_ERR,
-			"pending_getpropq not empty for instance %pK\n",
-			inst);
-		list_for_each_entry_safe(temp_prop, dummy_prop,
-			&inst->pending_getpropq.list, list) {
-			kfree(temp_prop->data);
-			list_del(&temp_prop->list);
-			kfree(temp_prop);
-		}
-	}
-	mutex_unlock(&inst->pending_getpropq.lock);
 }
 
 int msm_cvp_destroy(struct msm_cvp_inst *inst)
@@ -1233,26 +740,16 @@
 	list_del(&inst->list);
 	mutex_unlock(&core->lock);
 
-	msm_cvp_comm_ctrl_deinit(inst);
-
 	v4l2_fh_del(&inst->event_handler);
 	v4l2_fh_exit(&inst->event_handler);
 
 	for (i = 0; i < MAX_PORT_NUM; i++)
 		vb2_queue_release(&inst->bufq[i].vb2_bufq);
 
-	DEINIT_MSM_CVP_LIST(&inst->scratchbufs);
 	DEINIT_MSM_CVP_LIST(&inst->persistbufs);
-	DEINIT_MSM_CVP_LIST(&inst->pending_getpropq);
-	DEINIT_MSM_CVP_LIST(&inst->outputbufs);
 	DEINIT_MSM_CVP_LIST(&inst->cvpbufs);
 	DEINIT_MSM_CVP_LIST(&inst->registeredbufs);
-	DEINIT_MSM_CVP_LIST(&inst->eosbufs);
 	DEINIT_MSM_CVP_LIST(&inst->freqs);
-	DEINIT_MSM_CVP_LIST(&inst->input_crs);
-	DEINIT_MSM_CVP_LIST(&inst->etb_data);
-	DEINIT_MSM_CVP_LIST(&inst->fbd_data);
-	DEINIT_MSM_CVP_LIST(&inst->dfs_config);
 
 	mutex_destroy(&inst->sync_lock);
 	mutex_destroy(&inst->bufq[CAPTURE_PORT].lock);
@@ -1261,6 +758,7 @@
 	mutex_destroy(&inst->flush_lock);
 
 	msm_cvp_debugfs_deinit_inst(inst);
+	_deinit_session_queue(inst);
 
 	pr_info(CVP_DBG_TAG "Closed cvp instance: %pK\n",
 			"info", inst);
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_core.h b/drivers/media/platform/msm/cvp/msm_cvp_core.h
index 97339ed..92290d4 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_core.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp_core.h
@@ -97,22 +97,10 @@
 void *msm_cvp_open(int core_id, int session_type);
 int msm_cvp_close(void *instance);
 int msm_cvp_suspend(int core_id);
-int msm_cvp_querycap(void *instance, struct v4l2_capability *cap);
-int msm_cvp_enum_fmt(void *instance, struct v4l2_fmtdesc *f);
-int msm_cvp_s_fmt(void *instance, struct v4l2_format *f);
 int msm_cvp_g_fmt(void *instance, struct v4l2_format *f);
-int msm_cvp_s_ctrl(void *instance, struct v4l2_control *a);
-int msm_cvp_s_ext_ctrl(void *instance, struct v4l2_ext_controls *a);
-int msm_cvp_g_ext_ctrl(void *instance, struct v4l2_ext_controls *a);
-int msm_cvp_g_ctrl(void *instance, struct v4l2_control *a);
 int msm_cvp_reqbufs(void *instance, struct v4l2_requestbuffers *b);
 int msm_cvp_release_buffer(void *instance, int buffer_type,
 		unsigned int buffer_index);
-int msm_cvp_qbuf(void *instance, struct v4l2_buffer *b);
-int msm_cvp_dqbuf(void *instance, struct v4l2_buffer *b);
-int msm_cvp_streamon(void *instance, enum v4l2_buf_type i);
-int msm_cvp_query_ctrl(void *instance, struct v4l2_queryctrl *ctrl);
-int msm_cvp_streamoff(void *instance, enum v4l2_buf_type i);
 int msm_cvp_comm_cmd(void *instance, union msm_v4l2_cmd *cmd);
 int msm_cvp_poll(void *instance, struct file *filp,
 		struct poll_table_struct *pt);
@@ -120,9 +108,7 @@
 		const struct v4l2_event_subscription *sub);
 int msm_cvp_unsubscribe_event(void *instance,
 		const struct v4l2_event_subscription *sub);
-int msm_cvp_dqevent(void *instance, struct v4l2_event *event);
-int msm_cvp_g_crop(void *instance, struct v4l2_crop *a);
 int msm_cvp_enum_framesizes(void *instance, struct v4l2_frmsizeenum *fsize);
 int msm_cvp_private(void *cvp_inst, unsigned int cmd,
-		struct msm_cvp_arg *arg);
+		struct cvp_kmd_arg *arg);
 #endif
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_internal.h b/drivers/media/platform/msm/cvp/msm_cvp_internal.h
index a5ca2f2..28b31fc 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_internal.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp_internal.h
@@ -146,24 +146,6 @@
 	bool turbo;
 };
 
-struct cvp_input_cr_data {
-	struct list_head list;
-	u32 index;
-	u32 input_cr;
-};
-
-struct recon_buf {
-	struct list_head list;
-	u32 buffer_index;
-	u32 CR;
-	u32 CF;
-};
-
-struct eos_buf {
-	struct list_head list;
-	struct msm_smem smem;
-};
-
 struct internal_buf {
 	struct list_head list;
 	enum hal_buffer buffer_type;
@@ -172,50 +154,17 @@
 	bool mark_remove;
 };
 
-struct msm_cvp_csc_coeff {
-	u32 *vpe_csc_custom_matrix_coeff;
-	u32 *vpe_csc_custom_bias_coeff;
-	u32 *vpe_csc_custom_limit_coeff;
-};
-
-struct msm_cvp_buf_data {
-	struct list_head list;
-	u32 index;
-	u32 mark_data;
-	u32 mark_target;
-};
-
 struct msm_cvp_common_data {
 	char key[128];
 	int value;
 };
 
-struct msm_cvp_codec_data {
-	u32 fourcc;
-	enum session_type session_type;
-	int vpp_cycles;
-	int vsp_cycles;
-	int low_power_cycles;
-};
-
-enum efuse_purpose {
-	SKU_VERSION = 0,
-};
-
 enum sku_version {
 	SKU_VERSION_0 = 0,
 	SKU_VERSION_1,
 	SKU_VERSION_2,
 };
 
-struct msm_cvp_efuse_data {
-	u32 start_address;
-	u32 size;
-	u32 mask;
-	u32 shift;
-	enum efuse_purpose purpose;
-};
-
 enum vpu_version {
 	VPU_VERSION_4 = 1,
 	VPU_VERSION_5,
@@ -247,11 +196,6 @@
 struct msm_cvp_platform_data {
 	struct msm_cvp_common_data *common_data;
 	unsigned int common_data_length;
-	struct msm_cvp_codec_data *codec_data;
-	unsigned int codec_data_length;
-	struct msm_cvp_csc_coeff csc_data;
-	struct msm_cvp_efuse_data *efuse_data;
-	unsigned int efuse_data_length;
 	unsigned int sku_version;
 	phys_addr_t gcc_register_base;
 	uint32_t gcc_register_size;
@@ -270,19 +214,6 @@
 	u32 output_min_count;
 };
 
-struct msm_cvp_format_constraint {
-	u32 fourcc;
-	u32 num_planes;
-	u32 y_stride_multiples;
-	u32 y_max_stride;
-	u32 y_min_plane_buffer_height_multiple;
-	u32 y_buffer_alignment;
-	u32 uv_stride_multiples;
-	u32 uv_max_stride;
-	u32 uv_min_plane_buffer_height_multiple;
-	u32 uv_buffer_alignment;
-};
-
 struct msm_cvp_drv {
 	struct mutex lock;
 	struct list_head cores;
@@ -403,6 +334,22 @@
 	int (*decide_work_mode)(struct msm_cvp_inst *inst);
 };
 
+#define MAX_NUM_MSGS_PER_SESSION	128
+#define CVP_MAX_WAIT_TIME	2000
+
+struct session_msg {
+	struct list_head node;
+	struct hfi_msg_session_hdr pkt;
+};
+
+struct cvp_session_queue {
+	spinlock_t lock;
+	unsigned int msg_count;
+	struct list_head msgs;
+	wait_queue_head_t wq;
+	struct kmem_cache *msg_cache;
+};
+
 struct msm_cvp_core {
 	struct list_head list;
 	struct mutex lock;
@@ -436,33 +383,21 @@
 	struct mutex sync_lock, lock, flush_lock;
 	struct msm_cvp_core *core;
 	enum session_type session_type;
+	struct cvp_session_queue session_queue;
 	void *session;
 	struct session_prop prop;
 	enum instance_state state;
 	struct msm_cvp_format fmts[MAX_PORT_NUM];
 	struct buf_queue bufq[MAX_PORT_NUM];
 	struct msm_cvp_list freqs;
-	struct msm_cvp_list input_crs;
-	struct msm_cvp_list scratchbufs;
 	struct msm_cvp_list persistbufs;
-	struct msm_cvp_list pending_getpropq;
-	struct msm_cvp_list outputbufs;
-	struct msm_cvp_list reconbufs;
-	struct msm_cvp_list eosbufs;
 	struct msm_cvp_list registeredbufs;
 	struct msm_cvp_list cvpbufs;
-	struct msm_cvp_list etb_data;
-	struct msm_cvp_list fbd_data;
-	struct msm_cvp_list dfs_config;
 	struct buffer_requirements buff_req;
 	struct v4l2_ctrl_handler ctrl_handler;
 	struct completion completions[SESSION_MSG_END - SESSION_MSG_START + 1];
-	struct v4l2_ctrl **cluster;
 	struct v4l2_fh event_handler;
 	struct msm_smem *extradata_handle;
-	bool in_reconfig;
-	u32 reconfig_width;
-	u32 reconfig_height;
 	struct dentry *debugfs_root;
 	void *priv;
 	struct msm_cvp_debug debug;
@@ -474,48 +409,17 @@
 	enum buffer_mode_type buffer_mode_set[MAX_PORT_NUM];
 	enum multi_stream stream_output_mode;
 	struct v4l2_ctrl **ctrls;
-	int bit_depth;
 	struct kref kref;
-	bool in_flush;
-	u32 pic_struct;
-	u32 colour_space;
-	u32 profile;
-	u32 level;
-	u32 entropy_mode;
-	u32 grid_enable;
-	u32 frame_quality;
 	struct msm_cvp_codec_data *codec_data;
-	struct hal_hdr10_pq_sei hdr10_sei_params;
 	struct batch_mode batch;
 };
 
 extern struct msm_cvp_drv *cvp_driver;
 
-struct msm_cvp_ctrl_cluster {
-	struct v4l2_ctrl **cluster;
-	struct list_head list;
-};
-
-struct msm_cvp_ctrl {
-	u32 id;
-	char name[MAX_NAME_LENGTH];
-	enum v4l2_ctrl_type type;
-	s64 minimum;
-	s64 maximum;
-	s64 default_value;
-	u32 step;
-	u32 menu_skip_mask;
-	u32 flags;
-	const char * const *qmenu;
-};
-
 void cvp_handle_cmd_response(enum hal_command_response cmd, void *data);
 int msm_cvp_trigger_ssr(struct msm_cvp_core *core,
 	enum hal_ssr_trigger_type type);
 int msm_cvp_noc_error_info(struct msm_cvp_core *core);
-bool heic_encode_session_supported(struct msm_cvp_inst *inst);
-int msm_cvp_check_session_supported(struct msm_cvp_inst *inst);
-int msm_cvp_check_scaling_supported(struct msm_cvp_inst *inst);
 void msm_cvp_queue_v4l2_event(struct msm_cvp_inst *inst, int event_type);
 
 enum msm_cvp_flags {
@@ -535,13 +439,7 @@
 struct msm_cvp_internal_buffer {
 	struct list_head list;
 	struct msm_smem smem;
-	struct msm_cvp_buffer buf;
-};
-
-struct msm_cvp_internal_send_cmd {
-	struct list_head list;
-	struct msm_smem smem;
-	struct msm_cvp_send_cmd send_cmd;
+	struct cvp_kmd_buffer buf;
 };
 
 void msm_cvp_comm_handle_thermal_event(void);
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_platform.c b/drivers/media/platform/msm/cvp/msm_cvp_platform.c
index 21335bc..cea328c 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_platform.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_platform.c
@@ -24,24 +24,6 @@
 #define DDR_TYPE_LPDDR4Y 0x8
 #define DDR_TYPE_LPDDR5 0x9
 
-#define CODEC_ENTRY(n, p, vsp, vpp, lp) \
-{	\
-	.fourcc = n,		\
-	.session_type = p,	\
-	.vsp_cycles = vsp,	\
-	.vpp_cycles = vpp,	\
-	.low_power_cycles = lp	\
-}
-
-#define EFUSE_ENTRY(sa, s, m, sh, p) \
-{	\
-	.start_address = sa,		\
-	.size = s,	\
-	.mask = m,	\
-	.shift = sh,	\
-	.purpose = p	\
-}
-
 #define UBWC_CONFIG(mco, mlo, hbo, bslo, bso, rs, mc, ml, hbb, bsl, bsp) \
 {	\
 	.override_bit_info.max_channel_override = mco,	\
@@ -57,49 +39,6 @@
 	.bank_spreading = bsp,	\
 }
 
-/*FIXME: hard coded AXI_REG_START_ADDR???*/
-#define GCC_VIDEO_AXI_REG_START_ADDR	0x10B024
-#define GCC_VIDEO_AXI_REG_SIZE		0xC
-
-static struct msm_cvp_codec_data default_codec_data[] =  {
-	CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_CVP_ENCODER, 125, 675, 320),
-	CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_CVP_DECODER, 125, 675, 320),
-};
-
-/* Update with 855 data */
-static struct msm_cvp_codec_data sm8150_codec_data[] =  {
-	CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_CVP_ENCODER, 10, 675, 320),
-	CODEC_ENTRY(V4L2_PIX_FMT_HEVC, MSM_CVP_ENCODER, 10, 675, 320),
-	CODEC_ENTRY(V4L2_PIX_FMT_VP8, MSM_CVP_ENCODER, 10, 675, 320),
-	CODEC_ENTRY(V4L2_PIX_FMT_TME, MSM_CVP_ENCODER, 0, 540, 540),
-	CODEC_ENTRY(V4L2_PIX_FMT_MPEG2, MSM_CVP_DECODER, 10, 200, 200),
-	CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_CVP_DECODER, 10, 200, 200),
-	CODEC_ENTRY(V4L2_PIX_FMT_HEVC, MSM_CVP_DECODER, 10, 200, 200),
-	CODEC_ENTRY(V4L2_PIX_FMT_VP8, MSM_CVP_DECODER, 10, 200, 200),
-	CODEC_ENTRY(V4L2_PIX_FMT_VP9, MSM_CVP_DECODER, 10, 200, 200),
-};
-
-
-/*
- * Custom conversion coefficients for resolution: 176x144 negative
- * coeffs are converted to s4.9 format
- * (e.g. -22 converted to ((1 << 13) - 22)
- * 3x3 transformation matrix coefficients in s4.9 fixed point format
- */
-static u32 vpe_csc_custom_matrix_coeff[HAL_MAX_MATRIX_COEFFS] = {
-	470, 8170, 8148, 0, 490, 50, 0, 34, 483
-};
-
-/* offset coefficients in s9 fixed point format */
-static u32 vpe_csc_custom_bias_coeff[HAL_MAX_BIAS_COEFFS] = {
-	34, 0, 4
-};
-
-/* clamping value for Y/U/V([min,max] for Y/U/V) */
-static u32 vpe_csc_custom_limit_coeff[HAL_MAX_LIMIT_COEFFS] = {
-	16, 235, 16, 240, 16, 240
-};
-
 static struct msm_cvp_common_data default_common_data[] = {
 	{
 		.key = "qcom,never-unload-fw",
@@ -197,15 +136,8 @@
 
 
 static struct msm_cvp_platform_data default_data = {
-	.codec_data = default_codec_data,
-	.codec_data_length =  ARRAY_SIZE(default_codec_data),
 	.common_data = default_common_data,
 	.common_data_length =  ARRAY_SIZE(default_common_data),
-	.csc_data.vpe_csc_custom_bias_coeff = vpe_csc_custom_bias_coeff,
-	.csc_data.vpe_csc_custom_matrix_coeff = vpe_csc_custom_matrix_coeff,
-	.csc_data.vpe_csc_custom_limit_coeff = vpe_csc_custom_limit_coeff,
-	.efuse_data = NULL,
-	.efuse_data_length = 0,
 	.sku_version = 0,
 	.gcc_register_base = 0,
 	.gcc_register_size = 0,
@@ -214,15 +146,8 @@
 };
 
 static struct msm_cvp_platform_data sm8250_data = {
-	.codec_data = sm8150_codec_data,
-	.codec_data_length =  ARRAY_SIZE(sm8150_codec_data),
 	.common_data = sm8250_common_data,
 	.common_data_length =  ARRAY_SIZE(sm8250_common_data),
-	.csc_data.vpe_csc_custom_bias_coeff = vpe_csc_custom_bias_coeff,
-	.csc_data.vpe_csc_custom_matrix_coeff = vpe_csc_custom_matrix_coeff,
-	.csc_data.vpe_csc_custom_limit_coeff = vpe_csc_custom_limit_coeff,
-	.efuse_data = NULL,
-	.efuse_data_length = 0,
 	.sku_version = 0,
 	.vpu_ver = VPU_VERSION_5,
 	.ubwc_config = kona_ubwc_data,
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_res_parse.c b/drivers/media/platform/msm/cvp/msm_cvp_res_parse.c
index c4962ff..4387061a 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_res_parse.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_res_parse.c
@@ -19,7 +19,6 @@
 };
 
 #define PERF_GOV "performance"
-#define DEFAULT_CVP_CLK_SVS2
 
 static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
 {
@@ -329,14 +328,8 @@
 /* A comparator to compare loads (needed later on) */
 static int cmp(const void *a, const void *b)
 {
-#ifdef DEFAULT_CVP_CLK_SVS2
 	return ((struct allowed_clock_rates_table *)a)->clock_rate -
 		((struct allowed_clock_rates_table *)b)->clock_rate;
-#else
-	/* want to sort in reverse so flip the comparison */
-	return ((struct allowed_clock_rates_table *)b)->clock_rate -
-		((struct allowed_clock_rates_table *)a)->clock_rate;
-#endif
 }
 
 static int msm_cvp_load_allowed_clocks_table(
@@ -708,9 +701,6 @@
 	platform_data = core->platform_data;
 	res = &core->resources;
 
-	res->codec_data_count = platform_data->codec_data_length;
-	res->codec_data = platform_data->codec_data;
-
 	res->sku_version = platform_data->sku_version;
 
 	res->fw_name = "cvpss";
@@ -766,8 +756,6 @@
 	res->bus_devfreq_on = find_key_value(platform_data,
 			"qcom,use-devfreq-scale-bus");
 
-	res->csc_coeff_data = &platform_data->csc_data;
-
 	res->gcc_register_base = platform_data->gcc_register_base;
 	res->gcc_register_size = platform_data->gcc_register_size;
 
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_resources.h b/drivers/media/platform/msm/cvp/msm_cvp_resources.h
index a77045e..14f1eda 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_resources.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp_resources.h
@@ -186,9 +186,6 @@
 	bool cache_pagetables;
 	bool decode_batching;
 	bool dcvs;
-	struct msm_cvp_codec_data *codec_data;
-	int codec_data_count;
-	struct msm_cvp_csc_coeff *csc_coeff_data;
 	struct msm_cvp_mem_cdsp mem_cdsp;
 	uint32_t vpu_ver;
 	uint32_t fw_cycles;
diff --git a/drivers/media/platform/msm/cvp/msm_v4l2_cvp.c b/drivers/media/platform/msm/cvp/msm_v4l2_cvp.c
index 047dc49..3abea69 100644
--- a/drivers/media/platform/msm/cvp/msm_v4l2_cvp.c
+++ b/drivers/media/platform/msm/cvp/msm_v4l2_cvp.c
@@ -78,25 +78,19 @@
 static int msm_cvp_v4l2_querycap(struct file *filp, void *fh,
 			struct v4l2_capability *cap)
 {
-	struct msm_cvp_inst *cvp_inst = get_cvp_inst(filp, fh);
-
-	return msm_cvp_querycap((void *)cvp_inst, cap);
+	return -EINVAL;
 }
 
 int msm_cvp_v4l2_enum_fmt(struct file *file, void *fh,
 					struct v4l2_fmtdesc *f)
 {
-	struct msm_cvp_inst *cvp_inst = get_cvp_inst(file, fh);
-
-	return msm_cvp_enum_fmt((void *)cvp_inst, f);
+	return -EINVAL;
 }
 
 int msm_cvp_v4l2_s_fmt(struct file *file, void *fh,
 					struct v4l2_format *f)
 {
-	struct msm_cvp_inst *cvp_inst = get_cvp_inst(file, fh);
-
-	return msm_cvp_s_fmt((void *)cvp_inst, f);
+	return 0;
 }
 
 int msm_cvp_v4l2_g_fmt(struct file *file, void *fh,
@@ -112,7 +106,7 @@
 {
 	struct msm_cvp_inst *cvp_inst = get_cvp_inst(file, fh);
 
-	return msm_cvp_s_ctrl((void *)cvp_inst, a);
+	return v4l2_s_ctrl(NULL, &cvp_inst->ctrl_handler, a);
 }
 
 int msm_cvp_v4l2_g_ctrl(struct file *file, void *fh,
@@ -120,23 +114,19 @@
 {
 	struct msm_cvp_inst *cvp_inst = get_cvp_inst(file, fh);
 
-	return msm_cvp_g_ctrl((void *)cvp_inst, a);
+	return v4l2_g_ctrl(&cvp_inst->ctrl_handler, a);
 }
 
 int msm_cvp_v4l2_s_ext_ctrl(struct file *file, void *fh,
 					struct v4l2_ext_controls *a)
 {
-	struct msm_cvp_inst *cvp_inst = get_cvp_inst(file, fh);
-
-	return msm_cvp_s_ext_ctrl((void *)cvp_inst, a);
+	return -EINVAL;
 }
 
 int msm_cvp_v4l2_g_ext_ctrl(struct file *file, void *fh,
 					struct v4l2_ext_controls *a)
 {
-	struct msm_cvp_inst *cvp_inst = get_cvp_inst(file, fh);
-
-	return msm_cvp_g_ext_ctrl((void *)cvp_inst, a);
+	return 0;
 }
 
 int msm_cvp_v4l2_reqbufs(struct file *file, void *fh,
@@ -150,29 +140,25 @@
 int msm_cvp_v4l2_qbuf(struct file *file, void *fh,
 				struct v4l2_buffer *b)
 {
-	return msm_cvp_qbuf(get_cvp_inst(file, fh), b);
+	return 0;
 }
 
 int msm_cvp_v4l2_dqbuf(struct file *file, void *fh,
 				struct v4l2_buffer *b)
 {
-	return msm_cvp_dqbuf(get_cvp_inst(file, fh), b);
+	return 0;
 }
 
 int msm_cvp_v4l2_streamon(struct file *file, void *fh,
 				enum v4l2_buf_type i)
 {
-	struct msm_cvp_inst *cvp_inst = get_cvp_inst(file, fh);
-
-	return msm_cvp_streamon((void *)cvp_inst, i);
+	return 0;
 }
 
 int msm_cvp_v4l2_streamoff(struct file *file, void *fh,
 				enum v4l2_buf_type i)
 {
-	struct msm_cvp_inst *cvp_inst = get_cvp_inst(file, fh);
-
-	return msm_cvp_streamoff((void *)cvp_inst, i);
+	return 0;
 }
 
 static int msm_cvp_v4l2_subscribe_event(struct v4l2_fh *fh,
@@ -196,24 +182,18 @@
 static int msm_cvp_v4l2_decoder_cmd(struct file *file, void *fh,
 				struct v4l2_decoder_cmd *dec)
 {
-	struct msm_cvp_inst *cvp_inst = get_cvp_inst(file, fh);
-
-	return msm_cvp_comm_cmd((void *)cvp_inst, (union msm_v4l2_cmd *)dec);
+	return 0;
 }
 
 static int msm_cvp_v4l2_encoder_cmd(struct file *file, void *fh,
 				struct v4l2_encoder_cmd *enc)
 {
-	struct msm_cvp_inst *cvp_inst = get_cvp_inst(file, fh);
-
-	return msm_cvp_comm_cmd((void *)cvp_inst, (union msm_v4l2_cmd *)enc);
+	return 0;
 }
 static int msm_cvp_v4l2_s_parm(struct file *file, void *fh,
 			struct v4l2_streamparm *a)
 {
-	struct msm_cvp_inst *cvp_inst = get_cvp_inst(file, fh);
-
-	return msm_cvp_comm_s_parm(cvp_inst, a);
+	return 0;
 }
 static int msm_cvp_v4l2_g_parm(struct file *file, void *fh,
 		struct v4l2_streamparm *a)
@@ -224,9 +204,7 @@
 static int msm_cvp_v4l2_g_crop(struct file *file, void *fh,
 			struct v4l2_crop *a)
 {
-	struct msm_cvp_inst *cvp_inst = get_cvp_inst(file, fh);
-
-	return msm_cvp_g_crop(cvp_inst, a);
+	return -EINVAL;
 }
 
 static int msm_cvp_v4l2_enum_framesizes(struct file *file, void *fh,
@@ -240,9 +218,7 @@
 static int msm_cvp_v4l2_queryctrl(struct file *file, void *fh,
 	struct v4l2_queryctrl *ctrl)
 {
-	struct msm_cvp_inst *cvp_inst = get_cvp_inst(file, fh);
-
-	return msm_cvp_query_ctrl((void *)cvp_inst, ctrl);
+	return -EINVAL;
 }
 
 static long msm_cvp_v4l2_default(struct file *file, void *fh,
@@ -282,9 +258,6 @@
 	.vidioc_default = msm_cvp_v4l2_default,
 };
 
-static const struct v4l2_ioctl_ops msm_v4l2_enc_ioctl_ops = {
-};
-
 static unsigned int msm_cvp_v4l2_poll(struct file *filp,
 	struct poll_table_struct *pt)
 {
diff --git a/drivers/media/platform/msm/cvp/msm_v4l2_private.c b/drivers/media/platform/msm/cvp/msm_v4l2_private.c
index 35196a3..6656957 100644
--- a/drivers/media/platform/msm/cvp/msm_v4l2_private.c
+++ b/drivers/media/platform/msm/cvp/msm_v4l2_private.c
@@ -4,12 +4,138 @@
  */
 
 #include "msm_v4l2_private.h"
+#include "cvp_hfi_api.h"
 
-static int convert_from_user(struct msm_cvp_arg *kp, unsigned long arg)
+static int _get_pkt_hdr_from_user(struct cvp_kmd_arg __user *up,
+		struct cvp_hal_session_cmd_pkt *pkt_hdr)
+{
+	struct cvp_kmd_hfi_packet *u;
+
+	u = &up->data.hfi_pkt;
+
+	if (get_user(pkt_hdr->size, &u->pkt_data[0]))
+		return -EFAULT;
+
+	if (get_user(pkt_hdr->packet_type, &u->pkt_data[1]))
+		return -EFAULT;
+
+	if (get_pkt_index(pkt_hdr) < 0) {
+		dprintk(CVP_ERR, "user mode provides incorrect hfi\n");
+		goto set_default_pkt_hdr;
+	}
+
+	if (pkt_hdr->size > MAX_HFI_PKT_SIZE*sizeof(unsigned int)) {
+		dprintk(CVP_ERR, "user HFI packet too large %x\n",
+				pkt_hdr->size);
+		return -EINVAL;
+	}
+
+	return 0;
+
+set_default_pkt_hdr:
+	pkt_hdr->size = sizeof(struct hfi_msg_session_hdr);
+	return 0;
+}
+
+static int _get_fence_pkt_hdr_from_user(struct cvp_kmd_arg __user *up,
+		struct cvp_hal_session_cmd_pkt *pkt_hdr)
+{
+	struct cvp_kmd_hfi_fence_packet *u;
+
+	u = &up->data.hfi_fence_pkt;
+
+	if (get_user(pkt_hdr->packet_type, &u->pkt_data[1]))
+		return -EFAULT;
+
+	pkt_hdr->size = (MAX_HFI_FENCE_OFFSET + MAX_HFI_FENCE_SIZE)
+			* sizeof(unsigned int);
+
+	if (pkt_hdr->size > (MAX_HFI_PKT_SIZE*sizeof(unsigned int)))
+		return -EINVAL;
+
+	return 0;
+}
+
+/* Size is in unit of u32 */
+static int _copy_pkt_from_user(struct cvp_kmd_arg *kp,
+		struct cvp_kmd_arg __user *up,
+		unsigned int size)
+{
+	struct cvp_kmd_hfi_packet *k, *u;
+	int i;
+
+	k = &kp->data.hfi_pkt;
+	u = &up->data.hfi_pkt;
+	for (i = 0; i < size; i++)
+		if (get_user(k->pkt_data[i], &u->pkt_data[i]))
+			return -EFAULT;
+
+	return 0;
+}
+
+/* Size is in unit of u32 */
+static int _copy_fence_pkt_from_user(struct cvp_kmd_arg *kp,
+		struct cvp_kmd_arg __user *up,
+		unsigned int size)
+{
+	struct cvp_kmd_hfi_fence_packet *k, *u;
+	int i;
+
+	k = &kp->data.hfi_fence_pkt;
+	u = &up->data.hfi_fence_pkt;
+	for (i = 0; i < MAX_HFI_FENCE_OFFSET; i++) {
+		if (get_user(k->pkt_data[i], &u->pkt_data[i]))
+			return -EFAULT;
+	}
+	for (i = 0; i < MAX_HFI_FENCE_SIZE; i++) {
+		if (get_user(k->fence_data[i], &u->fence_data[i]))
+			return -EFAULT;
+	}
+	return 0;
+}
+
+static int _copy_pkt_to_user(struct cvp_kmd_arg *kp,
+		struct cvp_kmd_arg __user *up,
+		unsigned int size)
+{
+	struct cvp_kmd_hfi_packet *k, *u;
+	int i;
+
+	k = &kp->data.hfi_pkt;
+	u = &up->data.hfi_pkt;
+	for (i = 0; i < size; i++)
+		if (put_user(k->pkt_data[i], &u->pkt_data[i]))
+			return -EFAULT;
+
+	return 0;
+}
+
+static int _copy_fence_pkt_to_user(struct cvp_kmd_arg *kp,
+		struct cvp_kmd_arg __user *up,
+		unsigned int size)
+{
+	struct cvp_kmd_hfi_fence_packet *k, *u;
+	int i;
+
+	k = &kp->data.hfi_fence_pkt;
+	u = &up->data.hfi_fence_pkt;
+	for (i = 0; i < MAX_HFI_FENCE_OFFSET; i++) {
+		if (put_user(k->pkt_data[i], &u->pkt_data[i]))
+			return -EFAULT;
+	}
+	for (i = 0; i < MAX_HFI_FENCE_SIZE; i++) {
+		if (put_user(k->fence_data[i], &u->fence_data[i]))
+			return -EFAULT;
+	}
+	return 0;
+}
+
+static int convert_from_user(struct cvp_kmd_arg *kp, unsigned long arg)
 {
 	int rc = 0;
 	int i;
-	struct msm_cvp_arg __user *up = compat_ptr(arg);
+	struct cvp_kmd_arg __user *up = compat_ptr(arg);
+	struct cvp_hal_session_cmd_pkt pkt_hdr;
 
 	if (!kp || !up) {
 		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
@@ -20,9 +146,9 @@
 		return -EFAULT;
 
 	switch (kp->type) {
-	case MSM_CVP_GET_SESSION_INFO:
+	case CVP_KMD_GET_SESSION_INFO:
 	{
-		struct msm_cvp_session_info *k, *u;
+		struct cvp_kmd_session_info *k, *u;
 
 		k = &kp->data.session;
 		u = &up->data.session;
@@ -33,9 +159,9 @@
 				return -EFAULT;
 		break;
 	}
-	case MSM_CVP_REQUEST_POWER:
+	case CVP_KMD_REQUEST_POWER:
 	{
-		struct msm_cvp_request_power *k, *u;
+		struct cvp_kmd_request_power *k, *u;
 
 		k = &kp->data.req_power;
 		u = &up->data.req_power;
@@ -49,9 +175,9 @@
 				return -EFAULT;
 		break;
 	}
-	case MSM_CVP_REGISTER_BUFFER:
+	case CVP_KMD_REGISTER_BUFFER:
 	{
-		struct msm_cvp_buffer *k, *u;
+		struct cvp_kmd_buffer *k, *u;
 
 		k = &kp->data.regbuf;
 		u = &up->data.regbuf;
@@ -68,9 +194,9 @@
 				return -EFAULT;
 		break;
 	}
-	case MSM_CVP_UNREGISTER_BUFFER:
+	case CVP_KMD_UNREGISTER_BUFFER:
 	{
-		struct msm_cvp_buffer *k, *u;
+		struct cvp_kmd_buffer *k, *u;
 
 		k = &kp->data.unregbuf;
 		u = &up->data.unregbuf;
@@ -87,12 +213,10 @@
 				return -EFAULT;
 		break;
 	}
-	case MSM_CVP_HFI_SEND_CMD:
+	case CVP_KMD_HFI_SEND_CMD:
 	{
-		struct msm_cvp_send_cmd *k, *u;
+		struct cvp_kmd_send_cmd *k, *u;
 
-		dprintk(CVP_DBG, "%s: MSM_CVP_HFI_SEND_CMD\n",
-				__func__);
 		k = &kp->data.send_cmd;
 		u = &up->data.send_cmd;
 		if (get_user(k->cmd_address_fd, &u->cmd_address_fd) ||
@@ -103,79 +227,42 @@
 				return -EFAULT;
 		break;
 	}
-	case MSM_CVP_HFI_DFS_CONFIG_CMD:
+	case CVP_KMD_SEND_CMD_PKT:
+	case CVP_KMD_HFI_DFS_CONFIG_CMD:
+	case CVP_KMD_HFI_DFS_FRAME_CMD:
+	case CVP_KMD_HFI_DME_CONFIG_CMD:
+	case CVP_KMD_HFI_DME_FRAME_CMD:
+	case CVP_KMD_HFI_PERSIST_CMD:
 	{
-		struct msm_cvp_dfs_config *k, *u;
+		if (_get_pkt_hdr_from_user(up, &pkt_hdr)) {
+			dprintk(CVP_ERR, "Invalid syscall: %x, %x, %x\n",
+				kp->type, pkt_hdr.size, pkt_hdr.packet_type);
+			return -EFAULT;
+		}
 
-		dprintk(CVP_DBG, "%s: MSM_CVP_HFI_DFS_CONFIG_CMD\n", __func__);
-		k = &kp->data.dfs_config;
-		u = &up->data.dfs_config;
-		for (i = 0; i < CVP_DFS_CONFIG_CMD_SIZE; i++)
-			if (get_user(k->cvp_dfs_config[i],
-				&u->cvp_dfs_config[i]))
-				return -EFAULT;
+		dprintk(CVP_DBG, "system call cmd pkt: %d 0x%x\n",
+				pkt_hdr.size, pkt_hdr.packet_type);
+		rc = _copy_pkt_from_user(kp, up, (pkt_hdr.size >> 2));
 		break;
 	}
-	case MSM_CVP_HFI_DFS_FRAME_CMD:
-	case MSM_CVP_HFI_DFS_FRAME_CMD_RESPONSE:
+	case CVP_KMD_HFI_DME_FRAME_FENCE_CMD:
 	{
-		struct msm_cvp_dfs_frame *k, *u;
+		if (_get_fence_pkt_hdr_from_user(up, &pkt_hdr)) {
+			dprintk(CVP_ERR, "Invalid syscall: %x, %x, %x\n",
+				kp->type, pkt_hdr.size, pkt_hdr.packet_type);
+			return -EFAULT;
+		}
 
-		dprintk(CVP_DBG, "%s: Type =%d\n", __func__, kp->type);
-		k = &kp->data.dfs_frame;
-		u = &up->data.dfs_frame;
-		for (i = 0; i < CVP_DFS_FRAME_CMD_SIZE; i++)
-			if (get_user(k->frame_data[i], &u->frame_data[i]))
-				return -EFAULT;
-
+		dprintk(CVP_DBG, "system call cmd pkt: %d 0x%x\n",
+				pkt_hdr.size, pkt_hdr.packet_type);
+		rc = _copy_fence_pkt_from_user(kp, up, (pkt_hdr.size >> 2));
 		break;
 	}
-	case MSM_CVP_HFI_DME_CONFIG_CMD:
-	{
-		struct msm_cvp_dme_config *k, *u;
-
-		dprintk(CVP_DBG, "%s: MSM_CVP_HFI_DFS_CONFIG_CMD\n", __func__);
-		k = &kp->data.dme_config;
-		u = &up->data.dme_config;
-		for (i = 0; i < CVP_DME_CONFIG_CMD_SIZE; i++)
-			if (get_user(k->cvp_dme_config[i],
-				&u->cvp_dme_config[i]))
-				return -EFAULT;
+	case CVP_KMD_HFI_DFS_FRAME_CMD_RESPONSE:
+	case CVP_KMD_HFI_DME_FRAME_CMD_RESPONSE:
+	case CVP_KMD_HFI_PERSIST_CMD_RESPONSE:
+	case CVP_KMD_RECEIVE_MSG_PKT:
 		break;
-	}
-	case MSM_CVP_HFI_DME_FRAME_CMD:
-	case MSM_CVP_HFI_DME_FRAME_CMD_RESPONSE:
-	{
-		struct msm_cvp_dme_frame *k, *u;
-
-		dprintk(CVP_DBG, "%s: type = %d\n",
-					__func__, kp->type);
-		k = &kp->data.dme_frame;
-		u = &up->data.dme_frame;
-
-		for (i = 0; i < CVP_DME_FRAME_CMD_SIZE; i++)
-			if (get_user(k->frame_data[i], &u->frame_data[i]))
-				return -EFAULT;
-
-		break;
-	}
-	case MSM_CVP_HFI_PERSIST_CMD:
-	case MSM_CVP_HFI_PERSIST_CMD_RESPONSE:
-	{
-		struct msm_cvp_persist_buf *k, *u;
-
-		dprintk(CVP_DBG, "%s: type = %d\n",
-					__func__, kp->type);
-		k = &kp->data.pbuf_cmd;
-		u = &up->data.pbuf_cmd;
-
-		for (i = 0; i < CVP_PERSIST_CMD_SIZE; i++)
-			if (get_user(k->persist_data[i], &u->persist_data[i]))
-				return -EFAULT;
-
-		break;
-	}
-
 	default:
 		dprintk(CVP_ERR, "%s: unknown cmd type 0x%x\n",
 			__func__, kp->type);
@@ -186,11 +273,12 @@
 	return rc;
 }
 
-static int convert_to_user(struct msm_cvp_arg *kp, unsigned long arg)
+static int convert_to_user(struct cvp_kmd_arg *kp, unsigned long arg)
 {
 	int rc = 0;
-	int i;
-	struct msm_cvp_arg __user *up = compat_ptr(arg);
+	int i, size = sizeof(struct hfi_msg_session_hdr) >> 2;
+	struct cvp_kmd_arg __user *up = compat_ptr(arg);
+	struct cvp_hal_session_cmd_pkt pkt_hdr;
 
 	if (!kp || !up) {
 		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
@@ -201,9 +289,20 @@
 		return -EFAULT;
 
 	switch (kp->type) {
-	case MSM_CVP_GET_SESSION_INFO:
+	case CVP_KMD_RECEIVE_MSG_PKT:
 	{
-		struct msm_cvp_session_info *k, *u;
+		struct cvp_kmd_hfi_packet *k, *u;
+
+		k = &kp->data.hfi_pkt;
+		u = &up->data.hfi_pkt;
+		for (i = 0; i < size; i++)
+			if (put_user(k->pkt_data[i], &u->pkt_data[i]))
+				return -EFAULT;
+		break;
+	}
+	case CVP_KMD_GET_SESSION_INFO:
+	{
+		struct cvp_kmd_session_info *k, *u;
 
 		k = &kp->data.session;
 		u = &up->data.session;
@@ -214,9 +313,9 @@
 				return -EFAULT;
 		break;
 	}
-	case MSM_CVP_REQUEST_POWER:
+	case CVP_KMD_REQUEST_POWER:
 	{
-		struct msm_cvp_request_power *k, *u;
+		struct cvp_kmd_request_power *k, *u;
 
 		k = &kp->data.req_power;
 		u = &up->data.req_power;
@@ -230,9 +329,9 @@
 				return -EFAULT;
 		break;
 	}
-	case MSM_CVP_REGISTER_BUFFER:
+	case CVP_KMD_REGISTER_BUFFER:
 	{
-		struct msm_cvp_buffer *k, *u;
+		struct cvp_kmd_buffer *k, *u;
 
 		k = &kp->data.regbuf;
 		u = &up->data.regbuf;
@@ -249,9 +348,9 @@
 				return -EFAULT;
 		break;
 	}
-	case MSM_CVP_UNREGISTER_BUFFER:
+	case CVP_KMD_UNREGISTER_BUFFER:
 	{
-		struct msm_cvp_buffer *k, *u;
+		struct cvp_kmd_buffer *k, *u;
 
 		k = &kp->data.unregbuf;
 		u = &up->data.unregbuf;
@@ -268,11 +367,11 @@
 				return -EFAULT;
 		break;
 	}
-	case MSM_CVP_HFI_SEND_CMD:
+	case CVP_KMD_HFI_SEND_CMD:
 	{
-		struct msm_cvp_send_cmd *k, *u;
+		struct cvp_kmd_send_cmd *k, *u;
 
-		dprintk(CVP_DBG, "%s: MSM_CVP_HFI_SEND_CMD\n",
+		dprintk(CVP_DBG, "%s: CVP_KMD_HFI_SEND_CMD\n",
 					__func__);
 
 		k = &kp->data.send_cmd;
@@ -285,80 +384,32 @@
 				return -EFAULT;
 		break;
 	}
-	case MSM_CVP_HFI_DFS_CONFIG_CMD:
+	case CVP_KMD_SEND_CMD_PKT:
+	case CVP_KMD_HFI_DFS_CONFIG_CMD:
+	case CVP_KMD_HFI_DFS_FRAME_CMD:
+	case CVP_KMD_HFI_DFS_FRAME_CMD_RESPONSE:
+	case CVP_KMD_HFI_DME_CONFIG_CMD:
+	case CVP_KMD_HFI_DME_FRAME_CMD:
+	case CVP_KMD_HFI_DME_FRAME_CMD_RESPONSE:
+	case CVP_KMD_HFI_PERSIST_CMD:
+	case CVP_KMD_HFI_PERSIST_CMD_RESPONSE:
 	{
-		struct msm_cvp_dfs_config *k, *u;
+		if (_get_pkt_hdr_from_user(up, &pkt_hdr))
+			return -EFAULT;
 
-		dprintk(CVP_DBG, "%s: MSM_CVP_HFI_DFS_CONFIG_CMD\n",
-					__func__);
-
-		k = &kp->data.dfs_config;
-		u = &up->data.dfs_config;
-		for (i = 0; i < CVP_DFS_CONFIG_CMD_SIZE; i++)
-			if (put_user(k->cvp_dfs_config[i],
-				&u->cvp_dfs_config[i]))
-				return -EFAULT;
+		dprintk(CVP_DBG, "Send user cmd pkt: %d %d\n",
+				pkt_hdr.size, pkt_hdr.packet_type);
+		rc = _copy_pkt_to_user(kp, up, (pkt_hdr.size >> 2));
 		break;
 	}
-	case MSM_CVP_HFI_DFS_FRAME_CMD:
-	case MSM_CVP_HFI_DFS_FRAME_CMD_RESPONSE:
+	case CVP_KMD_HFI_DME_FRAME_FENCE_CMD:
 	{
-		struct msm_cvp_dfs_frame *k, *u;
+		if (_get_fence_pkt_hdr_from_user(up, &pkt_hdr))
+			return -EFAULT;
 
-		dprintk(CVP_DBG, "%s: type = %d\n",
-					__func__, kp->type);
-		k = &kp->data.dfs_frame;
-		u = &up->data.dfs_frame;
-
-		for (i = 0; i < CVP_DFS_FRAME_CMD_SIZE; i++)
-			if (put_user(k->frame_data[i], &u->frame_data[i]))
-				return -EFAULT;
-
-		break;
-	}
-	case MSM_CVP_HFI_DME_CONFIG_CMD:
-	{
-		struct msm_cvp_dme_config *k, *u;
-
-		dprintk(CVP_DBG, "%s: MSM_CVP_HFI_DME_CONFIG_CMD\n", __func__);
-		k = &kp->data.dme_config;
-		u = &up->data.dme_config;
-		for (i = 0; i < CVP_DME_CONFIG_CMD_SIZE; i++)
-			if (put_user(k->cvp_dme_config[i],
-				&u->cvp_dme_config[i]))
-				return -EFAULT;
-		break;
-	}
-	case MSM_CVP_HFI_DME_FRAME_CMD:
-	case MSM_CVP_HFI_DME_FRAME_CMD_RESPONSE:
-	{
-		struct msm_cvp_dme_frame *k, *u;
-
-		dprintk(CVP_DBG, "%s: type = %d\n",
-					__func__, kp->type);
-		k = &kp->data.dme_frame;
-		u = &up->data.dme_frame;
-
-		for (i = 0; i < CVP_DME_FRAME_CMD_SIZE; i++)
-			if (put_user(k->frame_data[i], &u->frame_data[i]))
-				return -EFAULT;
-
-		break;
-	}
-	case MSM_CVP_HFI_PERSIST_CMD:
-	case MSM_CVP_HFI_PERSIST_CMD_RESPONSE:
-	{
-		struct msm_cvp_persist_buf *k, *u;
-
-		dprintk(CVP_DBG, "%s: type = %d\n",
-					__func__, kp->type);
-		k = &kp->data.pbuf_cmd;
-		u = &up->data.pbuf_cmd;
-
-		for (i = 0; i < CVP_PERSIST_CMD_SIZE; i++)
-			if (put_user(k->persist_data[i], &u->persist_data[i]))
-				return -EFAULT;
-
+		dprintk(CVP_DBG, "Send user cmd pkt: %d %d\n",
+				pkt_hdr.size, pkt_hdr.packet_type);
+		rc = _copy_fence_pkt_to_user(kp, up, (pkt_hdr.size >> 2));
 		break;
 	}
 	default:
@@ -376,7 +427,7 @@
 {
 	int rc;
 	struct msm_cvp_inst *inst;
-	struct msm_cvp_arg karg;
+	struct cvp_kmd_arg karg;
 
 	if (!filp || !filp->private_data) {
 		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
@@ -385,15 +436,18 @@
 
 	inst = container_of(filp->private_data, struct msm_cvp_inst,
 			event_handler);
-	memset(&karg, 0, sizeof(struct msm_cvp_arg));
+	memset(&karg, 0, sizeof(struct cvp_kmd_arg));
 
 	/*
 	 * the arg points to user space memory and needs
 	 * to be converted to kernel space before using it.
 	 * Check do_video_ioctl() for more details.
 	 */
-	if (convert_from_user(&karg, arg))
+	if (convert_from_user(&karg, arg)) {
+		dprintk(CVP_ERR, "%s: failed to get from user cmd %x\n",
+			__func__, karg.type);
 		return -EFAULT;
+	}
 
 	rc = msm_cvp_private((void *)inst, cmd, &karg);
 	if (rc) {
@@ -402,8 +456,11 @@
 		return -EINVAL;
 	}
 
-	if (convert_to_user(&karg, arg))
+	if (convert_to_user(&karg, arg)) {
+		dprintk(CVP_ERR, "%s: failed to copy to user cmd %x\n",
+			__func__, karg.type);
 		return -EFAULT;
+	}
 
 	return rc;
 }
diff --git a/drivers/media/platform/msm/npu/npu_common.h b/drivers/media/platform/msm/npu/npu_common.h
index ac122e1..672b207 100644
--- a/drivers/media/platform/msm/npu/npu_common.h
+++ b/drivers/media/platform/msm/npu/npu_common.h
@@ -55,6 +55,15 @@
 	NPU_PWRLEVEL_OFF = 0xFFFFFFFF,
 };
 
+#define NPU_ERR(fmt, args...)                            \
+	pr_err("NPU_ERR: %s: %d " fmt "\n", __func__,  __LINE__, ##args)
+#define NPU_WARN(fmt, args...)                           \
+	pr_warn("NPU_WARN: %s: %d " fmt "\n", __func__,  __LINE__, ##args)
+#define NPU_INFO(fmt, args...)                           \
+	pr_info("NPU_INFO: %s: %d " fmt "\n", __func__,  __LINE__, ##args)
+#define NPU_DBG(fmt, args...)                           \
+	pr_debug("NPU_DBG: %s: %d " fmt "\n", __func__,  __LINE__, ##args)
+
 /* -------------------------------------------------------------------------
  * Data Structures
  * -------------------------------------------------------------------------
@@ -159,7 +168,6 @@
 	uint32_t num_pwrlevels;
 
 	struct device *devbw;
-	uint32_t bwmon_enabled;
 	uint32_t uc_pwrlevel;
 	uint32_t cdsprm_pwrlevel;
 	uint32_t fmax_pwrlevel;
@@ -206,7 +214,6 @@
 	struct npu_io_data tcm_io;
 	struct npu_io_data qdsp_io;
 	struct npu_io_data apss_shared_io;
-	struct npu_io_data bwmon_io;
 	struct npu_io_data qfprom_io;
 
 	uint32_t core_clk_num;
diff --git a/drivers/media/platform/msm/npu/npu_dbg.c b/drivers/media/platform/msm/npu/npu_dbg.c
index f1486a2..b0dfc85 100644
--- a/drivers/media/platform/msm/npu/npu_dbg.c
+++ b/drivers/media/platform/msm/npu/npu_dbg.c
@@ -3,8 +3,6 @@
  * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 /* -------------------------------------------------------------------------
  * Includes
  * -------------------------------------------------------------------------
@@ -24,9 +22,9 @@
 	uint32_t reg_val;
 
 	reg_val = REGR(npu_dev, REG_FW_JOB_CNT_START);
-	pr_info("fw jobs execute started count = %d\n", reg_val);
+	NPU_INFO("fw jobs execute started count = %d\n", reg_val);
 	reg_val = REGR(npu_dev, REG_FW_JOB_CNT_END);
-	pr_info("fw jobs execute finished count = %d\n", reg_val);
+	NPU_INFO("fw jobs execute finished count = %d\n", reg_val);
 	reg_val = REGR(npu_dev, REG_NPU_FW_DEBUG_DATA);
-	pr_info("fw jobs aco parser debug = %d\n", reg_val);
+	NPU_INFO("fw jobs aco parser debug = %d\n", reg_val);
 }
diff --git a/drivers/media/platform/msm/npu/npu_debugfs.c b/drivers/media/platform/msm/npu/npu_debugfs.c
index df83e2a..987e182 100644
--- a/drivers/media/platform/msm/npu/npu_debugfs.c
+++ b/drivers/media/platform/msm/npu/npu_debugfs.c
@@ -3,8 +3,6 @@
  * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 /* -------------------------------------------------------------------------
  * Includes
  * -------------------------------------------------------------------------
@@ -120,7 +118,7 @@
 	buf[count] = 0;	/* end of string */
 
 	cnt = sscanf(buf, "%zx %x", &off, &data);
-	pr_debug("%s %s 0x%zx, 0x%08x\n", __func__, buf, off, data);
+	NPU_DBG("%s 0x%zx, 0x%08x\n", buf, off, data);
 
 	return count;
 	if (cnt < 2)
@@ -133,7 +131,7 @@
 
 	npu_disable_core_power(npu_dev);
 
-	pr_debug("write: addr=%zx data=%x\n", off, data);
+	NPU_DBG("write: addr=%zx data=%x\n", off, data);
 
 	return count;
 }
@@ -193,9 +191,9 @@
 		return 0; /* done reading */
 
 	len = min(count, debugfs->buf_len - (size_t) *ppos);
-	pr_debug("read %zi %zi\n", count, debugfs->buf_len - (size_t) *ppos);
+	NPU_DBG("read %zi %zi\n", count, debugfs->buf_len - (size_t) *ppos);
 	if (copy_to_user(user_buf, debugfs->buf + *ppos, len)) {
-		pr_err("failed to copy to user\n");
+		NPU_ERR("failed to copy to user\n");
 		return -EFAULT;
 	}
 
@@ -216,7 +214,7 @@
 	struct npu_device *npu_dev = file->private_data;
 	struct npu_debugfs_ctx *debugfs;
 
-	pr_debug("npu_dev %pK %pK\n", npu_dev, g_npu_dev);
+	NPU_DBG("npu_dev %pK %pK\n", npu_dev, g_npu_dev);
 	npu_dev = g_npu_dev;
 	debugfs = &npu_dev->debugfs_ctx;
 
@@ -231,7 +229,7 @@
 	cnt = sscanf(buf, "%zx %x", &off, &reg_cnt);
 	if (cnt == 1)
 		reg_cnt = DEFAULT_REG_DUMP_NUM;
-	pr_debug("reg off = %zx, %d cnt=%d\n", off, reg_cnt, cnt);
+	NPU_DBG("reg off = %zx, %d cnt=%d\n", off, reg_cnt, cnt);
 	if (cnt >= 1) {
 		debugfs->reg_off = off;
 		debugfs->reg_cnt = reg_cnt;
@@ -248,7 +246,7 @@
 	struct npu_device *npu_dev = file->private_data;
 	struct npu_debugfs_ctx *debugfs;
 
-	pr_debug("npu_dev %pK %pK\n", npu_dev, g_npu_dev);
+	NPU_DBG("npu_dev %pK %pK\n", npu_dev, g_npu_dev);
 	npu_dev = g_npu_dev;
 	debugfs = &npu_dev->debugfs_ctx;
 
@@ -259,7 +257,7 @@
 		debugfs->reg_off, debugfs->reg_cnt);
 
 	if (copy_to_user(user_buf, buf, len)) {
-		pr_err("failed to copy to user\n");
+		NPU_ERR("failed to copy to user\n");
 		return -EFAULT;
 	}
 
@@ -278,7 +276,7 @@
 	struct npu_device *npu_dev = file->private_data;
 	struct npu_debugfs_ctx *debugfs;
 
-	pr_debug("npu_dev %pK %pK\n", npu_dev, g_npu_dev);
+	NPU_DBG("npu_dev %pK %pK\n", npu_dev, g_npu_dev);
 	npu_dev = g_npu_dev;
 	debugfs = &npu_dev->debugfs_ctx;
 
@@ -298,7 +296,7 @@
 
 			if (copy_to_user(dst_addr, src_addr,
 				remaining_to_end)) {
-				pr_err("%s failed to copy to user\n", __func__);
+				NPU_ERR("failed to copy to user\n");
 				mutex_unlock(&debugfs->log_lock);
 				return -EFAULT;
 			}
@@ -307,7 +305,7 @@
 			if (copy_to_user(dst_addr, src_addr,
 				debugfs->log_num_bytes_buffered -
 				remaining_to_end)) {
-				pr_err("%s failed to copy to user\n", __func__);
+				NPU_ERR("failed to copy to user\n");
 				mutex_unlock(&debugfs->log_lock);
 				return -EFAULT;
 			}
@@ -318,7 +316,7 @@
 			if (copy_to_user(user_buf, (debugfs->log_buf +
 				debugfs->log_read_index),
 				debugfs->log_num_bytes_buffered)) {
-				pr_err("%s failed to copy to user\n", __func__);
+				NPU_ERR("failed to copy to user\n");
 				mutex_unlock(&debugfs->log_lock);
 				return -EFAULT;
 			}
@@ -350,7 +348,7 @@
 	int32_t rc = 0;
 	uint32_t val;
 
-	pr_debug("npu_dev %pK %pK\n", npu_dev, g_npu_dev);
+	NPU_DBG("npu_dev %pK %pK\n", npu_dev, g_npu_dev);
 	npu_dev = g_npu_dev;
 	debugfs = &npu_dev->debugfs_ctx;
 
@@ -366,14 +364,14 @@
 		buf[count-1] = 0;/* remove line feed */
 
 	if (strcmp(buf, "on") == 0) {
-		pr_info("triggering fw_init\n");
+		NPU_INFO("triggering fw_init\n");
 		if (fw_init(npu_dev) != 0)
-			pr_info("error in fw_init\n");
+			NPU_INFO("error in fw_init\n");
 	} else if (strcmp(buf, "off") == 0) {
-		pr_info("triggering fw_deinit\n");
+		NPU_INFO("triggering fw_deinit\n");
 		fw_deinit(npu_dev, false, true);
 	} else if (strcmp(buf, "ssr") == 0) {
-		pr_info("trigger error irq\n");
+		NPU_INFO("trigger error irq\n");
 		if (npu_enable_core_power(npu_dev))
 			return -EPERM;
 
@@ -381,20 +379,20 @@
 		REGW(npu_dev, NPU_MASTERn_ERROR_IRQ_SET(0), 2);
 		npu_disable_core_power(npu_dev);
 	} else if (strcmp(buf, "ssr_wdt") == 0) {
-		pr_info("trigger wdt irq\n");
+		NPU_INFO("trigger wdt irq\n");
 		npu_disable_post_pil_clocks(npu_dev);
 	} else if (strcmp(buf, "loopback") == 0) {
-		pr_debug("loopback test\n");
+		NPU_DBG("loopback test\n");
 		rc = npu_host_loopback_test(npu_dev);
-		pr_debug("loopback test end: %d\n", rc);
+		NPU_DBG("loopback test end: %d\n", rc);
 	} else {
 		rc = kstrtou32(buf, 10, &val);
 		if (rc) {
-			pr_err("Invalid input for power level settings\n");
+			NPU_ERR("Invalid input for power level settings\n");
 		} else {
 			val = min(val, npu_dev->pwrctrl.max_pwrlevel);
 			npu_dev->pwrctrl.active_pwrlevel = val;
-			pr_info("setting power state to %d\n", val);
+			NPU_INFO("setting power state to %d\n", val);
 		}
 	}
 
@@ -414,62 +412,62 @@
 
 	debugfs->root = debugfs_create_dir("npu", NULL);
 	if (IS_ERR_OR_NULL(debugfs->root)) {
-		pr_err("debugfs_create_dir for npu failed, error %ld\n",
+		NPU_ERR("debugfs_create_dir for npu failed, error %ld\n",
 			PTR_ERR(debugfs->root));
 		return -ENODEV;
 	}
 
 	if (!debugfs_create_file("reg", 0644, debugfs->root,
 		npu_dev, &npu_reg_fops)) {
-		pr_err("debugfs_create_file reg fail\n");
+		NPU_ERR("debugfs_create_file reg fail\n");
 		goto err;
 	}
 
 	if (!debugfs_create_file("off", 0644, debugfs->root,
 		npu_dev, &npu_off_fops)) {
-		pr_err("debugfs_create_file off fail\n");
+		NPU_ERR("debugfs_create_file off fail\n");
 		goto err;
 	}
 
 	if (!debugfs_create_file("log", 0644, debugfs->root,
 		npu_dev, &npu_log_fops)) {
-		pr_err("debugfs_create_file log fail\n");
+		NPU_ERR("debugfs_create_file log fail\n");
 		goto err;
 	}
 
 	if (!debugfs_create_file("ctrl", 0644, debugfs->root,
 		npu_dev, &npu_ctrl_fops)) {
-		pr_err("debugfs_create_file ctrl fail\n");
+		NPU_ERR("debugfs_create_file ctrl fail\n");
 		goto err;
 	}
 
 	if (!debugfs_create_bool("sys_cache_disable", 0644,
 		debugfs->root, &(host_ctx->sys_cache_disable))) {
-		pr_err("debugfs_creat_bool fail for sys cache\n");
+		NPU_ERR("debugfs_creat_bool fail for sys cache\n");
 		goto err;
 	}
 
 	if (!debugfs_create_u32("fw_dbg_mode", 0644,
 		debugfs->root, &(host_ctx->fw_dbg_mode))) {
-		pr_err("debugfs_create_u32 fail for fw_dbg_mode\n");
+		NPU_ERR("debugfs_create_u32 fail for fw_dbg_mode\n");
 		goto err;
 	}
 
 	if (!debugfs_create_u32("fw_state", 0444,
 		debugfs->root, &(host_ctx->fw_state))) {
-		pr_err("debugfs_create_u32 fail for fw_state\n");
+		NPU_ERR("debugfs_create_u32 fail for fw_state\n");
 		goto err;
 	}
 
 	if (!debugfs_create_u32("pwr_level", 0444,
 		debugfs->root, &(pwr->active_pwrlevel))) {
-		pr_err("debugfs_create_u32 fail for pwr_level\n");
+		NPU_ERR("debugfs_create_u32 fail for pwr_level\n");
 		goto err;
 	}
 
 	if (!debugfs_create_u32("exec_flags", 0644,
 		debugfs->root, &(host_ctx->exec_flags_override))) {
-		pr_err("debugfs_create_u32 fail for exec_flags\n");
+		NPU_ERR("debugfs_create_u32 fail for exec_flags\n");
 		goto err;
 	}
 
diff --git a/drivers/media/platform/msm/npu/npu_dev.c b/drivers/media/platform/msm/npu/npu_dev.c
index f8f0d51..196b51a 100644
--- a/drivers/media/platform/msm/npu/npu_dev.c
+++ b/drivers/media/platform/msm/npu/npu_dev.c
@@ -3,8 +3,6 @@
  * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 /* -------------------------------------------------------------------------
  * Includes
  * -------------------------------------------------------------------------
@@ -106,23 +104,10 @@
  * -------------------------------------------------------------------------
  */
 static const char * const npu_post_clocks[] = {
-	"npu_cpc_clk",
-	"npu_cpc_timer_clk"
 };
 
 static const char * const npu_exclude_rate_clocks[] = {
-	"qdss_clk",
-	"at_clk",
-	"trig_clk",
-	"sleep_clk",
 	"xo_clk",
-	"conf_noc_ahb_clk",
-	"comp_noc_axi_clk",
-	"npu_core_cti_clk",
-	"npu_core_apb_clk",
-	"npu_core_atb_clk",
-	"npu_cpc_timer_clk",
-	"qtimer_core_clk",
 	"bwmon_clk",
 	"bto_core_clk",
 	"llm_xo_clk",
@@ -134,7 +119,14 @@
 	"dsp_bwmon_ahb_clk",
 	"cal_hm0_perf_cnt_clk",
 	"cal_hm1_perf_cnt_clk",
-	"dsp_ahbs_clk"
+	"dsp_ahbs_clk",
+	"axi_clk",
+	"ahb_clk",
+	"dma_clk",
+	"llm_temp_clk",
+	"llm_curr_clk",
+	"atb_clk",
+	"s2p_clk",
 };
 
 static const struct npu_irq npu_irq_info[NPU_MAX_IRQ] = {
@@ -281,13 +273,13 @@
 
 	rc = kstrtou32(buf, 10, &val);
 	if (rc) {
-		pr_err("Invalid input for perf mode setting\n");
+		NPU_ERR("Invalid input for perf mode setting\n");
 		return -EINVAL;
 	}
 
 	val = min(val, npu_dev->pwrctrl.num_pwrlevels);
 	npu_dev->pwrctrl.perf_mode_override = val;
-	pr_info("setting uc_pwrlevel_override to %d\n", val);
+	NPU_INFO("setting uc_pwrlevel_override to %d\n", val);
 	npu_set_power_level(npu_dev, true);
 
 	return count;
@@ -333,7 +325,7 @@
 		pwr->active_pwrlevel = thermalctrl->pwr_level;
 		pwr->uc_pwrlevel = pwr->max_pwrlevel;
 		pwr->cdsprm_pwrlevel = pwr->max_pwrlevel;
-		pr_debug("setting back to power level=%d\n",
+		NPU_DBG("setting back to power level=%d\n",
 			pwr->active_pwrlevel);
 	}
 }
@@ -404,8 +396,8 @@
 	 * settings
 	 */
 	ret_level = min(therm_pwr_level, uc_pwr_level);
-	pr_debug("%s therm=%d active=%d uc=%d set level=%d\n",
-		__func__, therm_pwr_level, active_pwr_level, uc_pwr_level,
+	NPU_DBG("therm=%d active=%d uc=%d set level=%d\n",
+		therm_pwr_level, active_pwr_level, uc_pwr_level,
 		ret_level);
 
 	return ret_level;
@@ -423,7 +415,7 @@
 	pwr_level_to_cdsprm = pwr_level_to_set;
 
 	if (!pwr->pwr_vote_num) {
-		pr_debug("power is not enabled during set request\n");
+		NPU_DBG("power is not enabled during set request\n");
 		pwr->active_pwrlevel = min(pwr_level_to_set,
 			npu_dev->pwrctrl.cdsprm_pwrlevel);
 		return 0;
@@ -434,11 +426,11 @@
 
 	/* if the same as current, dont do anything */
 	if (pwr_level_to_set == pwr->active_pwrlevel) {
-		pr_debug("power level %d doesn't change\n", pwr_level_to_set);
+		NPU_DBG("power level %d doesn't change\n", pwr_level_to_set);
 		return 0;
 	}
 
-	pr_debug("setting power level to [%d]\n", pwr_level_to_set);
+	NPU_DBG("setting power level to [%d]\n", pwr_level_to_set);
 	pwr_level_idx = npu_power_level_to_index(npu_dev, pwr_level_to_set);
 	pwrlevel = &npu_dev->pwrctrl.pwrlevels[pwr_level_idx];
 
@@ -453,13 +445,13 @@
 				continue;
 		}
 
-		pr_debug("requested rate of clock [%s] to [%ld]\n",
+		NPU_DBG("requested rate of clock [%s] to [%ld]\n",
 			npu_dev->core_clks[i].clk_name, pwrlevel->clk_freq[i]);
 
 		ret = clk_set_rate(npu_dev->core_clks[i].clk,
 			pwrlevel->clk_freq[i]);
 		if (ret) {
-			pr_debug("clk_set_rate %s to %ld failed with %d\n",
+			NPU_DBG("clk_set_rate %s to %ld failed with %d\n",
 				npu_dev->core_clks[i].clk_name,
 				pwrlevel->clk_freq[i], ret);
 			break;
@@ -541,11 +533,11 @@
 				continue;
 		}
 
-		pr_debug("enabling clock %s\n", core_clks[i].clk_name);
+		NPU_DBG("enabling clock %s\n", core_clks[i].clk_name);
 
 		rc = clk_prepare_enable(core_clks[i].clk);
 		if (rc) {
-			pr_err("%s enable failed\n",
+			NPU_ERR("%s enable failed\n",
 				core_clks[i].clk_name);
 			break;
 		}
@@ -553,14 +545,14 @@
 		if (npu_is_exclude_rate_clock(core_clks[i].clk_name))
 			continue;
 
-		pr_debug("setting rate of clock %s to %ld\n",
+		NPU_DBG("setting rate of clock %s to %ld\n",
 			core_clks[i].clk_name, pwrlevel->clk_freq[i]);
 
 		rc = clk_set_rate(core_clks[i].clk,
 			pwrlevel->clk_freq[i]);
 		/* not fatal error, keep using previous clk rate */
 		if (rc) {
-			pr_err("clk_set_rate %s to %ld failed\n",
+			NPU_ERR("clk_set_rate %s to %ld failed\n",
 				core_clks[i].clk_name,
 				pwrlevel->clk_freq[i]);
 			rc = 0;
@@ -576,7 +568,7 @@
 				if (npu_is_post_clock(core_clks[i].clk_name))
 					continue;
 			}
-			pr_debug("disabling clock %s\n", core_clks[i].clk_name);
+			NPU_DBG("disabling clock %s\n", core_clks[i].clk_name);
 			clk_disable_unprepare(core_clks[i].clk);
 		}
 	}
@@ -600,17 +592,17 @@
 
 		/* set clock rate to 0 before disabling it */
 		if (!npu_is_exclude_rate_clock(core_clks[i].clk_name)) {
-			pr_debug("setting rate of clock %s to 0\n",
+			NPU_DBG("setting rate of clock %s to 0\n",
 				core_clks[i].clk_name);
 
 			rc = clk_set_rate(core_clks[i].clk, 0);
 			if (rc) {
-				pr_err("clk_set_rate %s to 0 failed\n",
+				NPU_ERR("clk_set_rate %s to 0 failed\n",
 					core_clks[i].clk_name);
 			}
 		}
 
-		pr_debug("disabling clock %s\n", core_clks[i].clk_name);
+		NPU_DBG("disabling clock %s\n", core_clks[i].clk_name);
 		clk_disable_unprepare(core_clks[i].clk);
 	}
 }
@@ -625,8 +617,7 @@
 	struct npu_device *npu_dev = cdev->devdata;
 	struct npu_thermalctrl *thermalctrl = &npu_dev->thermalctrl;
 
-	pr_debug("enter %s thermal max state=%lu\n", __func__,
-		thermalctrl->max_state);
+	NPU_DBG("thermal max state=%lu\n", thermalctrl->max_state);
 
 	*state = thermalctrl->max_state;
 
@@ -639,8 +630,7 @@
 	struct npu_device *npu_dev = cdev->devdata;
 	struct npu_thermalctrl *thermal = &npu_dev->thermalctrl;
 
-	pr_debug("enter %s thermal current state=%lu\n", __func__,
-		thermal->current_state);
+	NPU_DBG("thermal current state=%lu\n", thermal->current_state);
 
 	*state = thermal->current_state;
 
@@ -653,7 +643,7 @@
 	struct npu_device *npu_dev = cdev->devdata;
 	struct npu_thermalctrl *thermal = &npu_dev->thermalctrl;
 
-	pr_debug("enter %s request state=%lu\n", __func__, state);
+	NPU_DBG("request state=%lu\n", state);
 	if (state > thermal->max_state)
 		return -EINVAL;
 
@@ -679,11 +669,11 @@
 		for (i = 0; i < npu_dev->regulator_num; i++) {
 			rc = regulator_enable(regulators[i].regulator);
 			if (rc < 0) {
-				pr_err("%s enable failed\n",
+				NPU_ERR("%s enable failed\n",
 					regulators[i].regulator_name);
 				break;
 			}
-			pr_debug("regulator %s enabled\n",
+			NPU_DBG("regulator %s enabled\n",
 				regulators[i].regulator_name);
 		}
 	}
@@ -700,7 +690,7 @@
 	if (host_ctx->power_vote_num > 0) {
 		for (i = 0; i < npu_dev->regulator_num; i++) {
 			regulator_disable(regulators[i].regulator);
-			pr_debug("regulator %s disabled\n",
+			NPU_DBG("regulator %s disabled\n",
 				regulators[i].regulator_name);
 		}
 		host_ctx->power_vote_num--;
@@ -718,7 +708,7 @@
 	for (i = 0; i < NPU_MAX_IRQ; i++) {
 		if (npu_dev->irq[i].irq != 0) {
 			enable_irq(npu_dev->irq[i].irq);
-			pr_debug("enable irq %d\n", npu_dev->irq[i].irq);
+			NPU_DBG("enable irq %d\n", npu_dev->irq[i].irq);
 		}
 	}
 
@@ -732,7 +722,7 @@
 	for (i = 0; i < NPU_MAX_IRQ; i++) {
 		if (npu_dev->irq[i].irq != 0) {
 			disable_irq(npu_dev->irq[i].irq);
-			pr_debug("disable irq %d\n", npu_dev->irq[i].irq);
+			NPU_DBG("disable irq %d\n", npu_dev->irq[i].irq);
 		}
 	}
 }
@@ -749,7 +739,7 @@
 	if (!npu_dev->host_ctx.sys_cache_disable) {
 		npu_dev->sys_cache = llcc_slice_getd(LLCC_NPU);
 		if (IS_ERR_OR_NULL(npu_dev->sys_cache)) {
-			pr_warn("unable to init sys cache\n");
+			NPU_WARN("unable to init sys cache\n");
 			npu_dev->sys_cache = NULL;
 			npu_dev->host_ctx.sys_cache_disable = true;
 			return 0;
@@ -780,12 +770,12 @@
 		REGW(npu_dev, NPU_CACHEMAP1_ATTR_METADATA_IDn(3), reg_val);
 		REGW(npu_dev, NPU_CACHEMAP1_ATTR_METADATA_IDn(4), reg_val);
 
-		pr_debug("prior to activate sys cache\n");
+		NPU_DBG("prior to activate sys cache\n");
 		rc = llcc_slice_activate(npu_dev->sys_cache);
 		if (rc)
-			pr_err("failed to activate sys cache\n");
+			NPU_ERR("failed to activate sys cache\n");
 		else
-			pr_debug("sys cache activated\n");
+			NPU_DBG("sys cache activated\n");
 	}
 
 	return rc;
@@ -799,10 +789,10 @@
 		if (npu_dev->sys_cache) {
 			rc = llcc_slice_deactivate(npu_dev->sys_cache);
 			if (rc) {
-				pr_err("failed to deactivate sys cache\n");
+				NPU_ERR("failed to deactivate sys cache\n");
 				return;
 			}
-			pr_debug("sys cache deactivated\n");
+			NPU_DBG("sys cache deactivated\n");
 			llcc_slice_putd(npu_dev->sys_cache);
 			npu_dev->sys_cache = NULL;
 		}
@@ -866,21 +856,21 @@
 	ret = copy_from_user(&req, argp, sizeof(req));
 
 	if (ret) {
-		pr_err("fail to copy from user\n");
+		NPU_ERR("fail to copy from user\n");
 		return -EFAULT;
 	}
 
 	ret = npu_host_get_info(npu_dev, &req);
 
 	if (ret) {
-		pr_err("npu_host_get_info failed\n");
+		NPU_ERR("npu_host_get_info failed\n");
 		return ret;
 	}
 
 	ret = copy_to_user(argp, &req, sizeof(req));
 
 	if (ret) {
-		pr_err("fail to copy to user\n");
+		NPU_ERR("fail to copy to user\n");
 		return -EFAULT;
 	}
 	return 0;
@@ -895,21 +885,21 @@
 	ret = copy_from_user(&req, argp, sizeof(req));
 
 	if (ret) {
-		pr_err("fail to copy from user\n");
+		NPU_ERR("fail to copy from user\n");
 		return -EFAULT;
 	}
 
 	ret = npu_host_map_buf(client, &req);
 
 	if (ret) {
-		pr_err("npu_host_map_buf failed\n");
+		NPU_ERR("npu_host_map_buf failed\n");
 		return ret;
 	}
 
 	ret = copy_to_user(argp, &req, sizeof(req));
 
 	if (ret) {
-		pr_err("fail to copy to user\n");
+		NPU_ERR("fail to copy to user\n");
 		return -EFAULT;
 	}
 	return 0;
@@ -924,21 +914,21 @@
 	ret = copy_from_user(&req, argp, sizeof(req));
 
 	if (ret) {
-		pr_err("fail to copy from user\n");
+		NPU_ERR("fail to copy from user\n");
 		return -EFAULT;
 	}
 
 	ret = npu_host_unmap_buf(client, &req);
 
 	if (ret) {
-		pr_err("npu_host_unmap_buf failed\n");
+		NPU_ERR("npu_host_unmap_buf failed\n");
 		return ret;
 	}
 
 	ret = copy_to_user(argp, &req, sizeof(req));
 
 	if (ret) {
-		pr_err("fail to copy to user\n");
+		NPU_ERR("fail to copy to user\n");
 		return -EFAULT;
 	}
 	return 0;
@@ -955,21 +945,21 @@
 	ret = copy_from_user(&req, argp, sizeof(req));
 
 	if (ret) {
-		pr_err("fail to copy from user\n");
+		NPU_ERR("fail to copy from user\n");
 		return -EFAULT;
 	}
 
-	pr_debug("network load with perf request %d\n", req.perf_mode);
+	NPU_DBG("network load with perf request %d\n", req.perf_mode);
 
 	ret = npu_host_load_network(client, &req);
 	if (ret) {
-		pr_err("npu_host_load_network failed %d\n", ret);
+		NPU_ERR("npu_host_load_network failed %d\n", ret);
 		return ret;
 	}
 
 	ret = copy_to_user(argp, &req, sizeof(req));
 	if (ret) {
-		pr_err("fail to copy to user\n");
+		NPU_ERR("fail to copy to user\n");
 		ret = -EFAULT;
 		unload_req.network_hdl = req.network_hdl;
 		npu_host_unload_network(client, &unload_req);
@@ -988,12 +978,12 @@
 
 	ret = copy_from_user(&req, argp, sizeof(req));
 	if (ret) {
-		pr_err("fail to copy from user\n");
+		NPU_ERR("fail to copy from user\n");
 		return -EFAULT;
 	}
 
 	if (req.patch_info_num > MSM_NPU_MAX_PATCH_LAYER_NUM) {
-		pr_err("Invalid patch info num %d[max:%d]\n",
+		NPU_ERR("Invalid patch info num %d[max:%d]\n",
 			req.patch_info_num, MSM_NPU_MAX_PATCH_LAYER_NUM);
 		return -EINVAL;
 	}
@@ -1008,25 +998,25 @@
 			(void __user *)req.patch_info,
 			req.patch_info_num * sizeof(*patch_info));
 		if (ret) {
-			pr_err("fail to copy patch info\n");
+			NPU_ERR("fail to copy patch info\n");
 			kfree(patch_info);
 			return -EFAULT;
 		}
 	}
 
-	pr_debug("network load with perf request %d\n", req.perf_mode);
+	NPU_DBG("network load with perf request %d\n", req.perf_mode);
 
 	ret = npu_host_load_network_v2(client, &req, patch_info);
 
 	kfree(patch_info);
 	if (ret) {
-		pr_err("npu_host_load_network_v2 failed %d\n", ret);
+		NPU_ERR("npu_host_load_network_v2 failed %d\n", ret);
 		return ret;
 	}
 
 	ret = copy_to_user(argp, &req, sizeof(req));
 	if (ret) {
-		pr_err("fail to copy to user\n");
+		NPU_ERR("fail to copy to user\n");
 		ret = -EFAULT;
 		unload_req.network_hdl = req.network_hdl;
 		npu_host_unload_network(client, &unload_req);
@@ -1045,21 +1035,21 @@
 	ret = copy_from_user(&req, argp, sizeof(req));
 
 	if (ret) {
-		pr_err("fail to copy from user\n");
+		NPU_ERR("fail to copy from user\n");
 		return -EFAULT;
 	}
 
 	ret = npu_host_unload_network(client, &req);
 
 	if (ret) {
-		pr_err("npu_host_unload_network failed %d\n", ret);
+		NPU_ERR("npu_host_unload_network failed %d\n", ret);
 		return ret;
 	}
 
 	ret = copy_to_user(argp, &req, sizeof(req));
 
 	if (ret) {
-		pr_err("fail to copy to user\n");
+		NPU_ERR("fail to copy to user\n");
 		return -EFAULT;
 	}
 	return 0;
@@ -1075,13 +1065,13 @@
 	ret = copy_from_user(&req, argp, sizeof(req));
 
 	if (ret) {
-		pr_err("fail to copy from user\n");
+		NPU_ERR("fail to copy from user\n");
 		return -EFAULT;
 	}
 
 	if ((req.input_layer_num > MSM_NPU_MAX_INPUT_LAYER_NUM) ||
 		(req.output_layer_num > MSM_NPU_MAX_OUTPUT_LAYER_NUM)) {
-		pr_err("Invalid input/out layer num %d[max:%d] %d[max:%d]\n",
+		NPU_ERR("Invalid input/out layer num %d[max:%d] %d[max:%d]\n",
 			req.input_layer_num, MSM_NPU_MAX_INPUT_LAYER_NUM,
 			req.output_layer_num, MSM_NPU_MAX_OUTPUT_LAYER_NUM);
 		return -EINVAL;
@@ -1090,14 +1080,14 @@
 	ret = npu_host_exec_network(client, &req);
 
 	if (ret) {
-		pr_err("npu_host_exec_network failed %d\n", ret);
+		NPU_ERR("npu_host_exec_network failed %d\n", ret);
 		return ret;
 	}
 
 	ret = copy_to_user(argp, &req, sizeof(req));
 
 	if (ret) {
-		pr_err("fail to copy to user\n");
+		NPU_ERR("fail to copy to user\n");
 		return -EFAULT;
 	}
 	return 0;
@@ -1113,18 +1103,18 @@
 
 	ret = copy_from_user(&req, argp, sizeof(req));
 	if (ret) {
-		pr_err("fail to copy from user\n");
+		NPU_ERR("fail to copy from user\n");
 		return -EFAULT;
 	}
 
 	if (req.patch_buf_info_num > MSM_NPU_MAX_PATCH_LAYER_NUM) {
-		pr_err("Invalid patch buf info num %d[max:%d]\n",
+		NPU_ERR("Invalid patch buf info num %d[max:%d]\n",
 			req.patch_buf_info_num, MSM_NPU_MAX_PATCH_LAYER_NUM);
 		return -EINVAL;
 	}
 
 	if (req.stats_buf_size > NPU_MAX_STATS_BUF_SIZE) {
-		pr_err("Invalid stats buffer size %d max %d\n",
+		NPU_ERR("Invalid stats buffer size %d max %d\n",
 			req.stats_buf_size, NPU_MAX_STATS_BUF_SIZE);
 		return -EINVAL;
 	}
@@ -1139,7 +1129,7 @@
 			(void __user *)req.patch_buf_info,
 			req.patch_buf_info_num * sizeof(*patch_buf_info));
 		if (ret) {
-			pr_err("fail to copy patch buf info\n");
+			NPU_ERR("fail to copy patch buf info\n");
 			kfree(patch_buf_info);
 			return -EFAULT;
 		}
@@ -1149,13 +1139,13 @@
 
 	kfree(patch_buf_info);
 	if (ret) {
-		pr_err("npu_host_exec_network_v2 failed %d\n", ret);
+		NPU_ERR("npu_host_exec_network_v2 failed %d\n", ret);
 		return ret;
 	}
 
 	ret = copy_to_user(argp, &req, sizeof(req));
 	if (ret) {
-		pr_err("fail to copy to user\n");
+		NPU_ERR("fail to copy to user\n");
 		ret = -EFAULT;
 	}
 
@@ -1172,7 +1162,7 @@
 			(void *)&kevt->reserved[0],
 			kevt->evt.u.exec_v2_done.stats_buf_size);
 		if (ret) {
-			pr_err("fail to copy to user\n");
+			NPU_ERR("fail to copy to user\n");
 			kevt->evt.u.exec_v2_done.stats_buf_size = 0;
 			ret = -EFAULT;
 		}
@@ -1193,7 +1183,7 @@
 
 	mutex_lock(&client->list_lock);
 	if (list_empty(&client->evt_list)) {
-		pr_err("event list is empty\n");
+		NPU_ERR("event list is empty\n");
 		ret = -EINVAL;
 	} else {
 		kevt = list_first_entry(&client->evt_list,
@@ -1203,7 +1193,7 @@
 		ret = copy_to_user(argp, &kevt->evt,
 			sizeof(struct msm_npu_event));
 		if (ret) {
-			pr_err("fail to copy to user\n");
+			NPU_ERR("fail to copy to user\n");
 			ret = -EFAULT;
 		}
 		kfree(kevt);
@@ -1248,7 +1238,7 @@
 		ret = npu_receive_event(client, arg);
 		break;
 	default:
-		pr_err("unexpected IOCTL %x\n", cmd);
+		NPU_ERR("unexpected IOCTL %x\n", cmd);
 	}
 
 	return ret;
@@ -1263,7 +1253,7 @@
 
 	mutex_lock(&client->list_lock);
 	if (!list_empty(&client->evt_list)) {
-		pr_debug("poll cmd done\n");
+		NPU_DBG("poll cmd done\n");
 		rc = POLLIN | POLLRDNORM;
 	}
 	mutex_unlock(&client->list_lock);
@@ -1287,11 +1277,11 @@
 	num_clk = of_property_count_strings(pdev->dev.of_node,
 			"clock-names");
 	if (num_clk <= 0) {
-		pr_err("clocks are not defined\n");
+		NPU_ERR("clocks are not defined\n");
 		rc = -EINVAL;
 		goto clk_err;
 	} else if (num_clk > NUM_MAX_CLK_NUM) {
-		pr_err("number of clocks %d exceeds limit\n", num_clk);
+		NPU_ERR("number of clocks %d exceeds limit\n", num_clk);
 		rc = -EINVAL;
 		goto clk_err;
 	}
@@ -1304,7 +1294,7 @@
 			sizeof(core_clks[i].clk_name));
 		core_clks[i].clk = devm_clk_get(&pdev->dev, clock_name);
 		if (IS_ERR(core_clks[i].clk)) {
-			pr_err("unable to get clk: %s\n", clock_name);
+			NPU_ERR("unable to get clk: %s\n", clock_name);
 			rc = -EINVAL;
 			break;
 		}
@@ -1327,12 +1317,12 @@
 			"qcom,proxy-reg-names");
 	if (num <= 0) {
 		rc = -EINVAL;
-		pr_err("regulator not defined\n");
+		NPU_ERR("regulator not defined\n");
 		goto regulator_err;
 	}
 	if (num > NPU_MAX_REGULATOR_NUM) {
 		rc = -EINVAL;
-		pr_err("regulator number %d is over the limit %d\n", num,
+		NPU_ERR("regulator number %d is over the limit %d\n", num,
 			NPU_MAX_REGULATOR_NUM);
 		num = NPU_MAX_REGULATOR_NUM;
 	}
@@ -1345,7 +1335,7 @@
 				sizeof(regulators[i].regulator_name));
 		regulators[i].regulator = devm_regulator_get(&pdev->dev, name);
 		if (IS_ERR(regulators[i].regulator)) {
-			pr_err("unable to get regulator: %s\n", name);
+			NPU_ERR("unable to get regulator: %s\n", name);
 			rc = -EINVAL;
 			break;
 		}
@@ -1376,17 +1366,17 @@
 		struct npu_pwrlevel *level;
 
 		if (of_property_read_u32(child, "reg", &index)) {
-			pr_err("Can't find reg property\n");
+			NPU_ERR("Can't find reg property\n");
 			return -EINVAL;
 		}
 
 		if (of_property_read_u32(child, "vreg", &pwr_level)) {
-			pr_err("Can't find vreg property\n");
+			NPU_ERR("Can't find vreg property\n");
 			return -EINVAL;
 		}
 
 		if (index >= NPU_MAX_PWRLEVELS) {
-			pr_err("pwrlevel index %d is out of range\n",
+			NPU_ERR("pwrlevel index %d is out of range\n",
 				index);
 			continue;
 		}
@@ -1396,7 +1386,7 @@
 
 		if (of_property_read_u32_array(child, "clk-freq",
 			clk_array_values, npu_dev->core_clk_num)) {
-			pr_err("pwrlevel index %d read clk-freq failed %d\n",
+			NPU_ERR("pwrlevel index %d read clk-freq failed %d\n",
 				index, npu_dev->core_clk_num);
 			return -EINVAL;
 		}
@@ -1415,7 +1405,7 @@
 
 			clk_rate = clk_round_rate(npu_dev->core_clks[i].clk,
 				clk_array_values[i]);
-			pr_debug("clk %s rate [%u]:[%u]\n",
+			NPU_DBG("clk %s rate [%u]:[%u]\n",
 				npu_dev->core_clks[i].clk_name,
 				clk_array_values[i], clk_rate);
 			level->clk_freq[i] = clk_rate;
@@ -1427,7 +1417,7 @@
 		fmax = (npu_qfprom_reg_read(npu_dev,
 			QFPROM_FMAX_REG_OFFSET) & QFPROM_FMAX_BITS_MASK) >>
 			QFPROM_FMAX_BITS_SHIFT;
-		pr_debug("fmax %x\n", fmax);
+		NPU_DBG("fmax %x\n", fmax);
 
 		switch (fmax) {
 		case 1:
@@ -1447,7 +1437,7 @@
 	}
 
 	of_property_read_u32(node, "initial-pwrlevel", &init_level_index);
-	pr_debug("initial-pwrlevel %d\n", init_level_index);
+	NPU_DBG("initial-pwrlevel %d\n", init_level_index);
 
 	if (init_level_index >= pwr->num_pwrlevels)
 		init_level_index = pwr->num_pwrlevels - 1;
@@ -1456,10 +1446,10 @@
 		init_level_index);
 	if (init_power_level > pwr->max_pwrlevel) {
 		init_power_level = pwr->max_pwrlevel;
-		pr_debug("Adjust init power level to %d\n", init_power_level);
+		NPU_DBG("Adjust init power level to %d\n", init_power_level);
 	}
 
-	pr_debug("init power level %d max %d min %d\n", init_power_level,
+	NPU_DBG("init power level %d max %d min %d\n", init_power_level,
 		pwr->max_pwrlevel, pwr->min_pwrlevel);
 	pwr->active_pwrlevel = pwr->default_pwrlevel = init_power_level;
 	pwr->uc_pwrlevel = pwr->max_pwrlevel;
@@ -1474,14 +1464,12 @@
 	struct platform_device *pdev = npu_dev->pdev;
 	struct device_node *node;
 	int ret = 0;
-	struct platform_device *p2dev;
-	struct npu_pwrctrl *pwr = &npu_dev->pwrctrl;
 
 	/* Power levels */
 	node = of_find_node_by_name(pdev->dev.of_node, "qcom,npu-pwrlevels");
 
 	if (!node) {
-		pr_err("unable to find 'qcom,npu-pwrlevels'\n");
+		NPU_ERR("unable to find 'qcom,npu-pwrlevels'\n");
 		return -EINVAL;
 	}
 
@@ -1489,26 +1477,6 @@
 	if (ret)
 		return ret;
 
-	/* Parse Bandwidth */
-	node = of_parse_phandle(pdev->dev.of_node,
-				"qcom,npubw-dev", 0);
-
-	if (node) {
-		/* Set to 1 initially - we assume bwmon is on */
-		pwr->bwmon_enabled = 1;
-		p2dev = of_find_device_by_node(node);
-		if (p2dev) {
-			pwr->devbw = &p2dev->dev;
-		} else {
-			pr_err("parser power level failed\n");
-			ret = -EINVAL;
-			return ret;
-		}
-	} else {
-		pr_warn("bwdev is not defined in dts\n");
-		pwr->devbw = NULL;
-	}
-
 	return ret;
 }
 
@@ -1533,13 +1501,13 @@
 		npu_dev->irq[i].irq = platform_get_irq_byname(
 			npu_dev->pdev, npu_dev->irq[i].name);
 		if (npu_dev->irq[i].irq < 0) {
-			pr_err("get_irq for %s failed\n\n",
+			NPU_ERR("get_irq for %s failed\n\n",
 				npu_dev->irq[i].name);
 			ret = -EINVAL;
 			break;
 		}
 
-		pr_debug("irq %s: %d\n", npu_dev->irq[i].name,
+		NPU_DBG("irq %s: %d\n", npu_dev->irq[i].name,
 			npu_dev->irq[i].irq);
 		irq_set_status_flags(npu_dev->irq[i].irq,
 						IRQ_NOAUTOEN);
@@ -1548,7 +1516,7 @@
 				irq_type, npu_dev->irq[i].name,
 				npu_dev);
 		if (ret) {
-			pr_err("devm_request_irq(%s:%d) failed\n",
+			NPU_ERR("devm_request_irq(%s:%d) failed\n",
 				npu_dev->irq[i].name,
 				npu_dev->irq[i].irq);
 			break;
@@ -1571,7 +1539,7 @@
 
 		mbox_aop->chan = mbox_request_channel(&mbox_aop->client, 0);
 		if (IS_ERR(mbox_aop->chan)) {
-			pr_warn("aop mailbox is not available\n");
+			NPU_WARN("aop mailbox is not available\n");
 			mbox_aop->chan = NULL;
 		}
 	}
@@ -1609,7 +1577,7 @@
 	res = platform_get_resource_byname(pdev,
 		IORESOURCE_MEM, "core");
 	if (!res) {
-		pr_err("unable to get core resource\n");
+		NPU_ERR("unable to get core resource\n");
 		rc = -ENODEV;
 		goto error_get_dev_num;
 	}
@@ -1618,17 +1586,17 @@
 	npu_dev->core_io.base = devm_ioremap(&pdev->dev, res->start,
 					npu_dev->core_io.size);
 	if (unlikely(!npu_dev->core_io.base)) {
-		pr_err("unable to map core\n");
+		NPU_ERR("unable to map core\n");
 		rc = -ENOMEM;
 		goto error_get_dev_num;
 	}
-	pr_debug("core phy address=0x%llx virt=%pK\n",
+	NPU_DBG("core phy address=0x%llx virt=%pK\n",
 		res->start, npu_dev->core_io.base);
 
 	res = platform_get_resource_byname(pdev,
 		IORESOURCE_MEM, "tcm");
 	if (!res) {
-		pr_err("unable to get tcm resource\n");
+		NPU_ERR("unable to get tcm resource\n");
 		rc = -ENODEV;
 		goto error_get_dev_num;
 	}
@@ -1637,17 +1605,17 @@
 	npu_dev->tcm_io.base = devm_ioremap(&pdev->dev, res->start,
 					npu_dev->tcm_io.size);
 	if (unlikely(!npu_dev->tcm_io.base)) {
-		pr_err("unable to map tcm\n");
+		NPU_ERR("unable to map tcm\n");
 		rc = -ENOMEM;
 		goto error_get_dev_num;
 	}
-	pr_debug("tcm phy address=0x%llx virt=%pK\n",
+	NPU_DBG("tcm phy address=0x%llx virt=%pK\n",
 		res->start, npu_dev->tcm_io.base);
 
 	res = platform_get_resource_byname(pdev,
 		IORESOURCE_MEM, "qdsp");
 	if (!res) {
-		pr_err("unable to get qdsp resource\n");
+		NPU_ERR("unable to get qdsp resource\n");
 		rc = -ENODEV;
 		goto error_get_dev_num;
 	}
@@ -1656,17 +1624,17 @@
 	npu_dev->qdsp_io.base = devm_ioremap(&pdev->dev, res->start,
 					npu_dev->qdsp_io.size);
 	if (unlikely(!npu_dev->qdsp_io.base)) {
-		pr_err("unable to map qdsp\n");
+		NPU_ERR("unable to map qdsp\n");
 		rc = -ENOMEM;
 		goto error_get_dev_num;
 	}
-	pr_debug("qdsp phy address=0x%llx virt=%pK\n",
+	NPU_DBG("qdsp phy address=0x%llx virt=%pK\n",
 		res->start, npu_dev->qdsp_io.base);
 
 	res = platform_get_resource_byname(pdev,
 		IORESOURCE_MEM, "apss_shared");
 	if (!res) {
-		pr_err("unable to get apss_shared resource\n");
+		NPU_ERR("unable to get apss_shared resource\n");
 		rc = -ENODEV;
 		goto error_get_dev_num;
 	}
@@ -1675,46 +1643,28 @@
 	npu_dev->apss_shared_io.base = devm_ioremap(&pdev->dev, res->start,
 					npu_dev->apss_shared_io.size);
 	if (unlikely(!npu_dev->apss_shared_io.base)) {
-		pr_err("unable to map apss_shared\n");
+		NPU_ERR("unable to map apss_shared\n");
 		rc = -ENOMEM;
 		goto error_get_dev_num;
 	}
-	pr_debug("apss_shared phy address=0x%llx virt=%pK\n",
+	NPU_DBG("apss_shared phy address=0x%llx virt=%pK\n",
 		res->start, npu_dev->apss_shared_io.base);
 
 	res = platform_get_resource_byname(pdev,
-		IORESOURCE_MEM, "bwmon");
-	if (!res) {
-		pr_info("unable to get bwmon resource\n");
-	} else {
-		npu_dev->bwmon_io.size = resource_size(res);
-		npu_dev->bwmon_io.phy_addr = res->start;
-		npu_dev->bwmon_io.base = devm_ioremap(&pdev->dev, res->start,
-						npu_dev->bwmon_io.size);
-		if (unlikely(!npu_dev->bwmon_io.base)) {
-			pr_err("unable to map bwmon\n");
-			rc = -ENOMEM;
-			goto error_get_dev_num;
-		}
-		pr_debug("bwmon phy address=0x%llx virt=%pK\n",
-			res->start, npu_dev->bwmon_io.base);
-	}
-
-	res = platform_get_resource_byname(pdev,
 		IORESOURCE_MEM, "qfprom_physical");
 	if (!res) {
-		pr_info("unable to get qfprom_physical resource\n");
+		NPU_INFO("unable to get qfprom_physical resource\n");
 	} else {
 		npu_dev->qfprom_io.size = resource_size(res);
 		npu_dev->qfprom_io.phy_addr = res->start;
 		npu_dev->qfprom_io.base = devm_ioremap(&pdev->dev, res->start,
 					npu_dev->qfprom_io.size);
 		if (unlikely(!npu_dev->qfprom_io.base)) {
-			pr_err("unable to map qfprom_physical\n");
+			NPU_ERR("unable to map qfprom_physical\n");
 			rc = -ENOMEM;
 			goto error_get_dev_num;
 		}
-		pr_debug("qfprom_physical phy address=0x%llx virt=%pK\n",
+		NPU_DBG("qfprom_physical phy address=0x%llx virt=%pK\n",
 			res->start, npu_dev->qfprom_io.base);
 	}
 
@@ -1745,14 +1695,14 @@
 	/* character device might be optional */
 	rc = alloc_chrdev_region(&npu_dev->dev_num, 0, 1, DRIVER_NAME);
 	if (rc < 0) {
-		pr_err("alloc_chrdev_region failed: %d\n", rc);
+		NPU_ERR("alloc_chrdev_region failed: %d\n", rc);
 		goto error_get_dev_num;
 	}
 
 	npu_dev->class = class_create(THIS_MODULE, CLASS_NAME);
 	if (IS_ERR(npu_dev->class)) {
 		rc = PTR_ERR(npu_dev->class);
-		pr_err("class_create failed: %d\n", rc);
+		NPU_ERR("class_create failed: %d\n", rc);
 		goto error_class_create;
 	}
 
@@ -1760,7 +1710,7 @@
 		npu_dev->dev_num, NULL, DRIVER_NAME);
 	if (IS_ERR(npu_dev->device)) {
 		rc = PTR_ERR(npu_dev->device);
-		pr_err("device_create failed: %d\n", rc);
+		NPU_ERR("device_create failed: %d\n", rc);
 		goto error_class_device_create;
 	}
 
@@ -1768,15 +1718,15 @@
 	rc = cdev_add(&npu_dev->cdev,
 			MKDEV(MAJOR(npu_dev->dev_num), 0), 1);
 	if (rc < 0) {
-		pr_err("cdev_add failed %d\n", rc);
+		NPU_ERR("cdev_add failed %d\n", rc);
 		goto error_cdev_add;
 	}
 	dev_set_drvdata(npu_dev->device, npu_dev);
-	pr_debug("drvdata %pK %pK\n", dev_get_drvdata(&pdev->dev),
+	NPU_DBG("drvdata %pK %pK\n", dev_get_drvdata(&pdev->dev),
 		dev_get_drvdata(npu_dev->device));
 	rc = sysfs_create_group(&npu_dev->device->kobj, &npu_fs_attr_group);
 	if (rc) {
-		pr_err("unable to register npu sysfs nodes\n");
+		NPU_ERR("unable to register npu sysfs nodes\n");
 		goto error_res_init;
 	}
 
@@ -1802,7 +1752,7 @@
 
 	rc = npu_host_init(npu_dev);
 	if (rc) {
-		pr_err("unable to init host\n");
+		NPU_ERR("unable to init host\n");
 		goto error_driver_init;
 	}
 
@@ -1874,7 +1824,7 @@
 
 	rc = platform_driver_register(&npu_driver);
 	if (rc)
-		pr_err("register failed %d\n", rc);
+		NPU_ERR("register failed %d\n", rc);
 	return rc;
 }
 
diff --git a/drivers/media/platform/msm/npu/npu_host_ipc.c b/drivers/media/platform/msm/npu/npu_host_ipc.c
index b26d221..85e8187 100644
--- a/drivers/media/platform/msm/npu/npu_host_ipc.c
+++ b/drivers/media/platform/msm/npu/npu_host_ipc.c
@@ -3,8 +3,6 @@
  * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 /* -------------------------------------------------------------------------
  * Includes
  * -------------------------------------------------------------------------
@@ -169,9 +167,9 @@
 	}
 
 	if (status == 0)
-		pr_debug("Cmd Msg put on Command Queue - SUCCESSS\n");
+		NPU_DBG("Cmd Msg put on Command Queue - SUCCESS\n");
 	else
-		pr_err("Cmd Msg put on Command Queue - FAILURE\n");
+		NPU_ERR("Cmd Msg put on Command Queue - FAILURE\n");
 
 	return status;
 }
@@ -232,7 +230,7 @@
 	MEMR(npu_dev, (void *)((size_t)read_ptr), packet, 4);
 	packet_size = *((uint32_t *)packet);
 
-	pr_debug("target_que: %d, packet_size: %d\n",
+	NPU_DBG("target_que: %d, packet_size: %d\n",
 			target_que,
 			packet_size);
 
diff --git a/drivers/media/platform/msm/npu/npu_hw.h b/drivers/media/platform/msm/npu/npu_hw.h
index 9894a28..7a884dc 100644
--- a/drivers/media/platform/msm/npu/npu_hw.h
+++ b/drivers/media/platform/msm/npu/npu_hw.h
@@ -47,13 +47,6 @@
 #define NPU_GPR14 (0x00000138)
 #define NPU_GPR15 (0x0000013C)
 
-#define BWMON2_SAMPLING_WINDOW (0x000003A8)
-#define BWMON2_BYTE_COUNT_THRESHOLD_HIGH (0x000003AC)
-#define BWMON2_BYTE_COUNT_THRESHOLD_MEDIUM (0x000003B0)
-#define BWMON2_BYTE_COUNT_THRESHOLD_LOW (0x000003B4)
-#define BWMON2_ZONE_ACTIONS (0x000003B8)
-#define BWMON2_ZONE_COUNT_THRESHOLD (0x000003BC)
-
 #define NPU_QDSP6SS_IPC 0x00088000
 #define NPU_QDSP6SS_IPC1 0x00088004
 
diff --git a/drivers/media/platform/msm/npu/npu_hw_access.c b/drivers/media/platform/msm/npu/npu_hw_access.c
index 43d3189..f2862ab 100644
--- a/drivers/media/platform/msm/npu/npu_hw_access.c
+++ b/drivers/media/platform/msm/npu/npu_hw_access.c
@@ -3,8 +3,6 @@
  * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 /* -------------------------------------------------------------------------
  * Includes
  * -------------------------------------------------------------------------
@@ -65,21 +63,6 @@
 	__iowmb();
 }
 
-uint32_t npu_bwmon_reg_read(struct npu_device *npu_dev, uint32_t off)
-{
-	uint32_t ret = 0;
-
-	ret = readl(npu_dev->bwmon_io.base + off);
-	return ret;
-}
-
-void npu_bwmon_reg_write(struct npu_device *npu_dev, uint32_t off,
-	uint32_t val)
-{
-	writel_relaxed(val, npu_dev->bwmon_io.base + off);
-	__iowmb();
-}
-
 uint32_t npu_qfprom_reg_read(struct npu_device *npu_dev, uint32_t off)
 {
 	uint32_t ret = 0;
@@ -103,7 +86,7 @@
 	uint32_t i = 0;
 	uint32_t num = 0;
 
-	pr_debug("write dst_off %zx size %x\n", dst_off, size);
+	NPU_DBG("write dst_off %zx size %x\n", dst_off, size);
 	num = size/4;
 	for (i = 0; i < num; i++) {
 		writel_relaxed(src_ptr32[i], npu_dev->tcm_io.base + dst_off);
@@ -130,7 +113,7 @@
 	uint32_t i = 0;
 	uint32_t num = 0;
 
-	pr_debug("read src_off %zx size %x\n", src_off, size);
+	NPU_DBG("read src_off %zx size %x\n", src_off, size);
 
 	num = size/4;
 	for (i = 0; i < num; i++) {
@@ -195,7 +178,7 @@
 
 	if (ret_val) {
 		/* mapped already, treat as invalid request */
-		pr_err("ion buf has been mapped\n");
+		NPU_ERR("ion buf has been mapped\n");
 		ret_val = NULL;
 	} else {
 		ret_val = kzalloc(sizeof(*ret_val), GFP_KERNEL);
@@ -262,7 +245,7 @@
 
 	ion_buf = npu_alloc_npu_ion_buffer(client, buf_hdl, size);
 	if (!ion_buf) {
-		pr_err("%s fail to alloc npu_ion_buffer\n", __func__);
+		NPU_ERR("fail to alloc npu_ion_buffer\n");
 		ret = -ENOMEM;
 		return ret;
 	}
@@ -271,7 +254,7 @@
 
 	ion_buf->dma_buf = dma_buf_get(ion_buf->fd);
 	if (IS_ERR_OR_NULL(ion_buf->dma_buf)) {
-		pr_err("dma_buf_get failed %d\n", ion_buf->fd);
+		NPU_ERR("dma_buf_get failed %d\n", ion_buf->fd);
 		ret = -ENOMEM;
 		ion_buf->dma_buf = NULL;
 		goto map_end;
@@ -290,7 +273,7 @@
 	ion_buf->table = dma_buf_map_attachment(ion_buf->attachment,
 			DMA_BIDIRECTIONAL);
 	if (IS_ERR(ion_buf->table)) {
-		pr_err("npu dma_buf_map_attachment failed\n");
+		NPU_ERR("npu dma_buf_map_attachment failed\n");
 		ret = -ENOMEM;
 		ion_buf->table = NULL;
 		goto map_end;
@@ -301,9 +284,9 @@
 	ion_buf->iova = ion_buf->table->sgl->dma_address;
 	ion_buf->size = ion_buf->dma_buf->size;
 	*addr = ion_buf->iova;
-	pr_debug("mapped mem addr:0x%llx size:0x%x\n", ion_buf->iova,
+	NPU_DBG("mapped mem addr:0x%llx size:0x%x\n", ion_buf->iova,
 		ion_buf->size);
-	pr_debug("physical address 0x%llx\n", sg_phys(ion_buf->table->sgl));
+	NPU_DBG("physical address 0x%llx\n", sg_phys(ion_buf->table->sgl));
 map_end:
 	if (ret)
 		npu_mem_unmap(client, buf_hdl, 0);
@@ -318,7 +301,7 @@
 		buf_hdl);
 
 	if (!ion_buf)
-		pr_err("%s cant find ion buf\n", __func__);
+		NPU_ERR("can't find ion buf\n");
 	else
 		dma_sync_sg_for_cpu(&(npu_dev->pdev->dev), ion_buf->table->sgl,
 			ion_buf->table->nents, DMA_BIDIRECTIONAL);
@@ -351,12 +334,12 @@
 	/* clear entry and retrieve the corresponding buffer */
 	ion_buf = npu_get_npu_ion_buffer(client, buf_hdl);
 	if (!ion_buf) {
-		pr_err("%s could not find buffer\n", __func__);
+		NPU_ERR("could not find buffer\n");
 		return;
 	}
 
 	if (ion_buf->iova != addr)
-		pr_warn("unmap address %llu doesn't match %llu\n", addr,
+		NPU_WARN("unmap address %llu doesn't match %llu\n", addr,
 			ion_buf->iova);
 
 	if (ion_buf->table)
@@ -368,7 +351,7 @@
 		dma_buf_put(ion_buf->dma_buf);
 	npu_dev->smmu_ctx.attach_cnt--;
 
-	pr_debug("unmapped mem addr:0x%llx size:0x%x\n", ion_buf->iova,
+	NPU_DBG("unmapped mem addr:0x%llx size:0x%x\n", ion_buf->iova,
 		ion_buf->size);
 	npu_free_npu_ion_buffer(client, buf_hdl);
 }
diff --git a/drivers/media/platform/msm/npu/npu_hw_access.h b/drivers/media/platform/msm/npu/npu_hw_access.h
index d893faa..24da853 100644
--- a/drivers/media/platform/msm/npu/npu_hw_access.h
+++ b/drivers/media/platform/msm/npu/npu_hw_access.h
@@ -56,9 +56,6 @@
 uint32_t npu_apss_shared_reg_read(struct npu_device *npu_dev, uint32_t off);
 void npu_apss_shared_reg_write(struct npu_device *npu_dev, uint32_t off,
 	uint32_t val);
-uint32_t npu_bwmon_reg_read(struct npu_device *npu_dev, uint32_t off);
-void npu_bwmon_reg_write(struct npu_device *npu_dev, uint32_t off,
-	uint32_t val);
 void npu_mem_write(struct npu_device *npu_dev, void *dst, void *src,
 	uint32_t size);
 int32_t npu_mem_read(struct npu_device *npu_dev, void *src, void *dst,
diff --git a/drivers/media/platform/msm/npu/npu_mgr.c b/drivers/media/platform/msm/npu/npu_mgr.c
index 4cbea60..3c716da 100644
--- a/drivers/media/platform/msm/npu/npu_mgr.c
+++ b/drivers/media/platform/msm/npu/npu_mgr.c
@@ -3,8 +3,6 @@
  * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 /* -------------------------------------------------------------------------
  * Includes
  * -------------------------------------------------------------------------
@@ -93,7 +91,7 @@
 	REGW(npu_dev, REG_NPU_HOST_CTRL_VALUE, 0x0);
 	REGW(npu_dev, REG_FW_TO_HOST_EVENT, 0x0);
 
-	pr_debug("fw_dbg_mode %x\n", host_ctx->fw_dbg_mode);
+	NPU_DBG("fw_dbg_mode %x\n", host_ctx->fw_dbg_mode);
 	reg_val = 0;
 	if (host_ctx->fw_dbg_mode & FW_DBG_MODE_PAUSE)
 		reg_val |= HOST_CTRL_STATUS_FW_PAUSE_VAL;
@@ -115,7 +113,7 @@
 	/* Boot the NPU subsystem */
 	host_ctx->subsystem_handle = subsystem_get_local("npu");
 	if (IS_ERR(host_ctx->subsystem_handle)) {
-		pr_err("pil load npu fw failed\n");
+		NPU_ERR("pil load npu fw failed\n");
 		ret = -ENODEV;
 		goto subsystem_get_fail;
 	}
@@ -127,7 +125,7 @@
 	}
 
 	/* Keep reading ctrl status until NPU is ready */
-	pr_debug("waiting for status ready from fw\n");
+	NPU_DBG("waiting for status ready from fw\n");
 
 	if (wait_for_status_ready(npu_dev, REG_NPU_FW_CTRL_STATUS,
 		FW_CTRL_STATUS_MAIN_THREAD_READY_VAL)) {
@@ -149,11 +147,11 @@
 	reinit_completion(&host_ctx->fw_deinit_done);
 
 	mutex_unlock(&host_ctx->lock);
-	pr_debug("firmware init complete\n");
+	NPU_DBG("firmware init complete\n");
 
 	/* Set logging state */
 	if (!npu_hw_log_enabled()) {
-		pr_debug("fw logging disabled\n");
+		NPU_DBG("fw logging disabled\n");
 		turn_off_fw_logging(npu_dev);
 	}
 
@@ -185,10 +183,10 @@
 	if (!ssr && (host_ctx->fw_ref_cnt > 0))
 		host_ctx->fw_ref_cnt--;
 
-	pr_debug("fw_ref_cnt %d\n", host_ctx->fw_ref_cnt);
+	NPU_DBG("fw_ref_cnt %d\n", host_ctx->fw_ref_cnt);
 
 	if (host_ctx->fw_state != FW_ENABLED) {
-		pr_err("fw is not enabled\n");
+		NPU_ERR("fw is not enabled\n");
 		mutex_unlock(&host_ctx->lock);
 		return;
 	}
@@ -211,17 +209,17 @@
 		ret = npu_host_ipc_send_cmd(npu_dev,
 			IPC_QUEUE_CMD_HIGH_PRIORITY, &cmd_shutdown_pkt);
 
-		pr_debug("NPU_IPC_CMD_SHUTDOWN sent status: %d\n", ret);
+		NPU_DBG("NPU_IPC_CMD_SHUTDOWN sent status: %d\n", ret);
 
 		if (ret) {
-			pr_err("npu_host_ipc_send_cmd failed\n");
+			NPU_ERR("npu_host_ipc_send_cmd failed\n");
 		} else {
 			/* Keep reading ctrl status until NPU shuts down */
-			pr_debug("waiting for shutdown status from fw\n");
+			NPU_DBG("waiting for shutdown status from fw\n");
 			if (wait_for_status_ready(npu_dev,
 				REG_NPU_FW_CTRL_STATUS,
 				FW_CTRL_STATUS_SHUTDOWN_DONE_VAL)) {
-				pr_err("wait for fw shutdown timedout\n");
+				NPU_ERR("wait for fw shutdown timedout\n");
 				ret = -ETIMEDOUT;
 			}
 		}
@@ -256,7 +254,7 @@
 
 	complete(&host_ctx->fw_deinit_done);
 	mutex_unlock(&host_ctx->lock);
-	pr_debug("firmware deinit complete\n");
+	NPU_DBG("firmware deinit complete\n");
 	npu_notify_aop(npu_dev, false);
 }
 
@@ -298,7 +296,7 @@
 	struct npu_device *npu_dev = (struct npu_device *)ptr;
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
 
-	pr_debug("NPU irq %d\n", irq);
+	NPU_DBG("NPU irq %d\n", irq);
 	INTERRUPT_ACK(npu_dev, irq);
 
 	/* Check that the event thread currently is running */
@@ -324,7 +322,7 @@
 		return 0;
 
 	if (host_ctx->wdg_irq_sts)
-		pr_info("watchdog irq triggered\n");
+		NPU_INFO("watchdog irq triggered\n");
 
 	fw_deinit(npu_dev, true, force);
 	host_ctx->wdg_irq_sts = 0;
@@ -337,14 +335,14 @@
 		if (network->is_valid && network->cmd_pending &&
 			network->fw_error) {
 			if (network->cmd_async) {
-				pr_debug("async cmd, queue ssr event\n");
+				NPU_DBG("async cmd, queue ssr event\n");
 				kevt.evt.type = MSM_NPU_EVENT_TYPE_SSR;
 				kevt.evt.u.ssr.network_hdl =
 					network->network_hdl;
 				if (npu_queue_event(network->client, &kevt))
-					pr_err("queue npu event failed\n");
+					NPU_ERR("queue npu event failed\n");
 			} else {
-				pr_debug("complete network %llx\n",
+				NPU_DBG("complete network %llx\n",
 					network->id);
 				complete(&network->cmd_done);
 			}
@@ -387,10 +385,10 @@
 	ret = npu_send_misc_cmd(npu_dev, IPC_QUEUE_CMD_HIGH_PRIORITY,
 		&log_packet);
 
-	pr_debug("NPU_IPC_CMD_CONFIG_LOG sent status: %d\n", ret);
+	NPU_DBG("NPU_IPC_CMD_CONFIG_LOG sent status: %d\n", ret);
 
 	if (ret)
-		pr_err("npu_host_ipc_send_cmd failed\n");
+		NPU_ERR("npu_host_ipc_send_cmd failed\n");
 }
 
 static int wait_for_status_ready(struct npu_device *npu_dev,
@@ -409,12 +407,12 @@
 		msleep(NPU_FW_TIMEOUT_POLL_INTERVAL_MS);
 		wait_cnt += NPU_FW_TIMEOUT_POLL_INTERVAL_MS;
 		if (wait_cnt >= max_wait_ms) {
-			pr_err("timeout wait for status %x[%x] in reg %x\n",
+			NPU_ERR("timeout wait for status %x[%x] in reg %x\n",
 				status_bits, ctrl_sts, status_reg);
 			return -EPERM;
 		}
 	}
-	pr_debug("status %x[reg %x] ready received\n", status_bits, status_reg);
+	NPU_DBG("status %x[reg %x] ready received\n", status_bits, status_reg);
 	return 0;
 }
 
@@ -432,25 +430,25 @@
 	int buf_size, rc = 0;
 
 	if (!npu_dev->mbox_aop.chan) {
-		pr_warn("aop mailbox channel is not available\n");
+		NPU_WARN("aop mailbox channel is not available\n");
 		return 0;
 	}
 
 	buf_size = scnprintf(buf, MAX_LEN, "{class: bcm, res: npu_on, val: %d}",
 		on ? 1 : 0);
 	if (buf_size < 0) {
-		pr_err("prepare qmp notify buf failed\n");
+		NPU_ERR("prepare qmp notify buf failed\n");
 		return -EINVAL;
 	}
 
-	pr_debug("send msg %s to aop\n", buf);
+	NPU_DBG("send msg %s to aop\n", buf);
 	memset(&pkt, 0, sizeof(pkt));
 	pkt.size = (buf_size + 3) & ~0x3;
 	pkt.data = buf;
 
 	rc = mbox_send_message(npu_dev->mbox_aop.chan, &pkt);
 	if (rc < 0)
-		pr_err("qmp message send failed, ret=%d\n", rc);
+		NPU_ERR("qmp message send failed, ret=%d\n", rc);
 
 	return rc;
 }
@@ -491,7 +489,7 @@
 	}
 
 	if (i == MAX_LOADED_NETWORK) {
-		pr_err("No free network\n");
+		NPU_ERR("No free network\n");
 		return NULL;
 	}
 
@@ -527,12 +525,12 @@
 	}
 
 	if ((i == MAX_LOADED_NETWORK) || !network->is_valid) {
-		pr_err("network hdl invalid %d\n", hdl);
+		NPU_ERR("network hdl invalid %d\n", hdl);
 		return NULL;
 	}
 
 	if (client && (client != network->client)) {
-		pr_err("network %lld doesn't belong to this client\n",
+		NPU_ERR("network %lld doesn't belong to this client\n",
 			network->id);
 		return NULL;
 	}
@@ -550,13 +548,13 @@
 
 	if (id < 1 || id > MAX_LOADED_NETWORK ||
 		!ctx->networks[id - 1].is_valid) {
-		pr_err("Invalid network id %d\n", (int32_t)id);
+		NPU_ERR("Invalid network id %d\n", (int32_t)id);
 		return NULL;
 	}
 
 	network = &ctx->networks[id - 1];
 	if (client && (client != network->client)) {
-		pr_err("network %lld doesn't belong to this client\n", id);
+		NPU_ERR("network %lld doesn't belong to this client\n", id);
 		return NULL;
 	}
 
@@ -579,7 +577,7 @@
 			memset(network, 0, sizeof(struct npu_network));
 			ctx->network_num--;
 		} else {
-			pr_warn("network %lld:%d is in use\n", network->id,
+			NPU_WARN("network %lld:%d is in use\n", network->id,
 				atomic_read(&network->ref_cnt));
 		}
 	}
@@ -619,28 +617,28 @@
 		struct ipc_msg_execute_pkt *exe_rsp_pkt =
 			(struct ipc_msg_execute_pkt *)msg;
 
-		pr_debug("NPU_IPC_MSG_EXECUTE_DONE status: %d\n",
+		NPU_DBG("NPU_IPC_MSG_EXECUTE_DONE status: %d\n",
 			exe_rsp_pkt->header.status);
-		pr_debug("trans_id : %d\n", exe_rsp_pkt->header.trans_id);
-		pr_debug("e2e_IPC_time: %d (in tick count)\n",
+		NPU_DBG("trans_id : %d\n", exe_rsp_pkt->header.trans_id);
+		NPU_DBG("e2e_IPC_time: %d (in tick count)\n",
 			exe_rsp_pkt->stats.e2e_ipc_tick_count);
-		pr_debug("aco_load_time: %d (in tick count)\n",
+		NPU_DBG("aco_load_time: %d (in tick count)\n",
 			exe_rsp_pkt->stats.aco_load_tick_count);
-		pr_debug("aco_execute_time: %d (in tick count)\n",
+		NPU_DBG("aco_execute_time: %d (in tick count)\n",
 			exe_rsp_pkt->stats.aco_execution_tick_count);
-		pr_debug("total_num_layers: %d\n",
+		NPU_DBG("total_num_layers: %d\n",
 			exe_rsp_pkt->stats.exe_stats.total_num_layers);
 
 		network = get_network_by_hdl(host_ctx, NULL,
 			exe_rsp_pkt->network_hdl);
 		if (!network) {
-			pr_err("can't find network %x\n",
+			NPU_ERR("can't find network %x\n",
 				exe_rsp_pkt->network_hdl);
 			break;
 		}
 
 		if (network->trans_id != exe_rsp_pkt->header.trans_id) {
-			pr_err("execute_pkt trans_id is not match %d:%d\n",
+			NPU_ERR("execute_pkt trans_id is not match %d:%d\n",
 				network->trans_id,
 				exe_rsp_pkt->header.trans_id);
 			network_put(network);
@@ -653,14 +651,14 @@
 		if (!network->cmd_async) {
 			complete(&network->cmd_done);
 		} else {
-			pr_debug("async cmd, queue event\n");
+			NPU_DBG("async cmd, queue event\n");
 			kevt.evt.type = MSM_NPU_EVENT_TYPE_EXEC_DONE;
 			kevt.evt.u.exec_done.network_hdl =
 				exe_rsp_pkt->network_hdl;
 			kevt.evt.u.exec_done.exec_result =
 				exe_rsp_pkt->header.status;
 			if (npu_queue_event(network->client, &kevt))
-				pr_err("queue npu event failed\n");
+				NPU_ERR("queue npu event failed\n");
 		}
 		network_put(network);
 
@@ -672,29 +670,29 @@
 			(struct ipc_msg_execute_pkt_v2 *)msg;
 		uint32_t stats_size = 0;
 
-		pr_debug("NPU_IPC_MSG_EXECUTE_V2_DONE status: %d\n",
+		NPU_DBG("NPU_IPC_MSG_EXECUTE_V2_DONE status: %d\n",
 			exe_rsp_pkt->header.status);
-		pr_debug("trans_id : %d\n", exe_rsp_pkt->header.trans_id);
+		NPU_DBG("trans_id : %d\n", exe_rsp_pkt->header.trans_id);
 
 		network = get_network_by_hdl(host_ctx, NULL,
 			exe_rsp_pkt->network_hdl);
 		if (!network) {
-			pr_err("can't find network %x\n",
+			NPU_ERR("can't find network %x\n",
 				exe_rsp_pkt->network_hdl);
 			break;
 		}
 
 		if (network->trans_id != exe_rsp_pkt->header.trans_id) {
-			pr_err("execute_pkt_v2 trans_id is not match %d:%d\n",
+			NPU_ERR("execute_pkt_v2 trans_id is not match %d:%d\n",
 				network->trans_id,
 				exe_rsp_pkt->header.trans_id);
 			network_put(network);
 			break;
 		}
 
-		pr_debug("network id : %lld\n", network->id);
+		NPU_DBG("network id : %lld\n", network->id);
 		stats_size = exe_rsp_pkt->header.size - sizeof(*exe_rsp_pkt);
-		pr_debug("stats_size %d:%d\n", exe_rsp_pkt->header.size,
+		NPU_DBG("stats_size %d:%d\n", exe_rsp_pkt->header.size,
 			stats_size);
 		stats_size = stats_size < network->stats_buf_size ?
 			stats_size : network->stats_buf_size;
@@ -707,7 +705,7 @@
 		network->cmd_ret_status = exe_rsp_pkt->header.status;
 
 		if (network->cmd_async) {
-			pr_debug("async cmd, queue event\n");
+			NPU_DBG("async cmd, queue event\n");
 			kevt.evt.type = MSM_NPU_EVENT_TYPE_EXEC_V2_DONE;
 			kevt.evt.u.exec_v2_done.network_hdl =
 				exe_rsp_pkt->network_hdl;
@@ -717,7 +715,7 @@
 			kevt.reserved[0] = (uint64_t)network->stats_buf;
 			kevt.reserved[1] = (uint64_t)network->stats_buf_u;
 			if (npu_queue_event(network->client, &kevt))
-				pr_err("queue npu event failed\n");
+				NPU_ERR("queue npu event failed\n");
 		} else {
 			complete(&network->cmd_done);
 		}
@@ -730,7 +728,7 @@
 		struct ipc_msg_load_pkt *load_rsp_pkt =
 			(struct ipc_msg_load_pkt *)msg;
 
-		pr_debug("NPU_IPC_MSG_LOAD_DONE status: %d, trans_id: %d\n",
+		NPU_DBG("NPU_IPC_MSG_LOAD_DONE status: %d, trans_id: %d\n",
 			load_rsp_pkt->header.status,
 			load_rsp_pkt->header.trans_id);
 
@@ -738,16 +736,16 @@
 		 * the upper 16 bits in returned network_hdl is
 		 * the network ID
 		 */
-		pr_debug("network_hdl: %x\n", load_rsp_pkt->network_hdl);
+		NPU_DBG("network_hdl: %x\n", load_rsp_pkt->network_hdl);
 		network_id = load_rsp_pkt->network_hdl >> 16;
 		network = get_network_by_id(host_ctx, NULL, network_id);
 		if (!network) {
-			pr_err("can't find network %d\n", network_id);
+			NPU_ERR("can't find network %d\n", network_id);
 			break;
 		}
 
 		if (network->trans_id != load_rsp_pkt->header.trans_id) {
-			pr_err("load_rsp_pkt trans_id is not match %d:%d\n",
+			NPU_ERR("load_rsp_pkt trans_id is not match %d:%d\n",
 				network->trans_id,
 				load_rsp_pkt->header.trans_id);
 			network_put(network);
@@ -767,20 +765,20 @@
 		struct ipc_msg_unload_pkt *unload_rsp_pkt =
 			(struct ipc_msg_unload_pkt *)msg;
 
-		pr_debug("NPU_IPC_MSG_UNLOAD_DONE status: %d, trans_id: %d\n",
+		NPU_DBG("NPU_IPC_MSG_UNLOAD_DONE status: %d, trans_id: %d\n",
 			unload_rsp_pkt->header.status,
 			unload_rsp_pkt->header.trans_id);
 
 		network = get_network_by_hdl(host_ctx, NULL,
 			unload_rsp_pkt->network_hdl);
 		if (!network) {
-			pr_err("can't find network %x\n",
+			NPU_ERR("can't find network %x\n",
 				unload_rsp_pkt->network_hdl);
 			break;
 		}
 
 		if (network->trans_id != unload_rsp_pkt->header.trans_id) {
-			pr_err("unload_rsp_pkt trans_id is not match %d:%d\n",
+			NPU_ERR("unload_rsp_pkt trans_id is not match %d:%d\n",
 				network->trans_id,
 				unload_rsp_pkt->header.trans_id);
 			network_put(network);
@@ -799,13 +797,13 @@
 		struct ipc_msg_loopback_pkt *lb_rsp_pkt =
 			(struct ipc_msg_loopback_pkt *)msg;
 
-		pr_debug("NPU_IPC_MSG_LOOPBACK_DONE loopbackParams: 0x%x\n",
+		NPU_DBG("NPU_IPC_MSG_LOOPBACK_DONE loopbackParams: 0x%x\n",
 			lb_rsp_pkt->loopbackParams);
 		complete_all(&host_ctx->loopback_done);
 		break;
 	}
 	default:
-		pr_err("Not supported apps response received %d\n",
+		NPU_ERR("Not supported apps response received %d\n",
 			msg_id);
 		break;
 	}
@@ -822,12 +820,12 @@
 
 	mutex_lock(&host_ctx->lock);
 	if (host_ctx->fw_state == FW_DISABLED) {
-		pr_warn("handle npu session msg when FW is disabled\n");
+		NPU_WARN("handle npu session msg when FW is disabled\n");
 		goto skip_read_msg;
 	}
 
 	while (npu_host_ipc_read_msg(npu_dev, IPC_QUEUE_APPS_RSP, msg) == 0) {
-		pr_debug("received from msg queue\n");
+		NPU_DBG("received from msg queue\n");
 		app_msg_proc(host_ctx, msg);
 	}
 
@@ -852,7 +850,7 @@
 		npu_process_log_message(npu_dev, log_msg, size);
 		break;
 	default:
-		pr_err("unsupported log response received %d\n", msg_id);
+		NPU_ERR("unsupported log response received %d\n", msg_id);
 		break;
 	}
 }
@@ -869,12 +867,12 @@
 
 	mutex_lock(&host_ctx->lock);
 	if (host_ctx->fw_state == FW_DISABLED) {
-		pr_warn("handle npu session msg when FW is disabled\n");
+		NPU_WARN("handle npu session msg when FW is disabled\n");
 		goto skip_read_msg;
 	}
 
 	while (npu_host_ipc_read_msg(npu_dev, IPC_QUEUE_LOG, msg) == 0) {
-		pr_debug("received from log queue\n");
+		NPU_DBG("received from log queue\n");
 		log_msg_proc(npu_dev, msg);
 	}
 
@@ -915,7 +913,7 @@
 	if (host_ctx->fw_error && (host_ctx->fw_state == FW_ENABLED) &&
 		!wait_for_completion_interruptible_timeout(
 		&host_ctx->fw_deinit_done, NW_CMD_TIMEOUT))
-		pr_warn("npu: wait for fw_deinit_done time out\n");
+		NPU_WARN("npu: wait for fw_deinit_done time out\n");
 
 	npu_mem_unmap(client, unmap_ioctl->buf_ion_hdl,
 		unmap_ioctl->npu_phys_addr);
@@ -930,13 +928,13 @@
 
 	if (network->fw_error || host_ctx->fw_error ||
 		(host_ctx->fw_state == FW_DISABLED)) {
-		pr_err("fw is in error state or disabled, can't send network cmd\n");
+		NPU_ERR("fw is in error state or disabled\n");
 		ret = -EIO;
 	} else if (network->cmd_pending) {
-		pr_err("Another cmd is pending\n");
+		NPU_ERR("Another cmd is pending\n");
 		ret = -EBUSY;
 	} else {
-		pr_debug("Send cmd %d network id %lld\n",
+		NPU_DBG("Send cmd %d network id %lld\n",
 			((struct ipc_cmd_header_pkt *)cmd_ptr)->cmd_type,
 			network->id);
 		network->cmd_async = async;
@@ -960,10 +958,10 @@
 
 	mutex_lock(&host_ctx->lock);
 	if (host_ctx->fw_error || (host_ctx->fw_state == FW_DISABLED)) {
-		pr_err("fw is in error state or disabled, can't send misc cmd\n");
+		NPU_ERR("fw is in error state or disabled\n");
 		ret = -EIO;
 	} else {
-		pr_debug("Send cmd %d\n",
+		NPU_DBG("Send cmd %d\n",
 			((struct ipc_cmd_header_pkt *)cmd_ptr)->cmd_type);
 		ret = npu_host_ipc_send_cmd(npu_dev, q_idx, cmd_ptr);
 	}
@@ -985,7 +983,7 @@
 	param->variable_size_in_bits =
 		layer_info->patch_info.variable_size_in_bits;
 
-	pr_debug("copy_patch_data: %x %d %x %x %x %x\n",
+	NPU_DBG("copy_patch_data: %x %d %x %x %x %x\n",
 		param->value,
 		param->chunk_id,
 		param->loc_offset,
@@ -1004,7 +1002,7 @@
 		patch_info->instruction_size_in_bytes;
 	param->shift_value_in_bits = patch_info->shift_value_in_bits;
 	param->variable_size_in_bits = patch_info->variable_size_in_bits;
-	pr_debug("copy_patch_data_v2: %x %d %x %x %x %x\n",
+	NPU_DBG("copy_patch_data_v2: %x %d %x %x %x %x\n",
 		param->value,
 		param->chunk_id,
 		param->loc_offset,
@@ -1028,7 +1026,7 @@
 			max_perf_mode = network->perf_mode;
 		network++;
 	}
-	pr_debug("max perf mode for networks: %d\n", max_perf_mode);
+	NPU_DBG("max perf mode for networks: %d\n", max_perf_mode);
 
 	return max_perf_mode;
 }
@@ -1072,7 +1070,7 @@
 
 	ret = npu_set_uc_power_level(npu_dev, networks_perf_mode);
 	if (ret) {
-		pr_err("network load failed due to power level set\n");
+		NPU_ERR("network load failed due to power level set\n");
 		goto error_free_network;
 	}
 
@@ -1091,7 +1089,7 @@
 	reinit_completion(&network->cmd_done);
 	ret = npu_send_network_cmd(npu_dev, network, &load_packet, false);
 	if (ret) {
-		pr_err("NPU_IPC_CMD_LOAD sent failed: %d\n", ret);
+		NPU_ERR("NPU_IPC_CMD_LOAD sent failed: %d\n", ret);
 		goto error_free_network;
 	}
 
@@ -1104,17 +1102,17 @@
 
 	mutex_lock(&host_ctx->lock);
 	if (!ret) {
-		pr_err_ratelimited("NPU_IPC_CMD_LOAD time out\n");
+		NPU_ERR("NPU_IPC_CMD_LOAD time out\n");
 		ret = -ETIMEDOUT;
 		goto error_free_network;
 	} else if (ret < 0) {
-		pr_err("NPU_IPC_CMD_LOAD is interrupted by signal\n");
+		NPU_ERR("NPU_IPC_CMD_LOAD is interrupted by signal\n");
 		goto error_free_network;
 	}
 
 	if (network->fw_error) {
 		ret = -EIO;
-		pr_err("fw is in error state during load network\n");
+		NPU_ERR("fw is in error state during load network\n");
 		goto error_free_network;
 	}
 
@@ -1186,17 +1184,17 @@
 
 	/* verify mapped physical address */
 	if (!npu_mem_verify_addr(client, network->phy_add)) {
-		pr_err("Invalid network address %llx\n", network->phy_add);
+		NPU_ERR("Invalid network address %llx\n", network->phy_add);
 		ret = -EINVAL;
 		goto error_free_network;
 	}
 
-	pr_debug("network address %llx\n", network->phy_add);
+	NPU_DBG("network address %llx\n", network->phy_add);
 	networks_perf_mode = find_networks_perf_mode(host_ctx);
 
 	ret = npu_set_uc_power_level(npu_dev, networks_perf_mode);
 	if (ret) {
-		pr_err("network load failed due to power level set\n");
+		NPU_ERR("network load failed due to power level set\n");
 		goto error_free_network;
 	}
 
@@ -1217,7 +1215,7 @@
 	reinit_completion(&network->cmd_done);
 	ret = npu_send_network_cmd(npu_dev, network, load_packet, false);
 	if (ret) {
-		pr_debug("NPU_IPC_CMD_LOAD_V2 sent failed: %d\n", ret);
+		NPU_DBG("NPU_IPC_CMD_LOAD_V2 sent failed: %d\n", ret);
 		goto error_free_network;
 	}
 
@@ -1231,17 +1229,17 @@
 	mutex_lock(&host_ctx->lock);
 
 	if (!ret) {
-		pr_err_ratelimited("npu: NPU_IPC_CMD_LOAD time out\n");
+		NPU_ERR("npu: NPU_IPC_CMD_LOAD time out\n");
 		ret = -ETIMEDOUT;
 		goto error_free_network;
 	} else if (ret < 0) {
-		pr_err("NPU_IPC_CMD_LOAD_V2 is interrupted by signal\n");
+		NPU_ERR("NPU_IPC_CMD_LOAD_V2 is interrupted by signal\n");
 		goto error_free_network;
 	}
 
 	if (network->fw_error) {
 		ret = -EIO;
-		pr_err("fw is in error state during load_v2 network\n");
+		NPU_ERR("fw is in error state during load_v2 network\n");
 		goto error_free_network;
 	}
 
@@ -1287,18 +1285,18 @@
 	}
 
 	if (!network->is_active) {
-		pr_err("network is not active\n");
+		NPU_ERR("network is not active\n");
 		network_put(network);
 		mutex_unlock(&host_ctx->lock);
 		return -EINVAL;
 	}
 
 	if (network->fw_error) {
-		pr_err("fw in error state, skip unload network in fw\n");
+		NPU_ERR("fw in error state, skip unload network in fw\n");
 		goto free_network;
 	}
 
-	pr_debug("Unload network %lld\n", network->id);
+	NPU_DBG("Unload network %lld\n", network->id);
 	/* prepare IPC packet for UNLOAD */
 	unload_packet.header.cmd_type = NPU_IPC_CMD_UNLOAD;
 	unload_packet.header.size = sizeof(struct ipc_cmd_unload_pkt);
@@ -1312,13 +1310,13 @@
 	ret = npu_send_network_cmd(npu_dev, network, &unload_packet, false);
 
 	if (ret) {
-		pr_err("NPU_IPC_CMD_UNLOAD sent failed: %d\n", ret);
+		NPU_ERR("NPU_IPC_CMD_UNLOAD sent failed: %d\n", ret);
 		/*
 		 * If another command is running on this network,
 		 * don't free_network now.
 		 */
 		if (ret == -EBUSY) {
-			pr_err("Network is running, retry later\n");
+			NPU_ERR("Network is running, retry later\n");
 			network_put(network);
 			mutex_unlock(&host_ctx->lock);
 			return ret;
@@ -1336,22 +1334,22 @@
 	mutex_lock(&host_ctx->lock);
 
 	if (!ret) {
-		pr_err_ratelimited("npu: NPU_IPC_CMD_UNLOAD time out\n");
+		NPU_ERR("npu: NPU_IPC_CMD_UNLOAD time out\n");
 		network->cmd_pending = false;
 		ret = -ETIMEDOUT;
 		goto free_network;
 	} else if (ret < 0) {
-		pr_err("Wait for unload done interrupted by signal\n");
+		NPU_ERR("Wait for unload done interrupted by signal\n");
 		network->cmd_pending = false;
 		goto free_network;
 	}
 
 	if (network->fw_error) {
 		ret = -EIO;
-		pr_err("fw is in error state during unload network\n");
+		NPU_ERR("fw is in error state during unload network\n");
 	} else {
 		ret = network->cmd_ret_status;
-		pr_debug("unload network status %d\n", ret);
+		NPU_DBG("unload network status %d\n", ret);
 	}
 
 free_network:
@@ -1366,7 +1364,7 @@
 	if (networks_perf_mode > 0) {
 		ret = npu_set_uc_power_level(npu_dev, networks_perf_mode);
 		if (ret)
-			pr_warn("restore uc power level failed\n");
+			NPU_WARN("restore uc power level failed\n");
 	}
 	mutex_unlock(&host_ctx->lock);
 	fw_deinit(npu_dev, false, true);
@@ -1395,23 +1393,23 @@
 	}
 
 	if (!network->is_active) {
-		pr_err("network is not active\n");
+		NPU_ERR("network is not active\n");
 		ret = -EINVAL;
 		goto exec_done;
 	}
 
 	if (network->fw_error) {
-		pr_err("fw is in error state\n");
+		NPU_ERR("fw is in error state\n");
 		ret = -EIO;
 		goto exec_done;
 	}
 
-	pr_debug("execute network %lld\n", network->id);
+	NPU_DBG("execute network %lld\n", network->id);
 	memset(&exec_packet, 0, sizeof(exec_packet));
 	if (exec_ioctl->patching_required) {
 		if ((exec_ioctl->input_layer_num != 1) ||
 			(exec_ioctl->output_layer_num != 1)) {
-			pr_err("Invalid input/output layer num\n");
+			NPU_ERR("Invalid input/output layer num\n");
 			ret = -EINVAL;
 			goto exec_done;
 		}
@@ -1421,7 +1419,7 @@
 		/* verify mapped physical address */
 		if (!npu_mem_verify_addr(client, input_off) ||
 			!npu_mem_verify_addr(client, output_off)) {
-			pr_err("Invalid patch buf address\n");
+			NPU_ERR("Invalid patch buf address\n");
 			ret = -EINVAL;
 			goto exec_done;
 		}
@@ -1447,12 +1445,12 @@
 	ret = npu_send_network_cmd(npu_dev, network, &exec_packet, async_ioctl);
 
 	if (ret) {
-		pr_err("NPU_IPC_CMD_EXECUTE sent failed: %d\n", ret);
+		NPU_ERR("NPU_IPC_CMD_EXECUTE sent failed: %d\n", ret);
 		goto exec_done;
 	}
 
 	if (async_ioctl) {
-		pr_debug("Async ioctl, return now\n");
+		NPU_DBG("Async ioctl, return now\n");
 		goto exec_done;
 	}
 
@@ -1465,24 +1463,24 @@
 
 	mutex_lock(&host_ctx->lock);
 	if (!ret) {
-		pr_err_ratelimited("npu: NPU_IPC_CMD_EXECUTE time out\n");
+		NPU_ERR("npu: NPU_IPC_CMD_EXECUTE time out\n");
 		/* dump debug stats */
 		npu_dump_debug_timeout_stats(npu_dev);
 		network->cmd_pending = false;
 		ret = -ETIMEDOUT;
 		goto exec_done;
 	} else if (ret == -ERESTARTSYS) {
-		pr_err("Wait for execution done interrupted by signal\n");
+		NPU_ERR("Wait for execution done interrupted by signal\n");
 		network->cmd_pending = false;
 		goto exec_done;
 	}
 
 	if (network->fw_error) {
 		ret = -EIO;
-		pr_err("fw is in error state during execute network\n");
+		NPU_ERR("fw is in error state during execute network\n");
 	} else {
 		ret = network->cmd_ret_status;
-		pr_debug("execution status %d\n", ret);
+		NPU_DBG("execution status %d\n", ret);
 	}
 
 exec_done:
@@ -1494,7 +1492,7 @@
 	 * as error in order to force npu fw to stop execution
 	 */
 	if ((ret == -ETIMEDOUT) || (ret == -ERESTARTSYS)) {
-		pr_err("Error handling after execution failure\n");
+		NPU_ERR("Error handling after execution failure\n");
 		host_error_hdlr(npu_dev, true);
 	}
 
@@ -1524,18 +1522,18 @@
 	}
 
 	if (!network->is_active) {
-		pr_err("network is not active\n");
+		NPU_ERR("network is not active\n");
 		ret = -EINVAL;
 		goto exec_v2_done;
 	}
 
 	if (network->fw_error) {
-		pr_err("fw is in error state\n");
+		NPU_ERR("fw is in error state\n");
 		ret = -EIO;
 		goto exec_v2_done;
 	}
 
-	pr_debug("execute_v2 network %lld\n", network->id);
+	NPU_DBG("execute_v2 network %lld\n", network->id);
 	num_patch_params = exec_ioctl->patch_buf_info_num;
 	pkt_size = num_patch_params * sizeof(struct npu_patch_params_v2) +
 		sizeof(*exec_packet);
@@ -1548,17 +1546,17 @@
 
 	for (i = 0; i < num_patch_params; i++) {
 		exec_packet->patch_params[i].id = patch_buf_info[i].buf_id;
-		pr_debug("%d: patch_id: %x\n", i,
+		NPU_DBG("%d: patch_id: %x\n", i,
 			exec_packet->patch_params[i].id);
 		exec_packet->patch_params[i].value =
 			patch_buf_info[i].buf_phys_addr;
-		pr_debug("%d: patch value: %x\n", i,
+		NPU_DBG("%d: patch value: %x\n", i,
 			exec_packet->patch_params[i].value);
 
 		/* verify mapped physical address */
 		if (!npu_mem_verify_addr(client,
 			patch_buf_info[i].buf_phys_addr)) {
-			pr_err("Invalid patch value\n");
+			NPU_ERR("Invalid patch value\n");
 			ret = -EINVAL;
 			goto free_exec_packet;
 		}
@@ -1576,7 +1574,7 @@
 	network->stats_buf_u = (void __user *)exec_ioctl->stats_buf_addr;
 	network->stats_buf_size = exec_ioctl->stats_buf_size;
 
-	pr_debug("Execute_v2 flags %x stats_buf_size %d\n",
+	NPU_DBG("Execute_v2 flags %x stats_buf_size %d\n",
 		exec_packet->header.flags, exec_ioctl->stats_buf_size);
 
 	/* Send it on the high priority queue */
@@ -1584,12 +1582,12 @@
 	ret = npu_send_network_cmd(npu_dev, network, exec_packet, async_ioctl);
 
 	if (ret) {
-		pr_err("NPU_IPC_CMD_EXECUTE_V2 sent failed: %d\n", ret);
+		NPU_ERR("NPU_IPC_CMD_EXECUTE_V2 sent failed: %d\n", ret);
 		goto free_exec_packet;
 	}
 
 	if (async_ioctl) {
-		pr_debug("Async ioctl, return now\n");
+		NPU_DBG("Async ioctl, return now\n");
 		goto free_exec_packet;
 	}
 
@@ -1602,21 +1600,21 @@
 
 	mutex_lock(&host_ctx->lock);
 	if (!ret) {
-		pr_err_ratelimited("npu: NPU_IPC_CMD_EXECUTE_V2 time out\n");
+		NPU_ERR("npu: NPU_IPC_CMD_EXECUTE_V2 time out\n");
 		/* dump debug stats */
 		npu_dump_debug_timeout_stats(npu_dev);
 		network->cmd_pending = false;
 		ret = -ETIMEDOUT;
 		goto free_exec_packet;
 	} else if (ret == -ERESTARTSYS) {
-		pr_err("Wait for execution_v2 done interrupted by signal\n");
+		NPU_ERR("Wait for execution_v2 done interrupted by signal\n");
 		network->cmd_pending = false;
 		goto free_exec_packet;
 	}
 
 	if (network->fw_error) {
 		ret = -EIO;
-		pr_err("fw is in error state during execute_v2 network\n");
+		NPU_ERR("fw is in error state during execute_v2 network\n");
 		goto free_exec_packet;
 	}
 
@@ -1627,11 +1625,11 @@
 			(void __user *)exec_ioctl->stats_buf_addr,
 			network->stats_buf,
 			exec_ioctl->stats_buf_size)) {
-			pr_err("copy stats to user failed\n");
+			NPU_ERR("copy stats to user failed\n");
 			exec_ioctl->stats_buf_size = 0;
 		}
 	} else {
-		pr_err("execution failed %d\n", ret);
+		NPU_ERR("execution failed %d\n", ret);
 	}
 
 free_exec_packet:
@@ -1645,7 +1643,7 @@
 	 * as error in order to force npu fw to stop execution
 	 */
 	if ((ret == -ETIMEDOUT) || (ret == -ERESTARTSYS)) {
-		pr_err("Error handling after execution failure\n");
+		NPU_ERR("Error handling after execution failure\n");
 		host_error_hdlr(npu_dev, true);
 	}
 
@@ -1673,7 +1671,7 @@
 	ret = npu_send_misc_cmd(npu_dev, IPC_QUEUE_APPS_EXEC, &loopback_packet);
 
 	if (ret) {
-		pr_err("NPU_IPC_CMD_LOOPBACK sent failed: %d\n", ret);
+		NPU_ERR("NPU_IPC_CMD_LOOPBACK sent failed: %d\n", ret);
 		goto loopback_exit;
 	}
 
@@ -1683,10 +1681,10 @@
 		NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT);
 
 	if (!ret) {
-		pr_err_ratelimited("npu: NPU_IPC_CMD_LOOPBACK time out\n");
+		NPU_ERR("npu: NPU_IPC_CMD_LOOPBACK time out\n");
 		ret = -ETIMEDOUT;
 	} else if (ret < 0) {
-		pr_err("Wait for loopback done interrupted by signal\n");
+		NPU_ERR("Wait for loopback done interrupted by signal\n");
 	}
 
 loopback_exit:
@@ -1708,7 +1706,7 @@
 	for (i = 0; i < MAX_LOADED_NETWORK; i++) {
 		network = &host_ctx->networks[i];
 		if (network->client == client) {
-			pr_warn("network %d is not unloaded before close\n",
+			NPU_WARN("network %d is not unloaded before close\n",
 				network->network_hdl);
 			unload_req.network_hdl = network->network_hdl;
 			npu_host_unload_network(client, &unload_req);
@@ -1719,7 +1717,7 @@
 	while (!list_empty(&client->mapped_buffer_list)) {
 		ion_buf = list_first_entry(&client->mapped_buffer_list,
 			struct npu_ion_buf, list);
-		pr_warn("unmap buffer %x:%llx\n", ion_buf->fd, ion_buf->iova);
+		NPU_WARN("unmap buffer %x:%llx\n", ion_buf->fd, ion_buf->iova);
 		unmap_req.buf_ion_hdl = ion_buf->fd;
 		unmap_req.npu_phys_addr = ion_buf->iova;
 		npu_host_unmap_buf(client, &unmap_req);
diff --git a/drivers/media/platform/msm/synx/synx_util.c b/drivers/media/platform/msm/synx/synx_util.c
index c72fac9..829298d 100644
--- a/drivers/media/platform/msm/synx/synx_util.c
+++ b/drivers/media/platform/msm/synx/synx_util.c
@@ -516,7 +516,9 @@
 	if (!row)
 		return -EINVAL;
 
-	get_random_bytes(&row->secure_key, sizeof(row->secure_key));
+	if (!row->secure_key)
+		get_random_bytes(&row->secure_key, sizeof(row->secure_key));
+
 	return row->secure_key;
 }
 
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index 69d41ed..c71cf0c 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -71,7 +71,6 @@
 		.maximum = 0,
 		.default_value = 0,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -95,7 +94,6 @@
 				  V4L2_MPEG_VIDC_VIDEO_PICTYPE_DECODE_P |
 				  V4L2_MPEG_VIDC_VIDEO_PICTYPE_DECODE_B),
 		.step = 0,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -106,7 +104,7 @@
 		.maximum = V4L2_MPEG_MSM_VIDC_ENABLE,
 		.default_value = V4L2_MPEG_MSM_VIDC_DISABLE,
 		.step = 1,
-		},
+	},
 	{
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_SECURE,
 		.name = "Secure mode",
@@ -123,7 +121,6 @@
 		.minimum = EXTRADATA_NONE,
 		.maximum = EXTRADATA_DEFAULT | EXTRADATA_ADVANCED,
 		.default_value = EXTRADATA_DEFAULT,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -136,7 +133,6 @@
 			V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_SECONDARY,
 		.default_value =
 			V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_PRIMARY,
-		.menu_skip_mask = 0,
 		.step = 1,
 		.qmenu = NULL,
 	},
@@ -154,7 +150,6 @@
 		(1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH) |
 		(1 << V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH)
 		),
-		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
 	},
 	{
@@ -186,7 +181,6 @@
 		(1 << V4L2_MPEG_VIDEO_H264_LEVEL_6_1) |
 		(1 << V4L2_MPEG_VIDEO_H264_LEVEL_6_2)
 		),
-		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
 	},
 	{
@@ -201,7 +195,6 @@
 		(1 << V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE) |
 		(1 << V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10)
 		),
-		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
 	},
 	{
@@ -226,7 +219,6 @@
 		(1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1) |
 		(1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2)
 		),
-		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
 	},
 	{
@@ -240,7 +232,6 @@
 		(1 << V4L2_MPEG_VIDEO_HEVC_TIER_MAIN) |
 		(1 << V4L2_MPEG_VIDEO_HEVC_TIER_HIGH)
 		),
-		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
 	},
 	{
@@ -251,7 +242,6 @@
 		.maximum = V4L2_MPEG_VIDEO_VP8_PROFILE_0,
 		.default_value = V4L2_MPEG_VIDEO_VP8_PROFILE_0,
 		.menu_skip_mask = ~(1 << V4L2_MPEG_VIDEO_VP8_PROFILE_0),
-		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
 	},
 	{
@@ -269,7 +259,6 @@
 		(1 << V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_3)
 		),
 		.qmenu = vp8_profile_level,
-		.flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDEO_VP9_PROFILE,
@@ -284,7 +273,6 @@
 		(1 << V4L2_MPEG_VIDEO_VP9_PROFILE_2)
 		),
 		.qmenu = NULL,
-		.flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_VP9_LEVEL,
@@ -309,7 +297,6 @@
 		(1 << V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_61)
 		),
 		.qmenu = vp9_level,
-		.flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_PROFILE,
@@ -323,7 +310,6 @@
 		(1 << V4L2_MPEG_VIDC_VIDEO_MPEG2_PROFILE_MAIN)
 		),
 		.qmenu = mpeg2_profile,
-		.flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_LEVEL,
@@ -338,7 +324,6 @@
 			(1 << V4L2_MPEG_VIDC_VIDEO_MPEG2_LEVEL_2)
 		),
 		.qmenu = mpeg2_level,
-		.flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_CONCEAL_COLOR_8BIT,
@@ -366,7 +351,6 @@
 		.maximum = INT_MAX,
 		.default_value = 0,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -377,9 +361,7 @@
 		.maximum = MAX_NUM_CAPTURE_BUFFERS,
 		.default_value = MIN_NUM_CAPTURE_BUFFERS,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
-		.flags = V4L2_CTRL_FLAG_VOLATILE,
 	},
 	{
 		.id = V4L2_CID_MIN_BUFFERS_FOR_OUTPUT,
@@ -389,9 +371,7 @@
 		.maximum = MAX_NUM_OUTPUT_BUFFERS,
 		.default_value = MIN_NUM_OUTPUT_BUFFERS,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
-		.flags = V4L2_CTRL_FLAG_VOLATILE,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_FRAME_RATE,
@@ -401,7 +381,6 @@
 		.maximum = (MAXIMUM_FPS << 16),
 		.default_value = (DEFAULT_FPS << 16),
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -421,7 +400,6 @@
 		.maximum = INT_MAX,
 		.default_value =  (DEFAULT_FPS << 16),
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -904,21 +882,7 @@
 			inst->flags |= VIDC_REALTIME;
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE:
-		if (ctrl->val == INT_MAX) {
-			dprintk(VIDC_DBG,
-				"inst(%pK) Request for turbo mode\n", inst);
-			inst->clk_data.turbo_mode = true;
-		} else if (msm_vidc_validate_operating_rate(inst, ctrl->val)) {
-			dprintk(VIDC_ERR, "Failed to set operating rate\n");
-			rc = -ENOTSUPP;
-		} else {
-			dprintk(VIDC_DBG,
-				"inst(%pK) operating rate changed from %d to %d\n",
-				inst, inst->clk_data.operating_rate >> 16,
-					ctrl->val >> 16);
-			inst->clk_data.operating_rate = ctrl->val;
-			inst->clk_data.turbo_mode = false;
-		}
+		inst->clk_data.operating_rate = ctrl->val;
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_LOWLATENCY_MODE:
 		inst->clk_data.low_latency_mode = !!ctrl->val;
@@ -1051,6 +1015,22 @@
 			__func__, buffer_type);
 		return -EINVAL;
 	}
+	if (buffer_type == HAL_BUFFER_OUTPUT2) {
+		/*
+		 * For split mode set DPB count as well
+		 * For DPB actual count is same as min output count
+		 */
+		rc = msm_comm_set_buffer_count(inst,
+			bufreq->buffer_count_min,
+			bufreq->buffer_count_min,
+			HAL_BUFFER_OUTPUT);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"%s: failed to set buffer count(%#x)\n",
+				__func__, buffer_type);
+			return -EINVAL;
+		}
+	}
 	rc = msm_comm_set_buffer_count(inst,
 			bufreq->buffer_count_min,
 			bufreq->buffer_count_actual,
@@ -1382,7 +1362,6 @@
 {
 	uint32_t display_info = HFI_PROPERTY_PARAM_VUI_DISPLAY_INFO_EXTRADATA;
 	u32 value = 0x0;
-	u32 hdr10_hist = 0x0;
 
 	switch (inst->fmts[OUTPUT_PORT].fourcc) {
 	case V4L2_PIX_FMT_H264:
@@ -1407,10 +1386,10 @@
 	msm_comm_set_extradata(inst, display_info, 0x1);
 	if (inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_VP9 ||
 		inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_HEVC) {
-		hdr10_hist = 0x1;
+		msm_comm_set_extradata(inst,
+			HFI_PROPERTY_PARAM_HDR10_HIST_EXTRADATA, 0x1);
 	}
-	msm_comm_set_extradata(inst,
-		HFI_PROPERTY_PARAM_HDR10_HIST_EXTRADATA, hdr10_hist);
+
 	msm_comm_set_extradata(inst,
 		HFI_PROPERTY_PARAM_VDEC_NUM_CONCEALED_MB, 0x1);
 	if (inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_HEVC) {
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index bbbf3ea..23b422e 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -28,11 +28,10 @@
 #define QP_ENABLE_P 0x2
 #define QP_ENABLE_B 0x4
 #define MIN_QP 0
-#define MAX_QP 0x33
-#define MAX_QP_PACKED 0x333333
-#define DEFAULT_MIN_QP 0xA
-#define DEFAULT_MIN_QP_PACKED 0xA0A0A
-#define DEFAULT_MAX_QP_PACKED 0x2C2C2C
+#define MAX_QP 0x7F
+#define MAX_QP_PACKED 0x7F7F7F
+#define DEFAULT_QP 0xA
+#define DEFAULT_QP_PACKED 0xA0A0A
 #define MAX_INTRA_REFRESH_MBS ((7680 * 4320) >> 8)
 #define MAX_LTR_FRAME_COUNT 10
 #define MAX_NUM_B_FRAMES 1
@@ -91,7 +90,6 @@
 		.maximum = INT_MAX,
 		.default_value = 2*DEFAULT_FPS-1,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -100,9 +98,8 @@
 		.type = V4L2_CTRL_TYPE_INTEGER,
 		.minimum = MIN_QP,
 		.maximum = MAX_QP,
-		.default_value = DEFAULT_MIN_QP,
+		.default_value = DEFAULT_QP,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -111,9 +108,8 @@
 		.type = V4L2_CTRL_TYPE_INTEGER,
 		.minimum = MIN_QP,
 		.maximum = MAX_QP,
-		.default_value = DEFAULT_MIN_QP,
+		.default_value = DEFAULT_QP,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -122,9 +118,8 @@
 		.type = V4L2_CTRL_TYPE_INTEGER,
 		.minimum = MIN_QP,
 		.maximum = MAX_QP,
-		.default_value = DEFAULT_MIN_QP,
+		.default_value = DEFAULT_QP,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -133,9 +128,8 @@
 		.type = V4L2_CTRL_TYPE_INTEGER,
 		.minimum = MIN_QP,
 		.maximum = MAX_QP_PACKED,
-		.default_value = DEFAULT_MIN_QP_PACKED,
+		.default_value = DEFAULT_QP_PACKED,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -144,9 +138,8 @@
 		.type = V4L2_CTRL_TYPE_INTEGER,
 		.minimum = MIN_QP,
 		.maximum = MAX_QP_PACKED,
-		.default_value = DEFAULT_MAX_QP_PACKED,
+		.default_value = DEFAULT_QP_PACKED,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -157,7 +150,6 @@
 		.maximum = MAX_NUM_B_FRAMES,
 		.default_value = 0,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -168,9 +160,7 @@
 		.maximum = MAX_NUM_CAPTURE_BUFFERS,
 		.default_value = MIN_NUM_CAPTURE_BUFFERS,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
-		.flags = V4L2_CTRL_FLAG_VOLATILE,
 	},
 	{
 		.id = V4L2_CID_MIN_BUFFERS_FOR_OUTPUT,
@@ -180,9 +170,7 @@
 		.maximum = MAX_NUM_OUTPUT_BUFFERS,
 		.default_value = MIN_NUM_OUTPUT_BUFFERS,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
-		.flags = V4L2_CTRL_FLAG_VOLATILE,
 	},
 
 	{
@@ -193,7 +181,6 @@
 		.maximum = 0,
 		.default_value = 0,
 		.step = 0,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -221,7 +208,6 @@
 		.maximum = MAX_FRAME_QUALITY,
 		.default_value = DEFAULT_FRAME_QUALITY,
 		.step = FRAME_QUALITY_STEP,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -232,7 +218,6 @@
 		.maximum = 512,
 		.default_value = 0,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -243,7 +228,6 @@
 		.maximum = (MAXIMUM_FPS << 16),
 		.default_value = (DEFAULT_FPS << 16),
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -254,7 +238,6 @@
 		.maximum = MAX_BIT_RATE,
 		.default_value = DEFAULT_BIT_RATE,
 		.step = BIT_RATE_STEP,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -283,7 +266,6 @@
 		(1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH) |
 		(1 << V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH)
 		),
-		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
 	},
 	{
@@ -315,7 +297,6 @@
 		(1 << V4L2_MPEG_VIDEO_H264_LEVEL_6_1) |
 		(1 << V4L2_MPEG_VIDEO_H264_LEVEL_6_2)
 		),
-		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
 	},
 	{
@@ -326,7 +307,6 @@
 		.maximum = V4L2_MPEG_VIDEO_VP8_PROFILE_0,
 		.default_value = V4L2_MPEG_VIDEO_VP8_PROFILE_0,
 		.menu_skip_mask = ~(1 << V4L2_MPEG_VIDEO_VP8_PROFILE_0),
-		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
 	},
 	{
@@ -344,7 +324,6 @@
 		(1 << V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_3)
 		),
 		.qmenu = vp8_profile_level,
-		.flags = V4L2_CTRL_FLAG_VOLATILE,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDEO_HEVC_PROFILE,
@@ -358,7 +337,6 @@
 		(1 << V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE) |
 		(1 << V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10)
 		),
-		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
 	},
 	{
@@ -384,7 +362,6 @@
 		(1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1) |
 		(1 << V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2)
 		),
-		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
 	},
 	{
@@ -398,7 +375,6 @@
 		(1 << V4L2_MPEG_VIDEO_HEVC_TIER_MAIN) |
 		(1 << V4L2_MPEG_VIDEO_HEVC_TIER_HIGH)
 		),
-		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
 	},
 	{
@@ -432,7 +408,6 @@
 		.maximum = MAX_SLICE_BYTE_SIZE,
 		.default_value = MIN_SLICE_BYTE_SIZE,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -443,7 +418,6 @@
 		.maximum = MAX_SLICE_MB_SIZE,
 		.default_value = 1,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -465,7 +439,6 @@
 		.maximum = MAX_INTRA_REFRESH_MBS,
 		.default_value = 0,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -476,7 +449,6 @@
 		.maximum = 6,
 		.default_value = 0,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -487,7 +459,6 @@
 		.maximum = 6,
 		.default_value = 0,
 		.step = 1,
-		.menu_skip_mask = 0,
 		.qmenu = NULL,
 	},
 	{
@@ -916,10 +887,10 @@
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDC_VENC_HDR_INFO,
-		.name = "Enable/Disable HDR INFO",
-		.type = V4L2_CTRL_TYPE_U32,
-		.minimum = 0,
-		.maximum = UINT_MAX,
+		.name = "HDR PQ information",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = INT_MIN,
+		.maximum = INT_MAX,
 		.default_value = 0,
 		.step = 1,
 	},
@@ -954,6 +925,15 @@
 		.default_value = V4L2_MPEG_MSM_VIDC_DISABLE,
 		.step = 1,
 	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VENC_BITRATE_SAVINGS,
+		.name = "Enable/Disable bitrate savings",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = V4L2_MPEG_MSM_VIDC_DISABLE,
+		.maximum = V4L2_MPEG_MSM_VIDC_ENABLE,
+		.default_value = V4L2_MPEG_MSM_VIDC_ENABLE,
+		.step = 1,
+	},
 };
 
 #define NUM_CTRLS ARRAY_SIZE(msm_venc_ctrls)
@@ -1384,9 +1364,7 @@
 	struct msm_vidc_mastering_display_colour_sei_payload *mdisp_sei = NULL;
 	struct msm_vidc_content_light_level_sei_payload *cll_sei = NULL;
 	struct hal_buffer_requirements *buff_req_buffer = NULL;
-	struct v4l2_ctrl *i_qp = NULL;
-	struct v4l2_ctrl *p_qp = NULL;
-	struct v4l2_ctrl *b_qp = NULL;
+	u32 i_qp_min, i_qp_max, p_qp_min, p_qp_max, b_qp_min, b_qp_max;
 
 	if (!inst || !inst->core || !inst->core->device || !ctrl) {
 		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
@@ -1487,21 +1465,7 @@
 		}
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE:
-		if (ctrl->val == INT_MAX) {
-			dprintk(VIDC_DBG, "inst(%pK) Request for turbo mode\n",
-				inst);
-			inst->clk_data.turbo_mode = true;
-		} else if (msm_vidc_validate_operating_rate(inst, ctrl->val)) {
-			dprintk(VIDC_ERR, "Failed to set operating rate\n");
-			rc = -ENOTSUPP;
-		} else {
-			dprintk(VIDC_DBG,
-				"inst(%pK) operating rate changed from %d to %d\n",
-				inst, inst->clk_data.operating_rate >> 16,
-				ctrl->val >> 16);
-			inst->clk_data.operating_rate = ctrl->val;
-			inst->clk_data.turbo_mode = false;
-		}
+		inst->clk_data.operating_rate = ctrl->val;
 		if (inst->state == MSM_VIDC_START_DONE) {
 			rc = msm_venc_set_operating_rate(inst);
 			if (rc)
@@ -1625,15 +1589,18 @@
 		break;
 	case V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP:
 	case V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP:
-		i_qp = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP);
-		p_qp = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP);
-		b_qp = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP);
-		if ((ctrl->val & 0xff) < i_qp->minimum ||
-			((ctrl->val >> 8) & 0xff) < p_qp->minimum ||
-			((ctrl->val >> 16) & 0xff) < b_qp->minimum ||
-			(ctrl->val & 0xff) > i_qp->maximum ||
-			((ctrl->val >> 8) & 0xff) > p_qp->maximum ||
-			((ctrl->val >> 16) & 0xff) > b_qp->maximum) {
+		i_qp_min = inst->capability.cap[CAP_I_FRAME_QP].min;
+		i_qp_max = inst->capability.cap[CAP_I_FRAME_QP].max;
+		p_qp_min = inst->capability.cap[CAP_P_FRAME_QP].min;
+		p_qp_max = inst->capability.cap[CAP_P_FRAME_QP].max;
+		b_qp_min = inst->capability.cap[CAP_B_FRAME_QP].min;
+		b_qp_max = inst->capability.cap[CAP_B_FRAME_QP].max;
+		if ((ctrl->val & 0xff) < i_qp_min ||
+			((ctrl->val >> 8) & 0xff) < p_qp_min ||
+			((ctrl->val >> 16) & 0xff) < b_qp_min ||
+			(ctrl->val & 0xff) > i_qp_max ||
+			((ctrl->val >> 8) & 0xff) > p_qp_max ||
+			((ctrl->val >> 16) & 0xff) > b_qp_max) {
 			dprintk(VIDC_ERR, "Invalid QP %#x\n", ctrl->val);
 			return -EINVAL;
 		}
@@ -1643,6 +1610,12 @@
 			inst->client_set_ctrls |= CLIENT_SET_MAX_QP;
 		break;
 	case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP:
+		i_qp_min = inst->capability.cap[CAP_I_FRAME_QP].min;
+		i_qp_max = inst->capability.cap[CAP_I_FRAME_QP].max;
+		if (ctrl->val < i_qp_min || ctrl->val > i_qp_max) {
+			dprintk(VIDC_ERR, "Invalid I QP %#x\n", ctrl->val);
+			return -EINVAL;
+		}
 		inst->client_set_ctrls |= CLIENT_SET_I_QP;
 		if (inst->state == MSM_VIDC_START_DONE) {
 			rc = msm_venc_set_dyn_qp(inst, ctrl);
@@ -1653,9 +1626,21 @@
 		}
 		break;
 	case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP:
+		p_qp_min = inst->capability.cap[CAP_P_FRAME_QP].min;
+		p_qp_max = inst->capability.cap[CAP_P_FRAME_QP].max;
+		if (ctrl->val < p_qp_min || ctrl->val > p_qp_max) {
+			dprintk(VIDC_ERR, "Invalid P QP %#x\n", ctrl->val);
+			return -EINVAL;
+		}
 		inst->client_set_ctrls |= CLIENT_SET_P_QP;
 		break;
 	case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP:
+		b_qp_min = inst->capability.cap[CAP_B_FRAME_QP].min;
+		b_qp_max = inst->capability.cap[CAP_B_FRAME_QP].max;
+		if (ctrl->val < b_qp_min || ctrl->val > b_qp_max) {
+			dprintk(VIDC_ERR, "Invalid B QP %#x\n", ctrl->val);
+			return -EINVAL;
+		}
 		inst->client_set_ctrls |= CLIENT_SET_B_QP;
 		break;
 	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER:
@@ -2740,7 +2725,7 @@
 	struct v4l2_ctrl *ctrl_t;
 	struct hfi_multi_slice_control multi_slice_control;
 	int temp = 0;
-	u32 mb_per_frame, fps, mbps, bitrate;
+	u32 mb_per_frame, fps, mbps, bitrate, max_slices;
 	u32 slice_val, slice_mode, max_avg_slicesize;
 	u32 rc_mode, output_width, output_height;
 	struct v4l2_ctrl *rc_enable;
@@ -2801,13 +2786,17 @@
 		if (output_width <= 4096 || output_height <= 4096 ||
 			mb_per_frame <= NUM_MBS_PER_FRAME(4096, 2160) ||
 			mbps <= NUM_MBS_PER_SEC(4096, 2160, 60)) {
-			slice_val = max(slice_val, mb_per_frame / 10);
+			max_slices = inst->capability.cap[CAP_SLICE_MB].max ?
+				inst->capability.cap[CAP_SLICE_MB].max : 1;
+			slice_val = max(slice_val, mb_per_frame / max_slices);
 		}
 	} else {
 		if (output_width <= 1920 || output_height <= 1920 ||
 			mb_per_frame <= NUM_MBS_PER_FRAME(1088, 1920) ||
 			mbps <= NUM_MBS_PER_SEC(1088, 1920, 60)) {
-			max_avg_slicesize = ((bitrate / fps) / 8) / 10;
+			max_slices = inst->capability.cap[CAP_SLICE_BYTE].max ?
+				inst->capability.cap[CAP_SLICE_BYTE].max : 1;
+			max_avg_slicesize = ((bitrate / fps) / 8) / max_slices;
 			slice_val = max(slice_val, max_avg_slicesize);
 		}
 	}
@@ -2853,6 +2842,9 @@
 		rc_mode->val == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR))
 		return 0;
 
+	/* Firmware supports only random mode */
+	intra_refresh.mode = HFI_INTRA_REFRESH_RANDOM;
+
 	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_RANDOM);
 	intra_refresh.mbs = 0;
 	if (ctrl->val) {
@@ -2860,9 +2852,6 @@
 		u32 width = inst->prop.width[CAPTURE_PORT];
 		u32 height = inst->prop.height[CAPTURE_PORT];
 
-		/* ignore cyclic mode if random mode is set */
-		intra_refresh.mode = HFI_INTRA_REFRESH_RANDOM;
-
 		num_mbs_per_frame = NUM_MBS_PER_FRAME(height, width);
 		intra_refresh.mbs = num_mbs_per_frame / ctrl->val;
 		if (num_mbs_per_frame % ctrl->val) {
@@ -2871,7 +2860,6 @@
 	} else {
 		ctrl = get_ctrl(inst,
 			V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB);
-		intra_refresh.mode = HFI_INTRA_REFRESH_CYCLIC;
 		intra_refresh.mbs = ctrl->val;
 	}
 	if (!intra_refresh.mbs) {
@@ -2890,6 +2878,37 @@
 	return rc;
 }
 
+int msm_venc_set_bitrate_savings_mode(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	struct hfi_device *hdev;
+	struct v4l2_ctrl *ctrl = NULL;
+	struct hfi_enable enable;
+
+	if (!inst || !inst->core) {
+		dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+	hdev = inst->core->device;
+
+	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VENC_BITRATE_SAVINGS);
+	enable.enable = !!ctrl->val;
+	if (!ctrl->val && inst->rc_type != V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) {
+		dprintk(VIDC_DBG,
+			"Can't disable bitrate savings for non-VBR_CFR\n");
+		enable.enable = 1;
+	}
+
+	dprintk(VIDC_DBG, "%s: %d\n", __func__, enable.enable);
+	rc = call_hfi_op(hdev, session_set_property, inst->session,
+		HFI_PROPERTY_PARAM_VENC_BITRATE_SAVINGS, &enable,
+		sizeof(enable));
+	if (rc)
+		dprintk(VIDC_ERR, "%s: set property failed\n", __func__);
+
+	return rc;
+}
+
 int msm_venc_set_loop_filter_mode(struct msm_vidc_inst *inst)
 {
 	int rc = 0;
@@ -3820,6 +3839,9 @@
 	rc = msm_venc_set_rate_control(inst);
 	if (rc)
 		goto exit;
+	rc = msm_venc_set_bitrate_savings_mode(inst);
+	if (rc)
+		goto exit;
 	rc = msm_venc_set_input_timestamp_rc(inst);
 	if (rc)
 		goto exit;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 068d8d0..32feffd 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -817,7 +817,7 @@
 	}
 
 	/* Assign Core and LP mode for current session */
-	rc = msm_vidc_decide_core_and_power_mode(inst);
+	rc = call_core_op(inst->core, decide_core_and_power_mode, inst);
 	if (rc) {
 		dprintk(VIDC_ERR,
 			"This session can't be submitted to HW %pK\n", inst);
@@ -1348,6 +1348,7 @@
 				inst, v4l2_ctrl_get_name(ctrl->id));
 	return rc;
 }
+
 static int try_get_ctrl_for_instance(struct msm_vidc_inst *inst,
 	struct v4l2_ctrl *ctrl)
 {
@@ -1355,7 +1356,6 @@
 	struct hal_buffer_requirements *bufreq = NULL;
 
 	switch (ctrl->id) {
-
 	case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
 		ctrl->val = msm_comm_hfi_to_v4l2(
 			V4L2_CID_MPEG_VIDEO_H264_PROFILE,
@@ -1384,8 +1384,6 @@
 			V4L2_CID_MPEG_VIDEO_HEVC_LEVEL,
 			inst->level);
 		break;
-
-
 	case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
 		bufreq = get_buff_req_buffer(inst, HAL_BUFFER_OUTPUT);
 		if (!bufreq) {
@@ -1407,7 +1405,6 @@
 					HAL_BUFFER_INPUT);
 			return -EINVAL;
 		}
-
 		ctrl->val = bufreq->buffer_count_min_host;
 		dprintk(VIDC_DBG, "g_min: %x : hal_buffer %d min buffers %d\n",
 			hash32_ptr(inst->session), HAL_BUFFER_INPUT, ctrl->val);
@@ -1416,62 +1413,15 @@
 		ctrl->val = inst->prop.extradata_ctrls;
 		break;
 	default:
-		/*
-		 * Other controls aren't really volatile, shouldn't need to
-		 * modify ctrl->value
-		 */
 		break;
 	}
 
 	return rc;
 }
 
-static int msm_vidc_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
-{
-	int rc = 0;
-	unsigned int c = 0;
-	struct msm_vidc_inst *inst;
-	struct v4l2_ctrl *master;
-
-	if (!ctrl) {
-		dprintk(VIDC_ERR, "%s invalid parameters for ctrl\n", __func__);
-		return -EINVAL;
-	}
-
-	inst = container_of(ctrl->handler,
-		struct msm_vidc_inst, ctrl_handler);
-	if (!inst) {
-		dprintk(VIDC_ERR, "%s invalid parameters for inst\n", __func__);
-		return -EINVAL;
-	}
-	master = ctrl->cluster[0];
-	if (!master) {
-		dprintk(VIDC_ERR, "%s invalid parameters for master\n",
-			__func__);
-		return -EINVAL;
-	}
-
-	for (c = 0; c < master->ncontrols; ++c) {
-		if (master->cluster[c]->flags & V4L2_CTRL_FLAG_VOLATILE) {
-			rc = try_get_ctrl_for_instance(inst,
-				master->cluster[c]);
-			if (rc) {
-				dprintk(VIDC_ERR, "Failed getting %x\n",
-					master->cluster[c]->id);
-				return rc;
-			}
-		}
-	}
-	if (rc)
-		dprintk(VIDC_ERR, "Failed getting control: Inst = %pK (%s)\n",
-				inst, v4l2_ctrl_get_name(ctrl->id));
-	return rc;
-}
-
 static const struct v4l2_ctrl_ops msm_vidc_ctrl_ops = {
 
 	.s_ctrl = msm_vidc_op_s_ctrl,
-	.g_volatile_ctrl = msm_vidc_op_g_volatile_ctrl,
 };
 
 static struct msm_vidc_inst_smem_ops  msm_vidc_smem_ops = {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.c b/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.c
index ea0107d..e862a23 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.c
@@ -255,6 +255,7 @@
 #define NUM_MBS_4k (((4096 + 15) >> 4) * ((2304 + 15) >> 4))
 #define MB_SIZE_IN_PIXEL (16 * 16)
 #define HDR10PLUS_PAYLOAD_SIZE 1024
+#define HDR10_HIST_EXTRADATA_SIZE 4096
 
 static inline u32 calculate_h264d_scratch_size(struct msm_vidc_inst *inst,
 	u32 width, u32 height, bool is_interlaced);
@@ -286,14 +287,11 @@
 	u32 width, u32 height, u32 min_buf_count, bool split_mode_enabled);
 
 static inline u32 calculate_h264e_scratch1_size(struct msm_vidc_inst *inst,
-	u32 width, u32 height, u32 num_ref, bool ten_bit,
-	u32 num_vpp_pipes);
+	u32 width, u32 height, u32 num_ref, bool ten_bit);
 static inline u32 calculate_h265e_scratch1_size(struct msm_vidc_inst *inst,
-	u32 width, u32 height, u32 num_ref, bool ten_bit,
-	u32 num_vpp_pipes);
+	u32 width, u32 height, u32 num_ref, bool ten_bit);
 static inline u32 calculate_vp8e_scratch1_size(struct msm_vidc_inst *inst,
-	u32 width, u32 height, u32 num_ref, bool ten_bit,
-	u32 num_vpp_pipes);
+	u32 width, u32 height, u32 num_ref, bool ten_bit);
 
 static inline u32 calculate_enc_scratch2_size(struct msm_vidc_inst *inst,
 	u32 width, u32 height, u32 num_ref, bool ten_bit);
@@ -536,8 +534,7 @@
 			curr_req->buffer_size =
 				enc_calculators->calculate_scratch1_size(
 					inst, width, height, num_ref,
-					is_tenbit,
-					inst->clk_data.work_route);
+					is_tenbit);
 			valid_buffer_type = true;
 		} else if (curr_req->buffer_type ==
 			HAL_BUFFER_INTERNAL_SCRATCH_2) {
@@ -769,6 +766,18 @@
 	}
 
 	frame_size = base_res_mbs * MB_SIZE_IN_PIXEL * 3 / 2 / div_factor;
+
+	if (is_secure_session(inst)) {
+		u32 max_bitrate = inst->capability.cap[CAP_SECURE_BITRATE].max;
+
+		/*
+		 * for secure, calc frame_size based on max bitrate,
+		 * peak bitrate can be 10 times more and
+		 * frame rate assumed to be 30 fps at least
+		 */
+		frame_size = (max_bitrate * 10 / 8) / 30;
+	}
+
 	 /* multiply by 10/8 (1.25) to get size for 10 bit case */
 	if ((inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_VP9) ||
 		(inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_HEVC))
@@ -1244,7 +1253,8 @@
 	size_singlePipe = sao_bin_buffer_size + padded_bin_size;
 	size_singlePipe = ALIGN(size_singlePipe, VENUS_DMA_ALIGNMENT);
 	bitbin_size = size_singlePipe * NUM_OF_VPP_PIPES;
-	size = ALIGN(bitbin_size, VENUS_DMA_ALIGNMENT) * total_bitbin_buffers;
+	size = ALIGN(bitbin_size, VENUS_DMA_ALIGNMENT) * total_bitbin_buffers
+			+ 512;
 
 	return size;
 }
@@ -1295,7 +1305,8 @@
 	if (split_mode_enabled)
 		vpss_lb_size = size_vpss_lb(width, height);
 
-	size = co_mv_size + nonco_mv_size + vpss_lb_size;
+	size = co_mv_size + nonco_mv_size + vpss_lb_size +
+			HDR10_HIST_EXTRADATA_SIZE;
 	return size;
 }
 
@@ -1360,7 +1371,7 @@
 	if (split_mode_enabled)
 		vpss_lb_size = size_vpss_lb(width, height);
 
-	size += vpss_lb_size;
+	size += vpss_lb_size + HDR10_HIST_EXTRADATA_SIZE;
 	return size;
 }
 
@@ -1503,13 +1514,13 @@
 	bse_slice_cmd_buffer_size = ((((8192 << 2) + 7) & (~7)) * 6);
 	bse_reg_buffer_size = ((((512 << 3) + 7) & (~7)) * 4);
 	vpp_reg_buffer_size = ((((HFI_VENUS_VPPSG_MAX_REGISTERS << 3) + 31) &
-		(~31)) * 8);
-	lambda_lut_size = ((((52 << 1) + 7) & (~7)) * 3);
+		(~31)) * 10);
+	lambda_lut_size = ((((52 << 1) + 7) & (~7)) * 11);
 	override_buffer_size = 16 * ((frame_num_lcu + 7) >> 3);
 	override_buffer_size = ALIGN(override_buffer_size,
 		VENUS_DMA_ALIGNMENT) * 2;
 	ir_buffer_size = (((frame_num_lcu << 1) + 7) & (~7)) * 3;
-	vpss_line_buf = ((16 * width_coded) + (16 * height_coded));
+	vpss_line_buf = ((((width_coded + 3) >> 2) << 5) + 256) * 16;
 	topline_bufsize_fe_1stg_sao = (16 * (width_coded >> 5));
 	topline_bufsize_fe_1stg_sao = ALIGN(topline_bufsize_fe_1stg_sao,
 		VENUS_DMA_ALIGNMENT);
@@ -1531,27 +1542,24 @@
 }
 
 static inline u32 calculate_h264e_scratch1_size(struct msm_vidc_inst *inst,
-	u32 width, u32 height, u32 num_ref, bool ten_bit,
-	u32 num_vpp_pipes)
+	u32 width, u32 height, u32 num_ref, bool ten_bit)
 {
 	return calculate_enc_scratch1_size(inst, width, height, 16,
-		num_ref, ten_bit, num_vpp_pipes, false);
+		num_ref, ten_bit, NUM_OF_VPP_PIPES, false);
 }
 
 static inline u32 calculate_h265e_scratch1_size(struct msm_vidc_inst *inst,
-	u32 width, u32 height, u32 num_ref, bool ten_bit,
-	u32 num_vpp_pipes)
+	u32 width, u32 height, u32 num_ref, bool ten_bit)
 {
 	return calculate_enc_scratch1_size(inst, width, height, 32,
-		num_ref, ten_bit, num_vpp_pipes, true);
+		num_ref, ten_bit, NUM_OF_VPP_PIPES, true);
 }
 
 static inline u32 calculate_vp8e_scratch1_size(struct msm_vidc_inst *inst,
-	u32 width, u32 height, u32 num_ref, bool ten_bit,
-	u32 num_vpp_pipes)
+	u32 width, u32 height, u32 num_ref, bool ten_bit)
 {
 	return calculate_enc_scratch1_size(inst, width, height, 16,
-		num_ref, ten_bit, num_vpp_pipes, false);
+		num_ref, ten_bit, 1, false);
 }
 
 
@@ -1612,16 +1620,11 @@
 			16, HFI_COLOR_FORMAT_YUV420_NV12_UBWC_Y_TILE_HEIGHT);
 		meta_size_y = hfi_ubwc_metadata_plane_buffer_size(
 			metadata_stride, meta_buf_height);
-		metadata_stride = hfi_ubwc_uv_metadata_plane_stride(width,
-			64, HFI_COLOR_FORMAT_YUV420_NV12_UBWC_UV_TILE_WIDTH);
-		meta_buf_height = hfi_ubwc_uv_metadata_plane_bufheight(
-			height, 16,
-			HFI_COLOR_FORMAT_YUV420_NV12_UBWC_UV_TILE_HEIGHT);
 		meta_size_c = hfi_ubwc_metadata_plane_buffer_size(
 			metadata_stride, meta_buf_height);
 		size = (aligned_height + chroma_height) * aligned_width +
 			meta_size_y + meta_size_c;
-		size = (size * ((num_ref)+1)) + 4096;
+		size = (size * ((num_ref)+2)) + 4096;
 	} else {
 		ref_buf_height = (height + (HFI_VENUS_HEIGHT_ALIGNMENT - 1))
 			& (~(HFI_VENUS_HEIGHT_ALIGNMENT - 1));
@@ -1644,13 +1647,6 @@
 		metadata_stride = hfi_ubwc_calc_metadata_plane_stride(
 			width,
 			VENUS_METADATA_STRIDE_MULTIPLE,
-			HFI_COLOR_FORMAT_YUV420_NV12_UBWC_Y_TILE_WIDTH);
-		meta_buf_height = hfi_ubwc_metadata_plane_bufheight(height,
-			VENUS_METADATA_HEIGHT_MULTIPLE,
-			HFI_COLOR_FORMAT_YUV420_NV12_UBWC_Y_TILE_HEIGHT);
-		metadata_stride = hfi_ubwc_calc_metadata_plane_stride(
-			width,
-			VENUS_METADATA_STRIDE_MULTIPLE,
 			HFI_COLOR_FORMAT_YUV420_TP10_UBWC_Y_TILE_WIDTH);
 		meta_buf_height = hfi_ubwc_metadata_plane_bufheight(
 			height,
@@ -1661,7 +1657,7 @@
 		meta_size_c = hfi_ubwc_metadata_plane_buffer_size(
 			metadata_stride, meta_buf_height);
 		size = ref_buf_size + meta_size_y + meta_size_c;
-		size = (size * ((num_ref)+1)) + 4096;
+		size = (size * ((num_ref)+2)) + 4096;
 	}
 	return size;
 }
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.h b/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.h
index cddae12..29fe98c 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.h
@@ -18,8 +18,7 @@
 	u32 (*calculate_scratch_size)(struct msm_vidc_inst *inst, u32 width,
 		u32 height, u32 work_mode);
 	u32 (*calculate_scratch1_size)(struct msm_vidc_inst *inst,
-		u32 width, u32 height, u32 num_ref, bool ten_bit,
-		u32 num_vpp_pipes);
+		u32 width, u32 height, u32 num_ref, bool ten_bit);
 	u32 (*calculate_scratch2_size)(struct msm_vidc_inst *inst,
 		u32 width, u32 height, u32 num_ref, bool ten_bit);
 	u32 (*calculate_persist_size)(void);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index 7e10ec6..3077152 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -27,18 +27,21 @@
 	.calc_freq = msm_vidc_calc_freq_ar50,
 	.decide_work_route = NULL,
 	.decide_work_mode = msm_vidc_decide_work_mode_ar50,
+	.decide_core_and_power_mode = NULL,
 };
 
 struct msm_vidc_core_ops core_ops_iris1 = {
 	.calc_freq = msm_vidc_calc_freq_iris1,
 	.decide_work_route = msm_vidc_decide_work_route_iris1,
 	.decide_work_mode = msm_vidc_decide_work_mode_iris1,
+	.decide_core_and_power_mode = msm_vidc_decide_core_and_power_mode_iris1,
 };
 
 struct msm_vidc_core_ops core_ops_iris2 = {
 	.calc_freq = msm_vidc_calc_freq_iris2,
 	.decide_work_route = msm_vidc_decide_work_route_iris2,
 	.decide_work_mode = msm_vidc_decide_work_mode_iris2,
+	.decide_core_and_power_mode = msm_vidc_decide_core_and_power_mode_iris2,
 };
 
 static inline void msm_dcvs_print_dcvs_stats(struct clock_data *dcvs)
@@ -847,7 +850,9 @@
 	struct hfi_device *hdev;
 	unsigned long freq_core_1 = 0, freq_core_2 = 0, rate = 0;
 	unsigned long freq_core_max = 0;
-	struct msm_vidc_inst *temp = NULL;
+	struct msm_vidc_inst *inst = NULL;
+	struct msm_vidc_buffer *temp, *next;
+	u32 device_addr, filled_len;
 	int rc = 0, i = 0;
 	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
 	bool increment, decrement;
@@ -863,15 +868,34 @@
 	mutex_lock(&core->lock);
 	increment = false;
 	decrement = true;
-	list_for_each_entry(temp, &core->instances, list) {
+	list_for_each_entry(inst, &core->instances, list) {
+		device_addr = 0;
+		filled_len = 0;
+		mutex_lock(&inst->registeredbufs.lock);
+		list_for_each_entry_safe(temp, next,
+				&inst->registeredbufs.list, list) {
+			if (temp->vvb.vb2_buf.type ==
+				V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+				filled_len = max(filled_len,
+					temp->vvb.vb2_buf.planes[0].bytesused);
+				device_addr = temp->smem[0].device_addr;
+			}
+		}
+		mutex_unlock(&inst->registeredbufs.lock);
 
-		if (temp->clk_data.core_id == VIDC_CORE_ID_1)
-			freq_core_1 += temp->clk_data.min_freq;
-		else if (temp->clk_data.core_id == VIDC_CORE_ID_2)
-			freq_core_2 += temp->clk_data.min_freq;
-		else if (temp->clk_data.core_id == VIDC_CORE_ID_3) {
-			freq_core_1 += temp->clk_data.min_freq;
-			freq_core_2 += temp->clk_data.min_freq;
+		if (!filled_len || !device_addr) {
+			dprintk(VIDC_DBG, "%s no input for session %x\n",
+				__func__, hash32_ptr(inst->session));
+			continue;
+		}
+
+		if (inst->clk_data.core_id == VIDC_CORE_ID_1)
+			freq_core_1 += inst->clk_data.min_freq;
+		else if (inst->clk_data.core_id == VIDC_CORE_ID_2)
+			freq_core_2 += inst->clk_data.min_freq;
+		else if (inst->clk_data.core_id == VIDC_CORE_ID_3) {
+			freq_core_1 += inst->clk_data.min_freq;
+			freq_core_2 += inst->clk_data.min_freq;
 		}
 
 		freq_core_max = max_t(unsigned long, freq_core_1, freq_core_2);
@@ -885,18 +909,11 @@
 			break;
 		}
 
-		if (temp->clk_data.turbo_mode) {
-			dprintk(VIDC_PROF,
-				"Found an instance with Turbo request\n");
-			freq_core_max = msm_vidc_max_freq(core);
-			decrement = false;
-			break;
-		}
 		/* increment even if one session requested for it */
-		if (temp->clk_data.dcvs_flags & MSM_VIDC_DCVS_INCR)
+		if (inst->clk_data.dcvs_flags & MSM_VIDC_DCVS_INCR)
 			increment = true;
 		/* decrement only if all sessions requested for it */
-		if (!(temp->clk_data.dcvs_flags & MSM_VIDC_DCVS_DECR))
+		if (!(inst->clk_data.dcvs_flags & MSM_VIDC_DCVS_DECR))
 			decrement = false;
 	}
 
@@ -931,69 +948,6 @@
 	return rc;
 }
 
-int msm_vidc_validate_operating_rate(struct msm_vidc_inst *inst,
-	u32 operating_rate)
-{
-	struct msm_vidc_inst *temp;
-	struct msm_vidc_core *core;
-	unsigned long max_freq, freq_left, ops_left, load, cycles, freq = 0;
-	unsigned long mbs_per_second;
-	int rc = 0;
-	u32 curr_operating_rate = 0;
-
-	if (!inst || !inst->core) {
-		dprintk(VIDC_ERR, "%s Invalid args\n", __func__);
-		return -EINVAL;
-	}
-	core = inst->core;
-	curr_operating_rate = inst->clk_data.operating_rate >> 16;
-
-	mutex_lock(&core->lock);
-	max_freq = msm_vidc_max_freq(core);
-	list_for_each_entry(temp, &core->instances, list) {
-		if (temp == inst ||
-				temp->state < MSM_VIDC_START_DONE ||
-				temp->state >= MSM_VIDC_RELEASE_RESOURCES_DONE)
-			continue;
-
-		freq += temp->clk_data.min_freq;
-	}
-
-	freq_left = max_freq - freq;
-
-	mbs_per_second = msm_comm_get_inst_load_per_core(inst,
-		LOAD_CALC_NO_QUIRKS);
-
-	cycles = inst->clk_data.entry->vpp_cycles;
-	if (inst->session_type == MSM_VIDC_ENCODER)
-		cycles = inst->flags & VIDC_LOW_POWER ?
-			inst->clk_data.entry->low_power_cycles :
-			cycles;
-
-	load = cycles * mbs_per_second;
-
-	ops_left = load ? (freq_left / load) : 0;
-
-	operating_rate = operating_rate >> 16;
-
-	if ((curr_operating_rate * (1 + ops_left)) >= operating_rate ||
-			msm_vidc_clock_voting ||
-			inst->clk_data.buffer_counter < DCVS_FTB_WINDOW) {
-		dprintk(VIDC_DBG,
-			"Requestd operating rate is valid %u\n",
-			operating_rate);
-		rc = 0;
-	} else {
-		dprintk(VIDC_DBG,
-			"Current load is high for requested settings. Cannot set operating rate to %u\n",
-			operating_rate);
-		rc = -EINVAL;
-	}
-	mutex_unlock(&core->lock);
-
-	return rc;
-}
-
 int msm_comm_scale_clocks(struct msm_vidc_inst *inst)
 {
 	struct msm_vidc_buffer *temp, *next;
@@ -1027,7 +981,7 @@
 	if (!filled_len || !device_addr) {
 		dprintk(VIDC_DBG, "%s no input for session %x\n",
 			__func__, hash32_ptr(inst->session));
-		goto no_clock_change;
+		return 0;
 	}
 
 	freq = call_core_op(inst->core, calc_freq, inst, filled_len);
@@ -1045,7 +999,6 @@
 
 	msm_vidc_set_clocks(inst->core);
 
-no_clock_change:
 	return 0;
 }
 
@@ -1574,7 +1527,7 @@
 static inline int msm_vidc_power_save_mode_enable(struct msm_vidc_inst *inst,
 	bool enable)
 {
-	u32 rc = 0, mbs_per_frame;
+	u32 rc = 0, mbs_per_frame, mbs_per_sec;
 	u32 prop_id = 0;
 	void *pdata = NULL;
 	struct hfi_device *hdev = NULL;
@@ -1587,15 +1540,17 @@
 				__func__);
 		return 0;
 	}
-	mbs_per_frame = msm_vidc_get_mbs_per_frame(inst);
-	if (mbs_per_frame > inst->core->resources.max_hq_mbs_per_frame ||
-		msm_vidc_get_fps(inst) >
-		(int) inst->core->resources.max_hq_fps) {
-		enable = true;
-	}
+
 	/* Power saving always disabled for CQ RC mode. */
-	if (inst->rc_type == V4L2_MPEG_VIDEO_BITRATE_MODE_CQ)
+	mbs_per_frame = msm_vidc_get_mbs_per_frame(inst);
+	mbs_per_sec = mbs_per_frame * msm_vidc_get_fps(inst);
+	if (inst->rc_type == V4L2_MPEG_VIDEO_BITRATE_MODE_CQ ||
+		(mbs_per_frame <=
+		 inst->core->resources.max_hq_mbs_per_frame &&
+		 mbs_per_sec <=
+		 inst->core->resources.max_hq_mbs_per_sec)) {
 		enable = false;
+	}
 
 	prop_id = HFI_PROPERTY_CONFIG_VENC_PERF_MODE;
 	hfi_perf_mode = enable ? HFI_VENC_PERFMODE_POWER_SAVE :
@@ -1673,7 +1628,7 @@
 	return load;
 }
 
-int msm_vidc_decide_core_and_power_mode(struct msm_vidc_inst *inst)
+int msm_vidc_decide_core_and_power_mode_iris1(struct msm_vidc_inst *inst)
 {
 	int rc = 0, hier_mode = 0;
 	struct hfi_device *hdev;
@@ -1813,6 +1768,14 @@
 	return rc;
 }
 
+int msm_vidc_decide_core_and_power_mode_iris2(struct msm_vidc_inst *inst)
+{
+	inst->clk_data.core_id = VIDC_CORE_ID_1;
+	msm_print_core_status(inst->core, VIDC_CORE_ID_1);
+
+	return msm_vidc_power_save_mode_enable(inst, true);
+}
+
 void msm_vidc_init_core_clk_ops(struct msm_vidc_core *core)
 {
 	if (!core)
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
index 4742d37..3882f5e 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
@@ -8,8 +8,6 @@
 #include "msm_vidc_internal.h"
 
 void msm_clock_data_reset(struct msm_vidc_inst *inst);
-int msm_vidc_validate_operating_rate(struct msm_vidc_inst *inst,
-	u32 operating_rate);
 int msm_vidc_set_clocks(struct msm_vidc_core *core);
 int msm_comm_vote_bus(struct msm_vidc_core *core);
 int msm_dcvs_try_enable(struct msm_vidc_inst *inst);
@@ -21,7 +19,8 @@
 int msm_vidc_decide_work_mode_iris1(struct msm_vidc_inst *inst);
 int msm_vidc_decide_work_route_iris2(struct msm_vidc_inst *inst);
 int msm_vidc_decide_work_mode_iris2(struct msm_vidc_inst *inst);
-int msm_vidc_decide_core_and_power_mode(struct msm_vidc_inst *inst);
+int msm_vidc_decide_core_and_power_mode_iris1(struct msm_vidc_inst *inst);
+int msm_vidc_decide_core_and_power_mode_iris2(struct msm_vidc_inst *inst);
 void msm_print_core_status(struct msm_vidc_core *core, u32 core_id);
 void msm_vidc_clear_freq_entry(struct msm_vidc_inst *inst,
 	u32 device_addr);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index c77cba9..7b0edfc 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -1431,15 +1431,6 @@
 		msm_vidc_comm_update_ctrl(inst, V4L2_CID_MPEG_VIDEO_BITRATE,
 				&inst->capability.cap[CAP_BITRATE]);
 		msm_vidc_comm_update_ctrl(inst,
-				V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP,
-				&inst->capability.cap[CAP_I_FRAME_QP]);
-		msm_vidc_comm_update_ctrl(inst,
-				V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP,
-				&inst->capability.cap[CAP_P_FRAME_QP]);
-		msm_vidc_comm_update_ctrl(inst,
-				V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP,
-				&inst->capability.cap[CAP_B_FRAME_QP]);
-		msm_vidc_comm_update_ctrl(inst,
 				V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES,
 				&inst->capability.cap[CAP_SLICE_BYTE]);
 		msm_vidc_comm_update_ctrl(inst,
@@ -2582,6 +2573,10 @@
 		msm_comm_store_mark_data(&inst->fbd_data, vb->index,
 			fill_buf_done->mark_data, fill_buf_done->mark_target);
 	}
+	if (inst->session_type == MSM_VIDC_ENCODER) {
+		msm_comm_store_filled_length(&inst->fbd_data, vb->index,
+			fill_buf_done->filled_len1);
+	}
 
 	extra_idx = EXTRADATA_IDX(inst->bufq[CAPTURE_PORT].num_planes);
 	if (extra_idx && extra_idx < VIDEO_MAX_PLANES)
@@ -3556,7 +3551,7 @@
 	int rc = 0;
 	struct internal_buf *binfo = NULL;
 	u32 smem_flags = SMEM_UNCACHED, buffer_size, num_buffers, hfi_fmt;
-	struct hal_buffer_requirements *output_buf, *extradata_buf;
+	struct hal_buffer_requirements *output_buf;
 	unsigned int i;
 	struct hfi_device *hdev;
 	struct hfi_buffer_size_minimum b;
@@ -3571,6 +3566,17 @@
 		return 0;
 	}
 
+	/* Set DPB buffer count to firmware */
+	rc = msm_comm_set_buffer_count(inst,
+			output_buf->buffer_count_min,
+			output_buf->buffer_count_min,
+			HAL_BUFFER_OUTPUT);
+	if (rc) {
+		dprintk(VIDC_ERR, "%s: failed to set bufreqs(%#x)\n",
+			__func__, buffer_type);
+		return -EINVAL;
+	}
+
 	/* For DPB buffers, Always use FW count */
 	num_buffers = output_buf->buffer_count_min;
 	hfi_fmt = msm_comm_convert_color_fmt(inst->clk_data.dpb_fourcc);
@@ -3590,12 +3596,15 @@
 		inst->session, HFI_PROPERTY_PARAM_BUFFER_SIZE_MINIMUM,
 		&b, sizeof(b));
 
-	extradata_buf = get_buff_req_buffer(inst, HAL_BUFFER_EXTRADATA_OUTPUT);
-	if (extradata_buf) {
+	if (inst->bufq[CAPTURE_PORT].num_planes == 1 ||
+		!inst->bufq[CAPTURE_PORT].plane_sizes[1]) {
 		dprintk(VIDC_DBG,
-			"extradata: num = %d, size = %d\n",
-			extradata_buf->buffer_count_actual,
-			extradata_buf->buffer_size);
+			"This extradata buffer not required, buffer_type: %x\n",
+			buffer_type);
+	} else {
+		dprintk(VIDC_DBG,
+			"extradata: num = 1, size = %d\n",
+			inst->bufq[CAPTURE_PORT].plane_sizes[1]);
 		inst->dpb_extra_binfo = NULL;
 		inst->dpb_extra_binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
 		if (!inst->dpb_extra_binfo) {
@@ -3604,17 +3613,13 @@
 			goto fail_kzalloc;
 		}
 		rc = msm_comm_smem_alloc(inst,
-			extradata_buf->buffer_size, 1, smem_flags,
+			inst->bufq[CAPTURE_PORT].plane_sizes[1], 1, smem_flags,
 			buffer_type, 0, &inst->dpb_extra_binfo->smem);
 		if (rc) {
 			dprintk(VIDC_ERR,
 				"Failed to allocate output memory\n");
 			goto err_no_mem;
 		}
-	} else {
-		dprintk(VIDC_DBG,
-			"This extradata buffer not required, buffer_type: %x\n",
-			buffer_type);
 	}
 
 	if (inst->flags & VIDC_SECURE)
@@ -4505,7 +4510,7 @@
 	}
 
 	dprintk(VIDC_DBG, "Buffer requirements :\n");
-	dprintk(VIDC_DBG, "%15s %8s %8s %8s %8s\n",
+	dprintk(VIDC_DBG, "%15s %8s %8s %8s %8s %8s\n",
 		"buffer type", "count", "mincount_host", "mincount_fw", "size",
 		"alignment");
 	for (i = 0; i < HAL_BUFFER_MAX; i++) {
@@ -5377,6 +5382,7 @@
 	struct msm_vidc_core *core;
 	u32 output_height, output_width, input_height, input_width;
 	u32 width_min, width_max, height_min, height_max;
+	u32 mbpf_max;
 
 	if (!inst || !inst->core || !inst->core->device) {
 		dprintk(VIDC_WARN, "%s: Invalid parameter\n", __func__);
@@ -5398,10 +5404,19 @@
 		return -ENOTSUPP;
 	}
 
-	width_min = capability->cap[CAP_FRAME_WIDTH].min;
-	width_max = capability->cap[CAP_FRAME_WIDTH].max;
-	height_min = capability->cap[CAP_FRAME_HEIGHT].min;
-	height_max = capability->cap[CAP_FRAME_HEIGHT].max;
+	if (is_secure_session(inst)) {
+		width_min = capability->cap[CAP_SECURE_FRAME_WIDTH].min;
+		width_max = capability->cap[CAP_SECURE_FRAME_WIDTH].max;
+		height_min = capability->cap[CAP_SECURE_FRAME_HEIGHT].min;
+		height_max = capability->cap[CAP_SECURE_FRAME_HEIGHT].max;
+		mbpf_max = capability->cap[CAP_SECURE_MBS_PER_FRAME].max;
+	} else {
+		width_min = capability->cap[CAP_FRAME_WIDTH].min;
+		width_max = capability->cap[CAP_FRAME_WIDTH].max;
+		height_min = capability->cap[CAP_FRAME_HEIGHT].min;
+		height_max = capability->cap[CAP_FRAME_HEIGHT].max;
+		mbpf_max = capability->cap[CAP_MBS_PER_FRAME].max;
+	}
 
 	output_height = inst->prop.height[CAPTURE_PORT];
 	output_width = inst->prop.width[CAPTURE_PORT];
@@ -5447,6 +5462,13 @@
 			width_max, height_max);
 			rc = -ENOTSUPP;
 		}
+		if (!rc && NUM_MBS_PER_FRAME(input_width, input_height) >
+			mbpf_max) {
+			dprintk(VIDC_ERR, "Unsupported mbpf %d, max %d\n",
+				NUM_MBS_PER_FRAME(input_width, input_height),
+				mbpf_max);
+			rc = -ENOTSUPP;
+		}
 	}
 	if (rc) {
 		dprintk(VIDC_ERR,
@@ -6056,9 +6078,14 @@
 			} else if (vb->type ==
 					V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
 				if (!i) { /* bitstream */
+					u32 size_u32;
 					skip = false;
 					offset = 0;
-					size = vb->planes[i].length;
+					size_u32 = vb->planes[i].length;
+					msm_comm_fetch_filled_length(
+						&inst->fbd_data, vb->index,
+						&size_u32);
+					size = size_u32;
 					cache_op = SMEM_CACHE_INVALIDATE;
 				}
 			}
@@ -6520,6 +6547,63 @@
 	return ret;
 }
 
+void msm_comm_store_filled_length(struct msm_vidc_list *data_list,
+		u32 index, u32 filled_length)
+{
+	struct msm_vidc_buf_data *pdata = NULL;
+	bool found = false;
+
+	if (!data_list) {
+		dprintk(VIDC_ERR, "%s: invalid params %pK\n",
+			__func__, data_list);
+		return;
+	}
+
+	mutex_lock(&data_list->lock);
+	list_for_each_entry(pdata, &data_list->list, list) {
+		if (pdata->index == index) {
+			pdata->filled_length = filled_length;
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+		if (!pdata)  {
+			dprintk(VIDC_WARN, "%s: malloc failure.\n", __func__);
+			goto exit;
+		}
+		pdata->index = index;
+		pdata->filled_length = filled_length;
+		list_add_tail(&pdata->list, &data_list->list);
+	}
+
+exit:
+	mutex_unlock(&data_list->lock);
+}
+
+void msm_comm_fetch_filled_length(struct msm_vidc_list *data_list,
+		u32 index, u32 *filled_length)
+{
+	struct msm_vidc_buf_data *pdata = NULL;
+
+	if (!data_list || !filled_length) {
+		dprintk(VIDC_ERR, "%s: invalid params %pK %pK\n",
+			__func__, data_list, filled_length);
+		return;
+	}
+
+	mutex_lock(&data_list->lock);
+	list_for_each_entry(pdata, &data_list->list, list) {
+		if (pdata->index == index) {
+			*filled_length = pdata->filled_length;
+			break;
+		}
+	}
+	mutex_unlock(&data_list->lock);
+}
+
 void msm_comm_store_mark_data(struct msm_vidc_list *data_list,
 		u32 index, u32 mark_data, u32 mark_target)
 {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.h b/drivers/media/platform/msm/vidc/msm_vidc_common.h
index f79a6b7..6b0e882 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.h
@@ -80,6 +80,11 @@
 	return !!(inst->flags & VIDC_REALTIME);
 }
 
+static inline bool is_secure_session(struct msm_vidc_inst *inst)
+{
+	return !!(inst->flags & VIDC_SECURE);
+}
+
 static inline bool is_decode_session(struct msm_vidc_inst *inst)
 {
 	return inst->session_type == MSM_VIDC_DECODER;
@@ -252,6 +257,10 @@
 		struct v4l2_buffer *v4l2);
 void kref_put_mbuf(struct msm_vidc_buffer *mbuf);
 bool kref_get_mbuf(struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf);
+void msm_comm_store_filled_length(struct msm_vidc_list *data_list,
+		u32 index, u32 filled_length);
+void msm_comm_fetch_filled_length(struct msm_vidc_list *data_list,
+		u32 index, u32 *filled_length);
 void msm_comm_store_mark_data(struct msm_vidc_list *data_list,
 		u32 index, u32 mark_data, u32 mark_target);
 void msm_comm_fetch_mark_data(struct msm_vidc_list *data_list,
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
index 8bf2272..51e382ac 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
@@ -7,6 +7,7 @@
 #define MAX_SSR_STRING_LEN 10
 #include "msm_vidc_debug.h"
 #include "vidc_hfi_api.h"
+#include <linux/of_fdt.h>
 
 int msm_vidc_debug = VIDC_ERR | VIDC_WARN;
 EXPORT_SYMBOL(msm_vidc_debug);
@@ -87,6 +88,8 @@
 	cur += write_str(cur, end - cur,
 		"register_size: %u\n", fw_info.register_size);
 	cur += write_str(cur, end - cur, "irq: %u\n", fw_info.irq);
+	cur += write_str(cur, end - cur,
+		"ddr_type: %d\n", of_fdt_get_ddrtype());
 
 err_fw_info:
 	for (i = SYS_MSG_START; i < SYS_MSG_END; i++) {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_dyn_gov.c b/drivers/media/platform/msm/vidc/msm_vidc_dyn_gov.c
index f158c35..f144070 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_dyn_gov.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_dyn_gov.c
@@ -3,28 +3,21 @@
  * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  */
 
-
-#include <linux/module.h>
-#include "governor.h"
+#include "msm_vidc_debug.h"
 #include "fixedpoint.h"
 #include "msm_vidc_internal.h"
-#include "msm_vidc_debug.h"
 #include "vidc_hfi_api.h"
 #define COMPRESSION_RATIO_MAX 5
 
-enum governor_mode {
-	GOVERNOR_DDR,
-	GOVERNOR_LLCC,
-};
-
-struct governor {
-	enum governor_mode mode;
-	struct devfreq_governor devfreq_gov;
+enum vidc_bus_type {
+	PERF,
+	DDR,
+	LLCC,
 };
 
 /*
- * Minimum dimensions that the governor is willing to calculate
- * bandwidth for.  This means that anything bandwidth(0, 0) ==
+ * Minimum dimensions for which to calculate bandwidth.
+ * This means that bandwidth(0, 0) ==
  * bandwidth(BASELINE_DIMENSIONS.width, BASELINE_DIMENSIONS.height)
  */
 static const struct {
@@ -34,15 +27,6 @@
 	.height = 720,
 };
 
-/*
- * These are hardcoded AB values that the governor votes for in certain
- * situations, where a certain bus frequency is desired.  It isn't exactly
- * scalable since different platforms have different bus widths, but we'll
- * deal with that in the future.
- */
-const unsigned long NOMINAL_BW_MBPS = 6000 /* ideally 320 Mhz */,
-	SVS_BW_MBPS = 2000 /* ideally 100 Mhz */;
-
 /* converts Mbps to bps (the "b" part can be bits or bytes based on context) */
 #define kbps(__mbps) ((__mbps) * 1000)
 #define bps(__mbps) (kbps(__mbps) * 1000)
@@ -207,6 +191,16 @@
 	},
 };
 
+static u32 get_type_frm_name(char *name)
+{
+	if (!strcmp(name, "venus-llcc"))
+		return LLCC;
+	else if (!strcmp(name, "venus-ddr"))
+		return DDR;
+	else
+		return PERF;
+}
+
 static struct lut const *__lut(int width, int height, int fps)
 {
 	int frame_size = height * width, c = 0;
@@ -273,27 +267,25 @@
 
 			}
 		}
-
-		dprintk(VIDC_PROF, "%s", formatted_line);
 	}
 }
 
 static unsigned long __calculate_vpe(struct vidc_bus_vote_data *d,
-		enum governor_mode gm)
+		enum vidc_bus_type type)
 {
 	return 0;
 }
 
 static unsigned long __calculate_cvp(struct vidc_bus_vote_data *d,
-		enum governor_mode gm)
+		enum vidc_bus_type type)
 {
 	unsigned long ret = 0;
 
-	switch (gm) {
-	case GOVERNOR_DDR:
+	switch (type) {
+	case DDR:
 		ret = d->ddr_bw;
 		break;
-	case GOVERNOR_LLCC:
+	case LLCC:
 		ret = d->sys_cache_bw;
 		break;
 	default:
@@ -334,7 +326,7 @@
 }
 
 static unsigned long __calculate_decoder(struct vidc_bus_vote_data *d,
-		enum governor_mode gm)
+		enum vidc_bus_type type)
 {
 	/*
 	 * XXX: Don't fool around with any of the hardcoded numbers unless you
@@ -575,11 +567,11 @@
 		__dump(dump, ARRAY_SIZE(dump));
 	}
 
-	switch (gm) {
-	case GOVERNOR_DDR:
+	switch (type) {
+	case DDR:
 		ret = kbps(fp_round(ddr.total));
 		break;
-	case GOVERNOR_LLCC:
+	case LLCC:
 		ret = kbps(fp_round(llc.total));
 		break;
 	default:
@@ -590,7 +582,7 @@
 }
 
 static unsigned long __calculate_encoder(struct vidc_bus_vote_data *d,
-		enum governor_mode gm)
+		enum vidc_bus_type type)
 {
 	/*
 	 * XXX: Don't fool around with any of the hardcoded numbers unless you
@@ -872,11 +864,11 @@
 		__dump(dump, ARRAY_SIZE(dump));
 	}
 
-	switch (gm) {
-	case GOVERNOR_DDR:
+	switch (type) {
+	case DDR:
 		ret = kbps(fp_round(ddr.total));
 		break;
-	case GOVERNOR_LLCC:
+	case LLCC:
 		ret = kbps(fp_round(llc.total));
 		break;
 	default:
@@ -887,41 +879,37 @@
 }
 
 static unsigned long __calculate(struct vidc_bus_vote_data *d,
-		enum governor_mode gm)
+		enum vidc_bus_type type)
 {
-	unsigned long (*calc[])(struct vidc_bus_vote_data *,
-			enum governor_mode) = {
-		[HAL_VIDEO_DOMAIN_VPE] = __calculate_vpe,
-		[HAL_VIDEO_DOMAIN_ENCODER] = __calculate_encoder,
-		[HAL_VIDEO_DOMAIN_DECODER] = __calculate_decoder,
-		[HAL_VIDEO_DOMAIN_CVP] = __calculate_cvp,
-	};
+	unsigned long value = 0;
 
-	if (d->domain >= ARRAY_SIZE(calc)) {
-		dprintk(VIDC_ERR, "%s: invalid domain %d\n",
-			__func__, d->domain);
-		return 0;
+	switch (d->domain) {
+	case HAL_VIDEO_DOMAIN_VPE:
+		value = __calculate_vpe(d, type);
+		break;
+	case HAL_VIDEO_DOMAIN_ENCODER:
+		value = __calculate_encoder(d, type);
+		break;
+	case HAL_VIDEO_DOMAIN_DECODER:
+		value = __calculate_decoder(d, type);
+		break;
+	case HAL_VIDEO_DOMAIN_CVP:
+		value = __calculate_cvp(d, type);
+		break;
+	default:
+		dprintk(VIDC_ERR, "Unknown Domain");
 	}
-	return calc[d->domain](d, gm);
+
+	return value;
 }
 
-
-static int __get_target_freq(struct devfreq *dev, unsigned long *freq)
+unsigned long __calc_bw(struct bus_info *bus,
+				struct msm_vidc_gov_data *vidc_data)
 {
 	unsigned long ab_kbps = 0, c = 0;
-	struct devfreq_dev_status stats = {0};
-	struct msm_vidc_gov_data *vidc_data = NULL;
-	struct governor *gov = NULL;
+	enum vidc_bus_type type;
 
-	if (!dev || !freq)
-		return -EINVAL;
-
-	gov = container_of(dev->governor,
-			struct governor, devfreq_gov);
-	dev->profile->get_dev_status(dev->dev.parent, &stats);
-	vidc_data = (struct msm_vidc_gov_data *)stats.private_data;
-
-	if (!vidc_data || !vidc_data->data_count)
+	if (!vidc_data || !vidc_data->data_count || !vidc_data->data)
 		goto exit;
 
 	for (c = 0; c < vidc_data->data_count; ++c) {
@@ -931,85 +919,12 @@
 		}
 	}
 
+	type = get_type_frm_name(bus->name);
+
 	for (c = 0; c < vidc_data->data_count; ++c)
-		ab_kbps += __calculate(&vidc_data->data[c], gov->mode);
+		ab_kbps += __calculate(&vidc_data->data[c], type);
 
 exit:
-	*freq = clamp(ab_kbps, dev->min_freq, dev->max_freq ?
-		dev->max_freq : UINT_MAX);
-	trace_msm_vidc_perf_bus_vote(gov->devfreq_gov.name, *freq);
-	return 0;
+	trace_msm_vidc_perf_bus_vote(bus->name, ab_kbps);
+	return ab_kbps;
 }
-
-static int __event_handler(struct devfreq *devfreq, unsigned int event,
-		void *data)
-{
-	int rc = 0;
-
-	if (!devfreq)
-		return -EINVAL;
-
-	switch (event) {
-	case DEVFREQ_GOV_START:
-	case DEVFREQ_GOV_RESUME:
-	case DEVFREQ_GOV_SUSPEND:
-		mutex_lock(&devfreq->lock);
-		rc = update_devfreq(devfreq);
-		mutex_unlock(&devfreq->lock);
-		break;
-	}
-
-	return rc;
-}
-
-static struct governor governors[] = {
-	{
-		.mode = GOVERNOR_DDR,
-		.devfreq_gov = {
-			.name = "msm-vidc-ddr",
-			.get_target_freq = __get_target_freq,
-			.event_handler = __event_handler,
-		},
-	},
-	{
-		.mode = GOVERNOR_LLCC,
-		.devfreq_gov = {
-			.name = "msm-vidc-llcc",
-			.get_target_freq = __get_target_freq,
-			.event_handler = __event_handler,
-		},
-	},
-};
-
-static int __init msm_vidc_bw_gov_init(void)
-{
-	int c = 0, rc = 0;
-
-	for (c = 0; c < ARRAY_SIZE(governors); ++c) {
-		dprintk(VIDC_DBG, "Adding governor %s\n",
-				governors[c].devfreq_gov.name);
-
-		rc = devfreq_add_governor(&governors[c].devfreq_gov);
-		if (rc) {
-			dprintk(VIDC_ERR, "Error adding governor %s: %d\n",
-				governors[c].devfreq_gov.name, rc);
-			break;
-		}
-	}
-
-	return rc;
-}
-module_init(msm_vidc_bw_gov_init);
-
-static void __exit msm_vidc_bw_gov_exit(void)
-{
-	int c = 0;
-
-	for (c = 0; c < ARRAY_SIZE(governors); ++c) {
-		dprintk(VIDC_DBG, "Removing governor %s\n",
-				governors[c].devfreq_gov.name);
-		devfreq_remove_governor(&governors[c].devfreq_gov);
-	}
-}
-module_exit(msm_vidc_bw_gov_exit);
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index 7f266c3..27c9ceb 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -187,6 +187,7 @@
 	u32 index;
 	u32 mark_data;
 	u32 mark_target;
+	u32 filled_length;
 };
 
 struct msm_vidc_common_data {
@@ -383,7 +384,6 @@
 	u32 work_mode;
 	bool low_latency_mode;
 	bool is_cbr_plus;
-	bool turbo_mode;
 	u32 work_route;
 	u32 dcvs_flags;
 	u32 frame_rate;
@@ -416,6 +416,7 @@
 	unsigned long (*calc_freq)(struct msm_vidc_inst *inst, u32 filled_len);
 	int (*decide_work_route)(struct msm_vidc_inst *inst);
 	int (*decide_work_mode)(struct msm_vidc_inst *inst);
+	int (*decide_core_and_power_mode)(struct msm_vidc_inst *inst);
 };
 
 struct msm_vidc_core {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_platform.c b/drivers/media/platform/msm/vidc/msm_vidc_platform.c
index e9fce79..c14348f 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_platform.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_platform.c
@@ -168,10 +168,9 @@
 	{CAP_I_FRAME_QP, ENC, VP8|VP9, 0, 127, 1, 20},
 	{CAP_P_FRAME_QP, ENC, VP8|VP9, 0, 127, 1, 40},
 	{CAP_B_FRAME_QP, ENC, VP8|VP9, 0, 127, 1, 40},
-	/* (CAP_BITRATE / 8) / 10 slices */
-	{CAP_SLICE_BYTE, ENC, H264|HEVC, 0, 2750000, 1, 2750000},
-	/* CAP_MBS_PER_FRAME / 10 slices */
-	{CAP_SLICE_MB, ENC, H264|HEVC, 0, 13824, 1, 13824},
+	/* 10 slices */
+	{CAP_SLICE_BYTE, ENC, H264|HEVC, 1, 10, 1, 10},
+	{CAP_SLICE_MB, ENC, H264|HEVC, 1, 10, 1, 10},
 	{CAP_MAX_VIDEOCORES, DOMAINS_ALL, CODECS_ALL, 0, 1, 1, 1},
 
 	/* VP8 specific */
@@ -194,6 +193,13 @@
 	{CAP_MBS_PER_SECOND, DEC, MPEG2, 1, 244800, 1, 244800},
 	{CAP_FRAMERATE, DEC, MPEG2, 1, 30, 1, 30},
 	{CAP_BITRATE, DEC, MPEG2, 1, 40000000, 1, 20000000},
+
+	/* Secure usecase specific */
+	{CAP_SECURE_FRAME_WIDTH, DOMAINS_ALL, CODECS_ALL, 128, 4096, 1, 1920},
+	{CAP_SECURE_FRAME_HEIGHT, DOMAINS_ALL, CODECS_ALL, 128, 4096, 1, 1080},
+	/* (4096 * 2304) / 256 */
+	{CAP_SECURE_MBS_PER_FRAME, DOMAINS_ALL, CODECS_ALL, 1, 36864, 1, 36864},
+	{CAP_SECURE_BITRATE, DOMAINS_ALL, CODECS_ALL, 1, 40000000, 1, 20000000},
 };
 
 /*
@@ -223,7 +229,6 @@
 	},
 };
 
-/* Update with kona */
 static struct msm_vidc_common_data kona_common_data[] = {
 	{
 		.key = "qcom,never-unload-fw",
@@ -231,7 +236,7 @@
 	},
 	{
 		.key = "qcom,sw-power-collapse",
-		.value = 0,
+		.value = 1,
 	},
 	{
 		.key = "qcom,domain-attr-non-fatal-faults",
@@ -251,19 +256,19 @@
 	{
 		.key = "qcom,max-hw-load",
 		.value = 3916800,       /*
-					 * 1920x1088/256 MBs@480fps. It is less
-					 * any other usecases (ex:
+					 * 1920x1088/256 MBs@480fps. It is more
+					 * than any other use case (ex:
 					 * 3840x2160@120fps, 4096x2160@96ps,
 					 * 7680x4320@30fps)
 					 */
 	},
 	{
 		.key = "qcom,max-hq-mbs-per-frame",
-		.value = 8160,
+		.value = 34560,		/* 4096x2160 */
 	},
 	{
-		.key = "qcom,max-hq-frames-per-sec",
-		.value = 60,
+		.key = "qcom,max-hq-mbs-per-sec",
+		.value = 1036800,	/* 4096x2160@30fps */
 	},
 	{
 		.key = "qcom,max-b-frame-mbs-per-frame",
@@ -275,11 +280,11 @@
 	},
 	{
 		.key = "qcom,power-collapse-delay",
-		.value = 15000,
+		.value = 1500,
 	},
 	{
 		.key = "qcom,hw-resp-timeout",
-		.value = 10000,
+		.value = 1000,
 	},
 	{
 		.key = "qcom,debug-timeout",
@@ -333,8 +338,8 @@
 		.value = 8160,
 	},
 	{
-		.key = "qcom,max-hq-frames-per-sec",
-		.value = 60,
+		.key = "qcom,max-hq-mbs-per-sec",
+		.value = 244800,  /* 1920 x 1088 @ 30 fps */
 	},
 	{
 		.key = "qcom,max-b-frame-mbs-per-frame",
@@ -404,8 +409,8 @@
 		.value = 8160,
 	},
 	{
-		.key = "qcom,max-hq-frames-per-sec",
-		.value = 60,
+		.key = "qcom,max-hq-mbs-per-sec",
+		.value = 244800,  /* 1920 x 1088 @ 30 fps */
 	},
 	{
 		.key = "qcom,max-b-frame-mbs-per-frame",
@@ -479,8 +484,8 @@
 		.value = 8160,
 	},
 	{
-		.key = "qcom,max-hq-frames-per-sec",
-		.value = 60,
+		.key = "qcom,max-hq-mbs-per-sec",
+		.value = 244800,  /* 1920 x 1088 @ 30 fps */
 	},
 	{
 		.key = "qcom,max-b-frame-mbs-per-frame",
@@ -534,8 +539,8 @@
 		.value = 8160,
 	},
 	{
-		.key = "qcom,max-hq-frames-per-sec",
-		.value = 60,
+		.key = "qcom,max-hq-mbs-per-sec",
+		.value = 244800,  /* 1920 x 1088 @ 30 fps */
 	},
 	{
 		.key = "qcom,max-b-frame-mbs-per-frame",
@@ -585,8 +590,8 @@
 		.value = 8160,
 	},
 	{
-		.key = "qcom,max-hq-frames-per-sec",
-		.value = 60,
+		.key = "qcom,max-hq-mbs-per-sec",
+		.value = 244800,  /* 1920 x 1088 @ 30 fps */
 	},
 	{
 		.key = "qcom,max-b-frame-mbs-per-frame",
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
index ce7d3a3..e738949 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
@@ -420,23 +420,12 @@
 		goto err_bus;
 	}
 
-	rc = of_property_read_string(dev->of_node, "qcom,bus-governor",
-			&bus->governor);
-	if (rc) {
-		rc = 0;
-		dprintk(VIDC_DBG,
-				"'qcom,bus-governor' not found, default to performance governor\n");
-		bus->governor = PERF_GOV;
-	}
+	rc = of_property_read_string(dev->of_node, "qcom,mode",
+			&bus->mode);
 
-	if (!strcmp(bus->governor, PERF_GOV))
+	if (!rc && !strcmp(bus->mode, PERF_GOV))
 		bus->is_prfm_gov_used = true;
 
-	if (of_find_property(dev->of_node, "operating-points-v2", NULL))
-		bus->has_freq_table = true;
-	else
-		bus->has_freq_table = false;
-
 	rc = of_property_read_u32_array(dev->of_node, "qcom,bus-range-kbps",
 			range, ARRAY_SIZE(range));
 	if (rc) {
@@ -452,8 +441,8 @@
 
 	buses->count++;
 	bus->dev = dev;
-	dprintk(VIDC_DBG, "Found bus %s [%d->%d] with governor %s\n",
-			bus->name, bus->master, bus->slave, bus->governor);
+	dprintk(VIDC_DBG, "Found bus %s [%d->%d] with mode %s\n",
+			bus->name, bus->master, bus->slave, bus->mode);
 err_bus:
 	return rc;
 }
@@ -785,8 +774,8 @@
 	res->max_hq_mbs_per_frame = find_key_value(platform_data,
 			"qcom,max-hq-mbs-per-frame");
 
-	res->max_hq_fps = find_key_value(platform_data,
-			"qcom,max-hq-frames-per-sec");
+	res->max_hq_mbs_per_sec = find_key_value(platform_data,
+			"qcom,max-hq-mbs-per-sec");
 
 	res->max_bframe_mbs_per_frame = find_key_value(platform_data,
 			"qcom,max-b-frame-mbs-per-frame");
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_resources.h b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
index 31b1df6..234ee9d 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_resources.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
@@ -6,7 +6,6 @@
 #ifndef __MSM_VIDC_RESOURCES_H__
 #define __MSM_VIDC_RESOURCES_H__
 
-#include <linux/devfreq.h>
 #include <linux/platform_device.h>
 #include "msm_vidc.h"
 #include <linux/soc/qcom/llcc-qcom.h>
@@ -94,13 +93,10 @@
 	int master;
 	int slave;
 	unsigned int range[2];
-	const char *governor;
 	struct device *dev;
-	struct devfreq_dev_profile devfreq_prof;
-	struct devfreq *devfreq;
 	struct msm_bus_client_handle *client;
 	bool is_prfm_gov_used;
-	bool has_freq_table;
+	const char *mode;
 };
 
 struct bus_set {
@@ -170,7 +166,7 @@
 	struct buffer_usage_set buffer_usage_set;
 	uint32_t max_load;
 	uint32_t max_hq_mbs_per_frame;
-	uint32_t max_hq_fps;
+	uint32_t max_hq_mbs_per_sec;
 	uint32_t max_bframe_mbs_per_frame;
 	uint32_t max_bframe_mbs_per_sec;
 	struct platform_device *pdev;
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index f0aeb37..95330d3 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -8,7 +8,6 @@
 #include <linux/clk/qcom.h>
 #include <linux/coresight-stm.h>
 #include <linux/delay.h>
-#include <linux/devfreq.h>
 #include <linux/hash.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
@@ -107,12 +106,18 @@
 static void clock_config_on_enable_iris1(struct venus_hfi_device *device);
 static int reset_ahb2axi_bridge(struct venus_hfi_device *device);
 static int __set_ubwc_config(struct venus_hfi_device *device);
+static void power_off_common(struct venus_hfi_device *device);
+static void power_off_iris2(struct venus_hfi_device *device);
+static void noc_error_info_common(struct venus_hfi_device *device);
+static void noc_error_info_iris2(struct venus_hfi_device *device);
 
 struct venus_hfi_vpu_ops vpu4_ops = {
 	.interrupt_init = interrupt_init_vpu4,
 	.setup_dsp_uc_memmap = NULL,
 	.clock_config_on_enable = NULL,
 	.reset_ahb2axi_bridge = NULL,
+	.power_off = power_off_common,
+	.noc_error_info = noc_error_info_common,
 };
 
 struct venus_hfi_vpu_ops iris1_ops = {
@@ -120,6 +125,8 @@
 	.setup_dsp_uc_memmap = setup_dsp_uc_memmap_iris1,
 	.clock_config_on_enable = clock_config_on_enable_iris1,
 	.reset_ahb2axi_bridge = reset_ahb2axi_bridge,
+	.power_off = power_off_common,
+	.noc_error_info = noc_error_info_common,
 };
 
 struct venus_hfi_vpu_ops iris2_ops = {
@@ -127,6 +134,8 @@
 	.setup_dsp_uc_memmap = NULL,
 	.clock_config_on_enable = NULL,
 	.reset_ahb2axi_bridge = reset_ahb2axi_bridge,
+	.power_off = power_off_iris2,
+	.noc_error_info = noc_error_info_iris2,
 };
 
 /**
@@ -974,84 +983,24 @@
 		dprintk(VIDC_ERR, "Failed to restore threshold values\n");
 }
 
-static int __devfreq_target(struct device *devfreq_dev,
-		unsigned long *freq, u32 flags)
+static int __vote_bandwidth(struct bus_info *bus,
+		unsigned long *freq)
 {
 	int rc = 0;
 	uint64_t ab = 0;
-	struct bus_info *bus = NULL, *temp = NULL;
-	struct venus_hfi_device *device = dev_get_drvdata(devfreq_dev);
 
-	venus_hfi_for_each_bus(device, temp) {
-		if (temp->dev == devfreq_dev) {
-			bus = temp;
-			break;
-		}
-	}
-
-	if (!bus) {
-		rc = -EBADHANDLE;
-		goto err_unknown_device;
-	}
-
-	/*
-	 * Clamp for all non zero frequencies. This clamp is necessary to stop
-	 * devfreq driver from spamming - Couldn't update frequency - logs, if
-	 * the scaled ab value is not part of the frequency table.
-	 */
 	if (*freq)
 		*freq = clamp_t(typeof(*freq), *freq, bus->range[0],
 				bus->range[1]);
 
-	/* we expect governors to provide values in kBps form, convert to Bps */
+	/* Bus Driver expects values in Bps */
 	ab = *freq * 1000;
-	rc = msm_bus_scale_update_bw(bus->client, ab, 0);
-	if (rc) {
-		dprintk(VIDC_ERR, "Failed voting bus %s to ab %llu\n: %d",
-				bus->name, ab, rc);
-		goto err_unknown_device;
-	}
-
 	dprintk(VIDC_PROF, "Voting bus %s to ab %llu\n", bus->name, ab);
+	rc = msm_bus_scale_update_bw(bus->client, ab, 0);
+	if (rc)
+		dprintk(VIDC_ERR, "Failed voting bus %s to ab %llu, rc=%d\n",
+				bus->name, ab, rc);
 
-	return 0;
-err_unknown_device:
-	return rc;
-}
-
-static int __devfreq_get_status(struct device *devfreq_dev,
-		struct devfreq_dev_status *stat)
-{
-	int rc = 0;
-	struct bus_info *bus = NULL, *temp = NULL;
-	struct venus_hfi_device *device = dev_get_drvdata(devfreq_dev);
-
-	venus_hfi_for_each_bus(device, temp) {
-		if (temp->dev == devfreq_dev) {
-			bus = temp;
-			break;
-		}
-	}
-
-	if (!bus) {
-		rc = -EBADHANDLE;
-		goto err_unknown_device;
-	}
-
-	*stat = (struct devfreq_dev_status) {
-		.private_data = &device->bus_vote,
-		/*
-		 * Put in dummy place holder values for upstream govs, our
-		 * custom gov only needs .private_data.  We should fill this in
-		 * properly if we can actually measure busy_time accurately
-		 * (which we can't at the moment)
-		 */
-		.total_time = 1,
-		.busy_time = 1,
-		.current_frequency = 0,
-	};
-
-err_unknown_device:
 	return rc;
 }
 
@@ -1059,18 +1008,19 @@
 {
 	int rc = 0;
 	struct bus_info *bus = NULL;
+	unsigned long freq = 0, zero = 0;
 
 	kfree(device->bus_vote.data);
 	device->bus_vote.data = NULL;
 	device->bus_vote.data_count = 0;
 
 	venus_hfi_for_each_bus(device, bus) {
-		unsigned long zero = 0;
-
-		if (!bus->is_prfm_gov_used)
-			rc = devfreq_suspend_device(bus->devfreq);
+		if (!bus->is_prfm_gov_used) {
+			freq = __calc_bw(bus, &device->bus_vote);
+			rc = __vote_bandwidth(bus, &freq);
+		}
 		else
-			rc = __devfreq_target(bus->dev, &zero, 0);
+			rc = __vote_bandwidth(bus, &zero);
 
 		if (rc)
 			goto err_unknown_device;
@@ -1086,6 +1036,7 @@
 	int rc = 0;
 	struct bus_info *bus = NULL;
 	struct vidc_bus_vote_data *new_data = NULL;
+	unsigned long freq = 0;
 
 	if (!num_data) {
 		dprintk(VIDC_DBG, "No vote data available\n");
@@ -1108,15 +1059,18 @@
 	device->bus_vote.data_count = num_data;
 
 	venus_hfi_for_each_bus(device, bus) {
-		if (bus && bus->devfreq) {
+		if (bus) {
 			if (!bus->is_prfm_gov_used) {
-				rc = devfreq_resume_device(bus->devfreq);
-				if (rc)
-					goto err_no_mem;
+				freq = __calc_bw(bus, &device->bus_vote);
 			} else {
-				bus->devfreq->nb.notifier_call(
-					&bus->devfreq->nb, 0, NULL);
+				freq = bus->range[1];
+				dprintk(VIDC_ERR, "%s %s perf Vote %u\n",
+						__func__, bus->name,
+						bus->range[1]);
 			}
+			rc = __vote_bandwidth(bus, &freq);
+		} else {
+			dprintk(VIDC_ERR, "No BUS to Vote\n");
 		}
 	}
 
@@ -3698,67 +3652,55 @@
 }
 
 static int __handle_reset_clk(struct msm_vidc_platform_resources *res,
-			enum reset_state state)
+			int reset_index, enum reset_state state)
 {
-	int rc = 0, i;
+	int rc = 0;
 	struct reset_control *rst;
 	struct reset_set *rst_set = &res->reset_set;
 
 	if (!rst_set->reset_tbl)
 		return 0;
 
-	dprintk(VIDC_DBG, "%s reset_state %d rst_set->count = %d\n",
-		__func__, state, rst_set->count);
+	rst = rst_set->reset_tbl[reset_index].rst;
+	dprintk(VIDC_DBG, "reset_clk: name %s reset_state %d rst %pK\n",
+		rst_set->reset_tbl[reset_index].name, state, rst);
 
-	for (i = 0; i < rst_set->count; i++) {
-		rst = rst_set->reset_tbl[i].rst;
-		switch (state) {
-		case INIT:
-			dprintk(VIDC_DBG, "%s reset_state name = %s %pK\n",
-				__func__, rst_set->reset_tbl[i].name, rst);
+	switch (state) {
+	case INIT:
+		if (rst)
+			goto skip_reset_init;
 
-			if (rst)
-				continue;
-			rst = devm_reset_control_get(&res->pdev->dev,
-				rst_set->reset_tbl[i].name);
-			if (IS_ERR(rst))
-				rc = PTR_ERR(rst);
+		rst = devm_reset_control_get(&res->pdev->dev,
+				rst_set->reset_tbl[reset_index].name);
+		if (IS_ERR(rst))
+			rc = PTR_ERR(rst);
 
-			rst_set->reset_tbl[i].rst = rst;
-			break;
-		case ASSERT:
-			if (!rst) {
-				dprintk(VIDC_DBG,
-					"%s reset_state name = %s %pK\n",
-					__func__, rst_set->reset_tbl[i].name,
-					 rst);
-				 rc = PTR_ERR(rst);
-				goto failed_to_reset;
-			}
-
-			rc = reset_control_assert(rst);
-			break;
-		case DEASSERT:
-			if (!rst) {
-				dprintk(VIDC_DBG,
-					"%s reset_state name = %s %pK\n",
-					__func__, rst_set->reset_tbl[i].name,
-					 rst);
-				 rc = PTR_ERR(rst);
-				goto failed_to_reset;
-			}
-			rc = reset_control_deassert(rst);
-			break;
-		default:
-			dprintk(VIDC_ERR, "Invalid reset request\n");
+		rst_set->reset_tbl[reset_index].rst = rst;
+		break;
+	case ASSERT:
+		if (!rst) {
+			rc = PTR_ERR(rst);
+			goto failed_to_reset;
 		}
 
+		rc = reset_control_assert(rst);
+		break;
+	case DEASSERT:
+		if (!rst) {
+			rc = PTR_ERR(rst);
+			goto failed_to_reset;
+		}
+		rc = reset_control_deassert(rst);
+		break;
+	default:
+		dprintk(VIDC_ERR, "Invalid reset request\n");
 		if (rc)
 			goto failed_to_reset;
 	}
 
 	return 0;
 
+skip_reset_init:
 failed_to_reset:
 	return rc;
 }
@@ -3794,29 +3736,37 @@
 
 static int reset_ahb2axi_bridge(struct venus_hfi_device *device)
 {
-	int rc;
+	int rc, i;
 
 	if (!device) {
 		dprintk(VIDC_ERR, "NULL device\n");
-		return -EINVAL;
+		rc = -EINVAL;
+		goto failed_to_reset;
 	}
 
-	rc = __handle_reset_clk(device->res, ASSERT);
-	if (rc) {
-		dprintk(VIDC_ERR, "failed to assert reset clocks\n");
-		return rc;
-	}
+	for (i = 0; i < device->res->reset_set.count; i++) {
+		rc = __handle_reset_clk(device->res, i, ASSERT);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"failed to assert reset clocks\n");
+			goto failed_to_reset;
+		}
 
-	/* wait for deassert */
-	usleep_range(150, 250);
+		/* wait for deassert */
+		usleep_range(150, 250);
 
-	rc = __handle_reset_clk(device->res, DEASSERT);
-	if (rc) {
-		dprintk(VIDC_ERR, "failed to deassert reset clocks\n");
-		return rc;
+		rc = __handle_reset_clk(device->res, i, DEASSERT);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"failed to deassert reset clocks\n");
+			goto failed_to_reset;
+		}
 	}
 
 	return 0;
+
+failed_to_reset:
+	return rc;
 }
 
 static inline int __prepare_enable_clks(struct venus_hfi_device *device)
@@ -3885,10 +3835,6 @@
 	device->bus_vote = DEFAULT_BUS_VOTE;
 
 	venus_hfi_for_each_bus_reverse(device, bus) {
-		devfreq_remove_device(bus->devfreq);
-		bus->devfreq = NULL;
-		dev_set_drvdata(bus->dev, NULL);
-
 		msm_bus_scale_unregister(bus->client);
 		bus->client = NULL;
 	}
@@ -3903,41 +3849,14 @@
 		return -EINVAL;
 
 	venus_hfi_for_each_bus(device, bus) {
-		struct devfreq_dev_profile profile = {
-			.initial_freq = 0,
-			.polling_ms = INT_MAX,
-			.freq_table = NULL,
-			.max_state = 0,
-			.target = __devfreq_target,
-			.get_dev_status = __devfreq_get_status,
-			.exit = NULL,
-			/*.get_cur_greq = NULL,*/
-		};
-
-		if (!strcmp(bus->governor, "msm-vidc-llcc")) {
+		if (!strcmp(bus->mode, "msm-vidc-llcc")) {
 			if (msm_vidc_syscache_disable) {
 				dprintk(VIDC_DBG,
 					 "Skipping LLC bus init %s: %s\n",
-				bus->name, bus->governor);
+				bus->name, bus->mode);
 				continue;
 			}
 		}
-
-		/*
-		 * This is stupid, but there's no other easy way to get a hold
-		 * of struct bus_info in venus_hfi_devfreq_*()
-		 */
-		WARN(dev_get_drvdata(bus->dev), "%s's drvdata already set\n",
-				dev_name(bus->dev));
-		dev_set_drvdata(bus->dev, device);
-
-		if (bus->has_freq_table) {
-			rc = dev_pm_opp_of_add_table(bus->dev);
-			if (rc)
-				dprintk(VIDC_ERR, "Failed to add %s OPP table",
-						bus->name);
-		}
-
 		bus->client = msm_bus_scale_register(bus->master, bus->slave,
 				bus->name, false);
 		if (IS_ERR_OR_NULL(bus->client)) {
@@ -3948,25 +3867,6 @@
 			bus->client = NULL;
 			goto err_add_dev;
 		}
-
-		bus->devfreq_prof = profile;
-		bus->devfreq = devfreq_add_device(bus->dev,
-				&bus->devfreq_prof, bus->governor, NULL);
-		if (IS_ERR_OR_NULL(bus->devfreq)) {
-			rc = PTR_ERR(bus->devfreq) ?
-				PTR_ERR(bus->devfreq) : -EBADHANDLE;
-			dprintk(VIDC_ERR,
-					"Failed to add devfreq device for bus %s and governor %s: %d\n",
-					bus->name, bus->governor, rc);
-			bus->devfreq = NULL;
-			goto err_add_dev;
-		}
-
-		/*
-		 * Devfreq starts monitoring immediately, since we are just
-		 * initializing stuff at this point, force it to suspend
-		 */
-		devfreq_suspend_device(bus->devfreq);
 	}
 
 	return 0;
@@ -4087,7 +3987,7 @@
 static int __init_resources(struct venus_hfi_device *device,
 				struct msm_vidc_platform_resources *res)
 {
-	int rc = 0;
+	int i, rc = 0;
 
 	rc = __init_regulators(device);
 	if (rc) {
@@ -4102,11 +4002,13 @@
 		goto err_init_clocks;
 	}
 
-	rc = __handle_reset_clk(res, INIT);
-	if (rc) {
-		dprintk(VIDC_ERR, "Failed to init reset clocks\n");
-		rc = -ENODEV;
-		goto err_init_reset_clk;
+	for (i = 0; i < device->res->reset_set.count; i++) {
+		rc = __handle_reset_clk(res, i, INIT);
+		if (rc) {
+			dprintk(VIDC_ERR, "Failed to init reset clocks\n");
+			rc = -ENODEV;
+			goto err_init_reset_clk;
+		}
 	}
 
 	rc = __init_bus(device);
@@ -4598,7 +4500,7 @@
 	return rc;
 }
 
-static void __venus_power_off(struct venus_hfi_device *device)
+static void power_off_common(struct venus_hfi_device *device)
 {
 	if (!device->power_enabled)
 		return;
@@ -4619,6 +4521,93 @@
 	device->power_enabled = false;
 }
 
+static void power_off_iris2(struct venus_hfi_device *device)
+{
+	u32 lpi_status, reg_status = 0, count = 0, max_count = 10;
+
+	if (!device->power_enabled)
+		return;
+
+	if (!(device->intr_status & VIDC_WRAPPER_INTR_STATUS_A2HWD_BMSK))
+		disable_irq_nosync(device->hal_data->irq);
+	device->intr_status = 0;
+
+	/* HPG 6.1.2 Step 1 */
+	__write_register(device, VIDC_CPU_CS_X2RPMh, 0x3);
+
+	/* HPG 6.1.2 Step 2, noc to low power */
+	__write_register(device, VIDC_AON_WRAPPER_MVP_NOC_LPI_CONTROL, 0x1);
+	while (!reg_status && count < max_count) {
+		lpi_status =
+			 __read_register(device,
+				VIDC_AON_WRAPPER_MVP_NOC_LPI_STATUS);
+		reg_status = lpi_status & BIT(0);
+		dprintk(VIDC_DBG,
+			"Noc: lpi_status %d noc_status %d (count %d)\n",
+			lpi_status, reg_status, count);
+		usleep_range(50, 100);
+		count++;
+	}
+	if (count == max_count) {
+		dprintk(VIDC_ERR,
+			"NOC not in qaccept status %d\n", reg_status);
+	}
+
+	/* HPG 6.1.2 Step 3, debug bridge to low power */
+	__write_register(device,
+		VIDC_WRAPPER_DEBUG_BRIDGE_LPI_CONTROL, 0x7);
+	reg_status = 0;
+	count = 0;
+	while ((reg_status != 0x7) && count < max_count) {
+		lpi_status = __read_register(device,
+				 VIDC_WRAPPER_DEBUG_BRIDGE_LPI_STATUS);
+		reg_status = lpi_status & 0x7;
+		dprintk(VIDC_DBG,
+			"DBLP Set : lpi_status %d reg_status %d (count %d)\n",
+			lpi_status, reg_status, count);
+		usleep_range(50, 100);
+		count++;
+	}
+	if (count == max_count) {
+		dprintk(VIDC_ERR,
+			"DBLP Set: status %d\n", reg_status);
+	}
+
+	/* HPG 6.1.2 Step 4, debug bridge to lpi release */
+	__write_register(device,
+		VIDC_WRAPPER_DEBUG_BRIDGE_LPI_CONTROL, 0x0);
+	lpi_status = 0x1;
+	count = 0;
+	while (lpi_status && count < max_count) {
+		lpi_status = __read_register(device,
+				 VIDC_WRAPPER_DEBUG_BRIDGE_LPI_STATUS);
+		dprintk(VIDC_DBG,
+			"DBLP Release: lpi_status %d(count %d)\n",
+			lpi_status, count);
+		usleep_range(50, 100);
+		count++;
+	}
+	if (count == max_count) {
+		dprintk(VIDC_ERR,
+			"DBLP Release: lpi_status %d\n", lpi_status);
+	}
+
+	/* HPG 6.1.2 Step 6 */
+	__disable_unprepare_clks(device);
+
+	/* HPG 6.1.2 Step 7 & 8 */
+	if (call_venus_op(device, reset_ahb2axi_bridge, device))
+		dprintk(VIDC_ERR, "Failed to reset ahb2axi\n");
+
+	/* HPG 6.1.2 Step 5 */
+	if (__disable_regulators(device))
+		dprintk(VIDC_WARN, "Failed to disable regulators\n");
+
+	if (__unvote_buses(device))
+		dprintk(VIDC_WARN, "Failed to unvote for buses\n");
+	device->power_enabled = false;
+}
+
 static inline int __suspend(struct venus_hfi_device *device)
 {
 	int rc = 0;
@@ -4645,7 +4634,7 @@
 
 	__disable_subcaches(device);
 
-	__venus_power_off(device);
+	call_venus_op(device, power_off, device);
 	dprintk(VIDC_PROF, "Venus power off\n");
 	return rc;
 
@@ -4720,7 +4709,7 @@
 err_reset_core:
 	__tzbsp_set_video_state(TZBSP_VIDEO_STATE_SUSPEND);
 err_set_video_state:
-	__venus_power_off(device);
+	call_venus_op(device, power_off, device);
 err_venus_power_on:
 	dprintk(VIDC_ERR, "Failed to resume from power collapse\n");
 	return rc;
@@ -4780,7 +4769,7 @@
 		subsystem_put(device->resources.fw.cookie);
 	device->resources.fw.cookie = NULL;
 fail_load_fw:
-	__venus_power_off(device);
+	call_venus_op(device, power_off, device);
 fail_venus_power_on:
 fail_init_pkt:
 	__deinit_resources(device);
@@ -4801,7 +4790,7 @@
 	__vote_buses(device, NULL, 0);
 	subsystem_put(device->resources.fw.cookie);
 	__interface_queues_release(device);
-	__venus_power_off(device);
+	call_venus_op(device, power_off, device);
 	device->resources.fw.cookie = NULL;
 	__deinit_resources(device);
 
@@ -4934,10 +4923,54 @@
 	dprintk(VIDC_ERR, "CORE%d_NOC_ERR_ERRLOG3_HIGH: %#x\n", core_num, val);
 }
 
+static void noc_error_info_common(struct venus_hfi_device *device)
+{
+	const u32 core0 = 0, core1 = 1;
+
+	if (__read_register(device, VCODEC_CORE0_VIDEO_NOC_BASE_OFFS +
+			VCODEC_COREX_VIDEO_NOC_ERR_ERRVLD_LOW_OFFS))
+		__noc_error_info(device, core0);
+
+	if (__read_register(device, VCODEC_CORE1_VIDEO_NOC_BASE_OFFS +
+			VCODEC_COREX_VIDEO_NOC_ERR_ERRVLD_LOW_OFFS))
+		__noc_error_info(device, core1);
+}
+
+static void noc_error_info_iris2(struct venus_hfi_device *device)
+{
+	u32 val = 0;
+
+	val = __read_register(device, VCODEC_NOC_ERL_MAIN_SWID_LOW);
+	dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_SWID_LOW:     %#x\n", val);
+	val = __read_register(device, VCODEC_NOC_ERL_MAIN_SWID_HIGH);
+	dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_SWID_HIGH:     %#x\n", val);
+	val = __read_register(device, VCODEC_NOC_ERL_MAIN_MAINCTL_LOW);
+	dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_MAINCTL_LOW:     %#x\n", val);
+	val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRVLD_LOW);
+	dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_ERRVLD_LOW:     %#x\n", val);
+	val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRCLR_LOW);
+	dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_ERRCLR_LOW:     %#x\n", val);
+	val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRLOG0_LOW);
+	dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_ERRLOG0_LOW:     %#x\n", val);
+	val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRLOG0_HIGH);
+	dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_ERRLOG0_HIGH:     %#x\n", val);
+	val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRLOG1_LOW);
+	dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_ERRLOG1_LOW:     %#x\n", val);
+	val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRLOG1_HIGH);
+	dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_ERRLOG1_HIGH:     %#x\n", val);
+	val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRLOG2_LOW);
+	dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_ERRLOG2_LOW:     %#x\n", val);
+	val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRLOG2_HIGH);
+	dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_ERRLOG2_HIGH:     %#x\n", val);
+	val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRLOG3_LOW);
+	dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_ERRLOG3_LOW:     %#x\n", val);
+	val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRLOG3_HIGH);
+	dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_ERRLOG3_HIGH:     %#x\n", val);
+}
+
 static int venus_hfi_noc_error_info(void *dev)
 {
 	struct venus_hfi_device *device;
-	const u32 core0 = 0, core1 = 1;
 
 	if (!dev) {
 		dprintk(VIDC_ERR, "%s: null device\n", __func__);
@@ -4948,13 +4981,7 @@
 	mutex_lock(&device->lock);
 	dprintk(VIDC_ERR, "%s: non error information\n", __func__);
 
-	if (__read_register(device, VCODEC_CORE0_VIDEO_NOC_BASE_OFFS +
-			VCODEC_COREX_VIDEO_NOC_ERR_ERRVLD_LOW_OFFS))
-		__noc_error_info(device, core0);
-
-	if (__read_register(device, VCODEC_CORE1_VIDEO_NOC_BASE_OFFS +
-			VCODEC_COREX_VIDEO_NOC_ERR_ERRVLD_LOW_OFFS))
-		__noc_error_info(device, core1);
+	call_venus_op(device, noc_error_info, device);
 
 	mutex_unlock(&device->lock);
 
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.h b/drivers/media/platform/msm/vidc/venus_hfi.h
index 6a89b19..a361a23 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.h
+++ b/drivers/media/platform/msm/vidc/venus_hfi.h
@@ -42,6 +42,9 @@
 #define VIDC_MAX_SUBCACHES 4
 #define VIDC_MAX_SUBCACHE_SIZE 52
 
+extern unsigned long __calc_bw(struct bus_info *bus,
+			struct msm_vidc_gov_data *vidc_data);
+
 struct hfi_queue_table_header {
 	u32 qtbl_version;
 	u32 qtbl_size;
@@ -237,6 +240,8 @@
 	void (*setup_dsp_uc_memmap)(struct venus_hfi_device *device);
 	void (*clock_config_on_enable)(struct venus_hfi_device *device);
 	int (*reset_ahb2axi_bridge)(struct venus_hfi_device *device);
+	void (*power_off)(struct venus_hfi_device *device);
+	void (*noc_error_info)(struct venus_hfi_device *device);
 };
 
 struct venus_hfi_device {
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index 93a164c..4406c84 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -272,6 +272,10 @@
 	CAP_MAX_VIDEOCORES,
 	CAP_MAX_WORKMODES,
 	CAP_UBWC_CR_STATS,
+	CAP_SECURE_FRAME_WIDTH,
+	CAP_SECURE_FRAME_HEIGHT,
+	CAP_SECURE_MBS_PER_FRAME,
+	CAP_SECURE_BITRATE,
 	CAP_MAX,
 };
 
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index fadc48f..49c0856 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -346,6 +346,8 @@
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x036)
 #define  HFI_PROPERTY_PARAM_VENC_ADAPTIVE_B \
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x037)
+#define  HFI_PROPERTY_PARAM_VENC_BITRATE_SAVINGS \
+	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x038)
 
 #define HFI_PROPERTY_CONFIG_VENC_COMMON_START				\
 	(HFI_DOMAIN_BASE_VENC + HFI_ARCH_COMMON_OFFSET + 0x6000)
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_io.h b/drivers/media/platform/msm/vidc/vidc_hfi_io.h
index eb47f68..847c75f 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_io.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_io.h
@@ -12,6 +12,7 @@
 
 #define VIDC_CPU_BASE_OFFS			0x000A0000
 #define VIDEO_CC_BASE_OFFS			0x000F0000
+#define VIDC_AON_BASE_OFFS			0x000E0000
 #define VIDC_CPU_CS_BASE_OFFS		(VIDC_CPU_BASE_OFFS)
 #define VIDC_CPU_IC_BASE_OFFS		(VIDC_CPU_BASE_OFFS)
 
@@ -107,6 +108,8 @@
 #define VIDC_WRAPPER_CPU_CGC_DIS	(VIDC_WRAPPER_BASE_OFFS + 0x2010)
 #define VIDC_WRAPPER_CPU_STATUS	(VIDC_WRAPPER_BASE_OFFS + 0x2014)
 
+#define VIDC_WRAPPER_DEBUG_BRIDGE_LPI_CONTROL	(VIDC_WRAPPER_BASE_OFFS + 0x54)
+#define VIDC_WRAPPER_DEBUG_BRIDGE_LPI_STATUS	(VIDC_WRAPPER_BASE_OFFS + 0x58)
 /*
  * --------------------------------------------------------------------------
  * MODULE: vidc_tz_wrapper
@@ -172,7 +175,7 @@
 
 /*
  * --------------------------------------------------------------------------
- * MODULE: vcodec noc error log registers
+ * MODULE: vcodec noc error log registers (iris1)
  * --------------------------------------------------------------------------
  */
 #define VCODEC_CORE0_VIDEO_NOC_BASE_OFFS		0x00004000
@@ -191,4 +194,27 @@
 #define VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG3_LOW_OFFS	0x0538
 #define VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG3_HIGH_OFFS	0x053C
 
+#define VIDC_AON_WRAPPER_MVP_NOC_LPI_CONTROL	(VIDC_AON_BASE_OFFS)
+#define VIDC_AON_WRAPPER_MVP_NOC_LPI_STATUS	(VIDC_AON_BASE_OFFS + 0x4)
+
+/*
+ * --------------------------------------------------------------------------
+ * MODULE: vcodec noc error log registers (iris2)
+ * --------------------------------------------------------------------------
+ */
+#define VCODEC_NOC_VIDEO_A_NOC_BASE_OFFS		0x00010000
+#define VCODEC_NOC_ERL_MAIN_SWID_LOW			0x00011200
+#define VCODEC_NOC_ERL_MAIN_SWID_HIGH			0x00011204
+#define VCODEC_NOC_ERL_MAIN_MAINCTL_LOW			0x00011208
+#define VCODEC_NOC_ERL_MAIN_ERRVLD_LOW			0x00011210
+#define VCODEC_NOC_ERL_MAIN_ERRCLR_LOW			0x00011218
+#define VCODEC_NOC_ERL_MAIN_ERRLOG0_LOW			0x00011220
+#define VCODEC_NOC_ERL_MAIN_ERRLOG0_HIGH		0x00011224
+#define VCODEC_NOC_ERL_MAIN_ERRLOG1_LOW			0x00011228
+#define VCODEC_NOC_ERL_MAIN_ERRLOG1_HIGH		0x0001122C
+#define VCODEC_NOC_ERL_MAIN_ERRLOG2_LOW			0x00011230
+#define VCODEC_NOC_ERL_MAIN_ERRLOG2_HIGH		0x00011234
+#define VCODEC_NOC_ERL_MAIN_ERRLOG3_LOW			0x00011238
+#define VCODEC_NOC_ERL_MAIN_ERRLOG3_HIGH		0x0001123C
+
 #endif
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 361abbc..6f1fd40 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -1065,11 +1065,19 @@
 			return -EINVAL;
 		}
 
-		/* Make sure the terminal type MSB is not null, otherwise it
-		 * could be confused with a unit.
+		/*
+		 * Reject invalid terminal types that would cause issues:
+		 *
+		 * - The high byte must be non-zero, otherwise it would be
+		 *   confused with a unit.
+		 *
+		 * - Bit 15 must be 0, as we use it internally as a terminal
+		 *   direction flag.
+		 *
+		 * Other unknown types are accepted.
 		 */
 		type = get_unaligned_le16(&buffer[4]);
-		if ((type & 0xff00) == 0) {
+		if ((type & 0x7f00) == 0 || (type & 0x8000) != 0) {
 			uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol "
 				"interface %d INPUT_TERMINAL %d has invalid "
 				"type 0x%04x, skipping\n", udev->devnum,
diff --git a/drivers/misc/hdcp_qseecom.c b/drivers/misc/hdcp_qseecom.c
index 53947b5..6de29bc 100644
--- a/drivers/misc/hdcp_qseecom.c
+++ b/drivers/misc/hdcp_qseecom.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt)	"[hdcp-qseecom] %s: " fmt, __func__
@@ -1044,11 +1044,7 @@
 	}
 
 	rc = handle->tx_init(handle);
-	if (rc)
-		goto error;
 
-	if (!handle->legacy_app)
-		rc = hdcp2_app_start_auth(handle);
 error:
 	return rc;
 }
@@ -1188,6 +1184,7 @@
 	pr_err("failed, rc=%d\n", rc);
 	return rc;
 }
+EXPORT_SYMBOL(hdcp2_force_encryption);
 
 static int hdcp2_app_query_stream(struct hdcp2_handle *handle)
 {
@@ -1236,6 +1233,9 @@
 	case HDCP2_CMD_START:
 		rc = hdcp2_app_start(handle);
 		break;
+	case HDCP2_CMD_START_AUTH:
+		rc = hdcp2_app_start_auth(handle);
+		break;
 	case HDCP2_CMD_PROCESS_MSG:
 		rc = hdcp2_app_process_msg(handle);
 		break;
@@ -1268,6 +1268,7 @@
 error:
 	return rc;
 }
+EXPORT_SYMBOL(hdcp2_app_comm);
 
 static int hdcp2_open_stream_helper(struct hdcp2_handle *handle,
 		uint8_t vc_payload_id,
@@ -1322,6 +1323,7 @@
 	return hdcp2_open_stream_helper(handle, vc_payload_id, stream_number,
 		stream_id);
 }
+EXPORT_SYMBOL(hdcp2_open_stream);
 
 static int hdcp2_close_stream_helper(struct hdcp2_handle *handle,
 		uint32_t stream_id)
@@ -1368,6 +1370,7 @@
 
 	return hdcp2_close_stream_helper(handle, stream_id);
 }
+EXPORT_SYMBOL(hdcp2_close_stream);
 
 void *hdcp2_init(u32 device_type)
 {
@@ -1382,11 +1385,13 @@
 error:
 	return handle;
 }
+EXPORT_SYMBOL(hdcp2_init);
 
 void hdcp2_deinit(void *ctx)
 {
 	kzfree(ctx);
 }
+EXPORT_SYMBOL(hdcp2_deinit);
 
 void *hdcp1_init(void)
 {
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 413308a..ea76572 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -97,7 +97,7 @@
 	if (!data)
 		return;
 
-	if (cmd->error || data->error ||
+	if ((cmd && cmd->error) || data->error ||
 	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
 		return;
 
diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c
index 159270e..a8af682 100644
--- a/drivers/mmc/host/cqhci.c
+++ b/drivers/mmc/host/cqhci.c
@@ -201,7 +201,7 @@
 	cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;
 
 	cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
-		(cq_host->num_slots - 1);
+		cq_host->mmc->cqe_qdepth;
 
 	pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
 		 mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
@@ -217,12 +217,21 @@
 						 cq_host->desc_size,
 						 &cq_host->desc_dma_base,
 						 GFP_KERNEL);
+	if (!cq_host->desc_base)
+		return -ENOMEM;
+
 	cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
 					      cq_host->data_size,
 					      &cq_host->trans_desc_dma_base,
 					      GFP_KERNEL);
-	if (!cq_host->desc_base || !cq_host->trans_desc_base)
+	if (!cq_host->trans_desc_base) {
+		dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
+				   cq_host->desc_base,
+				   cq_host->desc_dma_base);
+		cq_host->desc_base = NULL;
+		cq_host->desc_dma_base = 0;
 		return -ENOMEM;
+	}
 
 	pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
 		 mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 476e53d..67f6bd2 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1447,6 +1447,7 @@
 		mmc->caps &= ~MMC_CAP_NEEDS_POLL;
 		mmc_gpiod_request_cd_irq(mmc);
 	}
+	mmc_detect_change(mmc, 0);
 
 	if (host->pdata && host->pdata->flags & MMC_SPI_USE_RO_GPIO) {
 		has_ro = true;
diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
index 5389c48..c3d63ed 100644
--- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
@@ -68,6 +68,7 @@
 	.scc_offset	= 0x0300,
 	.taps		= rcar_gen2_scc_taps,
 	.taps_num	= ARRAY_SIZE(rcar_gen2_scc_taps),
+	.max_blk_count  = 0xffffffff,
 };
 
 /* Definitions for sampling clocks */
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index f44e490..753973d 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -1097,11 +1097,12 @@
 		writel(readl(host->ioaddr + SDHCI_HOST_CONTROL)
 			| ESDHC_BURST_LEN_EN_INCR,
 			host->ioaddr + SDHCI_HOST_CONTROL);
+
 		/*
-		* erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
-		* TO1.1, it's harmless for MX6SL
-		*/
-		writel(readl(host->ioaddr + 0x6c) | BIT(7),
+		 * erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
+		 * TO1.1, it's harmless for MX6SL
+		 */
+		writel(readl(host->ioaddr + 0x6c) & ~BIT(7),
 			host->ioaddr + 0x6c);
 
 		/* disable DLL_CTRL delay line settings */
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 5d141f7..7c40a7e 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -279,6 +279,11 @@
 	iowrite16(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
 }
 
+static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
+{
+	iowrite32(val, host->ctl + (addr << host->bus_shift));
+}
+
 static inline void sd_ctrl_write32_rep(struct tmio_mmc_host *host, int addr,
 				       const u32 *buf, int count)
 {
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 261b4d6..7d13ca9 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -46,6 +46,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/mmc/sdio.h>
 #include <linux/scatterlist.h>
+#include <linux/sizes.h>
 #include <linux/spinlock.h>
 #include <linux/swiotlb.h>
 #include <linux/workqueue.h>
@@ -703,7 +704,7 @@
 	return false;
 }
 
-static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
+static bool __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
 {
 	struct mmc_host *mmc = host->mmc;
 	struct tmio_mmc_data *pdata = host->pdata;
@@ -711,7 +712,7 @@
 	unsigned int sdio_status;
 
 	if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
-		return;
+		return false;
 
 	status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
 	ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask;
@@ -724,6 +725,8 @@
 
 	if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
 		mmc_signal_sdio_irq(mmc);
+
+	return ireg;
 }
 
 irqreturn_t tmio_mmc_irq(int irq, void *devid)
@@ -742,9 +745,10 @@
 	if (__tmio_mmc_sdcard_irq(host, ireg, status))
 		return IRQ_HANDLED;
 
-	__tmio_mmc_sdio_irq(host);
+	if (__tmio_mmc_sdio_irq(host))
+		return IRQ_HANDLED;
 
-	return IRQ_HANDLED;
+	return IRQ_NONE;
 }
 EXPORT_SYMBOL_GPL(tmio_mmc_irq);
 
@@ -774,7 +778,10 @@
 
 	/* Set transfer length / blocksize */
 	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
-	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
+	if (host->mmc->max_blk_count >= SZ_64K)
+		sd_ctrl_write32(host, CTL_XFER_BLK_COUNT, data->blocks);
+	else
+		sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
 
 	tmio_mmc_start_dma(host, data);
 
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index a6fcc5c..b2c42ca 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1171,29 +1171,22 @@
 		}
 	}
 
-	/* Link-local multicast packets should be passed to the
-	 * stack on the link they arrive as well as pass them to the
-	 * bond-master device. These packets are mostly usable when
-	 * stack receives it with the link on which they arrive
-	 * (e.g. LLDP) they also must be available on master. Some of
-	 * the use cases include (but are not limited to): LLDP agents
-	 * that must be able to operate both on enslaved interfaces as
-	 * well as on bonds themselves; linux bridges that must be able
-	 * to process/pass BPDUs from attached bonds when any kind of
-	 * STP version is enabled on the network.
+	/*
+	 * For packets determined by bond_should_deliver_exact_match() call to
+	 * be suppressed we want to make an exception for link-local packets.
+	 * This is necessary for e.g. LLDP daemons to be able to monitor
+	 * inactive slave links without being forced to bind to them
+	 * explicitly.
+	 *
+	 * At the same time, packets that are passed to the bonding master
+	 * (including link-local ones) can have their originating interface
+	 * determined via PACKET_ORIGDEV socket option.
 	 */
-	if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) {
-		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
-
-		if (nskb) {
-			nskb->dev = bond->dev;
-			nskb->queue_mapping = 0;
-			netif_rx(nskb);
-		}
-		return RX_HANDLER_PASS;
-	}
-	if (bond_should_deliver_exact_match(skb, slave, bond))
+	if (bond_should_deliver_exact_match(skb, slave, bond)) {
+		if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
+			return RX_HANDLER_PASS;
 		return RX_HANDLER_EXACT;
+	}
 
 	skb->dev = bond->dev;
 
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 9f697a5..c078c79 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -884,7 +884,7 @@
 	default:
 		return U64_MAX;
 	}
-	value = (((u64)high) << 16) | low;
+	value = (((u64)high) << 32) | low;
 	return value;
 }
 
@@ -3070,7 +3070,7 @@
 	.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
 	.port_link_state = mv88e6352_port_link_state,
 	.port_get_cmode = mv88e6185_port_get_cmode,
-	.stats_snapshot = mv88e6320_g1_stats_snapshot,
+	.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
 	.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
 	.stats_get_sset_count = mv88e6095_stats_get_sset_count,
 	.stats_get_strings = mv88e6095_stats_get_strings,
@@ -4188,7 +4188,7 @@
 		.name = "Marvell 88E6190",
 		.num_databases = 4096,
 		.num_ports = 11,	/* 10 + Z80 */
-		.num_internal_phys = 11,
+		.num_internal_phys = 9,
 		.num_gpio = 16,
 		.max_vid = 8191,
 		.port_base_addr = 0x0,
@@ -4211,7 +4211,7 @@
 		.name = "Marvell 88E6190X",
 		.num_databases = 4096,
 		.num_ports = 11,	/* 10 + Z80 */
-		.num_internal_phys = 11,
+		.num_internal_phys = 9,
 		.num_gpio = 16,
 		.max_vid = 8191,
 		.port_base_addr = 0x0,
@@ -4234,7 +4234,7 @@
 		.name = "Marvell 88E6191",
 		.num_databases = 4096,
 		.num_ports = 11,	/* 10 + Z80 */
-		.num_internal_phys = 11,
+		.num_internal_phys = 9,
 		.max_vid = 8191,
 		.port_base_addr = 0x0,
 		.phy_base_addr = 0x0,
@@ -4281,7 +4281,7 @@
 		.name = "Marvell 88E6290",
 		.num_databases = 4096,
 		.num_ports = 11,	/* 10 + Z80 */
-		.num_internal_phys = 11,
+		.num_internal_phys = 9,
 		.num_gpio = 16,
 		.max_vid = 8191,
 		.port_base_addr = 0x0,
@@ -4443,7 +4443,7 @@
 		.name = "Marvell 88E6390",
 		.num_databases = 4096,
 		.num_ports = 11,	/* 10 + Z80 */
-		.num_internal_phys = 11,
+		.num_internal_phys = 9,
 		.num_gpio = 16,
 		.max_vid = 8191,
 		.port_base_addr = 0x0,
@@ -4466,7 +4466,7 @@
 		.name = "Marvell 88E6390X",
 		.num_databases = 4096,
 		.num_ports = 11,	/* 10 + Z80 */
-		.num_internal_phys = 11,
+		.num_internal_phys = 9,
 		.num_gpio = 16,
 		.max_vid = 8191,
 		.port_base_addr = 0x0,
@@ -4561,6 +4561,14 @@
 	return 0;
 }
 
+static void mv88e6xxx_ports_cmode_init(struct mv88e6xxx_chip *chip)
+{
+	int i;
+
+	for (i = 0; i < mv88e6xxx_num_ports(chip); i++)
+		chip->ports[i].cmode = MV88E6XXX_PORT_STS_CMODE_INVALID;
+}
+
 static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds,
 							int port)
 {
@@ -4597,6 +4605,8 @@
 	if (err)
 		goto free;
 
+	mv88e6xxx_ports_cmode_init(chip);
+
 	mutex_lock(&chip->reg_lock);
 	err = mv88e6xxx_switch_reset(chip);
 	mutex_unlock(&chip->reg_lock);
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 9294584..7fffce7 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -190,7 +190,7 @@
 		/* normal duplex detection */
 		break;
 	default:
-		return -EINVAL;
+		return -EOPNOTSUPP;
 	}
 
 	err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_MAC_CTL, reg);
@@ -374,6 +374,10 @@
 		cmode = 0;
 	}
 
+	/* cmode doesn't change, nothing to do for us */
+	if (cmode == chip->ports[port].cmode)
+		return 0;
+
 	lane = mv88e6390x_serdes_get_lane(chip, port);
 	if (lane < 0)
 		return lane;
@@ -384,7 +388,7 @@
 			return err;
 	}
 
-	err = mv88e6390_serdes_power(chip, port, false);
+	err = mv88e6390x_serdes_power(chip, port, false);
 	if (err)
 		return err;
 
@@ -400,7 +404,7 @@
 		if (err)
 			return err;
 
-		err = mv88e6390_serdes_power(chip, port, true);
+		err = mv88e6390x_serdes_power(chip, port, true);
 		if (err)
 			return err;
 
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index b319100..95b59f5 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -52,6 +52,7 @@
 #define MV88E6185_PORT_STS_CMODE_1000BASE_X	0x0005
 #define MV88E6185_PORT_STS_CMODE_PHY		0x0006
 #define MV88E6185_PORT_STS_CMODE_DISABLED	0x0007
+#define MV88E6XXX_PORT_STS_CMODE_INVALID	0xff
 
 /* Offset 0x01: MAC (or PCS or Physical) Control Register */
 #define MV88E6XXX_PORT_MAC_CTL				0x01
diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c
index 0fb986b..0ae723f 100644
--- a/drivers/net/ethernet/altera/altera_msgdma.c
+++ b/drivers/net/ethernet/altera/altera_msgdma.c
@@ -145,7 +145,8 @@
 			& 0xffff;
 
 	if (inuse) { /* Tx FIFO is not empty */
-		ready = priv->tx_prod - priv->tx_cons - inuse - 1;
+		ready = max_t(int,
+			      priv->tx_prod - priv->tx_cons - inuse - 1, 0);
 	} else {
 		/* Check for buffered last packet */
 		status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status));
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index baca8f7..c3c1195 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -714,8 +714,10 @@
 
 		phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link,
 				     priv->phy_iface);
-		if (IS_ERR(phydev))
+		if (IS_ERR(phydev)) {
 			netdev_err(dev, "Could not attach to PHY\n");
+			phydev = NULL;
+		}
 
 	} else {
 		int ret;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 034f575..1fdaf86b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -463,6 +463,12 @@
 	}
 
 	length >>= 9;
+	if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
+		dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
+				     skb->len);
+		i = 0;
+		goto tx_dma_error;
+	}
 	flags |= bnxt_lhint_arr[length];
 	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
 
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 3d45f4c..9bbaad9 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -643,6 +643,7 @@
 #define MACB_CAPS_JUMBO				0x00000020
 #define MACB_CAPS_GEM_HAS_PTP			0x00000040
 #define MACB_CAPS_BD_RD_PREFETCH		0x00000080
+#define MACB_CAPS_NEEDS_RSTONUBR		0x00000100
 #define MACB_CAPS_FIFO_MODE			0x10000000
 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE	0x20000000
 #define MACB_CAPS_SG_DISABLED			0x40000000
@@ -1214,6 +1215,8 @@
 
 	int	rx_bd_rd_prefetch;
 	int	tx_bd_rd_prefetch;
+
+	u32	rx_intr_mask;
 };
 
 #ifdef CONFIG_MACB_USE_HWSTAMP
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 8f4b2f9..8abea1c 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -56,8 +56,7 @@
 /* level of occupied TX descriptors under which we wake up TX process */
 #define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)
 
-#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
-				 | MACB_BIT(ISR_ROVR))
+#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR))
 #define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 					| MACB_BIT(ISR_RLE)		\
 					| MACB_BIT(TXERR))
@@ -1271,7 +1270,7 @@
 				queue_writel(queue, ISR, MACB_BIT(RCOMP));
 			napi_reschedule(napi);
 		} else {
-			queue_writel(queue, IER, MACB_RX_INT_FLAGS);
+			queue_writel(queue, IER, bp->rx_intr_mask);
 		}
 	}
 
@@ -1289,7 +1288,7 @@
 	u32 ctrl;
 
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
-		queue_writel(queue, IDR, MACB_RX_INT_FLAGS |
+		queue_writel(queue, IDR, bp->rx_intr_mask |
 					 MACB_TX_INT_FLAGS |
 					 MACB_BIT(HRESP));
 	}
@@ -1319,7 +1318,7 @@
 
 		/* Enable interrupts */
 		queue_writel(queue, IER,
-			     MACB_RX_INT_FLAGS |
+			     bp->rx_intr_mask |
 			     MACB_TX_INT_FLAGS |
 			     MACB_BIT(HRESP));
 	}
@@ -1373,14 +1372,14 @@
 			    (unsigned int)(queue - bp->queues),
 			    (unsigned long)status);
 
-		if (status & MACB_RX_INT_FLAGS) {
+		if (status & bp->rx_intr_mask) {
 			/* There's no point taking any more interrupts
 			 * until we have processed the buffers. The
 			 * scheduling call may fail if the poll routine
 			 * is already scheduled, so disable interrupts
 			 * now.
 			 */
-			queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
+			queue_writel(queue, IDR, bp->rx_intr_mask);
 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
 				queue_writel(queue, ISR, MACB_BIT(RCOMP));
 
@@ -1413,8 +1412,9 @@
 		/* There is a hardware issue under heavy load where DMA can
 		 * stop, this causes endless "used buffer descriptor read"
 		 * interrupts but it can be cleared by re-enabling RX. See
-		 * the at91 manual, section 41.3.1 or the Zynq manual
-		 * section 16.7.4 for details.
+		 * the at91rm9200 manual, section 41.3.1 or the Zynq manual
+		 * section 16.7.4 for details. RXUBR is only enabled for
+		 * these two versions.
 		 */
 		if (status & MACB_BIT(RXUBR)) {
 			ctrl = macb_readl(bp, NCR);
@@ -2264,7 +2264,7 @@
 
 		/* Enable interrupts */
 		queue_writel(queue, IER,
-			     MACB_RX_INT_FLAGS |
+			     bp->rx_intr_mask |
 			     MACB_TX_INT_FLAGS |
 			     MACB_BIT(HRESP));
 	}
@@ -3912,6 +3912,7 @@
 };
 
 static const struct macb_config emac_config = {
+	.caps = MACB_CAPS_NEEDS_RSTONUBR,
 	.clk_init = at91ether_clk_init,
 	.init = at91ether_init,
 };
@@ -3933,7 +3934,8 @@
 };
 
 static const struct macb_config zynq_config = {
-	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
+	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF |
+		MACB_CAPS_NEEDS_RSTONUBR,
 	.dma_burst_length = 16,
 	.clk_init = macb_clk_init,
 	.init = macb_init,
@@ -4088,6 +4090,10 @@
 						macb_dma_desc_get_size(bp);
 	}
 
+	bp->rx_intr_mask = MACB_RX_INT_FLAGS;
+	if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
+		bp->rx_intr_mask |= MACB_BIT(RXUBR);
+
 	mac = of_get_mac_address(np);
 	if (mac) {
 		ether_addr_copy(bp->dev->dev_addr, mac);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 6242249..b043370 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -2419,6 +2419,8 @@
 out_notify_fail:
 	(void)cancel_work_sync(&priv->service_task);
 out_read_prop_fail:
+	/* safe for ACPI FW */
+	of_node_put(to_of_node(priv->fwnode));
 	free_netdev(ndev);
 	return ret;
 }
@@ -2448,6 +2450,9 @@
 	set_bit(NIC_STATE_REMOVING, &priv->state);
 	(void)cancel_work_sync(&priv->service_task);
 
+	/* safe for ACPI FW */
+	of_node_put(to_of_node(priv->fwnode));
+
 	free_netdev(ndev);
 	return 0;
 }
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 774beda..e2710ff 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -1157,16 +1157,18 @@
  */
 static int hns_nic_nway_reset(struct net_device *netdev)
 {
-	int ret = 0;
 	struct phy_device *phy = netdev->phydev;
 
-	if (netif_running(netdev)) {
-		/* if autoneg is disabled, don't restart auto-negotiation */
-		if (phy && phy->autoneg == AUTONEG_ENABLE)
-			ret = genphy_restart_aneg(phy);
-	}
+	if (!netif_running(netdev))
+		return 0;
 
-	return ret;
+	if (!phy)
+		return -EOPNOTSUPP;
+
+	if (phy->autoneg != AUTONEG_ENABLE)
+		return -EINVAL;
+
+	return genphy_restart_aneg(phy);
 }
 
 static u32
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 017e084..baf5cc2 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -321,7 +321,7 @@
 		}
 
 		hns_mdio_cmd_write(mdio_dev, is_c45,
-				   MDIO_C45_WRITE_ADDR, phy_id, devad);
+				   MDIO_C45_READ, phy_id, devad);
 	}
 
 	/* Step 5: waitting for MDIO_COMMAND_REG 's mdio_start==0,*/
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 91f48c0..f70cb4d 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1314,7 +1314,6 @@
 	unsigned long lpar_rc;
 	u16 mss = 0;
 
-restart_poll:
 	while (frames_processed < budget) {
 		if (!ibmveth_rxq_pending_buffer(adapter))
 			break;
@@ -1402,7 +1401,6 @@
 		    napi_reschedule(napi)) {
 			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
 					       VIO_IRQ_DISABLE);
-			goto restart_poll;
 		}
 	}
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 41fa22c..f81ad0a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -424,9 +424,9 @@
 				  struct rtnl_link_stats64 *stats)
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
-	struct i40e_ring *tx_ring, *rx_ring;
 	struct i40e_vsi *vsi = np->vsi;
 	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
+	struct i40e_ring *ring;
 	int i;
 
 	if (test_bit(__I40E_VSI_DOWN, vsi->state))
@@ -440,24 +440,26 @@
 		u64 bytes, packets;
 		unsigned int start;
 
-		tx_ring = READ_ONCE(vsi->tx_rings[i]);
-		if (!tx_ring)
+		ring = READ_ONCE(vsi->tx_rings[i]);
+		if (!ring)
 			continue;
-		i40e_get_netdev_stats_struct_tx(tx_ring, stats);
+		i40e_get_netdev_stats_struct_tx(ring, stats);
 
-		rx_ring = &tx_ring[1];
+		if (i40e_enabled_xdp_vsi(vsi)) {
+			ring++;
+			i40e_get_netdev_stats_struct_tx(ring, stats);
+		}
 
+		ring++;
 		do {
-			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
-			packets = rx_ring->stats.packets;
-			bytes   = rx_ring->stats.bytes;
-		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
+			start   = u64_stats_fetch_begin_irq(&ring->syncp);
+			packets = ring->stats.packets;
+			bytes   = ring->stats.bytes;
+		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
 
 		stats->rx_packets += packets;
 		stats->rx_bytes   += bytes;
 
-		if (i40e_enabled_xdp_vsi(vsi))
-			i40e_get_netdev_stats_struct_tx(&rx_ring[1], stats);
 	}
 	rcu_read_unlock();
 
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index ae2f350..1485f66 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -46,6 +46,7 @@
 #include <linux/mii.h>
 #include <linux/of_device.h>
 #include <linux/of_net.h>
+#include <linux/dmi.h>
 
 #include <asm/irq.h>
 
@@ -93,7 +94,7 @@
 module_param(copybreak, int, 0);
 MODULE_PARM_DESC(copybreak, "Receive copy threshold");
 
-static int disable_msi = 0;
+static int disable_msi = -1;
 module_param(disable_msi, int, 0);
 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
 
@@ -4931,6 +4932,24 @@
 	return buf;
 }
 
+static const struct dmi_system_id msi_blacklist[] = {
+	{
+		.ident = "Dell Inspiron 1545",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1545"),
+		},
+	},
+	{
+		.ident = "Gateway P-79",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Gateway"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "P-79"),
+		},
+	},
+	{}
+};
+
 static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	struct net_device *dev, *dev1;
@@ -5042,6 +5061,9 @@
 		goto err_out_free_pci;
 	}
 
+	if (disable_msi == -1)
+		disable_msi = !!dmi_check_system(msi_blacklist);
+
 	if (!disable_msi && pci_enable_msi(pdev) == 0) {
 		err = sky2_test_msi(hw);
 		if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index e65bc3c..857588e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2645,6 +2645,8 @@
 	if (!priv->cmd.context)
 		return -ENOMEM;
 
+	if (mlx4_is_mfunc(dev))
+		mutex_lock(&priv->cmd.slave_cmd_mutex);
 	down_write(&priv->cmd.switch_sem);
 	for (i = 0; i < priv->cmd.max_cmds; ++i) {
 		priv->cmd.context[i].token = i;
@@ -2670,6 +2672,8 @@
 	down(&priv->cmd.poll_sem);
 	priv->cmd.use_events = 1;
 	up_write(&priv->cmd.switch_sem);
+	if (mlx4_is_mfunc(dev))
+		mutex_unlock(&priv->cmd.slave_cmd_mutex);
 
 	return err;
 }
@@ -2682,6 +2686,8 @@
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	int i;
 
+	if (mlx4_is_mfunc(dev))
+		mutex_lock(&priv->cmd.slave_cmd_mutex);
 	down_write(&priv->cmd.switch_sem);
 	priv->cmd.use_events = 0;
 
@@ -2689,9 +2695,12 @@
 		down(&priv->cmd.event_sem);
 
 	kfree(priv->cmd.context);
+	priv->cmd.context = NULL;
 
 	up(&priv->cmd.poll_sem);
 	up_write(&priv->cmd.switch_sem);
+	if (mlx4_is_mfunc(dev))
+		mutex_unlock(&priv->cmd.slave_cmd_mutex);
 }
 
 struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 31bd567..676428a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2719,13 +2719,13 @@
 	int total_pages;
 	int total_mem;
 	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
+	int tot;
 
 	sq_size = 1 << (log_sq_size + log_sq_sride + 4);
 	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
 	total_mem = sq_size + rq_size;
-	total_pages =
-		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
-				   page_shift);
+	tot = (total_mem + (page_offset << 6)) >> page_shift;
+	total_pages = !tot ? 1 : roundup_pow_of_two(tot);
 
 	return total_pages;
 }
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 42f5bfa..2083415 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -585,8 +585,7 @@
 
 		if (adapter->csr.flags &
 		   LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
-			flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR |
-				LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
+			flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
 				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
 				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
 				LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
@@ -599,12 +598,6 @@
 			/* map TX interrupt to vector */
 			int_vec_map1 |= INT_VEC_MAP1_TX_VEC_(index, vector);
 			lan743x_csr_write(adapter, INT_VEC_MAP1, int_vec_map1);
-			if (flags &
-			    LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) {
-				int_vec_en_auto_clr |= INT_VEC_EN_(vector);
-				lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR,
-						  int_vec_en_auto_clr);
-			}
 
 			/* Remove TX interrupt from shared mask */
 			intr->vector_list[0].int_mask &= ~int_bit;
@@ -1403,7 +1396,8 @@
 }
 
 static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
-				     unsigned int frame_length)
+				     unsigned int frame_length,
+				     int nr_frags)
 {
 	/* called only from within lan743x_tx_xmit_frame.
 	 * assuming tx->ring_lock has already been acquired.
@@ -1413,6 +1407,10 @@
 
 	/* wrap up previous descriptor */
 	tx->frame_data0 |= TX_DESC_DATA0_EXT_;
+	if (nr_frags <= 0) {
+		tx->frame_data0 |= TX_DESC_DATA0_LS_;
+		tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+	}
 	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
 	tx_descriptor->data0 = tx->frame_data0;
 
@@ -1517,8 +1515,11 @@
 	u32 tx_tail_flags = 0;
 
 	/* wrap up previous descriptor */
-	tx->frame_data0 |= TX_DESC_DATA0_LS_;
-	tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+	if ((tx->frame_data0 & TX_DESC_DATA0_DTYPE_MASK_) ==
+	    TX_DESC_DATA0_DTYPE_DATA_) {
+		tx->frame_data0 |= TX_DESC_DATA0_LS_;
+		tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+	}
 
 	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
 	buffer_info = &tx->buffer_info[tx->frame_tail];
@@ -1603,7 +1604,7 @@
 	}
 
 	if (gso)
-		lan743x_tx_frame_add_lso(tx, frame_length);
+		lan743x_tx_frame_add_lso(tx, frame_length, nr_frags);
 
 	if (nr_frags <= 0)
 		goto finish;
@@ -1897,7 +1898,17 @@
 	return ((++index) % rx->ring_size);
 }
 
-static int lan743x_rx_allocate_ring_element(struct lan743x_rx *rx, int index)
+static struct sk_buff *lan743x_rx_allocate_skb(struct lan743x_rx *rx)
+{
+	int length = 0;
+
+	length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
+	return __netdev_alloc_skb(rx->adapter->netdev,
+				  length, GFP_ATOMIC | GFP_DMA);
+}
+
+static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
+					struct sk_buff *skb)
 {
 	struct lan743x_rx_buffer_info *buffer_info;
 	struct lan743x_rx_descriptor *descriptor;
@@ -1906,9 +1917,7 @@
 	length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
 	descriptor = &rx->ring_cpu_ptr[index];
 	buffer_info = &rx->buffer_info[index];
-	buffer_info->skb = __netdev_alloc_skb(rx->adapter->netdev,
-					      length,
-					      GFP_ATOMIC | GFP_DMA);
+	buffer_info->skb = skb;
 	if (!(buffer_info->skb))
 		return -ENOMEM;
 	buffer_info->dma_ptr = dma_map_single(&rx->adapter->pdev->dev,
@@ -2055,8 +2064,19 @@
 		/* packet is available */
 		if (first_index == last_index) {
 			/* single buffer packet */
+			struct sk_buff *new_skb = NULL;
 			int packet_length;
 
+			new_skb = lan743x_rx_allocate_skb(rx);
+			if (!new_skb) {
+				/* failed to allocate next skb.
+				 * Memory is very low.
+				 * Drop this packet and reuse buffer.
+				 */
+				lan743x_rx_reuse_ring_element(rx, first_index);
+				goto process_extension;
+			}
+
 			buffer_info = &rx->buffer_info[first_index];
 			skb = buffer_info->skb;
 			descriptor = &rx->ring_cpu_ptr[first_index];
@@ -2076,7 +2096,7 @@
 			skb_put(skb, packet_length - 4);
 			skb->protocol = eth_type_trans(skb,
 						       rx->adapter->netdev);
-			lan743x_rx_allocate_ring_element(rx, first_index);
+			lan743x_rx_init_ring_element(rx, first_index, new_skb);
 		} else {
 			int index = first_index;
 
@@ -2089,26 +2109,23 @@
 			if (first_index <= last_index) {
 				while ((index >= first_index) &&
 				       (index <= last_index)) {
-					lan743x_rx_release_ring_element(rx,
-									index);
-					lan743x_rx_allocate_ring_element(rx,
-									 index);
+					lan743x_rx_reuse_ring_element(rx,
+								      index);
 					index = lan743x_rx_next_index(rx,
 								      index);
 				}
 			} else {
 				while ((index >= first_index) ||
 				       (index <= last_index)) {
-					lan743x_rx_release_ring_element(rx,
-									index);
-					lan743x_rx_allocate_ring_element(rx,
-									 index);
+					lan743x_rx_reuse_ring_element(rx,
+								      index);
 					index = lan743x_rx_next_index(rx,
 								      index);
 				}
 			}
 		}
 
+process_extension:
 		if (extension_index >= 0) {
 			descriptor = &rx->ring_cpu_ptr[extension_index];
 			buffer_info = &rx->buffer_info[extension_index];
@@ -2285,7 +2302,9 @@
 
 	rx->last_head = 0;
 	for (index = 0; index < rx->ring_size; index++) {
-		ret = lan743x_rx_allocate_ring_element(rx, index);
+		struct sk_buff *new_skb = lan743x_rx_allocate_skb(rx);
+
+		ret = lan743x_rx_init_ring_element(rx, index, new_skb);
 		if (ret)
 			goto cleanup;
 	}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 2f69ee9..4dd82a1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -473,19 +473,19 @@
 
 /* get pq index according to PQ_FLAGS */
 static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
-					   u32 pq_flags)
+					   unsigned long pq_flags)
 {
 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
 
 	/* Can't have multiple flags set here */
-	if (bitmap_weight((unsigned long *)&pq_flags,
+	if (bitmap_weight(&pq_flags,
 			  sizeof(pq_flags) * BITS_PER_BYTE) > 1) {
-		DP_ERR(p_hwfn, "requested multiple pq flags 0x%x\n", pq_flags);
+		DP_ERR(p_hwfn, "requested multiple pq flags 0x%lx\n", pq_flags);
 		goto err;
 	}
 
 	if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) {
-		DP_ERR(p_hwfn, "pq flag 0x%x is not set\n", pq_flags);
+		DP_ERR(p_hwfn, "pq flag 0x%lx is not set\n", pq_flags);
 		goto err;
 	}
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 67c02ea93..64ac95c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -609,6 +609,10 @@
 			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
 			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
 
+		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
+			  (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
+			   !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
+
 		SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
 			  !!(accept_filter & QED_ACCEPT_BCAST));
 
@@ -744,6 +748,11 @@
 		return rc;
 	}
 
+	if (p_params->update_ctl_frame_check) {
+		p_cmn->ctl_frame_mac_check_en = p_params->mac_chk_en;
+		p_cmn->ctl_frame_ethtype_check_en = p_params->ethtype_chk_en;
+	}
+
 	/* Update mcast bins for VFs, PF doesn't use this functionality */
 	qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
 
@@ -2207,7 +2216,7 @@
 			u16 num_queues = 0;
 
 			/* Since the feature controls only queue-zones,
-			 * make sure we have the contexts [rx, tx, xdp] to
+			 * make sure we have the contexts [rx, xdp, tcs] to
 			 * match.
 			 */
 			for_each_hwfn(cdev, i) {
@@ -2217,7 +2226,8 @@
 				u16 cids;
 
 				cids = hwfn->pf_params.eth_pf_params.num_cons;
-				num_queues += min_t(u16, l2_queues, cids / 3);
+				cids /= (2 + info->num_tc);
+				num_queues += min_t(u16, l2_queues, cids);
 			}
 
 			/* queues might theoretically be >256, but interrupts'
@@ -2688,7 +2698,8 @@
 	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
 		accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
 						 QED_ACCEPT_MCAST_UNMATCHED;
-		accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
+		accept_flags.tx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
+						 QED_ACCEPT_MCAST_UNMATCHED;
 	} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
 		accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
 		accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index 8d80f10..7127d5a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -219,6 +219,9 @@
 	struct qed_rss_params		*rss_params;
 	struct qed_filter_accept_flags	accept_flags;
 	struct qed_sge_tpa_params	*sge_tpa_params;
+	u8				update_ctl_frame_check;
+	u8				mac_chk_en;
+	u8				ethtype_chk_en;
 };
 
 int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 92cd8ab..015de1e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -2430,19 +2430,24 @@
 {
 	struct qed_ll2_tx_pkt_info pkt;
 	const skb_frag_t *frag;
+	u8 flags = 0, nr_frags;
 	int rc = -EINVAL, i;
 	dma_addr_t mapping;
 	u16 vlan = 0;
-	u8 flags = 0;
 
 	if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
 		DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
 		return -EINVAL;
 	}
 
-	if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
+	/* Cache number of fragments from SKB since SKB may be freed by
+	 * the completion routine after calling qed_ll2_prepare_tx_packet()
+	 */
+	nr_frags = skb_shinfo(skb)->nr_frags;
+
+	if (1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
 		DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
-		       1 + skb_shinfo(skb)->nr_frags);
+		       1 + nr_frags);
 		return -EINVAL;
 	}
 
@@ -2464,7 +2469,7 @@
 	}
 
 	memset(&pkt, 0, sizeof(pkt));
-	pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags;
+	pkt.num_of_bds = 1 + nr_frags;
 	pkt.vlan = vlan;
 	pkt.bd_flags = flags;
 	pkt.tx_dest = QED_LL2_TX_DEST_NW;
@@ -2475,12 +2480,17 @@
 	    test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags))
 		pkt.remove_stag = true;
 
+	/* qed_ll2_prepare_tx_packet() may actually send the packet if
+	 * there are no fragments in the skb and subsequently the completion
+	 * routine may run and free the SKB, so no dereferencing the SKB
+	 * beyond this point unless skb has any fragments.
+	 */
 	rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle,
 				       &pkt, 1);
 	if (rc)
 		goto err;
 
-	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+	for (i = 0; i < nr_frags; i++) {
 		frag = &skb_shinfo(skb)->frags[i];
 
 		mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 3157c0d..dae2896e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -380,6 +380,7 @@
  * @param p_hwfn
  */
 void qed_consq_free(struct qed_hwfn *p_hwfn);
+int qed_spq_pend_post(struct qed_hwfn *p_hwfn);
 
 /**
  * @file
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 7106ad1..a0ee847 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -402,6 +402,11 @@
 
 	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));
 
+	/* Attempt to post pending requests */
+	spin_lock_bh(&p_hwfn->p_spq->lock);
+	rc = qed_spq_pend_post(p_hwfn);
+	spin_unlock_bh(&p_hwfn->p_spq->lock);
+
 	return rc;
 }
 
@@ -745,7 +750,7 @@
 	return 0;
 }
 
-static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
+int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
 {
 	struct qed_spq *p_spq = p_hwfn->p_spq;
 	struct qed_spq_entry *p_ent = NULL;
@@ -883,7 +888,6 @@
 	struct qed_spq_entry	*p_ent = NULL;
 	struct qed_spq_entry	*tmp;
 	struct qed_spq_entry	*found = NULL;
-	int			rc;
 
 	if (!p_hwfn)
 		return -EINVAL;
@@ -941,12 +945,7 @@
 		 */
 		qed_spq_return_entry(p_hwfn, found);
 
-	/* Attempt to post pending requests */
-	spin_lock_bh(&p_spq->lock);
-	rc = qed_spq_pend_post(p_hwfn);
-	spin_unlock_bh(&p_spq->lock);
-
-	return rc;
+	return 0;
 }
 
 int qed_consq_alloc(struct qed_hwfn *p_hwfn)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index ca6290f..71a7af1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -1969,7 +1969,9 @@
 	params.vport_id = vf->vport_id;
 	params.max_buffers_per_cqe = start->max_buffers_per_cqe;
 	params.mtu = vf->mtu;
-	params.check_mac = true;
+
+	/* Non trusted VFs should enable control frame filtering */
+	params.check_mac = !vf->p_vf_info.is_trusted_configured;
 
 	rc = qed_sp_eth_vport_start(p_hwfn, &params);
 	if (rc) {
@@ -5130,6 +5132,9 @@
 		params.opaque_fid = vf->opaque_fid;
 		params.vport_id = vf->vport_id;
 
+		params.update_ctl_frame_check = 1;
+		params.mac_chk_en = !vf_info->is_trusted_configured;
+
 		if (vf_info->rx_accept_mode & mask) {
 			flags->update_rx_mode_config = 1;
 			flags->rx_accept_filter = vf_info->rx_accept_mode;
@@ -5147,7 +5152,8 @@
 		}
 
 		if (flags->update_rx_mode_config ||
-		    flags->update_tx_mode_config)
+		    flags->update_tx_mode_config ||
+		    params.update_ctl_frame_check)
 			qed_sp_vport_update(hwfn, &params,
 					    QED_SPQ_MODE_EBLOCK, NULL);
 	}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index be118d0..6ab3fb0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -261,6 +261,7 @@
 	struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
 	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
 	struct vf_pf_resc_request *p_resc;
+	u8 retry_cnt = VF_ACQUIRE_THRESH;
 	bool resources_acquired = false;
 	struct vfpf_acquire_tlv *req;
 	int rc = 0, attempts = 0;
@@ -314,6 +315,15 @@
 
 		/* send acquire request */
 		rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+
+		/* Re-try acquire in case of vf-pf hw channel timeout */
+		if (retry_cnt && rc == -EBUSY) {
+			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+				   "VF retrying to acquire due to VPC timeout\n");
+			retry_cnt--;
+			continue;
+		}
+
 		if (rc)
 			goto exit;
 
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 6a4d266..d242a57 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -489,6 +489,9 @@
 
 /* Datapath functions definition */
 netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
+		      struct net_device *sb_dev,
+		      select_queue_fallback_t fallback);
 netdev_features_t qede_features_check(struct sk_buff *skb,
 				      struct net_device *dev,
 				      netdev_features_t features);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 1a78027..a96da16 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1695,6 +1695,19 @@
 	return NETDEV_TX_OK;
 }
 
+u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
+		      struct net_device *sb_dev,
+		      select_queue_fallback_t fallback)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+	int total_txq;
+
+	total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc;
+
+	return QEDE_TSS_COUNT(edev) ?
+		fallback(dev, skb, NULL) % total_txq :  0;
+}
+
 /* 8B udp header + 8B base tunnel header + 32B option length */
 #define QEDE_MAX_TUN_HDR_LEN 48
 
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 46d0f2e..f3d9c40 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -631,6 +631,7 @@
 	.ndo_open = qede_open,
 	.ndo_stop = qede_close,
 	.ndo_start_xmit = qede_start_xmit,
+	.ndo_select_queue = qede_select_queue,
 	.ndo_set_rx_mode = qede_set_rx_mode,
 	.ndo_set_mac_address = qede_set_mac_addr,
 	.ndo_validate_addr = eth_validate_addr,
@@ -666,6 +667,7 @@
 	.ndo_open = qede_open,
 	.ndo_stop = qede_close,
 	.ndo_start_xmit = qede_start_xmit,
+	.ndo_select_queue = qede_select_queue,
 	.ndo_set_rx_mode = qede_set_rx_mode,
 	.ndo_set_mac_address = qede_set_mac_addr,
 	.ndo_validate_addr = eth_validate_addr,
@@ -684,6 +686,7 @@
 	.ndo_open = qede_open,
 	.ndo_stop = qede_close,
 	.ndo_start_xmit = qede_start_xmit,
+	.ndo_select_queue = qede_select_queue,
 	.ndo_set_rx_mode = qede_set_rx_mode,
 	.ndo_set_mac_address = qede_set_mac_addr,
 	.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 3075bfa..4d47bd1 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
  *
  * RMNET configuration engine
  *
@@ -216,6 +216,10 @@
 		synchronize_rcu();
 		kfree(ep);
 	}
+
+	if (!port->nr_rmnet_devs)
+		qmi_rmnet_qmi_exit(port->qmi_info, port);
+
 	rmnet_unregister_real_device(real_dev, port);
 
 	unregister_netdevice_queue(dev, head);
@@ -236,6 +240,7 @@
 	ASSERT_RTNL();
 
 	port = rmnet_get_port_rtnl(dev);
+	qmi_rmnet_qmi_exit(port->qmi_info, port);
 
 	rmnet_unregister_bridge(dev, port);
 
@@ -250,8 +255,6 @@
 
 	unregister_netdevice_many(&list);
 
-	qmi_rmnet_qmi_exit(port->qmi_info, port);
-
 	rmnet_unregister_real_device(real_dev, port);
 }
 
@@ -554,6 +557,7 @@
 
 	*tx = 0;
 	*rx = 0;
+	rcu_read_lock();
 	hash_for_each(((struct rmnet_port *)port)->muxed_ep, bkt, ep, hlnode) {
 		priv = netdev_priv(ep->egress_dev);
 		for_each_possible_cpu(cpu) {
@@ -565,6 +569,7 @@
 			} while (u64_stats_fetch_retry_irq(&ps->syncp, start));
 		}
 	}
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL(rmnet_get_packets);
 
@@ -601,6 +606,30 @@
 }
 EXPORT_SYMBOL(rmnet_enable_all_flows);
 
+bool rmnet_all_flows_enabled(void *port)
+{
+	struct rmnet_endpoint *ep;
+	unsigned long bkt;
+	bool ret = true;
+
+	if (unlikely(!port))
+		return true;
+
+	rcu_read_lock();
+	hash_for_each_rcu(((struct rmnet_port *)port)->muxed_ep,
+			  bkt, ep, hlnode) {
+		if (!qmi_rmnet_all_flows_enabled(ep->egress_dev)) {
+			ret = false;
+			goto out;
+		}
+	}
+out:
+	rcu_read_unlock();
+
+	return ret;
+}
+EXPORT_SYMBOL(rmnet_all_flows_enabled);
+
 int rmnet_get_powersave_notif(void *port)
 {
 	if (!port)
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
index 9901c13..d2a667d 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
@@ -28,8 +28,6 @@
 	u64 dl_hdr_count;
 	u64 dl_hdr_total_bytes;
 	u64 dl_hdr_total_pkts;
-	u64 dl_hdr_avg_bytes;
-	u64 dl_hdr_avg_pkts;
 	u64 dl_trl_last_seq;
 	u64 dl_trl_count;
 };
@@ -58,6 +56,7 @@
 	struct timespec agg_time;
 	struct timespec agg_last;
 	struct hrtimer hrtimer;
+	struct work_struct agg_wq;
 
 	void *qmi_info;
 
@@ -130,6 +129,11 @@
 	void __rcu *qos_info;
 };
 
+enum rmnet_dl_marker_prio {
+	RMNET_PERF,
+	RMNET_SHS,
+};
+
 enum rmnet_trace_func {
 	RMNET_MODULE,
 	NW_STACK_MODULE,
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index a5d0578..b606760 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -83,6 +83,11 @@
 			   struct rmnet_port *port) __rcu __read_mostly;
 EXPORT_SYMBOL(rmnet_shs_skb_entry);
 
+/* SHS hook handler for work queue */
+int (*rmnet_shs_skb_entry_wq)(struct sk_buff *skb,
+			      struct rmnet_port *port) __rcu __read_mostly;
+EXPORT_SYMBOL(rmnet_shs_skb_entry_wq);
+
 /* Generic handler */
 
 void
@@ -125,6 +130,59 @@
 }
 EXPORT_SYMBOL(rmnet_deliver_skb);
 
+/* Important to note, port cannot be used here if it has gone stale */
+void
+rmnet_deliver_skb_wq(struct sk_buff *skb, struct rmnet_port *port,
+		     enum rmnet_packet_context ctx)
+{
+	int (*rmnet_shs_stamp)(struct sk_buff *skb, struct rmnet_port *port);
+	struct rmnet_priv *priv = netdev_priv(skb->dev);
+
+	trace_rmnet_low(RMNET_MODULE, RMNET_DLVR_SKB, 0xDEF, 0xDEF,
+			0xDEF, 0xDEF, (void *)skb, NULL);
+	skb_reset_transport_header(skb);
+	skb_reset_network_header(skb);
+	rmnet_vnd_rx_fixup(skb->dev, skb->len);
+
+	skb->pkt_type = PACKET_HOST;
+	skb_set_mac_header(skb, 0);
+
+	/* packets coming from work queue context due to packet flush timer
+	 * must go through the special workqueue path in SHS driver
+	 */
+	rmnet_shs_stamp = (!ctx) ? rcu_dereference(rmnet_shs_skb_entry) :
+				   rcu_dereference(rmnet_shs_skb_entry_wq);
+	if (rmnet_shs_stamp) {
+		rmnet_shs_stamp(skb, port);
+		return;
+	}
+
+	if (ctx == RMNET_NET_RX_CTX) {
+		if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
+			if (!rmnet_check_skb_can_gro(skb) &&
+			    port->dl_marker_flush >= 0) {
+				struct napi_struct *napi =
+					get_current_napi_context();
+				napi_gro_receive(napi, skb);
+				port->dl_marker_flush++;
+			} else {
+				netif_receive_skb(skb);
+			}
+		} else {
+			if (!rmnet_check_skb_can_gro(skb))
+				gro_cells_receive(&priv->gro_cells, skb);
+			else
+				netif_receive_skb(skb);
+		}
+	} else {
+		if ((port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) &&
+		    port->dl_marker_flush >= 0)
+			port->dl_marker_flush++;
+		gro_cells_receive(&priv->gro_cells, skb);
+	}
+}
+EXPORT_SYMBOL(rmnet_deliver_skb_wq);
+
 /* Deliver a list of skbs after undoing coalescing */
 static void rmnet_deliver_skb_list(struct sk_buff_head *head,
 				   struct rmnet_port *port)
@@ -154,6 +212,7 @@
 
 	qmap = (struct rmnet_map_header *)rmnet_map_data_ptr(skb);
 	if (qmap->cd_bit) {
+		qmi_rmnet_set_dl_msg_active(port);
 		if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
 			if (!rmnet_map_flow_command(skb, port, false))
 				return;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h
index 8ce6fe6c..09a2954 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2013, 2016-2018 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013, 2016-2019 The Linux Foundation. All rights reserved.
  *
  * RMNET Data ingress/egress handler
  *
@@ -10,8 +10,15 @@
 
 #include "rmnet_config.h"
 
+enum rmnet_packet_context {
+	RMNET_NET_RX_CTX,
+	RMNET_WQ_CTX,
+};
+
 void rmnet_egress_handler(struct sk_buff *skb);
 void rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port);
+void rmnet_deliver_skb_wq(struct sk_buff *skb, struct rmnet_port *port,
+			  enum rmnet_packet_context ctx);
 void rmnet_set_skb_proto(struct sk_buff *skb);
 rx_handler_result_t _rmnet_map_ingress_handler(struct sk_buff *skb,
 					       struct rmnet_port *port);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
index 6bc8a02..03ce4f3 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
@@ -166,6 +166,7 @@
 } __aligned(1);
 
 struct rmnet_map_dl_ind {
+	u8 priority;
 	void (*dl_hdr_handler)(struct rmnet_map_dl_ind_hdr *dlhdr);
 	void (*dl_trl_handler)(struct rmnet_map_dl_ind_trl *dltrl);
 	struct list_head list;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
index cb8bdf5..33da4bf 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
@@ -136,15 +136,6 @@
 	port->stats.dl_hdr_total_pkts += port->stats.dl_hdr_last_pkts;
 	port->stats.dl_hdr_count++;
 
-	if (unlikely(!(port->stats.dl_hdr_count)))
-		port->stats.dl_hdr_count = 1;
-
-	port->stats.dl_hdr_avg_bytes = port->stats.dl_hdr_total_bytes /
-				       port->stats.dl_hdr_count;
-
-	port->stats.dl_hdr_avg_pkts = port->stats.dl_hdr_total_pkts /
-				      port->stats.dl_hdr_count;
-
 	rmnet_map_dl_hdr_notify(port, dlhdr);
 	if (rmnet_perf) {
 		unsigned int pull_size;
@@ -261,11 +252,38 @@
 int rmnet_map_dl_ind_register(struct rmnet_port *port,
 			      struct rmnet_map_dl_ind *dl_ind)
 {
+	struct rmnet_map_dl_ind *dl_ind_iterator;
+	bool empty_ind_list = true;
+
 	if (!port || !dl_ind || !dl_ind->dl_hdr_handler ||
 	    !dl_ind->dl_trl_handler)
 		return -EINVAL;
 
-	list_add_rcu(&dl_ind->list, &port->dl_list);
+	list_for_each_entry_rcu(dl_ind_iterator, &port->dl_list, list) {
+		empty_ind_list = false;
+		if (dl_ind_iterator->priority < dl_ind->priority) {
+			if (dl_ind_iterator->list.next) {
+				if (dl_ind->priority
+				    < list_entry_rcu(dl_ind_iterator->list.next,
+				    typeof(*dl_ind_iterator), list)->priority) {
+					list_add_rcu(&dl_ind->list,
+						     &dl_ind_iterator->list);
+					break;
+				}
+			} else {
+				list_add_rcu(&dl_ind->list,
+					     &dl_ind_iterator->list);
+				break;
+			}
+		} else {
+			list_add_tail_rcu(&dl_ind->list,
+					  &dl_ind_iterator->list);
+			break;
+		}
+	}
+
+	if (empty_ind_list)
+		list_add_rcu(&dl_ind->list, &port->dl_list);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index 8c13eed..e7b25ad 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -1044,11 +1044,6 @@
 	return rc;
 }
 
-struct rmnet_agg_work {
-	struct work_struct work;
-	struct rmnet_port *port;
-};
-
 long rmnet_agg_time_limit __read_mostly = 1000000L;
 long rmnet_agg_bypass_time __read_mostly = 10000000L;
 
@@ -1082,22 +1077,17 @@
 
 static void rmnet_map_flush_tx_packet_work(struct work_struct *work)
 {
-	struct rmnet_agg_work *real_work;
+	struct sk_buff *skb = NULL;
 	struct rmnet_port *port;
 	unsigned long flags;
-	struct sk_buff *skb;
-	int agg_count = 0;
 
-	real_work = (struct rmnet_agg_work *)work;
-	port = real_work->port;
-	skb = NULL;
+	port = container_of(work, struct rmnet_port, agg_wq);
 
 	spin_lock_irqsave(&port->agg_lock, flags);
 	if (likely(port->agg_state == -EINPROGRESS)) {
 		/* Buffer may have already been shipped out */
 		if (likely(port->agg_skb)) {
 			skb = port->agg_skb;
-			agg_count = port->agg_count;
 			port->agg_skb = NULL;
 			port->agg_count = 0;
 			memset(&port->agg_time, 0, sizeof(struct timespec));
@@ -1108,27 +1098,15 @@
 	spin_unlock_irqrestore(&port->agg_lock, flags);
 	if (skb)
 		dev_queue_xmit(skb);
-
-	kfree(work);
 }
 
 enum hrtimer_restart rmnet_map_flush_tx_packet_queue(struct hrtimer *t)
 {
-	struct rmnet_agg_work *work;
 	struct rmnet_port *port;
 
 	port = container_of(t, struct rmnet_port, hrtimer);
 
-	work = kmalloc(sizeof(*work), GFP_ATOMIC);
-	if (!work) {
-		port->agg_state = 0;
-
-		return HRTIMER_NORESTART;
-	}
-
-	INIT_WORK(&work->work, rmnet_map_flush_tx_packet_work);
-	work->port = port;
-	schedule_work((struct work_struct *)work);
+	schedule_work(&port->agg_wq);
 	return HRTIMER_NORESTART;
 }
 
@@ -1150,15 +1128,16 @@
 		 * sparse, don't aggregate. We will need to tune this later
 		 */
 		diff = timespec_sub(port->agg_last, last);
+		size = port->egress_agg_size - skb->len;
 
-		if (diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_bypass_time) {
+		if (diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_bypass_time ||
+		    size <= 0) {
 			spin_unlock_irqrestore(&port->agg_lock, flags);
 			skb->protocol = htons(ETH_P_MAP);
 			dev_queue_xmit(skb);
 			return;
 		}
 
-		size = port->egress_agg_size - skb->len;
 		port->agg_skb = skb_copy_expand(skb, 0, size, GFP_ATOMIC);
 		if (!port->agg_skb) {
 			port->agg_skb = 0;
@@ -1213,6 +1192,8 @@
 	port->egress_agg_size = 8192;
 	port->egress_agg_count = 20;
 	spin_lock_init(&port->agg_lock);
+
+	INIT_WORK(&port->agg_wq, rmnet_map_flush_tx_packet_work);
 }
 
 void rmnet_map_tx_aggregate_exit(struct rmnet_port *port)
@@ -1220,6 +1201,8 @@
 	unsigned long flags;
 
 	hrtimer_cancel(&port->hrtimer);
+	cancel_work_sync(&port->agg_wq);
+
 	spin_lock_irqsave(&port->agg_lock, flags);
 	if (port->agg_state == -EINPROGRESS) {
 		if (port->agg_skb) {
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 2c4c825..e6bba00 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -16,6 +16,7 @@
 #include "rmnet_vnd.h"
 
 #include <soc/qcom/qmi_rmnet.h>
+#include <soc/qcom/rmnet_qmi.h>
 #include <trace/events/rmnet.h>
 
 /* RX/TX Fixup */
@@ -65,6 +66,7 @@
 		trace_rmnet_xmit_skb(skb);
 		rmnet_egress_handler(skb);
 		qmi_rmnet_burst_fc_check(dev, ip_type, mark, len);
+		qmi_rmnet_work_maybe_restart(rmnet_get_rmnet_port(dev));
 	} else {
 		this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
 		kfree_skb(skb);
@@ -219,8 +221,6 @@
 	"DL header pkts received",
 	"DL header total bytes received",
 	"DL header total pkts received",
-	"DL header average bytes",
-	"DL header average packets",
 	"DL trailer last seen sequence",
 	"DL trailer pkts received",
 };
@@ -311,10 +311,6 @@
 
 	rmnet_dev->needs_free_netdev = true;
 	rmnet_dev->ethtool_ops = &rmnet_ethtool_ops;
-
-	/* This perm addr will be used as interface identifier by IPv6 */
-	rmnet_dev->addr_assign_type = NET_ADDR_RANDOM;
-	eth_random_addr(rmnet_dev->perm_addr);
 }
 
 /* Exposed API */
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 8441c86..5f092bb 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -459,7 +459,7 @@
 		   RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR);
 
 	/* Set FIFO size */
-	ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC);
+	ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC);
 
 	/* Timestamp enable */
 	ravb_write(ndev, TCCR_TFEN, TCCR);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 7b92336..3b174ea 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -1342,8 +1342,10 @@
 	}
 
 	ret = phy_power_on(bsp_priv, true);
-	if (ret)
+	if (ret) {
+		gmac_clk_enable(bsp_priv, false);
 		return ret;
+	}
 
 	pm_runtime_enable(dev);
 	pm_runtime_get_sync(dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 9caf79b..4d5fb4b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -719,8 +719,11 @@
 {
 	unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
 
-	if (!clk)
-		return 0;
+	if (!clk) {
+		clk = priv->plat->clk_ref_rate;
+		if (!clk)
+			return 0;
+	}
 
 	return (usec * (clk / 1000000)) / 256;
 }
@@ -729,8 +732,11 @@
 {
 	unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
 
-	if (!clk)
-		return 0;
+	if (!clk) {
+		clk = priv->plat->clk_ref_rate;
+		if (!clk)
+			return 0;
+	}
 
 	return (riwt * 256) / (clk / 1000000);
 }
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 123b74e..43ab9e9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3028,10 +3028,22 @@
 
 	tx_q = &priv->tx_queue[queue];
 
+	if (priv->tx_path_in_lpi_mode)
+		stmmac_disable_eee_mode(priv);
+
 	/* Manage oversized TCP frames for GMAC4 device */
 	if (skb_is_gso(skb) && priv->tso) {
-		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
+		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+			/*
+			 * There is no way to determine the number of TSO
+			 * capable Queues. Let's use always the Queue 0
+			 * because if TSO is supported then at least this
+			 * one will be capable.
+			 */
+			skb_set_queue_mapping(skb, 0);
+
 			return stmmac_tso_xmit(skb, dev);
+		}
 	}
 
 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
@@ -3046,9 +3058,6 @@
 		return NETDEV_TX_BUSY;
 	}
 
-	if (priv->tx_path_in_lpi_mode)
-		stmmac_disable_eee_mode(priv);
-
 	entry = tx_q->cur_tx;
 	first_entry = entry;
 	WARN_ON(tx_q->tx_skbuff[first_entry]);
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 01711e6..e1427b5 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -636,15 +636,20 @@
 static int geneve_open(struct net_device *dev)
 {
 	struct geneve_dev *geneve = netdev_priv(dev);
-	bool ipv6 = !!(geneve->info.mode & IP_TUNNEL_INFO_IPV6);
 	bool metadata = geneve->collect_md;
+	bool ipv4, ipv6;
 	int ret = 0;
 
+	ipv6 = geneve->info.mode & IP_TUNNEL_INFO_IPV6 || metadata;
+	ipv4 = !ipv6 || metadata;
 #if IS_ENABLED(CONFIG_IPV6)
-	if (ipv6 || metadata)
+	if (ipv6) {
 		ret = geneve_sock_add(geneve, true);
+		if (ret < 0 && ret != -EAFNOSUPPORT)
+			ipv4 = false;
+	}
 #endif
-	if (!ret && (!ipv6 || metadata))
+	if (ipv4)
 		ret = geneve_sock_add(geneve, false);
 	if (ret < 0)
 		geneve_sock_release(geneve);
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index a32ded5..42d2846 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -144,6 +144,8 @@
 	u32 total_data_buflen;
 };
 
+#define NETVSC_HASH_KEYLEN 40
+
 struct netvsc_device_info {
 	unsigned char mac_adr[ETH_ALEN];
 	u32  num_chn;
@@ -151,6 +153,8 @@
 	u32  recv_sections;
 	u32  send_section_size;
 	u32  recv_section_size;
+
+	u8 rss_key[NETVSC_HASH_KEYLEN];
 };
 
 enum rndis_device_state {
@@ -160,8 +164,6 @@
 	RNDIS_DEV_DATAINITIALIZED,
 };
 
-#define NETVSC_HASH_KEYLEN 40
-
 struct rndis_device {
 	struct net_device *ndev;
 
@@ -210,7 +212,9 @@
 void netvsc_channel_cb(void *context);
 int netvsc_poll(struct napi_struct *napi, int budget);
 
-int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev);
+int rndis_set_subchannel(struct net_device *ndev,
+			 struct netvsc_device *nvdev,
+			 struct netvsc_device_info *dev_info);
 int rndis_filter_open(struct netvsc_device *nvdev);
 int rndis_filter_close(struct netvsc_device *nvdev);
 struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index fe01e14..1a942fe 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -84,7 +84,7 @@
 
 	rdev = nvdev->extension;
 	if (rdev) {
-		ret = rndis_set_subchannel(rdev->ndev, nvdev);
+		ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL);
 		if (ret == 0) {
 			netif_device_attach(rdev->ndev);
 		} else {
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 1c37a82..c832040 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -743,6 +743,14 @@
 	schedule_delayed_work(&ndev_ctx->dwork, 0);
 }
 
+static void netvsc_comp_ipcsum(struct sk_buff *skb)
+{
+	struct iphdr *iph = (struct iphdr *)skb->data;
+
+	iph->check = 0;
+	iph->check = ip_fast_csum(iph, iph->ihl);
+}
+
 static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
 					     struct napi_struct *napi,
 					     const struct ndis_tcp_ip_checksum_info *csum_info,
@@ -766,9 +774,17 @@
 	/* skb is already created with CHECKSUM_NONE */
 	skb_checksum_none_assert(skb);
 
-	/*
-	 * In Linux, the IP checksum is always checked.
-	 * Do L4 checksum offload if enabled and present.
+	/* Incoming packets may have IP header checksum verified by the host.
+	 * They may not have IP header checksum computed after coalescing.
+	 * We compute it here if the flags are set, because on Linux, the IP
+	 * checksum is always checked.
+	 */
+	if (csum_info && csum_info->receive.ip_checksum_value_invalid &&
+	    csum_info->receive.ip_checksum_succeeded &&
+	    skb->protocol == htons(ETH_P_IP))
+		netvsc_comp_ipcsum(skb);
+
+	/* Do L4 checksum offload if enabled and present.
 	 */
 	if (csum_info && (net->features & NETIF_F_RXCSUM)) {
 		if (csum_info->receive.tcp_checksum_succeeded ||
@@ -856,6 +872,39 @@
 	}
 }
 
+/* Alloc struct netvsc_device_info, and initialize it from either existing
+ * struct netvsc_device, or from default values.
+ */
+static struct netvsc_device_info *netvsc_devinfo_get
+			(struct netvsc_device *nvdev)
+{
+	struct netvsc_device_info *dev_info;
+
+	dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC);
+
+	if (!dev_info)
+		return NULL;
+
+	if (nvdev) {
+		dev_info->num_chn = nvdev->num_chn;
+		dev_info->send_sections = nvdev->send_section_cnt;
+		dev_info->send_section_size = nvdev->send_section_size;
+		dev_info->recv_sections = nvdev->recv_section_cnt;
+		dev_info->recv_section_size = nvdev->recv_section_size;
+
+		memcpy(dev_info->rss_key, nvdev->extension->rss_key,
+		       NETVSC_HASH_KEYLEN);
+	} else {
+		dev_info->num_chn = VRSS_CHANNEL_DEFAULT;
+		dev_info->send_sections = NETVSC_DEFAULT_TX;
+		dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE;
+		dev_info->recv_sections = NETVSC_DEFAULT_RX;
+		dev_info->recv_section_size = NETVSC_RECV_SECTION_SIZE;
+	}
+
+	return dev_info;
+}
+
 static int netvsc_detach(struct net_device *ndev,
 			 struct netvsc_device *nvdev)
 {
@@ -907,7 +956,7 @@
 		return PTR_ERR(nvdev);
 
 	if (nvdev->num_chn > 1) {
-		ret = rndis_set_subchannel(ndev, nvdev);
+		ret = rndis_set_subchannel(ndev, nvdev, dev_info);
 
 		/* if unavailable, just proceed with one queue */
 		if (ret) {
@@ -941,7 +990,7 @@
 	struct net_device_context *net_device_ctx = netdev_priv(net);
 	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
 	unsigned int orig, count = channels->combined_count;
-	struct netvsc_device_info device_info;
+	struct netvsc_device_info *device_info;
 	int ret;
 
 	/* We do not support separate count for rx, tx, or other */
@@ -960,24 +1009,26 @@
 
 	orig = nvdev->num_chn;
 
-	memset(&device_info, 0, sizeof(device_info));
-	device_info.num_chn = count;
-	device_info.send_sections = nvdev->send_section_cnt;
-	device_info.send_section_size = nvdev->send_section_size;
-	device_info.recv_sections = nvdev->recv_section_cnt;
-	device_info.recv_section_size = nvdev->recv_section_size;
+	device_info = netvsc_devinfo_get(nvdev);
+
+	if (!device_info)
+		return -ENOMEM;
+
+	device_info->num_chn = count;
 
 	ret = netvsc_detach(net, nvdev);
 	if (ret)
-		return ret;
+		goto out;
 
-	ret = netvsc_attach(net, &device_info);
+	ret = netvsc_attach(net, device_info);
 	if (ret) {
-		device_info.num_chn = orig;
-		if (netvsc_attach(net, &device_info))
+		device_info->num_chn = orig;
+		if (netvsc_attach(net, device_info))
 			netdev_err(net, "restoring channel setting failed\n");
 	}
 
+out:
+	kfree(device_info);
 	return ret;
 }
 
@@ -1044,48 +1095,45 @@
 	struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
 	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
 	int orig_mtu = ndev->mtu;
-	struct netvsc_device_info device_info;
+	struct netvsc_device_info *device_info;
 	int ret = 0;
 
 	if (!nvdev || nvdev->destroy)
 		return -ENODEV;
 
+	device_info = netvsc_devinfo_get(nvdev);
+
+	if (!device_info)
+		return -ENOMEM;
+
 	/* Change MTU of underlying VF netdev first. */
 	if (vf_netdev) {
 		ret = dev_set_mtu(vf_netdev, mtu);
 		if (ret)
-			return ret;
+			goto out;
 	}
 
-	memset(&device_info, 0, sizeof(device_info));
-	device_info.num_chn = nvdev->num_chn;
-	device_info.send_sections = nvdev->send_section_cnt;
-	device_info.send_section_size = nvdev->send_section_size;
-	device_info.recv_sections = nvdev->recv_section_cnt;
-	device_info.recv_section_size = nvdev->recv_section_size;
-
 	ret = netvsc_detach(ndev, nvdev);
 	if (ret)
 		goto rollback_vf;
 
 	ndev->mtu = mtu;
 
-	ret = netvsc_attach(ndev, &device_info);
-	if (ret)
-		goto rollback;
+	ret = netvsc_attach(ndev, device_info);
+	if (!ret)
+		goto out;
 
-	return 0;
-
-rollback:
 	/* Attempt rollback to original MTU */
 	ndev->mtu = orig_mtu;
 
-	if (netvsc_attach(ndev, &device_info))
+	if (netvsc_attach(ndev, device_info))
 		netdev_err(ndev, "restoring mtu failed\n");
 rollback_vf:
 	if (vf_netdev)
 		dev_set_mtu(vf_netdev, orig_mtu);
 
+out:
+	kfree(device_info);
 	return ret;
 }
 
@@ -1690,7 +1738,7 @@
 {
 	struct net_device_context *ndevctx = netdev_priv(ndev);
 	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
-	struct netvsc_device_info device_info;
+	struct netvsc_device_info *device_info;
 	struct ethtool_ringparam orig;
 	u32 new_tx, new_rx;
 	int ret = 0;
@@ -1710,26 +1758,29 @@
 	    new_rx == orig.rx_pending)
 		return 0;	 /* no change */
 
-	memset(&device_info, 0, sizeof(device_info));
-	device_info.num_chn = nvdev->num_chn;
-	device_info.send_sections = new_tx;
-	device_info.send_section_size = nvdev->send_section_size;
-	device_info.recv_sections = new_rx;
-	device_info.recv_section_size = nvdev->recv_section_size;
+	device_info = netvsc_devinfo_get(nvdev);
+
+	if (!device_info)
+		return -ENOMEM;
+
+	device_info->send_sections = new_tx;
+	device_info->recv_sections = new_rx;
 
 	ret = netvsc_detach(ndev, nvdev);
 	if (ret)
-		return ret;
+		goto out;
 
-	ret = netvsc_attach(ndev, &device_info);
+	ret = netvsc_attach(ndev, device_info);
 	if (ret) {
-		device_info.send_sections = orig.tx_pending;
-		device_info.recv_sections = orig.rx_pending;
+		device_info->send_sections = orig.tx_pending;
+		device_info->recv_sections = orig.rx_pending;
 
-		if (netvsc_attach(ndev, &device_info))
+		if (netvsc_attach(ndev, device_info))
 			netdev_err(ndev, "restoring ringparam failed");
 	}
 
+out:
+	kfree(device_info);
 	return ret;
 }
 
@@ -2158,7 +2209,7 @@
 {
 	struct net_device *net = NULL;
 	struct net_device_context *net_device_ctx;
-	struct netvsc_device_info device_info;
+	struct netvsc_device_info *device_info = NULL;
 	struct netvsc_device *nvdev;
 	int ret = -ENOMEM;
 
@@ -2205,21 +2256,21 @@
 	netif_set_real_num_rx_queues(net, 1);
 
 	/* Notify the netvsc driver of the new device */
-	memset(&device_info, 0, sizeof(device_info));
-	device_info.num_chn = VRSS_CHANNEL_DEFAULT;
-	device_info.send_sections = NETVSC_DEFAULT_TX;
-	device_info.send_section_size = NETVSC_SEND_SECTION_SIZE;
-	device_info.recv_sections = NETVSC_DEFAULT_RX;
-	device_info.recv_section_size = NETVSC_RECV_SECTION_SIZE;
+	device_info = netvsc_devinfo_get(NULL);
 
-	nvdev = rndis_filter_device_add(dev, &device_info);
+	if (!device_info) {
+		ret = -ENOMEM;
+		goto devinfo_failed;
+	}
+
+	nvdev = rndis_filter_device_add(dev, device_info);
 	if (IS_ERR(nvdev)) {
 		ret = PTR_ERR(nvdev);
 		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
 		goto rndis_failed;
 	}
 
-	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
+	memcpy(net->dev_addr, device_info->mac_adr, ETH_ALEN);
 
 	/* We must get rtnl lock before scheduling nvdev->subchan_work,
 	 * otherwise netvsc_subchan_work() can get rtnl lock first and wait
@@ -2257,12 +2308,16 @@
 
 	list_add(&net_device_ctx->list, &netvsc_dev_list);
 	rtnl_unlock();
+
+	kfree(device_info);
 	return 0;
 
 register_failed:
 	rtnl_unlock();
 	rndis_filter_device_remove(dev, nvdev);
 rndis_failed:
+	kfree(device_info);
+devinfo_failed:
 	free_percpu(net_device_ctx->vf_stats);
 no_stats:
 	hv_set_drvdata(dev, NULL);
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 2a5209f..53c6039 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -715,8 +715,8 @@
 	return ret;
 }
 
-int rndis_filter_set_rss_param(struct rndis_device *rdev,
-			       const u8 *rss_key)
+static int rndis_set_rss_param_msg(struct rndis_device *rdev,
+				   const u8 *rss_key, u16 flag)
 {
 	struct net_device *ndev = rdev->ndev;
 	struct rndis_request *request;
@@ -745,7 +745,7 @@
 	rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS;
 	rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2;
 	rssp->hdr.size = sizeof(struct ndis_recv_scale_param);
-	rssp->flag = 0;
+	rssp->flag = flag;
 	rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 |
 			 NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 |
 			 NDIS_HASH_TCP_IPV6;
@@ -770,9 +770,12 @@
 
 	wait_for_completion(&request->wait_event);
 	set_complete = &request->response_msg.msg.set_complete;
-	if (set_complete->status == RNDIS_STATUS_SUCCESS)
-		memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN);
-	else {
+	if (set_complete->status == RNDIS_STATUS_SUCCESS) {
+		if (!(flag & NDIS_RSS_PARAM_FLAG_DISABLE_RSS) &&
+		    !(flag & NDIS_RSS_PARAM_FLAG_HASH_KEY_UNCHANGED))
+			memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN);
+
+	} else {
 		netdev_err(ndev, "Fail to set RSS parameters:0x%x\n",
 			   set_complete->status);
 		ret = -EINVAL;
@@ -783,6 +786,16 @@
 	return ret;
 }
 
+int rndis_filter_set_rss_param(struct rndis_device *rdev,
+			       const u8 *rss_key)
+{
+	/* Disable RSS before change */
+	rndis_set_rss_param_msg(rdev, rss_key,
+				NDIS_RSS_PARAM_FLAG_DISABLE_RSS);
+
+	return rndis_set_rss_param_msg(rdev, rss_key, 0);
+}
+
 static int rndis_filter_query_device_link_status(struct rndis_device *dev,
 						 struct netvsc_device *net_device)
 {
@@ -1062,7 +1075,9 @@
  * This breaks overlap of processing the host message for the
  * new primary channel with the initialization of sub-channels.
  */
-int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev)
+int rndis_set_subchannel(struct net_device *ndev,
+			 struct netvsc_device *nvdev,
+			 struct netvsc_device_info *dev_info)
 {
 	struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
 	struct net_device_context *ndev_ctx = netdev_priv(ndev);
@@ -1103,7 +1118,10 @@
 		   atomic_read(&nvdev->open_chn) == nvdev->num_chn);
 
 	/* ignore failues from setting rss parameters, still have channels */
-	rndis_filter_set_rss_param(rdev, netvsc_hash_key);
+	if (dev_info)
+		rndis_filter_set_rss_param(rdev, dev_info->rss_key);
+	else
+		rndis_filter_set_rss_param(rdev, netvsc_hash_key);
 
 	netif_set_real_num_tx_queues(ndev, nvdev->num_chn);
 	netif_set_real_num_rx_queues(ndev, nvdev->num_chn);
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 5fb5418..68b8007 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -494,6 +494,8 @@
 
 	if (!data)
 		return 0;
+	if (!ns_capable(dev_net(ipvlan->phy_dev)->user_ns, CAP_NET_ADMIN))
+		return -EPERM;
 
 	if (data[IFLA_IPVLAN_MODE]) {
 		u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
@@ -596,6 +598,8 @@
 		struct ipvl_dev *tmp = netdev_priv(phy_dev);
 
 		phy_dev = tmp->phy_dev;
+		if (!ns_capable(dev_net(phy_dev)->user_ns, CAP_NET_ADMIN))
+			return -EPERM;
 	} else if (!netif_is_ipvlan_port(phy_dev)) {
 		/* Exit early if the underlying link is invalid or busy */
 		if (phy_dev->type != ARPHRD_ETHER ||
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 15c5586..c5588d4 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -380,7 +380,6 @@
 	err = device_register(&bus->dev);
 	if (err) {
 		pr_err("mii_bus %s failed to register\n", bus->id);
-		put_device(&bus->dev);
 		return -EINVAL;
 	}
 
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 3db06b4..05a6ae3 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -339,6 +339,17 @@
 	return genphy_config_aneg(phydev);
 }
 
+static int ksz8061_config_init(struct phy_device *phydev)
+{
+	int ret;
+
+	ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A);
+	if (ret)
+		return ret;
+
+	return kszphy_config_init(phydev);
+}
+
 static int ksz9021_load_values_from_of(struct phy_device *phydev,
 				       const struct device_node *of_node,
 				       u16 reg,
@@ -934,7 +945,7 @@
 	.phy_id_mask	= MICREL_PHY_ID_MASK,
 	.features	= PHY_BASIC_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
-	.config_init	= kszphy_config_init,
+	.config_init	= ksz8061_config_init,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
 	.suspend	= genphy_suspend,
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 2787e8b..f6e70f2 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -348,6 +348,10 @@
 	linkmode_zero(state->lp_advertising);
 	state->interface = pl->link_config.interface;
 	state->an_enabled = pl->link_config.an_enabled;
+	state->speed = SPEED_UNKNOWN;
+	state->duplex = DUPLEX_UNKNOWN;
+	state->pause = MLO_PAUSE_NONE;
+	state->an_complete = 0;
 	state->link = 1;
 
 	return pl->ops->mac_link_state(ndev, state);
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 8f09edd..50c6055 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -532,6 +532,7 @@
 		pppox_unbind_sock(sk);
 	}
 	skb_queue_purge(&sk->sk_receive_queue);
+	dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
 }
 
 static int pptp_create(struct net *net, struct socket *sock, int kern)
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 723814d..95ee9d8 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1259,7 +1259,7 @@
 	list_add_tail_rcu(&port->list, &team->port_list);
 	team_port_enable(team, port);
 	__team_compute_features(team);
-	__team_port_change_port_added(port, !!netif_carrier_ok(port_dev));
+	__team_port_change_port_added(port, !!netif_oper_up(port_dev));
 	__team_options_change_check(team);
 
 	netdev_info(dev, "Port device %s added\n", portname);
@@ -2918,7 +2918,7 @@
 
 	switch (event) {
 	case NETDEV_UP:
-		if (netif_carrier_ok(dev))
+		if (netif_oper_up(dev))
 			team_port_change_check(port, true);
 		break;
 	case NETDEV_DOWN:
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
index a5ef970..5541e1c 100644
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -325,6 +325,20 @@
 	return 0;
 }
 
+static void lb_bpf_func_free(struct team *team)
+{
+	struct lb_priv *lb_priv = get_lb_priv(team);
+	struct bpf_prog *fp;
+
+	if (!lb_priv->ex->orig_fprog)
+		return;
+
+	__fprog_destroy(lb_priv->ex->orig_fprog);
+	fp = rcu_dereference_protected(lb_priv->fp,
+				       lockdep_is_held(&team->lock));
+	bpf_prog_destroy(fp);
+}
+
 static int lb_tx_method_get(struct team *team, struct team_gsetter_ctx *ctx)
 {
 	struct lb_priv *lb_priv = get_lb_priv(team);
@@ -639,6 +653,7 @@
 
 	team_options_unregister(team, lb_options,
 				ARRAY_SIZE(lb_options));
+	lb_bpf_func_free(team);
 	cancel_delayed_work_sync(&lb_priv->ex->stats.refresh_dw);
 	free_percpu(lb_priv->pcpu_stats);
 	kfree(lb_priv->ex);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 0baade2..ee4f901 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -2126,9 +2126,9 @@
 	}
 
 	add_wait_queue(&tfile->wq.wait, &wait);
-	current->state = TASK_INTERRUPTIBLE;
 
 	while (1) {
+		set_current_state(TASK_INTERRUPTIBLE);
 		ptr = ptr_ring_consume(&tfile->tx_ring);
 		if (ptr)
 			break;
@@ -2144,7 +2144,7 @@
 		schedule();
 	}
 
-	current->state = TASK_RUNNING;
+	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(&tfile->wq.wait, &wait);
 
 out:
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index b654f05..3d93993 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -739,8 +739,13 @@
 	asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &chipcode, 0);
 	chipcode &= AX_CHIPCODE_MASK;
 
-	(chipcode == AX_AX88772_CHIPCODE) ? ax88772_hw_reset(dev, 0) :
-					    ax88772a_hw_reset(dev, 0);
+	ret = (chipcode == AX_AX88772_CHIPCODE) ? ax88772_hw_reset(dev, 0) :
+						  ax88772a_hw_reset(dev, 0);
+
+	if (ret < 0) {
+		netdev_dbg(dev->net, "Failed to reset AX88772: %d\n", ret);
+		return ret;
+	}
 
 	/* Read PHYID register *AFTER* the PHY was reset properly */
 	phyid = asix_get_phyid(dev);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 735ad83..6e38135 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -976,6 +976,13 @@
 					      0xff),
 		.driver_info	    = (unsigned long)&qmi_wwan_info_quirk_dtr,
 	},
+	{	/* Quectel EG12/EM12 */
+		USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0512,
+					      USB_CLASS_VENDOR_SPEC,
+					      USB_SUBCLASS_VENDOR_SPEC,
+					      0xff),
+		.driver_info	    = (unsigned long)&qmi_wwan_info_quirk_dtr,
+	},
 
 	/* 3. Combined interface devices matching on interface number */
 	{QMI_FIXED_INTF(0x0408, 0xea42, 4)},	/* Yota / Megafon M100-1 */
@@ -1343,17 +1350,20 @@
 	return false;
 }
 
-static bool quectel_ep06_diag_detected(struct usb_interface *intf)
+static bool quectel_diag_detected(struct usb_interface *intf)
 {
 	struct usb_device *dev = interface_to_usbdev(intf);
 	struct usb_interface_descriptor intf_desc = intf->cur_altsetting->desc;
+	u16 id_vendor = le16_to_cpu(dev->descriptor.idVendor);
+	u16 id_product = le16_to_cpu(dev->descriptor.idProduct);
 
-	if (le16_to_cpu(dev->descriptor.idVendor) == 0x2c7c &&
-	    le16_to_cpu(dev->descriptor.idProduct) == 0x0306 &&
-	    intf_desc.bNumEndpoints == 2)
+	if (id_vendor != 0x2c7c || intf_desc.bNumEndpoints != 2)
+		return false;
+
+	if (id_product == 0x0306 || id_product == 0x0512)
 		return true;
-
-	return false;
+	else
+		return false;
 }
 
 static int qmi_wwan_probe(struct usb_interface *intf,
@@ -1390,13 +1400,13 @@
 		return -ENODEV;
 	}
 
-	/* Quectel EP06/EM06/EG06 supports dynamic interface configuration, so
+	/* Several Quectel modems supports dynamic interface configuration, so
 	 * we need to match on class/subclass/protocol. These values are
 	 * identical for the diagnostic- and QMI-interface, but bNumEndpoints is
 	 * different. Ignore the current interface if the number of endpoints
 	 * the number for the diag interface (two).
 	 */
-	if (quectel_ep06_diag_detected(intf))
+	if (quectel_diag_detected(intf))
 		return -ENODEV;
 
 	return usbnet_probe(intf, id);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 9fc9aed6..52387f7 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1469,6 +1469,14 @@
 		goto drop;
 	}
 
+	rcu_read_lock();
+
+	if (unlikely(!(vxlan->dev->flags & IFF_UP))) {
+		rcu_read_unlock();
+		atomic_long_inc(&vxlan->dev->rx_dropped);
+		goto drop;
+	}
+
 	stats = this_cpu_ptr(vxlan->dev->tstats);
 	u64_stats_update_begin(&stats->syncp);
 	stats->rx_packets++;
@@ -1476,6 +1484,9 @@
 	u64_stats_update_end(&stats->syncp);
 
 	gro_cells_receive(&vxlan->gro_cells, skb);
+
+	rcu_read_unlock();
+
 	return 0;
 
 drop:
@@ -2460,6 +2471,8 @@
 {
 	struct vxlan_dev *vxlan = netdev_priv(dev);
 
+	gro_cells_destroy(&vxlan->gro_cells);
+
 	vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni);
 
 	free_percpu(dev->tstats);
@@ -3526,7 +3539,6 @@
 
 	vxlan_flush(vxlan, true);
 
-	gro_cells_destroy(&vxlan->gro_cells);
 	list_del(&vxlan->next);
 	unregister_netdevice_queue(dev, head);
 }
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index c070a9e..fae572b 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -636,15 +636,15 @@
 		ret = ath9k_eeprom_request(sc, eeprom_name);
 		if (ret)
 			return ret;
+
+		ah->ah_flags &= ~AH_USE_EEPROM;
+		ah->ah_flags |= AH_NO_EEP_SWAP;
 	}
 
 	mac = of_get_mac_address(np);
 	if (mac)
 		ether_addr_copy(common->macaddr, mac);
 
-	ah->ah_flags &= ~AH_USE_EEPROM;
-	ah->ah_flags |= AH_NO_EEP_SWAP;
-
 	return 0;
 }
 
diff --git a/drivers/net/wireless/cnss2/Makefile b/drivers/net/wireless/cnss2/Makefile
index 4859eb2..3d6b813 100644
--- a/drivers/net/wireless/cnss2/Makefile
+++ b/drivers/net/wireless/cnss2/Makefile
@@ -7,4 +7,5 @@
 cnss2-y += debug.o
 cnss2-y += pci.o
 cnss2-y += power.o
+cnss2-$(CONFIG_CNSS2_DEBUG) += genl.o
 cnss2-$(CONFIG_CNSS2_QMI) += qmi.o wlan_firmware_service_v01.o coexistence_service_v01.o
diff --git a/drivers/net/wireless/cnss2/bus.c b/drivers/net/wireless/cnss2/bus.c
index f808ca1..99ce869 100644
--- a/drivers/net/wireless/cnss2/bus.c
+++ b/drivers/net/wireless/cnss2/bus.c
@@ -124,6 +124,37 @@
 	}
 }
 
+int cnss_bus_alloc_qdss_mem(struct cnss_plat_data *plat_priv)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_alloc_qdss_mem(plat_priv->bus_priv);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return -EINVAL;
+	}
+}
+
+void cnss_bus_free_qdss_mem(struct cnss_plat_data *plat_priv)
+{
+	if (!plat_priv)
+		return;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		cnss_pci_free_qdss_mem(plat_priv->bus_priv);
+		return;
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return;
+	}
+}
+
 u32 cnss_bus_get_wake_irq(struct cnss_plat_data *plat_priv)
 {
 	if (!plat_priv)
diff --git a/drivers/net/wireless/cnss2/bus.h b/drivers/net/wireless/cnss2/bus.h
index ad5cb1b..710f92f 100644
--- a/drivers/net/wireless/cnss2/bus.h
+++ b/drivers/net/wireless/cnss2/bus.h
@@ -24,6 +24,8 @@
 void cnss_bus_deinit(struct cnss_plat_data *plat_priv);
 int cnss_bus_load_m3(struct cnss_plat_data *plat_priv);
 int cnss_bus_alloc_fw_mem(struct cnss_plat_data *plat_priv);
+int cnss_bus_alloc_qdss_mem(struct cnss_plat_data *plat_priv);
+void cnss_bus_free_qdss_mem(struct cnss_plat_data *plat_priv);
 u32 cnss_bus_get_wake_irq(struct cnss_plat_data *plat_priv);
 int cnss_bus_force_fw_assert_hdlr(struct cnss_plat_data *plat_priv);
 void cnss_bus_fw_boot_timeout_hdlr(struct timer_list *t);
diff --git a/drivers/net/wireless/cnss2/genl.c b/drivers/net/wireless/cnss2/genl.c
new file mode 100644
index 0000000..5a7fb1f5
--- /dev/null
+++ b/drivers/net/wireless/cnss2/genl.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved. */
+
+#define pr_fmt(fmt) "cnss_genl: " fmt
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include "main.h"
+#include "debug.h"
+
+#define CNSS_GENL_FAMILY_NAME "cnss-genl"
+#define CNSS_GENL_MCAST_GROUP_NAME "cnss-genl-grp"
+#define CNSS_GENL_VERSION 1
+#define CNSS_GENL_DATA_LEN_MAX (15 * 1024)
+#define CNSS_GENL_STR_LEN_MAX 16
+
+enum {
+	CNSS_GENL_ATTR_MSG_UNSPEC,
+	CNSS_GENL_ATTR_MSG_TYPE,
+	CNSS_GENL_ATTR_MSG_FILE_NAME,
+	CNSS_GENL_ATTR_MSG_TOTAL_SIZE,
+	CNSS_GENL_ATTR_MSG_SEG_ID,
+	CNSS_GENL_ATTR_MSG_END,
+	CNSS_GENL_ATTR_MSG_DATA_LEN,
+	CNSS_GENL_ATTR_MSG_DATA,
+	__CNSS_GENL_ATTR_MAX,
+};
+
+#define CNSS_GENL_ATTR_MAX (__CNSS_GENL_ATTR_MAX - 1)
+
+enum {
+	CNSS_GENL_CMD_UNSPEC,
+	CNSS_GENL_CMD_MSG,
+	__CNSS_GENL_CMD_MAX,
+};
+
+#define CNSS_GENL_CMD_MAX (__CNSS_GENL_CMD_MAX - 1)
+
+static struct nla_policy cnss_genl_msg_policy[CNSS_GENL_ATTR_MAX + 1] = {
+	[CNSS_GENL_ATTR_MSG_TYPE] = { .type = NLA_U8 },
+	[CNSS_GENL_ATTR_MSG_FILE_NAME] = { .type = NLA_NUL_STRING,
+					   .len = CNSS_GENL_STR_LEN_MAX },
+	[CNSS_GENL_ATTR_MSG_TOTAL_SIZE] = { .type = NLA_U32 },
+	[CNSS_GENL_ATTR_MSG_SEG_ID] = { .type = NLA_U32 },
+	[CNSS_GENL_ATTR_MSG_END] = { .type = NLA_U8 },
+	[CNSS_GENL_ATTR_MSG_DATA_LEN] = { .type = NLA_U32 },
+	[CNSS_GENL_ATTR_MSG_DATA] = { .type = NLA_BINARY,
+				      .len = CNSS_GENL_DATA_LEN_MAX },
+};
+
+static int cnss_genl_process_msg(struct sk_buff *skb, struct genl_info *info)
+{
+	return 0;
+}
+
+static struct genl_ops cnss_genl_ops[] = {
+	{
+		.cmd = CNSS_GENL_CMD_MSG,
+		.policy = cnss_genl_msg_policy,
+		.doit = cnss_genl_process_msg,
+	},
+};
+
+static struct genl_multicast_group cnss_genl_mcast_grp[] = {
+	{
+		.name = CNSS_GENL_MCAST_GROUP_NAME,
+	},
+};
+
+static struct genl_family cnss_genl_family = {
+	.id = 0,
+	.hdrsize = 0,
+	.name = CNSS_GENL_FAMILY_NAME,
+	.version = CNSS_GENL_VERSION,
+	.maxattr = CNSS_GENL_ATTR_MAX,
+	.module = THIS_MODULE,
+	.ops = cnss_genl_ops,
+	.n_ops = ARRAY_SIZE(cnss_genl_ops),
+	.mcgrps = cnss_genl_mcast_grp,
+	.n_mcgrps = ARRAY_SIZE(cnss_genl_mcast_grp),
+};
+
+static int cnss_genl_send_data(u8 type, char *file_name, u32 total_size,
+			       u32 seg_id, u8 end, u32 data_len, u8 *msg_buff)
+{
+	struct sk_buff *skb = NULL;
+	void *msg_header = NULL;
+	int ret = 0;
+	char filename[CNSS_GENL_STR_LEN_MAX + 1];
+
+	cnss_pr_dbg("type: %u, file_name %s, total_size: %x, seg_id %u, end %u, data_len %u\n",
+		    type, file_name, total_size, seg_id, end, data_len);
+
+	if (!file_name)
+		strlcpy(filename, "default", sizeof(filename));
+	else
+		strlcpy(filename, file_name, sizeof(filename));
+
+	skb = genlmsg_new(NLMSG_HDRLEN +
+			  nla_total_size(sizeof(type)) +
+			  nla_total_size(strlen(filename) + 1) +
+			  nla_total_size(sizeof(total_size)) +
+			  nla_total_size(sizeof(seg_id)) +
+			  nla_total_size(sizeof(end)) +
+			  nla_total_size(sizeof(data_len)) +
+			  nla_total_size(data_len), GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	msg_header = genlmsg_put(skb, 0, 0,
+				 &cnss_genl_family, 0,
+				 CNSS_GENL_CMD_MSG);
+	if (!msg_header) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	ret = nla_put_u8(skb, CNSS_GENL_ATTR_MSG_TYPE, type);
+	if (ret < 0)
+		goto fail;
+	ret = nla_put_string(skb, CNSS_GENL_ATTR_MSG_FILE_NAME, filename);
+	if (ret < 0)
+		goto fail;
+	ret = nla_put_u32(skb, CNSS_GENL_ATTR_MSG_TOTAL_SIZE, total_size);
+	if (ret < 0)
+		goto fail;
+	ret = nla_put_u32(skb, CNSS_GENL_ATTR_MSG_SEG_ID, seg_id);
+	if (ret < 0)
+		goto fail;
+	ret = nla_put_u8(skb, CNSS_GENL_ATTR_MSG_END, end);
+	if (ret < 0)
+		goto fail;
+	ret = nla_put_u32(skb, CNSS_GENL_ATTR_MSG_DATA_LEN, data_len);
+	if (ret < 0)
+		goto fail;
+	ret = nla_put(skb, CNSS_GENL_ATTR_MSG_DATA, data_len, msg_buff);
+	if (ret < 0)
+		goto fail;
+
+	genlmsg_end(skb, msg_header);
+	ret = genlmsg_multicast(&cnss_genl_family, skb, 0, 0, GFP_KERNEL);
+	if (ret < 0)
+		cnss_pr_err("Fail to send genl msg: %d\n", ret);
+
+	return ret;
+fail:
+	cnss_pr_err("Fail to generate genl msg: %d\n", ret);
+	if (skb)
+		nlmsg_free(skb);
+	return ret;
+}
+
+int cnss_genl_send_msg(void *buff, u8 type, char *file_name, u32 total_size)
+{
+	int ret = 0;
+	u8 *msg_buff = buff;
+	u32 remaining = total_size;
+	u32 seg_id = 0;
+	u32 data_len = 0;
+	u8 end = 0;
+
+	cnss_pr_dbg("type: %u, total_size: %x\n", type, total_size);
+
+	while (remaining) {
+		if (remaining > CNSS_GENL_DATA_LEN_MAX) {
+			data_len = CNSS_GENL_DATA_LEN_MAX;
+		} else {
+			data_len = remaining;
+			end = 1;
+		}
+		ret = cnss_genl_send_data(type, file_name, total_size,
+					  seg_id, end, data_len, msg_buff);
+		if (ret < 0) {
+			cnss_pr_err("fail to send genl data, ret %d\n", ret);
+			return ret;
+		}
+
+		remaining -= data_len;
+		msg_buff += data_len;
+		seg_id++;
+	}
+
+	return ret;
+}
+
+int cnss_genl_init(void)
+{
+	int ret = 0;
+
+	ret = genl_register_family(&cnss_genl_family);
+	if (ret != 0)
+		cnss_pr_err("genl_register_family fail: %d\n", ret);
+
+	return ret;
+}
+
+void cnss_genl_exit(void)
+{
+	genl_unregister_family(&cnss_genl_family);
+}
diff --git a/drivers/net/wireless/cnss2/genl.h b/drivers/net/wireless/cnss2/genl.h
new file mode 100644
index 0000000..33ca30a
--- /dev/null
+++ b/drivers/net/wireless/cnss2/genl.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved. */
+
+#ifndef __CNSS_GENL_H__
+#define __CNSS_GENL_H__
+
+enum cnss_genl_msg_type {
+	CNSS_GENL_MSG_TYPE_UNSPEC,
+	CNSS_GENL_MSG_TYPE_QDSS,
+};
+
+#ifdef CONFIG_CNSS2_DEBUG
+int cnss_genl_init(void);
+void cnss_genl_exit(void);
+int cnss_genl_send_msg(void *buff, u8 type,
+		       char *file_name, u32 total_size);
+#else
+static inline int cnss_genl_init(void)
+{
+	return 0;
+}
+
+static inline void cnss_genl_exit(void)
+{
+}
+
+static inline int cnss_genl_send_msg(void *buff, u8 type,
+				     char *file_name, u32 total_size)
+{
+	return 0;
+}
+#endif
+
+#endif
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
index c6c66f2..898d59a 100644
--- a/drivers/net/wireless/cnss2/main.c
+++ b/drivers/net/wireless/cnss2/main.c
@@ -16,6 +16,7 @@
 #include "main.h"
 #include "bus.h"
 #include "debug.h"
+#include "genl.h"
 
 #define CNSS_DUMP_FORMAT_VER		0x11
 #define CNSS_DUMP_FORMAT_VER_V2		0x22
@@ -484,6 +485,12 @@
 		return "POWER_UP";
 	case CNSS_DRIVER_EVENT_POWER_DOWN:
 		return "POWER_DOWN";
+	case CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM:
+		return "QDSS_TRACE_REQ_MEM";
+	case CNSS_DRIVER_EVENT_QDSS_TRACE_SAVE:
+		return "QDSS_TRACE_SAVE";
+	case CNSS_DRIVER_EVENT_QDSS_TRACE_FREE:
+		return "QDSS_TRACE_FREE";
 	case CNSS_DRIVER_EVENT_MAX:
 		return "EVENT_MAX";
 	}
@@ -1137,6 +1144,109 @@
 	return 0;
 }
 
+static int cnss_qdss_trace_req_mem_hdlr(struct cnss_plat_data *plat_priv)
+{
+	int ret = 0;
+
+	ret = cnss_bus_alloc_qdss_mem(plat_priv);
+	if (ret < 0)
+		return ret;
+
+	return cnss_wlfw_qdss_trace_mem_info_send_sync(plat_priv);
+}
+
+static void *cnss_qdss_trace_pa_to_va(struct cnss_plat_data *plat_priv,
+				      u64 pa, u32 size, int *seg_id)
+{
+	int i = 0;
+	struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
+	u64 offset = 0;
+	void *va = NULL;
+	u64 local_pa;
+	u32 local_size;
+
+	for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
+		local_pa = (u64)qdss_mem[i].pa;
+		local_size = (u32)qdss_mem[i].size;
+		if (pa == local_pa && size <= local_size) {
+			va = qdss_mem[i].va;
+			break;
+		}
+		if (pa > local_pa &&
+		    pa < local_pa + local_size &&
+		    pa + size <= local_pa + local_size) {
+			offset = pa - local_pa;
+			va = qdss_mem[i].va + offset;
+			break;
+		}
+	}
+
+	*seg_id = i;
+	return va;
+}
+
+static int cnss_qdss_trace_save_hdlr(struct cnss_plat_data *plat_priv,
+				     void *data)
+{
+	struct cnss_qmi_event_qdss_trace_save_data *event_data = data;
+	struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
+	int ret = 0;
+	int i;
+	void *va = NULL;
+	u64 pa;
+	u32 size;
+	int seg_id = 0;
+
+	if (!plat_priv->qdss_mem_seg_len) {
+		cnss_pr_err("Memory for QDSS trace is not available\n");
+		return -ENOMEM;
+	}
+
+	if (event_data->mem_seg_len == 0) {
+		for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
+			ret = cnss_genl_send_msg(qdss_mem[i].va,
+						 CNSS_GENL_MSG_TYPE_QDSS,
+						 event_data->file_name,
+						 qdss_mem[i].size);
+			if (ret < 0) {
+				cnss_pr_err("Fail to save QDSS data: %d\n",
+					    ret);
+				break;
+			}
+		}
+	} else {
+		for (i = 0; i < event_data->mem_seg_len; i++) {
+			pa = event_data->mem_seg[i].addr;
+			size = event_data->mem_seg[i].size;
+			va = cnss_qdss_trace_pa_to_va(plat_priv, pa,
+						      size, &seg_id);
+			if (!va) {
+				cnss_pr_err("Fail to find matching va for pa %pa\n",
+					    pa);
+				ret = -EINVAL;
+				break;
+			}
+			ret = cnss_genl_send_msg(va, CNSS_GENL_MSG_TYPE_QDSS,
+						 event_data->file_name, size);
+			if (ret < 0) {
+				cnss_pr_err("Fail to save QDSS data: %d\n",
+					    ret);
+				break;
+			}
+		}
+	}
+
+	kfree(data);
+	return ret;
+}
+
+static int cnss_qdss_trace_free_hdlr(struct cnss_plat_data *plat_priv)
+{
+	cnss_bus_free_qdss_mem(plat_priv);
+
+	return 0;
+}
+
 static void cnss_driver_event_work(struct work_struct *work)
 {
 	struct cnss_plat_data *plat_priv =
@@ -1210,6 +1320,16 @@
 		case CNSS_DRIVER_EVENT_POWER_DOWN:
 			ret = cnss_power_down_hdlr(plat_priv);
 			break;
+		case CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM:
+			ret = cnss_qdss_trace_req_mem_hdlr(plat_priv);
+			break;
+		case CNSS_DRIVER_EVENT_QDSS_TRACE_SAVE:
+			ret = cnss_qdss_trace_save_hdlr(plat_priv,
+							event->data);
+			break;
+		case CNSS_DRIVER_EVENT_QDSS_TRACE_FREE:
+			ret = cnss_qdss_trace_free_hdlr(plat_priv);
+			break;
 		default:
 			cnss_pr_err("Invalid driver event type: %d",
 				    event->type);
@@ -1746,6 +1866,10 @@
 
 	cnss_register_coex_service(plat_priv);
 
+	ret = cnss_genl_init();
+	if (ret < 0)
+		cnss_pr_err("CNSS genl init failed %d\n", ret);
+
 	cnss_pr_info("Platform driver probed successfully.\n");
 
 	return 0;
@@ -1781,6 +1905,7 @@
 {
 	struct cnss_plat_data *plat_priv = platform_get_drvdata(plat_dev);
 
+	cnss_genl_exit();
 	cnss_unregister_coex_service(plat_priv);
 	cnss_misc_deinit(plat_priv);
 	cnss_debugfs_destroy(plat_priv);
diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h
index ac87b7d..2756d55 100644
--- a/drivers/net/wireless/cnss2/main.h
+++ b/drivers/net/wireless/cnss2/main.h
@@ -161,6 +161,9 @@
 	CNSS_DRIVER_EVENT_FORCE_FW_ASSERT,
 	CNSS_DRIVER_EVENT_POWER_UP,
 	CNSS_DRIVER_EVENT_POWER_DOWN,
+	CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM,
+	CNSS_DRIVER_EVENT_QDSS_TRACE_SAVE,
+	CNSS_DRIVER_EVENT_QDSS_TRACE_FREE,
 	CNSS_DRIVER_EVENT_MAX,
 };
 
@@ -274,6 +277,9 @@
 	u32 fw_mem_seg_len;
 	struct cnss_fw_mem fw_mem[QMI_WLFW_MAX_NUM_MEM_SEG];
 	struct cnss_fw_mem m3_mem;
+	u32 qdss_mem_seg_len;
+	struct cnss_fw_mem qdss_mem[QMI_WLFW_MAX_NUM_MEM_SEG];
+	u32 *qdss_reg;
 	struct cnss_pin_connect_result pin_result;
 	struct dentry *root_dentry;
 	atomic_t pm_count;
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index 582aae0..b09a32e 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -88,6 +88,13 @@
 
 #define QCA6390_CE_REG_INTERVAL			0x2000
 
+#define QDSS_APB_DEC_CSR_BASE			0x1C01000
+
+#define QDSS_APB_DEC_CSR_ETRIRQCTRL_OFFSET	0x6C
+#define QDSS_APB_DEC_CSR_PRESERVEETF_OFFSET	0x70
+#define QDSS_APB_DEC_CSR_PRESERVEETR0_OFFSET	0x74
+#define QDSS_APB_DEC_CSR_PRESERVEETR1_OFFSET	0x78
+
 #define MAX_UNWINDOWED_ADDRESS			0x80000
 #define WINDOW_ENABLE_BIT			0x40000000
 #define WINDOW_SHIFT				19
@@ -128,6 +135,14 @@
 	{ NULL },
 };
 
+static struct cnss_pci_reg qdss_csr[] = {
+	{ "QDSSCSR_ETRIRQCTRL", QDSS_APB_DEC_CSR_ETRIRQCTRL_OFFSET },
+	{ "QDSSCSR_PRESERVEETF", QDSS_APB_DEC_CSR_PRESERVEETF_OFFSET },
+	{ "QDSSCSR_PRESERVEETR0", QDSS_APB_DEC_CSR_PRESERVEETR0_OFFSET },
+	{ "QDSSCSR_PRESERVEETR1", QDSS_APB_DEC_CSR_PRESERVEETR1_OFFSET },
+	{ NULL },
+};
+
 static void cnss_pci_select_window(struct cnss_pci_data *pci_priv, u32 offset)
 {
 	u32 window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
@@ -605,7 +620,7 @@
 
 	ret = cnss_pci_start_mhi(pci_priv);
 	if (ret) {
-		cnss_pr_err("Failed to start MHI, err = %d\n", ret);
+		cnss_fatal_err("Failed to start MHI, err = %d\n", ret);
 		if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
 		    !pci_priv->pci_link_down_ind && timeout)
 			mod_timer(&plat_priv->fw_boot_timer,
@@ -669,6 +684,8 @@
 
 	cnss_power_off_device(plat_priv);
 
+	pci_priv->remap_window = 0;
+
 	clear_bit(CNSS_FW_READY, &plat_priv->driver_state);
 	clear_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state);
 	clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
@@ -1493,6 +1510,63 @@
 	return 0;
 }
 
+int cnss_pci_alloc_qdss_mem(struct cnss_pci_data *pci_priv)
+{
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+	struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
+	int i, j;
+
+	for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
+		if (!qdss_mem[i].va && qdss_mem[i].size) {
+			qdss_mem[i].va =
+				dma_alloc_coherent(&pci_priv->pci_dev->dev,
+						   qdss_mem[i].size,
+						   &qdss_mem[i].pa,
+						   GFP_KERNEL);
+			if (!qdss_mem[i].va) {
+				cnss_pr_err("Failed to allocate QDSS memory for FW, size: 0x%zx, type: %u, chuck-ID: %d\n",
+					    qdss_mem[i].size,
+					    qdss_mem[i].type, i);
+				break;
+			}
+		}
+	}
+
+	/* Best-effort allocation for QDSS trace */
+	if (i < plat_priv->qdss_mem_seg_len) {
+		for (j = i; j < plat_priv->qdss_mem_seg_len; j++) {
+			qdss_mem[j].type = 0;
+			qdss_mem[j].size = 0;
+		}
+		plat_priv->qdss_mem_seg_len = i;
+	}
+
+	return 0;
+}
+
+void cnss_pci_free_qdss_mem(struct cnss_pci_data *pci_priv)
+{
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+	struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
+	int i;
+
+	for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
+		if (qdss_mem[i].va && qdss_mem[i].size) {
+			cnss_pr_dbg("Freeing memory for QDSS: pa: %pa, size: 0x%zx, type: %u\n",
+				    &qdss_mem[i].pa, qdss_mem[i].size,
+				    qdss_mem[i].type);
+			dma_free_coherent(&pci_priv->pci_dev->dev,
+					  qdss_mem[i].size, qdss_mem[i].va,
+					  qdss_mem[i].pa);
+			qdss_mem[i].va = NULL;
+			qdss_mem[i].pa = 0;
+			qdss_mem[i].size = 0;
+			qdss_mem[i].type = 0;
+		}
+	}
+	plat_priv->qdss_mem_seg_len = 0;
+}
+
 static void cnss_pci_free_fw_mem(struct cnss_pci_data *pci_priv)
 {
 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
@@ -1584,7 +1658,7 @@
 
 	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_TRIGGER_RDDM);
 	if (ret) {
-		cnss_pr_err("Failed to trigger RDDM, err = %d\n", ret);
+		cnss_fatal_err("Failed to trigger RDDM, err = %d\n", ret);
 		cnss_schedule_recovery(&pci_priv->pci_dev->dev,
 				       CNSS_REASON_DEFAULT);
 		return ret;
@@ -1978,6 +2052,30 @@
 	}
 };
 
+static void cnss_pci_dump_qdss_reg(struct cnss_pci_data *pci_priv)
+{
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+	int i, array_size = ARRAY_SIZE(qdss_csr) - 1;
+	gfp_t gfp = GFP_KERNEL;
+	u32 reg_offset;
+
+	if (in_interrupt() || irqs_disabled())
+		gfp = GFP_ATOMIC;
+
+	if (!plat_priv->qdss_reg)
+		plat_priv->qdss_reg = devm_kzalloc(&pci_priv->pci_dev->dev,
+						   sizeof(*plat_priv->qdss_reg)
+						   * array_size, gfp);
+
+	for (i = 0; qdss_csr[i].name; i++) {
+		reg_offset = QDSS_APB_DEC_CSR_BASE + qdss_csr[i].offset;
+		plat_priv->qdss_reg[i] = cnss_pci_reg_read(pci_priv,
+							   reg_offset);
+		cnss_pr_dbg("%s[0x%x] = 0x%x\n", qdss_csr[i].name, reg_offset,
+			    plat_priv->qdss_reg[i]);
+	}
+}
+
 static void cnss_pci_dump_ce_reg(struct cnss_pci_data *pci_priv,
 				 enum cnss_ce_index ce)
 {
@@ -2047,9 +2145,12 @@
 		return;
 	}
 
+	cnss_pci_dump_qdss_reg(pci_priv);
+
 	ret = mhi_download_rddm_img(pci_priv->mhi_ctrl, in_panic);
 	if (ret) {
-		cnss_pr_err("Failed to download RDDM image, err = %d\n", ret);
+		cnss_fatal_err("Failed to download RDDM image, err = %d\n",
+			       ret);
 		cnss_pci_dump_registers(pci_priv);
 		return;
 	}
@@ -2687,6 +2788,7 @@
 
 	cnss_pci_free_m3_mem(pci_priv);
 	cnss_pci_free_fw_mem(pci_priv);
+	cnss_pci_free_qdss_mem(pci_priv);
 
 	switch (pci_dev->device) {
 	case QCA6290_DEVICE_ID:
diff --git a/drivers/net/wireless/cnss2/pci.h b/drivers/net/wireless/cnss2/pci.h
index 43e42d1..ed28e86 100644
--- a/drivers/net/wireless/cnss2/pci.h
+++ b/drivers/net/wireless/cnss2/pci.h
@@ -123,6 +123,8 @@
 int cnss_pci_init(struct cnss_plat_data *plat_priv);
 void cnss_pci_deinit(struct cnss_plat_data *plat_priv);
 int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv);
+int cnss_pci_alloc_qdss_mem(struct cnss_pci_data *pci_priv);
+void cnss_pci_free_qdss_mem(struct cnss_pci_data *pci_priv);
 int cnss_pci_load_m3(struct cnss_pci_data *pci_priv);
 int cnss_pci_set_mhi_state(struct cnss_pci_data *pci_priv,
 			   enum cnss_mhi_state state);
diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c
index dc60e9e..e21f182 100644
--- a/drivers/net/wireless/cnss2/qmi.c
+++ b/drivers/net/wireless/cnss2/qmi.c
@@ -13,6 +13,7 @@
 #define WLFW_SERVICE_INS_ID_V01		1
 #define WLFW_CLIENT_ID			0x4b4e454c
 #define MAX_BDF_FILE_NAME		13
+#define BDF_FILE_NAME_PREFIX		"bdwlan"
 #define ELF_BDF_FILE_NAME		"bdwlan.elf"
 #define ELF_BDF_FILE_NAME_PREFIX	"bdwlan.e"
 #define BIN_BDF_FILE_NAME		"bdwlan.bin"
@@ -84,6 +85,12 @@
 	req->pin_connect_result_enable = 1;
 	req->cal_done_enable_valid = 1;
 	req->cal_done_enable = 1;
+	req->qdss_trace_req_mem_enable_valid = 1;
+	req->qdss_trace_req_mem_enable = 1;
+	req->qdss_trace_save_enable_valid = 1;
+	req->qdss_trace_save_enable = 1;
+	req->qdss_trace_free_enable_valid = 1;
+	req->qdss_trace_free_enable = 1;
 
 	ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
 			   wlfw_ind_register_resp_msg_v01_ei, resp);
@@ -436,8 +443,9 @@
 				 plat_priv->board_info.board_id);
 		else
 			snprintf(filename, sizeof(filename),
-				 ELF_BDF_FILE_NAME_PREFIX "%04x",
-				 plat_priv->board_info.board_id);
+				 BDF_FILE_NAME_PREFIX "%02x.e%02x",
+				 plat_priv->board_info.board_id >> 8 & 0xFF,
+				 plat_priv->board_info.board_id & 0xFF);
 		break;
 	case CNSS_BDF_BIN:
 		if (plat_priv->board_info.board_id == 0xFF)
@@ -448,8 +456,9 @@
 				 plat_priv->board_info.board_id);
 		else
 			snprintf(filename, sizeof(filename),
-				 BIN_BDF_FILE_NAME_PREFIX "%04x",
-				 plat_priv->board_info.board_id);
+				 BDF_FILE_NAME_PREFIX "%02x.b%02x",
+				 plat_priv->board_info.board_id >> 8 & 0xFF,
+				 plat_priv->board_info.board_id & 0xFF);
 		break;
 	case CNSS_BDF_REGDB:
 		snprintf(filename, sizeof(filename), REGDB_FILE_NAME);
@@ -1201,6 +1210,82 @@
 	return ret;
 }
 
+int cnss_wlfw_qdss_trace_mem_info_send_sync(struct cnss_plat_data *plat_priv)
+{
+	struct wlfw_qdss_trace_mem_info_req_msg_v01 *req;
+	struct wlfw_qdss_trace_mem_info_resp_msg_v01 *resp;
+	struct qmi_txn txn;
+	struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
+	int ret = 0;
+	int i;
+
+	cnss_pr_dbg("Sending QDSS trace mem info, state: 0x%lx\n",
+		    plat_priv->driver_state);
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+	if (!resp) {
+		kfree(req);
+		return -ENOMEM;
+	}
+
+	req->mem_seg_len = plat_priv->qdss_mem_seg_len;
+	for (i = 0; i < req->mem_seg_len; i++) {
+		cnss_pr_dbg("Memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx, type: %u\n",
+			    qdss_mem[i].va, &qdss_mem[i].pa,
+			    qdss_mem[i].size, qdss_mem[i].type);
+
+		req->mem_seg[i].addr = qdss_mem[i].pa;
+		req->mem_seg[i].size = qdss_mem[i].size;
+		req->mem_seg[i].type = qdss_mem[i].type;
+	}
+
+	ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
+			   wlfw_qdss_trace_mem_info_resp_msg_v01_ei, resp);
+	if (ret < 0) {
+		cnss_pr_err("Fail to initialize txn for QDSS trace mem request: err %d\n",
+			    ret);
+		goto out;
+	}
+
+	ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
+			       QMI_WLFW_QDSS_TRACE_MEM_INFO_REQ_V01,
+			       WLFW_QDSS_TRACE_MEM_INFO_REQ_MSG_V01_MAX_MSG_LEN,
+			       wlfw_qdss_trace_mem_info_req_msg_v01_ei, req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		cnss_pr_err("Fail to send QDSS trace mem info request: err %d\n",
+			    ret);
+		goto out;
+	}
+
+	ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
+	if (ret < 0) {
+		cnss_pr_err("Fail to wait for response of QDSS trace mem info request, err %d\n",
+			    ret);
+		goto out;
+	}
+
+	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+		cnss_pr_err("QDSS trace mem info request failed, result: %d, err: %d\n",
+			    resp->resp.result, resp->resp.error);
+		ret = -resp->resp.result;
+		goto out;
+	}
+
+	kfree(req);
+	kfree(resp);
+	return 0;
+
+out:
+	kfree(req);
+	kfree(resp);
+	return ret;
+}
+
 unsigned int cnss_get_qmi_timeout(struct cnss_plat_data *plat_priv)
 {
 	cnss_pr_dbg("QMI timeout is %u ms\n", QMI_WLFW_TIMEOUT_MS);
@@ -1336,6 +1421,118 @@
 			       0, NULL);
 }
 
+static void cnss_wlfw_qdss_trace_req_mem_ind_cb(struct qmi_handle *qmi_wlfw,
+						struct sockaddr_qrtr *sq,
+						struct qmi_txn *txn,
+						const void *data)
+{
+	struct cnss_plat_data *plat_priv =
+		container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
+	const struct wlfw_qdss_trace_req_mem_ind_msg_v01 *ind_msg = data;
+	int i;
+
+	cnss_pr_dbg("Received QMI WLFW QDSS trace request mem indication\n");
+
+	if (!txn) {
+		cnss_pr_err("Spurious indication\n");
+		return;
+	}
+
+	if (plat_priv->qdss_mem_seg_len) {
+		cnss_pr_err("Ignore double allocation for QDSS trace, current len %u\n",
+			    plat_priv->qdss_mem_seg_len);
+		return;
+	}
+
+	plat_priv->qdss_mem_seg_len = ind_msg->mem_seg_len;
+	for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
+		cnss_pr_dbg("QDSS requests for memory, size: 0x%zx, type: %u\n",
+			    ind_msg->mem_seg[i].size, ind_msg->mem_seg[i].type);
+		plat_priv->qdss_mem[i].type = ind_msg->mem_seg[i].type;
+		plat_priv->qdss_mem[i].size = ind_msg->mem_seg[i].size;
+	}
+
+	cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM,
+			       0, NULL);
+}
+
+static void cnss_wlfw_qdss_trace_save_ind_cb(struct qmi_handle *qmi_wlfw,
+					     struct sockaddr_qrtr *sq,
+					     struct qmi_txn *txn,
+					     const void *data)
+{
+	struct cnss_plat_data *plat_priv =
+		container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
+	const struct wlfw_qdss_trace_save_ind_msg_v01 *ind_msg = data;
+	struct cnss_qmi_event_qdss_trace_save_data *event_data;
+	int i = 0;
+
+	cnss_pr_dbg("Received QMI WLFW QDSS trace save indication\n");
+
+	if (!txn) {
+		cnss_pr_err("Spurious indication\n");
+		return;
+	}
+
+	cnss_pr_dbg("QDSS_trace_save info: source %u, total_size %u, file_name_valid %u, file_name %s\n",
+		    ind_msg->source, ind_msg->total_size,
+		    ind_msg->file_name_valid, ind_msg->file_name);
+
+	if (ind_msg->source == 1)
+		return;
+
+	event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
+	if (!event_data)
+		return;
+
+	if (ind_msg->mem_seg_valid) {
+		if (ind_msg->mem_seg_len > QDSS_TRACE_SEG_LEN_MAX) {
+			cnss_pr_err("Invalid seg len %u\n",
+				    ind_msg->mem_seg_len);
+			goto free_event_data;
+		}
+		cnss_pr_dbg("QDSS_trace_save seg len %u\n",
+			    ind_msg->mem_seg_len);
+		event_data->mem_seg_len = ind_msg->mem_seg_len;
+		for (i = 0; i < ind_msg->mem_seg_len; i++) {
+			event_data->mem_seg[i].addr = ind_msg->mem_seg[i].addr;
+			event_data->mem_seg[i].size = ind_msg->mem_seg[i].size;
+			cnss_pr_dbg("seg-%d: addr 0x%llx size 0x%x\n",
+				    i, ind_msg->mem_seg[i].addr,
+				    ind_msg->mem_seg[i].size);
+		}
+	}
+
+	event_data->total_size = ind_msg->total_size;
+
+	if (ind_msg->file_name_valid)
+		strlcpy(event_data->file_name, ind_msg->file_name,
+			QDSS_TRACE_FILE_NAME_MAX + 1);
+	else
+		strlcpy(event_data->file_name, "qdss_trace",
+			QDSS_TRACE_FILE_NAME_MAX + 1);
+
+	cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_QDSS_TRACE_SAVE,
+			       0, event_data);
+
+	return;
+
+free_event_data:
+	kfree(event_data);
+}
+
+static void cnss_wlfw_qdss_trace_free_ind_cb(struct qmi_handle *qmi_wlfw,
+					     struct sockaddr_qrtr *sq,
+					     struct qmi_txn *txn,
+					     const void *data)
+{
+	struct cnss_plat_data *plat_priv =
+		container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
+
+	cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_QDSS_TRACE_FREE,
+			       0, NULL);
+}
+
 static struct qmi_msg_handler qmi_wlfw_msg_handlers[] = {
 	{
 		.type = QMI_INDICATION,
@@ -1380,6 +1577,30 @@
 		.decoded_size = sizeof(struct wlfw_cal_done_ind_msg_v01),
 		.fn = cnss_wlfw_cal_done_ind_cb
 	},
+	{
+		.type = QMI_INDICATION,
+		.msg_id = QMI_WLFW_QDSS_TRACE_REQ_MEM_IND_V01,
+		.ei = wlfw_qdss_trace_req_mem_ind_msg_v01_ei,
+		.decoded_size =
+		sizeof(struct wlfw_qdss_trace_req_mem_ind_msg_v01),
+		.fn = cnss_wlfw_qdss_trace_req_mem_ind_cb
+	},
+	{
+		.type = QMI_INDICATION,
+		.msg_id = QMI_WLFW_QDSS_TRACE_SAVE_IND_V01,
+		.ei = wlfw_qdss_trace_save_ind_msg_v01_ei,
+		.decoded_size =
+		sizeof(struct wlfw_qdss_trace_save_ind_msg_v01),
+		.fn = cnss_wlfw_qdss_trace_save_ind_cb
+	},
+	{
+		.type = QMI_INDICATION,
+		.msg_id = QMI_WLFW_QDSS_TRACE_FREE_IND_V01,
+		.ei = wlfw_qdss_trace_free_ind_msg_v01_ei,
+		.decoded_size =
+		sizeof(struct wlfw_qdss_trace_free_ind_msg_v01),
+		.fn = cnss_wlfw_qdss_trace_free_ind_cb
+	},
 	{}
 };
 
diff --git a/drivers/net/wireless/cnss2/qmi.h b/drivers/net/wireless/cnss2/qmi.h
index 137d549..784aadc 100644
--- a/drivers/net/wireless/cnss2/qmi.h
+++ b/drivers/net/wireless/cnss2/qmi.h
@@ -8,15 +8,30 @@
 
 struct cnss_plat_data;
 
-#ifdef CONFIG_CNSS2_QMI
-#include "wlan_firmware_service_v01.h"
-#include "coexistence_service_v01.h"
-
 struct cnss_qmi_event_server_arrive_data {
 	unsigned int node;
 	unsigned int port;
 };
 
+#define QDSS_TRACE_SEG_LEN_MAX 32
+#define QDSS_TRACE_FILE_NAME_MAX 16
+
+struct cnss_mem_seg {
+	u64 addr;
+	u32 size;
+};
+
+struct cnss_qmi_event_qdss_trace_save_data {
+	u32 total_size;
+	u32 mem_seg_len;
+	struct cnss_mem_seg mem_seg[QDSS_TRACE_SEG_LEN_MAX];
+	char file_name[QDSS_TRACE_FILE_NAME_MAX + 1];
+};
+
+#ifdef CONFIG_CNSS2_QMI
+#include "wlan_firmware_service_v01.h"
+#include "coexistence_service_v01.h"
+
 int cnss_qmi_init(struct cnss_plat_data *plat_priv);
 void cnss_qmi_deinit(struct cnss_plat_data *plat_priv);
 unsigned int cnss_get_qmi_timeout(struct cnss_plat_data *plat_priv);
@@ -46,7 +61,7 @@
 void cnss_unregister_coex_service(struct cnss_plat_data *plat_priv);
 int coex_antenna_switch_to_wlan_send_sync_msg(struct cnss_plat_data *plat_priv);
 int coex_antenna_switch_to_mdm_send_sync_msg(struct cnss_plat_data *plat_priv);
-
+int cnss_wlfw_qdss_trace_mem_info_send_sync(struct cnss_plat_data *plat_priv);
 #else
 #define QMI_WLFW_TIMEOUT_MS		10000
 
@@ -165,6 +180,12 @@
 
 static inline
 int coex_antenna_switch_to_mdm_send_sync_msg(struct cnss_plat_data *plat_priv)
+{
+	return 0;
+}
+
+static inline
+int cnss_wlfw_qdss_trace_mem_info_send_sync(struct cnss_plat_data *plat_priv)
 {
 	return 0;
 }
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 750bea3..627df16 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -164,6 +164,12 @@
 	}
 
 	sdio_claim_host(func);
+	/*
+	 * To guarantee that the SDIO card is power cycled, as required to make
+	 * the FW programming to succeed, let's do a brute force HW reset.
+	 */
+	mmc_hw_reset(card->host);
+
 	sdio_enable_func(func);
 	sdio_release_host(func);
 
@@ -174,20 +180,13 @@
 {
 	struct sdio_func *func = dev_to_sdio_func(glue->dev);
 	struct mmc_card *card = func->card;
-	int error;
 
 	sdio_claim_host(func);
 	sdio_disable_func(func);
 	sdio_release_host(func);
 
 	/* Let runtime PM know the card is powered off */
-	error = pm_runtime_put(&card->dev);
-	if (error < 0 && error != -EBUSY) {
-		dev_err(&card->dev, "%s failed: %i\n", __func__, error);
-
-		return error;
-	}
-
+	pm_runtime_put(&card->dev);
 	return 0;
 }
 
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
index 0ccb021..10d580c 100644
--- a/drivers/net/xen-netback/hash.c
+++ b/drivers/net/xen-netback/hash.c
@@ -454,6 +454,8 @@
 	if (xenvif_hash_cache_size == 0)
 		return;
 
+	BUG_ON(vif->hash.cache.count);
+
 	spin_lock_init(&vif->hash.cache.lock);
 	INIT_LIST_HEAD(&vif->hash.cache.list);
 }
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index f6ae23f..82add0a 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -153,6 +153,13 @@
 {
 	struct xenvif *vif = netdev_priv(dev);
 	unsigned int size = vif->hash.size;
+	unsigned int num_queues;
+
+	/* If queues are not set up internally - always return 0
+	 * as the packet is going to be dropped anyway */
+	num_queues = READ_ONCE(vif->num_queues);
+	if (num_queues < 1)
+		return 0;
 
 	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
 		return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 3621e05..d5081ff 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1072,11 +1072,6 @@
 		skb_frag_size_set(&frags[i], len);
 	}
 
-	/* Copied all the bits from the frag list -- free it. */
-	skb_frag_list_init(skb);
-	xenvif_skb_zerocopy_prepare(queue, nskb);
-	kfree_skb(nskb);
-
 	/* Release all the original (foreign) frags. */
 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
 		skb_frag_unref(skb, f);
@@ -1145,6 +1140,8 @@
 		xenvif_fill_frags(queue, skb);
 
 		if (unlikely(skb_has_frag_list(skb))) {
+			struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
+			xenvif_skb_zerocopy_prepare(queue, nskb);
 			if (xenvif_handle_frag_list(queue, skb)) {
 				if (net_ratelimit())
 					netdev_err(queue->vif->dev,
@@ -1153,6 +1150,9 @@
 				kfree_skb(skb);
 				continue;
 			}
+			/* Copied all the bits from the frag list -- free it. */
+			skb_frag_list_init(skb);
+			kfree_skb(nskb);
 		}
 
 		skb->dev      = queue->vif->dev;
diff --git a/drivers/nfc/nq-nci.c b/drivers/nfc/nq-nci.c
index 354bece..2db1bd1 100644
--- a/drivers/nfc/nq-nci.c
+++ b/drivers/nfc/nq-nci.c
@@ -1295,7 +1295,7 @@
 	gpio_free(platform_data->clkreq_gpio);
 err_ese_gpio:
 	/* optional gpio, not sure was configured in probe */
-	if (nqx_dev->ese_gpio > 0)
+	if (gpio_is_valid(platform_data->ese_gpio))
 		gpio_free(platform_data->ese_gpio);
 err_firm_gpio:
 	gpio_free(platform_data->firm_gpio);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index e0d2b74..2cdb303 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1182,6 +1182,7 @@
 	 * effects say only one namespace is affected.
 	 */
 	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
+		mutex_lock(&ctrl->scan_lock);
 		nvme_start_freeze(ctrl);
 		nvme_wait_freeze(ctrl);
 	}
@@ -1210,8 +1211,10 @@
 	 */
 	if (effects & NVME_CMD_EFFECTS_LBCC)
 		nvme_update_formats(ctrl);
-	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK))
+	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
 		nvme_unfreeze(ctrl);
+		mutex_unlock(&ctrl->scan_lock);
+	}
 	if (effects & NVME_CMD_EFFECTS_CCC)
 		nvme_init_identify(ctrl);
 	if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC))
@@ -3292,6 +3295,7 @@
 	if (nvme_identify_ctrl(ctrl, &id))
 		return;
 
+	mutex_lock(&ctrl->scan_lock);
 	nn = le32_to_cpu(id->nn);
 	if (ctrl->vs >= NVME_VS(1, 1, 0) &&
 	    !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
@@ -3300,6 +3304,7 @@
 	}
 	nvme_scan_ns_sequential(ctrl, nn);
 out_free_id:
+	mutex_unlock(&ctrl->scan_lock);
 	kfree(id);
 	down_write(&ctrl->namespaces_rwsem);
 	list_sort(NULL, &ctrl->namespaces, ns_cmp);
@@ -3535,6 +3540,7 @@
 
 	ctrl->state = NVME_CTRL_NEW;
 	spin_lock_init(&ctrl->lock);
+	mutex_init(&ctrl->scan_lock);
 	INIT_LIST_HEAD(&ctrl->namespaces);
 	init_rwsem(&ctrl->namespaces_rwsem);
 	ctrl->dev = dev;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 815509d..da8f5ad 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -531,8 +531,7 @@
 	timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
 	ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
 		ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc);
-	if (!(ctrl->anacap & (1 << 6)))
-		ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);
+	ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);
 
 	if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) {
 		dev_err(ctrl->device,
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 60220de..e82cdae 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -148,6 +148,7 @@
 	enum nvme_ctrl_state state;
 	bool identified;
 	spinlock_t lock;
+	struct mutex scan_lock;
 	const struct nvme_ctrl_ops *ops;
 	struct request_queue *admin_q;
 	struct request_queue *connect_q;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index f46313f..7b9ef8e 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2260,6 +2260,27 @@
 	if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
 		nvme_dev_disable(dev, false);
 
+	mutex_lock(&dev->shutdown_lock);
+	result = nvme_pci_enable(dev);
+	if (result)
+		goto out_unlock;
+
+	result = nvme_pci_configure_admin_queue(dev);
+	if (result)
+		goto out_unlock;
+
+	result = nvme_alloc_admin_tags(dev);
+	if (result)
+		goto out_unlock;
+
+	/*
+	 * Limit the max command size to prevent iod->sg allocations going
+	 * over a single page.
+	 */
+	dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1;
+	dev->ctrl.max_segments = NVME_MAX_SEGS;
+	mutex_unlock(&dev->shutdown_lock);
+
 	/*
 	 * Introduce CONNECTING state from nvme-fc/rdma transports to mark the
 	 * initializing procedure here.
@@ -2270,25 +2291,6 @@
 		goto out;
 	}
 
-	result = nvme_pci_enable(dev);
-	if (result)
-		goto out;
-
-	result = nvme_pci_configure_admin_queue(dev);
-	if (result)
-		goto out;
-
-	result = nvme_alloc_admin_tags(dev);
-	if (result)
-		goto out;
-
-	/*
-	 * Limit the max command size to prevent iod->sg allocations going
-	 * over a single page.
-	 */
-	dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1;
-	dev->ctrl.max_segments = NVME_MAX_SEGS;
-
 	result = nvme_init_identify(&dev->ctrl);
 	if (result)
 		goto out;
@@ -2352,6 +2354,8 @@
 	nvme_start_ctrl(&dev->ctrl);
 	return;
 
+ out_unlock:
+	mutex_unlock(&dev->shutdown_lock);
  out:
 	nvme_remove_dead_ctrl(dev, result);
 }
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index b6a28de..0939a4e 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1672,18 +1672,28 @@
 nvme_rdma_timeout(struct request *rq, bool reserved)
 {
 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+	struct nvme_rdma_queue *queue = req->queue;
+	struct nvme_rdma_ctrl *ctrl = queue->ctrl;
 
-	dev_warn(req->queue->ctrl->ctrl.device,
-		 "I/O %d QID %d timeout, reset controller\n",
-		 rq->tag, nvme_rdma_queue_idx(req->queue));
+	dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
+		 rq->tag, nvme_rdma_queue_idx(queue));
 
-	/* queue error recovery */
-	nvme_rdma_error_recovery(req->queue->ctrl);
+	if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
+		/*
+		 * Teardown immediately if controller times out while starting
+		 * or we are already started error recovery. all outstanding
+		 * requests are completed on shutdown, so we return BLK_EH_DONE.
+		 */
+		flush_work(&ctrl->err_work);
+		nvme_rdma_teardown_io_queues(ctrl, false);
+		nvme_rdma_teardown_admin_queue(ctrl, false);
+		return BLK_EH_DONE;
+	}
 
-	/* fail with DNR on cmd timeout */
-	nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;
+	dev_warn(ctrl->ctrl.device, "starting error recovery\n");
+	nvme_rdma_error_recovery(ctrl);
 
-	return BLK_EH_DONE;
+	return BLK_EH_RESET_TIMER;
 }
 
 static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 2653abf..e12f274 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -361,32 +361,36 @@
 		map += out_size;
 		map_len -= out_size;
 	}
-	if (match) {
-		/* Get the irqdomain-map-pass-thru property (optional) */
-		pass = of_get_property(cur, pass_name, NULL);
-		if (!pass)
-			pass = dummy_pass;
 
-		/*
-		 * Successfully parsed a irqdomain-map translation; copy new
-		 * specifier into the out structure, keeping the
-		 * bits specified in irqdomain-map-pass-thru.
-		 */
-		match_array = map - out_size;
-		for (i = 0; i < out_size; i++) {
-			__be32 val = *(map - out_size + i);
-
-			out->param[i] = in->param[i];
-			if (i < in_size) {
-				val &= ~pass[i];
-				val |= cpu_to_be32(out->param[i]) & pass[i];
-			}
-
-			out->param[i] = be32_to_cpu(val);
-		}
-		out->param_count = in_size = out_size;
-		out->fwnode = of_node_to_fwnode(new);
+	if (!match) {
+		ret = -EINVAL;
+		goto put;
 	}
+
+	/* Get the irqdomain-map-pass-thru property (optional) */
+	pass = of_get_property(cur, pass_name, NULL);
+	if (!pass)
+		pass = dummy_pass;
+
+	/*
+	 * Successfully parsed a irqdomain-map translation; copy new
+	 * specifier into the out structure, keeping the
+	 * bits specified in irqdomain-map-pass-thru.
+	 */
+	match_array = map - out_size;
+	for (i = 0; i < out_size; i++) {
+		__be32 val = *(map - out_size + i);
+
+		out->param[i] = in->param[i];
+		if (i < in_size) {
+			val &= ~pass[i];
+			val |= cpu_to_be32(out->param[i]) & pass[i];
+		}
+
+		out->param[i] = be32_to_cpu(val);
+	}
+	out->param_count = in_size = out_size;
+	out->fwnode = of_node_to_fwnode(new);
 put:
 	of_node_put(cur);
 	of_node_put(new);
diff --git a/drivers/pci/controller/pci-msm.c b/drivers/pci/controller/pci-msm.c
index d25a75e..c1d3850 100644
--- a/drivers/pci/controller/pci-msm.c
+++ b/drivers/pci/controller/pci-msm.c
@@ -551,6 +551,7 @@
 	uint32_t wr_halt_size;
 	uint32_t slv_addr_space_size;
 	uint32_t phy_status_offset;
+	uint32_t phy_status_bit;
 	uint32_t phy_power_down_offset;
 	uint32_t cpl_timeout;
 	uint32_t current_bdf;
@@ -1197,6 +1198,8 @@
 		dev->slv_addr_space_size);
 	PCIE_DBG_FS(dev, "phy_status_offset: 0x%x\n",
 		dev->phy_status_offset);
+	PCIE_DBG_FS(dev, "phy_status_bit: %u\n",
+		dev->phy_status_bit);
 	PCIE_DBG_FS(dev, "phy_power_down_offset: 0x%x\n",
 		dev->phy_power_down_offset);
 	PCIE_DBG_FS(dev, "cpl_timeout: 0x%x\n",
@@ -3217,7 +3220,8 @@
 	if (dev->rumi)
 		return true;
 
-	if (readl_relaxed(dev->phy + dev->phy_status_offset) & BIT(6))
+	if (readl_relaxed(dev->phy + dev->phy_status_offset) &
+		BIT(dev->phy_status_bit))
 		return false;
 	else
 		return true;
@@ -5717,6 +5721,11 @@
 	PCIE_DBG(pcie_dev, "RC%d: phy-status-offset: 0x%x.\n", pcie_dev->rc_idx,
 		pcie_dev->phy_status_offset);
 
+	of_property_read_u32(pdev->dev.of_node, "qcom,phy-status-bit",
+				&pcie_dev->phy_status_bit);
+	PCIE_DBG(pcie_dev, "RC%d: phy-status-bit: %u.\n", pcie_dev->rc_idx,
+		pcie_dev->phy_status_bit);
+
 	of_property_read_u32(of_node, "qcom,phy-power-down-offset",
 				&pcie_dev->phy_power_down_offset);
 	PCIE_DBG(pcie_dev, "RC%d: phy-power-down-offset: 0x%x.\n",
diff --git a/drivers/phy/qualcomm/phy-ath79-usb.c b/drivers/phy/qualcomm/phy-ath79-usb.c
index 6fd6e07..09a77e5 100644
--- a/drivers/phy/qualcomm/phy-ath79-usb.c
+++ b/drivers/phy/qualcomm/phy-ath79-usb.c
@@ -31,7 +31,7 @@
 
 	err = reset_control_deassert(priv->reset);
 	if (err && priv->no_suspend_override)
-		reset_control_assert(priv->no_suspend_override);
+		reset_control_deassert(priv->no_suspend_override);
 
 	return err;
 }
@@ -69,7 +69,7 @@
 	if (!priv)
 		return -ENOMEM;
 
-	priv->reset = devm_reset_control_get(&pdev->dev, "usb-phy");
+	priv->reset = devm_reset_control_get(&pdev->dev, "phy");
 	if (IS_ERR(priv->reset))
 		return PTR_ERR(priv->reset);
 
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index cf73a40..cecbce2 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -832,8 +832,13 @@
 		break;
 
 	case MCP_TYPE_S18:
+		one_regmap_config =
+			devm_kmemdup(dev, &mcp23x17_regmap,
+				sizeof(struct regmap_config), GFP_KERNEL);
+		if (!one_regmap_config)
+			return -ENOMEM;
 		mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp,
-					       &mcp23x17_regmap);
+					       one_regmap_config);
 		mcp->reg_shift = 1;
 		mcp->chip.ngpio = 16;
 		mcp->chip.label = "mcp23s18";
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index 539c1ab..a5105aa 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -143,6 +143,15 @@
 	  the MHI device without AP involvement, with the exception of
 	  power management.
 
+config IPA3_MHI_PRIME_MANAGER
+	tristate "IPA3_MHI Prime Manager driver"
+	depends on IPA3
+	help
+	  This driver functionality is to setup MHI Prime channels between Host and
+	  modem and enable the ability for MHI Prime communication.
+	  Once the configuration is done modem will communicate directly with
+	  the Host without AP involvement for tethering data offload.
+
 config IPA_UT
 	tristate "IPA Unit-Test Framework and Test Suites"
 	depends on IPA3 && DEBUG_FS
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index 140e3b6..0f86ca0 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -201,6 +201,11 @@
 	__stringify(IPA_CLIENT_WIGIG4_CONS),
 	__stringify(RESERVERD_PROD_94),
 	__stringify(IPA_CLIENT_APPS_WAN_COAL_CONS),
+	__stringify(IPA_CLIENT_MHI_PRIME_RMNET_PROD),
+	__stringify(IPA_CLIENT_MHI_PRIME_RMNET_CONS),
+	__stringify(IPA_CLIENT_MHI_PRIME_TETH_PROD),
+	__stringify(IPA_CLIENT_MHI_PRIME_TETH_CONS),
+	__stringify(IPA_CLIENT_MHI_PRIME_DPL_PROD),
 };
 
 /**
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
index 4246f1a..b3e28e6 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/mutex.h>
@@ -1910,6 +1910,15 @@
 		goto connect_dl_fail;
 	}
 
+	/* MHIP pipe enablement */
+	if (ipa3_is_mhip_offload_enabled()) {
+		result = ipa_mpm_mhip_xdci_pipe_enable(params->teth_prot);
+		if (result) {
+			IPA_USB_ERR("failed to connect MHIP channel\n");
+			goto connect_dl_fail;
+		}
+	}
+
 	/* Connect tethering protocol */
 	result = ipa3_usb_connect_teth_prot(params->teth_prot);
 	if (result) {
@@ -2403,6 +2412,14 @@
 		if (orig_state != IPA_USB_SUSPENDED) {
 			spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock,
 				flags);
+			/* Stop UL MHIP channel */
+			if (ipa3_is_mhip_offload_enabled()) {
+				result = ipa_mpm_mhip_ul_data_stop(teth_prot);
+				if (result) {
+					IPA_USB_ERR("fail UL MHIPData stop\n");
+					goto bad_params;
+				}
+			}
 			/* Stop UL channel */
 			result = ipa3_xdci_disconnect(ul_clnt_hdl,
 				true,
@@ -2418,10 +2435,28 @@
 	} else
 		spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
 
+	if (teth_prot == IPA_USB_RMNET) {
+		IPA_USB_DBG("USB suspend resetting dma mode\n");
+		result = ipa_mpm_reset_dma_mode(IPA_CLIENT_USB_PROD,
+			IPA_CLIENT_MHI_PRIME_RMNET_CONS);
+		if (result) {
+			IPA_USB_ERR("failed to reset dma mode\n");
+			goto bad_params;
+		}
+	}
+
 	result = ipa_usb_xdci_dismiss_channels(ul_clnt_hdl, dl_clnt_hdl,
 			teth_prot);
 	if (result)
 		goto bad_params;
+	/* Stop UL/DL MHIP channels */
+	if (ipa3_is_mhip_offload_enabled()) {
+		result = ipa_mpm_mhip_xdci_pipe_disable(teth_prot);
+		if (result) {
+			IPA_USB_ERR("failed to disconnect MHIP channel\n");
+			goto bad_params;
+		}
+	}
 
 	/* Disconnect tethering protocol */
 	result = ipa3_usb_disconnect_teth_prot(teth_prot);
@@ -2684,6 +2719,16 @@
 		goto bad_params;
 	}
 
+	if (teth_prot == IPA_USB_RMNET) {
+		IPA_USB_DBG("USB suspend resetting dma mode\n");
+		result = ipa_mpm_reset_dma_mode(IPA_CLIENT_USB_PROD,
+			IPA_CLIENT_MHI_PRIME_RMNET_CONS);
+		if (result) {
+			IPA_USB_ERR("failed to reset dma mode\n");
+			goto bad_params;
+		}
+	}
+
 	/* Stop UL channel & suspend DL/DPL EP */
 	result = ipa3_xdci_suspend(ul_clnt_hdl, dl_clnt_hdl,
 		true,
@@ -2732,7 +2777,14 @@
 			&ipa3_usb_notify_remote_wakeup_work);
 	}
 	spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
-
+	/* Stop MHIP channel */
+	if (ipa3_is_mhip_offload_enabled()) {
+		result = ipa_mpm_mhip_xdci_pipe_disable(teth_prot);
+		if (result) {
+			IPA_USB_ERR("failed to disconnect MHIP channel\n");
+			goto release_prod_fail;
+		}
+	}
 	IPA_USB_DBG_LOW("exit\n");
 	mutex_unlock(&ipa3_usb_ctx->general_mutex);
 	return 0;
diff --git a/drivers/platform/msm/ipa/ipa_v3/Makefile b/drivers/platform/msm/ipa/ipa_v3/Makefile
index 7d07e03..3fbd673 100644
--- a/drivers/platform/msm/ipa/ipa_v3/Makefile
+++ b/drivers/platform/msm/ipa/ipa_v3/Makefile
@@ -14,6 +14,8 @@
 
 obj-$(CONFIG_IPA3_MHI_PROXY) += ipa_mhi_proxy.o
 
+obj-$(CONFIG_IPA3_MHI_PRIME_MANAGER) += ipa_mpm.o
+
 ipat-$(CONFIG_IPA3_REGDUMP) += dump/ipa_reg_dump.o
 
 ccflags-$(CONFIG_IPA3_REGDUMP) += -Idrivers/platform/msm/ipa/ipa_v3/dump
diff --git a/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hw_common_ex.h b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hw_common_ex.h
index 9d8e8a2..7392102 100644
--- a/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hw_common_ex.h
+++ b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hw_common_ex.h
@@ -8,21 +8,17 @@
 /* VLVL defs are available for 854 */
 #define FEATURE_VLVL_DEFS                            true
 
-/* IPAv4 version flag for Sdx24 */
-#define FEATURE_IPA_HW_VERSION_4_0                   true
+#define FEATURE_IPA_HW_VERSION_4_5                   true
 
 /* Important Platform Specific Values : IRQ_NUM, IRQ_CNT, BCR */
-#define IPA_HW_BAM_IRQ_NUM                           440
+#define IPA_HW_BAM_IRQ_NUM                           639
 
 /* Q6 IRQ number for IPA. */
-#define IPA_HW_IRQ_NUM                               441
+#define IPA_HW_IRQ_NUM                               640
 
 /* Total number of different interrupts that can be enabled */
 #define IPA_HW_IRQ_CNT_TOTAL                         23
 
-/* IPAv4 BCR value */
-#define IPA_HW_BCR_REG_VAL                           0x00000039
-
 /* IPAv4 spare reg value */
 #define IPA_HW_SPARE_1_REG_VAL                       0xC0000005
 
@@ -50,6 +46,8 @@
 
 /* HPS Sequences */
 #define IPA_HW_PKT_PROCESS_HPS_DMA                      0x0
+#define IPA_HW_PKT_PROCESS_HPS_DMA_DECIPH_CIPHE         0x1
+#define IPA_HW_PKT_PROCESS_HPS_PKT_PRS_NO_DECIPH_UCP    0x2
 #define IPA_HW_PKT_PROCESS_HPS_PKT_PRS_DECIPH_UCP       0x3
 #define IPA_HW_PKT_PROCESS_HPS_2_PKT_PRS_NO_DECIPH      0x4
 #define IPA_HW_PKT_PROCESS_HPS_2_PKT_PRS_DECIPH         0x5
@@ -57,35 +55,48 @@
 #define IPA_HW_PKT_PROCESS_HPS_PKT_PRS_DECIPH_NO_UCP    0x7
 #define IPA_HW_PKT_PROCESS_HPS_DMA_PARSER               0x8
 #define IPA_HW_PKT_PROCESS_HPS_DMA_DECIPH_PARSER        0x9
+#define IPA_HW_PKT_PROCESS_HPS_2_PKT_PRS_UCP_TWICE_NO_DECIPH  0xA
+#define IPA_HW_PKT_PROCESS_HPS_2_PKT_PRS_UCP_TWICE_DECIPH     0xB
+#define IPA_HW_PKT_PROCESS_HPS_3_PKT_PRS_UCP_TWICE_NO_DECIPH  0xC
+#define IPA_HW_PKT_PROCESS_HPS_3_PKT_PRS_UCP_TWICE_DECIPH     0xD
 
 /* DPS Sequences */
 #define IPA_HW_PKT_PROCESS_DPS_DMA                      0x0
 #define IPA_HW_PKT_PROCESS_DPS_DMA_WITH_DECIPH          0x1
 #define IPA_HW_PKT_PROCESS_DPS_DMA_WITH_DECOMP          0x2
+#define IPA_HW_PKT_PROCESS_DPS_DMA_WITH_CIPH            0x3
 
 /* Src RSRC GRP config */
-#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_0           0x05050404
-#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_1           0x0A0A0A0A
-#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_2           0x0C0C0C0C
+#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_0           0x0B040803
+#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_1           0x0C0C0909
+#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_2           0x0E0E0909
 #define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_3           0x3F003F00
-#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_4           0x0E0E0E0E
+#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_4           0x10101616
 
-#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_0           0x00000101
-#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_1           0x00000808
-#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_2           0x00000808
+#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_0           0x01010101
+#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_1           0x02020202
+#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_2           0x04040404
 #define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_3           0x3F003F00
-#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_4           0x00000E0E
+#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_4           0x02020606
+
+#define IPA_HW_SRC_RSRC_GRP_45_RSRC_TYPE_0           0x00000000
+#define IPA_HW_SRC_RSRC_GRP_45_RSRC_TYPE_1           0x00000000
+#define IPA_HW_SRC_RSRC_GRP_45_RSRC_TYPE_2           0x00000000
+#define IPA_HW_SRC_RSRC_GRP_45_RSRC_TYPE_3           0x00003F00
+#define IPA_HW_SRC_RSRC_GRP_45_RSRC_TYPE_4           0x00000000
 
 /* Dest RSRC GRP config */
-#define IPA_HW_DST_RSRC_GRP_01_RSRC_TYPE_0           0x04040404
+#define IPA_HW_DST_RSRC_GRP_01_RSRC_TYPE_0           0x05051010
 #define IPA_HW_DST_RSRC_GRP_01_RSRC_TYPE_1           0x3F013F02
 
-#define IPA_HW_DST_RSRC_GRP_23_RSRC_TYPE_0           0x02020303
-#define IPA_HW_DST_RSRC_GRP_23_RSRC_TYPE_1           0x02000201
+#define IPA_HW_DST_RSRC_GRP_23_RSRC_TYPE_0           0x02020202
+#define IPA_HW_DST_RSRC_GRP_23_RSRC_TYPE_1           0x02010201
 
+#define IPA_HW_DST_RSRC_GRP_45_RSRC_TYPE_0           0x00000000
+#define IPA_HW_DST_RSRC_GRP_45_RSRC_TYPE_1           0x00000200
 
-#define IPA_HW_RX_HPS_CLIENTS_MIN_DEPTH_0            0x00020703
-#define IPA_HW_RX_HPS_CLIENTS_MAX_DEPTH_0            0x00020703
+#define IPA_HW_RX_HPS_CLIENTS_MIN_DEPTH_0            0x03030303
+#define IPA_HW_RX_HPS_CLIENTS_MAX_DEPTH_0            0x03030303
 
 #define IPA_HW_RSRP_GRP_0                            0x0
 #define IPA_HW_RSRP_GRP_1                            0x1
@@ -98,8 +109,11 @@
 #define IPA_HW_DDR_SRC_RSRP_GRP                      IPA_HW_RSRP_GRP_1
 #define IPA_HW_DDR_DEST_RSRP_GRP                     IPA_HW_RSRP_GRP_1
 
-#define IPA_HW_SRC_RSRP_TYPE_MAX                     0x4
-#define IPA_HW_DST_RSRP_TYPE_MAX                     0x3
+#define IPA_HW_DMA_SRC_RSRP_GRP                      IPA_HW_RSRP_GRP_2
+#define IPA_HW_DMA_DEST_RSRP_GRP                     IPA_HW_RSRP_GRP_2
+
+#define IPA_HW_SRC_RSRP_TYPE_MAX HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_MAXn
+#define IPA_HW_DST_RSRP_TYPE_MAX HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_MAXn
 
 #define GSI_HW_QSB_LOG_MISC_MAX 0x4
 
@@ -112,6 +126,9 @@
 /* Whether to allow setting step mode on IPA when we crash or not */
 #define IPA_HW_IS_STEP_MODE_ALLOWED                  (true)
 
+/* Max number of virtual pipes for UL QBAP provided by HW */
+#define IPA_HW_MAX_VP_NUM                             (32)
+
 /*
  * HW specific clock vote freq values in KHz
  * (BIMC/SNOC/PCNOC/IPA/Q6 CPU)
@@ -137,11 +154,11 @@
 	IPA_HW_CLK_FREQ_SNOC_SVS_2      = 50000,
 
 	/* IPA */
-	IPA_HW_CLK_FREQ_IPA_PEAK        = 500000,
-	IPA_HW_CLK_FREQ_IPA_NOM_PLUS    = 440000,
-	IPA_HW_CLK_FREQ_IPA_NOM         = 440000,
+	IPA_HW_CLK_FREQ_IPA_PEAK        = 600000,
+	IPA_HW_CLK_FREQ_IPA_NOM_PLUS    = 500000,
+	IPA_HW_CLK_FREQ_IPA_NOM         = 500000,
 	IPA_HW_CLK_FREQ_IPA_SVS         = 250000,
-	IPA_HW_CLK_FREQ_IPA_SVS_2       = 120000,
+	IPA_HW_CLK_FREQ_IPA_SVS_2       = 150000,
 
 	/* Q6 CPU */
 	IPA_HW_CLK_FREQ_Q6_PEAK         = 729600,
@@ -150,6 +167,12 @@
 	IPA_HW_CLK_FREQ_Q6_SVS          = 729600,
 };
 
+enum ipa_hw_qtimer_gran_e {
+	IPA_HW_QTIMER_GRAN_0 = 0, /* granularity 0 is 10us */
+	IPA_HW_QTIMER_GRAN_1 = 1, /* granularity 1 is 100us */
+	IPA_HW_QTIMER_GRAN_MAX,
+};
+
 /* Pipe ID of all the IPA pipes */
 enum ipa_hw_pipe_id_e {
 	IPA_HW_PIPE_ID_0,
@@ -175,62 +198,95 @@
 	IPA_HW_PIPE_ID_20,
 	IPA_HW_PIPE_ID_21,
 	IPA_HW_PIPE_ID_22,
+	IPA_HW_PIPE_ID_23,
+	IPA_HW_PIPE_ID_24,
+	IPA_HW_PIPE_ID_25,
+	IPA_HW_PIPE_ID_26,
+	IPA_HW_PIPE_ID_27,
+	IPA_HW_PIPE_ID_28,
+	IPA_HW_PIPE_ID_29,
+	IPA_HW_PIPE_ID_30,
 	IPA_HW_PIPE_ID_MAX
 };
 
 /* Pipe ID's of System Bam Endpoints between Q6 & IPA */
 enum ipa_hw_q6_pipe_id_e {
 	/* Pipes used by IPA Q6 driver */
-	IPA_HW_Q6_DL_CONSUMER_PIPE_ID           = IPA_HW_PIPE_ID_3,
-	IPA_HW_Q6_CTL_CONSUMER_PIPE_ID          = IPA_HW_PIPE_ID_4,
-	IPA_HW_Q6_UL_PRODUCER_PIPE_ID           = IPA_HW_PIPE_ID_13,
-	IPA_HW_Q6_DL_PRODUCER_PIPE_ID           = IPA_HW_PIPE_ID_14,
+	IPA_HW_Q6_DL_CONSUMER_PIPE_ID           = IPA_HW_PIPE_ID_5,
+	IPA_HW_Q6_CTL_CONSUMER_PIPE_ID          = IPA_HW_PIPE_ID_6,
+	IPA_HW_Q6_DL_NLO_CONSUMER_PIPE_ID       = IPA_HW_PIPE_ID_8,
 
-	IPA_HW_Q6_LTE_DL_CONSUMER_PIPE_ID       = IPA_HW_PIPE_ID_6,
-	IPA_HW_Q6_LWA_DL_PRODUCER_PIPE_ID       = IPA_HW_PIPE_ID_16,
+	IPA_HW_Q6_UL_ACC_ACK_PRODUCER_PIPE_ID   = IPA_HW_PIPE_ID_20,
+	IPA_HW_Q6_UL_PRODUCER_PIPE_ID           = IPA_HW_PIPE_ID_21,
+	IPA_HW_Q6_DL_PRODUCER_PIPE_ID           = IPA_HW_PIPE_ID_17,
+	IPA_HW_Q6_QBAP_STATUS_PRODUCER_PIPE_ID  = IPA_HW_PIPE_ID_18,
+	IPA_HW_Q6_UL_ACC_DATA_PRODUCER_PIPE_ID  = IPA_HW_PIPE_ID_19,
+
+	IPA_HW_Q6_UL_ACK_PRODUCER_PIPE_ID  =
+	  IPA_HW_Q6_UL_ACC_ACK_PRODUCER_PIPE_ID,
+	IPA_HW_Q6_UL_DATA_PRODUCER_PIPE_ID =
+	  IPA_HW_Q6_UL_ACC_DATA_PRODUCER_PIPE_ID,
+
+	IPA_HW_Q6_DMA_ASYNC_CONSUMER_PIPE_ID    = IPA_HW_PIPE_ID_4,
+	IPA_HW_Q6_DMA_ASYNC_PRODUCER_PIPE_ID    = IPA_HW_PIPE_ID_29,
+
 	/* Test Simulator Pipes */
 	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_0_ID     = IPA_HW_PIPE_ID_0,
-	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_0_ID     = IPA_HW_PIPE_ID_12,
 	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_1_ID     = IPA_HW_PIPE_ID_1,
-	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_1_ID     = IPA_HW_PIPE_ID_10,
-	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_2_ID     = IPA_HW_PIPE_ID_2,
-	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_2_ID     = IPA_HW_PIPE_ID_11,
+
 	/* GSI UT channel SW->IPA */
-	IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_1_ID     = IPA_HW_PIPE_ID_5,
-	/* GSI UT channel IPA->SW */
-	IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_1_ID     = IPA_HW_PIPE_ID_17,
+	IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_1_ID     = IPA_HW_PIPE_ID_3,
 	/* GSI UT channel SW->IPA */
-	IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_2_ID     = IPA_HW_PIPE_ID_7,
+	IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_2_ID     = IPA_HW_PIPE_ID_10,
+
+	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_2_ID     = IPA_HW_PIPE_ID_7,
+
 	/* GSI UT channel IPA->SW */
-	IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_2_ID     = IPA_HW_PIPE_ID_18,
-	IPA_HW_Q6_DIAG_CONSUMER_PIPE_ID         = IPA_HW_PIPE_ID_19,
+	IPA_HW_Q6_DIAG_CONSUMER_PIPE_ID         = IPA_HW_PIPE_ID_9,
+
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_0_ID     = IPA_HW_PIPE_ID_23,
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_1_ID     = IPA_HW_PIPE_ID_24,
+
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_2_ID     = IPA_HW_PIPE_ID_25,
+
+	/* GSI UT channel IPA->SW */
+	IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_1_ID     = IPA_HW_PIPE_ID_26,
+
+	/* GSI UT channel IPA->SW */
+	IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_2_ID     = IPA_HW_PIPE_ID_27,
 	IPA_HW_Q6_PIPE_ID_MAX                   = IPA_HW_PIPE_ID_MAX,
 };
 
 enum ipa_hw_q6_pipe_ch_id_e {
 	/* Channels used by IPA Q6 driver */
-	IPA_HW_Q6_DL_CONSUMER_PIPE_CH_ID           = 0,
-	IPA_HW_Q6_CTL_CONSUMER_PIPE_CH_ID          = 1,
-	IPA_HW_Q6_UL_PRODUCER_PIPE_CH_ID           = 3,
-	IPA_HW_Q6_DL_PRODUCER_PIPE_CH_ID           = 4,
+	IPA_HW_Q6_DL_CONSUMER_PIPE_CH_ID                = 0,
+	IPA_HW_Q6_CTL_CONSUMER_PIPE_CH_ID               = 1,
+	IPA_HW_Q6_DL_NLO_CONSUMER_PIPE_CH_ID            = 2,
+	IPA_HW_Q6_UL_ACC_PATH_ACK_PRODUCER_PIPE_CH_ID   = 6,
+	IPA_HW_Q6_UL_PRODUCER_PIPE_CH_ID                = 7,
+	IPA_HW_Q6_DL_PRODUCER_PIPE_CH_ID                = 3,
+	IPA_HW_Q6_UL_ACC_PATH_DATA_PRODUCER_PIPE_CH_ID  = 5,
+	IPA_HW_Q6_QBAP_STATUS_PRODUCER_PIPE_CH_ID       = 4,
 
-	IPA_HW_Q6_LTE_DL_CONSUMER_PIPE_CH_ID       = 2,
-	IPA_HW_Q6_LWA_DL_PRODUCER_PIPE_CH_ID       = 5,
+	IPA_HW_Q6_DMA_ASYNC_CONSUMER_PIPE_CH_ID         = 8,
+	IPA_HW_Q6_DMA_ASYNC_PRODUCER_PIPE_CH_ID         = 9,
+	/* CH_ID 8 and 9 are Q6 SPARE CONSUMERs */
+
 	/* Test Simulator Channels */
-	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_0_CH_ID     = 6,
-	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_0_CH_ID     = 8,
-	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_1_CH_ID     = 9,
-	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_1_CH_ID     = 10,
-	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_2_CH_ID     = 11,
-	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_2_CH_ID     = 12,
+	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_0_CH_ID     = 10,
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_0_CH_ID     = 11,
+	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_1_CH_ID     = 12,
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_1_CH_ID     = 13,
+	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_2_CH_ID     = 14,
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_2_CH_ID     = 15,
 	/* GSI UT channel SW->IPA */
-	IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_1_CH_ID     = 13,
+	IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_1_CH_ID     = 16,
 	/* GSI UT channel IPA->SW */
-	IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_1_CH_ID     = 14,
+	IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_1_CH_ID     = 17,
 	/* GSI UT channel SW->IPA */
-	IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_2_CH_ID     = 15,
+	IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_2_CH_ID     = 18,
 	/* GSI UT channel IPA->SW */
-	IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_2_CH_ID     = 16,
+	IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_2_CH_ID     = 19,
 };
 
 /* System Bam Endpoints between Q6 & IPA */
@@ -243,33 +299,42 @@
 	IPA_HW_Q6_DL_CONSUMER_PIPE = 2,
 	/* CTL Pipe Q6->IPA */
 	IPA_HW_Q6_CTL_CONSUMER_PIPE = 3,
-	/*  Q6 -> IPA,  LTE DL Optimized path */
-	IPA_HW_Q6_LTE_DL_CONSUMER_PIPE = 4,
-	/* LWA DL(Wifi to Q6) */
-	IPA_HW_Q6_LWA_DL_PRODUCER_PIPE = 5,
+	/*  Q6 -> IPA,  DL NLO  */
+	IPA_HW_Q6_DL_NLO_CONSUMER_PIPE = 4,
+	/* DMA ASYNC CONSUMER */
+	IPA_HW_Q6_DMA_ASYNC_CONSUMER_PIPE = 5,
+	/* DMA ASYNC PRODUCER */
+	IPA_HW_Q6_DMA_ASYNC_PRODUCER_PIPE = 6,
+	/* UL Acc Path Data Pipe IPA->Q6 */
+	IPA_HW_Q6_UL_ACC_DATA_PRODUCER_PIPE = 7,
+	/* UL Acc Path ACK Pipe IPA->Q6 */
+	IPA_HW_Q6_UL_ACC_ACK_PRODUCER_PIPE = 8,
+	/* UL Acc Path QBAP status Pipe IPA->Q6 */
+	IPA_HW_Q6_QBAP_STATUS_PRODUCER_PIPE = 9,
 	/* Diag status pipe IPA->Q6 */
 	/* Used only when FEATURE_IPA_TEST_PER_SIM is ON */
 	/* SIM Pipe IPA->Sim */
-	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_0 = 7,
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_0 = 10,
 	/* SIM Pipe Sim->IPA */
-	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_1 = 8,
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_1 = 11,
 	/* SIM Pipe Sim->IPA */
-	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_2 = 9,
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_2 = 12,
 	/* SIM Pipe Sim->IPA */
-	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_0 = 10,
+	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_0 = 13,
 	/* SIM B2B PROD Pipe  */
-	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_1 = 11,
+	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_1 = 14,
 	/* SIM Pipe IPA->Sim */
-	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_2 = 12,
+	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_2 = 15,
 	/* End FEATURE_IPA_TEST_PER_SIM */
 	/* GSI UT channel SW->IPA */
-	IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_1 = 13,
+	IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_1 = 16,
 	/* GSI UT channel IPA->SW */
-	IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_1 = 14,
+	IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_1 = 17,
 	/* GSI UT channel SW->IPA */
-	IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_2 = 15,
+	IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_2 = 18,
 	/* GSI UT channel IPA->SW */
-	IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_2 = 16,
+	IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_2 = 19,
+
 	IPA_HW_Q6_PIPE_TOTAL
 };
 
@@ -375,12 +440,7 @@
 /*
  * Total number of channel contexts that need to be saved for APPS
  */
-#define IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_A7          14
-
-/*
- * Total number of channel contexts that need to be saved for Q6
- */
-#define IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_Q6          6
+#define IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_A7          19
 
 /*
  * Total number of channel contexts that need to be saved for UC
@@ -390,12 +450,7 @@
 /*
  * Total number of event ring contexts that need to be saved for APPS
  */
-#define IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_A7         12
-
-/*
- * Total number of event ring contexts that need to be saved for Q6
- */
-#define IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_Q6         4
+#define IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_A7         19
 
 /*
  * Total number of event ring contexts that need to be saved for UC
@@ -413,38 +468,12 @@
  * Total number of endpoints for which ipa_reg_save.pipes[endp_number]
  * are always saved
  */
-#define IPA_HW_REG_SAVE_NUM_ACTIVE_PIPES             23
+#define IPA_HW_REG_SAVE_NUM_ACTIVE_PIPES             IPA_HW_PIPE_ID_MAX
 
 /*
- * Macro to set the active flag for all active pipe indexed register
+ * SHRAM Bytes per ch
  */
-#define IPA_HW_REG_SAVE_CFG_ENTRY_PIPE_ENDP_ACTIVE() \
-	do { \
-		ipa_reg_save.ipa.pipes[0].active = true; \
-		ipa_reg_save.ipa.pipes[1].active = true; \
-		ipa_reg_save.ipa.pipes[2].active = true; \
-		ipa_reg_save.ipa.pipes[3].active = true; \
-		ipa_reg_save.ipa.pipes[4].active = true; \
-		ipa_reg_save.ipa.pipes[5].active = true; \
-		ipa_reg_save.ipa.pipes[6].active = true; \
-		ipa_reg_save.ipa.pipes[7].active = true; \
-		ipa_reg_save.ipa.pipes[8].active = true; \
-		ipa_reg_save.ipa.pipes[9].active = true; \
-		ipa_reg_save.ipa.pipes[10].active = true; \
-		ipa_reg_save.ipa.pipes[11].active = true; \
-		ipa_reg_save.ipa.pipes[12].active = true; \
-		ipa_reg_save.ipa.pipes[13].active = true; \
-		ipa_reg_save.ipa.pipes[14].active = true; \
-		ipa_reg_save.ipa.pipes[15].active = true; \
-		ipa_reg_save.ipa.pipes[16].active = true; \
-		ipa_reg_save.ipa.pipes[17].active = true; \
-		ipa_reg_save.ipa.pipes[18].active = true; \
-		ipa_reg_save.ipa.pipes[19].active = true; \
-		ipa_reg_save.ipa.pipes[20].active = true; \
-		ipa_reg_save.ipa.pipes[21].active = true; \
-		ipa_reg_save.ipa.pipes[22].active = true; \
-	} while (0)
-
+#define IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM         12
 
 /*
  * Total number of rx splt cmdq's see:
@@ -453,6 +482,74 @@
 #define IPA_RX_SPLT_CMDQ_MAX 4
 
 /*
+ * Macro to define a particular register cfg entry for all pipe
+ * indexed register
+ */
+#define IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(reg_name, var_name)	\
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 0), \
+		(u32 *)&ipa_reg_save.ipa.pipes[0].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 1), \
+		(u32 *)&ipa_reg_save.ipa.pipes[1].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 2), \
+		(u32 *)&ipa_reg_save.ipa.pipes[2].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 3), \
+		(u32 *)&ipa_reg_save.ipa.pipes[3].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 4), \
+		(u32 *)&ipa_reg_save.ipa.pipes[4].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 5), \
+		(u32 *)&ipa_reg_save.ipa.pipes[5].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 6), \
+		(u32 *)&ipa_reg_save.ipa.pipes[6].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 7), \
+		(u32 *)&ipa_reg_save.ipa.pipes[7].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 8), \
+		(u32 *)&ipa_reg_save.ipa.pipes[8].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 9), \
+		(u32 *)&ipa_reg_save.ipa.pipes[9].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 10), \
+		(u32 *)&ipa_reg_save.ipa.pipes[10].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 11), \
+		(u32 *)&ipa_reg_save.ipa.pipes[11].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 12), \
+		(u32 *)&ipa_reg_save.ipa.pipes[12].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 13), \
+		(u32 *)&ipa_reg_save.ipa.pipes[13].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 14), \
+		(u32 *)&ipa_reg_save.ipa.pipes[14].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 15), \
+		(u32 *)&ipa_reg_save.ipa.pipes[15].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 16), \
+		(u32 *)&ipa_reg_save.ipa.pipes[16].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 17), \
+		(u32 *)&ipa_reg_save.ipa.pipes[17].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 18), \
+		(u32 *)&ipa_reg_save.ipa.pipes[18].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 19), \
+		(u32 *)&ipa_reg_save.ipa.pipes[19].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 20), \
+		(u32 *)&ipa_reg_save.ipa.pipes[20].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 21), \
+		(u32 *)&ipa_reg_save.ipa.pipes[21].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 22), \
+		(u32 *)&ipa_reg_save.ipa.pipes[22].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 23), \
+		(u32 *)&ipa_reg_save.ipa.pipes[23].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 24), \
+		(u32 *)&ipa_reg_save.ipa.pipes[24].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 25), \
+		(u32 *)&ipa_reg_save.ipa.pipes[25].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 26), \
+		(u32 *)&ipa_reg_save.ipa.pipes[26].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 27), \
+		(u32 *)&ipa_reg_save.ipa.pipes[27].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 28), \
+		(u32 *)&ipa_reg_save.ipa.pipes[28].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 29), \
+		(u32 *)&ipa_reg_save.ipa.pipes[29].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 30), \
+		(u32 *)&ipa_reg_save.ipa.pipes[30].endp.var_name }
+
+/*
  * Macro to define a particular register cfg entry for the remaining
  * pipe indexed register.  In Stingray case we don't have extra
  * endpoints so it is intentially empty
diff --git a/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hwio.h b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hwio.h
index 9ab8667..56b0713 100644
--- a/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hwio.h
+++ b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hwio.h
@@ -935,7 +935,7 @@
 #define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_INM(m) \
 	in_dword_masked(HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_ADDR, \
 			m)
-#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_OUT(v) out_dword(	\
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_OUT(v) out_dword( \
 		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_ADDR, \
 		v)
 #define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_OUTM(m, \
@@ -971,15 +971,24 @@
 		HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_ADDR(n), \
 		mask)
 #define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_OUTI(n, val) \
-		out_dword( \
-			HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_ADDR(n), \
-			val)
-#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_OUTMI(n, mask, val) \
-		out_dword_masked_ns( \
-			HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_ADDR(n), \
-			mask, \
-			val, \
-			HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INI(n))
+	out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_ADDR(n), \
+		val)
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_OUTMI(n, mask, \
+					      val) out_dword_masked_ns(	\
+		HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INI(n))
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INST_BYTE_3_BMSK 0xff000000
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INST_BYTE_3_SHFT 0x18
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INST_BYTE_2_BMSK 0xff0000
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INST_BYTE_2_SHFT 0x10
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INST_BYTE_1_BMSK 0xff00
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INST_BYTE_1_SHFT 0x8
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INST_BYTE_0_BMSK 0xff
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INST_BYTE_0_SHFT 0x0
 #define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_ADDR(n) (IPA_GSI_TOP_GSI_REG_BASE + \
 					      0x00002000 + 0x4 * (n))
 #define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_PHYS(n) ( \
@@ -7404,11 +7413,47 @@
 					  0x0000038c)
 #define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
 					  0x0000038c)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_RMSK 0x1f
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_ATTR 0x3
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_IN in_dword_masked(	\
+		HWIO_IPA_RX_HPS_CMDQ_CFG_WR_ADDR, \
+		HWIO_IPA_RX_HPS_CMDQ_CFG_WR_RMSK)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_CFG_WR_ADDR, \
+		m)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_OUT(v) out_dword( \
+		HWIO_IPA_RX_HPS_CMDQ_CFG_WR_ADDR, \
+		v)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_RX_HPS_CMDQ_CFG_WR_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_RX_HPS_CMDQ_CFG_WR_IN)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_BLOCK_WR_BMSK 0x1f
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_BLOCK_WR_SHFT 0x0
 #define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_ADDR (IPA_DEBUG_REG_BASE + 0x00000390)
 #define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
 					  0x00000390)
 #define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
 					  0x00000390)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_RMSK 0x1f
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_ATTR 0x3
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_IN in_dword_masked(	\
+		HWIO_IPA_RX_HPS_CMDQ_CFG_RD_ADDR, \
+		HWIO_IPA_RX_HPS_CMDQ_CFG_RD_RMSK)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_CFG_RD_ADDR, \
+		m)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_OUT(v) out_dword( \
+		HWIO_IPA_RX_HPS_CMDQ_CFG_RD_ADDR, \
+		v)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_RX_HPS_CMDQ_CFG_RD_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_RX_HPS_CMDQ_CFG_RD_IN)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_BLOCK_RD_BMSK 0x1f
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_BLOCK_RD_SHFT 0x0
 #define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_0_ADDR (IPA_DEBUG_REG_BASE + \
 					     0x00000394)
 #define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
diff --git a/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hwio_def.h b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hwio_def.h
index 1600f0a..306dfec 100644
--- a/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hwio_def.h
+++ b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hwio_def.h
@@ -1941,6 +1941,22 @@
 	struct ipa_hwio_def_ipa_rx_hps_cmdq_cmd_s	def;
 	u32						value;
 };
+struct ipa_hwio_def_ipa_rx_hps_cmdq_cfg_wr_s {
+	u32	block_wr : 5;
+	u32	reserved0 : 27;
+};
+union ipa_hwio_def_ipa_rx_hps_cmdq_cfg_wr_u {
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_cfg_wr_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rx_hps_cmdq_cfg_rd_s {
+	u32	block_rd : 5;
+	u32	reserved0 : 27;
+};
+union ipa_hwio_def_ipa_rx_hps_cmdq_cfg_rd_u {
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_cfg_rd_s	def;
+	u32						value;
+};
 struct ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_0_s {
 	u32	cmdq_packet_len_f : 16;
 	u32	cmdq_dest_len_f : 16;
diff --git a/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.c b/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.c
index 0b498e0..831f9c8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.c
+++ b/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.c
@@ -246,7 +246,12 @@
 	IPA_REG_SAVE_RX_SPLT_CMDQ(
 		IPA_RX_SPLT_CMDQ_STATUS_n, ipa_rx_splt_cmdq_status_n),
 
-
+	GEN_SRC_DST_ADDR_MAP(IPA_RX_HPS_CMDQ_CFG_WR,
+				  ipa.dbg,
+				  ipa_rx_hps_cmdq_cfg_wr),
+	GEN_SRC_DST_ADDR_MAP(IPA_RX_HPS_CMDQ_CFG_RD,
+				  ipa.dbg,
+				  ipa_rx_hps_cmdq_cfg_rd),
 	GEN_SRC_DST_ADDR_MAP(IPA_RX_HPS_CMDQ_CMD,
 			     ipa.dbg,
 			     ipa_rx_hps_cmdq_cmd),
@@ -731,9 +736,11 @@
  */
 void ipa_save_gsi_ver(void)
 {
+	if (!ipa3_ctx->do_register_collection_on_crash)
+		return;
+
 	ipa_reg_save.gsi.fw_ver =
-		IPA_READ_1xVECTOR_REG(IPA_GSI_TOP_GSI_INST_RAM_n, 0) &
-		0x0000FFFF;
+		IPA_READ_1xVECTOR_REG(IPA_GSI_TOP_GSI_INST_RAM_n, 0);
 }
 
 /*
@@ -775,7 +782,11 @@
 			in_dword(ipa_regs_to_save_array[i].src_addr);
 	}
 
-	IPA_HW_REG_SAVE_CFG_ENTRY_PIPE_ENDP_ACTIVE();
+	/*
+	 * Set the active flag for all active pipe indexed registers.
+	 */
+	for (i = 0; i < IPA_HW_PIPE_ID_MAX; i++)
+		ipa_reg_save.ipa.pipes[i].active = true;
 
 	/* Now save the per endp registers for the remaining pipes */
 	for (i = 0; i < (CONFIG_IPA3_REGDUMP_NUM_EXTRA_ENDP_REGS *
@@ -864,26 +875,6 @@
 				n + IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM - 1);
 	}
 
-	for (i = 0; i < IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_Q6; i++) {
-		u32 phys_ch_idx = ipa_reg_save.gsi.ch_cntxt.q6[
-			i].gsi_map_ee_n_ch_k_vp_table.phy_ch;
-		u32 n = phys_ch_idx*IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM;
-
-		if (!ipa_reg_save.gsi.ch_cntxt.q6[
-				i].gsi_map_ee_n_ch_k_vp_table.valid)
-			continue;
-		ipa_reg_save.gsi.ch_cntxt.q6[
-			i].mcs_channel_scratch.scratch4.shram =
-			IPA_READ_1xVECTOR_REG(
-				GSI_SHRAM_n,
-				n + IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM - 2);
-		ipa_reg_save.gsi.ch_cntxt.q6[
-			i].mcs_channel_scratch.scratch5.shram =
-			IPA_READ_1xVECTOR_REG(
-				GSI_SHRAM_n,
-				n + IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM - 1);
-	}
-
 	for (i = 0; i < IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_UC; i++) {
 		u32 phys_ch_idx = ipa_reg_save.gsi.ch_cntxt.uc[
 			i].gsi_map_ee_n_ch_k_vp_table.phy_ch;
diff --git a/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.h b/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.h
index 6a6619a..8707e9c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.h
+++ b/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.h
@@ -86,8 +86,6 @@
 #define IPA_DEBUG_TESTBUS_DEF_EXTERNAL           50
 #define IPA_DEBUG_TESTBUS_DEF_INTERNAL           6
 
-#define IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM        8
-
 #define IPA_REG_SAVE_GSI_NUM_EE                  3
 
 #define IPA_REG_SAVE_NUM_EXTRA_ENDP_REGS         22
@@ -257,58 +255,6 @@
  * Macro to define a particular register cfg entry for all pipe
  * indexed register
  */
-#define IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(reg_name, var_name) \
-	{ GEN_1xVECTOR_REG_OFST(reg_name, 0), \
-		(u32 *)&ipa_reg_save.ipa.pipes[0].endp.var_name }, \
-	{ GEN_1xVECTOR_REG_OFST(reg_name, 1), \
-		(u32 *)&ipa_reg_save.ipa.pipes[1].endp.var_name }, \
-	{ GEN_1xVECTOR_REG_OFST(reg_name, 2), \
-		(u32 *)&ipa_reg_save.ipa.pipes[2].endp.var_name }, \
-	{ GEN_1xVECTOR_REG_OFST(reg_name, 3), \
-		(u32 *)&ipa_reg_save.ipa.pipes[3].endp.var_name }, \
-	{ GEN_1xVECTOR_REG_OFST(reg_name, 4), \
-		(u32 *)&ipa_reg_save.ipa.pipes[4].endp.var_name }, \
-	{ GEN_1xVECTOR_REG_OFST(reg_name, 5), \
-		(u32 *)&ipa_reg_save.ipa.pipes[5].endp.var_name }, \
-	{ GEN_1xVECTOR_REG_OFST(reg_name, 6), \
-		(u32 *)&ipa_reg_save.ipa.pipes[6].endp.var_name }, \
-	{ GEN_1xVECTOR_REG_OFST(reg_name, 7), \
-		(u32 *)&ipa_reg_save.ipa.pipes[7].endp.var_name }, \
-	{ GEN_1xVECTOR_REG_OFST(reg_name, 8), \
-		(u32 *)&ipa_reg_save.ipa.pipes[8].endp.var_name }, \
-	{ GEN_1xVECTOR_REG_OFST(reg_name, 9), \
-		(u32 *)&ipa_reg_save.ipa.pipes[9].endp.var_name }, \
-	{ GEN_1xVECTOR_REG_OFST(reg_name, 10), \
-		(u32 *)&ipa_reg_save.ipa.pipes[10].endp.var_name }, \
-	{ GEN_1xVECTOR_REG_OFST(reg_name, 11), \
-		(u32 *)&ipa_reg_save.ipa.pipes[11].endp.var_name }, \
-	{ GEN_1xVECTOR_REG_OFST(reg_name, 12), \
-		(u32 *)&ipa_reg_save.ipa.pipes[12].endp.var_name }, \
-	{ GEN_1xVECTOR_REG_OFST(reg_name, 13), \
-		(u32 *)&ipa_reg_save.ipa.pipes[13].endp.var_name }, \
-	{ GEN_1xVECTOR_REG_OFST(reg_name, 14), \
-		(u32 *)&ipa_reg_save.ipa.pipes[14].endp.var_name }, \
-	{ GEN_1xVECTOR_REG_OFST(reg_name, 15), \
-		(u32 *)&ipa_reg_save.ipa.pipes[15].endp.var_name }, \
-	{ GEN_1xVECTOR_REG_OFST(reg_name, 16), \
-		(u32 *)&ipa_reg_save.ipa.pipes[16].endp.var_name }, \
-	{ GEN_1xVECTOR_REG_OFST(reg_name, 17), \
-		(u32 *)&ipa_reg_save.ipa.pipes[17].endp.var_name }, \
-	{ GEN_1xVECTOR_REG_OFST(reg_name, 18), \
-		(u32 *)&ipa_reg_save.ipa.pipes[18].endp.var_name }, \
-	{ GEN_1xVECTOR_REG_OFST(reg_name, 19), \
-		(u32 *)&ipa_reg_save.ipa.pipes[19].endp.var_name }, \
-	{ GEN_1xVECTOR_REG_OFST(reg_name, 20), \
-		(u32 *)&ipa_reg_save.ipa.pipes[20].endp.var_name }, \
-	{ GEN_1xVECTOR_REG_OFST(reg_name, 21), \
-		(u32 *)&ipa_reg_save.ipa.pipes[21].endp.var_name }, \
-	{ GEN_1xVECTOR_REG_OFST(reg_name, 22), \
-		(u32 *)&ipa_reg_save.ipa.pipes[22].endp.var_name }
-
-/*
- * Macro to define a particular register cfg entry for all pipe
- * indexed register
- */
 #define IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(reg_name, var_name) \
 	{ 0, 0 }
 
@@ -394,18 +340,6 @@
  * register
  */
 #define IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(reg_name, var_name) \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 0), \
-		(u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[0].var_name }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 1), \
-		(u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[1].var_name }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 2), \
-		(u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[2].var_name }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 3), \
-		(u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[3].var_name }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 4), \
-		(u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[4].var_name }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 5), \
-		(u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[5].var_name }, \
 	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 0), \
 		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[0].var_name }, \
 	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 1), \
@@ -434,20 +368,22 @@
 		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[12].var_name }, \
 	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 13), \
 		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[13].var_name }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_REG_SAVE_HWP_GSI_EE, 1), \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 14), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[14].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 15), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[15].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 16), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[16].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 17), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[17].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 18), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[18].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_REG_SAVE_HWP_GSI_EE, 1),	\
 		(u32 *)&ipa_reg_save.gsi.ch_cntxt.uc[0].var_name }, \
 	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_REG_SAVE_HWP_GSI_EE, 3), \
 		(u32 *)&ipa_reg_save.gsi.ch_cntxt.uc[1].var_name }
 
 #define IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(reg_name, var_name) \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 0), \
-		(u32 *)&ipa_reg_save.gsi.evt_cntxt.q6[0].var_name }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 1), \
-		(u32 *)&ipa_reg_save.gsi.evt_cntxt.q6[1].var_name }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 2), \
-		(u32 *)&ipa_reg_save.gsi.evt_cntxt.q6[2].var_name }, \
-	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 3), \
-		(u32 *)&ipa_reg_save.gsi.evt_cntxt.q6[3].var_name }, \
 	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 0), \
 		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[0].var_name }, \
 	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 1), \
@@ -472,6 +408,20 @@
 		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[10].var_name }, \
 	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 11), \
 		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[11].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 12), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[12].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 13), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[13].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 14), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[14].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 15), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[15].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 16), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[16].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 17), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[17].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 18), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[18].var_name }, \
 	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_REG_SAVE_HWP_GSI_EE, 1), \
 		(u32 *)&ipa_reg_save.gsi.evt_cntxt.uc[0].var_name }
 
@@ -808,6 +758,11 @@
 	struct ipa_hwio_def_ipa_rx_splt_cmdq_status_n_s
 	  ipa_rx_splt_cmdq_status_n[IPA_RX_SPLT_CMDQ_MAX];
 
+	union ipa_hwio_def_ipa_rx_hps_cmdq_cfg_wr_u
+		ipa_rx_hps_cmdq_cfg_wr;
+	union ipa_hwio_def_ipa_rx_hps_cmdq_cfg_rd_u
+		ipa_rx_hps_cmdq_cfg_rd;
+
 	struct ipa_hwio_def_ipa_rx_hps_cmdq_cmd_s
 	  ipa_rx_hps_cmdq_cmd;
 	union ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_0_u
@@ -1210,8 +1165,6 @@
 	struct ipa_reg_save_gsi_ch_cntxt_per_ep_s
 		a7[IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_A7];
 	struct ipa_reg_save_gsi_ch_cntxt_per_ep_s
-		q6[IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_Q6];
-	struct ipa_reg_save_gsi_ch_cntxt_per_ep_s
 		uc[IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_UC];
 };
 
@@ -1220,8 +1173,6 @@
 	struct ipa_reg_save_gsi_evt_cntxt_per_ep_s
 		a7[IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_A7];
 	struct ipa_reg_save_gsi_evt_cntxt_per_ep_s
-		q6[IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_Q6];
-	struct ipa_reg_save_gsi_evt_cntxt_per_ep_s
 		uc[IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_UC];
 };
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 74d0e15..1bf724c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -6685,6 +6685,7 @@
 
 	smmu_info.present[IPA_SMMU_CB_AP] = true;
 	ipa3_ctx->pdev = dev;
+	cb->next_addr = cb->va_end;
 
 	return 0;
 }
@@ -7082,6 +7083,10 @@
 				!(ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC] |
 				ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN]);
 		break;
+	case IPA_SMMU_AP_CLIENT:
+		is_smmu_enable =
+			!(ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP]);
+		break;
 	default:
 		is_smmu_enable = false;
 		IPAERR("Trying to get illegal clients SMMU status");
@@ -7112,7 +7117,9 @@
 		ret = PTR_ERR(ipa3_ctx->mbox);
 		if (ret != -EPROBE_DEFER)
 			IPAERR("mailbox channel request failed, ret=%d\n", ret);
-		goto cleanup;
+
+		ipa3_ctx->mbox = NULL;
+		return;
 	}
 
 	/* prepare the QMP packet to send */
@@ -7127,8 +7134,10 @@
 	}
 
 cleanup:
-	ipa3_ctx->mbox = NULL;
-	mbox_free_channel(ipa3_ctx->mbox);
+	if (ipa3_ctx->mbox) {
+		mbox_free_channel(ipa3_ctx->mbox);
+		ipa3_ctx->mbox = NULL;
+	}
 }
 
 /**************************************************************
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
index 27b724f..e4daeb6 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <asm/barrier.h>
@@ -1015,7 +1015,7 @@
 	return result;
 }
 
-static int ipa3_get_gsi_chan_info(struct gsi_chan_info *gsi_chan_info,
+int ipa3_get_gsi_chan_info(struct gsi_chan_info *gsi_chan_info,
 	unsigned long chan_hdl)
 {
 	enum gsi_status gsi_res;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 1f28884..cb932cb 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -34,7 +34,7 @@
 };
 
 
-const char *ipa3_event_name[] = {
+const char *ipa3_event_name[IPA_EVENT_MAX_NUM] = {
 	__stringify(WLAN_CLIENT_CONNECT),
 	__stringify(WLAN_CLIENT_DISCONNECT),
 	__stringify(WLAN_CLIENT_POWER_SAVE_MODE),
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index dad8582..d1422db 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -92,7 +92,8 @@
 				entry->hdr->phys_base,
 				hdr_base_addr,
 				entry->hdr->offset_entry,
-				entry->l2tp_params);
+				entry->l2tp_params,
+				ipa3_ctx->use_64_bit_dma_mask);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index ac65618..192fced 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -235,8 +235,8 @@
 #define IPA_PIPE_MEM_START_OFST (0x0)
 #define IPA_PIPE_MEM_SIZE (0x0)
 #define IPA_MOBILE_AP_MODE(x) (x == IPA_MODE_MOBILE_AP_ETH || \
-			       x == IPA_MODE_MOBILE_AP_WAN || \
-			       x == IPA_MODE_MOBILE_AP_WLAN)
+				   x == IPA_MODE_MOBILE_AP_WAN || \
+				   x == IPA_MODE_MOBILE_AP_WLAN)
 #define IPA_CNOC_CLK_RATE (75 * 1000 * 1000UL)
 #define IPA_A5_MUX_HEADER_LENGTH (8)
 
@@ -2735,4 +2735,54 @@
 static inline void ipa_save_registers(void) {};
 static inline void ipa_save_gsi_ver(void) {};
 #endif
+
+#ifdef CONFIG_IPA_ETH
+int ipa_eth_init(void);
+void ipa_eth_exit(void);
+#else
+static inline int ipa_eth_init(void) { return 0; }
+static inline void ipa_eth_exit(void) { }
+#endif // CONFIG_IPA_ETH
+int ipa3_get_gsi_chan_info(struct gsi_chan_info *gsi_chan_info,
+	unsigned long chan_hdl);
+#ifdef CONFIG_IPA3_MHI_PRIME_MANAGER
+int ipa_mpm_mhip_xdci_pipe_enable(enum ipa_usb_teth_prot prot);
+int ipa_mpm_mhip_xdci_pipe_disable(enum ipa_usb_teth_prot xdci_teth_prot);
+int ipa_mpm_notify_wan_state(void);
+int ipa_mpm_mhip_ul_data_stop(enum ipa_usb_teth_prot xdci_teth_prot);
+int ipa3_is_mhip_offload_enabled(void);
+int ipa_mpm_reset_dma_mode(enum ipa_client_type src_pipe,
+	enum ipa_client_type dst_pipe);
+#else
+static inline int ipa_mpm_mhip_xdci_pipe_enable(
+	enum ipa_usb_teth_prot prot)
+{
+	return 0;
+}
+static inline int ipa_mpm_mhip_xdci_pipe_disable(
+	enum ipa_usb_teth_prot xdci_teth_prot)
+{
+	return 0;
+}
+static inline int ipa_mpm_notify_wan_state(void)
+{
+	return 0;
+}
+static inline int ipa_mpm_mhip_ul_data_stop(
+	enum ipa_usb_teth_prot xdci_teth_prot)
+{
+	return 0;
+}
+static inline int ipa3_is_mhip_offload_enabled(void)
+{
+	return 0;
+}
+static inline int ipa_mpm_reset_dma_mode(enum ipa_client_type src_pipe,
+	enum ipa_client_type dst_pipe)
+{
+	return 0;
+}
+
+#endif /* CONFIG_IPA3_MHI_PRIME_MANAGER */
+
 #endif /* _IPA3_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
new file mode 100644
index 0000000..c8ced3a
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
@@ -0,0 +1,2394 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mhi.h>
+#include <linux/msm_gsi.h>
+#include <linux/delay.h>
+#include <linux/log2.h>
+#include "../ipa_common_i.h"
+#include "ipa_i.h"
+
+#define IPA_MPM_DRV_NAME "ipa_mpm"
+
+#define IPA_MPM_DBG(fmt, args...) \
+	do { \
+		pr_debug(IPA_MPM_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPA_MPM_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_MPM_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_MPM_DBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(IPA_MPM_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_MPM_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+
+#define IPA_MPM_ERR(fmt, args...) \
+	do { \
+		pr_err(IPA_MPM_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+				IPA_MPM_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+				IPA_MPM_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+
+#define IPA_MPM_FUNC_ENTRY() \
+	IPA_MPM_DBG("ENTRY\n")
+#define IPA_MPM_FUNC_EXIT() \
+	IPA_MPM_DBG("EXIT\n")
+
+#define IPA_MPM_MAX_MHIP_CHAN 3
+
+#define IPA_MPM_NUM_RING_DESC 0x400
+#define IPA_MPM_RING_LEN (IPA_MPM_NUM_RING_DESC - 10)
+
+#define IPA_MPM_MHI_HOST_UL_CHANNEL 4
+#define IPA_MPM_MHI_HOST_DL_CHANNEL  5
+#define DEFAULT_AGGR_TIME_LIMIT 1000 /* 1ms */
+#define DEFAULT_AGGR_PKT_LIMIT 0
+#define TRE_BUFF_SIZE 32768
+#define IPA_HOLB_TMR_EN 0x1
+#define IPA_HOLB_TMR_DIS 0x0
+#define RNDIS_IPA_DFLT_RT_HDL 0
+#define IPA_POLL_FOR_EMPTINESS_NUM 50
+#define IPA_POLL_FOR_EMPTINESS_SLEEP_USEC 20
+#define IPA_CHANNEL_STOP_IN_PROC_TO_MSEC 5
+#define IPA_CHANNEL_STOP_IN_PROC_SLEEP_USEC 200
+
+enum mhip_re_type {
+	MHIP_RE_XFER = 0x2,
+	MHIP_RE_NOP = 0x4,
+};
+
+enum ipa_mpm_mhi_ch_id_type {
+	IPA_MPM_MHIP_CH_ID_0,
+	IPA_MPM_MHIP_CH_ID_1,
+	IPA_MPM_MHIP_CH_ID_2,
+	IPA_MPM_MHIP_CH_ID_MAX,
+};
+
+enum ipa_mpm_dma_data_direction {
+	DMA_HIPA_BIDIRECTIONAL = 0,
+	DMA_TO_HIPA = 1,
+	DMA_FROM_HIPA = 2,
+	DMA_HIPA_NONE = 3,
+};
+
+enum ipa_mpm_ipa_teth_client_type {
+	IPA_MPM_MHIP_USB,
+	IPA_MPM_MHIP_WIFI,
+};
+
+enum ipa_mpm_mhip_client_type {
+	IPA_MPM_MHIP_INIT,
+	/* USB RMNET CLIENT */
+	IPA_MPM_MHIP_USB_RMNET,
+	/* USB RNDIS / WIFI CLIENT */
+	IPA_MPM_MHIP_TETH,
+	/* USB DPL CLIENT */
+	IPA_MPM_MHIP_USB_DPL,
+	IPA_MPM_MHIP_NONE,
+};
+
+enum ipa_mpm_start_stop_type {
+	STOP,
+	START,
+};
+
+enum ipa_mpm_clk_vote_type {
+	CLK_ON,
+	CLK_OFF,
+};
+
+enum mhip_status_type {
+	MHIP_STATUS_SUCCESS,
+	MHIP_STATUS_NO_OP,
+	MHIP_STATUS_FAIL,
+	MHIP_STATUS_BAD_STATE,
+	MHIP_STATUS_EP_NOT_FOUND,
+	MHIP_STATUS_EP_NOT_READY,
+};
+
+enum mhip_smmu_domain_type {
+	MHIP_SMMU_DOMAIN_IPA,
+	MHIP_SMMU_DOMAIN_PCIE,
+	MHIP_SMMU_DOMAIN_NONE,
+};
+
+/* each pair of UL/DL channels are defined below */
+static const struct mhi_device_id mhi_driver_match_table[] = {
+	{ .chan = "IP_HW_MHIP_0" }, // for rmnet pipes
+	{ .chan = "IP_HW_MHIP_1" }, // for MHIP teth pipes - rndis/wifi
+	{ .chan = "IP_HW_ADPL" }, // DPL/ODL DL pipe
+};
+
+/*
+ * MHI PRIME GSI Descriptor format that Host IPA uses.
+ */
+struct __packed mhi_p_desc {
+	uint64_t buffer_ptr;
+	uint16_t buff_len;
+	uint16_t resvd1;
+	uint16_t chain : 1;
+	uint16_t resvd4 : 7;
+	uint16_t ieob : 1;
+	uint16_t ieot : 1;
+	uint16_t bei : 1;
+	uint16_t sct : 1;
+	uint16_t resvd3 : 4;
+	uint8_t re_type;
+	uint8_t resvd2;
+};
+
+/*
+ * MHI PRIME Channel Context and Event Context Array
+ * Information that is sent to Device IPA.
+ */
+struct ipa_mpm_channel_context_type {
+	u32 chstate : 8;
+	u32 reserved1 : 24;
+	u32 chtype;
+	u32 erindex;
+	u64 rbase;
+	u64 rlen;
+	u64 reserved2;
+	u64 reserved3;
+} __packed;
+
+struct ipa_mpm_event_context_type {
+	u32 reserved1 : 8;
+	u32 update_rp_modc : 8;
+	u32 update_rp_intmodt : 16;
+	u32 ertype;
+	u32 update_rp_addr;
+	u64 rbase;
+	u64 rlen;
+	u32 buff_size : 16;
+	u32 reserved2 : 16;
+	u32 reserved3;
+	u64 reserved4;
+} __packed;
+
+struct ipa_mpm_pipes_info_type {
+	enum ipa_client_type ipa_client;
+	struct ipa_ep_cfg ep_cfg;
+};
+
+struct ipa_mpm_channel_type {
+	struct ipa_mpm_pipes_info_type dl_cons;
+	struct ipa_mpm_pipes_info_type ul_prod;
+	enum ipa_mpm_mhip_client_type mhip_client;
+};
+
+static struct ipa_mpm_channel_type ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_MAX];
+
+/* For configuring IPA_CLIENT_MHI_PRIME_TETH_CONS */
+static struct ipa_ep_cfg mhip_dl_teth_ep_cfg = {
+	.mode = {
+		.mode = IPA_BASIC,
+		.dst = IPA_CLIENT_MHI_PRIME_TETH_CONS,
+	},
+};
+
+static struct ipa_ep_cfg mhip_ul_teth_ep_cfg = {
+	.mode = {
+		.mode = IPA_BASIC,
+		.dst = IPA_CLIENT_MHI_PRIME_TETH_PROD,
+	},
+};
+
+/* WARNING!! Temporary for rndis integration only */
+
+
+/* For configuring IPA_CLIENT_MHIP_RMNET_PROD */
+static struct ipa_ep_cfg mhip_dl_rmnet_ep_cfg = {
+	.mode = {
+		.mode = IPA_DMA,
+		.dst = IPA_CLIENT_USB_CONS,
+	},
+};
+
+/* For configuring IPA_CLIENT_MHIP_RMNET_CONS */
+static struct ipa_ep_cfg mhip_ul_rmnet_ep_cfg = {
+	.mode = {
+		.mode = IPA_DMA,
+		.dst = IPA_CLIENT_USB_CONS,
+	},
+};
+
+/* For configuring IPA_CLIENT_MHIP_DPL_PROD */
+static struct ipa_ep_cfg mhip_dl_dpl_ep_cfg = {
+	.mode = {
+		.mode = IPA_DMA,
+		.dst = IPA_CLIENT_USB_CONS,
+	},
+};
+
+
+struct ipa_mpm_iova_addr {
+	dma_addr_t base;
+	unsigned int size;
+};
+
+struct ipa_mpm_dev_info {
+	struct platform_device *pdev;
+	struct device *dev;
+	bool ipa_smmu_enabled;
+	bool pcie_smmu_enabled;
+	struct ipa_mpm_iova_addr ctrl;
+	struct ipa_mpm_iova_addr data;
+	u32 chdb_base;
+	u32 erdb_base;
+};
+
+struct ipa_mpm_event_props {
+	u16 id;
+	phys_addr_t device_db;
+	struct ipa_mpm_event_context_type ev_ctx;
+};
+
+struct ipa_mpm_channel_props {
+	u16 id;
+	phys_addr_t device_db;
+	struct ipa_mpm_channel_context_type ch_ctx;
+};
+
+struct ipa_mpm_channel {
+	struct ipa_mpm_channel_props chan_props;
+	struct ipa_mpm_event_props evt_props;
+};
+
+enum ipa_mpm_gsi_state {
+	GSI_ERR,
+	GSI_INIT,
+	GSI_ALLOCATED,
+	GSI_STARTED,
+	GSI_STOPPED,
+};
+
+enum ipa_mpm_teth_state {
+	IPA_MPM_TETH_INIT = 0,
+	IPA_MPM_TETH_INPROGRESS,
+	IPA_MPM_TETH_CONNECTED,
+};
+
+enum ipa_mpm_mhip_chan {
+	IPA_MPM_MHIP_CHAN_UL,
+	IPA_MPM_MHIP_CHAN_DL,
+	IPA_MPM_MHIP_CHAN_BOTH,
+};
+
+struct producer_rings {
+	struct mhi_p_desc *tr_va;
+	struct mhi_p_desc *er_va;
+	dma_addr_t tr_pa;
+	dma_addr_t er_pa;
+	void *tre_buff[IPA_MPM_RING_LEN];
+	/*
+	 * The iova generated for AP CB,
+	 * used only for dma_map_single to flush the cache.
+	 */
+	dma_addr_t ap_iova_er;
+	dma_addr_t ap_iova_tr;
+	dma_addr_t ap_iova_buff[IPA_MPM_RING_LEN];
+};
+
+struct ipa_mpm_mhi_driver {
+	struct mhi_device *mhi_dev;
+	struct producer_rings ul_prod_ring;
+	struct producer_rings dl_prod_ring;
+	struct ipa_mpm_channel ul_prod;
+	struct ipa_mpm_channel dl_cons;
+	enum ipa_mpm_mhip_client_type mhip_client;
+	enum ipa_mpm_gsi_state gsi_state;
+	enum ipa_mpm_teth_state teth_state;
+	struct mutex mutex;
+	bool init_complete;
+};
+
+struct ipa_mpm_context {
+	struct ipa_mpm_dev_info dev_info;
+	struct ipa_mpm_mhi_driver md[IPA_MPM_MAX_MHIP_CHAN];
+	struct mutex mutex;
+	atomic_t ipa_clk_ref_cnt;
+	atomic_t pcie_clk_ref_cnt;
+	struct device *parent_pdev;
+	struct ipa_smmu_cb_ctx carved_smmu_cb;
+};
+
+#define IPA_MPM_DESC_SIZE (sizeof(struct mhi_p_desc))
+#define IPA_MPM_RING_TOTAL_SIZE (IPA_MPM_RING_LEN * IPA_MPM_DESC_SIZE)
+#define IPA_MPM_PAGE_SIZE roundup_pow_of_two(IPA_MPM_RING_TOTAL_SIZE)
+
+
+static struct ipa_mpm_context *ipa_mpm_ctx;
+static struct platform_device *m_pdev;
+static int ipa_mpm_mhi_probe_cb(struct mhi_device *,
+	const struct mhi_device_id *);
+static void ipa_mpm_mhi_remove_cb(struct mhi_device *);
+static void ipa_mpm_mhi_status_cb(struct mhi_device *, enum MHI_CB);
+static void ipa_mpm_change_teth_state(int probe_id,
+	enum ipa_mpm_teth_state ip_state);
+static void ipa_mpm_change_gsi_state(int probe_id,
+	enum ipa_mpm_gsi_state next_state);
+static int ipa_mpm_start_stop_mhip_data_path(int probe_id,
+	enum ipa_mpm_start_stop_type start);
+static int ipa_mpm_probe(struct platform_device *pdev);
+static int ipa_mpm_vote_unvote_pcie_clk(enum ipa_mpm_clk_vote_type vote,
+	int probe_id);
+static void ipa_mpm_vote_unvote_ipa_clk(enum ipa_mpm_clk_vote_type vote);
+static enum mhip_status_type ipa_mpm_start_stop_mhip_chan(
+	enum ipa_mpm_mhip_chan mhip_chan,
+	int probe_id,
+	enum ipa_mpm_start_stop_type start_stop);
+
+static struct mhi_driver mhi_driver = {
+	.id_table = mhi_driver_match_table,
+	.probe = ipa_mpm_mhi_probe_cb,
+	.remove = ipa_mpm_mhi_remove_cb,
+	.status_cb = ipa_mpm_mhi_status_cb,
+	.driver = {
+		.name = IPA_MPM_DRV_NAME,
+		.owner = THIS_MODULE,
+	},
+};
+
+static void ipa_mpm_ipa3_delayed_probe(struct work_struct *work)
+{
+	(void)ipa_mpm_probe(m_pdev);
+}
+
+static DECLARE_WORK(ipa_mpm_ipa3_scheduled_probe, ipa_mpm_ipa3_delayed_probe);
+
+static void ipa_mpm_ipa3_ready_cb(void *user_data)
+{
+	struct platform_device *pdev = (struct platform_device *)(user_data);
+
+	m_pdev = pdev;
+
+	IPA_MPM_DBG("IPA ready callback has been triggered\n");
+
+	schedule_work(&ipa_mpm_ipa3_scheduled_probe);
+}
+
+void ipa_mpm_gsi_evt_ring_err_cb(struct gsi_evt_err_notify *err_data)
+{
+	IPA_MPM_ERR("GSI EVT RING ERROR, not expected..\n");
+	ipa_assert();
+}
+
+void ipa_mpm_gsi_chan_err_cb(struct gsi_chan_err_notify *err_data)
+{
+	IPA_MPM_ERR("GSI CHAN ERROR, not expected..\n");
+	ipa_assert();
+}
+
+/**
+ * ipa_mpm_smmu_map() - SMMU maps ring and the buffer pointer.
+ * @va_addr: virtual address that needs to be mapped
+ * @sz: size of the address to be mapped
+ * @dir: ipa_mpm_dma_data_direction
+ * @ap_cb_iova: iova for AP context bank
+ *
+ * This function SMMU maps both ring and the buffer pointer.
+ * The ring pointers will be aligned to ring size and
+ * the buffer pointers should be aligned to buffer size.
+ *
+ * Returns: iova of the mapped address
+ */
+static dma_addr_t ipa_mpm_smmu_map(void *va_addr,
+	int sz,
+	int dir,
+	dma_addr_t *ap_cb_iova)
+{
+	struct iommu_domain *ipa_smmu_domain, *pcie_smmu_domain;
+	phys_addr_t phys_addr;
+	dma_addr_t iova;
+	int smmu_enabled;
+	unsigned long iova_p;
+	phys_addr_t pa_p;
+	u32 size_p;
+	int prot = IOMMU_READ | IOMMU_WRITE;
+	struct ipa_smmu_cb_ctx *cb = &ipa_mpm_ctx->carved_smmu_cb;
+	unsigned long carved_iova = roundup(cb->next_addr, IPA_MPM_PAGE_SIZE);
+	int ret = 0;
+
+	if (carved_iova >= cb->va_end) {
+		IPA_MPM_ERR("running out of carved_iova %x\n", carved_iova);
+		ipa_assert();
+	}
+	/*
+	 * Both Host IPA and PCIE SMMU should be enabled or disabled
+	 * to proceed.
+	 * If SMMU Enabled => iova == pa
+	 * If SMMU Disabled => iova == iommu mapped iova
+	 * dma_map_single ensures cache is flushed and the memory is not
+	 * touched again until dma_unmap_single() is called
+	 */
+	smmu_enabled = (ipa_mpm_ctx->dev_info.ipa_smmu_enabled &&
+		ipa_mpm_ctx->dev_info.pcie_smmu_enabled) ? 1 : 0;
+
+	if (smmu_enabled) {
+		/* Map the phys addr to both PCIE and IPA AP CB
+		 * from the carved out common iova range.
+		 */
+		ipa_smmu_domain = ipa3_get_smmu_domain();
+
+		if (!ipa_smmu_domain) {
+			IPA_MPM_ERR("invalid IPA smmu domain\n");
+			ipa_assert();
+		}
+
+		if (!ipa_mpm_ctx->md[0].mhi_dev->dev.parent) {
+			IPA_MPM_ERR("invalid PCIE SMMU domain\n");
+			ipa_assert();
+		}
+
+		phys_addr = virt_to_phys((void *) va_addr);
+		IPA_SMMU_ROUND_TO_PAGE(carved_iova, phys_addr, sz,
+					iova_p, pa_p, size_p);
+
+		/* Flush the cache with dma_map_single for IPA AP CB */
+		*ap_cb_iova = dma_map_single(ipa3_ctx->pdev, va_addr,
+						sz, dir);
+		ret = ipa3_iommu_map(ipa_smmu_domain, iova_p,
+					pa_p, size_p, prot);
+		if (ret) {
+			IPA_MPM_ERR("IPA IOMMU returned failure, ret = %d\n",
+					ret);
+			ipa_assert();
+		}
+
+		pcie_smmu_domain = iommu_get_domain_for_dev(
+			ipa_mpm_ctx->md[0].mhi_dev->dev.parent);
+		ret = iommu_map(pcie_smmu_domain, iova_p, pa_p, size_p, prot);
+
+		if (ret) {
+			IPA_MPM_ERR("PCIe IOMMU returned failure, ret = %d\n",
+				ret);
+			ipa_assert();
+		}
+
+		iova = iova_p;
+		cb->next_addr = iova_p + size_p;
+	} else {
+		iova = dma_map_single(ipa3_ctx->pdev, va_addr, sz, dir);
+		*ap_cb_iova = iova;
+	}
+	return iova;
+}
+
+/**
+ * ipa_mpm_smmu_unmap() - SMMU unmaps ring and the buffer pointer.
+ * @va_addr: virtual address that needs to be mapped
+ * @sz: size of the address to be mapped
+ * @dir: ipa_mpm_dma_data_direction
+ * @ap_cb_iova: iova for AP context bank
+ *
+ * This function SMMU unmaps both ring and the buffer pointer.
+ * The ring pointers will be aligned to ring size and
+ * the buffer pointers should be aligned to buffer size.
+ *
+ * Return: none
+ */
+static void ipa_mpm_smmu_unmap(dma_addr_t carved_iova, int sz, int dir,
+	dma_addr_t ap_cb_iova)
+{
+	int ret;
+	unsigned long iova_p;
+	unsigned long pa_p;
+	u32 size_p = 0;
+	struct iommu_domain *ipa_smmu_domain, *pcie_smmu_domain;
+	struct ipa_smmu_cb_ctx *cb = &ipa_mpm_ctx->carved_smmu_cb;
+	int smmu_enabled = (ipa_mpm_ctx->dev_info.ipa_smmu_enabled &&
+		ipa_mpm_ctx->dev_info.pcie_smmu_enabled) ? 1 : 0;
+
+	if (smmu_enabled) {
+		ipa_smmu_domain = ipa3_get_smmu_domain();
+		if (!ipa_smmu_domain) {
+			IPA_MPM_ERR("invalid IPA smmu domain\n");
+			ipa_assert();
+		}
+
+		if (!ipa_mpm_ctx->md[0].mhi_dev->dev.parent) {
+			IPA_MPM_ERR("invalid PCIE SMMU domain\n");
+			ipa_assert();
+		}
+
+		IPA_SMMU_ROUND_TO_PAGE(carved_iova, carved_iova, sz,
+					iova_p, pa_p, size_p);
+
+		ret = iommu_unmap(ipa_smmu_domain, carved_iova, size_p);
+		if (ret) {
+			IPA_MPM_ERR("IPA IOMMU Unmap failure, ret = %d\n",
+					ret);
+			ipa_assert();
+		}
+		pcie_smmu_domain = iommu_get_domain_for_dev(
+			ipa_mpm_ctx->md[0].mhi_dev->dev.parent);
+
+		ret = iommu_unmap(pcie_smmu_domain, carved_iova, size_p);
+
+		if (ret) {
+			IPA_MPM_ERR("PCIe IOMMU Unmap failure, ret = %d\n",
+				ret);
+			ipa_assert();
+		}
+		cb->next_addr -= size_p;
+		dma_unmap_single(ipa3_ctx->pdev, ap_cb_iova, size_p, dir);
+	} else {
+		dma_unmap_single(ipa3_ctx->pdev, ap_cb_iova, sz, dir);
+	}
+}
+
+static u32 ipa_mpm_smmu_map_doorbell(enum mhip_smmu_domain_type smmu_domain,
+	u32 pa_addr)
+{
+	/*
+	 * Doorbells are already in PA, map these to
+	 * PCIE/IPA domain if SMMUs are enabled.
+	 */
+	struct iommu_domain *ipa_smmu_domain, *pcie_smmu_domain;
+	int smmu_enabled;
+	unsigned long iova_p;
+	phys_addr_t pa_p;
+	u32 size_p;
+	int ret = 0;
+	int prot = IOMMU_READ | IOMMU_WRITE;
+	struct ipa_smmu_cb_ctx *cb = &ipa_mpm_ctx->carved_smmu_cb;
+	unsigned long carved_iova = roundup(cb->next_addr, PAGE_SIZE);
+	u32 iova = 0;
+	u64 offset = 0;
+
+	if (carved_iova >= cb->va_end) {
+		IPA_MPM_ERR("running out of carved_iova %x\n", carved_iova);
+		ipa_assert();
+	}
+
+	smmu_enabled = (ipa_mpm_ctx->dev_info.ipa_smmu_enabled &&
+		ipa_mpm_ctx->dev_info.pcie_smmu_enabled) ? 1 : 0;
+
+	if (smmu_enabled) {
+		IPA_SMMU_ROUND_TO_PAGE(carved_iova, pa_addr, PAGE_SIZE,
+					iova_p, pa_p, size_p);
+		if (smmu_domain == MHIP_SMMU_DOMAIN_IPA) {
+			ipa_smmu_domain = ipa3_get_smmu_domain();
+			ret = ipa3_iommu_map(ipa_smmu_domain,
+				iova_p, pa_p, size_p, prot);
+			if (ret) {
+				IPA_MPM_ERR("IPA doorbell mapping failed\n");
+				ipa_assert();
+			}
+			offset = pa_addr - pa_p;
+		} else if (smmu_domain == MHIP_SMMU_DOMAIN_PCIE) {
+			pcie_smmu_domain = iommu_get_domain_for_dev(
+				ipa_mpm_ctx->md[0].mhi_dev->dev.parent);
+			 ret = iommu_map(pcie_smmu_domain,
+				iova_p, pa_p, size_p, prot);
+			if (ret) {
+				IPA_MPM_ERR("PCIe doorbell mapping failed\n");
+				ipa_assert();
+			}
+			offset = pa_addr - pa_p;
+		}
+		iova = iova_p + offset;
+		cb->next_addr = iova_p + PAGE_SIZE;
+	} else {
+		iova = pa_addr;
+	}
+	return iova;
+}
+
+int get_idx_from_id(const struct mhi_device_id *id)
+{
+	return (id - mhi_driver_match_table);
+}
+
+static void get_ipa3_client(int id,
+	enum ipa_client_type *ul_prod,
+	enum ipa_client_type *dl_cons)
+{
+	IPA_MPM_FUNC_ENTRY();
+
+	if (id >= IPA_MPM_MHIP_CH_ID_MAX) {
+		*ul_prod = IPA_CLIENT_MAX;
+		*dl_cons = IPA_CLIENT_MAX;
+	} else {
+		*ul_prod = ipa_mpm_pipes[id].ul_prod.ipa_client;
+		*dl_cons = ipa_mpm_pipes[id].dl_cons.ipa_client;
+	}
+	IPA_MPM_FUNC_EXIT();
+}
+
+static int ipa_mpm_connect_mhip_gsi_pipe(enum ipa_client_type mhip_client,
+	int mhi_idx, struct ipa_req_chan_out_params *out_params)
+{
+	int ipa_ep_idx;
+	int res;
+	struct mhi_p_desc *ev_ring;
+	struct mhi_p_desc *tr_ring;
+	int tr_ring_sz, ev_ring_sz;
+	dma_addr_t ev_ring_iova, tr_ring_iova;
+	dma_addr_t ap_cb_iova;
+	struct ipa_request_gsi_channel_params gsi_params;
+	int dir;
+	int i;
+	void *buff;
+	int result;
+	int k;
+	struct ipa3_ep_context *ep;
+
+	if (mhip_client == IPA_CLIENT_MAX)
+		goto fail_gen;
+
+	if (mhi_idx == IPA_MPM_MHIP_CH_ID_MAX)
+		goto fail_gen;
+
+	ipa_ep_idx = ipa3_get_ep_mapping(mhip_client);
+	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
+		IPA_MPM_ERR("fail to find channel EP.\n");
+		goto fail_gen;
+	}
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+	if (ep->valid == 1) {
+		IPAERR("EP %d already allocated.\n", ipa_ep_idx);
+		return 0;
+	}
+
+	IPA_MPM_DBG("connecting client %d (ep: %d)\n", mhip_client, ipa_ep_idx);
+
+	IPA_MPM_FUNC_ENTRY();
+
+	ev_ring_sz = IPA_MPM_RING_TOTAL_SIZE;
+	ev_ring = kzalloc(ev_ring_sz, GFP_KERNEL);
+	if (!ev_ring)
+		goto fail_evt_alloc;
+
+	tr_ring_sz = IPA_MPM_RING_TOTAL_SIZE;
+	tr_ring = kzalloc(tr_ring_sz, GFP_KERNEL);
+	if (!tr_ring)
+		goto fail_tr_alloc;
+
+	tr_ring[0].re_type = MHIP_RE_NOP;
+
+	dir = IPA_CLIENT_IS_PROD(mhip_client) ?
+		DMA_TO_HIPA : DMA_FROM_HIPA;
+
+	/* allocate transfer ring elements */
+	for (i = 1, k = 0; i < IPA_MPM_RING_LEN; i++, k++) {
+		buff = kzalloc(TRE_BUFF_SIZE, GFP_KERNEL);
+
+		if (!buff)
+			goto fail_buff_alloc;
+
+		if (IPA_CLIENT_IS_PROD(mhip_client))
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tre_buff[k] =
+									buff;
+		else
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tre_buff[k] =
+									buff;
+
+		tr_ring[i].buffer_ptr =
+			ipa_mpm_smmu_map(buff, TRE_BUFF_SIZE, dir,
+				&ap_cb_iova);
+
+		if (!tr_ring[i].buffer_ptr)
+			goto fail_smmu_map_ring;
+
+		tr_ring[i].buff_len = TRE_BUFF_SIZE;
+		tr_ring[i].chain = 0;
+		tr_ring[i].ieob = 0;
+		tr_ring[i].ieot = 0;
+		tr_ring[i].bei = 0;
+		tr_ring[i].sct = 0;
+		tr_ring[i].re_type = MHIP_RE_XFER;
+
+		if (IPA_CLIENT_IS_PROD(mhip_client))
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_buff[k] =
+				ap_cb_iova;
+		else
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_buff[k] =
+				ap_cb_iova;
+	}
+
+	tr_ring_iova = ipa_mpm_smmu_map(tr_ring, tr_ring_sz, dir,
+		&ap_cb_iova);
+	if (!tr_ring_iova)
+		goto fail_smmu_map_ring;
+
+	ev_ring_iova = ipa_mpm_smmu_map(ev_ring, ev_ring_sz, dir,
+		&ap_cb_iova);
+	if (!ev_ring_iova)
+		goto fail_smmu_map_ring;
+
+	/* Store Producer channel rings */
+	if (IPA_CLIENT_IS_PROD(mhip_client)) {
+		/* Device UL */
+		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_va = ev_ring;
+		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_va = tr_ring;
+		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_pa = ev_ring_iova;
+		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_pa = tr_ring_iova;
+		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_tr =
+			ap_cb_iova;
+	} else {
+		/* Host UL */
+		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_va = ev_ring;
+		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_va = tr_ring;
+		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_pa = ev_ring_iova;
+		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_pa = tr_ring_iova;
+		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_tr =
+			ap_cb_iova;
+	}
+
+	memset(&gsi_params, 0, sizeof(struct ipa_request_gsi_channel_params));
+
+	if (IPA_CLIENT_IS_PROD(mhip_client))
+		gsi_params.ipa_ep_cfg =
+		ipa_mpm_pipes[mhi_idx].dl_cons.ep_cfg;
+	else
+		gsi_params.ipa_ep_cfg =
+		ipa_mpm_pipes[mhi_idx].ul_prod.ep_cfg;
+
+	gsi_params.client = mhip_client;
+	gsi_params.skip_ep_cfg = false;
+
+	/*
+	 * RP update address = Device channel DB address
+	 * CLIENT_PROD -> Host DL
+	 * CLIENT_CONS -> Host UL
+	 */
+	if (IPA_CLIENT_IS_PROD(mhip_client)) {
+		gsi_params.evt_ring_params.rp_update_addr =
+			ipa_mpm_smmu_map_doorbell(
+			MHIP_SMMU_DOMAIN_IPA,
+			ipa_mpm_ctx->md[mhi_idx].dl_cons.chan_props.device_db);
+		if (gsi_params.evt_ring_params.rp_update_addr == 0)
+			goto fail_smmu_map_db;
+		gsi_params.evt_ring_params.ring_base_addr =
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_pa;
+		gsi_params.chan_params.ring_base_addr =
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_pa;
+	} else {
+		gsi_params.evt_ring_params.rp_update_addr =
+			ipa_mpm_smmu_map_doorbell(
+			MHIP_SMMU_DOMAIN_IPA,
+			ipa_mpm_ctx->md[mhi_idx].ul_prod.chan_props.device_db);
+		if (gsi_params.evt_ring_params.rp_update_addr == 0)
+			goto fail_smmu_map_db;
+		gsi_params.evt_ring_params.ring_base_addr =
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_pa;
+		gsi_params.chan_params.ring_base_addr =
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_pa;
+	}
+
+	/* Fill Event ring params */
+	gsi_params.evt_ring_params.intf = GSI_EVT_CHTYPE_MHIP_EV;
+	gsi_params.evt_ring_params.intr = GSI_INTR_MSI;
+	gsi_params.evt_ring_params.re_size = GSI_EVT_RING_RE_SIZE_16B;
+	gsi_params.evt_ring_params.ring_len =
+		(IPA_MPM_RING_LEN) * GSI_EVT_RING_RE_SIZE_16B;
+	gsi_params.evt_ring_params.ring_base_vaddr = NULL;
+	gsi_params.evt_ring_params.int_modt = 0;
+	gsi_params.evt_ring_params.int_modc = 0;
+	gsi_params.evt_ring_params.intvec = 0;
+	gsi_params.evt_ring_params.msi_addr = 0;
+	gsi_params.evt_ring_params.exclusive = true;
+	gsi_params.evt_ring_params.err_cb = ipa_mpm_gsi_evt_ring_err_cb;
+	gsi_params.evt_ring_params.user_data = NULL;
+
+	/* Evt Scratch Params */
+	/* Disable the Moderation for ringing doorbells */
+	gsi_params.evt_scratch.mhip.rp_mod_threshold = 1;
+	gsi_params.evt_scratch.mhip.rp_mod_timer = 0;
+	gsi_params.evt_scratch.mhip.rp_mod_counter = 0;
+	gsi_params.evt_scratch.mhip.rp_mod_timer_id = 0;
+	gsi_params.evt_scratch.mhip.rp_mod_timer_running = 0;
+	gsi_params.evt_scratch.mhip.fixed_buffer_sz = TRE_BUFF_SIZE;
+
+	if (IPA_CLIENT_IS_PROD(mhip_client))
+		gsi_params.evt_scratch.mhip.rp_mod_threshold = 4;
+
+	/* Channel Params */
+	gsi_params.chan_params.prot = GSI_CHAN_PROT_MHIP;
+	gsi_params.chan_params.dir = IPA_CLIENT_IS_PROD(mhip_client) ?
+		GSI_CHAN_DIR_TO_GSI : GSI_CHAN_DIR_FROM_GSI;
+	/* chan_id is set in ipa3_request_gsi_channel() */
+	gsi_params.chan_params.re_size = GSI_CHAN_RE_SIZE_16B;
+	gsi_params.chan_params.ring_len =
+		(IPA_MPM_RING_LEN) * GSI_EVT_RING_RE_SIZE_16B;
+	gsi_params.chan_params.ring_base_vaddr = NULL;
+	gsi_params.chan_params.use_db_eng = GSI_CHAN_DIRECT_MODE;
+	gsi_params.chan_params.max_prefetch = GSI_ONE_PREFETCH_SEG;
+	gsi_params.chan_params.low_weight = 1;
+	gsi_params.chan_params.xfer_cb = NULL;
+	gsi_params.chan_params.err_cb = ipa_mpm_gsi_chan_err_cb;
+	gsi_params.chan_params.chan_user_data = NULL;
+
+	/* Channel scratch */
+	gsi_params.chan_scratch.mhip.assert_bit_40 = 0;
+	gsi_params.chan_scratch.mhip.host_channel = 1;
+
+	res = ipa3_request_gsi_channel(&gsi_params, out_params);
+	if (res) {
+		IPA_MPM_ERR("failed to allocate GSI channel res=%d\n", res);
+		goto fail_alloc_channel;
+	}
+
+	ipa_mpm_change_gsi_state(mhi_idx, GSI_ALLOCATED);
+
+	result = ipa3_start_gsi_channel(ipa_ep_idx);
+	if (result) {
+		IPA_MPM_ERR("start MHIP channel %d failed\n", mhip_client);
+		ipa_mpm_ctx->md[mhi_idx].gsi_state = GSI_ERR;
+		goto fail_start_channel;
+	}
+	ipa_mpm_change_gsi_state(mhi_idx, GSI_STARTED);
+
+	/* Fill in the Device Context params */
+	if (IPA_CLIENT_IS_PROD(mhip_client)) {
+		/* This is the DL channel :: Device -> Host */
+		ipa_mpm_ctx->md[mhi_idx].dl_cons.evt_props.ev_ctx.rbase =
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_pa;
+		ipa_mpm_ctx->md[mhi_idx].dl_cons.chan_props.ch_ctx.rbase =
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_pa;
+	} else {
+		ipa_mpm_ctx->md[mhi_idx].ul_prod.evt_props.ev_ctx.rbase =
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_pa;
+		ipa_mpm_ctx->md[mhi_idx].ul_prod.chan_props.ch_ctx.rbase =
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_pa;
+	}
+
+	IPA_MPM_FUNC_EXIT();
+
+	return 0;
+
+fail_start_channel:
+	ipa3_disable_data_path(ipa_ep_idx);
+	ipa3_stop_gsi_channel(ipa_ep_idx);
+fail_alloc_channel:
+	ipa3_release_gsi_channel(ipa_ep_idx);
+fail_smmu_map_db:
+fail_smmu_map_ring:
+fail_tr_alloc:
+fail_evt_alloc:
+fail_buff_alloc:
+	ipa_assert();
+fail_gen:
+	return -EFAULT;
+}
+
+void ipa_mpm_clean_mhip_chan(int mhi_idx, enum ipa_client_type mhip_client)
+{
+	int dir;
+	int i;
+	int result;
+	int ipa_ep_idx;
+	struct mhi_p_desc *ev_ring;
+	struct mhi_p_desc *tr_ring;
+	int tr_ring_sz, ev_ring_sz;
+
+	IPA_MPM_FUNC_ENTRY();
+
+	if (mhip_client == IPA_CLIENT_MAX)
+		return;
+	if (mhi_idx == IPA_MPM_MHIP_CH_ID_MAX)
+		return;
+
+	dir = IPA_CLIENT_IS_PROD(mhip_client) ?
+		DMA_TO_HIPA : DMA_FROM_HIPA;
+
+	ipa_ep_idx = ipa3_get_ep_mapping(mhip_client);
+	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
+		IPA_MPM_ERR("fail to find channel EP.\n");
+		return;
+	}
+
+	/* Release channel */
+	result = ipa3_release_gsi_channel(ipa_ep_idx);
+	if (result) {
+		IPA_MPM_ERR("start MHIP channel %d failed\n", mhip_client);
+		ipa_mpm_ctx->md[mhi_idx].gsi_state = GSI_ERR;
+	}
+
+	ipa_mpm_change_gsi_state(mhi_idx, GSI_INIT);
+
+
+	/* deallocate transfer ring buffers  */
+	for (i = 0; i < IPA_MPM_RING_LEN; i++) {
+		if (IPA_CLIENT_IS_PROD(mhip_client)) {
+			ipa_mpm_smmu_unmap(
+			(dma_addr_t)
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tre_buff[i],
+			TRE_BUFF_SIZE, dir,
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_buff[i]);
+
+			kfree(
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tre_buff[i]);
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tre_buff[i]
+								= NULL;
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_buff[i]
+								= 0;
+		} else {
+			ipa_mpm_smmu_unmap(
+			(dma_addr_t)
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tre_buff[i],
+			TRE_BUFF_SIZE, dir,
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_buff[i]
+			);
+
+			kfree(
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tre_buff[i]);
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tre_buff[i]
+								= NULL;
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_buff[i]
+								= 0;
+		}
+	}
+
+	tr_ring_sz = sizeof(*tr_ring) * (IPA_MPM_RING_LEN);
+	ev_ring_sz = sizeof(*ev_ring) * (IPA_MPM_RING_LEN);
+
+	if (IPA_CLIENT_IS_PROD(mhip_client)) {
+		ipa_mpm_smmu_unmap(
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_pa,
+			ev_ring_sz, dir,
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_er);
+
+		ipa_mpm_smmu_unmap(
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_pa,
+			tr_ring_sz, dir,
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_tr);
+
+		kfree(ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_va);
+		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_va = NULL;
+
+		kfree(ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_va);
+		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_va = NULL;
+
+		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_er = 0;
+		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_tr = 0;
+	} else {
+		ipa_mpm_smmu_unmap(
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_pa,
+			ev_ring_sz, dir,
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_er);
+
+		ipa_mpm_smmu_unmap(
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_pa,
+			tr_ring_sz, dir,
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_tr);
+
+		kfree(ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_va);
+		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_va = NULL;
+
+		kfree(ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_va);
+		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_va = NULL;
+
+		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_er = 0;
+		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_tr = 0;
+	}
+
+	IPA_MPM_FUNC_EXIT();
+}
+
+/* round addresses for closest page per SMMU requirements */
+static inline void ipa_mpm_smmu_round_to_page(uint64_t iova, uint64_t pa,
+	uint64_t size, unsigned long *iova_p, phys_addr_t *pa_p, u32 *size_p)
+{
+	*iova_p = rounddown(iova, PAGE_SIZE);
+	*pa_p = rounddown(pa, PAGE_SIZE);
+	*size_p = roundup(size + pa - *pa_p, PAGE_SIZE);
+}
+
+
+static int __ipa_mpm_configure_mhi_device(struct ipa_mpm_channel *ch,
+	int mhi_idx, int dir)
+{
+	struct mhi_buf ch_config[2];
+	int ret;
+
+	IPA_MPM_FUNC_ENTRY();
+
+	if (ch == NULL) {
+		IPA_MPM_ERR("ch config is NULL\n");
+		return -EINVAL;
+	}
+
+	/* Populate CCA */
+	ch_config[0].buf = &ch->chan_props.ch_ctx;
+	ch_config[0].len = sizeof(ch->chan_props.ch_ctx);
+	ch_config[0].name = "CCA";
+
+	/* populate ECA */
+	ch_config[1].buf = &ch->evt_props.ev_ctx;
+	ch_config[1].len = sizeof(ch->evt_props.ev_ctx);
+	ch_config[1].name = "ECA";
+
+	IPA_MPM_DBG("Configuring MHI PRIME device for mhi_idx %d\n", mhi_idx);
+
+	ret = mhi_device_configure(ipa_mpm_ctx->md[mhi_idx].mhi_dev, dir,
+			ch_config, 2);
+	if (ret) {
+		IPA_MPM_ERR("mhi_device_configure failed\n");
+		return -EINVAL;
+	}
+
+	IPA_MPM_FUNC_EXIT();
+
+	return 0;
+}
+
+static void ipa_mpm_mhip_shutdown(void)
+{
+	int mhip_idx;
+	enum ipa_client_type ul_chan, dl_chan;
+
+	IPA_MPM_FUNC_ENTRY();
+
+	for (mhip_idx = 0; mhip_idx < IPA_MPM_MHIP_CH_ID_MAX; mhip_idx++) {
+		if (ipa_mpm_ctx->md[mhip_idx].gsi_state >= GSI_ALLOCATED) {
+			get_ipa3_client(mhip_idx, &ul_chan, &dl_chan);
+			IPA_MPM_DBG("Stopping chan = %d\n", mhip_idx);
+			/* MHIP PROD: Enable HOLB and Stop the GSI UL channel */
+			ipa_mpm_start_stop_mhip_data_path(mhip_idx, STOP);
+			ipa_mpm_start_stop_mhip_chan(IPA_MPM_MHIP_CHAN_UL,
+							mhip_idx, STOP);
+			ipa_mpm_start_stop_mhip_chan(IPA_MPM_MHIP_CHAN_DL,
+							mhip_idx, STOP);
+
+			/* Clean up the GSI UL and DL channels */
+			if (ipa_mpm_ctx->dev_info.ipa_smmu_enabled &&
+				ipa_mpm_ctx->dev_info.pcie_smmu_enabled) {
+				IPA_MPM_DBG("Cleaning SMMU entries..\n");
+			}
+
+			ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, mhip_idx);
+			ipa_mpm_vote_unvote_ipa_clk(CLK_OFF);
+			if (ul_chan != IPA_CLIENT_MAX)
+				ipa_mpm_clean_mhip_chan(mhip_idx, ul_chan);
+			if (dl_chan != IPA_CLIENT_MAX)
+				ipa_mpm_clean_mhip_chan(mhip_idx, dl_chan);
+		}
+	}
+	IPA_MPM_FUNC_EXIT();
+}
+
+/*
+ * Turning on/OFF PCIE Clock is done once for all clients.
+ * Always vote for Probe_ID 0 as a standard.
+ */
+static int ipa_mpm_vote_unvote_pcie_clk(enum ipa_mpm_clk_vote_type vote,
+	int probe_id)
+{
+	int result = 0;
+
+	if (probe_id >= IPA_MPM_MHIP_CH_ID_MAX) {
+		IPA_MPM_ERR("probe_id not found\n");
+		return -EINVAL;
+	}
+
+	if (vote > CLK_OFF) {
+		IPA_MPM_ERR("Invalid vote\n");
+		return -EINVAL;
+	}
+
+	if (ipa_mpm_ctx->md[probe_id].mhi_dev == NULL) {
+		IPA_MPM_ERR("MHI not initialized yet\n");
+		return 0;
+	}
+	if (vote == CLK_ON) {
+		if (atomic_read(&ipa_mpm_ctx->pcie_clk_ref_cnt) == 0) {
+			result = mhi_device_get_sync(
+				ipa_mpm_ctx->md[probe_id].mhi_dev);
+			if (result) {
+				IPA_MPM_ERR("mhi_sync_get failed %d\n",
+					result);
+				return result;
+			}
+			IPA_MPM_DBG("PCIE clock now ON\n");
+		}
+		atomic_inc(&ipa_mpm_ctx->pcie_clk_ref_cnt);
+	} else {
+		if ((atomic_read(&ipa_mpm_ctx->pcie_clk_ref_cnt) == 1)) {
+			mhi_device_put(ipa_mpm_ctx->md[probe_id].mhi_dev);
+			IPA_MPM_DBG("PCIE clock off ON\n");
+		}
+		atomic_dec(&ipa_mpm_ctx->pcie_clk_ref_cnt);
+	}
+
+	return result;
+}
+
+/*
+ * Turning on/OFF IPA Clock is done only once- for all clients
+ */
+static void ipa_mpm_vote_unvote_ipa_clk(enum ipa_mpm_clk_vote_type vote)
+{
+	if (vote > CLK_OFF)
+		return;
+
+	if (vote == CLK_ON) {
+		if (!atomic_read(&ipa_mpm_ctx->ipa_clk_ref_cnt)) {
+			IPA_ACTIVE_CLIENTS_INC_SPECIAL("ipa_mpm");
+			IPA_MPM_DBG("IPA clock now ON\n");
+		}
+		atomic_inc(&ipa_mpm_ctx->ipa_clk_ref_cnt);
+	} else {
+		if (atomic_read(&ipa_mpm_ctx->ipa_clk_ref_cnt) == 1) {
+			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("ipa_mpm");
+			IPA_MPM_DBG("IPA clock now OFF\n");
+		}
+		atomic_dec(&ipa_mpm_ctx->ipa_clk_ref_cnt);
+	}
+}
+
+/*
+ * ipa_mpm_start_stop_mhip_chan() - start or stop the host GSI channel
+ * backing one MHIP direction.
+ * @mhip_chan: UL, DL or BOTH direction selector.
+ * @probe_id: index into ipa_mpm_ctx->md[].
+ * @start_stop: START or STOP.
+ *
+ * Return: MHIP_STATUS_* code; on GSI failure the data path is disabled,
+ * the state is marked GSI_ERR and ipa_assert() fires.
+ */
+static enum mhip_status_type ipa_mpm_start_stop_mhip_chan(
+	enum ipa_mpm_mhip_chan mhip_chan,
+	int probe_id,
+	enum ipa_mpm_start_stop_type start_stop)
+{
+	int ipa_ep_idx;
+	struct ipa3_ep_context *ep;
+	bool is_start;
+	enum ipa_client_type ul_chan, dl_chan;
+	u32 source_pipe_bitmask = 0;
+	enum gsi_status gsi_res = GSI_STATUS_SUCCESS;
+	int result;
+
+	IPA_MPM_FUNC_ENTRY();
+
+	if (mhip_chan > IPA_MPM_MHIP_CHAN_BOTH) {
+		IPA_MPM_ERR("MHI not initialized yet\n");
+		return MHIP_STATUS_FAIL;
+	}
+
+	if (probe_id >= IPA_MPM_MHIP_CH_ID_MAX) {
+		IPA_MPM_ERR("MHI not initialized yet\n");
+		return MHIP_STATUS_FAIL;
+	}
+
+	get_ipa3_client(probe_id, &ul_chan, &dl_chan);
+
+	if (mhip_chan == IPA_MPM_MHIP_CHAN_UL) {
+		ipa_ep_idx = ipa3_get_ep_mapping(ul_chan);
+	} else if (mhip_chan == IPA_MPM_MHIP_CHAN_DL) {
+		ipa_ep_idx = ipa3_get_ep_mapping(dl_chan);
+	} else if (mhip_chan == IPA_MPM_MHIP_CHAN_BOTH) {
+		/* NOTE(review): only the DL mapping survives here, so BOTH
+		 * effectively operates on the DL endpoint - confirm intent.
+		 */
+		ipa_ep_idx = ipa3_get_ep_mapping(ul_chan);
+		ipa_ep_idx = ipa3_get_ep_mapping(dl_chan);
+	}
+
+	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
+		IPA_MPM_ERR("fail to get EP# for idx %d\n", ipa_ep_idx);
+		return MHIP_STATUS_EP_NOT_FOUND;
+	}
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	IPA_MPM_DBG("current GSI state = %d, action = %d\n",
+		ipa_mpm_ctx->md[probe_id].gsi_state, start_stop);
+
+	if (ipa_mpm_ctx->md[probe_id].gsi_state < GSI_ALLOCATED) {
+		IPA_MPM_ERR("GSI chan is not allocated yet..\n");
+		return MHIP_STATUS_EP_NOT_READY;
+	}
+
+	is_start = (start_stop == START) ? true : false;
+
+	if (is_start) {
+		if (ipa_mpm_ctx->md[probe_id].gsi_state == GSI_STARTED) {
+			IPA_MPM_ERR("GSI chan is already started\n");
+			return MHIP_STATUS_NO_OP;
+		}
+
+		/* Start GSI channel */
+		gsi_res = ipa3_start_gsi_channel(ipa_ep_idx);
+		if (gsi_res != GSI_STATUS_SUCCESS) {
+			IPA_MPM_ERR("Error starting channel: err = %d\n",
+					gsi_res);
+			goto gsi_chan_fail;
+		} else {
+			ipa_mpm_change_gsi_state(probe_id, GSI_STARTED);
+		}
+	} else {
+		if (ipa_mpm_ctx->md[probe_id].gsi_state == GSI_STOPPED) {
+			IPA_MPM_ERR("GSI chan is already stopped\n");
+			return MHIP_STATUS_NO_OP;
+		} else if (ipa_mpm_ctx->md[probe_id].gsi_state !=
+							GSI_STARTED) {
+			IPA_MPM_ERR("GSI chan is not previously started\n");
+			return MHIP_STATUS_BAD_STATE;
+		}
+
+		if (mhip_chan == IPA_MPM_MHIP_CHAN_UL) {
+			source_pipe_bitmask = 1 <<
+				ipa3_get_ep_mapping(ep->client);
+
+			/* First Stop UL GSI channel before unvote PCIe clock */
+			result = ipa3_stop_gsi_channel(ipa_ep_idx);
+
+			if (result) {
+				IPA_MPM_ERR("UL chan stop failed\n");
+				goto gsi_chan_fail;
+			} else {
+				/* Bug fix: a successfully stopped channel
+				 * must be tracked as GSI_STOPPED (was
+				 * erroneously set back to GSI_STARTED).
+				 */
+				ipa_mpm_change_gsi_state(probe_id,
+							GSI_STOPPED);
+			}
+		}
+
+		if (mhip_chan == IPA_MPM_MHIP_CHAN_DL) {
+			result = ipa3_stop_gsi_channel(ipa_ep_idx);
+			if (result) {
+				IPA_MPM_ERR("Fail to stop DL channel\n");
+				goto gsi_chan_fail;
+			} else {
+				ipa_mpm_change_gsi_state(probe_id, GSI_STOPPED);
+			}
+		}
+	}
+	IPA_MPM_FUNC_EXIT();
+
+	return MHIP_STATUS_SUCCESS;
+gsi_chan_fail:
+	ipa3_disable_data_path(ipa_ep_idx);
+	ipa_mpm_change_gsi_state(probe_id, GSI_ERR);
+	ipa_assert();
+
+	return MHIP_STATUS_FAIL;
+}
+
+/*
+ * ipa_mpm_notify_wan_state() - called when the WAN backhaul becomes
+ * available; votes for the PCIe clock and starts the UL MHIP channel for
+ * the tethering (IPA_MPM_MHIP_TETH) connection.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int ipa_mpm_notify_wan_state(void)
+{
+	int probe_id = IPA_MPM_MHIP_CH_ID_MAX;
+	int i;
+	/* Bug fix: 'status' was needlessly 'static', making the function
+	 * non-reentrant; it is purely a per-call local.
+	 */
+	enum mhip_status_type status;
+	int ret = 0;
+	enum ipa_client_type ul_chan, dl_chan;
+	enum ipa_mpm_mhip_client_type mhip_client = IPA_MPM_MHIP_TETH;
+
+	if (!ipa3_is_mhip_offload_enabled())
+		return -EPERM;
+
+	/* Find the probe index serving the tethering client. */
+	for (i = 0; i < IPA_MPM_MHIP_CH_ID_MAX; i++) {
+		if (ipa_mpm_pipes[i].mhip_client == mhip_client) {
+			probe_id = i;
+			break;
+		}
+	}
+
+	if (probe_id == IPA_MPM_MHIP_CH_ID_MAX) {
+		IPA_MPM_ERR("Unknown probe_id\n");
+		return -EPERM;
+	}
+
+	IPA_MPM_DBG("WAN backhaul available for probe_id = %d\n", probe_id);
+	get_ipa3_client(probe_id, &ul_chan, &dl_chan);
+
+	/* Start UL MHIP channel for offloading the tethering connection */
+	ret = ipa_mpm_vote_unvote_pcie_clk(CLK_ON, probe_id);
+
+	if (ret) {
+		IPA_MPM_ERR("Error clocking on PCIe clk, err = %d\n", ret);
+		return ret;
+	}
+
+	status = ipa_mpm_start_stop_mhip_chan(
+				IPA_MPM_MHIP_CHAN_UL, probe_id, START);
+	switch (status) {
+	case MHIP_STATUS_SUCCESS:
+	case MHIP_STATUS_NO_OP:
+		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_CONNECTED);
+		ret = ipa_mpm_start_stop_mhip_data_path(probe_id, START);
+
+		if (ret) {
+			IPA_MPM_ERR("Couldnt start UL GSI channel");
+			ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
+			return ret;
+		}
+
+		if (status == MHIP_STATUS_NO_OP) {
+			/* Channels already have been started,
+			 * we can devote for pcie clocks
+			 */
+			ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
+		}
+		break;
+	case MHIP_STATUS_EP_NOT_READY:
+		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_INPROGRESS);
+		break;
+	case MHIP_STATUS_FAIL:
+	case MHIP_STATUS_BAD_STATE:
+	case MHIP_STATUS_EP_NOT_FOUND:
+		IPA_MPM_ERR("UL chan cant be started err =%d\n", status);
+		ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
+		ret = -EFAULT;
+		break;
+	default:
+		IPA_MPM_ERR("Err not found\n");
+		break;
+	}
+
+	return ret;
+}
+
+/* Record the new GSI channel state for @probe_id under its mutex. */
+static void ipa_mpm_change_gsi_state(int probe_id,
+	enum ipa_mpm_gsi_state next_state)
+{
+	if (probe_id < IPA_MPM_MHIP_CH_ID_MAX) {
+		mutex_lock(&ipa_mpm_ctx->md[probe_id].mutex);
+		ipa_mpm_ctx->md[probe_id].gsi_state = next_state;
+		IPA_MPM_DBG("GSI next_state = %d\n",
+			ipa_mpm_ctx->md[probe_id].gsi_state);
+		mutex_unlock(&ipa_mpm_ctx->md[probe_id].mutex);
+	}
+}
+
+/*
+ * ipa_mpm_change_teth_state() - advance the tethering state machine.
+ * @probe_id: index into ipa_mpm_ctx->md[].
+ * @next_state: requested state; a CONNECTED request from INIT is demoted
+ * to INPROGRESS (connection must pass through the in-progress phase).
+ */
+static void ipa_mpm_change_teth_state(int probe_id,
+	enum ipa_mpm_teth_state next_state)
+{
+	enum ipa_mpm_teth_state curr_state;
+
+	if (probe_id >= IPA_MPM_MHIP_CH_ID_MAX) {
+		IPA_MPM_ERR("Unknown probe_id\n");
+		return;
+	}
+
+	curr_state = ipa_mpm_ctx->md[probe_id].teth_state;
+
+	/* Bug fix: the format string was garbled
+	 * ("ip_state = %d mhip_s", no newline).
+	 */
+	IPA_MPM_DBG("curr_state = %d, next_state = %d\n",
+		curr_state, next_state);
+
+	switch (curr_state) {
+	case IPA_MPM_TETH_INIT:
+		if (next_state == IPA_MPM_TETH_CONNECTED)
+			next_state = IPA_MPM_TETH_INPROGRESS;
+		break;
+	case IPA_MPM_TETH_INPROGRESS:
+		break;
+	case IPA_MPM_TETH_CONNECTED:
+		break;
+	default:
+		IPA_MPM_ERR("No change in state\n");
+		break;
+	}
+
+	ipa_mpm_ctx->md[probe_id].teth_state = next_state;
+	IPA_MPM_DBG("next_state = %d\n", next_state);
+}
+
+/*
+ * ipa_mpm_read_channel() - debug helper that queries and logs GSI channel
+ * info for the endpoint mapped to @chan.
+ */
+static void ipa_mpm_read_channel(enum ipa_client_type chan)
+{
+	struct gsi_chan_info chan_info;
+	int ipa_ep_idx;
+	struct ipa3_ep_context *ep;
+	int res;
+
+	ipa_ep_idx = ipa3_get_ep_mapping(chan);
+
+	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
+		IPAERR("failed to get idx");
+		return;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	/* Bug fix: 'ep' is a pointer and was printed with %d; log the
+	 * endpoint index instead.
+	 */
+	IPA_MPM_ERR("Reading channel for chan %d, ep = %d, gsi_chan_hdl = %d\n",
+		chan, ipa_ep_idx, ep->gsi_chan_hdl);
+
+	res = ipa3_get_gsi_chan_info(&chan_info, ep->gsi_chan_hdl);
+	if (res)
+		IPA_MPM_ERR("Reading of channel failed for ep %d\n",
+			ipa_ep_idx);
+}
+
+/*
+ * Enable or disable the MHIP data path for @probe_id.
+ * Start removes HOLB on the UL producer pipe; stop disables it.
+ * The consumer side needs no action (no delay on those pipes).
+ */
+static int ipa_mpm_start_stop_mhip_data_path(int probe_id,
+	enum ipa_mpm_start_stop_type start)
+{
+	enum ipa_client_type ul_chan, dl_chan;
+	int ep_idx;
+	int res = 0;
+
+	if (probe_id >= IPA_MPM_MHIP_CH_ID_MAX) {
+		IPA_MPM_ERR("Unknown probe_id\n");
+		return 0;
+	}
+	get_ipa3_client(probe_id, &ul_chan, &dl_chan);
+	IPA_MPM_DBG("Start/Stop Data Path ? = %d\n", start);
+
+	/* Defensive check to make sure start/stop MHIP channels only if
+	 *  MHIP channels are allocated.
+	 */
+	if (ipa_mpm_ctx->md[probe_id].gsi_state < GSI_ALLOCATED) {
+		IPA_MPM_ERR("Cant start/stop data, GSI state = %d\n",
+			ipa_mpm_ctx->md[probe_id].gsi_state);
+		return -EFAULT;
+	}
+
+	if (start)
+		IPA_MPM_DBG("Enabling data path\n");
+	else
+		IPA_MPM_DBG("Disabling data path\n");
+
+	/* Only the producer (UL) pipe needs HOLB manipulation. */
+	if (ul_chan == IPA_CLIENT_MAX)
+		return res;
+
+	if (start)
+		IPA_MPM_DBG("Removing HOLB on ep = %s\n",
+			__stringify(ul_chan));
+
+	ep_idx = ipa3_get_ep_mapping(ul_chan);
+	if (ep_idx == IPA_EP_NOT_ALLOCATED) {
+		IPAERR("failed to get idx");
+		return ep_idx;
+	}
+
+	if (start) {
+		res = ipa3_enable_data_path(ep_idx);
+		if (res)
+			IPA_MPM_ERR("Enable data path failed res=%d\n",
+				res);
+	} else {
+		res = ipa3_disable_data_path(ep_idx);
+		if (res)
+			IPA_MPM_ERR("disable data path failed res=%d\n",
+				res);
+	}
+
+	return res;
+}
+
+/* ipa_mpm_mhi_probe_cb is received for each MHI'/MHI channel
+ * Currently we have 4 MHI channels.
+ *
+ * Sets up the host<->device MHIP channel/event contexts for the probed
+ * channel pair, connects the host GSI pipes, maps the device doorbells
+ * through SMMU, rings the initial transfer/event ring doorbells on both
+ * sides, then starts or stops the UL channel depending on the current
+ * tethering state. Returns 0 on success, negative errno on failure
+ * (failure paths assert).
+ */
+static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
+	const struct mhi_device_id *mhi_id)
+{
+	struct ipa_mpm_channel *ch;
+	int ret;
+	enum ipa_client_type ul_prod, dl_cons;
+	int probe_id;
+	struct ipa_req_chan_out_params ul_out_params, dl_out_params;
+	void __iomem  *db_addr;
+	int ipa_ep_idx;
+	struct ipa3_ep_context *ep;
+	u32 evt_ring_db_addr_low, evt_ring_db_addr_high;
+	u32 wp_addr;
+
+	IPA_MPM_FUNC_ENTRY();
+
+	if (ipa_mpm_ctx == NULL) {
+		IPA_MPM_ERR("ipa_mpm_ctx is NULL not expected, returning..\n");
+		return -ENOMEM;
+	}
+
+	probe_id = get_idx_from_id(mhi_id);
+
+	if (probe_id >= IPA_MPM_MHIP_CH_ID_MAX) {
+		/* NOTE(review): mhi_id is a struct pointer printed with %s -
+		 * this looks like a format/argument mismatch; confirm the
+		 * intended field (e.g. a channel-name string).
+		 */
+		IPA_MPM_ERR("chan=%s is not supported for now\n", mhi_id);
+		return -EPERM;
+	}
+
+	if (ipa_mpm_ctx->md[probe_id].init_complete) {
+		IPA_MPM_ERR("Probe initialization already done, returning\n");
+		return -EPERM;
+	}
+
+	IPA_MPM_DBG("Received probe for id=%d\n", probe_id);
+
+	if (probe_id == IPA_MPM_MHIP_CH_ID_2) {
+		/* NOTE :: DPL not supported yet , remove later */
+		IPA_MPM_DBG("DPL not supported yet - returning for DPL..\n");
+		return 0;
+	}
+
+	get_ipa3_client(probe_id, &ul_prod, &dl_cons);
+
+	/* Vote for IPA clock for first time in initialization seq.
+	 * IPA clock will be devoted when MHI enters LPM
+	 * PCIe clock will be voted / devoted with every channel probe
+	 * we receive.
+	 * ul_prod = Host -> Device
+	 * dl_cons = Device -> Host
+	 */
+	ipa_mpm_ctx->md[probe_id].mhi_dev = mhi_dev;
+
+	ipa_mpm_vote_unvote_pcie_clk(CLK_ON, probe_id);
+	ipa_mpm_vote_unvote_ipa_clk(CLK_ON);
+	/* NOTE :: Duplicate IPA vote - just for BU, remove later */
+	ipa_mpm_vote_unvote_ipa_clk(CLK_ON);
+
+	IPA_MPM_DBG("ul chan = %d, dl_chan = %d\n", ul_prod, dl_cons);
+
+	/*
+	 * Set up MHI' pipes for Device IPA filling in
+	 * Channel Context and Event Context.
+	 * These params will be sent to Device side.
+	 * UL CHAN = HOST -> Device
+	 * DL CHAN = Device -> HOST
+	 * per channel a TRE and EV is allocated.
+	 * for a UL channel -
+	 * IPA HOST PROD TRE -> IPA DEVICE CONS EV
+	 * IPA HOST PROD EV ->  IPA DEVICE CONS TRE
+	 * for a DL channel -
+	 * IPA Device PROD TRE -> IPA HOST CONS EV
+	 * IPA Device PROD EV ->  IPA HOST CONS TRE
+	 */
+	if (probe_id != IPA_MPM_MHIP_CH_ID_2) {
+		if (ul_prod != IPA_CLIENT_MAX) {
+			/* store UL properties */
+			ch = &ipa_mpm_ctx->md[probe_id].ul_prod;
+			/* Store Channel properties */
+			ch->chan_props.id = mhi_dev->ul_chan_id;
+			ch->chan_props.device_db =
+				ipa_mpm_ctx->dev_info.chdb_base +
+				ch->chan_props.id * 8;
+			/* Fill Channel Conext to be sent to Device side */
+			ch->chan_props.ch_ctx.chtype =
+				IPA_MPM_MHI_HOST_UL_CHANNEL;
+			ch->chan_props.ch_ctx.erindex =
+				mhi_dev->ul_event_id;
+			ch->chan_props.ch_ctx.rlen = (IPA_MPM_RING_LEN) *
+				GSI_EVT_RING_RE_SIZE_16B;
+			/* Store Event properties */
+			ch->evt_props.ev_ctx.update_rp_modc = 0;
+			ch->evt_props.ev_ctx.update_rp_intmodt = 0;
+			ch->evt_props.ev_ctx.ertype = 1;
+			ch->evt_props.ev_ctx.rlen = (IPA_MPM_RING_LEN) *
+				GSI_EVT_RING_RE_SIZE_16B;
+			ch->evt_props.ev_ctx.buff_size = TRE_BUFF_SIZE;
+			ch->evt_props.device_db =
+				ipa_mpm_ctx->dev_info.erdb_base +
+				ch->chan_props.ch_ctx.erindex * 8;
+		}
+	}
+	if (dl_cons != IPA_CLIENT_MAX) {
+		/* store DL channel properties */
+		ch = &ipa_mpm_ctx->md[probe_id].dl_cons;
+		/* Store Channel properties */
+		ch->chan_props.id = mhi_dev->dl_chan_id;
+		ch->chan_props.device_db =
+			ipa_mpm_ctx->dev_info.chdb_base +
+			ch->chan_props.id * 8;
+		/* Fill Channel Conext to be be sent to Dev side */
+		ch->chan_props.ch_ctx.chstate = 1;
+		ch->chan_props.ch_ctx.chtype =
+			IPA_MPM_MHI_HOST_DL_CHANNEL;
+		ch->chan_props.ch_ctx.erindex = mhi_dev->dl_event_id;
+		ch->chan_props.ch_ctx.rlen = (IPA_MPM_RING_LEN) *
+			GSI_EVT_RING_RE_SIZE_16B;
+		/* Store Event properties */
+		ch->evt_props.ev_ctx.update_rp_modc = 0;
+		ch->evt_props.ev_ctx.update_rp_intmodt = 0;
+		ch->evt_props.ev_ctx.ertype = 1;
+		ch->evt_props.ev_ctx.rlen = (IPA_MPM_RING_LEN) *
+			GSI_EVT_RING_RE_SIZE_16B;
+		ch->evt_props.ev_ctx.buff_size = TRE_BUFF_SIZE;
+		ch->evt_props.device_db =
+			ipa_mpm_ctx->dev_info.erdb_base +
+			ch->chan_props.ch_ctx.erindex * 8;
+	}
+	/* connect Host GSI pipes with MHI' protocol */
+	if (probe_id != IPA_MPM_MHIP_CH_ID_2)  {
+		ret = ipa_mpm_connect_mhip_gsi_pipe(ul_prod,
+			probe_id, &ul_out_params);
+		if (ret) {
+			IPA_MPM_ERR("failed connecting MPM client %d\n",
+					ul_prod);
+			goto fail_gsi_setup;
+		}
+	}
+	ret = ipa_mpm_connect_mhip_gsi_pipe(dl_cons, probe_id, &dl_out_params);
+	if (ret) {
+		IPA_MPM_ERR("connecting MPM client = %d failed\n",
+			dl_cons);
+		goto fail_gsi_setup;
+	}
+	/* Map the device doorbells into the PCIe SMMU domain so the host
+	 * can publish ring-pointer updates to the device.
+	 */
+	if (probe_id != IPA_MPM_MHIP_CH_ID_2)  {
+		if (ul_prod != IPA_CLIENT_MAX) {
+			ch = &ipa_mpm_ctx->md[probe_id].ul_prod;
+			ch->evt_props.ev_ctx.update_rp_addr =
+				ipa_mpm_smmu_map_doorbell(
+					MHIP_SMMU_DOMAIN_PCIE,
+					ul_out_params.db_reg_phs_addr_lsb);
+
+			if (ch->evt_props.ev_ctx.update_rp_addr == 0)
+				ipa_assert();
+
+			ret = __ipa_mpm_configure_mhi_device(
+					ch, probe_id, DMA_TO_HIPA);
+			if (ret) {
+				IPA_MPM_ERR("configure_mhi_dev fail %d\n",
+						ret);
+				goto fail_smmu;
+			}
+		}
+	}
+
+	if (dl_cons != IPA_CLIENT_MAX) {
+		ch = &ipa_mpm_ctx->md[probe_id].dl_cons;
+		ch->evt_props.ev_ctx.update_rp_addr =
+			ipa_mpm_smmu_map_doorbell(
+					MHIP_SMMU_DOMAIN_PCIE,
+					dl_out_params.db_reg_phs_addr_lsb);
+
+		if (ch->evt_props.ev_ctx.update_rp_addr == 0)
+			ipa_assert();
+
+		ret = __ipa_mpm_configure_mhi_device(ch, probe_id,
+					DMA_FROM_HIPA);
+		if (ret) {
+			IPA_MPM_ERR("mpm_config_mhi_dev failed %d\n", ret);
+			goto fail_smmu;
+		}
+	}
+
+	ret = mhi_prepare_for_transfer(ipa_mpm_ctx->md[probe_id].mhi_dev);
+	if (ret) {
+		IPA_MPM_ERR("mhi_prepare_for_transfer failed %d\n", ret);
+		goto fail_smmu;
+	}
+
+	/*
+	 * Ring initial channel db - Host Side UL and Device side DL channel.
+	 * To ring doorbell, write "WP" into doorbell register.
+	 * This WP should be set to 1 element less than ring max.
+	 */
+
+	/* Ring UL PRODUCER TRANSFER RING (HOST IPA -> DEVICE IPA) Doorbell */
+	if (ul_prod != IPA_CLIENT_MAX) {
+		IPA_MPM_DBG("Host UL TR PA DB = 0X%0x\n",
+			ul_out_params.db_reg_phs_addr_lsb);
+
+		db_addr = ioremap(
+			(phys_addr_t)(ul_out_params.db_reg_phs_addr_lsb), 4);
+
+		wp_addr = ipa_mpm_ctx->md[probe_id].ul_prod_ring.tr_pa +
+			((IPA_MPM_RING_LEN - 1) * GSI_CHAN_RE_SIZE_16B);
+
+		iowrite32(wp_addr, db_addr);
+
+		IPA_MPM_DBG("Host UL TR  DB = 0X%0x, wp_addr = 0X%0x",
+			db_addr, wp_addr);
+
+		iounmap(db_addr);
+		ipa_mpm_read_channel(ul_prod);
+	}
+
+	/* Ring UL PRODUCER EVENT RING (HOST IPA -> DEVICE IPA) Doorbell
+	 * Ring the event DB to a value outside the
+	 * ring range such that rp and wp never meet.
+	 */
+	if (ul_prod != IPA_CLIENT_MAX) {
+		ipa_ep_idx = ipa3_get_ep_mapping(ul_prod);
+		if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
+			IPA_MPM_ERR("fail to alloc EP.\n");
+			goto fail_start_channel;
+		}
+		ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+		IPA_MPM_DBG("for ep_idx %d , gsi_evt_ring_hdl = %d\n",
+			ipa_ep_idx, ep->gsi_evt_ring_hdl);
+		gsi_query_evt_ring_db_addr(ep->gsi_evt_ring_hdl,
+			&evt_ring_db_addr_low, &evt_ring_db_addr_high);
+
+		IPA_MPM_DBG("Host UL ER PA DB = 0X%0x\n",
+			evt_ring_db_addr_low);
+
+		db_addr = ioremap((phys_addr_t)(evt_ring_db_addr_low), 4);
+
+		wp_addr = ipa_mpm_ctx->md[probe_id].ul_prod_ring.er_pa +
+			((IPA_MPM_RING_LEN + 1) * GSI_EVT_RING_RE_SIZE_16B);
+		IPA_MPM_DBG("Host UL ER  DB = 0X%0x, wp_addr = 0X%0x",
+			db_addr, wp_addr);
+
+		iowrite32(wp_addr, db_addr);
+		iounmap(db_addr);
+	}
+
+	/* Ring DEVICE IPA DL CONSUMER Event Doorbell */
+	if (ul_prod != IPA_CLIENT_MAX) {
+		db_addr = ioremap((phys_addr_t)
+			(ipa_mpm_ctx->md[probe_id].ul_prod.evt_props.device_db),
+			4);
+
+		wp_addr = ipa_mpm_ctx->md[probe_id].ul_prod_ring.tr_pa +
+			((IPA_MPM_RING_LEN + 1) * GSI_EVT_RING_RE_SIZE_16B);
+
+		iowrite32(wp_addr, db_addr);
+		iounmap(db_addr);
+	}
+
+	/* Ring DL PRODUCER (DEVICE IPA -> HOST IPA) Doorbell */
+	if (dl_cons != IPA_CLIENT_MAX) {
+		db_addr = ioremap((phys_addr_t)
+		(ipa_mpm_ctx->md[probe_id].dl_cons.chan_props.device_db),
+		4);
+
+		wp_addr = ipa_mpm_ctx->md[probe_id].dl_prod_ring.tr_pa +
+			((IPA_MPM_RING_LEN - 1) * GSI_CHAN_RE_SIZE_16B);
+
+		IPA_MPM_DBG("Device DL TR  DB = 0X%0X, wp_addr = 0X%0x",
+			db_addr, wp_addr);
+
+		iowrite32(wp_addr, db_addr);
+
+		iounmap(db_addr);
+	}
+
+	/*
+	 * Ring event ring DB on Device side.
+	 * ipa_mpm should ring the event DB to a value outside the
+	 * ring range such that rp and wp never meet.
+	 */
+	if (dl_cons != IPA_CLIENT_MAX) {
+		db_addr =
+		ioremap(
+		(phys_addr_t)
+		(ipa_mpm_ctx->md[probe_id].dl_cons.evt_props.device_db),
+		4);
+
+		wp_addr = ipa_mpm_ctx->md[probe_id].dl_prod_ring.er_pa +
+			((IPA_MPM_RING_LEN + 1) * GSI_EVT_RING_RE_SIZE_16B);
+
+		iowrite32(wp_addr, db_addr);
+		IPA_MPM_DBG("Device  UL ER  DB = 0X%0X,wp_addr = 0X%0x",
+			db_addr, wp_addr);
+		iounmap(db_addr);
+	}
+
+	/* Ring DL EVENT RING CONSUMER (DEVICE IPA CONSUMER) Doorbell */
+	if (dl_cons != IPA_CLIENT_MAX) {
+		ipa_ep_idx = ipa3_get_ep_mapping(dl_cons);
+		if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
+			IPA_MPM_ERR("fail to alloc EP.\n");
+			goto fail_start_channel;
+		}
+		ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+		gsi_query_evt_ring_db_addr(ep->gsi_evt_ring_hdl,
+			&evt_ring_db_addr_low, &evt_ring_db_addr_high);
+		IPA_MPM_DBG("Host DL ER PA DB = 0X%0x\n",
+				evt_ring_db_addr_low);
+		db_addr = ioremap((phys_addr_t)(evt_ring_db_addr_low), 4);
+
+		wp_addr = ipa_mpm_ctx->md[probe_id].dl_prod_ring.tr_pa +
+			((IPA_MPM_RING_LEN + 1) * GSI_EVT_RING_RE_SIZE_16B);
+		iowrite32(wp_addr, db_addr);
+		IPA_MPM_DBG("Host  DL ER  DB = 0X%0X, wp_addr = 0X%0x",
+			db_addr, wp_addr);
+		iounmap(db_addr);
+	}
+
+	/* Check if TETH connection is in progress, no op
+	 * if no then Stop UL channel.
+	 */
+	switch (ipa_mpm_ctx->md[probe_id].teth_state) {
+	case IPA_MPM_TETH_INIT:
+		/* No teth started yet, disable UL channel */
+		ipa_mpm_start_stop_mhip_chan(IPA_MPM_MHIP_CHAN_UL,
+						probe_id, STOP);
+
+		/* Disable data path */
+		if (ipa_mpm_start_stop_mhip_data_path(probe_id, STOP)) {
+			IPA_MPM_ERR("MHIP Enable data path failed\n");
+			goto fail_start_channel;
+		}
+		break;
+	case IPA_MPM_TETH_INPROGRESS:
+	case IPA_MPM_TETH_CONNECTED:
+		IPA_MPM_DBG("UL channel is already started, continue\n");
+		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_CONNECTED);
+
+		/* Enable data path */
+		if (ipa_mpm_start_stop_mhip_data_path(probe_id, START)) {
+			IPA_MPM_ERR("MHIP Enable data path failed\n");
+			goto fail_start_channel;
+		}
+
+		/* Lyft the delay for rmnet USB prod pipe */
+		ipa3_set_reset_client_prod_pipe_delay(false,
+			IPA_CLIENT_USB_PROD);
+		break;
+	default:
+		/* NOTE(review): this IPA_MPM_DBG has a %d but no matching
+		 * argument - confirm intended value (teth_state).
+		 */
+		IPA_MPM_DBG("No op for UL channel, in teth state = %d");
+		break;
+	}
+
+	IPA_MPM_FUNC_EXIT();
+	return 0;
+
+fail_gsi_setup:
+fail_start_channel:
+fail_smmu:
+	/* All failure paths assert; clocks voted above are not unwound. */
+	if (ipa_mpm_ctx->dev_info.ipa_smmu_enabled)
+		IPA_MPM_DBG("SMMU failed\n");
+	ipa_assert();
+	return ret;
+}
+
+/* Populate the static MHIP channel-id -> IPA client / EP-config table.
+ * Called once from probe before any MHI channel probe arrives.
+ */
+static void ipa_mpm_init_mhip_channel_info(void)
+{
+	/* IPA_MPM_MHIP_CH_ID_0 => MHIP TETH PIPES  */
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_0].dl_cons.ipa_client =
+		IPA_CLIENT_MHI_PRIME_TETH_PROD;
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_0].dl_cons.ep_cfg =
+		mhip_dl_teth_ep_cfg;
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_0].ul_prod.ipa_client =
+		IPA_CLIENT_MHI_PRIME_TETH_CONS;
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_0].ul_prod.ep_cfg =
+		mhip_ul_teth_ep_cfg;
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_0].mhip_client =
+		IPA_MPM_MHIP_TETH;
+
+	/* IPA_MPM_MHIP_CH_ID_1 => MHIP RMNET PIPES */
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_1].dl_cons.ipa_client =
+		IPA_CLIENT_MHI_PRIME_RMNET_PROD;
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_1].dl_cons.ep_cfg =
+		mhip_dl_rmnet_ep_cfg;
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_1].ul_prod.ipa_client =
+		IPA_CLIENT_MHI_PRIME_RMNET_CONS;
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_1].ul_prod.ep_cfg =
+		mhip_ul_rmnet_ep_cfg;
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_1].mhip_client =
+		IPA_MPM_MHIP_USB_RMNET;
+
+	/* IPA_MPM_MHIP_CH_ID_2 => MHIP ADPL PIPE (DL only, no UL client) */
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_2].dl_cons.ipa_client =
+		IPA_CLIENT_MHI_PRIME_DPL_PROD;
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_2].dl_cons.ep_cfg =
+		mhip_dl_dpl_ep_cfg;
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_2].ul_prod.ipa_client =
+		IPA_CLIENT_MAX;
+	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_2].mhip_client =
+	IPA_MPM_MHIP_USB_DPL;
+}
+
+/* MHI remove callback: tears down every MHIP channel. @mhi_dev is unused
+ * because the shutdown helper iterates over all probe indices itself.
+ */
+static void ipa_mpm_mhi_remove_cb(struct mhi_device *mhi_dev)
+{
+	IPA_MPM_FUNC_ENTRY();
+	ipa_mpm_mhip_shutdown();
+	IPA_MPM_FUNC_EXIT();
+}
+
+/*
+ * ipa_mpm_mhi_status_cb() - MHI status callback; stops/starts the DL
+ * channel and unvotes/votes the IPA clock on LPM enter/exit. Callbacks
+ * for devices we did not probe are ignored.
+ */
+static void ipa_mpm_mhi_status_cb(struct mhi_device *mhi_dev,
+				enum MHI_CB mhi_cb)
+{
+	int mhip_idx;
+	enum mhip_status_type status;
+
+	IPA_MPM_DBG("%d\n", mhi_cb);
+
+	for (mhip_idx = 0; mhip_idx < IPA_MPM_MHIP_CH_ID_MAX; mhip_idx++) {
+		if (mhi_dev == ipa_mpm_ctx->md[mhip_idx].mhi_dev)
+			break;
+	}
+	if (mhip_idx >= IPA_MPM_MHIP_CH_ID_MAX) {
+		IPA_MPM_DBG("ignoring secondary callbacks\n");
+		return;
+	}
+	switch (mhi_cb) {
+	case MHI_CB_IDLE:
+		break;
+	case MHI_CB_LPM_ENTER:
+		status = ipa_mpm_start_stop_mhip_chan(IPA_MPM_MHIP_CHAN_DL,
+							mhip_idx, STOP);
+		IPA_MPM_DBG("status = %d\n", status);
+		ipa_mpm_vote_unvote_ipa_clk(CLK_OFF);
+		break;
+	case MHI_CB_LPM_EXIT:
+		status = ipa_mpm_start_stop_mhip_chan(IPA_MPM_MHIP_CHAN_DL,
+							mhip_idx, START);
+		ipa_mpm_vote_unvote_ipa_clk(CLK_ON);
+		break;
+	case MHI_CB_EE_RDDM:
+	case MHI_CB_PENDING_DATA:
+	case MHI_CB_SYS_ERROR:
+	case MHI_CB_FATAL_ERROR:
+		IPA_MPM_ERR("unexpected event %d\n", mhi_cb);
+		break;
+	default:
+		/* Fix: handle future/unknown MHI_CB values explicitly
+		 * instead of silently falling out of the switch.
+		 */
+		IPA_MPM_ERR("unexpected event %d\n", mhi_cb);
+		break;
+	}
+}
+
+/* Configure @src_pipe in DMA mode so its traffic is forwarded directly
+ * to @dst_pipe. Returns the result of ipa_cfg_ep().
+ */
+int ipa_mpm_set_dma_mode(enum ipa_client_type src_pipe,
+	enum ipa_client_type dst_pipe)
+{
+	struct ipa_ep_cfg dma_cfg = { { 0 } };
+	int rc;
+
+	IPA_MPM_FUNC_ENTRY();
+	IPA_MPM_DBG("DMA from %d to %d\n", src_pipe, dst_pipe);
+
+	/* Set USB PROD PIPE DMA to MHIP PROD PIPE */
+	dma_cfg.mode.mode = IPA_DMA;
+	dma_cfg.mode.dst = dst_pipe;
+	dma_cfg.seq.set_dynamic = true;
+
+	rc = ipa_cfg_ep(ipa_get_ep_mapping(src_pipe), &dma_cfg);
+	IPA_MPM_FUNC_EXIT();
+
+	return rc;
+}
+
+/* Reset @src_pipe back to IPA_BASIC (non-DMA) mode, pointing its default
+ * destination at APPS LAN CONS.
+ * NOTE(review): @dst_pipe is unused here (destination is hard-coded) -
+ * confirm whether it should be honored or removed from the interface.
+ */
+int ipa_mpm_reset_dma_mode(enum ipa_client_type src_pipe,
+	enum ipa_client_type dst_pipe)
+{
+	int result = 0;
+	struct ipa_ep_cfg ep_cfg = { { 0 } };
+
+	IPA_MPM_FUNC_ENTRY();
+	IPA_MPM_DBG("DMA from %d to %d\n", src_pipe, dst_pipe);
+
+	/* Restore basic mode on src_pipe (undoes ipa_mpm_set_dma_mode) */
+	ep_cfg.mode.mode = IPA_BASIC;
+	ep_cfg.mode.dst = IPA_CLIENT_APPS_LAN_CONS;
+	ep_cfg.seq.set_dynamic = true;
+
+	result = ipa_cfg_ep(ipa_get_ep_mapping(src_pipe), &ep_cfg);
+	IPA_MPM_FUNC_EXIT();
+
+	return result;
+}
+
+/* Translate a USB tethering protocol into the matching MHIP client type;
+ * unknown protocols map to IPA_MPM_MHIP_NONE.
+ */
+void ipa_mpm_mhip_map_prot(enum ipa_usb_teth_prot prot,
+	enum ipa_mpm_mhip_client_type *mhip_client)
+{
+	if (prot == IPA_USB_RNDIS)
+		*mhip_client = IPA_MPM_MHIP_TETH;
+	else if (prot == IPA_USB_RMNET)
+		*mhip_client = IPA_MPM_MHIP_USB_RMNET;
+	else if (prot == IPA_USB_DIAG)
+		*mhip_client = IPA_MPM_MHIP_USB_DPL;
+	else
+		*mhip_client = IPA_MPM_MHIP_NONE;
+
+	IPA_MPM_DBG("Mapped xdci prot %d -> MHIP prot %d\n", prot,
+		*mhip_client);
+}
+
+/* Enable the MHIP pipe that backs the given USB tethering protocol:
+ * maps the protocol to a probe index, votes for PCIe, starts the UL
+ * channel and enables the data path. Returns 0 on success/no-op,
+ * negative errno on failure.
+ */
+int ipa_mpm_mhip_xdci_pipe_enable(enum ipa_usb_teth_prot xdci_teth_prot)
+{
+	int probe_id = IPA_MPM_MHIP_CH_ID_MAX;
+	int i;
+	enum ipa_mpm_mhip_client_type mhip_client;
+	enum mhip_status_type status;
+	int ret = 0;
+
+	if (ipa_mpm_ctx == NULL) {
+		IPA_MPM_ERR("MPM not platform probed yet, returning ..\n");
+		return 0;
+	}
+
+	ipa_mpm_mhip_map_prot(xdci_teth_prot, &mhip_client);
+
+	/* Locate the probe index serving this MHIP client. */
+	for (i = 0; i < IPA_MPM_MHIP_CH_ID_MAX; i++) {
+		if (ipa_mpm_pipes[i].mhip_client == mhip_client) {
+			probe_id = i;
+			break;
+		}
+	}
+
+	if (probe_id == IPA_MPM_MHIP_CH_ID_MAX) {
+		IPA_MPM_ERR("Unknown probe_id\n");
+		return 0;
+	}
+
+	IPA_MPM_DBG("Connect xdci prot %d -> mhip_client = %d probe_id = %d\n",
+		xdci_teth_prot, mhip_client, probe_id);
+
+	ipa_mpm_ctx->md[probe_id].mhip_client = mhip_client;
+
+	switch (mhip_client) {
+	case IPA_MPM_MHIP_USB_RMNET:
+		ipa_mpm_set_dma_mode(IPA_CLIENT_USB_PROD,
+			IPA_CLIENT_MHI_PRIME_RMNET_CONS);
+		break;
+	case IPA_MPM_MHIP_TETH:
+	case IPA_MPM_MHIP_USB_DPL:
+		IPA_MPM_DBG("Teth connecting for prot %d\n", mhip_client);
+		return 0;
+	default:
+		/* NOTE(review): unsupported clients fall through and still
+		 * vote PCIe and start the UL channel below - confirm this
+		 * is intended rather than an early return.
+		 */
+		IPA_MPM_ERR("mhip_client = %d not supported\n", mhip_client);
+		ret = 0;
+		break;
+	}
+
+	/* Start UL MHIP channel for offloading the tethering connection */
+	ret = ipa_mpm_vote_unvote_pcie_clk(CLK_ON, probe_id);
+
+	if (ret) {
+		IPA_MPM_ERR("Error cloking on PCIe clk, err = %d\n", ret);
+		return ret;
+	}
+
+	status = ipa_mpm_start_stop_mhip_chan(IPA_MPM_MHIP_CHAN_UL,
+						probe_id, START);
+
+	switch (status) {
+	case MHIP_STATUS_SUCCESS:
+	case MHIP_STATUS_NO_OP:
+		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_CONNECTED);
+		ipa_mpm_start_stop_mhip_data_path(probe_id, START);
+		/* Lift the delay for rmnet USB prod pipe */
+		ipa3_set_reset_client_prod_pipe_delay(false,
+			IPA_CLIENT_USB_PROD);
+		if (status == MHIP_STATUS_NO_OP) {
+			/* Channels already have been started,
+			 * we can devote for pcie clocks
+			 */
+			ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
+		}
+		break;
+	case MHIP_STATUS_EP_NOT_READY:
+		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_INPROGRESS);
+		break;
+	case MHIP_STATUS_FAIL:
+	case MHIP_STATUS_BAD_STATE:
+	case MHIP_STATUS_EP_NOT_FOUND:
+		IPA_MPM_ERR("UL chan cant be started err =%d\n", status);
+		ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
+		ret = -EFAULT;
+		break;
+	default:
+		IPA_MPM_ERR("Err not found\n");
+		break;
+	}
+	return ret;
+}
+
+/* Stop the UL data path for the MHIP client backing the given USB
+ * tethering protocol. Returns 0 on success or when there is nothing to
+ * do; otherwise the error from the data-path stop.
+ */
+int ipa_mpm_mhip_ul_data_stop(enum ipa_usb_teth_prot xdci_teth_prot)
+{
+	enum ipa_mpm_mhip_client_type mhip_client;
+	int probe_id = IPA_MPM_MHIP_CH_ID_MAX;
+	int idx;
+	int ret;
+
+	if (ipa_mpm_ctx == NULL) {
+		IPA_MPM_ERR("MPM not platform probed, returning ..\n");
+		return 0;
+	}
+
+	ipa_mpm_mhip_map_prot(xdci_teth_prot, &mhip_client);
+
+	for (idx = 0; idx < IPA_MPM_MHIP_CH_ID_MAX; idx++) {
+		if (ipa_mpm_pipes[idx].mhip_client == mhip_client) {
+			probe_id = idx;
+			break;
+		}
+	}
+
+	if (probe_id == IPA_MPM_MHIP_CH_ID_MAX) {
+		IPA_MPM_ERR("Invalid probe_id\n");
+		return 0;
+	}
+
+	IPA_MPM_DBG("Map xdci prot %d to mhip_client = %d probe_id = %d\n",
+		xdci_teth_prot, mhip_client, probe_id);
+
+	ret = ipa_mpm_start_stop_mhip_data_path(probe_id, STOP);
+	if (ret)
+		IPA_MPM_ERR("Error stopping UL path, err = %d\n", ret);
+
+	return ret;
+}
+
+/*
+ * ipa_mpm_mhip_xdci_pipe_disable() - tear down the MHIP pipe backing the
+ * given USB tethering protocol: stop the UL channel, stop the data path
+ * and unvote the PCIe clock.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int ipa_mpm_mhip_xdci_pipe_disable(enum ipa_usb_teth_prot xdci_teth_prot)
+{
+	int probe_id = IPA_MPM_MHIP_CH_ID_MAX;
+	int i;
+	enum ipa_mpm_mhip_client_type mhip_client;
+	enum mhip_status_type status;
+	int ret = 0;
+
+	if (ipa_mpm_ctx == NULL) {
+		IPA_MPM_ERR("MPM not platform probed, returning ..\n");
+		return 0;
+	}
+
+	ipa_mpm_mhip_map_prot(xdci_teth_prot, &mhip_client);
+
+	for (i = 0; i < IPA_MPM_MHIP_CH_ID_MAX; i++) {
+		if (ipa_mpm_pipes[i].mhip_client == mhip_client) {
+			probe_id = i;
+			break;
+		}
+	}
+
+	if (probe_id == IPA_MPM_MHIP_CH_ID_MAX) {
+		IPA_MPM_ERR("Invalid probe_id\n");
+		return 0;
+	}
+
+	IPA_MPM_DBG("xdci disconnect prot %d mhip_client = %d probe_id = %d\n",
+			xdci_teth_prot, mhip_client, probe_id);
+
+	switch (mhip_client) {
+	case IPA_MPM_MHIP_USB_RMNET:
+	case IPA_MPM_MHIP_TETH:
+	case IPA_MPM_MHIP_USB_DPL:
+		/* Bug fix: these supported clients previously returned 0
+		 * here, which made the channel-stop and clock-unvote logic
+		 * below unreachable; fall through to the teardown instead.
+		 */
+		IPA_MPM_DBG("Teth Disconnecting for prot %d\n", mhip_client);
+		break;
+	default:
+		IPA_MPM_ERR("mhip_client = %d not supported\n", mhip_client);
+		return 0;
+	}
+
+	status = ipa_mpm_start_stop_mhip_chan(IPA_MPM_MHIP_CHAN_UL,
+		probe_id, STOP);
+
+	switch (status) {
+	case MHIP_STATUS_SUCCESS:
+	case MHIP_STATUS_NO_OP:
+	case MHIP_STATUS_EP_NOT_READY:
+		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_INIT);
+		ipa_mpm_start_stop_mhip_data_path(probe_id, STOP);
+		break;
+	case MHIP_STATUS_FAIL:
+	case MHIP_STATUS_BAD_STATE:
+	case MHIP_STATUS_EP_NOT_FOUND:
+		/* NOTE(review): this unvote plus the unconditional unvote
+		 * below yields a double CLK_OFF on failure - confirm.
+		 */
+		IPA_MPM_ERR("UL chan cant be started err =%d\n", status);
+		ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
+		ret = -EFAULT;
+		break;
+	default:
+		IPA_MPM_ERR("Err not found\n");
+		break;
+	}
+
+	ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
+
+	if (ret) {
+		IPA_MPM_ERR("Error cloking off PCIe clk, err = %d\n", ret);
+		return ret;
+	}
+
+	ipa_mpm_ctx->md[probe_id].mhip_client = IPA_MPM_MHIP_NONE;
+
+	return ret;
+}
+
+/*
+ * ipa_mpm_populate_smmu_info() - set up the carved SMMU context block:
+ * reads the carved IOVA range from DT, checks IPA/PCIe SMMU config
+ * consistency, verifies no overlap with the AP context bank and sets the
+ * 64-bit DMA mask. Idempotent: returns early if already valid.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int ipa_mpm_populate_smmu_info(struct platform_device *pdev)
+{
+	struct ipa_smmu_in_params smmu_in;
+	struct ipa_smmu_out_params smmu_out;
+	u32 carved_iova_ap_mapping[2];
+	struct ipa_smmu_cb_ctx *cb;
+	struct ipa_smmu_cb_ctx *ap_cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
+	int ret = 0;
+
+	if (ipa_mpm_ctx->carved_smmu_cb.valid) {
+		IPA_MPM_DBG("SMMU Context allocated, returning ..\n");
+		return ret;
+	}
+
+	cb = &ipa_mpm_ctx->carved_smmu_cb;
+
+	/* get IPA SMMU enabled status */
+	smmu_in.smmu_client = IPA_SMMU_AP_CLIENT;
+	if (ipa_get_smmu_params(&smmu_in, &smmu_out))
+		ipa_mpm_ctx->dev_info.ipa_smmu_enabled = false;
+	else
+		ipa_mpm_ctx->dev_info.ipa_smmu_enabled =
+		smmu_out.smmu_enable;
+
+	if (of_property_read_u32_array(pdev->dev.of_node, "qcom,iova-mapping",
+		carved_iova_ap_mapping, 2)) {
+		/* Bug fix: the error named "qcom,mpm-iova-mapping" while the
+		 * property actually read is "qcom,iova-mapping".
+		 */
+		IPA_MPM_ERR("failed to read of_node %s\n",
+			"qcom,iova-mapping");
+		return -EINVAL;
+	}
+	ipa_mpm_ctx->dev_info.pcie_smmu_enabled = true;
+
+	if (ipa_mpm_ctx->dev_info.ipa_smmu_enabled !=
+		ipa_mpm_ctx->dev_info.pcie_smmu_enabled) {
+		IPA_MPM_DBG("PCIE/IPA SMMU config mismatch\n");
+		return -EINVAL;
+	}
+
+	cb->va_start = carved_iova_ap_mapping[0];
+	cb->va_size = carved_iova_ap_mapping[1];
+	cb->va_end = cb->va_start + cb->va_size;
+
+	if (cb->va_start >= ap_cb->va_start && cb->va_start < ap_cb->va_end) {
+		IPA_MPM_ERR("MPM iommu and AP overlap addr 0x%lx\n",
+				cb->va_start);
+		ipa_assert();
+		return -EFAULT;
+	}
+
+	cb->dev = ipa_mpm_ctx->dev_info.dev;
+	cb->valid = true;
+	cb->next_addr = cb->va_start;
+
+	if (dma_set_mask_and_coherent(ipa_mpm_ctx->dev_info.dev,
+		DMA_BIT_MASK(64))) {
+		IPA_MPM_ERR("setting DMA mask to 64 failed.\n");
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+/* Platform probe for the ipa_mpm device: waits for IPA readiness (via
+ * callback if IPA is not up yet), allocates the driver context, reads
+ * the MHI channel/event doorbell bases from DT, sets up SMMU info and
+ * registers the MHI driver. Returns 0 on success, negative errno on
+ * failure.
+ */
+static int ipa_mpm_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	int i = 0;
+	int idx = 0;
+
+	IPA_MPM_FUNC_ENTRY();
+
+	if (ipa_mpm_ctx) {
+		IPA_MPM_DBG("MPM is already probed, returning\n");
+		return 0;
+	}
+
+	ret = ipa_register_ipa_ready_cb(ipa_mpm_ipa3_ready_cb, (void *)pdev);
+	/*
+	 * If we received -EEXIST, IPA has initialized. So we need
+	 * to continue the probing process.
+	 * NOTE(review): any non-zero return (not only -EEXIST) continues
+	 * the probe here - confirm other errors should not abort.
+	 */
+	if (!ret) {
+		IPA_MPM_DBG("IPA not ready yet, registering callback\n");
+		return ret;
+	}
+	IPA_MPM_DBG("IPA is ready, continue with probe\n");
+
+	ipa_mpm_ctx = kzalloc(sizeof(*ipa_mpm_ctx), GFP_KERNEL);
+
+	if (!ipa_mpm_ctx)
+		return -ENOMEM;
+
+	/* One mutex per probe slot guards its gsi_state transitions. */
+	for (i = 0; i < IPA_MPM_MHIP_CH_ID_MAX; i++)
+		mutex_init(&ipa_mpm_ctx->md[i].mutex);
+	ipa_mpm_ctx->dev_info.pdev = pdev;
+	ipa_mpm_ctx->dev_info.dev = &pdev->dev;
+
+	ipa_mpm_init_mhip_channel_info();
+
+	if (of_property_read_u32(pdev->dev.of_node, "qcom,mhi-chdb-base",
+		&ipa_mpm_ctx->dev_info.chdb_base)) {
+		IPA_MPM_ERR("failed to read qcom,mhi-chdb-base\n");
+		goto fail_probe;
+	}
+	IPA_MPM_DBG("chdb-base=0x%x\n", ipa_mpm_ctx->dev_info.chdb_base);
+
+	if (of_property_read_u32(pdev->dev.of_node, "qcom,mhi-erdb-base",
+		&ipa_mpm_ctx->dev_info.erdb_base)) {
+		IPA_MPM_ERR("failed to read qcom,mhi-erdb-base\n");
+		goto fail_probe;
+	}
+	IPA_MPM_DBG("erdb-base=0x%x\n", ipa_mpm_ctx->dev_info.erdb_base);
+
+	ret = ipa_mpm_populate_smmu_info(pdev);
+
+	if (ret) {
+		IPA_MPM_DBG("SMMU Config failed\n");
+		goto fail_probe;
+	}
+
+	atomic_set(&ipa_mpm_ctx->ipa_clk_ref_cnt, 0);
+	atomic_set(&ipa_mpm_ctx->pcie_clk_ref_cnt, 0);
+
+	for (idx = 0; idx < IPA_MPM_MHIP_CH_ID_MAX; idx++)
+		ipa_mpm_ctx->md[idx].gsi_state = GSI_INIT;
+
+	ret = mhi_driver_register(&mhi_driver);
+	if (ret) {
+		IPA_MPM_ERR("mhi_driver_register failed %d\n", ret);
+		goto fail_probe;
+	}
+	IPA_MPM_FUNC_EXIT();
+	return 0;
+
+fail_probe:
+	/* NOTE(review): the specific error (e.g. -EINVAL from SMMU setup)
+	 * is discarded and -EFAULT is returned for every failure path.
+	 */
+	kfree(ipa_mpm_ctx);
+	ipa_mpm_ctx = NULL;
+	return -EFAULT;
+}
+
+/* Platform remove: unregister the MHI driver. The context itself is not
+ * freed here; it lives for the lifetime of the module.
+ */
+static int ipa_mpm_remove(struct platform_device *pdev)
+{
+	IPA_MPM_FUNC_ENTRY();
+
+	mhi_driver_unregister(&mhi_driver);
+	IPA_MPM_FUNC_EXIT();
+	return 0;
+}
+
+/* Device-tree match table and platform-driver glue for "qcom,ipa-mpm". */
+static const struct of_device_id ipa_mpm_dt_match[] = {
+	{ .compatible = "qcom,ipa-mpm" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, ipa_mpm_dt_match);
+
+static struct platform_driver ipa_ipa_mpm_driver = {
+	.driver = {
+		.name = "ipa_mpm",
+		.of_match_table = ipa_mpm_dt_match,
+	},
+	.probe = ipa_mpm_probe,
+	.remove = ipa_mpm_remove,
+};
+
+/**
+ * ipa_mpm_init() - Registers ipa_mpm as a platform device for a APQ
+ *
+ * This function is called after bootup for APQ device.
+ * ipa_mpm will register itself as a platform device, and probe
+ * function will get called.
+ *
+ * Return: 0 on success, negative errno from platform_driver_register()
+ * on failure.
+ */
+static int __init ipa_mpm_init(void)
+{
+	IPA_MPM_DBG("register ipa_mpm platform device\n");
+	return platform_driver_register(&ipa_ipa_mpm_driver);
+}
+
+/**
+ * ipa3_is_mhip_offload_enabled() - check if IPA MPM module was initialized
+ * successfully. If it is initialized, MHIP is enabled for teth
+ *
+ * Return value: 1 for yes; 0 for no
+ */
+int ipa3_is_mhip_offload_enabled(void)
+{
+	return (ipa_mpm_ctx == NULL) ? 0 : 1;
+}
+
+late_initcall(ipa_mpm_init);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MHI Proxy Manager Driver");
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index f808f69..6d14b83 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -792,7 +792,7 @@
 {
 	struct ipa_configure_ul_firewall_rules_resp_msg_v01 resp;
 	struct ipa_msg_desc req_desc, resp_desc;
-	int rc;
+	int rc, i;
 
 	IPAWANDBG("IPACM pass %u rules to Q6\n",
 		req->firewall_rules_list_len);
@@ -812,6 +812,37 @@
 	}
 	mutex_unlock(&ipa3_qmi_lock);
 
+	/* check if modem is up */
+	if (!ipa3_qmi_indication_fin ||
+		!ipa3_qmi_modem_init_fin ||
+		!ipa_q6_clnt) {
+		IPAWANDBG("modem QMI service is not up yet\n");
+		return -EINVAL;
+	}
+
+	/* Passing 0 rules means that firewall is disabled */
+	if (req->firewall_rules_list_len == 0)
+		IPAWANDBG("IPACM passed 0 rules to Q6\n");
+
+	if (req->firewall_rules_list_len >= QMI_IPA_MAX_UL_FIREWALL_RULES_V01) {
+		IPAWANERR(
+		"Number of rules passed by IPACM, %d, exceed limit %d\n",
+			req->firewall_rules_list_len,
+			QMI_IPA_MAX_UL_FIREWALL_RULES_V01);
+		return -EINVAL;
+	}
+
+	/* Check for valid IP type */
+	for (i = 0; i < req->firewall_rules_list_len; i++) {
+		if (req->firewall_rules_list[i].ip_type !=
+				QMI_IPA_IP_TYPE_V4_V01 &&
+			req->firewall_rules_list[i].ip_type !=
+				QMI_IPA_IP_TYPE_V6_V01) {
+			IPAWANERR("Invalid IP type %d\n", req->firewall_rules_list[i].ip_type);
+			return -EINVAL;
+		}
+	}
+
 	req_desc.max_msg_len =
 		QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_MAX_MSG_LEN_V01;
 	req_desc.msg_id = QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_V01;
@@ -825,7 +856,6 @@
 	resp_desc.msg_id = QMI_IPA_INSTALL_UL_FIREWALL_RULES_RESP_V01;
 	resp_desc.ei_array =
 		ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei;
-
 	rc = ipa3_qmi_send_req_wait(ipa_q6_clnt,
 		&req_desc, req,
 		&resp_desc, &resp,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index a5391a9..82cf654 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -497,6 +497,38 @@
 	return 0;
 }
 
+static int ipa_create_ap_smmu_mapping_pa(phys_addr_t pa, size_t len,
+		bool device, unsigned long *iova)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
+	unsigned long va = roundup(cb->next_addr, PAGE_SIZE);
+	int prot = IOMMU_READ | IOMMU_WRITE;
+	size_t true_len = roundup(len + pa - rounddown(pa, PAGE_SIZE),
+			PAGE_SIZE);
+	int ret;
+
+	if (!cb->valid) {
+		IPAERR("No SMMU CB setup\n");
+		return -EINVAL;
+	}
+
+	if (len > PAGE_SIZE)
+		va = roundup(cb->next_addr, len);
+
+	ret = ipa3_iommu_map(cb->mapping->domain, va, rounddown(pa, PAGE_SIZE),
+			true_len,
+			device ? (prot | IOMMU_MMIO) : prot);
+	if (ret) {
+		IPAERR("iommu map failed for pa=%pa len=%zu\n", &pa, true_len);
+		return -EINVAL;
+	}
+
+	ipa3_ctx->wdi_map_cnt++;
+	cb->next_addr = va + true_len;
+	*iova = va + pa - rounddown(pa, PAGE_SIZE);
+	return 0;
+}
+
 static int ipa_create_uc_smmu_mapping_pa(phys_addr_t pa, size_t len,
 		bool device, unsigned long *iova)
 {
@@ -526,6 +558,67 @@
 	return 0;
 }
 
+static int ipa_create_ap_smmu_mapping_sgt(struct sg_table *sgt,
+		unsigned long *iova)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
+	unsigned long va = roundup(cb->next_addr, PAGE_SIZE);
+	int prot = IOMMU_READ | IOMMU_WRITE;
+	int ret, i;
+	struct scatterlist *sg;
+	unsigned long start_iova = va;
+	phys_addr_t phys;
+	size_t len = 0;
+	int count = 0;
+
+	if (!cb->valid) {
+		IPAERR("No SMMU CB setup\n");
+		return -EINVAL;
+	}
+	if (!sgt) {
+		IPAERR("Bad parameters, scatter / gather list is NULL\n");
+		return -EINVAL;
+	}
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		/* directly get sg_tbl PA from wlan-driver */
+		len += PAGE_ALIGN(sg->offset + sg->length);
+	}
+
+	if (len > PAGE_SIZE) {
+		va = roundup(cb->next_addr,
+				roundup_pow_of_two(len));
+		start_iova = va;
+	}
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		/* directly get sg_tbl PA from wlan-driver */
+		phys = sg->dma_address;
+		len = PAGE_ALIGN(sg->offset + sg->length);
+
+		ret = ipa3_iommu_map(cb->mapping->domain, va, phys, len, prot);
+		if (ret) {
+			IPAERR("iommu map failed for pa=%pa len=%zu\n",
+					&phys, len);
+			goto bad_mapping;
+		}
+		va += len;
+		ipa3_ctx->wdi_map_cnt++;
+		count++;
+	}
+	cb->next_addr = va;
+	*iova = start_iova;
+
+	return 0;
+
+bad_mapping:
+	for_each_sg(sgt->sgl, sg, count, i)
+		iommu_unmap(cb->mapping->domain, sg_dma_address(sg),
+				sg_dma_len(sg));
+	return -EINVAL;
+}
+
+
 static int ipa_create_uc_smmu_mapping_sgt(struct sg_table *sgt,
 		unsigned long *iova)
 {
@@ -576,6 +669,43 @@
 	return -EINVAL;
 }
 
+static void ipa_release_ap_smmu_mappings(enum ipa_client_type client)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
+	int i, j, start, end;
+
+	if (IPA_CLIENT_IS_CONS(client)) {
+		start = IPA_WDI_TX_RING_RES;
+		if (ipa3_ctx->ipa_wdi3_over_gsi)
+			end = IPA_WDI_TX_DB_RES;
+		else
+			end = IPA_WDI_CE_DB_RES;
+	} else {
+		start = IPA_WDI_RX_RING_RES;
+		if (ipa3_ctx->ipa_wdi2 ||
+			ipa3_ctx->ipa_wdi3_over_gsi)
+			end = IPA_WDI_RX_COMP_RING_WP_RES;
+		else
+			end = IPA_WDI_RX_RING_RP_RES;
+	}
+
+	for (i = start; i <= end; i++) {
+		if (wdi_res[i].valid) {
+			for (j = 0; j < wdi_res[i].nents; j++) {
+				iommu_unmap(cb->mapping->domain,
+					wdi_res[i].res[j].iova,
+					wdi_res[i].res[j].size);
+				ipa3_ctx->wdi_map_cnt--;
+			}
+			kfree(wdi_res[i].res);
+			wdi_res[i].valid = false;
+		}
+	}
+
+	if (ipa3_ctx->wdi_map_cnt == 0)
+		cb->next_addr = cb->va_end;
+}
+
 static void ipa_release_uc_smmu_mappings(enum ipa_client_type client)
 {
 	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_UC);
@@ -751,9 +881,11 @@
 
 	/* no SMMU on WLAN but SMMU on IPA */
 	if (!wlan_smmu_en && !ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP]) {
-		if (ipa3_smmu_map_peer_buff(*iova, pa, len,
-						sgt, IPA_SMMU_CB_WLAN)) {
-			IPAERR("Fail to create mapping res %d\n", res_idx);
+		if (ipa_create_ap_smmu_mapping_pa(pa, len,
+				(res_idx == IPA_WDI_CE_DB_RES) ? true : false,
+					iova)) {
+			IPAERR("Fail to create mapping res %d\n",
+					res_idx);
 			return -EFAULT;
 		}
 		ipa_save_uc_smmu_mapping_pa(res_idx, pa, *iova, len);
@@ -765,10 +897,12 @@
 		case IPA_WDI_RX_RING_RP_RES:
 		case IPA_WDI_RX_COMP_RING_WP_RES:
 		case IPA_WDI_CE_DB_RES:
-			if (ipa3_smmu_map_peer_buff(*iova, pa, len, sgt,
-							IPA_SMMU_CB_WLAN)) {
+		case IPA_WDI_TX_DB_RES:
+			if (ipa_create_ap_smmu_mapping_pa(pa, len,
+				(res_idx == IPA_WDI_CE_DB_RES) ? true : false,
+						iova)) {
 				IPAERR("Fail to create mapping res %d\n",
-					res_idx);
+						res_idx);
 				return -EFAULT;
 			}
 			ipa_save_uc_smmu_mapping_pa(res_idx, pa, *iova, len);
@@ -777,10 +911,9 @@
 		case IPA_WDI_RX_COMP_RING_RES:
 		case IPA_WDI_TX_RING_RES:
 		case IPA_WDI_CE_RING_RES:
-			if (ipa3_smmu_map_peer_reg(pa, true,
-							IPA_SMMU_CB_WLAN)) {
+			if (ipa_create_ap_smmu_mapping_sgt(sgt, iova)) {
 				IPAERR("Fail to create mapping res %d\n",
-					res_idx);
+						res_idx);
 				return -EFAULT;
 			}
 			ipa_save_uc_smmu_mapping_sgt(res_idx, sgt, *iova);
@@ -1304,7 +1437,7 @@
 ipa_cfg_ep_fail:
 	memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
 gsi_timeout:
-	ipa_release_uc_smmu_mappings(in->sys.client);
+	ipa_release_ap_smmu_mappings(in->sys.client);
 	IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
 fail:
 	return result;
@@ -1864,7 +1997,7 @@
 				result);
 		goto fail_dealloc_channel;
 	}
-	ipa_release_uc_smmu_mappings(clnt_hdl);
+	ipa_release_ap_smmu_mappings(clnt_hdl);
 
 	/* for AP+STA stats update */
 	if (ipa3_ctx->uc_wdi_ctx.stats_notify)
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index d563427..c53c26a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -167,8 +167,7 @@
 #define IPA_v4_2_DST_GROUP_MAX		(1)
 
 #define IPA_v4_5_MHI_GROUP_PCIE		(0)
-#define IPA_v4_5_GROUP_UL_DL_DST	(0)
-#define IPA_v4_5_GROUP_UL_DL_SRC	(1)
+#define IPA_v4_5_GROUP_UL_DL		(1)
 #define IPA_v4_5_MHI_GROUP_DDR		(1)
 #define IPA_v4_5_MHI_GROUP_DMA		(2)
 #define IPA_v4_5_MHI_GROUP_QDSS		(3)
@@ -245,6 +244,7 @@
 	IPA_4_0,
 	IPA_4_0_MHI,
 	IPA_4_1,
+	IPA_4_1_APQ,
 	IPA_4_2,
 	IPA_4_5,
 	IPA_4_5_MHI,
@@ -366,9 +366,9 @@
 		{5, 5}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} },
 	},
 	[IPA_4_5] = {
-		/* unused  UL_DL_SRC  unused  unused  UC_RX_Q N/A */
+		/* unused  UL_DL  unused  unused  UC_RX_Q N/A */
 		[IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
-		{0, 0}, {1, 63}, {0, 0}, {0, 0}, {1, 63}, {0, 0} },
+		{0, 0}, {1, 11}, {0, 0}, {0, 0}, {1, 63}, {0, 0} },
 		[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_LISTS] = {
 		{0, 0}, {14, 14}, {0, 0}, {0, 0}, {3, 3}, {0, 0} },
 		[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
@@ -379,7 +379,7 @@
 		{0, 0}, {24, 24}, {0, 0}, {0, 0}, {8, 8}, {0, 0} },
 	},
 	[IPA_4_5_MHI] = {
-		/* PCIE  DDR  DMA  QDSS  unused  N/A  N/A */
+		/* PCIE  DDR  DMA  QDSS  unused  N/A */
 		[IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
 		{3, 8}, {4, 11}, {1, 1}, {1, 1}, {0, 0}, {0, 0} },
 		[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_LISTS] = {
@@ -392,9 +392,9 @@
 		{22, 22}, {16, 16}, {6, 6}, {2, 2}, {0, 0}, {0, 0} },
 	},
 	[IPA_4_5_APQ] = {
-		/* unused  UL_DL_SRC  unused  unused  UC_RX_Q N/A */
+		/* unused  UL_DL  unused  unused  UC_RX_Q N/A */
 		[IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
-		{0, 0}, {1, 63}, {0, 0}, {0, 0}, {1, 63}, {0, 0} },
+		{0, 0}, {1, 11}, {0, 0}, {0, 0}, {1, 63}, {0, 0} },
 		[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_LISTS] = {
 		{0, 0}, {14, 14}, {0, 0}, {0, 0}, {3, 3}, {0, 0} },
 		[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
@@ -467,11 +467,11 @@
 		{1, 63}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} },
 	},
 	[IPA_4_5] = {
-		/* UL/DL/DPL_DST unused unused unused uC N/A */
+		/* unused  UL/DL/DPL unused  unused  uC  N/A */
 		[IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
-		{16, 16}, {5, 5}, {2, 2}, {2, 2}, {0, 0}, {0, 0} },
+		{0, 0}, {16, 16}, {2, 2}, {2, 2}, {0, 0}, {0, 0} },
 		[IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
-		{2, 63}, {1, 63}, {1, 2}, {1, 2}, {0, 2}, {0, 0} },
+		{0, 0}, {2, 63}, {1, 2}, {1, 2}, {0, 2}, {0, 0} },
 	},
 	[IPA_4_5_MHI] = {
 		/* PCIE/DPL  DDR  DMA/CV2X  QDSS  uC  N/A */
@@ -481,11 +481,11 @@
 		{2, 63}, {1, 63}, {1, 2}, {1, 2}, {0, 2}, {0, 0} },
 	},
 	[IPA_4_5_APQ] = {
-		/* UL/DL/DPL_DST unused unused unused uC N/A */
+		/* unused  UL/DL/DPL unused  unused  uC  N/A */
 		[IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
-		{16, 16}, {5, 5}, {2, 2}, {2, 2}, {0, 0}, {0, 0} },
+		{0, 0}, {16, 16}, {2, 2}, {2, 2}, {0, 0}, {0, 0} },
 		[IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
-		{2, 63}, {1, 63}, {1, 2}, {1, 2}, {0, 2}, {0, 0} },
+		{0, 0}, {2, 63}, {1, 2}, {1, 2}, {0, 2}, {0, 0} },
 	},
 };
 
@@ -1900,6 +1900,39 @@
 			QMB_MASTER_SELECT_DDR,
 			{ 31, 31, 8, 8, IPA_EE_AP } },
 
+	/* MHI PRIME PIPES - Client producer / IPA Consumer pipes */
+	[IPA_4_1_APQ][IPA_CLIENT_MHI_PRIME_DPL_PROD] = {
+			true, IPA_v4_0_MHI_GROUP_PCIE,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{7, 9, 8, 16, IPA_EE_AP } },
+	[IPA_4_1_APQ][IPA_CLIENT_MHI_PRIME_TETH_PROD] = {
+			true, IPA_v4_0_MHI_GROUP_PCIE,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 1, 0, 8, 16, IPA_EE_AP } },
+	[IPA_4_1_APQ][IPA_CLIENT_MHI_PRIME_RMNET_PROD] = {
+			true, IPA_v4_0_MHI_GROUP_PCIE,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 2, 3, 16, 32, IPA_EE_AP } },
+	/* MHI PRIME PIPES - Client Consumer / IPA Producer pipes */
+	[IPA_4_1_APQ][IPA_CLIENT_MHI_PRIME_TETH_CONS] = {
+			true, IPA_v4_0_MHI_GROUP_PCIE,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 20, 13, 9, 9, IPA_EE_AP } },
+	[IPA_4_1_APQ][IPA_CLIENT_MHI_PRIME_RMNET_CONS] = {
+			true, IPA_v4_0_MHI_GROUP_PCIE,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 17, 14, 9, 9, IPA_EE_AP } },
+
 	/* IPA_4_2 */
 	[IPA_4_2][IPA_CLIENT_WLAN1_PROD]          = {
 			true, IPA_v4_2_GROUP_UL_DL,
@@ -2078,177 +2111,177 @@
 
 	/* IPA_4_5 */
 	[IPA_4_5][IPA_CLIENT_WLAN2_PROD]          = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 9, 12, 8, 16, IPA_EE_AP, GSI_FREE_PRE_FETCH, 2 } },
 	[IPA_4_5][IPA_CLIENT_USB_PROD]            = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 1, 0, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
 	[IPA_4_5][IPA_CLIENT_APPS_LAN_PROD]	  = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 11, 14, 10, 16, IPA_EE_AP, GSI_SMART_PRE_FETCH, 2 } },
 	[IPA_4_5][IPA_CLIENT_APPS_WAN_PROD]	  = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 2, 7, 16, 32, IPA_EE_AP, GSI_SMART_PRE_FETCH, 8 } },
 	[IPA_4_5][IPA_CLIENT_APPS_CMD_PROD]	  = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_DDR,
 			{ 7, 9, 20, 24, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
 	[IPA_4_5][IPA_CLIENT_ODU_PROD]            = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 10, 13, 8, 19, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
 	[IPA_4_5][IPA_CLIENT_ETHERNET_PROD]	  = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 12, 0, 8, 16, IPA_EE_UC, GSI_SMART_PRE_FETCH, 4 } },
 	[IPA_4_5][IPA_CLIENT_Q6_WAN_PROD]         = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 5, 0, 16, 28, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 2 } },
 	[IPA_4_5][IPA_CLIENT_Q6_CMD_PROD]	  = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 6, 1, 20, 24, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 } },
 	[IPA_4_5][IPA_CLIENT_Q6_DL_NLO_DATA_PROD] = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 8, 2, 27, 32, IPA_EE_Q6, GSI_FREE_PRE_FETCH, 3 } },
 	/* Only for test purpose */
 	[IPA_4_5][IPA_CLIENT_TEST_PROD]           = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 1, 0, 8, 16, IPA_EE_AP } },
 	[IPA_4_5][IPA_CLIENT_TEST1_PROD]          = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 1, 0, 8, 16, IPA_EE_AP } },
 	[IPA_4_5][IPA_CLIENT_TEST2_PROD]          = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 3, 5, 8, 16, IPA_EE_AP } },
 	[IPA_4_5][IPA_CLIENT_TEST3_PROD]          = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 9, 12, 8, 16, IPA_EE_AP } },
 	[IPA_4_5][IPA_CLIENT_TEST4_PROD]          = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 11, 14, 8, 16, IPA_EE_AP } },
 
 	[IPA_4_5][IPA_CLIENT_WLAN2_CONS]          = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 24, 3, 8, 14, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
 	[IPA_4_5][IPA_CLIENT_USB_CONS]            = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 26, 17, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
 	[IPA_4_5][IPA_CLIENT_USB_DPL_CONS]        = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 15, 15, 5, 5, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
 	[IPA_4_5][IPA_CLIENT_ODL_DPL_CONS]        = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 22, 2, 5, 5, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
 	[IPA_4_5][IPA_CLIENT_APPS_LAN_CONS]       = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 16, 10, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
 	[IPA_4_5][IPA_CLIENT_APPS_WAN_COAL_CONS]       = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 13, 4, 8, 11, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
 	[IPA_4_5][IPA_CLIENT_APPS_WAN_CONS]       = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 14, 1, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
 	[IPA_4_5][IPA_CLIENT_ODU_EMB_CONS]        = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 23, 8, 9, 9, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
 	[IPA_4_5][IPA_CLIENT_ETHERNET_CONS]	  = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 28, 1, 9, 9, IPA_EE_UC, GSI_SMART_PRE_FETCH, 4 } },
 	[IPA_4_5][IPA_CLIENT_Q6_LAN_CONS]         = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 17, 3, 9, 9, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 } },
 	[IPA_4_5][IPA_CLIENT_Q6_WAN_CONS]         = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 21, 7, 9, 9, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 } },
 	[IPA_4_5][IPA_CLIENT_Q6_UL_NLO_DATA_CONS] = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 19, 5, 5, 5, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 2 } },
 	[IPA_4_5][IPA_CLIENT_Q6_UL_NLO_ACK_CONS]  = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 20, 6, 5, 5, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 2 } },
 	[IPA_4_5][IPA_CLIENT_Q6_QBAP_STATUS_CONS] = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
@@ -2256,38 +2289,38 @@
 	/* Only for test purpose */
 	/* MBIM aggregation test pipes should have the same QMB as USB_CONS */
 	[IPA_4_5][IPA_CLIENT_TEST_CONS]           = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 14, 1, 9, 9, IPA_EE_AP } },
 	[IPA_4_5][IPA_CLIENT_TEST1_CONS]          = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 14, 1, 9, 9, IPA_EE_AP } },
 	[IPA_4_5][IPA_CLIENT_TEST2_CONS]          = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 24, 3, 8, 14, IPA_EE_AP } },
 	[IPA_4_5][IPA_CLIENT_TEST3_CONS]          = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 26, 17, 9, 9, IPA_EE_AP } },
 	[IPA_4_5][IPA_CLIENT_TEST4_CONS]          = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 27, 18, 9, 9, IPA_EE_AP } },
 	/* Dummy consumer (pipe 31) is used in L2TP rt rule */
 	[IPA_4_5][IPA_CLIENT_DUMMY_CONS]          = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
@@ -2344,7 +2377,7 @@
 			{ 10, 13, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
 	/* Only for test purpose */
 	[IPA_4_5_MHI][IPA_CLIENT_TEST_PROD]           = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, QMB_MASTER_SELECT_DDR,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
@@ -2425,7 +2458,7 @@
 
 	/* Dummy consumer (pipe 31) is used in L2TP rt rule */
 	[IPA_4_5_MHI][IPA_CLIENT_DUMMY_CONS]          = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, QMB_MASTER_SELECT_DDR,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
@@ -2433,117 +2466,117 @@
 
 	/* IPA_4_5 APQ */
 	[IPA_4_5_APQ][IPA_CLIENT_WLAN2_PROD]          = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 9, 3, 8, 16, IPA_EE_AP, GSI_FREE_PRE_FETCH, 2 } },
 	[IPA_4_5_APQ][IPA_CLIENT_WIGIG_PROD]          = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 1, 1, 8, 16, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
 	[IPA_4_5_APQ][IPA_CLIENT_USB_PROD]            = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 0, 0, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
 	[IPA_4_5_APQ][IPA_CLIENT_APPS_LAN_PROD]	  = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 11, 4, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
 	[IPA_4_5_APQ][IPA_CLIENT_APPS_CMD_PROD]	  = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_DDR,
 			{ 7, 12, 20, 24, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
 	/* Only for test purpose */
 	[IPA_4_5_APQ][IPA_CLIENT_TEST_PROD]           = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 0, 0, 8, 16, IPA_EE_AP } },
 	[IPA_4_5_APQ][IPA_CLIENT_TEST1_PROD]          = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 0, 0, 8, 16, IPA_EE_AP } },
 	[IPA_4_5_APQ][IPA_CLIENT_TEST2_PROD]          = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 1, 1, 8, 16, IPA_EE_AP } },
 	[IPA_4_5_APQ][IPA_CLIENT_TEST3_PROD]          = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 9, 3, 8, 16, IPA_EE_AP } },
 	[IPA_4_5_APQ][IPA_CLIENT_TEST4_PROD]          = {
-			true, IPA_v4_5_GROUP_UL_DL_SRC,
+			true, IPA_v4_5_GROUP_UL_DL,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 10, 10, 8, 16, IPA_EE_AP } },
 
 	[IPA_4_5_APQ][IPA_CLIENT_WLAN2_CONS]          = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 23, 8, 8, 14, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
 	[IPA_4_5_APQ][IPA_CLIENT_WIGIG1_CONS]          = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 14, 14, 9, 9, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
 	[IPA_4_5_APQ][IPA_CLIENT_WIGIG2_CONS]          = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 20, 18, 9, 9, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
 	[IPA_4_5_APQ][IPA_CLIENT_WIGIG3_CONS]          = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 22, 5, 9, 9, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
 	[IPA_4_5_APQ][IPA_CLIENT_WIGIG4_CONS]          = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 29, 10, 9, 9, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
 	[IPA_4_5_APQ][IPA_CLIENT_USB_CONS]            = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 24, 9, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
 	[IPA_4_5_APQ][IPA_CLIENT_USB_DPL_CONS]        = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 16, 16, 5, 5, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
 	[IPA_4_5_APQ][IPA_CLIENT_APPS_LAN_CONS]       = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 13, 13, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
 	[IPA_4_5_APQ][IPA_CLIENT_ODL_DPL_CONS]       = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
@@ -2551,42 +2584,74 @@
 	/* Only for test purpose */
 	/* MBIM aggregation test pipes should have the same QMB as USB_CONS */
 	[IPA_4_5_APQ][IPA_CLIENT_TEST_CONS]           = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 16, 16, 5, 5, IPA_EE_AP } },
 	[IPA_4_5_APQ][IPA_CLIENT_TEST1_CONS]          = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 16, 16, 5, 5, IPA_EE_AP } },
 	[IPA_4_5_APQ][IPA_CLIENT_TEST2_CONS]          = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 22, 5, 9, 9, IPA_EE_AP } },
 	[IPA_4_5_APQ][IPA_CLIENT_TEST3_CONS]          = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 24, 9, 9, 9, IPA_EE_AP } },
 	[IPA_4_5_APQ][IPA_CLIENT_TEST4_CONS]          = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 23, 8, 8, 13, IPA_EE_AP } },
 	/* Dummy consumer (pipe 31) is used in L2TP rt rule */
 	[IPA_4_5_APQ][IPA_CLIENT_DUMMY_CONS]          = {
-			true, IPA_v4_5_GROUP_UL_DL_DST,
+			true, IPA_v4_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 31, 31, 8, 8, IPA_EE_AP } },
+	/* MHI PRIME PIPES - Client producer / IPA Consumer pipes */
+	[IPA_4_5_APQ][IPA_CLIENT_MHI_PRIME_DPL_PROD] = {
+			true, IPA_v4_5_MHI_GROUP_DDR,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{3, 2, 8, 16, IPA_EE_AP } },
+	[IPA_4_5_APQ][IPA_CLIENT_MHI_PRIME_TETH_PROD] = {
+			true, IPA_v4_5_MHI_GROUP_DDR,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 2, 7, 8, 16, IPA_EE_AP } },
+	[IPA_4_5_APQ][IPA_CLIENT_MHI_PRIME_RMNET_PROD] = {
+			true, IPA_v4_5_MHI_GROUP_DDR,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 4, 11, 16, 32, IPA_EE_AP } },
+	/* MHI PRIME PIPES - Client Consumer / IPA Producer pipes */
+	[IPA_4_5_APQ][IPA_CLIENT_MHI_PRIME_TETH_CONS] = {
+			true, IPA_v4_5_MHI_GROUP_PCIE,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 28, 6, 9, 9, IPA_EE_AP } },
+	[IPA_4_5_APQ][IPA_CLIENT_MHI_PRIME_RMNET_CONS] = {
+			true, IPA_v4_5_MHI_GROUP_PCIE,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 17, 17, 9, 9, IPA_EE_AP } },
 };
 
 static struct ipa3_mem_partition ipa_4_1_mem_part = {
@@ -6627,7 +6692,7 @@
 		if (src) {
 			switch (group_index) {
 			case IPA_v4_5_MHI_GROUP_PCIE:
-			case IPA_v4_5_GROUP_UL_DL_SRC:
+			case IPA_v4_5_GROUP_UL_DL:
 				ipahal_write_reg_n_fields(
 					IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n,
 					n, val);
@@ -6651,8 +6716,8 @@
 			}
 		} else {
 			switch (group_index) {
-			case IPA_v4_5_GROUP_UL_DL_DST:
-			case IPA_v4_5_MHI_GROUP_DDR:
+			case IPA_v4_5_MHI_GROUP_PCIE:
+			case IPA_v4_5_GROUP_UL_DL:
 				ipahal_write_reg_n_fields(
 					IPA_DST_RSRC_GRP_01_RSRC_TYPE_n,
 					n, val);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c b/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c
index 9cbaa39..1e98d20 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c
@@ -74,6 +74,7 @@
 	const struct ipa_gsi_ep_config *gsi_ep_info;
 	int result, len;
 	unsigned long va;
+	uint32_t addr_low, addr_high;
 
 	if (!info || !info_smmu || !ep) {
 		IPAERR("invalid input\n");
@@ -211,17 +212,105 @@
 		IPAERR("failed to write evt ring scratch\n");
 		goto fail_write_scratch;
 	}
-	/* write event ring db address */
+
+	if (!is_smmu_enabled) {
+		IPADBG("smmu disabled\n");
+		if (info->is_evt_rn_db_pcie_addr == true)
+			IPADBG_LOW("is_evt_rn_db_pcie_addr is PCIE addr\n");
+		else
+			IPADBG_LOW("is_evt_rn_db_pcie_addr is DDR addr\n");
+		IPADBG_LOW("LSB 0x%x\n",
+			(u32)info->event_ring_doorbell_pa);
+		IPADBG_LOW("MSB 0x%x\n",
+			(u32)((u64)info->event_ring_doorbell_pa >> 32));
+	} else {
+		IPADBG("smmu enabled\n");
+		if (info_smmu->is_evt_rn_db_pcie_addr == true)
+			IPADBG_LOW("is_evt_rn_db_pcie_addr is PCIE addr\n");
+		else
+			IPADBG_LOW("is_evt_rn_db_pcie_addr is DDR addr\n");
+		IPADBG_LOW("LSB 0x%x\n",
+			(u32)info_smmu->event_ring_doorbell_pa);
+		IPADBG_LOW("MSB 0x%x\n",
+			(u32)((u64)info_smmu->event_ring_doorbell_pa >> 32));
+	}
+
+	if (!is_smmu_enabled) {
+		addr_low = (u32)info->event_ring_doorbell_pa;
+		addr_high = (u32)((u64)info->event_ring_doorbell_pa >> 32);
+	} else {
+		if (dir == IPA_WDI3_TX_DIR) {
+			if (ipa_create_gsi_smmu_mapping(IPA_WDI_CE_DB_RES,
+				true, info_smmu->event_ring_doorbell_pa,
+				NULL, 4, true, &va)) {
+				IPAERR("failed to get smmu mapping\n");
+				result = -EFAULT;
+				goto fail_write_scratch;
+			}
+		} else {
+			if (ipa_create_gsi_smmu_mapping(
+				IPA_WDI_RX_COMP_RING_WP_RES,
+				true, info_smmu->event_ring_doorbell_pa,
+				NULL, 4, true, &va)) {
+				IPAERR("failed to get smmu mapping\n");
+				result = -EFAULT;
+				goto fail_write_scratch;
+			}
+		}
+		addr_low = (u32)va;
+		addr_high = (u32)((u64)va >> 32);
+	}
+
+	/*
+	 * Arch specific:
+	 * pcie addr which are not via smmu, use pa directly!
+	 * pcie and DDR via 2 different port
+	 * assert bit 40 to indicate it is pcie addr
+	 * WDI-3.0, MSM --> pcie via smmu
+	 * WDI-3.0, MDM --> pcie not via smmu + dual port
+	 * assert bit 40 in case
+	 */
+	if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
+		is_smmu_enabled) {
+		/*
+		 * Irrespective of smmu enabled don't use IOVA addr
+		 * since pcie not via smmu in MDM's
+		 */
+		if (info_smmu->is_evt_rn_db_pcie_addr == true) {
+			addr_low = (u32)info_smmu->event_ring_doorbell_pa;
+			addr_high =
+				(u32)((u64)info_smmu->event_ring_doorbell_pa
+				>> 32);
+		}
+	}
+
+	/*
+	 * GSI recommendation to set bit-40 for (mdm targets && pcie addr)
+	 * from wdi-3.0 interface document
+	 */
+	if (!is_smmu_enabled) {
+		if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
+			info->is_evt_rn_db_pcie_addr)
+			addr_high |= (1 << 8);
+	} else {
+		if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
+			info_smmu->is_evt_rn_db_pcie_addr)
+			addr_high |= (1 << 8);
+	}
+
 	gsi_wdi3_write_evt_ring_db(ep->gsi_evt_ring_hdl,
-		(u32)info->event_ring_doorbell_pa,
-		(u32)((u64)info->event_ring_doorbell_pa >> 32));
+			addr_low,
+			addr_high);
 
 	/* write channel scratch */
 	memset(&ch_scratch, 0, sizeof(ch_scratch));
 	ch_scratch.wdi3.update_rp_moderation_threshold =
 		UPDATE_RP_MODERATION_THRESHOLD;
 	if (dir == IPA_WDI3_RX_DIR) {
-		ch_scratch.wdi3.rx_pkt_offset = info->pkt_offset;
+		if (!is_smmu_enabled)
+			ch_scratch.wdi3.rx_pkt_offset = info->pkt_offset;
+		else
+			ch_scratch.wdi3.rx_pkt_offset = info_smmu->pkt_offset;
 		/* this metadata reg offset need to be in words */
 		ch_scratch.wdi3.endp_metadata_reg_offset =
 			ipahal_get_reg_mn_ofst(IPA_ENDP_INIT_HDR_METADATA_n, 0,
@@ -229,6 +318,28 @@
 	}
 
 	if (!is_smmu_enabled) {
+		IPADBG_LOW("smmu disabled\n");
+		if (info->is_txr_rn_db_pcie_addr == true)
+			IPADBG_LOW("is_txr_rn_db_pcie_addr is PCIE addr\n");
+		else
+			IPADBG_LOW("is_txr_rn_db_pcie_addr is DDR addr\n");
+		IPADBG_LOW("LSB 0x%x\n",
+			(u32)info->transfer_ring_doorbell_pa);
+		IPADBG_LOW("MSB 0x%x\n",
+			(u32)((u64)info->transfer_ring_doorbell_pa >> 32));
+	} else {
+		IPADBG_LOW("smmu eabled\n");
+		if (info_smmu->is_txr_rn_db_pcie_addr == true)
+			IPADBG_LOW("is_txr_rn_db_pcie_addr is PCIE addr\n");
+		else
+			IPADBG_LOW("is_txr_rn_db_pcie_addr is DDR addr\n");
+		IPADBG_LOW("LSB 0x%x\n",
+			(u32)info_smmu->transfer_ring_doorbell_pa);
+		IPADBG_LOW("MSB 0x%x\n",
+			(u32)((u64)info_smmu->transfer_ring_doorbell_pa >> 32));
+	}
+
+	if (!is_smmu_enabled) {
 		ch_scratch.wdi3.wifi_rp_address_low =
 			(u32)info->transfer_ring_doorbell_pa;
 		ch_scratch.wdi3.wifi_rp_address_high =
@@ -262,6 +373,49 @@
 				(u32)((u64)va >> 32);
 		}
 	}
+
+	/*
+	 * Arch specific:
+	 * pcie addr which are not via smmu, use pa directly!
+	 * pcie and DDR via 2 different port
+	 * assert bit 40 to indicate it is pcie addr
+	 * WDI-3.0, MSM --> pcie via smmu
+	 * WDI-3.0, MDM --> pcie not via smmu + dual port
+	 * assert bit 40 in case
+	 */
+	if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
+		is_smmu_enabled) {
+		/*
+		 * Irrespective of smmu enabled don't use IOVA addr
+		 * since pcie not via smmu in MDM's
+		 */
+		if (info_smmu->is_txr_rn_db_pcie_addr == true) {
+			ch_scratch.wdi3.wifi_rp_address_low =
+				(u32)info_smmu->transfer_ring_doorbell_pa;
+			ch_scratch.wdi3.wifi_rp_address_high =
+				(u32)((u64)info_smmu->transfer_ring_doorbell_pa
+				>> 32);
+		}
+	}
+
+	/*
+	 * GSI recommendation to set bit-40 for (mdm targets && pcie addr)
+	 * from wdi-3.0 interface document
+	 */
+	if (!is_smmu_enabled) {
+		if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
+			info->is_txr_rn_db_pcie_addr)
+			ch_scratch.wdi3.wifi_rp_address_high =
+			(u32)((u32)ch_scratch.wdi3.wifi_rp_address_high |
+			(1 << 8));
+	} else {
+		if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
+			info_smmu->is_txr_rn_db_pcie_addr)
+			ch_scratch.wdi3.wifi_rp_address_high =
+			(u32)((u32)ch_scratch.wdi3.wifi_rp_address_high |
+			(1 << 8));
+	}
+
 	result = gsi_write_channel_scratch(ep->gsi_chan_hdl, ch_scratch);
 	if (result != GSI_STATUS_SUCCESS) {
 		IPAERR("failed to write evt ring scratch\n");
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_wigig_i.c b/drivers/platform/msm/ipa/ipa_v3/ipa_wigig_i.c
index 8e44841..2f02db7 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_wigig_i.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_wigig_i.c
@@ -31,7 +31,7 @@
 static int ipa3_wigig_uc_loaded_handler(struct notifier_block *self,
 	unsigned long val, void *data)
 {
-	IPADBG("val %d\n", val);
+	IPADBG("val %ld\n", val);
 
 	if (!ipa3_ctx) {
 		IPAERR("IPA ctx is null\n");
@@ -829,7 +829,7 @@
 		if (
 		IPA_WIGIG_MSB(input->dbuff.data_buffer_base_pa) & 0xFFFFFF00) {
 			IPAERR(
-				"data_buffers_base_address_msb is over the 8 bit limit (0xpa)\n"
+				"data_buffers_base_address_msb is over the 8 bit limit (0x%pa)\n"
 				, &input->dbuff.data_buffer_base_pa);
 			IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 			return -EFAULT;
@@ -971,7 +971,7 @@
 			!= IPA_WIGIG_8_MSB(
 				input_smmu->pipe_smmu.status_ring_HWTAIL_pa)) {
 			IPAERR(
-				"status ring HWHEAD and HWTAIL differ in 8 MSbs head 0x%X tail 0x%X\n"
+				"status ring HWHEAD and HWTAIL differ in 8 MSbs head 0x%llX tail 0x%llX\n"
 			, input_smmu->pipe_smmu.status_ring_HWHEAD_pa,
 			input_smmu->pipe_smmu.status_ring_HWTAIL_pa);
 			return -EFAULT;
@@ -1010,7 +1010,7 @@
 			!= IPA_WIGIG_8_MSB(
 				input->pipe.status_ring_HWTAIL_pa)) {
 			IPAERR(
-				"status ring HWHEAD and HWTAIL differ in 8 MSbs head 0x%X tail 0x%X\n"
+				"status ring HWHEAD and HWTAIL differ in 8 MSbs head 0x%llX tail 0x%llX\n"
 				, input->pipe.status_ring_HWHEAD_pa,
 				input->pipe.status_ring_HWTAIL_pa);
 			return -EFAULT;
@@ -1472,7 +1472,7 @@
 			ep->gsi_mem_info.chan_ring_len -
 			IPA_WIGIG_DESC_RING_EL_SIZE;
 
-		IPADBG("ring ch doorbell (0x%llX) TX %d\n", val,
+		IPADBG("ring ch doorbell (0x%llX) TX %ld\n", val,
 			ep->gsi_chan_hdl);
 		res = gsi_ring_ch_ring_db(ep->gsi_chan_hdl, val);
 		if (res) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
index 45efd47..3f38a3a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
@@ -1182,6 +1182,13 @@
 	memcpy(base + offset, hdr, hdr_len);
 }
 
+/* Header address update logic. */
+#define IPAHAL_CP_PROC_CTX_HEADER_UPDATE(hdr_lsb, hdr_msb, addr) \
+	do { \
+		hdr_lsb = lower_32_bits(addr); \
+		hdr_msb = upper_32_bits(addr); \
+	} while (0)
+
 /*
  * ipahal_cp_proc_ctx_to_hw_buff_v3() - copy processing context to
  * base address and offset given.
@@ -1195,26 +1202,31 @@
  * @hdr_base_addr: base address in table
  * @offset_entry: offset from hdr_base_addr in table
  * @l2tp_params: l2tp parameters
+ * @is_64: Indicates whether header base address/dma base address is 64 bit.
  */
 static int ipahal_cp_proc_ctx_to_hw_buff_v3(enum ipa_hdr_proc_type type,
 		void *const base, u32 offset,
 		u32 hdr_len, bool is_hdr_proc_ctx,
 		dma_addr_t phys_base, u64 hdr_base_addr,
 		struct ipa_hdr_offset_entry *offset_entry,
-		struct ipa_l2tp_hdr_proc_ctx_params l2tp_params)
+		struct ipa_l2tp_hdr_proc_ctx_params l2tp_params, bool is_64)
 {
+	u64 hdr_addr;
+
 	if (type == IPA_HDR_PROC_NONE) {
 		struct ipa_hw_hdr_proc_ctx_add_hdr_seq *ctx;
 
 		ctx = (struct ipa_hw_hdr_proc_ctx_add_hdr_seq *)
 			(base + offset);
 		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
-		ctx->hdr_add.tlv.length = 1;
+		ctx->hdr_add.tlv.length = is_64 ? 2 : 1;
 		ctx->hdr_add.tlv.value = hdr_len;
-		ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base :
+		hdr_addr = is_hdr_proc_ctx ? phys_base :
 			hdr_base_addr + offset_entry->offset;
 		IPAHAL_DBG("header address 0x%llx\n",
-			ctx->hdr_add.hdr_addr);
+			hdr_addr);
+		IPAHAL_CP_PROC_CTX_HEADER_UPDATE(ctx->hdr_add.hdr_addr,
+			ctx->hdr_add.hdr_addr_hi, hdr_addr);
 		ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
 		ctx->end.length = 0;
 		ctx->end.value = 0;
@@ -1224,12 +1236,14 @@
 		ctx = (struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq *)
 			(base + offset);
 		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
-		ctx->hdr_add.tlv.length = 1;
+		ctx->hdr_add.tlv.length = is_64 ? 2 : 1;
 		ctx->hdr_add.tlv.value = hdr_len;
-		ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base :
+		hdr_addr = is_hdr_proc_ctx ? phys_base :
 			hdr_base_addr + offset_entry->offset;
 		IPAHAL_DBG("header address 0x%llx\n",
-			ctx->hdr_add.hdr_addr);
+			hdr_addr);
+		IPAHAL_CP_PROC_CTX_HEADER_UPDATE(ctx->hdr_add.hdr_addr,
+			ctx->hdr_add.hdr_addr_hi, hdr_addr);
 		ctx->l2tp_params.tlv.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD;
 		ctx->l2tp_params.tlv.length = 1;
 		ctx->l2tp_params.tlv.value =
@@ -1251,12 +1265,14 @@
 		ctx = (struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq *)
 			(base + offset);
 		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
-		ctx->hdr_add.tlv.length = 1;
+		ctx->hdr_add.tlv.length = is_64 ? 2 : 1;
 		ctx->hdr_add.tlv.value = hdr_len;
-		ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base :
+		hdr_addr = is_hdr_proc_ctx ? phys_base :
 			hdr_base_addr + offset_entry->offset;
 		IPAHAL_DBG("header address 0x%llx length %d\n",
-			ctx->hdr_add.hdr_addr, ctx->hdr_add.tlv.value);
+			hdr_addr, ctx->hdr_add.tlv.value);
+		IPAHAL_CP_PROC_CTX_HEADER_UPDATE(ctx->hdr_add.hdr_addr,
+			ctx->hdr_add.hdr_addr_hi, hdr_addr);
 		ctx->l2tp_params.tlv.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD;
 		ctx->l2tp_params.tlv.length = 1;
 		ctx->l2tp_params.tlv.value =
@@ -1287,12 +1303,14 @@
 		ctx = (struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq *)
 			(base + offset);
 		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
-		ctx->hdr_add.tlv.length = 1;
+		ctx->hdr_add.tlv.length = is_64 ? 2 : 1;
 		ctx->hdr_add.tlv.value = hdr_len;
-		ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base :
+		hdr_addr = is_hdr_proc_ctx ? phys_base :
 			hdr_base_addr + offset_entry->offset;
 		IPAHAL_DBG("header address 0x%llx\n",
-			ctx->hdr_add.hdr_addr);
+			hdr_addr);
+		IPAHAL_CP_PROC_CTX_HEADER_UPDATE(ctx->hdr_add.hdr_addr,
+			ctx->hdr_add.hdr_addr_hi, hdr_addr);
 		ctx->cmd.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD;
 		ctx->cmd.length = 0;
 		switch (type) {
@@ -1350,7 +1368,8 @@
 			bool is_hdr_proc_ctx, dma_addr_t phys_base,
 			u64 hdr_base_addr,
 			struct ipa_hdr_offset_entry *offset_entry,
-			struct ipa_l2tp_hdr_proc_ctx_params l2tp_params);
+			struct ipa_l2tp_hdr_proc_ctx_params l2tp_params,
+			bool is_64);
 
 	int (*ipahal_get_proc_ctx_needed_len)(enum ipa_hdr_proc_type type);
 };
@@ -1416,17 +1435,18 @@
  * @hdr_base_addr: base address in table
  * @offset_entry: offset from hdr_base_addr in table
  * @l2tp_params: l2tp parameters
+ * @is_64: Indicates whether header base address/dma base address is 64 bit.
  */
 int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
 		void *const base, u32 offset, u32 hdr_len,
 		bool is_hdr_proc_ctx, dma_addr_t phys_base,
 		u64 hdr_base_addr, struct ipa_hdr_offset_entry *offset_entry,
-		struct ipa_l2tp_hdr_proc_ctx_params l2tp_params)
+		struct ipa_l2tp_hdr_proc_ctx_params l2tp_params, bool is_64)
 {
 	IPAHAL_DBG(
-		"type %d, base %pK, offset %d, hdr_len %d, is_hdr_proc_ctx %d, hdr_base_addr %llu, offset_entry %pK\n"
+		"type %d, base %pK, offset %d, hdr_len %d, is_hdr_proc_ctx %d, hdr_base_addr %llu, offset_entry %pK, bool %d\n"
 			, type, base, offset, hdr_len, is_hdr_proc_ctx,
-			hdr_base_addr, offset_entry);
+			hdr_base_addr, offset_entry, is_64);
 
 	if (!base ||
 		!hdr_len ||
@@ -1442,7 +1462,7 @@
 
 	return hdr_funcs.ipahal_cp_proc_ctx_to_hw_buff(type, base, offset,
 			hdr_len, is_hdr_proc_ctx, phys_base,
-			hdr_base_addr, offset_entry, l2tp_params);
+			hdr_base_addr, offset_entry, l2tp_params, is_64);
 }
 
 /*
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
index fb2ba48..942fa52 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
@@ -630,13 +630,14 @@
  * @hdr_base_addr: base address in table
  * @offset_entry: offset from hdr_base_addr in table
  * @l2tp_params: l2tp parameters
+ * @is_64: Indicates whether header base address/dma base address is 64 bit.
  */
 int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
 		void *base, u32 offset, u32 hdr_len,
 		bool is_hdr_proc_ctx, dma_addr_t phys_base,
 		u64 hdr_base_addr,
 		struct ipa_hdr_offset_entry *offset_entry,
-		struct ipa_l2tp_hdr_proc_ctx_params l2tp_params);
+		struct ipa_l2tp_hdr_proc_ctx_params l2tp_params, bool is_64);
 
 /*
  * ipahal_get_proc_ctx_needed_len() - calculates the needed length for addition
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
index 70ff6f5..6811244 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
@@ -648,7 +648,8 @@
  */
 struct ipa_hw_hdr_proc_ctx_hdr_add {
 	struct ipa_hw_hdr_proc_ctx_tlv tlv;
-	u64 hdr_addr;
+	u32 hdr_addr;
+	u32 hdr_addr_hi;
 };
 
 /**
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
index 8077cd3..f68280a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/init.h>
@@ -12,6 +12,7 @@
 #include <linux/uaccess.h>
 #include <linux/rmnet_ipa_fd_ioctl.h>
 #include "ipa_qmi_service.h"
+#include "ipa_i.h"
 
 #define DRIVER_NAME "wwan_ioctl"
 
@@ -335,6 +336,10 @@
 			break;
 		}
 
+		if (ipa_mpm_notify_wan_state()) {
+			IPAWANERR("WAN_IOC_NOTIFY_WAN_STATE failed\n");
+			retval = -EPERM;
+		}
 		break;
 	case WAN_IOC_ENABLE_PER_CLIENT_STATS:
 		IPAWANDBG_LOW("got WAN_IOC_ENABLE_PER_CLIENT_STATS :>>>\n");
diff --git a/drivers/platform/msm/msm_11ad/msm_11ad.c b/drivers/platform/msm/msm_11ad/msm_11ad.c
index e2c362c..54c7fc7 100644
--- a/drivers/platform/msm/msm_11ad/msm_11ad.c
+++ b/drivers/platform/msm/msm_11ad/msm_11ad.c
@@ -26,9 +26,6 @@
 #include "wil_platform.h"
 #include "msm_11ad.h"
 
-#define SMMU_BASE	0x20000000 /* Device address range base */
-#define SMMU_SIZE	((SZ_1G * 4ULL) - SMMU_BASE)
-
 #define WIGIG_ENABLE_DELAY	50
 
 #define WIGIG_SUBSYS_NAME	"WIGIG"
@@ -39,9 +36,12 @@
 #define VDD_MIN_UV	1028000
 #define VDD_MAX_UV	1028000
 #define VDD_MAX_UA	575000
-#define VDDIO_MIN_UV	1950000
+#define VDDIO_MIN_UV	1824000
 #define VDDIO_MAX_UV	2040000
 #define VDDIO_MAX_UA	70300
+#define VDD_LDO_MIN_UV	1800000
+#define VDD_LDO_MAX_UV	1800000
+#define VDD_LDO_MAX_UA	100000
 
 #define WIGIG_MIN_CPU_BOOST_KBPS	150000
 
@@ -92,15 +92,6 @@
 	struct pci_saved_state *golden_state;
 	struct msm_pcie_register_event pci_event;
 
-	/* SMMU */
-	bool use_smmu; /* have SMMU enabled? */
-	int smmu_s1_en;
-	int smmu_fast_map;
-	int smmu_coherent;
-	struct dma_iommu_mapping *mapping;
-	u32 smmu_base;
-	u32 smmu_size;
-
 	/* bus frequency scaling */
 	struct msm_bus_scale_pdata *bus_scale;
 	u32 msm_bus_handle;
@@ -122,8 +113,9 @@
 	/* external vregs and clocks */
 	struct msm11ad_vreg vdd;
 	struct msm11ad_vreg vddio;
-	struct msm11ad_clk rf_clk3;
-	struct msm11ad_clk rf_clk3_pin;
+	struct msm11ad_vreg vdd_ldo;
+	struct msm11ad_clk rf_clk;
+	struct msm11ad_clk rf_clk_pin;
 
 	/* cpu boost support */
 	bool use_cpu_boost;
@@ -256,8 +248,18 @@
 	ctx->vddio.min_uV = VDDIO_MIN_UV;
 	ctx->vddio.max_uA = VDDIO_MAX_UA;
 
+	rc = msm_11ad_init_vreg(dev, &ctx->vdd_ldo, "vdd-ldo");
+	if (rc)
+		goto vdd_ldo_fail;
+
+	ctx->vdd_ldo.max_uV = VDD_LDO_MAX_UV;
+	ctx->vdd_ldo.min_uV = VDD_LDO_MIN_UV;
+	ctx->vdd_ldo.max_uA = VDD_LDO_MAX_UA;
+
 	return rc;
 
+vdd_ldo_fail:
+	msm_11ad_release_vreg(dev, &ctx->vddio);
 vddio_fail:
 	msm_11ad_release_vreg(dev, &ctx->vdd);
 out:
@@ -266,6 +268,7 @@
 
 static void msm_11ad_release_vregs(struct msm11ad_ctx *ctx)
 {
+	msm_11ad_release_vreg(ctx->dev, &ctx->vdd_ldo);
 	msm_11ad_release_vreg(ctx->dev, &ctx->vdd);
 	msm_11ad_release_vreg(ctx->dev, &ctx->vddio);
 }
@@ -381,8 +384,14 @@
 	if (rc)
 		goto vddio_fail;
 
+	rc = msm_11ad_enable_vreg(ctx, &ctx->vdd_ldo);
+	if (rc)
+		goto vdd_ldo_fail;
+
 	return rc;
 
+vdd_ldo_fail:
+	msm_11ad_disable_vreg(ctx, &ctx->vddio);
 vddio_fail:
 	msm_11ad_disable_vreg(ctx, &ctx->vdd);
 out:
@@ -391,10 +400,11 @@
 
 static int msm_11ad_disable_vregs(struct msm11ad_ctx *ctx)
 {
-	if (!ctx->vdd.reg && !ctx->vddio.reg)
+	if (!ctx->vdd.reg && !ctx->vddio.reg && !ctx->vdd_ldo.reg)
 		goto out;
 
 	/* ignore errors on disable vreg */
+	msm_11ad_disable_vreg(ctx, &ctx->vdd_ldo);
 	msm_11ad_disable_vreg(ctx, &ctx->vdd);
 	msm_11ad_disable_vreg(ctx, &ctx->vddio);
 
@@ -446,13 +456,13 @@
 {
 	int rc;
 
-	rc = msm_11ad_enable_clk(ctx, &ctx->rf_clk3);
+	rc = msm_11ad_enable_clk(ctx, &ctx->rf_clk);
 	if (rc)
 		return rc;
 
-	rc = msm_11ad_enable_clk(ctx, &ctx->rf_clk3_pin);
+	rc = msm_11ad_enable_clk(ctx, &ctx->rf_clk_pin);
 	if (rc)
-		msm_11ad_disable_clk(ctx, &ctx->rf_clk3);
+		msm_11ad_disable_clk(ctx, &ctx->rf_clk);
 
 	return rc;
 }
@@ -461,22 +471,22 @@
 {
 	int rc;
 	struct device *dev = ctx->dev;
-	int rf_clk3_pin_idx;
+	int rf_clk_pin_idx;
 
 	if (!of_property_read_bool(dev->of_node, "qcom,use-ext-clocks"))
 		return 0;
 
-	rc = msm_11ad_init_clk(dev, &ctx->rf_clk3, "rf_clk3_clk");
+	rc = msm_11ad_init_clk(dev, &ctx->rf_clk, "rf_clk_clk");
 	if (rc)
 		return rc;
 
-	rf_clk3_pin_idx = of_property_match_string(dev->of_node, "clock-names",
-						   "rf_clk3_pin_clk");
-	if (rf_clk3_pin_idx >= 0) {
-		rc = msm_11ad_init_clk(dev, &ctx->rf_clk3_pin,
-				       "rf_clk3_pin_clk");
+	rf_clk_pin_idx = of_property_match_string(dev->of_node, "clock-names",
+						   "rf_clk_pin_clk");
+	if (rf_clk_pin_idx >= 0) {
+		rc = msm_11ad_init_clk(dev, &ctx->rf_clk_pin,
+				       "rf_clk_pin_clk");
 		if (rc)
-			msm_11ad_release_clk(ctx->dev, &ctx->rf_clk3);
+			msm_11ad_release_clk(ctx->dev, &ctx->rf_clk);
 	}
 
 	return rc;
@@ -484,14 +494,14 @@
 
 static void msm_11ad_release_clocks(struct msm11ad_ctx *ctx)
 {
-	msm_11ad_release_clk(ctx->dev, &ctx->rf_clk3_pin);
-	msm_11ad_release_clk(ctx->dev, &ctx->rf_clk3);
+	msm_11ad_release_clk(ctx->dev, &ctx->rf_clk_pin);
+	msm_11ad_release_clk(ctx->dev, &ctx->rf_clk);
 }
 
 static void msm_11ad_disable_clocks(struct msm11ad_ctx *ctx)
 {
-	msm_11ad_disable_clk(ctx, &ctx->rf_clk3_pin);
-	msm_11ad_disable_clk(ctx, &ctx->rf_clk3);
+	msm_11ad_disable_clk(ctx, &ctx->rf_clk_pin);
+	msm_11ad_disable_clk(ctx, &ctx->rf_clk);
 }
 
 static int msm_11ad_turn_device_power_off(struct msm11ad_ctx *ctx)
@@ -769,86 +779,6 @@
 	return rc;
 }
 
-static int msm_11ad_smmu_init(struct msm11ad_ctx *ctx)
-{
-	int atomic_ctx = 1;
-	int rc;
-	int force_pt_coherent = 1;
-	int smmu_bypass = !ctx->smmu_s1_en;
-
-	if (!ctx->use_smmu)
-		return 0;
-
-	dev_info(ctx->dev, "Initialize SMMU, bypass=%d, fastmap=%d, coherent=%d\n",
-		 smmu_bypass, ctx->smmu_fast_map, ctx->smmu_coherent);
-
-	ctx->mapping = __depr_arm_iommu_create_mapping(&platform_bus_type,
-						ctx->smmu_base, ctx->smmu_size);
-	if (IS_ERR_OR_NULL(ctx->mapping)) {
-		rc = PTR_ERR(ctx->mapping) ?: -ENODEV;
-		dev_err(ctx->dev, "Failed to create IOMMU mapping (%d)\n", rc);
-		return rc;
-	}
-
-	rc = iommu_domain_set_attr(ctx->mapping->domain,
-				   DOMAIN_ATTR_ATOMIC,
-				   &atomic_ctx);
-	if (rc) {
-		dev_err(ctx->dev, "Set atomic attribute to SMMU failed (%d)\n",
-			rc);
-		goto release_mapping;
-	}
-
-	if (smmu_bypass) {
-		rc = iommu_domain_set_attr(ctx->mapping->domain,
-					   DOMAIN_ATTR_S1_BYPASS,
-					   &smmu_bypass);
-		if (rc) {
-			dev_err(ctx->dev, "Set bypass attribute to SMMU failed (%d)\n",
-				rc);
-			goto release_mapping;
-		}
-	} else {
-		/* Set dma-coherent and page table coherency */
-		if (ctx->smmu_coherent) {
-			arch_setup_dma_ops(&ctx->pcidev->dev, 0, 0, NULL, true);
-			rc = iommu_domain_set_attr(ctx->mapping->domain,
-				   DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT,
-				   &force_pt_coherent);
-			if (rc) {
-				dev_err(ctx->dev,
-					"Set SMMU PAGE_TABLE_FORCE_COHERENT attr failed (%d)\n",
-					rc);
-				goto release_mapping;
-			}
-		}
-
-		if (ctx->smmu_fast_map) {
-			rc = iommu_domain_set_attr(ctx->mapping->domain,
-						   DOMAIN_ATTR_FAST,
-						   &ctx->smmu_fast_map);
-			if (rc) {
-				dev_err(ctx->dev, "Set fast attribute to SMMU failed (%d)\n",
-					rc);
-				goto release_mapping;
-			}
-		}
-	}
-
-	rc = __depr_arm_iommu_attach_device(&ctx->pcidev->dev, ctx->mapping);
-	if (rc) {
-		dev_err(ctx->dev, "arm_iommu_attach_device failed (%d)\n", rc);
-		goto release_mapping;
-	}
-	dev_dbg(ctx->dev, "attached to IOMMU\n");
-
-	return 0;
-release_mapping:
-	__depr_arm_iommu_release_mapping(ctx->mapping);
-	ctx->mapping = NULL;
-	return rc;
-}
-
 static int msm_11ad_ssr_shutdown(const struct subsys_desc *subsys,
 				 bool force_stop)
 {
@@ -1091,7 +1021,6 @@
 	struct device_node *of_node = dev->of_node;
 	struct device_node *rc_node;
 	struct pci_dev *pcidev = NULL;
-	u32 smmu_mapping[2];
 	int rc, i;
 	bool pcidev_found = false;
 	struct msm_pcie_register_event *pci_event;
@@ -1118,7 +1047,6 @@
 	 *	qcom,msm-bus,vectors-KBps =
 	 *		<100 512 0 0>,
 	 *		<100 512 600000 800000>;
-	 *	qcom,smmu-support;
 	 *};
 	 * rc_node stands for "qcom,pcie", selected entries:
 	 * cell-index = <1>; (ctx->rc_index)
@@ -1149,7 +1077,6 @@
 		dev_err(ctx->dev, "Parent PCIE device index not found\n");
 		return -EINVAL;
 	}
-	ctx->use_smmu = of_property_read_bool(of_node, "qcom,smmu-support");
 	ctx->keep_radio_on_during_sleep = of_property_read_bool(of_node,
 		"qcom,keep-radio-on-during-sleep");
 	ctx->bus_scale = msm_bus_cl_get_pdata(pdev);
@@ -1158,28 +1085,6 @@
 		return -EINVAL;
 	}
 
-	ctx->smmu_s1_en = of_property_read_bool(of_node, "qcom,smmu-s1-en");
-	if (ctx->smmu_s1_en) {
-		ctx->smmu_fast_map = of_property_read_bool(
-						of_node, "qcom,smmu-fast-map");
-		ctx->smmu_coherent = of_property_read_bool(
-						of_node, "qcom,smmu-coherent");
-	}
-	rc = of_property_read_u32_array(dev->of_node, "qcom,smmu-mapping",
-			smmu_mapping, 2);
-	if (rc) {
-		dev_err(ctx->dev,
-			"Failed to read base/size smmu addresses %d, fallback to default\n",
-			rc);
-		ctx->smmu_base = SMMU_BASE;
-		ctx->smmu_size = SMMU_SIZE;
-	} else {
-		ctx->smmu_base = smmu_mapping[0];
-		ctx->smmu_size = smmu_mapping[1];
-	}
-	dev_dbg(ctx->dev, "smmu_base=0x%x smmu_sise=0x%x\n",
-		ctx->smmu_base, ctx->smmu_size);
-
 	/*== execute ==*/
 	/* turn device on */
 	rc = msm_11ad_init_vregs(ctx);
@@ -1310,10 +1215,9 @@
 		 "  gpio_dc = %d\n"
 		 "  sleep_clk_en = %d\n"
 		 "  rc_index = %d\n"
-		 "  use_smmu = %d\n"
 		 "  pcidev = %pK\n"
 		 "}\n", ctx, ctx->gpio_en, ctx->gpio_dc, ctx->sleep_clk_en,
-		 ctx->rc_index, ctx->use_smmu, ctx->pcidev);
+		 ctx->rc_index, ctx->pcidev);
 
 	platform_set_drvdata(pdev, ctx);
 	device_disable_async_suspend(&pcidev->dev);
@@ -1543,12 +1447,6 @@
 		ctx->msm_bus_handle = 0;
 	}
 
-	if (ctx->use_smmu) {
-		__depr_arm_iommu_detach_device(&ctx->pcidev->dev);
-		__depr_arm_iommu_release_mapping(ctx->mapping);
-		ctx->mapping = NULL;
-	}
-
 	memset(&ctx->rops, 0, sizeof(ctx->rops));
 	ctx->wil_handle = NULL;
 
@@ -1587,12 +1485,12 @@
 		break;
 	case WIL_PLATFORM_EVT_PRE_RESET:
 		/*
-		 * Enable rf_clk3 clock before resetting the device to ensure
+		 * Enable rf_clk clock before resetting the device to ensure
 		 * stable ref clock during the device reset
 		 */
 		if (ctx->features &
 		    BIT(WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL)) {
-			rc = msm_11ad_enable_clk(ctx, &ctx->rf_clk3);
+			rc = msm_11ad_enable_clk(ctx, &ctx->rf_clk);
 			if (rc) {
 				dev_err(ctx->dev,
 					"failed to enable clk, rc %d\n", rc);
@@ -1602,12 +1500,12 @@
 		break;
 	case WIL_PLATFORM_EVT_FW_RDY:
 		/*
-		 * Disable rf_clk3 clock after the device is up to allow
+		 * Disable rf_clk clock after the device is up to allow
 		 * the device to control it via its GPIO for power saving
 		 */
 		if (ctx->features &
 		    BIT(WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL))
-			msm_11ad_disable_clk(ctx, &ctx->rf_clk3);
+			msm_11ad_disable_clk(ctx, &ctx->rf_clk);
 
 		/*
 		 * Save golden config space for pci linkdown recovery.
@@ -1659,6 +1557,10 @@
 {
 	struct pci_dev *pcidev = to_pci_dev(dev);
 	struct msm11ad_ctx *ctx = pcidev2ctx(pcidev);
+	struct iommu_domain *domain;
+	int bypass = 0;
+	int fastmap = 0;
+	int coherent = 0;
 
 	if (!ctx) {
 		pr_err("Context not found for pcidev %pK\n", pcidev);
@@ -1673,11 +1575,19 @@
 		return NULL;
 	}
 	dev_info(ctx->dev, "msm_bus handle 0x%x\n", ctx->msm_bus_handle);
-	/* smmu */
-	if (msm_11ad_smmu_init(ctx)) {
-		msm_bus_scale_unregister_client(ctx->msm_bus_handle);
-		ctx->msm_bus_handle = 0;
-		return NULL;
+
+	domain = iommu_get_domain_for_dev(&pcidev->dev);
+	if (domain) {
+		iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS, &bypass);
+		iommu_domain_get_attr(domain, DOMAIN_ATTR_FAST, &fastmap);
+		iommu_domain_get_attr(domain,
+				      DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT,
+				      &coherent);
+
+		dev_info(ctx->dev, "SMMU initialized, bypass=%d, fastmap=%d, coherent=%d\n",
+			 bypass, fastmap, coherent);
+	} else {
+		dev_warn(ctx->dev, "Unable to get iommu domain\n");
 	}
 
 	/* subsystem restart */
diff --git a/drivers/platform/msm/qcom-geni-se.c b/drivers/platform/msm/qcom-geni-se.c
index 1802f16..c9c574e 100644
--- a/drivers/platform/msm/qcom-geni-se.c
+++ b/drivers/platform/msm/qcom-geni-se.c
@@ -36,6 +36,11 @@
 static unsigned long default_bus_bw_set[] = {0, 19200000, 50000000,
 				100000000, 150000000, 200000000, 236000000};
 
+struct bus_vectors {
+	int src;
+	int dst;
+};
+
 /**
  * @struct geni_se_device - Data structure to represent the QUPv3 Core
  * @dev:		Device pointer of the QUPv3 core.
@@ -45,18 +50,32 @@
  * @iommu_s1_bypass:	Bypass IOMMU stage 1 translation.
  * @base:		Base address of this instance of QUPv3 core.
  * @bus_bw:		Client handle to the bus bandwidth request.
+ * @bus_bw_noc:		Client handle to the QUP clock and DDR path bus
+			bandwidth request.
  * @bus_mas_id:		Master Endpoint ID for bus BW request.
  * @bus_slv_id:		Slave Endpoint ID for bus BW request.
  * @geni_dev_lock:		Lock to protect the bus ab & ib values, list.
  * @ab_list_head:	Sorted resource list based on average bus BW.
  * @ib_list_head:	Sorted resource list based on instantaneous bus BW.
+ * @ab_list_head_noc:	Sorted resource list based on average DDR path bus BW.
+ * @ib_list_head_noc:	Sorted resource list based on instantaneous DDR path
+			bus BW.
  * @cur_ab:		Current Bus Average BW request value.
  * @cur_ib:		Current Bus Instantaneous BW request value.
+ * @cur_ab_noc:		Current DDR Bus Average BW request value.
+ * @cur_ib_noc:		Current DDR Bus Instantaneous BW request value.
  * @bus_bw_set:		Clock plan for the bus driver.
+ * @bus_bw_set_noc:	Clock plan for DDR path.
  * @cur_bus_bw_idx:	Current index within the bus clock plan.
+ * @cur_bus_bw_idx_noc:	Current index within the DDR path clock plan.
  * @num_clk_levels:	Number of valid clock levels in clk_perf_tbl.
  * @clk_perf_tbl:	Table of clock frequency input to Serial Engine clock.
- * @log_ctx:		Logging context to hold the debug information
+ * @log_ctx:		Logging context to hold the debug information.
+ * @vectors:		Structure to store Master End and Slave End IDs for
+			QUPv3 clock and DDR path bus BW request.
+ * @num_paths:		Two paths. QUPv3 clock and DDR paths.
+ * @num_usecases:	One usecase to vote for both QUPv3 clock and DDR paths.
+ * @pdata:		To register our client handle with the ICB driver.
  */
 struct geni_se_device {
 	struct device *dev;
@@ -66,19 +85,31 @@
 	bool iommu_s1_bypass;
 	void __iomem *base;
 	struct msm_bus_client_handle *bus_bw;
+	uint32_t bus_bw_noc;
 	u32 bus_mas_id;
 	u32 bus_slv_id;
 	struct mutex geni_dev_lock;
 	struct list_head ab_list_head;
 	struct list_head ib_list_head;
+	struct list_head ab_list_head_noc;
+	struct list_head ib_list_head_noc;
 	unsigned long cur_ab;
 	unsigned long cur_ib;
+	unsigned long cur_ab_noc;
+	unsigned long cur_ib_noc;
 	int bus_bw_set_size;
+	int bus_bw_set_size_noc;
 	unsigned long *bus_bw_set;
+	unsigned long *bus_bw_set_noc;
 	int cur_bus_bw_idx;
+	int cur_bus_bw_idx_noc;
 	unsigned int num_clk_levels;
 	unsigned long *clk_perf_tbl;
 	void *log_ctx;
+	struct bus_vectors *vectors;
+	int num_paths;
+	int num_usecases;
+	struct msm_bus_scale_pdata *pdata;
 };
 
 /* Offset of QUPV3 Hardware Version Register */
@@ -641,11 +672,37 @@
 	return bus_bw_update;
 }
 
+static bool geni_se_check_bus_bw_noc(struct geni_se_device *geni_se_dev)
+{
+	int i;
+	int new_bus_bw_idx = geni_se_dev->bus_bw_set_size_noc - 1;
+	unsigned long new_bus_bw;
+	bool bus_bw_update = false;
+
+	new_bus_bw = max(geni_se_dev->cur_ib_noc, geni_se_dev->cur_ab_noc) /
+							DEFAULT_BUS_WIDTH;
+
+	for (i = 0; i < geni_se_dev->bus_bw_set_size_noc; i++) {
+		if (geni_se_dev->bus_bw_set_noc[i] >= new_bus_bw) {
+			new_bus_bw_idx = i;
+			break;
+		}
+	}
+
+	if (geni_se_dev->cur_bus_bw_idx_noc != new_bus_bw_idx) {
+		geni_se_dev->cur_bus_bw_idx_noc = new_bus_bw_idx;
+		bus_bw_update = true;
+	}
+
+	return bus_bw_update;
+}
+
 static int geni_se_rmv_ab_ib(struct geni_se_device *geni_se_dev,
 			     struct se_geni_rsc *rsc)
 {
 	struct se_geni_rsc *tmp;
 	bool bus_bw_update = false;
+	bool bus_bw_update_noc = false;
 	int ret = 0;
 
 	if (unlikely(list_empty(&rsc->ab_list) || list_empty(&rsc->ib_list)))
@@ -664,14 +721,51 @@
 		geni_se_dev->cur_ib = 0;
 
 	bus_bw_update = geni_se_check_bus_bw(geni_se_dev);
-	if (bus_bw_update)
+
+	if (geni_se_dev->num_paths == 2) {
+		geni_se_dev->pdata->usecase[1].vectors[0].ab  =
+			geni_se_dev->cur_ab;
+		geni_se_dev->pdata->usecase[1].vectors[0].ib  =
+			geni_se_dev->cur_ib;
+	}
+
+	if (bus_bw_update && geni_se_dev->num_paths != 2) {
 		ret = msm_bus_scale_update_bw(geni_se_dev->bus_bw,
 						geni_se_dev->cur_ab,
 						geni_se_dev->cur_ib);
-	GENI_SE_DBG(geni_se_dev->log_ctx, false, NULL,
+		GENI_SE_DBG(geni_se_dev->log_ctx, false, NULL,
 			"%s: %s: cur_ab_ib(%lu:%lu) req_ab_ib(%lu:%lu) %d\n",
 			__func__, dev_name(rsc->ctrl_dev), geni_se_dev->cur_ab,
 			geni_se_dev->cur_ib, rsc->ab, rsc->ib, bus_bw_update);
+	}
+
+	if (geni_se_dev->num_paths == 2) {
+		if (unlikely(list_empty(&rsc->ab_list_noc) ||
+					list_empty(&rsc->ib_list_noc)))
+			return -EINVAL;
+
+		list_del_init(&rsc->ab_list_noc);
+		geni_se_dev->cur_ab_noc -= rsc->ab_noc;
+
+		list_del_init(&rsc->ib_list_noc);
+		tmp = list_first_entry_or_null(&geni_se_dev->ib_list_head_noc,
+					struct se_geni_rsc, ib_list_noc);
+		if (tmp && tmp->ib_noc != geni_se_dev->cur_ib_noc)
+			geni_se_dev->cur_ib_noc = tmp->ib_noc;
+		else if (!tmp && geni_se_dev->cur_ib_noc)
+			geni_se_dev->cur_ib_noc = 0;
+
+		bus_bw_update_noc = geni_se_check_bus_bw_noc(geni_se_dev);
+
+			geni_se_dev->pdata->usecase[1].vectors[1].ab  =
+				geni_se_dev->cur_ab_noc;
+			geni_se_dev->pdata->usecase[1].vectors[1].ib  =
+				geni_se_dev->cur_ib_noc;
+
+		if (bus_bw_update_noc || bus_bw_update)
+			ret = msm_bus_scale_client_update_request
+						(geni_se_dev->bus_bw_noc, 1);
+	}
 	mutex_unlock(&geni_se_dev->geni_dev_lock);
 	return ret;
 }
@@ -692,7 +786,8 @@
 		return -EINVAL;
 
 	geni_se_dev = dev_get_drvdata(rsc->wrapper_dev);
-	if (unlikely(!geni_se_dev || !geni_se_dev->bus_bw))
+	if (unlikely(!geni_se_dev || !(geni_se_dev->bus_bw ||
+					geni_se_dev->bus_bw_noc)))
 		return -ENODEV;
 
 	clk_disable_unprepare(rsc->se_clk);
@@ -703,6 +798,7 @@
 	if (ret)
 		GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
 			"%s: Error %d during bus_bw_update\n", __func__, ret);
+
 	return ret;
 }
 EXPORT_SYMBOL(se_geni_clks_off);
@@ -723,7 +819,9 @@
 		return -EINVAL;
 
 	geni_se_dev = dev_get_drvdata(rsc->wrapper_dev);
-	if (unlikely(!geni_se_dev || !geni_se_dev->bus_bw))
+	if (unlikely(!geni_se_dev ||
+			!(geni_se_dev->bus_bw ||
+					geni_se_dev->bus_bw_noc)))
 		return -ENODEV;
 
 	ret = se_geni_clks_off(rsc);
@@ -743,10 +841,13 @@
 {
 	struct se_geni_rsc *tmp = NULL;
 	struct list_head *ins_list_head;
+	struct list_head *ins_list_head_noc;
 	bool bus_bw_update = false;
+	bool bus_bw_update_noc = false;
 	int ret = 0;
 
 	mutex_lock(&geni_se_dev->geni_dev_lock);
+
 	list_add(&rsc->ab_list, &geni_se_dev->ab_list_head);
 	geni_se_dev->cur_ab += rsc->ab;
 
@@ -762,14 +863,51 @@
 		geni_se_dev->cur_ib = rsc->ib;
 
 	bus_bw_update = geni_se_check_bus_bw(geni_se_dev);
-	if (bus_bw_update)
+
+	if (geni_se_dev->num_paths == 2) {
+		geni_se_dev->pdata->usecase[1].vectors[0].ab  =
+			geni_se_dev->cur_ab;
+		geni_se_dev->pdata->usecase[1].vectors[0].ib  =
+			geni_se_dev->cur_ib;
+	}
+
+	if (bus_bw_update && geni_se_dev->num_paths != 2) {
 		ret = msm_bus_scale_update_bw(geni_se_dev->bus_bw,
 						geni_se_dev->cur_ab,
 						geni_se_dev->cur_ib);
-	GENI_SE_DBG(geni_se_dev->log_ctx, false, NULL,
-			"%s: %s: cur_ab_ib(%lu:%lu) req_ab_ib(%lu:%lu) %d\n",
-			__func__, dev_name(rsc->ctrl_dev), geni_se_dev->cur_ab,
-			geni_se_dev->cur_ib, rsc->ab, rsc->ib, bus_bw_update);
+		GENI_SE_DBG(geni_se_dev->log_ctx, false, NULL,
+			"%s: %lu:%lu (%lu:%lu) %d\n", __func__,
+			geni_se_dev->cur_ab, geni_se_dev->cur_ib,
+			rsc->ab, rsc->ib, bus_bw_update);
+	}
+
+	if (geni_se_dev->num_paths == 2) {
+
+		list_add(&rsc->ab_list_noc, &geni_se_dev->ab_list_head_noc);
+		geni_se_dev->cur_ab_noc += rsc->ab_noc;
+		ins_list_head_noc = &geni_se_dev->ib_list_head_noc;
+
+		list_for_each_entry(tmp, &geni_se_dev->ib_list_head_noc,
+					ib_list_noc) {
+			if (tmp->ib < rsc->ib)
+				break;
+			ins_list_head_noc = &tmp->ib_list_noc;
+		}
+		list_add(&rsc->ib_list_noc, ins_list_head_noc);
+
+		if (ins_list_head_noc == &geni_se_dev->ib_list_head_noc)
+			geni_se_dev->cur_ib_noc = rsc->ib_noc;
+
+		bus_bw_update_noc = geni_se_check_bus_bw_noc(geni_se_dev);
+
+			geni_se_dev->pdata->usecase[1].vectors[1].ab  =
+				geni_se_dev->cur_ab_noc;
+			geni_se_dev->pdata->usecase[1].vectors[1].ib  =
+				geni_se_dev->cur_ib_noc;
+		if (bus_bw_update_noc || bus_bw_update)
+			ret = msm_bus_scale_client_update_request
+						(geni_se_dev->bus_bw_noc, 1);
+	}
 	mutex_unlock(&geni_se_dev->geni_dev_lock);
 	return ret;
 }
@@ -882,21 +1020,42 @@
 	if (unlikely(!geni_se_dev))
 		return -EPROBE_DEFER;
 
-	if (unlikely(IS_ERR_OR_NULL(geni_se_dev->bus_bw))) {
-		geni_se_dev->bus_bw = msm_bus_scale_register(
-					geni_se_dev->bus_mas_id,
-					geni_se_dev->bus_slv_id,
-					(char *)dev_name(geni_se_dev->dev),
-					false);
-		if (IS_ERR_OR_NULL(geni_se_dev->bus_bw)) {
-			GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
-				"%s: Error creating bus client\n", __func__);
-			return (int)PTR_ERR(geni_se_dev->bus_bw);
+	if (geni_se_dev->num_paths == 2) {
+		if (unlikely(!(geni_se_dev->bus_bw_noc))) {
+			geni_se_dev->bus_bw_noc =
+			msm_bus_scale_register_client(geni_se_dev->pdata);
+			if (!(geni_se_dev->bus_bw_noc)) {
+				GENI_SE_ERR(geni_se_dev->log_ctx,
+					false, NULL,
+				"%s: Error creating bus client\n",  __func__);
+				return -EFAULT;
+			}
 		}
+
+		rsc->ab = ab;
+		rsc->ib = ab;
+		rsc->ab_noc = 0;
+		rsc->ib_noc = ib;
+		INIT_LIST_HEAD(&rsc->ab_list_noc);
+		INIT_LIST_HEAD(&rsc->ib_list_noc);
+	} else {
+		if (unlikely(IS_ERR_OR_NULL(geni_se_dev->bus_bw))) {
+			geni_se_dev->bus_bw = msm_bus_scale_register(
+						geni_se_dev->bus_mas_id,
+						geni_se_dev->bus_slv_id,
+					(char *)dev_name(geni_se_dev->dev),
+						false);
+			if (IS_ERR_OR_NULL(geni_se_dev->bus_bw)) {
+				GENI_SE_ERR(geni_se_dev->log_ctx,
+					false, NULL,
+				"%s: Error creating bus client\n", __func__);
+				return (int)PTR_ERR(geni_se_dev->bus_bw);
+			}
+		}
+		rsc->ab = ab;
+		rsc->ib = ib;
 	}
 
-	rsc->ab = ab;
-	rsc->ib = ib;
 	INIT_LIST_HEAD(&rsc->ab_list);
 	INIT_LIST_HEAD(&rsc->ib_list);
 
@@ -993,6 +1152,9 @@
 	unsigned long *tbl;
 	int num_clk_levels;
 	int i;
+	unsigned long best_delta = 0;
+	unsigned long new_delta;
+	unsigned int divider;
 
 	num_clk_levels = geni_se_clk_tbl_get(rsc, &tbl);
 	if (num_clk_levels < 0)
@@ -1002,17 +1164,21 @@
 		return -EFAULT;
 
 	*res_freq = 0;
-	for (i = 0; i < num_clk_levels; i++) {
-		if (!(tbl[i] % req_freq)) {
-			*index = i;
-			*res_freq = tbl[i];
-			return 0;
-		}
 
-		if (!(*res_freq) || ((tbl[i] > *res_freq) &&
-				     (tbl[i] < req_freq))) {
+	for (i = 0; i < num_clk_levels; i++) {
+		divider = DIV_ROUND_UP(tbl[i], req_freq);
+		new_delta = req_freq - (tbl[i] / divider);
+
+		if (!best_delta || new_delta < best_delta) {
+			/* We have a new best! */
 			*index = i;
 			*res_freq = tbl[i];
+
+			/* If the new best is exact then we're done */
+			if (new_delta == 0)
+				return 0;
+
+			best_delta = new_delta;
 		}
 	}
 
@@ -1422,6 +1588,87 @@
 	{}
 };
 
+static struct msm_bus_scale_pdata *ab_ib_register(struct platform_device *pdev,
+				struct geni_se_device *host)
+{
+	int rc = 0;
+	struct device *dev = &pdev->dev;
+	int i = 0, j, len;
+	bool mem_err = false;
+	const uint32_t *vec_arr = NULL;
+	struct msm_bus_scale_pdata *pdata = NULL;
+	struct msm_bus_paths *usecase = NULL;
+
+	vec_arr = of_get_property(dev->of_node,
+			"qcom,msm-bus,vectors-bus-ids", &len);
+	if (vec_arr == NULL) {
+		pr_err("Error: Vector array not found\n");
+		rc = 1;
+		goto out;
+	}
+
+	if (len != host->num_paths * sizeof(uint32_t) * 2) {
+		pr_err("Error: Length-error on getting vectors\n");
+		rc = 1;
+		goto out;
+	}
+
+
+	pdata = devm_kzalloc(dev, sizeof(struct msm_bus_scale_pdata),
+							GFP_KERNEL);
+	if (!pdata) {
+		mem_err = true;
+		goto out;
+	}
+
+	pdata->name = (char *)dev_name(host->dev);
+
+	pdata->num_usecases = 2;
+
+	pdata->active_only = 0;
+
+	usecase = devm_kzalloc(dev, (sizeof(struct msm_bus_paths) *
+		pdata->num_usecases), GFP_KERNEL);
+	if (!usecase) {
+		mem_err = true;
+		goto out;
+	}
+
+	for (i = 0; i < pdata->num_usecases; i++) {
+		usecase[i].num_paths = host->num_paths;
+		usecase[i].vectors = devm_kzalloc(dev, host->num_paths *
+			sizeof(struct msm_bus_vectors), GFP_KERNEL);
+		if (!usecase[i].vectors) {
+			mem_err = true;
+			pr_err("Error: Mem alloc failure in vectors\n");
+			goto out;
+		}
+
+		for (j = 0; j < host->num_paths; j++) {
+			int index = (j * 2);
+
+			usecase[i].vectors[j].src =
+					be32_to_cpu(vec_arr[index]);
+			usecase[i].vectors[j].dst =
+					be32_to_cpu(vec_arr[index + 1]);
+			usecase[i].vectors[j].ab = 0;
+			usecase[i].vectors[j].ib = 0;
+		}
+	}
+
+	pdata->usecase = usecase;
+
+	return pdata;
+out:
+	if (mem_err) {
+		for ( ; i > 0; i--)
+			devm_kfree(dev, usecase[i-1].vectors);
+		devm_kfree(dev, usecase);
+		devm_kfree(dev, pdata);
+	}
+	return NULL;
+}
+
 static int geni_se_iommu_probe(struct device *dev)
 {
 	struct geni_se_device *geni_se_dev;
@@ -1482,29 +1729,57 @@
 	}
 
 	geni_se_dev->dev = dev;
-	geni_se_dev->cb_dev = dev;
-	ret = of_property_read_u32(dev->of_node, "qcom,bus-mas-id",
-				   &geni_se_dev->bus_mas_id);
-	if (ret) {
-		dev_err(dev, "%s: Error missing bus master id\n", __func__);
-		devm_iounmap(dev, geni_se_dev->base);
-		devm_kfree(dev, geni_se_dev);
+
+	ret = of_property_read_u32(dev->of_node, "qcom,msm-bus,num-paths",
+					&geni_se_dev->num_paths);
+	if (!ret) {
+		geni_se_dev->pdata = ab_ib_register(pdev, geni_se_dev);
+		if (geni_se_dev->pdata == NULL) {
+			dev_err(dev,
+			"%s: Error missing bus master and slave id\n",
+								__func__);
+			devm_iounmap(dev, geni_se_dev->base);
+			devm_kfree(dev, geni_se_dev);
+		}
 	}
-	ret = of_property_read_u32(dev->of_node, "qcom,bus-slv-id",
+
+	else {
+		geni_se_dev->num_paths = 1;
+		ret = of_property_read_u32(dev->of_node, "qcom,bus-mas-id",
+				   &geni_se_dev->bus_mas_id);
+		if (ret) {
+			dev_err(dev, "%s: Error missing bus master id\n",
+								__func__);
+			devm_iounmap(dev, geni_se_dev->base);
+			devm_kfree(dev, geni_se_dev);
+		}
+		ret = of_property_read_u32(dev->of_node, "qcom,bus-slv-id",
 				   &geni_se_dev->bus_slv_id);
-	if (ret) {
-		dev_err(dev, "%s: Error missing bus slave id\n", __func__);
-		devm_iounmap(dev, geni_se_dev->base);
-		devm_kfree(dev, geni_se_dev);
+		if (ret) {
+			dev_err(dev, "%s: Error missing bus slave id\n",
+								 __func__);
+			devm_iounmap(dev, geni_se_dev->base);
+			devm_kfree(dev, geni_se_dev);
+		}
 	}
 
 	geni_se_dev->iommu_s1_bypass = of_property_read_bool(dev->of_node,
 							"qcom,iommu-s1-bypass");
 	geni_se_dev->bus_bw_set = default_bus_bw_set;
-	geni_se_dev->bus_bw_set_size = ARRAY_SIZE(default_bus_bw_set);
+	geni_se_dev->bus_bw_set_size =
+				ARRAY_SIZE(default_bus_bw_set);
+	if (geni_se_dev->num_paths == 2) {
+		geni_se_dev->bus_bw_set_noc = default_bus_bw_set;
+		geni_se_dev->bus_bw_set_size_noc =
+				ARRAY_SIZE(default_bus_bw_set);
+	}
 	mutex_init(&geni_se_dev->iommu_lock);
 	INIT_LIST_HEAD(&geni_se_dev->ab_list_head);
 	INIT_LIST_HEAD(&geni_se_dev->ib_list_head);
+	if (geni_se_dev->num_paths == 2) {
+		INIT_LIST_HEAD(&geni_se_dev->ab_list_head_noc);
+		INIT_LIST_HEAD(&geni_se_dev->ib_list_head_noc);
+	}
 	mutex_init(&geni_se_dev->geni_dev_lock);
 	geni_se_dev->log_ctx = ipc_log_context_create(NUM_LOG_PAGES,
 						dev_name(geni_se_dev->dev), 0);
diff --git a/drivers/platform/msm/sps/sps.c b/drivers/platform/msm/sps/sps.c
index 81c2ec5..b325676 100644
--- a/drivers/platform/msm/sps/sps.c
+++ b/drivers/platform/msm/sps/sps.c
@@ -2328,8 +2328,11 @@
 	mutex_lock(&bam->lock);
 	sps_bam_device_de_init(bam);
 	mutex_unlock(&bam->lock);
+	ipc_log_context_destroy(bam->ipc_log0);
 	ipc_log_context_destroy(bam->ipc_log1);
 	ipc_log_context_destroy(bam->ipc_log2);
+	ipc_log_context_destroy(bam->ipc_log3);
+	ipc_log_context_destroy(bam->ipc_log4);
 	if (bam->props.virt_size)
 		(void)iounmap(bam->props.virt_addr);
 
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 0c1aa6c..7563c07 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -856,6 +856,7 @@
 config ACPI_CMPC
 	tristate "CMPC Laptop Extras"
 	depends on ACPI && INPUT
+	depends on BACKLIGHT_LCD_SUPPORT
 	depends on RFKILL || RFKILL=n
 	select BACKLIGHT_CLASS_DEVICE
 	help
@@ -1077,6 +1078,7 @@
 config SAMSUNG_Q10
 	tristate "Samsung Q10 Extras"
 	depends on ACPI
+	depends on BACKLIGHT_LCD_SUPPORT
 	select BACKLIGHT_CLASS_DEVICE
 	---help---
 	  This driver provides support for backlight control on Samsung Q10
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index d16e4b7..301006d 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -450,6 +450,7 @@
 	POWER_SUPPLY_ATTR(qc_opti_disable),
 	POWER_SUPPLY_ATTR(cc_soc),
 	POWER_SUPPLY_ATTR(batt_age_level),
+	POWER_SUPPLY_ATTR(scale_mode_en),
 	/* Charge pump properties */
 	POWER_SUPPLY_ATTR(cp_status1),
 	POWER_SUPPLY_ATTR(cp_status2),
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen4.c b/drivers/power/supply/qcom/qpnp-fg-gen4.c
index e053a07..4b84efd 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen4.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen4.c
@@ -264,6 +264,7 @@
 	bool			rslow_low;
 	bool			rapid_soc_dec_en;
 	bool			vbatt_low;
+	bool			chg_term_good;
 };
 
 struct bias_config {
@@ -2348,8 +2349,16 @@
 				new_recharge_soc = msoc - (FULL_CAPACITY -
 								recharge_soc);
 				fg->recharge_soc_adjusted = true;
+				if (fg->health == POWER_SUPPLY_HEALTH_GOOD)
+					chip->chg_term_good = true;
 			} else {
-				/* adjusted already, do nothing */
+				/*
+				 * If charge termination happened properly then
+				 * do nothing.
+				 */
+				if (chip->chg_term_good)
+					return 0;
+
 				if (fg->health != POWER_SUPPLY_HEALTH_GOOD)
 					return 0;
 
@@ -2360,7 +2369,7 @@
 
 				new_recharge_soc = recharge_soc;
 				fg->recharge_soc_adjusted = false;
-				return 0;
+				chip->chg_term_good = false;
 			}
 		} else {
 			if (!fg->recharge_soc_adjusted)
@@ -2379,11 +2388,13 @@
 			/* Restore the default value */
 			new_recharge_soc = recharge_soc;
 			fg->recharge_soc_adjusted = false;
+			chip->chg_term_good = false;
 		}
 	} else {
 		/* Restore the default value */
 		new_recharge_soc = recharge_soc;
 		fg->recharge_soc_adjusted = false;
+		chip->chg_term_good = false;
 	}
 
 	if (recharge_soc_status == fg->recharge_soc_adjusted)
diff --git a/drivers/power/supply/qcom/qpnp-qg.c b/drivers/power/supply/qcom/qpnp-qg.c
index 86926db..da7e614 100644
--- a/drivers/power/supply/qcom/qpnp-qg.c
+++ b/drivers/power/supply/qcom/qpnp-qg.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt)	"QG-K: %s: " fmt, __func__
@@ -1807,6 +1807,9 @@
 	case POWER_SUPPLY_PROP_SOH:
 		pval->intval = chip->soh;
 		break;
+	case POWER_SUPPLY_PROP_CC_SOC:
+		rc = qg_get_cc_soc(chip, &pval->intval);
+		break;
 	default:
 		pr_debug("Unsupported property %d\n", psp);
 		break;
@@ -1857,6 +1860,7 @@
 	POWER_SUPPLY_PROP_ESR_ACTUAL,
 	POWER_SUPPLY_PROP_ESR_NOMINAL,
 	POWER_SUPPLY_PROP_SOH,
+	POWER_SUPPLY_PROP_CC_SOC,
 };
 
 static const struct power_supply_desc qg_psy_desc = {
diff --git a/drivers/power/supply/qcom/qpnp-smb5.c b/drivers/power/supply/qcom/qpnp-smb5.c
index d22bfa1..b11818e 100644
--- a/drivers/power/supply/qcom/qpnp-smb5.c
+++ b/drivers/power/supply/qcom/qpnp-smb5.c
@@ -322,19 +322,21 @@
 		chip->chg.smb_version = PM8150B_SUBTYPE;
 		chg->param = smb5_pm8150b_params;
 		chg->name = "pm8150b_charger";
+		chg->wa_flags |= CHG_TERMINATION_WA;
 		break;
 	case PM6150_SUBTYPE:
 		chip->chg.smb_version = PM6150_SUBTYPE;
 		chg->param = smb5_pm8150b_params;
 		chg->name = "pm6150_charger";
-		chg->wa_flags |= SW_THERM_REGULATION_WA;
+		chg->wa_flags |= SW_THERM_REGULATION_WA | CHG_TERMINATION_WA;
 		if (pmic_rev_id->rev4 >= 2)
 			chg->uusb_moisture_protection_capable = true;
 		chg->main_fcc_max = PM6150_MAX_FCC_UA;
 		break;
 	case PMI632_SUBTYPE:
 		chip->chg.smb_version = PMI632_SUBTYPE;
-		chg->wa_flags |= WEAK_ADAPTER_WA | USBIN_OV_WA;
+		chg->wa_flags |= WEAK_ADAPTER_WA | USBIN_OV_WA
+				| CHG_TERMINATION_WA;
 		chg->param = smb5_pmi632_params;
 		chg->use_extcon = true;
 		chg->name = "pmi632_charger";
@@ -436,8 +438,8 @@
 	chg->sw_jeita_enabled = of_property_read_bool(node,
 				"qcom,sw-jeita-enable");
 
-	chg->pd_not_supported = of_property_read_bool(node,
-				"qcom,usb-pd-disable");
+	chg->pd_not_supported = chg->pd_not_supported ||
+			of_property_read_bool(node, "qcom,usb-pd-disable");
 
 	chg->lpd_disabled = of_property_read_bool(node, "qcom,lpd-disable");
 
@@ -2444,7 +2446,7 @@
 
 	val = (ilog2(chip->dt.wd_bark_time / 16) << BARK_WDOG_TIMEOUT_SHIFT)
 			& BARK_WDOG_TIMEOUT_MASK;
-	val |= BITE_WDOG_TIMEOUT_8S;
+	val |= (BITE_WDOG_TIMEOUT_8S | BITE_WDOG_DISABLE_CHARGING_CFG_BIT);
 	rc = smblib_masked_write(chg, SNARL_BARK_BITE_WD_CFG_REG,
 			BITE_WDOG_DISABLE_CHARGING_CFG_BIT |
 			BARK_WDOG_TIMEOUT_MASK | BITE_WDOG_TIMEOUT_MASK,
diff --git a/drivers/power/supply/qcom/smb1390-charger-psy.c b/drivers/power/supply/qcom/smb1390-charger-psy.c
index af4dc03..b00ac7c 100644
--- a/drivers/power/supply/qcom/smb1390-charger-psy.c
+++ b/drivers/power/supply/qcom/smb1390-charger-psy.c
@@ -5,6 +5,7 @@
 
 #define pr_fmt(fmt) "SMB1390: %s: " fmt, __func__
 
+#include <linux/debugfs.h>
 #include <linux/device.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
@@ -84,6 +85,16 @@
 #define SRC_VOTER		"SRC_VOTER"
 #define SWITCHER_TOGGLE_VOTER	"SWITCHER_TOGGLE_VOTER"
 
+#define smb1390_dbg(chip, reason, fmt, ...)				\
+	do {								\
+		if (chip->debug_mask & (reason))			\
+			pr_info("SMB1390: %s: " fmt, __func__,		\
+				##__VA_ARGS__);				\
+		else							\
+			pr_debug("SMB1390: %s: " fmt, __func__,		\
+				##__VA_ARGS__);				\
+	} while (0)
+
 enum {
 	SWITCHER_OFF_WINDOW_IRQ = 0,
 	SWITCHER_OFF_FAULT_IRQ,
@@ -101,6 +112,14 @@
 	SMB_PIN_EN,
 };
 
+enum print_reason {
+	PR_INTERRUPT		= BIT(0),
+	PR_REGISTER		= BIT(1),
+	PR_INFO			= BIT(2),
+	PR_EXT_DEPENDENCY	= BIT(3),
+	PR_MISC			= BIT(4),
+};
+
 struct smb1390_iio {
 	struct iio_channel	*die_temp_chan;
 };
@@ -110,6 +129,7 @@
 	struct regmap		*regmap;
 	struct notifier_block	nb;
 	struct wakeup_source	*cp_ws;
+	struct dentry		*dfs_root;
 
 	/* work structs */
 	struct work_struct	status_change_work;
@@ -140,6 +160,7 @@
 	bool			switcher_enabled;
 	int			die_temp;
 	bool			suspended;
+	u32			debug_mask;
 };
 
 struct smb_irq {
@@ -166,7 +187,8 @@
 {
 	int rc;
 
-	pr_debug("Writing 0x%02x to 0x%04x with mask 0x%02x\n", val, reg, mask);
+	smb1390_dbg(chip, PR_REGISTER, "Writing 0x%02x to 0x%04x with mask 0x%02x\n",
+			val, reg, mask);
 	rc = regmap_update_bits(chip->regmap, reg, mask, val);
 	if (rc < 0)
 		pr_err("Couldn't write 0x%02x to 0x%04x with mask 0x%02x\n",
@@ -180,7 +202,7 @@
 	if (!chip->batt_psy) {
 		chip->batt_psy = power_supply_get_by_name("battery");
 		if (!chip->batt_psy) {
-			pr_debug("Couldn't find battery psy\n");
+			smb1390_dbg(chip, PR_EXT_DEPENDENCY, "Couldn't find battery psy\n");
 			return false;
 		}
 	}
@@ -188,7 +210,7 @@
 	if (!chip->usb_psy) {
 		chip->usb_psy = power_supply_get_by_name("usb");
 		if (!chip->usb_psy) {
-			pr_debug("Couldn't find usb psy\n");
+			smb1390_dbg(chip, PR_EXT_DEPENDENCY, "Couldn't find usb psy\n");
 			return false;
 		}
 	}
@@ -196,7 +218,7 @@
 	if (!chip->dc_psy) {
 		chip->dc_psy = power_supply_get_by_name("dc");
 		if (!chip->dc_psy) {
-			pr_debug("Couldn't find dc psy\n");
+			smb1390_dbg(chip, PR_EXT_DEPENDENCY, "Couldn't find dc psy\n");
 			return false;
 		}
 	}
@@ -204,7 +226,7 @@
 	if (!chip->fcc_votable) {
 		chip->fcc_votable = find_votable("FCC");
 		if (!chip->fcc_votable) {
-			pr_debug("Couldn't find FCC votable\n");
+			smb1390_dbg(chip, PR_EXT_DEPENDENCY, "Couldn't find FCC votable\n");
 			return false;
 		}
 	}
@@ -212,13 +234,13 @@
 	if (!chip->fv_votable) {
 		chip->fv_votable = find_votable("FV");
 		if (!chip->fv_votable) {
-			pr_debug("Couldn't find FV votable\n");
+			smb1390_dbg(chip, PR_EXT_DEPENDENCY, "Couldn't find FV votable\n");
 			return false;
 		}
 	}
 
 	if (!chip->disable_votable) {
-		pr_debug("Couldn't find CP DISABLE votable\n");
+		smb1390_dbg(chip, PR_MISC, "Couldn't find CP DISABLE votable\n");
 		return false;
 	}
 
@@ -254,7 +276,8 @@
 		*enable = !!(status & EN_PIN_OUT2_BIT);
 		break;
 	default:
-		pr_debug("cp_en status %d is not supported\n", id);
+		smb1390_dbg(chip, PR_MISC, "cp_en status %d is not supported\n",
+				id);
 		rc = -EINVAL;
 		break;
 	}
@@ -270,7 +293,8 @@
 
 	for (i = 0; i < NUM_IRQS; ++i) {
 		if (irq == chip->irqs[i]) {
-			pr_debug("%s IRQ triggered\n", smb_irqs[i].name);
+			smb1390_dbg(chip, PR_INTERRUPT, "%s IRQ triggered\n",
+				smb_irqs[i].name);
 			chip->irq_status |= 1 << i;
 		}
 	}
@@ -471,10 +495,11 @@
 
 	/* ILIM less than 1A is not accurate; disable charging */
 	if (ilim_uA < 1000000) {
-		pr_debug("ILIM %duA is too low to allow charging\n", ilim_uA);
+		smb1390_dbg(chip, PR_INFO, "ILIM %duA is too low to allow charging\n",
+			ilim_uA);
 		vote(chip->disable_votable, ILIM_VOTER, true, 0);
 	} else {
-		pr_debug("ILIM set to %duA\n", ilim_uA);
+		smb1390_dbg(chip, PR_INFO, "ILIM set to %duA\n", ilim_uA);
 		vote(chip->disable_votable, ILIM_VOTER, false, 0);
 	}
 
@@ -621,7 +646,7 @@
 
 		if (get_effective_result(chip->fv_votable) >
 						chip->taper_entry_fv) {
-			pr_debug("Float voltage increased. Exiting taper\n");
+			smb1390_dbg(chip, PR_INFO, "Float voltage increased. Exiting taper\n");
 			goto out;
 		} else {
 			chip->taper_entry_fv =
@@ -631,7 +656,8 @@
 		if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER) {
 			fcc_uA = get_effective_result(chip->fcc_votable)
 								- 100000;
-			pr_debug("taper work reducing FCC to %duA\n", fcc_uA);
+			smb1390_dbg(chip, PR_INFO, "taper work reducing FCC to %duA\n",
+				fcc_uA);
 			vote(chip->fcc_votable, CP_VOTER, true, fcc_uA);
 
 			if (fcc_uA < 2000000) {
@@ -640,13 +666,13 @@
 				goto out;
 			}
 		} else {
-			pr_debug("In fast charging. Wait for next taper\n");
+			smb1390_dbg(chip, PR_INFO, "In fast charging. Wait for next taper\n");
 		}
 
 		msleep(500);
 	}
 out:
-	pr_debug("taper work exit\n");
+	smb1390_dbg(chip, PR_INFO, "taper work exit\n");
 	vote(chip->fcc_votable, CP_VOTER, false, 0);
 	chip->taper_work_running = false;
 }
@@ -733,7 +759,7 @@
 					+ 500000;
 		break;
 	default:
-		pr_debug("charge pump power supply get prop %d not supported\n",
+		smb1390_dbg(chip, PR_MISC, "charge pump power supply get prop %d not supported\n",
 			prop);
 		return -EINVAL;
 	}
@@ -760,7 +786,7 @@
 		chip->irq_status = val->intval;
 		break;
 	default:
-		pr_debug("charge pump power supply set prop %d not supported\n",
+		smb1390_dbg(chip, PR_MISC, "charge pump power supply set prop %d not supported\n",
 			prop);
 		return -EINVAL;
 	}
@@ -969,6 +995,31 @@
 	return rc;
 }
 
+#ifdef CONFIG_DEBUG_FS
+static void smb1390_create_debugfs(struct smb1390 *chip)
+{
+	struct dentry *entry;
+
+	chip->dfs_root = debugfs_create_dir("smb1390_charger_psy", NULL);
+	if (IS_ERR_OR_NULL(chip->dfs_root)) {
+		pr_err("Failed to create debugfs directory, rc=%ld\n",
+					(long)chip->dfs_root);
+		return;
+	}
+
+	entry = debugfs_create_u32("debug_mask", 0600, chip->dfs_root,
+			&chip->debug_mask);
+	if (IS_ERR_OR_NULL(entry)) {
+		pr_err("Failed to create debug_mask, rc=%ld\n", (long)entry);
+		debugfs_remove_recursive(chip->dfs_root);
+	}
+}
+#else
+static void smb1390_create_debugfs(struct smb1390 *chip)
+{
+}
+#endif
+
 static int smb1390_probe(struct platform_device *pdev)
 {
 	struct smb1390 *chip;
@@ -1034,7 +1085,10 @@
 		goto out_notifier;
 	}
 
+	smb1390_create_debugfs(chip);
+
 	pr_debug("smb1390 probed successfully\n");
+
 	return 0;
 
 out_notifier:
diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c
index 27a425a..7c4a9df 100644
--- a/drivers/power/supply/qcom/smb5-lib.c
+++ b/drivers/power/supply/qcom/smb5-lib.c
@@ -904,7 +904,7 @@
 	int rc;
 	u8 mask;
 
-	if (chg->hvdcp_disable)
+	if (chg->hvdcp_disable || chg->pd_not_supported)
 		return;
 
 	mask = HVDCP_AUTH_ALG_EN_CFG_BIT | HVDCP_EN_BIT;
@@ -1079,6 +1079,9 @@
 
 	cancel_delayed_work_sync(&chg->pl_enable_work);
 
+	if (chg->wa_flags & CHG_TERMINATION_WA)
+		alarm_cancel(&chg->chg_termination_alarm);
+
 	if (chg->wa_flags & BOOST_BACK_WA) {
 		data = chg->irq_info[SWITCHER_POWER_OK_IRQ].irq_data;
 		if (data) {
@@ -1099,6 +1102,7 @@
 			is_flash_active(chg) ? SDP_CURRENT_UA : SDP_100_MA);
 	vote(chg->usb_icl_votable, SW_QC3_VOTER, false, 0);
 	vote(chg->usb_icl_votable, HVDCP2_ICL_VOTER, false, 0);
+	vote(chg->usb_icl_votable, CHG_TERMINATION_VOTER, false, 0);
 
 	/* Remove SW thermal regulation WA votes */
 	vote(chg->usb_icl_votable, SW_THERM_REGULATION_VOTER, false, 0);
@@ -1836,6 +1840,16 @@
 		return 0;
 	}
 
+	/*
+	 * If charge termination WA is active and has suspended charging, then
+	 * continue reporting charging status as FULL.
+	 */
+	if (is_client_vote_enabled(chg->usb_icl_votable,
+						CHG_TERMINATION_VOTER)) {
+		val->intval = POWER_SUPPLY_STATUS_FULL;
+		return 0;
+	}
+
 	if (val->intval != POWER_SUPPLY_STATUS_CHARGING)
 		return 0;
 
@@ -2765,6 +2779,12 @@
 		return rc;
 	}
 
+	if (is_client_vote_enabled(chg->dc_suspend_votable,
+						CHG_TERMINATION_VOTER)) {
+		rc = smblib_get_prop_dc_present(chg, val);
+		return rc;
+	}
+
 	rc = smblib_read(chg, POWER_PATH_STATUS_REG, &stat);
 	if (rc < 0) {
 		smblib_err(chg, "Couldn't read POWER_PATH_STATUS rc=%d\n",
@@ -2877,6 +2897,12 @@
 		return rc;
 	}
 
+	if (is_client_vote_enabled(chg->usb_icl_votable,
+					CHG_TERMINATION_VOTER)) {
+		rc = smblib_get_prop_usb_present(chg, val);
+		return rc;
+	}
+
 	rc = smblib_read(chg, POWER_PATH_STATUS_REG, &stat);
 	if (rc < 0) {
 		smblib_err(chg, "Couldn't read POWER_PATH_STATUS rc=%d\n",
@@ -3350,7 +3376,7 @@
 				    union power_supply_propval *val)
 {
 	union power_supply_propval pval = {0, };
-	int rc = 0;
+	int rc = 0, buck_scale = 1, boost_scale = 1;
 
 	if (chg->iio.usbin_i_chan) {
 		rc = iio_read_channel_processed(chg->iio.usbin_i_chan,
@@ -3363,10 +3389,24 @@
 		/*
 		 * For PM8150B, scaling factor = reciprocal of
 		 * 0.2V/A in Buck mode, 0.4V/A in Boost mode.
+		 * For PMI632, scaling factor = reciprocal of
+		 * 0.4V/A in Buck mode, 0.8V/A in Boost mode.
 		 */
+		switch (chg->smb_version) {
+		case PMI632_SUBTYPE:
+			buck_scale = 40;
+			boost_scale = 80;
+			break;
+		default:
+			buck_scale = 20;
+			boost_scale = 40;
+			break;
+		}
+
 		if (chg->otg_present || smblib_get_prop_dfp_mode(chg) !=
 				POWER_SUPPLY_TYPEC_NONE) {
-			val->intval = DIV_ROUND_CLOSEST(val->intval * 100, 40);
+			val->intval = DIV_ROUND_CLOSEST(val->intval * 100,
+								boost_scale);
 			return rc;
 		}
 
@@ -3381,7 +3421,8 @@
 		if (!pval.intval)
 			val->intval = 0;
 		else
-			val->intval *= 5;
+			val->intval = DIV_ROUND_CLOSEST(val->intval * 100,
+								buck_scale);
 	} else {
 		val->intval = 0;
 		rc = -ENODATA;
@@ -4230,6 +4271,38 @@
 	return IRQ_HANDLED;
 }
 
+#define CHG_TERM_WA_ENTRY_DELAY_MS		300000		/* 5 min */
+#define CHG_TERM_WA_EXIT_DELAY_MS		60000		/* 1 min */
+static void smblib_eval_chg_termination(struct smb_charger *chg, u8 batt_status)
+{
+	union power_supply_propval pval = {0, };
+	int rc = 0;
+
+	rc = smblib_get_prop_from_bms(chg, POWER_SUPPLY_PROP_CAPACITY, &pval);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read SOC value, rc=%d\n", rc);
+		return;
+	}
+
+	/*
+	 * Post charge termination, switch to BSM mode triggers the risk of
+	 * over charging as BATFET opening may take some time post the necessity
+	 * of staying in supplemental mode, leading to unintended charging of
+	 * battery. Trigger the charge termination WA once charging is completed
+	 * to prevent overcharging.
+	 */
+	if ((batt_status == TERMINATE_CHARGE) && (pval.intval == 100)) {
+		alarm_start_relative(&chg->chg_termination_alarm,
+				ms_to_ktime(CHG_TERM_WA_ENTRY_DELAY_MS));
+	} else if (pval.intval < 100) {
+		/*
+		 * Reset CC_SOC reference value for charge termination WA once
+		 * we exit the TERMINATE_CHARGE state and soc drops below 100%
+		 */
+		chg->cc_soc_ref = 0;
+	}
+}
+
 irqreturn_t chg_state_change_irq_handler(int irq, void *data)
 {
 	struct smb_irq_data *irq_data = data;
@@ -4247,6 +4320,10 @@
 	}
 
 	stat = stat & BATTERY_CHARGER_STATUS_MASK;
+
+	if (chg->wa_flags & CHG_TERMINATION_WA)
+		smblib_eval_chg_termination(chg, stat);
+
 	power_supply_changed(chg->batt_psy);
 	return IRQ_HANDLED;
 }
@@ -5044,6 +5121,9 @@
 
 	cancel_delayed_work_sync(&chg->pl_enable_work);
 
+	if (chg->wa_flags & CHG_TERMINATION_WA)
+		alarm_cancel(&chg->chg_termination_alarm);
+
 	/* reset input current limit voters */
 	vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, true,
 			is_flash_active(chg) ? SDP_CURRENT_UA : SDP_100_MA);
@@ -5054,6 +5134,7 @@
 	vote(chg->usb_icl_votable, OTG_VOTER, false, 0);
 	vote(chg->usb_icl_votable, CTM_VOTER, false, 0);
 	vote(chg->usb_icl_votable, HVDCP2_ICL_VOTER, false, 0);
+	vote(chg->usb_icl_votable, CHG_TERMINATION_VOTER, false, 0);
 
 	/* reset usb irq voters */
 	vote(chg->usb_irq_enable_votable, PD_VOTER, false, 0);
@@ -5813,6 +5894,12 @@
 	u8 stat;
 
 	/*
+	 * Hold awake votable to prevent pm_relax being called prior to
+	 * completion of this work.
+	 */
+	vote(chg->awake_votable, MOISTURE_VOTER, true, 0);
+
+	/*
 	 * Disable 1% duty cycle on CC_ID pin and enable uUSB factory mode
 	 * detection to track any change on RID, as interrupts are disable.
 	 */
@@ -5876,7 +5963,7 @@
 	}
 
 out:
-	pm_relax(chg->dev);
+	vote(chg->awake_votable, MOISTURE_VOTER, false, 0);
 }
 
 static enum alarmtimer_restart moisture_protection_alarm_cb(struct alarm *alarm,
@@ -5895,6 +5982,94 @@
 	return ALARMTIMER_NORESTART;
 }
 
+static void smblib_chg_termination_work(struct work_struct *work)
+{
+	union power_supply_propval pval;
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+						chg_termination_work);
+	int rc, input_present, delay = CHG_TERM_WA_ENTRY_DELAY_MS;
+
+	/*
+	 * Hold awake votable to prevent pm_relax being called prior to
+	 * completion of this work.
+	 */
+	vote(chg->awake_votable, CHG_TERMINATION_VOTER, true, 0);
+
+	rc = smblib_is_input_present(chg, &input_present);
+	if ((rc < 0) || !input_present)
+		goto out;
+
+	rc = smblib_get_prop_from_bms(chg, POWER_SUPPLY_PROP_CAPACITY, &pval);
+	if ((rc < 0) || (pval.intval < 100)) {
+		vote(chg->usb_icl_votable, CHG_TERMINATION_VOTER, false, 0);
+		goto out;
+	}
+
+	rc = smblib_get_prop_from_bms(chg, POWER_SUPPLY_PROP_CHARGE_FULL,
+					&pval);
+	if (rc < 0)
+		goto out;
+
+	/*
+	 * On change in the value of learned capacity, re-initialize the
+	 * reference cc_soc value due to change in cc_soc characteristic value
+	 * at full capacity. Also, in case cc_soc_ref value is reset,
+	 * re-initialize it.
+	 */
+	if (pval.intval != chg->charge_full_cc || !chg->cc_soc_ref) {
+		chg->charge_full_cc = pval.intval;
+		rc = smblib_get_prop_from_bms(chg, POWER_SUPPLY_PROP_CC_SOC,
+					&pval);
+		if (rc < 0)
+			goto out;
+
+		chg->cc_soc_ref = pval.intval;
+	} else {
+		rc = smblib_get_prop_from_bms(chg, POWER_SUPPLY_PROP_CC_SOC,
+					&pval);
+		if (rc < 0)
+			goto out;
+	}
+
+	/*
+	 * Suspend/Unsuspend USB input to keep cc_soc within the 0.5% to 0.75%
+	 * overshoot range of the cc_soc value at termination, to prevent
+	 * overcharging.
+	 */
+	if (pval.intval < DIV_ROUND_CLOSEST(chg->cc_soc_ref * 10050, 10000)) {
+		vote(chg->usb_icl_votable, CHG_TERMINATION_VOTER, false, 0);
+		vote(chg->dc_suspend_votable, CHG_TERMINATION_VOTER, false, 0);
+		delay = CHG_TERM_WA_ENTRY_DELAY_MS;
+	} else if (pval.intval > DIV_ROUND_CLOSEST(chg->cc_soc_ref * 10075,
+								10000)) {
+		vote(chg->usb_icl_votable, CHG_TERMINATION_VOTER, true, 0);
+		vote(chg->dc_suspend_votable, CHG_TERMINATION_VOTER, true, 0);
+		delay = CHG_TERM_WA_EXIT_DELAY_MS;
+	}
+
+	smblib_dbg(chg, PR_MISC, "Chg Term WA readings: cc_soc: %d, cc_soc_ref: %d, delay: %d\n",
+			pval.intval, chg->cc_soc_ref, delay);
+	alarm_start_relative(&chg->chg_termination_alarm, ms_to_ktime(delay));
+out:
+	vote(chg->awake_votable, CHG_TERMINATION_VOTER, false, 0);
+}
+
+static enum alarmtimer_restart chg_termination_alarm_cb(struct alarm *alarm,
+								ktime_t now)
+{
+	struct smb_charger *chg = container_of(alarm, struct smb_charger,
+							chg_termination_alarm);
+
+	smblib_dbg(chg, PR_MISC, "Charge termination WA alarm triggered %lld\n",
+			ktime_to_ms(now));
+
+	/* Atomic context, cannot use voter */
+	pm_stay_awake(chg->dev);
+	schedule_work(&chg->chg_termination_work);
+
+	return ALARMTIMER_NORESTART;
+}
+
 static void jeita_update_work(struct work_struct *work)
 {
 	struct smb_charger *chg = container_of(work, struct smb_charger,
@@ -6266,6 +6441,19 @@
 					smblib_thermal_regulation_work);
 	INIT_DELAYED_WORK(&chg->usbov_dbc_work, smblib_usbov_dbc_work);
 
+	if (chg->wa_flags & CHG_TERMINATION_WA) {
+		INIT_WORK(&chg->chg_termination_work,
+					smblib_chg_termination_work);
+
+		if (alarmtimer_get_rtcdev()) {
+			alarm_init(&chg->chg_termination_alarm, ALARM_BOOTTIME,
+						chg_termination_alarm_cb);
+		} else {
+			smblib_err(chg, "Couldn't get rtc device\n");
+			return -ENODEV;
+		}
+	}
+
 	if (chg->uusb_moisture_protection_enabled) {
 		INIT_WORK(&chg->moisture_protection_work,
 					smblib_moisture_protection_work);
@@ -6375,6 +6563,10 @@
 			alarm_cancel(&chg->moisture_protection_alarm);
 			cancel_work_sync(&chg->moisture_protection_work);
 		}
+		if (chg->wa_flags & CHG_TERMINATION_WA) {
+			alarm_cancel(&chg->chg_termination_alarm);
+			cancel_work_sync(&chg->chg_termination_work);
+		}
 		cancel_work_sync(&chg->bms_update_work);
 		cancel_work_sync(&chg->jeita_update_work);
 		cancel_work_sync(&chg->pl_update_work);
diff --git a/drivers/power/supply/qcom/smb5-lib.h b/drivers/power/supply/qcom/smb5-lib.h
index 70bcdc6..154554a 100644
--- a/drivers/power/supply/qcom/smb5-lib.h
+++ b/drivers/power/supply/qcom/smb5-lib.h
@@ -65,6 +65,7 @@
 #define HVDCP2_ICL_VOTER		"HVDCP2_ICL_VOTER"
 #define AICL_THRESHOLD_VOTER		"AICL_THRESHOLD_VOTER"
 #define USBOV_DBC_VOTER			"USBOV_DBC_VOTER"
+#define CHG_TERMINATION_VOTER		"CHG_TERMINATION_VOTER"
 
 #define BOOST_BACK_STORM_COUNT	3
 #define WEAK_CHG_STORM_COUNT	8
@@ -107,6 +108,7 @@
 	SW_THERM_REGULATION_WA		= BIT(1),
 	WEAK_ADAPTER_WA			= BIT(2),
 	USBIN_OV_WA			= BIT(3),
+	CHG_TERMINATION_WA		= BIT(4),
 };
 
 enum jeita_cfg_stat {
@@ -395,6 +397,7 @@
 	struct work_struct	pl_update_work;
 	struct work_struct	jeita_update_work;
 	struct work_struct	moisture_protection_work;
+	struct work_struct	chg_termination_work;
 	struct delayed_work	ps_change_timeout_work;
 	struct delayed_work	clear_hdc_work;
 	struct delayed_work	icl_change_work;
@@ -408,6 +411,7 @@
 
 	struct alarm		lpd_recheck_timer;
 	struct alarm		moisture_protection_alarm;
+	struct alarm		chg_termination_alarm;
 
 	/* secondary charger config */
 	bool			sec_pl_present;
@@ -488,6 +492,8 @@
 	int			aicl_cont_threshold_mv;
 	int			default_aicl_cont_threshold_mv;
 	bool			aicl_max_reached;
+	int			charge_full_cc;
+	int			cc_soc_ref;
 
 	/* workaround flag */
 	u32			wa_flags;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 970654f..2d1f6a5 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -22,6 +22,7 @@
 #include <linux/hashtable.h>
 #include <linux/ip.h>
 #include <linux/refcount.h>
+#include <linux/workqueue.h>
 
 #include <net/ipv6.h>
 #include <net/if_inet6.h>
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index b03515d..56aacf3 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -565,6 +565,7 @@
 		QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! "
 			"rc=%i\n", dev_name(&card->gdev->dev), rc);
 		atomic_set(&channel->irq_pending, 0);
+		qeth_release_buffer(channel, iob);
 		card->read_or_write_problem = 1;
 		qeth_schedule_recovery(card);
 		wake_up(&card->wait_q);
@@ -1187,6 +1188,8 @@
 		rc = qeth_get_problem(cdev, irb);
 		if (rc) {
 			card->read_or_write_problem = 1;
+			if (iob)
+				qeth_release_buffer(iob->channel, iob);
 			qeth_clear_ipacmd_list(card);
 			qeth_schedule_recovery(card);
 			goto out;
@@ -1852,6 +1855,7 @@
 		QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc);
 		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
 		atomic_set(&channel->irq_pending, 0);
+		qeth_release_buffer(channel, iob);
 		wake_up(&card->wait_q);
 		return rc;
 	}
@@ -1923,6 +1927,7 @@
 			rc);
 		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
 		atomic_set(&channel->irq_pending, 0);
+		qeth_release_buffer(channel, iob);
 		wake_up(&card->wait_q);
 		return rc;
 	}
@@ -2110,6 +2115,7 @@
 	}
 	reply = qeth_alloc_reply(card);
 	if (!reply) {
+		qeth_release_buffer(channel, iob);
 		return -ENOMEM;
 	}
 	reply->callback = reply_cb;
@@ -2448,11 +2454,12 @@
 	return 0;
 }
 
-static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q)
+static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
 {
 	if (!q)
 		return;
 
+	qeth_clear_outq_buffers(q, 1);
 	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
 	kfree(q);
 }
@@ -2526,10 +2533,8 @@
 		card->qdio.out_qs[i]->bufs[j] = NULL;
 	}
 out_freeoutq:
-	while (i > 0) {
-		qeth_free_qdio_out_buf(card->qdio.out_qs[--i]);
-		qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
-	}
+	while (i > 0)
+		qeth_free_output_queue(card->qdio.out_qs[--i]);
 	kfree(card->qdio.out_qs);
 	card->qdio.out_qs = NULL;
 out_freepool:
@@ -2562,10 +2567,8 @@
 	qeth_free_buffer_pool(card);
 	/* free outbound qdio_qs */
 	if (card->qdio.out_qs) {
-		for (i = 0; i < card->qdio.no_out_queues; ++i) {
-			qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
-			qeth_free_qdio_out_buf(card->qdio.out_qs[i]);
-		}
+		for (i = 0; i < card->qdio.no_out_queues; i++)
+			qeth_free_output_queue(card->qdio.out_qs[i]);
 		kfree(card->qdio.out_qs);
 		card->qdio.out_qs = NULL;
 	}
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 76b2fba..b7513c5 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -854,6 +854,8 @@
 
 	if (cgdev->state == CCWGROUP_ONLINE)
 		qeth_l2_set_offline(cgdev);
+
+	cancel_work_sync(&card->close_dev_work);
 	if (qeth_netdev_is_registered(card->dev))
 		unregister_netdev(card->dev);
 }
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index b7f6a83..7f71ca0 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2611,6 +2611,7 @@
 	if (cgdev->state == CCWGROUP_ONLINE)
 		qeth_l3_set_offline(cgdev);
 
+	cancel_work_sync(&card->close_dev_work);
 	if (qeth_netdev_is_registered(card->dev))
 		unregister_netdev(card->dev);
 	qeth_l3_clear_ip_htable(card, 0);
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 6be77b3..ac79f20 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -295,7 +295,7 @@
 	if(tpnt->sdev_attrs == NULL)
 		tpnt->sdev_attrs = NCR_700_dev_attrs;
 
-	memory = dma_alloc_attrs(hostdata->dev, TOTAL_MEM_SIZE, &pScript,
+	memory = dma_alloc_attrs(dev, TOTAL_MEM_SIZE, &pScript,
 				 GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
 	if(memory == NULL) {
 		printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 6e1b022..3236240 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1304,8 +1304,9 @@
 				  ADD : DELETE;
 				break;
 			}
-			case AifBuManagerEvent:
-				aac_handle_aif_bu(dev, aifcmd);
+			break;
+		case AifBuManagerEvent:
+			aac_handle_aif_bu(dev, aifcmd);
 			break;
 		}
 
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 350257c..bc9f2a2 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -240,6 +240,7 @@
 		return NULL;
 	}
 
+	cmgr->hba = hba;
 	cmgr->free_list = kcalloc(arr_sz, sizeof(*cmgr->free_list),
 				  GFP_KERNEL);
 	if (!cmgr->free_list) {
@@ -256,7 +257,6 @@
 		goto mem_err;
 	}
 
-	cmgr->hba = hba;
 	cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);
 
 	for (i = 0; i < arr_sz; i++)  {
@@ -295,7 +295,7 @@
 
 	/* Allocate pool of io_bdts - one for each bnx2fc_cmd */
 	mem_size = num_ios * sizeof(struct io_bdt *);
-	cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL);
+	cmgr->io_bdt_pool = kzalloc(mem_size, GFP_KERNEL);
 	if (!cmgr->io_bdt_pool) {
 		printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
 		goto mem_err;
diff --git a/drivers/scsi/csiostor/csio_attr.c b/drivers/scsi/csiostor/csio_attr.c
index 8a00403..9bd2bd8 100644
--- a/drivers/scsi/csiostor/csio_attr.c
+++ b/drivers/scsi/csiostor/csio_attr.c
@@ -594,12 +594,12 @@
 	}
 
 	fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING);
+	ln->fc_vport = fc_vport;
 
 	if (csio_fcoe_alloc_vnp(hw, ln))
 		goto error;
 
 	*(struct csio_lnode **)fc_vport->dd_data = ln;
-	ln->fc_vport = fc_vport;
 	if (!fc_vport->node_name)
 		fc_vport->node_name = wwn_to_u64(csio_ln_wwnn(ln));
 	if (!fc_vport->port_name)
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index be83590..ff943f4 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -1726,14 +1726,14 @@
 	    fc_frame_payload_op(fp) != ELS_LS_ACC) {
 		FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n");
 		fc_lport_error(lport, fp);
-		goto err;
+		goto out;
 	}
 
 	flp = fc_frame_payload_get(fp, sizeof(*flp));
 	if (!flp) {
 		FC_LPORT_DBG(lport, "FLOGI bad response\n");
 		fc_lport_error(lport, fp);
-		goto err;
+		goto out;
 	}
 
 	mfs = ntohs(flp->fl_csp.sp_bb_data) &
@@ -1743,7 +1743,7 @@
 		FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
 			     "lport->mfs:%hu\n", mfs, lport->mfs);
 		fc_lport_error(lport, fp);
-		goto err;
+		goto out;
 	}
 
 	if (mfs <= lport->mfs) {
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 372387a..1797e47 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -184,7 +184,6 @@
 	struct fc_rport_priv *rdata;
 
 	rdata = container_of(kref, struct fc_rport_priv, kref);
-	WARN_ON(!list_empty(&rdata->peers));
 	kfree_rcu(rdata, rcu);
 }
 EXPORT_SYMBOL(fc_rport_destroy);
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index fadc99c..a1551ab 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -829,6 +829,7 @@
 		rphy = sas_end_device_alloc(phy->port);
 		if (!rphy)
 			goto out_free;
+		rphy->identify.phy_identifier = phy_id;
 
 		child->rphy = rphy;
 		get_device(&rphy->dev);
@@ -856,6 +857,7 @@
 
 		child->rphy = rphy;
 		get_device(&rphy->dev);
+		rphy->identify.phy_identifier = phy_id;
 		sas_fill_in_rphy(child, rphy);
 
 		list_add_tail(&child->disco_list_node, &parent->port->disco_list);
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 918ae18..ca62117 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -297,7 +297,8 @@
 			 lport);
 
 	/* release any threads waiting for the unreg to complete */
-	complete(&lport->lport_unreg_done);
+	if (lport->vport->localport)
+		complete(lport->lport_unreg_cmp);
 }
 
 /* lpfc_nvme_remoteport_delete
@@ -2556,7 +2557,8 @@
  */
 void
 lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
-			   struct lpfc_nvme_lport *lport)
+			   struct lpfc_nvme_lport *lport,
+			   struct completion *lport_unreg_cmp)
 {
 #if (IS_ENABLED(CONFIG_NVME_FC))
 	u32 wait_tmo;
@@ -2568,8 +2570,7 @@
 	 */
 	wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
 	while (true) {
-		ret = wait_for_completion_timeout(&lport->lport_unreg_done,
-						  wait_tmo);
+		ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
 		if (unlikely(!ret)) {
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
 					 "6176 Lport %p Localport %p wait "
@@ -2603,12 +2604,12 @@
 	struct lpfc_nvme_lport *lport;
 	struct lpfc_nvme_ctrl_stat *cstat;
 	int ret;
+	DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp);
 
 	if (vport->nvmei_support == 0)
 		return;
 
 	localport = vport->localport;
-	vport->localport = NULL;
 	lport = (struct lpfc_nvme_lport *)localport->private;
 	cstat = lport->cstat;
 
@@ -2619,13 +2620,14 @@
 	/* lport's rport list is clear.  Unregister
 	 * lport and release resources.
 	 */
-	init_completion(&lport->lport_unreg_done);
+	lport->lport_unreg_cmp = &lport_unreg_cmp;
 	ret = nvme_fc_unregister_localport(localport);
 
 	/* Wait for completion.  This either blocks
 	 * indefinitely or succeeds
 	 */
-	lpfc_nvme_lport_unreg_wait(vport, lport);
+	lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp);
+	vport->localport = NULL;
 	kfree(cstat);
 
 	/* Regardless of the unregister upcall response, clear
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index cfd4719..b234d02 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -50,7 +50,7 @@
 /* Declare nvme-based local and remote port definitions. */
 struct lpfc_nvme_lport {
 	struct lpfc_vport *vport;
-	struct completion lport_unreg_done;
+	struct completion *lport_unreg_cmp;
 	/* Add stats counters here */
 	struct lpfc_nvme_ctrl_stat *cstat;
 	atomic_t fc4NvmeLsRequests;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index b766afe..e2575c8 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1003,7 +1003,8 @@
 	struct lpfc_nvmet_tgtport *tport = targetport->private;
 
 	/* release any threads waiting for the unreg to complete */
-	complete(&tport->tport_unreg_done);
+	if (tport->phba->targetport)
+		complete(tport->tport_unreg_cmp);
 }
 
 static void
@@ -1700,6 +1701,7 @@
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct lpfc_queue *wq;
 	uint32_t qidx;
+	DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);
 
 	if (phba->nvmet_support == 0)
 		return;
@@ -1709,9 +1711,9 @@
 			wq = phba->sli4_hba.nvme_wq[qidx];
 			lpfc_nvmet_wqfull_flush(phba, wq, NULL);
 		}
-		init_completion(&tgtp->tport_unreg_done);
+		tgtp->tport_unreg_cmp = &tport_unreg_cmp;
 		nvmet_fc_unregister_targetport(phba->targetport);
-		wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
+		wait_for_completion_timeout(&tport_unreg_cmp, 5);
 		lpfc_nvmet_cleanup_io_context(phba);
 	}
 	phba->targetport = NULL;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index 1aaff63..0ec1082 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -34,7 +34,7 @@
 /* Used for NVME Target */
 struct lpfc_nvmet_tgtport {
 	struct lpfc_hba *phba;
-	struct completion tport_unreg_done;
+	struct completion *tport_unreg_cmp;
 
 	/* Stats counters - lpfc_nvmet_unsol_ls_buffer */
 	atomic_t rcv_ls_req_in;
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 60bcc6d..65305b3 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -62,7 +62,7 @@
 
 /* make sure inq_product_rev string corresponds to this version */
 #define SDEBUG_VERSION "0188"	/* format to fit INQUIRY revision field */
-static const char *sdebug_version_date = "20180128";
+static const char *sdebug_version_date = "20190125";
 
 #define MY_NAME "scsi_debug"
 
@@ -735,7 +735,7 @@
 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
 }
 
-static void *fake_store(unsigned long long lba)
+static void *lba2fake_store(unsigned long long lba)
 {
 	lba = do_div(lba, sdebug_store_sectors);
 
@@ -2514,8 +2514,8 @@
 	return ret;
 }
 
-/* If fake_store(lba,num) compares equal to arr(num), then copy top half of
- * arr into fake_store(lba,num) and return true. If comparison fails then
+/* If lba2fake_store(lba,num) compares equal to arr(num), then copy top half of
+ * arr into lba2fake_store(lba,num) and return true. If comparison fails then
  * return false. */
 static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
 {
@@ -2643,7 +2643,7 @@
 		if (sdt->app_tag == cpu_to_be16(0xffff))
 			continue;
 
-		ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
+		ret = dif_verify(sdt, lba2fake_store(sector), sector, ei_lba);
 		if (ret) {
 			dif_errors++;
 			return ret;
@@ -3261,10 +3261,12 @@
 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
 			   u32 ei_lba, bool unmap, bool ndob)
 {
+	int ret;
 	unsigned long iflags;
 	unsigned long long i;
-	int ret;
-	u64 lba_off;
+	u32 lb_size = sdebug_sector_size;
+	u64 block, lbaa;
+	u8 *fs1p;
 
 	ret = check_device_access_params(scp, lba, num);
 	if (ret)
@@ -3276,31 +3278,30 @@
 		unmap_region(lba, num);
 		goto out;
 	}
-
-	lba_off = lba * sdebug_sector_size;
+	lbaa = lba;
+	block = do_div(lbaa, sdebug_store_sectors);
 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
+	fs1p = fake_storep + (block * lb_size);
 	if (ndob) {
-		memset(fake_storep + lba_off, 0, sdebug_sector_size);
+		memset(fs1p, 0, lb_size);
 		ret = 0;
 	} else
-		ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
-					  sdebug_sector_size);
+		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
 
 	if (-1 == ret) {
 		write_unlock_irqrestore(&atomic_rw, iflags);
 		return DID_ERROR << 16;
-	} else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size))
+	} else if (sdebug_verbose && !ndob && (ret < lb_size))
 		sdev_printk(KERN_INFO, scp->device,
 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
-			    my_name, "write same",
-			    sdebug_sector_size, ret);
+			    my_name, "write same", lb_size, ret);
 
 	/* Copy first sector to remaining blocks */
-	for (i = 1 ; i < num ; i++)
-		memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
-		       fake_storep + lba_off,
-		       sdebug_sector_size);
-
+	for (i = 1 ; i < num ; i++) {
+		lbaa = lba + i;
+		block = do_div(lbaa, sdebug_store_sectors);
+		memmove(fake_storep + (block * lb_size), fs1p, lb_size);
+	}
 	if (scsi_debug_lbp())
 		map_region(lba, num);
 out:
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ffeac4b..c678bf9 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -761,6 +761,7 @@
 		set_host_byte(cmd, DID_OK);
 		return BLK_STS_TARGET;
 	case DID_NEXUS_FAILURE:
+		set_host_byte(cmd, DID_OK);
 		return BLK_STS_NEXUS;
 	case DID_ALLOC_FAILURE:
 		set_host_byte(cmd, DID_OK);
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 8cc0151..a4ac607 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -1081,18 +1081,19 @@
 static irqreturn_t portal_isr(int irq, void *ptr)
 {
 	struct qman_portal *p = ptr;
-
-	u32 clear = QM_DQAVAIL_MASK | p->irq_sources;
 	u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;
+	u32 clear = 0;
 
 	if (unlikely(!is))
 		return IRQ_NONE;
 
 	/* DQRR-handling if it's interrupt-driven */
-	if (is & QM_PIRQ_DQRI)
+	if (is & QM_PIRQ_DQRI) {
 		__poll_portal_fast(p, QMAN_POLL_LIMIT);
+		clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI;
+	}
 	/* Handling of anything else that's interrupt-driven */
-	clear |= __poll_portal_slow(p, is);
+	clear |= __poll_portal_slow(p, is) & QM_PIRQ_SLOW;
 	qm_out(&p->p, QM_REG_ISR, clear);
 	return IRQ_HANDLED;
 }
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 22bfc19..a867f71 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -68,6 +68,15 @@
         This information is exported to usespace via sysfs entries and userspace
         algorithms uses info and decide when to turn on/off the cpu cores.
 
+config MSM_QBT_HANDLER
+	bool "Event Handler for QTI Ultrasonic Fingerprint Sensor"
+	help
+	  This driver acts as an interrupt handler, where the interrupt is generated
+	  by the QTI Ultrasonic Fingerprint Sensor. It queues the events for each
+	  interrupt in an event queue and notifies the userspace to read the events
+	  from the queue. It also creates an input device to send key events such as
+	  KEY_POWER, KEY_HOME.
+
 config QCOM_GSBI
         tristate "QCOM General Serial Bus Interface"
         depends on ARCH_QCOM
@@ -586,6 +595,16 @@
 	  This enable the userspace clients to read and write to
 	  some glink packets channel.
 
+config QCOM_SMP2P_SLEEPSTATE
+	bool "SMP2P Sleepstate notifier"
+	depends on QCOM_SMP2P
+	help
+	  When this option is enabled, notifications are sent to remote procs
+	  for the power state changes on the local processor. The notifications
+	  are sent through the smp2p framework. This driver can also receive
+	  notifications from the remote to prevent suspend on the local
+	  processor.
+
 config QCOM_QDSS_BRIDGE
 	bool "Configure bridge driver for QTI/Qualcomm Technologies, Inc. MDM"
 	depends on MHI_BUS
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 43ca8fa..1dc9f25 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -65,7 +65,9 @@
 obj-$(CONFIG_QCOM_FSA4480_I2C) += fsa4480-i2c.o
 obj-$(CONFIG_QCOM_GLINK) += glink_probe.o
 obj-$(CONFIG_QCOM_GLINK_PKT) += glink_pkt.o
+obj-$(CONFIG_QCOM_SMP2P_SLEEPSTATE) += smp2p_sleepstate.o
 obj-$(CONFIG_QCOM_QDSS_BRIDGE) += qdss_bridge.o
+obj-$(CONFIG_MSM_QBT_HANDLER) += qbt_handler.o
 obj-$(CONFIG_QSEE_IPC_IRQ) += qsee_ipc_irq.o
 obj-$(CONFIG_QSEE_IPC_IRQ_BRIDGE) += qsee_ipc_irq_bridge.o
 obj-$(CONFIG_QPNP_PBS) += qpnp-pbs.o
diff --git a/drivers/soc/qcom/dcc_v2.c b/drivers/soc/qcom/dcc_v2.c
index 7101500..c074d79 100644
--- a/drivers/soc/qcom/dcc_v2.c
+++ b/drivers/soc/qcom/dcc_v2.c
@@ -145,6 +145,7 @@
 	uint32_t		nr_config[DCC_MAX_LINK_LIST];
 	uint8_t			curr_list;
 	uint8_t			cti_trig;
+	uint8_t			loopoff;
 };
 
 static bool dcc_ready(struct dcc_drvdata *drvdata)
@@ -250,7 +251,6 @@
 				/* write new offset = 1 to continue
 				 * processing the list
 				 */
-				link |= ((0x1 << 8) & BM(8, 14));
 				dcc_sram_writel(drvdata, link, sram_offset);
 				sram_offset += 4;
 				/* Reset link and prev_off */
@@ -283,7 +283,8 @@
 
 			if (loop_start) {
 				loop = (sram_offset - loop_off) / 4;
-				loop |= (loop_cnt << 13) & BM(13, 27);
+				loop |= (loop_cnt << drvdata->loopoff) &
+					BM(drvdata->loopoff, 27);
 				loop |= DCC_LOOP_DESCRIPTOR;
 				total_len += (total_len - loop_len) * loop_cnt;
 
@@ -315,7 +316,6 @@
 				/* write new offset = 1 to continue
 				 * processing the list
 				 */
-				link |= ((0x1 << 8) & BM(8, 14));
 				dcc_sram_writel(drvdata, link, sram_offset);
 				sram_offset += 4;
 				/* Reset link and prev_off */
@@ -1624,6 +1624,8 @@
 	if (ret)
 		return -EINVAL;
 
+	drvdata->loopoff = get_bitmask_order((drvdata->ram_size +
+				drvdata->ram_offset) / 4 - 1);
 	mutex_init(&drvdata->mutex);
 
 	for (i = 0; i < DCC_MAX_LINK_LIST; i++) {
diff --git a/drivers/soc/qcom/dfc_qmi.c b/drivers/soc/qcom/dfc_qmi.c
index 0304fcc..8fae3f1 100644
--- a/drivers/soc/qcom/dfc_qmi.c
+++ b/drivers/soc/qcom/dfc_qmi.c
@@ -1,20 +1,25 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/rtnetlink.h>
 #include <net/pkt_sched.h>
 #include <linux/soc/qcom/qmi.h>
 #include <soc/qcom/rmnet_qmi.h>
+#include <soc/qcom/qmi_rmnet.h>
 
 #include "qmi_rmnet_i.h"
 #define CREATE_TRACE_POINTS
 #include <trace/events/dfc.h>
 
+#define DFC_MASK_TCP_BIDIR 0x1
+#define DFC_MASK_RAT_SWITCH 0x2
+#define DFC_IS_TCP_BIDIR(r) (bool)((r) & DFC_MASK_TCP_BIDIR)
+#define DFC_IS_RAT_SWITCH(r) (bool)((r) & DFC_MASK_RAT_SWITCH)
+
 #define DFC_IS_ANCILLARY(type) ((type) != AF_INET && (type) != AF_INET6)
 
-#define DFC_MAX_BEARERS_V01 16
 #define DFC_MAX_QOS_ID_V01 2
 
 #define DFC_ACK_TYPE_DISABLE 1
@@ -50,17 +55,20 @@
 	struct work_struct svc_arrive;
 	struct qmi_handle handle;
 	struct sockaddr_qrtr ssctl;
+	struct svc_info svc;
+	struct work_struct qmi_ind_work;
+	struct list_head qmi_ind_q;
+	spinlock_t qmi_ind_lock;
 	int index;
 	int restart_state;
 };
 
 static void dfc_svc_init(struct work_struct *work);
-static void dfc_do_burst_flow_control(struct work_struct *work);
 
 /* **************************************************** */
 #define DFC_SERVICE_ID_V01 0x4E
 #define DFC_SERVICE_VERS_V01 0x01
-#define DFC_TIMEOUT_MS 10000
+#define DFC_TIMEOUT_JF msecs_to_jiffies(1000)
 
 #define QMI_DFC_BIND_CLIENT_REQ_V01 0x0020
 #define QMI_DFC_BIND_CLIENT_RESP_V01 0x0020
@@ -75,6 +83,11 @@
 #define QMI_DFC_FLOW_STATUS_IND_V01 0x0022
 #define QMI_DFC_FLOW_STATUS_IND_V01_MAX_MSG_LEN 540
 
+#define QMI_DFC_GET_FLOW_STATUS_REQ_V01 0x0023
+#define QMI_DFC_GET_FLOW_STATUS_RESP_V01 0x0023
+#define QMI_DFC_GET_FLOW_STATUS_REQ_V01_MAX_MSG_LEN 20
+#define QMI_DFC_GET_FLOW_STATUS_RESP_V01_MAX_MSG_LEN 543
+
 struct dfc_bind_client_req_msg_v01 {
 	u8 ep_id_valid;
 	struct data_ep_id_type_v01 ep_id;
@@ -298,9 +311,21 @@
 	struct dfc_ancillary_info_type_v01 ancillary_info[DFC_MAX_BEARERS_V01];
 };
 
+struct dfc_get_flow_status_req_msg_v01 {
+	u8 bearer_id_list_valid;
+	u8 bearer_id_list_len;
+	u8 bearer_id_list[DFC_MAX_BEARERS_V01];
+};
+
+struct dfc_get_flow_status_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	u8 flow_status_valid;
+	u8 flow_status_len;
+	struct dfc_flow_status_info_type_v01 flow_status[DFC_MAX_BEARERS_V01];
+};
+
 struct dfc_svc_ind {
-	struct work_struct work;
-	struct dfc_qmi_data *data;
+	struct list_head list;
 	struct dfc_flow_status_ind_msg_v01 dfc_info;
 };
 
@@ -497,6 +522,100 @@
 	},
 };
 
+static struct qmi_elem_info dfc_get_flow_status_req_msg_v01_ei[] = {
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_get_flow_status_req_msg_v01,
+					   bearer_id_list_valid),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_get_flow_status_req_msg_v01,
+					   bearer_id_list_len),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= DFC_MAX_BEARERS_V01,
+		.elem_size	= sizeof(u8),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_get_flow_status_req_msg_v01,
+					   bearer_id_list),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info dfc_get_flow_status_resp_msg_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(struct
+					   dfc_get_flow_status_resp_msg_v01,
+					   resp),
+		.ei_array	= qmi_response_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_get_flow_status_resp_msg_v01,
+					   flow_status_valid),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_get_flow_status_resp_msg_v01,
+					   flow_status_len),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= DFC_MAX_BEARERS_V01,
+		.elem_size	= sizeof(struct
+					 dfc_flow_status_info_type_v01),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_get_flow_status_resp_msg_v01,
+					   flow_status),
+		.ei_array	= dfc_flow_status_info_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
 static int
 dfc_bind_client_req(struct qmi_handle *dfc_handle,
 		    struct sockaddr_qrtr *ssctl, struct svc_info *svc)
@@ -538,7 +657,7 @@
 		goto out;
 	}
 
-	ret = qmi_txn_wait(&txn, DFC_TIMEOUT_MS);
+	ret = qmi_txn_wait(&txn, DFC_TIMEOUT_JF);
 	if (ret < 0) {
 		pr_err("%s() Response waiting failed, err: %d\n",
 			__func__, ret);
@@ -594,7 +713,7 @@
 		goto out;
 	}
 
-	ret = qmi_txn_wait(&txn, DFC_TIMEOUT_MS);
+	ret = qmi_txn_wait(&txn, DFC_TIMEOUT_JF);
 	if (ret < 0) {
 		pr_err("%s() Response waiting failed, err: %d\n",
 			__func__, ret);
@@ -610,12 +729,65 @@
 	return ret;
 }
 
-static int dfc_init_service(struct dfc_qmi_data *data, struct qmi_info *qmi)
+static int
+dfc_get_flow_status_req(struct qmi_handle *dfc_handle,
+			struct sockaddr_qrtr *ssctl,
+			struct dfc_get_flow_status_resp_msg_v01 *resp)
+{
+	struct dfc_get_flow_status_req_msg_v01 *req;
+	struct qmi_txn *txn;
+	int ret;
+
+	req = kzalloc(sizeof(*req), GFP_ATOMIC);
+	if (!req)
+		return -ENOMEM;
+
+	txn = kzalloc(sizeof(*txn), GFP_ATOMIC);
+	if (!txn) {
+		kfree(req);
+		return -ENOMEM;
+	}
+
+	ret = qmi_txn_init(dfc_handle, txn,
+			   dfc_get_flow_status_resp_msg_v01_ei, resp);
+	if (ret < 0) {
+		pr_err("%s() Failed init for response, err: %d\n",
+			__func__, ret);
+		goto out;
+	}
+
+	ret = qmi_send_request(dfc_handle, ssctl, txn,
+			       QMI_DFC_GET_FLOW_STATUS_REQ_V01,
+			       QMI_DFC_GET_FLOW_STATUS_REQ_V01_MAX_MSG_LEN,
+			       dfc_get_flow_status_req_msg_v01_ei, req);
+	if (ret < 0) {
+		qmi_txn_cancel(txn);
+		pr_err("%s() Failed sending request, err: %d\n",
+			__func__, ret);
+		goto out;
+	}
+
+	ret = qmi_txn_wait(txn, DFC_TIMEOUT_JF);
+	if (ret < 0) {
+		pr_err("%s() Response waiting failed, err: %d\n",
+			__func__, ret);
+	} else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+		pr_err("%s() Request rejected, result: %d, err: %d\n",
+			__func__, resp->resp.result, resp->resp.error);
+		ret = -resp->resp.result;
+	}
+
+out:
+	kfree(txn);
+	kfree(req);
+	return ret;
+}
+
+static int dfc_init_service(struct dfc_qmi_data *data)
 {
 	int rc;
 
-	rc = dfc_bind_client_req(&data->handle, &data->ssctl,
-				 &qmi->fc_info[data->index].svc);
+	rc = dfc_bind_client_req(&data->handle, &data->ssctl, &data->svc);
 	if (rc < 0)
 		return rc;
 
@@ -666,21 +838,18 @@
 			       struct rmnet_bearer_map *bearer,
 			       struct qos_info *qos)
 {
-	struct list_head *p;
 	struct rmnet_flow_map *itm;
 	int rc = 0, qlen;
 	int enable;
 
 	enable = bearer->grant_size ? 1 : 0;
 
-	list_for_each(p, &qos->flow_head) {
-		itm = list_entry(p, struct rmnet_flow_map, list);
-
+	list_for_each_entry(itm, &qos->flow_head, list) {
 		if (itm->bearer_id == bearer->bearer_id) {
 			/*
 			 * Do not flow disable ancillary q if ancillary is true
 			 */
-			if (bearer->ancillary && enable == 0 &&
+			if (bearer->tcp_bidir && enable == 0 &&
 					DFC_IS_ANCILLARY(itm->ip_type))
 				continue;
 
@@ -705,36 +874,39 @@
 				struct qos_info *qos, u8 ack_req, u32 ancillary,
 				struct dfc_flow_status_info_type_v01 *fc_info)
 {
-	struct list_head *p;
-	struct rmnet_bearer_map *bearer_itm = NULL;
-	int enable;
+	struct rmnet_bearer_map *bearer_itm;
+	struct rmnet_flow_map *flow_itm;
+	int rc = 0, qlen;
+	bool enable;
 
-	list_for_each(p, &qos->bearer_head) {
-		bearer_itm = list_entry(p, struct rmnet_bearer_map, list);
+	enable = fc_info->num_bytes > 0 ? 1 : 0;
 
+	list_for_each_entry(bearer_itm, &qos->bearer_head, list) {
 		bearer_itm->grant_size = fc_info->num_bytes;
 		bearer_itm->grant_thresh =
 			qmi_rmnet_grant_per(bearer_itm->grant_size);
 		bearer_itm->seq = fc_info->seq_num;
 		bearer_itm->ack_req = ack_req;
-		bearer_itm->ancillary = ancillary;
+		bearer_itm->tcp_bidir = DFC_IS_TCP_BIDIR(ancillary);
+		bearer_itm->last_grant = fc_info->num_bytes;
+		bearer_itm->last_seq = fc_info->seq_num;
 	}
 
-	enable = fc_info->num_bytes > 0 ? 1 : 0;
-
-	if (enable)
-		netif_tx_wake_all_queues(dev);
-	else
-		netif_tx_stop_all_queues(dev);
-
-	trace_dfc_qmi_tc(dev->name, 0xFF, 0, fc_info->num_bytes, 0, 0, enable);
+	list_for_each_entry(flow_itm, &qos->flow_head, list) {
+		qlen = qmi_rmnet_flow_control(dev, flow_itm->tcm_handle,
+					      enable);
+		trace_dfc_qmi_tc(dev->name, flow_itm->bearer_id,
+				 flow_itm->flow_id, fc_info->num_bytes,
+				 qlen, flow_itm->tcm_handle, enable);
+		rc++;
+	}
 
 	if (enable == 0 && ack_req)
 		dfc_send_ack(dev, fc_info->bearer_id,
 			     fc_info->seq_num, fc_info->mux_id,
 			     DFC_ACK_TYPE_DISABLE);
 
-	return 0;
+	return rc;
 }
 
 static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
@@ -743,33 +915,42 @@
 {
 	struct rmnet_bearer_map *itm = NULL;
 	int rc = 0;
-	int action = -1;
+	bool action = false;
 
 	itm = qmi_rmnet_get_bearer_map(qos, fc_info->bearer_id);
 	if (itm) {
-		if (itm->grant_size == 0 && fc_info->num_bytes > 0)
-			action = 1;
-		else if (itm->grant_size > 0 && fc_info->num_bytes == 0)
-			action = 0;
+		/* The RAT switch flag indicates the start and end of
+		 * the switch. Ignore indications in between.
+		 */
+		if (DFC_IS_RAT_SWITCH(ancillary))
+			itm->rat_switch = !fc_info->num_bytes;
+		else
+			if (itm->rat_switch)
+				return 0;
+
+		if ((itm->grant_size == 0 && fc_info->num_bytes > 0) ||
+		    (itm->grant_size > 0 && fc_info->num_bytes == 0))
+			action = true;
 
 		itm->grant_size = fc_info->num_bytes;
 		itm->grant_thresh = qmi_rmnet_grant_per(itm->grant_size);
 		itm->seq = fc_info->seq_num;
 		itm->ack_req = ack_req;
-		itm->ancillary = ancillary;
+		itm->tcp_bidir = DFC_IS_TCP_BIDIR(ancillary);
+		itm->last_grant = fc_info->num_bytes;
+		itm->last_seq = fc_info->seq_num;
 
-		if (action != -1)
+		if (action)
 			rc = dfc_bearer_flow_ctl(dev, itm, qos);
 	} else {
-		pr_debug("grant %u before flow activate\n", fc_info->num_bytes);
 		qos->default_grant = fc_info->num_bytes;
 	}
 	return rc;
 }
 
-static void dfc_do_burst_flow_control(struct work_struct *work)
+static void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
+				      struct dfc_svc_ind *svc_ind)
 {
-	struct dfc_svc_ind *svc_ind = (struct dfc_svc_ind *)work;
 	struct dfc_flow_status_ind_msg_v01 *ind = &svc_ind->dfc_info;
 	struct net_device *dev;
 	struct qos_info *qos;
@@ -779,11 +960,6 @@
 	u32 ancillary;
 	int i, j;
 
-	if (unlikely(svc_ind->data->restart_state)) {
-		kfree(svc_ind);
-		return;
-	}
-
 	rcu_read_lock();
 
 	for (i = 0; i < ind->flow_status_len; i++) {
@@ -801,7 +977,7 @@
 			}
 		}
 
-		trace_dfc_flow_ind(svc_ind->data->index,
+		trace_dfc_flow_ind(dfc->index,
 				   i, flow_status->mux_id,
 				   flow_status->bearer_id,
 				   flow_status->num_bytes,
@@ -809,7 +985,7 @@
 				   ack_req,
 				   ancillary);
 
-		dev = rmnet_get_rmnet_dev(svc_ind->data->rmnet_port,
+		dev = rmnet_get_rmnet_dev(dfc->rmnet_port,
 					  flow_status->mux_id);
 		if (!dev)
 			goto clean_out;
@@ -832,7 +1008,38 @@
 
 clean_out:
 	rcu_read_unlock();
-	kfree(svc_ind);
+}
+
+static void dfc_qmi_ind_work(struct work_struct *work)
+{
+	struct dfc_qmi_data *dfc = container_of(work, struct dfc_qmi_data,
+						qmi_ind_work);
+	struct dfc_svc_ind *svc_ind;
+	unsigned long flags;
+
+	if (!dfc)
+		return;
+
+	local_bh_disable();
+
+	do {
+		spin_lock_irqsave(&dfc->qmi_ind_lock, flags);
+		svc_ind = list_first_entry_or_null(&dfc->qmi_ind_q,
+						   struct dfc_svc_ind, list);
+		if (svc_ind)
+			list_del(&svc_ind->list);
+		spin_unlock_irqrestore(&dfc->qmi_ind_lock, flags);
+
+		if (svc_ind) {
+			if (!dfc->restart_state)
+				dfc_do_burst_flow_control(dfc, svc_ind);
+			kfree(svc_ind);
+		}
+	} while (svc_ind != NULL);
+
+	local_bh_enable();
+
+	qmi_rmnet_set_dl_msg_active(dfc->rmnet_port);
 }
 
 static void dfc_clnt_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
@@ -842,6 +1049,7 @@
 						handle);
 	struct dfc_flow_status_ind_msg_v01 *ind_msg;
 	struct dfc_svc_ind *svc_ind;
+	unsigned long flags;
 
 	if (qmi != &dfc->handle)
 		return;
@@ -858,13 +1066,13 @@
 		if (!svc_ind)
 			return;
 
-		INIT_WORK((struct work_struct *)svc_ind,
-			  dfc_do_burst_flow_control);
-
 		memcpy(&svc_ind->dfc_info, ind_msg, sizeof(*ind_msg));
-		svc_ind->data = dfc;
 
-		queue_work(dfc->dfc_wq, (struct work_struct *)svc_ind);
+		spin_lock_irqsave(&dfc->qmi_ind_lock, flags);
+		list_add_tail(&svc_ind->list, &dfc->qmi_ind_q);
+		spin_unlock_irqrestore(&dfc->qmi_ind_lock, flags);
+
+		queue_work(dfc->dfc_wq, &dfc->qmi_ind_work);
 	}
 }
 
@@ -875,25 +1083,32 @@
 						 svc_arrive);
 	struct qmi_info *qmi;
 
+	if (data->restart_state == 1)
+		return;
+
+	rc = dfc_init_service(data);
+	if (rc < 0) {
+		pr_err("%s Failed to init service, err[%d]\n", __func__, rc);
+		return;
+	}
+
+	rtnl_lock();
 	qmi = (struct qmi_info *)rmnet_get_qmi_pt(data->rmnet_port);
-	if (!qmi)
-		goto clean_out;
+	if (!qmi) {
+		rtnl_unlock();
+		return;
+	}
 
-	rc = dfc_init_service(data, qmi);
-	if (rc < 0)
-		goto clean_out;
-
-	qmi->fc_info[data->index].dfc_client = (void *)data;
+	qmi->dfc_pending[data->index] = NULL;
+	qmi->dfc_clients[data->index] = (void *)data;
 	trace_dfc_client_state_up(data->index,
-				  qmi->fc_info[data->index].svc.instance,
-				  qmi->fc_info[data->index].svc.ep_type,
-				  qmi->fc_info[data->index].svc.iface_id);
-	return;
+				  data->svc.instance,
+				  data->svc.ep_type,
+				  data->svc.iface_id);
 
-clean_out:
-	qmi_handle_release(&data->handle);
-	destroy_workqueue(data->dfc_wq);
-	kfree(data);
+	rtnl_unlock();
+
+	pr_info("Connection established with the DFC Service\n");
 }
 
 static int dfc_svc_arrive(struct qmi_handle *qmi, struct qmi_service *svc)
@@ -935,11 +1150,15 @@
 	{},
 };
 
-int dfc_qmi_client_init(void *port, int index, struct qmi_info *qmi)
+int dfc_qmi_client_init(void *port, int index, struct svc_info *psvc,
+			struct qmi_info *qmi)
 {
 	struct dfc_qmi_data *data;
 	int rc = -ENOMEM;
 
+	if (!port || !qmi)
+		return -EINVAL;
+
 	data = kzalloc(sizeof(struct dfc_qmi_data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
@@ -947,6 +1166,11 @@
 	data->rmnet_port = port;
 	data->index = index;
 	data->restart_state = 0;
+	memcpy(&data->svc, psvc, sizeof(data->svc));
+
+	INIT_WORK(&data->qmi_ind_work, dfc_qmi_ind_work);
+	INIT_LIST_HEAD(&data->qmi_ind_q);
+	spin_lock_init(&data->qmi_ind_lock);
 
 	data->dfc_wq = create_singlethread_workqueue("dfc_wq");
 	if (!data->dfc_wq) {
@@ -956,7 +1180,7 @@
 
 	INIT_WORK(&data->svc_arrive, dfc_svc_init);
 	rc = qmi_handle_init(&data->handle,
-			     QMI_DFC_FLOW_STATUS_IND_V01_MAX_MSG_LEN,
+			     QMI_DFC_GET_FLOW_STATUS_RESP_V01_MAX_MSG_LEN,
 			     &server_ops, qmi_indication_handler);
 	if (rc < 0) {
 		pr_err("%s: failed qmi_handle_init - rc[%d]\n", __func__, rc);
@@ -965,12 +1189,14 @@
 
 	rc = qmi_add_lookup(&data->handle, DFC_SERVICE_ID_V01,
 			    DFC_SERVICE_VERS_V01,
-			    qmi->fc_info[index].svc.instance);
+			    psvc->instance);
 	if (rc < 0) {
 		pr_err("%s: failed qmi_add_lookup - rc[%d]\n", __func__, rc);
 		goto err2;
 	}
 
+	qmi->dfc_pending[index] = (void *)data;
+
 	return 0;
 
 err2:
@@ -1049,8 +1275,45 @@
 	int i;
 
 	for (i = 0; i < MAX_CLIENT_NUM; i++) {
-		dfc_data = (struct dfc_qmi_data *)(qmi->fc_info[i].dfc_client);
+		dfc_data = (struct dfc_qmi_data *)(qmi->dfc_clients[i]);
 		if (dfc_data)
 			flush_workqueue(dfc_data->dfc_wq);
 	}
 }
+
+/*
+ * dfc_qmi_query_flow() - synchronously query flow status from the modem
+ * and run burst flow control on the result.
+ * @dfc_data: opaque pointer to the dfc_qmi_data client (may be NULL).
+ *
+ * Fix vs. original: the NULL-client check now happens before the two
+ * GFP_ATOMIC allocations, so a missing client no longer burns atomic
+ * memory just to free it again.
+ */
+void dfc_qmi_query_flow(void *dfc_data)
+{
+	struct dfc_qmi_data *data = (struct dfc_qmi_data *)dfc_data;
+	struct dfc_get_flow_status_resp_msg_v01 *resp;
+	struct dfc_svc_ind *svc_ind;
+	int rc;
+
+	/* Nothing to query without a client handle. */
+	if (!data)
+		return;
+
+	resp = kzalloc(sizeof(*resp), GFP_ATOMIC);
+	if (!resp)
+		return;
+
+	svc_ind = kzalloc(sizeof(*svc_ind), GFP_ATOMIC);
+	if (!svc_ind) {
+		kfree(resp);
+		return;
+	}
+
+	rc = dfc_get_flow_status_req(&data->handle, &data->ssctl, resp);
+
+	/* Ignore failed requests and malformed/out-of-range responses. */
+	if (rc < 0 || !resp->flow_status_valid || resp->flow_status_len < 1 ||
+	    resp->flow_status_len > DFC_MAX_BEARERS_V01)
+		goto done;
+
+	svc_ind->dfc_info.flow_status_valid = resp->flow_status_valid;
+	svc_ind->dfc_info.flow_status_len = resp->flow_status_len;
+	memcpy(&svc_ind->dfc_info.flow_status, resp->flow_status,
+		sizeof(resp->flow_status[0]) * resp->flow_status_len);
+	dfc_do_burst_flow_control(data, svc_ind);
+
+done:
+	kfree(svc_ind);
+	kfree(resp);
+}
diff --git a/drivers/soc/qcom/msm_bus/Makefile b/drivers/soc/qcom/msm_bus/Makefile
index a217fd4..a764258 100644
--- a/drivers/soc/qcom/msm_bus/Makefile
+++ b/drivers/soc/qcom/msm_bus/Makefile
@@ -8,7 +8,7 @@
 
 ifdef CONFIG_QCOM_BUS_CONFIG_RPMH
 	obj-y += msm_bus_fabric_rpmh.o msm_bus_arb_rpmh.o msm_bus_rules.o \
-		msm_bus_bimc_rpmh.o msm_bus_noc_rpmh.o
+		msm_bus_bimc_rpmh.o msm_bus_noc_rpmh.o msm_bus_proxy_client.o
 	obj-$(CONFIG_OF) += msm_bus_of_rpmh.o
 else
 	obj-y += msm_bus_fabric_adhoc.o msm_bus_arb_adhoc.o msm_bus_rules.o \
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_proxy_client.c b/drivers/soc/qcom/msm_bus/msm_bus_proxy_client.c
new file mode 100644
index 0000000..d3ca18f
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_proxy_client.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/msm-bus.h>
+
+struct proxy_client {
+	struct msm_bus_scale_pdata *pdata;
+	unsigned int client_handle;
+};
+
+static struct proxy_client proxy_client_info;
+
+/*
+ * Probe: register a bus-scale client from the DT node and cast an initial
+ * proxy bandwidth vote (usecase index 1). The vote is dropped again at
+ * late_initcall_sync time by msm_bus_proxy_client_unvote().
+ */
+static int msm_bus_device_proxy_client_probe(struct platform_device *pdev)
+{
+	int ret;
+
+	proxy_client_info.pdata = msm_bus_cl_get_pdata(pdev);
+
+	/* No scale data in DT: nothing to vote for — treat as success. */
+	if (!proxy_client_info.pdata)
+		return 0;
+
+	proxy_client_info.client_handle =
+		msm_bus_scale_register_client(proxy_client_info.pdata);
+
+	if (!proxy_client_info.client_handle) {
+		dev_err(&pdev->dev, "Unable to register bus client\n");
+		return -ENODEV;
+	}
+
+	ret = msm_bus_scale_client_update_request(
+					proxy_client_info.client_handle, 1);
+	if (ret)
+		dev_err(&pdev->dev, "Bandwidth update failed (%d)\n", ret);
+
+	return ret;
+}
+
+static const struct of_device_id proxy_client_match[] = {
+	{.compatible = "qcom,bus-proxy-client"},
+	{}
+};
+
+static struct platform_driver msm_bus_proxy_client_driver = {
+	.probe = msm_bus_device_proxy_client_probe,
+	.driver = {
+		.name = "msm_bus_proxy_client_device",
+		.of_match_table = proxy_client_match,
+	},
+};
+
+/* Register the proxy-client platform driver early in boot. */
+static int __init msm_bus_proxy_client_init_driver(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&msm_bus_proxy_client_driver);
+	if (ret)
+		pr_err("Failed to register proxy client device driver\n");
+
+	return ret;
+}
+
+/*
+ * Runs at late_initcall_sync, i.e. after all built-in drivers have probed:
+ * drop the boot-time proxy bandwidth vote and release the client handle.
+ */
+static int __init msm_bus_proxy_client_unvote(void)
+{
+	int ret;
+
+	/* Probe never registered a client (no DT data) — nothing to undo. */
+	if (!proxy_client_info.pdata || !proxy_client_info.client_handle)
+		return 0;
+
+	ret = msm_bus_scale_client_update_request(
+					proxy_client_info.client_handle, 0);
+	if (ret)
+		pr_err("%s: bandwidth update request failed (%d)\n",
+			__func__, ret);
+
+	msm_bus_scale_unregister_client(proxy_client_info.client_handle);
+
+	return 0;
+}
+
+subsys_initcall_sync(msm_bus_proxy_client_init_driver);
+late_initcall_sync(msm_bus_proxy_client_unvote);
diff --git a/drivers/soc/qcom/qbt_handler.c b/drivers/soc/qcom/qbt_handler.c
new file mode 100644
index 0000000..0c93e1b
--- /dev/null
+++ b/drivers/soc/qcom/qbt_handler.c
@@ -0,0 +1,959 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ */
+
+#define DEBUG
+#define pr_fmt(fmt) "qbt:%s: " fmt, __func__
+
+#include <linux/input.h>
+#include <linux/ktime.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/pm.h>
+#include <linux/of.h>
+#include <linux/mutex.h>
+#include <linux/atomic.h>
+#include <linux/of_gpio.h>
+#include <linux/kfifo.h>
+#include <linux/poll.h>
+#include <uapi/linux/qbt_handler.h>
+
+#define QBT_DEV "qbt"
+#define MAX_FW_EVENTS 128
+#define MINOR_NUM_FD 0
+#define MINOR_NUM_IPC 1
+#define QBT_INPUT_DEV_NAME "qbt_key_input"
+#define QBT_INPUT_DEV_VERSION 0x0100
+
+struct finger_detect_gpio {
+	int gpio;
+	int active_low;
+	int irq;
+	struct work_struct work;
+	int last_gpio_state;
+	int event_reported;
+	bool irq_enabled;
+};
+
+struct fw_event_desc {
+	enum qbt_fw_event ev;
+};
+
+struct fw_ipc_info {
+	int gpio;
+	int irq;
+	bool irq_enabled;
+	struct work_struct work;
+};
+
+/*
+ * Per-device state shared by the two character devices (<node>_fd and
+ * <node>_ipc). fd_available/ipc_available are single-open guards
+ * (1 == free, claimed via atomic_dec_and_test in qbt_open).
+ */
+struct qbt_drvdata {
+	struct class	*qbt_class;
+	struct cdev	qbt_fd_cdev;
+	struct cdev	qbt_ipc_cdev;
+	struct input_dev	*in_dev;
+	struct device	*dev;
+	char		*qbt_node;
+	atomic_t	fd_available;
+	atomic_t	ipc_available;
+	struct mutex	mutex;
+	struct mutex	fd_events_mutex;
+	struct mutex	ipc_events_mutex;
+	struct fw_ipc_info	fw_ipc;
+	struct finger_detect_gpio fd_gpio;
+	/* Event queues consumed by qbt_read(); each guarded by its mutex. */
+	DECLARE_KFIFO(fd_events, struct fw_event_desc, MAX_FW_EVENTS);
+	DECLARE_KFIFO(ipc_events, struct fw_event_desc, MAX_FW_EVENTS);
+	wait_queue_head_t read_wait_queue_fd;
+	wait_queue_head_t read_wait_queue_ipc;
+	bool is_wuhb_connected;
+};
+
+/**
+ * qbt_open() - Function called when user space opens device.
+ * Successful if driver not currently open.
+ * @inode:	ptr to inode object
+ * @file:	ptr to file object
+ *
+ * Return: 0 on success. Error code on failure.
+ */
+static int qbt_open(struct inode *inode, struct file *file)
+{
+	struct qbt_drvdata *drvdata = NULL;
+	int rc = 0;
+	int minor_no = -1;
+
+	if (!inode || !inode->i_cdev || !file) {
+		pr_err("NULL pointer passed\n");
+		return -EINVAL;
+	}
+	/* The minor selects which cdev (and hence role) was opened. */
+	minor_no = iminor(inode);
+	if (minor_no == MINOR_NUM_FD) {
+		drvdata = container_of(inode->i_cdev,
+				struct qbt_drvdata, qbt_fd_cdev);
+	} else if (minor_no == MINOR_NUM_IPC) {
+		drvdata = container_of(inode->i_cdev,
+				struct qbt_drvdata, qbt_ipc_cdev);
+	} else {
+		pr_err("Invalid minor number\n");
+		return -EINVAL;
+	}
+
+	file->private_data = drvdata;
+
+	pr_debug("entry minor_no=%d\n", minor_no);
+
+	/* disallowing concurrent opens */
+	/* dec-and-test atomically claims the node; inc restores on failure */
+	if (minor_no == MINOR_NUM_FD &&
+			!atomic_dec_and_test(&drvdata->fd_available)) {
+		atomic_inc(&drvdata->fd_available);
+		rc = -EBUSY;
+	} else if (minor_no == MINOR_NUM_IPC &&
+			!atomic_dec_and_test(&drvdata->ipc_available)) {
+		atomic_inc(&drvdata->ipc_available);
+		rc = -EBUSY;
+	}
+
+	pr_debug("exit : %d\n", rc);
+	return rc;
+}
+
+/**
+ * qbt_release() - Function called when user space closes device.
+ *
+ * @inode:	ptr to inode object
+ * @file:	ptr to file object
+ *
+ * Return: 0 on success. Error code on failure.
+ */
+static int qbt_release(struct inode *inode, struct file *file)
+{
+	struct qbt_drvdata *drvdata;
+	int minor_no = -1;
+
+	if (!file || !file->private_data || !inode) {
+		pr_err("NULL pointer passed\n");
+		return -EINVAL;
+	}
+	drvdata = file->private_data;
+	minor_no = iminor(inode);
+	/* Return the single-open slot claimed in qbt_open(). */
+	if (minor_no == MINOR_NUM_FD) {
+		atomic_inc(&drvdata->fd_available);
+	} else if (minor_no == MINOR_NUM_IPC) {
+		atomic_inc(&drvdata->ipc_available);
+	} else {
+		pr_err("Invalid minor number\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/**
+ * qbt_ioctl() - Function called when user space calls ioctl.
+ * @file:	ptr to file object; private_data holds the driver data
+ * @cmd:	cmd identifier such as QBT_IS_WUHB_CONNECTED
+ * @arg:	ptr to the relevant structure: qbt_wuhb_connected_status
+ *              or qbt_key_event, depending on which cmd is passed
+ *
+ * Return: 0 on success. Error code on failure.
+ */
+static long qbt_ioctl(
+		struct file *file, unsigned int cmd, unsigned long arg)
+{
+	int rc = 0;
+	void __user *priv_arg = (void __user *)arg;
+	struct qbt_drvdata *drvdata;
+
+	if (!file || !file->private_data) {
+		pr_err("NULL pointer passed\n");
+		return -EINVAL;
+	}
+
+	drvdata = file->private_data;
+
+	if (IS_ERR(priv_arg)) {
+		dev_err(drvdata->dev, "%s: invalid user space pointer %lu\n",
+			__func__, arg);
+		return -EINVAL;
+	}
+
+	/* One ioctl at a time; also serializes against qbt_suspend(). */
+	mutex_lock(&drvdata->mutex);
+
+	pr_debug("cmd received %d\n", cmd);
+
+	/* irq_enabled flags keep enable_irq/disable_irq calls balanced. */
+	switch (cmd) {
+	case QBT_ENABLE_IPC:
+	{
+		if (!drvdata->fw_ipc.irq_enabled) {
+			enable_irq(drvdata->fw_ipc.irq);
+			drvdata->fw_ipc.irq_enabled = true;
+			pr_debug("%s: QBT_ENABLE_IPC\n", __func__);
+		}
+		break;
+	}
+	case QBT_DISABLE_IPC:
+	{
+		if (drvdata->fw_ipc.irq_enabled) {
+			disable_irq(drvdata->fw_ipc.irq);
+			drvdata->fw_ipc.irq_enabled = false;
+			pr_debug("%s: QBT_DISABLE_IPC\n", __func__);
+		}
+		break;
+	}
+	case QBT_ENABLE_FD:
+	{
+		/* FD irq only exists when the WUHB line is wired up. */
+		if (drvdata->is_wuhb_connected &&
+				!drvdata->fd_gpio.irq_enabled) {
+			enable_irq(drvdata->fd_gpio.irq);
+			drvdata->fd_gpio.irq_enabled = true;
+			pr_debug("%s: QBT_ENABLE_FD\n", __func__);
+		}
+		break;
+	}
+	case QBT_DISABLE_FD:
+	{
+		if (drvdata->is_wuhb_connected &&
+				drvdata->fd_gpio.irq_enabled) {
+			disable_irq(drvdata->fd_gpio.irq);
+			drvdata->fd_gpio.irq_enabled = false;
+			pr_debug("%s: QBT_DISABLE_FD\n", __func__);
+		}
+		break;
+	}
+	case QBT_IS_WUHB_CONNECTED:
+	{
+		struct qbt_wuhb_connected_status wuhb_connected_status;
+
+		wuhb_connected_status.is_wuhb_connected =
+				drvdata->is_wuhb_connected;
+		rc = copy_to_user((void __user *)priv_arg,
+				&wuhb_connected_status,
+				sizeof(wuhb_connected_status));
+
+		if (rc != 0) {
+			pr_err("Failed to copy wuhb connected status: %d\n",
+					rc);
+			rc = -EFAULT;
+			goto end;
+		}
+
+		break;
+	}
+	case QBT_SEND_KEY_EVENT:
+	{
+		/* Inject a key event from user space into the input device. */
+		struct qbt_key_event key_event;
+
+		if (copy_from_user(&key_event, priv_arg,
+			sizeof(key_event))
+				!= 0) {
+			rc = -EFAULT;
+			pr_err("failed copy from user space %d\n", rc);
+			goto end;
+		}
+
+		input_event(drvdata->in_dev, EV_KEY,
+				key_event.key, key_event.value);
+		input_sync(drvdata->in_dev);
+		break;
+	}
+	default:
+		pr_err("invalid cmd %d\n", cmd);
+		rc = -ENOIOCTLCMD;
+		goto end;
+	}
+
+end:
+	mutex_unlock(&drvdata->mutex);
+	return rc;
+}
+
+/* Return the event count of the fifo selected by @minor_no, taking the
+ * matching mutex around the read; unknown minors report 0.
+ */
+static int get_events_fifo_len_locked(
+		struct qbt_drvdata *drvdata, int minor_no)
+{
+	int num_events = 0;
+
+	switch (minor_no) {
+	case MINOR_NUM_FD:
+		mutex_lock(&drvdata->fd_events_mutex);
+		num_events = kfifo_len(&drvdata->fd_events);
+		mutex_unlock(&drvdata->fd_events_mutex);
+		break;
+	case MINOR_NUM_IPC:
+		mutex_lock(&drvdata->ipc_events_mutex);
+		num_events = kfifo_len(&drvdata->ipc_events);
+		mutex_unlock(&drvdata->ipc_events_mutex);
+		break;
+	default:
+		break;
+	}
+
+	return num_events;
+}
+
+/*
+ * qbt_read() - blocking read of one firmware event from the fifo that
+ * matches the opened minor (FD or IPC).
+ *
+ * Fixes vs. original:
+ *  - copy_to_user()'s return value (bytes NOT copied) was returned
+ *    directly, so a successful read returned 0 and a faulting read a
+ *    positive count; now returns sizeof(fw_event.ev) or -EFAULT.
+ *  - the entry pr_debug logged minor_no before it was computed
+ *    (always -1); it now runs after iminor().
+ *
+ * Return: number of bytes read on success, negative errno on failure.
+ */
+static ssize_t qbt_read(struct file *filp, char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	struct fw_event_desc fw_event;
+	struct qbt_drvdata *drvdata;
+	wait_queue_head_t *read_wait_queue = NULL;
+	int rc = 0;
+	int minor_no = -1;
+	int fifo_len;
+
+	if (!filp || !filp->private_data) {
+		pr_err("NULL pointer passed\n");
+		return -EINVAL;
+	}
+	drvdata = filp->private_data;
+
+	if (cnt < sizeof(fw_event.ev)) {
+		pr_err("Num bytes to read is too small\n");
+		return -EINVAL;
+	}
+
+	minor_no = iminor(filp->f_path.dentry->d_inode);
+	pr_debug("entry with numBytes = %zd, minor_no = %d\n", cnt, minor_no);
+	if (minor_no == MINOR_NUM_FD) {
+		read_wait_queue = &drvdata->read_wait_queue_fd;
+	} else if (minor_no == MINOR_NUM_IPC) {
+		read_wait_queue = &drvdata->read_wait_queue_ipc;
+	} else {
+		pr_err("Invalid minor number\n");
+		return -EINVAL;
+	}
+
+	/* Block (unless O_NONBLOCK) until at least one event is queued. */
+	fifo_len = get_events_fifo_len_locked(drvdata, minor_no);
+	while (fifo_len == 0) {
+		if (filp->f_flags & O_NONBLOCK) {
+			pr_debug("fw_events fifo: empty, returning\n");
+			return -EAGAIN;
+		}
+		pr_debug("fw_events fifo: empty, waiting\n");
+		if (wait_event_interruptible(*read_wait_queue,
+				(get_events_fifo_len_locked(
+				drvdata, minor_no) > 0)))
+			return -ERESTARTSYS;
+		fifo_len = get_events_fifo_len_locked(drvdata, minor_no);
+	}
+
+	if (minor_no == MINOR_NUM_FD) {
+		mutex_lock(&drvdata->fd_events_mutex);
+		rc = kfifo_get(&drvdata->fd_events, &fw_event);
+		mutex_unlock(&drvdata->fd_events_mutex);
+	} else {
+		/* minor_no was validated above, so this is MINOR_NUM_IPC. */
+		mutex_lock(&drvdata->ipc_events_mutex);
+		rc = kfifo_get(&drvdata->ipc_events, &fw_event);
+		mutex_unlock(&drvdata->ipc_events_mutex);
+	}
+
+	/* Another reader may have raced us to the event. */
+	if (!rc) {
+		pr_err("fw_events fifo: unexpectedly empty\n");
+		return -EINVAL;
+	}
+
+	pr_debug("Firmware event %d at minor no %d read at time %lu uS\n",
+			(int)fw_event.ev, minor_no,
+			(unsigned long)ktime_to_us(ktime_get()));
+
+	if (copy_to_user(ubuf, &fw_event.ev, sizeof(fw_event.ev)))
+		return -EFAULT;
+
+	return sizeof(fw_event.ev);
+}
+
+/*
+ * qbt_poll() - poll/select support for both device nodes.
+ *
+ * Fix vs. original: a poll handler returns an event mask (unsigned int),
+ * so returning -EINVAL produced a bogus mask with nearly every bit set.
+ * Error conditions now report POLLERR instead.
+ */
+static unsigned int qbt_poll(struct file *filp,
+	struct poll_table_struct *wait)
+{
+	struct qbt_drvdata *drvdata;
+	unsigned int mask = 0;
+	int minor_no = -1;
+
+	if (!filp || !filp->private_data) {
+		pr_err("NULL pointer passed\n");
+		return POLLERR;
+	}
+	drvdata = filp->private_data;
+
+	minor_no = iminor(filp->f_path.dentry->d_inode);
+	if (minor_no == MINOR_NUM_FD) {
+		poll_wait(filp, &drvdata->read_wait_queue_fd, wait);
+		if (kfifo_len(&drvdata->fd_events) > 0)
+			mask |= (POLLIN | POLLRDNORM);
+	} else if (minor_no == MINOR_NUM_IPC) {
+		poll_wait(filp, &drvdata->read_wait_queue_ipc, wait);
+		if (kfifo_len(&drvdata->ipc_events) > 0)
+			mask |= (POLLIN | POLLRDNORM);
+	} else {
+		pr_err("Invalid minor number\n");
+		return POLLERR;
+	}
+
+	return mask;
+}
+
+static const struct file_operations qbt_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = qbt_ioctl,
+	.open = qbt_open,
+	.release = qbt_release,
+	.read = qbt_read,
+	.poll = qbt_poll
+};
+
+/*
+ * qbt_dev_register() - allocate a 2-minor chrdev region and create the
+ * "<node>_fd" and "<node>_ipc" character devices.
+ *
+ * Fixes vs. original error unwind:
+ *  - if the ipc cdev_add failed, the fd cdev was leaked (only the
+ *    regions were unregistered); each step now has its own label.
+ *  - if the second device_create failed, the fd device was leaked;
+ *    it is now destroyed.
+ *  - the region is released with one count-2 unregister matching the
+ *    count-2 alloc_chrdev_region().
+ *
+ * Return: 0 on success. Error code on failure.
+ */
+static int qbt_dev_register(struct qbt_drvdata *drvdata)
+{
+	dev_t dev_no, major_no;
+	int ret = 0;
+	size_t node_size;
+	char *node_name = QBT_DEV;
+	struct device *dev = drvdata->dev;
+	struct device *device;
+
+	node_size = strlen(node_name) + 1;
+
+	drvdata->qbt_node = devm_kzalloc(dev, node_size, GFP_KERNEL);
+	if (!drvdata->qbt_node) {
+		ret = -ENOMEM;
+		goto err_alloc;
+	}
+
+	strlcpy(drvdata->qbt_node, node_name, node_size);
+
+	/* Two minors: MINOR_NUM_FD and MINOR_NUM_IPC. */
+	ret = alloc_chrdev_region(&dev_no, 0, 2, drvdata->qbt_node);
+	if (ret) {
+		pr_err("alloc_chrdev_region failed %d\n", ret);
+		goto err_alloc;
+	}
+	major_no = MAJOR(dev_no);
+
+	cdev_init(&drvdata->qbt_fd_cdev, &qbt_fops);
+	drvdata->qbt_fd_cdev.owner = THIS_MODULE;
+	ret = cdev_add(&drvdata->qbt_fd_cdev,
+			MKDEV(major_no, MINOR_NUM_FD), 1);
+	if (ret) {
+		pr_err("cdev_add failed for fd %d\n", ret);
+		goto err_unregister_region;
+	}
+
+	cdev_init(&drvdata->qbt_ipc_cdev, &qbt_fops);
+	drvdata->qbt_ipc_cdev.owner = THIS_MODULE;
+	ret = cdev_add(&drvdata->qbt_ipc_cdev,
+			MKDEV(major_no, MINOR_NUM_IPC), 1);
+	if (ret) {
+		pr_err("cdev_add failed for ipc %d\n", ret);
+		goto err_del_fd_cdev;
+	}
+
+	drvdata->qbt_class = class_create(THIS_MODULE,
+					   drvdata->qbt_node);
+	if (IS_ERR(drvdata->qbt_class)) {
+		ret = PTR_ERR(drvdata->qbt_class);
+		pr_err("class_create failed %d\n", ret);
+		goto err_del_ipc_cdev;
+	}
+
+	device = device_create(drvdata->qbt_class, NULL,
+			       drvdata->qbt_fd_cdev.dev, drvdata,
+			       "%s_fd", drvdata->qbt_node);
+	if (IS_ERR(device)) {
+		ret = PTR_ERR(device);
+		pr_err("fd device_create failed %d\n", ret);
+		goto err_class_destroy;
+	}
+
+	device = device_create(drvdata->qbt_class, NULL,
+				drvdata->qbt_ipc_cdev.dev, drvdata,
+				"%s_ipc", drvdata->qbt_node);
+	if (IS_ERR(device)) {
+		ret = PTR_ERR(device);
+		pr_err("ipc device_create failed %d\n", ret);
+		goto err_destroy_fd_device;
+	}
+
+	return 0;
+
+err_destroy_fd_device:
+	device_destroy(drvdata->qbt_class, drvdata->qbt_fd_cdev.dev);
+err_class_destroy:
+	class_destroy(drvdata->qbt_class);
+err_del_ipc_cdev:
+	cdev_del(&drvdata->qbt_ipc_cdev);
+err_del_fd_cdev:
+	cdev_del(&drvdata->qbt_fd_cdev);
+err_unregister_region:
+	unregister_chrdev_region(dev_no, 2);
+err_alloc:
+	return ret;
+}
+
+/**
+ * qbt_create_input_device() - Function allocates an input
+ * device, configures it for key events and registers it
+ *
+ * @drvdata:	ptr to driver data
+ *
+ * Return: 0 on success. Error code on failure.
+ */
+static int qbt_create_input_device(struct qbt_drvdata *drvdata)
+{
+	int rc = 0;
+
+	drvdata->in_dev = input_allocate_device();
+	if (drvdata->in_dev == NULL) {
+		dev_err(drvdata->dev, "%s: input_allocate_device() failed\n",
+			__func__);
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	drvdata->in_dev->name = QBT_INPUT_DEV_NAME;
+	drvdata->in_dev->phys = NULL;
+	drvdata->in_dev->id.bustype = BUS_HOST;
+	drvdata->in_dev->id.vendor  = 0x0001;
+	drvdata->in_dev->id.product = 0x0001;
+	drvdata->in_dev->id.version = QBT_INPUT_DEV_VERSION;
+
+	/* Key + absolute-position events (see QBT_SEND_KEY_EVENT ioctl). */
+	drvdata->in_dev->evbit[0] = BIT_MASK(EV_KEY) |  BIT_MASK(EV_ABS);
+	drvdata->in_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+
+	drvdata->in_dev->keybit[BIT_WORD(KEY_HOMEPAGE)] |=
+		BIT_MASK(KEY_HOMEPAGE);
+	drvdata->in_dev->keybit[BIT_WORD(KEY_VOLUMEDOWN)] |=
+		BIT_MASK(KEY_VOLUMEDOWN);
+	drvdata->in_dev->keybit[BIT_WORD(KEY_POWER)] |=
+		BIT_MASK(KEY_POWER);
+
+	input_set_abs_params(drvdata->in_dev, ABS_X,
+			     0,
+			     1000,
+			     0, 0);
+	input_set_abs_params(drvdata->in_dev, ABS_Y,
+			     0,
+			     1000,
+			     0, 0);
+
+	rc = input_register_device(drvdata->in_dev);
+	if (rc) {
+		dev_err(drvdata->dev, "%s: input_reg_dev() failed %d\n",
+			__func__, rc);
+		goto end;
+	}
+
+end:
+	/* Registration never succeeded if rc != 0, so plain free is correct. */
+	if (rc)
+		input_free_device(drvdata->in_dev);
+	return rc;
+}
+
+/*
+ * Translate a debounced finger-detect gpio state into an FD fifo event
+ * and wake any reader. Duplicate states are suppressed.
+ */
+static void qbt_fd_report_event(struct qbt_drvdata *drvdata, int state)
+{
+	struct fw_event_desc fw_event;
+
+	if (!drvdata->is_wuhb_connected) {
+		pr_err("Skipping as WUHB_INT is disconnected\n");
+		return;
+	}
+
+	/* Same state as last reported: nothing new to deliver. */
+	if (drvdata->fd_gpio.event_reported
+			&& state == drvdata->fd_gpio.last_gpio_state)
+		return;
+
+	pr_debug("gpio %d: report state %d current_time %lu uS\n",
+		drvdata->fd_gpio.gpio, state,
+		(unsigned long)ktime_to_us(ktime_get()));
+
+	drvdata->fd_gpio.event_reported = 1;
+	drvdata->fd_gpio.last_gpio_state = state;
+
+	fw_event.ev = (state ? FW_EVENT_FINGER_DOWN : FW_EVENT_FINGER_UP);
+
+	mutex_lock(&drvdata->fd_events_mutex);
+
+	/* Only the most recent finger state matters: drop stale events. */
+	kfifo_reset(&drvdata->fd_events);
+
+	if (!kfifo_put(&drvdata->fd_events, fw_event)) {
+		pr_err("FD events fifo: error adding item\n");
+	} else {
+		pr_debug("FD event %d queued at time %lu uS\n", fw_event.ev,
+				(unsigned long)ktime_to_us(ktime_get()));
+	}
+	mutex_unlock(&drvdata->fd_events_mutex);
+	wake_up_interruptible(&drvdata->read_wait_queue_fd);
+}
+
+/* Deferred half of the FD gpio interrupt: sample the pin and report it. */
+static void qbt_gpio_work_func(struct work_struct *work)
+{
+	struct qbt_drvdata *drvdata;
+	int fd_state;
+
+	if (!work) {
+		pr_err("NULL pointer passed\n");
+		return;
+	}
+
+	drvdata = container_of(work, struct qbt_drvdata, fd_gpio.work);
+
+	/* Normalize the raw level, honouring active-low polarity. */
+	fd_state = (__gpio_get_value(drvdata->fd_gpio.gpio) ? 1 : 0)
+			^ drvdata->fd_gpio.active_low;
+	qbt_fd_report_event(drvdata, fd_state);
+
+	/* Drop the wakeup reference taken in qbt_gpio_isr(). */
+	pm_relax(drvdata->dev);
+}
+
+/*
+ * Hard-irq handler for the finger-detect gpio: hold a wakeup reference
+ * (released in qbt_gpio_work_func) and defer the gpio read to a work item.
+ */
+static irqreturn_t qbt_gpio_isr(int irq, void *dev_id)
+{
+	struct qbt_drvdata *drvdata = dev_id;
+
+	if (!drvdata) {
+		pr_err("NULL pointer passed\n");
+		return IRQ_HANDLED;
+	}
+
+	if (irq != drvdata->fd_gpio.irq) {
+		pr_warn("invalid irq %d (expected %d)\n",
+			irq, drvdata->fd_gpio.irq);
+		return IRQ_HANDLED;
+	}
+
+	pr_debug("FD event received at time %lu uS\n",
+			(unsigned long)ktime_to_us(ktime_get()));
+
+	pm_stay_awake(drvdata->dev);
+	schedule_work(&drvdata->fd_gpio.work);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Deferred half of the IPC interrupt: queue an FW_EVENT_IPC marker for
+ * readers of the _ipc node, then release the wakeup reference taken in
+ * qbt_ipc_irq_handler().
+ */
+static void qbt_irq_report_event(struct work_struct *work)
+{
+	struct qbt_drvdata *drvdata;
+	struct fw_event_desc fw_ev_des;
+
+	if (!work) {
+		pr_err("NULL pointer passed\n");
+		return;
+	}
+	drvdata = container_of(work, struct qbt_drvdata, fw_ipc.work);
+
+	fw_ev_des.ev = FW_EVENT_IPC;
+	mutex_lock(&drvdata->ipc_events_mutex);
+	if (!kfifo_put(&drvdata->ipc_events, fw_ev_des)) {
+		pr_err("ipc events: fifo full, drop event %d\n",
+				(int) fw_ev_des.ev);
+	} else {
+		pr_debug("IPC event %d queued at time %lu uS\n", fw_ev_des.ev,
+				(unsigned long)ktime_to_us(ktime_get()));
+	}
+	mutex_unlock(&drvdata->ipc_events_mutex);
+	wake_up_interruptible(&drvdata->read_wait_queue_ipc);
+	pm_relax(drvdata->dev);
+}
+
+/**
+ * qbt_ipc_irq_handler() - function processes IPC
+ * interrupts on its own thread
+ * @irq:	the interrupt that occurred
+ * @dev_id: pointer to the qbt_drvdata
+ *
+ * Actual event delivery is deferred to qbt_irq_report_event() via the
+ * fw_ipc work item; a wakeup reference is held until that work runs.
+ *
+ * Return: IRQ_HANDLED when complete
+ */
+static irqreturn_t qbt_ipc_irq_handler(int irq, void *dev_id)
+{
+	struct qbt_drvdata *drvdata = (struct qbt_drvdata *)dev_id;
+
+	if (!drvdata) {
+		pr_err("NULL pointer passed\n");
+		return IRQ_HANDLED;
+	}
+
+	if (irq != drvdata->fw_ipc.irq) {
+		pr_warn("invalid irq %d (expected %d)\n",
+			irq, drvdata->fw_ipc.irq);
+		return IRQ_HANDLED;
+	}
+
+	pr_debug("IPC event received at time %lu uS\n",
+			(unsigned long)ktime_to_us(ktime_get()));
+
+	pm_stay_awake(drvdata->dev);
+	schedule_work(&drvdata->fw_ipc.work);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Request the finger-detect gpio and hook both edges to qbt_gpio_isr().
+ * No-op (success) when the WUHB line is not wired up.
+ */
+static int setup_fd_gpio_irq(struct platform_device *pdev,
+		struct qbt_drvdata *drvdata)
+{
+	int rc = 0;
+	int irq;
+	const char *desc = "qbt_finger_detect";
+
+	if (!drvdata->is_wuhb_connected) {
+		pr_err("Skipping as WUHB_INT is disconnected\n");
+		goto end;
+	}
+
+	rc = devm_gpio_request_one(&pdev->dev, drvdata->fd_gpio.gpio,
+		GPIOF_IN, desc);
+	if (rc < 0) {
+		pr_err("failed to request gpio %d, error %d\n",
+			drvdata->fd_gpio.gpio, rc);
+		goto end;
+	}
+
+
+	irq = gpio_to_irq(drvdata->fd_gpio.gpio);
+	if (irq < 0) {
+		rc = irq;
+		pr_err("unable to get irq number for gpio %d, error %d\n",
+			drvdata->fd_gpio.gpio, rc);
+		goto end;
+	}
+
+
+	drvdata->fd_gpio.irq = irq;
+	INIT_WORK(&drvdata->fd_gpio.work, qbt_gpio_work_func);
+
+	/* Both edges: finger-down and finger-up each raise an interrupt.
+	 * A positive return (nested irq) is intentionally treated as success.
+	 */
+	rc = devm_request_any_context_irq(&pdev->dev, drvdata->fd_gpio.irq,
+		qbt_gpio_isr, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+		desc, drvdata);
+
+	if (rc < 0) {
+		pr_err("unable to claim irq %d; error %d\n",
+			drvdata->fd_gpio.irq, rc);
+		goto end;
+	}
+
+end:
+	pr_debug("rc %d\n", rc);
+	return rc;
+}
+
+/*
+ * Map the IPC gpio to an interrupt and register a threaded handler.
+ *
+ * NOTE(review): gpio_to_irq() is called before devm_gpio_request_one();
+ * that usually works on this platform's gpio driver but is the reverse
+ * of the conventional order — confirm it is intentional.
+ */
+static int setup_ipc_irq(struct platform_device *pdev,
+	struct qbt_drvdata *drvdata)
+{
+	int rc = 0;
+	const char *desc = "qbt_ipc";
+
+	drvdata->fw_ipc.irq = gpio_to_irq(drvdata->fw_ipc.gpio);
+	INIT_WORK(&drvdata->fw_ipc.work, qbt_irq_report_event);
+	pr_debug("irq %d gpio %d\n",
+			drvdata->fw_ipc.irq, drvdata->fw_ipc.gpio);
+
+	if (drvdata->fw_ipc.irq < 0) {
+		rc = drvdata->fw_ipc.irq;
+		pr_err("no irq for gpio %d, error=%d\n",
+		  drvdata->fw_ipc.gpio, rc);
+		goto end;
+	}
+
+	rc = devm_gpio_request_one(&pdev->dev, drvdata->fw_ipc.gpio,
+			GPIOF_IN, desc);
+
+	if (rc < 0) {
+		pr_err("failed to request gpio %d, error %d\n",
+			drvdata->fw_ipc.gpio, rc);
+		goto end;
+	}
+
+	rc = devm_request_threaded_irq(&pdev->dev,
+		drvdata->fw_ipc.irq,
+		NULL,
+		qbt_ipc_irq_handler,
+		IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+		desc,
+		drvdata);
+
+	if (rc < 0) {
+		pr_err("failed to register for ipc irq %d, rc = %d\n",
+			drvdata->fw_ipc.irq, rc);
+		goto end;
+	}
+
+end:
+	return rc;
+}
+
+/**
+ * qbt_read_device_tree() - Function reads device tree
+ * properties into driver data
+ * @pdev:	ptr to platform device object
+ * @drvdata:	ptr to driver data
+ *
+ * The IPC gpio is mandatory; the finger-detect gpio is optional — its
+ * absence clears is_wuhb_connected and still returns success.
+ *
+ * Return: 0 on success. Error code on failure.
+ */
+static int qbt_read_device_tree(struct platform_device *pdev,
+	struct qbt_drvdata *drvdata)
+{
+	int rc = 0;
+	int gpio;
+	enum of_gpio_flags flags;
+
+	/* read IPC gpio */
+	drvdata->fw_ipc.gpio = of_get_named_gpio(pdev->dev.of_node,
+		"qcom,ipc-gpio", 0);
+	if (drvdata->fw_ipc.gpio < 0) {
+		rc = drvdata->fw_ipc.gpio;
+		pr_err("ipc gpio not found, error=%d\n", rc);
+		goto end;
+	}
+
+	gpio = of_get_named_gpio_flags(pdev->dev.of_node,
+				"qcom,finger-detect-gpio", 0, &flags);
+	if (gpio < 0) {
+		pr_err("failed to get gpio flags\n");
+		/* Optional gpio missing: rc stays 0, WUHB marked absent. */
+		drvdata->is_wuhb_connected = 0;
+		goto end;
+	}
+
+	drvdata->is_wuhb_connected = 1;
+	drvdata->fd_gpio.gpio = gpio;
+	drvdata->fd_gpio.active_low = flags & OF_GPIO_ACTIVE_LOW;
+
+end:
+	return rc;
+}
+
+/**
+ * qbt_probe() - Function loads hardware config from device tree
+ * @pdev:	ptr to platform device object
+ *
+ * Return: 0 on success. Error code on failure.
+ *
+ * NOTE(review): the error paths below return without unwinding earlier
+ * steps (chrdev/class registration, input device) — verify whether
+ * partial-probe cleanup is required, or whether probe failure is fatal
+ * enough here that the leak is acceptable.
+ */
+static int qbt_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct qbt_drvdata *drvdata;
+	int rc = 0;
+
+	pr_debug("entry\n");
+	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata)
+		return -ENOMEM;
+
+	drvdata->dev = &pdev->dev;
+	platform_set_drvdata(pdev, drvdata);
+
+	rc = qbt_read_device_tree(pdev, drvdata);
+	if (rc < 0)
+		goto end;
+
+	/* 1 == node free; claimed by qbt_open via atomic_dec_and_test. */
+	atomic_set(&drvdata->fd_available, 1);
+	atomic_set(&drvdata->ipc_available, 1);
+
+	mutex_init(&drvdata->mutex);
+	mutex_init(&drvdata->fd_events_mutex);
+	mutex_init(&drvdata->ipc_events_mutex);
+
+	rc = qbt_dev_register(drvdata);
+	if (rc < 0)
+		goto end;
+	rc = qbt_create_input_device(drvdata);
+	if (rc < 0)
+		goto end;
+	INIT_KFIFO(drvdata->fd_events);
+	INIT_KFIFO(drvdata->ipc_events);
+	init_waitqueue_head(&drvdata->read_wait_queue_fd);
+	init_waitqueue_head(&drvdata->read_wait_queue_ipc);
+
+	/* Both irqs start disabled; user space enables them via ioctl. */
+	rc = setup_fd_gpio_irq(pdev, drvdata);
+	if (rc < 0)
+		goto end;
+	drvdata->fd_gpio.irq_enabled = false;
+	disable_irq(drvdata->fd_gpio.irq);
+
+	rc = setup_ipc_irq(pdev, drvdata);
+	if (rc < 0)
+		goto end;
+	drvdata->fw_ipc.irq_enabled = false;
+	disable_irq(drvdata->fw_ipc.irq);
+
+	rc = device_init_wakeup(&pdev->dev, 1);
+	if (rc < 0)
+		goto end;
+
+end:
+	pr_debug("exit : %d\n", rc);
+	return rc;
+}
+
+/*
+ * Tear down everything qbt_probe() created.
+ *
+ * NOTE(review): the input device allocated in qbt_create_input_device()
+ * is not unregistered here — confirm whether input_unregister_device()
+ * should be added.
+ */
+static int qbt_remove(struct platform_device *pdev)
+{
+	struct qbt_drvdata *drvdata = platform_get_drvdata(pdev);
+
+	mutex_destroy(&drvdata->mutex);
+	mutex_destroy(&drvdata->fd_events_mutex);
+	mutex_destroy(&drvdata->ipc_events_mutex);
+
+	device_destroy(drvdata->qbt_class, drvdata->qbt_fd_cdev.dev);
+	device_destroy(drvdata->qbt_class, drvdata->qbt_ipc_cdev.dev);
+
+	class_destroy(drvdata->qbt_class);
+	cdev_del(&drvdata->qbt_fd_cdev);
+	cdev_del(&drvdata->qbt_ipc_cdev);
+	unregister_chrdev_region(drvdata->qbt_fd_cdev.dev, 1);
+	unregister_chrdev_region(drvdata->qbt_ipc_cdev.dev, 1);
+
+	device_init_wakeup(&pdev->dev, 0);
+
+	return 0;
+}
+
+static int qbt_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct qbt_drvdata *drvdata = platform_get_drvdata(pdev);
+
+	/*
+	 * Refuse to suspend while another thread holds the driver mutex
+	 * (an ioctl is in flight); otherwise arm the interrupts as wakeup
+	 * sources before going down. They are disarmed again in
+	 * qbt_resume().
+	 */
+	if (!mutex_trylock(&drvdata->mutex))
+		return -EBUSY;
+
+	if (drvdata->is_wuhb_connected)
+		enable_irq_wake(drvdata->fd_gpio.irq);
+	enable_irq_wake(drvdata->fw_ipc.irq);
+
+	mutex_unlock(&drvdata->mutex);
+
+	return 0;
+}
+
+/* Undo the wakeup-source arming done in qbt_suspend(). */
+static int qbt_resume(struct platform_device *pdev)
+{
+	struct qbt_drvdata *drvdata = platform_get_drvdata(pdev);
+
+	if (drvdata->is_wuhb_connected)
+		disable_irq_wake(drvdata->fd_gpio.irq);
+
+	disable_irq_wake(drvdata->fw_ipc.irq);
+
+	return 0;
+}
+
+static const struct of_device_id qbt_match[] = {
+	{ .compatible = "qcom,qbt-handler" },
+	{}
+};
+
+static struct platform_driver qbt_plat_driver = {
+	.probe = qbt_probe,
+	.remove = qbt_remove,
+	.suspend = qbt_suspend,
+	.resume = qbt_resume,
+	.driver = {
+		.name = "qbt_handler",
+		.of_match_table = qbt_match,
+	},
+};
+
+module_platform_driver(qbt_plat_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. QBT HANDLER");
diff --git a/drivers/soc/qcom/qdss_bridge.c b/drivers/soc/qcom/qdss_bridge.c
index ed2ee73..2d1e465 100644
--- a/drivers/soc/qcom/qdss_bridge.c
+++ b/drivers/soc/qcom/qdss_bridge.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #define KMSG_COMPONENT "QDSS diag bridge"
@@ -18,17 +18,24 @@
 #include <linux/mhi.h>
 #include <linux/usb/usb_qdss.h>
 #include <linux/of.h>
+#include <linux/delay.h>
 #include "qdss_bridge.h"
 
 #define MODULE_NAME "qdss_bridge"
-
-#define QDSS_BUF_SIZE		(16*1024)
-#define MHI_CLIENT_QDSS_IN	9
+#define INIT_STATUS -1
 
 /* Max number of objects needed */
 static int poolsize = 32;
 
 static struct class *mhi_class;
+static enum mhi_dev_state dev_state = INIT_STATUS;
+static enum mhi_ch curr_chan;
+
+static const char * const str_mhi_curr_chan[] = {
+		[QDSS]			= "QDSS",
+		[QDSS_HW]		= "IP_HW_QDSS",
+		[EMPTY]			= "EMPTY",
+};
 
 static const char * const str_mhi_transfer_mode[] = {
 		[MHI_TRANSFER_TYPE_USB]			= "usb",
@@ -136,6 +143,20 @@
 	return NULL;
 }
 
+/* Return 1 if any buffer-table entry is still in flight (unavailable),
+ * 0 once every entry has been returned.
+ */
+static int qdss_check_entry(struct qdss_bridge_drvdata *drvdata)
+{
+	struct qdss_buf_tbl_lst *entry;
+
+	list_for_each_entry(entry, &drvdata->buf_tbl, link) {
+		if (!atomic_read(&entry->available))
+			return 1;
+	}
+
+	return 0;
+}
 
 static void qdss_del_buf_tbl_entry(struct qdss_bridge_drvdata *drvdata,
 				void *buf)
@@ -152,7 +173,6 @@
 			return;
 		}
 	}
-
 	spin_unlock_bh(&drvdata->lock);
 }
 
@@ -206,6 +226,14 @@
 			str_mhi_transfer_mode[drvdata->mode]);
 }
 
+static ssize_t curr_chan_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	if (curr_chan < QDSS || curr_chan > EMPTY)
+		return -EINVAL;
+	return scnprintf(buf, PAGE_SIZE, "%s\n", str_mhi_curr_chan[curr_chan]);
+}
+
 static ssize_t mode_store(struct device *dev,
 					struct device_attribute *attr,
 					const char *buf, size_t size)
@@ -275,6 +303,7 @@
 }
 
 static DEVICE_ATTR_RW(mode);
+static DEVICE_ATTR_RO(curr_chan);
 
 static void mhi_read_work_fn(struct work_struct *work)
 {
@@ -776,6 +805,13 @@
 	drvdata = mhi_dev->priv_data;
 	if (!drvdata)
 		return;
+
+	pr_debug("remove dev state: %d\n", mhi_dev->mhi_cntrl->dev_state);
+
+	dev_state = mhi_dev->mhi_cntrl->dev_state;
+	if (mhi_dev->mhi_cntrl->dev_state != MHI_STATE_RESET)
+		curr_chan = EMPTY;
+
 	spin_lock_bh(&drvdata->lock);
 	if (drvdata->opened == ENABLE) {
 		drvdata->opened = SSR;
@@ -787,9 +823,11 @@
 			spin_unlock_bh(&drvdata->lock);
 			if (drvdata->usb_ch && drvdata->usb_ch->priv_usb)
 				usb_qdss_close(drvdata->usb_ch);
+			do {
+				msleep(20);
+			} while (qdss_check_entry(drvdata));
 		}
 		mhi_ch_close(drvdata);
-
 	} else
 		spin_unlock_bh(&drvdata->lock);
 
@@ -823,11 +861,40 @@
 				const struct mhi_device_id *id)
 {
 	int ret;
+	bool def = false;
 	unsigned int baseminor = 0;
 	unsigned int count = 1;
 	struct qdss_bridge_drvdata *drvdata;
+	struct device_node *of_node = mhi_dev->dev.of_node;
 	dev_t dev;
 
+	pr_debug("probe dev state: %d chan: %s curr_chan: %d\n",
+		  mhi_dev->mhi_cntrl->dev_state,
+		  id->chan,
+		  curr_chan);
+
+	def = of_property_read_bool(of_node, "mhi,default-channel");
+	if (dev_state == INIT_STATUS) {
+		if (!def)
+			return -EINVAL;
+		if (!strcmp(id->chan, "QDSS"))
+			curr_chan = QDSS;
+		if (!strcmp(id->chan, "QDSS_HW"))
+			curr_chan = QDSS_HW;
+	} else if (dev_state == MHI_STATE_RESET) {
+		if (strcmp(id->chan, str_mhi_curr_chan[curr_chan]))
+			return -EINVAL;
+	} else {
+		if (curr_chan != EMPTY) {
+			pr_err("Need unbind another channel before bind.\n");
+			return -EINVAL;
+		}
+		if (!strcmp(id->chan, "QDSS"))
+			curr_chan = QDSS;
+		if (!strcmp(id->chan, "QDSS_HW"))
+			curr_chan = QDSS_HW;
+	}
+
 	drvdata = devm_kzalloc(&mhi_dev->dev, sizeof(*drvdata), GFP_KERNEL);
 	if (!drvdata) {
 		ret = -ENOMEM;
@@ -865,7 +932,12 @@
 
 	ret = device_create_file(drvdata->dev, &dev_attr_mode);
 	if (ret) {
-		pr_err("sysfs node create failed error:%d\n", ret);
+		pr_err("mode sysfs node create failed error:%d\n", ret);
+		goto exit_destroy_device;
+	}
+	ret = device_create_file(drvdata->dev, &dev_attr_curr_chan);
+	if (ret) {
+		pr_err("curr_chan sysfs node create failed error:%d\n", ret);
 		goto exit_destroy_device;
 	}
 
@@ -891,6 +963,7 @@
 
 static const struct mhi_device_id qdss_mhi_match_table[] = {
 	{ .chan = "QDSS", .driver_data = 0x4000 },
+	{ .chan = "IP_HW_QDSS", .driver_data = 0x4000 },
 	{},
 };
 
diff --git a/drivers/soc/qcom/qdss_bridge.h b/drivers/soc/qcom/qdss_bridge.h
index 0967aea..81f096f 100644
--- a/drivers/soc/qcom/qdss_bridge.h
+++ b/drivers/soc/qcom/qdss_bridge.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _QDSS_BRIDGE_H
@@ -30,6 +30,12 @@
 	SSR,
 };
 
+enum mhi_ch {
+	QDSS,
+	QDSS_HW,
+	EMPTY,
+};
+
 struct qdss_bridge_drvdata {
 	int alias;
 	enum open_status opened;
diff --git a/drivers/soc/qcom/qmi_encdec.c b/drivers/soc/qcom/qmi_encdec.c
index 3aaab71..331d67f 100644
--- a/drivers/soc/qcom/qmi_encdec.c
+++ b/drivers/soc/qcom/qmi_encdec.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2015, 2019 The Linux Foundation. All rights reserved.
  * Copyright (C) 2017 Linaro Ltd.
  */
 #include <linux/slab.h>
@@ -534,8 +534,8 @@
 		decoded_bytes += rc;
 	}
 
-	if (string_len > temp_ei->elem_len) {
-		pr_err("%s: String len %d > Max Len %d\n",
+	if (string_len >= temp_ei->elem_len) {
+		pr_err("%s: String len %d >= Max Len %d\n",
 		       __func__, string_len, temp_ei->elem_len);
 		return -ETOOSMALL;
 	} else if (string_len > tlv_len) {
diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c
index 4400f51..a391dae 100644
--- a/drivers/soc/qcom/qmi_interface.c
+++ b/drivers/soc/qcom/qmi_interface.c
@@ -347,6 +347,13 @@
 
 	ret = wait_for_completion_timeout(&txn->completion, timeout);
 
+	mutex_lock(&txn->lock);
+	if (txn->result == -ENETRESET) {
+		mutex_unlock(&txn->lock);
+		return txn->result;
+	}
+	mutex_unlock(&txn->lock);
+
 	mutex_lock(&qmi->txn_lock);
 	mutex_lock(&txn->lock);
 	idr_remove(&qmi->txns, txn->id);
@@ -446,17 +453,18 @@
 	if (IS_ERR(sock))
 		return;
 
-	mutex_lock(&qmi->sock_lock);
-	sock_release(qmi->sock);
-	qmi->sock = NULL;
-	mutex_unlock(&qmi->sock_lock);
-
 	qmi_recv_del_server(qmi, -1, -1);
 
 	if (qmi->ops.net_reset)
 		qmi->ops.net_reset(qmi);
 
 	mutex_lock(&qmi->sock_lock);
+	/* qmi_handle_release() has already started; release the new socket */
+	if (!qmi->sock) {
+		sock_release(sock);
+		return;
+	}
+	sock_release(qmi->sock);
 	qmi->sock = sock;
 	qmi->sq = sq;
 	mutex_unlock(&qmi->sock_lock);
@@ -570,16 +578,21 @@
 
 static void qmi_data_ready(struct sock *sk)
 {
-	struct qmi_handle *qmi = sk->sk_user_data;
+	struct qmi_handle *qmi = NULL;
 
 	/*
 	 * This will be NULL if we receive data while being in
 	 * qmi_handle_release()
 	 */
-	if (!qmi)
+	read_lock_bh(&sk->sk_callback_lock);
+	qmi = sk->sk_user_data;
+	if (!qmi) {
+		read_unlock_bh(&sk->sk_callback_lock);
 		return;
+	}
 
 	queue_work(qmi->wq, &qmi->work);
+	read_unlock_bh(&sk->sk_callback_lock);
 }
 
 static struct socket *qmi_sock_create(struct qmi_handle *qmi,
@@ -602,6 +615,7 @@
 	sock->sk->sk_user_data = qmi;
 	sock->sk->sk_data_ready = qmi_data_ready;
 	sock->sk->sk_error_report = qmi_data_ready;
+	sock->sk->sk_sndtimeo = HZ * 10;
 
 	return sock;
 }
@@ -682,21 +696,35 @@
  */
 void qmi_handle_release(struct qmi_handle *qmi)
 {
-	struct socket *sock = qmi->sock;
+	struct socket *sock;
 	struct qmi_service *svc, *tmp;
-
-	sock->sk->sk_user_data = NULL;
-	cancel_work_sync(&qmi->work);
-
-	qmi_recv_del_server(qmi, -1, -1);
+	struct qmi_txn *txn;
+	int txn_id;
 
 	mutex_lock(&qmi->sock_lock);
+	sock = qmi->sock;
+	write_lock_bh(&sock->sk->sk_callback_lock);
+	sock->sk->sk_user_data = NULL;
+	write_unlock_bh(&sock->sk->sk_callback_lock);
 	sock_release(sock);
 	qmi->sock = NULL;
 	mutex_unlock(&qmi->sock_lock);
 
+	cancel_work_sync(&qmi->work);
+
+	qmi_recv_del_server(qmi, -1, -1);
+
 	destroy_workqueue(qmi->wq);
 
+	mutex_lock(&qmi->txn_lock);
+	idr_for_each_entry(&qmi->txns, txn, txn_id) {
+		mutex_lock(&txn->lock);
+		idr_remove(&qmi->txns, txn->id);
+		txn->result = -ENETRESET;
+		complete(&txn->completion);
+		mutex_unlock(&txn->lock);
+	}
+	mutex_unlock(&qmi->txn_lock);
 	idr_destroy(&qmi->txns);
 
 	kfree(qmi->recv_buf);
@@ -761,7 +789,7 @@
 	if (qmi->sock) {
 		ret = kernel_sendmsg(qmi->sock, &msghdr, &iv, 1, len);
 		if (ret < 0)
-			pr_err("failed to send QMI message\n");
+			pr_info("failed to send QMI message %d\n", ret);
 	} else {
 		ret = -EPIPE;
 	}
diff --git a/drivers/soc/qcom/qmi_rmnet.c b/drivers/soc/qcom/qmi_rmnet.c
index 9e00f79..047de99 100644
--- a/drivers/soc/qcom/qmi_rmnet.c
+++ b/drivers/soc/qcom/qmi_rmnet.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <soc/qcom/qmi_rmnet.h>
@@ -25,13 +25,16 @@
 #define FLAG_POWERSAVE_MASK 0x0010
 #define DFC_MODE_MULTIQ 2
 
-unsigned int rmnet_wq_frequency __read_mostly = 4;
+unsigned int rmnet_wq_frequency __read_mostly = 1000;
 
 #define PS_WORK_ACTIVE_BIT 0
-#define PS_INTERVAL (((!rmnet_wq_frequency) ? 1 : rmnet_wq_frequency) * HZ)
+#define PS_INTERVAL (((!rmnet_wq_frequency) ?                             \
+					1 : rmnet_wq_frequency/10) * (HZ/100))
 #define NO_DELAY (0x0000 * HZ)
 
+#ifdef CONFIG_QCOM_QMI_DFC
 static unsigned int qmi_rmnet_scale_factor = 5;
+#endif
 
 struct qmi_elem_info data_ep_id_type_v01_ei[] = {
 	{
@@ -74,8 +77,8 @@
 		return NULL;
 
 	for (i = 0; i < MAX_CLIENT_NUM; i++) {
-		if (qmi->fc_info[i].dfc_client)
-			return qmi->fc_info[i].dfc_client;
+		if (qmi->dfc_clients[i])
+			return qmi->dfc_clients[i];
 	}
 
 	return NULL;
@@ -90,6 +93,22 @@
 	return qmi_rmnet_has_dfc_client(qmi) ? 1 : 0;
 }
 
+static int
+qmi_rmnet_has_pending(struct qmi_info *qmi)
+{
+	int i;
+
+	if (qmi->wda_pending)
+		return 1;
+
+	for (i = 0; i < MAX_CLIENT_NUM; i++) {
+		if (qmi->dfc_pending[i])
+			return 1;
+	}
+
+	return 0;
+}
+
 #ifdef CONFIG_QCOM_QMI_DFC
 static void
 qmi_rmnet_clean_flow_list(struct qmi_info *qmi, struct net_device *dev,
@@ -295,6 +314,17 @@
 
 	return 0;
 }
+
+static void qmi_rmnet_query_flows(struct qmi_info *qmi)
+{
+	int i;
+
+	for (i = 0; i < MAX_CLIENT_NUM; i++) {
+		if (qmi->dfc_clients[i])
+			dfc_qmi_query_flow(qmi->dfc_clients[i]);
+	}
+}
+
 #else
 static inline void
 qmi_rmnet_update_flow_link(struct qmi_info *qmi, struct net_device *dev,
@@ -325,12 +355,17 @@
 {
 	return -EINVAL;
 }
+
+static inline void qmi_rmnet_query_flows(struct qmi_info *qmi)
+{
+}
 #endif
 
 static int
 qmi_rmnet_setup_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
 {
 	int idx, rc, err = 0;
+	struct svc_info svc;
 
 	ASSERT_RTNL();
 
@@ -341,7 +376,7 @@
 	idx = (tcm->tcm_handle == 0) ? 0 : 1;
 
 	if (!qmi) {
-		qmi = kzalloc(sizeof(struct qmi_info), GFP_KERNEL);
+		qmi = kzalloc(sizeof(struct qmi_info), GFP_ATOMIC);
 		if (!qmi)
 			return -ENOMEM;
 
@@ -349,20 +384,20 @@
 	}
 
 	qmi->flag = tcm->tcm_ifindex;
-	qmi->fc_info[idx].svc.instance = tcm->tcm_handle;
-	qmi->fc_info[idx].svc.ep_type = tcm->tcm_info;
-	qmi->fc_info[idx].svc.iface_id = tcm->tcm_parent;
+	svc.instance = tcm->tcm_handle;
+	svc.ep_type = tcm->tcm_info;
+	svc.iface_id = tcm->tcm_parent;
 
 	if (((tcm->tcm_ifindex & FLAG_DFC_MASK) == DFC_MODE_MULTIQ) &&
-	    (qmi->fc_info[idx].dfc_client == NULL)) {
-		rc = dfc_qmi_client_init(port, idx, qmi);
+	    !qmi->dfc_clients[idx] && !qmi->dfc_pending[idx]) {
+		rc = dfc_qmi_client_init(port, idx, &svc, qmi);
 		if (rc < 0)
 			err = rc;
 	}
 
 	if ((tcm->tcm_ifindex & FLAG_POWERSAVE_MASK) &&
-	    (idx == 0) && (qmi->wda_client == NULL)) {
-		rc = wda_qmi_client_init(port, tcm->tcm_handle);
+	    (idx == 0) && !qmi->wda_client && !qmi->wda_pending) {
+		rc = wda_qmi_client_init(port, &svc, qmi);
 		if (rc < 0)
 			err = rc;
 	}
@@ -373,15 +408,22 @@
 static int
 __qmi_rmnet_delete_client(void *port, struct qmi_info *qmi, int idx)
 {
+	void *data = NULL;
 
 	ASSERT_RTNL();
 
-	if (qmi->fc_info[idx].dfc_client) {
-		dfc_qmi_client_exit(qmi->fc_info[idx].dfc_client);
-		qmi->fc_info[idx].dfc_client = NULL;
+	if (qmi->dfc_clients[idx])
+		data = qmi->dfc_clients[idx];
+	else if (qmi->dfc_pending[idx])
+		data = qmi->dfc_pending[idx];
+
+	if (data) {
+		dfc_qmi_client_exit(data);
+		qmi->dfc_clients[idx] = NULL;
+		qmi->dfc_pending[idx] = NULL;
 	}
 
-	if (!qmi_rmnet_has_client(qmi)) {
+	if (!qmi_rmnet_has_client(qmi) && !qmi_rmnet_has_pending(qmi)) {
 		rmnet_reset_qmi_pt(port);
 		kfree(qmi);
 		return 0;
@@ -394,15 +436,21 @@
 qmi_rmnet_delete_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
 {
 	int idx;
+	void *data = NULL;
 
 	/* client delete: tcm->tcm_handle - instance*/
 	idx = (tcm->tcm_handle == 0) ? 0 : 1;
 
 	ASSERT_RTNL();
+	if (qmi->wda_client)
+		data = qmi->wda_client;
+	else if (qmi->wda_pending)
+		data = qmi->wda_pending;
 
-	if ((idx == 0) && qmi->wda_client) {
-		wda_qmi_client_exit(qmi->wda_client);
+	if ((idx == 0) && data) {
+		wda_qmi_client_exit(data);
 		qmi->wda_client = NULL;
+		qmi->wda_pending = NULL;
 	}
 
 	__qmi_rmnet_delete_client(port, qmi, idx);
@@ -433,12 +481,15 @@
 			return;
 
 		if (qmi_rmnet_setup_client(port, qmi, tcm) < 0) {
-			if (!qmi_rmnet_has_client(qmi)) {
-				kfree(qmi);
+			/* retrieve qmi again as it could have been changed */
+			qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
+			if (qmi &&
+			    !qmi_rmnet_has_client(qmi) &&
+			    !qmi_rmnet_has_pending(qmi)) {
 				rmnet_reset_qmi_pt(port);
+				kfree(qmi);
 			}
-		}
-		if (tcm->tcm_ifindex & FLAG_POWERSAVE_MASK) {
+		} else if (tcm->tcm_ifindex & FLAG_POWERSAVE_MASK) {
 			qmi_rmnet_work_init(port);
 			rmnet_set_powersave_format(port);
 		}
@@ -446,7 +497,7 @@
 	case NLMSG_CLIENT_DELETE:
 		if (!qmi)
 			return;
-		if (tcm->tcm_ifindex & FLAG_POWERSAVE_MASK) {
+		if (tcm->tcm_handle == 0) { /* instance 0 */
 			rmnet_clear_powersave_format(port);
 			qmi_rmnet_work_exit(port);
 		}
@@ -471,6 +522,7 @@
 {
 	struct qmi_info *qmi = (struct qmi_info *)qmi_pt;
 	int i;
+	void *data = NULL;
 
 	if (!qmi)
 		return;
@@ -479,9 +531,15 @@
 
 	qmi_rmnet_work_exit(port);
 
-	if (qmi->wda_client) {
-		wda_qmi_client_exit(qmi->wda_client);
+	if (qmi->wda_client)
+		data = qmi->wda_client;
+	else if (qmi->wda_pending)
+		data = qmi->wda_pending;
+
+	if (data) {
+		wda_qmi_client_exit(data);
 		qmi->wda_client = NULL;
+		qmi->wda_pending = NULL;
 	}
 
 	for (i = 0; i < MAX_CLIENT_NUM; i++) {
@@ -495,7 +553,7 @@
 {
 	struct qos_info *qos;
 	struct rmnet_bearer_map *bearer;
-	int do_wake = 0;
+	bool do_wake = false;
 
 	qos = (struct qos_info *)rmnet_get_qos_pt(dev);
 	if (!qos)
@@ -504,14 +562,14 @@
 	spin_lock_bh(&qos->qos_lock);
 
 	list_for_each_entry(bearer, &qos->bearer_head, list) {
-		bearer->grant_before_ps = bearer->grant_size;
-		bearer->seq_before_ps = bearer->seq;
+		if (!bearer->grant_size)
+			do_wake = true;
 		bearer->grant_size = DEFAULT_GRANT;
-		bearer->grant_thresh = qmi_rmnet_grant_per(DEFAULT_GRANT);
+		bearer->grant_thresh = DEFAULT_GRANT;
 		bearer->seq = 0;
 		bearer->ack_req = 0;
-		bearer->ancillary = 0;
-		do_wake = 1;
+		bearer->tcp_bidir = false;
+		bearer->rat_switch = false;
 	}
 
 	if (do_wake) {
@@ -523,6 +581,31 @@
 }
 EXPORT_SYMBOL(qmi_rmnet_enable_all_flows);
 
+bool qmi_rmnet_all_flows_enabled(struct net_device *dev)
+{
+	struct qos_info *qos;
+	struct rmnet_bearer_map *bearer;
+	bool ret = true;
+
+	qos = (struct qos_info *)rmnet_get_qos_pt(dev);
+	if (!qos)
+		return true;
+
+	spin_lock_bh(&qos->qos_lock);
+
+	list_for_each_entry(bearer, &qos->bearer_head, list) {
+		if (!bearer->grant_size) {
+			ret = false;
+			break;
+		}
+	}
+
+	spin_unlock_bh(&qos->qos_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(qmi_rmnet_all_flows_enabled);
+
 #ifdef CONFIG_QCOM_QMI_DFC
 void qmi_rmnet_burst_fc_check(struct net_device *dev,
 			      int ip_type, u32 mark, unsigned int len)
@@ -633,7 +716,7 @@
 #ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE
 static struct workqueue_struct  *rmnet_ps_wq;
 static struct rmnet_powersave_work *rmnet_work;
-static struct list_head ps_list;
+static LIST_HEAD(ps_list);
 
 struct rmnet_powersave_work {
 	struct delayed_work work;
@@ -711,18 +794,19 @@
 
 	if (enable)
 		dfc_qmi_wq_flush(qmi);
+	else
+		qmi_rmnet_query_flows(qmi);
 
 	return 0;
 }
 EXPORT_SYMBOL(qmi_rmnet_set_powersave_mode);
 
-void qmi_rmnet_work_restart(void *port)
+static void qmi_rmnet_work_restart(void *port)
 {
 	if (!rmnet_ps_wq || !rmnet_work)
 		return;
 	queue_delayed_work(rmnet_ps_wq, &rmnet_work->work, NO_DELAY);
 }
-EXPORT_SYMBOL(qmi_rmnet_work_restart);
 
 static void qmi_rmnet_check_stats(struct work_struct *work)
 {
@@ -730,6 +814,7 @@
 	struct qmi_info *qmi;
 	u64 rxd, txd;
 	u64 rx, tx;
+	bool dl_msg_active;
 
 	real_work = container_of(to_delayed_work(work),
 				 struct rmnet_powersave_work, work);
@@ -742,17 +827,15 @@
 		return;
 
 	if (qmi->ps_enabled) {
-		/* Retry after small delay if qmi error
-		 * This resumes UL grants by disabling
-		 * powersave mode if successful.
-		 */
+		/* Register to get QMI DFC and DL marker */
 		if (qmi_rmnet_set_powersave_mode(real_work->port, 0) < 0) {
+			/* If this failed need to retry quickly */
 			queue_delayed_work(rmnet_ps_wq,
 					   &real_work->work, HZ / 50);
 			return;
 
 		}
-		qmi->ps_enabled = 0;
+		qmi->ps_enabled = false;
 
 		if (rmnet_get_powersave_notif(real_work->port))
 			qmi_rmnet_ps_off_notify(real_work->port);
@@ -767,18 +850,29 @@
 	real_work->old_rx_pkts = rx;
 	real_work->old_tx_pkts = tx;
 
+	dl_msg_active = qmi->dl_msg_active;
+	qmi->dl_msg_active = false;
+
 	if (!rxd && !txd) {
+		/* If no DL message was received and a flow is disabled
+		 * (likely in RLF), do not enter powersave
+		 */
+		if (!dl_msg_active &&
+		    !rmnet_all_flows_enabled(real_work->port))
+			goto end;
+
+		/* Deregister to suppress QMI DFC and DL marker */
 		if (qmi_rmnet_set_powersave_mode(real_work->port, 1) < 0) {
 			queue_delayed_work(rmnet_ps_wq,
 					   &real_work->work, PS_INTERVAL);
 			return;
 		}
-		qmi->ps_enabled = 1;
-		clear_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active);
+		qmi->ps_enabled = true;
 
-		/* Enable flow after clear the bit so a new
-		 * work can be triggered.
+		/* Clear the bit before enabling flow so pending packets
+		 * can trigger the work again
 		 */
+		clear_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active);
 		rmnet_enable_all_flows(real_work->port);
 
 		if (rmnet_get_powersave_notif(real_work->port))
@@ -818,7 +912,6 @@
 		rmnet_ps_wq = NULL;
 		return;
 	}
-	INIT_LIST_HEAD(&ps_list);
 	INIT_DEFERRABLE_WORK(&rmnet_work->work, qmi_rmnet_check_stats);
 	rmnet_work->port = port;
 	rmnet_get_packets(rmnet_work->port, &rmnet_work->old_rx_pkts,
@@ -854,4 +947,16 @@
 	rmnet_work = NULL;
 }
 EXPORT_SYMBOL(qmi_rmnet_work_exit);
+
+void qmi_rmnet_set_dl_msg_active(void *port)
+{
+	struct qmi_info *qmi;
+
+	qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
+	if (unlikely(!qmi))
+		return;
+
+	qmi->dl_msg_active = true;
+}
+EXPORT_SYMBOL(qmi_rmnet_set_dl_msg_active);
 #endif
diff --git a/drivers/soc/qcom/qmi_rmnet_i.h b/drivers/soc/qcom/qmi_rmnet_i.h
index 0efff48..7fe4862 100644
--- a/drivers/soc/qcom/qmi_rmnet_i.h
+++ b/drivers/soc/qcom/qmi_rmnet_i.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _RMNET_QMI_I_H
@@ -14,7 +14,8 @@
 
 #define MAX_CLIENT_NUM 2
 #define MAX_FLOW_NUM 32
-#define DEFAULT_GRANT 10240
+#define DEFAULT_GRANT 1
+#define DFC_MAX_BEARERS_V01 16
 
 struct rmnet_flow_map {
 	struct list_head list;
@@ -32,9 +33,10 @@
 	u32 grant_thresh;
 	u16 seq;
 	u8  ack_req;
-	u32 grant_before_ps;
-	u16 seq_before_ps;
-	u32 ancillary;
+	u32 last_grant;
+	u16 last_seq;
+	bool tcp_bidir;
+	bool rat_switch;
 };
 
 struct svc_info {
@@ -43,11 +45,6 @@
 	u32 iface_id;
 };
 
-struct fc_info {
-	struct svc_info svc;
-	void *dfc_client;
-};
-
 struct qos_info {
 	u8 mux_id;
 	struct net_device *real_dev;
@@ -66,9 +63,12 @@
 struct qmi_info {
 	int flag;
 	void *wda_client;
-	struct fc_info fc_info[MAX_CLIENT_NUM];
+	void *wda_pending;
+	void *dfc_clients[MAX_CLIENT_NUM];
+	void *dfc_pending[MAX_CLIENT_NUM];
 	unsigned long ps_work_active;
-	int ps_enabled;
+	bool ps_enabled;
+	bool dl_msg_active;
 };
 
 enum data_ep_type_enum_v01 {
@@ -101,7 +101,8 @@
 
 unsigned int qmi_rmnet_grant_per(unsigned int grant);
 
-int dfc_qmi_client_init(void *port, int index, struct qmi_info *qmi);
+int dfc_qmi_client_init(void *port, int index, struct svc_info *psvc,
+			struct qmi_info *qmi);
 
 void dfc_qmi_client_exit(void *dfc_data);
 
@@ -112,6 +113,7 @@
 
 void dfc_qmi_wq_flush(struct qmi_info *qmi);
 
+void dfc_qmi_query_flow(void *dfc_data);
 #else
 static inline struct rmnet_flow_map *
 qmi_rmnet_get_flow_map(struct qos_info *qos_info,
@@ -121,13 +123,14 @@
 }
 
 static inline struct rmnet_bearer_map *
-qmi_rmnet_get_bearer_map(struct qos_info *qos_info, uint8_t bearer_id)
+qmi_rmnet_get_bearer_map(struct qos_info *qos_info, u8 bearer_id)
 {
 	return NULL;
 }
 
 static inline int
-dfc_qmi_client_init(void *port, int modem, struct qmi_info *qmi)
+dfc_qmi_client_init(void *port, int index, struct svc_info *psvc,
+		    struct qmi_info *qmi)
 {
 	return -EINVAL;
 }
@@ -146,14 +149,21 @@
 dfc_qmi_wq_flush(struct qmi_info *qmi)
 {
 }
+
+static inline void
+dfc_qmi_query_flow(void *dfc_data)
+{
+}
 #endif
 
 #ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE
-int wda_qmi_client_init(void *port, uint32_t instance);
+int
+wda_qmi_client_init(void *port, struct svc_info *psvc, struct qmi_info *qmi);
 void wda_qmi_client_exit(void *wda_data);
-int wda_set_powersave_mode(void *wda_data, uint8_t enable);
+int wda_set_powersave_mode(void *wda_data, u8 enable);
 #else
-static inline int wda_qmi_client_init(void *port, uint32_t instance)
+static inline int
+wda_qmi_client_init(void *port, struct svc_info *psvc, struct qmi_info *qmi)
 {
 	return -EINVAL;
 }
@@ -162,7 +172,7 @@
 {
 }
 
-static inline int wda_set_powersave_mode(void *wda_data, uint8_t enable)
+static inline int wda_set_powersave_mode(void *wda_data, u8 enable)
 {
 	return -EINVAL;
 }
diff --git a/drivers/soc/qcom/rq_stats.c b/drivers/soc/qcom/rq_stats.c
index c5d6f07..4906d97 100644
--- a/drivers/soc/qcom/rq_stats.c
+++ b/drivers/soc/qcom/rq_stats.c
@@ -3,248 +3,21 @@
  * Copyright (c) 2010-2015, 2017, 2019, The Linux Foundation. All rights reserved.
  */
 
-#include <linux/kernel.h>
 #include <linux/init.h>
-#include <linux/module.h>
-#include <linux/hrtimer.h>
 #include <linux/cpu.h>
 #include <linux/kobject.h>
 #include <linux/sysfs.h>
-#include <linux/notifier.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
 #include <linux/rq_stats.h>
-#include <linux/cpufreq.h>
-#include <linux/kernel_stat.h>
-#include <linux/tick.h>
-#include <asm/smp_plat.h>
-#include <linux/suspend.h>
 
 #define MAX_LONG_SIZE 24
-#define DEFAULT_RQ_POLL_JIFFIES 1
 #define DEFAULT_DEF_TIMER_JIFFIES 5
 
-struct notifier_block freq_transition;
-
-struct cpu_load_data {
-	u64 prev_cpu_idle;
-	u64 prev_cpu_wall;
-	unsigned int avg_load_maxfreq;
-	unsigned int samples;
-	unsigned int window_size;
-	unsigned int cur_freq;
-	unsigned int policy_max;
-	cpumask_var_t related_cpus;
-	struct mutex cpu_load_mutex;
-};
-
-static DEFINE_PER_CPU(struct cpu_load_data, cpuload);
-
-
-static int update_average_load(unsigned int freq, unsigned int cpu)
-{
-
-	struct cpu_load_data *pcpu = &per_cpu(cpuload, cpu);
-	u64 cur_wall_time, cur_idle_time;
-	unsigned int idle_time, wall_time;
-	unsigned int cur_load, load_at_max_freq;
-
-	cur_idle_time = get_cpu_idle_time(cpu, &cur_wall_time, 0);
-
-	wall_time = (unsigned int) (cur_wall_time - pcpu->prev_cpu_wall);
-	pcpu->prev_cpu_wall = cur_wall_time;
-
-	idle_time = (unsigned int) (cur_idle_time - pcpu->prev_cpu_idle);
-	pcpu->prev_cpu_idle = cur_idle_time;
-
-
-	if (unlikely(wall_time <= 0 || wall_time < idle_time))
-		return 0;
-
-	cur_load = 100 * (wall_time - idle_time) / wall_time;
-
-	/* Calculate the scaled load across CPU */
-	load_at_max_freq = (cur_load * freq) / pcpu->policy_max;
-
-	if (!pcpu->avg_load_maxfreq) {
-		/* This is the first sample in this window*/
-		pcpu->avg_load_maxfreq = load_at_max_freq;
-		pcpu->window_size = wall_time;
-	} else {
-		/*
-		 * The is already a sample available in this window.
-		 * Compute weighted average with prev entry, so that we get
-		 * the precise weighted load.
-		 */
-		pcpu->avg_load_maxfreq =
-			((pcpu->avg_load_maxfreq * pcpu->window_size) +
-			(load_at_max_freq * wall_time)) /
-			(wall_time + pcpu->window_size);
-
-		pcpu->window_size += wall_time;
-	}
-
-	return 0;
-}
-
-static unsigned int report_load_at_max_freq(void)
-{
-	int cpu;
-	struct cpu_load_data *pcpu;
-	unsigned int total_load = 0;
-
-	for_each_online_cpu(cpu) {
-		pcpu = &per_cpu(cpuload, cpu);
-		mutex_lock(&pcpu->cpu_load_mutex);
-		update_average_load(pcpu->cur_freq, cpu);
-		total_load += pcpu->avg_load_maxfreq;
-		pcpu->avg_load_maxfreq = 0;
-		mutex_unlock(&pcpu->cpu_load_mutex);
-	}
-	return total_load;
-}
-
-static int cpufreq_transition_handler(struct notifier_block *nb,
-			unsigned long val, void *data)
-{
-	struct cpufreq_freqs *freqs = data;
-	struct cpu_load_data *this_cpu = &per_cpu(cpuload, freqs->cpu);
-	int j;
-
-	switch (val) {
-	case CPUFREQ_POSTCHANGE:
-		for_each_cpu(j, this_cpu->related_cpus) {
-			struct cpu_load_data *pcpu = &per_cpu(cpuload, j);
-
-			mutex_lock(&pcpu->cpu_load_mutex);
-			update_average_load(freqs->old, j);
-			pcpu->cur_freq = freqs->new;
-			mutex_unlock(&pcpu->cpu_load_mutex);
-		}
-		break;
-	}
-	return 0;
-}
-
-static void update_related_cpus(void)
-{
-	unsigned int cpu;
-
-	for_each_cpu(cpu, cpu_online_mask) {
-		struct cpu_load_data *this_cpu = &per_cpu(cpuload, cpu);
-		struct cpufreq_policy cpu_policy;
-
-		cpufreq_get_policy(&cpu_policy, cpu);
-		cpumask_copy(this_cpu->related_cpus, cpu_policy.cpus);
-	}
-}
-
-static int cpu_online_handler(unsigned int cpu)
-{
-	struct cpu_load_data *this_cpu = &per_cpu(cpuload, cpu);
-
-	if (!this_cpu->cur_freq)
-		this_cpu->cur_freq = cpufreq_quick_get(cpu);
-	update_related_cpus();
-	this_cpu->avg_load_maxfreq = 0;
-	return 0;
-}
-
-static int system_suspend_handler(struct notifier_block *nb,
-				unsigned long val, void *data)
-{
-	switch (val) {
-	case PM_POST_HIBERNATION:
-	case PM_POST_SUSPEND:
-	case PM_POST_RESTORE:
-		rq_info.hotplug_disabled = 0;
-		break;
-	case PM_HIBERNATION_PREPARE:
-	case PM_SUSPEND_PREPARE:
-		rq_info.hotplug_disabled = 1;
-		break;
-	default:
-		return NOTIFY_DONE;
-	}
-	return NOTIFY_OK;
-}
-
-
-static ssize_t hotplug_disable_show(struct kobject *kobj,
-		struct kobj_attribute *attr, char *buf)
-{
-	unsigned int val = rq_info.hotplug_disabled;
-
-	return snprintf(buf, MAX_LONG_SIZE, "%d\n", val);
-}
-
-static struct kobj_attribute hotplug_disabled_attr = __ATTR_RO(hotplug_disable);
-
 static void def_work_fn(struct work_struct *work)
 {
 	/* Notify polling threads on change of value */
 	sysfs_notify(rq_info.kobj, NULL, "def_timer_ms");
 }
 
-static ssize_t run_queue_avg_show(struct kobject *kobj,
-		struct kobj_attribute *attr, char *buf)
-{
-	unsigned int val = 0;
-	unsigned long flags = 0;
-
-	spin_lock_irqsave(&rq_lock, flags);
-	/* rq avg currently available only on one core */
-	val = rq_info.rq_avg;
-	rq_info.rq_avg = 0;
-	spin_unlock_irqrestore(&rq_lock, flags);
-
-	return snprintf(buf, PAGE_SIZE, "%d.%d\n", val/10, val%10);
-}
-
-static struct kobj_attribute run_queue_avg_attr = __ATTR_RO(run_queue_avg);
-
-static ssize_t show_run_queue_poll_ms(struct kobject *kobj,
-				      struct kobj_attribute *attr, char *buf)
-{
-	int ret = 0;
-	unsigned long flags = 0;
-
-	spin_lock_irqsave(&rq_lock, flags);
-	ret = snprintf(buf, MAX_LONG_SIZE, "%u\n",
-		       jiffies_to_msecs(rq_info.rq_poll_jiffies));
-	spin_unlock_irqrestore(&rq_lock, flags);
-
-	return ret;
-}
-
-static ssize_t store_run_queue_poll_ms(struct kobject *kobj,
-				       struct kobj_attribute *attr,
-				       const char *buf, size_t count)
-{
-	unsigned int val = 0;
-	unsigned long flags = 0;
-	static DEFINE_MUTEX(lock_poll_ms);
-
-	mutex_lock(&lock_poll_ms);
-
-	spin_lock_irqsave(&rq_lock, flags);
-	if (kstrtouint(buf, 0, &val))
-		count = -EINVAL;
-	else
-		rq_info.rq_poll_jiffies = msecs_to_jiffies(val);
-	spin_unlock_irqrestore(&rq_lock, flags);
-
-	mutex_unlock(&lock_poll_ms);
-
-	return count;
-}
-
-static struct kobj_attribute run_queue_poll_ms_attr =
-	__ATTR(run_queue_poll_ms, 0600, show_run_queue_poll_ms,
-			store_run_queue_poll_ms);
-
 static ssize_t show_def_timer_ms(struct kobject *kobj,
 		struct kobj_attribute *attr, char *buf)
 {
@@ -276,22 +49,8 @@
 	__ATTR(def_timer_ms, 0600, show_def_timer_ms,
 			store_def_timer_ms);
 
-static ssize_t show_cpu_normalized_load(struct kobject *kobj,
-		struct kobj_attribute *attr, char *buf)
-{
-	return snprintf(buf, MAX_LONG_SIZE, "%u\n", report_load_at_max_freq());
-}
-
-static struct kobj_attribute cpu_normalized_load_attr =
-	__ATTR(cpu_normalized_load, 0600, show_cpu_normalized_load,
-			NULL);
-
 static struct attribute *rq_attrs[] = {
-	&cpu_normalized_load_attr.attr,
 	&def_timer_ms_attr.attr,
-	&run_queue_avg_attr.attr,
-	&run_queue_poll_ms_attr.attr,
-	&hotplug_disabled_attr.attr,
 	NULL,
 };
 
@@ -303,7 +62,6 @@
 {
 	int err;
 
-	rq_info.rq_avg = 0;
 	rq_info.attr_group = &rq_attr_group;
 
 	/* Create /sys/devices/system/cpu/cpu0/rq-stats/... */
@@ -324,8 +82,6 @@
 static int __init msm_rq_stats_init(void)
 {
 	int ret;
-	int i;
-	struct cpufreq_policy cpu_policy;
 
 #ifndef CONFIG_SMP
 	/* Bail out if this is not an SMP Target */
@@ -337,44 +93,12 @@
 	WARN_ON(!rq_wq);
 	INIT_WORK(&rq_info.def_timer_work, def_work_fn);
 	spin_lock_init(&rq_lock);
-	rq_info.rq_poll_jiffies = DEFAULT_RQ_POLL_JIFFIES;
 	rq_info.def_timer_jiffies = DEFAULT_DEF_TIMER_JIFFIES;
-	rq_info.rq_poll_last_jiffy = 0;
 	rq_info.def_timer_last_jiffy = 0;
-	rq_info.hotplug_disabled = 0;
 	ret = init_rq_attribs();
 
 	rq_info.init = 1;
 
-	for_each_possible_cpu(i) {
-		struct cpu_load_data *pcpu = &per_cpu(cpuload, i);
-
-		mutex_init(&pcpu->cpu_load_mutex);
-		cpufreq_get_policy(&cpu_policy, i);
-		pcpu->policy_max = cpu_policy.cpuinfo.max_freq;
-		if (cpu_online(i))
-			pcpu->cur_freq = cpufreq_quick_get(i);
-		cpumask_copy(pcpu->related_cpus, cpu_policy.cpus);
-	}
-	freq_transition.notifier_call = cpufreq_transition_handler;
-	cpufreq_register_notifier(&freq_transition,
-					CPUFREQ_TRANSITION_NOTIFIER);
-	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "rq_stats:online",
-			cpu_online_handler, NULL);
-
 	return ret;
 }
 late_initcall(msm_rq_stats_init);
-
-static int __init msm_rq_stats_early_init(void)
-{
-#ifndef CONFIG_SMP
-	/* Bail out if this is not an SMP Target */
-	rq_info.init = 0;
-	return -EPERM;
-#endif
-
-	pm_notifier(system_suspend_handler, 0);
-	return 0;
-}
-core_initcall(msm_rq_stats_early_init);
diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c
index d9d4b82..599a8d9 100644
--- a/drivers/soc/qcom/secure_buffer.c
+++ b/drivers/soc/qcom/secure_buffer.c
@@ -238,10 +238,17 @@
 #define BATCH_MAX_SIZE SZ_2M
 #define BATCH_MAX_SECTIONS 32
 
-int hyp_assign_table(struct sg_table *table,
+/*
+ *  When -EAGAIN is returned it is safe for the caller to try to call
+ *  __hyp_assign_table again.
+ *
+ *  When -EADDRNOTAVAIL is returned the memory may no longer be in
+ *  a usable state and should no longer be accessed by the HLOS.
+ */
+static int __hyp_assign_table(struct sg_table *table,
 			u32 *source_vm_list, int source_nelems,
 			int *dest_vmids, int *dest_perms,
-			int dest_nelems)
+			int dest_nelems, bool try_lock)
 {
 	int ret = 0;
 	struct scm_desc desc = {0};
@@ -271,10 +278,17 @@
 					  &dest_vm_copy_size);
 	if (!dest_vm_copy) {
 		ret = -ENOMEM;
-		goto out_free;
+		goto out_free_src;
 	}
 
-	mutex_lock(&secure_buffer_mutex);
+	if (try_lock) {
+		if (!mutex_trylock(&secure_buffer_mutex)) {
+			ret = -EAGAIN;
+			goto out_free_dest;
+		}
+	} else {
+		mutex_lock(&secure_buffer_mutex);
+	}
 
 	sg_table_copy = get_info_list_from_table(table, &sg_table_copy_size);
 	if (!sg_table_copy) {
@@ -330,6 +344,12 @@
 		if (ret) {
 			pr_info("%s: Failed to assign memory protection, ret = %d\n",
 				__func__, ret);
+
+			/*
+			 * Make it clear to clients that the memory may no
+			 * longer be in a usable state.
+			 */
+			ret = -EADDRNOTAVAIL;
 			break;
 		}
 		batch_start = batch_end;
@@ -337,12 +357,31 @@
 
 out_unlock:
 	mutex_unlock(&secure_buffer_mutex);
+out_free_dest:
 	kfree(dest_vm_copy);
-out_free:
+out_free_src:
 	kfree(source_vm_copy);
 	return ret;
 }
 
+int hyp_assign_table(struct sg_table *table,
+			u32 *source_vm_list, int source_nelems,
+			int *dest_vmids, int *dest_perms,
+			int dest_nelems)
+{
+	return __hyp_assign_table(table, source_vm_list, source_nelems,
+				  dest_vmids, dest_perms, dest_nelems, false);
+}
+
+int try_hyp_assign_table(struct sg_table *table,
+			u32 *source_vm_list, int source_nelems,
+			int *dest_vmids, int *dest_perms,
+			int dest_nelems)
+{
+	return __hyp_assign_table(table, source_vm_list, source_nelems,
+				  dest_vmids, dest_perms, dest_nelems, true);
+}
+
 int hyp_assign_phys(phys_addr_t addr, u64 size, u32 *source_vm_list,
 			int source_nelems, int *dest_vmids,
 			int *dest_perms, int dest_nelems)
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
index b8379f1..b8585d1 100644
--- a/drivers/soc/qcom/smp2p.c
+++ b/drivers/soc/qcom/smp2p.c
@@ -26,6 +26,7 @@
 #include <linux/soc/qcom/smem.h>
 #include <linux/soc/qcom/smem_state.h>
 #include <linux/spinlock.h>
+#include <linux/pm_wakeup.h>
 
 #include <linux/ipc_logging.h>
 
@@ -160,6 +161,7 @@
 	struct regmap *ipc_regmap;
 	int ipc_offset;
 	int ipc_bit;
+	struct wakeup_source ws;
 
 	struct mbox_client mbox_client;
 	struct mbox_chan *mbox_chan;
@@ -297,6 +299,14 @@
 	}
 }
 
+static irqreturn_t qcom_smp2p_isr(int irq, void *data)
+{
+	struct qcom_smp2p *smp2p = data;
+
+	__pm_stay_awake(&smp2p->ws);
+	return IRQ_WAKE_THREAD;
+}
+
 /**
  * qcom_smp2p_intr() - interrupt handler for incoming notifications
  * @irq:	unused
@@ -321,7 +331,7 @@
 		if (IS_ERR(in)) {
 			dev_err(smp2p->dev,
 				"Unable to acquire remote smp2p item\n");
-			return IRQ_HANDLED;
+			goto out;
 		}
 
 		smp2p->in = in;
@@ -340,6 +350,8 @@
 			qcom_smp2p_do_ssr_ack(smp2p);
 	}
 
+out:
+	__pm_relax(&smp2p->ws);
 	return IRQ_HANDLED;
 }
 
@@ -636,12 +648,13 @@
 			list_add(&entry->node, &smp2p->outbound);
 		}
 	}
+	wakeup_source_init(&smp2p->ws, "smp2p");
 
 	/* Kick the outgoing edge after allocating entries */
 	qcom_smp2p_kick(smp2p);
 
 	ret = devm_request_threaded_irq(&pdev->dev, smp2p->irq,
-					NULL, qcom_smp2p_intr,
+					qcom_smp2p_isr, qcom_smp2p_intr,
 					IRQF_NO_SUSPEND | IRQF_ONESHOT,
 					"smp2p", (void *)smp2p);
 	if (ret) {
diff --git a/drivers/soc/qcom/smp2p_sleepstate.c b/drivers/soc/qcom/smp2p_sleepstate.c
new file mode 100644
index 0000000..f090415
--- /dev/null
+++ b/drivers/soc/qcom/smp2p_sleepstate.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
+ */
+#include <linux/module.h>
+#include <linux/suspend.h>
+#include <linux/platform_device.h>
+#include <linux/soc/qcom/smem_state.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/of_irq.h>
+#include <linux/of.h>
+#include <linux/pm_wakeup.h>
+
+#define PROC_AWAKE_ID 12 /* 12th bit */
+#define AWAKE_BIT BIT(PROC_AWAKE_ID)
+static struct qcom_smem_state *state;
+static struct wakeup_source notify_ws;
+
+/**
+ * sleepstate_pm_notifier() - PM notifier callback function.
+ * @nb:		Pointer to the notifier block.
+ * @event:	Suspend state event from PM module.
+ * @unused:	Null pointer from PM module.
+ *
+ * This function is registered as a callback with the PM core so that it
+ * receives notifications about system suspend state transitions.
+ */
+static int sleepstate_pm_notifier(struct notifier_block *nb,
+				  unsigned long event, void *unused)
+{
+	switch (event) {
+	case PM_SUSPEND_PREPARE:
+		qcom_smem_state_update_bits(state, AWAKE_BIT, 0);
+		break;
+
+	case PM_POST_SUSPEND:
+		qcom_smem_state_update_bits(state, AWAKE_BIT, AWAKE_BIT);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block sleepstate_pm_nb = {
+	.notifier_call = sleepstate_pm_notifier,
+	.priority = INT_MAX,
+};
+
+static irqreturn_t smp2p_sleepstate_handler(int irq, void *ctxt)
+{
+	__pm_wakeup_event(&notify_ws, 200);
+	return IRQ_HANDLED;
+}
+
+static int smp2p_sleepstate_probe(struct platform_device *pdev)
+{
+	int ret;
+	int irq;
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+
+	state = qcom_smem_state_get(&pdev->dev, 0, &ret);
+	if (IS_ERR(state))
+		return PTR_ERR(state);
+	qcom_smem_state_update_bits(state, AWAKE_BIT, AWAKE_BIT);
+
+	ret = register_pm_notifier(&sleepstate_pm_nb);
+	if (ret) {
+		dev_err(dev, "%s: power state notif error %d\n", __func__, ret);
+		return ret;
+	}
+	wakeup_source_init(&notify_ws, "smp2p-sleepstate");
+
+	irq = of_irq_get_byname(node, "smp2p-sleepstate-in");
+	if (irq <= 0) {
+		dev_err(dev, "failed to get irq for smp2p_sleep_state\n");
+		ret = -EPROBE_DEFER;
+		goto err;
+	}
+	dev_dbg(dev, "got smp2p-sleepstate-in irq %d\n", irq);
+	ret = devm_request_threaded_irq(dev, irq, NULL,
+					smp2p_sleepstate_handler,
+					IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+					"smp2p_sleepstate", dev);
+	if (ret) {
+		dev_err(dev, "fail to register smp2p threaded_irq=%d\n", irq);
+		goto err;
+	}
+	return 0;
+err:
+	wakeup_source_trash(&notify_ws);
+	unregister_pm_notifier(&sleepstate_pm_nb);
+	return ret;
+}
+
+static const struct of_device_id smp2p_slst_match_table[] = {
+	{.compatible = "qcom,smp2p-sleepstate"},
+	{},
+};
+
+static struct platform_driver smp2p_sleepstate_driver = {
+	.probe = smp2p_sleepstate_probe,
+	.driver = {
+		.name = "smp2p_sleepstate",
+		.of_match_table = smp2p_slst_match_table,
+	},
+};
+
+static int __init smp2p_sleepstate_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&smp2p_sleepstate_driver);
+	if (ret) {
+		pr_err("%s: register failed %d\n", __func__, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+module_init(smp2p_sleepstate_init);
+MODULE_DESCRIPTION("SMP2P SLEEP STATE");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
index 7a4edd7..595f0de 100644
--- a/drivers/soc/qcom/spcom.c
+++ b/drivers/soc/qcom/spcom.c
@@ -400,7 +400,7 @@
 			goto exit_err;
 		}
 	} else {
-		pr_debug("pending data size [%zu], requested size [%zu], ch->txn_id %d\n",
+		pr_debug("pending data size [%zu], requested size [%u], ch->txn_id %d\n",
 			 ch->actual_rx_size, size, ch->txn_id);
 	}
 	if (!ch->rpmsg_rx_buf) {
@@ -895,7 +895,7 @@
 	}
 
 	if (cmd->arg > (unsigned int)INT_MAX) {
-		pr_err("int overflow [%ld]\n", cmd->arg);
+		pr_err("int overflow [%u]\n", cmd->arg);
 		return -EINVAL;
 	}
 	fd = cmd->arg;
@@ -926,7 +926,7 @@
 		if (ch->dmabuf_handle_table[i] == NULL) {
 			ch->dmabuf_handle_table[i] = dma_buf;
 			ch->dmabuf_fd_table[i] = fd;
-			pr_debug("ch [%s] locked ion buf #%d fd [%d] dma_buf=0x%x\n",
+			pr_debug("ch [%s] locked ion buf #%d fd [%d] dma_buf=0x%pK\n",
 				ch->name, i,
 				ch->dmabuf_fd_table[i],
 				ch->dmabuf_handle_table[i]);
@@ -964,7 +964,7 @@
 		return -EINVAL;
 	}
 	if (cmd->arg > (unsigned int)INT_MAX) {
-		pr_err("int overflow [%ld]\n", cmd->arg);
+		pr_err("int overflow [%u]\n", cmd->arg);
 		return -EINVAL;
 	}
 	fd = cmd->arg;
@@ -1000,7 +1000,7 @@
 			if (!ch->dmabuf_handle_table[i])
 				continue;
 			if (ch->dmabuf_handle_table[i] == dma_buf) {
-				pr_debug("ch [%s] unlocked ion buf #%d fd [%d] dma_buf=0x%x\n",
+				pr_debug("ch [%s] unlocked ion buf #%d fd [%d] dma_buf=0x%pK\n",
 					ch->name, i,
 					ch->dmabuf_fd_table[i],
 					ch->dmabuf_handle_table[i]);
@@ -1369,7 +1369,7 @@
 	ch->is_busy = false;
 	ch->pid = 0;
 	if (ch->rpmsg_rx_buf) {
-		pr_debug("ch [%s] discarting unconsumed rx packet actual_rx_size=%d\n",
+		pr_debug("ch [%s] discarting unconsumed rx packet actual_rx_size=%zd\n",
 		       name, ch->actual_rx_size);
 		kfree(ch->rpmsg_rx_buf);
 		ch->rpmsg_rx_buf = NULL;
@@ -1884,7 +1884,7 @@
 
 		if (ch->rpmsg_abort) {
 			if (ch->rpmsg_rx_buf) {
-				pr_debug("ch [%s] rx aborted free %d bytes\n",
+				pr_debug("ch [%s] rx aborted free %zd bytes\n",
 					ch->name, ch->actual_rx_size);
 				kfree(ch->rpmsg_rx_buf);
 				ch->actual_rx_size = 0;
@@ -1892,7 +1892,7 @@
 			goto rx_aborted;
 		}
 		if (ch->rpmsg_rx_buf) {
-			pr_err("ch [%s] previous buffer not consumed %d bytes\n",
+			pr_err("ch [%s] previous buffer not consumed %zd bytes\n",
 			       ch->name, ch->actual_rx_size);
 			kfree(ch->rpmsg_rx_buf);
 			ch->rpmsg_rx_buf = NULL;
diff --git a/drivers/soc/qcom/wda_qmi.c b/drivers/soc/qcom/wda_qmi.c
index 2c15cb8..4fc5c3e 100644
--- a/drivers/soc/qcom/wda_qmi.c
+++ b/drivers/soc/qcom/wda_qmi.c
@@ -1,8 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
+#include <linux/rtnetlink.h>
 #include <linux/soc/qcom/qmi.h>
 #include <soc/qcom/rmnet_qmi.h>
 #define CREATE_TRACE_POINTS
@@ -15,13 +16,14 @@
 	struct work_struct svc_arrive;
 	struct qmi_handle handle;
 	struct sockaddr_qrtr ssctl;
+	struct svc_info svc;
 };
 
 static void wda_svc_config(struct work_struct *work);
 /* **************************************************** */
 #define WDA_SERVICE_ID_V01 0x1A
 #define WDA_SERVICE_VERS_V01 0x01
-#define WDA_TIMEOUT_MS  20
+#define WDA_TIMEOUT_JF  msecs_to_jiffies(1000)
 
 #define QMI_WDA_SET_POWERSAVE_CONFIG_REQ_V01 0x002D
 #define QMI_WDA_SET_POWERSAVE_CONFIG_RESP_V01 0x002D
@@ -231,7 +233,7 @@
 		goto out;
 	}
 
-	ret = qmi_txn_wait(&txn, WDA_TIMEOUT_MS);
+	ret = qmi_txn_wait(&txn, WDA_TIMEOUT_JF);
 	if (ret < 0) {
 		pr_err("%s() Response waiting failed, err: %d\n",
 			__func__, ret);
@@ -247,8 +249,7 @@
 	return ret;
 }
 
-static int wda_set_powersave_config_req(struct qmi_handle *wda_handle,
-					struct qmi_info *qmi)
+static int wda_set_powersave_config_req(struct qmi_handle *wda_handle)
 {
 	struct wda_qmi_data *data = container_of(wda_handle,
 						 struct wda_qmi_data, handle);
@@ -275,8 +276,8 @@
 		goto out;
 	}
 
-	req->ep_id.ep_type = qmi->fc_info[0].svc.ep_type;
-	req->ep_id.iface_id = qmi->fc_info[0].svc.iface_id;
+	req->ep_id.ep_type = data->svc.ep_type;
+	req->ep_id.iface_id = data->svc.iface_id;
 	req->req_data_cfg_valid = 1;
 	req->req_data_cfg = WDA_DATA_POWERSAVE_CONFIG_ALL_MASK_V01;
 	ret = qmi_send_request(wda_handle, &data->ssctl, &txn,
@@ -289,7 +290,7 @@
 		goto out;
 	}
 
-	ret = qmi_txn_wait(&txn, WDA_TIMEOUT_MS);
+	ret = qmi_txn_wait(&txn, WDA_TIMEOUT_JF);
 	if (ret < 0) {
 		pr_err("%s() Response waiting failed, err: %d\n",
 			__func__, ret);
@@ -310,28 +311,30 @@
 	struct wda_qmi_data *data = container_of(work, struct wda_qmi_data,
 						 svc_arrive);
 	struct qmi_info *qmi;
+	int rc;
 
-	qmi = (struct qmi_info *)rmnet_get_qmi_pt(data->rmnet_port);
-	if (!qmi)
-		goto clean_out;
-
-	if (wda_set_powersave_config_req(&data->handle, qmi) < 0) {
-		pr_err("%s() failed, qmi handle pt: %p\n",
-			__func__, &data->handle);
-		goto clean_out;
+	rc = wda_set_powersave_config_req(&data->handle);
+	if (rc < 0) {
+		pr_err("%s Failed to init service, err[%d]\n", __func__, rc);
+		return;
 	}
 
-	trace_wda_client_state_up(qmi->fc_info[0].svc.instance,
-				  qmi->fc_info[0].svc.ep_type,
-				  qmi->fc_info[0].svc.iface_id);
-	qmi->wda_client = (void *)data;
-	pr_info("Connection established with the WDA Service\n");
-	return;
+	rtnl_lock();
+	qmi = (struct qmi_info *)rmnet_get_qmi_pt(data->rmnet_port);
+	if (!qmi) {
+		rtnl_unlock();
+		return;
+	}
 
-clean_out:
-	qmi_handle_release(&data->handle);
-	destroy_workqueue(data->wda_wq);
-	kfree(data);
+	qmi->wda_pending = NULL;
+	qmi->wda_client = (void *)data;
+	trace_wda_client_state_up(data->svc.instance,
+				  data->svc.ep_type,
+				  data->svc.iface_id);
+
+	rtnl_unlock();
+
+	pr_info("Connection established with the WDA Service\n");
 }
 
 static int wda_svc_arrive(struct qmi_handle *qmi, struct qmi_service *svc)
@@ -362,16 +365,15 @@
 	.del_server = wda_svc_exit,
 };
 
-int wda_qmi_client_init(void *port, uint32_t instance)
+int
+wda_qmi_client_init(void *port, struct svc_info *psvc, struct qmi_info *qmi)
 {
 	struct wda_qmi_data *data;
-	int rc = 0;
+	int rc = -ENOMEM;
 
-	if (!port)
+	if (!port || !qmi)
 		return -EINVAL;
 
-	pr_info("%s\n", __func__);
-
 	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
@@ -379,11 +381,11 @@
 	data->wda_wq = create_singlethread_workqueue("wda_wq");
 	if (!data->wda_wq) {
 		pr_err("%s Could not create workqueue\n", __func__);
-		kfree(data);
-		return -ENOMEM;
+		goto err0;
 	}
 
 	data->rmnet_port = port;
+	memcpy(&data->svc, psvc, sizeof(data->svc));
 	INIT_WORK(&data->svc_arrive, wda_svc_config);
 
 	rc = qmi_handle_init(&data->handle,
@@ -391,19 +393,25 @@
 			     &server_ops, NULL);
 	if (rc < 0) {
 		pr_err("%s: Failed qmi_handle_init, err: %d\n", __func__, rc);
-		kfree(data);
-		return rc;
+		goto err1;
 	}
 
 	rc = qmi_add_lookup(&data->handle, WDA_SERVICE_ID_V01,
-			    WDA_SERVICE_VERS_V01, instance);
+			    WDA_SERVICE_VERS_V01, psvc->instance);
 	if (rc < 0) {
 		pr_err("%s(): Failed qmi_add_lookup, err: %d\n", __func__, rc);
-		qmi_handle_release(&data->handle);
-		destroy_workqueue(data->wda_wq);
-		kfree(data);
+		goto err2;
 	}
 
+	qmi->wda_pending = (void *)data;
+	return 0;
+
+err2:
+	qmi_handle_release(&data->handle);
+err1:
+	destroy_workqueue(data->wda_wq);
+err0:
+	kfree(data);
 	return rc;
 }
 
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 36435f7..ffe21e8 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -188,7 +188,7 @@
 
 	res_freq = (sclk_freq / (*clk_div));
 
-	dev_dbg(mas->dev, "%s: req %u resultant %u sclk %lu, idx %d, div %d\n",
+	dev_dbg(mas->dev, "%s: req %u resultant %lu sclk %lu, idx %d, div %d\n",
 		__func__, speed_hz, res_freq, sclk_freq, *clk_idx, *clk_div);
 
 	ret = clk_set_rate(rsc->se_clk, sclk_freq);
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index fdcf307..4b77fa1 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -15,6 +15,7 @@
 
 #include <linux/clk.h>
 #include <linux/dmaengine.h>
+#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/pinctrl/consumer.h>
diff --git a/drivers/spmi/spmi-pmic-arb-debug.c b/drivers/spmi/spmi-pmic-arb-debug.c
index 83ef9ed..31f72ea 100644
--- a/drivers/spmi/spmi-pmic-arb-debug.c
+++ b/drivers/spmi/spmi-pmic-arb-debug.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. */
 
 #include <linux/delay.h>
 #include <linux/err.h>
@@ -328,17 +328,7 @@
 	},
 };
 
-int __init spmi_pmic_arb_debug_init(void)
-{
-	return platform_driver_register(&spmi_pmic_arb_debug_driver);
-}
-arch_initcall(spmi_pmic_arb_debug_init);
-
-static void __exit spmi_pmic_arb_debug_exit(void)
-{
-	platform_driver_unregister(&spmi_pmic_arb_debug_driver);
-}
-module_exit(spmi_pmic_arb_debug_exit);
+module_platform_driver(spmi_pmic_arb_debug_driver);
 
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("platform:spmi_pmic_arb_debug");
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index a880b5c..be81533 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -75,6 +75,9 @@
 /* LRU list of unpinned pages, protected by ashmem_mutex */
 static LIST_HEAD(ashmem_lru_list);
 
+static atomic_t ashmem_shrink_inflight = ATOMIC_INIT(0);
+static DECLARE_WAIT_QUEUE_HEAD(ashmem_shrink_wait);
+
 /*
  * long lru_count - The count of pages on our LRU list.
  *
@@ -168,19 +171,15 @@
  * @end:	   The ending page (inclusive)
  *
  * This function is protected by ashmem_mutex.
- *
- * Return: 0 if successful, or -ENOMEM if there is an error
  */
-static int range_alloc(struct ashmem_area *asma,
-		       struct ashmem_range *prev_range, unsigned int purged,
-		       size_t start, size_t end)
+static void range_alloc(struct ashmem_area *asma,
+			struct ashmem_range *prev_range, unsigned int purged,
+			size_t start, size_t end,
+			struct ashmem_range **new_range)
 {
-	struct ashmem_range *range;
+	struct ashmem_range *range = *new_range;
 
-	range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
-	if (!range)
-		return -ENOMEM;
-
+	*new_range = NULL;
 	range->asma = asma;
 	range->pgstart = start;
 	range->pgend = end;
@@ -190,8 +189,6 @@
 
 	if (range_on_lru(range))
 		lru_add(range);
-
-	return 0;
 }
 
 /**
@@ -438,7 +435,6 @@
 static unsigned long
 ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
-	struct ashmem_range *range, *next;
 	unsigned long freed = 0;
 
 	/* We might recurse into filesystem code, so bail out if necessary */
@@ -448,21 +444,33 @@
 	if (!mutex_trylock(&ashmem_mutex))
 		return -1;
 
-	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
+	while (!list_empty(&ashmem_lru_list)) {
+		struct ashmem_range *range =
+			list_first_entry(&ashmem_lru_list, typeof(*range), lru);
 		loff_t start = range->pgstart * PAGE_SIZE;
 		loff_t end = (range->pgend + 1) * PAGE_SIZE;
+		struct file *f = range->asma->file;
 
-		range->asma->file->f_op->fallocate(range->asma->file,
-				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
-				start, end - start);
+		get_file(f);
+		atomic_inc(&ashmem_shrink_inflight);
 		range->purged = ASHMEM_WAS_PURGED;
 		lru_del(range);
 
 		freed += range_size(range);
+		mutex_unlock(&ashmem_mutex);
+		f->f_op->fallocate(f,
+				   FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+				   start, end - start);
+		fput(f);
+		if (atomic_dec_and_test(&ashmem_shrink_inflight))
+			wake_up_all(&ashmem_shrink_wait);
+		if (!mutex_trylock(&ashmem_mutex))
+			goto out;
 		if (--sc->nr_to_scan <= 0)
 			break;
 	}
 	mutex_unlock(&ashmem_mutex);
+out:
 	return freed;
 }
 
@@ -582,7 +590,8 @@
  *
  * Caller must hold ashmem_mutex.
  */
-static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
+static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
+		      struct ashmem_range **new_range)
 {
 	struct ashmem_range *range, *next;
 	int ret = ASHMEM_NOT_PURGED;
@@ -635,7 +644,7 @@
 			 * second half and adjust the first chunk's endpoint.
 			 */
 			range_alloc(asma, range, range->purged,
-				    pgend + 1, range->pgend);
+				    pgend + 1, range->pgend, new_range);
 			range_shrink(range, range->pgstart, pgstart - 1);
 			break;
 		}
@@ -649,7 +658,8 @@
  *
  * Caller must hold ashmem_mutex.
  */
-static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
+static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
+			struct ashmem_range **new_range)
 {
 	struct ashmem_range *range, *next;
 	unsigned int purged = ASHMEM_NOT_PURGED;
@@ -675,7 +685,8 @@
 		}
 	}
 
-	return range_alloc(asma, range, purged, pgstart, pgend);
+	range_alloc(asma, range, purged, pgstart, pgend, new_range);
+	return 0;
 }
 
 /*
@@ -708,11 +719,19 @@
 	struct ashmem_pin pin;
 	size_t pgstart, pgend;
 	int ret = -EINVAL;
+	struct ashmem_range *range = NULL;
 
 	if (copy_from_user(&pin, p, sizeof(pin)))
 		return -EFAULT;
 
+	if (cmd == ASHMEM_PIN || cmd == ASHMEM_UNPIN) {
+		range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
+		if (!range)
+			return -ENOMEM;
+	}
+
 	mutex_lock(&ashmem_mutex);
+	wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));
 
 	if (!asma->file)
 		goto out_unlock;
@@ -735,10 +754,10 @@
 
 	switch (cmd) {
 	case ASHMEM_PIN:
-		ret = ashmem_pin(asma, pgstart, pgend);
+		ret = ashmem_pin(asma, pgstart, pgend, &range);
 		break;
 	case ASHMEM_UNPIN:
-		ret = ashmem_unpin(asma, pgstart, pgend);
+		ret = ashmem_unpin(asma, pgstart, pgend, &range);
 		break;
 	case ASHMEM_GET_PIN_STATUS:
 		ret = ashmem_get_pin_status(asma, pgstart, pgend);
@@ -747,6 +766,8 @@
 
 out_unlock:
 	mutex_unlock(&ashmem_mutex);
+	if (range)
+		kmem_cache_free(ashmem_range_cachep, range);
 
 	return ret;
 }
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index f116a64..5a53ae0 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -172,8 +172,7 @@
 void ion_buffer_destroy(struct ion_buffer *buffer)
 {
 	if (buffer->kmap_cnt > 0) {
-		pr_warn_once("%s: buffer still mapped in the kernel\n",
-			     __func__);
+		pr_warn_ratelimited("ION client likely missing a call to dma_buf_kunmap or dma_buf_vunmap\n");
 		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
 	}
 	buffer->heap->ops->free(buffer);
@@ -220,7 +219,7 @@
 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
 {
 	if (buffer->kmap_cnt == 0) {
-		pr_warn_ratelimited("Call dma_buf_begin_cpu_access before dma_buf_end_cpu_access, pid:%d\n",
+		pr_warn_ratelimited("ION client likely missing a call to dma_buf_kmap or dma_buf_vmap, pid:%d\n",
 				    current->pid);
 		return;
 	}
@@ -310,9 +309,9 @@
 	struct ion_buffer *buffer = dmabuf->priv;
 
 	mutex_lock(&buffer->lock);
-	free_duped_table(a->table);
 	list_del(&a->list);
 	mutex_unlock(&buffer->lock);
+	free_duped_table(a->table);
 
 	kfree(a);
 }
@@ -495,31 +494,59 @@
 	struct ion_buffer *buffer = dmabuf->priv;
 
 	_ion_buffer_destroy(buffer);
-}
-
-static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
-{
-	struct ion_buffer *buffer = dmabuf->priv;
-
-	WARN(!buffer->vaddr, "Call dma_buf_begin_cpu_access before dma_buf_kmap\n");
-	return buffer->vaddr + offset * PAGE_SIZE;
-}
-
-static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
-			       void *ptr)
-{
+	kfree(dmabuf->exp_name);
 }
 
 static void *ion_dma_buf_vmap(struct dma_buf *dmabuf)
 {
 	struct ion_buffer *buffer = dmabuf->priv;
+	void *vaddr = ERR_PTR(-EINVAL);
 
-	WARN(!buffer->vaddr, "Call dma_buf_begin_cpu_access before dma_buf_vmap\n");
-	return buffer->vaddr;
+	if (buffer->heap->ops->map_kernel) {
+		mutex_lock(&buffer->lock);
+		vaddr = ion_buffer_kmap_get(buffer);
+		mutex_unlock(&buffer->lock);
+	} else {
+		pr_warn_ratelimited("heap %s doesn't support map_kernel\n",
+				    buffer->heap->name);
+	}
+
+	return vaddr;
 }
 
 static void ion_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
 {
+	struct ion_buffer *buffer = dmabuf->priv;
+
+	if (buffer->heap->ops->map_kernel) {
+		mutex_lock(&buffer->lock);
+		ion_buffer_kmap_put(buffer);
+		mutex_unlock(&buffer->lock);
+	}
+}
+
+static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
+{
+	/*
+	 * TODO: Once clients remove their hacks where they assume kmap(ed)
+	 * addresses are virtually contiguous implement this properly
+	 */
+	void *vaddr = ion_dma_buf_vmap(dmabuf);
+
+	if (IS_ERR(vaddr))
+		return vaddr;
+
+	return vaddr + offset * PAGE_SIZE;
+}
+
+static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
+			       void *ptr)
+{
+	/*
+	 * TODO: Once clients remove their hacks where they assume kmap(ed)
+	 * addresses are virtually contiguous implement this properly
+	 */
+	ion_dma_buf_vunmap(dmabuf, ptr);
 }
 
 static int ion_sgl_sync_range(struct device *dev, struct scatterlist *sgl,
@@ -604,7 +631,6 @@
 					  bool sync_only_mapped)
 {
 	struct ion_buffer *buffer = dmabuf->priv;
-	void *vaddr;
 	struct ion_dma_buf_attachment *a;
 	int ret = 0;
 
@@ -617,19 +643,6 @@
 		goto out;
 	}
 
-	/*
-	 * TODO: Move this elsewhere because we don't always need a vaddr
-	 */
-	if (buffer->heap->ops->map_kernel) {
-		mutex_lock(&buffer->lock);
-		vaddr = ion_buffer_kmap_get(buffer);
-		if (IS_ERR(vaddr)) {
-			ret = PTR_ERR(vaddr);
-			goto unlock;
-		}
-		mutex_unlock(&buffer->lock);
-	}
-
 	if (!(buffer->flags & ION_FLAG_CACHED)) {
 		trace_ion_begin_cpu_access_cmo_skip(NULL, dmabuf->name, false,
 						    true, direction,
@@ -701,8 +714,6 @@
 		}
 
 	}
-
-unlock:
 	mutex_unlock(&buffer->lock);
 out:
 	return ret;
@@ -725,12 +736,6 @@
 		goto out;
 	}
 
-	if (buffer->heap->ops->map_kernel) {
-		mutex_lock(&buffer->lock);
-		ion_buffer_kmap_put(buffer);
-		mutex_unlock(&buffer->lock);
-	}
-
 	if (!(buffer->flags & ION_FLAG_CACHED)) {
 		trace_ion_end_cpu_access_cmo_skip(NULL, dmabuf->name, false,
 						  true, direction,
@@ -833,7 +838,6 @@
 						unsigned int len)
 {
 	struct ion_buffer *buffer = dmabuf->priv;
-	void *vaddr;
 	struct ion_dma_buf_attachment *a;
 	int ret = 0;
 
@@ -846,15 +850,6 @@
 		goto out;
 	}
 
-	/*
-	 * TODO: Move this elsewhere because we don't always need a vaddr
-	 */
-	if (buffer->heap->ops->map_kernel) {
-		mutex_lock(&buffer->lock);
-		vaddr = ion_buffer_kmap_get(buffer);
-		mutex_unlock(&buffer->lock);
-	}
-
 	if (!(buffer->flags & ION_FLAG_CACHED)) {
 		trace_ion_begin_cpu_access_cmo_skip(NULL, dmabuf->name, false,
 						    true, dir,
@@ -934,12 +929,6 @@
 		goto out;
 	}
 
-	if (buffer->heap->ops->map_kernel) {
-		mutex_lock(&buffer->lock);
-		ion_buffer_kmap_put(buffer);
-		mutex_unlock(&buffer->lock);
-	}
-
 	if (!(buffer->flags & ION_FLAG_CACHED)) {
 		trace_ion_end_cpu_access_cmo_skip(NULL, dmabuf->name, false,
 						  true, direction,
@@ -1038,6 +1027,7 @@
 	struct ion_heap *heap;
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 	struct dma_buf *dmabuf;
+	char task_comm[TASK_COMM_LEN];
 
 	pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
 		 len, heap_id_mask, flags);
@@ -1069,14 +1059,20 @@
 	if (IS_ERR(buffer))
 		return ERR_CAST(buffer);
 
+	get_task_comm(task_comm, current->group_leader);
+
 	exp_info.ops = &dma_buf_ops;
 	exp_info.size = buffer->size;
 	exp_info.flags = O_RDWR;
 	exp_info.priv = buffer;
+	exp_info.exp_name = kasprintf(GFP_KERNEL, "%s-%s-%d-%s", KBUILD_MODNAME,
+				      heap->name, current->tgid, task_comm);
 
 	dmabuf = dma_buf_export(&exp_info);
-	if (IS_ERR(dmabuf))
+	if (IS_ERR(dmabuf)) {
 		_ion_buffer_destroy(buffer);
+		kfree(exp_info.exp_name);
+	}
 
 	return dmabuf;
 }
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
index a9aed00..8b29a76 100644
--- a/drivers/staging/android/ion/ion.h
+++ b/drivers/staging/android/ion/ion.h
@@ -3,7 +3,7 @@
  * drivers/staging/android/ion/ion.h
  *
  * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
  *
  */
 
@@ -30,6 +30,7 @@
 #define ION_MM_HEAP_NAME	"mm"
 #define ION_SPSS_HEAP_NAME	"spss"
 #define ION_SECURE_CARVEOUT_HEAP_NAME	"secure_carveout"
+#define ION_USER_CONTIG_HEAP_NAME	"user_contig"
 #define ION_QSECOM_HEAP_NAME	"qsecom"
 #define ION_QSECOM_TA_HEAP_NAME	"qsecom_ta"
 #define ION_SECURE_HEAP_NAME	"secure_heap"
diff --git a/drivers/staging/android/ion/ion_cma_secure_heap.c b/drivers/staging/android/ion/ion_cma_secure_heap.c
index 5a18b27..8e28ba0 100644
--- a/drivers/staging/android/ion/ion_cma_secure_heap.c
+++ b/drivers/staging/android/ion/ion_cma_secure_heap.c
@@ -343,8 +343,8 @@
 	kfree(chunk);
 }
 
-static void __ion_secure_cma_shrink_pool(struct ion_cma_secure_heap *sheap,
-					 int max_nr)
+static unsigned long
+__ion_secure_cma_shrink_pool(struct ion_cma_secure_heap *sheap, int max_nr)
 {
 	struct list_head *entry, *_n;
 	unsigned long drained_size = 0, skipped_size = 0;
@@ -368,6 +368,7 @@
 	}
 
 	trace_ion_secure_cma_shrink_pool_end(drained_size, skipped_size);
+	return drained_size;
 }
 
 int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused)
@@ -385,6 +386,7 @@
 static unsigned long ion_secure_cma_shrinker(struct shrinker *shrinker,
 					     struct shrink_control *sc)
 {
+	unsigned long freed;
 	struct ion_cma_secure_heap *sheap = container_of(shrinker,
 					struct ion_cma_secure_heap, shrinker);
 	int nr_to_scan = sc->nr_to_scan;
@@ -397,11 +399,11 @@
 	if (!mutex_trylock(&sheap->chunk_lock))
 		return -EAGAIN;
 
-	__ion_secure_cma_shrink_pool(sheap, nr_to_scan);
+	freed = __ion_secure_cma_shrink_pool(sheap, nr_to_scan);
 
 	mutex_unlock(&sheap->chunk_lock);
 
-	return atomic_read(&sheap->total_pool_size);
+	return freed;
 }
 
 static unsigned long ion_secure_cma_shrinker_count(struct shrinker *shrinker,
diff --git a/drivers/staging/android/ion/ion_secure_util.c b/drivers/staging/android/ion/ion_secure_util.c
index df88427..1c1d4dd 100644
--- a/drivers/staging/android/ion/ion_secure_util.c
+++ b/drivers/staging/android/ion/ion_secure_util.c
@@ -89,7 +89,8 @@
 }
 
 int ion_hyp_unassign_sg(struct sg_table *sgt, int *source_vm_list,
-			int source_nelems, bool clear_page_private)
+			int source_nelems, bool clear_page_private,
+			bool try_lock)
 {
 	u32 dest_vmid = VMID_HLOS;
 	u32 dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
@@ -103,11 +104,16 @@
 		goto out;
 	}
 
-	ret = hyp_assign_table(sgt, source_vm_list, source_nelems,
-			       &dest_vmid, &dest_perms, 1);
+	if (try_lock)
+		ret = try_hyp_assign_table(sgt, source_vm_list, source_nelems,
+					   &dest_vmid, &dest_perms, 1);
+	else
+		ret = hyp_assign_table(sgt, source_vm_list, source_nelems,
+				       &dest_vmid, &dest_perms, 1);
 	if (ret) {
-		pr_err("%s: Unassign call failed.\n",
-		       __func__);
+		if (!try_lock)
+			pr_err("%s: Unassign call failed.\n",
+			       __func__);
 		goto out;
 	}
 	if (clear_page_private)
@@ -183,7 +189,7 @@
 	}
 
 	ret = ion_hyp_unassign_sg(sgt, source_vm_list, source_nelems,
-				  set_page_private);
+				  set_page_private, false);
 
 out_free_source:
 	kfree(source_vm_list);
diff --git a/drivers/staging/android/ion/ion_secure_util.h b/drivers/staging/android/ion/ion_secure_util.h
index 6267342..bd525e5 100644
--- a/drivers/staging/android/ion/ion_secure_util.h
+++ b/drivers/staging/android/ion/ion_secure_util.h
@@ -13,7 +13,8 @@
 int ion_hyp_assign_sg(struct sg_table *sgt, int *dest_vm_list,
 		      int dest_nelems, bool set_page_private);
 int ion_hyp_unassign_sg(struct sg_table *sgt, int *source_vm_list,
-			int source_nelems, bool clear_page_private);
+			int source_nelems, bool clear_page_private,
+			bool try_lock);
 int ion_hyp_unassign_sg_from_flags(struct sg_table *sgt, unsigned long flags,
 				   bool set_page_private);
 int ion_hyp_assign_sg_from_flags(struct sg_table *sgt, unsigned long flags,
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index aa781f5..35355e5 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -3,7 +3,7 @@
  * drivers/staging/android/ion/ion_system_heap.c
  *
  * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
  *
  */
 
@@ -158,6 +158,9 @@
 	struct page_info *info;
 	int i;
 
+	if (buffer->flags & ION_FLAG_POOL_FORCE_ALLOC)
+		goto force_alloc;
+
 	info = kmalloc(sizeof(*info), GFP_KERNEL);
 	if (!info)
 		return ERR_PTR(-ENOMEM);
@@ -189,6 +192,7 @@
 	}
 
 	kfree(info);
+force_alloc:
 	return alloc_largest_available(heap, buffer, size, max_order);
 }
 
@@ -325,8 +329,10 @@
 		goto err;
 
 	table = kzalloc(sizeof(*table), GFP_KERNEL);
-	if (!table)
+	if (!table) {
+		ret = -ENOMEM;
 		goto err_free_data_pages;
+	}
 
 	ret = sg_alloc_table(table, i, GFP_KERNEL);
 	if (ret)
@@ -388,7 +394,7 @@
 	buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
 
 	if (vmid > 0)
-		ion_hyp_unassign_sg(table, &vmid, 1, true);
+		ion_hyp_unassign_sg(table, &vmid, 1, true, false);
 
 	for_each_sg(table->sgl, sg, table->nents, i)
 		free_buffer_page(sys_heap, buffer, sg_page(sg),
@@ -429,7 +435,7 @@
 		if (vmid < 0)
 			ion_heap_buffer_zero(buffer);
 	} else if (vmid > 0) {
-		if (ion_hyp_unassign_sg(table, &vmid, 1, true))
+		if (ion_hyp_unassign_sg(table, &vmid, 1, true, false))
 			return;
 	}
 
@@ -613,6 +619,7 @@
 					bool cached)
 {
 	int i;
+
 	for (i = 0; i < NUM_ORDERS; i++) {
 		struct ion_page_pool *pool;
 		gfp_t gfp_flags = low_order_gfp_flags;
diff --git a/drivers/staging/android/ion/ion_system_secure_heap.c b/drivers/staging/android/ion/ion_system_secure_heap.c
index 53fcd55..f0d8d72 100644
--- a/drivers/staging/android/ion/ion_system_secure_heap.c
+++ b/drivers/staging/android/ion/ion_system_secure_heap.c
@@ -149,7 +149,8 @@
 	return total << PAGE_SHIFT;
 }
 
-static void process_one_shrink(struct ion_heap *sys_heap,
+static void process_one_shrink(struct ion_system_secure_heap *secure_heap,
+			       struct ion_heap *sys_heap,
 			       struct prefetch_info *info)
 {
 	struct ion_buffer buffer;
@@ -157,7 +158,7 @@
 	int ret;
 
 	memset(&buffer, 0, sizeof(struct ion_buffer));
-	buffer.heap = sys_heap;
+	buffer.heap = &secure_heap->heap;
 	buffer.flags = info->vmid;
 
 	pool_size = ion_system_secure_heap_page_pool_total(sys_heap,
@@ -171,6 +172,7 @@
 	}
 
 	buffer.private_flags = ION_PRIV_FLAG_SHRINKER_FREE;
+	buffer.heap = sys_heap;
 	sys_heap->ops->free(&buffer);
 }
 
@@ -190,7 +192,7 @@
 		spin_unlock_irqrestore(&secure_heap->work_lock, flags);
 
 		if (info->shrink)
-			process_one_shrink(sys_heap, info);
+			process_one_shrink(secure_heap, sys_heap, info);
 		else
 			process_one_prefetch(sys_heap, info);
 
@@ -205,7 +207,7 @@
 			       struct list_head *items)
 {
 	struct prefetch_info *info;
-	u64 __user *user_sizes;
+	u64 user_sizes;
 	int err;
 	unsigned int nr_sizes, vmid, i;
 
@@ -226,7 +228,7 @@
 		if (!info)
 			return -ENOMEM;
 
-		err = get_user(info->size, &user_sizes[i]);
+		err = get_user(info->size, ((u64 __user *)user_sizes + i));
 		if (err)
 			goto out_free;
 
@@ -260,7 +262,10 @@
 		return -EINVAL;
 
 	for (i = 0; i < data->nr_regions; i++) {
-		ret = alloc_prefetch_info(&data->regions[i], shrink, &items);
+		struct ion_prefetch_regions *r;
+
+		r = (struct ion_prefetch_regions *)data->regions + i;
+		ret = alloc_prefetch_info(r, shrink, &items);
 		if (ret)
 			goto out_free;
 	}
@@ -270,9 +275,9 @@
 		spin_unlock_irqrestore(&secure_heap->work_lock, flags);
 		goto out_free;
 	}
-	list_splice_init(&items, &secure_heap->prefetch_list);
-	schedule_delayed_work(&secure_heap->prefetch_work,
-			      shrink ? msecs_to_jiffies(SHRINK_DELAY) : 0);
+	list_splice_tail_init(&items, &secure_heap->prefetch_list);
+	queue_delayed_work(system_unbound_wq, &secure_heap->prefetch_work,
+			   shrink ?  msecs_to_jiffies(SHRINK_DELAY) : 0);
 	spin_unlock_irqrestore(&secure_heap->work_lock, flags);
 
 	return 0;
@@ -449,7 +454,10 @@
 		sg = sg_next(sg);
 	}
 
-	if (ion_hyp_unassign_sg(&sgt, &vmid, 1, true))
+	ret = ion_hyp_unassign_sg(&sgt, &vmid, 1, true, true);
+	if (ret == -EADDRNOTAVAIL)
+		goto out3;
+	else if (ret < 0)
 		goto out2;
 
 	list_for_each_entry_safe(page, tmp, &pages, lru) {
@@ -460,6 +468,8 @@
 	sg_free_table(&sgt);
 	return freed;
 
+out2:
+	sg_free_table(&sgt);
 out1:
 	/* Restore pages to secure pool */
 	list_for_each_entry_safe(page, tmp, &pages, lru) {
@@ -467,7 +477,7 @@
 		ion_page_pool_free(pool, page);
 	}
 	return 0;
-out2:
+out3:
 	/*
 	 * The security state of the pages is unknown after a failure;
 	 * They can neither be added back to the secure pool nor buddy system.
diff --git a/drivers/staging/android/ion/msm/msm_ion_of.c b/drivers/staging/android/ion/msm/msm_ion_of.c
index 4c313b9..a1dc3f8 100644
--- a/drivers/staging/android/ion/msm/msm_ion_of.c
+++ b/drivers/staging/android/ion/msm/msm_ion_of.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/err.h>
@@ -41,6 +41,10 @@
 		.name	= ION_MM_HEAP_NAME,
 	},
 	{
+		.id	= ION_USER_CONTIG_HEAP_ID,
+		.name	= ION_USER_CONTIG_HEAP_NAME,
+	},
+	{
 		.id	= ION_QSECOM_HEAP_ID,
 		.name	= ION_QSECOM_HEAP_NAME,
 	},
@@ -161,6 +165,10 @@
 				base = cma_get_base(dev->cma_area);
 				size = cma_get_size(dev->cma_area);
 				ret = 0;
+			} else if (dev->dma_mem) {
+				base = dma_get_device_base(dev, dev->dma_mem);
+				size = dma_get_size(dev->dma_mem);
+				ret = 0;
 			}
 		} else {
 			base = of_translate_address(pnode, basep);
diff --git a/drivers/staging/android/uapi/msm_ion.h b/drivers/staging/android/uapi/msm_ion.h
index f6016c7..860ec69 100644
--- a/drivers/staging/android/uapi/msm_ion.h
+++ b/drivers/staging/android/uapi/msm_ion.h
@@ -53,6 +53,7 @@
 #define ION_QSECOM_TA_HEAP_ID		19
 #define ION_AUDIO_HEAP_ID		28
 #define ION_CAMERA_HEAP_ID		20
+#define ION_USER_CONTIG_HEAP_ID		26
 /**
  * Flags to be used when allocating from the secure heap for
  * content protection
@@ -100,15 +101,15 @@
 #define ION_IOC_MSM_MAGIC 'M'
 
 struct ion_prefetch_regions {
+	__u64 sizes;
 	__u32 vmid;
-	__u64 __user *sizes;
 	__u32 nr_sizes;
 };
 
 struct ion_prefetch_data {
-	__u32 heap_id;
 	__u64 len;
-	struct ion_prefetch_regions __user *regions;
+	__u64 regions;
+	__u32 heap_id;
 	__u32 nr_regions;
 };
 
diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
index e521ed9..35bd4d2 100644
--- a/drivers/staging/comedi/drivers/ni_660x.c
+++ b/drivers/staging/comedi/drivers/ni_660x.c
@@ -602,6 +602,7 @@
 	case NI_660X_PFI_OUTPUT_DIO:
 		if (chan > 31)
 			return -EINVAL;
+		break;
 	default:
 		return -EINVAL;
 	}
diff --git a/drivers/staging/erofs/inode.c b/drivers/staging/erofs/inode.c
index 9e7815f..7448744 100644
--- a/drivers/staging/erofs/inode.c
+++ b/drivers/staging/erofs/inode.c
@@ -184,16 +184,16 @@
 		/* setup the new inode */
 		if (S_ISREG(inode->i_mode)) {
 #ifdef CONFIG_EROFS_FS_XATTR
-			if (vi->xattr_isize)
-				inode->i_op = &erofs_generic_xattr_iops;
+			inode->i_op = &erofs_generic_xattr_iops;
 #endif
 			inode->i_fop = &generic_ro_fops;
 		} else if (S_ISDIR(inode->i_mode)) {
 			inode->i_op =
 #ifdef CONFIG_EROFS_FS_XATTR
-				vi->xattr_isize ? &erofs_dir_xattr_iops :
-#endif
+				&erofs_dir_xattr_iops;
+#else
 				&erofs_dir_iops;
+#endif
 			inode->i_fop = &erofs_dir_fops;
 		} else if (S_ISLNK(inode->i_mode)) {
 			/* by default, page_get_link is used for symlink */
diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
index 9f44ed8..58d8cbc 100644
--- a/drivers/staging/erofs/internal.h
+++ b/drivers/staging/erofs/internal.h
@@ -260,6 +260,7 @@
 }
 
 #define __erofs_workgroup_get(grp)	atomic_inc(&(grp)->refcount)
+#define __erofs_workgroup_put(grp)	atomic_dec(&(grp)->refcount)
 
 extern int erofs_workgroup_put(struct erofs_workgroup *grp);
 
@@ -327,12 +328,17 @@
 	return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits);
 }
 
-#define inode_set_inited_xattr(inode)   (EROFS_V(inode)->flags |= 1)
-#define inode_has_inited_xattr(inode)   (EROFS_V(inode)->flags & 1)
+/* atomic flag definitions */
+#define EROFS_V_EA_INITED_BIT	0
+
+/* bitlock definitions (arranged in reverse order) */
+#define EROFS_V_BL_XATTR_BIT	(BITS_PER_LONG - 1)
 
 struct erofs_vnode {
 	erofs_nid_t nid;
-	unsigned int flags;
+
+	/* atomic flags (including bitlocks) */
+	unsigned long flags;
 
 	unsigned char data_mapping_mode;
 	/* inline size in bytes */
@@ -485,8 +491,9 @@
 };
 
 
-static inline struct page *erofs_get_inline_page(struct inode *inode,
-	erofs_blk_t blkaddr)
+static inline struct page *
+erofs_get_inline_page(struct inode *inode,
+		      erofs_blk_t blkaddr)
 {
 	return erofs_get_meta_page(inode->i_sb,
 		blkaddr, S_ISDIR(inode->i_mode));
diff --git a/drivers/staging/erofs/namei.c b/drivers/staging/erofs/namei.c
index 546a471..023f64f 100644
--- a/drivers/staging/erofs/namei.c
+++ b/drivers/staging/erofs/namei.c
@@ -15,74 +15,77 @@
 
 #include <trace/events/erofs.h>
 
-/* based on the value of qn->len is accurate */
-static inline int dirnamecmp(struct qstr *qn,
-	struct qstr *qd, unsigned *matched)
+struct erofs_qstr {
+	const unsigned char *name;
+	const unsigned char *end;
+};
+
+/* based on the end of qn is accurate and it must have the trailing '\0' */
+static inline int dirnamecmp(const struct erofs_qstr *qn,
+			     const struct erofs_qstr *qd,
+			     unsigned int *matched)
 {
-	unsigned i = *matched, len = min(qn->len, qd->len);
-loop:
-	if (unlikely(i >= len)) {
-		*matched = i;
-		if (qn->len < qd->len) {
-			/*
-			 * actually (qn->len == qd->len)
-			 * when qd->name[i] == '\0'
-			 */
-			return qd->name[i] == '\0' ? 0 : -1;
+	unsigned int i = *matched;
+
+	/*
+	 * on-disk error, let's only BUG_ON in the debugging mode.
+	 * otherwise, it will return 1 to just skip the invalid name
+	 * and go on (in consideration of the lookup performance).
+	 */
+	DBG_BUGON(qd->name > qd->end);
+
+	/* qd could not have trailing '\0' */
+	/* However it is absolutely safe if < qd->end */
+	while (qd->name + i < qd->end && qd->name[i] != '\0') {
+		if (qn->name[i] != qd->name[i]) {
+			*matched = i;
+			return qn->name[i] > qd->name[i] ? 1 : -1;
 		}
-		return (qn->len > qd->len);
+		++i;
 	}
-
-	if (qn->name[i] != qd->name[i]) {
-		*matched = i;
-		return qn->name[i] > qd->name[i] ? 1 : -1;
-	}
-
-	++i;
-	goto loop;
+	*matched = i;
+	/* See comments in __d_alloc on the terminating NUL character */
+	return qn->name[i] == '\0' ? 0 : 1;
 }
 
-static struct erofs_dirent *find_target_dirent(
-	struct qstr *name,
-	u8 *data, int maxsize)
+#define nameoff_from_disk(off, sz)	(le16_to_cpu(off) & ((sz) - 1))
+
+static struct erofs_dirent *find_target_dirent(struct erofs_qstr *name,
+					       u8 *data,
+					       unsigned int dirblksize,
+					       const int ndirents)
 {
-	unsigned ndirents, head, back;
-	unsigned startprfx, endprfx;
+	int head, back;
+	unsigned int startprfx, endprfx;
 	struct erofs_dirent *const de = (struct erofs_dirent *)data;
 
-	/* make sure that maxsize is valid */
-	BUG_ON(maxsize < sizeof(struct erofs_dirent));
-
-	ndirents = le16_to_cpu(de->nameoff) / sizeof(*de);
-
-	/* corrupted dir (may be unnecessary...) */
-	BUG_ON(!ndirents);
-
-	head = 0;
+	/* since the 1st dirent has been evaluated previously */
+	head = 1;
 	back = ndirents - 1;
 	startprfx = endprfx = 0;
 
 	while (head <= back) {
-		unsigned mid = head + (back - head) / 2;
-		unsigned nameoff = le16_to_cpu(de[mid].nameoff);
-		unsigned matched = min(startprfx, endprfx);
-
-		struct qstr dname = QSTR_INIT(data + nameoff,
-			unlikely(mid >= ndirents - 1) ?
-				maxsize - nameoff :
-				le16_to_cpu(de[mid + 1].nameoff) - nameoff);
+		const int mid = head + (back - head) / 2;
+		const int nameoff = nameoff_from_disk(de[mid].nameoff,
+						      dirblksize);
+		unsigned int matched = min(startprfx, endprfx);
+		struct erofs_qstr dname = {
+			.name = data + nameoff,
+			.end = unlikely(mid >= ndirents - 1) ?
+				data + dirblksize :
+				data + nameoff_from_disk(de[mid + 1].nameoff,
+							 dirblksize)
+		};
 
 		/* string comparison without already matched prefix */
 		int ret = dirnamecmp(name, &dname, &matched);
 
-		if (unlikely(!ret))
+		if (unlikely(!ret)) {
 			return de + mid;
-		else if (ret > 0) {
+		} else if (ret > 0) {
 			head = mid + 1;
 			startprfx = matched;
-		} else if (unlikely(mid < 1))	/* fix "mid" overflow */
-			break;
-		else {
+		} else {
 			back = mid - 1;
 			endprfx = matched;
 		}
@@ -91,12 +94,12 @@
 	return ERR_PTR(-ENOENT);
 }
 
-static struct page *find_target_block_classic(
-	struct inode *dir,
-	struct qstr *name, int *_diff)
+static struct page *find_target_block_classic(struct inode *dir,
+					      struct erofs_qstr *name,
+					      int *_ndirents)
 {
-	unsigned startprfx, endprfx;
-	unsigned head, back;
+	unsigned int startprfx, endprfx;
+	int head, back;
 	struct address_space *const mapping = dir->i_mapping;
 	struct page *candidate = ERR_PTR(-ENOENT);
 
@@ -105,41 +108,43 @@
 	back = inode_datablocks(dir) - 1;
 
 	while (head <= back) {
-		unsigned mid = head + (back - head) / 2;
+		const int mid = head + (back - head) / 2;
 		struct page *page = read_mapping_page(mapping, mid, NULL);
 
-		if (IS_ERR(page)) {
-exact_out:
-			if (!IS_ERR(candidate)) /* valid candidate */
-				put_page(candidate);
-			return page;
-		} else {
-			int diff;
-			unsigned ndirents, matched;
-			struct qstr dname;
+		if (!IS_ERR(page)) {
 			struct erofs_dirent *de = kmap_atomic(page);
-			unsigned nameoff = le16_to_cpu(de->nameoff);
+			const int nameoff = nameoff_from_disk(de->nameoff,
+							      EROFS_BLKSIZ);
+			const int ndirents = nameoff / sizeof(*de);
+			int diff;
+			unsigned int matched;
+			struct erofs_qstr dname;
 
-			ndirents = nameoff / sizeof(*de);
-
-			/* corrupted dir (should have one entry at least) */
-			BUG_ON(!ndirents || nameoff > PAGE_SIZE);
+			if (unlikely(!ndirents)) {
+				DBG_BUGON(1);
+				kunmap_atomic(de);
+				put_page(page);
+				page = ERR_PTR(-EIO);
+				goto out;
+			}
 
 			matched = min(startprfx, endprfx);
 
 			dname.name = (u8 *)de + nameoff;
-			dname.len = ndirents == 1 ?
-				/* since the rest of the last page is 0 */
-				EROFS_BLKSIZ - nameoff
-				: le16_to_cpu(de[1].nameoff) - nameoff;
+			if (ndirents == 1)
+				dname.end = (u8 *)de + EROFS_BLKSIZ;
+			else
+				dname.end = (u8 *)de +
+					nameoff_from_disk(de[1].nameoff,
+							  EROFS_BLKSIZ);
 
 			/* string comparison without already matched prefix */
 			diff = dirnamecmp(name, &dname, &matched);
 			kunmap_atomic(de);
 
 			if (unlikely(!diff)) {
-				*_diff = 0;
-				goto exact_out;
+				*_ndirents = 0;
+				goto out;
 			} else if (diff > 0) {
 				head = mid + 1;
 				startprfx = matched;
@@ -147,45 +152,51 @@
 				if (likely(!IS_ERR(candidate)))
 					put_page(candidate);
 				candidate = page;
+				*_ndirents = ndirents;
 			} else {
 				put_page(page);
 
-				if (unlikely(mid < 1))	/* fix "mid" overflow */
-					break;
-
 				back = mid - 1;
 				endprfx = matched;
 			}
+			continue;
 		}
+out:		/* free if the candidate is valid */
+		if (!IS_ERR(candidate))
+			put_page(candidate);
+		return page;
 	}
-	*_diff = 1;
 	return candidate;
 }
 
 int erofs_namei(struct inode *dir,
-	struct qstr *name,
-	erofs_nid_t *nid, unsigned *d_type)
+		struct qstr *name,
+		erofs_nid_t *nid, unsigned int *d_type)
 {
-	int diff;
+	int ndirents;
 	struct page *page;
-	u8 *data;
+	void *data;
 	struct erofs_dirent *de;
+	struct erofs_qstr qn;
 
 	if (unlikely(!dir->i_size))
 		return -ENOENT;
 
-	diff = 1;
-	page = find_target_block_classic(dir, name, &diff);
+	qn.name = name->name;
+	qn.end = name->name + name->len;
+
+	ndirents = 0;
+	page = find_target_block_classic(dir, &qn, &ndirents);
 
 	if (unlikely(IS_ERR(page)))
 		return PTR_ERR(page);
 
 	data = kmap_atomic(page);
 	/* the target page has been mapped */
-	de = likely(diff) ?
-		/* since the rest of the last page is 0 */
-		find_target_dirent(name, data, EROFS_BLKSIZ) :
-		(struct erofs_dirent *)data;
+	if (ndirents)
+		de = find_target_dirent(&qn, data, EROFS_BLKSIZ, ndirents);
+	else
+		de = (struct erofs_dirent *)data;
 
 	if (likely(!IS_ERR(de))) {
 		*nid = le64_to_cpu(de->nid);
diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
index 1279241..f44662d 100644
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -57,15 +57,30 @@
 	Z_EROFS_VLE_WORK_SECONDARY,
 	Z_EROFS_VLE_WORK_PRIMARY,
 	/*
-	 * The current work has at least been linked with the following
-	 * processed chained works, which means if the processing page
-	 * is the tail partial page of the work, the current work can
-	 * safely use the whole page, as illustrated below:
-	 * +--------------+-------------------------------------------+
-	 * |  tail page   |      head page (of the previous work)     |
-	 * +--------------+-------------------------------------------+
-	 *   /\  which belongs to the current work
-	 * [  (*) this page can be used for the current work itself.  ]
+	 * The current work was the tail of an exist chain, and the previous
+	 * processed chained works are all decided to be hooked up to it.
+	 * A new chain should be created for the remaining unprocessed works,
+	 * therefore different from Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
+	 * the next work cannot reuse the whole page in the following scenario:
+	 *  ________________________________________________________________
+	 * |      tail (partial) page     |       head (partial) page       |
+	 * |  (belongs to the next work)  |  (belongs to the current work)  |
+	 * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
+	 */
+	Z_EROFS_VLE_WORK_PRIMARY_HOOKED,
+	/*
+	 * The current work has been linked with the processed chained works,
+	 * and could be also linked with the potential remaining works, which
+	 * means if the processing page is the tail partial page of the work,
+	 * the current work can safely use the whole page (since the next work
+	 * is under control) for in-place decompression, as illustrated below:
+	 *  ________________________________________________________________
+	 * |  tail (partial) page  |          head (partial) page           |
+	 * | (of the current work) |         (of the previous work)         |
+	 * |  PRIMARY_FOLLOWED or  |                                        |
+	 * |_____PRIMARY_HOOKED____|____________PRIMARY_FOLLOWED____________|
+	 *
+	 * [  (*) the above page can be used for the current work itself.  ]
 	 */
 	Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
 	Z_EROFS_VLE_WORK_MAX
@@ -234,10 +249,10 @@
 	return ret ? 0 : -EAGAIN;
 }
 
-static inline bool try_to_claim_workgroup(
-	struct z_erofs_vle_workgroup *grp,
-	z_erofs_vle_owned_workgrp_t *owned_head,
-	bool *hosted)
+static enum z_erofs_vle_work_role
+try_to_claim_workgroup(struct z_erofs_vle_workgroup *grp,
+		       z_erofs_vle_owned_workgrp_t *owned_head,
+		       bool *hosted)
 {
 	DBG_BUGON(*hosted == true);
 
@@ -251,6 +266,9 @@
 
 		*owned_head = grp;
 		*hosted = true;
+		/* lucky, I am the followee :) */
+		return Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
+
 	} else if (grp->next == Z_EROFS_VLE_WORKGRP_TAIL) {
 		/*
 		 * type 2, link to the end of a existing open chain,
@@ -260,12 +278,11 @@
 		if (Z_EROFS_VLE_WORKGRP_TAIL != cmpxchg(&grp->next,
 			Z_EROFS_VLE_WORKGRP_TAIL, *owned_head))
 			goto retry;
-
 		*owned_head = Z_EROFS_VLE_WORKGRP_TAIL;
-	} else
-		return false;	/* :( better luck next time */
+		return Z_EROFS_VLE_WORK_PRIMARY_HOOKED;
+	}
 
-	return true;	/* lucky, I am the followee :) */
+	return Z_EROFS_VLE_WORK_PRIMARY; /* :( better luck next time */
 }
 
 static struct z_erofs_vle_work *
@@ -337,12 +354,8 @@
 	*hosted = false;
 	if (!primary)
 		*role = Z_EROFS_VLE_WORK_SECONDARY;
-	/* claim the workgroup if possible */
-	else if (try_to_claim_workgroup(grp, owned_head, hosted))
-		*role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
-	else
-		*role = Z_EROFS_VLE_WORK_PRIMARY;
-
+	else	/* claim the workgroup if possible */
+		*role = try_to_claim_workgroup(grp, owned_head, hosted);
 	return work;
 }
 
@@ -419,6 +432,9 @@
 	}
 }
 
+#define builder_is_hooked(builder) \
+	((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_HOOKED)
+
 #define builder_is_followed(builder) \
 	((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
 
@@ -583,7 +599,7 @@
 	struct z_erofs_vle_work_builder *const builder = &fe->builder;
 	const loff_t offset = page_offset(page);
 
-	bool tight = builder_is_followed(builder);
+	bool tight = builder_is_hooked(builder);
 	struct z_erofs_vle_work *work = builder->work;
 
 #ifdef EROFS_FS_HAS_MANAGED_CACHE
@@ -606,8 +622,12 @@
 
 	/* lucky, within the range of the current map_blocks */
 	if (offset + cur >= map->m_la &&
-		offset + cur < map->m_la + map->m_llen)
+		offset + cur < map->m_la + map->m_llen) {
+		/* didn't get a valid unzip work previously (very rare) */
+		if (!builder->work)
+			goto restart_now;
 		goto hitted;
+	}
 
 	/* go ahead the next map_blocks */
 	debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);
@@ -621,6 +641,7 @@
 	if (unlikely(err))
 		goto err_out;
 
+restart_now:
 	if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
 		goto hitted;
 
@@ -646,7 +667,7 @@
 		builder->role = Z_EROFS_VLE_WORK_PRIMARY;
 #endif
 
-	tight &= builder_is_followed(builder);
+	tight &= builder_is_hooked(builder);
 	work = builder->work;
 hitted:
 	cur = end - min_t(unsigned, offset + end - map->m_la, end);
@@ -661,6 +682,9 @@
 			(tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
 				Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));
 
+	if (cur)
+		tight &= builder_is_followed(builder);
+
 retry:
 	err = z_erofs_vle_work_add_page(builder, page, page_type);
 	/* should allocate an additional staging page for pagevec */
@@ -901,11 +925,10 @@
 	if (llen > grp->llen)
 		llen = grp->llen;
 
-	err = z_erofs_vle_unzip_fast_percpu(compressed_pages,
-		clusterpages, pages, llen, work->pageofs,
-		z_erofs_onlinepage_endio);
+	err = z_erofs_vle_unzip_fast_percpu(compressed_pages, clusterpages,
+					    pages, llen, work->pageofs);
 	if (err != -ENOTSUPP)
-		goto out_percpu;
+		goto out;
 
 	if (sparsemem_pages >= nr_pages)
 		goto skip_allocpage;
@@ -926,21 +949,7 @@
 	erofs_vunmap(vout, nr_pages);
 
 out:
-	for (i = 0; i < nr_pages; ++i) {
-		page = pages[i];
-		DBG_BUGON(page->mapping == NULL);
-
-		/* recycle all individual staging pages */
-		if (z_erofs_gather_if_stagingpage(page_pool, page))
-			continue;
-
-		if (unlikely(err < 0))
-			SetPageError(page);
-
-		z_erofs_onlinepage_endio(page);
-	}
-
-out_percpu:
+	/* must handle all compressed pages before endding pages */
 	for (i = 0; i < clusterpages; ++i) {
 		page = compressed_pages[i];
 
@@ -954,6 +963,23 @@
 		WRITE_ONCE(compressed_pages[i], NULL);
 	}
 
+	for (i = 0; i < nr_pages; ++i) {
+		page = pages[i];
+		if (!page)
+			continue;
+
+		DBG_BUGON(page->mapping == NULL);
+
+		/* recycle all individual staging pages */
+		if (z_erofs_gather_if_stagingpage(page_pool, page))
+			continue;
+
+		if (unlikely(err < 0))
+			SetPageError(page);
+
+		z_erofs_onlinepage_endio(page);
+	}
+
 	if (pages == z_pagemap_global)
 		mutex_unlock(&z_pagemap_global_lock);
 	else if (unlikely(pages != pages_onstack))
diff --git a/drivers/staging/erofs/unzip_vle.h b/drivers/staging/erofs/unzip_vle.h
index 3316bc3..684ff06 100644
--- a/drivers/staging/erofs/unzip_vle.h
+++ b/drivers/staging/erofs/unzip_vle.h
@@ -218,8 +218,7 @@
 
 extern int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
 	unsigned clusterpages, struct page **pages,
-	unsigned outlen, unsigned short pageofs,
-	void (*endio)(struct page *));
+	unsigned int outlen, unsigned short pageofs);
 
 extern int z_erofs_vle_unzip_vmap(struct page **compressed_pages,
 	unsigned clusterpages, void *vaddr, unsigned llen,
diff --git a/drivers/staging/erofs/unzip_vle_lz4.c b/drivers/staging/erofs/unzip_vle_lz4.c
index 9cb35cd..055420e 100644
--- a/drivers/staging/erofs/unzip_vle_lz4.c
+++ b/drivers/staging/erofs/unzip_vle_lz4.c
@@ -105,8 +105,7 @@
 				  unsigned clusterpages,
 				  struct page **pages,
 				  unsigned outlen,
-				  unsigned short pageofs,
-				  void (*endio)(struct page *))
+				  unsigned short pageofs)
 {
 	void *vin, *vout;
 	unsigned nr_pages, i, j;
@@ -128,31 +127,30 @@
 	ret = z_erofs_unzip_lz4(vin, vout + pageofs,
 		clusterpages * PAGE_SIZE, outlen);
 
-	if (ret >= 0) {
-		outlen = ret;
-		ret = 0;
-	}
+	if (ret < 0)
+		goto out;
+	ret = 0;
 
 	for (i = 0; i < nr_pages; ++i) {
 		j = min((unsigned)PAGE_SIZE - pageofs, outlen);
 
 		if (pages[i] != NULL) {
-			if (ret < 0)
-				SetPageError(pages[i]);
-			else if (clusterpages == 1 && pages[i] == compressed_pages[0])
+			if (clusterpages == 1 &&
+			    pages[i] == compressed_pages[0]) {
 				memcpy(vin + pageofs, vout + pageofs, j);
-			else {
+			} else {
 				void *dst = kmap_atomic(pages[i]);
 
 				memcpy(dst + pageofs, vout + pageofs, j);
 				kunmap_atomic(dst);
 			}
-			endio(pages[i]);
 		}
 		vout += PAGE_SIZE;
 		outlen -= j;
 		pageofs = 0;
 	}
+
+out:
 	preempt_enable();
 
 	if (clusterpages == 1)
diff --git a/drivers/staging/erofs/utils.c b/drivers/staging/erofs/utils.c
index dd2ac9d..2d96820 100644
--- a/drivers/staging/erofs/utils.c
+++ b/drivers/staging/erofs/utils.c
@@ -87,12 +87,21 @@
 		grp = (void *)((unsigned long)grp |
 			1UL << RADIX_TREE_EXCEPTIONAL_SHIFT);
 
-	err = radix_tree_insert(&sbi->workstn_tree,
-		grp->index, grp);
+	/*
+	 * Bump up reference count before making this workgroup
+	 * visible to other users in order to avoid potential UAF
+	 * without serialized by erofs_workstn_lock.
+	 */
+	__erofs_workgroup_get(grp);
 
-	if (!err) {
-		__erofs_workgroup_get(grp);
-	}
+	err = radix_tree_insert(&sbi->workstn_tree,
+				grp->index, grp);
+	if (unlikely(err))
+		/*
+		 * it's safe to decrease since the workgroup isn't visible
+		 * and refcount >= 2 (cannot be freezed).
+		 */
+		__erofs_workgroup_put(grp);
 
 	erofs_workstn_unlock(sbi);
 	radix_tree_preload_end();
@@ -101,19 +110,99 @@
 
 extern void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);
 
+static void  __erofs_workgroup_free(struct erofs_workgroup *grp)
+{
+	atomic_long_dec(&erofs_global_shrink_cnt);
+	erofs_workgroup_free_rcu(grp);
+}
+
 int erofs_workgroup_put(struct erofs_workgroup *grp)
 {
 	int count = atomic_dec_return(&grp->refcount);
 
 	if (count == 1)
 		atomic_long_inc(&erofs_global_shrink_cnt);
-	else if (!count) {
-		atomic_long_dec(&erofs_global_shrink_cnt);
-		erofs_workgroup_free_rcu(grp);
-	}
+	else if (!count)
+		__erofs_workgroup_free(grp);
 	return count;
 }
 
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+/* for cache-managed case, customized reclaim paths exist */
+static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp)
+{
+	erofs_workgroup_unfreeze(grp, 0);
+	__erofs_workgroup_free(grp);
+}
+
+bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
+				    struct erofs_workgroup *grp,
+				    bool cleanup)
+{
+	void *entry;
+
+	/*
+	 * for managed cache enabled, the refcount of workgroups
+	 * themselves could be < 0 (freezed). So there is no guarantee
+	 * that all refcount > 0 if managed cache is enabled.
+	 */
+	if (!erofs_workgroup_try_to_freeze(grp, 1))
+		return false;
+
+	/*
+	 * note that all cached pages should be unlinked
+	 * before delete it from the radix tree.
+	 * Otherwise some cached pages of an orphan old workgroup
+	 * could be still linked after the new one is available.
+	 */
+	if (erofs_try_to_free_all_cached_pages(sbi, grp)) {
+		erofs_workgroup_unfreeze(grp, 1);
+		return false;
+	}
+
+	/*
+	 * it is impossible to fail after the workgroup is freezed,
+	 * however in order to avoid some race conditions, add a
+	 * DBG_BUGON to observe this in advance.
+	 */
+	entry = radix_tree_delete(&sbi->workstn_tree, grp->index);
+	DBG_BUGON((void *)((unsigned long)entry &
+			   ~RADIX_TREE_EXCEPTIONAL_ENTRY) != grp);
+
+	/*
+	 * if managed cache is enable, the last refcount
+	 * should indicate the related workstation.
+	 */
+	erofs_workgroup_unfreeze_final(grp);
+	return true;
+}
+
+#else
+/* for nocache case, no customized reclaim path at all */
+bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
+				    struct erofs_workgroup *grp,
+				    bool cleanup)
+{
+	int cnt = atomic_read(&grp->refcount);
+	void *entry;
+
+	DBG_BUGON(cnt <= 0);
+	DBG_BUGON(cleanup && cnt != 1);
+
+	if (cnt > 1)
+		return false;
+
+	entry = radix_tree_delete(&sbi->workstn_tree, grp->index);
+	DBG_BUGON((void *)((unsigned long)entry &
+			   ~RADIX_TREE_EXCEPTIONAL_ENTRY) != grp);
+
+	/* (rarely) could be grabbed again when freeing */
+	erofs_workgroup_put(grp);
+	return true;
+}
+
+#endif
+
 unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
 				       unsigned long nr_shrink,
 				       bool cleanup)
@@ -130,44 +219,16 @@
 		batch, first_index, PAGEVEC_SIZE);
 
 	for (i = 0; i < found; ++i) {
-		int cnt;
 		struct erofs_workgroup *grp = (void *)
 			((unsigned long)batch[i] &
 				~RADIX_TREE_EXCEPTIONAL_ENTRY);
 
 		first_index = grp->index + 1;
 
-		cnt = atomic_read(&grp->refcount);
-		BUG_ON(cnt <= 0);
-
-		if (cleanup)
-			BUG_ON(cnt != 1);
-
-#ifndef EROFS_FS_HAS_MANAGED_CACHE
-		else if (cnt > 1)
-#else
-		if (!erofs_workgroup_try_to_freeze(grp, 1))
-#endif
+		/* try to shrink each valid workgroup */
+		if (!erofs_try_to_release_workgroup(sbi, grp, cleanup))
 			continue;
 
-		if (radix_tree_delete(&sbi->workstn_tree,
-			grp->index) != grp) {
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
-skip:
-			erofs_workgroup_unfreeze(grp, 1);
-#endif
-			continue;
-		}
-
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
-		if (erofs_try_to_free_all_cached_pages(sbi, grp))
-			goto skip;
-
-		erofs_workgroup_unfreeze(grp, 1);
-#endif
-		/* (rarely) grabbed again when freeing */
-		erofs_workgroup_put(grp);
-
 		++freed;
 		if (unlikely(!--nr_shrink))
 			break;
diff --git a/drivers/staging/erofs/xattr.c b/drivers/staging/erofs/xattr.c
index 0e9cfec..2db99cf 100644
--- a/drivers/staging/erofs/xattr.c
+++ b/drivers/staging/erofs/xattr.c
@@ -24,36 +24,77 @@
 
 static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
 {
-	/* only init_inode_xattrs use non-atomic once */
+	/* the only user of kunmap() is 'init_inode_xattrs' */
 	if (unlikely(!atomic))
 		kunmap(it->page);
 	else
 		kunmap_atomic(it->kaddr);
+
 	unlock_page(it->page);
 	put_page(it->page);
 }
 
-static void init_inode_xattrs(struct inode *inode)
+static inline void xattr_iter_end_final(struct xattr_iter *it)
 {
+	if (!it->page)
+		return;
+
+	xattr_iter_end(it, true);
+}
+
+static int init_inode_xattrs(struct inode *inode)
+{
+	struct erofs_vnode *const vi = EROFS_V(inode);
 	struct xattr_iter it;
 	unsigned i;
 	struct erofs_xattr_ibody_header *ih;
 	struct erofs_sb_info *sbi;
-	struct erofs_vnode *vi;
 	bool atomic_map;
+	int ret = 0;
 
-	if (likely(inode_has_inited_xattr(inode)))
-		return;
+	/* the most case is that xattrs of this inode are initialized. */
+	if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
+		return 0;
 
-	vi = EROFS_V(inode);
-	BUG_ON(!vi->xattr_isize);
+	if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_XATTR_BIT, TASK_KILLABLE))
+		return -ERESTARTSYS;
+
+	/* someone has initialized xattrs for us? */
+	if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
+		goto out_unlock;
+
+	/*
+	 * bypass all xattr operations if ->xattr_isize is not greater than
+	 * sizeof(struct erofs_xattr_ibody_header), in detail:
+	 * 1) it is not enough to contain erofs_xattr_ibody_header then
+	 *    ->xattr_isize should be 0 (it means no xattr);
+	 * 2) it is just to contain erofs_xattr_ibody_header, which is on-disk
+	 *    undefined right now (maybe use later with some new sb feature).
+	 */
+	if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
+		errln("xattr_isize %d of nid %llu is not supported yet",
+		      vi->xattr_isize, vi->nid);
+		ret = -ENOTSUPP;
+		goto out_unlock;
+	} else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
+		if (unlikely(vi->xattr_isize)) {
+			DBG_BUGON(1);
+			ret = -EIO;
+			goto out_unlock;	/* xattr ondisk layout error */
+		}
+		ret = -ENOATTR;
+		goto out_unlock;
+	}
 
 	sbi = EROFS_I_SB(inode);
 	it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
 	it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);
 
 	it.page = erofs_get_inline_page(inode, it.blkaddr);
-	BUG_ON(IS_ERR(it.page));
+	if (IS_ERR(it.page)) {
+		ret = PTR_ERR(it.page);
+		goto out_unlock;
+	}
 
 	/* read in shared xattr array (non-atomic, see kmalloc below) */
 	it.kaddr = kmap(it.page);
@@ -62,9 +103,13 @@
 	ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);
 
 	vi->xattr_shared_count = ih->h_shared_count;
-	vi->xattr_shared_xattrs = (unsigned *)kmalloc_array(
-		vi->xattr_shared_count, sizeof(unsigned),
-		GFP_KERNEL | __GFP_NOFAIL);
+	vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
+						sizeof(uint), GFP_KERNEL);
+	if (!vi->xattr_shared_xattrs) {
+		xattr_iter_end(&it, atomic_map);
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
 
 	/* let's skip ibody header */
 	it.ofs += sizeof(struct erofs_xattr_ibody_header);
@@ -77,7 +122,12 @@
 
 			it.page = erofs_get_meta_page(inode->i_sb,
 				++it.blkaddr, S_ISDIR(inode->i_mode));
-			BUG_ON(IS_ERR(it.page));
+			if (IS_ERR(it.page)) {
+				kfree(vi->xattr_shared_xattrs);
+				vi->xattr_shared_xattrs = NULL;
+				ret = PTR_ERR(it.page);
+				goto out_unlock;
+			}
 
 			it.kaddr = kmap_atomic(it.page);
 			atomic_map = true;
@@ -89,7 +139,11 @@
 	}
 	xattr_iter_end(&it, atomic_map);
 
-	inode_set_inited_xattr(inode);
+	set_bit(EROFS_V_EA_INITED_BIT, &vi->flags);
+
+out_unlock:
+	clear_and_wake_up_bit(EROFS_V_BL_XATTR_BIT, &vi->flags);
+	return ret;
 }
 
 struct xattr_iter_handlers {
@@ -99,18 +153,25 @@
 	void (*value)(struct xattr_iter *, unsigned, char *, unsigned);
 };
 
-static void xattr_iter_fixup(struct xattr_iter *it)
+static inline int xattr_iter_fixup(struct xattr_iter *it)
 {
-	if (unlikely(it->ofs >= EROFS_BLKSIZ)) {
-		xattr_iter_end(it, true);
+	if (it->ofs < EROFS_BLKSIZ)
+		return 0;
 
-		it->blkaddr += erofs_blknr(it->ofs);
-		it->page = erofs_get_meta_page(it->sb, it->blkaddr, false);
-		BUG_ON(IS_ERR(it->page));
+	xattr_iter_end(it, true);
 
-		it->kaddr = kmap_atomic(it->page);
-		it->ofs = erofs_blkoff(it->ofs);
+	it->blkaddr += erofs_blknr(it->ofs);
+	it->page = erofs_get_meta_page(it->sb, it->blkaddr, false);
+	if (IS_ERR(it->page)) {
+		int err = PTR_ERR(it->page);
+
+		it->page = NULL;
+		return err;
 	}
+
+	it->kaddr = kmap_atomic(it->page);
+	it->ofs = erofs_blkoff(it->ofs);
+	return 0;
 }
 
 static int inline_xattr_iter_begin(struct xattr_iter *it,
@@ -132,21 +193,24 @@
 	it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);
 
 	it->page = erofs_get_inline_page(inode, it->blkaddr);
-	BUG_ON(IS_ERR(it->page));
-	it->kaddr = kmap_atomic(it->page);
+	if (IS_ERR(it->page))
+		return PTR_ERR(it->page);
 
+	it->kaddr = kmap_atomic(it->page);
 	return vi->xattr_isize - xattr_header_sz;
 }
 
 static int xattr_foreach(struct xattr_iter *it,
-	struct xattr_iter_handlers *op, unsigned *tlimit)
+	const struct xattr_iter_handlers *op, unsigned int *tlimit)
 {
 	struct erofs_xattr_entry entry;
 	unsigned value_sz, processed, slice;
 	int err;
 
 	/* 0. fixup blkaddr, ofs, ipage */
-	xattr_iter_fixup(it);
+	err = xattr_iter_fixup(it);
+	if (err)
+		return err;
 
 	/*
 	 * 1. read xattr entry to the memory,
@@ -178,7 +242,9 @@
 		if (it->ofs >= EROFS_BLKSIZ) {
 			BUG_ON(it->ofs > EROFS_BLKSIZ);
 
-			xattr_iter_fixup(it);
+			err = xattr_iter_fixup(it);
+			if (err)
+				goto out;
 			it->ofs = 0;
 		}
 
@@ -210,7 +276,10 @@
 	while (processed < value_sz) {
 		if (it->ofs >= EROFS_BLKSIZ) {
 			BUG_ON(it->ofs > EROFS_BLKSIZ);
-			xattr_iter_fixup(it);
+
+			err = xattr_iter_fixup(it);
+			if (err)
+				goto out;
 			it->ofs = 0;
 		}
 
@@ -270,7 +339,7 @@
 	memcpy(it->buffer + processed, buf, len);
 }
 
-static struct xattr_iter_handlers find_xattr_handlers = {
+static const struct xattr_iter_handlers find_xattr_handlers = {
 	.entry = xattr_entrymatch,
 	.name = xattr_namematch,
 	.alloc_buffer = xattr_checkbuffer,
@@ -291,8 +360,11 @@
 		ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
 		if (ret >= 0)
 			break;
+
+		if (ret != -ENOATTR)	/* -ENOMEM, -EIO, etc. */
+			break;
 	}
-	xattr_iter_end(&it->it, true);
+	xattr_iter_end_final(&it->it);
 
 	return ret < 0 ? ret : it->buffer_size;
 }
@@ -315,8 +387,10 @@
 				xattr_iter_end(&it->it, true);
 
 			it->it.page = erofs_get_meta_page(inode->i_sb,
-				blkaddr, false);
-			BUG_ON(IS_ERR(it->it.page));
+							  blkaddr, false);
+			if (IS_ERR(it->it.page))
+				return PTR_ERR(it->it.page);
+
 			it->it.kaddr = kmap_atomic(it->it.page);
 			it->it.blkaddr = blkaddr;
 		}
@@ -324,9 +398,12 @@
 		ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
 		if (ret >= 0)
 			break;
+
+		if (ret != -ENOATTR)	/* -ENOMEM, -EIO, etc. */
+			break;
 	}
 	if (vi->xattr_shared_count)
-		xattr_iter_end(&it->it, true);
+		xattr_iter_end_final(&it->it);
 
 	return ret < 0 ? ret : it->buffer_size;
 }
@@ -351,7 +428,9 @@
 	if (unlikely(name == NULL))
 		return -EINVAL;
 
-	init_inode_xattrs(inode);
+	ret = init_inode_xattrs(inode);
+	if (ret)
+		return ret;
 
 	it.index = index;
 
@@ -374,7 +453,6 @@
 		struct dentry *unused, struct inode *inode,
 		const char *name, void *buffer, size_t size)
 {
-	struct erofs_vnode *const vi = EROFS_V(inode);
 	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
 
 	switch (handler->flags) {
@@ -392,9 +470,6 @@
 		return -EINVAL;
 	}
 
-	if (!vi->xattr_isize)
-		return -ENOATTR;
-
 	return erofs_getxattr(inode, handler->flags, name, buffer, size);
 }
 
@@ -494,7 +569,7 @@
 	return 1;
 }
 
-static struct xattr_iter_handlers list_xattr_handlers = {
+static const struct xattr_iter_handlers list_xattr_handlers = {
 	.entry = xattr_entrylist,
 	.name = xattr_namelist,
 	.alloc_buffer = xattr_skipvalue,
@@ -516,7 +591,7 @@
 		if (ret < 0)
 			break;
 	}
-	xattr_iter_end(&it->it, true);
+	xattr_iter_end_final(&it->it);
 	return ret < 0 ? ret : it->buffer_ofs;
 }
 
@@ -538,8 +613,10 @@
 				xattr_iter_end(&it->it, true);
 
 			it->it.page = erofs_get_meta_page(inode->i_sb,
-				blkaddr, false);
-			BUG_ON(IS_ERR(it->it.page));
+							  blkaddr, false);
+			if (IS_ERR(it->it.page))
+				return PTR_ERR(it->it.page);
+
 			it->it.kaddr = kmap_atomic(it->it.page);
 			it->it.blkaddr = blkaddr;
 		}
@@ -549,7 +626,7 @@
 			break;
 	}
 	if (vi->xattr_shared_count)
-		xattr_iter_end(&it->it, true);
+		xattr_iter_end_final(&it->it);
 
 	return ret < 0 ? ret : it->buffer_ofs;
 }
@@ -560,7 +637,9 @@
 	int ret;
 	struct listxattr_iter it;
 
-	init_inode_xattrs(d_inode(dentry));
+	ret = init_inode_xattrs(d_inode(dentry));
+	if (ret)
+		return ret;
 
 	it.dentry = dentry;
 	it.buffer = buffer;
diff --git a/drivers/staging/rtl8723bs/include/ieee80211.h b/drivers/staging/rtl8723bs/include/ieee80211.h
index bcc8dfa..9efb4dc 100644
--- a/drivers/staging/rtl8723bs/include/ieee80211.h
+++ b/drivers/staging/rtl8723bs/include/ieee80211.h
@@ -850,18 +850,18 @@
 #define IP_FMT "%pI4"
 #define IP_ARG(x) (x)
 
-extern __inline int is_multicast_mac_addr(const u8 *addr)
+static inline int is_multicast_mac_addr(const u8 *addr)
 {
         return ((addr[0] != 0xff) && (0x01 & addr[0]));
 }
 
-extern __inline int is_broadcast_mac_addr(const u8 *addr)
+static inline int is_broadcast_mac_addr(const u8 *addr)
 {
 	return ((addr[0] == 0xff) && (addr[1] == 0xff) && (addr[2] == 0xff) &&   \
 		(addr[3] == 0xff) && (addr[4] == 0xff) && (addr[5] == 0xff));
 }
 
-extern __inline int is_zero_mac_addr(const u8 *addr)
+static inline int is_zero_mac_addr(const u8 *addr)
 {
 	return ((addr[0] == 0x00) && (addr[1] == 0x00) && (addr[2] == 0x00) &&   \
 		(addr[3] == 0x00) && (addr[4] == 0x00) && (addr[5] == 0x00));
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
index 3b8d237..649caae 100644
--- a/drivers/staging/wilc1000/linux_wlan.c
+++ b/drivers/staging/wilc1000/linux_wlan.c
@@ -1090,8 +1090,8 @@
 		vif->wilc = *wilc;
 		vif->ndev = ndev;
 		wl->vif[i] = vif;
-		wl->vif_num = i;
-		vif->idx = wl->vif_num;
+		wl->vif_num = i + 1;
+		vif->idx = i;
 
 		ndev->netdev_ops = &wilc_netdev_ops;
 
diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c
index 284cf2c..8e1cf4d 100644
--- a/drivers/thermal/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c
@@ -84,7 +84,12 @@
 	struct pci_dev *pci_dev; \
 	struct platform_device *pdev; \
 	struct proc_thermal_device *proc_dev; \
-\
+	\
+	if (proc_thermal_emum_mode == PROC_THERMAL_NONE) { \
+		dev_warn(dev, "Attempted to get power limit before device was initialized!\n"); \
+		return 0; \
+	} \
+	\
 	if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) { \
 		pdev = to_platform_device(dev); \
 		proc_dev = platform_get_drvdata(pdev); \
@@ -298,11 +303,6 @@
 	*priv = proc_priv;
 
 	ret = proc_thermal_read_ppcc(proc_priv);
-	if (!ret) {
-		ret = sysfs_create_group(&dev->kobj,
-					 &power_limit_attribute_group);
-
-	}
 	if (ret)
 		return ret;
 
@@ -316,8 +316,7 @@
 
 	proc_priv->int340x_zone = int340x_thermal_zone_add(adev, ops);
 	if (IS_ERR(proc_priv->int340x_zone)) {
-		ret = PTR_ERR(proc_priv->int340x_zone);
-		goto remove_group;
+		return PTR_ERR(proc_priv->int340x_zone);
 	} else
 		ret = 0;
 
@@ -331,9 +330,6 @@
 
 remove_zone:
 	int340x_thermal_zone_remove(proc_priv->int340x_zone);
-remove_group:
-	sysfs_remove_group(&proc_priv->dev->kobj,
-			   &power_limit_attribute_group);
 
 	return ret;
 }
@@ -364,7 +360,10 @@
 	platform_set_drvdata(pdev, proc_priv);
 	proc_thermal_emum_mode = PROC_THERMAL_PLATFORM_DEV;
 
-	return 0;
+	dev_info(&pdev->dev, "Creating sysfs group for PROC_THERMAL_PLATFORM_DEV\n");
+
+	return sysfs_create_group(&pdev->dev.kobj,
+					 &power_limit_attribute_group);
 }
 
 static int int3401_remove(struct platform_device *pdev)
@@ -423,7 +422,7 @@
 		proc_priv->soc_dts = intel_soc_dts_iosf_init(
 					INTEL_SOC_DTS_INTERRUPT_MSI, 2, 0);
 
-		if (proc_priv->soc_dts && pdev->irq) {
+		if (!IS_ERR(proc_priv->soc_dts) && pdev->irq) {
 			ret = pci_enable_msi(pdev);
 			if (!ret) {
 				ret = request_threaded_irq(pdev->irq, NULL,
@@ -441,7 +440,10 @@
 			dev_err(&pdev->dev, "No auxiliary DTSs enabled\n");
 	}
 
-	return 0;
+	dev_info(&pdev->dev, "Creating sysfs group for PROC_THERMAL_PCI\n");
+
+	return sysfs_create_group(&pdev->dev.kobj,
+					 &power_limit_attribute_group);
 }
 
 static void  proc_thermal_pci_remove(struct pci_dev *pdev)
diff --git a/drivers/thermal/qcom/qmi_sensors.c b/drivers/thermal/qcom/qmi_sensors.c
index 25e9850..98259a8 100644
--- a/drivers/thermal/qcom/qmi_sensors.c
+++ b/drivers/thermal/qcom/qmi_sensors.c
@@ -32,6 +32,7 @@
 enum qmi_ts_sensor {
 	QMI_TS_PA,
 	QMI_TS_PA_1,
+	QMI_TS_PA_2,
 	QMI_TS_QFE_PA_0,
 	QMI_TS_QFE_WTR_0,
 	QMI_TS_MODEM_MODEM,
@@ -75,6 +76,7 @@
 static char sensor_clients[QMI_TS_MAX_NR][QMI_CLIENT_NAME_LENGTH] = {
 	{"pa"},
 	{"pa_1"},
+	{"pa_2"},
 	{"qfe_pa0"},
 	{"qfe_wtr0"},
 	{"modem_tsens"},
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 7d030c2..50b6746 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -1695,7 +1695,7 @@
 	}
 
 	/* ask the core to calculate the divisor */
-	baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
+	baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 4);
 
 	spin_lock_irqsave(&sport->port.lock, flags);
 
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index 1a74da9..0593b4f 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -1931,6 +1931,13 @@
 	geni_serial_write_term_regs(uport, port->loopback, tx_trans_cfg,
 		tx_parity_cfg, rx_trans_cfg, rx_parity_cfg, bits_per_char,
 		stop_bit_len, ser_clk_cfg);
+
+	if (termios->c_cflag & CRTSCTS) {
+		geni_write_reg_nolog(0x0, uport->membase, SE_UART_MANUAL_RFR);
+		IPC_LOG_MSG(port->ipc_log_misc, "%s: Manual flow off\n",
+				__func__);
+	}
+
 	IPC_LOG_MSG(port->ipc_log_misc, "%s: baud %d\n", __func__, baud);
 	IPC_LOG_MSG(port->ipc_log_misc, "Tx: trans_cfg%d parity %d\n",
 						tx_trans_cfg, tx_parity_cfg);
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 1515074..35d1f6fa 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -221,7 +221,7 @@
 	unsigned int mctrl = TIOCM_DSR | TIOCM_CAR;
 	u32 geni_ios;
 
-	if (uart_console(uport) || !uart_cts_enabled(uport)) {
+	if (uart_console(uport)) {
 		mctrl |= TIOCM_CTS;
 	} else {
 		geni_ios = readl_relaxed(uport->membase + SE_GENI_IOS);
@@ -237,7 +237,7 @@
 {
 	u32 uart_manual_rfr = 0;
 
-	if (uart_console(uport) || !uart_cts_enabled(uport))
+	if (uart_console(uport))
 		return;
 
 	if (!(mctrl & TIOCM_RTS))
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 6bbea04..b57591c 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2210,10 +2210,8 @@
 
 		ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
 				msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
-		if (ret == 0) {
+		if (ret == 0)
 			dev_err(dwc->dev, "timed out waiting for SETUP phase\n");
-			return -ETIMEDOUT;
-		}
 	}
 
 	spin_lock_irqsave(&dwc->lock, flags);
@@ -2423,6 +2421,7 @@
 
 	/* begin to receive SETUP packets */
 	dwc->ep0state = EP0_SETUP_PHASE;
+	dwc->link_state = DWC3_LINK_STATE_SS_DIS;
 	dwc3_ep0_out_start(dwc);
 
 	dwc3_gadget_enable_irq(dwc);
@@ -3963,6 +3962,8 @@
 	dwc3_disconnect_gadget(dwc);
 	__dwc3_gadget_stop(dwc);
 
+	synchronize_irq(dwc->irq_gadget);
+
 	return 0;
 }
 
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 7c2b88d..730ba2b 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -146,6 +146,7 @@
 			struct usb_function *f,
 			struct usb_ep *_ep)
 {
+	struct usb_composite_dev *cdev;
 	struct usb_endpoint_descriptor *chosen_desc = NULL;
 	struct usb_descriptor_header **speed_desc = NULL;
 
@@ -157,6 +158,8 @@
 	if (!g || !f || !_ep)
 		return -EIO;
 
+	cdev = get_gadget_data(g);
+
 	/* select desired speed */
 	switch (g->speed) {
 	case USB_SPEED_SUPER_PLUS:
@@ -182,6 +185,13 @@
 	default:
 		speed_desc = f->fs_descriptors;
 	}
+
+	if (!speed_desc) {
+		DBG(cdev, "%s desc not present for function %s\n",
+			usb_speed_string(g->speed), f->name);
+		return -EIO;
+	}
+
 	/* find descriptors */
 	for_each_ep_desc(speed_desc, d_spd) {
 		chosen_desc = (struct usb_endpoint_descriptor *)*d_spd;
@@ -225,12 +235,9 @@
 			_ep->maxburst = comp_desc->bMaxBurst + 1;
 			break;
 		default:
-			if (comp_desc->bMaxBurst != 0) {
-				struct usb_composite_dev *cdev;
-
-				cdev = get_gadget_data(g);
+			if (comp_desc->bMaxBurst != 0)
 				ERROR(cdev, "ep0 bMaxBurst must be 0\n");
-			}
+
 			_ep->maxburst = 1;
 			break;
 		}
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index d1b0725..3309c1f 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -15,11 +15,16 @@
 #include <linux/kdev_t.h>
 #include <linux/usb/ch9.h>
 
+#ifdef CONFIG_USB_F_NCM
+#include <function/u_ncm.h>
+#endif
+
 #ifdef CONFIG_USB_CONFIGFS_F_ACC
 extern int acc_ctrlrequest(struct usb_composite_dev *cdev,
 				const struct usb_ctrlrequest *ctrl);
 void acc_disconnect(void);
 #endif
+
 static struct class *android_class;
 static struct device *android_device;
 static int index;
@@ -1508,6 +1513,18 @@
 		}
 	}
 
+#ifdef CONFIG_USB_F_NCM
+	if (value < 0)
+		value = ncm_ctrlrequest(cdev, c);
+
+	/*
+	 * for mirror link command case, if it already been handled,
+	 * do not pass to composite_setup
+	 */
+	if (value == 0)
+		return value;
+#endif
+
 #ifdef CONFIG_USB_CONFIGFS_F_ACC
 	if (value < 0)
 		value = acc_ctrlrequest(cdev, c);
diff --git a/drivers/usb/gadget/function/f_cdev.c b/drivers/usb/gadget/function/f_cdev.c
index f4dcbc8..6060761 100644
--- a/drivers/usb/gadget/function/f_cdev.c
+++ b/drivers/usb/gadget/function/f_cdev.c
@@ -128,6 +128,8 @@
 	unsigned long           nbytes_to_port_bridge;
 	unsigned long		nbytes_from_port_bridge;
 
+	struct dentry		*debugfs_root;
+
 	/* To test remote wakeup using debugfs */
 	u8 debugfs_rw_enable;
 };
@@ -139,12 +141,6 @@
 	u8 port_num;
 };
 
-struct usb_cser_debugfs {
-	struct dentry *debugfs_root;
-};
-
-static struct usb_cser_debugfs debugfs;
-
 static int major, minors;
 struct class *fcdev_classp;
 static DEFINE_IDA(chardev_ida);
@@ -157,7 +153,7 @@
 static void usb_cser_disconnect(struct f_cdev *port);
 static struct f_cdev *f_cdev_alloc(char *func_name, int portno);
 static void usb_cser_free_req(struct usb_ep *ep, struct usb_request *req);
-static void usb_cser_debugfs_exit(void);
+static void usb_cser_debugfs_exit(struct f_cdev *port);
 
 static struct usb_interface_descriptor cser_interface_desc = {
 	.bLength =		USB_DT_INTERFACE_SIZE,
@@ -858,9 +854,9 @@
 	if (opts->port) {
 		device_destroy(fcdev_classp, MKDEV(major, opts->port->minor));
 		cdev_del(&opts->port->fcdev_cdev);
+		usb_cser_debugfs_exit(opts->port);
 	}
 	usb_cser_chardev_deinit();
-	usb_cser_debugfs_exit();
 	kfree(opts->func_name);
 	kfree(opts->port);
 	kfree(opts);
@@ -1638,17 +1634,17 @@
 
 static void usb_cser_debugfs_init(struct f_cdev *port)
 {
-	debugfs.debugfs_root = debugfs_create_dir(port->name, NULL);
-	if (IS_ERR(debugfs.debugfs_root))
+	port->debugfs_root = debugfs_create_dir(port->name, NULL);
+	if (IS_ERR(port->debugfs_root))
 		return;
 
 	debugfs_create_file("remote_wakeup", 0600,
-			debugfs.debugfs_root, port, &cser_rem_wakeup_fops);
+			port->debugfs_root, port, &cser_rem_wakeup_fops);
 }
 
-static void usb_cser_debugfs_exit(void)
+static void usb_cser_debugfs_exit(struct f_cdev *port)
 {
-	debugfs_remove_recursive(debugfs.debugfs_root);
+	debugfs_remove_recursive(port->debugfs_root);
 }
 
 static struct f_cdev *f_cdev_alloc(char *func_name, int portno)
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 3137125..9b1224c 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -38,6 +38,18 @@
 
 #define FUNCTIONFS_MAGIC	0xa647361 /* Chosen by a honest dice roll ;) */
 
+#define NUM_PAGES	10 /* # of pages for ipc logging */
+
+#ifdef CONFIG_DYNAMIC_DEBUG
+#define ffs_log(fmt, ...) do { \
+	ipc_log_string(ffs->ipc_log, "%s: " fmt,  __func__, ##__VA_ARGS__); \
+	dynamic_pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
+} while (0)
+#else
+#define ffs_log(fmt, ...) \
+	ipc_log_string(ffs->ipc_log, "%s: " fmt,  __func__, ##__VA_ARGS__)
+#endif
+
 /* Reference counter handling */
 static void ffs_data_get(struct ffs_data *ffs);
 static void ffs_data_put(struct ffs_data *ffs);
@@ -275,6 +287,9 @@
 
 	spin_unlock_irq(&ffs->ev.waitq.lock);
 
+	ffs_log("enter: state %d setup_state %d flags %lu", ffs->state,
+		ffs->setup_state, ffs->flags);
+
 	req->buf      = data;
 	req->length   = len;
 
@@ -299,11 +314,18 @@
 	}
 
 	ffs->setup_state = FFS_NO_SETUP;
+
+	ffs_log("exit: state %d setup_state %d flags %lu", ffs->state,
+		ffs->setup_state, ffs->flags);
+
 	return req->status ? req->status : req->actual;
 }
 
 static int __ffs_ep0_stall(struct ffs_data *ffs)
 {
+	ffs_log("state %d setup_state %d flags %lu can_stall %d", ffs->state,
+		ffs->setup_state, ffs->flags, ffs->ev.can_stall);
+
 	if (ffs->ev.can_stall) {
 		pr_vdebug("ep0 stall\n");
 		usb_ep_set_halt(ffs->gadget->ep0);
@@ -324,6 +346,9 @@
 
 	ENTER();
 
+	ffs_log("enter:len %zu state %d setup_state %d flags %lu", len,
+		ffs->state, ffs->setup_state, ffs->flags);
+
 	/* Fast check if setup was canceled */
 	if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
 		return -EIDRM;
@@ -452,6 +477,9 @@
 		break;
 	}
 
+	ffs_log("exit:ret %zd state %d setup_state %d flags %lu", ret,
+		ffs->state, ffs->setup_state, ffs->flags);
+
 	mutex_unlock(&ffs->mutex);
 	return ret;
 }
@@ -486,6 +514,10 @@
 			ffs->ev.count * sizeof *ffs->ev.types);
 
 	spin_unlock_irq(&ffs->ev.waitq.lock);
+
+	ffs_log("state %d setup_state %d flags %lu #evt %zu", ffs->state,
+		ffs->setup_state, ffs->flags, n);
+
 	mutex_unlock(&ffs->mutex);
 
 	return unlikely(copy_to_user(buf, events, size)) ? -EFAULT : size;
@@ -501,6 +533,9 @@
 
 	ENTER();
 
+	ffs_log("enter:len %zu state %d setup_state %d flags %lu", len,
+		ffs->state, ffs->setup_state, ffs->flags);
+
 	/* Fast check if setup was canceled */
 	if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
 		return -EIDRM;
@@ -590,8 +625,12 @@
 
 	spin_unlock_irq(&ffs->ev.waitq.lock);
 done_mutex:
+	ffs_log("exit:ret %d state %d setup_state %d flags %lu", ret,
+		ffs->state, ffs->setup_state, ffs->flags);
+
 	mutex_unlock(&ffs->mutex);
 	kfree(data);
+
 	return ret;
 }
 
@@ -601,6 +640,9 @@
 
 	ENTER();
 
+	ffs_log("state %d setup_state %d flags %lu opened %d", ffs->state,
+		ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
 	if (unlikely(ffs->state == FFS_CLOSING))
 		return -EBUSY;
 
@@ -616,6 +658,9 @@
 
 	ENTER();
 
+	ffs_log("state %d setup_state %d flags %lu opened %d", ffs->state,
+		ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
 	ffs_data_closed(ffs);
 
 	return 0;
@@ -629,6 +674,9 @@
 
 	ENTER();
 
+	ffs_log("state %d setup_state %d flags %lu opened %d", ffs->state,
+		ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
 	if (code == FUNCTIONFS_INTERFACE_REVMAP) {
 		struct ffs_function *func = ffs->func;
 		ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
@@ -647,6 +695,9 @@
 	__poll_t mask = EPOLLWRNORM;
 	int ret;
 
+	ffs_log("enter:state %d setup_state %d flags %lu opened %d", ffs->state,
+		ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
 	poll_wait(file, &ffs->ev.waitq, wait);
 
 	ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
@@ -677,6 +728,8 @@
 		break;
 	}
 
+	ffs_log("exit: mask %u", mask);
+
 	mutex_unlock(&ffs->mutex);
 
 	return mask;
@@ -753,10 +806,13 @@
 {
 	struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
 						   work);
+	struct ffs_data *ffs = io_data->ffs;
 	int ret = io_data->req->status ? io_data->req->status :
 					 io_data->req->actual;
 	bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
 
+	ffs_log("enter: ret %d for %s", ret, io_data->read ? "read" : "write");
+
 	if (io_data->read && ret > 0) {
 		mm_segment_t oldfs = get_fs();
 
@@ -778,6 +834,8 @@
 		kfree(io_data->to_free);
 	kfree(io_data->buf);
 	kfree(io_data);
+
+	ffs_log("exit");
 }
 
 static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
@@ -788,6 +846,8 @@
 
 	ENTER();
 
+	ffs_log("enter");
+
 	INIT_WORK(&io_data->work, ffs_user_copy_worker);
 	queue_work(ffs->io_completion_wq, &io_data->work);
 }
@@ -877,12 +937,15 @@
 static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
 {
 	struct ffs_epfile *epfile = file->private_data;
+	struct ffs_data *ffs = epfile->ffs;
 	struct usb_request *req;
 	struct ffs_ep *ep;
 	char *data = NULL;
 	ssize_t ret, data_len = -EINVAL;
 	int halt;
 
+	ffs_log("enter: %s", epfile->name);
+
 	/* Are we still active? */
 	if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
 		return -ENODEV;
@@ -1000,6 +1063,8 @@
 
 		spin_unlock_irq(&epfile->ffs->eps_lock);
 
+		ffs_log("queued %d bytes on %s", data_len, epfile->name);
+
 		if (unlikely(wait_for_completion_interruptible(&done))) {
 			/*
 			 * To avoid race condition with ffs_epfile_io_complete,
@@ -1011,6 +1076,9 @@
 			interrupted = ep->status < 0;
 		}
 
+		ffs_log("%s:ep status %d for req %pK", epfile->name, ep->status,
+				req);
+
 		if (interrupted)
 			ret = -EINTR;
 		else if (io_data->read && ep->status > 0)
@@ -1039,6 +1107,8 @@
 			goto error_lock;
 		}
 
+		ffs_log("queued %d bytes on %s", data_len, epfile->name);
+
 		ret = -EIOCBQUEUED;
 		/*
 		 * Do not kfree the buffer in this function.  It will be freed
@@ -1053,6 +1123,9 @@
 	mutex_unlock(&epfile->mutex);
 error:
 	kfree(data);
+
+	ffs_log("exit: %s ret %zd", epfile->name, ret);
+
 	return ret;
 }
 
@@ -1060,9 +1133,14 @@
 ffs_epfile_open(struct inode *inode, struct file *file)
 {
 	struct ffs_epfile *epfile = inode->i_private;
+	struct ffs_data *ffs = epfile->ffs;
 
 	ENTER();
 
+	ffs_log("%s: state %d setup_state %d flag %lu", epfile->name,
+		epfile->ffs->state, epfile->ffs->setup_state,
+		epfile->ffs->flags);
+
 	if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
 		return -ENODEV;
 
@@ -1076,10 +1154,14 @@
 {
 	struct ffs_io_data *io_data = kiocb->private;
 	struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
+	struct ffs_data *ffs = epfile->ffs;
 	int value;
 
 	ENTER();
 
+	ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state,
+		epfile->ffs->setup_state, epfile->ffs->flags);
+
 	spin_lock_irq(&epfile->ffs->eps_lock);
 
 	if (likely(io_data && io_data->ep && io_data->req))
@@ -1089,16 +1171,22 @@
 
 	spin_unlock_irq(&epfile->ffs->eps_lock);
 
+	ffs_log("exit: value %d", value);
+
 	return value;
 }
 
 static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
 {
+	struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
+	struct ffs_data *ffs = epfile->ffs;
 	struct ffs_io_data io_data, *p = &io_data;
 	ssize_t res;
 
 	ENTER();
 
+	ffs_log("enter");
+
 	if (!is_sync_kiocb(kiocb)) {
 		p = kmalloc(sizeof(io_data), GFP_KERNEL);
 		if (unlikely(!p))
@@ -1125,16 +1213,23 @@
 		kfree(p);
 	else
 		*from = p->data;
+
+	ffs_log("exit: ret %zd", res);
+
 	return res;
 }
 
 static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
 {
+	struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
+	struct ffs_data *ffs = epfile->ffs;
 	struct ffs_io_data io_data, *p = &io_data;
 	ssize_t res;
 
 	ENTER();
 
+	ffs_log("enter");
+
 	if (!is_sync_kiocb(kiocb)) {
 		p = kmalloc(sizeof(io_data), GFP_KERNEL);
 		if (unlikely(!p))
@@ -1173,6 +1268,9 @@
 	} else {
 		*to = p->data;
 	}
+
+	ffs_log("exit: ret %zd", res);
+
 	return res;
 }
 
@@ -1180,10 +1278,15 @@
 ffs_epfile_release(struct inode *inode, struct file *file)
 {
 	struct ffs_epfile *epfile = inode->i_private;
+	struct ffs_data *ffs = epfile->ffs;
 
 	ENTER();
 
 	__ffs_epfile_read_buffer_free(epfile);
+	ffs_log("%s: state %d setup_state %d flag %lu", epfile->name,
+			epfile->ffs->state, epfile->ffs->setup_state,
+			epfile->ffs->flags);
+
 	ffs_data_closed(epfile->ffs);
 
 	return 0;
@@ -1193,11 +1296,16 @@
 			     unsigned long value)
 {
 	struct ffs_epfile *epfile = file->private_data;
+	struct ffs_data *ffs = epfile->ffs;
 	struct ffs_ep *ep;
 	int ret;
 
 	ENTER();
 
+	ffs_log("%s: code 0x%08x value %#lx state %d setup_state %d flag %lu",
+		epfile->name, code, value, epfile->ffs->state,
+		epfile->ffs->setup_state, epfile->ffs->flags);
+
 	if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
 		return -ENODEV;
 
@@ -1263,6 +1371,8 @@
 	}
 	spin_unlock_irq(&epfile->ffs->eps_lock);
 
+	ffs_log("exit: %s: ret %d\n", epfile->name, ret);
+
 	return ret;
 }
 
@@ -1301,10 +1411,13 @@
 		  const struct inode_operations *iops,
 		  struct ffs_file_perms *perms)
 {
+	struct ffs_data	*ffs = sb->s_fs_info;
 	struct inode *inode;
 
 	ENTER();
 
+	ffs_log("enter");
+
 	inode = new_inode(sb);
 
 	if (likely(inode)) {
@@ -1338,6 +1451,8 @@
 
 	ENTER();
 
+	ffs_log("enter");
+
 	dentry = d_alloc_name(sb->s_root, name);
 	if (unlikely(!dentry))
 		return NULL;
@@ -1349,6 +1464,7 @@
 	}
 
 	d_add(dentry, inode);
+
 	return dentry;
 }
 
@@ -1374,6 +1490,8 @@
 
 	ENTER();
 
+	ffs_log("enter");
+
 	ffs->sb              = sb;
 	data->ffs_data       = NULL;
 	sb->s_fs_info        = ffs;
@@ -1541,6 +1659,7 @@
 		ffs_release_dev(data.ffs_data);
 		ffs_data_put(data.ffs_data);
 	}
+
 	return rv;
 }
 
@@ -1600,6 +1719,8 @@
 {
 	ENTER();
 
+	ffs_log("ref %u", refcount_read(&ffs->ref));
+
 	refcount_inc(&ffs->ref);
 }
 
@@ -1607,6 +1728,10 @@
 {
 	ENTER();
 
+	ffs_log("enter: state %d setup_state %d flag %lu opened %d ref %d",
+		ffs->state, ffs->setup_state, ffs->flags,
+		atomic_read(&ffs->opened), refcount_read(&ffs->ref));
+
 	refcount_inc(&ffs->ref);
 	if (atomic_add_return(1, &ffs->opened) == 1 &&
 			ffs->state == FFS_DEACTIVATED) {
@@ -1619,6 +1744,8 @@
 {
 	ENTER();
 
+	ffs_log("ref %u", refcount_read(&ffs->ref));
+
 	if (unlikely(refcount_dec_and_test(&ffs->ref))) {
 		pr_info("%s(): freeing\n", __func__);
 		ffs_data_clear(ffs);
@@ -1626,6 +1753,7 @@
 		       waitqueue_active(&ffs->ep0req_completion.wait) ||
 		       waitqueue_active(&ffs->wait));
 		destroy_workqueue(ffs->io_completion_wq);
+		ipc_log_context_destroy(ffs->ipc_log);
 		kfree(ffs->dev_name);
 		kfree(ffs);
 	}
@@ -1635,6 +1763,9 @@
 {
 	ENTER();
 
+	ffs_log("state %d setup_state %d flag %lu opened %d", ffs->state,
+		ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
 	if (atomic_dec_and_test(&ffs->opened)) {
 		if (ffs->no_disconnect) {
 			ffs->state = FFS_DEACTIVATED;
@@ -1660,6 +1791,7 @@
 
 static struct ffs_data *ffs_data_new(const char *dev_name)
 {
+	char ipcname[24] = "usb_ffs_";
 	struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL);
 	if (unlikely(!ffs))
 		return NULL;
@@ -1684,6 +1816,11 @@
 	/* XXX REVISIT need to update it in some places, or do we? */
 	ffs->ev.can_stall = 1;
 
+	strlcat(ipcname, dev_name, sizeof(ipcname));
+	ffs->ipc_log = ipc_log_context_create(NUM_PAGES, ipcname, 0);
+	if (IS_ERR_OR_NULL(ffs->ipc_log))
+		ffs->ipc_log =  NULL;
+
 	return ffs;
 }
 
@@ -1691,6 +1828,11 @@
 {
 	ENTER();
 
+	ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+		ffs->setup_state, ffs->flags);
+
+	pr_debug("%s: ffs->gadget= %pK, ffs->flags= %lu\n",
+				__func__, ffs->gadget, ffs->flags);
 	ffs_closed(ffs);
 
 	BUG_ON(ffs->gadget);
@@ -1710,6 +1852,9 @@
 {
 	ENTER();
 
+	ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+		ffs->setup_state, ffs->flags);
+
 	ffs_data_clear(ffs);
 
 	ffs->epfiles = NULL;
@@ -1742,6 +1887,9 @@
 
 	ENTER();
 
+	ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+		ffs->setup_state, ffs->flags);
+
 	if (WARN_ON(ffs->state != FFS_ACTIVE
 		 || test_and_set_bit(FFS_FL_BOUND, &ffs->flags)))
 		return -EBADFD;
@@ -1767,6 +1915,7 @@
 	}
 
 	ffs->gadget = cdev->gadget;
+
 	ffs_data_get(ffs);
 	return 0;
 }
@@ -1780,6 +1929,8 @@
 		ffs->ep0req = NULL;
 		ffs->gadget = NULL;
 		clear_bit(FFS_FL_BOUND, &ffs->flags);
+		ffs_log("state %d setup_state %d flag %lu gadget %pK\n",
+			ffs->state, ffs->setup_state, ffs->flags, ffs->gadget);
 		ffs_data_put(ffs);
 	}
 }
@@ -1791,6 +1942,9 @@
 
 	ENTER();
 
+	ffs_log("enter: eps_count %u state %d setup_state %d flag %lu",
+		ffs->eps_count, ffs->state, ffs->setup_state, ffs->flags);
+
 	count = ffs->eps_count;
 	epfiles = kcalloc(count, sizeof(*epfiles), GFP_KERNEL);
 	if (!epfiles)
@@ -1814,15 +1968,19 @@
 	}
 
 	ffs->epfiles = epfiles;
+
 	return 0;
 }
 
 static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
 {
 	struct ffs_epfile *epfile = epfiles;
+	struct ffs_data *ffs = epfiles->ffs;
 
 	ENTER();
 
+	ffs_log("enter: count %u", count);
+
 	for (; count; --count, ++epfile) {
 		BUG_ON(mutex_is_locked(&epfile->mutex));
 		if (epfile->dentry) {
@@ -1838,10 +1996,14 @@
 static void ffs_func_eps_disable(struct ffs_function *func)
 {
 	struct ffs_ep *ep         = func->eps;
+	struct ffs_data *ffs      = func->ffs;
 	struct ffs_epfile *epfile = func->ffs->epfiles;
 	unsigned count            = func->ffs->eps_count;
 	unsigned long flags;
 
+	ffs_log("enter: state %d setup_state %d flag %lu", func->ffs->state,
+		func->ffs->setup_state, func->ffs->flags);
+
 	spin_lock_irqsave(&func->ffs->eps_lock, flags);
 	while (count--) {
 		/* pending requests get nuked */
@@ -1867,6 +2029,9 @@
 	unsigned long flags;
 	int ret = 0;
 
+	ffs_log("enter: state %d setup_state %d flag %lu", func->ffs->state,
+		func->ffs->setup_state, func->ffs->flags);
+
 	spin_lock_irqsave(&func->ffs->eps_lock, flags);
 	while(count--) {
 		ep->ep->driver_data = ep;
@@ -1883,7 +2048,9 @@
 			epfile->ep = ep;
 			epfile->in = usb_endpoint_dir_in(ep->ep->desc);
 			epfile->isoc = usb_endpoint_xfer_isoc(ep->ep->desc);
+			ffs_log("usb_ep_enable %s", ep->ep->name);
 		} else {
+			ffs_log("usb_ep_enable %s ret %d", ep->ep->name, ret);
 			break;
 		}
 
@@ -1924,7 +2091,8 @@
 				    struct usb_os_desc_header *h, void *data,
 				    unsigned len, void *priv);
 
-static int __must_check ffs_do_single_desc(char *data, unsigned len,
+static int __must_check ffs_do_single_desc(struct ffs_data *ffs,
+					   char *data, unsigned int len,
 					   ffs_entity_callback entity,
 					   void *priv)
 {
@@ -1934,6 +2102,8 @@
 
 	ENTER();
 
+	ffs_log("enter: len %u", len);
+
 	/* At least two bytes are required: length and type */
 	if (len < 2) {
 		pr_vdebug("descriptor too short\n");
@@ -2050,10 +2220,13 @@
 #undef __entity_check_STRING
 #undef __entity_check_ENDPOINT
 
+	ffs_log("exit: desc type %d length %d", _ds->bDescriptorType, length);
+
 	return length;
 }
 
-static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
+static int __must_check ffs_do_descs(struct ffs_data *ffs, unsigned int count,
+				     char *data, unsigned int len,
 				     ffs_entity_callback entity, void *priv)
 {
 	const unsigned _len = len;
@@ -2061,6 +2234,8 @@
 
 	ENTER();
 
+	ffs_log("enter: len %u", len);
+
 	for (;;) {
 		int ret;
 
@@ -2078,7 +2253,7 @@
 		if (!data)
 			return _len - len;
 
-		ret = ffs_do_single_desc(data, len, entity, priv);
+		ret = ffs_do_single_desc(ffs, data, len, entity, priv);
 		if (unlikely(ret < 0)) {
 			pr_debug("%s returns %d\n", __func__, ret);
 			return ret;
@@ -2095,10 +2270,13 @@
 				void *priv)
 {
 	struct ffs_desc_helper *helper = priv;
+	struct ffs_data *ffs = helper->ffs;
 	struct usb_endpoint_descriptor *d;
 
 	ENTER();
 
+	ffs_log("enter: type %u", type);
+
 	switch (type) {
 	case FFS_DESCRIPTOR:
 		break;
@@ -2140,12 +2318,15 @@
 	return 0;
 }
 
-static int __ffs_do_os_desc_header(enum ffs_os_desc_type *next_type,
+static int __ffs_do_os_desc_header(struct ffs_data *ffs,
+				   enum ffs_os_desc_type *next_type,
 				   struct usb_os_desc_header *desc)
 {
 	u16 bcd_version = le16_to_cpu(desc->bcdVersion);
 	u16 w_index = le16_to_cpu(desc->wIndex);
 
+	ffs_log("enter: bcd:%x w_index:%d", bcd_version, w_index);
+
 	if (bcd_version != 1) {
 		pr_vdebug("unsupported os descriptors version: %d",
 			  bcd_version);
@@ -2170,7 +2351,8 @@
  * Process all extended compatibility/extended property descriptors
  * of a feature descriptor
  */
-static int __must_check ffs_do_single_os_desc(char *data, unsigned len,
+static int __must_check ffs_do_single_os_desc(struct ffs_data *ffs,
+					      char *data, unsigned int len,
 					      enum ffs_os_desc_type type,
 					      u16 feature_count,
 					      ffs_os_desc_callback entity,
@@ -2182,22 +2364,27 @@
 
 	ENTER();
 
+	ffs_log("enter: len %u os desc type %d", len, type);
+
 	/* loop over all ext compat/ext prop descriptors */
 	while (feature_count--) {
 		ret = entity(type, h, data, len, priv);
 		if (unlikely(ret < 0)) {
-			pr_debug("bad OS descriptor, type: %d\n", type);
+			ffs_log("bad OS descriptor, type: %d\n", type);
 			return ret;
 		}
 		data += ret;
 		len -= ret;
 	}
+
+
 	return _len - len;
 }
 
 /* Process a number of complete Feature Descriptors (Ext Compat or Ext Prop) */
-static int __must_check ffs_do_os_descs(unsigned count,
-					char *data, unsigned len,
+static int __must_check ffs_do_os_descs(struct ffs_data *ffs,
+					unsigned int count, char *data,
+					unsigned int len,
 					ffs_os_desc_callback entity, void *priv)
 {
 	const unsigned _len = len;
@@ -2205,6 +2392,8 @@
 
 	ENTER();
 
+	ffs_log("enter: len %u", len);
+
 	for (num = 0; num < count; ++num) {
 		int ret;
 		enum ffs_os_desc_type type;
@@ -2224,9 +2413,9 @@
 		if (le32_to_cpu(desc->dwLength) > len)
 			return -EINVAL;
 
-		ret = __ffs_do_os_desc_header(&type, desc);
+		ret = __ffs_do_os_desc_header(ffs, &type, desc);
 		if (unlikely(ret < 0)) {
-			pr_debug("entity OS_DESCRIPTOR(%02lx); ret = %d\n",
+			ffs_log("entity OS_DESCRIPTOR(%02lx); ret = %d\n",
 				 num, ret);
 			return ret;
 		}
@@ -2244,16 +2433,17 @@
 		 * Process all function/property descriptors
 		 * of this Feature Descriptor
 		 */
-		ret = ffs_do_single_os_desc(data, len, type,
+		ret = ffs_do_single_os_desc(ffs, data, len, type,
 					    feature_count, entity, priv, desc);
 		if (unlikely(ret < 0)) {
-			pr_debug("%s returns %d\n", __func__, ret);
+			ffs_log("%s returns %d\n", __func__, ret);
 			return ret;
 		}
 
 		len -= ret;
 		data += ret;
 	}
+
 	return _len - len;
 }
 
@@ -2269,6 +2459,8 @@
 
 	ENTER();
 
+	ffs_log("enter: type %d len %u", type, len);
+
 	switch (type) {
 	case FFS_OS_DESC_EXT_COMPAT: {
 		struct usb_ext_compat_desc *d = data;
@@ -2333,6 +2525,7 @@
 		pr_vdebug("unknown descriptor: %d\n", type);
 		return -EINVAL;
 	}
+
 	return length;
 }
 
@@ -2346,6 +2539,8 @@
 
 	ENTER();
 
+	ffs_log("enter: len %zu", len);
+
 	if (get_unaligned_le32(data + 4) != len)
 		goto error;
 
@@ -2419,7 +2614,7 @@
 			continue;
 		helper.interfaces_count = 0;
 		helper.eps_count = 0;
-		ret = ffs_do_descs(counts[i], data, len,
+		ret = ffs_do_descs(ffs, counts[i], data, len,
 				   __ffs_data_do_entity, &helper);
 		if (ret < 0)
 			goto error;
@@ -2440,7 +2635,7 @@
 		len  -= ret;
 	}
 	if (os_descs_count) {
-		ret = ffs_do_os_descs(os_descs_count, data, len,
+		ret = ffs_do_os_descs(ffs, os_descs_count, data, len,
 				      __ffs_data_do_os_desc, ffs);
 		if (ret < 0)
 			goto error;
@@ -2478,6 +2673,8 @@
 
 	ENTER();
 
+	ffs_log("enter: len %zu", len);
+
 	if (unlikely(len < 16 ||
 		     get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
 		     get_unaligned_le32(data + 4) != len))
@@ -2610,6 +2807,9 @@
 	enum usb_functionfs_event_type rem_type1, rem_type2 = type;
 	int neg = 0;
 
+	ffs_log("enter: type %d state %d setup_state %d flag %lu", type,
+		ffs->state, ffs->setup_state, ffs->flags);
+
 	/*
 	 * Abort any unhandled setup
 	 *
@@ -2698,11 +2898,14 @@
 {
 	struct usb_endpoint_descriptor *ds = (void *)desc;
 	struct ffs_function *func = priv;
+	struct ffs_data *ffs = func->ffs;
 	struct ffs_ep *ffs_ep;
 	unsigned ep_desc_id;
 	int idx;
 	static const char *speed_names[] = { "full", "high", "super" };
 
+	ffs_log("enter");
+
 	if (type != FFS_DESCRIPTOR)
 		return 0;
 
@@ -2786,9 +2989,12 @@
 				   void *priv)
 {
 	struct ffs_function *func = priv;
+	struct ffs_data *ffs = func->ffs;
 	unsigned idx;
 	u8 newValue;
 
+	ffs_log("enter: type %d", type);
+
 	switch (type) {
 	default:
 	case FFS_DESCRIPTOR:
@@ -2833,6 +3039,9 @@
 
 	pr_vdebug("%02x -> %02x\n", *valuep, newValue);
 	*valuep = newValue;
+
+	ffs_log("exit: newValue %d", newValue);
+
 	return 0;
 }
 
@@ -2841,8 +3050,11 @@
 				      unsigned len, void *priv)
 {
 	struct ffs_function *func = priv;
+	struct ffs_data *ffs = func->ffs;
 	u8 length = 0;
 
+	ffs_log("enter: type %d", type);
+
 	switch (type) {
 	case FFS_OS_DESC_EXT_COMPAT: {
 		struct usb_ext_compat_desc *desc = data;
@@ -2921,6 +3133,7 @@
 	struct ffs_function *func = ffs_func_from_usb(f);
 	struct f_fs_opts *ffs_opts =
 		container_of(f->fi, struct f_fs_opts, func_inst);
+	struct ffs_data *ffs = ffs_opts->dev->ffs_data;
 	int ret;
 
 	ENTER();
@@ -2953,8 +3166,10 @@
 	 */
 	if (!ffs_opts->refcnt) {
 		ret = functionfs_bind(func->ffs, c->cdev);
-		if (ret)
+		if (ret) {
+			ffs_log("functionfs_bind returned %d", ret);
 			return ERR_PTR(ret);
+		}
 	}
 	ffs_opts->refcnt++;
 	func->function.strings = func->ffs->stringtabs;
@@ -3002,6 +3217,9 @@
 
 	ENTER();
 
+	ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+		ffs->setup_state, ffs->flags);
+
 	/* Has descriptors only for speeds gadget does not support */
 	if (unlikely(!(full | high | super)))
 		return -ENOTSUPP;
@@ -3039,7 +3257,7 @@
 	 */
 	if (likely(full)) {
 		func->function.fs_descriptors = vla_ptr(vlabuf, d, fs_descs);
-		fs_len = ffs_do_descs(ffs->fs_descs_count,
+		fs_len = ffs_do_descs(ffs, ffs->fs_descs_count,
 				      vla_ptr(vlabuf, d, raw_descs),
 				      d_raw_descs__sz,
 				      __ffs_func_bind_do_descs, func);
@@ -3053,7 +3271,7 @@
 
 	if (likely(high)) {
 		func->function.hs_descriptors = vla_ptr(vlabuf, d, hs_descs);
-		hs_len = ffs_do_descs(ffs->hs_descs_count,
+		hs_len = ffs_do_descs(ffs, ffs->hs_descs_count,
 				      vla_ptr(vlabuf, d, raw_descs) + fs_len,
 				      d_raw_descs__sz - fs_len,
 				      __ffs_func_bind_do_descs, func);
@@ -3067,7 +3285,7 @@
 
 	if (likely(super)) {
 		func->function.ss_descriptors = vla_ptr(vlabuf, d, ss_descs);
-		ss_len = ffs_do_descs(ffs->ss_descs_count,
+		ss_len = ffs_do_descs(ffs, ffs->ss_descs_count,
 				vla_ptr(vlabuf, d, raw_descs) + fs_len + hs_len,
 				d_raw_descs__sz - fs_len - hs_len,
 				__ffs_func_bind_do_descs, func);
@@ -3085,7 +3303,7 @@
 	 * endpoint numbers rewriting.  We can do that in one go
 	 * now.
 	 */
-	ret = ffs_do_descs(ffs->fs_descs_count +
+	ret = ffs_do_descs(ffs, ffs->fs_descs_count +
 			   (high ? ffs->hs_descs_count : 0) +
 			   (super ? ffs->ss_descs_count : 0),
 			   vla_ptr(vlabuf, d, raw_descs), d_raw_descs__sz,
@@ -3105,7 +3323,7 @@
 				vla_ptr(vlabuf, d, ext_compat) + i * 16;
 			INIT_LIST_HEAD(&desc->ext_prop);
 		}
-		ret = ffs_do_os_descs(ffs->ms_os_descs_count,
+		ret = ffs_do_os_descs(ffs, ffs->ms_os_descs_count,
 				      vla_ptr(vlabuf, d, raw_descs) +
 				      fs_len + hs_len + ss_len,
 				      d_raw_descs__sz - fs_len - hs_len -
@@ -3119,10 +3337,12 @@
 
 	/* And we're done */
 	ffs_event_add(ffs, FUNCTIONFS_BIND);
+
 	return 0;
 
 error:
 	/* XXX Do we need to release all claimed endpoints here? */
+	ffs_log("exit: ret %d", ret);
 	return ret;
 }
 
@@ -3131,11 +3351,14 @@
 {
 	struct f_fs_opts *ffs_opts = ffs_do_functionfs_bind(f, c);
 	struct ffs_function *func = ffs_func_from_usb(f);
+	struct ffs_data *ffs = func->ffs;
 	int ret;
 
 	if (IS_ERR(ffs_opts))
 		return PTR_ERR(ffs_opts);
 
+	ffs_log("enter");
+
 	ret = _ffs_func_bind(c, f);
 	if (ret && !--ffs_opts->refcnt)
 		functionfs_unbind(func->ffs);
@@ -3150,6 +3373,9 @@
 {
 	struct ffs_data *ffs = container_of(work,
 		struct ffs_data, reset_work);
+
+	ffs_log("enter");
+
 	ffs_data_reset(ffs);
 }
 
@@ -3160,6 +3386,8 @@
 	struct ffs_data *ffs = func->ffs;
 	int ret = 0, intf;
 
+	ffs_log("enter: alt %d", (int)alt);
+
 	if (alt != (unsigned)-1) {
 		intf = ffs_func_revmap_intf(func, interface);
 		if (unlikely(intf < 0))
@@ -3189,11 +3417,16 @@
 	ret = ffs_func_eps_enable(func);
 	if (likely(ret >= 0))
 		ffs_event_add(ffs, FUNCTIONFS_ENABLE);
+
 	return ret;
 }
 
 static void ffs_func_disable(struct usb_function *f)
 {
+	struct ffs_function *func = ffs_func_from_usb(f);
+	struct ffs_data *ffs = func->ffs;
+
+	ffs_log("enter");
 	ffs_func_set_alt(f, 0, (unsigned)-1);
 }
 
@@ -3213,6 +3446,11 @@
 	pr_vdebug("creq->wIndex       = %04x\n", le16_to_cpu(creq->wIndex));
 	pr_vdebug("creq->wLength      = %04x\n", le16_to_cpu(creq->wLength));
 
+	ffs_log("enter: state %d reqtype=%02x req=%02x wv=%04x wi=%04x wl=%04x",
+			ffs->state, creq->bRequestType, creq->bRequest,
+			le16_to_cpu(creq->wValue), le16_to_cpu(creq->wIndex),
+			le16_to_cpu(creq->wLength));
+
 	/*
 	 * Most requests directed to interface go through here
 	 * (notable exceptions are set/get interface) so we need to
@@ -3281,13 +3519,23 @@
 
 static void ffs_func_suspend(struct usb_function *f)
 {
+	struct ffs_data *ffs = ffs_func_from_usb(f)->ffs;
+
 	ENTER();
+
+	ffs_log("enter");
+
 	ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND);
 }
 
 static void ffs_func_resume(struct usb_function *f)
 {
+	struct ffs_data *ffs = ffs_func_from_usb(f)->ffs;
+
 	ENTER();
+
+	ffs_log("enter");
+
 	ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME);
 }
 
@@ -3360,7 +3608,9 @@
 	if (dev)
 		return dev;
 
-	return _ffs_do_find_dev(name);
+	dev = _ffs_do_find_dev(name);
+
+	return dev;
 }
 
 /* Configfs support *********************************************************/
@@ -3451,6 +3701,10 @@
 	unsigned long flags;
 
 	ENTER();
+
+	ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+		ffs->setup_state, ffs->flags);
+
 	if (ffs->func == func) {
 		ffs_func_eps_disable(func);
 		ffs->func = NULL;
@@ -3481,6 +3735,9 @@
 	func->interfaces_nums = NULL;
 
 	ffs_event_add(ffs, FUNCTIONFS_UNBIND);
+
+	ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
+		ffs->setup_state, ffs->flags);
 }
 
 static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
@@ -3568,6 +3825,7 @@
 		dev->single = true;
 
 	ffs_dev_unlock();
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(ffs_single_dev);
@@ -3593,6 +3851,7 @@
 	struct ffs_dev *ffs_dev;
 
 	ENTER();
+
 	ffs_dev_lock();
 
 	ffs_dev = _ffs_find_dev(dev_name);
@@ -3607,6 +3866,7 @@
 		ffs_dev->mounted = true;
 
 	ffs_dev_unlock();
+
 	return ffs_dev;
 }
 
@@ -3615,6 +3875,7 @@
 	struct ffs_dev *ffs_dev;
 
 	ENTER();
+
 	ffs_dev_lock();
 
 	ffs_dev = ffs_data->private_data;
@@ -3634,6 +3895,9 @@
 	int ret = 0;
 
 	ENTER();
+
+	ffs_log("enter");
+
 	ffs_dev_lock();
 
 	ffs_obj = ffs->private_data;
@@ -3658,6 +3922,9 @@
 	set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
 done:
 	ffs_dev_unlock();
+
+	ffs_log("exit: ret %d", ret);
+
 	return ret;
 }
 
@@ -3668,6 +3935,9 @@
 	struct config_item *ci;
 
 	ENTER();
+
+	ffs_log("enter");
+
 	ffs_dev_lock();
 
 	ffs_obj = ffs->private_data;
@@ -3693,11 +3963,16 @@
 	ci = opts->func_inst.group.cg_item.ci_parent->ci_parent;
 	ffs_dev_unlock();
 
-	if (test_bit(FFS_FL_BOUND, &ffs->flags))
+	if (test_bit(FFS_FL_BOUND, &ffs->flags)) {
 		unregister_gadget_item(ci);
+		ffs_log("unreg gadget done");
+	}
+
 	return;
 done:
 	ffs_dev_unlock();
+
+	ffs_log("exit error");
 }
 
 /* Misc helper functions ****************************************************/
diff --git a/drivers/usb/gadget/function/f_gsi.h b/drivers/usb/gadget/function/f_gsi.h
index b9bf791..3708033 100644
--- a/drivers/usb/gadget/function/f_gsi.h
+++ b/drivers/usb/gadget/function/f_gsi.h
@@ -23,7 +23,7 @@
 #define GSI_MBIM_CTRL_NAME "android_mbim"
 #define GSI_DPL_CTRL_NAME "dpl_ctrl"
 #define GSI_CTRL_NAME_LEN (sizeof(GSI_MBIM_CTRL_NAME)+2)
-#define GSI_MAX_CTRL_PKT_SIZE 4096
+#define GSI_MAX_CTRL_PKT_SIZE 8192
 #define GSI_CTRL_DTR (1 << 0)
 
 #define GSI_NUM_IN_RNDIS_BUFFERS 50
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 4713a1c..9b86d55 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -1050,6 +1050,12 @@
 			goto fail_f_midi;
 	}
 
+	if (gadget_is_superspeed_plus(c->cdev->gadget)) {
+		f->ssp_descriptors = usb_copy_descriptors(midi_function);
+		if (!f->ssp_descriptors)
+			goto fail_f_midi;
+	}
+
 	kfree(midi_function);
 
 	return 0;
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 19556f0..97cce3b 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -1580,10 +1580,58 @@
 	.ct_owner	= THIS_MODULE,
 };
 
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+
+struct ncm_setup_desc {
+	struct work_struct work;
+	struct device *device;
+	uint8_t major; // Mirror Link major version
+	uint8_t minor; // Mirror Link minor version
+};
+
+static struct ncm_setup_desc *_ncm_setup_desc;
+
+#define MIRROR_LINK_STRING_LENGTH_MAX 32
+static void ncm_setup_work(struct work_struct *data)
+{
+	char mirror_link_string[MIRROR_LINK_STRING_LENGTH_MAX];
+	char *envp[2] = { mirror_link_string, NULL };
+
+	snprintf(mirror_link_string, MIRROR_LINK_STRING_LENGTH_MAX,
+		"MirrorLink=V%d.%d",
+		_ncm_setup_desc->major, _ncm_setup_desc->minor);
+	kobject_uevent_env(&_ncm_setup_desc->device->kobj, KOBJ_CHANGE, envp);
+}
+
+int ncm_ctrlrequest(struct usb_composite_dev *cdev,
+			const struct usb_ctrlrequest *ctrl)
+{
+	int value = -EOPNOTSUPP;
+
+	if (ctrl->bRequestType == 0x40 && ctrl->bRequest == 0xF0
+			&& _ncm_setup_desc) {
+		_ncm_setup_desc->minor = (uint8_t)(ctrl->wValue >> 8);
+		_ncm_setup_desc->major = (uint8_t)(ctrl->wValue & 0xFF);
+		schedule_work(&_ncm_setup_desc->work);
+		value = 0;
+	}
+
+	return value;
+}
+#endif
+
 static void ncm_free_inst(struct usb_function_instance *f)
 {
 	struct f_ncm_opts *opts;
 
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+	cancel_work_sync(&_ncm_setup_desc->work);
+	/* release resources associated with _ncm_setup_desc */
+	device_destroy(_ncm_setup_desc->device->class,
+		_ncm_setup_desc->device->devt);
+	kfree(_ncm_setup_desc);
+#endif
+
 	opts = container_of(f, struct f_ncm_opts, func_inst);
 	if (opts->bound)
 		gether_cleanup(netdev_priv(opts->net));
@@ -1602,6 +1650,14 @@
 
 	config_group_init_type_name(&opts->func_inst.group, "", &ncm_func_type);
 
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+	_ncm_setup_desc = kzalloc(sizeof(*_ncm_setup_desc), GFP_KERNEL);
+	if (!_ncm_setup_desc)
+		return ERR_PTR(-ENOMEM);
+	INIT_WORK(&_ncm_setup_desc->work, ncm_setup_work);
+	_ncm_setup_desc->device = create_function_device("f_ncm");
+#endif
+
 	return &opts->func_inst;
 }
 
@@ -1626,6 +1682,8 @@
 
 	DBG(c->cdev, "ncm unbind\n");
 
+	opts->bound = false;
+
 	hrtimer_cancel(&ncm->task_timer);
 
 	ncm_string_defs[0].id = 0;
@@ -1635,7 +1693,6 @@
 	usb_ep_free_request(ncm->notify, ncm->notify_req);
 
 	gether_cleanup(netdev_priv(opts->net));
-	opts->bound = false;
 }
 
 static struct usb_function *ncm_alloc(struct usb_function_instance *fi)
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index 9cdef10..ed68a48 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -838,7 +838,7 @@
 
 	ss = kzalloc(sizeof(*ss), GFP_KERNEL);
 	if (!ss)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	ss_opts =  container_of(fi, struct f_ss_opts, func_inst);
 
diff --git a/drivers/usb/gadget/function/u_ether_configfs.h b/drivers/usb/gadget/function/u_ether_configfs.h
index b0cf25c..959f666 100644
--- a/drivers/usb/gadget/function/u_ether_configfs.h
+++ b/drivers/usb/gadget/function/u_ether_configfs.h
@@ -32,6 +32,11 @@
 		struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item);	\
 		int result;						\
 									\
+		if (opts->bound == false) {		\
+			pr_err("Gadget function do not bind yet.\n");	\
+			return -ENODEV;			\
+		}							\
+									\
 		mutex_lock(&opts->lock);				\
 		result = gether_get_dev_addr(opts->net, page, PAGE_SIZE); \
 		mutex_unlock(&opts->lock);				\
@@ -45,6 +50,11 @@
 		struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item);	\
 		int ret;						\
 									\
+		if (opts->bound == false) {		\
+			pr_err("Gadget function do not bind yet.\n");	\
+			return -ENODEV;			\
+		}							\
+									\
 		mutex_lock(&opts->lock);				\
 		if (opts->refcnt) {					\
 			mutex_unlock(&opts->lock);			\
@@ -67,6 +77,11 @@
 		struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item);	\
 		int result;						\
 									\
+		if (opts->bound == false) {		\
+			pr_err("Gadget function do not bind yet.\n");	\
+			return -ENODEV;			\
+		}							\
+									\
 		mutex_lock(&opts->lock);				\
 		result = gether_get_host_addr(opts->net, page, PAGE_SIZE); \
 		mutex_unlock(&opts->lock);				\
@@ -80,6 +95,11 @@
 		struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item);	\
 		int ret;						\
 									\
+		if (opts->bound == false) {		\
+			pr_err("Gadget function do not bind yet.\n");	\
+			return -ENODEV;			\
+		}							\
+									\
 		mutex_lock(&opts->lock);				\
 		if (opts->refcnt) {					\
 			mutex_unlock(&opts->lock);			\
@@ -102,6 +122,11 @@
 		struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item);	\
 		unsigned qmult;						\
 									\
+		if (opts->bound == false) {		\
+			pr_err("Gadget function do not bind yet.\n");	\
+			return -ENODEV;			\
+		}							\
+									\
 		mutex_lock(&opts->lock);				\
 		qmult = gether_get_qmult(opts->net);			\
 		mutex_unlock(&opts->lock);				\
@@ -115,6 +140,11 @@
 		u8 val;							\
 		int ret;						\
 									\
+		if (opts->bound == false) {		\
+			pr_err("Gadget function do not bind yet.\n");	\
+			return -ENODEV;			\
+		}							\
+									\
 		mutex_lock(&opts->lock);				\
 		if (opts->refcnt) {					\
 			ret = -EBUSY;					\
@@ -141,6 +171,11 @@
 		struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item);	\
 		int ret;						\
 									\
+		if (opts->bound == false) {		\
+			pr_err("Gadget function do not bind yet.\n");	\
+			return -ENODEV;			\
+		}							\
+									\
 		mutex_lock(&opts->lock);				\
 		ret = gether_get_ifname(opts->net, page, PAGE_SIZE);	\
 		mutex_unlock(&opts->lock);				\
diff --git a/drivers/usb/gadget/function/u_fs.h b/drivers/usb/gadget/function/u_fs.h
index c3aba4d..0856ca3 100644
--- a/drivers/usb/gadget/function/u_fs.h
+++ b/drivers/usb/gadget/function/u_fs.h
@@ -18,6 +18,7 @@
 #include <linux/mutex.h>
 #include <linux/workqueue.h>
 #include <linux/refcount.h>
+#include <linux/ipc_logging.h>
 
 #ifdef VERBOSE_DEBUG
 #ifndef pr_vdebug
@@ -285,6 +286,8 @@
 	 * destroyed by ffs_epfiles_destroy().
 	 */
 	struct ffs_epfile		*epfiles;
+
+	void				*ipc_log;
 };
 
 
diff --git a/drivers/usb/gadget/function/u_ncm.h b/drivers/usb/gadget/function/u_ncm.h
index 67324f9..785bda0 100644
--- a/drivers/usb/gadget/function/u_ncm.h
+++ b/drivers/usb/gadget/function/u_ncm.h
@@ -30,4 +30,8 @@
 	int				refcnt;
 };
 
+extern struct device *create_function_device(char *name);
+int ncm_ctrlrequest(struct usb_composite_dev *cdev,
+		const struct usb_ctrlrequest *ctrl);
+
 #endif /* U_NCM_H */
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 09bf6b4..1493d0f 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -187,6 +187,7 @@
 		xhci->quirks |= XHCI_SSIC_PORT_UNUSED;
 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
 	    (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+	     pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
 	     pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI))
 		xhci->quirks |= XHCI_INTEL_USB_ROLE_SW;
 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 1a0cf5d..f87c991 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -21,7 +21,7 @@
 
 config FSL_USB2_OTG
 	bool "Freescale USB OTG Transceiver Driver"
-	depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM
+	depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM=y && PM
 	depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
 	select USB_PHY
 	help
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index c0777a3..4c66edf 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -61,6 +61,7 @@
 	{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
 	{ USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
 	{ USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
+	{ USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */
 	{ USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
 	{ USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
 	{ USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
@@ -1353,8 +1354,13 @@
 	if (priv->partnum == CP210X_PARTNUM_CP2105)
 		req_type = REQTYPE_INTERFACE_TO_HOST;
 
+	result = usb_autopm_get_interface(serial->interface);
+	if (result)
+		return result;
+
 	result = cp210x_read_vendor_block(serial, req_type,
 					  CP210X_READ_LATCH, &buf, sizeof(buf));
+	usb_autopm_put_interface(serial->interface);
 	if (result < 0)
 		return result;
 
@@ -1375,6 +1381,10 @@
 
 	buf.mask = BIT(gpio);
 
+	result = usb_autopm_get_interface(serial->interface);
+	if (result)
+		goto out;
+
 	if (priv->partnum == CP210X_PARTNUM_CP2105) {
 		result = cp210x_write_vendor_block(serial,
 						   REQTYPE_HOST_TO_INTERFACE,
@@ -1392,6 +1402,8 @@
 					 NULL, 0, USB_CTRL_SET_TIMEOUT);
 	}
 
+	usb_autopm_put_interface(serial->interface);
+out:
 	if (result < 0) {
 		dev_err(&serial->interface->dev, "failed to set GPIO value: %d\n",
 				result);
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index b5cef32..1d8077e 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1015,6 +1015,8 @@
 	{ USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) },
 	{ USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) },
 	{ USB_DEVICE(AIRBUS_DS_VID, AIRBUS_DS_P8GR) },
+	/* EZPrototypes devices */
+	{ USB_DEVICE(EZPROTOTYPES_VID, HJELMSLUND_USB485_ISO_PID) },
 	{ }					/* Terminating entry */
 };
 
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 975d026..b863bed 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1309,6 +1309,12 @@
 #define IONICS_PLUGCOMPUTER_PID		0x0102
 
 /*
+ * EZPrototypes (PID reseller)
+ */
+#define EZPROTOTYPES_VID		0x1c40
+#define HJELMSLUND_USB485_ISO_PID	0x0477
+
+/*
  * Dresden Elektronik Sensor Terminal Board
  */
 #define DE_VID			0x1cf1 /* Vendor ID */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index fb54434..faf833e 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1148,6 +1148,8 @@
 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
 	  .driver_info = NCTRL(0) | RSVD(3) },
+	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1102, 0xff),	/* Telit ME910 (ECM) */
+	  .driver_info = NCTRL(0) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index fa93f67..e440f87 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -642,7 +642,7 @@
 		hash_del_rcu(&vsock->hash);
 
 	vsock->guest_cid = guest_cid;
-	hash_add_rcu(vhost_vsock_hash, &vsock->hash, guest_cid);
+	hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
 	spin_unlock_bh(&vhost_vsock_lock);
 
 	return 0;
diff --git a/fs/aio.c b/fs/aio.c
index 44551d9..45d5ef8 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1661,6 +1661,7 @@
 	struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
 	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
 	__poll_t mask = key_to_poll(key);
+	unsigned long flags;
 
 	req->woken = true;
 
@@ -1669,10 +1670,15 @@
 		if (!(mask & req->events))
 			return 0;
 
-		/* try to complete the iocb inline if we can: */
-		if (spin_trylock(&iocb->ki_ctx->ctx_lock)) {
+		/*
+		 * Try to complete the iocb inline if we can. Use
+		 * irqsave/irqrestore because not all filesystems (e.g. fuse)
+		 * call this function with IRQs disabled and because IRQs
+		 * have to be disabled before ctx_lock is obtained.
+		 */
+		if (spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
 			list_del(&iocb->ki_list);
-			spin_unlock(&iocb->ki_ctx->ctx_lock);
+			spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
 
 			list_del_init(&req->wait.entry);
 			aio_poll_complete(iocb, mask);
diff --git a/fs/autofs/expire.c b/fs/autofs/expire.c
index d441244..28d9c2b 100644
--- a/fs/autofs/expire.c
+++ b/fs/autofs/expire.c
@@ -596,7 +596,6 @@
 	pkt.len = dentry->d_name.len;
 	memcpy(pkt.name, dentry->d_name.name, pkt.len);
 	pkt.name[pkt.len] = '\0';
-	dput(dentry);
 
 	if (copy_to_user(pkt_p, &pkt, sizeof(struct autofs_packet_expire)))
 		ret = -EFAULT;
@@ -609,6 +608,8 @@
 	complete_all(&ino->expire_complete);
 	spin_unlock(&sbi->fs_lock);
 
+	dput(dentry);
+
 	return ret;
 }
 
diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c
index 846c052..3c14a8e 100644
--- a/fs/autofs/inode.c
+++ b/fs/autofs/inode.c
@@ -255,8 +255,10 @@
 	}
 	root_inode = autofs_get_inode(s, S_IFDIR | 0755);
 	root = d_make_root(root_inode);
-	if (!root)
+	if (!root) {
+		ret = -ENOMEM;
 		goto fail_ino;
+	}
 	pipe = NULL;
 
 	root->d_fsdata = ino;
diff --git a/fs/buffer.c b/fs/buffer.c
index 6f1ae3a..c083c4b 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -200,6 +200,7 @@
 	struct buffer_head *head;
 	struct page *page;
 	int all_mapped = 1;
+	static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);
 
 	index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
 	page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
@@ -227,15 +228,15 @@
 	 * file io on the block device and getblk.  It gets dealt with
 	 * elsewhere, don't buffer_error if we had some unmapped buffers
 	 */
-	if (all_mapped) {
-		printk("__find_get_block_slow() failed. "
-			"block=%llu, b_blocknr=%llu\n",
-			(unsigned long long)block,
-			(unsigned long long)bh->b_blocknr);
-		printk("b_state=0x%08lx, b_size=%zu\n",
-			bh->b_state, bh->b_size);
-		printk("device %pg blocksize: %d\n", bdev,
-			1 << bd_inode->i_blkbits);
+	ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
+	if (all_mapped && __ratelimit(&last_warned)) {
+		printk("__find_get_block_slow() failed. block=%llu, "
+		       "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
+		       "device %pg blocksize: %d\n",
+		       (unsigned long long)block,
+		       (unsigned long long)bh->b_blocknr,
+		       bh->b_state, bh->b_size, bdev,
+		       1 << bd_inode->i_blkbits);
 	}
 out_unlock:
 	spin_unlock(&bd_mapping->private_lock);
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 1e5a117..a2d7017 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -2243,10 +2243,12 @@
 {
 	int i;
 
-	cifs_small_buf_release(rqst->rq_iov[0].iov_base);
-	for (i = 1; i < rqst->rq_nvec; i++)
-		if (rqst->rq_iov[i].iov_base != smb2_padding)
-			kfree(rqst->rq_iov[i].iov_base);
+	if (rqst && rqst->rq_iov) {
+		cifs_small_buf_release(rqst->rq_iov[0].iov_base);
+		for (i = 1; i < rqst->rq_nvec; i++)
+			if (rqst->rq_iov[i].iov_base != smb2_padding)
+				kfree(rqst->rq_iov[i].iov_base);
+	}
 }
 
 int
@@ -2535,7 +2537,8 @@
 void
 SMB2_close_free(struct smb_rqst *rqst)
 {
-	cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
+	if (rqst && rqst->rq_iov)
+		cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
 }
 
 int
@@ -2685,7 +2688,8 @@
 void
 SMB2_query_info_free(struct smb_rqst *rqst)
 {
-	cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
+	if (rqst && rqst->rq_iov)
+		cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
 }
 
 static int
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index 8fb7887..437257d 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -84,8 +84,8 @@
 
 #define NUMBER_OF_SMB2_COMMANDS	0x0013
 
-/* 4 len + 52 transform hdr + 64 hdr + 56 create rsp */
-#define MAX_SMB2_HDR_SIZE 0x00b0
+/* 52 transform hdr + 64 hdr + 88 create rsp */
+#define MAX_SMB2_HDR_SIZE 204
 
 #define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe)
 #define SMB2_TRANSFORM_PROTO_NUM cpu_to_le32(0x424d53fd)
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 53ba123..1dcd800 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -720,6 +720,7 @@
 	unsigned long fs_count;	/* Number of filesystem-sized blocks */
 	int create;
 	unsigned int i_blkbits = sdio->blkbits + sdio->blkfactor;
+	loff_t i_size;
 
 	/*
 	 * If there was a memory error and we've overwritten all the
@@ -749,8 +750,8 @@
 		 */
 		create = dio->op == REQ_OP_WRITE;
 		if (dio->flags & DIO_SKIP_HOLES) {
-			if (fs_startblk <= ((i_size_read(dio->inode) - 1) >>
-							i_blkbits))
+			i_size = i_size_read(dio->inode);
+			if (i_size && fs_startblk <= (i_size - 1) >> i_blkbits)
 				create = 0;
 		}
 
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index 8237701..d31b6c7 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -21,8 +21,13 @@
 	spin_lock(&sb->s_inode_list_lock);
 	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
 		spin_lock(&inode->i_lock);
+		/*
+		 * We must skip inodes in unusual state. We may also skip
+		 * inodes without pages but we deliberately won't in case
+		 * we need to reschedule to avoid softlockups.
+		 */
 		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
-		    (inode->i_mapping->nrpages == 0)) {
+		    (inode->i_mapping->nrpages == 0 && !need_resched())) {
 			spin_unlock(&inode->i_lock);
 			continue;
 		}
@@ -30,6 +35,7 @@
 		spin_unlock(&inode->i_lock);
 		spin_unlock(&sb->s_inode_list_lock);
 
+		cond_resched();
 		invalidate_mapping_pages(inode->i_mapping, 0, -1);
 		iput(toput_inode);
 		toput_inode = inode;
diff --git a/fs/exec.c b/fs/exec.c
index c7e3417..77c03ce 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -929,7 +929,7 @@
 		bytes = kernel_read(file, *buf + pos, i_size - pos, &pos);
 		if (bytes < 0) {
 			ret = bytes;
-			goto out;
+			goto out_free;
 		}
 
 		if (bytes == 0)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 471d863..82ce6d4 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -331,11 +331,22 @@
 	struct work_struct	work;
 };
 
+static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi)
+{
+	down_write(&bdi->wb_switch_rwsem);
+}
+
+static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi)
+{
+	up_write(&bdi->wb_switch_rwsem);
+}
+
 static void inode_switch_wbs_work_fn(struct work_struct *work)
 {
 	struct inode_switch_wbs_context *isw =
 		container_of(work, struct inode_switch_wbs_context, work);
 	struct inode *inode = isw->inode;
+	struct backing_dev_info *bdi = inode_to_bdi(inode);
 	struct address_space *mapping = inode->i_mapping;
 	struct bdi_writeback *old_wb = inode->i_wb;
 	struct bdi_writeback *new_wb = isw->new_wb;
@@ -344,6 +355,12 @@
 	void **slot;
 
 	/*
+	 * If @inode switches cgwb membership while sync_inodes_sb() is
+	 * being issued, sync_inodes_sb() might miss it.  Synchronize.
+	 */
+	down_read(&bdi->wb_switch_rwsem);
+
+	/*
 	 * By the time control reaches here, RCU grace period has passed
 	 * since I_WB_SWITCH assertion and all wb stat update transactions
 	 * between unlocked_inode_to_wb_begin/end() are guaranteed to be
@@ -435,6 +452,8 @@
 	spin_unlock(&new_wb->list_lock);
 	spin_unlock(&old_wb->list_lock);
 
+	up_read(&bdi->wb_switch_rwsem);
+
 	if (switched) {
 		wb_wakeup(new_wb);
 		wb_put(old_wb);
@@ -475,9 +494,18 @@
 	if (inode->i_state & I_WB_SWITCH)
 		return;
 
+	/*
+	 * Avoid starting new switches while sync_inodes_sb() is in
+	 * progress.  Otherwise, if the down_write protected issue path
+	 * blocks heavily, we might end up starting a large number of
+	 * switches which will block on the rwsem.
+	 */
+	if (!down_read_trylock(&bdi->wb_switch_rwsem))
+		return;
+
 	isw = kzalloc(sizeof(*isw), GFP_ATOMIC);
 	if (!isw)
-		return;
+		goto out_unlock;
 
 	/* find and pin the new wb */
 	rcu_read_lock();
@@ -511,12 +539,14 @@
 	 * Let's continue after I_WB_SWITCH is guaranteed to be visible.
 	 */
 	call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
-	return;
+	goto out_unlock;
 
 out_free:
 	if (isw->new_wb)
 		wb_put(isw->new_wb);
 	kfree(isw);
+out_unlock:
+	up_read(&bdi->wb_switch_rwsem);
 }
 
 /**
@@ -894,6 +924,9 @@
 
 #else	/* CONFIG_CGROUP_WRITEBACK */
 
+static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
+static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
+
 static struct bdi_writeback *
 locked_inode_to_wb_and_lock_list(struct inode *inode)
 	__releases(&inode->i_lock)
@@ -2420,8 +2453,11 @@
 		return;
 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
 
+	/* protect against inode wb switch, see inode_switch_wbs_work_fn() */
+	bdi_down_write_wb_switch_rwsem(bdi);
 	bdi_split_work_to_wbs(bdi, &work, false);
 	wb_wait_for_completion(bdi, &done);
+	bdi_up_write_wb_switch_rwsem(bdi);
 
 	wait_sb_inodes(sb);
 }
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 4614ee2..9d566e6 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -107,7 +107,7 @@
 
 static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name)
 {
-	u32 hash = jhash2((u32 *)name, sizeof(*name) / 4, 0);
+	u32 hash = jhash2((u32 *)name, ht_parms.key_len / 4, 0);
 
 	return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS);
 }
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 1978581..b0eef00 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -859,6 +859,18 @@
 	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
 	if (rc != MIGRATEPAGE_SUCCESS)
 		return rc;
+
+	/*
+	 * page_private is subpool pointer in hugetlb pages.  Transfer to
+	 * new page.  PagePrivate is not associated with page_private for
+	 * hugetlb pages and can not be set here as only page_huge_active
+	 * pages can be migrated.
+	 */
+	if (page_private(page)) {
+		set_page_private(newpage, page_private(page));
+		set_page_private(page, 0);
+	}
+
 	if (mode != MIGRATE_SYNC_NO_COPY)
 		migrate_page_copy(newpage, page);
 	else
diff --git a/fs/iomap.c b/fs/iomap.c
index e57fb1e..fac4520 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -117,6 +117,12 @@
 	atomic_set(&iop->read_count, 0);
 	atomic_set(&iop->write_count, 0);
 	bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
+
+	/*
+	 * migrate_page_move_mapping() assumes that pages with private data have
+	 * their count elevated by 1.
+	 */
+	get_page(page);
 	set_page_private(page, (unsigned long)iop);
 	SetPagePrivate(page);
 	return iop;
@@ -133,6 +139,7 @@
 	WARN_ON_ONCE(atomic_read(&iop->write_count));
 	ClearPagePrivate(page);
 	set_page_private(page, 0);
+	put_page(page);
 	kfree(iop);
 }
 
@@ -565,8 +572,10 @@
 
 	if (page_has_private(page)) {
 		ClearPagePrivate(page);
+		get_page(newpage);
 		set_page_private(newpage, page_private(page));
 		set_page_private(page, 0);
+		put_page(page);
 		SetPagePrivate(newpage);
 	}
 
@@ -1778,6 +1787,7 @@
 	loff_t pos = iocb->ki_pos, start = pos;
 	loff_t end = iocb->ki_pos + count - 1, ret = 0;
 	unsigned int flags = IOMAP_DIRECT;
+	bool wait_for_completion = is_sync_kiocb(iocb);
 	struct blk_plug plug;
 	struct iomap_dio *dio;
 
@@ -1797,7 +1807,6 @@
 	dio->end_io = end_io;
 	dio->error = 0;
 	dio->flags = 0;
-	dio->wait_for_completion = is_sync_kiocb(iocb);
 
 	dio->submit.iter = iter;
 	dio->submit.waiter = current;
@@ -1852,7 +1861,7 @@
 		dio_warn_stale_pagecache(iocb->ki_filp);
 	ret = 0;
 
-	if (iov_iter_rw(iter) == WRITE && !dio->wait_for_completion &&
+	if (iov_iter_rw(iter) == WRITE && !wait_for_completion &&
 	    !inode->i_sb->s_dio_done_wq) {
 		ret = sb_init_dio_done_wq(inode->i_sb);
 		if (ret < 0)
@@ -1868,7 +1877,7 @@
 		if (ret <= 0) {
 			/* magic error code to fall back to buffered I/O */
 			if (ret == -ENOTBLK) {
-				dio->wait_for_completion = true;
+				wait_for_completion = true;
 				ret = 0;
 			}
 			break;
@@ -1890,8 +1899,24 @@
 	if (dio->flags & IOMAP_DIO_WRITE_FUA)
 		dio->flags &= ~IOMAP_DIO_NEED_SYNC;
 
+	/*
+	 * We are about to drop our additional submission reference, which
+	 * might be the last reference to the dio.  There are three
+	 * different ways we can progress here:
+	 *
+	 *  (a) If this is the last reference we will always complete and free
+	 *	the dio ourselves.
+	 *  (b) If this is not the last reference, and we serve an asynchronous
+	 *	iocb, we must never touch the dio after the decrement, the
+	 *	I/O completion handler will complete and free it.
+	 *  (c) If this is not the last reference, but we serve a synchronous
+	 *	iocb, the I/O completion handler will wake us up on the drop
+	 *	of the final reference, and we will complete and free it here
+	 *	after we got woken by the I/O completion handler.
+	 */
+	dio->wait_for_completion = wait_for_completion;
 	if (!atomic_dec_and_test(&dio->ref)) {
-		if (!dio->wait_for_completion)
+		if (!wait_for_completion)
 			return -EIOCBQUEUED;
 
 		for (;;) {
@@ -1908,9 +1933,7 @@
 		__set_current_state(TASK_RUNNING);
 	}
 
-	ret = iomap_dio_complete(dio);
-
-	return ret;
+	return iomap_dio_complete(dio);
 
 out_free_dio:
 	kfree(dio);
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index dbf5bc2..2d8b91f 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -832,26 +832,35 @@
  * to see if it supports poll (Neither 'poll' nor 'select' return
  * an appropriate error code).  When in doubt, set a suitable timeout value.
  */
+__poll_t kernfs_generic_poll(struct kernfs_open_file *of, poll_table *wait)
+{
+	struct kernfs_node *kn = kernfs_dentry_node(of->file->f_path.dentry);
+	struct kernfs_open_node *on = kn->attr.open;
+
+	poll_wait(of->file, &on->poll, wait);
+
+	if (of->event != atomic_read(&on->event))
+		return DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI;
+
+	return DEFAULT_POLLMASK;
+}
+
 static __poll_t kernfs_fop_poll(struct file *filp, poll_table *wait)
 {
 	struct kernfs_open_file *of = kernfs_of(filp);
 	struct kernfs_node *kn = kernfs_dentry_node(filp->f_path.dentry);
-	struct kernfs_open_node *on = kn->attr.open;
+	__poll_t ret;
 
 	if (!kernfs_get_active(kn))
-		goto trigger;
+		return DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI;
 
-	poll_wait(filp, &on->poll, wait);
+	if (kn->attr.ops->poll)
+		ret = kn->attr.ops->poll(of, wait);
+	else
+		ret = kernfs_generic_poll(of, wait);
 
 	kernfs_put_active(kn);
-
-	if (of->event != atomic_read(&on->event))
-		goto trigger;
-
-	return DEFAULT_POLLMASK;
-
- trigger:
-	return DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI;
+	return ret;
 }
 
 static void kernfs_notify_workfn(struct work_struct *work)
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 5ef2c71..6b666d1 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1906,6 +1906,11 @@
 	size_t len;
 	char *end;
 
+	if (unlikely(!dev_name || !*dev_name)) {
+		dfprintk(MOUNT, "NFS: device name not specified\n");
+		return -EINVAL;
+	}
+
 	/* Is the host name protected with square brakcets? */
 	if (*dev_name == '[') {
 		end = strchr(++dev_name, ']');
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 1cc797a..e6b5d62 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -829,7 +829,7 @@
 		dput(parent);
 		dput(next);
 	}
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 	return err;
 }
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index b2aadd3..2e4af5f 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -567,7 +567,8 @@
 		override_cred->fsgid = inode->i_gid;
 		if (!attr->hardlink) {
 			err = security_dentry_create_files_as(dentry,
-					attr->mode, &dentry->d_name, old_cred,
+					attr->mode, &dentry->d_name,
+					old_cred ? old_cred : current_cred(),
 					override_cred);
 			if (err) {
 				put_cred(override_cred);
@@ -583,7 +584,7 @@
 			err = ovl_create_over_whiteout(dentry, inode, attr);
 	}
 out_revert_creds:
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 	return err;
 }
 
@@ -659,7 +660,7 @@
 
 	old_cred = ovl_override_creds(dentry->d_sb);
 	err = ovl_set_redirect(dentry, false);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 	return err;
 }
@@ -857,7 +858,7 @@
 		err = ovl_remove_upper(dentry, is_dir, &list);
 	else
 		err = ovl_remove_and_whiteout(dentry, &list);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 	if (!err) {
 		if (is_dir)
 			clear_nlink(dentry->d_inode);
@@ -1225,7 +1226,7 @@
 out_unlock:
 	unlock_rename(new_upperdir, old_upperdir);
 out_revert_creds:
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 	ovl_nlink_end(new, locked);
 out_drop_write:
 	ovl_drop_write(old);
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index 986313d..da7d785 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -33,7 +33,7 @@
 	old_cred = ovl_override_creds(inode->i_sb);
 	realfile = open_with_fake_path(&file->f_path, file->f_flags | O_NOATIME,
 				       realinode, current_cred());
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 	pr_debug("open(%p[%pD2/%c], 0%o) -> (%p, 0%o)\n",
 		 file, file, ovl_whatisit(inode, realinode), file->f_flags,
@@ -208,7 +208,7 @@
 	old_cred = ovl_override_creds(file_inode(file)->i_sb);
 	ret = vfs_iter_read(real.file, iter, &iocb->ki_pos,
 			    ovl_iocb_to_rwf(iocb));
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 	ovl_file_accessed(file);
 
@@ -244,7 +244,7 @@
 	ret = vfs_iter_write(real.file, iter, &iocb->ki_pos,
 			     ovl_iocb_to_rwf(iocb));
 	file_end_write(real.file);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 	/* Update size */
 	ovl_copyattr(ovl_inode_real(inode), inode);
@@ -271,7 +271,7 @@
 	if (file_inode(real.file) == ovl_inode_upper(file_inode(file))) {
 		old_cred = ovl_override_creds(file_inode(file)->i_sb);
 		ret = vfs_fsync_range(real.file, start, end, datasync);
-		revert_creds(old_cred);
+		ovl_revert_creds(old_cred);
 	}
 
 	fdput(real);
@@ -295,7 +295,7 @@
 
 	old_cred = ovl_override_creds(file_inode(file)->i_sb);
 	ret = call_mmap(vma->vm_file, vma);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 	if (ret) {
 		/* Drop reference count from new vm_file value */
@@ -323,7 +323,7 @@
 
 	old_cred = ovl_override_creds(file_inode(file)->i_sb);
 	ret = vfs_fallocate(real.file, mode, offset, len);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 	/* Update size */
 	ovl_copyattr(ovl_inode_real(inode), inode);
@@ -345,7 +345,7 @@
 
 	old_cred = ovl_override_creds(file_inode(file)->i_sb);
 	ret = vfs_fadvise(real.file, offset, len, advice);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 	fdput(real);
 
@@ -365,7 +365,7 @@
 
 	old_cred = ovl_override_creds(file_inode(file)->i_sb);
 	ret = vfs_ioctl(real.file, cmd, arg);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 	fdput(real);
 
@@ -470,7 +470,7 @@
 						real_out.file, pos_out, len);
 		break;
 	}
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 	/* Update size */
 	ovl_copyattr(ovl_inode_real(inode_out), inode_out);
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index 3b7ed5d..b3c6126 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -64,7 +64,7 @@
 		inode_lock(upperdentry->d_inode);
 		old_cred = ovl_override_creds(dentry->d_sb);
 		err = notify_change(upperdentry, attr, NULL);
-		revert_creds(old_cred);
+		ovl_revert_creds(old_cred);
 		if (!err)
 			ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
 		inode_unlock(upperdentry->d_inode);
@@ -260,7 +260,7 @@
 		stat->nlink = dentry->d_inode->i_nlink;
 
 out:
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 	return err;
 }
@@ -294,7 +294,7 @@
 		mask |= MAY_READ;
 	}
 	err = inode_permission(realinode, mask);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 	return err;
 }
@@ -311,7 +311,7 @@
 
 	old_cred = ovl_override_creds(dentry->d_sb);
 	p = vfs_get_link(ovl_dentry_real(dentry), done);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 	return p;
 }
 
@@ -354,7 +354,7 @@
 		WARN_ON(flags != XATTR_REPLACE);
 		err = vfs_removexattr(realdentry, name);
 	}
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 	/* copy c/mtime */
 	ovl_copyattr(d_inode(realdentry), inode);
@@ -375,7 +375,7 @@
 
 	old_cred = ovl_override_creds(dentry->d_sb);
 	res = vfs_getxattr(realdentry, name, value, size);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 	return res;
 }
 
@@ -399,7 +399,7 @@
 
 	old_cred = ovl_override_creds(dentry->d_sb);
 	res = vfs_listxattr(realdentry, list, size);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 	if (res <= 0 || size == 0)
 		return res;
 
@@ -434,7 +434,7 @@
 
 	old_cred = ovl_override_creds(inode->i_sb);
 	acl = get_acl(realinode, type);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 	return acl;
 }
@@ -472,7 +472,7 @@
 		filemap_write_and_wait(realinode->i_mapping);
 
 	err = realinode->i_op->fiemap(realinode, fieinfo, start, len);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 	return err;
 }
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index efd3723..2fd199e 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -1069,7 +1069,7 @@
 			goto out_free_oe;
 	}
 
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 	if (origin_path) {
 		dput(origin_path->dentry);
 		kfree(origin_path);
@@ -1096,7 +1096,7 @@
 	kfree(upperredirect);
 out:
 	kfree(d.redirect);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 	return ERR_PTR(err);
 }
 
@@ -1150,7 +1150,7 @@
 			dput(this);
 		}
 	}
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 	return positive;
 }
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index a3c0d95..552a19a 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -208,6 +208,7 @@
 void ovl_drop_write(struct dentry *dentry);
 struct dentry *ovl_workdir(struct dentry *dentry);
 const struct cred *ovl_override_creds(struct super_block *sb);
+void ovl_revert_creds(const struct cred *oldcred);
 struct super_block *ovl_same_sb(struct super_block *sb);
 int ovl_can_decode_fh(struct super_block *sb);
 struct dentry *ovl_indexdir(struct super_block *sb);
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
index ec23703..e38eea8 100644
--- a/fs/overlayfs/ovl_entry.h
+++ b/fs/overlayfs/ovl_entry.h
@@ -20,6 +20,7 @@
 	bool nfs_export;
 	int xino;
 	bool metacopy;
+	bool override_creds;
 };
 
 struct ovl_sb {
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index cc8303a..ec591b4 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -289,7 +289,7 @@
 		}
 		inode_unlock(dir->d_inode);
 	}
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 	return err;
 }
@@ -921,7 +921,7 @@
 
 	old_cred = ovl_override_creds(dentry->d_sb);
 	err = ovl_dir_read_merged(dentry, list, &root);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 	if (err)
 		return err;
 
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 0fb0a59..df77062 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -56,6 +56,11 @@
 MODULE_PARM_DESC(ovl_xino_auto_def,
 		 "Auto enable xino feature");
 
+static bool __read_mostly ovl_override_creds_def = true;
+module_param_named(override_creds, ovl_override_creds_def, bool, 0644);
+MODULE_PARM_DESC(ovl_override_creds_def,
+		 "Use mounter's credentials for accesses");
+
 static void ovl_entry_stack_free(struct ovl_entry *oe)
 {
 	unsigned int i;
@@ -362,6 +367,9 @@
 	if (ofs->config.metacopy != ovl_metacopy_def)
 		seq_printf(m, ",metacopy=%s",
 			   ofs->config.metacopy ? "on" : "off");
+	if (ofs->config.override_creds != ovl_override_creds_def)
+		seq_show_option(m, "override_creds",
+				ofs->config.override_creds ? "on" : "off");
 	return 0;
 }
 
@@ -401,6 +409,8 @@
 	OPT_XINO_AUTO,
 	OPT_METACOPY_ON,
 	OPT_METACOPY_OFF,
+	OPT_OVERRIDE_CREDS_ON,
+	OPT_OVERRIDE_CREDS_OFF,
 	OPT_ERR,
 };
 
@@ -419,6 +429,8 @@
 	{OPT_XINO_AUTO,			"xino=auto"},
 	{OPT_METACOPY_ON,		"metacopy=on"},
 	{OPT_METACOPY_OFF,		"metacopy=off"},
+	{OPT_OVERRIDE_CREDS_ON,		"override_creds=on"},
+	{OPT_OVERRIDE_CREDS_OFF,	"override_creds=off"},
 	{OPT_ERR,			NULL}
 };
 
@@ -477,6 +489,7 @@
 	config->redirect_mode = kstrdup(ovl_redirect_mode_def(), GFP_KERNEL);
 	if (!config->redirect_mode)
 		return -ENOMEM;
+	config->override_creds = ovl_override_creds_def;
 
 	while ((p = ovl_next_opt(&opt)) != NULL) {
 		int token;
@@ -557,6 +570,14 @@
 			config->metacopy = false;
 			break;
 
+		case OPT_OVERRIDE_CREDS_ON:
+			config->override_creds = true;
+			break;
+
+		case OPT_OVERRIDE_CREDS_OFF:
+			config->override_creds = false;
+			break;
+
 		default:
 			pr_err("overlayfs: unrecognized mount option \"%s\" or missing value\n", p);
 			return -EINVAL;
@@ -1521,7 +1542,6 @@
 		       ovl_dentry_lower(root_dentry), NULL);
 
 	sb->s_root = root_dentry;
-
 	return 0;
 
 out_free_oe:
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index ace4fe4..470310e 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -40,9 +40,17 @@
 {
 	struct ovl_fs *ofs = sb->s_fs_info;
 
+	if (!ofs->config.override_creds)
+		return NULL;
 	return override_creds(ofs->creator_cred);
 }
 
+void ovl_revert_creds(const struct cred *old_cred)
+{
+	if (old_cred)
+		revert_creds(old_cred);
+}
+
 struct super_block *ovl_same_sb(struct super_block *sb)
 {
 	struct ovl_fs *ofs = sb->s_fs_info;
@@ -783,7 +791,7 @@
 	 * value relative to the upper inode nlink in an upper inode xattr.
 	 */
 	err = ovl_set_nlink_upper(dentry);
-	revert_creds(old_cred);
+	ovl_revert_creds(old_cred);
 
 out:
 	if (err)
@@ -803,7 +811,7 @@
 
 			old_cred = ovl_override_creds(dentry->d_sb);
 			ovl_cleanup_index(dentry);
-			revert_creds(old_cred);
+			ovl_revert_creds(old_cred);
 		}
 
 		mutex_unlock(&OVL_I(d_inode(dentry))->lock);
diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
index 4d96a7c..cad2c60 100644
--- a/fs/proc/Kconfig
+++ b/fs/proc/Kconfig
@@ -100,7 +100,6 @@
 
 config PROC_UID
 	bool "Include /proc/uid/ files"
-	default y
 	depends on PROC_FS && RT_MUTEXES
 	help
 	Provides aggregated per-uid information under /proc/uid.
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 8ae1094..e39bac9 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -256,7 +256,7 @@
 		inode = proc_get_inode(dir->i_sb, de);
 		if (!inode)
 			return ERR_PTR(-ENOMEM);
-		d_set_d_op(dentry, &proc_misc_dentry_ops);
+		d_set_d_op(dentry, de->proc_dops);
 		return d_splice_alias(inode, dentry);
 	}
 	read_unlock(&proc_subdir_lock);
@@ -429,6 +429,8 @@
 	INIT_LIST_HEAD(&ent->pde_openers);
 	proc_set_user(ent, (*parent)->uid, (*parent)->gid);
 
+	ent->proc_dops = &proc_misc_dentry_ops;
+
 out:
 	return ent;
 }
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index c0c7abb..bacad3e 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -44,6 +44,7 @@
 	struct completion *pde_unload_completion;
 	const struct inode_operations *proc_iops;
 	const struct file_operations *proc_fops;
+	const struct dentry_operations *proc_dops;
 	union {
 		const struct seq_operations *seq_ops;
 		int (*single_show)(struct seq_file *, void *);
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
index d5e0fcb..a7b1243 100644
--- a/fs/proc/proc_net.c
+++ b/fs/proc/proc_net.c
@@ -38,6 +38,22 @@
 	return maybe_get_net(PDE_NET(PDE(inode)));
 }
 
+static int proc_net_d_revalidate(struct dentry *dentry, unsigned int flags)
+{
+	return 0;
+}
+
+static const struct dentry_operations proc_net_dentry_ops = {
+	.d_revalidate	= proc_net_d_revalidate,
+	.d_delete	= always_delete_dentry,
+};
+
+static void pde_force_lookup(struct proc_dir_entry *pde)
+{
+	/* /proc/net/ entries can be changed under us by setns(CLONE_NEWNET) */
+	pde->proc_dops = &proc_net_dentry_ops;
+}
+
 static int seq_open_net(struct inode *inode, struct file *file)
 {
 	unsigned int state_size = PDE(inode)->state_size;
@@ -90,6 +106,7 @@
 	p = proc_create_reg(name, mode, &parent, data);
 	if (!p)
 		return NULL;
+	pde_force_lookup(p);
 	p->proc_fops = &proc_net_seq_fops;
 	p->seq_ops = ops;
 	p->state_size = state_size;
@@ -133,6 +150,7 @@
 	p = proc_create_reg(name, mode, &parent, data);
 	if (!p)
 		return NULL;
+	pde_force_lookup(p);
 	p->proc_fops = &proc_net_seq_fops;
 	p->seq_ops = ops;
 	p->state_size = state_size;
@@ -181,6 +199,7 @@
 	p = proc_create_reg(name, mode, &parent, data);
 	if (!p)
 		return NULL;
+	pde_force_lookup(p);
 	p->proc_fops = &proc_net_single_fops;
 	p->single_show = show;
 	return proc_register(parent, p);
@@ -223,6 +242,7 @@
 	p = proc_create_reg(name, mode, &parent, data);
 	if (!p)
 		return NULL;
+	pde_force_lookup(p);
 	p->proc_fops = &proc_net_single_fops;
 	p->single_show = show;
 	p->write = write;
diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
index bfe1639..97fc498 100644
--- a/include/drm/drm_cache.h
+++ b/include/drm/drm_cache.h
@@ -47,6 +47,24 @@
 	return false;
 #elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3)
 	return false;
+#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+	/*
+	 * The DRM driver stack is designed to work with cache coherent devices
+	 * only, but permits an optimization to be enabled in some cases, where
+	 * for some buffers, both the CPU and the GPU use uncached mappings,
+	 * removing the need for DMA snooping and allocation in the CPU caches.
+	 *
+	 * The use of uncached GPU mappings relies on the correct implementation
+	 * of the PCIe NoSnoop TLP attribute by the platform, otherwise the GPU
+	 * will use cached mappings nonetheless. On x86 platforms, this does not
+	 * seem to matter, as uncached CPU mappings will snoop the caches in any
+	 * case. However, on ARM and arm64, enabling this optimization on a
+	 * platform where NoSnoop is ignored results in loss of coherency, which
+	 * breaks correct operation of the device. Since we have no way of
+	 * detecting whether NoSnoop works or not, just disable this
+	 * optimization entirely for ARM and arm64.
+	 */
+	return false;
 #else
 	return true;
 #endif
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 7f78d26..a72efa0 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -634,4 +634,12 @@
 int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
 				 struct drm_dp_mst_port *port, bool power_up);
 
+int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
+			   struct drm_dp_mst_port *port,
+			   int offset, int size, u8 *bytes);
+
+int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
+				 struct drm_dp_mst_port *port,
+				 int offset, int size, u8 *bytes);
+
 #endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-kona.h b/include/dt-bindings/clock/qcom,dispcc-kona.h
index f48b27a..60b8d4a 100644
--- a/include/dt-bindings/clock/qcom,dispcc-kona.h
+++ b/include/dt-bindings/clock/qcom,dispcc-kona.h
@@ -21,57 +21,53 @@
 #define DISP_CC_MDSS_DP_AUX1_CLK_SRC				11
 #define DISP_CC_MDSS_DP_AUX_CLK					12
 #define DISP_CC_MDSS_DP_AUX_CLK_SRC				13
-#define DISP_CC_MDSS_DP_CRYPTO1_CLK				14
-#define DISP_CC_MDSS_DP_CRYPTO1_CLK_SRC				15
-#define DISP_CC_MDSS_DP_CRYPTO_CLK				16
-#define DISP_CC_MDSS_DP_CRYPTO_CLK_SRC				17
-#define DISP_CC_MDSS_DP_LINK1_CLK				18
-#define DISP_CC_MDSS_DP_LINK1_CLK_SRC				19
-#define DISP_CC_MDSS_DP_LINK1_DIV_CLK_SRC			20
-#define DISP_CC_MDSS_DP_LINK1_INTF_CLK				21
-#define DISP_CC_MDSS_DP_LINK_CLK				22
-#define DISP_CC_MDSS_DP_LINK_CLK_SRC				23
-#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC			24
-#define DISP_CC_MDSS_DP_LINK_INTF_CLK				25
-#define DISP_CC_MDSS_DP_PIXEL1_CLK				26
-#define DISP_CC_MDSS_DP_PIXEL1_CLK_SRC				27
-#define DISP_CC_MDSS_DP_PIXEL2_CLK				28
-#define DISP_CC_MDSS_DP_PIXEL2_CLK_SRC				29
-#define DISP_CC_MDSS_DP_PIXEL_CLK				30
-#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC				31
-#define DISP_CC_MDSS_EDP_AUX_CLK				32
-#define DISP_CC_MDSS_EDP_AUX_CLK_SRC				33
-#define DISP_CC_MDSS_EDP_GTC_CLK				34
-#define DISP_CC_MDSS_EDP_GTC_CLK_SRC				35
-#define DISP_CC_MDSS_EDP_LINK_CLK				36
-#define DISP_CC_MDSS_EDP_LINK_CLK_SRC				37
-#define DISP_CC_MDSS_EDP_LINK_DIV_CLK_SRC			38
-#define DISP_CC_MDSS_EDP_LINK_INTF_CLK				39
-#define DISP_CC_MDSS_EDP_PIXEL_CLK				40
-#define DISP_CC_MDSS_EDP_PIXEL_CLK_SRC				41
-#define DISP_CC_MDSS_ESC0_CLK					42
-#define DISP_CC_MDSS_ESC0_CLK_SRC				43
-#define DISP_CC_MDSS_ESC1_CLK					44
-#define DISP_CC_MDSS_ESC1_CLK_SRC				45
-#define DISP_CC_MDSS_MDP_CLK					46
-#define DISP_CC_MDSS_MDP_CLK_SRC				47
-#define DISP_CC_MDSS_MDP_LUT_CLK				48
-#define DISP_CC_MDSS_NON_GDSC_AHB_CLK				49
-#define DISP_CC_MDSS_PCLK0_CLK					50
-#define DISP_CC_MDSS_PCLK0_CLK_SRC				51
-#define DISP_CC_MDSS_PCLK1_CLK					52
-#define DISP_CC_MDSS_PCLK1_CLK_SRC				53
-#define DISP_CC_MDSS_ROT_CLK					54
-#define DISP_CC_MDSS_ROT_CLK_SRC				55
-#define DISP_CC_MDSS_RSCC_AHB_CLK				56
-#define DISP_CC_MDSS_RSCC_VSYNC_CLK				57
-#define DISP_CC_MDSS_VSYNC_CLK					58
-#define DISP_CC_MDSS_VSYNC_CLK_SRC				59
-#define DISP_CC_PLL0						60
-#define DISP_CC_PLL1						61
-#define DISP_CC_SLEEP_CLK					62
-#define DISP_CC_SLEEP_CLK_SRC					63
-#define DISP_CC_XO_CLK						64
+#define DISP_CC_MDSS_DP_LINK1_CLK				14
+#define DISP_CC_MDSS_DP_LINK1_CLK_SRC				15
+#define DISP_CC_MDSS_DP_LINK1_DIV_CLK_SRC			16
+#define DISP_CC_MDSS_DP_LINK1_INTF_CLK				17
+#define DISP_CC_MDSS_DP_LINK_CLK				18
+#define DISP_CC_MDSS_DP_LINK_CLK_SRC				19
+#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC			20
+#define DISP_CC_MDSS_DP_LINK_INTF_CLK				21
+#define DISP_CC_MDSS_DP_PIXEL1_CLK				22
+#define DISP_CC_MDSS_DP_PIXEL1_CLK_SRC				23
+#define DISP_CC_MDSS_DP_PIXEL2_CLK				24
+#define DISP_CC_MDSS_DP_PIXEL2_CLK_SRC				25
+#define DISP_CC_MDSS_DP_PIXEL_CLK				26
+#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC				27
+#define DISP_CC_MDSS_EDP_AUX_CLK				28
+#define DISP_CC_MDSS_EDP_AUX_CLK_SRC				29
+#define DISP_CC_MDSS_EDP_GTC_CLK				30
+#define DISP_CC_MDSS_EDP_GTC_CLK_SRC				31
+#define DISP_CC_MDSS_EDP_LINK_CLK				32
+#define DISP_CC_MDSS_EDP_LINK_CLK_SRC				33
+#define DISP_CC_MDSS_EDP_LINK_DIV_CLK_SRC			34
+#define DISP_CC_MDSS_EDP_LINK_INTF_CLK				35
+#define DISP_CC_MDSS_EDP_PIXEL_CLK				36
+#define DISP_CC_MDSS_EDP_PIXEL_CLK_SRC				37
+#define DISP_CC_MDSS_ESC0_CLK					38
+#define DISP_CC_MDSS_ESC0_CLK_SRC				39
+#define DISP_CC_MDSS_ESC1_CLK					40
+#define DISP_CC_MDSS_ESC1_CLK_SRC				41
+#define DISP_CC_MDSS_MDP_CLK					42
+#define DISP_CC_MDSS_MDP_CLK_SRC				43
+#define DISP_CC_MDSS_MDP_LUT_CLK				44
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK				45
+#define DISP_CC_MDSS_PCLK0_CLK					46
+#define DISP_CC_MDSS_PCLK0_CLK_SRC				47
+#define DISP_CC_MDSS_PCLK1_CLK					48
+#define DISP_CC_MDSS_PCLK1_CLK_SRC				49
+#define DISP_CC_MDSS_ROT_CLK					50
+#define DISP_CC_MDSS_ROT_CLK_SRC				51
+#define DISP_CC_MDSS_RSCC_AHB_CLK				52
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK				53
+#define DISP_CC_MDSS_VSYNC_CLK					54
+#define DISP_CC_MDSS_VSYNC_CLK_SRC				55
+#define DISP_CC_PLL0						56
+#define DISP_CC_PLL1						57
+#define DISP_CC_SLEEP_CLK					58
+#define DISP_CC_SLEEP_CLK_SRC					59
+#define DISP_CC_XO_CLK						60
 
 /* DISP_CC resets */
 #define DISP_CC_MDSS_CORE_BCR					0
diff --git a/include/dt-bindings/clock/qcom,rpmh.h b/include/dt-bindings/clock/qcom,rpmh.h
index 2b122c1..d6c1dff 100644
--- a/include/dt-bindings/clock/qcom,rpmh.h
+++ b/include/dt-bindings/clock/qcom,rpmh.h
@@ -23,5 +23,7 @@
 #define RPMH_RF_CLKD3_A				15
 #define RPMH_RF_CLKD4				16
 #define RPMH_RF_CLKD4_A				17
+#define RPMH_RF_CLK4				18
+#define RPMH_RF_CLK4_A				19
 
 #endif
diff --git a/include/dt-bindings/sound/qcom,bolero-clk-rsc.h b/include/dt-bindings/sound/qcom,bolero-clk-rsc.h
new file mode 100644
index 0000000..038c066
--- /dev/null
+++ b/include/dt-bindings/sound/qcom,bolero-clk-rsc.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __BOLERO_CODEC_CLK_RSC_H
+#define __BOLERO_CODEC_CLK_RSC_H
+
+/* Bolero clock types */
+#define TX_CORE_CLK	0
+#define RX_CORE_CLK	1
+#define WSA_CORE_CLK	2
+#define VA_CORE_CLK	3
+#define TX_NPL_CLK	4
+#define RX_NPL_CLK	5
+#define WSA_NPL_CLK	6
+#define VA_NPL_CLK	7
+#define MAX_CLK	8
+
+#endif /* __BOLERO_CODEC_CLK_RSC_H */
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index c311571..07e02d6 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -190,6 +190,7 @@
 	struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
 	struct rb_root cgwb_congested_tree; /* their congested states */
 	struct mutex cgwb_release_mutex;  /* protect shutdown of wb structs */
+	struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */
 #else
 	struct bdi_writeback_congested *wb_congested;
 #endif
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 5e1694f..6f9ea86 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -32,6 +32,7 @@
 struct kernfs_ops;
 struct kernfs_open_file;
 struct seq_file;
+struct poll_table_struct;
 
 #define MAX_CGROUP_TYPE_NAMELEN 32
 #define MAX_CGROUP_ROOT_NAMELEN 64
@@ -573,6 +574,9 @@
 	ssize_t (*write)(struct kernfs_open_file *of,
 			 char *buf, size_t nbytes, loff_t off);
 
+	__poll_t (*poll)(struct kernfs_open_file *of,
+			 struct poll_table_struct *pt);
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lock_class_key	lockdep_key;
 #endif
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 6881973..dae9863 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -254,20 +254,12 @@
 static struct freq_attr _name =			\
 __ATTR(_name, 0200, NULL, store_##_name)
 
-struct global_attr {
-	struct attribute attr;
-	ssize_t (*show)(struct kobject *kobj,
-			struct attribute *attr, char *buf);
-	ssize_t (*store)(struct kobject *a, struct attribute *b,
-			 const char *c, size_t count);
-};
-
 #define define_one_global_ro(_name)		\
-static struct global_attr _name =		\
+static struct kobj_attribute _name =		\
 __ATTR(_name, 0444, show_##_name, NULL)
 
 #define define_one_global_rw(_name)		\
-static struct global_attr _name =		\
+static struct kobj_attribute _name =		\
 __ATTR(_name, 0644, show_##_name, store_##_name)
 
 
diff --git a/include/linux/cpufreq_times.h b/include/linux/cpufreq_times.h
index 757bf0c..0eb6dc9 100644
--- a/include/linux/cpufreq_times.h
+++ b/include/linux/cpufreq_times.h
@@ -27,7 +27,8 @@
 			    struct pid *pid, struct task_struct *p);
 void cpufreq_acct_update_power(struct task_struct *p, u64 cputime);
 void cpufreq_times_create_policy(struct cpufreq_policy *policy);
-void cpufreq_times_record_transition(struct cpufreq_freqs *freq);
+void cpufreq_times_record_transition(struct cpufreq_policy *policy,
+                                     unsigned int new_freq);
 void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end);
 int single_uid_time_in_state_open(struct inode *inode, struct file *file);
 #else
@@ -38,7 +39,7 @@
 					     u64 cputime) {}
 static inline void cpufreq_times_create_policy(struct cpufreq_policy *policy) {}
 static inline void cpufreq_times_record_transition(
-	struct cpufreq_freqs *freq) {}
+	struct cpufreq_policy *policy, unsigned int new_freq) {}
 static inline void cpufreq_task_times_remove_uids(uid_t uid_start,
 						  uid_t uid_end) {}
 #endif /* CONFIG_CPU_FREQ_TIMES */
diff --git a/include/linux/dma-buf-ref.h b/include/linux/dma-buf-ref.h
new file mode 100644
index 0000000..5bdf1f2
--- /dev/null
+++ b/include/linux/dma-buf-ref.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DMA_BUF_REF_H
+#define _DMA_BUF_REF_H
+
+struct dma_buf;
+struct seq_file;
+
+#ifdef CONFIG_DEBUG_DMA_BUF_REF
+void dma_buf_ref_init(struct dma_buf *b);
+void dma_buf_ref_destroy(struct dma_buf *b);
+void dma_buf_ref_mod(struct dma_buf *b, int nr);
+int dma_buf_ref_show(struct seq_file *s, struct dma_buf *dmabuf);
+
+#else
+static inline void dma_buf_ref_init(struct dma_buf *b) {}
+static inline void dma_buf_ref_destroy(struct dma_buf *b) {}
+static inline void dma_buf_ref_mod(struct dma_buf *b, int nr) {}
+static inline int dma_buf_ref_show(struct seq_file *s, struct dma_buf *dmabuf)
+{
+	return -ENOMEM;
+}
+#endif
+
+
+#endif /* _DMA_BUF_REF_H */
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 38ebfdc..2ba99cc 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -31,6 +31,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/fs.h>
 #include <linux/dma-fence.h>
+#include <linux/dma-buf-ref.h>
 #include <linux/wait.h>
 
 struct device;
@@ -381,6 +382,7 @@
  * @vmap_ptr: the current vmap ptr if vmapping_counter > 0
  * @exp_name: name of the exporter; useful for debugging.
  * @name: unique name for the buffer
+ * @ktime: time (ktime_t, nanosecond-resolution) at which the buffer was created
  * @owner: pointer to exporter module; used for refcounting when exporter is a
  *         kernel module.
  * @list_node: node for dma_buf accounting and debugging.
@@ -409,6 +411,7 @@
 	void *vmap_ptr;
 	const char *exp_name;
 	char *name;
+	ktime_t ktime;
 	struct module *owner;
 	struct list_head list_node;
 	void *priv;
@@ -423,6 +426,8 @@
 
 		__poll_t active;
 	} cb_excl, cb_shared;
+
+	struct list_head refs;
 };
 
 /**
@@ -495,6 +500,7 @@
 static inline void get_dma_buf(struct dma_buf *dmabuf)
 {
 	get_file(dmabuf->file);
+	dma_buf_ref_mod(dmabuf, 1);
 }
 
 struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
diff --git a/include/linux/hdcp_qseecom.h b/include/linux/hdcp_qseecom.h
index 96b24a1..34ffb9f 100644
--- a/include/linux/hdcp_qseecom.h
+++ b/include/linux/hdcp_qseecom.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef __HDCP_QSEECOM_H
@@ -11,6 +11,7 @@
 
 enum hdcp2_app_cmd {
 	HDCP2_CMD_START,
+	HDCP2_CMD_START_AUTH,
 	HDCP2_CMD_STOP,
 	HDCP2_CMD_PROCESS_MSG,
 	HDCP2_CMD_TIMEOUT,
@@ -35,6 +36,8 @@
 	switch (cmd) {
 	case HDCP2_CMD_START:
 		return HDCP_QSEECOM_ENUM_STR(HDCP2_CMD_START);
+	case HDCP2_CMD_START_AUTH:
+		return HDCP_QSEECOM_ENUM_STR(HDCP2_CMD_START_AUTH);
 	case HDCP2_CMD_STOP:
 		return HDCP_QSEECOM_ENUM_STR(HDCP2_CMD_STOP);
 	case HDCP2_CMD_PROCESS_MSG:
diff --git a/include/linux/ipa.h b/include/linux/ipa.h
index 0e644e5..39f8279 100644
--- a/include/linux/ipa.h
+++ b/include/linux/ipa.h
@@ -1178,6 +1178,7 @@
 
 enum ipa_smmu_client_type {
 	IPA_SMMU_WLAN_CLIENT,
+	IPA_SMMU_AP_CLIENT,
 	IPA_SMMU_CLIENT_MAX
 };
 
diff --git a/include/linux/ipa_wdi3.h b/include/linux/ipa_wdi3.h
index aca7fba..3c8a72c 100644
--- a/include/linux/ipa_wdi3.h
+++ b/include/linux/ipa_wdi3.h
@@ -97,10 +97,12 @@
  * @transfer_ring_size:  size of the transfer ring
  * @transfer_ring_doorbell_pa:  physical address of the doorbell that
 	IPA uC will update the tailpointer of the transfer ring
+ * @is_txr_rn_db_pcie_addr: Bool indicating whether the txr ring DB is a PCIe address
  * @event_ring_base_pa:  physical address of the base of the event ring
  * @event_ring_size:  event ring size
  * @event_ring_doorbell_pa:  physical address of the doorbell that IPA uC
 	will update the headpointer of the event ring
+ * @is_evt_rn_db_pcie_addr: Bool indicating whether the evt ring DB is a PCIe address
  * @num_pkt_buffers:  Number of pkt buffers allocated. The size of the event
 	ring and the transfer ring has to be atleast ( num_pkt_buffers + 1)
  * @pkt_offset: packet offset (wdi header length)
@@ -113,10 +115,12 @@
 	phys_addr_t  transfer_ring_base_pa;
 	u32  transfer_ring_size;
 	phys_addr_t  transfer_ring_doorbell_pa;
+	bool is_txr_rn_db_pcie_addr;
 
 	phys_addr_t  event_ring_base_pa;
 	u32  event_ring_size;
 	phys_addr_t  event_ring_doorbell_pa;
+	bool is_evt_rn_db_pcie_addr;
 	u16  num_pkt_buffers;
 
 	u16 pkt_offset;
@@ -132,10 +136,12 @@
  * @transfer_ring_size:  size of the transfer ring
  * @transfer_ring_doorbell_pa:  physical address of the doorbell that
 	IPA uC will update the tailpointer of the transfer ring
+ * @is_txr_rn_db_pcie_addr: Bool indicating whether the txr ring DB is a PCIe address
  * @event_ring_base_pa:  physical address of the base of the event ring
  * @event_ring_size:  event ring size
  * @event_ring_doorbell_pa:  physical address of the doorbell that IPA uC
 	will update the headpointer of the event ring
+ * @is_evt_rn_db_pcie_addr: Bool indicating whether the evt ring DB is a PCIe address
  * @num_pkt_buffers:  Number of pkt buffers allocated. The size of the event
 	ring and the transfer ring has to be atleast ( num_pkt_buffers + 1)
  * @pkt_offset: packet offset (wdi header length)
@@ -148,10 +154,12 @@
 	struct sg_table  transfer_ring_base;
 	u32  transfer_ring_size;
 	phys_addr_t  transfer_ring_doorbell_pa;
+	bool is_txr_rn_db_pcie_addr;
 
 	struct sg_table  event_ring_base;
 	u32  event_ring_size;
 	phys_addr_t  event_ring_doorbell_pa;
+	bool is_evt_rn_db_pcie_addr;
 	u16  num_pkt_buffers;
 
 	u16 pkt_offset;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 201de12..c9bffda 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -1151,7 +1151,8 @@
 void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace);
 int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk);
 void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk);
-int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu);
+int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
+				unsigned int *mapped_cpu);
 void irq_matrix_reserve(struct irq_matrix *m);
 void irq_matrix_remove_reserved(struct irq_matrix *m);
 int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 8bdbb5f..3188c0b 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -319,7 +319,7 @@
 #define GITS_TYPER_PLPIS		(1UL << 0)
 #define GITS_TYPER_VLPIS		(1UL << 1)
 #define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT	4
-#define GITS_TYPER_ITT_ENTRY_SIZE(r)	((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
+#define GITS_TYPER_ITT_ENTRY_SIZE(r)	((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0xf) + 1)
 #define GITS_TYPER_IDBITS_SHIFT		8
 #define GITS_TYPER_DEVBITS_SHIFT	13
 #define GITS_TYPER_DEVBITS(r)		((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 814643f..444869d 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -25,6 +25,7 @@
 struct vm_area_struct;
 struct super_block;
 struct file_system_type;
+struct poll_table_struct;
 
 struct kernfs_open_node;
 struct kernfs_iattrs;
@@ -261,6 +262,9 @@
 	ssize_t (*write)(struct kernfs_open_file *of, char *buf, size_t bytes,
 			 loff_t off);
 
+	__poll_t (*poll)(struct kernfs_open_file *of,
+			 struct poll_table_struct *pt);
+
 	int (*mmap)(struct kernfs_open_file *of, struct vm_area_struct *vma);
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -350,6 +354,8 @@
 int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
 		     const char *new_name, const void *new_ns);
 int kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr);
+__poll_t kernfs_generic_poll(struct kernfs_open_file *of,
+			     struct poll_table_struct *pt);
 void kernfs_notify(struct kernfs_node *kn);
 
 const void *kernfs_super_ns(struct super_block *sb);
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index c196176..edf8f86 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -4,7 +4,6 @@
 /* Simple interface for creating and stopping kernel threads without mess. */
 #include <linux/err.h>
 #include <linux/sched.h>
-#include <linux/cgroup.h>
 
 __printf(4, 5)
 struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
@@ -198,6 +197,8 @@
 
 void kthread_destroy_worker(struct kthread_worker *worker);
 
+struct cgroup_subsys_state;
+
 #ifdef CONFIG_BLK_CGROUP
 void kthread_associate_blkcg(struct cgroup_subsys_state *css);
 struct cgroup_subsys_state *kthread_blkcg(void);
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index d8b7855..90e2653 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -21,14 +21,16 @@
  * walkers which rely on the fully initialized page->flags and others
  * should use this rather than pfn_valid && pfn_to_page
  */
-#define pfn_to_online_page(pfn)				\
-({							\
-	struct page *___page = NULL;			\
-	unsigned long ___nr = pfn_to_section_nr(pfn);	\
-							\
-	if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr))\
-		___page = pfn_to_page(pfn);		\
-	___page;					\
+#define pfn_to_online_page(pfn)					   \
+({								   \
+	struct page *___page = NULL;				   \
+	unsigned long ___pfn = pfn;				   \
+	unsigned long ___nr = pfn_to_section_nr(___pfn);	   \
+								   \
+	if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr) && \
+	    pfn_valid_within(___pfn))				   \
+		___page = pfn_to_page(___pfn);			   \
+	___page;						   \
 })
 
 /*
diff --git a/include/linux/msm_gsi.h b/include/linux/msm_gsi.h
index fd39487..6fc8151 100644
--- a/include/linux/msm_gsi.h
+++ b/include/linux/msm_gsi.h
@@ -702,6 +702,20 @@
 };
 
 /**
+* gsi_mhip_channel_scratch - MHI PRIME protocol SW config area of
+* channel scratch
+* @assert_bit_40: Valid only for non-host channels.
+* Set to 1 for MHI' channels when running over PCIe.
+* @host_channel: Set to 1 for MHIP channel running on host.
+*
+*/
+struct __packed gsi_mhip_channel_scratch {
+	uint32_t assert_bit_40:1;
+	uint32_t host_channel:1;
+	uint32_t resvd1:30;
+};
+
+/**
  * gsi_11ad_rx_channel_scratch - 11AD protocol SW config area of
  * RX channel scratch
  *
@@ -789,6 +803,7 @@
 	struct __packed gsi_11ad_rx_channel_scratch rx_11ad;
 	struct __packed gsi_11ad_tx_channel_scratch tx_11ad;
 	struct __packed gsi_wdi3_channel_scratch wdi3;
+	struct __packed gsi_mhip_channel_scratch mhip;
 	struct __packed {
 		uint32_t word1;
 		uint32_t word2;
@@ -829,6 +844,22 @@
 };
 
 /**
+* gsi_mhip_evt_scratch - MHI PRIME protocol SW config area of
+* event scratch
+*/
+struct __packed gsi_mhip_evt_scratch {
+	uint32_t rp_mod_threshold:8;
+	uint32_t rp_mod_timer:4;
+	uint32_t rp_mod_counter:8;
+	uint32_t rp_mod_timer_id:4;
+	uint32_t rp_mod_timer_running:1;
+	uint32_t resvd1:7;
+	uint32_t fixed_buffer_sz:16;
+	uint32_t resvd2:16;
+};
+
+
+/**
  * gsi_xdci_evt_scratch - xDCI protocol SW config area of
  * event scratch
  *
@@ -893,6 +924,7 @@
 	struct __packed gsi_wdi_evt_scratch wdi;
 	struct __packed gsi_11ad_evt_scratch w11ad;
 	struct __packed gsi_wdi3_evt_scratch wdi3;
+	struct __packed gsi_mhip_evt_scratch mhip;
 	struct __packed {
 		uint32_t word1;
 		uint32_t word2;
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 100247c..dc56925 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -323,6 +323,7 @@
 	POWER_SUPPLY_PROP_QC_OPTI_DISABLE,
 	POWER_SUPPLY_PROP_CC_SOC,
 	POWER_SUPPLY_PROP_BATT_AGE_LEVEL,
+	POWER_SUPPLY_PROP_SCALE_MODE_EN,
 	/* Charge pump properties */
 	POWER_SUPPLY_PROP_CP_STATUS1,
 	POWER_SUPPLY_PROP_CP_STATUS2,
diff --git a/include/linux/psi.h b/include/linux/psi.h
index 7006008..af892c2 100644
--- a/include/linux/psi.h
+++ b/include/linux/psi.h
@@ -4,6 +4,7 @@
 #include <linux/jump_label.h>
 #include <linux/psi_types.h>
 #include <linux/sched.h>
+#include <linux/poll.h>
 
 struct seq_file;
 struct css_set;
@@ -26,6 +27,13 @@
 int psi_cgroup_alloc(struct cgroup *cgrp);
 void psi_cgroup_free(struct cgroup *cgrp);
 void cgroup_move_task(struct task_struct *p, struct css_set *to);
+
+struct psi_trigger *psi_trigger_create(struct psi_group *group,
+			char *buf, size_t nbytes, enum psi_res res);
+void psi_trigger_replace(void **trigger_ptr, struct psi_trigger *t);
+
+__poll_t psi_trigger_poll(void **trigger_ptr, struct file *file,
+			poll_table *wait);
 #endif
 
 #else /* CONFIG_PSI */
diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h
index 2cf422d..07aaf9b 100644
--- a/include/linux/psi_types.h
+++ b/include/linux/psi_types.h
@@ -1,8 +1,11 @@
 #ifndef _LINUX_PSI_TYPES_H
 #define _LINUX_PSI_TYPES_H
 
+#include <linux/kthread.h>
 #include <linux/seqlock.h>
 #include <linux/types.h>
+#include <linux/kref.h>
+#include <linux/wait.h>
 
 #ifdef CONFIG_PSI
 
@@ -11,7 +14,7 @@
 	NR_IOWAIT,
 	NR_MEMSTALL,
 	NR_RUNNING,
-	NR_PSI_TASK_COUNTS,
+	NR_PSI_TASK_COUNTS = 3,
 };
 
 /* Task state bitmasks */
@@ -24,7 +27,7 @@
 	PSI_IO,
 	PSI_MEM,
 	PSI_CPU,
-	NR_PSI_RESOURCES,
+	NR_PSI_RESOURCES = 3,
 };
 
 /*
@@ -41,7 +44,13 @@
 	PSI_CPU_SOME,
 	/* Only per-CPU, to weigh the CPU in the global average: */
 	PSI_NONIDLE,
-	NR_PSI_STATES,
+	NR_PSI_STATES = 6,
+};
+
+enum psi_aggregators {
+	PSI_AVGS = 0,
+	PSI_POLL,
+	NR_PSI_AGGREGATORS,
 };
 
 struct psi_group_cpu {
@@ -53,6 +62,9 @@
 	/* States of the tasks belonging to this group */
 	unsigned int tasks[NR_PSI_TASK_COUNTS];
 
+	/* Aggregate pressure state derived from the tasks */
+	u32 state_mask;
+
 	/* Period time sampling buckets for each state of interest (ns) */
 	u32 times[NR_PSI_STATES];
 
@@ -62,25 +74,94 @@
 	/* 2nd cacheline updated by the aggregator */
 
 	/* Delta detection against the sampling buckets */
-	u32 times_prev[NR_PSI_STATES] ____cacheline_aligned_in_smp;
+	u32 times_prev[NR_PSI_AGGREGATORS][NR_PSI_STATES]
+			____cacheline_aligned_in_smp;
+};
+
+/* PSI growth tracking window */
+struct psi_window {
+	/* Window size in ns */
+	u64 size;
+
+	/* Start time of the current window in ns */
+	u64 start_time;
+
+	/* Value at the start of the window */
+	u64 start_value;
+
+	/* Value growth in the previous window */
+	u64 prev_growth;
+};
+
+struct psi_trigger {
+	/* PSI state being monitored by the trigger */
+	enum psi_states state;
+
+	/* User-specified threshold in ns */
+	u64 threshold;
+
+	/* List node inside triggers list */
+	struct list_head node;
+
+	/* Backpointer needed during trigger destruction */
+	struct psi_group *group;
+
+	/* Wait queue for polling */
+	wait_queue_head_t event_wait;
+
+	/* Pending event flag */
+	int event;
+
+	/* Tracking window */
+	struct psi_window win;
+
+	/*
+	 * Time last event was generated. Used for rate-limiting
+	 * events to one per window
+	 */
+	u64 last_event_time;
+
+	/* Refcounting to prevent premature destruction */
+	struct kref refcount;
 };
 
 struct psi_group {
-	/* Protects data updated during an aggregation */
-	struct mutex stat_lock;
+	/* Protects data used by the aggregator */
+	struct mutex avgs_lock;
 
 	/* Per-cpu task state & time tracking */
 	struct psi_group_cpu __percpu *pcpu;
 
-	/* Periodic aggregation state */
-	u64 total_prev[NR_PSI_STATES - 1];
-	u64 last_update;
-	u64 next_update;
-	struct delayed_work clock_work;
+	/* Running pressure averages */
+	u64 avg_total[NR_PSI_STATES - 1];
+	u64 avg_last_update;
+	u64 avg_next_update;
+
+	/* Aggregator work control */
+	struct delayed_work avgs_work;
 
 	/* Total stall times and sampled pressure averages */
-	u64 total[NR_PSI_STATES - 1];
+	u64 total[NR_PSI_AGGREGATORS][NR_PSI_STATES - 1];
 	unsigned long avg[NR_PSI_STATES - 1][3];
+
+	/* Monitor work control */
+	atomic_t poll_scheduled;
+	struct kthread_worker __rcu *poll_kworker;
+	struct kthread_delayed_work poll_work;
+
+	/* Protects data used by the monitor */
+	struct mutex trigger_lock;
+
+	/* Configured polling triggers */
+	struct list_head triggers;
+	u32 nr_triggers[NR_PSI_STATES - 1];
+	u32 poll_states;
+	u64 poll_min_period;
+
+	/* Total stall times at the start of monitor activation */
+	u64 polling_total[NR_PSI_STATES - 1];
+	u64 polling_next_update;
+	u64 polling_until;
 };
 
 #else /* CONFIG_PSI */
diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
index 9df8d9b..bb7faa6 100644
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/qcom-geni-se.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _LINUX_QCOM_GENI_SE
@@ -37,9 +37,16 @@
  * @m_ahb_clk:		Handle to the primary AHB clock.
  * @s_ahb_clk:		Handle to the secondary AHB clock.
  * @ab_list:		List Head of Average bus banwidth list.
+ * @ab_list_noc:	List Head of Average DDR path bus
+			bandwidth list.
  * @ab:			Average bus bandwidth request value.
+ * @ab_noc:		Average DDR path bus bandwidth request value.
  * @ib_list:		List Head of Instantaneous bus banwidth list.
+ * @ib_list_noc:	List Head of Instantaneous DDR path bus
+			bandwidth list.
  * @ib:			Instantaneous bus bandwidth request value.
+ * @ib_noc:		Instantaneous DDR path bus bandwidth
+			request value.
  * @geni_pinctrl:	Handle to the pinctrl configuration.
  * @geni_gpio_active:	Handle to the default/active pinctrl state.
  * @geni_gpi_sleep:	Handle to the sleep pinctrl state.
@@ -51,9 +58,13 @@
 	struct clk *m_ahb_clk;
 	struct clk *s_ahb_clk;
 	struct list_head ab_list;
+	struct list_head ab_list_noc;
 	unsigned long ab;
+	unsigned long ab_noc;
 	struct list_head ib_list;
+	struct list_head ib_list_noc;
 	unsigned long ib;
+	unsigned long ib_noc;
 	struct pinctrl *geni_pinctrl;
 	struct pinctrl_state *geni_gpio_active;
 	struct pinctrl_state *geni_gpio_sleep;
diff --git a/include/linux/rq_stats.h b/include/linux/rq_stats.h
index a0bccf1..59440af 100644
--- a/include/linux/rq_stats.h
+++ b/include/linux/rq_stats.h
@@ -1,17 +1,11 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2011,2013-2014,2019, The Linux Foundation. All rights reserved.
- *
  */
 
 struct rq_data {
-	unsigned int rq_avg;
-	unsigned long rq_poll_jiffies;
 	unsigned long def_timer_jiffies;
-	unsigned long rq_poll_last_jiffy;
-	unsigned long rq_poll_total_jiffies;
 	unsigned long def_timer_last_jiffy;
-	unsigned int hotplug_disabled;
 	int64_t def_start_time;
 	struct attribute_group *attr_group;
 	struct kobject *kobj;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1b79884..8bd1a9b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -25,7 +25,6 @@
 #include <linux/latencytop.h>
 #include <linux/sched/prio.h>
 #include <linux/signal_types.h>
-#include <linux/psi_types.h>
 #include <linux/mm_types_task.h>
 #include <linux/task_io_accounting.h>
 #include <linux/rseq.h>
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 7ddfc65..4335bd7 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -184,6 +184,7 @@
 	struct clk *pclk;
 	struct clk *clk_ptp_ref;
 	unsigned int clk_ptp_rate;
+	unsigned int clk_ref_rate;
 	struct reset_control *stmmac_rst;
 	struct stmmac_axi *axi;
 	int has_gmac4;
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index ec9d6bc..fabee6d 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -276,7 +276,7 @@
 int  bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
 int  bt_sock_wait_ready(struct sock *sk, unsigned long flags);
 
-void bt_accept_enqueue(struct sock *parent, struct sock *sk);
+void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh);
 void bt_accept_unlink(struct sock *sk);
 struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock);
 
diff --git a/include/net/icmp.h b/include/net/icmp.h
index 3ef2743..8665bf2 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -22,6 +22,7 @@
 
 #include <net/inet_sock.h>
 #include <net/snmp.h>
+#include <net/ip.h>
 
 struct icmp_err {
   int		errno;
@@ -39,7 +40,13 @@
 struct sk_buff;
 struct net;
 
-void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info);
+void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
+		 const struct ip_options *opt);
+static inline void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
+{
+	__icmp_send(skb_in, type, code, info, &IPCB(skb_in)->opt);
+}
+
 int icmp_rcv(struct sk_buff *skb);
 void icmp_err(struct sk_buff *skb, u32 info);
 int icmp_init(void);
diff --git a/include/net/ip.h b/include/net/ip.h
index ddaa2bb5..0693b82 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -641,6 +641,8 @@
 }
 
 void ip_options_fragment(struct sk_buff *skb);
+int __ip_options_compile(struct net *net, struct ip_options *opt,
+			 struct sk_buff *skb, __be32 *info);
 int ip_options_compile(struct net *net, struct ip_options *opt,
 		       struct sk_buff *skb);
 int ip_options_get(struct net *net, struct ip_options_rcu **optp,
@@ -690,7 +692,7 @@
 int ip_misc_proc_init(void);
 #endif
 
-int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto,
+int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
 				struct netlink_ext_ack *extack);
 
 #endif	/* _IP_H */
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index a6d0009..c44da48 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -47,7 +47,10 @@
 struct qdisc_skb_head {
 	struct sk_buff	*head;
 	struct sk_buff	*tail;
-	__u32		qlen;
+	union {
+		u32		qlen;
+		atomic_t	atomic_qlen;
+	};
 	spinlock_t	lock;
 };
 
@@ -384,27 +387,19 @@
 	BUILD_BUG_ON(sizeof(qcb->data) < sz);
 }
 
-static inline int qdisc_qlen_cpu(const struct Qdisc *q)
-{
-	return this_cpu_ptr(q->cpu_qstats)->qlen;
-}
-
 static inline int qdisc_qlen(const struct Qdisc *q)
 {
 	return q->q.qlen;
 }
 
-static inline int qdisc_qlen_sum(const struct Qdisc *q)
+static inline u32 qdisc_qlen_sum(const struct Qdisc *q)
 {
-	__u32 qlen = q->qstats.qlen;
-	int i;
+	u32 qlen = q->qstats.qlen;
 
-	if (q->flags & TCQ_F_NOLOCK) {
-		for_each_possible_cpu(i)
-			qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
-	} else {
+	if (q->flags & TCQ_F_NOLOCK)
+		qlen += atomic_read(&q->q.atomic_qlen);
+	else
 		qlen += q->q.qlen;
-	}
 
 	return qlen;
 }
@@ -776,14 +771,14 @@
 	this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
 }
 
-static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
+static inline void qdisc_qstats_atomic_qlen_inc(struct Qdisc *sch)
 {
-	this_cpu_inc(sch->cpu_qstats->qlen);
+	atomic_inc(&sch->q.atomic_qlen);
 }
 
-static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
+static inline void qdisc_qstats_atomic_qlen_dec(struct Qdisc *sch)
 {
-	this_cpu_dec(sch->cpu_qstats->qlen);
+	atomic_dec(&sch->q.atomic_qlen);
 }
 
 static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
diff --git a/include/soc/qcom/qmi_rmnet.h b/include/soc/qcom/qmi_rmnet.h
index 019ac14..f75e538 100644
--- a/include/soc/qcom/qmi_rmnet.h
+++ b/include/soc/qcom/qmi_rmnet.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _QMI_RMNET_H
@@ -20,6 +20,7 @@
 void qmi_rmnet_qmi_exit(void *qmi_pt, void *port);
 void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt);
 void qmi_rmnet_enable_all_flows(struct net_device *dev);
+bool qmi_rmnet_all_flows_enabled(struct net_device *dev);
 #else
 static inline void qmi_rmnet_qmi_exit(void *qmi_pt, void *port)
 {
@@ -34,6 +35,12 @@
 qmi_rmnet_enable_all_flows(struct net_device *dev)
 {
 }
+
+static inline bool
+qmi_rmnet_all_flows_enabled(struct net_device *dev)
+{
+	return true;
+}
 #endif
 
 #ifdef CONFIG_QCOM_QMI_DFC
@@ -71,7 +78,7 @@
 void qmi_rmnet_work_init(void *port);
 void qmi_rmnet_work_exit(void *port);
 void qmi_rmnet_work_maybe_restart(void *port);
-void qmi_rmnet_work_restart(void *port);
+void qmi_rmnet_set_dl_msg_active(void *port);
 
 int qmi_rmnet_ps_ind_register(void *port,
 			      struct qmi_rmnet_ps_ind *ps_ind);
@@ -88,18 +95,16 @@
 static inline void qmi_rmnet_work_init(void *port)
 {
 }
-static inline void qmi_rmnet_work_restart(void *port)
-{
-
-}
 static inline void qmi_rmnet_work_exit(void *port)
 {
 }
-
 static inline void qmi_rmnet_work_maybe_restart(void *port)
 {
 
 }
+static inline void qmi_rmnet_set_dl_msg_active(void *port)
+{
+}
 
 static inline int qmi_rmnet_ps_ind_register(struct rmnet_port *port,
 				     struct qmi_rmnet_ps_ind *ps_ind)
diff --git a/include/soc/qcom/rmnet_qmi.h b/include/soc/qcom/rmnet_qmi.h
index a5289c8..9096b10 100644
--- a/include/soc/qcom/rmnet_qmi.h
+++ b/include/soc/qcom/rmnet_qmi.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _RMNET_QMI_H
@@ -19,6 +19,7 @@
 void rmnet_reset_qmi_pt(void *port);
 void rmnet_init_qmi_pt(void *port, void *qmi);
 void rmnet_enable_all_flows(void *port);
+bool rmnet_all_flows_enabled(void *port);
 void rmnet_set_powersave_format(void *port);
 void rmnet_clear_powersave_format(void *port);
 void rmnet_get_packets(void *port, u64 *rx, u64 *tx);
@@ -57,6 +58,11 @@
 {
 }
 
+static inline bool rmnet_all_flows_enabled(void *port)
+{
+	return true;
+}
+
 static inline void rmnet_set_port_format(void *port)
 {
 }
diff --git a/include/soc/qcom/secure_buffer.h b/include/soc/qcom/secure_buffer.h
index 03f03319..602055a 100644
--- a/include/soc/qcom/secure_buffer.h
+++ b/include/soc/qcom/secure_buffer.h
@@ -48,6 +48,12 @@
 			u32 *source_vm_list, int source_nelems,
 			int *dest_vmids, int *dest_perms,
 			int dest_nelems);
+
+int try_hyp_assign_table(struct sg_table *table,
+			 u32 *source_vm_list, int source_nelems,
+			 int *dest_vmids, int *dest_perms,
+			 int dest_nelems);
+
 extern int hyp_assign_phys(phys_addr_t addr, u64 size,
 			u32 *source_vmlist, int source_nelems,
 			int *dest_vmids, int *dest_perms, int dest_nelems);
@@ -72,6 +78,14 @@
 	return -EINVAL;
 }
 
+static inline int try_hyp_assign_table(struct sg_table *table,
+				       u32 *source_vm_list, int source_nelems,
+				       int *dest_vmids, int *dest_perms,
+				       int dest_nelems)
+{
+	return -EINVAL;
+}
+
 static inline int hyp_assign_phys(phys_addr_t addr, u64 size,
 			u32 *source_vmlist, int source_nelems,
 			int *dest_vmids, int *dest_perms, int dest_nelems)
diff --git a/include/trace/events/dfc.h b/include/trace/events/dfc.h
index 4ba0fd4..cb62767 100644
--- a/include/trace/events/dfc.h
+++ b/include/trace/events/dfc.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #undef TRACE_SYSTEM
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index f5271bc..93333c0 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -119,7 +119,8 @@
 #define  DRM_MODE_FLAG_SUPPORTS_RGB		(1<<23)
 
 #define  DRM_MODE_FLAG_SUPPORTS_YUV		(1<<24)
-
+#define  DRM_MODE_FLAG_VID_MODE_PANEL	(1<<29)
+#define  DRM_MODE_FLAG_CMD_MODE_PANEL	(1<<30)
 #define  DRM_MODE_FLAG_SEAMLESS			(1<<31)
 
 #define  DRM_MODE_FLAG_ALL	(DRM_MODE_FLAG_PHSYNC |		\
@@ -136,6 +137,8 @@
 				 DRM_MODE_FLAG_CLKDIV2 |	\
 				 DRM_MODE_FLAG_SUPPORTS_RGB |	\
 				 DRM_MODE_FLAG_SUPPORTS_YUV |	\
+				 DRM_MODE_FLAG_VID_MODE_PANEL |	\
+				 DRM_MODE_FLAG_CMD_MODE_PANEL |	\
 				 DRM_MODE_FLAG_3D_MASK)
 
 /* DPMS flags */
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index ada1bce..dc1a320 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -13,6 +13,7 @@
 endif
 
 header-y += nfc/
+header-y += qbt_handler.h
 
 ifneq ($(VSERVICES_SUPPORT), "")
 include include/linux/Kbuild.vservices
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index 4a1c285..7d62fcf 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -88,6 +88,16 @@
 	 * scheduling policy from the caller (for synchronous transactions).
 	 */
 	FLAT_BINDER_FLAG_INHERIT_RT = 0x800,
+
+#ifdef __KERNEL__
+	/**
+	 * @FLAT_BINDER_FLAG_TXN_SECURITY_CTX: request security contexts
+	 *
+	 * Only when set, causes senders to include their security
+	 * context
+	 */
+	FLAT_BINDER_FLAG_TXN_SECURITY_CTX = 0x1000,
+#endif /* __KERNEL__ */
 };
 
 #ifdef BINDER_IPC_32BIT
@@ -265,6 +275,7 @@
 #define BINDER_VERSION			_IOWR('b', 9, struct binder_version)
 #define BINDER_GET_NODE_DEBUG_INFO	_IOWR('b', 11, struct binder_node_debug_info)
 #define BINDER_GET_NODE_INFO_FOR_REF	_IOWR('b', 12, struct binder_node_info_for_ref)
+#define BINDER_SET_CONTEXT_MGR_EXT	_IOW('b', 13, struct flat_binder_object)
 
 /*
  * NOTE: Two special error codes you should check for when calling
@@ -323,6 +334,13 @@
 	} data;
 };
 
+#ifdef __KERNEL__
+struct binder_transaction_data_secctx {
+	struct binder_transaction_data transaction_data;
+	binder_uintptr_t secctx;
+};
+#endif /* __KERNEL__ */
+
 struct binder_transaction_data_sg {
 	struct binder_transaction_data transaction_data;
 	binder_size_t buffers_size;
@@ -358,6 +376,13 @@
 	BR_OK = _IO('r', 1),
 	/* No parameters! */
 
+#ifdef __KERNEL__
+	BR_TRANSACTION_SEC_CTX = _IOR('r', 2,
+				      struct binder_transaction_data_secctx),
+	/*
+	 * binder_transaction_data_secctx: the received command.
+	 */
+#endif /* __KERNEL__ */
 	BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
 	BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
 	/*
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index 54e2915..772c9aa 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -351,9 +351,17 @@
 
 	/* RESERVED PROD			= 94, */
 	IPA_CLIENT_APPS_WAN_COAL_CONS		= 95,
+
+	IPA_CLIENT_MHI_PRIME_TETH_PROD		= 96,
+	IPA_CLIENT_MHI_PRIME_TETH_CONS		= 97,
+
+	IPA_CLIENT_MHI_PRIME_RMNET_PROD		= 98,
+	IPA_CLIENT_MHI_PRIME_RMNET_CONS		= 99,
+
+	IPA_CLIENT_MHI_PRIME_DPL_PROD		= 100,
 };
 
-#define IPA_CLIENT_MAX (IPA_CLIENT_APPS_WAN_COAL_CONS + 1)
+#define IPA_CLIENT_MAX (IPA_CLIENT_MHI_PRIME_DPL_PROD + 1)
 
 #define IPA_CLIENT_WLAN2_PROD IPA_CLIENT_A5_WLAN_AMPDU_PROD
 #define IPA_CLIENT_Q6_DL_NLO_DATA_PROD IPA_CLIENT_Q6_DL_NLO_DATA_PROD
@@ -368,6 +376,11 @@
 #define IPA_CLIENT_WIGIG3_CONS IPA_CLIENT_WIGIG3_CONS
 #define IPA_CLIENT_WIGIG4_CONS IPA_CLIENT_WIGIG4_CONS
 #define IPA_CLIENT_APPS_WAN_COAL_CONS IPA_CLIENT_APPS_WAN_COAL_CONS
+#define IPA_CLIENT_MHI_PRIME_TETH_PROD IPA_CLIENT_MHI_PRIME_TETH_PROD
+#define IPA_CLIENT_MHI_PRIME_TETH_CONS IPA_CLIENT_MHI_PRIME_TETH_CONS
+#define IPA_CLIENT_MHI_PRIME_RMNET_PROD IPA_CLIENT_MHI_PRIME_RMNET_PROD
+#define IPA_CLIENT_MHI_PRIME_RMNET_CONS IPA_CLIENT_MHI_PRIME_RMNET_CONS
+#define IPA_CLIENT_MHI_PRIME_DPL_PROD IPA_CLIENT_MHI_PRIME_DPL_PROD
 
 #define IPA_CLIENT_IS_APPS_CONS(client) \
 	((client) == IPA_CLIENT_APPS_LAN_CONS || \
@@ -605,6 +618,7 @@
 enum ipa_wlan_fw_ssr_event {
 	WLAN_FWR_SSR_BEFORE_SHUTDOWN = BRIDGE_VLAN_MAPPING_MAX,
 	IPA_WLAN_FW_SSR_EVENT_MAX,
+#define IPA_WLAN_FW_SSR_EVENT_MAX IPA_WLAN_FW_SSR_EVENT_MAX
 };
 
 enum ipa_gsb_event {
diff --git a/include/uapi/linux/qbt_handler.h b/include/uapi/linux/qbt_handler.h
new file mode 100644
index 0000000..8ebbf1f
--- /dev/null
+++ b/include/uapi/linux/qbt_handler.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _UAPI_QBT_HANDLER_H_
+#define _UAPI_QBT_HANDLER_H_
+
+#define MAX_NAME_SIZE 32
+
+#define QBT_IS_WUHB_CONNECTED    100
+#define QBT_SEND_KEY_EVENT       101
+#define QBT_ENABLE_IPC           102
+#define QBT_DISABLE_IPC          103
+#define QBT_ENABLE_FD            104
+#define QBT_DISABLE_FD           105
+
+/*
+ * enum qbt_fw_event -
+ *      enumeration of firmware events
+ * @FW_EVENT_FINGER_DOWN - finger down detected
+ * @FW_EVENT_FINGER_UP - finger up detected
+ * @FW_EVENT_IPC - an IPC from the firmware is pending
+ */
+enum qbt_fw_event {
+	FW_EVENT_FINGER_DOWN = 1,
+	FW_EVENT_FINGER_UP = 2,
+	FW_EVENT_IPC = 3,
+};
+
+/*
+ * struct qbt_wuhb_connected_status -
+ *		used to query whether WUHB INT line is connected
+ * @is_wuhb_connected - if non-zero, WUHB INT line is connected
+ */
+struct qbt_wuhb_connected_status {
+	bool is_wuhb_connected;
+};
+
+/*
+ * struct qbt_key_event -
+ *		used to send key event
+ * @key - the key event to send
+ * @value - value of the key event
+ */
+struct qbt_key_event {
+	int key;
+	int value;
+};
+
+#endif /* _UAPI_QBT_HANDLER_H_ */
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index e2d30c7..1496008 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -962,6 +962,9 @@
 #define V4L2_CID_MPEG_VIDC_VIDEO_FRAME_RATE \
 	(V4L2_CID_MPEG_MSM_VIDC_BASE + 119)
 
+#define V4L2_CID_MPEG_VIDC_VENC_BITRATE_SAVINGS \
+	(V4L2_CID_MPEG_MSM_VIDC_BASE + 131)
+
 #define V4L2_CID_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER \
 	(V4L2_CID_MPEG_MSM_VIDC_BASE + 120)
 enum v4l2_mpeg_vidc_video_hevc_max_hier_coding_layer {
diff --git a/include/uapi/media/cam_isp.h b/include/uapi/media/cam_isp.h
index 306450e..bba8eeb 100644
--- a/include/uapi/media/cam_isp.h
+++ b/include/uapi/media/cam_isp.h
@@ -96,6 +96,7 @@
 #define CAM_ISP_GENERIC_BLOB_TYPE_CSID_CLOCK_CONFIG   4
 #define CAM_ISP_GENERIC_BLOB_TYPE_FE_CONFIG           5
 #define CAM_ISP_GENERIC_BLOB_TYPE_UBWC_CONFIG_V2      6
+#define CAM_ISP_GENERIC_BLOB_TYPE_IFE_CORE_CONFIG     7
 
 /* Query devices */
 /**
@@ -435,6 +436,37 @@
 /* Acquire Device/HW v2 */
 
 /**
+ * struct cam_isp_core_config - ISP core registers configuration
+ *
+ * @version:                    Version info
+ * @vid_ds16_r2pd:              Enables Y and C merging PD output for video DS16
+ * @vid_ds4_r2pd:               Enables Y and C merging PD output for video DS4
+ * @disp_ds16_r2pd:             Enables Y and C merging PD output for disp DS16
+ * @disp_ds4_r2pd:              Enables Y and C merging PD output for disp DS4
+ * @dsp_streaming_tap_point:    This selects source for DSP streaming interface
+ * @ihist_src_sel:              Selects input for IHIST module
+ * @hdr_be_src_sel:             Selects input for HDR BE module
+ * @hdr_bhist_src_sel:          Selects input for HDR BHIST module
+ * @input_mux_sel_pdaf:         Selects input for PDAF
+ * @input_mux_sel_pp:           Selects input for Pixel Pipe
+ * @reserved:                   Reserved
+ */
+struct cam_isp_core_config {
+	uint32_t     version;
+	uint32_t     vid_ds16_r2pd;
+	uint32_t     vid_ds4_r2pd;
+	uint32_t     disp_ds16_r2pd;
+	uint32_t     disp_ds4_r2pd;
+	uint32_t     dsp_streaming_tap_point;
+	uint32_t     ihist_src_sel;
+	uint32_t     hdr_be_src_sel;
+	uint32_t     hdr_bhist_src_sel;
+	uint32_t     input_mux_sel_pdaf;
+	uint32_t     input_mux_sel_pp;
+	uint32_t     reserved;
+} __attribute__((packed));
+
+/**
  * struct cam_isp_acquire_hw_info - ISP acquire HW params
  *
  * @common_info_version  : Version of common info struct used
diff --git a/include/uapi/media/msm_cvp_private.h b/include/uapi/media/msm_cvp_private.h
index 7eb1166..1200c5c 100644
--- a/include/uapi/media/msm_cvp_private.h
+++ b/include/uapi/media/msm_cvp_private.h
@@ -12,91 +12,103 @@
 
 /* VIDIOC private cvp command */
 #define VIDIOC_CVP_CMD \
-		_IOWR('V', BASE_VIDIOC_PRIVATE_CVP, struct msm_cvp_arg)
+		_IOWR('V', BASE_VIDIOC_PRIVATE_CVP, struct cvp_kmd_arg)
 
 /* Commands type */
-#define MSM_VIDC_CMD_START		0x10000000
-#define MSM_CVP_CMD_START		(MSM_VIDC_CMD_START + 0x1000)
+#define CVP_KMD_CMD_BASE		0x10000000
+#define CVP_KMD_CMD_START		(CVP_KMD_CMD_BASE + 0x1000)
 
 /*
  * userspace clients pass one of the below arguments type
- * in struct msm_cvp_arg (@type field).
+ * in struct cvp_kmd_arg (@type field).
  */
 
 /*
- * MSM_CVP_GET_SESSION_INFO - this argument type is used to
+ * CVP_KMD_GET_SESSION_INFO - this argument type is used to
  *          get the session information from driver. it passes
- *          struct msm_cvp_session_info {}
+ *          struct cvp_kmd_session_info {}
  */
-#define MSM_CVP_GET_SESSION_INFO	(MSM_CVP_CMD_START + 1)
+#define CVP_KMD_GET_SESSION_INFO	(CVP_KMD_CMD_START + 1)
 
 /*
- * MSM_CVP_REQUEST_POWER - this argument type is used to
+ * CVP_KMD_REQUEST_POWER - this argument type is used to
  *          set the power required to driver. it passes
- *          struct msm_cvp_request_power {}
+ *          struct cvp_kmd_request_power {}
  */
-#define MSM_CVP_REQUEST_POWER		(MSM_CVP_CMD_START + 2)
+#define CVP_KMD_REQUEST_POWER		(CVP_KMD_CMD_START + 2)
 
 /*
- * MSM_CVP_REGISTER_BUFFER - this argument type is used to
+ * CVP_KMD_REGISTER_BUFFER - this argument type is used to
  *          register the buffer to driver. it passes
- *          struct msm_cvp_buffer {}
+ *          struct cvp_kmd_buffer {}
  */
-#define MSM_CVP_REGISTER_BUFFER		(MSM_CVP_CMD_START + 3)
+#define CVP_KMD_REGISTER_BUFFER		(CVP_KMD_CMD_START + 3)
 
 /*
- * MSM_CVP_REGISTER_BUFFER - this argument type is used to
+ * CVP_KMD_REGISTER_BUFFER - this argument type is used to
  *          unregister the buffer to driver. it passes
- *          struct msm_cvp_buffer {}
+ *          struct cvp_kmd_buffer {}
  */
-#define MSM_CVP_UNREGISTER_BUFFER	(MSM_CVP_CMD_START + 4)
+#define CVP_KMD_UNREGISTER_BUFFER	(CVP_KMD_CMD_START + 4)
 
-#define MSM_CVP_HFI_SEND_CMD        (MSM_CVP_CMD_START + 5)
+#define CVP_KMD_HFI_SEND_CMD        (CVP_KMD_CMD_START + 5)
 
-#define MSM_CVP_HFI_DFS_CONFIG_CMD  (MSM_CVP_CMD_START + 6)
+#define CVP_KMD_HFI_DFS_CONFIG_CMD  (CVP_KMD_CMD_START + 6)
 
-#define MSM_CVP_HFI_DFS_FRAME_CMD  (MSM_CVP_CMD_START + 7)
+#define CVP_KMD_HFI_DFS_FRAME_CMD  (CVP_KMD_CMD_START + 7)
 
-#define MSM_CVP_HFI_DFS_FRAME_CMD_RESPONSE  (MSM_CVP_CMD_START + 8)
+#define CVP_KMD_HFI_DFS_FRAME_CMD_RESPONSE  (CVP_KMD_CMD_START + 8)
 
-#define MSM_CVP_HFI_DME_CONFIG_CMD  (MSM_CVP_CMD_START + 9)
+#define CVP_KMD_HFI_DME_CONFIG_CMD  (CVP_KMD_CMD_START + 9)
 
-#define MSM_CVP_HFI_DME_FRAME_CMD  (MSM_CVP_CMD_START + 10)
+#define CVP_KMD_HFI_DME_FRAME_CMD  (CVP_KMD_CMD_START + 10)
 
-#define MSM_CVP_HFI_DME_FRAME_CMD_RESPONSE  (MSM_CVP_CMD_START + 11)
+#define CVP_KMD_HFI_DME_FRAME_CMD_RESPONSE  (CVP_KMD_CMD_START + 11)
 
-#define MSM_CVP_HFI_PERSIST_CMD  (MSM_CVP_CMD_START + 12)
+#define CVP_KMD_HFI_PERSIST_CMD  (CVP_KMD_CMD_START + 12)
 
-#define MSM_CVP_HFI_PERSIST_CMD_RESPONSE  (MSM_CVP_CMD_START + 13)
+#define CVP_KMD_HFI_PERSIST_CMD_RESPONSE  (CVP_KMD_CMD_START + 13)
+
+#define CVP_KMD_HFI_DME_FRAME_FENCE_CMD  (CVP_KMD_CMD_START + 14)
+
+#define CVP_KMD_SEND_CMD_PKT	(CVP_KMD_CMD_START + 64)
+
+#define CVP_KMD_RECEIVE_MSG_PKT	 (CVP_KMD_CMD_START + 65)
+
+#define CVP_KMD_SET_SYS_PROPERTY	(CVP_KMD_CMD_START + 66)
+
+#define CVP_KMD_GET_SYS_PROPERTY	(CVP_KMD_CMD_START + 67)
+
+#define CVP_KMD_SESSION_CONTROL		(CVP_KMD_CMD_START + 68)
 
 /* flags */
-#define MSM_CVP_FLAG_UNSECURE			0x00000000
-#define MSM_CVP_FLAG_SECURE			0x00000001
+#define CVP_KMD_FLAG_UNSECURE			0x00000000
+#define CVP_KMD_FLAG_SECURE			0x00000001
 
 /* buffer type */
-#define MSM_CVP_BUFTYPE_INPUT			0x00000001
-#define MSM_CVP_BUFTYPE_OUTPUT			0x00000002
-#define MSM_CVP_BUFTYPE_INTERNAL_1		0x00000003
-#define MSM_CVP_BUFTYPE_INTERNAL_2		0x00000004
+#define CVP_KMD_BUFTYPE_INPUT			0x00000001
+#define CVP_KMD_BUFTYPE_OUTPUT			0x00000002
+#define CVP_KMD_BUFTYPE_INTERNAL_1		0x00000003
+#define CVP_KMD_BUFTYPE_INTERNAL_2		0x00000004
 
 
 /**
- * struct msm_cvp_session_info - session information
+ * struct cvp_kmd_session_info - session information
  * @session_id:    current session id
  */
-struct msm_cvp_session_info {
+struct cvp_kmd_session_info {
 	unsigned int session_id;
 	unsigned int reserved[10];
 };
 
 /**
- * struct msm_cvp_request_power - power / clock data information
+ * struct cvp_kmd_request_power - power / clock data information
  * @clock_cycles_a:  clock cycles per second required for hardware_a
  * @clock_cycles_b:  clock cycles per second required for hardware_b
  * @ddr_bw:        bandwidth required for ddr in bps
  * @sys_cache_bw:  bandwidth required for system cache in bps
  */
-struct msm_cvp_request_power {
+struct cvp_kmd_request_power {
 	unsigned int clock_cycles_a;
 	unsigned int clock_cycles_b;
 	unsigned int ddr_bw;
@@ -105,7 +117,7 @@
 };
 
 /**
- * struct msm_cvp_buffer - buffer information to be registered
+ * struct cvp_kmd_buffer - buffer information to be registered
  * @index:         index of buffer
  * @type:          buffer type
  * @fd:            file descriptor of buffer
@@ -114,7 +126,7 @@
  * @pixelformat:   fourcc format
  * @flags:         buffer flags
  */
-struct msm_cvp_buffer {
+struct cvp_kmd_buffer {
 	unsigned int index;
 	unsigned int type;
 	unsigned int fd;
@@ -126,42 +138,42 @@
 };
 
 /**
- * struct msm_cvp_send_cmd - sending generic HFI command
+ * struct cvp_kmd_send_cmd - sending generic HFI command
  * @cmd_address_fd:   file descriptor of cmd_address
  * @cmd_size:         allocated size of buffer
  */
-struct msm_cvp_send_cmd {
+struct cvp_kmd_send_cmd {
 	unsigned int cmd_address_fd;
 	unsigned int cmd_size;
 	unsigned int reserved[10];
 };
 
 /**
- * struct msm_cvp_color_plane_info - color plane info
+ * struct cvp_kmd_color_plane_info - color plane info
  * @stride:      stride of plane
  * @buf_size:    size of plane
  */
-struct msm_cvp_color_plane_info {
+struct cvp_kmd_color_plane_info {
 	int stride[HFI_MAX_PLANES];
 	unsigned int buf_size[HFI_MAX_PLANES];
 };
 
 /**
- * struct msm_cvp_client_data - store generic client
+ * struct cvp_kmd_client_data - store generic client
  *                              data
  * @transactionid:  transaction id
  * @client_data1:   client data to be used during callback
  * @client_data2:   client data to be used during callback
  */
-struct msm_cvp_client_data {
+struct cvp_kmd_client_data {
 	unsigned int transactionid;
 	unsigned int client_data1;
 	unsigned int client_data2;
 };
 
 #define CVP_COLOR_PLANE_INFO_SIZE \
-	sizeof(struct msm_cvp_color_plane_info)
-#define CVP_CLIENT_DATA_SIZE	sizeof(struct msm_cvp_client_data)
+	sizeof(struct cvp_kmd_color_plane_info)
+#define CVP_CLIENT_DATA_SIZE	sizeof(struct cvp_kmd_client_data)
 #define CVP_DFS_CONFIG_CMD_SIZE   38
 #define CVP_DFS_FRAME_CMD_SIZE 16
 #define CVP_DFS_FRAME_BUFFERS_OFFSET 8
@@ -175,29 +187,56 @@
 #define CVP_PERSIST_BUFFERS_OFFSET 7
 #define CVP_PSRSIST_BUF_NUM	2
 
-struct msm_cvp_dfs_config {
+struct cvp_kmd_dfs_config {
 	unsigned int cvp_dfs_config[CVP_DFS_CONFIG_CMD_SIZE];
 };
 
-struct msm_cvp_dfs_frame {
+struct cvp_kmd_dfs_frame {
 	unsigned int frame_data[CVP_DFS_FRAME_CMD_SIZE];
 };
 
-struct msm_cvp_dme_config {
+struct cvp_kmd_dme_config {
 	unsigned int cvp_dme_config[CVP_DME_CONFIG_CMD_SIZE];
 };
 
-struct msm_cvp_dme_frame {
+struct cvp_kmd_dme_frame {
 	unsigned int frame_data[CVP_DME_FRAME_CMD_SIZE];
 };
 
-struct msm_cvp_persist_buf {
+struct cvp_kmd_persist_buf {
 	unsigned int persist_data[CVP_PERSIST_CMD_SIZE];
 };
 
+#define	MAX_HFI_PKT_SIZE	250
+
+struct cvp_kmd_hfi_packet {
+	unsigned int pkt_data[MAX_HFI_PKT_SIZE];
+};
+
+struct cvp_kmd_sys_property {
+	unsigned int prop_type;
+	unsigned int data;
+};
+
+struct cvp_kmd_sys_properties {
+	unsigned int prop_num;
+	struct cvp_kmd_sys_property prop_data;
+};
+
+#define MAX_HFI_FENCE_SIZE        16
+#define	MAX_HFI_FENCE_OFFSET	(MAX_HFI_PKT_SIZE-MAX_HFI_FENCE_SIZE)
+struct cvp_kmd_hfi_fence_packet {
+	unsigned int pkt_data[MAX_HFI_FENCE_OFFSET];
+	unsigned int fence_data[MAX_HFI_FENCE_SIZE];
+};
+
+
 /**
- * struct msm_cvp_arg - argument passed with VIDIOC_CVP_CMD
+ * struct cvp_kmd_arg - argument passed with VIDIOC_CVP_CMD
+ * To be deprecated
  * @type:          command type
+ * @buf_offset:    offset to buffer list in the command
+ * @buf_num:       number of buffers in the command
  * @session:       session information
  * @req_power:     power information
  * @regbuf:        buffer to be registered
@@ -205,22 +244,28 @@
  * @send_cmd:      sending generic HFI command
  * @dfs_config:    sending DFS config command
  * @dfs_frame:     sending DFS frame command
+ * @hfi_pkt:       HFI packet created by user library
+ * @sys_properties: System properties read or set by user library
+ * @hfi_fence_pkt: HFI fence packet created by user library
  */
-struct msm_cvp_arg {
+struct cvp_kmd_arg {
 	unsigned int type;
-	union data_t {
-		struct msm_cvp_session_info session;
-		struct msm_cvp_request_power req_power;
-		struct msm_cvp_buffer regbuf;
-		struct msm_cvp_buffer unregbuf;
-		struct msm_cvp_send_cmd send_cmd;
-		struct msm_cvp_dfs_config dfs_config;
-		struct msm_cvp_dfs_frame dfs_frame;
-		struct msm_cvp_dme_config dme_config;
-		struct msm_cvp_dme_frame dme_frame;
-		struct msm_cvp_persist_buf pbuf_cmd;
+	unsigned int buf_offset;
+	unsigned int buf_num;
+	union cvp_data_t {
+		struct cvp_kmd_session_info session;
+		struct cvp_kmd_request_power req_power;
+		struct cvp_kmd_buffer regbuf;
+		struct cvp_kmd_buffer unregbuf;
+		struct cvp_kmd_send_cmd send_cmd;
+		struct cvp_kmd_dfs_config dfs_config;
+		struct cvp_kmd_dfs_frame dfs_frame;
+		struct cvp_kmd_dme_config dme_config;
+		struct cvp_kmd_dme_frame dme_frame;
+		struct cvp_kmd_persist_buf pbuf_cmd;
+		struct cvp_kmd_hfi_packet hfi_pkt;
+		struct cvp_kmd_sys_properties sys_properties;
+		struct cvp_kmd_hfi_fence_packet hfi_fence_pkt;
 	} data;
-	unsigned int reserved[12];
 };
-
 #endif
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 03cc59e..cebadd6 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -677,7 +677,7 @@
 	}
 
 	if (htab_is_prealloc(htab)) {
-		pcpu_freelist_push(&htab->freelist, &l->fnode);
+		__pcpu_freelist_push(&htab->freelist, &l->fnode);
 	} else {
 		atomic_dec(&htab->count);
 		l->htab = htab;
@@ -739,7 +739,7 @@
 		} else {
 			struct pcpu_freelist_node *l;
 
-			l = pcpu_freelist_pop(&htab->freelist);
+			l = __pcpu_freelist_pop(&htab->freelist);
 			if (!l)
 				return ERR_PTR(-E2BIG);
 			l_new = container_of(l, struct htab_elem, fnode);
diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c
index 673fa6f..0c1b4ba 100644
--- a/kernel/bpf/percpu_freelist.c
+++ b/kernel/bpf/percpu_freelist.c
@@ -28,8 +28,8 @@
 	free_percpu(s->freelist);
 }
 
-static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head,
-					struct pcpu_freelist_node *node)
+static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,
+					 struct pcpu_freelist_node *node)
 {
 	raw_spin_lock(&head->lock);
 	node->next = head->first;
@@ -37,12 +37,22 @@
 	raw_spin_unlock(&head->lock);
 }
 
-void pcpu_freelist_push(struct pcpu_freelist *s,
+void __pcpu_freelist_push(struct pcpu_freelist *s,
 			struct pcpu_freelist_node *node)
 {
 	struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist);
 
-	__pcpu_freelist_push(head, node);
+	___pcpu_freelist_push(head, node);
+}
+
+void pcpu_freelist_push(struct pcpu_freelist *s,
+			struct pcpu_freelist_node *node)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	__pcpu_freelist_push(s, node);
+	local_irq_restore(flags);
 }
 
 void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
@@ -63,7 +73,7 @@
 	for_each_possible_cpu(cpu) {
 again:
 		head = per_cpu_ptr(s->freelist, cpu);
-		__pcpu_freelist_push(head, buf);
+		___pcpu_freelist_push(head, buf);
 		i++;
 		buf += elem_size;
 		if (i == nr_elems)
@@ -74,14 +84,12 @@
 	local_irq_restore(flags);
 }
 
-struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
+struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
 {
 	struct pcpu_freelist_head *head;
 	struct pcpu_freelist_node *node;
-	unsigned long flags;
 	int orig_cpu, cpu;
 
-	local_irq_save(flags);
 	orig_cpu = cpu = raw_smp_processor_id();
 	while (1) {
 		head = per_cpu_ptr(s->freelist, cpu);
@@ -89,16 +97,25 @@
 		node = head->first;
 		if (node) {
 			head->first = node->next;
-			raw_spin_unlock_irqrestore(&head->lock, flags);
+			raw_spin_unlock(&head->lock);
 			return node;
 		}
 		raw_spin_unlock(&head->lock);
 		cpu = cpumask_next(cpu, cpu_possible_mask);
 		if (cpu >= nr_cpu_ids)
 			cpu = 0;
-		if (cpu == orig_cpu) {
-			local_irq_restore(flags);
+		if (cpu == orig_cpu)
 			return NULL;
-		}
 	}
 }
+
+struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
+{
+	struct pcpu_freelist_node *ret;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	ret = __pcpu_freelist_pop(s);
+	local_irq_restore(flags);
+	return ret;
+}
diff --git a/kernel/bpf/percpu_freelist.h b/kernel/bpf/percpu_freelist.h
index 3049aae..c396011 100644
--- a/kernel/bpf/percpu_freelist.h
+++ b/kernel/bpf/percpu_freelist.h
@@ -22,8 +22,12 @@
 	struct pcpu_freelist_node *next;
 };
 
+/* pcpu_freelist_* do spin_lock_irqsave. */
 void pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *);
 struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *);
+/* __pcpu_freelist_* do spin_lock only. caller must disable irqs. */
+void __pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *);
+struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *);
 void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
 			    u32 nr_elems);
 int pcpu_freelist_init(struct pcpu_freelist *);
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 382c09d..cc40b8b 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -701,8 +701,13 @@
 
 	if (bpf_map_is_dev_bound(map)) {
 		err = bpf_map_offload_lookup_elem(map, key, value);
-	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
-		   map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
+		goto done;
+	}
+
+	preempt_disable();
+	this_cpu_inc(bpf_prog_active);
+	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
 		err = bpf_percpu_hash_copy(map, key, value);
 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 		err = bpf_percpu_array_copy(map, key, value);
@@ -722,7 +727,10 @@
 		rcu_read_unlock();
 		err = ptr ? 0 : -ENOENT;
 	}
+	this_cpu_dec(bpf_prog_active);
+	preempt_enable();
 
+done:
 	if (err)
 		goto free_value;
 
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 4d81be2..bcb42aa 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -6035,7 +6035,8 @@
 			u32 off_reg;
 
 			aux = &env->insn_aux_data[i + delta];
-			if (!aux->alu_state)
+			if (!aux->alu_state ||
+			    aux->alu_state == BPF_ALU_NON_POINTER)
 				continue;
 
 			isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 498c6bc..eba5cab 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -3430,7 +3430,65 @@
 {
 	return psi_show(seq, &seq_css(seq)->cgroup->psi, PSI_CPU);
 }
-#endif
+
+static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf,
+					  size_t nbytes, enum psi_res res)
+{
+	struct psi_trigger *new;
+	struct cgroup *cgrp;
+
+	cgrp = cgroup_kn_lock_live(of->kn, false);
+	if (!cgrp)
+		return -ENODEV;
+
+	cgroup_get(cgrp);
+	cgroup_kn_unlock(of->kn);
+
+	new = psi_trigger_create(&cgrp->psi, buf, nbytes, res);
+	if (IS_ERR(new)) {
+		cgroup_put(cgrp);
+		return PTR_ERR(new);
+	}
+
+	psi_trigger_replace(&of->priv, new);
+
+	cgroup_put(cgrp);
+
+	return nbytes;
+}
+
+static ssize_t cgroup_io_pressure_write(struct kernfs_open_file *of,
+					  char *buf, size_t nbytes,
+					  loff_t off)
+{
+	return cgroup_pressure_write(of, buf, nbytes, PSI_IO);
+}
+
+static ssize_t cgroup_memory_pressure_write(struct kernfs_open_file *of,
+					  char *buf, size_t nbytes,
+					  loff_t off)
+{
+	return cgroup_pressure_write(of, buf, nbytes, PSI_MEM);
+}
+
+static ssize_t cgroup_cpu_pressure_write(struct kernfs_open_file *of,
+					  char *buf, size_t nbytes,
+					  loff_t off)
+{
+	return cgroup_pressure_write(of, buf, nbytes, PSI_CPU);
+}
+
+static __poll_t cgroup_pressure_poll(struct kernfs_open_file *of,
+					  poll_table *pt)
+{
+	return psi_trigger_poll(&of->priv, of->file, pt);
+}
+
+static void cgroup_pressure_release(struct kernfs_open_file *of)
+{
+	psi_trigger_replace(&of->priv, NULL);
+}
+#endif /* CONFIG_PSI */
 
 static int cgroup_file_open(struct kernfs_open_file *of)
 {
@@ -3499,6 +3557,16 @@
 	return ret ?: nbytes;
 }
 
+static __poll_t cgroup_file_poll(struct kernfs_open_file *of, poll_table *pt)
+{
+	struct cftype *cft = of->kn->priv;
+
+	if (cft->poll)
+		return cft->poll(of, pt);
+
+	return kernfs_generic_poll(of, pt);
+}
+
 static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos)
 {
 	return seq_cft(seq)->seq_start(seq, ppos);
@@ -3537,6 +3605,7 @@
 	.open			= cgroup_file_open,
 	.release		= cgroup_file_release,
 	.write			= cgroup_file_write,
+	.poll			= cgroup_file_poll,
 	.seq_show		= cgroup_seqfile_show,
 };
 
@@ -3545,6 +3614,7 @@
 	.open			= cgroup_file_open,
 	.release		= cgroup_file_release,
 	.write			= cgroup_file_write,
+	.poll			= cgroup_file_poll,
 	.seq_start		= cgroup_seqfile_start,
 	.seq_next		= cgroup_seqfile_next,
 	.seq_stop		= cgroup_seqfile_stop,
@@ -4572,18 +4642,27 @@
 		.name = "io.pressure",
 		.flags = CFTYPE_NOT_ON_ROOT,
 		.seq_show = cgroup_io_pressure_show,
+		.write = cgroup_io_pressure_write,
+		.poll = cgroup_pressure_poll,
+		.release = cgroup_pressure_release,
 	},
 	{
 		.name = "memory.pressure",
 		.flags = CFTYPE_NOT_ON_ROOT,
 		.seq_show = cgroup_memory_pressure_show,
+		.write = cgroup_memory_pressure_write,
+		.poll = cgroup_pressure_poll,
+		.release = cgroup_pressure_release,
 	},
 	{
 		.name = "cpu.pressure",
 		.flags = CFTYPE_NOT_ON_ROOT,
 		.seq_show = cgroup_cpu_pressure_show,
+		.write = cgroup_cpu_pressure_write,
+		.poll = cgroup_pressure_poll,
+		.release = cgroup_pressure_release,
 	},
-#endif
+#endif /* CONFIG_PSI */
 	{ }	/* terminate */
 };
 
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 0709f85..c89f8ea 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -469,18 +469,18 @@
 		void __user *buffer, size_t *lenp,
 		loff_t *ppos)
 {
-	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
-
-	if (ret || !write)
-		return ret;
-
+	int ret;
+	int perf_cpu = sysctl_perf_cpu_time_max_percent;
 	/*
 	 * If throttling is disabled don't allow the write:
 	 */
-	if (sysctl_perf_cpu_time_max_percent == 100 ||
-	    sysctl_perf_cpu_time_max_percent == 0)
+	if (write && (perf_cpu == 100 || perf_cpu == 0))
 		return -EINVAL;
 
+	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+	if (ret || !write)
+		return ret;
+
 	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
 	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
 	update_perf_cpu_limits();
diff --git a/kernel/exit.c b/kernel/exit.c
index 7b5be76..ddd2aa9 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -307,7 +307,7 @@
 	 *        MB (A)	      MB (B)
 	 *    [L] cond		  [L] tsk
 	 */
-	smp_rmb(); /* (B) */
+	smp_mb(); /* (B) */
 
 	/*
 	 * Avoid using task_rcu_dereference() magic as long as we are careful,
diff --git a/kernel/futex.c b/kernel/futex.c
index d7c465f..c5fca74 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1444,11 +1444,7 @@
 	if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
 		return;
 
-	/*
-	 * Queue the task for later wakeup for after we've released
-	 * the hb->lock. wake_q_add() grabs reference to p.
-	 */
-	wake_q_add(wake_q, p);
+	get_task_struct(p);
 	__unqueue_futex(q);
 	/*
 	 * The waiting task can free the futex_q as soon as q->lock_ptr = NULL
@@ -1458,6 +1454,13 @@
 	 * plist_del in __unqueue_futex().
 	 */
 	smp_store_release(&q->lock_ptr, NULL);
+
+	/*
+	 * Queue the task for later wakeup for after we've released
+	 * the hb->lock. wake_q_add() grabs reference to p.
+	 */
+	wake_q_add(wake_q, p);
+	put_task_struct(p);
 }
 
 /*
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index a2b3d9d..e521950 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -1376,6 +1376,10 @@
 int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
 {
 	data = data->parent_data;
+
+	if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
+		return 0;
+
 	if (data->chip->irq_set_wake)
 		return data->chip->irq_set_wake(data, on);
 
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 6dfdb4d..eb584ad 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -396,6 +396,9 @@
 	}
 
 	cpumask_and(&mask, cpu_online_mask, set);
+	if (cpumask_empty(&mask))
+		cpumask_copy(&mask, cpu_online_mask);
+
 	if (node != NUMA_NO_NODE) {
 		const struct cpumask *nodemask = cpumask_of_node(node);
 
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
index 5092494..9233770 100644
--- a/kernel/irq/matrix.c
+++ b/kernel/irq/matrix.c
@@ -14,6 +14,7 @@
 	unsigned int		available;
 	unsigned int		allocated;
 	unsigned int		managed;
+	unsigned int		managed_allocated;
 	bool			initialized;
 	bool			online;
 	unsigned long		alloc_map[IRQ_MATRIX_SIZE];
@@ -124,6 +125,48 @@
 	return area;
 }
 
+/* Find the best CPU which has the lowest vector allocation count */
+static unsigned int matrix_find_best_cpu(struct irq_matrix *m,
+					const struct cpumask *msk)
+{
+	unsigned int cpu, best_cpu, maxavl = 0;
+	struct cpumap *cm;
+
+	best_cpu = UINT_MAX;
+
+	for_each_cpu(cpu, msk) {
+		cm = per_cpu_ptr(m->maps, cpu);
+
+		if (!cm->online || cm->available <= maxavl)
+			continue;
+
+		best_cpu = cpu;
+		maxavl = cm->available;
+	}
+	return best_cpu;
+}
+
+/* Find the best CPU which has the lowest number of managed IRQs allocated */
+static unsigned int matrix_find_best_cpu_managed(struct irq_matrix *m,
+						const struct cpumask *msk)
+{
+	unsigned int cpu, best_cpu, allocated = UINT_MAX;
+	struct cpumap *cm;
+
+	best_cpu = UINT_MAX;
+
+	for_each_cpu(cpu, msk) {
+		cm = per_cpu_ptr(m->maps, cpu);
+
+		if (!cm->online || cm->managed_allocated > allocated)
+			continue;
+
+		best_cpu = cpu;
+		allocated = cm->managed_allocated;
+	}
+	return best_cpu;
+}
+
 /**
  * irq_matrix_assign_system - Assign system wide entry in the matrix
  * @m:		Matrix pointer
@@ -239,11 +282,21 @@
  * @m:		Matrix pointer
  * @cpu:	On which CPU the interrupt should be allocated
  */
-int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu)
+int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
+			     unsigned int *mapped_cpu)
 {
-	struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
-	unsigned int bit, end = m->alloc_end;
+	unsigned int bit, cpu, end = m->alloc_end;
+	struct cpumap *cm;
 
+	if (cpumask_empty(msk))
+		return -EINVAL;
+
+	cpu = matrix_find_best_cpu_managed(m, msk);
+	if (cpu == UINT_MAX)
+		return -ENOSPC;
+
+	cm = per_cpu_ptr(m->maps, cpu);
+	end = m->alloc_end;
 	/* Get managed bit which are not allocated */
 	bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);
 	bit = find_first_bit(m->scratch_map, end);
@@ -251,7 +304,9 @@
 		return -ENOSPC;
 	set_bit(bit, cm->alloc_map);
 	cm->allocated++;
+	cm->managed_allocated++;
 	m->total_allocated++;
+	*mapped_cpu = cpu;
 	trace_irq_matrix_alloc_managed(bit, cpu, m, cm);
 	return bit;
 }
@@ -322,37 +377,27 @@
 int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
 		     bool reserved, unsigned int *mapped_cpu)
 {
-	unsigned int cpu, best_cpu, maxavl = 0;
+	unsigned int cpu, bit;
 	struct cpumap *cm;
-	unsigned int bit;
 
-	best_cpu = UINT_MAX;
-	for_each_cpu(cpu, msk) {
-		cm = per_cpu_ptr(m->maps, cpu);
+	cpu = matrix_find_best_cpu(m, msk);
+	if (cpu == UINT_MAX)
+		return -ENOSPC;
 
-		if (!cm->online || cm->available <= maxavl)
-			continue;
+	cm = per_cpu_ptr(m->maps, cpu);
+	bit = matrix_alloc_area(m, cm, 1, false);
+	if (bit >= m->alloc_end)
+		return -ENOSPC;
+	cm->allocated++;
+	cm->available--;
+	m->total_allocated++;
+	m->global_available--;
+	if (reserved)
+		m->global_reserved--;
+	*mapped_cpu = cpu;
+	trace_irq_matrix_alloc(bit, cpu, m, cm);
+	return bit;
 
-		best_cpu = cpu;
-		maxavl = cm->available;
-	}
-
-	if (maxavl) {
-		cm = per_cpu_ptr(m->maps, best_cpu);
-		bit = matrix_alloc_area(m, cm, 1, false);
-		if (bit < m->alloc_end) {
-			cm->allocated++;
-			cm->available--;
-			m->total_allocated++;
-			m->global_available--;
-			if (reserved)
-				m->global_reserved--;
-			*mapped_cpu = best_cpu;
-			trace_irq_matrix_alloc(bit, best_cpu, m, cm);
-			return bit;
-		}
-	}
-	return -ENOSPC;
 }
 
 /**
@@ -373,6 +418,8 @@
 
 	clear_bit(bit, cm->alloc_map);
 	cm->allocated--;
+	if (managed)
+		cm->managed_allocated--;
 
 	if (cm->online)
 		m->total_allocated--;
@@ -442,13 +489,14 @@
 	seq_printf(sf, "Total allocated:  %6u\n", m->total_allocated);
 	seq_printf(sf, "System: %u: %*pbl\n", nsys, m->matrix_bits,
 		   m->system_map);
-	seq_printf(sf, "%*s| CPU | avl | man | act | vectors\n", ind, " ");
+	seq_printf(sf, "%*s| CPU | avl | man | mac | act | vectors\n", ind, " ");
 	cpus_read_lock();
 	for_each_online_cpu(cpu) {
 		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
 
-		seq_printf(sf, "%*s %4d  %4u  %4u  %4u  %*pbl\n", ind, " ",
-			   cpu, cm->available, cm->managed, cm->allocated,
+		seq_printf(sf, "%*s %4d  %4u  %4u  %4u %4u  %*pbl\n", ind, " ",
+			   cpu, cm->available, cm->managed,
+			   cm->managed_allocated, cm->allocated,
 			   m->matrix_bits, cm->alloc_map);
 	}
 	cpus_read_unlock();
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 087d18d..e3dfad2 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -11,6 +11,7 @@
 #include <linux/kthread.h>
 #include <linux/completion.h>
 #include <linux/err.h>
+#include <linux/cgroup.h>
 #include <linux/cpuset.h>
 #include <linux/unistd.h>
 #include <linux/file.h>
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 3064c50..ef90935 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -198,15 +198,22 @@
 		woken++;
 		tsk = waiter->task;
 
-		wake_q_add(wake_q, tsk);
+		get_task_struct(tsk);
 		list_del(&waiter->list);
 		/*
-		 * Ensure that the last operation is setting the reader
+		 * Ensure calling get_task_struct() before setting the reader
 		 * waiter to nil such that rwsem_down_read_failed() cannot
 		 * race with do_exit() by always holding a reference count
 		 * to the task to wakeup.
 		 */
 		smp_store_release(&waiter->task, NULL);
+		/*
+		 * Ensure issuing the wakeup (either by us or someone else)
+		 * after setting the reader waiter to nil.
+		 */
+		wake_q_add(wake_q, tsk);
+		/* wake_q_add() already takes the task ref */
+		put_task_struct(tsk);
 	}
 
 	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
diff --git a/kernel/relay.c b/kernel/relay.c
index 04f2486..9e0f523 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -428,6 +428,8 @@
 	dentry = chan->cb->create_buf_file(tmpname, chan->parent,
 					   S_IRUSR, buf,
 					   &chan->is_global);
+	if (IS_ERR(dentry))
+		dentry = NULL;
 
 	kfree(tmpname);
 
@@ -461,7 +463,7 @@
 		dentry = chan->cb->create_buf_file(NULL, NULL,
 						   S_IRUSR, buf,
 						   &chan->is_global);
-		if (WARN_ON(dentry))
+		if (IS_ERR_OR_NULL(dentry))
 			goto free_buf;
 	}
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 594c3fa..fcfe2b3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -408,10 +408,11 @@
 	 * its already queued (either by us or someone else) and will get the
 	 * wakeup due to that.
 	 *
-	 * This cmpxchg() executes a full barrier, which pairs with the full
-	 * barrier executed by the wakeup in wake_up_q().
+	 * In order to ensure that a pending wakeup will observe our pending
+	 * state, even in the failed case, an explicit smp_mb() must be used.
 	 */
-	if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
+	smp_mb__before_atomic();
+	if (cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL))
 		return;
 
 	head->count++;
@@ -6134,7 +6135,9 @@
 	smp_call_function_any(&avail_cpus, timer_quiesce_cpu, &cpu, 1);
 
 	watchdog_disable(cpu);
+	irq_lock_sparse();
 	stop_cpus(cpumask_of(cpu), do_isolation_work_cpu_stop, 0);
+	irq_unlock_sparse();
 
 	calc_load_migrate(rq);
 	update_max_interval();
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 176040f..3a84a1a 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -968,7 +968,7 @@
 
 /********************** cpufreq governor interface *********************/
 
-struct cpufreq_governor schedutil_gov;
+static struct cpufreq_governor schedutil_gov;
 
 static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
 {
@@ -1272,7 +1272,7 @@
 	sg_policy->need_freq_update = true;
 }
 
-struct cpufreq_governor schedutil_gov = {
+static struct cpufreq_governor schedutil_gov = {
 	.name			= "schedutil",
 	.owner			= THIS_MODULE,
 	.dynamic_switching	= true,
@@ -1295,36 +1295,3 @@
 	return cpufreq_register_governor(&schedutil_gov);
 }
 fs_initcall(sugov_register);
-
-#ifdef CONFIG_ENERGY_MODEL
-extern bool sched_energy_update;
-extern struct mutex sched_energy_mutex;
-
-static void rebuild_sd_workfn(struct work_struct *work)
-{
-	mutex_lock(&sched_energy_mutex);
-	sched_energy_update = true;
-	rebuild_sched_domains();
-	sched_energy_update = false;
-	mutex_unlock(&sched_energy_mutex);
-}
-static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
-
-/*
- * EAS shouldn't be attempted without sugov, so rebuild the sched_domains
- * on governor changes to make sure the scheduler knows about it.
- */
-void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
-				  struct cpufreq_governor *old_gov)
-{
-	if (old_gov == &schedutil_gov || policy->governor == &schedutil_gov) {
-		/*
-		 * When called from the cpufreq_register_driver() path, the
-		 * cpu_hotplug_lock is already held, so use a work item to
-		 * avoid nested locking in rebuild_sched_domains().
-		 */
-		schedule_work(&rebuild_sd_work);
-	}
-
-}
-#endif
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6a45c8a..9d51fd1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7193,6 +7193,24 @@
 	return min(util, capacity_orig_of(cpu));
 }
 
+#ifdef CONFIG_SCHED_WALT
+static inline unsigned long
+cpu_util_next_walt(int cpu, struct task_struct *p, int dst_cpu)
+{
+	unsigned long util =
+			cpu_rq(cpu)->walt_stats.cumulative_runnable_avg_scaled;
+	bool queued = task_on_rq_queued(p);
+
+	if (unlikely(queued && task_cpu(p) == cpu && dst_cpu != cpu))
+		util = max_t(long, util - task_util(p), 0);
+	else if (task_cpu(p) != cpu && dst_cpu == cpu &&
+						p->state == TASK_WAKING)
+		util += task_util(p);
+
+	return min_t(unsigned long, util, capacity_orig_of(cpu));
+}
+#endif
+
 /*
  * compute_energy(): Estimates the energy that would be consumed if @p was
  * migrated to @dst_cpu. compute_energy() predicts what will be the utilization
@@ -7219,9 +7237,13 @@
 		 * by compute_energy().
 		 */
 		for_each_cpu_and(cpu, perf_domain_span(pd), cpu_online_mask) {
+#ifdef CONFIG_SCHED_WALT
+			util = cpu_util_next_walt(cpu, p, dst_cpu);
+#else
 			util = cpu_util_next(cpu, p, dst_cpu);
 			util += cpu_util_rt(cpu_rq(cpu));
 			util = schedutil_energy_util(cpu, util);
+#endif
 			max_util = max(util, max_util);
 			sum_util += util;
 		}
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index 0e97ca9..e88918e 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -4,6 +4,9 @@
  * Copyright (c) 2018 Facebook, Inc.
  * Author: Johannes Weiner <hannes@cmpxchg.org>
  *
+ * Polling support by Suren Baghdasaryan <surenb@google.com>
+ * Copyright (c) 2018 Google, Inc.
+ *
  * When CPU, memory and IO are contended, tasks experience delays that
  * reduce throughput and introduce latencies into the workload. Memory
  * and IO contention, in addition, can cause a full loss of forward
@@ -129,9 +132,13 @@
 #include <linux/seq_file.h>
 #include <linux/proc_fs.h>
 #include <linux/seqlock.h>
+#include <linux/uaccess.h>
 #include <linux/cgroup.h>
 #include <linux/module.h>
 #include <linux/sched.h>
+#include <linux/ctype.h>
+#include <linux/file.h>
+#include <linux/poll.h>
 #include <linux/psi.h>
 #include "sched.h"
 
@@ -140,9 +147,9 @@
 DEFINE_STATIC_KEY_FALSE(psi_disabled);
 
 #ifdef CONFIG_PSI_DEFAULT_DISABLED
-bool psi_enable;
+static bool psi_enable;
 #else
-bool psi_enable = true;
+static bool psi_enable = true;
 #endif
 static int __init setup_psi(char *str)
 {
@@ -156,6 +163,11 @@
 #define EXP_60s		1981		/* 1/exp(2s/60s) */
 #define EXP_300s	2034		/* 1/exp(2s/300s) */
 
+/* PSI trigger definitions */
+#define WINDOW_MIN_US 500000	/* Min window size is 500ms */
+#define WINDOW_MAX_US 10000000	/* Max window size is 10s */
+#define UPDATES_PER_WINDOW 10	/* 10 updates per window */
+
 /* Sampling frequency in nanoseconds */
 static u64 psi_period __read_mostly;
 
@@ -165,7 +177,7 @@
 	.pcpu = &system_group_pcpu,
 };
 
-static void psi_update_work(struct work_struct *work);
+static void psi_avgs_work(struct work_struct *work);
 
 static void group_init(struct psi_group *group)
 {
@@ -173,9 +185,20 @@
 
 	for_each_possible_cpu(cpu)
 		seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
-	group->next_update = sched_clock() + psi_period;
-	INIT_DELAYED_WORK(&group->clock_work, psi_update_work);
-	mutex_init(&group->stat_lock);
+	group->avg_next_update = sched_clock() + psi_period;
+	INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
+	mutex_init(&group->avgs_lock);
+	/* Init trigger-related members */
+	atomic_set(&group->poll_scheduled, 0);
+	mutex_init(&group->trigger_lock);
+	INIT_LIST_HEAD(&group->triggers);
+	memset(group->nr_triggers, 0, sizeof(group->nr_triggers));
+	group->poll_states = 0;
+	group->poll_min_period = U32_MAX;
+	memset(group->polling_total, 0, sizeof(group->polling_total));
+	group->polling_next_update = ULLONG_MAX;
+	group->polling_until = 0;
+	rcu_assign_pointer(group->poll_kworker, NULL);
 }
 
 void __init psi_init(void)
@@ -210,20 +233,24 @@
 	}
 }
 
-static void get_recent_times(struct psi_group *group, int cpu, u32 *times)
+static void get_recent_times(struct psi_group *group, int cpu,
+			     enum psi_aggregators aggregator, u32 *times,
+			     u32 *pchanged_states)
 {
 	struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
-	unsigned int tasks[NR_PSI_TASK_COUNTS];
 	u64 now, state_start;
+	enum psi_states s;
 	unsigned int seq;
-	int s;
+	u32 state_mask;
+
+	*pchanged_states = 0;
 
 	/* Snapshot a coherent view of the CPU state */
 	do {
 		seq = read_seqcount_begin(&groupc->seq);
 		now = cpu_clock(cpu);
 		memcpy(times, groupc->times, sizeof(groupc->times));
-		memcpy(tasks, groupc->tasks, sizeof(groupc->tasks));
+		state_mask = groupc->state_mask;
 		state_start = groupc->state_start;
 	} while (read_seqcount_retry(&groupc->seq, seq));
 
@@ -239,13 +266,15 @@
 		 * (u32) and our reported pressure close to what's
 		 * actually happening.
 		 */
-		if (test_state(tasks, s))
+		if (state_mask & (1 << s))
 			times[s] += now - state_start;
 
-		delta = times[s] - groupc->times_prev[s];
-		groupc->times_prev[s] = times[s];
+		delta = times[s] - groupc->times_prev[aggregator][s];
+		groupc->times_prev[aggregator][s] = times[s];
 
 		times[s] = delta;
+		if (delta)
+			*pchanged_states |= (1 << s);
 	}
 }
 
@@ -269,17 +298,16 @@
 	avg[2] = calc_load(avg[2], EXP_300s, pct);
 }
 
-static bool update_stats(struct psi_group *group)
+static void collect_percpu_times(struct psi_group *group,
+				 enum psi_aggregators aggregator,
+				 u32 *pchanged_states)
 {
 	u64 deltas[NR_PSI_STATES - 1] = { 0, };
-	unsigned long missed_periods = 0;
 	unsigned long nonidle_total = 0;
-	u64 now, expires, period;
+	u32 changed_states = 0;
 	int cpu;
 	int s;
 
-	mutex_lock(&group->stat_lock);
-
 	/*
 	 * Collect the per-cpu time buckets and average them into a
 	 * single time sample that is normalized to wallclock time.
@@ -291,8 +319,11 @@
 	for_each_possible_cpu(cpu) {
 		u32 times[NR_PSI_STATES];
 		u32 nonidle;
+		u32 cpu_changed_states;
 
-		get_recent_times(group, cpu, times);
+		get_recent_times(group, cpu, aggregator, times,
+				&cpu_changed_states);
+		changed_states |= cpu_changed_states;
 
 		nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
 		nonidle_total += nonidle;
@@ -315,13 +346,22 @@
 
 	/* total= */
 	for (s = 0; s < NR_PSI_STATES - 1; s++)
-		group->total[s] += div_u64(deltas[s], max(nonidle_total, 1UL));
+		group->total[aggregator][s] +=
+				div_u64(deltas[s], max(nonidle_total, 1UL));
+
+	if (pchanged_states)
+		*pchanged_states = changed_states;
+}
+
+static u64 update_averages(struct psi_group *group, u64 now)
+{
+	unsigned long missed_periods = 0;
+	u64 expires, period;
+	u64 avg_next_update;
+	int s;
 
 	/* avgX= */
-	now = sched_clock();
-	expires = group->next_update;
-	if (now < expires)
-		goto out;
+	expires = group->avg_next_update;
 	if (now - expires >= psi_period)
 		missed_periods = div_u64(now - expires, psi_period);
 
@@ -332,14 +372,14 @@
 	 * But the deltas we sample out of the per-cpu buckets above
 	 * are based on the actual time elapsing between clock ticks.
 	 */
-	group->next_update = expires + ((1 + missed_periods) * psi_period);
-	period = now - (group->last_update + (missed_periods * psi_period));
-	group->last_update = now;
+	avg_next_update = expires + ((1 + missed_periods) * psi_period);
+	period = now - (group->avg_last_update + (missed_periods * psi_period));
+	group->avg_last_update = now;
 
 	for (s = 0; s < NR_PSI_STATES - 1; s++) {
 		u32 sample;
 
-		sample = group->total[s] - group->total_prev[s];
+		sample = group->total[PSI_AVGS][s] - group->avg_total[s];
 		/*
 		 * Due to the lockless sampling of the time buckets,
 		 * recorded time deltas can slip into the next period,
@@ -359,23 +399,30 @@
 		 */
 		if (sample > period)
 			sample = period;
-		group->total_prev[s] += sample;
+		group->avg_total[s] += sample;
 		calc_avgs(group->avg[s], missed_periods, sample, period);
 	}
-out:
-	mutex_unlock(&group->stat_lock);
-	return nonidle_total;
+
+	return avg_next_update;
 }
 
-static void psi_update_work(struct work_struct *work)
+static void psi_avgs_work(struct work_struct *work)
 {
 	struct delayed_work *dwork;
 	struct psi_group *group;
+	u32 changed_states;
 	bool nonidle;
+	u64 now;
 
 	dwork = to_delayed_work(work);
-	group = container_of(dwork, struct psi_group, clock_work);
+	group = container_of(dwork, struct psi_group, avgs_work);
 
+	mutex_lock(&group->avgs_lock);
+
+	now = sched_clock();
+
+	collect_percpu_times(group, PSI_AVGS, &changed_states);
+	nonidle = changed_states & (1 << PSI_NONIDLE);
 	/*
 	 * If there is task activity, periodically fold the per-cpu
 	 * times and feed samples into the running averages. If things
@@ -383,18 +430,196 @@
 	 * Once restarted, we'll catch up the running averages in one
 	 * go - see calc_avgs() and missed_periods.
 	 */
-
-	nonidle = update_stats(group);
+	if (now >= group->avg_next_update)
+		group->avg_next_update = update_averages(group, now);
 
 	if (nonidle) {
-		unsigned long delay = 0;
-		u64 now;
-
-		now = sched_clock();
-		if (group->next_update > now)
-			delay = nsecs_to_jiffies(group->next_update - now) + 1;
-		schedule_delayed_work(dwork, delay);
+		schedule_delayed_work(dwork, nsecs_to_jiffies(
+				group->avg_next_update - now) + 1);
 	}
+
+	mutex_unlock(&group->avgs_lock);
+}
+
+	/* Trigger tracking window manipulations */
+static void window_reset(struct psi_window *win, u64 now, u64 value,
+			 u64 prev_growth)
+{
+	win->start_time = now;
+	win->start_value = value;
+	win->prev_growth = prev_growth;
+}
+
+/*
+ * PSI growth tracking window update and growth calculation routine.
+ *
+ * This approximates a sliding tracking window by interpolating
+ * partially elapsed windows using historical growth data from the
+ * previous intervals. This minimizes memory requirements (by not storing
+ * all the intermediate values in the previous window) and simplifies
+ * the calculations. It works well because PSI signal changes only in
+ * positive direction and over relatively small window sizes the growth
+ * is close to linear.
+ */
+static u64 window_update(struct psi_window *win, u64 now, u64 value)
+{
+	u64 elapsed;
+	u64 growth;
+
+	elapsed = now - win->start_time;
+	growth = value - win->start_value;
+	/*
+	 * After each tracking window passes win->start_value and
+	 * win->start_time get reset and win->prev_growth stores
+	 * the average per-window growth of the previous window.
+	 * win->prev_growth is then used to interpolate additional
+	 * growth from the previous window assuming it was linear.
+	 */
+	if (elapsed > win->size)
+		window_reset(win, now, value, growth);
+	else {
+		u32 remaining;
+
+		remaining = win->size - elapsed;
+		growth += div_u64(win->prev_growth * remaining, win->size);
+	}
+
+	return growth;
+}
+
+static void init_triggers(struct psi_group *group, u64 now)
+{
+	struct psi_trigger *t;
+
+	list_for_each_entry(t, &group->triggers, node)
+		window_reset(&t->win, now,
+				group->total[PSI_POLL][t->state], 0);
+	memcpy(group->polling_total, group->total[PSI_POLL],
+		   sizeof(group->polling_total));
+	group->polling_next_update = now + group->poll_min_period;
+}
+
+static u64 update_triggers(struct psi_group *group, u64 now)
+{
+	struct psi_trigger *t;
+	bool new_stall = false;
+	u64 *total = group->total[PSI_POLL];
+
+	/*
+	 * On subsequent updates, calculate growth deltas and let
+	 * watchers know when their specified thresholds are exceeded.
+	 */
+	list_for_each_entry(t, &group->triggers, node) {
+		u64 growth;
+
+		/* Check for stall activity */
+		if (group->polling_total[t->state] == total[t->state])
+			continue;
+
+		/*
+		 * Multiple triggers might be looking at the same state,
+		 * remember to update group->polling_total[] once we've
+		 * been through all of them. Also remember to extend the
+		 * polling time if we see new stall activity.
+		 */
+		new_stall = true;
+
+		/* Calculate growth since last update */
+		growth = window_update(&t->win, now, total[t->state]);
+		if (growth < t->threshold)
+			continue;
+
+		/* Limit event signaling to once per window */
+		if (now < t->last_event_time + t->win.size)
+			continue;
+
+		/* Generate an event */
+		if (cmpxchg(&t->event, 0, 1) == 0)
+			wake_up_interruptible(&t->event_wait);
+		t->last_event_time = now;
+	}
+
+	if (new_stall)
+		memcpy(group->polling_total, total,
+				sizeof(group->polling_total));
+
+	return now + group->poll_min_period;
+}
+
+/*
+ * Schedule polling if it's not already scheduled. It's safe to call even from
+ * hotpath because even though kthread_queue_delayed_work takes worker->lock
+ * spinlock that spinlock is never contended due to poll_scheduled atomic
+ * preventing such competition.
+ */
+static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay)
+{
+	struct kthread_worker *kworker;
+
+	/* Do not reschedule if already scheduled */
+	if (atomic_cmpxchg(&group->poll_scheduled, 0, 1) != 0)
+		return;
+
+	rcu_read_lock();
+
+	kworker = rcu_dereference(group->poll_kworker);
+	/*
+	 * kworker might be NULL in case psi_trigger_destroy races with
+	 * psi_task_change (hotpath) which can't use locks
+	 */
+	if (likely(kworker))
+		kthread_queue_delayed_work(kworker, &group->poll_work, delay);
+	else
+		atomic_set(&group->poll_scheduled, 0);
+
+	rcu_read_unlock();
+}
+
+static void psi_poll_work(struct kthread_work *work)
+{
+	struct kthread_delayed_work *dwork;
+	struct psi_group *group;
+	u32 changed_states;
+	u64 now;
+
+	dwork = container_of(work, struct kthread_delayed_work, work);
+	group = container_of(dwork, struct psi_group, poll_work);
+
+	atomic_set(&group->poll_scheduled, 0);
+
+	mutex_lock(&group->trigger_lock);
+
+	now = sched_clock();
+
+	collect_percpu_times(group, PSI_POLL, &changed_states);
+
+	if (changed_states & group->poll_states) {
+		/* Initialize trigger windows when entering polling mode */
+		if (now > group->polling_until)
+			init_triggers(group, now);
+
+		/*
+		 * Keep the monitor active for at least the duration of the
+		 * minimum tracking window as long as monitor states are
+		 * changing.
+		 */
+		group->polling_until = now +
+			group->poll_min_period * UPDATES_PER_WINDOW;
+	}
+
+	if (now > group->polling_until) {
+		group->polling_next_update = ULLONG_MAX;
+		goto out;
+	}
+
+	if (now >= group->polling_next_update)
+		group->polling_next_update = update_triggers(group, now);
+
+	psi_schedule_poll_work(group,
+		nsecs_to_jiffies(group->polling_next_update - now) + 1);
+
+out:
+	mutex_unlock(&group->trigger_lock);
 }
 
 static void record_times(struct psi_group_cpu *groupc, int cpu,
@@ -407,15 +632,15 @@
 	delta = now - groupc->state_start;
 	groupc->state_start = now;
 
-	if (test_state(groupc->tasks, PSI_IO_SOME)) {
+	if (groupc->state_mask & (1 << PSI_IO_SOME)) {
 		groupc->times[PSI_IO_SOME] += delta;
-		if (test_state(groupc->tasks, PSI_IO_FULL))
+		if (groupc->state_mask & (1 << PSI_IO_FULL))
 			groupc->times[PSI_IO_FULL] += delta;
 	}
 
-	if (test_state(groupc->tasks, PSI_MEM_SOME)) {
+	if (groupc->state_mask & (1 << PSI_MEM_SOME)) {
 		groupc->times[PSI_MEM_SOME] += delta;
-		if (test_state(groupc->tasks, PSI_MEM_FULL))
+		if (groupc->state_mask & (1 << PSI_MEM_FULL))
 			groupc->times[PSI_MEM_FULL] += delta;
 		else if (memstall_tick) {
 			u32 sample;
@@ -436,18 +661,20 @@
 		}
 	}
 
-	if (test_state(groupc->tasks, PSI_CPU_SOME))
+	if (groupc->state_mask & (1 << PSI_CPU_SOME))
 		groupc->times[PSI_CPU_SOME] += delta;
 
-	if (test_state(groupc->tasks, PSI_NONIDLE))
+	if (groupc->state_mask & (1 << PSI_NONIDLE))
 		groupc->times[PSI_NONIDLE] += delta;
 }
 
-static void psi_group_change(struct psi_group *group, int cpu,
-			     unsigned int clear, unsigned int set)
+static u32 psi_group_change(struct psi_group *group, int cpu,
+			    unsigned int clear, unsigned int set)
 {
 	struct psi_group_cpu *groupc;
 	unsigned int t, m;
+	enum psi_states s;
+	u32 state_mask = 0;
 
 	groupc = per_cpu_ptr(group->pcpu, cpu);
 
@@ -480,7 +707,16 @@
 		if (set & (1 << t))
 			groupc->tasks[t]++;
 
+	/* Calculate state mask representing active states */
+	for (s = 0; s < NR_PSI_STATES; s++) {
+		if (test_state(groupc->tasks, s))
+			state_mask |= (1 << s);
+	}
+	groupc->state_mask = state_mask;
+
 	write_seqcount_end(&groupc->seq);
+
+	return state_mask;
 }
 
 static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
@@ -537,13 +773,17 @@
 	 */
 	if (unlikely((clear & TSK_RUNNING) &&
 		     (task->flags & PF_WQ_WORKER) &&
-		     wq_worker_last_func(task) == psi_update_work))
+		     wq_worker_last_func(task) == psi_avgs_work))
 		wake_clock = false;
 
 	while ((group = iterate_groups(task, &iter))) {
-		psi_group_change(group, cpu, clear, set);
-		if (wake_clock && !delayed_work_pending(&group->clock_work))
-			schedule_delayed_work(&group->clock_work, PSI_FREQ);
+		u32 state_mask = psi_group_change(group, cpu, clear, set);
+
+		if (state_mask & group->poll_states)
+			psi_schedule_poll_work(group, 1);
+
+		if (wake_clock && !delayed_work_pending(&group->avgs_work))
+			schedule_delayed_work(&group->avgs_work, PSI_FREQ);
 	}
 }
 
@@ -640,8 +880,10 @@
 	if (static_branch_likely(&psi_disabled))
 		return;
 
-	cancel_delayed_work_sync(&cgroup->psi.clock_work);
+	cancel_delayed_work_sync(&cgroup->psi.avgs_work);
 	free_percpu(cgroup->psi.pcpu);
+	/* All triggers must be removed by now */
+	WARN_ONCE(cgroup->psi.poll_states, "psi: trigger leak\n");
 }
 
 /**
@@ -697,11 +939,18 @@
 int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
 {
 	int full;
+	u64 now;
 
 	if (static_branch_likely(&psi_disabled))
 		return -EOPNOTSUPP;
 
-	update_stats(group);
+	/* Update averages before reporting them */
+	mutex_lock(&group->avgs_lock);
+	now = sched_clock();
+	collect_percpu_times(group, PSI_AVGS, NULL);
+	if (now >= group->avg_next_update)
+		group->avg_next_update = update_averages(group, now);
+	mutex_unlock(&group->avgs_lock);
 
 	for (full = 0; full < 2 - (res == PSI_CPU); full++) {
 		unsigned long avg[3];
@@ -710,7 +959,8 @@
 
 		for (w = 0; w < 3; w++)
 			avg[w] = group->avg[res * 2 + full][w];
-		total = div_u64(group->total[res * 2 + full], NSEC_PER_USEC);
+		total = div_u64(group->total[PSI_AVGS][res * 2 + full],
+				NSEC_PER_USEC);
 
 		seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
 			   full ? "full" : "some",
@@ -753,25 +1003,270 @@
 	return single_open(file, psi_cpu_show, NULL);
 }
 
+struct psi_trigger *psi_trigger_create(struct psi_group *group,
+			char *buf, size_t nbytes, enum psi_res res)
+{
+	struct psi_trigger *t;
+	enum psi_states state;
+	u32 threshold_us;
+	u32 window_us;
+
+	if (static_branch_likely(&psi_disabled))
+		return ERR_PTR(-EOPNOTSUPP);
+
+	if (sscanf(buf, "some %u %u", &threshold_us, &window_us) == 2)
+		state = PSI_IO_SOME + res * 2;
+	else if (sscanf(buf, "full %u %u", &threshold_us, &window_us) == 2)
+		state = PSI_IO_FULL + res * 2;
+	else
+		return ERR_PTR(-EINVAL);
+
+	if (state >= PSI_NONIDLE)
+		return ERR_PTR(-EINVAL);
+
+	if (window_us < WINDOW_MIN_US ||
+		window_us > WINDOW_MAX_US)
+		return ERR_PTR(-EINVAL);
+
+	/* Check threshold */
+	if (threshold_us == 0 || threshold_us > window_us)
+		return ERR_PTR(-EINVAL);
+
+	t = kmalloc(sizeof(*t), GFP_KERNEL);
+	if (!t)
+		return ERR_PTR(-ENOMEM);
+
+	t->group = group;
+	t->state = state;
+	t->threshold = threshold_us * NSEC_PER_USEC;
+	t->win.size = window_us * NSEC_PER_USEC;
+	window_reset(&t->win, 0, 0, 0);
+
+	t->event = 0;
+	t->last_event_time = 0;
+	init_waitqueue_head(&t->event_wait);
+	kref_init(&t->refcount);
+
+	mutex_lock(&group->trigger_lock);
+
+	if (!rcu_access_pointer(group->poll_kworker)) {
+		struct sched_param param = {
+			.sched_priority = MAX_RT_PRIO - 1,
+		};
+		struct kthread_worker *kworker;
+
+		kworker = kthread_create_worker(0, "psimon");
+		if (IS_ERR(kworker)) {
+			kfree(t);
+			mutex_unlock(&group->trigger_lock);
+			return ERR_CAST(kworker);
+		}
+		sched_setscheduler(kworker->task, SCHED_FIFO, &param);
+		kthread_init_delayed_work(&group->poll_work,
+				psi_poll_work);
+		rcu_assign_pointer(group->poll_kworker, kworker);
+	}
+
+	list_add(&t->node, &group->triggers);
+	group->poll_min_period = min(group->poll_min_period,
+		div_u64(t->win.size, UPDATES_PER_WINDOW));
+	group->nr_triggers[t->state]++;
+	group->poll_states |= (1 << t->state);
+
+	mutex_unlock(&group->trigger_lock);
+
+	return t;
+}
+
+static void psi_trigger_destroy(struct kref *ref)
+{
+	struct psi_trigger *t = container_of(ref, struct psi_trigger, refcount);
+	struct psi_group *group = t->group;
+	struct kthread_worker *kworker_to_destroy = NULL;
+
+	if (static_branch_likely(&psi_disabled))
+		return;
+
+	/*
+	 * Wakeup waiters to stop polling. Can happen if cgroup is deleted
+	 * from under a polling process.
+	 */
+	wake_up_interruptible(&t->event_wait);
+
+	mutex_lock(&group->trigger_lock);
+
+	if (!list_empty(&t->node)) {
+		struct psi_trigger *tmp;
+		u64 period = ULLONG_MAX;
+
+		list_del(&t->node);
+		group->nr_triggers[t->state]--;
+		if (!group->nr_triggers[t->state])
+			group->poll_states &= ~(1 << t->state);
+		/* reset min update period for the remaining triggers */
+		list_for_each_entry(tmp, &group->triggers, node)
+			period = min(period, div_u64(tmp->win.size,
+					UPDATES_PER_WINDOW));
+		group->poll_min_period = period;
+		/* Destroy poll_kworker when the last trigger is destroyed */
+		if (group->poll_states == 0) {
+			group->polling_until = 0;
+			kworker_to_destroy = rcu_dereference_protected(
+					group->poll_kworker,
+					lockdep_is_held(&group->trigger_lock));
+			rcu_assign_pointer(group->poll_kworker, NULL);
+		}
+	}
+
+	mutex_unlock(&group->trigger_lock);
+
+	/*
+	 * Wait for both *trigger_ptr from psi_trigger_replace and
+	 * poll_kworker RCUs to complete their read-side critical sections
+	 * before destroying the trigger and optionally the poll_kworker
+	 */
+	synchronize_rcu();
+	/*
+	 * Destroy the kworker after releasing trigger_lock to prevent a
+	 * deadlock while waiting for psi_poll_work to acquire trigger_lock
+	 */
+	if (kworker_to_destroy) {
+		kthread_cancel_delayed_work_sync(&group->poll_work);
+		kthread_destroy_worker(kworker_to_destroy);
+	}
+	kfree(t);
+}
+
+void psi_trigger_replace(void **trigger_ptr, struct psi_trigger *new)
+{
+	struct psi_trigger *old = *trigger_ptr;
+
+	if (static_branch_likely(&psi_disabled))
+		return;
+
+	rcu_assign_pointer(*trigger_ptr, new);
+	if (old)
+		kref_put(&old->refcount, psi_trigger_destroy);
+}
+
+__poll_t psi_trigger_poll(void **trigger_ptr,
+				struct file *file, poll_table *wait)
+{
+	__poll_t ret = DEFAULT_POLLMASK;
+	struct psi_trigger *t;
+
+	if (static_branch_likely(&psi_disabled))
+		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
+
+	rcu_read_lock();
+
+	t = rcu_dereference(*(void __rcu __force **)trigger_ptr);
+	if (!t) {
+		rcu_read_unlock();
+		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
+	}
+	kref_get(&t->refcount);
+
+	rcu_read_unlock();
+
+	poll_wait(file, &t->event_wait, wait);
+
+	if (cmpxchg(&t->event, 1, 0) == 1)
+		ret |= EPOLLPRI;
+
+	kref_put(&t->refcount, psi_trigger_destroy);
+
+	return ret;
+}
+
+static ssize_t psi_write(struct file *file, const char __user *user_buf,
+			 size_t nbytes, enum psi_res res)
+{
+	char buf[32];
+	size_t buf_size;
+	struct seq_file *seq;
+	struct psi_trigger *new;
+
+	if (static_branch_likely(&psi_disabled))
+		return -EOPNOTSUPP;
+
+	buf_size = min(nbytes, (sizeof(buf) - 1));
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size - 1] = '\0';
+
+	new = psi_trigger_create(&psi_system, buf, nbytes, res);
+	if (IS_ERR(new))
+		return PTR_ERR(new);
+
+	seq = file->private_data;
+	/* Take seq->lock to protect seq->private from concurrent writes */
+	mutex_lock(&seq->lock);
+	psi_trigger_replace(&seq->private, new);
+	mutex_unlock(&seq->lock);
+
+	return nbytes;
+}
+
+static ssize_t psi_io_write(struct file *file, const char __user *user_buf,
+			    size_t nbytes, loff_t *ppos)
+{
+	return psi_write(file, user_buf, nbytes, PSI_IO);
+}
+
+static ssize_t psi_memory_write(struct file *file, const char __user *user_buf,
+				size_t nbytes, loff_t *ppos)
+{
+	return psi_write(file, user_buf, nbytes, PSI_MEM);
+}
+
+static ssize_t psi_cpu_write(struct file *file, const char __user *user_buf,
+			     size_t nbytes, loff_t *ppos)
+{
+	return psi_write(file, user_buf, nbytes, PSI_CPU);
+}
+
+static __poll_t psi_fop_poll(struct file *file, poll_table *wait)
+{
+	struct seq_file *seq = file->private_data;
+
+	return psi_trigger_poll(&seq->private, file, wait);
+}
+
+static int psi_fop_release(struct inode *inode, struct file *file)
+{
+	struct seq_file *seq = file->private_data;
+
+	psi_trigger_replace(&seq->private, NULL);
+	return single_release(inode, file);
+}
+
 static const struct file_operations psi_io_fops = {
 	.open           = psi_io_open,
 	.read           = seq_read,
 	.llseek         = seq_lseek,
-	.release        = single_release,
+	.write          = psi_io_write,
+	.poll           = psi_fop_poll,
+	.release        = psi_fop_release,
 };
 
 static const struct file_operations psi_memory_fops = {
 	.open           = psi_memory_open,
 	.read           = seq_read,
 	.llseek         = seq_lseek,
-	.release        = single_release,
+	.write          = psi_memory_write,
+	.poll           = psi_fop_poll,
+	.release        = psi_fop_release,
 };
 
 static const struct file_operations psi_cpu_fops = {
 	.open           = psi_cpu_open,
 	.read           = seq_read,
 	.llseek         = seq_lseek,
-	.release        = single_release,
+	.write          = psi_cpu_write,
+	.poll           = psi_fop_poll,
+	.release        = psi_fop_release,
 };
 
 static int __init psi_proc_init(void)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index a0b7281..1516804 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2659,7 +2659,7 @@
 }
 #endif
 
-#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
+#ifdef CONFIG_ENERGY_MODEL
 #define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus)))
 #else
 #define perf_domain_span(pd) NULL
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 06dbb45..9c392dd 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -202,9 +202,7 @@
 }
 
 DEFINE_STATIC_KEY_FALSE(sched_energy_present);
-#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
-DEFINE_MUTEX(sched_energy_mutex);
-bool sched_energy_update;
+#ifdef CONFIG_ENERGY_MODEL
 
 static void free_pd(struct perf_domain *pd)
 {
@@ -292,7 +290,6 @@
  *    1. an Energy Model (EM) is available;
  *    2. the SD_ASYM_CPUCAPACITY flag is set in the sched_domain hierarchy.
  *    3. the EM complexity is low enough to keep scheduling overheads low;
- *    4. schedutil is driving the frequency of all CPUs of the rd;
  *
  * The complexity of the Energy Model is defined as:
  *
@@ -312,15 +309,12 @@
  */
 #define EM_MAX_COMPLEXITY 2048
 
-extern struct cpufreq_governor schedutil_gov;
 static bool build_perf_domains(const struct cpumask *cpu_map)
 {
 	int i, nr_pd = 0, nr_cs = 0, nr_cpus = cpumask_weight(cpu_map);
 	struct perf_domain *pd = NULL, *tmp;
 	int cpu = cpumask_first(cpu_map);
 	struct root_domain *rd = cpu_rq(cpu)->rd;
-	struct cpufreq_policy *policy;
-	struct cpufreq_governor *gov;
 
 	/* EAS is enabled for asymmetric CPU capacity topologies. */
 	if (!per_cpu(sd_asym_cpucapacity, cpu)) {
@@ -336,19 +330,6 @@
 		if (find_pd(pd, i))
 			continue;
 
-		/* Do not attempt EAS if schedutil is not being used. */
-		policy = cpufreq_cpu_get(i);
-		if (!policy)
-			goto free;
-		gov = policy->governor;
-		cpufreq_cpu_put(policy);
-		if (gov != &schedutil_gov) {
-			if (rd->pd)
-				pr_warn("rd %*pbl: Disabling EAS, schedutil is mandatory\n",
-						cpumask_pr_args(cpu_map));
-			goto free;
-		}
-
 		/* Create the new pd and add it to the local list. */
 		tmp = pd_init(i);
 		if (!tmp)
@@ -392,7 +373,7 @@
 }
 #else
 static void free_pd(struct perf_domain *pd) { }
-#endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL*/
+#endif /* CONFIG_ENERGY_MODEL */
 
 static void free_rootdomain(struct rcu_head *rcu)
 {
@@ -2214,10 +2195,10 @@
 		;
 	}
 
-#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
+#ifdef CONFIG_ENERGY_MODEL
 	/* Build perf. domains: */
 	for (i = 0; i < ndoms_new; i++) {
-		for (j = 0; j < n && !sched_energy_update; j++) {
+		for (j = 0; j < n; j++) {
 			if (cpumask_equal(doms_new[i], doms_cur[j]) &&
 			    cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) {
 				has_eas = true;
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 8116c86..4d506f6 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -499,6 +499,12 @@
 	struct hrtimer_clock_base *base;
 	ktime_t expires;
 
+	/*
+	 * Skip initializing cpu_base->next_timer to NULL as we skip updating
+	 * next_timer in below loop if the timer is being exluded.
+	 */
+	if (!exclude)
+		cpu_base->next_timer = NULL;
 	for_each_active_base(base, cpu_base, active) {
 		struct timerqueue_node *next;
 		struct hrtimer *timer;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index cf9780a..778ce93 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -1264,31 +1264,6 @@
  * High resolution timer specific code
  */
 #ifdef CONFIG_HIGH_RES_TIMERS
-static void update_rq_stats(void)
-{
-	unsigned long jiffy_gap = 0;
-	unsigned int rq_avg = 0;
-	unsigned long flags = 0;
-
-	jiffy_gap = jiffies - rq_info.rq_poll_last_jiffy;
-	if (jiffy_gap >= rq_info.rq_poll_jiffies) {
-		spin_lock_irqsave(&rq_lock, flags);
-		if (!rq_info.rq_avg)
-			rq_info.rq_poll_total_jiffies = 0;
-		rq_avg = nr_running() * 10;
-		if (rq_info.rq_poll_total_jiffies) {
-			rq_avg = (rq_avg * jiffy_gap) +
-				(rq_info.rq_avg *
-				 rq_info.rq_poll_total_jiffies);
-			do_div(rq_avg,
-				rq_info.rq_poll_total_jiffies + jiffy_gap);
-		}
-		rq_info.rq_avg = rq_avg;
-		rq_info.rq_poll_total_jiffies += jiffy_gap;
-		rq_info.rq_poll_last_jiffy = jiffies;
-		spin_unlock_irqrestore(&rq_lock, flags);
-	}
-}
 static void wakeup_user(void)
 {
 	unsigned long jiffy_gap;
@@ -1322,10 +1297,6 @@
 		if (rq_info.init == 1 &&
 				tick_do_timer_cpu == smp_processor_id()) {
 			/*
-			 * update run queue statistics
-			 */
-			update_rq_stats();
-			/*
 			 * wakeup user if needed
 			 */
 			wakeup_user();
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 9864a35..6c28d51 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1158,22 +1158,12 @@
 
 int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
 {
-	int err;
-
-	mutex_lock(&bpf_event_mutex);
-	err = __bpf_probe_register(btp, prog);
-	mutex_unlock(&bpf_event_mutex);
-	return err;
+	return __bpf_probe_register(btp, prog);
 }
 
 int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
 {
-	int err;
-
-	mutex_lock(&bpf_event_mutex);
-	err = tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
-	mutex_unlock(&bpf_event_mutex);
-	return err;
+	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
 }
 
 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 5574e86..5a1c64a 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1301,7 +1301,7 @@
 		/* go past the last quote */
 		i++;
 
-	} else if (isdigit(str[i])) {
+	} else if (isdigit(str[i]) || str[i] == '-') {
 
 		/* Make sure the field is not a string */
 		if (is_string_field(field)) {
@@ -1314,6 +1314,9 @@
 			goto err_free;
 		}
 
+		if (str[i] == '-')
+			i++;
+
 		/* We allow 0xDEADBEEF */
 		while (isalnum(str[i]))
 			i++;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 9ef9ece..7d8ae47 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -920,6 +920,16 @@
  * CONTEXT:
  * spin_lock_irq(rq->lock)
  *
+ * This function is called during schedule() when a kworker is going
+ * to sleep. It's used by psi to identify aggregation workers during
+ * dequeuing, to allow periodic aggregation to shut-off when that
+ * worker is the last task in the system or cgroup to go to sleep.
+ *
+ * As this function doesn't involve any workqueue-related locking, it
+ * only returns stable values when called from inside the scheduler's
+ * queuing and dequeuing paths, when @task, which must be a kworker,
+ * is guaranteed to not be processing any works.
+ *
  * Return:
  * The last work function %current executed as a worker, NULL if it
  * hasn't executed any work yet.
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index d82d022..9cf7762 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -632,7 +632,7 @@
 	config->test_driver = NULL;
 
 	kfree_const(config->test_fs);
-	config->test_driver = NULL;
+	config->test_fs = NULL;
 }
 
 static void kmod_config_free(struct kmod_test_device *test_dev)
diff --git a/mm/Kconfig b/mm/Kconfig
index 9dce3a8..6975182 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -781,6 +781,28 @@
 	  information includes global and per chunk statistics, which can
 	  be used to help understand percpu memory usage.
 
+config ARCH_SUPPORTS_SPECULATIVE_PAGE_FAULT
+       def_bool n
+
+config SPECULATIVE_PAGE_FAULT
+       bool "Speculative page faults"
+       default y
+       depends on ARCH_SUPPORTS_SPECULATIVE_PAGE_FAULT
+       depends on MMU && SMP
+       help
+         Try to handle user space page faults without holding the mmap_sem.
+
+	 This should allow better concurrency for massively threaded processes,
+	 since the page fault handler will not wait for other threads' memory
+	 layout changes to complete, assuming that those changes are done in
+	 another part of the process's memory space. This type of page fault
+	 is named a speculative page fault.
+
+	 If the speculative page fault fails because a concurrency issue is
+	 detected or because the underlying PMD or PTE tables are not yet
+	 allocated, the speculative handling is abandoned and a classic page
+	 fault is then tried.
+
 config GUP_BENCHMARK
 	bool "Enable infrastructure for get_user_pages_fast() benchmarking"
 	default n
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 8a8bb87..72e6d0c 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -689,6 +689,7 @@
 	INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
 	bdi->cgwb_congested_tree = RB_ROOT;
 	mutex_init(&bdi->cgwb_release_mutex);
+	init_rwsem(&bdi->wb_switch_rwsem);
 
 	ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
 	if (!ret) {
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 10e8367..9e5f66c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3624,7 +3624,6 @@
 	copy_user_huge_page(new_page, old_page, address, vma,
 			    pages_per_huge_page(h));
 	__SetPageUptodate(new_page);
-	set_page_huge_active(new_page);
 
 	mmun_start = haddr;
 	mmun_end = mmun_start + huge_page_size(h);
@@ -3646,6 +3645,7 @@
 				make_huge_pte(vma, new_page, 1));
 		page_remove_rmap(old_page, true);
 		hugepage_add_new_anon_rmap(new_page, vma, haddr);
+		set_page_huge_active(new_page);
 		/* Make the old page be freed below */
 		new_page = old_page;
 	}
@@ -3730,6 +3730,7 @@
 	pte_t new_pte;
 	spinlock_t *ptl;
 	unsigned long haddr = address & huge_page_mask(h);
+	bool new_page = false;
 
 	/*
 	 * Currently, we are forced to kill the process in the event the
@@ -3791,7 +3792,7 @@
 		}
 		clear_huge_page(page, address, pages_per_huge_page(h));
 		__SetPageUptodate(page);
-		set_page_huge_active(page);
+		new_page = true;
 
 		if (vma->vm_flags & VM_MAYSHARE) {
 			int err = huge_add_to_page_cache(page, mapping, idx);
@@ -3862,6 +3863,15 @@
 	}
 
 	spin_unlock(ptl);
+
+	/*
+	 * Only make newly allocated pages active.  Existing pages found
+	 * in the pagecache could be !page_huge_active() if they have been
+	 * isolated for migration.
+	 */
+	if (new_page)
+		set_page_huge_active(page);
+
 	unlock_page(page);
 out:
 	return ret;
@@ -4096,7 +4106,6 @@
 	 * the set_pte_at() write.
 	 */
 	__SetPageUptodate(page);
-	set_page_huge_active(page);
 
 	mapping = dst_vma->vm_file->f_mapping;
 	idx = vma_hugecache_offset(h, dst_vma, dst_addr);
@@ -4164,6 +4173,7 @@
 	update_mmu_cache(dst_vma, dst_addr, dst_pte);
 
 	spin_unlock(ptl);
+	set_page_huge_active(page);
 	if (vm_shared)
 		unlock_page(page);
 	ret = 0;
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 37c5c51..2cf470a 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1287,11 +1287,13 @@
 	return PageBuddy(page) && page_order(page) >= pageblock_order;
 }
 
-/* Return the start of the next active pageblock after a given page */
-static struct page *next_active_pageblock(struct page *page)
+/* Return the pfn of the start of the next active pageblock after a given pfn */
+static unsigned long next_active_pageblock(unsigned long pfn)
 {
+	struct page *page = pfn_to_page(pfn);
+
 	/* Ensure the starting page is pageblock-aligned */
-	BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
+	BUG_ON(pfn & (pageblock_nr_pages - 1));
 
 	/* If the entire pageblock is free, move to the end of free page */
 	if (pageblock_free(page)) {
@@ -1299,16 +1301,16 @@
 		/* be careful. we don't have locks, page_order can be changed.*/
 		order = page_order(page);
 		if ((order < MAX_ORDER) && (order >= pageblock_order))
-			return page + (1 << order);
+			return pfn + (1 << order);
 	}
 
-	return page + pageblock_nr_pages;
+	return pfn + pageblock_nr_pages;
 }
 
-static bool is_pageblock_removable_nolock(struct page *page)
+static bool is_pageblock_removable_nolock(unsigned long pfn)
 {
+	struct page *page = pfn_to_page(pfn);
 	struct zone *zone;
-	unsigned long pfn;
 
 	/*
 	 * We have to be careful here because we are iterating over memory
@@ -1331,12 +1333,14 @@
 /* Checks if this range of memory is likely to be hot-removable. */
 bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
 {
-	struct page *page = pfn_to_page(start_pfn);
-	struct page *end_page = page + nr_pages;
+	unsigned long end_pfn, pfn;
+
+	end_pfn = min(start_pfn + nr_pages,
+			zone_end_pfn(page_zone(pfn_to_page(start_pfn))));
 
 	/* Check the starting page of each pageblock within the range */
-	for (; page < end_page; page = next_active_pageblock(page)) {
-		if (!is_pageblock_removable_nolock(page))
+	for (pfn = start_pfn; pfn < end_pfn; pfn = next_active_pageblock(pfn)) {
+		if (!is_pageblock_removable_nolock(pfn))
 			return false;
 		cond_resched();
 	}
@@ -1372,6 +1376,9 @@
 				i++;
 			if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
 				continue;
+			/* Check if we got outside of the zone */
+			if (zone && !zone_spans_pfn(zone, pfn + i))
+				return 0;
 			page = pfn_to_page(pfn + i);
 			if (zone && page_zone(page) != zone)
 				return 0;
diff --git a/mm/migrate.c b/mm/migrate.c
index f381635..b80f4da 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1305,6 +1305,16 @@
 		lock_page(hpage);
 	}
 
+	/*
+	 * Check for pages which are in the process of being freed.  Without
+	 * page_mapping() set, hugetlbfs specific move page routine will not
+	 * be called and we could leak usage counts for subpools.
+	 */
+	if (page_private(hpage) && !page_mapping(hpage)) {
+		rc = -EBUSY;
+		goto out_unlock;
+	}
+
 	if (PageAnon(hpage))
 		anon_vma = page_get_anon_vma(hpage);
 
@@ -1335,6 +1345,7 @@
 		put_new_page = NULL;
 	}
 
+out_unlock:
 	unlock_page(hpage);
 out:
 	if (rc != -EAGAIN)
diff --git a/mm/mmap.c b/mm/mmap.c
index 53bbe0d..2ffb564 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2400,12 +2400,11 @@
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct vm_area_struct *prev;
-	int error;
+	int error = 0;
 
 	address &= PAGE_MASK;
-	error = security_mmap_addr(address);
-	if (error)
-		return error;
+	if (address < mmap_min_addr)
+		return -EPERM;
 
 	/* Enforce stack_guard_gap */
 	prev = vma->vm_prev;
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index e8090f0..ef0dec2 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -104,6 +104,9 @@
 
 		ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo);
 
+		/* free the TID stats immediately */
+		cfg80211_sinfo_release_content(&sinfo);
+
 		dev_put(real_netdev);
 		if (ret == -ENOENT) {
 			/* Node is not associated anymore! It would be
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 6fa61b8..a4d6d77 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -183,15 +183,25 @@
 }
 EXPORT_SYMBOL(bt_sock_unlink);
 
-void bt_accept_enqueue(struct sock *parent, struct sock *sk)
+void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh)
 {
 	BT_DBG("parent %p, sk %p", parent, sk);
 
 	sock_hold(sk);
-	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+
+	if (bh)
+		bh_lock_sock_nested(sk);
+	else
+		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+
 	list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q);
 	bt_sk(sk)->parent = parent;
-	release_sock(sk);
+
+	if (bh)
+		bh_unlock_sock(sk);
+	else
+		release_sock(sk);
+
 	parent->sk_ack_backlog++;
 }
 EXPORT_SYMBOL(bt_accept_enqueue);
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 686bdc6..a3a2cd5 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -1252,7 +1252,7 @@
 
 	l2cap_sock_init(sk, parent);
 
-	bt_accept_enqueue(parent, sk);
+	bt_accept_enqueue(parent, sk, false);
 
 	release_sock(parent);
 
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index d606e92..c044ff2 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -988,7 +988,7 @@
 	rfcomm_pi(sk)->channel = channel;
 
 	sk->sk_state = BT_CONFIG;
-	bt_accept_enqueue(parent, sk);
+	bt_accept_enqueue(parent, sk, true);
 
 	/* Accept connection and return socket DLC */
 	*d = rfcomm_pi(sk)->dlc;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 8f0f927..a4ca55d 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -193,7 +193,7 @@
 	conn->sk = sk;
 
 	if (parent)
-		bt_accept_enqueue(parent, sk);
+		bt_accept_enqueue(parent, sk, true);
 }
 
 static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 5e55cef..6693e20 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -2293,9 +2293,12 @@
 
 	xt_compat_lock(NFPROTO_BRIDGE);
 
-	ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
-	if (ret < 0)
-		goto out_unlock;
+	if (tmp.nentries) {
+		ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
+		if (ret < 0)
+			goto out_unlock;
+	}
+
 	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
 	if (ret < 0)
 		goto out_unlock;
diff --git a/net/core/filter.c b/net/core/filter.c
index fb0080e..bed9061 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3909,10 +3909,12 @@
 		/* Only some socketops are supported */
 		switch (optname) {
 		case SO_RCVBUF:
+			val = min_t(u32, val, sysctl_rmem_max);
 			sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
 			sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
 			break;
 		case SO_SNDBUF:
+			val = min_t(u32, val, sysctl_wmem_max);
 			sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
 			sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
 			break;
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index 188d693..e2fd8ba 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -256,7 +256,6 @@
 	for_each_possible_cpu(i) {
 		const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);
 
-		qstats->qlen = 0;
 		qstats->backlog += qcpu->backlog;
 		qstats->drops += qcpu->drops;
 		qstats->requeues += qcpu->requeues;
@@ -272,7 +271,6 @@
 	if (cpu) {
 		__gnet_stats_copy_queue_cpu(qstats, cpu);
 	} else {
-		qstats->qlen = q->qlen;
 		qstats->backlog = q->backlog;
 		qstats->drops = q->drops;
 		qstats->requeues = q->requeues;
diff --git a/net/core/gro_cells.c b/net/core/gro_cells.c
index acf45dd..e095fb8 100644
--- a/net/core/gro_cells.c
+++ b/net/core/gro_cells.c
@@ -13,22 +13,36 @@
 {
 	struct net_device *dev = skb->dev;
 	struct gro_cell *cell;
+	int res;
 
-	if (!gcells->cells || skb_cloned(skb) || netif_elide_gro(dev))
-		return netif_rx(skb);
+	rcu_read_lock();
+	if (unlikely(!(dev->flags & IFF_UP)))
+		goto drop;
+
+	if (!gcells->cells || skb_cloned(skb) || netif_elide_gro(dev)) {
+		res = netif_rx(skb);
+		goto unlock;
+	}
 
 	cell = this_cpu_ptr(gcells->cells);
 
 	if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
+drop:
 		atomic_long_inc(&dev->rx_dropped);
 		kfree_skb(skb);
-		return NET_RX_DROP;
+		res = NET_RX_DROP;
+		goto unlock;
 	}
 
 	__skb_queue_tail(&cell->napi_skbs, skb);
 	if (skb_queue_len(&cell->napi_skbs) == 1)
 		napi_schedule(&cell->napi);
-	return NET_RX_SUCCESS;
+
+	res = NET_RX_SUCCESS;
+
+unlock:
+	rcu_read_unlock();
+	return res;
 }
 EXPORT_SYMBOL(gro_cells_receive);
 
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index bd67c4d..2aabb7e 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -1547,6 +1547,9 @@
 error:
 	netdev_queue_update_kobjects(dev, txq, 0);
 	net_rx_queue_update_kobjects(dev, rxq, 0);
+#ifdef CONFIG_SYSFS
+	kset_unregister(dev->queues_kset);
+#endif
 	return error;
 }
 
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index b8cd43c..a97bf32 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -94,9 +94,8 @@
 			&& (old_operstate != IF_OPER_UP)) {
 		/* Went up */
 		hsr->announce_count = 0;
-		hsr->announce_timer.expires = jiffies +
-				msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
-		add_timer(&hsr->announce_timer);
+		mod_timer(&hsr->announce_timer,
+			  jiffies + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL));
 	}
 
 	if ((hsr_dev->operstate != IF_OPER_UP) && (old_operstate == IF_OPER_UP))
@@ -332,6 +331,7 @@
 {
 	struct hsr_priv *hsr;
 	struct hsr_port *master;
+	unsigned long interval;
 
 	hsr = from_timer(hsr, t, announce_timer);
 
@@ -343,18 +343,16 @@
 				hsr->protVersion);
 		hsr->announce_count++;
 
-		hsr->announce_timer.expires = jiffies +
-				msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
+		interval = msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
 	} else {
 		send_hsr_supervision_frame(master, HSR_TLV_LIFE_CHECK,
 				hsr->protVersion);
 
-		hsr->announce_timer.expires = jiffies +
-				msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
+		interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
 	}
 
 	if (is_admin_up(master->dev))
-		add_timer(&hsr->announce_timer);
+		mod_timer(&hsr->announce_timer, jiffies + interval);
 
 	rcu_read_unlock();
 }
@@ -486,7 +484,7 @@
 
 	res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER);
 	if (res)
-		return res;
+		goto err_add_port;
 
 	res = register_netdevice(hsr_dev);
 	if (res)
@@ -506,6 +504,8 @@
 fail:
 	hsr_for_each_port(hsr, port)
 		hsr_del_port(port);
+err_add_port:
+	hsr_del_node(&hsr->self_node_db);
 
 	return res;
 }
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index 286ceb4..9af16cb 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -124,6 +124,18 @@
 	return 0;
 }
 
+void hsr_del_node(struct list_head *self_node_db)
+{
+	struct hsr_node *node;
+
+	rcu_read_lock();
+	node = list_first_or_null_rcu(self_node_db, struct hsr_node, mac_list);
+	rcu_read_unlock();
+	if (node) {
+		list_del_rcu(&node->mac_list);
+		kfree(node);
+	}
+}
 
 /* Allocate an hsr_node and add it to node_db. 'addr' is the node's AddressA;
  * seq_out is used to initialize filtering of outgoing duplicate frames
diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h
index 370b459..531fd3d 100644
--- a/net/hsr/hsr_framereg.h
+++ b/net/hsr/hsr_framereg.h
@@ -16,6 +16,7 @@
 
 struct hsr_node;
 
+void hsr_del_node(struct list_head *self_node_db);
 struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
 			      u16 seq_out);
 struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 777fa3b..f0165c5 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -667,7 +667,8 @@
 	case CIPSO_V4_MAP_PASS:
 		return 0;
 	case CIPSO_V4_MAP_TRANS:
-		if (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL)
+		if ((level < doi_def->map.std->lvl.cipso_size) &&
+		    (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL))
 			return 0;
 		break;
 	}
@@ -1735,13 +1736,26 @@
  */
 void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
 {
+	unsigned char optbuf[sizeof(struct ip_options) + 40];
+	struct ip_options *opt = (struct ip_options *)optbuf;
+
 	if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES)
 		return;
 
+	/*
+	 * We might be called above the IP layer,
+	 * so we can not use icmp_send and IPCB here.
+	 */
+
+	memset(opt, 0, sizeof(struct ip_options));
+	opt->optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
+	if (__ip_options_compile(dev_net(skb->dev), opt, skb, NULL))
+		return;
+
 	if (gateway)
-		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0);
+		__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0, opt);
 	else
-		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0);
+		__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0, opt);
 }
 
 /**
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 958e185..dae743b 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -700,6 +700,10 @@
 		case RTA_GATEWAY:
 			cfg->fc_gw = nla_get_be32(attr);
 			break;
+		case RTA_VIA:
+			NL_SET_ERR_MSG(extack, "IPv4 does not support RTA_VIA attribute");
+			err = -EINVAL;
+			goto errout;
 		case RTA_PRIORITY:
 			cfg->fc_priority = nla_get_u32(attr);
 			break;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 695979b..ad75c46 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -570,7 +570,8 @@
  *			MUST reply to only the first fragment.
  */
 
-void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
+void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
+		 const struct ip_options *opt)
 {
 	struct iphdr *iph;
 	int room;
@@ -691,7 +692,7 @@
 					  iph->tos;
 	mark = IP4_REPLY_MARK(net, skb_in->mark);
 
-	if (ip_options_echo(net, &icmp_param.replyopts.opt.opt, skb_in))
+	if (__ip_options_echo(net, &icmp_param.replyopts.opt.opt, skb_in, opt))
 		goto out_unlock;
 
 
@@ -742,7 +743,7 @@
 	local_bh_enable();
 out:;
 }
-EXPORT_SYMBOL(icmp_send);
+EXPORT_SYMBOL(__icmp_send);
 
 
 static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 797c4ff..0680f87 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -307,11 +307,10 @@
 }
 
 static int ip_rcv_finish_core(struct net *net, struct sock *sk,
-			      struct sk_buff *skb)
+			      struct sk_buff *skb, struct net_device *dev)
 {
 	const struct iphdr *iph = ip_hdr(skb);
 	int (*edemux)(struct sk_buff *skb);
-	struct net_device *dev = skb->dev;
 	struct rtable *rt;
 	int err;
 
@@ -400,6 +399,7 @@
 
 static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
+	struct net_device *dev = skb->dev;
 	int ret;
 
 	/* if ingress device is enslaved to an L3 master device pass the
@@ -409,7 +409,7 @@
 	if (!skb)
 		return NET_RX_SUCCESS;
 
-	ret = ip_rcv_finish_core(net, sk, skb);
+	ret = ip_rcv_finish_core(net, sk, skb, dev);
 	if (ret != NET_RX_DROP)
 		ret = dst_input(skb);
 	return ret;
@@ -549,6 +549,7 @@
 
 	INIT_LIST_HEAD(&sublist);
 	list_for_each_entry_safe(skb, next, head, list) {
+		struct net_device *dev = skb->dev;
 		struct dst_entry *dst;
 
 		skb_list_del_init(skb);
@@ -558,7 +559,7 @@
 		skb = l3mdev_ip_rcv(skb);
 		if (!skb)
 			continue;
-		if (ip_rcv_finish_core(net, sk, skb) == NET_RX_DROP)
+		if (ip_rcv_finish_core(net, sk, skb, dev) == NET_RX_DROP)
 			continue;
 
 		dst = skb_dst(skb);
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index ed194d4..32a3504 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -251,8 +251,9 @@
  * If opt == NULL, then skb->data should point to IP header.
  */
 
-int ip_options_compile(struct net *net,
-		       struct ip_options *opt, struct sk_buff *skb)
+int __ip_options_compile(struct net *net,
+			 struct ip_options *opt, struct sk_buff *skb,
+			 __be32 *info)
 {
 	__be32 spec_dst = htonl(INADDR_ANY);
 	unsigned char *pp_ptr = NULL;
@@ -468,11 +469,22 @@
 		return 0;
 
 error:
-	if (skb) {
-		icmp_send(skb, ICMP_PARAMETERPROB, 0, htonl((pp_ptr-iph)<<24));
-	}
+	if (info)
+		*info = htonl((pp_ptr-iph)<<24);
 	return -EINVAL;
 }
+
+int ip_options_compile(struct net *net,
+		       struct ip_options *opt, struct sk_buff *skb)
+{
+	int ret;
+	__be32 info;
+
+	ret = __ip_options_compile(net, opt, skb, &info);
+	if (ret != 0 && skb)
+		icmp_send(skb, ICMP_PARAMETERPROB, 0, info);
+	return ret;
+}
 EXPORT_SYMBOL(ip_options_compile);
 
 /*
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 7f56944..40a7cd5 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -74,6 +74,33 @@
 	return 0;
 }
 
+static int vti_input_ipip(struct sk_buff *skb, int nexthdr, __be32 spi,
+		     int encap_type)
+{
+	struct ip_tunnel *tunnel;
+	const struct iphdr *iph = ip_hdr(skb);
+	struct net *net = dev_net(skb->dev);
+	struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
+
+	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
+				  iph->saddr, iph->daddr, 0);
+	if (tunnel) {
+		if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
+			goto drop;
+
+		XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel;
+
+		skb->dev = tunnel->dev;
+
+		return xfrm_input(skb, nexthdr, spi, encap_type);
+	}
+
+	return -EINVAL;
+drop:
+	kfree_skb(skb);
+	return 0;
+}
+
 static int vti_rcv(struct sk_buff *skb)
 {
 	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
@@ -82,6 +109,14 @@
 	return vti_input(skb, ip_hdr(skb)->protocol, 0, 0);
 }
 
+static int vti_rcv_ipip(struct sk_buff *skb)
+{
+	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
+	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
+
+	return vti_input_ipip(skb, ip_hdr(skb)->protocol, ip_hdr(skb)->saddr, 0);
+}
+
 static int vti_rcv_cb(struct sk_buff *skb, int err)
 {
 	unsigned short family;
@@ -435,6 +470,12 @@
 	.priority	=	100,
 };
 
+static struct xfrm_tunnel ipip_handler __read_mostly = {
+	.handler	=	vti_rcv_ipip,
+	.err_handler	=	vti4_err,
+	.priority	=	0,
+};
+
 static int __net_init vti_init_net(struct net *net)
 {
 	int err;
@@ -603,6 +644,13 @@
 	if (err < 0)
 		goto xfrm_proto_comp_failed;
 
+	msg = "ipip tunnel";
+	err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
+	if (err < 0) {
+		pr_info("%s: cant't register tunnel\n",__func__);
+		goto xfrm_tunnel_failed;
+	}
+
 	msg = "netlink interface";
 	err = rtnl_link_register(&vti_link_ops);
 	if (err < 0)
@@ -612,6 +660,8 @@
 
 rtnl_link_failed:
 	xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
+xfrm_tunnel_failed:
+	xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
 xfrm_proto_comp_failed:
 	xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
 xfrm_proto_ah_failed:
diff --git a/net/ipv4/netlink.c b/net/ipv4/netlink.c
index f86bb4f..d8e3a1f 100644
--- a/net/ipv4/netlink.c
+++ b/net/ipv4/netlink.c
@@ -3,9 +3,10 @@
 #include <linux/types.h>
 #include <net/net_namespace.h>
 #include <net/netlink.h>
+#include <linux/in6.h>
 #include <net/ip.h>
 
-int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto,
+int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
 				struct netlink_ext_ack *extack)
 {
 	*ip_proto = nla_get_u8(attr);
@@ -13,11 +14,19 @@
 	switch (*ip_proto) {
 	case IPPROTO_TCP:
 	case IPPROTO_UDP:
-	case IPPROTO_ICMP:
 		return 0;
-	default:
-		NL_SET_ERR_MSG(extack, "Unsupported ip proto");
-		return -EOPNOTSUPP;
+	case IPPROTO_ICMP:
+		if (family != AF_INET)
+			break;
+		return 0;
+#if IS_ENABLED(CONFIG_IPV6)
+	case IPPROTO_ICMPV6:
+		if (family != AF_INET6)
+			break;
+		return 0;
+#endif
 	}
+	NL_SET_ERR_MSG(extack, "Unsupported ip proto");
+	return -EOPNOTSUPP;
 }
 EXPORT_SYMBOL_GPL(rtm_getroute_parse_ip_proto);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 436b46c..7a556e4 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1308,6 +1308,10 @@
 		if (fnhe->fnhe_daddr == daddr) {
 			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
 				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
+			/* set fnhe_daddr to 0 to ensure it won't bind with
+			 * new dsts in rt_bind_exception().
+			 */
+			fnhe->fnhe_daddr = 0;
 			fnhe_flush_routes(fnhe);
 			kfree_rcu(fnhe, rcu);
 			break;
@@ -2155,12 +2159,13 @@
 		int our = 0;
 		int err = -EINVAL;
 
-		if (in_dev)
-			our = ip_check_mc_rcu(in_dev, daddr, saddr,
-					      ip_hdr(skb)->protocol);
+		if (!in_dev)
+			return err;
+		our = ip_check_mc_rcu(in_dev, daddr, saddr,
+				      ip_hdr(skb)->protocol);
 
 		/* check l3 master if no match yet */
-		if ((!in_dev || !our) && netif_is_l3_slave(dev)) {
+		if (!our && netif_is_l3_slave(dev)) {
 			struct in_device *l3_in_dev;
 
 			l3_in_dev = __in_dev_get_rcu(skb->dev);
@@ -2814,7 +2819,7 @@
 
 	if (tb[RTA_IP_PROTO]) {
 		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
-						  &ip_proto, extack);
+						  &ip_proto, AF_INET, extack);
 		if (err)
 			return err;
 	}
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index c3387df..f66b2e6 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -216,7 +216,12 @@
 		refcount_set(&req->rsk_refcnt, 1);
 		tcp_sk(child)->tsoffset = tsoff;
 		sock_rps_save_rxhash(child, skb);
-		inet_csk_reqsk_queue_add(sk, req, child);
+		if (!inet_csk_reqsk_queue_add(sk, req, child)) {
+			bh_unlock_sock(child);
+			sock_put(child);
+			child = NULL;
+			reqsk_put(req);
+		}
 	} else {
 		reqsk_free(req);
 	}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index d561464..ca38aca 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1901,6 +1901,11 @@
 		inq = tp->rcv_nxt - tp->copied_seq;
 		release_sock(sk);
 	}
+	/* After receiving a FIN, tell the user-space to continue reading
+	 * by returning a non-zero inq.
+	 */
+	if (inq == 0 && sock_flag(sk, SOCK_DONE))
+		inq = 1;
 	return inq;
 }
 
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 0e9fbdf..16f2c84 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -6493,7 +6493,13 @@
 		af_ops->send_synack(fastopen_sk, dst, &fl, req,
 				    &foc, TCP_SYNACK_FASTOPEN);
 		/* Add the child socket directly into the accept queue */
-		inet_csk_reqsk_queue_add(sk, req, fastopen_sk);
+		if (!inet_csk_reqsk_queue_add(sk, req, fastopen_sk)) {
+			reqsk_fastopen_remove(fastopen_sk, req, false);
+			bh_unlock_sock(fastopen_sk);
+			sock_put(fastopen_sk);
+			reqsk_put(req);
+			goto drop;
+		}
 		sk->sk_data_ready(sk);
 		bh_unlock_sock(fastopen_sk);
 		sock_put(fastopen_sk);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 5f880b0..ce66c23 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1646,15 +1646,8 @@
 int tcp_filter(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcphdr *th = (struct tcphdr *)skb->data;
-	unsigned int eaten = skb->len;
-	int err;
 
-	err = sk_filter_trim_cap(sk, skb, th->doff * 4);
-	if (!err) {
-		eaten -= skb->len;
-		TCP_SKB_CB(skb)->end_seq -= eaten;
-	}
-	return err;
+	return sk_filter_trim_cap(sk, skb, th->doff * 4);
 }
 EXPORT_SYMBOL(tcp_filter);
 
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 10aafea..35e7092 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1954,10 +1954,10 @@
 
 static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
-			IPSTATS_MIB_OUTFORWDATAGRAMS);
-	__IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
-			IPSTATS_MIB_OUTOCTETS, skb->len);
+	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+		      IPSTATS_MIB_OUTFORWDATAGRAMS);
+	IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
+		      IPSTATS_MIB_OUTOCTETS, skb->len);
 	return dst_output(net, sk, skb);
 }
 
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 7b832c3..509a49f 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1282,18 +1282,29 @@
 static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
 				 struct rt6_exception *rt6_ex)
 {
+	struct fib6_info *from;
 	struct net *net;
 
 	if (!bucket || !rt6_ex)
 		return;
 
 	net = dev_net(rt6_ex->rt6i->dst.dev);
+	net->ipv6.rt6_stats->fib_rt_cache--;
+
+	/* purge completely the exception to allow releasing the held resources:
+	 * some [sk] cache may keep the dst around for unlimited time
+	 */
+	from = rcu_dereference_protected(rt6_ex->rt6i->from,
+					 lockdep_is_held(&rt6_exception_lock));
+	rcu_assign_pointer(rt6_ex->rt6i->from, NULL);
+	fib6_info_release(from);
+	dst_dev_put(&rt6_ex->rt6i->dst);
+
 	hlist_del_rcu(&rt6_ex->hlist);
 	dst_release(&rt6_ex->rt6i->dst);
 	kfree_rcu(rt6_ex, rcu);
 	WARN_ON_ONCE(!bucket->depth);
 	bucket->depth--;
-	net->ipv6.rt6_stats->fib_rt_cache--;
 }
 
 /* Remove oldest rt6_ex in bucket and free the memory
@@ -1612,15 +1623,15 @@
 static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
 {
 	struct rt6_exception_bucket *bucket;
-	struct fib6_info *from = rt->from;
 	struct in6_addr *src_key = NULL;
 	struct rt6_exception *rt6_ex;
-
-	if (!from ||
-	    !(rt->rt6i_flags & RTF_CACHE))
-		return;
+	struct fib6_info *from;
 
 	rcu_read_lock();
+	from = rcu_dereference(rt->from);
+	if (!from || !(rt->rt6i_flags & RTF_CACHE))
+		goto unlock;
+
 	bucket = rcu_dereference(from->rt6i_exception_bucket);
 
 #ifdef CONFIG_IPV6_SUBTREES
@@ -1639,6 +1650,7 @@
 	if (rt6_ex)
 		rt6_ex->stamp = jiffies;
 
+unlock:
 	rcu_read_unlock();
 }
 
@@ -2796,20 +2808,24 @@
 	u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN;
 	const struct in6_addr *gw_addr = &cfg->fc_gateway;
 	u32 flags = RTF_LOCAL | RTF_ANYCAST | RTF_REJECT;
+	struct fib6_info *from;
 	struct rt6_info *grt;
 	int err;
 
 	err = 0;
 	grt = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0);
 	if (grt) {
+		rcu_read_lock();
+		from = rcu_dereference(grt->from);
 		if (!grt->dst.error &&
 		    /* ignore match if it is the default route */
-		    grt->from && !ipv6_addr_any(&grt->from->fib6_dst.addr) &&
+		    from && !ipv6_addr_any(&from->fib6_dst.addr) &&
 		    (grt->rt6i_flags & flags || dev != grt->dst.dev)) {
 			NL_SET_ERR_MSG(extack,
 				       "Nexthop has invalid gateway or device mismatch");
 			err = -EINVAL;
 		}
+		rcu_read_unlock();
 
 		ip6_rt_put(grt);
 	}
@@ -4189,6 +4205,10 @@
 		cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
 		cfg->fc_flags |= RTF_GATEWAY;
 	}
+	if (tb[RTA_VIA]) {
+		NL_SET_ERR_MSG(extack, "IPv6 does not support RTA_VIA attribute");
+		goto errout;
+	}
 
 	if (tb[RTA_DST]) {
 		int plen = (rtm->rtm_dst_len + 7) >> 3;
@@ -4682,7 +4702,7 @@
 		table = rt->fib6_table->tb6_id;
 	else
 		table = RT6_TABLE_UNSPEC;
-	rtm->rtm_table = table;
+	rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
 	if (nla_put_u32(skb, RTA_TABLE, table))
 		goto nla_put_failure;
 
@@ -4883,7 +4903,8 @@
 
 	if (tb[RTA_IP_PROTO]) {
 		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
-						  &fl6.flowi6_proto, extack);
+						  &fl6.flowi6_proto, AF_INET6,
+						  extack);
 		if (err)
 			goto errout;
 	}
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index da6d5a3..de9aa5c 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -778,8 +778,9 @@
 		pbw0 = tunnel->ip6rd.prefixlen >> 5;
 		pbi0 = tunnel->ip6rd.prefixlen & 0x1f;
 
-		d = (ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >>
-		    tunnel->ip6rd.relay_prefixlen;
+		d = tunnel->ip6rd.relay_prefixlen < 32 ?
+			(ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >>
+		    tunnel->ip6rd.relay_prefixlen : 0;
 
 		pbi1 = pbi0 - tunnel->ip6rd.relay_prefixlen;
 		if (pbi1 > 0)
@@ -1873,6 +1874,7 @@
 
 err_reg_dev:
 	ipip6_dev_free(sitn->fb_tunnel_dev);
+	free_netdev(sitn->fb_tunnel_dev);
 err_alloc_dev:
 	return err;
 }
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 0ae6899..37a69df 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -674,9 +674,6 @@
 	if (flags & MSG_OOB)
 		goto out;
 
-	if (addr_len)
-		*addr_len = sizeof(*lsa);
-
 	if (flags & MSG_ERRQUEUE)
 		return ipv6_recv_error(sk, msg, len, addr_len);
 
@@ -706,6 +703,7 @@
 		lsa->l2tp_conn_id = 0;
 		if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
 			lsa->l2tp_scope_id = inet6_iif(skb);
+		*addr_len = sizeof(*lsa);
 	}
 
 	if (np->rxopt.all)
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index c2abe9d..40c5102 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1478,6 +1478,10 @@
 	if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
 		sta->sta.tdls = true;
 
+	if (sta->sta.tdls && sdata->vif.type == NL80211_IFTYPE_STATION &&
+	    !sdata->u.mgd.associated)
+		return -EINVAL;
+
 	err = sta_apply_parameters(local, sta, params);
 	if (err) {
 		sta_info_free(local, sta);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 828348b..e946ee4 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -221,7 +221,7 @@
 		struct ieee80211_hdr_3addr hdr;
 		u8 category;
 		u8 action_code;
-	} __packed action;
+	} __packed __aligned(2) action;
 
 	if (!sdata)
 		return;
@@ -2678,7 +2678,9 @@
 	skb_set_queue_mapping(skb, q);
 
 	if (!--mesh_hdr->ttl) {
-		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl);
+		if (!is_multicast_ether_addr(hdr->addr1))
+			IEEE80211_IFSTA_MESH_CTR_INC(ifmsh,
+						     dropped_frames_ttl);
 		goto out;
 	}
 
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index c7ccd7b..743cde6 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -3614,10 +3614,10 @@
 		/* We need a bit of data queued to build aggregates properly, so
 		 * instruct the TCP stack to allow more than a single ms of data
 		 * to be queued in the stack. The value is a bit-shift of 1
-		 * second, so 8 is ~4ms of queued data. Only affects local TCP
+		 * second, so 7 is ~8ms of queued data. Only affects local TCP
 		 * sockets.
 		 */
-		sk_pacing_shift_update(skb->sk, 8);
+		sk_pacing_shift_update(skb->sk, 7);
 
 		fast_tx = rcu_dereference(sta->fast_tx);
 
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 8fbe6cd..d5a4db5 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1822,6 +1822,9 @@
 				goto errout;
 			break;
 		}
+		case RTA_GATEWAY:
+			NL_SET_ERR_MSG(extack, "MPLS does not support RTA_GATEWAY attribute");
+			goto errout;
 		case RTA_VIA:
 		{
 			if (nla_get_via(nla, &cfg->rc_via_alen,
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 518364f..55a7731 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2221,6 +2221,18 @@
 		  u->udp_timeout);
 
 #ifdef CONFIG_IP_VS_PROTO_TCP
+	if (u->tcp_timeout < 0 || u->tcp_timeout > (INT_MAX / HZ) ||
+	    u->tcp_fin_timeout < 0 || u->tcp_fin_timeout > (INT_MAX / HZ)) {
+		return -EINVAL;
+	}
+#endif
+
+#ifdef CONFIG_IP_VS_PROTO_UDP
+	if (u->udp_timeout < 0 || u->udp_timeout > (INT_MAX / HZ))
+		return -EINVAL;
+#endif
+
+#ifdef CONFIG_IP_VS_PROTO_TCP
 	if (u->tcp_timeout) {
 		pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
 		pd->timeout_table[IP_VS_TCP_S_ESTABLISHED]
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 277d02a..895171a 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1007,6 +1007,22 @@
 		}
 
 		if (nf_ct_key_equal(h, tuple, zone, net)) {
+			/* Tuple is taken already, so caller will need to find
+			 * a new source port to use.
+			 *
+			 * Only exception:
+			 * If the *original tuples* are identical, then both
+			 * conntracks refer to the same flow.
+			 * This is a rare situation, it can occur e.g. when
+			 * more than one UDP packet is sent from same socket
+			 * in different threads.
+			 *
+			 * Let nf_ct_resolve_clash() deal with this later.
+			 */
+			if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+					      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple))
+				continue;
+
 			NF_CT_STAT_INC_ATOMIC(net, found);
 			rcu_read_unlock();
 			return 1;
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index ea7c670..ee3e5b6 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -903,7 +903,8 @@
 		    (state == 0 && (byte & bitmask) == 0))
 			return bit_spot;
 
-		bit_spot++;
+		if (++bit_spot >= bitmap_len)
+			return -1;
 		bitmask >>= 1;
 		if (bitmask == 0) {
 			byte = bitmap[++byte_offset];
diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c
index 6a196e4..d1fc019e 100644
--- a/net/nfc/llcp_commands.c
+++ b/net/nfc/llcp_commands.c
@@ -419,6 +419,10 @@
 						      sock->service_name,
 						      sock->service_name_len,
 						      &service_name_tlv_length);
+		if (!service_name_tlv) {
+			err = -ENOMEM;
+			goto error_tlv;
+		}
 		size += service_name_tlv_length;
 	}
 
@@ -429,9 +433,17 @@
 
 	miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
 				      &miux_tlv_length);
+	if (!miux_tlv) {
+		err = -ENOMEM;
+		goto error_tlv;
+	}
 	size += miux_tlv_length;
 
 	rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
+	if (!rw_tlv) {
+		err = -ENOMEM;
+		goto error_tlv;
+	}
 	size += rw_tlv_length;
 
 	pr_debug("SKB size %d SN length %zu\n", size, sock->service_name_len);
@@ -484,9 +496,17 @@
 
 	miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
 				      &miux_tlv_length);
+	if (!miux_tlv) {
+		err = -ENOMEM;
+		goto error_tlv;
+	}
 	size += miux_tlv_length;
 
 	rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
+	if (!rw_tlv) {
+		err = -ENOMEM;
+		goto error_tlv;
+	}
 	size += rw_tlv_length;
 
 	skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, size);
diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
index ef4026a..4fa0152 100644
--- a/net/nfc/llcp_core.c
+++ b/net/nfc/llcp_core.c
@@ -532,10 +532,10 @@
 
 static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
 {
-	u8 *gb_cur, *version_tlv, version, version_length;
-	u8 *lto_tlv, lto_length;
-	u8 *wks_tlv, wks_length;
-	u8 *miux_tlv, miux_length;
+	u8 *gb_cur, version, version_length;
+	u8 lto_length, wks_length, miux_length;
+	u8 *version_tlv = NULL, *lto_tlv = NULL,
+	   *wks_tlv = NULL, *miux_tlv = NULL;
 	__be16 wks = cpu_to_be16(local->local_wks);
 	u8 gb_len = 0;
 	int ret = 0;
@@ -543,17 +543,33 @@
 	version = LLCP_VERSION_11;
 	version_tlv = nfc_llcp_build_tlv(LLCP_TLV_VERSION, &version,
 					 1, &version_length);
+	if (!version_tlv) {
+		ret = -ENOMEM;
+		goto out;
+	}
 	gb_len += version_length;
 
 	lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_LTO, &local->lto, 1, &lto_length);
+	if (!lto_tlv) {
+		ret = -ENOMEM;
+		goto out;
+	}
 	gb_len += lto_length;
 
 	pr_debug("Local wks 0x%lx\n", local->local_wks);
 	wks_tlv = nfc_llcp_build_tlv(LLCP_TLV_WKS, (u8 *)&wks, 2, &wks_length);
+	if (!wks_tlv) {
+		ret = -ENOMEM;
+		goto out;
+	}
 	gb_len += wks_length;
 
 	miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&local->miux, 0,
 				      &miux_length);
+	if (!miux_tlv) {
+		ret = -ENOMEM;
+		goto out;
+	}
 	gb_len += miux_length;
 
 	gb_len += ARRAY_SIZE(llcp_magic);
diff --git a/net/qrtr/Kconfig b/net/qrtr/Kconfig
index ee794d5..8b9d3c2 100644
--- a/net/qrtr/Kconfig
+++ b/net/qrtr/Kconfig
@@ -47,4 +47,12 @@
 	  transport provides bulk endpoints to facilitate sending and receiving
 	  IPC Router data packets.
 
+config QRTR_FIFO
+	tristate "FIFO IPC Router channels"
+	help
+	Say Y here to support FIFO based ipcrouter channels. FIFO Transport
+	Layer enables IPC Router communication between two virtual machines.
+	The shared memory between virtual machines will be allocated by the
+	hypervisor and signal other VMs through virtualized interrupts.
+
 endif # QRTR
diff --git a/net/qrtr/Makefile b/net/qrtr/Makefile
index d3c3a19..cae5493 100644
--- a/net/qrtr/Makefile
+++ b/net/qrtr/Makefile
@@ -9,3 +9,6 @@
 
 obj-$(CONFIG_QRTR_USB) += qrtr-usb.o
 qrtr-usb-y      := usb.o
+
+obj-$(CONFIG_QRTR_FIFO) += qrtr-fifo.o
+qrtr-fifo-y	:= fifo.o
diff --git a/net/qrtr/fifo.c b/net/qrtr/fifo.c
new file mode 100644
index 0000000..0a494a6
--- /dev/null
+++ b/net/qrtr/fifo.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/sched.h>
+#include <microvisor/microvisor.h>
+
+#include "qrtr.h"
+
+#define FIFO_MAGIC_KEY	0x24495043 /* "$IPC" */
+#define FIFO_SIZE	0x4000
+#define FIFO_0_START	0x1000
+#define FIFO_1_START	(FIFO_0_START + FIFO_SIZE)
+#define FIFO_MAGIC_IDX	0x0
+#define TAIL_0_IDX	0x1
+#define HEAD_0_IDX	0x2
+#define TAIL_1_IDX	0x3
+#define HEAD_1_IDX	0x4
+
+struct fifo_pipe {
+	__le32 *tail;
+	__le32 *head;
+
+	void *fifo;
+	size_t length;
+};
+
+/**
+ * qrtr_fifo_xprt - qrtr FIFO transport structure
+ * @ep: qrtr endpoint specific info.
+ * @tx_pipe: TX FIFO specific info.
+ * @rx_pipe: RX FIFO specific info.
+ * @fifo_base: Base of the shared FIFO.
+ * @fifo_size: FIFO Size.
+ * @tx_fifo_idx: TX FIFO index.
+ * @kcap: Register info to raise irq to other VM.
+ */
+struct qrtr_fifo_xprt {
+	struct qrtr_endpoint ep;
+	struct fifo_pipe tx_pipe;
+	struct fifo_pipe rx_pipe;
+	void *fifo_base;
+	size_t fifo_size;
+	int tx_fifo_idx;
+	okl4_kcap_t kcap;
+};
+
+static void qrtr_fifo_raise_virq(struct qrtr_fifo_xprt *xprtp);
+
+static size_t fifo_rx_avail(struct fifo_pipe *pipe)
+{
+	u32 head;
+	u32 tail;
+
+	head = le32_to_cpu(*pipe->head);
+	tail = le32_to_cpu(*pipe->tail);
+
+	if (head < tail)
+		return pipe->length - tail + head;
+
+	return head - tail;
+}
+
+static void fifo_rx_peak(struct fifo_pipe *pipe,
+			 void *data, unsigned int offset, size_t count)
+{
+	size_t len;
+	u32 tail;
+
+	tail = le32_to_cpu(*pipe->tail);
+	tail += offset;
+	if (tail >= pipe->length)
+		tail -= pipe->length;
+
+	len = min_t(size_t, count, pipe->length - tail);
+	if (len)
+		memcpy_fromio(data, pipe->fifo + tail, len);
+
+	if (len != count)
+		memcpy_fromio(data + len, pipe->fifo, (count - len));
+}
+
+static void fifo_rx_advance(struct fifo_pipe *pipe, size_t count)
+{
+	u32 tail;
+
+	tail = le32_to_cpu(*pipe->tail);
+
+	tail += count;
+	if (tail > pipe->length)
+		tail -= pipe->length;
+
+	*pipe->tail = cpu_to_le32(tail);
+}
+
+static size_t fifo_tx_avail(struct fifo_pipe *pipe)
+{
+	u32 head;
+	u32 tail;
+	u32 avail;
+
+	head = le32_to_cpu(*pipe->head);
+	tail = le32_to_cpu(*pipe->tail);
+
+	if (tail <= head)
+		avail = pipe->length - head + tail;
+	else
+		avail = tail - head;
+
+	return avail;
+}
+
+static void fifo_tx_write(struct fifo_pipe *pipe,
+			  const void *data, size_t count)
+{
+	size_t len;
+	u32 head;
+
+	head = le32_to_cpu(*pipe->head);
+
+	len = min_t(size_t, count, pipe->length - head);
+	if (len)
+		memcpy_toio(pipe->fifo + head, data, len);
+
+	if (len != count)
+		memcpy_toio(pipe->fifo, data + len, count - len);
+
+	head += count;
+	if (head >= pipe->length)
+		head -= pipe->length;
+
+	/* Ensure ordering of fifo and head update */
+	wmb();
+
+	*pipe->head = cpu_to_le32(head);
+}
+
+/* from qrtr to FIFO */
+static int xprt_write(struct qrtr_endpoint *ep, struct sk_buff *skb)
+{
+	struct qrtr_fifo_xprt *xprtp;
+	int rc;
+
+	xprtp = container_of(ep, struct qrtr_fifo_xprt, ep);
+
+	rc = skb_linearize(skb);
+	if (rc) {
+		kfree_skb(skb);
+		return rc;
+	}
+
+	if (fifo_tx_avail(&xprtp->tx_pipe) < skb->len) {
+		pr_err("No Space in FIFO\n");
+		return -EAGAIN;
+	}
+
+	fifo_tx_write(&xprtp->tx_pipe, skb->data, skb->len);
+	kfree_skb(skb);
+
+	qrtr_fifo_raise_virq(xprtp);
+
+	return 0;
+}
+
+static void xprt_read_data(struct qrtr_fifo_xprt *xprtp)
+{
+	int rc;
+	u32 hdr[8];
+	void *data;
+	size_t pkt_len;
+	size_t rx_avail;
+	size_t hdr_len = sizeof(hdr);
+
+	while (fifo_rx_avail(&xprtp->rx_pipe)) {
+		fifo_rx_peak(&xprtp->rx_pipe, &hdr, 0, hdr_len);
+		pkt_len = qrtr_peek_pkt_size((void *)&hdr);
+		if ((int)pkt_len < 0) {
+			pr_err("invalid pkt_len %zu\n", pkt_len);
+			break;
+		}
+
+		data = kzalloc(pkt_len, GFP_ATOMIC);
+		if (!data)
+			break;
+
+		rx_avail = fifo_rx_avail(&xprtp->rx_pipe);
+		if (rx_avail < pkt_len) {
+			pr_err_ratelimited("Not FULL pkt in FIFO %zu %zu\n",
+					   rx_avail, pkt_len);
+			break;
+		}
+
+		fifo_rx_peak(&xprtp->rx_pipe, data, 0, pkt_len);
+		fifo_rx_advance(&xprtp->rx_pipe, pkt_len);
+
+		rc = qrtr_endpoint_post(&xprtp->ep, data, pkt_len);
+		if (rc == -EINVAL)
+			pr_err("invalid ipcrouter packet\n");
+		kfree(data);
+		data = NULL;
+	}
+}
+
+static void qrtr_fifo_raise_virq(struct qrtr_fifo_xprt *xprtp)
+{
+	okl4_error_t err;
+	unsigned long payload = 0xffff;
+
+	err = _okl4_sys_vinterrupt_raise(xprtp->kcap, payload);
+}
+
+static irqreturn_t qrtr_fifo_virq_handler(int irq, void *dev_id)
+{
+	xprt_read_data((struct qrtr_fifo_xprt *)dev_id);
+	return IRQ_HANDLED;
+}
+
+/**
+ * qrtr_fifo_config_init() - init FIFO xprt configs
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called to initialize the FIFO XPRT pointer with
+ * the FIFO XPRT configurations either from device tree or static arrays.
+ */
+static void qrtr_fifo_config_init(struct qrtr_fifo_xprt *xprtp)
+{
+	__le32 *descs;
+
+	descs = xprtp->fifo_base;
+	descs[FIFO_MAGIC_IDX] = FIFO_MAGIC_KEY;
+
+	if (xprtp->tx_fifo_idx) {
+		xprtp->tx_pipe.tail = &descs[TAIL_0_IDX];
+		xprtp->tx_pipe.head = &descs[HEAD_0_IDX];
+		xprtp->tx_pipe.fifo = xprtp->fifo_base + FIFO_0_START;
+		xprtp->tx_pipe.length = FIFO_SIZE;
+
+		xprtp->rx_pipe.tail = &descs[TAIL_1_IDX];
+		xprtp->rx_pipe.head = &descs[HEAD_1_IDX];
+		xprtp->rx_pipe.fifo = xprtp->fifo_base + FIFO_1_START;
+		xprtp->rx_pipe.length = FIFO_SIZE;
+	} else {
+		xprtp->tx_pipe.tail = &descs[TAIL_1_IDX];
+		xprtp->tx_pipe.head = &descs[HEAD_1_IDX];
+		xprtp->tx_pipe.fifo = xprtp->fifo_base + FIFO_1_START;
+		xprtp->tx_pipe.length = FIFO_SIZE;
+
+		xprtp->rx_pipe.tail = &descs[TAIL_0_IDX];
+		xprtp->rx_pipe.head = &descs[HEAD_0_IDX];
+		xprtp->rx_pipe.fifo = xprtp->fifo_base + FIFO_0_START;
+		xprtp->rx_pipe.length = FIFO_SIZE;
+	}
+
+	/* Reset respective index */
+	*xprtp->tx_pipe.head = 0;
+	*xprtp->rx_pipe.tail = 0;
+}
+
+/**
+ * qrtr_fifo_xprt_probe() - Probe an FIFO xprt
+ *
+ * @pdev: Platform device corresponding to FIFO xprt.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when the underlying device tree driver registers
+ * a platform device, mapped to an FIFO transport.
+ */
+static int qrtr_fifo_xprt_probe(struct platform_device *pdev)
+{
+	int irq;
+	int ret;
+	struct resource *r;
+	struct device *parent;
+	struct qrtr_fifo_xprt *xprtp;
+	struct device_node *ipc_irq_np;
+	struct device_node *ipc_shm_np;
+	struct platform_device *ipc_shm_dev;
+
+	xprtp = devm_kzalloc(&pdev->dev, sizeof(*xprtp), GFP_KERNEL);
+	if (!xprtp)
+		return -ENOMEM;
+
+	parent = &pdev->dev;
+	ipc_irq_np = parent->of_node;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return -ENODEV;
+
+	ret = devm_request_irq(parent, irq, qrtr_fifo_virq_handler,
+			       IRQF_TRIGGER_RISING, dev_name(parent),
+			       xprtp);
+	if (ret < 0)
+		return -ENODEV;
+
+	/* this kcap is required to raise VIRQ */
+	ret = of_property_read_u32(ipc_irq_np, "reg", &xprtp->kcap);
+	if (ret < 0)
+		return -ENODEV;
+
+	ipc_shm_np = of_parse_phandle(ipc_irq_np, "qcom,ipc-shm", 0);
+	if (!ipc_shm_np)
+		return -ENODEV;
+
+	ipc_shm_dev = of_find_device_by_node(ipc_shm_np);
+	if (!ipc_shm_dev) {
+		of_node_put(ipc_shm_np);
+		return -ENODEV;
+	}
+
+	r = platform_get_resource(ipc_shm_dev, IORESOURCE_MEM, 0);
+	if (!r) {
+		pr_err("failed to get shared FIFO\n");
+		of_node_put(ipc_shm_np);
+		return -ENODEV;
+	}
+
+	xprtp->tx_fifo_idx = of_property_read_bool(ipc_shm_np,
+						   "qcom,tx-is-first");
+	of_node_put(ipc_shm_np);
+
+	xprtp->fifo_size = resource_size(r);
+	xprtp->fifo_base = devm_ioremap_nocache(&pdev->dev, r->start,
+						resource_size(r));
+	if (!xprtp->fifo_base) {
+		pr_err("ioreamp_nocache() failed\n");
+		return -ENOMEM;
+	}
+	qrtr_fifo_config_init(xprtp);
+
+	xprtp->ep.xmit = xprt_write;
+	ret = qrtr_endpoint_register(&xprtp->ep, QRTR_EP_NID_AUTO);
+	if (ret)
+		return ret;
+
+	if (fifo_rx_avail(&xprtp->rx_pipe))
+		xprt_read_data(xprtp);
+
+	return 0;
+}
+
+static const struct of_device_id qrtr_fifo_xprt_match_table[] = {
+	{ .compatible = "qcom,ipcr-fifo-xprt" },
+	{},
+};
+
+static struct platform_driver qrtr_fifo_xprt_driver = {
+	.probe = qrtr_fifo_xprt_probe,
+	.driver = {
+		.name = "qcom_fifo_qrtr",
+		.of_match_table = qrtr_fifo_xprt_match_table,
+	 },
+};
+
+static int __init qrtr_fifo_xprt_init(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&qrtr_fifo_xprt_driver);
+	if (rc) {
+		pr_err("driver register failed %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+module_init(qrtr_fifo_xprt_init);
+MODULE_DESCRIPTION("QTI IPC-router FIFO XPRT");
+MODULE_LICENSE("GPL v2");
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index 6c32eb9..5c3d455 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -530,14 +530,12 @@
 	hdr->type = cpu_to_le32(type);
 	hdr->src_node_id = cpu_to_le32(from->sq_node);
 	hdr->src_port_id = cpu_to_le32(from->sq_port);
-	if (to->sq_port == QRTR_PORT_CTRL) {
+	if (to->sq_node == QRTR_NODE_BCAST)
 		hdr->dst_node_id = cpu_to_le32(node->nid);
-		hdr->dst_port_id = cpu_to_le32(QRTR_NODE_BCAST);
-	} else {
+	else
 		hdr->dst_node_id = cpu_to_le32(to->sq_node);
-		hdr->dst_port_id = cpu_to_le32(to->sq_port);
-	}
 
+	hdr->dst_port_id = cpu_to_le32(to->sq_port);
 	hdr->size = cpu_to_le32(len);
 	hdr->confirm_rx = !!confirm_rx;
 
@@ -780,49 +778,44 @@
 static struct qrtr_sock *qrtr_port_lookup(int port);
 static void qrtr_port_put(struct qrtr_sock *ipc);
 
-static bool qrtr_must_forward(u32 src_nid, u32 dst_nid, u32 type)
+static bool qrtr_must_forward(struct qrtr_node *src,
+			      struct qrtr_node *dst, u32 type)
 {
-	struct qrtr_node *dst;
-	struct qrtr_node *src;
-	bool ret = false;
-
-	if (src_nid == qrtr_local_nid)
+	/* Node structure is not maintained for local processor.
+	 * Hence src is null in that case.
+	 */
+	if (!src)
 		return true;
 
-	if (type == QRTR_TYPE_HELLO || type == QRTR_TYPE_RESUME_TX)
-		return ret;
+	if (!dst)
+		return false;
 
-	dst = qrtr_node_lookup(dst_nid);
-	src = qrtr_node_lookup(src_nid);
-	if (!dst || !src)
-		goto out;
-	if (dst == src)
-		goto out;
-	if (dst->nid == QRTR_EP_NID_AUTO)
-		goto out;
+	if (type == QRTR_TYPE_HELLO || type == QRTR_TYPE_RESUME_TX)
+		return false;
+
+	if (dst == src || dst->nid == QRTR_EP_NID_AUTO)
+		return false;
 
 	if (abs(dst->net_id - src->net_id) > 1)
-		ret = true;
+		return true;
 
-out:
-	qrtr_node_release(dst);
-	qrtr_node_release(src);
-
-	return ret;
+	return false;
 }
 
 static void qrtr_fwd_ctrl_pkt(struct sk_buff *skb)
 {
 	struct qrtr_node *node;
+	struct qrtr_node *src;
 	struct qrtr_cb *cb = (struct qrtr_cb *)skb->cb;
 
+	src = qrtr_node_lookup(cb->src_node);
 	down_read(&qrtr_node_lock);
 	list_for_each_entry(node, &qrtr_all_epts, item) {
 		struct sockaddr_qrtr from;
 		struct sockaddr_qrtr to;
 		struct sk_buff *skbn;
 
-		if (!qrtr_must_forward(cb->src_node, node->nid, cb->type))
+		if (!qrtr_must_forward(src, node, cb->type))
 			continue;
 
 		skbn = skb_clone(skb, GFP_KERNEL);
@@ -840,6 +833,7 @@
 		qrtr_node_enqueue(node, skbn, cb->type, &from, &to, 0);
 	}
 	up_read(&qrtr_node_lock);
+	qrtr_node_release(src);
 }
 
 static void qrtr_fwd_pkt(struct sk_buff *skb, struct qrtr_cb *cb)
@@ -992,7 +986,7 @@
 	struct sk_buff *skb;
 
 	list_for_each_entry(dst, &qrtr_all_epts, item) {
-		if (!qrtr_must_forward(nid, dst->nid, QRTR_TYPE_DEL_PROC))
+		if (!qrtr_must_forward(src, dst, QRTR_TYPE_DEL_PROC))
 			continue;
 
 		skb = qrtr_alloc_ctrl_packet(&pkt);
@@ -1199,7 +1193,8 @@
 
 		sock_hold(&ipc->sk);
 		ipc->sk.sk_err = ENETRESET;
-		ipc->sk.sk_error_report(&ipc->sk);
+		if (ipc->sk.sk_error_report)
+			ipc->sk.sk_error_report(&ipc->sk);
 		sock_put(&ipc->sk);
 	}
 }
@@ -1300,7 +1295,8 @@
 	if (sk && sk->sk_err == ENETRESET) {
 		sock_hold(sk);
 		sk->sk_err = ENETRESET;
-		sk->sk_error_report(sk);
+		if (sk->sk_error_report)
+			sk->sk_error_report(sk);
 		sock_put(sk);
 		kfree_skb(skb);
 		return 0;
@@ -1355,6 +1351,7 @@
 	struct sock *sk = sock->sk;
 	struct qrtr_ctrl_pkt *pkt;
 	struct qrtr_node *node;
+	struct qrtr_node *srv_node;
 	struct sk_buff *skb;
 	size_t plen;
 	u32 type = QRTR_TYPE_DATA;
@@ -1392,6 +1389,7 @@
 	}
 
 	node = NULL;
+	srv_node = NULL;
 	if (addr->sq_node == QRTR_NODE_BCAST) {
 		enqueue_fn = qrtr_bcast_enqueue;
 		if (addr->sq_port != QRTR_PORT_CTRL) {
@@ -1445,11 +1443,14 @@
 
 		/* drop new server cmds that are not forwardable to dst node*/
 		pkt = (struct qrtr_ctrl_pkt *)skb->data;
-		if (!qrtr_must_forward(pkt->server.node, addr->sq_node, type)) {
+		srv_node = qrtr_node_lookup(pkt->server.node);
+		if (!qrtr_must_forward(srv_node, node, type)) {
 			rc = 0;
 			kfree_skb(skb);
+			qrtr_node_release(srv_node);
 			goto out_node;
 		}
+		qrtr_node_release(srv_node);
 	}
 
 	rc = enqueue_fn(node, skb, type, &ipc->us, addr, msg->msg_flags);
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 521189f..6e419b1 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -353,7 +353,7 @@
 	 * normally have to take channel_lock but we do this before anyone else
 	 * can see the connection.
 	 */
-	list_add_tail(&call->chan_wait_link, &candidate->waiting_calls);
+	list_add(&call->chan_wait_link, &candidate->waiting_calls);
 
 	if (cp->exclusive) {
 		call->conn = candidate;
@@ -432,7 +432,7 @@
 	call->conn = conn;
 	call->security_ix = conn->security_ix;
 	call->service_id = conn->service_id;
-	list_add(&call->chan_wait_link, &conn->waiting_calls);
+	list_add_tail(&call->chan_wait_link, &conn->waiting_calls);
 	spin_unlock(&conn->channel_lock);
 	_leave(" = 0 [extant %d]", conn->debug_id);
 	return 0;
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 8525de81..334f3a05 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -199,8 +199,7 @@
 err2:
 	kfree(tname);
 err1:
-	if (ret == ACT_P_CREATED)
-		tcf_idr_release(*a, bind);
+	tcf_idr_release(*a, bind);
 	return err;
 }
 
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 73e44ce..86d90fc 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -191,8 +191,7 @@
 
 	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
 	if (unlikely(!params_new)) {
-		if (ret == ACT_P_CREATED)
-			tcf_idr_release(*a, bind);
+		tcf_idr_release(*a, bind);
 		return -ENOMEM;
 	}
 
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 0f6601f..72d9c43 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -377,7 +377,8 @@
 	return ret;
 
 release_tun_meta:
-	dst_release(&metadata->dst);
+	if (metadata)
+		dst_release(&metadata->dst);
 
 err_out:
 	if (exists)
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 84893bc..09b3597 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -1213,6 +1213,24 @@
 	if (err < 0)
 		goto errout;
 
+	if (tb[TCA_FLOWER_FLAGS]) {
+		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
+
+		if (!tc_flags_valid(fnew->flags)) {
+			err = -EINVAL;
+			goto errout;
+		}
+	}
+
+	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
+			   tp->chain->tmplt_priv, extack);
+	if (err)
+		goto errout;
+
+	err = fl_check_assign_mask(head, fnew, fold, mask);
+	if (err)
+		goto errout;
+
 	if (!handle) {
 		handle = 1;
 		err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
@@ -1223,37 +1241,19 @@
 				    handle, GFP_KERNEL);
 	}
 	if (err)
-		goto errout;
+		goto errout_mask;
 	fnew->handle = handle;
 
-	if (tb[TCA_FLOWER_FLAGS]) {
-		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
-
-		if (!tc_flags_valid(fnew->flags)) {
-			err = -EINVAL;
-			goto errout_idr;
-		}
-	}
-
-	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
-			   tp->chain->tmplt_priv, extack);
-	if (err)
-		goto errout_idr;
-
-	err = fl_check_assign_mask(head, fnew, fold, mask);
-	if (err)
-		goto errout_idr;
-
 	if (!tc_skip_sw(fnew->flags)) {
 		if (!fold && fl_lookup(fnew->mask, &fnew->mkey)) {
 			err = -EEXIST;
-			goto errout_mask;
+			goto errout_idr;
 		}
 
 		err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
 					     fnew->mask->filter_ht_params);
 		if (err)
-			goto errout_mask;
+			goto errout_idr;
 	}
 
 	if (!tc_skip_hw(fnew->flags)) {
@@ -1290,12 +1290,13 @@
 	kfree(mask);
 	return 0;
 
-errout_mask:
-	fl_mask_put(head, fnew->mask, false);
-
 errout_idr:
 	if (!fold)
 		idr_remove(&head->handle_idr, fnew->handle);
+
+errout_mask:
+	fl_mask_put(head, fnew->mask, false);
+
 errout:
 	tcf_exts_destroy(&fnew->exts);
 	kfree(fnew);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 69078c8..77b289d 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -68,7 +68,7 @@
 			skb = __skb_dequeue(&q->skb_bad_txq);
 			if (qdisc_is_percpu_stats(q)) {
 				qdisc_qstats_cpu_backlog_dec(q, skb);
-				qdisc_qstats_cpu_qlen_dec(q);
+				qdisc_qstats_atomic_qlen_dec(q);
 			} else {
 				qdisc_qstats_backlog_dec(q, skb);
 				q->q.qlen--;
@@ -108,7 +108,7 @@
 
 	if (qdisc_is_percpu_stats(q)) {
 		qdisc_qstats_cpu_backlog_inc(q, skb);
-		qdisc_qstats_cpu_qlen_inc(q);
+		qdisc_qstats_atomic_qlen_inc(q);
 	} else {
 		qdisc_qstats_backlog_inc(q, skb);
 		q->q.qlen++;
@@ -147,7 +147,7 @@
 
 		qdisc_qstats_cpu_requeues_inc(q);
 		qdisc_qstats_cpu_backlog_inc(q, skb);
-		qdisc_qstats_cpu_qlen_inc(q);
+		qdisc_qstats_atomic_qlen_inc(q);
 
 		skb = next;
 	}
@@ -252,7 +252,7 @@
 			skb = __skb_dequeue(&q->gso_skb);
 			if (qdisc_is_percpu_stats(q)) {
 				qdisc_qstats_cpu_backlog_dec(q, skb);
-				qdisc_qstats_cpu_qlen_dec(q);
+				qdisc_qstats_atomic_qlen_dec(q);
 			} else {
 				qdisc_qstats_backlog_dec(q, skb);
 				q->q.qlen--;
@@ -633,7 +633,7 @@
 	if (unlikely(err))
 		return qdisc_drop_cpu(skb, qdisc, to_free);
 
-	qdisc_qstats_cpu_qlen_inc(qdisc);
+	qdisc_qstats_atomic_qlen_inc(qdisc);
 	/* Note: skb can not be used after skb_array_produce(),
 	 * so we better not use qdisc_qstats_cpu_backlog_inc()
 	 */
@@ -658,7 +658,7 @@
 	if (likely(skb)) {
 		qdisc_qstats_cpu_backlog_dec(qdisc, skb);
 		qdisc_bstats_cpu_update(qdisc, skb);
-		qdisc_qstats_cpu_qlen_dec(qdisc);
+		qdisc_qstats_atomic_qlen_dec(qdisc);
 	}
 
 	return skb;
@@ -702,7 +702,6 @@
 		struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i);
 
 		q->backlog = 0;
-		q->qlen = 0;
 	}
 }
 
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 74c0f65..4dfe10b9 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -440,6 +440,7 @@
 	int nb = 0;
 	int count = 1;
 	int rc = NET_XMIT_SUCCESS;
+	int rc_drop = NET_XMIT_DROP;
 
 	/* Do not fool qdisc_drop_all() */
 	skb->prev = NULL;
@@ -479,6 +480,7 @@
 		q->duplicate = 0;
 		rootq->enqueue(skb2, rootq, to_free);
 		q->duplicate = dupsave;
+		rc_drop = NET_XMIT_SUCCESS;
 	}
 
 	/*
@@ -491,7 +493,7 @@
 		if (skb_is_gso(skb)) {
 			segs = netem_segment(skb, sch, to_free);
 			if (!segs)
-				return NET_XMIT_DROP;
+				return rc_drop;
 		} else {
 			segs = skb;
 		}
@@ -514,8 +516,10 @@
 			1<<(prandom_u32() % 8);
 	}
 
-	if (unlikely(sch->q.qlen >= sch->limit))
-		return qdisc_drop_all(skb, sch, to_free);
+	if (unlikely(sch->q.qlen >= sch->limit)) {
+		qdisc_drop_all(skb, sch, to_free);
+		return rc_drop;
+	}
 
 	qdisc_qstats_backlog_inc(sch, skb);
 
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index e5e70cf..1b16250 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1884,6 +1884,7 @@
 
 		pr_debug("%s: aborting association:%p\n", __func__, asoc);
 		sctp_primitive_ABORT(net, asoc, chunk);
+		iov_iter_revert(&msg->msg_iter, msg_len);
 
 		return 0;
 	}
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 2936ed1..3b47457 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -230,8 +230,6 @@
 	for (i = 0; i < stream->outcnt; i++)
 		SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
 
-	sched->init(stream);
-
 in:
 	sctp_stream_interleave_init(stream);
 	if (!incnt)
diff --git a/net/smc/smc.h b/net/smc/smc.h
index 5721416..adbdf19 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -113,9 +113,9 @@
 } __aligned(8);
 
 enum smc_urg_state {
-	SMC_URG_VALID,			/* data present */
-	SMC_URG_NOTYET,			/* data pending */
-	SMC_URG_READ			/* data was already read */
+	SMC_URG_VALID	= 1,			/* data present */
+	SMC_URG_NOTYET	= 2,			/* data pending */
+	SMC_URG_READ	= 3,			/* data was already read */
 };
 
 struct smc_connection {
diff --git a/net/socket.c b/net/socket.c
index 7d2703f..7a0ddf8 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -587,6 +587,7 @@
 		if (inode)
 			inode_lock(inode);
 		sock->ops->release(sock);
+		sock->sk = NULL;
 		if (inode)
 			inode_unlock(inode);
 		sock->ops = NULL;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index e1bdaf0..88c307e 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -377,11 +377,13 @@
 
 #define tipc_wait_for_cond(sock_, timeo_, condition_)			       \
 ({                                                                             \
+	DEFINE_WAIT_FUNC(wait_, woken_wake_function);                          \
 	struct sock *sk_;						       \
 	int rc_;							       \
 									       \
 	while ((rc_ = !(condition_))) {					       \
-		DEFINE_WAIT_FUNC(wait_, woken_wake_function);	               \
+		/* coupled with smp_wmb() in tipc_sk_proto_rcv() */            \
+		smp_rmb();                                                     \
 		sk_ = (sock_)->sk;					       \
 		rc_ = tipc_sk_sock_err((sock_), timeo_);		       \
 		if (rc_)						       \
@@ -1318,7 +1320,7 @@
 
 	if (unlikely(!dest)) {
 		dest = &tsk->peer;
-		if (!syn || dest->family != AF_TIPC)
+		if (!syn && dest->family != AF_TIPC)
 			return -EDESTADDRREQ;
 	}
 
@@ -1961,6 +1963,8 @@
 		return;
 	case SOCK_WAKEUP:
 		tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
+		/* coupled with smp_rmb() in tipc_wait_for_cond() */
+		smp_wmb();
 		tsk->cong_link_cnt--;
 		wakeup = true;
 		break;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index c754f3a..f601933 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -888,7 +888,7 @@
 	addr->hash ^= sk->sk_type;
 
 	__unix_remove_socket(sk);
-	u->addr = addr;
+	smp_store_release(&u->addr, addr);
 	__unix_insert_socket(&unix_socket_table[addr->hash], sk);
 	spin_unlock(&unix_table_lock);
 	err = 0;
@@ -1058,7 +1058,7 @@
 
 	err = 0;
 	__unix_remove_socket(sk);
-	u->addr = addr;
+	smp_store_release(&u->addr, addr);
 	__unix_insert_socket(list, sk);
 
 out_unlock:
@@ -1329,15 +1329,29 @@
 	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
 	otheru = unix_sk(other);
 
-	/* copy address information from listening to new sock*/
-	if (otheru->addr) {
-		refcount_inc(&otheru->addr->refcnt);
-		newu->addr = otheru->addr;
-	}
+	/* copy address information from listening to new sock
+	 *
+	 * The contents of *(otheru->addr) and otheru->path
+	 * are seen fully set up here, since we have found
+	 * otheru in hash under unix_table_lock.  Insertion
+	 * into the hash chain we'd found it in had been done
+	 * in an earlier critical area protected by unix_table_lock,
+	 * the same one where we'd set *(otheru->addr) contents,
+	 * as well as otheru->path and otheru->addr itself.
+	 *
+	 * Using smp_store_release() here to set newu->addr
+	 * is enough to make those stores, as well as stores
+	 * to newu->path visible to anyone who gets newu->addr
+	 * by smp_load_acquire().  IOW, the same warranties
+	 * as for unix_sock instances bound in unix_bind() or
+	 * in unix_autobind().
+	 */
 	if (otheru->path.dentry) {
 		path_get(&otheru->path);
 		newu->path = otheru->path;
 	}
+	refcount_inc(&otheru->addr->refcnt);
+	smp_store_release(&newu->addr, otheru->addr);
 
 	/* Set credentials */
 	copy_peercred(sk, other);
@@ -1451,7 +1465,7 @@
 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
 {
 	struct sock *sk = sock->sk;
-	struct unix_sock *u;
+	struct unix_address *addr;
 	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
 	int err = 0;
 
@@ -1466,19 +1480,15 @@
 		sock_hold(sk);
 	}
 
-	u = unix_sk(sk);
-	unix_state_lock(sk);
-	if (!u->addr) {
+	addr = smp_load_acquire(&unix_sk(sk)->addr);
+	if (!addr) {
 		sunaddr->sun_family = AF_UNIX;
 		sunaddr->sun_path[0] = 0;
 		err = sizeof(short);
 	} else {
-		struct unix_address *addr = u->addr;
-
 		err = addr->len;
 		memcpy(sunaddr, addr->name, addr->len);
 	}
-	unix_state_unlock(sk);
 	sock_put(sk);
 out:
 	return err;
@@ -2071,11 +2081,11 @@
 
 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
 {
-	struct unix_sock *u = unix_sk(sk);
+	struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
 
-	if (u->addr) {
-		msg->msg_namelen = u->addr->len;
-		memcpy(msg->msg_name, u->addr->name, u->addr->len);
+	if (addr) {
+		msg->msg_namelen = addr->len;
+		memcpy(msg->msg_name, addr->name, addr->len);
 	}
 }
 
@@ -2579,15 +2589,14 @@
 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
-	unix_state_lock(sk);
-	path = unix_sk(sk)->path;
-	if (!path.dentry) {
-		unix_state_unlock(sk);
+	if (!smp_load_acquire(&unix_sk(sk)->addr))
 		return -ENOENT;
-	}
+
+	path = unix_sk(sk)->path;
+	if (!path.dentry)
+		return -ENOENT;
 
 	path_get(&path);
-	unix_state_unlock(sk);
 
 	fd = get_unused_fd_flags(O_CLOEXEC);
 	if (fd < 0)
@@ -2828,7 +2837,7 @@
 			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
 			sock_i_ino(s));
 
-		if (u->addr) {
+		if (u->addr) {	// under unix_table_lock here
 			int i, len;
 			seq_putc(seq, ' ');
 
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 384c84e..3183d9b 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -10,7 +10,8 @@
 
 static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
 {
-	struct unix_address *addr = unix_sk(sk)->addr;
+	/* might or might not have unix_table_lock */
+	struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
 
 	if (!addr)
 		return 0;
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 5d3cce9..15eb5d3 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -75,6 +75,9 @@
 {
 	struct virtio_vsock *vsock = virtio_vsock_get();
 
+	if (!vsock)
+		return VMADDR_CID_ANY;
+
 	return vsock->guest_cid;
 }
 
@@ -584,10 +587,6 @@
 
 	virtio_vsock_update_guest_cid(vsock);
 
-	ret = vsock_core_init(&virtio_transport.transport);
-	if (ret < 0)
-		goto out_vqs;
-
 	vsock->rx_buf_nr = 0;
 	vsock->rx_buf_max_nr = 0;
 	atomic_set(&vsock->queued_replies, 0);
@@ -618,8 +617,6 @@
 	mutex_unlock(&the_virtio_vsock_mutex);
 	return 0;
 
-out_vqs:
-	vsock->vdev->config->del_vqs(vsock->vdev);
 out:
 	kfree(vsock);
 	mutex_unlock(&the_virtio_vsock_mutex);
@@ -637,6 +634,9 @@
 	flush_work(&vsock->event_work);
 	flush_work(&vsock->send_pkt_work);
 
+	/* Reset all connected sockets when the device disappears */
+	vsock_for_each_connected_socket(virtio_vsock_reset_sock);
+
 	vdev->config->reset(vdev);
 
 	mutex_lock(&vsock->rx_lock);
@@ -669,7 +669,6 @@
 
 	mutex_lock(&the_virtio_vsock_mutex);
 	the_virtio_vsock = NULL;
-	vsock_core_exit();
 	mutex_unlock(&the_virtio_vsock_mutex);
 
 	vdev->config->del_vqs(vdev);
@@ -702,14 +701,28 @@
 	virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
 	if (!virtio_vsock_workqueue)
 		return -ENOMEM;
+
 	ret = register_virtio_driver(&virtio_vsock_driver);
 	if (ret)
-		destroy_workqueue(virtio_vsock_workqueue);
+		goto out_wq;
+
+	ret = vsock_core_init(&virtio_transport.transport);
+	if (ret)
+		goto out_vdr;
+
+	return 0;
+
+out_vdr:
+	unregister_virtio_driver(&virtio_vsock_driver);
+out_wq:
+	destroy_workqueue(virtio_vsock_workqueue);
 	return ret;
+
 }
 
 static void __exit virtio_vsock_exit(void)
 {
+	vsock_core_exit();
 	unregister_virtio_driver(&virtio_vsock_driver);
 	destroy_workqueue(virtio_vsock_workqueue);
 }
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index d5d0f31..1c2d500 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1291,7 +1291,7 @@
  * definitions (the "2.4 GHz band", the "5 GHz band" and the "60GHz band"),
  * however it is safe for now to assume that a frequency rule should not be
  * part of a frequency's band if the start freq or end freq are off by more
- * than 2 GHz for the 2.4 and 5 GHz bands, and by more than 10 GHz for the
+ * than 2 GHz for the 2.4 and 5 GHz bands, and by more than 20 GHz for the
  * 60 GHz band.
  * This resolution can be lowered and should be considered as we add
  * regulatory rule support for other "bands".
@@ -1306,7 +1306,7 @@
 	 * with the Channel starting frequency above 45 GHz.
 	 */
 	u32 limit = freq_khz > 45 * ONE_GHZ_IN_KHZ ?
-			10 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ;
+			20 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ;
 	if (abs(freq_khz - freq_range->start_freq_khz) <= limit)
 		return true;
 	if (abs(freq_khz - freq_range->end_freq_khz) <= limit)
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index fef473c..f7f53f9 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -679,8 +679,7 @@
 	struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
 	int len, i, rc = 0;
 
-	if (!sock_flag(sk, SOCK_ZAPPED) ||
-	    addr_len != sizeof(struct sockaddr_x25) ||
+	if (addr_len != sizeof(struct sockaddr_x25) ||
 	    addr->sx25_family != AF_X25) {
 		rc = -EINVAL;
 		goto out;
@@ -695,9 +694,13 @@
 	}
 
 	lock_sock(sk);
-	x25_sk(sk)->source_addr = addr->sx25_addr;
-	x25_insert_socket(sk);
-	sock_reset_flag(sk, SOCK_ZAPPED);
+	if (sock_flag(sk, SOCK_ZAPPED)) {
+		x25_sk(sk)->source_addr = addr->sx25_addr;
+		x25_insert_socket(sk);
+		sock_reset_flag(sk, SOCK_ZAPPED);
+	} else {
+		rc = -EINVAL;
+	}
 	release_sock(sk);
 	SOCK_DEBUG(sk, "x25_bind: socket is bound\n");
 out:
@@ -813,8 +816,13 @@
 	sock->state = SS_CONNECTED;
 	rc = 0;
 out_put_neigh:
-	if (rc)
+	if (rc) {
+		read_lock_bh(&x25_list_lock);
 		x25_neigh_put(x25->neighbour);
+		x25->neighbour = NULL;
+		read_unlock_bh(&x25_list_lock);
+		x25->state = X25_STATE_0;
+	}
 out_put_route:
 	x25_route_put(rt);
 out:
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index 08c88de..11975ec 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -1444,7 +1444,10 @@
 			new = aa_label_merge(label, target, GFP_KERNEL);
 		if (IS_ERR_OR_NULL(new)) {
 			info = "failed to build target label";
-			error = PTR_ERR(new);
+			if (!new)
+				error = -ENOMEM;
+			else
+				error = PTR_ERR(new);
 			new = NULL;
 			perms.allow = 0;
 			goto audit;
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index f840010..33028c0 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -321,6 +321,7 @@
 		if (a->u.net->sk) {
 			struct sock *sk = a->u.net->sk;
 			struct unix_sock *u;
+			struct unix_address *addr;
 			int len = 0;
 			char *p = NULL;
 
@@ -351,14 +352,15 @@
 #endif
 			case AF_UNIX:
 				u = unix_sk(sk);
+				addr = smp_load_acquire(&u->addr);
+				if (!addr)
+					break;
 				if (u->path.dentry) {
 					audit_log_d_path(ab, " path=", &u->path);
 					break;
 				}
-				if (!u->addr)
-					break;
-				len = u->addr->len-sizeof(short);
-				p = &u->addr->name->sun_path[0];
+				len = addr->len-sizeof(short);
+				p = &addr->name->sun_path[0];
 				audit_log_format(ab, " path=");
 				if (*p)
 					audit_log_untrustedstring(ab, p);
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 5fb078a..009e469 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -532,7 +532,8 @@
 {
 	/* first let's check the buffer parameter's */
 	if (params->buffer.fragment_size == 0 ||
-	    params->buffer.fragments > U32_MAX / params->buffer.fragment_size)
+	    params->buffer.fragments > U32_MAX / params->buffer.fragment_size ||
+	    params->buffer.fragments == 0)
 		return -EINVAL;
 
 	/* now codec parameters */
diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
index de4af8a..5636e89 100644
--- a/sound/firewire/bebob/bebob.c
+++ b/sound/firewire/bebob/bebob.c
@@ -474,7 +474,19 @@
 	/* Focusrite, SaffirePro 26 I/O */
 	SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, 0x00000003, &saffirepro_26_spec),
 	/* Focusrite, SaffirePro 10 I/O */
-	SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, 0x00000006, &saffirepro_10_spec),
+	{
+		// The combination of vendor_id and model_id is the same as
+		// the one of Liquid Saffire 56.
+		.match_flags	= IEEE1394_MATCH_VENDOR_ID |
+				  IEEE1394_MATCH_MODEL_ID |
+				  IEEE1394_MATCH_SPECIFIER_ID |
+				  IEEE1394_MATCH_VERSION,
+		.vendor_id	= VEN_FOCUSRITE,
+		.model_id	= 0x000006,
+		.specifier_id	= 0x00a02d,
+		.version	= 0x010001,
+		.driver_data	= (kernel_ulong_t)&saffirepro_10_spec,
+	},
 	/* Focusrite, Saffire(no label and LE) */
 	SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, MODEL_FOCUSRITE_SAFFIRE_BOTH,
 			    &saffire_spec),
diff --git a/sound/firewire/motu/amdtp-motu.c b/sound/firewire/motu/amdtp-motu.c
index f0555a2..6c9b743 100644
--- a/sound/firewire/motu/amdtp-motu.c
+++ b/sound/firewire/motu/amdtp-motu.c
@@ -136,7 +136,9 @@
 		byte = (u8 *)buffer + p->pcm_byte_offset;
 
 		for (c = 0; c < channels; ++c) {
-			*dst = (byte[0] << 24) | (byte[1] << 16) | byte[2];
+			*dst = (byte[0] << 24) |
+			       (byte[1] << 16) |
+			       (byte[2] << 8);
 			byte += 3;
 			dst++;
 		}
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
index 617ff1a..27eb027 100644
--- a/sound/hda/hdac_i915.c
+++ b/sound/hda/hdac_i915.c
@@ -144,9 +144,9 @@
 		return -ENODEV;
 	if (!acomp->ops) {
 		request_module("i915");
-		/* 10s timeout */
+		/* 60s timeout */
 		wait_for_completion_timeout(&bind_complete,
-					    msecs_to_jiffies(10 * 1000));
+					    msecs_to_jiffies(60 * 1000));
 	}
 	if (!acomp->ops) {
 		dev_info(bus->dev, "couldn't bind with audio component\n");
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index fead0ac..3cbd211 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -936,6 +936,9 @@
 	SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x103c, 0x8456, "HP Z2 G4 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x103c, 0x8457, "HP Z2 G4 mini", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x103c, 0x8458, "HP Z2 G4 mini premium", CXT_FIXUP_HP_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
 	SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO),
 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index bf1ffca..8772931 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -118,6 +118,7 @@
 	unsigned int has_alc5505_dsp:1;
 	unsigned int no_depop_delay:1;
 	unsigned int done_hp_init:1;
+	unsigned int no_shutup_pins:1;
 
 	/* for PLL fix */
 	hda_nid_t pll_nid;
@@ -476,6 +477,14 @@
 		set_eapd(codec, *p, on);
 }
 
+static void alc_shutup_pins(struct hda_codec *codec)
+{
+	struct alc_spec *spec = codec->spec;
+
+	if (!spec->no_shutup_pins)
+		snd_hda_shutup_pins(codec);
+}
+
 /* generic shutup callback;
  * just turning off EAPD and a little pause for avoiding pop-noise
  */
@@ -486,7 +495,7 @@
 	alc_auto_setup_eapd(codec, false);
 	if (!spec->no_depop_delay)
 		msleep(200);
-	snd_hda_shutup_pins(codec);
+	alc_shutup_pins(codec);
 }
 
 /* generic EAPD initialization */
@@ -814,7 +823,7 @@
 	if (spec && spec->shutup)
 		spec->shutup(codec);
 	else
-		snd_hda_shutup_pins(codec);
+		alc_shutup_pins(codec);
 }
 
 static void alc_reboot_notify(struct hda_codec *codec)
@@ -2950,7 +2959,7 @@
 			(alc_get_coef0(codec) & 0x00ff) == 0x018) {
 		msleep(150);
 	}
-	snd_hda_shutup_pins(codec);
+	alc_shutup_pins(codec);
 }
 
 static struct coef_fw alc282_coefs[] = {
@@ -3053,14 +3062,15 @@
 	if (hp_pin_sense)
 		msleep(85);
 
-	snd_hda_codec_write(codec, hp_pin, 0,
-			    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+	if (!spec->no_shutup_pins)
+		snd_hda_codec_write(codec, hp_pin, 0,
+				    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
 
 	if (hp_pin_sense)
 		msleep(100);
 
 	alc_auto_setup_eapd(codec, false);
-	snd_hda_shutup_pins(codec);
+	alc_shutup_pins(codec);
 	alc_write_coef_idx(codec, 0x78, coef78);
 }
 
@@ -3166,15 +3176,16 @@
 	if (hp_pin_sense)
 		msleep(100);
 
-	snd_hda_codec_write(codec, hp_pin, 0,
-			    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+	if (!spec->no_shutup_pins)
+		snd_hda_codec_write(codec, hp_pin, 0,
+				    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
 
 	alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
 
 	if (hp_pin_sense)
 		msleep(100);
 	alc_auto_setup_eapd(codec, false);
-	snd_hda_shutup_pins(codec);
+	alc_shutup_pins(codec);
 	alc_write_coef_idx(codec, 0x43, 0x9614);
 }
 
@@ -3240,14 +3251,15 @@
 	/* NOTE: call this before clearing the pin, otherwise codec stalls */
 	alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
 
-	snd_hda_codec_write(codec, hp_pin, 0,
-			    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+	if (!spec->no_shutup_pins)
+		snd_hda_codec_write(codec, hp_pin, 0,
+				    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
 
 	if (hp_pin_sense)
 		msleep(100);
 
 	alc_auto_setup_eapd(codec, false);
-	snd_hda_shutup_pins(codec);
+	alc_shutup_pins(codec);
 }
 
 static void alc225_init(struct hda_codec *codec)
@@ -3334,7 +3346,7 @@
 		msleep(100);
 
 	alc_auto_setup_eapd(codec, false);
-	snd_hda_shutup_pins(codec);
+	alc_shutup_pins(codec);
 }
 
 static void alc_default_init(struct hda_codec *codec)
@@ -3388,14 +3400,15 @@
 	if (hp_pin_sense)
 		msleep(85);
 
-	snd_hda_codec_write(codec, hp_pin, 0,
-			    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+	if (!spec->no_shutup_pins)
+		snd_hda_codec_write(codec, hp_pin, 0,
+				    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
 
 	if (hp_pin_sense)
 		msleep(100);
 
 	alc_auto_setup_eapd(codec, false);
-	snd_hda_shutup_pins(codec);
+	alc_shutup_pins(codec);
 }
 
 static void alc294_hp_init(struct hda_codec *codec)
@@ -3412,8 +3425,9 @@
 
 	msleep(100);
 
-	snd_hda_codec_write(codec, hp_pin, 0,
-			    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+	if (!spec->no_shutup_pins)
+		snd_hda_codec_write(codec, hp_pin, 0,
+				    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
 
 	alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */
 	alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
@@ -5007,16 +5021,12 @@
 	}
 }
 
-static void alc_no_shutup(struct hda_codec *codec)
-{
-}
-
 static void alc_fixup_no_shutup(struct hda_codec *codec,
 				const struct hda_fixup *fix, int action)
 {
 	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
 		struct alc_spec *spec = codec->spec;
-		spec->shutup = alc_no_shutup;
+		spec->no_shutup_pins = 1;
 	}
 }
 
@@ -5602,6 +5612,7 @@
 	ALC294_FIXUP_ASUS_SPK,
 	ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
 	ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
+	ALC255_FIXUP_ACER_HEADSET_MIC,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -6546,6 +6557,16 @@
 		.chained = true,
 		.chain_id = ALC285_FIXUP_LENOVO_HEADPHONE_NOISE
 	},
+	[ALC255_FIXUP_ACER_HEADSET_MIC] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x19, 0x03a11130 },
+			{ 0x1a, 0x90a60140 }, /* use as internal mic */
+			{ }
+		},
+		.chained = true,
+		.chain_id = ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC
+	},
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6565,6 +6586,7 @@
 	SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
 	SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
 	SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
 	SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X),
@@ -6596,6 +6618,7 @@
 	SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
 	SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
 	SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
+	SND_PCI_QUIRK(0x1028, 0x0738, "Dell Precision 5820", ALC269_FIXUP_NO_SHUTUP),
 	SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
 	SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
 	SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
@@ -6670,11 +6693,13 @@
 	SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
 	SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
 	SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
+	SND_PCI_QUIRK(0x103c, 0x802e, "HP Z240 SFF", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x103c, 0x802f, "HP Z240", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x820d, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
 	SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
 	SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360),
-	SND_PCI_QUIRK(0x103c, 0x82bf, "HP", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
-	SND_PCI_QUIRK(0x103c, 0x82c0, "HP", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
 	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
@@ -6690,7 +6715,6 @@
 	SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
 	SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
 	SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
-	SND_PCI_QUIRK(0x1043, 0x14a1, "ASUS UX533FD", ALC294_FIXUP_ASUS_SPK),
 	SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
 	SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
@@ -7306,6 +7330,10 @@
 	SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_SPK,
 		{0x12, 0x90a60130},
 		{0x17, 0x90170110},
+		{0x21, 0x03211020}),
+	SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_SPK,
+		{0x12, 0x90a60130},
+		{0x17, 0x90170110},
 		{0x21, 0x04211020}),
 	SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
 		ALC295_STANDARD_PINS,
diff --git a/sound/soc/codecs/rt274.c b/sound/soc/codecs/rt274.c
index d88e673..18a931c 100644
--- a/sound/soc/codecs/rt274.c
+++ b/sound/soc/codecs/rt274.c
@@ -1126,8 +1126,11 @@
 		return ret;
 	}
 
-	regmap_read(rt274->regmap,
+	ret = regmap_read(rt274->regmap,
 		RT274_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &val);
+	if (ret)
+		return ret;
+
 	if (val != RT274_VENDOR_ID) {
 		dev_err(&i2c->dev,
 			"Device with ID register %#x is not rt274\n", val);
diff --git a/sound/soc/codecs/rt5682.h b/sound/soc/codecs/rt5682.h
index 8068140..cdd659f 100644
--- a/sound/soc/codecs/rt5682.h
+++ b/sound/soc/codecs/rt5682.h
@@ -849,18 +849,18 @@
 #define RT5682_SCLK_SRC_PLL2			(0x2 << 13)
 #define RT5682_SCLK_SRC_SDW			(0x3 << 13)
 #define RT5682_SCLK_SRC_RCCLK			(0x4 << 13)
-#define RT5682_PLL1_SRC_MASK			(0x3 << 10)
-#define RT5682_PLL1_SRC_SFT			10
-#define RT5682_PLL1_SRC_MCLK			(0x0 << 10)
-#define RT5682_PLL1_SRC_BCLK1			(0x1 << 10)
-#define RT5682_PLL1_SRC_SDW			(0x2 << 10)
-#define RT5682_PLL1_SRC_RC			(0x3 << 10)
-#define RT5682_PLL2_SRC_MASK			(0x3 << 8)
-#define RT5682_PLL2_SRC_SFT			8
-#define RT5682_PLL2_SRC_MCLK			(0x0 << 8)
-#define RT5682_PLL2_SRC_BCLK1			(0x1 << 8)
-#define RT5682_PLL2_SRC_SDW			(0x2 << 8)
-#define RT5682_PLL2_SRC_RC			(0x3 << 8)
+#define RT5682_PLL2_SRC_MASK			(0x3 << 10)
+#define RT5682_PLL2_SRC_SFT			10
+#define RT5682_PLL2_SRC_MCLK			(0x0 << 10)
+#define RT5682_PLL2_SRC_BCLK1			(0x1 << 10)
+#define RT5682_PLL2_SRC_SDW			(0x2 << 10)
+#define RT5682_PLL2_SRC_RC			(0x3 << 10)
+#define RT5682_PLL1_SRC_MASK			(0x3 << 8)
+#define RT5682_PLL1_SRC_SFT			8
+#define RT5682_PLL1_SRC_MCLK			(0x0 << 8)
+#define RT5682_PLL1_SRC_BCLK1			(0x1 << 8)
+#define RT5682_PLL1_SRC_SDW			(0x2 << 8)
+#define RT5682_PLL1_SRC_RC			(0x3 << 8)
 
 
 
diff --git a/sound/soc/fsl/imx-audmux.c b/sound/soc/fsl/imx-audmux.c
index 392d5ee..99e07b0 100644
--- a/sound/soc/fsl/imx-audmux.c
+++ b/sound/soc/fsl/imx-audmux.c
@@ -86,49 +86,49 @@
 	if (!buf)
 		return -ENOMEM;
 
-	ret = snprintf(buf, PAGE_SIZE, "PDCR: %08x\nPTCR: %08x\n",
+	ret = scnprintf(buf, PAGE_SIZE, "PDCR: %08x\nPTCR: %08x\n",
 		       pdcr, ptcr);
 
 	if (ptcr & IMX_AUDMUX_V2_PTCR_TFSDIR)
-		ret += snprintf(buf + ret, PAGE_SIZE - ret,
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret,
 				"TxFS output from %s, ",
 				audmux_port_string((ptcr >> 27) & 0x7));
 	else
-		ret += snprintf(buf + ret, PAGE_SIZE - ret,
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret,
 				"TxFS input, ");
 
 	if (ptcr & IMX_AUDMUX_V2_PTCR_TCLKDIR)
-		ret += snprintf(buf + ret, PAGE_SIZE - ret,
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret,
 				"TxClk output from %s",
 				audmux_port_string((ptcr >> 22) & 0x7));
 	else
-		ret += snprintf(buf + ret, PAGE_SIZE - ret,
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret,
 				"TxClk input");
 
-	ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+	ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
 
 	if (ptcr & IMX_AUDMUX_V2_PTCR_SYN) {
-		ret += snprintf(buf + ret, PAGE_SIZE - ret,
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret,
 				"Port is symmetric");
 	} else {
 		if (ptcr & IMX_AUDMUX_V2_PTCR_RFSDIR)
-			ret += snprintf(buf + ret, PAGE_SIZE - ret,
+			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
 					"RxFS output from %s, ",
 					audmux_port_string((ptcr >> 17) & 0x7));
 		else
-			ret += snprintf(buf + ret, PAGE_SIZE - ret,
+			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
 					"RxFS input, ");
 
 		if (ptcr & IMX_AUDMUX_V2_PTCR_RCLKDIR)
-			ret += snprintf(buf + ret, PAGE_SIZE - ret,
+			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
 					"RxClk output from %s",
 					audmux_port_string((ptcr >> 12) & 0x7));
 		else
-			ret += snprintf(buf + ret, PAGE_SIZE - ret,
+			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
 					"RxClk input");
 	}
 
-	ret += snprintf(buf + ret, PAGE_SIZE - ret,
+	ret += scnprintf(buf + ret, PAGE_SIZE - ret,
 			"\nData received from %s\n",
 			audmux_port_string((pdcr >> 13) & 0x7));
 
diff --git a/sound/soc/intel/boards/broadwell.c b/sound/soc/intel/boards/broadwell.c
index 7b0ee67..78ec97b 100644
--- a/sound/soc/intel/boards/broadwell.c
+++ b/sound/soc/intel/boards/broadwell.c
@@ -192,7 +192,7 @@
 		.stream_name = "Loopback",
 		.cpu_dai_name = "Loopback Pin",
 		.platform_name = "haswell-pcm-audio",
-		.dynamic = 0,
+		.dynamic = 1,
 		.codec_name = "snd-soc-dummy",
 		.codec_dai_name = "snd-soc-dummy-dai",
 		.trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
diff --git a/sound/soc/intel/boards/haswell.c b/sound/soc/intel/boards/haswell.c
index eab1f43..a402298 100644
--- a/sound/soc/intel/boards/haswell.c
+++ b/sound/soc/intel/boards/haswell.c
@@ -146,7 +146,7 @@
 		.stream_name = "Loopback",
 		.cpu_dai_name = "Loopback Pin",
 		.platform_name = "haswell-pcm-audio",
-		.dynamic = 0,
+		.dynamic = 1,
 		.codec_name = "snd-soc-dummy",
 		.codec_dai_name = "snd-soc-dummy-dai",
 		.trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index f8d35c5..252ff3f 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -2039,19 +2039,19 @@
 		out = is_connected_output_ep(w, NULL, NULL);
 	}
 
-	ret = snprintf(buf, PAGE_SIZE, "%s: %s%s  in %d out %d",
+	ret = scnprintf(buf, PAGE_SIZE, "%s: %s%s  in %d out %d",
 		       w->name, w->power ? "On" : "Off",
 		       w->force ? " (forced)" : "", in, out);
 
 	if (w->reg >= 0)
-		ret += snprintf(buf + ret, PAGE_SIZE - ret,
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret,
 				" - R%d(0x%x) mask 0x%x",
 				w->reg, w->reg, w->mask << w->shift);
 
-	ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+	ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
 
 	if (w->sname)
-		ret += snprintf(buf + ret, PAGE_SIZE - ret, " stream %s %s\n",
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret, " stream %s %s\n",
 				w->sname,
 				w->active ? "active" : "inactive");
 
@@ -2064,7 +2064,7 @@
 			if (!p->connect)
 				continue;
 
-			ret += snprintf(buf + ret, PAGE_SIZE - ret,
+			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
 					" %s  \"%s\" \"%s\"\n",
 					(rdir == SND_SOC_DAPM_DIR_IN) ? "in" : "out",
 					p->name ? p->name : "static",
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index b455930..ec73d83 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -370,6 +370,20 @@
 	return argv + i;
 }
 
+/* on per cpu maps we must copy the provided value on all value instances */
+static void fill_per_cpu_value(struct bpf_map_info *info, void *value)
+{
+	unsigned int i, n, step;
+
+	if (!map_is_per_cpu(info->type))
+		return;
+
+	n = get_possible_cpus();
+	step = round_up(info->value_size, 8);
+	for (i = 1; i < n; i++)
+		memcpy(value + i * step, value, info->value_size);
+}
+
 static int parse_elem(char **argv, struct bpf_map_info *info,
 		      void *key, void *value, __u32 key_size, __u32 value_size,
 		      __u32 *flags, __u32 **value_fd)
@@ -449,6 +463,8 @@
 			argv = parse_bytes(argv, "value", value, value_size);
 			if (!argv)
 				return -1;
+
+			fill_per_cpu_value(info, value);
 		}
 
 		return parse_elem(argv, info, key, NULL, key_size, value_size,
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 0de024a..bbba0d6 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -109,13 +109,14 @@
 
 static int prog_fd_by_tag(unsigned char *tag)
 {
-	struct bpf_prog_info info = {};
-	__u32 len = sizeof(info);
 	unsigned int id = 0;
 	int err;
 	int fd;
 
 	while (true) {
+		struct bpf_prog_info info = {};
+		__u32 len = sizeof(info);
+
 		err = bpf_prog_get_next_id(id, &id);
 		if (err) {
 			p_err("%s", strerror(errno));
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 6c1e7ce..53c11fc 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -1589,13 +1589,8 @@
 		.force_header = false,
 	};
 	struct perf_evsel *ev2;
-	static bool init;
 	u64 val;
 
-	if (!init) {
-		perf_stat__init_shadow_stats();
-		init = true;
-	}
 	if (!evsel->stats)
 		perf_evlist__alloc_stats(script->session->evlist, false);
 	if (evsel_script(evsel->leader)->gnum++ == 0)
@@ -1658,7 +1653,7 @@
 		return;
 	}
 
-	if (PRINT_FIELD(TRACE)) {
+	if (PRINT_FIELD(TRACE) && sample->raw_data) {
 		event_format__fprintf(evsel->tp_format, sample->cpu,
 				      sample->raw_data, sample->raw_size, fp);
 	}
@@ -2214,6 +2209,8 @@
 
 	signal(SIGINT, sig_handler);
 
+	perf_stat__init_shadow_stats();
+
 	/* override event processing functions */
 	if (script->show_task_events) {
 		script->tool.comm = process_comm_event;
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 22ab8e6..3f43aed 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -2263,19 +2263,30 @@
 
 static bool perf_evlist__add_vfs_getname(struct perf_evlist *evlist)
 {
-	struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname");
+	bool found = false;
+	struct perf_evsel *evsel, *tmp;
+	struct parse_events_error err = { .idx = 0, };
+	int ret = parse_events(evlist, "probe:vfs_getname*", &err);
 
-	if (IS_ERR(evsel))
+	if (ret)
 		return false;
 
-	if (perf_evsel__field(evsel, "pathname") == NULL) {
+	evlist__for_each_entry_safe(evlist, evsel, tmp) {
+		if (!strstarts(perf_evsel__name(evsel), "probe:vfs_getname"))
+			continue;
+
+		if (perf_evsel__field(evsel, "pathname")) {
+			evsel->handler = trace__vfs_getname;
+			found = true;
+			continue;
+		}
+
+		list_del_init(&evsel->node);
+		evsel->evlist = NULL;
 		perf_evsel__delete(evsel);
-		return false;
 	}
 
-	evsel->handler = trace__vfs_getname;
-	perf_evlist__add(evlist, evsel);
-	return true;
+	return found;
 }
 
 static struct perf_evsel *perf_evsel__new_pgfault(u64 config)
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 1ccbd33..383674f 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -134,7 +134,12 @@
 	if (!cpu_list)
 		return cpu_map__read_all_cpu_map();
 
-	if (!isdigit(*cpu_list))
+	/*
+	 * must handle the case of empty cpumap to cover
+	 * TOPOLOGY header for NUMA nodes with no CPU
+	 * ( e.g., because of CPU hotplug)
+	 */
+	if (!isdigit(*cpu_list) && *cpu_list != '\0')
 		goto out;
 
 	while (isdigit(*cpu_list)) {
@@ -181,8 +186,10 @@
 
 	if (nr_cpus > 0)
 		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
-	else
+	else if (*cpu_list != '\0')
 		cpus = cpu_map__default_new();
+	else
+		cpus = cpu_map__dummy_new();
 invalid:
 	free(tmp_cpus);
 out:
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 6e70cc0..a701a8a 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -87,6 +87,11 @@
 	return GELF_ST_TYPE(sym->st_info);
 }
 
+static inline uint8_t elf_sym__visibility(const GElf_Sym *sym)
+{
+	return GELF_ST_VISIBILITY(sym->st_other);
+}
+
 #ifndef STT_GNU_IFUNC
 #define STT_GNU_IFUNC 10
 #endif
@@ -111,7 +116,9 @@
 	return elf_sym__type(sym) == STT_NOTYPE &&
 		sym->st_name != 0 &&
 		sym->st_shndx != SHN_UNDEF &&
-		sym->st_shndx != SHN_ABS;
+		sym->st_shndx != SHN_ABS &&
+		elf_sym__visibility(sym) != STV_HIDDEN &&
+		elf_sym__visibility(sym) != STV_INTERNAL;
 }
 
 static bool elf_sym__filter(GElf_Sym *sym)
diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h
index 315a44f..84fd6f1 100644
--- a/tools/testing/selftests/bpf/bpf_util.h
+++ b/tools/testing/selftests/bpf/bpf_util.h
@@ -13,7 +13,7 @@
 	unsigned int start, end, possible_cpus = 0;
 	char buff[128];
 	FILE *fp;
-	int n;
+	int len, n, i, j = 0;
 
 	fp = fopen(fcpu, "r");
 	if (!fp) {
@@ -21,17 +21,27 @@
 		exit(1);
 	}
 
-	while (fgets(buff, sizeof(buff), fp)) {
-		n = sscanf(buff, "%u-%u", &start, &end);
-		if (n == 0) {
-			printf("Failed to retrieve # possible CPUs!\n");
-			exit(1);
-		} else if (n == 1) {
-			end = start;
-		}
-		possible_cpus = start == 0 ? end + 1 : 0;
-		break;
+	if (!fgets(buff, sizeof(buff), fp)) {
+		printf("Failed to read %s!\n", fcpu);
+		exit(1);
 	}
+
+	len = strlen(buff);
+	for (i = 0; i <= len; i++) {
+		if (buff[i] == ',' || buff[i] == '\0') {
+			buff[i] = '\0';
+			n = sscanf(&buff[j], "%u-%u", &start, &end);
+			if (n <= 0) {
+				printf("Failed to retrieve # possible CPUs!\n");
+				exit(1);
+			} else if (n == 1) {
+				end = start;
+			}
+			possible_cpus += end - start + 1;
+			j = i + 1;
+		}
+	}
+
 	fclose(fp);
 
 	return possible_cpus;
diff --git a/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh b/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
index bab13dd..0d26b5e 100755
--- a/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
+++ b/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
@@ -37,6 +37,10 @@
 		exit $ksft_skip
 	fi
 
+	present_cpus=`cat $SYSFS/devices/system/cpu/present`
+	present_max=${present_cpus##*-}
+	echo "present_cpus = $present_cpus present_max = $present_max"
+
 	echo -e "\t Cpus in online state: $online_cpus"
 
 	offline_cpus=`cat $SYSFS/devices/system/cpu/offline`
@@ -151,6 +155,8 @@
 online_max=0
 offline_cpus=0
 offline_max=0
+present_cpus=0
+present_max=0
 
 while getopts e:ahp: opt; do
 	case $opt in
@@ -190,9 +196,10 @@
 	online_cpu_expect_success $online_max
 
 	if [[ $offline_cpus -gt 0 ]]; then
-		echo -e "\t offline to online to offline: cpu $offline_max"
-		online_cpu_expect_success $offline_max
-		offline_cpu_expect_success $offline_max
+		echo -e "\t offline to online to offline: cpu $present_max"
+		online_cpu_expect_success $present_max
+		offline_cpu_expect_success $present_max
+		online_cpu $present_max
 	fi
 	exit 0
 else
diff --git a/tools/testing/selftests/firmware/fw_lib.sh b/tools/testing/selftests/firmware/fw_lib.sh
index 6c5f1b2..1cbb12e 100755
--- a/tools/testing/selftests/firmware/fw_lib.sh
+++ b/tools/testing/selftests/firmware/fw_lib.sh
@@ -91,7 +91,7 @@
 	if [ "$TEST_REQS_FW_SYSFS_FALLBACK" = "yes" ]; then
 		if [ ! "$HAS_FW_LOADER_USER_HELPER" = "yes" ]; then
 			echo "usermode helper disabled so ignoring test"
-			exit $ksft_skip
+			exit 0
 		fi
 	fi
 }
diff --git a/tools/testing/selftests/gpio/gpio-mockup-chardev.c b/tools/testing/selftests/gpio/gpio-mockup-chardev.c
index f8d468f..aaa1e9f 100644
--- a/tools/testing/selftests/gpio/gpio-mockup-chardev.c
+++ b/tools/testing/selftests/gpio/gpio-mockup-chardev.c
@@ -37,7 +37,7 @@
 	struct libmnt_table *tb;
 	struct libmnt_iter *itr = NULL;
 	struct libmnt_fs *fs;
-	int found = 0;
+	int found = 0, ret;
 
 	cxt = mnt_new_context();
 	if (!cxt)
@@ -58,8 +58,11 @@
 			break;
 		}
 	}
-	if (found)
-		asprintf(path, "%s/gpio", mnt_fs_get_target(fs));
+	if (found) {
+		ret = asprintf(path, "%s/gpio", mnt_fs_get_target(fs));
+		if (ret < 0)
+			err(EXIT_FAILURE, "failed to format string");
+	}
 
 	mnt_free_iter(itr);
 	mnt_free_context(cxt);
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 6fd8c08..fb5d2d1 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -590,7 +590,7 @@
 	 * already exist.
 	 */
 	region = (struct userspace_mem_region *) userspace_mem_region_find(
-		vm, guest_paddr, guest_paddr + npages * vm->page_size);
+		vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1);
 	if (region != NULL)
 		TEST_ASSERT(false, "overlapping userspace_mem_region already "
 			"exists\n"
@@ -606,15 +606,10 @@
 		region = region->next) {
 		if (region->region.slot == slot)
 			break;
-		if ((guest_paddr <= (region->region.guest_phys_addr
-				+ region->region.memory_size))
-			&& ((guest_paddr + npages * vm->page_size)
-				>= region->region.guest_phys_addr))
-			break;
 	}
 	if (region != NULL)
 		TEST_ASSERT(false, "A mem region with the requested slot "
-			"or overlapping physical memory range already exists.\n"
+			"already exists.\n"
 			"  requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
 			"  existing slot: %u paddr: 0x%lx size: 0x%lx",
 			slot, guest_paddr, npages,
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 919aa2a..9a3764a 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -18,6 +18,6 @@
 KSFT_KHDR_INSTALL := 1
 include ../lib.mk
 
-$(OUTPUT)/reuseport_bpf_numa: LDFLAGS += -lnuma
+$(OUTPUT)/reuseport_bpf_numa: LDLIBS += -lnuma
 $(OUTPUT)/tcp_mmap: LDFLAGS += -lpthread
 $(OUTPUT)/tcp_inq: LDFLAGS += -lpthread
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
index 47ed6ce..c9ff2b4 100644
--- a/tools/testing/selftests/netfilter/Makefile
+++ b/tools/testing/selftests/netfilter/Makefile
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 # Makefile for netfilter selftests
 
-TEST_PROGS := nft_trans_stress.sh
+TEST_PROGS := nft_trans_stress.sh nft_nat.sh
 
 include ../lib.mk
diff --git a/tools/testing/selftests/netfilter/config b/tools/testing/selftests/netfilter/config
index 1017313..59caa8f 100644
--- a/tools/testing/selftests/netfilter/config
+++ b/tools/testing/selftests/netfilter/config
@@ -1,2 +1,2 @@
 CONFIG_NET_NS=y
-NF_TABLES_INET=y
+CONFIG_NF_TABLES_INET=y
diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh
new file mode 100755
index 0000000..8ec7668
--- /dev/null
+++ b/tools/testing/selftests/netfilter/nft_nat.sh
@@ -0,0 +1,762 @@
+#!/bin/bash
+#
+# This test is for basic NAT functionality: snat, dnat, redirect, masquerade.
+#
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+	echo "SKIP: Could not run test without nft tool"
+	exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+	echo "SKIP: Could not run test without ip tool"
+	exit $ksft_skip
+fi
+
+ip netns add ns0
+ip netns add ns1
+ip netns add ns2
+
+ip link add veth0 netns ns0 type veth peer name eth0 netns ns1
+ip link add veth1 netns ns0 type veth peer name eth0 netns ns2
+
+ip -net ns0 link set lo up
+ip -net ns0 link set veth0 up
+ip -net ns0 addr add 10.0.1.1/24 dev veth0
+ip -net ns0 addr add dead:1::1/64 dev veth0
+
+ip -net ns0 link set veth1 up
+ip -net ns0 addr add 10.0.2.1/24 dev veth1
+ip -net ns0 addr add dead:2::1/64 dev veth1
+
+for i in 1 2; do
+  ip -net ns$i link set lo up
+  ip -net ns$i link set eth0 up
+  ip -net ns$i addr add 10.0.$i.99/24 dev eth0
+  ip -net ns$i route add default via 10.0.$i.1
+  ip -net ns$i addr add dead:$i::99/64 dev eth0
+  ip -net ns$i route add default via dead:$i::1
+done
+
+bad_counter()
+{
+	local ns=$1
+	local counter=$2
+	local expect=$3
+
+	echo "ERROR: $counter counter in $ns has unexpected value (expected $expect)" 1>&2
+	ip netns exec $ns nft list counter inet filter $counter 1>&2
+}
+
+check_counters()
+{
+	ns=$1
+	local lret=0
+
+	cnt=$(ip netns exec $ns nft list counter inet filter ns0in | grep -q "packets 1 bytes 84")
+	if [ $? -ne 0 ]; then
+		bad_counter $ns ns0in "packets 1 bytes 84"
+		lret=1
+	fi
+	cnt=$(ip netns exec $ns nft list counter inet filter ns0out | grep -q "packets 1 bytes 84")
+	if [ $? -ne 0 ]; then
+		bad_counter $ns ns0out "packets 1 bytes 84"
+		lret=1
+	fi
+
+	expect="packets 1 bytes 104"
+	cnt=$(ip netns exec $ns nft list counter inet filter ns0in6 | grep -q "$expect")
+	if [ $? -ne 0 ]; then
+		bad_counter $ns ns0in6 "$expect"
+		lret=1
+	fi
+	cnt=$(ip netns exec $ns nft list counter inet filter ns0out6 | grep -q "$expect")
+	if [ $? -ne 0 ]; then
+		bad_counter $ns ns0out6 "$expect"
+		lret=1
+	fi
+
+	return $lret
+}
+
+check_ns0_counters()
+{
+	local ns=$1
+	local lret=0
+
+	cnt=$(ip netns exec ns0 nft list counter inet filter ns0in | grep -q "packets 0 bytes 0")
+	if [ $? -ne 0 ]; then
+		bad_counter ns0 ns0in "packets 0 bytes 0"
+		lret=1
+	fi
+
+	cnt=$(ip netns exec ns0 nft list counter inet filter ns0in6 | grep -q "packets 0 bytes 0")
+	if [ $? -ne 0 ]; then
+		bad_counter ns0 ns0in6 "packets 0 bytes 0"
+		lret=1
+	fi
+
+	cnt=$(ip netns exec ns0 nft list counter inet filter ns0out | grep -q "packets 0 bytes 0")
+	if [ $? -ne 0 ]; then
+		bad_counter ns0 ns0out "packets 0 bytes 0"
+		lret=1
+	fi
+	cnt=$(ip netns exec ns0 nft list counter inet filter ns0out6 | grep -q "packets 0 bytes 0")
+	if [ $? -ne 0 ]; then
+		bad_counter ns0 ns0out6 "packets 0 bytes 0"
+		lret=1
+	fi
+
+	for dir in "in" "out" ; do
+		expect="packets 1 bytes 84"
+		cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns0 $ns$dir "$expect"
+			lret=1
+		fi
+
+		expect="packets 1 bytes 104"
+		cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir}6 | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns0 $ns$dir6 "$expect"
+			lret=1
+		fi
+	done
+
+	return $lret
+}
+
+reset_counters()
+{
+	for i in 0 1 2;do
+		ip netns exec ns$i nft reset counters inet > /dev/null
+	done
+}
+
+test_local_dnat6()
+{
+	local lret=0
+ip netns exec ns0 nft -f - <<EOF
+table ip6 nat {
+	chain output {
+		type nat hook output priority 0; policy accept;
+		ip6 daddr dead:1::99 dnat to dead:2::99
+	}
+}
+EOF
+	if [ $? -ne 0 ]; then
+		echo "SKIP: Could not add add ip6 dnat hook"
+		return $ksft_skip
+	fi
+
+	# ping netns1, expect rewrite to netns2
+	ip netns exec ns0 ping -q -c 1 dead:1::99 > /dev/null
+	if [ $? -ne 0 ]; then
+		lret=1
+		echo "ERROR: ping6 failed"
+		return $lret
+	fi
+
+	expect="packets 0 bytes 0"
+	for dir in "in6" "out6" ; do
+		cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns0 ns1$dir "$expect"
+			lret=1
+		fi
+	done
+
+	expect="packets 1 bytes 104"
+	for dir in "in6" "out6" ; do
+		cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns0 ns2$dir "$expect"
+			lret=1
+		fi
+	done
+
+	# expect 0 count in ns1
+	expect="packets 0 bytes 0"
+	for dir in "in6" "out6" ; do
+		cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns1 ns0$dir "$expect"
+			lret=1
+		fi
+	done
+
+	# expect 1 packet in ns2
+	expect="packets 1 bytes 104"
+	for dir in "in6" "out6" ; do
+		cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns2 ns0$dir "$expect"
+			lret=1
+		fi
+	done
+
+	test $lret -eq 0 && echo "PASS: ipv6 ping to ns1 was NATted to ns2"
+	ip netns exec ns0 nft flush chain ip6 nat output
+
+	return $lret
+}
+
+test_local_dnat()
+{
+	local lret=0
+ip netns exec ns0 nft -f - <<EOF
+table ip nat {
+	chain output {
+		type nat hook output priority 0; policy accept;
+		ip daddr 10.0.1.99 dnat to 10.0.2.99
+	}
+}
+EOF
+	# ping netns1, expect rewrite to netns2
+	ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null
+	if [ $? -ne 0 ]; then
+		lret=1
+		echo "ERROR: ping failed"
+		return $lret
+	fi
+
+	expect="packets 0 bytes 0"
+	for dir in "in" "out" ; do
+		cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns0 ns1$dir "$expect"
+			lret=1
+		fi
+	done
+
+	expect="packets 1 bytes 84"
+	for dir in "in" "out" ; do
+		cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns0 ns2$dir "$expect"
+			lret=1
+		fi
+	done
+
+	# expect 0 count in ns1
+	expect="packets 0 bytes 0"
+	for dir in "in" "out" ; do
+		cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns1 ns0$dir "$expect"
+			lret=1
+		fi
+	done
+
+	# expect 1 packet in ns2
+	expect="packets 1 bytes 84"
+	for dir in "in" "out" ; do
+		cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns2 ns0$dir "$expect"
+			lret=1
+		fi
+	done
+
+	test $lret -eq 0 && echo "PASS: ping to ns1 was NATted to ns2"
+
+	ip netns exec ns0 nft flush chain ip nat output
+
+	reset_counters
+	ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null
+	if [ $? -ne 0 ]; then
+		lret=1
+		echo "ERROR: ping failed"
+		return $lret
+	fi
+
+	expect="packets 1 bytes 84"
+	for dir in "in" "out" ; do
+		cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns1 ns1$dir "$expect"
+			lret=1
+		fi
+	done
+	expect="packets 0 bytes 0"
+	for dir in "in" "out" ; do
+		cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns0 ns2$dir "$expect"
+			lret=1
+		fi
+	done
+
+	# expect 1 count in ns1
+	expect="packets 1 bytes 84"
+	for dir in "in" "out" ; do
+		cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns0 ns0$dir "$expect"
+			lret=1
+		fi
+	done
+
+	# expect 0 packet in ns2
+	expect="packets 0 bytes 0"
+	for dir in "in" "out" ; do
+		cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns2 ns2$dir "$expect"
+			lret=1
+		fi
+	done
+
+	test $lret -eq 0 && echo "PASS: ping to ns1 OK after nat output chain flush"
+
+	return $lret
+}
+
+
+test_masquerade6()
+{
+	local lret=0
+
+	ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
+
+	ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
+	if [ $? -ne 0 ] ; then
+		echo "ERROR: cannot ping ns1 from ns2 via ipv6"
+		return 1
+		lret=1
+	fi
+
+	expect="packets 1 bytes 104"
+	for dir in "in6" "out6" ; do
+		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns1 ns2$dir "$expect"
+			lret=1
+		fi
+
+		cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns2 ns1$dir "$expect"
+			lret=1
+		fi
+	done
+
+	reset_counters
+
+# add masquerading rule
+ip netns exec ns0 nft -f - <<EOF
+table ip6 nat {
+	chain postrouting {
+		type nat hook postrouting priority 0; policy accept;
+		meta oif veth0 masquerade
+	}
+}
+EOF
+	ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
+	if [ $? -ne 0 ] ; then
+		echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerading"
+		lret=1
+	fi
+
+	# ns1 should have seen packets from ns0, due to masquerade
+	expect="packets 1 bytes 104"
+	for dir in "in6" "out6" ; do
+
+		cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns1 ns0$dir "$expect"
+			lret=1
+		fi
+
+		cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns2 ns1$dir "$expect"
+			lret=1
+		fi
+	done
+
+	# ns1 should not have seen packets from ns2, due to masquerade
+	expect="packets 0 bytes 0"
+	for dir in "in6" "out6" ; do
+		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns1 ns0$dir "$expect"
+			lret=1
+		fi
+
+		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns2 ns1$dir "$expect"
+			lret=1
+		fi
+	done
+
+	ip netns exec ns0 nft flush chain ip6 nat postrouting
+	if [ $? -ne 0 ]; then
+		echo "ERROR: Could not flush ip6 nat postrouting" 1>&2
+		lret=1
+	fi
+
+	test $lret -eq 0 && echo "PASS: IPv6 masquerade for ns2"
+
+	return $lret
+}
+
+test_masquerade()
+{
+	local lret=0
+
+	ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
+	ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+
+	ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
+	if [ $? -ne 0 ] ; then
+		echo "ERROR: canot ping ns1 from ns2"
+		lret=1
+	fi
+
+	expect="packets 1 bytes 84"
+	for dir in "in" "out" ; do
+		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns1 ns2$dir "$expect"
+			lret=1
+		fi
+
+		cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns2 ns1$dir "$expect"
+			lret=1
+		fi
+	done
+
+	reset_counters
+
+# add masquerading rule
+ip netns exec ns0 nft -f - <<EOF
+table ip nat {
+	chain postrouting {
+		type nat hook postrouting priority 0; policy accept;
+		meta oif veth0 masquerade
+	}
+}
+EOF
+	ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
+	if [ $? -ne 0 ] ; then
+		echo "ERROR: cannot ping ns1 from ns2 with active ip masquerading"
+		lret=1
+	fi
+
+	# ns1 should have seen packets from ns0, due to masquerade
+	expect="packets 1 bytes 84"
+	for dir in "in" "out" ; do
+		cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns1 ns0$dir "$expect"
+			lret=1
+		fi
+
+		cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns2 ns1$dir "$expect"
+			lret=1
+		fi
+	done
+
+	# ns1 should not have seen packets from ns2, due to masquerade
+	expect="packets 0 bytes 0"
+	for dir in "in" "out" ; do
+		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns1 ns0$dir "$expect"
+			lret=1
+		fi
+
+		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns2 ns1$dir "$expect"
+			lret=1
+		fi
+	done
+
+	ip netns exec ns0 nft flush chain ip nat postrouting
+	if [ $? -ne 0 ]; then
+		echo "ERROR: Could not flush nat postrouting" 1>&2
+		lret=1
+	fi
+
+	test $lret -eq 0 && echo "PASS: IP masquerade for ns2"
+
+	return $lret
+}
+
+test_redirect6()
+{
+	local lret=0
+
+	ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
+
+	ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
+	if [ $? -ne 0 ] ; then
+		echo "ERROR: cannnot ping ns1 from ns2 via ipv6"
+		lret=1
+	fi
+
+	expect="packets 1 bytes 104"
+	for dir in "in6" "out6" ; do
+		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns1 ns2$dir "$expect"
+			lret=1
+		fi
+
+		cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns2 ns1$dir "$expect"
+			lret=1
+		fi
+	done
+
+	reset_counters
+
+# add redirect rule
+ip netns exec ns0 nft -f - <<EOF
+table ip6 nat {
+	chain prerouting {
+		type nat hook prerouting priority 0; policy accept;
+		meta iif veth1 meta l4proto icmpv6 ip6 saddr dead:2::99 ip6 daddr dead:1::99 redirect
+	}
+}
+EOF
+	ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
+	if [ $? -ne 0 ] ; then
+		echo "ERROR: cannot ping ns1 from ns2 with active ip6 redirect"
+		lret=1
+	fi
+
+	# ns1 should have seen no packets from ns2, due to redirection
+	expect="packets 0 bytes 0"
+	for dir in "in6" "out6" ; do
+		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns1 ns0$dir "$expect"
+			lret=1
+		fi
+	done
+
+	# ns0 should have seen packets from ns2, due to redirect
+	expect="packets 1 bytes 104"
+	for dir in "in6" "out6" ; do
+		cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns1 ns0$dir "$expect"
+			lret=1
+		fi
+	done
+
+	ip netns exec ns0 nft delete table ip6 nat
+	if [ $? -ne 0 ]; then
+		echo "ERROR: Could not delete ip6 nat table" 1>&2
+		lret=1
+	fi
+
+	test $lret -eq 0 && echo "PASS: IPv6 redirection for ns2"
+
+	return $lret
+}
+
+test_redirect()
+{
+	local lret=0
+
+	ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
+	ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+
+	ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
+	if [ $? -ne 0 ] ; then
+		echo "ERROR: cannot ping ns1 from ns2"
+		lret=1
+	fi
+
+	expect="packets 1 bytes 84"
+	for dir in "in" "out" ; do
+		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns1 ns2$dir "$expect"
+			lret=1
+		fi
+
+		cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns2 ns1$dir "$expect"
+			lret=1
+		fi
+	done
+
+	reset_counters
+
+# add redirect rule
+ip netns exec ns0 nft -f - <<EOF
+table ip nat {
+	chain prerouting {
+		type nat hook prerouting priority 0; policy accept;
+		meta iif veth1 ip protocol icmp ip saddr 10.0.2.99 ip daddr 10.0.1.99 redirect
+	}
+}
+EOF
+	ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
+	if [ $? -ne 0 ] ; then
+		echo "ERROR: cannot ping ns1 from ns2 with active ip redirect"
+		lret=1
+	fi
+
+	# ns1 should have seen no packets from ns2, due to redirection
+	expect="packets 0 bytes 0"
+	for dir in "in" "out" ; do
+
+		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns1 ns0$dir "$expect"
+			lret=1
+		fi
+	done
+
+	# ns0 should have seen packets from ns2, due to redirect
+	expect="packets 1 bytes 84"
+	for dir in "in" "out" ; do
+		cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
+		if [ $? -ne 0 ]; then
+			bad_counter ns1 ns0$dir "$expect"
+			lret=1
+		fi
+	done
+
+	ip netns exec ns0 nft delete table ip nat
+	if [ $? -ne 0 ]; then
+		echo "ERROR: Could not delete nat table" 1>&2
+		lret=1
+	fi
+
+	test $lret -eq 0 && echo "PASS: IP redirection for ns2"
+
+	return $lret
+}
+
+
+# ip netns exec ns0 ping -c 1 -q 10.0.$i.99
+for i in 0 1 2; do
+ip netns exec ns$i nft -f - <<EOF
+table inet filter {
+	counter ns0in {}
+	counter ns1in {}
+	counter ns2in {}
+
+	counter ns0out {}
+	counter ns1out {}
+	counter ns2out {}
+
+	counter ns0in6 {}
+	counter ns1in6 {}
+	counter ns2in6 {}
+
+	counter ns0out6 {}
+	counter ns1out6 {}
+	counter ns2out6 {}
+
+	map nsincounter {
+		type ipv4_addr : counter
+		elements = { 10.0.1.1 : "ns0in",
+			     10.0.2.1 : "ns0in",
+			     10.0.1.99 : "ns1in",
+			     10.0.2.99 : "ns2in" }
+	}
+
+	map nsincounter6 {
+		type ipv6_addr : counter
+		elements = { dead:1::1 : "ns0in6",
+			     dead:2::1 : "ns0in6",
+			     dead:1::99 : "ns1in6",
+			     dead:2::99 : "ns2in6" }
+	}
+
+	map nsoutcounter {
+		type ipv4_addr : counter
+		elements = { 10.0.1.1 : "ns0out",
+			     10.0.2.1 : "ns0out",
+			     10.0.1.99: "ns1out",
+			     10.0.2.99: "ns2out" }
+	}
+
+	map nsoutcounter6 {
+		type ipv6_addr : counter
+		elements = { dead:1::1 : "ns0out6",
+			     dead:2::1 : "ns0out6",
+			     dead:1::99 : "ns1out6",
+			     dead:2::99 : "ns2out6" }
+	}
+
+	chain input {
+		type filter hook input priority 0; policy accept;
+		counter name ip saddr map @nsincounter
+		icmpv6 type { "echo-request", "echo-reply" } counter name ip6 saddr map @nsincounter6
+	}
+	chain output {
+		type filter hook output priority 0; policy accept;
+		counter name ip daddr map @nsoutcounter
+		icmpv6 type { "echo-request", "echo-reply" } counter name ip6 daddr map @nsoutcounter6
+	}
+}
+EOF
+done
+
+sleep 3
+# test basic connectivity
+for i in 1 2; do
+  ip netns exec ns0 ping -c 1 -q 10.0.$i.99 > /dev/null
+  if [ $? -ne 0 ];then
+  	echo "ERROR: Could not reach other namespace(s)" 1>&2
+	ret=1
+  fi
+
+  ip netns exec ns0 ping -c 1 -q dead:$i::99 > /dev/null
+  if [ $? -ne 0 ];then
+	echo "ERROR: Could not reach other namespace(s) via ipv6" 1>&2
+	ret=1
+  fi
+  check_counters ns$i
+  if [ $? -ne 0 ]; then
+	ret=1
+  fi
+
+  check_ns0_counters ns$i
+  if [ $? -ne 0 ]; then
+	ret=1
+  fi
+  reset_counters
+done
+
+if [ $ret -eq 0 ];then
+	echo "PASS: netns routing/connectivity: ns0 can reach ns1 and ns2"
+fi
+
+reset_counters
+test_local_dnat
+test_local_dnat6
+
+reset_counters
+test_masquerade
+test_masquerade6
+
+reset_counters
+test_redirect
+test_redirect6
+
+for i in 0 1 2; do ip netns del ns$i;done
+
+exit $ret
diff --git a/tools/testing/selftests/proc/.gitignore b/tools/testing/selftests/proc/.gitignore
index 82121a8..29bac5e 100644
--- a/tools/testing/selftests/proc/.gitignore
+++ b/tools/testing/selftests/proc/.gitignore
@@ -10,4 +10,5 @@
 /proc-uptime-002
 /read
 /self
+/setns-dcache
 /thread-self
diff --git a/tools/testing/selftests/proc/Makefile b/tools/testing/selftests/proc/Makefile
index 1c12c34..434d033 100644
--- a/tools/testing/selftests/proc/Makefile
+++ b/tools/testing/selftests/proc/Makefile
@@ -14,6 +14,7 @@
 TEST_GEN_PROGS += proc-uptime-002
 TEST_GEN_PROGS += read
 TEST_GEN_PROGS += self
+TEST_GEN_PROGS += setns-dcache
 TEST_GEN_PROGS += thread-self
 
 include ../lib.mk
diff --git a/tools/testing/selftests/proc/setns-dcache.c b/tools/testing/selftests/proc/setns-dcache.c
new file mode 100644
index 0000000..60ab197
--- /dev/null
+++ b/tools/testing/selftests/proc/setns-dcache.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright © 2019 Alexey Dobriyan <adobriyan@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*
+ * Test that setns(CLONE_NEWNET) points to new /proc/net content even
+ * if old one is in dcache.
+ *
+ * FIXME /proc/net/unix is under CONFIG_UNIX which can be disabled.
+ */
+#undef NDEBUG
+#include <assert.h>
+#include <errno.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/socket.h>
+
+static pid_t pid = -1;
+
+static void f(void)
+{
+	if (pid > 0) {
+		kill(pid, SIGTERM);
+	}
+}
+
+int main(void)
+{
+	int fd[2];
+	char _ = 0;
+	int nsfd;
+
+	atexit(f);
+
+	/* Check for privileges and syscall availability straight away. */
+	if (unshare(CLONE_NEWNET) == -1) {
+		if (errno == ENOSYS || errno == EPERM) {
+			return 4;
+		}
+		return 1;
+	}
+	/* Distinguisher between two otherwise empty net namespaces. */
+	if (socket(AF_UNIX, SOCK_STREAM, 0) == -1) {
+		return 1;
+	}
+
+	if (pipe(fd) == -1) {
+		return 1;
+	}
+
+	pid = fork();
+	if (pid == -1) {
+		return 1;
+	}
+
+	if (pid == 0) {
+		if (unshare(CLONE_NEWNET) == -1) {
+			return 1;
+		}
+
+		if (write(fd[1], &_, 1) != 1) {
+			return 1;
+		}
+
+		pause();
+
+		return 0;
+	}
+
+	if (read(fd[0], &_, 1) != 1) {
+		return 1;
+	}
+
+	{
+		char buf[64];
+		snprintf(buf, sizeof(buf), "/proc/%u/ns/net", pid);
+		nsfd = open(buf, O_RDONLY);
+		if (nsfd == -1) {
+			return 1;
+		}
+	}
+
+	/* Reliably pin dentry into dcache. */
+	(void)open("/proc/net/unix", O_RDONLY);
+
+	if (setns(nsfd, CLONE_NEWNET) == -1) {
+		return 1;
+	}
+
+	kill(pid, SIGTERM);
+	pid = 0;
+
+	{
+		char buf[4096];
+		ssize_t rv;
+		int fd;
+
+		fd = open("/proc/net/unix", O_RDONLY);
+		if (fd == -1) {
+			return 1;
+		}
+
+#define S "Num       RefCount Protocol Flags    Type St Inode Path\n"
+		rv = read(fd, buf, sizeof(buf));
+
+		assert(rv == strlen(S));
+		assert(memcmp(buf, S, strlen(S)) == 0);
+	}
+
+	return 0;
+}
diff --git a/tools/testing/selftests/rtc/rtctest.c b/tools/testing/selftests/rtc/rtctest.c
index e20b017..b206553 100644
--- a/tools/testing/selftests/rtc/rtctest.c
+++ b/tools/testing/selftests/rtc/rtctest.c
@@ -145,15 +145,12 @@
 
 	rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
 	ASSERT_NE(-1, rc);
-	EXPECT_NE(0, rc);
+	ASSERT_NE(0, rc);
 
 	/* Disable alarm interrupts */
 	rc = ioctl(self->fd, RTC_AIE_OFF, 0);
 	ASSERT_NE(-1, rc);
 
-	if (rc == 0)
-		return;
-
 	rc = read(self->fd, &data, sizeof(unsigned long));
 	ASSERT_NE(-1, rc);
 	TH_LOG("data: %lx", data);
@@ -202,7 +199,109 @@
 
 	rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
 	ASSERT_NE(-1, rc);
-	EXPECT_NE(0, rc);
+	ASSERT_NE(0, rc);
+
+	rc = read(self->fd, &data, sizeof(unsigned long));
+	ASSERT_NE(-1, rc);
+
+	rc = ioctl(self->fd, RTC_RD_TIME, &tm);
+	ASSERT_NE(-1, rc);
+
+	new = timegm((struct tm *)&tm);
+	ASSERT_EQ(new, secs);
+}
+
+TEST_F(rtc, alarm_alm_set_minute) {
+	struct timeval tv = { .tv_sec = 62 };
+	unsigned long data;
+	struct rtc_time tm;
+	fd_set readfds;
+	time_t secs, new;
+	int rc;
+
+	rc = ioctl(self->fd, RTC_RD_TIME, &tm);
+	ASSERT_NE(-1, rc);
+
+	secs = timegm((struct tm *)&tm) + 60 - tm.tm_sec;
+	gmtime_r(&secs, (struct tm *)&tm);
+
+	rc = ioctl(self->fd, RTC_ALM_SET, &tm);
+	if (rc == -1) {
+		ASSERT_EQ(EINVAL, errno);
+		TH_LOG("skip alarms are not supported.");
+		return;
+	}
+
+	rc = ioctl(self->fd, RTC_ALM_READ, &tm);
+	ASSERT_NE(-1, rc);
+
+	TH_LOG("Alarm time now set to %02d:%02d:%02d.",
+	       tm.tm_hour, tm.tm_min, tm.tm_sec);
+
+	/* Enable alarm interrupts */
+	rc = ioctl(self->fd, RTC_AIE_ON, 0);
+	ASSERT_NE(-1, rc);
+
+	FD_ZERO(&readfds);
+	FD_SET(self->fd, &readfds);
+
+	rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
+	ASSERT_NE(-1, rc);
+	ASSERT_NE(0, rc);
+
+	/* Disable alarm interrupts */
+	rc = ioctl(self->fd, RTC_AIE_OFF, 0);
+	ASSERT_NE(-1, rc);
+
+	rc = read(self->fd, &data, sizeof(unsigned long));
+	ASSERT_NE(-1, rc);
+	TH_LOG("data: %lx", data);
+
+	rc = ioctl(self->fd, RTC_RD_TIME, &tm);
+	ASSERT_NE(-1, rc);
+
+	new = timegm((struct tm *)&tm);
+	ASSERT_EQ(new, secs);
+}
+
+TEST_F(rtc, alarm_wkalm_set_minute) {
+	struct timeval tv = { .tv_sec = 62 };
+	struct rtc_wkalrm alarm = { 0 };
+	struct rtc_time tm;
+	unsigned long data;
+	fd_set readfds;
+	time_t secs, new;
+	int rc;
+
+	rc = ioctl(self->fd, RTC_RD_TIME, &alarm.time);
+	ASSERT_NE(-1, rc);
+
+	secs = timegm((struct tm *)&alarm.time) + 60 - alarm.time.tm_sec;
+	gmtime_r(&secs, (struct tm *)&alarm.time);
+
+	alarm.enabled = 1;
+
+	rc = ioctl(self->fd, RTC_WKALM_SET, &alarm);
+	if (rc == -1) {
+		ASSERT_EQ(EINVAL, errno);
+		TH_LOG("skip alarms are not supported.");
+		return;
+	}
+
+	rc = ioctl(self->fd, RTC_WKALM_RD, &alarm);
+	ASSERT_NE(-1, rc);
+
+	TH_LOG("Alarm time now set to %02d/%02d/%02d %02d:%02d:%02d.",
+	       alarm.time.tm_mday, alarm.time.tm_mon + 1,
+	       alarm.time.tm_year + 1900, alarm.time.tm_hour,
+	       alarm.time.tm_min, alarm.time.tm_sec);
+
+	FD_ZERO(&readfds);
+	FD_SET(self->fd, &readfds);
+
+	rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
+	ASSERT_NE(-1, rc);
+	ASSERT_NE(0, rc);
 
 	rc = read(self->fd, &data, sizeof(unsigned long));
 	ASSERT_NE(-1, rc);
diff --git a/tools/testing/selftests/seccomp/Makefile b/tools/testing/selftests/seccomp/Makefile
index fce7f4c..1760b3e 100644
--- a/tools/testing/selftests/seccomp/Makefile
+++ b/tools/testing/selftests/seccomp/Makefile
@@ -9,7 +9,7 @@
 CFLAGS += -Wl,-no-as-needed -Wall
 
 seccomp_bpf: seccomp_bpf.c ../kselftest_harness.h
-	$(CC) $(CFLAGS) $(LDFLAGS) -lpthread $< -o $@
+	$(CC) $(CFLAGS) $(LDFLAGS) $< -lpthread -o $@
 
 TEST_PROGS += $(BINARIES)
 EXTRA_CLEAN := $(BINARIES)
diff --git a/tools/testing/selftests/timers/Makefile b/tools/testing/selftests/timers/Makefile
index c02683c..7656c7c 100644
--- a/tools/testing/selftests/timers/Makefile
+++ b/tools/testing/selftests/timers/Makefile
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 CFLAGS += -O3 -Wl,-no-as-needed -Wall
-LDFLAGS += -lrt -lpthread -lm
+LDLIBS += -lrt -lpthread -lm
 
 # these are all "safe" tests that don't modify
 # system time or require escalated privileges
diff --git a/tools/testing/selftests/vm/gup_benchmark.c b/tools/testing/selftests/vm/gup_benchmark.c
index 36df551..9601bc2 100644
--- a/tools/testing/selftests/vm/gup_benchmark.c
+++ b/tools/testing/selftests/vm/gup_benchmark.c
@@ -22,6 +22,7 @@
 	__u64 size;
 	__u32 nr_pages_per_call;
 	__u32 flags;
+	__u64 expansion[10];	/* For future use */
 };
 
 int main(int argc, char **argv)